content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myBasicFunctions.R
\name{rversionAbove}
\alias{rversionAbove}
\title{Test if the R version used is above the given version}
\usage{
rversionAbove(majorT, minorT = 0)
}
\arguments{
\item{majorT}{a string or numerical value of the major version to use as comparison (for example 3)}
\item{minorT}{a string or numerical value of the minor version to use as comparison (for example 5 or "5.0"). By default it is 0.}
}
\value{
\code{TRUE} if you are using an R version which is at least the one provided and \code{FALSE} if your version is below.
}
\description{
Test if the R version used is above the given version
}
|
/man/rversionAbove.Rd
|
no_license
|
lldelisle/usefulLDfunctions
|
R
| false
| true
| 692
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/myBasicFunctions.R
\name{rversionAbove}
\alias{rversionAbove}
\title{Test if the R version used is above the given version}
\usage{
rversionAbove(majorT, minorT = 0)
}
\arguments{
\item{majorT}{a string or numerical value of the major version to use as comparison (for example 3)}
\item{minorT}{a string or numerical value of the minor version to use as comparison (for example 5 or "5.0"). By default it is 0.}
}
\value{
\code{TRUE} if you are using an R version which is at least the one provided and \code{FALSE} if your version is below.
}
\description{
Test if the R version used is above the given version
}
|
rm(list=ls())
#reading data into R:
powerconsumption <- read.table('D:/Xiao Xin/My Folder/Coursera/Data Science/04 Exploratory Data Analysis/Assignment 1/household_power_consumption.txt',sep=";",nrows= 2075260, header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting the full data to obtain the data related to two days:
sub_powerconsumption <- subset(powerconsumption, (powerconsumption$Date == "1/2/2007" | powerconsumption$Date== "2/2/2007"))
# Changing the class of Date variable from character to Date:
sub_powerconsumption$Date <- as.Date(sub_powerconsumption$Date, format = "%d/%m/%Y")
# Creating the plot1:
png("plot1.png", width = 480, height = 480)
hist(sub_powerconsumption$Global_active_power, main="Global Active Power",col='red',ylab= "Frequency", xlab="Global Active Power(kilowatts)")
dev.off()
|
/ExData_Plot1.R
|
no_license
|
xin1207/ExDataPlot1
|
R
| false
| false
| 863
|
r
|
rm(list=ls()) # NOTE(review): wiping the global environment inside a script is discouraged; prefer running in a fresh R session
#reading data into R:
# NOTE(review): absolute Windows path makes this script machine-specific; consider a relative path or a parameter.
# nrows is pinned to the known file size; na.strings="?" maps the file's missing-value marker to NA.
powerconsumption <- read.table('D:/Xiao Xin/My Folder/Coursera/Data Science/04 Exploratory Data Analysis/Assignment 1/household_power_consumption.txt',sep=";",nrows= 2075260, header=TRUE, quote= "", strip.white=TRUE, stringsAsFactors = FALSE, na.strings= "?")
# Subsetting the full data to obtain the data related to two days (Date is still a d/m/Y character column here):
sub_powerconsumption <- subset(powerconsumption, (powerconsumption$Date == "1/2/2007" | powerconsumption$Date== "2/2/2007"))
# Changing the class of Date variable from character to Date:
sub_powerconsumption$Date <- as.Date(sub_powerconsumption$Date, format = "%d/%m/%Y")
# Creating the plot1: a 480x480 PNG histogram of Global_active_power
png("plot1.png", width = 480, height = 480)
hist(sub_powerconsumption$Global_active_power, main="Global Active Power",col='red',ylab= "Frequency", xlab="Global Active Power(kilowatts)")
dev.off() # close the PNG device so plot1.png is flushed to disk
|
######################################
######################################
## FUNCTION FOR IMPLEMENTING SPARSE ##
## BAYESIAN GAM MODELS. ##
######################################
######################################
# This function implements sparse Bayesian generalized additive models in the exponential
# dispersion family with the spike-and-slab group lasso (SSGL) penalty.
# INPUTS:
# y = n x 1 vector of observations (y_1, ...., y_n)
# X = n x p design matrix, where ith row is (x_{i1},..., x_{ip})
# X.test = n.test x p design matrix for test data. If missing, then the program sets X.test=X
# and computes in-sample predictions on training data X. X.test must have the same
# number of columns as X, but not necessarily the same number of rows.
# df = number of basis functions to use. Default is d=6
# family = the exponential dispersion family.Allows for "gaussian", "binomial", "poisson",
# "negativebinomial", or "gamma".
# nb.size = known size parameter for negative binomial regression. Default is nb.size=1
# gamma.shape = known shape parameter for gamma regression. Default is gamma.shape=1
# nlambda0 = number of spike hyperparameters to use. Default is 20
# lambda0 = a grid of spike hyperparameters. If the user does not specify this, then the program
# chooses a grid automatically
# lambda1 = slab hyperparameter in SSGL. Default is lambda1=1
# a = shape hyperparameter for B(a,b) prior on mixing proportion. Default is a=1
# b = shape hyperparameter for B(a,b) prior on mixing proportion. Default is b=p
# max.iter = maximum number of iterations. Default is 100
# tol = convergence criteria. Default is 1e-6
# print.iter = boolean variable whether to print the current lambda0 in our grid. Default is TRUE
# OUTPUT:
# lambda0 = grid of lambda0's in descending order.
# f.pred = list of n.test x p matrices, where the lth matrix corresponds to the lth entry in our
# lambda0 grid. The jth column in each matrix corresponds to the function estimates for
# the jth covariate.
# mu.pred = n.test x L matrix of predicted mean response values based on test data in X.test. If
# X.test was left blank or X.test=X, then in-sample predictions on X.train are returned.
# classifications = p x L matrix of group classifications, where G is the number of groups. "1" indicates
# that the group was selected and "0" indicates that the group was not selected.
# The lth column in this matrix corresponds to the lth entry in our lambda0 grid.
# beta0 = L x 1 vector of intercept estimates. The lth entry of beta0 corresponds to the lth entry in
# our lambda grid.
# beta = dp x L matrix of regression coefficient estimates. The lth column of beta corresponds to the
# lth entry in our lambda0 grid.
# loss = L x 1 vector of negative log-likelihoods for each fit. The lth entry in loss corresponds to
# the lth entry in our lambda0 grid.
SBGAM = function(y, X, X.test, df=6,
                 family=c("gaussian","binomial","poisson","negativebinomial","gamma"),
                 nb.size=1, gamma.shape=1, nlambda0=20, lambda0, lambda1, a, b,
                 max.iter=100, tol = 1e-6, print.iter=TRUE) {

  ##################
  ### PRE-CHECKS ###
  ##################

  ## Coerce 'family' to exactly one of the supported families
  family = match.arg(family)

  ## Sample size, number of covariates, and basis dimension
  n = dim(X)[1]
  p = dim(X)[2]
  d = as.integer(df) # force d to be an integer

  ## Set weights all equal to 1, because the group sizes are all equal (d basis functions per covariate)
  weights = rep(1, p)

  ## Check that dimensions are conformal
  ## FIX: error message previously read "Non-conformal dimensions and X." (missing 'y')
  if (length(y) != dim(X)[1])
    stop("Non-conformal dimensions for y and X.")

  ## Check that degrees of freedom is >= 3
  if (df <= 2)
    stop("Please enter a positive integer greater than or equal to three for degrees of freedom.")

  ## Check that the data can be used for the respective family
  if (family == "poisson" || family == "negativebinomial") {
    if (any(y < 0))
      stop("All counts y must be greater than or equal to zero.")
    if (any(y - floor(y) != 0))
      stop("All counts y must be whole numbers.")
  }
  if (family == "negativebinomial") {
    ## Check that nb.size is strictly positive
    if (nb.size <= 0)
      stop("Size parameter for negative binomial density must be strictly positive.")
    ## Check that d*p is less than or equal to n
    if (d * p > n) {
      stop("For group-regularized negative binomial regression, we require the total
number of basis coefficients to be less than or equal to sample size.
Consider reducing the number of covariates.")
    }
  }
  if (family == "binomial") {
    if (any(y < 0))
      stop("All binary responses must be either '0' or '1.'")
    if (any(y > 1))
      stop("All binary responses must be either '0' or '1.'")
    if (any(y - floor(y) != 0))
      stop("All binary responses must be either '0' or '1.'")
  }
  if (family == "gamma") {
    if (any(y <= 0))
      stop("All responses y must be strictly positive.")
    if (gamma.shape <= 0)
      stop("Shape parameter for gamma density must be strictly positive.")
    ## Check that d*p is less than or equal to n
    if (d * p > n) {
      stop("For group-regularized gamma regression, we require the total number
of basis coefficients to be less than or equal to sample size.
Consider reducing the number of covariates.")
    }
  }

  ## Set test data as training data if test data not provided
  X = as.matrix(X)
  if (missing(X.test)) X.test = X
  n.test = dim(X.test)[1]
  ## Check that X and X.test have the same number of columns
  if (dim(X.test)[2] != dim(X)[2])
    stop("X and X.test should have the same number of columns.")

  ## Number of lambda0's must be at least one
  if (nlambda0 < 1)
    stop("The number of lambda0's must be at least one.")
  ## If the user supplied a lambda0 grid, validate it and let its length define the grid size.
  ## FIX: previously this assigned an unused variable 'nlambda', so a user-specified grid
  ## never overrode the default grid size as the original comment intended.
  if (!missing(lambda0)) {
    nlambda0 = length(lambda0) # override nlambda0 with the length of the user-supplied grid
    if (any(lambda0 <= 0))
      stop("All lambda0's should be strictly positive.")
  }

  ## Default parameters for missing arguments
  if (missing(lambda1)) lambda1 = 1
  if (missing(a)) a = 1
  if (missing(b)) b = p
  ## Check hyperparameters to be safe
  if ((lambda1 <= 0) || (a <= 0) || (b <= 0))
    stop("Please make sure that all hyperparameters are strictly positive.")

  ################################
  ### CONSTRUCT B-SPLINE BASIS ###
  ###   EXPANSION MATRICES     ###
  ################################

  ## Designate the groups of basis coefficients: group j holds the d coefficients of covariate j
  groups = rep(1:p, each = d)

  ## Create n x dp B-spline matrix X.tilde = [X.tilde_1, ..., X.tilde_p], where each X.tilde_j is n x d.
  ## X.tilde is for training data.
  X.tilde = matrix(0, nrow = n, ncol = d * p)
  if (family == "gaussian" || family == "binomial" || family == "poisson") {
    for (j in 1:p) {
      X.tilde[, ((j - 1) * d + 1):(j * d)] = splines::bs(X[, j], df = d, intercept = TRUE)
    }
  } else if (family == "negativebinomial" || family == "gamma") {
    for (j in 1:p) {
      ## Negative binomial and gamma regression are based on LSA to the MLE,
      ## so we need intercept=FALSE, otherwise MLE will return NA values
      X.tilde[, ((j - 1) * d + 1):(j * d)] = splines::bs(X[, j], df = d, intercept = FALSE)
    }
  }

  ## Create n.test x dp B-spline matrix X.tilde.test = [X.tilde.test_1, ..., X.tilde.test_p].
  ## X.tilde.test is for test data.
  X.tilde.test = matrix(0, nrow = n.test, ncol = d * p)
  if (family == "gaussian" || family == "binomial" || family == "poisson") {
    for (j in 1:p) {
      X.tilde.test[, ((j - 1) * d + 1):(j * d)] = splines::bs(X.test[, j], df = d, intercept = TRUE)
    }
  } else if (family == "negativebinomial" || family == "gamma") {
    for (j in 1:p) {
      ## Negative binomial and gamma regression are based on LSA to the MLE,
      ## so we need intercept=FALSE, otherwise MLE will return NA values
      X.tilde.test[, ((j - 1) * d + 1):(j * d)] = splines::bs(X.test[, j], df = d, intercept = FALSE)
    }
  }

  #######################################
  ### Fit the appropriate group model ###
  #######################################

  ## Fit sparse GAM with SSGL penalty.
  ## FIX: lambda1 was validated above but never forwarded to SSGL; it is now passed explicitly
  ## (the header documents lambda1 as the SSGL slab hyperparameter).
  if (!missing(lambda0)) {
    ssgl.mod = SSGL(y = y, X = X.tilde, X.test = X.tilde.test, groups = groups, family = family,
                    nb.size = nb.size, gamma.shape = gamma.shape, weights = weights,
                    nlambda0 = nlambda0, lambda0 = lambda0, lambda1 = lambda1, a = a, b = b,
                    max.iter = max.iter, tol = tol, print.iter = print.iter)
  } else {
    ssgl.mod = SSGL(y = y, X = X.tilde, X.test = X.tilde.test, groups = groups, family = family,
                    nb.size = nb.size, gamma.shape = gamma.shape, weights = weights,
                    nlambda0 = nlambda0, lambda1 = lambda1, a = a, b = b,
                    max.iter = max.iter, tol = tol, print.iter = print.iter)
  }

  ## Grid of spike parameters actually used by SSGL
  lambda0 = ssgl.mod$lambda0
  L = length(lambda0)
  ## Estimates of regression coefficients
  beta0 = ssgl.mod$beta0
  beta = ssgl.mod$beta
  ## Negative log-likelihood for each fit
  loss = ssgl.mod$loss
  ## Predicted mean responses on the test data
  mu.pred = ssgl.mod$mu.pred
  ## Group selection indicators
  classifications = ssgl.mod$classifications

  ## Compute the function evaluations for the individual functions.
  ## f.pred.ind is an n.test x p scratch matrix; column j holds f_j evaluated at the test points.
  f.pred.ind = matrix(0, nrow = n.test, ncol = p)
  ## Fill in the f.pred output: a list of matrices when L > 1, a single matrix when L == 1
  if (L > 1) {
    f.pred = vector(mode = "list", length = L)
    for (l in 1:L) {
      for (j in 1:p) {
        active = which(groups == j)
        f.pred.ind[, j] = X.tilde.test[, (d * (j - 1) + 1):(d * j)] %*% as.matrix(beta[active, l])
      }
      f.pred[[l]] = f.pred.ind # copy-on-assign: each list entry is an independent snapshot
    }
  } else if (L == 1) {
    for (j in 1:p) {
      active = which(groups == j)
      f.pred.ind[, j] = X.tilde.test[, (d * (j - 1) + 1):(d * j)] %*% as.matrix(beta[active])
    }
    f.pred = f.pred.ind
  }

  #####################
  ### Return a list ###
  #####################
  SBGAM.output <- list(lambda0 = lambda0,
                       f.pred = f.pred,
                       mu.pred = mu.pred,
                       classifications = classifications,
                       beta0 = beta0,
                       beta = beta,
                       loss = loss)
  # Return list
  return(SBGAM.output)
}
|
/R/SBGAM.R
|
no_license
|
cran/sparseGAM
|
R
| false
| false
| 10,717
|
r
|
######################################
######################################
## FUNCTION FOR IMPLEMENTING SPARSE ##
## BAYESIAN GAM MODELS. ##
######################################
######################################
# This function implements sparse Bayesian generalized additive models in the exponential
# dispersion family with the spike-and-slab group lasso (SSGL) penalty.
# INPUTS:
# y = n x 1 vector of observations (y_1, ...., y_n)
# X = n x p design matrix, where ith row is (x_{i1},..., x_{ip})
# X.test = n.test x p design matrix for test data. If missing, then the program sets X.test=X
# and computes in-sample predictions on training data X. X.test must have the same
# number of columns as X, but not necessarily the same number of rows.
# df = number of basis functions to use. Default is d=6
# family = the exponential dispersion family.Allows for "gaussian", "binomial", "poisson",
# "negativebinomial", or "gamma".
# nb.size = known size parameter for negative binomial regression. Default is nb.size=1
# gamma.shape = known shape parameter for gamma regression. Default is gamma.shape=1
# nlambda0 = number of spike hyperparameters to use. Default is 20
# lambda0 = a grid of spike hyperparameters. If the user does not specify this, then the program
# chooses a grid automatically
# lambda1 = slab hyperparameter in SSGL. Default is lambda1=1
# a = shape hyperparameter for B(a,b) prior on mixing proportion. Default is a=1
# b = shape hyperparameter for B(a,b) prior on mixing proportion. Default is b=p
# max.iter = maximum number of iterations. Default is 100
# tol = convergence criteria. Default is 1e-6
# print.iter = boolean variable whether to print the current lambda0 in our grid. Default is TRUE
# OUTPUT:
# lambda0 = grid of lambda0's in descending order.
# f.pred = list of n.test x p matrices, where the lth matrix corresponds to the lth entry in our
# lambda0 grid. The jth column in each matrix corresponds to the function estimates for
# the jth covariate.
# mu.pred = n.test x L matrix of predicted mean response values based on test data in X.test. If
# X.test was left blank or X.test=X, then in-sample predictions on X.train are returned.
# classifications = p x L matrix of group classifications, where G is the number of groups. "1" indicates
# that the group was selected and "0" indicates that the group was not selected.
# The lth column in this matrix corresponds to the lth entry in our lambda0 grid.
# beta0 = L x 1 vector of intercept estimates. The lth entry of beta0 corresponds to the lth entry in
# our lambda grid.
# beta = dp x L matrix of regression coefficient estimates. The lth column of beta corresponds to the
# lth entry in our lambda0 grid.
# loss = L x 1 vector of negative log-likelihoods for each fit. The lth entry in loss corresponds to
# the lth entry in our lambda0 grid.
## Fits a sparse Bayesian GAM with the SSGL penalty; see the header comments above
## for the full description of inputs and outputs.
SBGAM = function(y, X, X.test, df=6,
family=c("gaussian","binomial","poisson","negativebinomial","gamma"),
nb.size=1, gamma.shape=1, nlambda0=20, lambda0, lambda1, a, b,
max.iter=100, tol = 1e-6, print.iter=TRUE) {
##################
##################
### PRE-CHECKS ###
##################
##################
## Coercion of 'family' to exactly one supported value
family = match.arg(family)
## Number of groups and covariates overall
n = dim(X)[1]
p = dim(X)[2]
d = as.integer(df) # force d to be an integer
# Set weights all equal to 1, because the group sizes are all equal (d per covariate)
weights = rep(1,p)
## Check that dimensions are conformal
## NOTE(review): message is garbled — likely meant "Non-conformal dimensions for y and X."
if( length(y) != dim(X)[1] )
stop("Non-conformal dimensions and X.")
## Check that degrees of freedom is >=3
if( df <= 2 )
stop("Please enter a positive integer greater than or equal to three for degrees of freedom.")
## Check that the ladder is increasing and that all relevant hyperparameters are positive
## Check that the data can be used for the respective family
if(family=="poisson" || family=="negativebinomial"){
if(any(y<0))
stop("All counts y must be greater than or equal to zero.")
if(any(y-floor(y)!=0))
stop("All counts y must be whole numbers.")
}
if(family=="negativebinomial"){
## Check that nb.size is strictly positive
if (nb.size<=0)
stop("Size parameter for negative binomial density must be strictly positive.")
## Check that d*p is less than or equal to n
if(d*p > n) {
stop("For group-regularized negative binomial regression, we require the total
number of basis coefficients to be less than or equal to sample size.
Consider reducing the number of covariates.")
}
}
if(family=="binomial"){
if(any(y<0))
stop("All binary responses must be either '0' or '1.'")
if(any(y>1))
stop("All binary responses must be either '0' or '1.'")
if(any(y-floor(y)!=0))
stop("All binary responses must be either '0' or '1.'")
}
if(family=="gamma"){
if(any(y<=0))
stop("All responses y must be strictly positive.")
if(gamma.shape<=0)
stop("Shape parameter for gamma density must be strictly positive.")
## Check that d*p is less than or equal to n
if(d*p > n) {
stop("For group-regularized gamma regression, we require the total number
of basis coefficients to be less than or equal to sample size.
Consider reducing the number of covariates.")
}
}
## Set test data as training data if test data not provided.
X = as.matrix(X)
if(missing(X.test)) X.test = X
n.test = dim(X.test)[1]
## Check that X and X.test have the same number of columns
if(dim(X.test)[2] != dim(X)[2])
stop("X and X.test should have the same number of columns.")
## Number of lambdas
if(nlambda0 < 1)
stop("The number of lambda0's must be at least one.")
## If user specified lambda, check that all lambdas are greater than 0
if(!missing(lambda0)) {
nlambda = length(lambda0) # NOTE(review): 'nlambda' is never read again; likely intended to update 'nlambda0' so a user grid overrides the default — confirm against SSGL
if (any(lambda0<=0))
stop("All lambda0's should be strictly positive.")
}
## Default parameters for missing arguments
if(missing(lambda1)) lambda1 = 1
if(missing(a)) a = 1
if(missing(b)) b = p
## Check hyperparameters to be safe
if ((lambda1 <= 0) || (a <= 0) || (b <= 0))
stop("Please make sure that all hyperparameters are strictly positive.")
################################
################################
### CONSTRUCT B-SPLINE BASIS ###
### EXPANSION MATRICES ###
################################
################################
## Designate the groups of basis coefficients: group j holds the d coefficients of covariate j
groups = rep(1:p, each=d)
## Create n x dp B-spline matrix X.tilde = [X.tilde_1, ..., X.tilde_p], where each X.tilde_j is n x d
## X.tilde is for training data
X.tilde = matrix(0, nrow=n, ncol=d*p)
if(family=="gaussian" || family=="binomial" || family=="poisson"){
for(j in 1:p){
X.tilde[,((j-1)*d+1):(j*d)] = splines::bs(X[,j], df=d, intercept=TRUE)
}
} else if(family=="negativebinomial" || family=="gamma"){
for(j in 1:p){
## Negative binomial and gamma regression are based on LSA to the MLE,
## so we need intercept=FALSE, otherwise MLE will return NA values
X.tilde[,((j-1)*d+1):(j*d)] = splines::bs(X[,j], df=d, intercept=FALSE)
}
}
## Create n.test x dp B-spline matrix X.tilde.test = [X.tilde.test_1, ..., X.tilde.test_p]
## X.tilde.test is for test data
X.tilde.test = matrix(0, nrow=n.test, ncol=d*p)
if(family=="gaussian" || family=="binomial" || family=="poisson"){
for(j in 1:p){
X.tilde.test[,((j-1)*d+1):(j*d)] = splines::bs(X.test[,j], df=d, intercept=TRUE)
}
} else if(family=="negativebinomial" || family=="gamma"){
for(j in 1:p){
## Negative binomial and gamma regression are based on LSA to the MLE,
# so we need intercept=FALSE, otherwise MLE will return NA values
X.tilde.test[,((j-1)*d+1):(j*d)] = splines::bs(X.test[,j], df=d, intercept=FALSE)
}
}
#######################################
#######################################
### Fit the appropriate group model ###
#######################################
#######################################
## Fit sparse GAM with SSGL penalty
## NOTE(review): lambda1 is validated above but not forwarded to SSGL here; confirm that
## SSGL's default slab hyperparameter matches lambda1=1, otherwise the argument is silently ignored.
if(!missing(lambda0)){
ssgl.mod = SSGL(y=y, X=X.tilde, X.test=X.tilde.test, groups=groups, family=family,
nb.size=nb.size, gamma.shape=gamma.shape, weights=weights,
nlambda0=nlambda0, lambda0=lambda0, a=a, b=b, max.iter=max.iter,
tol=tol, print.iter=print.iter)
} else {
ssgl.mod = SSGL(y=y, X=X.tilde, X.test=X.tilde.test, groups=groups, family=family,
nb.size=nb.size, gamma.shape=gamma.shape, weights=weights,
nlambda0=nlambda0, a=a, b=b, max.iter=max.iter,
tol=tol, print.iter=print.iter)
}
## Grid of spike parameters actually used by SSGL
lambda0 = ssgl.mod$lambda0
L = length(lambda0)
## Estimates of regression coefficients
beta0 = ssgl.mod$beta0
beta = ssgl.mod$beta
## Estimate of loss function (negative log-likelihood per fit)
loss = ssgl.mod$loss
## Predictions for mu.pred on the test data
mu.pred = ssgl.mod$mu.pred
## Group selection indicators
classifications = ssgl.mod$classifications
## Compute the function evaluations for the individual functions
## (n.test x p scratch matrix; column j holds f_j at the test points)
f.pred.ind = matrix(0, nrow=n.test, ncol=p)
## Fill in f.hat list: one matrix per lambda0 when L > 1, a single matrix when L == 1
if (L>1){
f.pred = vector(mode = "list", length = L)
for(l in 1:L){
for(j in 1:p){
active = which(groups == j)
f.pred.ind[,j] = X.tilde.test[,(d*(j-1)+1):(d*j)] %*% as.matrix(beta[active,l])
}
f.pred[[l]] = f.pred.ind # copy-on-assign: each list entry is an independent snapshot
}
} else if(L==1){
for(j in 1:p){
active=which(groups==j)
f.pred.ind[,j] = X.tilde.test[,(d*(j-1)+1):(d*j)] %*% as.matrix(beta[active])
}
f.pred = f.pred.ind
}
#####################
#####################
### Return a list ###
#####################
#####################
SBGAM.output <- list(lambda0 = lambda0,
f.pred = f.pred,
mu.pred = mu.pred,
classifications = classifications,
beta0 = beta0,
beta = beta,
loss = loss)
# Return list
return(SBGAM.output)
}
|
#===========================================
# Smart Metering Uncertainty Forecasting
#
# Author Estevao "Steve" Alvarenga
# efsa@bath.edu
# Created in 10/Feb/17
#-------------------------------------------
# smuf_main combined functions optim & fcst
#===========================================

#===========================================
# Initialising
#===========================================
# NOTE(review): setwd() ties this run script to one machine's checkout layout;
# kept for backward compatibility, but consider project-relative paths.
setwd("~/GitRepos/smuf_rdev")
source("smuf_main-fxs.R")
savfile <- "smuf_run_0704_sd12_med.rds"   # output file for this run's results
wm01_00 <- readRDS("smuf_import-complete.rds")
importpar <- readRDS("smuf_import-parameter.rds")
s01 <- importpar[1]
s02 <- importpar[2]
s03 <- importpar[3]
sum_of_h <- importpar[4]
data_size <- importpar[5]

#===========================================
# Integrated Parameters
#===========================================
#cus_list to 1000, stp to 150 (detectcores), hrz_lim larger (0:167)*113), turn on CV
cus_list <- seq(1,100)
frontierstp <- 16        # Number of demand bins (Stepwise frontier for portfolio optimisation)
frontierexp <- 1.2       # Exponentiality of frontier steps
max.gen <- 300           # For genetic opt
waitgen <- 50            # For genetic opt
win_size <- c(4,24)      # Small and large win_size (select only 2)
win_selec <- win_size[2]
cross_overh <- 4         # Cross-over forced for fx_fcst_kds_quickvector
ahead_t <- seq(1, (12/sum_of_h)) # Up to s02
hrz_lim <- 0 #seq(5,6)*113 # Rolling forecasts steps {seq(0:167)*113} is comprehensive
in_sample_fr <- 1/6      # Fraction for diving in- and out-sample
crossvalsize <- 1        # Number of weeks in the end of in_sample used for crossvalidation
crossvalstps <- 16       # Steps used for multiple crossvalidation (Only KDE)
crossvalfocus <- c(12)   # What period is focused when running crossvalidation
is_wins_weeks <- 12      # Number of weeks used for in-sample (KDE uses win_size) & seasonality
sampling <- 1024         # For monte-carlo CRPS calculation
armalags <- c(5,5)       # Max lags for ARIMA fit in ARMA-GARCH model (use smuf_lags.R)
gof.min <- 0.2           # GoF crossover value to change ARMA-GARCH to KDS

#===========================================
# Call simulator
#===========================================
# FIX: was 'OptCVKD = F' / 'OptCVAG = F'; T/F are ordinary (reassignable) variables,
# so the literals TRUE/FALSE are used instead, and '<-' replaces '=' for assignment.
OptCVKD <- FALSE
OptCVAG <- FALSE
source("smuf_main-optgrp.R")
saveRDS(list(bighlpopgr, bighlpcrps), file = savfile)
|
/smuf_run_0704_sd12_med.R
|
no_license
|
efsalvarenga/smuf_rdev
|
R
| false
| false
| 2,631
|
r
|
#===========================================
# Smart Metering Uncertainty Forecasting
#
# Author Estevao "Steve" Alvarenga
# efsa@bath.edu
# Created in 10/Feb/17
#-------------------------------------------
# smuf_main combined functions optim & fcst
#===========================================
#===========================================
# Initialising
#===========================================
# NOTE(review): setwd() ties this script to one machine's checkout layout
setwd("~/GitRepos/smuf_rdev")
source("smuf_main-fxs.R")
# Output file for this run's results (style note: '<-' is preferred over '=' for assignment)
savfile = "smuf_run_0704_sd12_med.rds"
wm01_00 <- readRDS("smuf_import-complete.rds")
importpar <- readRDS("smuf_import-parameter.rds")
s01 <- importpar[1]
s02 <- importpar[2]
s03 <- importpar[3]
sum_of_h <- importpar[4]
data_size <- importpar[5]
#===========================================
# Integrated Parameters
#===========================================
#cus_list to 1000, stp to 150 (detectcores), hrz_lim larger (0:167)*113), turn on CV
cus_list <- seq(1,100)
frontierstp <- 16 # Number of demand bins (Stepwise frontier for portfolio optimisation)
frontierexp <- 1.2 # Exponentiality of frontier steps
max.gen <- 300 # For genetic opt
waitgen <- 50 # For genetic opt
win_size <- c(4,24) # Small and large win_size (select only 2)
win_selec <- win_size[2]
cross_overh <- 4 # Cross-over forced for fx_fcst_kds_quickvector
ahead_t <- seq(1, (12/sum_of_h)) # Up to s02
hrz_lim <- 0 #seq(5,6)*113 # Rolling forecasts steps {seq(0:167)*113} is comprehensive
in_sample_fr <- 1/6 # Fraction for diving in- and out-sample
crossvalsize <- 1 # Number of weeks in the end of in_sample used for crossvalidation
crossvalstps <- 16 # Steps used for multiple crossvalidation (Only KDE)
crossvalfocus <- c(12) # What period is focused when running crossvalidation
is_wins_weeks <- 12 # Number of weeks used for in-sample (KDE uses win_size) & seasonality
sampling <- 1024 # For monte-carlo CRPS calculation
armalags <- c(5,5) # Max lags for ARIMA fit in ARMA-GARCH model (use smuf_lags.R)
gof.min <- 0.2 # GoF crossover value to change ARMA-GARCH to KDS
#===========================================
# Call simulator
#===========================================
# NOTE(review): 'F' is a reassignable alias for FALSE; the literal FALSE is safer
OptCVKD = F
OptCVAG = F
source("smuf_main-optgrp.R")
# bighlpopgr / bighlpcrps are created by smuf_main-optgrp.R sourced above
saveRDS(list(bighlpopgr,bighlpcrps), file=savfile)
|
# """
# 3_post_imputation_functions
#
# These functions are Post-imputation Leave Parameter Functions [on post-imputation ACS data set]
# that execute the policy simulation after leave taking behavior has been established.
#
#
# """
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Table of Contents
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 0. LEAVEPROGRAM
# 1. impute_leave_length
# 1A. RunRandDraw - see 3_impute_functions.R, function 1Bc
# 2. CLONEFACTOR
# 3. PAY_SCHEDULE
# 4. ELIGIBILITYRULES
# 4A. FORMULA
# 5. EXTENDLEAVES
# 5A. runLogitEstimate - see 3_impute_functions.R, function 1Ba
# 6. UPTAKE
# 6A. check_caps
# 7. BENEFITS
# 8. BENEFITEFFECT
# 9. TOPOFF
# 10. DEPENDENTALLOWANCE
# 11. DIFF_ELIG
# 12. CLEANUP
# 12a. check_caps
# ============================ #
# 0. LEAVEPROGRAM
# ============================ #
# Baseline changes for addition of a leave program
# follows baseline changes of ACM model (see p.11 of ACM model description paper). Main change needed to base cleaning:
# Leave needers who did not take a leave in the absence of a program, and
# who said the reason that they did not take a leave was because they could not afford to
# take one, take a leave in the presence of a program.
# Baseline adjustment for the presence of a leave program (per the ACM model notes
# above): workers who needed a leave of a given type but did not take one, and whose
# sensitivity indicator shows they were constrained, are recoded as leave takers.
#
# Args:
#   d        - data frame with take_[type] and need_[type] columns for each leave type
#   sens_var - name of the sensitivity indicator column (1 = affected by the program)
# Returns the data frame with take_[type] set to 1 for the affected rows.
# Depends on the global vector `leave_types` defined elsewhere in this file.
LEAVEPROGRAM <- function(d, sens_var) {
  for (lt in leave_types) {
    take_col <- paste0("take_", lt)
    need_col <- paste0("need_", lt)
    # Rows where both indicators are observed and both equal 1 become takers;
    # all other rows keep their current take value (NAs never match).
    affected <- !is.na(d[, sens_var]) & !is.na(d[, need_col]) &
      d[, sens_var] == 1 & d[, need_col] == 1
    d[affected, take_col] <- 1
  }
  d
}
# ============================ #
# 1. impute_leave_length
# ============================ #
# function to impute leave length once leave taking behavior has been imputed
# currently impute method is hardcoded as a random draw from a specified distribution of FMLA observations
# but this is a candidate for modual imputation
# Impute leave lengths for observations whose leave-taking behavior has already been
# imputed, via a random draw from the FMLA leave-length distribution (see runRandDraw,
# 3_impute_functions.R function 1Bc).
# NOTE(review): parameters 'conditional' and 'test_cond' are accepted but never used
# in this body — confirm whether they can be dropped or were meant to feed the filters.
impute_leave_length <- function(d_train, d_test, conditional, test_cond, ext_resp_len,len_method) {
#Days of leave taken - currently takes length from most recent leave only
yvars <- c(own = "length_own",
illspouse = "length_illspouse",
illchild = "length_illchild",
illparent = "length_illparent",
matdis = "length_matdis",
bond = "length_bond")
# Leave lengths are the same, except for own leaves, which are instead taken from the distribution of leave takers in FMLA survey reporting
# receiving some pay from state programs.
# Training filters: FMLA rows with an observed positive length (plus demographic restrictions per type)
train_filts <- c(own = "length_own>0 & is.na(length_own)==FALSE",
illspouse = "length_illspouse>0 & is.na(length_illspouse)==FALSE & nevermarried == 0 & divorced == 0",
illchild = "length_illchild>0 & is.na(length_illchild)==FALSE",
illparent = "length_illparent>0 & is.na(length_illparent)==FALSE",
matdis = "length_matdis>0 & is.na(length_matdis)==FALSE & female == 1 & nochildren == 0",
bond = "length_bond>0 & is.na(length_bond)==FALSE & nochildren == 0")
# Test filters: rows imputed as leave takers of each type
test_filts <- c(own = "take_own==1",
illspouse = "take_illspouse==1 & nevermarried == 0 & divorced == 0",
illchild = "take_illchild==1",
illparent = "take_illparent==1",
matdis = "take_matdis==1 & female == 1 & nochildren == 0",
bond = "take_bond==1 & nochildren == 0")
# using random draw from leave distribution rather than KNN prediction for computational issues
#INPUTS: variable requiring imputation, conditionals to filter test and training data on,
# ACS or FMLA observations requiring imputed leave length (test data), FMLA observations constituting the
# sample from which to impute length from (training data), and presence/absence of program
# NOTE(review): the local name 'predict' shadows stats::predict within this function.
predict <- mapply(runRandDraw, yvar=yvars, train_filt=train_filts,test_filt=test_filts,
MoreArgs = list(d_train=d_train, d_test=d_test, ext_resp_len=ext_resp_len,
len_method= len_method), SIMPLIFY = FALSE)
# Outputs: data sets of imputed leave length values for ACS or FMLA observations requiring them
# merge imputed values with fmla data; a NULL entry means no rows needed that leave type,
# so its length columns are zero-filled instead.
count=0
for (i in predict) {
count=count+1
if (!is.null(i)) {
d_test <- merge(i, d_test, by="id",all.y=TRUE)
}
else {
d_test[paste0('length_',leave_types[count])] <- 0
d_test[paste0('squo_length_',leave_types[count])] <- 0
}
}
# NOTE(review): 'vars_name' is built here but not used later in this function — confirm intent.
vars_name=c()
for (i in leave_types) {
vars_name= c(vars_name, paste("length",i, sep="_"))
}
# replace leave taking and length NA's with zeros now
# wanted to distinguish between NAs and zeros in FMLA survey,
# but no need for that in ACS now that we're "certain" of ACS leave taking behavior
# We are "certain" because we only imputed leave takers/non-takers, discarding those with
# uncertain/ineligible status (take_[type]=NA).
for (i in leave_types) {
len_var=paste("length_",i,sep="")
squo_var=paste0('squo_', len_var)
take_var=paste("take_",i,sep="")
d_test[len_var] <- with(d_test, ifelse(is.na(get(len_var)),0,get(len_var)))
d_test[take_var] <- with(d_test, ifelse(is.na(get(take_var)),0,get(take_var)))
d_test[squo_var] <- with(d_test, ifelse(is.na(get(squo_var)),0,get(squo_var)))
}
return(d_test)
}
# ============================ #
# 1A. runRandDraw
# ============================ #
# see 3_impute_functions.R, function 1Bc
# ============================ #
# 2. CLONEFACTOR
# ============================ #
# allow users to clone ACS individuals
# Clone (duplicate) a random subset of ACS individuals to inflate the sample.
#
# Args:
#   d            - person-level data frame; must contain an 'id' column.
#   clone_factor - proportion of nrow(d) to clone; 0 disables cloning.
#
# Returns: d with round(clone_factor * nrow(d)) records sampled with
#   replacement appended, a clone_flag column (1 for clones, 0 otherwise),
#   and ids reset to sequential row numbers when clones were added.
CLONEFACTOR <- function(d, clone_factor) {
  # always create the flag so downstream code can rely on the column existing
  # even when no cloning is requested (previously it was only created when
  # clone_factor > 0)
  d$clone_flag <- 0
  if (clone_factor > 0) {
    num_clone <- round(clone_factor*nrow(d), digits=0)
    # draw ids with replacement, then pull the full record for each draw
    d_clones <- data.frame(sample(d$id,num_clone,replace=TRUE))
    colnames(d_clones)[1] <- "id"
    d_clones <- join(d_clones,d,by='id', type='left')
    d_clones$clone_flag <- 1
    d <- rbind(d,d_clones)
    # reset id var. seq_len is robust to the de-duplicated row names rbind
    # produces for repeated rows (as.numeric(rownames(d)) could previously
    # yield fractional ids like 11.1).
    d['id'] <- seq_len(nrow(d))
  }
  return(d)
}
# ============================ #
# 3. PAY_SCHEDULE
# ============================ #
# Calculate pay schedule for employer paid leave
# Assign each worker an employer pay schedule for their leave, plus the
# derived totals the rest of the model needs.
#
# Adds to d:
#   pay_schedule  - one of "no pay", "some pay, all weeks",
#                   "all pay, some weeks", "some pay, some weeks",
#                   "all pay, all weeks", drawn from Westat-2001-based
#                   conditional probabilities given prop_pay.
#   total_length  - total days of leave taken across all types.
#   total_leaves  - number of distinct leave types taken.
#   exhausted_by  - day on which employer pay runs out, for schedules where
#                   pay covers only part of the leave (NA otherwise).
#
# Returns: d with the columns above; temporary probability/rand columns dropped.
PAY_SCHEDULE <- function(d) {
  # two possible pay schedules: paid the same amount each week, or paid in full until exhausted
  # Here we randomly assign one of these three pay schedules
  # based on conditional probabilities of total pay received and pay schedules
  # probabilities are obtained from 2001 Westat survey which ACM used for this purpose
  # dist <- read.csv("pay_dist_prob.csv")
  # columns from this csv written manually to avoid dependency on csv file
  # proportion of pay received (prop_pay in FMLA data)
  # Westat 2001 survey: About how much of your usual pay did you receive in total?
  Total_paid=c("Less than half","Half","More than half")
  # Prob of 1st pay schedule - some pay, all weeks
  # Westat 2001 survey: Receive receive some pay for each pay period that you were on leave?
  Always_paid=c(0.6329781, 0.8209731, 0.9358463)
  # Prob of 2nd pay schedule - full pay, some weeks
  # Westat 2001 survey: If not, when you did receive pay, was it for your full salary?
  Fully_paid=c(0.3273122,0.3963387,0.3633615)
  # Prob of 3rd pay schedule - some pay, some weeks
  # Neither paid each pay period, nor receive full pay when they did receive pay.
  Neither_paid=1-Fully_paid
  d_prob=data.frame(Total_paid,Always_paid,Fully_paid,Neither_paid)
  # denote bucket of proportion of pay
  # NOTE: prop_pay of exactly 0 or 1 deliberately stays NA here; those cases
  # are assigned "no pay" / "all pay, all weeks" directly below.
  d <- d %>% mutate(Total_paid= ifelse(prop_pay>0 & prop_pay<.5,"Less than half",NA))
  d <- d %>% mutate(Total_paid= ifelse(prop_pay==.5, "Half" ,Total_paid))
  d <- d %>% mutate(Total_paid= ifelse(prop_pay>.5 & prop_pay<1, "More than half",Total_paid))
  # merge probabilities in
  d <- join(d,d_prob, type="left",match="all",by=c("Total_paid"))
  # assign pay schedules
  d['rand']=runif(nrow(d))
  d['rand2']=runif(nrow(d))
  d <- d %>% mutate(pay_schedule= ifelse(rand<Always_paid,"some pay, all weeks",NA))
  d <- d %>% mutate(pay_schedule= ifelse(rand>=Always_paid & rand2<Fully_paid,"all pay, some weeks",pay_schedule))
  d <- d %>% mutate(pay_schedule= ifelse(rand>=Always_paid & rand2>=Fully_paid,"some pay, some weeks",pay_schedule))
  d <- d %>% mutate(pay_schedule= ifelse(prop_pay==1,"all pay, all weeks",pay_schedule))
  d <- d %>% mutate(pay_schedule= ifelse(prop_pay==0,"no pay",pay_schedule))
  # total_length - number of days leave taken of all types
  d['total_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d <- d %>% mutate(total_length=ifelse(get(paste(take_var)) == 1, total_length+get(paste('length_',i,sep="")), total_length))
  }
  # count up number of types of leaves
  d['total_leaves']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d <- d %>% mutate(total_leaves = ifelse(get(paste(take_var))==1, total_leaves+1,total_leaves))
  }
  # Keep track of what day employer benefits will be exhausted for those receiving pay in some but not all of their leave
  # all pay, some weeks
  d <- d %>% mutate(exhausted_by=ifelse(pay_schedule=="all pay, some weeks",round(total_length*prop_pay, digits=0), NA))
  # some pay, some weeks - like ACM, assumes equal distribution of partiality among pay proportion and weeks taken
  d <- d %>% mutate(exhausted_by=ifelse(pay_schedule=="some pay, some weeks",round(total_length*sqrt(prop_pay), digits=0), exhausted_by))
  # clean up vars
  d <- d[, !(names(d) %in% c('rand','rand2','Always_paid','Total_paid','Fully_paid', 'Neither_paid'))]
  return(d)
}
# ============================ #
# 4. ELIGIBILITYRULES
# ============================ #
# apply user-specified eligibility criteria and set initial
# Apply user-specified program eligibility criteria and make the initial
# participation decision for each worker.
#
# Args:
#   d               - ACS person-level data frame.
#   earnings        - minimum annual earnings (WAGP), or NULL to skip.
#   weeks           - minimum weeks worked, or NULL to skip.
#   ann_hours       - minimum annual hours (weeks_worked*WKHP), or NULL to skip.
#   minsize         - minimum employer size, or NULL to skip.
#   base_bene_level - flat wage-replacement proportion paid by the program.
#   week_bene_min   - minimum weekly benefit payment (0 for none).
#   formula_prop_cuts / formula_value_cuts / formula_bene_levels -
#                     optional formulaic benefit schedule (see FORMULA).
#   elig_rule_logic - string combining the criterion names with logical
#                     operators, e.g. "earnings & (weeks | ann_hours)".
#   FEDGOV/STATEGOV/LOCALGOV/SELFEMP - logical switches for whether federal,
#                     state, local government, and self-employed workers
#                     (ACS COW codes 5/4/3/6-7) are covered.
#
# Returns: d with eligworker, benefit_prop, benefit_prop_temp, and particip
#   columns added/updated.
ELIGIBILITYRULES <- function(d, earnings=NULL, weeks=NULL, ann_hours=NULL, minsize=NULL,
                             base_bene_level, week_bene_min, formula_prop_cuts=NULL, formula_value_cuts=NULL,
                             formula_bene_levels=NULL, elig_rule_logic, FEDGOV, STATEGOV, LOCALGOV, SELFEMP) {
  # ----- apply eligibility rules logic to calculate initial participation ---------------
  # TODO: This should be redone in a more simple fashion once the input expected from the GUI is hammered out.
  # NOTE: the gsub/eval(parse(...)) approach is fragile — criterion names must
  # not be substrings of one another or of the substituted column expressions.
  # strip terms from those criteria in elig_rule_logic that have corresponding NULL values
  for (i in c('earnings', 'weeks', 'ann_hours', 'minsize')) {
    if (is.null(get(i))) {
      elig_rule_logic <- gsub(i,'TRUE',elig_rule_logic)
    }
  }
  # replace terms in logic string with appropriate conditionals
  elig_rule_logic <- gsub('earnings','WAGP>=earnings',elig_rule_logic)
  elig_rule_logic <- gsub('weeks','weeks_worked>=weeks',elig_rule_logic)
  elig_rule_logic <- gsub('ann_hours','weeks_worked*WKHP>=ann_hours',elig_rule_logic)
  elig_rule_logic <- gsub('minsize','emp_size>=minsize',elig_rule_logic)
  # create elig_worker flag based on elig_rule_logic
  d <- d %>% mutate(eligworker= ifelse(eval(parse(text=elig_rule_logic)), 1,0))
  # apply government worker filters (ACS class-of-worker codes)
  if (FEDGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==5,0,eligworker))
  }
  if (STATEGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==4,0,eligworker))
  }
  if (LOCALGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==3,0,eligworker))
  }
  # apply self employment filter
  if (SELFEMP==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==6 | COW==7,0,eligworker))
  }
  # ------ benefit calc --------------
  # if formulary benefits are not specificed, everyone will simply receive base_bene_level
  d["benefit_prop"] <- base_bene_level
  # adjust proportion of pay received if formulary benefits are specified;
  # different benefit levels for different incomes with cuts defined by either
  # proportion of mean state wage, or absolute wage values
  if (!is.null(formula_prop_cuts) | !is.null(formula_value_cuts)) {
    if (is.null(formula_bene_levels)) {
      stop('if formula_prop_cuts or formula_value_cuts are specified,
           formula_bene_levels must also be specified')
    }
    d <- FORMULA(d, formula_prop_cuts, formula_value_cuts, formula_bene_levels)
  }
  # A non-zero minimum weekly benefit payment will increase effective benefit prop for those that
  # would otherwise receive lower than that. Adjust bene_prop to account for that when
  # simulating participation decision. We're creating a throwaway bene_prop variable,
  # as we still want to use actual bene_prop for determining benefits received, then will
  # increase weekly payments at the end after all participation is determined.
  # BUG FIX: pmax (element-wise) instead of max, which collapsed the whole
  # column to a single scalar and gave every worker the same benefit_prop_temp.
  d <- d %>% mutate(benefit_prop_temp = pmax(week_bene_min/(WAGP/weeks_worked), benefit_prop))
  # calculate general participation decision based on employer pay vs state program pay
  # those who will receive more under the program will participate
  d["particip"] <- 0
  d["particip"] <- ifelse(d[,"eligworker"]==1 & d[,"prop_pay"]<d[,"benefit_prop_temp"],1,0)
  # those who exhaust employer benefits before leave ends will participate
  d["particip"] <- ifelse(d[,"eligworker"]==1 & !is.na(d[,'exhausted_by']),1,d[,"particip"])
  return(d)
}
# ============================ #
# 4A. FORMULA
# ============================ #
# subfunction to implement formulaic benefit payouts by wage,
# rather than a flat proportion for all participants
# TODO: There might be a simpler way to do this once the input expected from the GUI is hammered out.
# Apply a formulaic (bracketed) benefit schedule to benefit_prop.
#
# Exactly one of formula_prop_cuts (cut points as proportions of the mean
# weekly wage) or formula_value_cuts (cut points in absolute annual wage
# dollars) must be supplied, together with formula_bene_levels, which must
# have one more element than the cut vector. Each worker's benefit_prop is
# set to the level of the bracket their wage falls in.
#
# Args:
#   d                   - data frame with WAGP, weeks_worked, benefit_prop.
#   formula_prop_cuts   - ascending, positive numeric cuts (wage proportions), or NULL.
#   formula_value_cuts  - ascending, positive numeric cuts (wage dollars), or NULL.
#   formula_bene_levels - benefit proportions, one per bracket.
#
# Returns: d with benefit_prop updated (and mean_wage_prop added when
#   proportionate cuts are used).
# TODO: There might be a simpler way to do this once the input expected from the GUI is hammered out.
FORMULA <- function(d, formula_prop_cuts=NULL, formula_value_cuts=NULL, formula_bene_levels) {
  #-----------Validation Checks---------------
  # make sure exactly one of prop cuts and value cuts are specified
  if (!is.null(formula_prop_cuts) & !is.null(formula_value_cuts)) {
    stop("formula_prop_cuts and formula_value_cuts are both specified. Only one should be specified")
  }
  if (is.null(formula_prop_cuts) & is.null(formula_value_cuts)) {
    stop("Neither formula_prop_cuts and formula_value_cuts are specified. One must be specified")
  }
  # checks to make sure formula_cuts and values are positive and ascending
  if (!is.null(formula_prop_cuts)) {
    # make sure formula cuts and bene levels are proper length
    if (length(formula_prop_cuts)+1 != length(formula_bene_levels)) {
      stop("formula_bene_levels length must be one greater than formula_prop_cuts length")
    }
    prev_val=0
    for (i in formula_prop_cuts) {
      if (!is.numeric(i)) {
        stop("formula_prop_cuts must be numeric")
      }
      if (0>i) {
        stop("formula_prop_cuts must be positive")
      }
      if (prev_val>i) {
        stop("formula_prop_cuts must be in ascending order")
      }
      prev_val=i
    }
  }
  if (!is.null(formula_value_cuts)) {
    # make sure formula cuts and bene levels are proper length
    if (length(formula_value_cuts)+1 != length(formula_bene_levels)) {
      stop("formula_bene_levels length must be one greater than formula_value_cuts length")
    }
    prev_val=0
    for (i in formula_value_cuts) {
      if (!is.numeric(i)) {
        stop("formula_value_cuts must be numeric")
      }
      if (0>i) {
        stop("formula_value_cuts must be positive")
      }
      if (prev_val>i) {
        stop("formula_value_cuts must be nonduplicated, and in ascending order")
      }
      prev_val=i
    }
  }
  #------------------Adjust benefit levels: proportionate cuts----------------------
  if (!is.null(formula_prop_cuts)) {
    # establish mean wage of population, and everyone's proportion of that value
    mean_wage=mean(d$WAGP/d$weeks_worked)
    d['mean_wage_prop']=(d$WAGP/d$weeks_worked)/mean_wage
    # adjust benefit_prop accordingly
    # first interval of formula_bene_levels
    len_cuts=length(formula_prop_cuts)
    len_lvls=length(formula_bene_levels)
    d <- d %>% mutate(benefit_prop = ifelse(formula_prop_cuts[1]>mean_wage_prop,
                                            formula_bene_levels[1], benefit_prop))
    # last interval
    d <- d %>% mutate(benefit_prop = ifelse(formula_prop_cuts[len_cuts]<=mean_wage_prop,
                                            formula_bene_levels[len_lvls], benefit_prop))
    # rest of the intervals in between
    # BUG FIX: guard the interior-bracket loop. With a single cut point,
    # formula_prop_cuts[2:len_cuts] evaluated [2:1] = c(NA, cut1) and
    # produced NA benefit_prop values. Also removed a leftover debug print().
    if (len_cuts >= 2) {
      prev_val=formula_prop_cuts[1]
      lvl=1
      for (i in formula_prop_cuts[2:len_cuts]) {
        lvl=lvl+1
        d <- d %>% mutate(benefit_prop = ifelse(i>mean_wage_prop & prev_val<=mean_wage_prop,
                                                formula_bene_levels[lvl], benefit_prop))
        prev_val=i
      }
    }
  }
  #------------------Adjust benefit levels: absolute value cuts----------------------
  if (!is.null(formula_value_cuts)) {
    # adjust benefit_prop accordingly
    # first interval of formula_bene_levels
    len_cuts=length(formula_value_cuts)
    len_lvls=length(formula_bene_levels)
    d <- d %>% mutate(benefit_prop = ifelse(formula_value_cuts[1]>WAGP,
                                            formula_bene_levels[1], benefit_prop))
    # last interval
    d <- d %>% mutate(benefit_prop = ifelse(formula_value_cuts[len_cuts]<=WAGP,
                                            formula_bene_levels[len_lvls], benefit_prop))
    # rest of the intervals in between
    # BUG FIX: same single-cut guard as in the proportionate branch above
    if (len_cuts >= 2) {
      prev_val=formula_value_cuts[1]
      lvl=1
      for (i in formula_value_cuts[2:len_cuts]) {
        lvl=lvl+1
        d <- d %>% mutate(benefit_prop = ifelse(i>WAGP & prev_val<=WAGP,
                                                formula_bene_levels[lvl], benefit_prop))
        prev_val=i
      }
    }
  }
  return(d)
}
# ============================ #
# 5. EXTENDLEAVES
# ============================ #
# Option to simulate extension of leaves in the presence of an FMLA program
# Simulate the extension of leaves in the presence of a paid leave program.
#
# Args:
#   d_train         - FMLA data used to estimate the "longer leave" logit.
#   d_test          - ACS data with imputed leave taking/lengths.
#   wait_period     - program waiting period in days.
#   ext_base_effect - if TRUE, apply the ACM base extension effect.
#   extend_prob     - probability of an additional user-specified extension.
#   extend_days     - flat additional days for the user-specified extension.
#   extend_prop     - multiplicative factor for the user-specified extension.
#   fmla_protect    - if TRUE, cap extended leaves that were originally
#                     <= 12 weeks (60 work days) at 12 weeks.
#
# Returns: d_test with extended length_[type] columns, orig_len_[type]
#   copies of the pre-extension lengths, extend_flag, and total_length
#   recomputed to match.
EXTENDLEAVES <-function(d_train, d_test,wait_period, ext_base_effect,
                        extend_prob, extend_days, extend_prop, fmla_protect) {
  # copy original leave lengths
  for (i in leave_types) {
    len_var=paste("length_",i,sep="")
    orig_var=paste("orig_len_",i,sep="")
    d_test[orig_var] <- with(d_test, get(len_var))
  }
  # Base extension effect from ACM model (referred to as the "old" extension simulation there)
  # this is a candidate for modular imputation methods
  d_test["extend_flag"]=0
  if (ext_base_effect==TRUE) {
    # specifications
    # using ACM specifications
    formula <- "longerLeave ~ age + agesq + female"
    # subsetting data
    filt <- "TRUE"
    # weights
    weight <- "~ fixed_weight"
    # Run Estimation
    # INPUT: FMLA (training) data set, ACS (test) data set, logit regression model specification,
    # filter conditions, weight to use
    # runLogitEstimate is defined elsewhere (see 3_impute_functions.R, 1Ba)
    d_filt <- runLogitEstimate(d_train=d_train, d_test=d_test, formula=formula, test_filt=filt,
                               train_filt=filt, weight=weight, varname='longer_leave', create_dummies=TRUE)
    d_test <- merge(d_filt, d_test, by='id', all.y=TRUE)
    # OUTPUT: ACS data with imputed column indicating those taking a longer leave.
    # Following ACM implementation:
    # i. For workers who have leave lengths in the absence of a program that are
    # less than the waiting period for the program: the leave is extended for 1 week into the program.
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      d_test["extend_flag"] <- with(d_test, ifelse(get(len_var)<wait_period & particip==1 &
                                                     longer_leave == 1 & get(take_var)==1
                                                   ,1,extend_flag))
      # extend to wait_period + 5 work days (1 week into the program)
      d_test[len_var] <- with(d_test, ifelse(get(len_var)<wait_period & particip== 1 &
                                               longer_leave == 1 & get(take_var)==1
                                             ,get(len_var)+wait_period+5,get(len_var)))
      d_test["total_length"] <- with(d_test, ifelse(get(len_var)<wait_period & particip== 1 &
                                                      longer_leave == 1 & get(take_var)==1
                                                    ,total_length+wait_period+5, total_length))
    }
    # ii. For workers who do not receive any employer pay or who exhaust their
    # employer pay and then go on the program: The probability of extending a leave using
    # program benefits is set to 25 percent; and for those who do extend their leave, the
    # extension is equal to 25 percent of their length in the absences of a program.
    d_test['rand']=runif(nrow(d_test))
    d_test <- d_test %>% mutate(longer_leave=ifelse(.25>rand,1,0))
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      d_test["extend_flag"] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                                     longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                                   ,1,extend_flag))
      d_test[len_var] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                               longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                             ,get(len_var)*1.25,get(len_var)))
      d_test["total_length"] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                                      longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                                    ,total_length+get(len_var)*.25, total_length))
    }
    # iii. For workers who exhaust program benefits and then receive employer pay:
    # In this case the simulator assigns a 50 percent probability of taking an extended leave
    # until their employer pay is exhausted.
    # Not implemented, don't really get why this would be allowed or with what probability if it was
    # clean up vars
    d_test <- d_test[, !(names(d_test) %in% c("longerLeave_prob"))]
  }
  # Additional option to extend leave a+bx additional days with c probability if the user wishes.
  # a = extend_days
  # b = extend_prop
  # c = extend_prob
  # simplified from the ACM model; there they allowed it to be customized by leave type, just allowing for overall adjustments for now.
  if (extend_prob > 0) {
    d_test['rand']=runif(nrow(d_test))
    d_test["extend_flag"] <- with(d_test, ifelse(rand<extend_prob & particip==1 & resp_len==1 & total_length!=0,1,extend_flag))
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      # first apply the multiplicative factor, then spread extend_days evenly
      # across the leave types taken
      d_test[len_var] <- with(d_test, ifelse(rand<extend_prob & particip==1 & resp_len==1 & get(paste(len_var))!=0,
                                             round(get(paste(len_var))*extend_prop),get(paste(len_var))))
      d_test[len_var] <- with(d_test, ifelse(rand<extend_prob & particip==1& resp_len==1 & get(paste(len_var))!=0,
                                             round(get(paste(len_var))+(extend_days/total_leaves)),get(paste(len_var))))
    }
    # clean up vars
    d_test <- d_test[, !(names(d_test) %in% c("rand","extend_amt"))]
  }
  # FMLA Protection Constraint option
  # If enabled, leaves that are extended in the presence of a program that
  # originally were less than 12 weeks in length are constrained to be no longer than
  # 12 weeks in the presence of the program.
  if (fmla_protect==TRUE) {
    d_test["fmla_constrain_flag"] <- 0
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      orig_var=paste("orig_len_",i,sep="")
      # 60 = 12 weeks x 5 work days
      d_test["fmla_constrain_flag"] <- with(d_test, ifelse(extend_flag==1 & get(len_var)>60 & get(orig_var)<=60
                                                           ,1,fmla_constrain_flag))
      d_test[len_var] <- with(d_test, ifelse(extend_flag==1 & get(len_var)>60 & get(orig_var)<=60
                                             ,60,get(len_var)))
    }
  }
  # adjust total_length to match extensions of individual leaves
  d_test['total_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d_test <- d_test %>% mutate(total_length=ifelse(get(paste(take_var)) == 1, total_length+get(paste('length_',i,sep="")), total_length))
  }
  return(d_test)
}
# ============================ #
# 5A. runLogitEstimate
# ============================ #
# see 3_impute_functions.R, function 1Ba
# ============================ #
# 6. UPTAKE
# ============================ #
# specifies uptake rate of those that are eligible for the paid leave program
# default is "full" - all who are eligible and would receive more money than employer would pay
# would pay choose to participate
# Simulate program uptake: convert eligible participants' leave lengths into
# days of program benefit collection (plen_[type] and particip_length).
#
# Args:
#   d                    - ACS data frame with take_/length_/particip columns.
#   [type]_uptake        - per-leave-type uptake probabilities.
#   full_particip_needer - if TRUE, leave needers (resp_len==1) always take
#                          up benefits regardless of the uptake draw.
#   wait_period          - program waiting period in days (not compensated).
#   maxlen_*             - program day caps, passed through to check_caps.
#
# Returns: d with plen_[type], particip_length (capped), and particip
#   updated (set to 0 when no compensated days remain).
UPTAKE <- function(d, own_uptake, matdis_uptake, bond_uptake, illparent_uptake,
                   illspouse_uptake, illchild_uptake, full_particip_needer, wait_period,
                   maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild,
                   maxlen_total, maxlen_DI, maxlen_PFL) {
  # calculate uptake -> days of leave that program benefits are collected
  d['particip_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    uptake_val=paste(i,"_uptake",sep="")
    plen_var= paste("plen_",i, sep="")
    # fresh uptake draw per leave type
    d['rand']=runif(nrow(d))
    # uptake only when the leave outlasts the waiting period
    d <- d %>% mutate(particip_length=ifelse(wait_period<get(paste('length_',i,sep="")) &
                                               rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,
                                             particip_length+get(paste('length_',i,sep=""))-wait_period, particip_length))
    d[plen_var] <- with(d, ifelse(wait_period<get(paste('length_',i,sep="")) &
                                    rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,
                                  get(paste('length_',i,sep=""))-wait_period, 0))
    d <- d %>% mutate(change_flag=ifelse(wait_period<get(paste('length_',i,sep="")) &
                                           rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,1,0))
    # Option for if leave needers always take up benefits when they receive more than their employer pays in leave
    if (full_particip_needer==TRUE) {
      d <- d %>% mutate(particip_length=ifelse(wait_period<get(paste('length_',i,sep="")) &
                                                 rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,
                                               particip_length+get(paste('length_',i,sep=""))-wait_period, particip_length))
      d[plen_var] <- with(d, ifelse(wait_period<get(paste('length_',i,sep="")) &
                                      rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,
                                    get(paste('length_',i,sep=""))-wait_period, get(plen_var)))
      d <- d %>% mutate(change_flag=ifelse(wait_period<get(paste('length_',i,sep="")) &
                                             rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,1, change_flag))
    }
    # subtract days spent on employer benefits from those that exhausting employer benefits (received pay for some days of leave)
    # Also accounting for wait period here, as that can tick down as a person is still collecting employer benefits
    d <- d %>% mutate(particip_length= ifelse(change_flag==1 & !is.na(exhausted_by),
                                              ifelse(get(paste('length_',i,sep="")) > exhausted_by & exhausted_by>wait_period,
                                                     particip_length - exhausted_by + wait_period, particip_length), particip_length))
    d[plen_var] <- with(d, ifelse(change_flag==1 & !is.na(exhausted_by),
                                  ifelse(get(paste('length_',i,sep="")) > exhausted_by & exhausted_by>wait_period,
                                         get(plen_var) - exhausted_by + wait_period, get(plen_var)), get(plen_var)))
  }
  # make sure those with particip_length 0 are also particip 0
  d <- d %>% mutate(particip= ifelse(particip_length==0,0, particip))
  # cap particip_length at max program days
  # INPUT: ACS data set
  d <- check_caps(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild,
                  maxlen_total, maxlen_DI, maxlen_PFL)
  # OUTPUT: ACS data set with participating leave length capped at user-specified program maximums
  # clean up vars
  d <- d[, !(names(d) %in% c('rand', 'change_flag','reduce'))]
  return(d)
}
# ============================ #
# 6A. check_caps
# ============================ #
# cap particip_length at max program days
# Cap program participation lengths (plen_[type]) at user-specified maxima.
#
# Caps are applied in three layers:
#   1. each individual leave type at maxlen_[type];
#   2. the DI class (matdis + own) at maxlen_DI and the PFL class
#      (bond + illparent + illchild + illspouse) at maxlen_PFL, with any
#      reduction spread proportionally across component types;
#   3. the overall total at maxlen_total, again spread proportionally.
# Layers 2 and 3 are skipped when the class cap equals the sum of its
# component caps (the class cap can never bind in that case).
#
# Returns: d with plen_* capped and DI_plen / PFL_plen / particip_length
#   recomputed.
check_caps <- function(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild,
                       maxlen_total, maxlen_DI, maxlen_PFL) {
  # for each individual leave type
  for (i in leave_types) {
    plen_var= paste("plen_",i, sep="")
    max_val=paste("maxlen_",i,sep="")
    d[plen_var] <- with(d, ifelse(get(plen_var)>get(max_val),get(max_val), get(plen_var)))
  }
  # always materialize the class totals so downstream code (e.g. BENEFITEFFECT
  # reads DI_plen/PFL_plen) can rely on the columns existing regardless of
  # which cap branches run
  d <- d %>% mutate(DI_plen=plen_matdis+plen_own)
  d <- d %>% mutate(PFL_plen=plen_bond+plen_illparent+plen_illchild+plen_illspouse)
  # apply cap for DI and PFL classes of leaves
  # BUG FIX: DI covers matdis + own leaves, so the skip test compares
  # maxlen_DI against maxlen_own + maxlen_matdis (was maxlen_bond + maxlen_matdis)
  if (maxlen_DI!=maxlen_own+maxlen_matdis) {
    d['DI_plen'] <- with(d, ifelse(DI_plen>maxlen_DI,maxlen_DI,DI_plen))
    # evenly distributed cap among leave types
    d['reduce'] <- with(d, ifelse(plen_matdis+plen_own!=0, DI_plen/(plen_matdis+plen_own),0))
    d['plen_matdis']=round(d[,'plen_matdis']*d[,'reduce'])
    d['plen_own']=round(d[,'plen_own']*d[,'reduce'])
  }
  if (maxlen_PFL!=maxlen_illparent+maxlen_illspouse+maxlen_illchild+maxlen_bond) {
    d['PFL_plen'] <- with(d, ifelse(PFL_plen>maxlen_PFL,maxlen_PFL,PFL_plen))
    # evenly distributed cap among leave types
    d['reduce'] <- with(d, ifelse(plen_bond+plen_illparent+plen_illchild+plen_illspouse!=0,
                                  PFL_plen/(plen_bond+plen_illparent+plen_illchild+plen_illspouse),0))
    d['plen_bond']=round(d[,'plen_bond']*d[,'reduce'])
    d['plen_illchild']=round(d[,'plen_illchild']*d[,'reduce'])
    d['plen_illspouse']=round(d[,'plen_illspouse']*d[,'reduce'])
    d['plen_illparent']=round(d[,'plen_illparent']*d[,'reduce'])
  }
  # apply cap for all leaves
  # BUG FIX: the all-type sum previously counted maxlen_bond twice and
  # omitted maxlen_own
  if (maxlen_total!=maxlen_DI+maxlen_PFL | maxlen_total!=maxlen_illparent+maxlen_illspouse+maxlen_illchild+maxlen_bond+maxlen_own+maxlen_matdis) {
    d['particip_length']=0
    for (i in leave_types) {
      plen_var=paste("plen_",i,sep="")
      d <- d %>% mutate(particip_length=particip_length+get(plen_var))
    }
    d['particip_length'] <- with(d, ifelse(particip_length>maxlen_total,maxlen_total,particip_length))
    d['reduce'] <- with(d, ifelse(plen_matdis+plen_own+plen_bond+plen_illparent+plen_illchild+plen_illspouse!=0,
                                  particip_length/(plen_matdis+plen_own+plen_bond+plen_illparent+plen_illchild+plen_illspouse),0))
    # evenly distributed cap among leave types
    d['plen_matdis']=round(d[,'plen_matdis']*d[,'reduce'])
    d['plen_own']=round(d[,'plen_own']*d[,'reduce'])
    d['plen_bond']=round(d[,'plen_bond']*d[,'reduce'])
    d['plen_illchild']=round(d[,'plen_illchild']*d[,'reduce'])
    d['plen_illspouse']=round(d[,'plen_illspouse']*d[,'reduce'])
    d['plen_illparent']=round(d[,'plen_illparent']*d[,'reduce'])
    # recalculate DI/PFL/total lengths
    d <- d %>% mutate(DI_plen=plen_matdis+plen_own)
    d <- d %>% mutate(PFL_plen=plen_bond+plen_illparent+plen_illchild+plen_illspouse)
    d <- d %>% mutate(particip_length=DI_plen+ PFL_plen)
  }
  return(d)
}
# ============================ #
# 7. BENEFITS
# ============================ #
# Adding base values for new ACS variables involving imputed FMLA values
# Compute baseline program benefits and employer leave pay for each worker.
#
# Daily wage is approximated as WAGP / (weeks_worked * 5 work days).
# base_benefits  = daily wage * participating days * benefit proportion.
# base_leave_pay = daily wage * total leave days * employer pay proportion.
# Both are zeroed where undefined (NA), then copied into the actual_*
# columns, which later parameter functions may modify.
BENEFITS <- function(d) {
  work_days <- round(d$weeks_worked * 5)
  # base benefits received from program
  d$base_benefits <- d$WAGP / work_days * d$particip_length * d$benefit_prop
  d$base_benefits[is.na(d$base_benefits)] <- 0
  # base pay received from employer (same across all pay schedules)
  d$base_leave_pay <- d$WAGP / work_days * d$total_length * d$prop_pay
  d$base_leave_pay[is.na(d$base_leave_pay)] <- 0
  # starting values for actual pay/benefits, adjusted by later functions
  d$actual_leave_pay <- d$base_leave_pay
  d$actual_benefits <- d$base_benefits
  return(d)
}
# ============================ #
# 8. BENEFITEFFECT
# ============================ #
# Accounting for some "cost" of applying for the program when deciding between employer paid leave and program
# Apply the "benefit effect": account for the cost/hassle of applying for the
# program by probabilistically removing participants whose weekly gain from
# the program (benefits minus employer pay) is small, using uptake
# probabilities by family income and benefit difference from the 2001 Westat
# survey (as in the ACM model).
#
# Returns: d with bene_effect_flg set for dropped participants and their
#   actual_benefits, plen_*, DI_plen, PFL_plen, particip_length, and particip
#   zeroed out. Workers who exhaust employer pay or who extended leaves under
#   the program are exempt from the effect.
BENEFITEFFECT <- function(d) {
  # Create uptake probabilities dataframe
  # obtained from 2001 Westat survey which ACM used for this purpose
  # d_prob <- read.csv("bene_effect_prob.csv")
  # Hardcoding above CSV to remove dependency
  # Three columns of data set
  #Family income category
  finc_cat=rep(seq.int(10000,100000, by = 10000),4)
  #Benefit difference
  bene_diff=c(rep(0,10),rep(25,10),rep(50,10),rep(125,10))
  #Probability of taking up benefits
  uptake_prob=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.12, 0.08, 0.05,
                0.04, 0.02, 0.02, 0.01, 0.01, 0, 0, 0.59, 0.48,
                0.38, 0.28, 0.21, 0.15, 0.1, 0.07, 0.05, 0.03,
                1, 1, 1, 1, 1, 1, 0.99, 0.99, 0.98, 0.98)
  # create data frame
  d_prob=data.frame(finc_cat,bene_diff,uptake_prob)
  # define benefit difference to match 2001 Westat survey categories
  # weekly gain = (benefits - employer pay) per participating week (5 days)
  d <- d %>% mutate(bene_diff=(actual_benefits-actual_leave_pay)/particip_length*5)
  d <- d %>% mutate(bene_diff=ifelse(bene_diff<=25, 0, bene_diff))
  d <- d %>% mutate(bene_diff=ifelse(bene_diff<=50 & bene_diff>25, 25, bene_diff))
  d <- d %>% mutate(bene_diff=ifelse(bene_diff<=125 & bene_diff>50, 50, bene_diff))
  d <- d %>% mutate(bene_diff=ifelse(bene_diff>125, 125, bene_diff))
  d <- d %>% mutate(bene_diff=ifelse(is.na(bene_diff), 0, bene_diff))
  d['bene_diff']=as.integer(d[,'bene_diff'])
  # PLACEHOLDER dealing with missing faminc values for the test ACS data set.
  # need to come up with systematic way of addressing missing values in ACS eventually
  d <- d %>% mutate(faminc=ifelse(is.na(faminc),WAGP,faminc))
  # define family income to match 2001 Westat survey categories
  d <- d %>% mutate(finc_cat=ifelse(faminc<=10000,10000,NA))
  inc_cut <- seq(10000, 90000, by=10000)
  for (i in inc_cut) {
    d <- d %>% mutate(finc_cat=ifelse(faminc>i & faminc<=i+10000,i,finc_cat))
  }
  d <- d %>% mutate(finc_cat=ifelse(faminc>100000,100000,finc_cat))
  d['finc_cat']=as.numeric(d[,'finc_cat'])
  # recalculate uptake based on bene_diff
  d <- join(d,d_prob, type="left",match="all",by=c("bene_diff", "finc_cat"))
  d['rand']=runif(nrow(d))
  # exclude those participants that will not be affected by benefit effect
  # those who exhaust employer benefits before leave ends will always participate
  d["universe"] <- ifelse(d[,"eligworker"]==1 & !is.na(d[,'exhausted_by']),0,1)
  # those who choose to extend leaves in the presence of the program will always participate
  d["universe"] <- ifelse(d[,"eligworker"]==1 & d[,'extend_flag']==1,0,d[,'universe'])
  # flag those who are not claiming benefits due to benefit effect
  d <- d %>% mutate(bene_effect_flg=ifelse(rand>uptake_prob & particip==1 & universe==1,1,0))
  # update leave vars
  d <- d %>% mutate(actual_benefits=ifelse(rand>uptake_prob & particip==1 & universe==1,0,actual_benefits))
  d <- d %>% mutate(particip_length=ifelse(rand>uptake_prob & particip==1 & universe==1,0,particip_length))
  for (i in leave_types) {
    plen_var= paste("plen_",i, sep="")
    d[plen_var] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,get(plen_var)))
  }
  d['DI_plen'] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,DI_plen))
  d['PFL_plen'] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,PFL_plen))
  d <- d %>% mutate(particip=ifelse(rand>uptake_prob & particip==1 & universe==1,0,particip))
  d <- d[, !(names(d) %in% c('rand','bene_diff','finc_cat','uptake_prob','universe'))]
  return(d)
}
# ============================ #
# 9. TOPOFF
# ============================ #
# employers who would pay their employees
# 100 percent of wages while on leave would instead require their employees to participate
# in the program and would "top-off" the program benefits by paying the difference
# between program benefits and full pay.
# User can specify percent of employers that engage in this, and minimum length of leave this is required for
# Simulate employer "top-off" behavior: a user-specified share of employers
# who would otherwise pay full wages during leave instead require employees
# to claim program benefits and pay only the difference up to full pay.
#
# Args:
#   d                - ACS data frame (after BENEFITS/uptake steps).
#   topoff_rate      - probability an employer paying full wages tops off.
#   topoff_minlength - minimum leave length (days) for top-off to apply.
#
# Returns: d with plen_*, particip_length, actual_benefits, actual_leave_pay,
#   particip, and topoff_flg updated; helper columns dropped.
TOPOFF <- function(d, topoff_rate, topoff_minlength) {
  # NOTE(review): len_vars is built here but not referenced below — the loop
  # constructs the names itself; confirm before removing.
  len_vars <- c("length_own", "length_illspouse", "length_illchild","length_illparent","length_matdis","length_bond")
  d['topoff_rate'] <- topoff_rate
  d['topoff_min'] <- topoff_minlength
  d['rand'] <- runif(nrow(d))
  # only employers already paying full wages (prop_pay==1) can top off
  d <- d %>% mutate(topoff= ifelse(rand<topoff_rate & prop_pay==1,1,0))
  d <- d %>% mutate(topoff_count=0)
  for (i in leave_types) {
    len_var=paste("length_",i,sep="")
    plen_var=paste("plen_",i,sep="")
    take_var=paste("take_",i,sep="")
    # topped-off leaves collect program benefits for their full length
    d['topoff_temp'] <- with(d,ifelse(topoff==1 & topoff_min<=get(paste(len_var)) & get(paste(take_var))==1,1,0))
    d[plen_var] <- with(d,ifelse(topoff_temp==1,get(len_var),get(plen_var)))
    d <- d %>% mutate(topoff_count= ifelse(topoff_temp==1 ,topoff_count+1,topoff_count))
  }
  # rebuild total participating length from the (possibly updated) plen_* cols
  d['particip_length']=0
  for (i in leave_types) {
    plen_var=paste("plen_",i,sep="")
    d <- d %>% mutate(particip_length=particip_length+get(plen_var))
  }
  # recalculate benefits based on updated participation length
  # actual benefits received from program
  # note: topoff will override benefiteffect changes
  d <- d %>% mutate(actual_benefits=WAGP/(round(weeks_worked*5))*particip_length*benefit_prop)
  d <- d %>% mutate(actual_benefits=ifelse(is.na(actual_benefits),0,actual_benefits))
  #subtract benefits from pay
  d <- d %>% mutate(actual_leave_pay=ifelse(topoff_count>0,base_leave_pay-actual_benefits,actual_leave_pay))
  d <- d %>% mutate(topoff_flg= ifelse(topoff_count>0,1,0))
  # adjust participation flag. leave taken assumed to not be affected by top off behavior
  d <- d %>% mutate(particip=ifelse(topoff_count>0,1,particip))
  # clean up vars
  d <- d[, !(names(d) %in% c('rand','topoff_rate','topoff_temp','topoff_min','topoff', 'topoff_count'))]
  return(d)
}
# ============================ #
# 10. DEPENDENTALLOWANCE
# ============================ #
# include a flat weekly dependent allowance for families with children
# Add a flat weekly dependent allowance to program benefits for participating
# workers who have children.
#
# Args:
#   d               : ACS analysis data frame with particip, nochildren,
#                     actual_benefits, and particip_length columns.
#   dependent_allow : allowance amount in dollars per week.
# Returns: d with actual_benefits increased for qualifying rows.
DEPENDENTALLOWANCE <- function(d, dependent_allow) {
  # particip_length is measured in days; dividing by 5 converts to weeks
  qualifies <- d$particip == 1 & d$nochildren == 0
  d$actual_benefits <- ifelse(qualifies,
                              d$actual_benefits + dependent_allow * d$particip_length / 5,
                              d$actual_benefits)
  return(d)
}
# ============================ #
# 11. DIFF_ELIG
# ============================ #
# Some state programs have differential eligibility by leave type.
# For example, NJ's private plan option means about 30% of the PFL eligble population is not
# eligible for DI.
# However, eligibility is currently programmed as universally binary.
# As a workaround for now, this function allows users to simulate this differential eligibility
# by removing some specified proportion of participation for specific leave types
# at random from the population
# Simulate leave-type-specific eligibility by randomly retaining only a
# specified share of current benefit collectors for each leave type; the rest
# have their participating length zeroed (treated as ineligible).
#
# Args:
#   d            : ACS analysis data frame with id and plen_* columns.
#   *_elig_adj   : per-leave-type share (0-1) of current participants who
#                  remain eligible for that type.
# Returns: d with plen_* zeroed for the randomly removed share; downstream
#   variables are reconciled later in CLEANUP.
DIFF_ELIG <- function(d, own_elig_adj, illspouse_elig_adj, illchild_elig_adj, 
                      illparent_elig_adj, matdis_elig_adj, bond_elig_adj) {
  # pair each adjustment argument name with its plen_ column name
  adjs_vals <- paste0(leave_types, '_elig_adj')
  plen_vars <- paste0('plen_',leave_types)
  # SIMPLIFY=F keeps the pairs as a list of 2-element lists
  zip <- mapply(list, adjs_vals, plen_vars, SIMPLIFY=F)
  # for each pair of leave type/adj val...
  for (i in zip) {
    adjs_val=i[[1]]
    plen_var=i[[2]]
    # select proportion of participants equal to adj value; rest will no longer
    # collect benefits for that type (simulating they are ineligible).
    # get(adjs_val) resolves the adjustment argument by name in this function's
    # environment; get(plen_var) inside filter() resolves the column.
    nsamp <- ceiling(get(adjs_val)*nrow(filter(d, get(plen_var)>0)))
    psamp <- sample_n(filter(d, get(plen_var)>0), nsamp)
    # flag the retained (still-eligible) rows by id; everyone else gets 0
    d[d[,'id'] %in% psamp[,'id'],'pflag'] <-1
    d['pflag'] <- d['pflag'] %>% replace(., is.na(.), 0)
    # zero out participating length for unflagged current collectors
    d[plen_var] <- with(d, ifelse(get(plen_var)>0 & pflag==0, 0, get(plen_var)))
    d <- d[, !(names(d) %in% c('pflag'))]
    # the rest of the affected variables are reconciled in the CLEANUP function next
  }
  return(d)
}
# ============================ #
# 12. CLEANUP
# ============================ #
# Final variable alterations and consistency checks
# Final consistency pass: re-apply program length caps, apply weekly benefit
# floor/ceilings, and derive per-leave-type and DI/PFL benefit and uptake
# variables.
#
# Args:
#   d                  : ACS analysis data frame (post all behavior functions).
#   week_bene_cap      : absolute weekly benefit cap (dollars).
#   week_bene_cap_prop : optional cap as a proportion of mean weekly wage
#                        (NULL to skip).
#   week_bene_min      : weekly benefit floor (dollars).
#   maxlen_*           : program maximum lengths (days) by type / DI / PFL / total.
# Returns: d with capped actual_benefits, bene_* and ptake_* columns added.
CLEANUP <- function(d, week_bene_cap,week_bene_cap_prop,week_bene_min, maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild, 
                    maxlen_total,maxlen_DI,maxlen_PFL) {
  # Check leave length participation caps again
  # INPUT: ACS data set
  d <- check_caps(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild, 
                  maxlen_total, maxlen_DI, maxlen_PFL)
  # OUTPUT: ACS data set with participating leave length capped at user-specified program maximums
  # cap benefit payments at program's weekly benefit cap
  # (cap * days / 5 = cap * weeks; NOTE(review): ceiling() is applied before
  # the /5 — confirm this rounding order is intended)
  d <- d %>% mutate(actual_benefits= ifelse(actual_benefits>ceiling(week_bene_cap*particip_length)/5, 
                                            ceiling(week_bene_cap*particip_length)/5, actual_benefits))
  # cap benefits payments as a function of mean weekly wage in the population
  if (!is.null(week_bene_cap_prop)) {
    cap <- mean(d$WAGP/d$weeks_worked)*week_bene_cap_prop
    d <- d %>% mutate(actual_benefits= ifelse(actual_benefits>ceiling(cap*particip_length)/5, 
                                              ceiling(cap*particip_length)/5, actual_benefits))
  }
  # establish minimum weekly benefits for program participants
  d <- d %>% mutate(actual_benefits= ifelse(actual_benefits<ceiling(week_bene_min*particip_length)/5, 
                                            ceiling(week_bene_min*particip_length)/5, actual_benefits))
  # make sure those with particip_length 0 are also particip 0
  d <- d %>% mutate(particip= ifelse(particip_length==0,0, particip))
  # calculate leave specific benefits; initialize DI/PFL aggregates
  d['ptake_PFL'] <-0
  d['ptake_DI'] <-0
  d['bene_DI'] <- 0
  d['bene_PFL'] <- 0
  for (i in leave_types) {
    plen_var=paste("plen_",i,sep="")
    ben_var=paste("bene_",i,sep="")
    # allocate total benefits to each type proportional to its share of
    # participating length; 0/0 produces NA, replaced with 0 below
    d[ben_var] <- with(d, actual_benefits*(get(plen_var)/particip_length))
    d[ben_var] <- with(d, ifelse(is.na(get(ben_var)),0,get(ben_var)))
    # benefits for PFL, DI leave types (DI = own + matdis; PFL = the rest)
    if (i=='own'|i=='matdis') {
      d['bene_DI'] <- with(d, bene_DI+ get(ben_var))
    }
    if (i=='bond'|i=='illspouse'|i=='illparent'|i=='illchild') {
      d['bene_PFL'] <- with(d, bene_PFL + get(ben_var))
    }
    # create ptake_* vars
    # dummies for those that took a given type of leave, and collected non-zero benefits for it
    take_var=paste("take_",i,sep="")
    ptake_var=paste("ptake_",i,sep="")
    d[ptake_var] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,0))
    # dummies for PFL, DI leave types
    if (i=='own'|i=='matdis') {
      d['ptake_DI'] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,ptake_DI))
    }
    if (i=='bond'|i=='illspouse'|i=='illparent'|i=='illchild') {
      d['ptake_PFL'] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,ptake_PFL))
    }
  }
  return(d)
}
# ============================ #
# 12A. check_caps
# ============================ #
# see function 6A.
|
/4_post_impute_functions.R
|
no_license
|
clzhang1547/microsim_R
|
R
| false
| false
| 43,773
|
r
|
# """
# 3_post_imputation_functions
#
# These functions are Post-imputation Leave Parameter Functions [on post-imputation ACS data set]
# that execute the policy simulation after leave taking behavior has been established.
#
#
# """
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Table of Contents
#~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 0. LEAVEPROGRAM
# 1. impute_leave_length
# 1A. RunRandDraw - see 3_impute_functions.R, function 1Bc
# 2. CLONEFACTOR
# 3. PAY_SCHEDULE
# 4. ELIGIBILITYRULES
# 4A. FORMULA
# 5. EXTENDLEAVES
# 5A. runLogitEstimate - see 3_impute_functions.R, function 1Ba
# 6. UPTAKE
# 6A. check_caps
# 7. BENEFITS
# 8. BENEFITEFFECT
# 9. TOPOFF
# 10. DEPENDENTALLOWANCE
# 11. DIFF_ELIG
# 12. CLEANUP
# 12a. check_caps
# ============================ #
# 0. LEAVEPROGRAM
# ============================ #
# Baseline changes for addition of a leave program
# follows baseline changes of ACM model (see p.11 of ACM model description paper). Main change needed to base cleaning:
# Leave needers who did not take a leave in the absence of a program, and
# who said the reason that they did not take a leave was because they could not afford to
# take one, take a leave in the presence of a program.
# Baseline change for the presence of a leave program: leave needers who did
# not take leave only because they could not afford to (sens_var == 1) are
# switched to takers for each leave type they reported needing.
#
# Args:
#   d        : data frame with take_*/need_* columns for each leave type.
#   sens_var : name of the affordability-sensitivity dummy column.
# Returns: d with take_* set to 1 for affected needers.
LEAVEPROGRAM <- function(d, sens_var) {
  for (lv in leave_types) {
    take_col <- paste0("take_", lv)
    need_col <- paste0("need_", lv)
    # only flip rows where both the sensitivity flag and the need flag are
    # non-missing and equal to 1
    flip <- !is.na(d[, sens_var]) & d[, sens_var] == 1 &
      !is.na(d[, need_col]) & d[, need_col] == 1
    d[, take_col] <- ifelse(flip, 1, d[, take_col])
  }
  return(d)
}
# ============================ #
# 1. impute_leave_length
# ============================ #
# function to impute leave length once leave taking behavior has been imputed
# currently the imputation method is hardcoded as a random draw from a specified distribution of FMLA observations
# but this is a candidate for modular imputation
# Impute leave lengths for imputed leave takers via a random draw from the
# FMLA length distribution (per leave type), then merge results back and
# replace remaining NA take/length values with 0.
#
# Args:
#   d_train      : FMLA (training) data providing the length distributions.
#   d_test       : ACS/FMLA rows needing imputed lengths.
#   conditional, test_cond : NOTE(review): accepted but unused here.
#   ext_resp_len : passed through to runRandDraw (see 3_impute_functions.R 1Bc).
#   len_method   : length imputation method, passed through to runRandDraw.
# Returns: d_test with length_*/squo_length_* columns filled in.
impute_leave_length <- function(d_train, d_test, conditional, test_cond, ext_resp_len,len_method) {
  #Days of leave taken - currently takes length from most recent leave only
  yvars <- c(own = "length_own",
             illspouse = "length_illspouse",
             illchild = "length_illchild",
             illparent = "length_illparent",
             matdis = "length_matdis",
             bond = "length_bond")
  # Training filters: FMLA rows with a valid positive length for the type,
  # plus type-specific demographic restrictions (marriage/children/sex).
  train_filts <- c(own = "length_own>0 & is.na(length_own)==FALSE",
                   illspouse = "length_illspouse>0 & is.na(length_illspouse)==FALSE & nevermarried == 0 & divorced == 0",
                   illchild = "length_illchild>0 & is.na(length_illchild)==FALSE",
                   illparent = "length_illparent>0 & is.na(length_illparent)==FALSE",
                   matdis = "length_matdis>0 & is.na(length_matdis)==FALSE & female == 1 & nochildren == 0",
                   bond = "length_bond>0 & is.na(length_bond)==FALSE & nochildren == 0")
  # Test filters: rows imputed as takers of each type (same demographic cuts)
  test_filts <- c(own = "take_own==1",
                  illspouse = "take_illspouse==1 & nevermarried == 0 & divorced == 0",
                  illchild = "take_illchild==1",
                  illparent = "take_illparent==1",
                  matdis = "take_matdis==1 & female == 1 & nochildren == 0",
                  bond = "take_bond==1 & nochildren == 0")
  # using random draw from leave distribution rather than KNN prediction for computational issues
  #INPUTS: variable requiring imputation, conditionals to filter test and training data on,
  #        test data (rows needing lengths), training data (FMLA sample to draw from)
  predict <- mapply(runRandDraw, yvar=yvars, train_filt=train_filts,test_filt=test_filts, 
                    MoreArgs = list(d_train=d_train, d_test=d_test, ext_resp_len=ext_resp_len, 
                                    len_method= len_method), SIMPLIFY = FALSE)
  # Outputs: one data set of imputed leave length values per leave type (or
  # NULL when no rows matched); merge imputed values with the test data.
  count=0
  for (i in predict) {
    count=count+1
    if (!is.null(i)) {
      d_test <- merge(i, d_test, by="id",all.y=TRUE)  
    }
    else {
      # no imputations for this type: create zero-filled length columns
      d_test[paste0('length_',leave_types[count])] <- 0
      d_test[paste0('squo_length_',leave_types[count])] <- 0
    }
  }
  # NOTE(review): vars_name is built but never used below.
  vars_name=c()
  for (i in leave_types) {
    vars_name= c(vars_name, paste("length",i, sep="_"))
  }
  # replace leave taking and length NA's with zeros now
  # wanted to distinguish between NAs and zeros in FMLA survey,
  # but no need for that in ACS now that we're "certain" of ACS leave taking behavior
  # We are "certain" because we only imputed leave takers/non-takers, discarding those with
  # uncertain/ineligible status (take_[type]=NA).
  for (i in leave_types) {
    len_var=paste("length_",i,sep="")
    squo_var=paste0('squo_', len_var)
    take_var=paste("take_",i,sep="")
    d_test[len_var] <- with(d_test, ifelse(is.na(get(len_var)),0,get(len_var)))
    d_test[take_var] <- with(d_test, ifelse(is.na(get(take_var)),0,get(take_var)))
    d_test[squo_var] <- with(d_test, ifelse(is.na(get(squo_var)),0,get(squo_var)))
  }
  return(d_test)
}
# ============================ #
# 1A. runRandDraw
# ============================ #
# see 3_impute_functions.R, function 1Bc
# ============================ #
# 2. CLONEFACTOR
# ============================ #
# allow users to clone ACS individuals
# Expand the sample by appending randomly drawn clones of existing rows.
#
# Args:
#   d            : data frame with a unique 'id' column.
#   clone_factor : proportion of nrow(d) to clone (sampled with replacement);
#                  values <= 0 leave d unchanged.
# Returns: d with clones appended, clone_flag marking clones (1) vs originals
#   (0), and 'id' reset to a fresh unique sequence.
CLONEFACTOR <- function(d, clone_factor) {
  if (clone_factor > 0) {
    d$clone_flag=0
    num_clone <- round(clone_factor*nrow(d), digits=0)
    # draw ids with replacement, then replicate the full rows via a left join
    d_clones <- data.frame(sample(d$id,num_clone,replace=TRUE))
    colnames(d_clones)[1] <- "id"
    d_clones <- join(d_clones,d,by='id', type='left')
    d_clones$clone_flag=1
    d <- rbind(d,d_clones)
    # reset id var. seq_len is used instead of as.numeric(rownames(d)) because
    # rbind() de-duplicates repeated row names (e.g. "1" -> "11"), which made
    # the numeric conversion produce wrong or NA ids.
    d['id'] <- seq_len(nrow(d))
  }
  return(d)
}
# ============================ #
# 3. PAY_SCHEDULE
# ============================ #
# Calculate pay schedule for employer paid leave
# Assign each worker an employer pay schedule for leave, using conditional
# probabilities from the 2001 Westat survey (as in the ACM model), and derive
# total leave length, leave count, and the day employer pay is exhausted.
#
# Args:    d : ACS analysis data frame with prop_pay, take_*, length_* columns.
# Returns: d with pay_schedule, total_length, total_leaves, exhausted_by added.
PAY_SCHEDULE <- function(d) {
  # two possible pay schedules: paid the same amount each week, or paid in full until exhausted
  # Here we randomly assign one of these three pay schedules
  # based on conditional probabilities of total pay received and pay schedules
  # probabilities are obtained from 2001 Westat survey which ACM used for this purpose
  # dist <- read.csv("pay_dist_prob.csv")
  # columns from this csv written manually to avoid dependency on csv file
  # proportion of pay received (prop_pay in FMLA data)
  # Westat 2001 survey: About how much of your usual pay did you receive in total?
  Total_paid=c("Less than half","Half","More than half")
  # Prob of 1st pay schedule - some pay, all weeks
  # Westat 2001 survey: Did you receive some pay for each pay period that you were on leave?
  Always_paid=c(0.6329781, 0.8209731, 0.9358463)
  # Prob of 2nd pay schedule - full pay, some weeks
  # Westat 2001 survey: If not, when you did receive pay, was it for your full salary?
  Fully_paid=c(0.3273122,0.3963387,0.3633615)
  # Prob of 3rd pay schedule - some pay, some weeks
  # Neither paid each pay period, nor receive full pay when they did receive pay.
  Neither_paid=1-Fully_paid
  d_prob=data.frame(Total_paid,Always_paid,Fully_paid,Neither_paid)
  # denote bucket of proportion of pay (prop_pay of exactly 0 or 1 handled below)
  d <- d %>% mutate(Total_paid= ifelse(prop_pay>0 & prop_pay<.5,"Less than half",NA))
  d <- d %>% mutate(Total_paid= ifelse(prop_pay==.5, "Half" ,Total_paid))
  d <- d %>% mutate(Total_paid= ifelse(prop_pay>.5 & prop_pay<1, "More than half",Total_paid))
  # merge probabilities in
  d <- join(d,d_prob, type="left",match="all",by=c("Total_paid"))
  # assign pay schedules via two independent uniform draws:
  # rand decides "always paid" vs not; rand2 decides "fully paid" among the rest
  d['rand']=runif(nrow(d))
  d['rand2']=runif(nrow(d))
  d <- d %>% mutate(pay_schedule= ifelse(rand<Always_paid,"some pay, all weeks",NA))
  d <- d %>% mutate(pay_schedule= ifelse(rand>=Always_paid & rand2<Fully_paid,"all pay, some weeks",pay_schedule))
  d <- d %>% mutate(pay_schedule= ifelse(rand>=Always_paid & rand2>=Fully_paid,"some pay, some weeks",pay_schedule))
  # full pay and no pay are deterministic from prop_pay
  d <- d %>% mutate(pay_schedule= ifelse(prop_pay==1,"all pay, all weeks",pay_schedule))
  d <- d %>% mutate(pay_schedule= ifelse(prop_pay==0,"no pay",pay_schedule))
  # total_length - number of days leave taken of all types
  d['total_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d <- d %>% mutate(total_length=ifelse(get(paste(take_var)) == 1, total_length+get(paste('length_',i,sep="")), total_length))
  }
  # count up number of types of leaves
  d['total_leaves']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d <- d %>% mutate(total_leaves = ifelse(get(paste(take_var))==1, total_leaves+1,total_leaves))
  }
  # Keep track of what day employer benefits will be exhausted for those receiving pay in some but not all of their leave
  # all pay, some weeks: full pay lasts prop_pay share of the leave days
  d <- d %>% mutate(exhausted_by=ifelse(pay_schedule=="all pay, some weeks",round(total_length*prop_pay, digits=0), NA))
  # some pay, some weeks - like ACM, assumes equal distribution of partiality among pay proportion and weeks taken
  d <- d %>% mutate(exhausted_by=ifelse(pay_schedule=="some pay, some weeks",round(total_length*sqrt(prop_pay), digits=0), exhausted_by))
  # clean up temporary vars
  d <- d[, !(names(d) %in% c('rand','rand2','Always_paid','Total_paid','Fully_paid', 'Neither_paid'))]
  return(d)
}
# ============================ #
# 4. ELIGIBILITYRULES
# ============================ #
# apply user-specified eligibility criteria and set initial program participation
# Apply user-specified eligibility rules and compute the initial program
# participation decision.
#
# Args:
#   d               : ACS analysis data frame.
#   earnings/weeks/ann_hours/minsize : eligibility thresholds (NULL = unused).
#   base_bene_level : baseline wage-replacement proportion.
#   week_bene_min   : weekly benefit floor (dollars).
#   formula_*       : optional formulaic benefit schedule (see FORMULA).
#   elig_rule_logic : logical expression (string) combining criterion names.
#   FEDGOV/STATEGOV/LOCALGOV/SELFEMP : include these worker classes?
# Returns: d with eligworker, benefit_prop, benefit_prop_temp, particip added.
ELIGIBILITYRULES <- function(d, earnings=NULL, weeks=NULL, ann_hours=NULL, minsize=NULL, 
                             base_bene_level, week_bene_min, formula_prop_cuts=NULL, formula_value_cuts=NULL, 
                             formula_bene_levels=NULL, elig_rule_logic, FEDGOV, STATEGOV, LOCALGOV, SELFEMP) {
  # ----- apply eligibility rules logic to calculate initial participation ---------------
  # TODO: This should be redone in a more simple fashion once the input expected from the GUI is hammered out.
  # strip terms from those criteria in elig_rule_logic that have corresponding NULL values
  for (i in c('earnings', 'weeks', 'ann_hours', 'minsize')) {
    if (is.null(get(i))) {
      elig_rule_logic <- gsub(i,'TRUE',elig_rule_logic)
    }
  }
  # replace terms in logic string with appropriate conditionals
  elig_rule_logic <- gsub('earnings','WAGP>=earnings',elig_rule_logic)
  elig_rule_logic <- gsub('weeks','weeks_worked>=weeks',elig_rule_logic)
  elig_rule_logic <- gsub('ann_hours','weeks_worked*WKHP>=ann_hours',elig_rule_logic)
  elig_rule_logic <- gsub('minsize','emp_size>=minsize',elig_rule_logic)
  # create eligworker flag based on elig_rule_logic.
  # SECURITY/DESIGN NOTE: eval(parse(...)) executes the user-supplied logic
  # string as R code; acceptable only because the string comes from the GUI.
  d <- d %>% mutate(eligworker= ifelse(eval(parse(text=elig_rule_logic)), 1,0))
  # apply government worker filters (COW = ACS class-of-worker code)
  if (FEDGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==5,0,eligworker))
  }
  if (STATEGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==4,0,eligworker))
  }
  if (LOCALGOV==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==3,0,eligworker))
  }
  # apply self employment filter
  if (SELFEMP==FALSE) {
    d <- d %>% mutate(eligworker = ifelse(COW==6 | COW==7,0,eligworker))
  }
  # ------ benefit calc --------------
  # if formulary benefits are not specified, everyone simply receives base_bene_level
  d["benefit_prop"] <- base_bene_level
  # adjust proportion of pay received if formulary benefits are specified;
  # different benefit levels for different incomes with cuts defined by either
  # proportion of mean state wage, or absolute wage values
  if (!is.null(formula_prop_cuts) | !is.null(formula_value_cuts)) {
    if (is.null(formula_bene_levels)) {
      stop('if formula_prop_cuts or formula_value_cuts are specified, 
           formula_bene_levels must also be specified')
    }
    d <- FORMULA(d, formula_prop_cuts, formula_value_cuts, formula_bene_levels)
  }
  # A non-zero minimum weekly benefit payment will increase effective benefit prop for those that
  # would otherwise receive lower than that. Adjust bene_prop to account for that when
  # simulating participation decision. We're creating a throwaway bene_prop variable,
  # as we still want to use actual bene_prop for determining benefits received, then will
  # increase weekly payments at the end after all participation is determined.
  # BUG FIX: the original used max(), which collapses to a single scalar for
  # the whole column inside mutate(); pmax() gives the intended row-wise maximum.
  d <- d %>% mutate(benefit_prop_temp = pmax(week_bene_min/(WAGP/weeks_worked), benefit_prop))
  # calculate general participation decision based on employer pay vs state program pay
  # those who will receive more under the program will participate
  d["particip"] <- 0
  d["particip"] <- ifelse(d[,"eligworker"]==1 & d[,"prop_pay"]<d[,"benefit_prop_temp"],1,0)
  # those who exhaust employer benefits before leave ends will participate
  d["particip"] <- ifelse(d[,"eligworker"]==1 & !is.na(d[,'exhausted_by']),1,d[,"particip"])
  return(d)
}
# ============================ #
# 4A. FORMULA
# ============================ #
# subfunction to implement formulaic benefit payouts by wage,
# rather than a flat proportion for all participants
# TODO: There might be a simpler way to do this once the input expected from the GUI is hammered out.
# Apply a formulaic (bracketed) benefit schedule: assign benefit_prop by wage
# bracket, with brackets defined either as proportions of the mean weekly wage
# (formula_prop_cuts) or as absolute annual wage values (formula_value_cuts).
# Exactly one of the two cut vectors must be supplied, and formula_bene_levels
# must be one element longer than the cut vector (one level per interval).
#
# Args:
#   d                   : ACS analysis data frame with WAGP, weeks_worked,
#                         benefit_prop columns.
#   formula_prop_cuts   : ascending positive cuts as proportions of mean wage.
#   formula_value_cuts  : ascending positive cuts as absolute wages.
#   formula_bene_levels : benefit proportion for each interval.
# Returns: d with benefit_prop reassigned per bracket.
# TODO: There might be a simpler way to do this once the input expected from the GUI is hammered out.
FORMULA <- function(d, formula_prop_cuts=NULL, formula_value_cuts=NULL, formula_bene_levels) {
  #-----------Validation Checks---------------
  # make sure exactly one of prop cuts and value cuts are specified
  if (!is.null(formula_prop_cuts) & !is.null(formula_value_cuts)) {
    stop("formula_prop_cuts and formula_value_cuts are both specified. Only one should be specified")
  }
  if (is.null(formula_prop_cuts) & is.null(formula_value_cuts)) {
    stop("Neither formula_prop_cuts and formula_value_cuts are specified. One must be specified")
  }
  # checks to make sure formula cuts are numeric, positive, and ascending
  if (!is.null(formula_prop_cuts)) {
    # make sure formula cuts and bene levels are proper length
    if (length(formula_prop_cuts)+1 != length(formula_bene_levels)) {
      stop("formula_bene_levels length must be one greater than formula_prop_cuts length")
    }
    prev_val=0
    for (i in formula_prop_cuts) {
      if (!is.numeric(i)) {
        stop("formula_prop_cuts must be numeric")
      }
      if (0>i) {
        stop("formula_prop_cuts must be positive")
      }
      if (prev_val>i) {
        stop("formula_prop_cuts must be in ascending order")
      }
      prev_val=i
    }
  }
  if (!is.null(formula_value_cuts)) {
    # make sure formula cuts and bene levels are proper length
    if (length(formula_value_cuts)+1 != length(formula_bene_levels)) {
      stop("formula_bene_levels length must be one greater than formula_value_cuts length")
    }
    prev_val=0
    for (i in formula_value_cuts) {
      if (!is.numeric(i)) {
        stop("formula_value_cuts must be numeric")
      }
      if (0>i) {
        stop("formula_value_cuts must be positive")
      }
      if (prev_val>i) {
        stop("formula_value_cuts must be nonduplicated, and in ascending order")
      }
      prev_val=i
    }
  }
  #------------------Adjust benefit levels: proportionate cuts----------------------
  if (!is.null(formula_prop_cuts)) {
    # establish mean weekly wage of population, and each worker's proportion of it
    mean_wage=mean(d$WAGP/d$weeks_worked)
    d['mean_wage_prop']=(d$WAGP/d$weeks_worked)/mean_wage
    # adjust benefit_prop accordingly
    # first interval of formula_bene_levels (below the lowest cut)
    len_cuts=length(formula_prop_cuts)
    len_lvls=length(formula_bene_levels)
    d <- d %>% mutate(benefit_prop = ifelse(formula_prop_cuts[1]>mean_wage_prop, 
                                            formula_bene_levels[1], benefit_prop))
    # last interval (at or above the highest cut)
    d <- d %>% mutate(benefit_prop = ifelse(formula_prop_cuts[len_cuts]<=mean_wage_prop, 
                                            formula_bene_levels[len_lvls], benefit_prop))
    # rest of the intervals in between (half-open: prev_val <= x < i)
    prev_val=formula_prop_cuts[1]
    lvl=1
    for (i in formula_prop_cuts[2:len_cuts]) {
      # BUG FIX: removed stray debug print(i) left in this loop
      lvl=lvl+1
      d <- d %>% mutate(benefit_prop = ifelse(i>mean_wage_prop & prev_val<=mean_wage_prop, 
                                              formula_bene_levels[lvl], benefit_prop))
      prev_val=i
    }
  }
  #------------------Adjust benefit levels: absolute value cuts----------------------
  if (!is.null(formula_value_cuts)) {
    # adjust benefit_prop accordingly
    # first interval of formula_bene_levels (below the lowest cut)
    len_cuts=length(formula_value_cuts)
    len_lvls=length(formula_bene_levels)
    d <- d %>% mutate(benefit_prop = ifelse(formula_value_cuts[1]>WAGP, 
                                            formula_bene_levels[1], benefit_prop))
    # last interval (at or above the highest cut)
    d <- d %>% mutate(benefit_prop = ifelse(formula_value_cuts[len_cuts]<=WAGP, 
                                            formula_bene_levels[len_lvls], benefit_prop))
    # rest of the intervals in between (half-open: prev_val <= WAGP < i)
    prev_val=formula_value_cuts[1]
    lvl=1
    for (i in formula_value_cuts[2:len_cuts]) {
      lvl=lvl+1
      d <- d %>% mutate(benefit_prop = ifelse(i>WAGP & prev_val<=WAGP, 
                                              formula_bene_levels[lvl], benefit_prop))
      prev_val=i
    }
  }
  return(d)
}
# ============================ #
# 5. EXTENDLEAVES
# ============================ #
# Option to simulate extension of leaves in the presence of an FMLA program
# Simulate the extension of leaves in the presence of a paid leave program,
# following the ACM model's "old" extension simulation plus optional
# user-specified extensions and an FMLA 12-week protection constraint.
#
# Args:
#   d_train         : FMLA data for the longer-leave logit estimation.
#   d_test          : ACS data to modify.
#   wait_period     : program waiting period in days.
#   ext_base_effect : apply the ACM base extension behavior?
#   extend_prob/extend_days/extend_prop : optional extra extension:
#                     with probability extend_prob, lengths become
#                     round(length*extend_prop) + extend_days/total_leaves.
#   fmla_protect    : cap extended leaves at 12 weeks (60 work days) when the
#                     original leave was within 12 weeks.
# Returns: d_test with length_*, total_length, extend_flag (and orig_len_*,
#   fmla_constrain_flag) updated.
EXTENDLEAVES <-function(d_train, d_test,wait_period, ext_base_effect, 
                        extend_prob, extend_days, extend_prop, fmla_protect) {
  # copy original leave lengths so the FMLA constraint below can compare
  for (i in leave_types) {
    len_var=paste("length_",i,sep="")
    orig_var=paste("orig_len_",i,sep="")
    d_test[orig_var] <- with(d_test, get(len_var))
  }
  # Base extension effect from ACM model (referred to as the "old" extension simulation there)
  # this is a candidate for modular imputation methods
  d_test["extend_flag"]=0
  if (ext_base_effect==TRUE) {
    # specifications
    # using ACM specifications
    formula <- "longerLeave ~ age + agesq + female"
    # subsetting data
    filt <- "TRUE"
    # weights
    weight <- "~ fixed_weight"
    # Run Estimation
    # INPUT: FMLA (training) data set, ACS (test) data set, logit regression model specification,
    #        filter conditions, weight to use
    d_filt <- runLogitEstimate(d_train=d_train, d_test=d_test, formula=formula, test_filt=filt, 
                               train_filt=filt, weight=weight, varname='longer_leave', create_dummies=TRUE)
    d_test <- merge(d_filt, d_test, by='id', all.y=TRUE)
    # OUTPUT: ACS data with imputed column indicating those taking a longer leave.
    # Following ACM implementation:
    # i. For workers who have leave lengths in the absence of a program that are
    #    less than the waiting period for the program: the leave is extended for 1 week
    #    (5 work days) into the program.
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      d_test["extend_flag"] <- with(d_test, ifelse(get(len_var)<wait_period & particip==1 &
                                                     longer_leave == 1 & get(take_var)==1
                                                   ,1,extend_flag))
      d_test[len_var] <- with(d_test, ifelse(get(len_var)<wait_period & particip== 1 &
                                               longer_leave == 1 & get(take_var)==1
                                             ,get(len_var)+wait_period+5,get(len_var)))
      d_test["total_length"] <- with(d_test, ifelse(get(len_var)<wait_period & particip== 1 &
                                                      longer_leave == 1 & get(take_var)==1
                                                    ,total_length+wait_period+5, total_length))
    }
    # ii. For workers who do not receive any employer pay or who exhaust their
    #     employer pay and then go on the program: The probability of extending a leave using
    #     program benefits is set to 25 percent; and for those who do extend their leave, the
    #     extension is equal to 25 percent of their length in the absence of a program.
    d_test['rand']=runif(nrow(d_test))
    # fresh 25% draw replaces the logit-imputed longer_leave indicator here
    d_test <- d_test %>% mutate(longer_leave=ifelse(.25>rand,1,0))
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      d_test["extend_flag"] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                                     longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                                   ,1,extend_flag))
      d_test[len_var] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                               longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                             ,get(len_var)*1.25,get(len_var)))
      d_test["total_length"] <- with(d_test, ifelse((prop_pay==0 | !is.na(exhausted_by)) & particip==1 &
                                                      longer_leave == 1 & get(take_var)==1 & extend_flag==0 & get(len_var)*1.25>wait_period
                                                    ,total_length+get(len_var)*.25, total_length))
    }
    # iii. For workers who exhaust program benefits and then receive employer pay:
    #      In this case the simulator assigns a 50 percent probability of taking an extended leave
    #      until their employer pay is exhausted.
    #      Not implemented, don't really get why this would be allowed or with what probability if it was
    # clean up vars
    d_test <- d_test[, !(names(d_test) %in% c("longerLeave_prob"))]
  }
  # Additional option to extend leave a+bx additional days with c probability if the user wishes.
  # a = extend_days
  # b = extend_prop
  # c = extend_prob
  # simplified from the ACM model; there they allowed it to be customized by leave type, just allowing for overall adjustments for now.
  if (extend_prob > 0) {
    d_test['rand']=runif(nrow(d_test))
    d_test["extend_flag"] <- with(d_test, ifelse(rand<extend_prob & particip==1 & resp_len==1 & total_length!=0,1,extend_flag))
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      # first scale by extend_prop, then add the flat days split across leaves
      d_test[len_var] <- with(d_test, ifelse(rand<extend_prob & particip==1 & resp_len==1 & get(paste(len_var))!=0,
                                             round(get(paste(len_var))*extend_prop),get(paste(len_var))))
      d_test[len_var] <- with(d_test, ifelse(rand<extend_prob & particip==1& resp_len==1 & get(paste(len_var))!=0,
                                             round(get(paste(len_var))+(extend_days/total_leaves)),get(paste(len_var))))
    }
    # clean up vars
    d_test <- d_test[, !(names(d_test) %in% c("rand","extend_amt"))]
  }
  # FMLA Protection Constraint option
  # If enabled, leaves that are extended in the presence of a program that
  # originally were less than 12 weeks in length are constrained to be no longer than
  # 12 weeks (60 work days) in the presence of the program.
  if (fmla_protect==TRUE) {
    d_test["fmla_constrain_flag"] <- 0
    for (i in leave_types) {
      len_var=paste("length_",i,sep="")
      take_var=paste("take_",i,sep="")
      orig_var=paste("orig_len_",i,sep="")
      d_test["fmla_constrain_flag"] <- with(d_test, ifelse(extend_flag==1 & get(len_var)>60 & get(orig_var)<=60
                                                           ,1,fmla_constrain_flag))
      d_test[len_var] <- with(d_test, ifelse(extend_flag==1 & get(len_var)>60 & get(orig_var)<=60
                                             ,60,get(len_var)))
    }
  }
  # adjust total_length to match extensions of individual leaves
  d_test['total_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    d_test <- d_test %>% mutate(total_length=ifelse(get(paste(take_var)) == 1, total_length+get(paste('length_',i,sep="")), total_length))
  }
  return(d_test)
}
}
# ============================ #
# 5A. runLogitEstimate
# ============================ #
# see 3_impute_functions.R, function 1Ba
# ============================ #
# 6. UPTAKE
# ============================ #
# specifies uptake rate of those that are eligible for the paid leave program
# default is "full" - all who are eligible and would receive more money from the
# program than their employer would pay choose to participate
# Determine program uptake: the number of days each worker actually collects
# program benefits (plen_* and particip_length), applying per-type uptake
# rates, the waiting period, employer-pay exhaustion, and program length caps.
#
# Args:
#   d                    : ACS analysis data frame (post-ELIGIBILITYRULES).
#   *_uptake             : per-leave-type uptake probabilities.
#   full_particip_needer : if TRUE, leave needers (resp_len==1) always take up.
#   wait_period          : days of leave before benefits begin.
#   maxlen_*             : program maximum lengths (days); see check_caps.
# Returns: d with plen_* and particip_length set, particip reconciled.
UPTAKE <- function(d, own_uptake, matdis_uptake, bond_uptake, illparent_uptake, 
                   illspouse_uptake, illchild_uptake, full_particip_needer, wait_period,
                   maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild, 
                   maxlen_total, maxlen_DI, maxlen_PFL) {
  # calculate uptake -> days of leave that program benefits are collected
  d['particip_length']=0
  for (i in leave_types) {
    take_var=paste("take_",i,sep="")
    uptake_val=paste(i,"_uptake",sep="")
    plen_var= paste("plen_",i, sep="")
    # fresh uniform draw per leave type against that type's uptake rate
    d['rand']=runif(nrow(d))
    # take up if: leave outlasts the wait period, the draw succeeds, the worker
    # participates, and the leave type is taken; benefits start after the wait
    d <- d %>% mutate(particip_length=ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                               rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,
                                             particip_length+get(paste('length_',i,sep=""))-wait_period, particip_length))
    d[plen_var] <- with(d, ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                    rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,
                                  get(paste('length_',i,sep=""))-wait_period, 0))
    d <- d %>% mutate(change_flag=ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                           rand<get(uptake_val) & particip==1 & get(paste(take_var)) == 1,1,0))
    # Option for if leave needers always take up benefits when they receive more than their employer pays in leave
    if (full_particip_needer==TRUE) {
      d <- d %>% mutate(particip_length=ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                                 rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,
                                               particip_length+get(paste('length_',i,sep=""))-wait_period, particip_length))
      d[plen_var] <- with(d, ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                      rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,
                                    get(paste('length_',i,sep=""))-wait_period, get(plen_var)))
      d <- d %>% mutate(change_flag=ifelse(wait_period<get(paste('length_',i,sep="")) & 
                                             rand>=get(uptake_val) & particip==1 & get(paste(take_var))== 1 & resp_len==1,1, change_flag))
    }
    # subtract days spent on employer benefits from those that exhausting employer benefits (received pay for some days of leave)
    # Also accounting for wait period here, as that can tick down as a person is still collecting employer benefits
    d <- d %>% mutate(particip_length= ifelse(change_flag==1 & !is.na(exhausted_by), 
                                              ifelse(get(paste('length_',i,sep="")) > exhausted_by & exhausted_by>wait_period,
                                                     particip_length - exhausted_by + wait_period, particip_length), particip_length))
    d[plen_var] <- with(d, ifelse(change_flag==1 & !is.na(exhausted_by), 
                                  ifelse(get(paste('length_',i,sep="")) > exhausted_by & exhausted_by>wait_period,
                                         get(plen_var) - exhausted_by + wait_period, get(plen_var)), get(plen_var)))
  }
  # make sure those with particip_length 0 are also particip 0
  d <- d %>% mutate(particip= ifelse(particip_length==0,0, particip))
  # cap particip_length at max program days
  # INPUT: ACS data set
  d <- check_caps(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild, 
                  maxlen_total, maxlen_DI, maxlen_PFL)
  # OUTPUT: ACS data set with participating leave length capped at user-specified program maximums
  # clean up temporary vars
  d <- d[, !(names(d) %in% c('rand', 'change_flag','reduce'))]
  return(d)
}
# ============================ #
# 6A. check_caps
# ============================ #
# cap particip_length at max program days
# Cap participating leave lengths (plen_*) at program maximums: first per
# leave type, then for the DI class (own + matdis), the PFL class (bond +
# illparent + illspouse + illchild), and finally the overall total. When a
# class cap binds, the reduction is spread proportionally across the class's
# leave types.
#
# Args:
#   d        : ACS analysis data frame with plen_* columns.
#   maxlen_* : maximum program days per type, per class (DI/PFL), and total.
# Returns: d with plen_* capped (and DI_plen/PFL_plen/particip_length
#   recalculated where class caps are evaluated).
check_caps <- function(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild, 
                       maxlen_total, maxlen_DI, maxlen_PFL) {
  # for each individual leave type
  for (i in leave_types) {
    plen_var= paste("plen_",i, sep="")
    max_val=paste("maxlen_",i,sep="")
    d[plen_var] <- with(d, ifelse(get(plen_var)>get(max_val),get(max_val), get(plen_var)))
  }
  # apply cap for DI class (own + matdis) only when it can bind, i.e. when it
  # differs from the sum of its component caps.
  # BUG FIX: condition previously compared against maxlen_bond+maxlen_matdis;
  # DI is own + matdis (see DI_plen below), so maxlen_own is the correct term.
  if (maxlen_DI!=maxlen_own+maxlen_matdis) {
    d <- d %>% mutate(DI_plen=plen_matdis+plen_own)
    d['DI_plen'] <- with(d, ifelse(DI_plen>maxlen_DI,maxlen_DI,DI_plen))
    # evenly distributed cap among leave types (proportional scale-down)
    d['reduce'] <- with(d, ifelse(plen_matdis+plen_own!=0, DI_plen/(plen_matdis+plen_own),0))
    d['plen_matdis']=round(d[,'plen_matdis']*d[,'reduce'])
    d['plen_own']=round(d[,'plen_own']*d[,'reduce'])
  }
  # apply cap for PFL class (bond + illparent + illspouse + illchild)
  if (maxlen_PFL!=maxlen_illparent+maxlen_illspouse+maxlen_illchild+maxlen_bond) {
    d <- d %>% mutate(PFL_plen=plen_bond+plen_illparent+plen_illchild+plen_illspouse)
    d['PFL_plen'] <- with(d, ifelse(PFL_plen>maxlen_PFL,maxlen_PFL,PFL_plen))
    # evenly distributed cap among leave types (proportional scale-down)
    d['reduce'] <- with(d, ifelse(plen_bond+plen_illparent+plen_illchild+plen_illspouse!=0,
                                  PFL_plen/(plen_bond+plen_illparent+plen_illchild+plen_illspouse),0))
    d['plen_bond']=round(d[,'plen_bond']*d[,'reduce'])
    d['plen_illchild']=round(d[,'plen_illchild']*d[,'reduce'])
    d['plen_illspouse']=round(d[,'plen_illspouse']*d[,'reduce'])
    d['plen_illparent']=round(d[,'plen_illparent']*d[,'reduce'])
  }
  # apply cap for all leaves combined.
  # BUG FIX: the six-type sum previously listed maxlen_bond twice and omitted
  # maxlen_own.
  if (maxlen_total!=maxlen_DI+maxlen_PFL | maxlen_total!=maxlen_illparent+maxlen_illspouse+maxlen_illchild+maxlen_bond+maxlen_own+maxlen_matdis) {
    d['particip_length']=0
    for (i in leave_types) {
      plen_var=paste("plen_",i,sep="")
      d <- d %>% mutate(particip_length=particip_length+get(plen_var))
    }
    d['particip_length'] <- with(d, ifelse(particip_length>maxlen_total,maxlen_total,particip_length))
    d['reduce'] <- with(d, ifelse(plen_matdis+plen_own+plen_bond+plen_illparent+plen_illchild+plen_illspouse!=0,
                                  particip_length/(plen_matdis+plen_own+plen_bond+plen_illparent+plen_illchild+plen_illspouse),0))
    # evenly distributed cap among leave types (proportional scale-down)
    d['plen_matdis']=round(d[,'plen_matdis']*d[,'reduce'])
    d['plen_own']=round(d[,'plen_own']*d[,'reduce'])
    d['plen_bond']=round(d[,'plen_bond']*d[,'reduce'])
    d['plen_illchild']=round(d[,'plen_illchild']*d[,'reduce'])
    d['plen_illspouse']=round(d[,'plen_illspouse']*d[,'reduce'])
    d['plen_illparent']=round(d[,'plen_illparent']*d[,'reduce'])
    # recalculate DI/PFL/total lengths after rounding
    d <- d %>% mutate(DI_plen=plen_matdis+plen_own)
    d <- d %>% mutate(PFL_plen=plen_bond+plen_illparent+plen_illchild+plen_illspouse)
    d <- d %>% mutate(particip_length=DI_plen+ PFL_plen)
  }
  return(d)
}
# ============================ #
# 7. BENEFITS
# ============================ #
# Adding base values for new ACS variables involving imputed FMLA values
# Initialize baseline program-benefit and employer-pay dollar amounts, plus
# the "actual" copies that later parameter functions modify in place.
#
# Args:    d : ACS analysis data frame with WAGP, weeks_worked,
#              particip_length, total_length, benefit_prop, prop_pay columns.
# Returns: d with base_benefits, base_leave_pay, actual_leave_pay,
#          actual_benefits columns added.
BENEFITS <- function(d) {
  # daily wage = annual wage / (weeks worked * 5 workdays per week)
  daily_wage <- d$WAGP / (round(d$weeks_worked * 5))
  # base benefits received from program over participating leave days;
  # NA (e.g. zero weeks worked) is treated as zero dollars
  d$base_benefits <- daily_wage * d$particip_length * d$benefit_prop
  d$base_benefits <- ifelse(is.na(d$base_benefits), 0, d$base_benefits)
  # base pay received from employer over all leave days
  # (pay received is the same across all pay schedules)
  d$base_leave_pay <- daily_wage * d$total_length * d$prop_pay
  d$base_leave_pay <- ifelse(is.na(d$base_leave_pay), 0, d$base_leave_pay)
  # actual pay and benefits start at base values; remaining parameter
  # functions modify these copies
  d$actual_leave_pay <- d$base_leave_pay
  d$actual_benefits <- d$base_benefits
  return(d)
}
# ============================ #
# 8. BENEFITEFFECT
# ============================ #
# Accounting for some "cost" of applying for the program when deciding between employer paid leave and program
BENEFITEFFECT <- function(d) {
# Simulates a take-up "benefit effect": applying for the program has some
# cost, so whether a would-be participant actually claims benefits depends
# on the weekly dollar gain of program benefits over employer-paid leave,
# matched against an income-by-benefit-difference take-up probability grid
# and decided with a uniform random draw.
#
# Input:  d - person-level data after BENEFITS (uses actual_benefits,
#             actual_leave_pay, particip, particip_length, plen_*, DI_plen,
#             PFL_plen, eligworker, exhausted_by, extend_flag, faminc, WAGP).
# Output: d with benefit/participation columns zeroed for decliners and a
#         new bene_effect_flg dummy marking them.
# Create uptake probabilities dataframe
# obtained from 2001 Westat survey which ACM used for this purpose
# d_prob <- read.csv("bene_effect_prob.csv")
# Hardcoding above CSV to remove dependency
# Three columns of data set
#Family income category
finc_cat=rep(seq.int(10000,100000, by = 10000),4)
#Benefit difference
bene_diff=c(rep(0,10),rep(25,10),rep(50,10),rep(125,10))
#Probability of taking up benefits
uptake_prob=c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.12, 0.08, 0.05,
              0.04, 0.02, 0.02, 0.01, 0.01, 0, 0, 0.59, 0.48,
              0.38, 0.28, 0.21, 0.15, 0.1, 0.07, 0.05, 0.03,
              1, 1, 1, 1, 1, 1, 0.99, 0.99, 0.98, 0.98)
# create data frame
d_prob=data.frame(finc_cat,bene_diff,uptake_prob)
# define benefit difference to match 2001 Westat survey categories
# weekly gain = per-participation-day difference times 5 workdays per week
d <- d %>% mutate(bene_diff=(actual_benefits-actual_leave_pay)/particip_length*5)
# bucket the weekly gain into the survey's 0/25/50/125 categories
d <- d %>% mutate(bene_diff=ifelse(bene_diff<=25, 0, bene_diff))
d <- d %>% mutate(bene_diff=ifelse(bene_diff<=50 & bene_diff>25, 25, bene_diff))
d <- d %>% mutate(bene_diff=ifelse(bene_diff<=125 & bene_diff>50, 50, bene_diff))
d <- d %>% mutate(bene_diff=ifelse(bene_diff>125, 125, bene_diff))
d <- d %>% mutate(bene_diff=ifelse(is.na(bene_diff), 0, bene_diff))
d['bene_diff']=as.integer(d[,'bene_diff'])
# PLACEHOLDER dealing with missing faminc values for the test ACS data set.
# need to come up with systematic way of addressing missing values in ACS eventually
d <- d %>% mutate(faminc=ifelse(is.na(faminc),WAGP,faminc))
# define family income to match 2001 Westat survey categories
# (finc_cat holds the lower bound of each $10k bracket, topcoded at 100k)
d <- d %>% mutate(finc_cat=ifelse(faminc<=10000,10000,NA))
inc_cut <- seq(10000, 90000, by=10000)
for (i in inc_cut) {
d <- d %>% mutate(finc_cat=ifelse(faminc>i & faminc<=i+10000,i,finc_cat))
}
d <- d %>% mutate(finc_cat=ifelse(faminc>100000,100000,finc_cat))
d['finc_cat']=as.numeric(d[,'finc_cat'])
# recalculate uptake based on bene_diff
# NOTE(review): join() is presumably plyr::join - confirm plyr is attached by the caller
d <- join(d,d_prob, type="left",match="all",by=c("bene_diff", "finc_cat"))
# one uniform draw per person decides take-up against uptake_prob
d['rand']=runif(nrow(d))
# exclude those participants that will not be affected by benefit effect
# those who exhaust employer benefits before leave ends will always participate
d["universe"] <- ifelse(d[,"eligworker"]==1 & !is.na(d[,'exhausted_by']),0,1)
# those who choose to extend leaves in the presence of the program will always participate
d["universe"] <- ifelse(d[,"eligworker"]==1 & d[,'extend_flag']==1,0,d[,'universe'])
# flag those who are not claiming benefits due to benefit effect
d <- d %>% mutate(bene_effect_flg=ifelse(rand>uptake_prob & particip==1 & universe==1,1,0))
# update leave vars
# decliners: zero benefits, all program leave lengths, and participation.
# leave_types is taken from the enclosing (global) environment.
d <- d %>% mutate(actual_benefits=ifelse(rand>uptake_prob & particip==1 & universe==1,0,actual_benefits))
d <- d %>% mutate(particip_length=ifelse(rand>uptake_prob & particip==1 & universe==1,0,particip_length))
for (i in leave_types) {
plen_var= paste("plen_",i, sep="")
d[plen_var] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,get(plen_var)))
}
d['DI_plen'] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,DI_plen))
d['PFL_plen'] <- with(d, ifelse(rand>uptake_prob & particip==1 & universe==1,0,PFL_plen))
d <- d %>% mutate(particip=ifelse(rand>uptake_prob & particip==1 & universe==1,0,particip))
# drop intermediate columns used only for the take-up decision
d <- d[, !(names(d) %in% c('rand','bene_diff','finc_cat','uptake_prob','universe'))]
return(d)
}
# ============================ #
# 9. TOPOFF
# ============================ #
# employers who would pay their employees
# 100 percent of wages while on leave would instead require their employees to participate
# in the program and would "top-off" the program benefits by paying the difference
# between program benefits and full pay.
# User can specify percent of employers that engage in this, and minimum length of leave this is required for
TOPOFF <- function(d, topoff_rate, topoff_minlength) {
# Simulates employer "top-off" behavior: a user-specified share of employers
# who currently pay full wages during leave (prop_pay==1) instead require
# employees to participate in the program and pay the difference between
# program benefits and full pay. Only leaves of at least topoff_minlength
# days are subject to this requirement.
#
# Inputs:
#   d                - person-level data after BENEFITS (needs prop_pay,
#                      take_*/length_*/plen_* columns, WAGP, weeks_worked,
#                      benefit_prop, base_leave_pay, actual_leave_pay, particip)
#   topoff_rate      - proportion of fully-paying employers that top off
#   topoff_minlength - minimum leave length (days) for top-off to apply
# Output: d with plen_*, particip_length, actual_benefits, actual_leave_pay
#         and particip updated, plus a topoff_flg dummy.
#
# Changes vs. previous version: removed the unused local `len_vars` vector
# and the no-op get(paste(var)) wrappers (paste() with one argument and no
# collapse is an identity on a length-1 string); behavior is unchanged.
d['topoff_rate'] <- topoff_rate
d['topoff_min'] <- topoff_minlength
# random draw selects which fully-paying employers top off
d['rand'] <- runif(nrow(d))
d <- d %>% mutate(topoff= ifelse(rand<topoff_rate & prop_pay==1,1,0))
d <- d %>% mutate(topoff_count=0)
# for each leave type, topped-off leaves participate for their full length.
# leave_types is taken from the enclosing (global) environment.
for (i in leave_types) {
len_var=paste0("length_",i)
plen_var=paste0("plen_",i)
take_var=paste0("take_",i)
d['topoff_temp'] <- with(d,ifelse(topoff==1 & topoff_min<=get(len_var) & get(take_var)==1,1,0))
d[plen_var] <- with(d,ifelse(topoff_temp==1,get(len_var),get(plen_var)))
d <- d %>% mutate(topoff_count= ifelse(topoff_temp==1 ,topoff_count+1,topoff_count))
}
# rebuild total participation length from the per-type program lengths
d['particip_length']=0
for (i in leave_types) {
plen_var=paste0("plen_",i)
d <- d %>% mutate(particip_length=particip_length+get(plen_var))
}
# recalculate benefits based on updated participation length
# actual benefits received from program
# note: topoff will override benefiteffect changes
d <- d %>% mutate(actual_benefits=WAGP/(round(weeks_worked*5))*particip_length*benefit_prop)
d <- d %>% mutate(actual_benefits=ifelse(is.na(actual_benefits),0,actual_benefits))
# employer pays the gap between full base pay and program benefits
d <- d %>% mutate(actual_leave_pay=ifelse(topoff_count>0,base_leave_pay-actual_benefits,actual_leave_pay))
d <- d %>% mutate(topoff_flg= ifelse(topoff_count>0,1,0))
# adjust participation flag. leave taken assumed to not be affected by top off behavior
d <- d %>% mutate(particip=ifelse(topoff_count>0,1,particip))
# clean up vars
d <- d[, !(names(d) %in% c('rand','topoff_rate','topoff_temp','topoff_min','topoff', 'topoff_count'))]
return(d)
}
# ============================ #
# 10. DEPENDENTALLOWANCE
# ============================ #
# include a flat weekly dependent allowance for families with children
DEPENDENTALLOWANCE <- function(d,dependent_allow) {
# Add a flat weekly dependent allowance to program benefits for
# participants who have children. particip_length is measured in workdays,
# so particip_length / 5 converts it to weeks spent on the program.
gets_allowance <- d$particip == 1 & d$nochildren == 0
extra <- dependent_allow * d$particip_length / 5
d$actual_benefits <- ifelse(gets_allowance, d$actual_benefits + extra, d$actual_benefits)
return(d)
}
# ============================ #
# 11. DIFF_ELIG
# ============================ #
# Some state programs have differential eligibility by leave type.
# For example, NJ's private plan option means about 30% of the PFL eligble population is not
# eligible for DI.
# However, eligibility is currently programmed as universally binary.
# As a workaround for now, this function allows users to simulate this differential eligibility
# by removing some specified proportion of participation for specific leave types
# at random from the population
DIFF_ELIG <- function(d, own_elig_adj, illspouse_elig_adj, illchild_elig_adj,
                      illparent_elig_adj, matdis_elig_adj, bond_elig_adj) {
# For each leave type, randomly keep only the given proportion (the
# corresponding *_elig_adj argument) of current program participants and
# zero out the program leave length for the rest, simulating leave-type-
# specific ineligibility. Requires an 'id' column to match sampled rows
# back to d.
# Build (adjustment-argument-name, plen-column-name) pairs per leave type.
# leave_types is taken from the enclosing (global) environment.
adjs_vals <- paste0(leave_types, '_elig_adj')
plen_vars <- paste0('plen_',leave_types)
zip <- mapply(list, adjs_vals, plen_vars, SIMPLIFY=F)
# for each pair of leave type/adj val...
for (i in zip) {
adjs_val=i[[1]]
plen_var=i[[2]]
# select proportion of participants equal of adj value. rest will no longer collect benefits
# for that type (simulating they are ineligible)
# get(adjs_val) pulls the matching *_elig_adj function argument by name
nsamp <- ceiling(get(adjs_val)*nrow(filter(d, get(plen_var)>0)))
psamp <- sample_n(filter(d, get(plen_var)>0), nsamp)
# pflag==1 marks the retained (still-eligible) sample; everyone else's
# pflag starts as NA and is replaced with 0 below
d[d[,'id'] %in% psamp[,'id'],'pflag'] <-1
d['pflag'] <- d['pflag'] %>% replace(., is.na(.), 0)
# zero the program leave length for unretained participants of this type
d[plen_var] <- with(d, ifelse(get(plen_var)>0 & pflag==0, 0, get(plen_var)))
d <- d[, !(names(d) %in% c('pflag'))]
# rest of the appropriate vars to adjust are handled in the CLEANUP function next
}
return(d)
}
# ============================ #
# 12. CLEANUP
# ============================ #
# Final variable alterations and consistency checks
CLEANUP <- function(d, week_bene_cap,week_bene_cap_prop,week_bene_min, maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild,
                    maxlen_total,maxlen_DI,maxlen_PFL) {
# Final variable alterations and consistency checks:
#  - re-applies per-type/total/DI/PFL program length caps (check_caps),
#  - enforces weekly benefit caps and minimums,
#  - splits total benefits across leave types (bene_*) and builds ptake_*
#    participation dummies.
# week_bene_cap_prop, if non-NULL, additionally caps weekly benefits at that
# proportion of the population mean weekly wage.
# Check leave length participation caps again
# INPUT: ACS data set
d <- check_caps(d,maxlen_own, maxlen_matdis, maxlen_bond, maxlen_illparent, maxlen_illspouse, maxlen_illchild,
                maxlen_total, maxlen_DI, maxlen_PFL)
# OUTPUT: ACS data set with participating leave length capped at user-specified program maximums
# cap benefit payments at program's weekly benefit cap
# (particip_length is in workdays; the /5 converts the weekly cap to the spell)
d <- d %>% mutate(actual_benefits= ifelse(actual_benefits>ceiling(week_bene_cap*particip_length)/5,
                                          ceiling(week_bene_cap*particip_length)/5, actual_benefits))
# cap benefits payments as a function of mean weekly wage in the population
if (!is.null(week_bene_cap_prop)) {
cap <- mean(d$WAGP/d$weeks_worked)*week_bene_cap_prop
d <- d %>% mutate(actual_benefits= ifelse(actual_benefits>ceiling(cap*particip_length)/5,
                                          ceiling(cap*particip_length)/5, actual_benefits))
}
# establish minimum weekly benefits for program participants
d <- d %>% mutate(actual_benefits= ifelse(actual_benefits<ceiling(week_bene_min*particip_length)/5,
                                          ceiling(week_bene_min*particip_length)/5, actual_benefits))
# make sure those with particip_length 0 are also particip 0
d <- d %>% mutate(particip= ifelse(particip_length==0,0, particip))
# calculate leave specific benefits
d['ptake_PFL'] <-0
d['ptake_DI'] <-0
d['bene_DI'] <- 0
d['bene_PFL'] <- 0
# apportion total benefits to each leave type by its share of program days.
# leave_types is taken from the enclosing (global) environment.
for (i in leave_types) {
plen_var=paste("plen_",i,sep="")
ben_var=paste("bene_",i,sep="")
d[ben_var] <- with(d, actual_benefits*(get(plen_var)/particip_length))
# 0/0 above yields NaN for non-participants; zero those out (is.na(NaN) is TRUE)
d[ben_var] <- with(d, ifelse(is.na(get(ben_var)),0,get(ben_var)))
# benefits for PFL, DI leave types
if (i=='own'|i=='matdis') {
d['bene_DI'] <- with(d, bene_DI+ get(ben_var))
}
if (i=='bond'|i=='illspouse'|i=='illparent'|i=='illchild') {
d['bene_PFL'] <- with(d, bene_PFL + get(ben_var))
}
# create ptake_* vars
# dummies for those that took a given type of leave, and collected non-zero benefits for it
take_var=paste("take_",i,sep="")
ptake_var=paste("ptake_",i,sep="")
d[ptake_var] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,0))
# dummies for PFL, DI leave types
if (i=='own'|i=='matdis') {
d['ptake_DI'] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,ptake_DI))
}
if (i=='bond'|i=='illspouse'|i=='illparent'|i=='illchild') {
d['ptake_PFL'] <- with(d, ifelse(get(ben_var)>0 & get(take_var)>0,1,ptake_PFL))
}
}
return(d)
}
# ============================ #
# 12A. check_caps
# ============================ #
# see function 6A.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CattleSIM.R
\docType{data}
\name{CattleSIM}
\alias{CattleSIM}
\title{Cattle dataset with simulated management units}
\usage{
data(CattleSIM)
}
\description{
The cattle dataset is simulated with QMSim software (Sargolzaei and Schenkel, 2009).
This dataset includes 2,500 individuals across six generations (from founder to generation 5),
each with 10,000 single nucleotide polymorphisms spread over 29 autosomes. A single phenotype with a heritability of 0.6 was simulated.
In addition, six fixed effects were simulated: one cluster effect and five management unit simulation scenarios.
Fixed effect of the cluster is simulated using the K-medoid algorithm (Kaufman and Rousseeuw, 1990) to assign 2,500 individuals into eight clusters.
MUSC1 is simulated by randomly allocating eight clusters into two sets, which is regarded as the least connected design. From MUSC2 to 5,
randomly sampled individuals (i.e., 140, 210, 280, and 350) were exchanged between the two sets in MUSC1 to steadily increase
the degree of relatedness.
}
\examples{
# Load cattle dataset
data(CattleSIM)
# Check management units simulation
str(CattleSIM)
}
\references{
Sargolzaei, M., and F. S. Schenkel. 2009. QMSim: a large-scale
genome simulator for livestock. Bioinformatics 25:680–681. doi:10.1093/bioinformatics/btp045
Kaufman, L. and P. Rousseeuw. 1990. Finding groups in data:
an introduction to cluster analysis. John Wiley and Sons, New York.
}
\author{
Haipeng Yu and Gota Morota
Maintainer: Haipeng Yu \email{haipengyu@vt.edu}
}
\keyword{datasets}
|
/man/CattleSIM.Rd
|
no_license
|
QGresources/GCA
|
R
| false
| true
| 1,633
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CattleSIM.R
\docType{data}
\name{CattleSIM}
\alias{CattleSIM}
\title{Cattle dataset with simulated management units}
\usage{
data(CattleSIM)
}
\description{
The cattle dataset is simulated with QMSim software (Sargolzaei and Schenkel, 2009).
This dataset includes 2,500 individuals across six generations (from founder to generation 5),
each with 10,000 single nucleotide polymorphisms spread over 29 autosomes. A single phenotype with a heritability of 0.6 was simulated.
In addition, six fixed effects were simulated: one cluster effect and five management unit simulation scenarios.
Fixed effect of the cluster is simulated using the K-medoid algorithm (Kaufman and Rousseeuw, 1990) to assign 2,500 individuals into eight clusters.
MUSC1 is simulated by randomly allocating eight clusters into two sets, which is regarded as the least connected design. From MUSC2 to 5,
randomly sampled individuals (i.e., 140, 210, 280, and 350) were exchanged between the two sets in MUSC1 to steadily increase
the degree of relatedness.
}
\examples{
# Load cattle dataset
data(CattleSIM)
# Check management units simulation
str(CattleSIM)
}
\references{
Sargolzaei, M., and F. S. Schenkel. 2009. QMSim: a large-scale
genome simulator for livestock. Bioinformatics 25:680–681. doi:10.1093/bioinformatics/btp045
Kaufman, L. and P. Rousseeuw. 1990. Finding groups in data:
an introduction to cluster analysis. John Wiley and Sons, New York.
}
\author{
Haipeng Yu and Gota Morota
Maintainer: Haipeng Yu \email{haipengyu@vt.edu}
}
\keyword{datasets}
|
# Auto-generated fuzzer regression case (libFuzzer + valgrind) for
# grattan::IncomeTax. Feeds extreme/denormal rates and thresholds together
# with an empty income vector x; the point is exercising the call without a
# crash, not the numerical result.
testlist <- list(rates = c(-4.38753740307468e+307, -2.11965588216799e-289, -1.83593039382815e-307, -2.6064446869563e+304, 7.18372580480888e-310, 5.94568495811084e-302, 7.29112203361732e-304, 0, 0, 2.75909158454893e-306, -5.48612407015086e+303, NaN, 3.56048348188083e-306, 34082834.0004893, NaN, 2.80614289249855e-312, -2.2308331518689e-289, 1.39067116173462e-309, 0, 2.56647495436064e-301, -1.26836452888033e-30, 6.01362129181413e-317, -1.26836459057627e-30, 4.00791842552789e-306, -1.26826829408472e-30, 2.40086640681612e-300, 5.67865049360052e-270, 3.94108708383232e-312 ), thresholds = c(1.24351972100265e-13, 7.2903536873204e-304, 1.09614956738224e-314, -2.8213831420555e-277, 5.43230922552326e-312, 0, 0, 2.6949346545259e-312, -4.38889926233518e+306, 2.81218450820696e-312, 7.55038392918931e-15, -2.18003413361522e-289, 2.84809454421907e-306, 5.5869437297374e-319, 9.35293053394057e-309, NaN, -1.28189852548343e-30, -9.36473653341481e-280, -3.04032340213361e-288, 2.56647495438759e-301, 5.67865049362949e-270), x = numeric(0))
# Invoke the target with the fuzzed argument list.
result <- do.call(grattan::IncomeTax,testlist)
# Inspect whatever came back; a crash/UB during the call is the real signal.
str(result)
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610052617-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 1,103
|
r
|
# Auto-generated fuzzer regression case (libFuzzer + valgrind) for
# grattan::IncomeTax. Feeds extreme/denormal rates and thresholds together
# with an empty income vector x; the point is exercising the call without a
# crash, not the numerical result.
testlist <- list(rates = c(-4.38753740307468e+307, -2.11965588216799e-289, -1.83593039382815e-307, -2.6064446869563e+304, 7.18372580480888e-310, 5.94568495811084e-302, 7.29112203361732e-304, 0, 0, 2.75909158454893e-306, -5.48612407015086e+303, NaN, 3.56048348188083e-306, 34082834.0004893, NaN, 2.80614289249855e-312, -2.2308331518689e-289, 1.39067116173462e-309, 0, 2.56647495436064e-301, -1.26836452888033e-30, 6.01362129181413e-317, -1.26836459057627e-30, 4.00791842552789e-306, -1.26826829408472e-30, 2.40086640681612e-300, 5.67865049360052e-270, 3.94108708383232e-312 ), thresholds = c(1.24351972100265e-13, 7.2903536873204e-304, 1.09614956738224e-314, -2.8213831420555e-277, 5.43230922552326e-312, 0, 0, 2.6949346545259e-312, -4.38889926233518e+306, 2.81218450820696e-312, 7.55038392918931e-15, -2.18003413361522e-289, 2.84809454421907e-306, 5.5869437297374e-319, 9.35293053394057e-309, NaN, -1.28189852548343e-30, -9.36473653341481e-280, -3.04032340213361e-288, 2.56647495438759e-301, 5.67865049362949e-270), x = numeric(0))
# Invoke the target with the fuzzed argument list.
result <- do.call(grattan::IncomeTax,testlist)
# Inspect whatever came back; a crash/UB during the call is the real signal.
str(result)
|
# CIAT, 2016 Updated: February 2017
# Author: Bunn & Castro
# Target: predict RF - Cluster
#
# Applies a previously fitted random-forest ensemble (rflist_5.rData) to
# current-climate bioclimatic rasters and writes out three ASCII grids:
# predicted cluster, summed suitability probability, and uncertainty.
# Load libraries
library(tidyverse)
library(raster)
library(rgdal)
library(cclust)
library(outliers)
library(dismo)
library(gtools)
library(multcomp)
library(sp)
library(rgeos)
library(outliers)
library(FactoMineR)
library(pROC)
library(randomForest)
library(stringr)
# Load files
set.seed(1234)
# pick the workspace root depending on operating system (cluster vs. Windows share)
OSys <- Sys.info(); OSys <- OSys[names(OSys)=='sysname']
if(OSys == 'Linux'){
path <- '//mnt/Workspace_cluster_9/Coffee_Cocoa2/_cam'
} else {
if(OSys == 'Windows'){
path <- '//dapadfs/Workspace_cluster_9/Coffee_Cocoa2/_cam'
}
}
run <- '_run6'
# load(paste0(path, '/_rData/', run, '/clusterpresdata.rData'))
load(paste0(path, '/_rData/', run, '/clusterdata.rData'))
load(paste0(path, '/_RF/', run, '/_models/rflist_5.rData'))
gcmlist <- 'current'
ar5biofolder <- paste0(path, '/_raster/_climate/_current/_asc')
resultsfolder <- paste0(path, '/_RF/', run, '/_results/_raw')
modelfolder <- paste0(path, '/_RF/', run, '/_models')
gcm <- gcmlist
toMatch <- "bio"
# stack the bioclimatic ASCII layers in natural sort order
gcmfiles <- list.files(ar5biofolder, full.names = TRUE, pattern = ".asc$") %>%
  mixedsort() %>%
  grep('bio', ., value = T)
climatelayers <- stack(gcmfiles)
# Values
climatevalues <- data.frame(getValues(climatelayers))
# merge the list of per-chunk forests into a single ensemble
rff <- do.call(randomForest::combine, rflist)
rasterClust <- raster::predict(rff, climatevalues) # To cluster be use of modalraster
rasterRFclust_mask <- climatelayers[[1]]
values(rasterRFclust_mask) <- rasterClust
writeRaster(rasterRFclust_mask, paste0(path, '/_RF/', run, '/_results/_raw/_current/RF_5Clust_Current.asc'), overwrite = T)
# Probabilistic
rasterProbs <- raster::predict(rff, climatevalues, type = "prob")
max(rasterProbs[,1], na.rm = TRUE)
# suitability = summed probability over probability columns 3-7
# NOTE(review): presumably the first two classes are unsuitable - confirm
rasterRF <- rowSums(rasterProbs[,3:7])
uncertainty <- apply(rasterProbs, 1, max) # maximum class probability per row, used as the uncertainty measure
rasterRFprob <- climatelayers[[1]]
values(rasterRFprob) <- rasterRF
rasterRFuncertainty <- climatelayers[[1]]
values(rasterRFuncertainty) <- uncertainty
no.clusters <- 5
writeRaster(rasterRFprob, paste(resultsfolder, "/_current/RF_", no.clusters, "Prob_", gcm, ".asc", sep=""), format="ascii", overwrite = T)
writeRaster(rasterRFuncertainty, paste(resultsfolder, "/_current/RF_", no.clusters, "Unc_", gcm, ".asc", sep=""), format="ascii", overwrite = T)
# quick visual checks of the three output surfaces
plot(rasterRFprob)
title(main = gcm, sub="Suitability")
plot(rasterRFclust_mask)
title(main=gcm,sub="SuitClass")
plot(rasterRFuncertainty)
title(main=gcm,sub="Uncertainty")
|
/_RF/_runs/2_predictRF.R
|
no_license
|
fabiolexcastro/centralAmericaCocoa
|
R
| false
| false
| 2,631
|
r
|
# CIAT, 2016 Updated: February 2017
# Author: Bunn & Castro
# Target: predict RF - Cluster
#
# Applies a previously fitted random-forest ensemble (rflist_5.rData) to
# current-climate bioclimatic rasters and writes out three ASCII grids:
# predicted cluster, summed suitability probability, and uncertainty.
# Load libraries
library(tidyverse)
library(raster)
library(rgdal)
library(cclust)
library(outliers)
library(dismo)
library(gtools)
library(multcomp)
library(sp)
library(rgeos)
library(outliers)
library(FactoMineR)
library(pROC)
library(randomForest)
library(stringr)
# Load files
set.seed(1234)
# pick the workspace root depending on operating system (cluster vs. Windows share)
OSys <- Sys.info(); OSys <- OSys[names(OSys)=='sysname']
if(OSys == 'Linux'){
path <- '//mnt/Workspace_cluster_9/Coffee_Cocoa2/_cam'
} else {
if(OSys == 'Windows'){
path <- '//dapadfs/Workspace_cluster_9/Coffee_Cocoa2/_cam'
}
}
run <- '_run6'
# load(paste0(path, '/_rData/', run, '/clusterpresdata.rData'))
load(paste0(path, '/_rData/', run, '/clusterdata.rData'))
load(paste0(path, '/_RF/', run, '/_models/rflist_5.rData'))
gcmlist <- 'current'
ar5biofolder <- paste0(path, '/_raster/_climate/_current/_asc')
resultsfolder <- paste0(path, '/_RF/', run, '/_results/_raw')
modelfolder <- paste0(path, '/_RF/', run, '/_models')
gcm <- gcmlist
toMatch <- "bio"
# stack the bioclimatic ASCII layers in natural sort order
gcmfiles <- list.files(ar5biofolder, full.names = TRUE, pattern = ".asc$") %>%
  mixedsort() %>%
  grep('bio', ., value = T)
climatelayers <- stack(gcmfiles)
# Values
climatevalues <- data.frame(getValues(climatelayers))
# merge the list of per-chunk forests into a single ensemble
rff <- do.call(randomForest::combine, rflist)
rasterClust <- raster::predict(rff, climatevalues) # To cluster be use of modalraster
rasterRFclust_mask <- climatelayers[[1]]
values(rasterRFclust_mask) <- rasterClust
writeRaster(rasterRFclust_mask, paste0(path, '/_RF/', run, '/_results/_raw/_current/RF_5Clust_Current.asc'), overwrite = T)
# Probabilistic
rasterProbs <- raster::predict(rff, climatevalues, type = "prob")
max(rasterProbs[,1], na.rm = TRUE)
# suitability = summed probability over probability columns 3-7
# NOTE(review): presumably the first two classes are unsuitable - confirm
rasterRF <- rowSums(rasterProbs[,3:7])
uncertainty <- apply(rasterProbs, 1, max) # maximum class probability per row, used as the uncertainty measure
rasterRFprob <- climatelayers[[1]]
values(rasterRFprob) <- rasterRF
rasterRFuncertainty <- climatelayers[[1]]
values(rasterRFuncertainty) <- uncertainty
no.clusters <- 5
writeRaster(rasterRFprob, paste(resultsfolder, "/_current/RF_", no.clusters, "Prob_", gcm, ".asc", sep=""), format="ascii", overwrite = T)
writeRaster(rasterRFuncertainty, paste(resultsfolder, "/_current/RF_", no.clusters, "Unc_", gcm, ".asc", sep=""), format="ascii", overwrite = T)
# quick visual checks of the three output surfaces
plot(rasterRFprob)
title(main = gcm, sub="Suitability")
plot(rasterRFclust_mask)
title(main=gcm,sub="SuitClass")
plot(rasterRFuncertainty)
title(main=gcm,sub="Uncertainty")
|
#' Main DoubletDecon v1.0.1
#'
#' This is the main function. This function identifies clusters of doublets with a combination of deconvolution analysis and unique gene expression and individual doublet cells with deconvolution analysis.
#' @param rawDataFile Name of file containing ICGS or Seurat expression data (gene by cell)
#' @param groupsFile Name of file containing group assignments (3 column: cell, group(numeric), group(numeric or character))
#' @param filename Unique filename to be incorporated into the names of outputs from the functions.
#' @param location Directory where output should be stored
#' @param fullDataFile Name of file containing full expression data (gene by cell). Default is NULL.
#' @param removeCC Remove cell cycle gene cluster by KEGG enrichment. Default is FALSE.
#' @param species Species as scientific species name, KEGG ID, three letter species abbreviation, or NCBI ID. Default is "mmu".
#' @param rhop x in mean+x*SD to determine upper cutoff for correlation in the blacklist. Default is 1.
#' @param write Write output files as .txt files. Default is TRUE.
#' @param PMF Use step 2 (unique gene expression) in doublet determination criteria. Default is TRUE.
#' @param useFull Use full gene list for PMF analysis. Requires fullDataFile. Default is FALSE.
#' @param heatmap Boolean value for whether to generate heatmaps. Default is TRUE. Can be slow to datasets larger than ~3000 cells.
#' @param centroids Use centroids as references in deconvolution instead of medoids. Default is TRUE.
#' @param num_doubs The user defined number of doublets to make for each pair of clusters. Default is 100.
#' @param only50 use only synthetic doublets created with 50\%/50\% mix of parent cells, as opposed to the extended option of 30\%/70\% and 70\%/30\%, default is FALSE.
#' @param min_uniq minimum number of unique genes required for a cluster to be rescued
#' @param nCores number of cores to be used during rescue step. Default is -1 for automatically detected.
#' @return data_processed = new expression file (cleaned).
#' @return groups_processed = new groups file (cleaned).
#' @return PMF_results = pseudo marker finder t-test results (gene by cluster).
#' @return DRS_doublet_table = each cell and whether it is called a doublet by deconvolution analysis.
#' @return DRS_results = results of deconvolution analysis (cell by cluster) in percentages.
#' @return Decon_called_freq = percentage of doublets called in each cluster by deconvolution analysis.
#' @return Final_doublets_groups = new groups file containing only doublets.
#' @return Final_nondoublets_groups = new groups file containing only non doublets.
#' @keywords doublet decon main
#' @export
Main_Doublet_Decon<-function(rawDataFile, groupsFile, filename, location, fullDataFile=NULL, removeCC=FALSE, species="mmu", rhop=1, write=TRUE, PMF=TRUE, useFull=FALSE, heatmap=TRUE, centroids=TRUE, num_doubs=100, only50=FALSE, min_uniq=4, nCores=-1){
#load required packages
cat("Loading packages...", sep="\n")
library(DeconRNASeq)
library(gplots)
library(plyr)
library(MCL)
library(clusterProfiler)
library(mygene)
library(tidyr)
library(R.utils)
library(dplyr)
library(foreach)
library(doParallel)
library(stringr)
#Set up log file
log_file_name=paste0(location, filename,".log")
log_con <- file(log_file_name)
cat(paste0("filename: ",filename), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("location: ",location), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("removeCC: ",removeCC), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("species: ",species), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("rhop: ",rhop), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("write: ",write), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("PMF: ",PMF), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("useFull: ",useFull), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("heatmap: ",heatmap), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("centroids: ",centroids), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("num_doubs: ",num_doubs), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("only50: ",only50), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("min_uniq: ",min_uniq), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("nCores: ",nCores), file=log_file_name, append=TRUE, sep="\n")
#Check variables
if(is.character(rawDataFile)!=TRUE & is.data.frame(rawDataFile)!=TRUE){print("ERROR: rawDataFile must be a character string!")}
if(is.character(groupsFile)!=TRUE & is.data.frame(groupsFile)!=TRUE & is.matrix(groupsFile)!=TRUE){print("ERROR: groupsFile must be a character string!")}
if(is.character(filename)!=TRUE){print("ERROR: filename must be a character string!")}
if(is.character(location)!=TRUE){print("ERROR: location must be a character string!")}
if(is.character(fullDataFile)!=TRUE & is.null(fullDataFile)!=TRUE & is.data.frame(fullDataFile)!=TRUE){print("ERROR: fullDataFile must be a character string or NULL!")}
if(is.logical(removeCC)!=TRUE){print("ERROR: removeCC must be TRUE or FALSE!")}
if(is.character(species)!=TRUE){print("ERROR: species must be a character string!")}
if(is.numeric(rhop)!=TRUE){print("ERROR: rhop must be numeric!")}
if(is.logical(write)!=TRUE){print("ERROR: write must be TRUE or FALSE!")}
if(is.logical(PMF)!=TRUE){print("ERROR: PMF must be TRUE or FALSE!")}
if(is.logical(useFull)!=TRUE){print("ERROR: useFull must be TRUE or FALSE!")}
if(is.logical(heatmap)!=TRUE){print("ERROR: heatmap must be TRUE or FALSE!")}
if(is.logical(centroids)!=TRUE){print("ERROR: centroids must be TRUE or FALSE!")}
if(is.numeric(num_doubs)!=TRUE){print("ERROR: numdoubs must be numeric!")}
if(is.logical(only50)!=TRUE){print("ERROR: only50 must be TRUE or FALSE!")}
if(is.numeric(min_uniq)!=TRUE){print("ERROR: min_uniq must be numeric!")}
#Read in data
cat("Reading data...", file=log_file_name, append=TRUE, sep="\n")
cat("Reading data...", sep="\n")
ICGS2_flag=F #set for checking if the input file is in ICGS2 format
if(class(rawDataFile)=="character"){
#NEW: test for ICGS2
rawDataHeader=read.table(rawDataFile, sep="\t",header=F, row.names=1, nrows=1, stringsAsFactors = F)
if(length(grep(":", rawDataHeader[2]))==1){
ICGS2_flag=T
ICGS2=ICGS2_to_ICGS1(rawDataFile, groupsFile, log_file_name)
rawData=ICGS2$rawData
}else{
rawData=read.table(rawDataFile, sep="\t",header=T, row.names=1, stringsAsFactors = T)
}
}else{
cat("WARNING: if using ICGS2 file input, please import 'rawDataFile' and 'groupsFile' as path/location instead of an R object." , sep="\n")
rawData=rawDataFile
}
if(class(groupsFile)=="character"){
if(ICGS2_flag==T){
groups=ICGS2$groups
}else{
groups=read.table(groupsFile, sep="\t",header=F, row.names=1, stringsAsFactors = T)
}
}else{
groups=groupsFile
}
#Clean up data and groups file
cat("Processing raw data...", file=log_file_name, append=TRUE, sep="\n")
cat("Processing raw data...", sep="\n")
data=Clean_Up_Input(rawData, groups, log_file_name=log_file_name)
og_processed_data=data$processed
groups=data$groups
#Centroids or medoids?
if(centroids==TRUE){
centroid_flag=TRUE
}else{
centroid_flag=FALSE
}
#Original data heatmap
if(heatmap==TRUE){
cat("Creating original data heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating original data heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(data$processed[2:nrow(data$processed), 2:ncol(data$processed)]), 0.99)), #end point of color key
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(data$processed[2:nrow(data$processed), 2:ncol(data$processed)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
dendrogram = "none", #do not generate dendrogram
col=mycol, #colors used in heatmap
ColSideColors = as.color(Renumber(data$processed[1,2:ncol(data$processed)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(data$processed[2:nrow(data$processed),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Original data: ", filename))) #main title
}
#Remove cell cycle gene cluster (optional)
if(removeCC==TRUE){
cat("Removing cell cycle clusters...", file=log_file_name, append=TRUE, sep="\n")
cat("Removing cell cycle clusters...", sep="\n")
data=Remove_Cell_Cycle(data$processed, species, log_file_name)
}else{
data=data$processed
}
if(write==TRUE){
write.table(data, paste0(location, "data_processed_", filename, ".txt"), sep="\t")
write.table(groups, paste0(location, "groups_processed_", filename, ".txt"), sep="\t")
}
#Calculate medoids, medoid correlations, blacklist to create new combine medoids
cat("Combining similar clusters...", file=log_file_name, append=TRUE, sep="\n")
cat("Combining similar clusters...", sep="\n")
BL=Blacklist_Groups(data, groups, rhop, centroid_flag, log_file_name)
newMedoids=BL$newMedoids
groupsMedoids=BL$newGroups
#Create synthetic doublets to get average synthetic profiles
cat("Creating synthetic doublet profiles...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating synthetic doublet profiles...", sep="\n")
if(.Platform$OS.type=="unix"){
sink("/dev/null") #hides DeconRNASeq output
synthProfilesx=Synthetic_Doublets(data, groups, groupsMedoids, newMedoids, num_doubs, log_file_name=log_file_name, only50=only50, location=location)
sink()
}else{
synthProfilesx=Synthetic_Doublets(data, groups, groupsMedoids, newMedoids, num_doubs, log_file_name=log_file_name, only50=only50, location=location)
}
synthProfiles=synthProfilesx$averagesAverages
doubletCellsInput2=synthProfilesx$doubletCellsInput2
if(write==TRUE){
write.table(doubletCellsInput2, paste0(location, "Synth_doublet_info_", filename, ".txt"), sep="\t")
}
#Calculate doublets using DeconRNASeq
cat("Step 1: Removing possible doublets...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 1: Removing possible doublets...", sep="\n")
if(.Platform$OS.type=="unix"){
sink("/dev/null") #hides DeconRNASeq output
doubletTable=Is_A_Doublet(data, newMedoids, groups, synthProfiles, log_file_name=log_file_name)
sink()
}else{
doubletTable=Is_A_Doublet(data, newMedoids, groups, synthProfiles, log_file_name=log_file_name)
}
if(write==TRUE){
write.table(doubletTable$isADoublet, paste0(location, "DRS_doublet_table_", filename, ".txt"), sep="\t")
write.table(doubletTable$resultsreadable, paste0(location, "DRS_results_", filename, ".txt"), sep="\t")
}
#Recluster doublets and non-doublets
cat("Step 2: Re-clustering possible doublets...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 2: Re-clustering possible doublets...", sep="\n")
reclusteredData=Recluster(isADoublet=doubletTable$isADoublet, data, groups, log_file_name = log_file_name)
data=reclusteredData$newData2$processed
groups=reclusteredData$newData2$groups
write.table(data, paste0(location, "data_processed_reclust_", filename, ".txt"), sep="\t", col.names = NA, quote=FALSE)
write.table(groups, paste0(location, "groups_processed_reclust_", filename, ".txt"), sep="\t")
#Run Pseudo Marker Finder to identify clusters with no unique gene expression
if(PMF==FALSE){
cat("SKIPPING Step 3: Rescuing cells with unique gene expression...", file=log_file_name, append=TRUE, sep="\n")
cat("SKIPPING Step 3: Rescuing cells with unique gene expression...", sep="\n")
PMFresults=NULL
}else{
cat("Step 3: Rescuing cells with unique gene expression...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 3: Rescuing cells with unique gene expression...", sep="\n")
if(useFull==TRUE){
PMFresults=Pseudo_Marker_Finder(as.data.frame(groups), redu_data2=paste0(location, "data_processed_reclust_", filename, ".txt"), full_data2=fullDataFile, min_uniq=min_uniq, log_file_name=log_file_name, nCores=nCores)
}else{
PMFresults=Pseudo_Marker_Finder(as.data.frame(groups), redu_data2=paste0(location, "data_processed_reclust_", filename, ".txt"), full_data2=NULL, min_uniq=min_uniq, log_file_name=log_file_name, nCores=nCores)
}
if(write==TRUE){
write.table(PMFresults, paste0(location, "new_PMF_results_", filename, ".txt"), sep="\t")
}
}
#Doublet Detection method 2: Pseudo_Marker_Finder
allClusters=unique(groups[,1])
if(PMF==FALSE){
newDoubletClusters=allClusters
}else{
hallmarkClusters=as.numeric(unique(PMFresults[,2]))
newDoubletClusters=setdiff(allClusters, hallmarkClusters)
}
#Doublet Detection method 1: Is_A_Doublet
uniqueClusters=as.character(unique(groups[,2]))
DeconCalledFreq=as.data.frame(matrix(nrow=length(allClusters), ncol=1), row.names = uniqueClusters)
for(clus in 1:length(allClusters)){ #modified this line, was originally "clus in allClusters"
temp1=subset(doubletTable$isADoublet, Group_Cluster==uniqueClusters[clus])
if(nrow(temp1)==0){ #not an original cluster, only a new doublet cluster
DeconCalledFreq[clus,1]=100
}else{
DeconCalledFreq[clus,1]=(length(which(temp1$isADoublet==TRUE))/nrow(temp1))*100
}
}
#Combine to find real doublets
if(PMF==FALSE){
finalDoublets=row.names(doubletTable$isADoublet)[which(doubletTable$isADoublet$isADoublet==TRUE)] #this gives you the names of cells called as doublets by deconvolution
}else{
finalDoublets=intersect(row.names(doubletTable$isADoublet)[which(doubletTable$isADoublet$isADoublet==TRUE)],row.names(subset(groups, groups[,1] %in% newDoubletClusters)))
}
#Results
finalDoubletCellCall=groups[row.names(groups) %in% finalDoublets,]
finalNotDoubletCellCall=groups[!(row.names(groups) %in% finalDoublets),]
if(write==TRUE){
write.table(finalDoubletCellCall, paste0(location, "Final_doublets_groups_", filename, ".txt"), sep="\t")
write.table(finalNotDoubletCellCall, paste0(location, "Final_nondoublets_groups_", filename, ".txt"), sep="\t")
}
#Subset expression matrix for doublets and save
doublets_matrix=cbind(og_processed_data[,1],og_processed_data[,which(colnames(og_processed_data) %in% row.names(finalDoubletCellCall))])
if(write==TRUE){
write.table(doublets_matrix, paste0(location, "Final_doublets_exp_", filename, ".txt"), sep="\t")
}
#Heatmap of cells removed as doubets
if(heatmap==TRUE){
cat("Creating doublets heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating doublets heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(doublets_matrix[2:nrow(doublets_matrix), 2:ncol(doublets_matrix)]), 0.99)), #end point of color key
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(doublets_matrix[2:nrow(doublets_matrix), 2:ncol(doublets_matrix)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
col=mycol, #colors used in heatmap
dendrogram="none", #turn of dendrogram generation
ColSideColors = as.color(Renumber(doublets_matrix[1,2:ncol(doublets_matrix)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(doublets_matrix[2:nrow(doublets_matrix),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Doublets: ", filename))) #main title)
}
#Subset expression matrix for non-doublets and save
nondoublets_matrix=cbind(og_processed_data[,1],og_processed_data[,which(colnames(og_processed_data) %in% row.names(finalNotDoubletCellCall))])
if(write==TRUE){
write.table(nondoublets_matrix, paste0(location, "Final_nondoublets_exp_", filename, ".txt"), sep="\t")
}
#New heatmap of non-doublet cells
if(heatmap==TRUE){
cat("Creating non-doublets heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating non-doublets heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(nondoublets_matrix[2:nrow(nondoublets_matrix), 2:ncol(nondoublets_matrix)]), 0.99)), #end point of color key
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(nondoublets_matrix[2:nrow(nondoublets_matrix), 2:ncol(nondoublets_matrix)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
col=mycol, #colors used in heatmap
dendrogram="none", #turn of dendrogram generation
ColSideColors = as.color(Renumber(nondoublets_matrix[1,2:ncol(nondoublets_matrix)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(nondoublets_matrix[2:nrow(nondoublets_matrix),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Non-Doublets: ", filename))) #main title
}
#last message
cat("Finished!", file=log_file_name, append=TRUE, sep="\n")
cat("Finished!", sep="\n")
#close the log file connection
close(log_con)
return(list(data_processed=data,
groups_processed=groups,
DRS_doublet_table=doubletTable$isADoublet,
DRS_results=doubletTable$resultsreadable,
PMF_results=PMFresults,
Final_doublets_groups=finalDoubletCellCall,
Final_nondoublets_groups=finalNotDoubletCellCall,
Final_doublets_exp=doublets_matrix,
Final_nondoublets_exp=nondoublets_matrix,
Synth_doublet_info=doubletCellsInput2))
}
|
/R/Main_Doublet_Decon.R
|
no_license
|
EDePasquale/DoubletDecon
|
R
| false
| false
| 20,122
|
r
|
#' Main DoubletDecon v1.0.1
#'
#' This is the main function. This function identifies clusters of doublets with a combination of deconvolution analysis and unique gene expression and individual doublet cells with deconvolution analysis.
#' @param rawDataFile Name of file containing ICGS or Seurat expression data (gene by cell)
#' @param groupsFile Name of file containing group assignments (3 column: cell, group(numeric), group(numeric or character))
#' @param filename Unique filename to be incorporated into the names of outputs from the functions.
#' @param location Directory where output should be stored
#' @param fullDataFile Name of file containing full expression data (gene by cell). Default is NULL.
#' @param removeCC Remove cell cycle gene cluster by KEGG enrichment. Default is FALSE.
#' @param species Species as scientific species name, KEGG ID, three letter species abbreviation, or NCBI ID. Default is "mmu".
#' @param rhop x in mean+x*SD to determine upper cutoff for correlation in the blacklist. Default is 1.
#' @param write Write output files as .txt files. Default is TRUE.
#' @param PMF Use step 2 (unique gene expression) in doublet determination criteria. Default is TRUE.
#' @param useFull Use full gene list for PMF analysis. Requires fullDataFile. Default is FALSE.
#' @param heatmap Boolean value for whether to generate heatmaps. Default is TRUE. Can be slow to datasets larger than ~3000 cells.
#' @param centroids Use centroids as references in deconvolution instead of medoids. Default is TRUE.
#' @param num_doubs The user defined number of doublets to make for each pair of clusters. Default is 100.
#' @param only50 use only synthetic doublets created with 50\%/50\% mix of parent cells, as opposed to the extended option of 30\%/70\% and 70\%/30\%, default is FALSE.
#' @param min_uniq minimum number of unique genes required for a cluster to be rescued
#' @param nCores number of cores to be used during rescue step. Default is -1 for automatically detected.
#' @return data_processed = new expression file (cleaned).
#' @return groups_processed = new groups file (cleaned).
#' @return DRS_doublet_table = each cell and whether it is called a doublet by deconvolution analysis.
#' @return DRS_results = results of deconvolution analysis (cell by cluster) in percentages.
#' @return PMF_results = pseudo marker finder t-test results (gene by cluster), or NULL when PMF=FALSE.
#' @return Final_doublets_groups = new groups file containing only doublets.
#' @return Final_nondoublets_groups = new groups file containing only non doublets.
#' @return Final_doublets_exp = expression matrix subset to the cells called doublets.
#' @return Final_nondoublets_exp = expression matrix subset to the cells not called doublets.
#' @return Synth_doublet_info = information on the synthetic doublet profiles used in deconvolution.
#' @keywords doublet decon main
#' @export
Main_Doublet_Decon<-function(rawDataFile, groupsFile, filename, location, fullDataFile=NULL, removeCC=FALSE, species="mmu", rhop=1, write=TRUE, PMF=TRUE, useFull=FALSE, heatmap=TRUE, centroids=TRUE, num_doubs=100, only50=FALSE, min_uniq=4, nCores=-1){
#load required packages
#NOTE(review): library() calls inside a function attach packages globally; in a package these belong in Imports -- confirm intended
cat("Loading packages...", sep="\n")
library(DeconRNASeq)
library(gplots)
library(plyr)
library(MCL)
library(clusterProfiler)
library(mygene)
library(tidyr)
library(R.utils)
library(dplyr)
library(foreach)
library(doParallel)
library(stringr)
#Set up log file and record every argument value so a run can be reproduced from its log
log_file_name=paste0(location, filename,".log")
log_con <- file(log_file_name)
cat(paste0("filename: ",filename), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("location: ",location), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("removeCC: ",removeCC), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("species: ",species), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("rhop: ",rhop), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("write: ",write), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("PMF: ",PMF), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("useFull: ",useFull), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("heatmap: ",heatmap), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("centroids: ",centroids), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("num_doubs: ",num_doubs), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("only50: ",only50), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("min_uniq: ",min_uniq), file=log_file_name, append=TRUE, sep="\n")
cat(paste0("nCores: ",nCores), file=log_file_name, append=TRUE, sep="\n")
#Check variables
#NOTE(review): these checks only print a message and do NOT stop() -- execution continues on bad input; confirm intended
if(is.character(rawDataFile)!=TRUE & is.data.frame(rawDataFile)!=TRUE){print("ERROR: rawDataFile must be a character string!")}
if(is.character(groupsFile)!=TRUE & is.data.frame(groupsFile)!=TRUE & is.matrix(groupsFile)!=TRUE){print("ERROR: groupsFile must be a character string!")}
if(is.character(filename)!=TRUE){print("ERROR: filename must be a character string!")}
if(is.character(location)!=TRUE){print("ERROR: location must be a character string!")}
if(is.character(fullDataFile)!=TRUE & is.null(fullDataFile)!=TRUE & is.data.frame(fullDataFile)!=TRUE){print("ERROR: fullDataFile must be a character string or NULL!")}
if(is.logical(removeCC)!=TRUE){print("ERROR: removeCC must be TRUE or FALSE!")}
if(is.character(species)!=TRUE){print("ERROR: species must be a character string!")}
if(is.numeric(rhop)!=TRUE){print("ERROR: rhop must be numeric!")}
if(is.logical(write)!=TRUE){print("ERROR: write must be TRUE or FALSE!")}
if(is.logical(PMF)!=TRUE){print("ERROR: PMF must be TRUE or FALSE!")}
if(is.logical(useFull)!=TRUE){print("ERROR: useFull must be TRUE or FALSE!")}
if(is.logical(heatmap)!=TRUE){print("ERROR: heatmap must be TRUE or FALSE!")}
if(is.logical(centroids)!=TRUE){print("ERROR: centroids must be TRUE or FALSE!")}
if(is.numeric(num_doubs)!=TRUE){print("ERROR: numdoubs must be numeric!")}
if(is.logical(only50)!=TRUE){print("ERROR: only50 must be TRUE or FALSE!")}
if(is.numeric(min_uniq)!=TRUE){print("ERROR: min_uniq must be numeric!")}
#Read in data
cat("Reading data...", file=log_file_name, append=TRUE, sep="\n")
cat("Reading data...", sep="\n")
ICGS2_flag=F #set for checking if the input file is in ICGS2 format
if(class(rawDataFile)=="character"){
#NEW: test for ICGS2 (a ":" in the second header field marks ICGS2 format)
rawDataHeader=read.table(rawDataFile, sep="\t",header=F, row.names=1, nrows=1, stringsAsFactors = F)
if(length(grep(":", rawDataHeader[2]))==1){
ICGS2_flag=T
ICGS2=ICGS2_to_ICGS1(rawDataFile, groupsFile, log_file_name)
rawData=ICGS2$rawData
}else{
rawData=read.table(rawDataFile, sep="\t",header=T, row.names=1, stringsAsFactors = T)
}
}else{
cat("WARNING: if using ICGS2 file input, please import 'rawDataFile' and 'groupsFile' as path/location instead of an R object." , sep="\n")
rawData=rawDataFile
}
if(class(groupsFile)=="character"){
if(ICGS2_flag==T){
groups=ICGS2$groups #groups were already converted alongside the ICGS2 expression data
}else{
groups=read.table(groupsFile, sep="\t",header=F, row.names=1, stringsAsFactors = T)
}
}else{
groups=groupsFile
}
#Clean up data and groups file
cat("Processing raw data...", file=log_file_name, append=TRUE, sep="\n")
cat("Processing raw data...", sep="\n")
data=Clean_Up_Input(rawData, groups, log_file_name=log_file_name)
og_processed_data=data$processed #kept aside: final doublet/non-doublet expression subsets are taken from this pre-recluster matrix
groups=data$groups
#Centroids or medoids?
if(centroids==TRUE){
centroid_flag=TRUE
}else{
centroid_flag=FALSE
}
#Original data heatmap
if(heatmap==TRUE){
cat("Creating original data heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating original data heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(data$processed[2:nrow(data$processed), 2:ncol(data$processed)]), 0.99)), #end point of color key (99th percentile caps outliers)
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(data$processed[2:nrow(data$processed), 2:ncol(data$processed)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
dendrogram = "none", #do not generate dendrogram
col=mycol, #colors used in heatmap
ColSideColors = as.color(Renumber(data$processed[1,2:ncol(data$processed)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(data$processed[2:nrow(data$processed),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Original data: ", filename))) #main title
}
#Remove cell cycle gene cluster (optional)
if(removeCC==TRUE){
cat("Removing cell cycle clusters...", file=log_file_name, append=TRUE, sep="\n")
cat("Removing cell cycle clusters...", sep="\n")
data=Remove_Cell_Cycle(data$processed, species, log_file_name)
}else{
data=data$processed
}
if(write==TRUE){
write.table(data, paste0(location, "data_processed_", filename, ".txt"), sep="\t")
write.table(groups, paste0(location, "groups_processed_", filename, ".txt"), sep="\t")
}
#Calculate medoids, medoid correlations, blacklist to create new combine medoids
cat("Combining similar clusters...", file=log_file_name, append=TRUE, sep="\n")
cat("Combining similar clusters...", sep="\n")
BL=Blacklist_Groups(data, groups, rhop, centroid_flag, log_file_name)
newMedoids=BL$newMedoids
groupsMedoids=BL$newGroups
#Create synthetic doublets to get average synthetic profiles
cat("Creating synthetic doublet profiles...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating synthetic doublet profiles...", sep="\n")
if(.Platform$OS.type=="unix"){
sink("/dev/null") #hides DeconRNASeq output
synthProfilesx=Synthetic_Doublets(data, groups, groupsMedoids, newMedoids, num_doubs, log_file_name=log_file_name, only50=only50, location=location)
sink()
}else{
synthProfilesx=Synthetic_Doublets(data, groups, groupsMedoids, newMedoids, num_doubs, log_file_name=log_file_name, only50=only50, location=location)
}
synthProfiles=synthProfilesx$averagesAverages
doubletCellsInput2=synthProfilesx$doubletCellsInput2
if(write==TRUE){
write.table(doubletCellsInput2, paste0(location, "Synth_doublet_info_", filename, ".txt"), sep="\t")
}
#Calculate doublets using DeconRNASeq
cat("Step 1: Removing possible doublets...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 1: Removing possible doublets...", sep="\n")
if(.Platform$OS.type=="unix"){
sink("/dev/null") #hides DeconRNASeq output
doubletTable=Is_A_Doublet(data, newMedoids, groups, synthProfiles, log_file_name=log_file_name)
sink()
}else{
doubletTable=Is_A_Doublet(data, newMedoids, groups, synthProfiles, log_file_name=log_file_name)
}
if(write==TRUE){
write.table(doubletTable$isADoublet, paste0(location, "DRS_doublet_table_", filename, ".txt"), sep="\t")
write.table(doubletTable$resultsreadable, paste0(location, "DRS_results_", filename, ".txt"), sep="\t")
}
#Recluster doublets and non-doublets
cat("Step 2: Re-clustering possible doublets...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 2: Re-clustering possible doublets...", sep="\n")
reclusteredData=Recluster(isADoublet=doubletTable$isADoublet, data, groups, log_file_name = log_file_name)
data=reclusteredData$newData2$processed
groups=reclusteredData$newData2$groups
#NOTE(review): the next two writes are unconditional (not gated on write==TRUE) -- the re-clustered file is read back from disk by Pseudo_Marker_Finder below
write.table(data, paste0(location, "data_processed_reclust_", filename, ".txt"), sep="\t", col.names = NA, quote=FALSE)
write.table(groups, paste0(location, "groups_processed_reclust_", filename, ".txt"), sep="\t")
#Run Pseudo Marker Finder to identify clusters with no unique gene expression
if(PMF==FALSE){
cat("SKIPPING Step 3: Rescuing cells with unique gene expression...", file=log_file_name, append=TRUE, sep="\n")
cat("SKIPPING Step 3: Rescuing cells with unique gene expression...", sep="\n")
PMFresults=NULL
}else{
cat("Step 3: Rescuing cells with unique gene expression...", file=log_file_name, append=TRUE, sep="\n")
cat("Step 3: Rescuing cells with unique gene expression...", sep="\n")
if(useFull==TRUE){
PMFresults=Pseudo_Marker_Finder(as.data.frame(groups), redu_data2=paste0(location, "data_processed_reclust_", filename, ".txt"), full_data2=fullDataFile, min_uniq=min_uniq, log_file_name=log_file_name, nCores=nCores)
}else{
PMFresults=Pseudo_Marker_Finder(as.data.frame(groups), redu_data2=paste0(location, "data_processed_reclust_", filename, ".txt"), full_data2=NULL, min_uniq=min_uniq, log_file_name=log_file_name, nCores=nCores)
}
if(write==TRUE){
write.table(PMFresults, paste0(location, "new_PMF_results_", filename, ".txt"), sep="\t")
}
}
#Doublet Detection method 2: Pseudo_Marker_Finder
allClusters=unique(groups[,1])
if(PMF==FALSE){
newDoubletClusters=allClusters
}else{
hallmarkClusters=as.numeric(unique(PMFresults[,2])) #clusters that do have unique ("hallmark") gene expression
newDoubletClusters=setdiff(allClusters, hallmarkClusters) #clusters with no unique expression remain doublet candidates
}
#Doublet Detection method 1: Is_A_Doublet
uniqueClusters=as.character(unique(groups[,2]))
DeconCalledFreq=as.data.frame(matrix(nrow=length(allClusters), ncol=1), row.names = uniqueClusters)
#NOTE(review): DeconCalledFreq is computed below but never written or returned -- confirm whether it is still needed
for(clus in 1:length(allClusters)){ #modified this line, was originally "clus in allClusters"
temp1=subset(doubletTable$isADoublet, Group_Cluster==uniqueClusters[clus])
if(nrow(temp1)==0){ #not an original cluster, only a new doublet cluster
DeconCalledFreq[clus,1]=100
}else{
DeconCalledFreq[clus,1]=(length(which(temp1$isADoublet==TRUE))/nrow(temp1))*100
}
}
#Combine to find real doublets
if(PMF==FALSE){
finalDoublets=row.names(doubletTable$isADoublet)[which(doubletTable$isADoublet$isADoublet==TRUE)] #this gives you the names of cells called as doublets by deconvolution
}else{
finalDoublets=intersect(row.names(doubletTable$isADoublet)[which(doubletTable$isADoublet$isADoublet==TRUE)],row.names(subset(groups, groups[,1] %in% newDoubletClusters))) #doublet by deconvolution AND in a cluster with no unique expression
}
#Results
finalDoubletCellCall=groups[row.names(groups) %in% finalDoublets,]
finalNotDoubletCellCall=groups[!(row.names(groups) %in% finalDoublets),]
if(write==TRUE){
write.table(finalDoubletCellCall, paste0(location, "Final_doublets_groups_", filename, ".txt"), sep="\t")
write.table(finalNotDoubletCellCall, paste0(location, "Final_nondoublets_groups_", filename, ".txt"), sep="\t")
}
#Subset expression matrix for doublets and save
doublets_matrix=cbind(og_processed_data[,1],og_processed_data[,which(colnames(og_processed_data) %in% row.names(finalDoubletCellCall))])
if(write==TRUE){
write.table(doublets_matrix, paste0(location, "Final_doublets_exp_", filename, ".txt"), sep="\t")
}
#Heatmap of cells removed as doublets
if(heatmap==TRUE){
cat("Creating doublets heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating doublets heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(doublets_matrix[2:nrow(doublets_matrix), 2:ncol(doublets_matrix)]), 0.99)), #end point of color key
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(doublets_matrix[2:nrow(doublets_matrix), 2:ncol(doublets_matrix)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
col=mycol, #colors used in heatmap
dendrogram="none", #turn off dendrogram generation
ColSideColors = as.color(Renumber(doublets_matrix[1,2:ncol(doublets_matrix)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(doublets_matrix[2:nrow(doublets_matrix),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Doublets: ", filename))) #main title
}
#Subset expression matrix for non-doublets and save
nondoublets_matrix=cbind(og_processed_data[,1],og_processed_data[,which(colnames(og_processed_data) %in% row.names(finalNotDoubletCellCall))])
if(write==TRUE){
write.table(nondoublets_matrix, paste0(location, "Final_nondoublets_exp_", filename, ".txt"), sep="\t")
}
#New heatmap of non-doublet cells
if(heatmap==TRUE){
cat("Creating non-doublets heatmap...", file=log_file_name, append=TRUE, sep="\n")
cat("Creating non-doublets heatmap...", sep="\n")
breaks=seq(0, #start point of color key
as.numeric(quantile(data.matrix(nondoublets_matrix[2:nrow(nondoublets_matrix), 2:ncol(nondoublets_matrix)]), 0.99)), #end point of color key
by=0.05) #length of sub-division
mycol <- colorpanel(n=length(breaks)-1, low="black", high= "yellow") #heatmap colors
suppressWarnings(DDheatmap(data.matrix(nondoublets_matrix[2:nrow(nondoublets_matrix), 2:ncol(nondoublets_matrix)]), #the data matrix
Colv=FALSE, # No clustering of columns
Rowv = FALSE, #no clustering of rows
col=mycol, #colors used in heatmap
dendrogram="none", #turn off dendrogram generation
ColSideColors = as.color(Renumber(nondoublets_matrix[1,2:ncol(nondoublets_matrix)]), alpha=1, seed=4), #column color bar
RowSideColors = as.color(Renumber(nondoublets_matrix[2:nrow(nondoublets_matrix),1]), alpha=1, seed=2), # row color bar
breaks=breaks, #color key details
trace="none", #no trace on map
na.rm=TRUE, #ignore missing values
margins = c(5,5), # size and layout of heatmap window
labRow=NA, #turn off gene labels
labCol=NA, #turn off cell labels
xlab = "Samples", #x axis title
ylab = "Genes", # y axis title
main = paste0("Non-Doublets: ", filename))) #main title
}
#last message
cat("Finished!", file=log_file_name, append=TRUE, sep="\n")
cat("Finished!", sep="\n")
#close the log file connection
close(log_con)
return(list(data_processed=data,
groups_processed=groups,
DRS_doublet_table=doubletTable$isADoublet,
DRS_results=doubletTable$resultsreadable,
PMF_results=PMFresults,
Final_doublets_groups=finalDoubletCellCall,
Final_nondoublets_groups=finalNotDoubletCellCall,
Final_doublets_exp=doublets_matrix,
Final_nondoublets_exp=nondoublets_matrix,
Synth_doublet_info=doubletCellsInput2))
}
|
#' Type-Token Ratio
#'
#' Calculate type-token ratio by grouping variable.
#'
#' @param text.var The text variable
#' @param grouping.var The grouping variables. Default \code{NULL} generates
#' one word list for all text. Also takes a single grouping variable or a list
#' of 1 or more grouping variables.
#' @param n.words An integer specifying the number of words in each chunk.
#' @param \ldots ignored.
#' @return Returns a list of class \code{type_token_ratio}. This object
#' contains a type-token ratio for the overall text and a data frame
#' of type-token ratios per grouping variable.
#' @references Baker, P. (2006) Using Corpora in Discourse Analysis. London: Continuum.
#' @export
#' @examples
#' with(raj, type_text_ratio(dialogue, person))
#' plot(with(raj, type_text_ratio(dialogue, person)))
type_text_ratio <- function(text.var, grouping.var = NULL, n.words = 1000, ...) {
## Resolve a display name (G) for the grouping variable(s) via non-standard
## evaluation: substitute() captures the caller's expression (e.g. "person"),
## and "a$b" expressions are reduced to their last component.
if(is.null(grouping.var)) {
G <- "all"
} else {
if (is.list(grouping.var)) {
m <- unlist(as.character(substitute(grouping.var))[-1])
m <- sapply(strsplit(m, "$", fixed=TRUE), function(x) {
x[length(x)]
}
)
G <- paste(m, collapse="&")
} else {
G <- as.character(substitute(grouping.var))
G <- G[length(G)]
}
}
## Build the actual grouping vector, one value per element of text.var
if(is.null(grouping.var)){
grouping <- rep("all", length(text.var))
} else {
if (is.list(grouping.var) & length(grouping.var)>1) {
grouping <- paste2(grouping.var)
} else {
grouping <- unlist(grouping.var)
}
}
DF <- data.frame(grouping, text.var, wc = wc(text.var),
check.names = FALSE, stringsAsFactors = FALSE)
DF[["grouping"]] <- factor(DF[["grouping"]])
## Split each group's text into chunks of n.words words
text2000 <- chunker(DF[["text.var"]], DF[["grouping"]], n.words = n.words)
## word counts per grouping
key <- qdapTools::matrix2df(
data.frame(wc = sapply(split(DF[["wc"]], DF[["grouping"]]), sum)),
"group.var"
)
## calculate type-token ratio per group as the
## mean of the per-chunk (n.words-word) ratios
out <- qdapTools::vect2df(sapply(text2000, function(x){
mean(sapply(unlist(x), ttr))
}), "group.var", "ttr")
out <- data.frame(
out[, "group.var", drop =FALSE],
wc = qdapTools::lookup(out[["group.var"]], key),
out[, "ttr", drop =FALSE]
)
names(out)[1] <- G
## Overall ratio: same chunking scheme applied to the ungrouped text
all_ttr <- mean(sapply(unlist(chunker(DF[["text.var"]], n.words = n.words)), ttr))
o <- list(all = all_ttr, ttr = out)
class(o) <- "type_token_ratio"
attributes(o)[["group.name"]] <- G
## Stash raw text/grouping in small environments so print/plot methods can
## reach them without copying large vectors into the returned object
text.env <- new.env(FALSE)
text.env[["text.var"]] <- DF[["text.var"]]
attributes(o)[["text.var"]] <- text.env
group.env <- new.env(FALSE)
group.env[["grouping.var"]] <- DF[["grouping"]]
attributes(o)[["grouping.var"]] <- group.env
attributes(o)[["n.words"]] <- n.words
o
}
#' Prints a type_token_ratio Object
#'
#' Prints a type_token_ratio object.
#'
#' @param x The type_token_ratio object.
#' @param digits The number of type-token ratio digits to print.
#' @param \ldots ignored
#' @method print type_token_ratio
#' @export
print.type_token_ratio <-
function(x, digits = 3, ...) {
    ## Temporarily widen the console so the per-group data frame prints on one
    ## line. options() returns the previous value; on.exit() guarantees the
    ## old width is restored even if print() errors (the original restored it
    ## only on the success path, leaking width=3000 globally on failure).
    old_opts <- options(width = 3000)
    on.exit(options(old_opts), add = TRUE)
    ## Round the per-group ratios for display only; x itself is not modified.
    y <- x[["ttr"]]
    y[["ttr"]] <- round(y[["ttr"]], digits)
    print(y)
    cat(sprintf("\nType-token ratio for entire text: %s\n", round(x[["all"]], digits)))
}
#' Plots a type_token_ratio Object
#'
#' Plots a type_token_ratio object.
#'
#' @param x The type_token_ratio object.
#' @param \ldots ignored.
#' @importFrom scales alpha
#' @method plot type_token_ratio
#' @export
plot.type_token_ratio <- function(x, ...){
    ## Build a pretty y-axis label from the stored grouping name(s),
    ## e.g. "person&sex" -> "Person & Sex" (Caps is an internal helper).
    nms <- paste(sapply(strsplit(attributes(x)[["group.name"]], "&")[[1]], Caps), collapse = " & ")
    ggplot2::ggplot(data = x[["ttr"]], ggplot2::aes_string(x = "ttr",
        y = attributes(x)[["group.name"]])) +
        ## Dashed reference line: type-token ratio of the entire (ungrouped) text
        ggplot2::geom_vline(xintercept = x[["all"]], size = .7, linetype = "longdash", alpha = .4) +
        ggplot2::geom_point(ggplot2::aes_string(size="wc"), alpha = .3) + #point area encodes group word count
        ggplot2::geom_point(color="red") +
        ggplot2::xlab("Type-Token Ratio") + #fixed user-facing typo (was "Type-Toke Ratio")
        ggplot2::ylab(nms) +
        ggplot2::theme_bw() +
        ggplot2::annotate("text", x = x[["all"]], y = ceiling(nrow(x[["ttr"]])/2),
            size =2.3, alpha = .3, label = "Type-Token\nRatio\nAll Text") +
        ggplot2::scale_size_continuous(name="Word\nCount")
}
## Type-token ratio of a text: number of distinct word types divided by the
## total number of word tokens (bag_o_words tokenizes; table tallies types).
ttr <- function(x) {
    counts <- table(bag_o_words(x))
    n_types <- length(counts)   # one table entry per distinct word
    n_tokens <- sum(counts)     # entries sum to the total token count
    n_types / n_tokens
}
|
/R/type_token_ratio.R
|
no_license
|
AlexOcculate/qdap
|
R
| false
| false
| 4,682
|
r
|
#' Type-Token Ratio
#'
#' Calculate type-token ratio by grouping variable.
#'
#' @param text.var The text variable
#' @param grouping.var The grouping variables. Default \code{NULL} generates
#' one word list for all text. Also takes a single grouping variable or a list
#' of 1 or more grouping variables.
#' @param n.words An integer specifying the number of words in each chunk.
#' @param \ldots ignored.
#' @return Returns a list of class \code{type_token_ratio}. This object
#' contains a type-token ratio for the overall text and a data frame
#' of type-token ratios per grouping variable.
#' @references Baker, P. (2006) Using Corpora in Discourse Analysis. London: Continuum.
#' @export
#' @examples
#' with(raj, type_text_ratio(dialogue, person))
#' plot(with(raj, type_text_ratio(dialogue, person)))
type_text_ratio <- function(text.var, grouping.var = NULL, n.words = 1000, ...) {
## Resolve a display name (G) for the grouping variable(s) via non-standard
## evaluation: substitute() captures the caller's expression (e.g. "person"),
## and "a$b" expressions are reduced to their last component.
if(is.null(grouping.var)) {
G <- "all"
} else {
if (is.list(grouping.var)) {
m <- unlist(as.character(substitute(grouping.var))[-1])
m <- sapply(strsplit(m, "$", fixed=TRUE), function(x) {
x[length(x)]
}
)
G <- paste(m, collapse="&")
} else {
G <- as.character(substitute(grouping.var))
G <- G[length(G)]
}
}
## Build the actual grouping vector, one value per element of text.var
if(is.null(grouping.var)){
grouping <- rep("all", length(text.var))
} else {
if (is.list(grouping.var) & length(grouping.var)>1) {
grouping <- paste2(grouping.var)
} else {
grouping <- unlist(grouping.var)
}
}
DF <- data.frame(grouping, text.var, wc = wc(text.var),
check.names = FALSE, stringsAsFactors = FALSE)
DF[["grouping"]] <- factor(DF[["grouping"]])
## Split each group's text into chunks of n.words words
text2000 <- chunker(DF[["text.var"]], DF[["grouping"]], n.words = n.words)
## word counts per grouping
key <- qdapTools::matrix2df(
data.frame(wc = sapply(split(DF[["wc"]], DF[["grouping"]]), sum)),
"group.var"
)
## calculate type-token ratio per group as the
## mean of the per-chunk (n.words-word) ratios
out <- qdapTools::vect2df(sapply(text2000, function(x){
mean(sapply(unlist(x), ttr))
}), "group.var", "ttr")
out <- data.frame(
out[, "group.var", drop =FALSE],
wc = qdapTools::lookup(out[["group.var"]], key),
out[, "ttr", drop =FALSE]
)
names(out)[1] <- G
## Overall ratio: same chunking scheme applied to the ungrouped text
all_ttr <- mean(sapply(unlist(chunker(DF[["text.var"]], n.words = n.words)), ttr))
o <- list(all = all_ttr, ttr = out)
class(o) <- "type_token_ratio"
attributes(o)[["group.name"]] <- G
## Stash raw text/grouping in small environments so print/plot methods can
## reach them without copying large vectors into the returned object
text.env <- new.env(FALSE)
text.env[["text.var"]] <- DF[["text.var"]]
attributes(o)[["text.var"]] <- text.env
group.env <- new.env(FALSE)
group.env[["grouping.var"]] <- DF[["grouping"]]
attributes(o)[["grouping.var"]] <- group.env
attributes(o)[["n.words"]] <- n.words
o
}
#' Prints a type_token_ratio Object
#'
#' Prints a type_token_ratio object.
#'
#' @param x The type_token_ratio object.
#' @param digits The number of type-token ratio digits to print.
#' @param \ldots ignored
#' @method print type_token_ratio
#' @export
print.type_token_ratio <-
function(x, digits = 3, ...) {
    ## Temporarily widen the console so the per-group data frame prints on one
    ## line. options() returns the previous value; on.exit() guarantees the
    ## old width is restored even if print() errors (the original restored it
    ## only on the success path, leaking width=3000 globally on failure).
    old_opts <- options(width = 3000)
    on.exit(options(old_opts), add = TRUE)
    ## Round the per-group ratios for display only; x itself is not modified.
    y <- x[["ttr"]]
    y[["ttr"]] <- round(y[["ttr"]], digits)
    print(y)
    cat(sprintf("\nType-token ratio for entire text: %s\n", round(x[["all"]], digits)))
}
#' Plots a type_token_ratio Object
#'
#' Plots a type_token_ratio object.
#'
#' @param x The type_token_ratio object.
#' @param \ldots ignored.
#' @importFrom scales alpha
#' @method plot type_token_ratio
#' @export
plot.type_token_ratio <- function(x, ...){
    ## Build a pretty y-axis label from the stored grouping name(s),
    ## e.g. "person&sex" -> "Person & Sex" (Caps is an internal helper).
    nms <- paste(sapply(strsplit(attributes(x)[["group.name"]], "&")[[1]], Caps), collapse = " & ")
    ggplot2::ggplot(data = x[["ttr"]], ggplot2::aes_string(x = "ttr",
        y = attributes(x)[["group.name"]])) +
        ## Dashed reference line: type-token ratio of the entire (ungrouped) text
        ggplot2::geom_vline(xintercept = x[["all"]], size = .7, linetype = "longdash", alpha = .4) +
        ggplot2::geom_point(ggplot2::aes_string(size="wc"), alpha = .3) + #point area encodes group word count
        ggplot2::geom_point(color="red") +
        ggplot2::xlab("Type-Token Ratio") + #fixed user-facing typo (was "Type-Toke Ratio")
        ggplot2::ylab(nms) +
        ggplot2::theme_bw() +
        ggplot2::annotate("text", x = x[["all"]], y = ceiling(nrow(x[["ttr"]])/2),
            size =2.3, alpha = .3, label = "Type-Token\nRatio\nAll Text") +
        ggplot2::scale_size_continuous(name="Word\nCount")
}
## Type-token ratio of a text: number of distinct word types divided by
## the total number of word tokens (bag_o_words is a package helper).
ttr <- function(x){
    word_freqs <- table(bag_o_words(x))
    n_types <- length(unlist(word_freqs))
    n_tokens <- sum(unlist(word_freqs))
    n_types / n_tokens
}
|
##
## DEVSTUFF
##
## Scratch script for interactive package development: sets up testthat
## scaffolding, sources the package code directly, and runs a quick smoke
## test of simulater().  Not part of the installed package.
use_testthat()
use_test("simulater.R")
## Source the package internals directly (instead of installing) for
## fast edit-and-rerun iteration.
source("R/utils.R")
source("R/simulater.R")
## Smoke test with 2e5 observations, 10 noise variables, signal-to-noise
## ratio 1, and sin/exp transform functions.
## NOTE(review): meaning of the positional args 10, 15, 10 is not visible
## here — confirm against simulater()'s signature.
simulation <- simulater(2e5, 10, 15, 10, n_noise = 10, stn = 1,
funs = list(sin = function(x) sin(x),
exp = function(x) exp(x)))
## Inspect the generated model formula.
simulation$formula
|
/data-raw/devstuff.R
|
no_license
|
tkrabel/simulater
|
R
| false
| false
| 339
|
r
|
##
## DEVSTUFF
##
use_testthat()
use_test("simulater.R")
source("R/utils.R")
source("R/simulater.R")
simulation <- simulater(2e5, 10, 15, 10, n_noise = 10, stn = 1,
funs = list(sin = function(x) sin(x),
exp = function(x) exp(x)))
simulation$formula
|
## Regression test: entries inside the zip must be stored relative to the
## packed folder, not under the folder's absolute temp path.
test_that("pack_folder does not use full path", {
dir_ <- tempfile()
dir.create(dir_)
file <- tempfile(tmpdir = dir_)
cat("test", file = file)
pack_folder(dir_, target = "test.zip")
## Unpack into a fresh directory; only the bare file name should appear
## (an absolute-path entry would reproduce the original directory tree).
dir_ <- tempfile()
unpack_folder(file = "test.zip", dir_)
expect_equal(list.files(dir_), basename(file))
unlink("test.zip", force = TRUE)
})
## pack_folder() should error when the target's parent directory does
## not exist ("dummy_dir" is never created).
test_that("pack_folder behavior", {
dir_ <- tempfile()
dir.create(dir_)
file <- tempfile(tmpdir = dir_)
cat("test", file = file)
expect_error(pack_folder(dir_, target = "dummy_dir/test.zip"))
})
|
/tests/testthat/test-zip.R
|
permissive
|
davidgohel/officer
|
R
| false
| false
| 550
|
r
|
test_that("pack_folder does not use full path", {
dir_ <- tempfile()
dir.create(dir_)
file <- tempfile(tmpdir = dir_)
cat("test", file = file)
pack_folder(dir_, target = "test.zip")
dir_ <- tempfile()
unpack_folder(file = "test.zip", dir_)
expect_equal(list.files(dir_), basename(file))
unlink("test.zip", force = TRUE)
})
test_that("pack_folder behavior", {
dir_ <- tempfile()
dir.create(dir_)
file <- tempfile(tmpdir = dir_)
cat("test", file = file)
expect_error(pack_folder(dir_, target = "dummy_dir/test.zip"))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_coach_report.R
\name{make_coach_report}
\alias{make_coach_report}
\title{Make Coach Report Function}
\usage{
make_coach_report(data, coach, dir = getwd(),
date.current = date.current.report)
}
\description{
This function takes the output of the clean_merge_data function and produces coach reports as .csv files in the working directory.
}
\examples{
make_coach_report()
}
\keyword{coach}
\keyword{engagement}
\keyword{report}
|
/man/make_coach_report.Rd
|
no_license
|
agarcia-r/fitbit
|
R
| false
| true
| 512
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/make_coach_report.R
\name{make_coach_report}
\alias{make_coach_report}
\title{Make Coach Report Function}
\usage{
make_coach_report(data, coach, dir = getwd(),
date.current = date.current.report)
}
\description{
This function takes the output of the clean_merge_data function and produces coach reports as .csv files in the working directory.
}
\examples{
make_coach_report()
}
\keyword{coach}
\keyword{engagement}
\keyword{report}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19955
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19955
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/cycle-sched/cycle_sched_6_8_1.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6515
c no.of clauses 19955
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19955
c
c QBFLIB/Tentrup/cycle-sched/cycle_sched_6_8_1.sat.qdimacs 6515 19955 E1 [] 0 324 6191 19955 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/cycle-sched/cycle_sched_6_8_1.sat/cycle_sched_6_8_1.sat.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 650
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 19955
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 19955
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/cycle-sched/cycle_sched_6_8_1.sat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6515
c no.of clauses 19955
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 19955
c
c QBFLIB/Tentrup/cycle-sched/cycle_sched_6_8_1.sat.qdimacs 6515 19955 E1 [] 0 324 6191 19955 NONE
|
/prueba_pca_mmc.R
|
no_license
|
mmc00/aguila
|
R
| false
| false
| 9,192
|
r
| ||
# Description: changing model to reflect differences in tagging procedures (year added as fixed factor)
# Author: George C Jarvis
# Date: Sat Dec 07 10:24:59 2019
# Notes: I'm going to add year as a fixed factor to the model and code the full model as
# egg count~Treatment*Year*avg.inhab+(1|Trial:Year)
# I have to figure out if the model is running it correctly (i.e. all of the interactions, and also the error df)
#
# 2019.8.12 update: go to "reproduction per week" section
# --------------
rm(list=ls())
library(sciplot)
library(lme4)
library(car)
library(lmerTest)
library(dplyr)
library(ggplot2)
library(MASS)
library(nlme)
library(pwr)
library(HH)#for ancova and plots
library(vegan)
#importing dataset, adding number of gobies on each reef, ordering treatments####
repro<-read.csv("Data/new.data.2019.9.30.csv")
repro<-na.omit(repro) # no NA's to omit
#data manipulation####
#adding column for average density, rounded to nearest whole number of fish
repro$avg.inhab<-(ceiling((repro$Recollection+20)/2))
#adding a column for year (as a proxy for tagging procedure), where trials 1-3 = 2017, and 4-6 = 2018
repro$Year <- ifelse(repro$Trial <=3, 2017, 2018)
#want it as a factor? Going to make a variable with it as a factor, run the model, and see if I get different results
repro$Year.fact<- as.factor(repro$Year)
#modeling####
# including year, and trial nested within year, year as numeric
#mod1<-lmer(Egg.count~Treatment*avg.inhab*Year*(1|Year:Trial), data=repro)
#hist(resid(mod1))
#qqnorm(resid(mod1))
#qqline(resid(mod1))
#anova(mod1, type = "III")
#Anova(mod1)
#summary(mod1) #none of it is sig. can reduce model? also want to make sure I get the same result when year is run as factor
# including year, and trial nested within year, year as factor --> this is the right way to do it, b/c year = tagging method
#mod1.1<-lmer(Egg.count~Treatment*avg.inhab*Year.fact*(1|Year.fact:Trial), data=repro)
#hist(resid(mod1.1))
#qqnorm(resid(mod1.1))
#qqline(resid(mod1.1))
#anova(mod1.1, type = "III")
#Anova(mod1.1)
#summary(mod1.1) #see sig. effect of avg. inhab. (makes sense), year (makes sense), and avg.inhab*year (makes sense)
# SEE PLOT FOR MOD1.1 BELOW TO SEE HOW AVG.INHAB*YEAR AFFECTED REPRODUCTIVE OUTPUT (not super surprising results)
#the interaction makes sense - the number of eggs laid by number of inhabitants changed,
# because the durations of the experiments were much longer. All this interaction is telling us is that the tagging methods
# allowed for greater retention of gobies over time - which is useful information for someone trying to do this exp. again
#this is the full model --> (Egg.count~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial)
#now running without the interaction for the random effect to see how the results change
#I think that this is the best model, it tests the fixed effect of year, plus the random variation in intercept among
# trials within year. I don't think you build the models with interactions with the random variable (i.e. * vs. +)
mod1.1.1<-lmer(Egg.count~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.1.1))
qqnorm(resid(mod1.1.1))
qqline(resid(mod1.1.1))
anova(mod1.1.1)
Anova(mod1.1.1)
summary(mod1.1.1) #see sig. effect of avg. inhab. (makes sense), year (makes sense), and avg.inhab*year (makes sense)
#QUESTION: go with chi-squared test or with ANOVA test? Seems to be that I can do either? Doesn't change the result, just the stats
#reduced model 1 --> removed three-way interaction, nonsignificant
#running reduced model taking out the three-way interaction between treatment*avg.inhab*year
mod1.2<-lmer(Egg.count~(Treatment*avg.inhab)+(Treatment*Year.fact)+(avg.inhab*Year.fact)+
Treatment+avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.2))
qqnorm(resid(mod1.2))
qqline(resid(mod1.2))
anova(mod1.2)
Anova(mod1.2)
summary(mod1.2) #no sig. interaction between treatment*year (2 categorical factors), will remove them as per Mark's suggestion
#further reduced model 1 --> removed nonsignificant interaction between treatment and year
#running a further reduced model without treatment*year interaction, seeing how it affects results
mod1.3<-lmer(Egg.count~(Treatment*avg.inhab)+(avg.inhab*Year.fact)+Treatment+
avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.3))
qqnorm(resid(mod1.3))
qqline(resid(mod1.3))
anova(mod1.3)
Anova(mod1.3)
summary(mod1.3) # same results, will check out a model comparison?
#comparing all possible models
anova(mod1,mod1.1,mod1.1.1,mod1.2,mod1.3) #seems like model 1.3 (most reduced model) is best? Will ask M. Steele
#comparing models that I sent to M. Steele for review (mod1.1.1, 1.2, and 1.3)
anova(mod1.1.1,mod1.2,mod1.3) #seems like model 1.3 (most reduced model) is best? Will ask M. Steele
#plotting####
#ordering "Treatment" and "T.6.comparison"
repro$Treatment.ord<-ordered(repro$Treatment, levels=c("Low","Medium","High"))
repro$T6.comparison.ord<-ordered(repro$T6.comparison, levels=c("Low","Medium","High","Uncaged"))
#grouped by trial
bargraph.CI(x.factor = Treatment.ord, response = Egg.count,
group= Year.fact, legend=TRUE, main="Reproduction by risk and year",
data = repro, ylab="egg count")
bargraph.CI(x.factor = Treatment.ord, response = Egg.count,
group= Trial, legend=TRUE, main="all trials, HR combined grouped by trial",
data = repro)
bargraph.CI(x.factor = avg.inhab, response = Egg.count,
group= Year.fact, legend=TRUE, main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count",
data = repro)
lineplot.CI(avg.inhab,Egg.count,group=Year.fact,legend = TRUE,main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count", data=repro)
# avg.inhab*year stats show that there was a greater effect of avg.inhab on reproductive
# output in 2018 than in 2017. In fact, it doesn't seem like there was much of a difference in output by avg.inhab,
# which suggests that those effects manifest over time (i.e. can't see them in short (weeklong) trials)
#reproduction by week (2019.8.12)####
#converting total output to output per week to better reflect the differences
# in the data based on the duration of the trial
#CALCULATING OUTPUT/WEEK: take the output from my raw data and dividing it
# by the number of weeks that the trial lasted:
#-trials 1-3 = 1 week
#-trials 4-5 = 4 weeks
#-trial 6 = 2 weeks (I emailed M.steele about this on 2019.8.12 to see what he thought
# about the difference in trial duration within year, but hopefully this is okay)
#going to try and do this in dplyr with the conditional mutation
# NOTE: I rounded the egg counts to the nearest whole number, b/c non-whole eggs didn't
# seem like a great variable
repro<-repro %>%
mutate(egg.week = ifelse(Trial<4, Egg.count/1,
ifelse(Trial == 4| Trial == 5, (ceiling(Egg.count/4)),
ifelse(Trial == 6, (ceiling(Egg.count/2)), NA))))
#View(repro)
#base code for future reference, NOTE, you have to name the df, or else the new variable
# won't show up in the
#df1<-df %>%
# mutate(g = ifelse(a == 2 | a == 5 | a == 7 | (a == 1 & b == 4), 2,
# ifelse(a == 0 | a == 1 | a == 4 | a == 3 | c == 4, 3, NA)))
#now want to set up the new model, including new response ("egg.year") and year
#full model first, just to check and see if stats hold up
mod2<-lmer(egg.week~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod2))
qqnorm(resid(mod2))
qqline(resid(mod2))
anova(mod2)
Anova(mod2)
summary(mod2) #only see sig. effect of avg.inhab, going to run reduced models
#notes for model reduction:
# 1. remove all nonsignificant (p>0.05) interactions with covariate
# -unless there is an interaction with a higher order interaction involving it.
# -In the case of mod2, there aren't any higher-order interactions
# 2. keep all categorical factors and their interactions (trt., year)
#going to run reduced model, minus three-way interaction
mod2.1<-lmer(egg.week~(Treatment*avg.inhab)+(avg.inhab*Year.fact)+Treatment+
avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod2.1))
qqnorm(resid(mod2.1))
qqline(resid(mod2.1))
anova(mod2.1)
Anova(mod2.1)
summary(mod2.1) # only factor that is sig. is the avg.inhab, will
# reduce model further to reflect fixed effects with only categorical interactions,
# will also include the random effect of trial nested within year
#running the reduced model
mod2.2<-lmer(egg.week~Treatment*Year.fact+avg.inhab+(1|Year.fact:Trial),
data=repro)
hist(resid(mod2.2))
qqnorm(resid(mod2.2))
qqline(resid(mod2.2))
anova(mod2.2)
Anova(mod2.2)
summary(mod2.2) # same results, will check out a model comparison?
#I don't think this is correct, it doesn't include treatment*year interaction
#maybe running this reduced model
#mod2.2<-lmer(egg.week~Treatment+avg.inhab+Year.fact+(1|Year.fact:Trial),
# data=repro)
#hist(resid(mod2.2))
#qqnorm(resid(mod2.2))
#qqline(resid(mod2.2))
#anova(mod2.2)
#Anova(mod2.2)
#summary(mod2.2) # same results, will check out a model comparison?
#comparing new models
anova(mod2,mod2.1,mod2.2) #mod 2.2 seems to be the best model in terms of AIC
#it's interesting that there was no effect of year on reproduction per week
# going to look at that now
#plotting with eggs per week####
#ordering "Treatment" and "T.6.comparison"
repro$Treatment.ord<-ordered(repro$Treatment, levels=c("Low","Medium","High"))
repro$T6.comparison.ord<-ordered(repro$T6.comparison, levels=c("Low","Medium","High","Uncaged"))
bargraph.CI(x.factor = Treatment.ord, response = egg.week,
group= Year.fact, legend=TRUE, main="Reproduction per week between years",
data = repro, ylab="egg count per reef per week")
bargraph.CI(x.factor = Treatment.ord, response = egg.week,
group= Trial, legend=TRUE, main="all trials, HR combined grouped by trial",
data = repro)
bargraph.CI(x.factor = avg.inhab, response = egg.week,
group= Year.fact, legend=TRUE, main="weekly reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count per reef per week",
data = repro)
lineplot.CI(avg.inhab,egg.week,group=Year.fact,legend = TRUE,main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count", data=repro)
#NOTE: re: lineplot, I went back and checked the raw data and saw that there was one
# reef (reef 7 in trial 3) where I recollected 11 gobies (avg.inhab=15.5, which rounds up to 16)
# but there were no eggs laid. That's why there's a 0 value at 16 for 2017
View(repro)
|
/2018.data.for.analyses.R/2018 egg counts/Scripts/2019.7.12.adding.year.factor.R
|
no_license
|
gcjarvis/Goby_reproduction_by_risk
|
R
| false
| false
| 10,771
|
r
|
# Description: changing model to reflect differences in tagging procedures (year added as fixed factor)
# Author: George C Jarvis
# Date: Sat Dec 07 10:24:59 2019
# Notes: I'm going to add year as a fixed factor to the model and code the full model as
# egg count~Treatment*Year*avg.inhab+(1|Trial:Year)
# I have to figure out if the model is running it correctly (i.e. all of the interactions, and also the error df)
#
# 2019.8.12 update: go to "reproduction per week" section
# --------------
rm(list=ls())
library(sciplot)
library(lme4)
library(car)
library(lmerTest)
library(dplyr)
library(ggplot2)
library(MASS)
library(nlme)
library(pwr)
library(HH)#for ancova and plots
library(vegan)
#importing dataset, adding number of gobies on each reef, ordering treatments####
repro<-read.csv("Data/new.data.2019.9.30.csv")
repro<-na.omit(repro) # no NA's to omit
#data manipulation####
#adding column for average density, rounded to nearest whole number of fish
repro$avg.inhab<-(ceiling((repro$Recollection+20)/2))
#adding a column for year (as a proxy for tagging procedure), where trials 1-3 = 2017, and 4-6 = 2018
repro$Year <- ifelse(repro$Trial <=3, 2017, 2018)
#want it as a factor? Going to make a variable with it as a factor, run the model, and see if I get different results
repro$Year.fact<- as.factor(repro$Year)
#modeling####
# including year, and trial nested within year, year as numeric
#mod1<-lmer(Egg.count~Treatment*avg.inhab*Year*(1|Year:Trial), data=repro)
#hist(resid(mod1))
#qqnorm(resid(mod1))
#qqline(resid(mod1))
#anova(mod1, type = "III")
#Anova(mod1)
#summary(mod1) #none of it is sig. can reduce model? also want to make sure I get the same result when year is run as factor
# including year, and trial nested within year, year as factor --> this is the right way to do it, b/c year = tagging method
#mod1.1<-lmer(Egg.count~Treatment*avg.inhab*Year.fact*(1|Year.fact:Trial), data=repro)
#hist(resid(mod1.1))
#qqnorm(resid(mod1.1))
#qqline(resid(mod1.1))
#anova(mod1.1, type = "III")
#Anova(mod1.1)
#summary(mod1.1) #see sig. effect of avg. inhab. (makes sense), year (makes sense), and avg.inhab*year (makes sense)
# SEE PLOT FOR MOD1.1 BELOW TO SEE HOW AVG.INHAB*YEAR AFFECTED REPRODUCTIVE OUTPUT (not super surprising results)
#the interaction makes sense - the number of eggs laid by number of inhabitants changed,
# because the durations of the experiments were much longer. All this interaction is telling us is that the tagging methods
# allowed for greater retention of gobies over time - which is useful information for someone trying to do this exp. again
#this is the full model --> (Egg.count~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial)
#now running without the interaction for the random effect to see how the results change
#I think that this is the best model, it tests the fixed effect of year, plus the random variation in intercept among
# trials within year. I don't think you build the models with interactions with the random variable (i.e. * vs. +)
mod1.1.1<-lmer(Egg.count~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.1.1))
qqnorm(resid(mod1.1.1))
qqline(resid(mod1.1.1))
anova(mod1.1.1)
Anova(mod1.1.1)
summary(mod1.1.1) #see sig. effect of avg. inhab. (makes sense), year (makes sense), and avg.inhab*year (makes sense)
#QUESTION: go with chi-squared test or with ANOVA test? Seems to be that I can do either? Doesn't change the result, just the stats
#reduced model 1 --> removed three-way interaction, nonsignificant
#running reduced model taking out the three-way interaction between treatment*avg.inhab*year
mod1.2<-lmer(Egg.count~(Treatment*avg.inhab)+(Treatment*Year.fact)+(avg.inhab*Year.fact)+
Treatment+avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.2))
qqnorm(resid(mod1.2))
qqline(resid(mod1.2))
anova(mod1.2)
Anova(mod1.2)
summary(mod1.2) #no sig. interaction between treatment*year (2 categorical factors), will remove them as per Mark's suggestion
#further reduced model 1 --> removed nonsignificant interaction between treatment and year
#running a further reduced model without treatment*year interaction, seeing how it affects results
mod1.3<-lmer(Egg.count~(Treatment*avg.inhab)+(avg.inhab*Year.fact)+Treatment+
avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod1.3))
qqnorm(resid(mod1.3))
qqline(resid(mod1.3))
anova(mod1.3)
Anova(mod1.3)
summary(mod1.3) # same results, will check out a model comparison?
#comparing all possible models
anova(mod1,mod1.1,mod1.1.1,mod1.2,mod1.3) #seems like model 1.3 (most reduced model) is best? Will ask M. Steele
#comparing models that I sent to M. Steele for review (mod1.1.1, 1.2, and 1.3)
anova(mod1.1.1,mod1.2,mod1.3) #seems like model 1.3 (most reduced model) is best? Will ask M. Steele
#plotting####
#ordering "Treatment" and "T.6.comparison"
repro$Treatment.ord<-ordered(repro$Treatment, levels=c("Low","Medium","High"))
repro$T6.comparison.ord<-ordered(repro$T6.comparison, levels=c("Low","Medium","High","Uncaged"))
#grouped by trial
bargraph.CI(x.factor = Treatment.ord, response = Egg.count,
group= Year.fact, legend=TRUE, main="Reproduction by risk and year",
data = repro, ylab="egg count")
bargraph.CI(x.factor = Treatment.ord, response = Egg.count,
group= Trial, legend=TRUE, main="all trials, HR combined grouped by trial",
data = repro)
bargraph.CI(x.factor = avg.inhab, response = Egg.count,
group= Year.fact, legend=TRUE, main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count",
data = repro)
lineplot.CI(avg.inhab,Egg.count,group=Year.fact,legend = TRUE,main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count", data=repro)
# avg.inhab*year stats show that there was a greater effect of avg.inhab on reproductive
# output in 2018 than in 2017. In fact, it doesn't seem like there was much of a difference in output by avg.inhab,
# which suggests that those effects manifest over time (i.e. can't see them in short (weeklong) trials)
#reproduction by week (2019.8.12)####
#converting total output to output per week to better reflect the differences
# in the data based on the duration of the trial
#CALCULATING OUTPUT/WEEK: take the output from my raw data and dividing it
# by the number of weeks that the trial lasted:
#-trials 1-3 = 1 week
#-trials 4-5 = 4 weeks
#-trial 6 = 2 weeks (I emailed M.steele about this on 2019.8.12 to see what he thought
# about the difference in trial duration within year, but hopefully this is okay)
#going to try and do this in dplyr with the conditional mutation
# NOTE: I rounded the egg counts to the nearest whole number, b/c non-whole eggs didn't
# seem like a great variable
repro<-repro %>%
mutate(egg.week = ifelse(Trial<4, Egg.count/1,
ifelse(Trial == 4| Trial == 5, (ceiling(Egg.count/4)),
ifelse(Trial == 6, (ceiling(Egg.count/2)), NA))))
#View(repro)
#base code for future reference, NOTE, you have to name the df, or else the new variable
# won't show up in the
#df1<-df %>%
# mutate(g = ifelse(a == 2 | a == 5 | a == 7 | (a == 1 & b == 4), 2,
# ifelse(a == 0 | a == 1 | a == 4 | a == 3 | c == 4, 3, NA)))
#now want to set up the new model, including new response ("egg.year") and year
#full model first, just to check and see if stats hold up
mod2<-lmer(egg.week~Treatment*avg.inhab*Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod2))
qqnorm(resid(mod2))
qqline(resid(mod2))
anova(mod2)
Anova(mod2)
summary(mod2) #only see sig. effect of avg.inhab, going to run reduced models
#notes for model reduction:
# 1. remove all nonsignificant (p>0.05) interactions with covariate
# -unless there is an interaction with a higher order interaction involving it.
# -In the case of mod2, there aren't any higher-order interactions
# 2. keep all categorical factors and their interactions (trt., year)
#going to run reduced model, minus three-way interaction
mod2.1<-lmer(egg.week~(Treatment*avg.inhab)+(avg.inhab*Year.fact)+Treatment+
avg.inhab+Year.fact+(1|Year.fact:Trial), data=repro)
hist(resid(mod2.1))
qqnorm(resid(mod2.1))
qqline(resid(mod2.1))
anova(mod2.1)
Anova(mod2.1)
summary(mod2.1) # only factor that is sig. is the avg.inhab, will
# reduce model further to reflect fixed effects with only categorical interactions,
# will also include the random effect of trial nested within year
#running the reduced model
mod2.2<-lmer(egg.week~Treatment*Year.fact+avg.inhab+(1|Year.fact:Trial),
data=repro)
hist(resid(mod2.2))
qqnorm(resid(mod2.2))
qqline(resid(mod2.2))
anova(mod2.2)
Anova(mod2.2)
summary(mod2.2) # same results, will check out a model comparison?
#I don't think this is correct, it doesn't include treatment*year interaction
#maybe running this reduced model
#mod2.2<-lmer(egg.week~Treatment+avg.inhab+Year.fact+(1|Year.fact:Trial),
# data=repro)
#hist(resid(mod2.2))
#qqnorm(resid(mod2.2))
#qqline(resid(mod2.2))
#anova(mod2.2)
#Anova(mod2.2)
#summary(mod2.2) # same results, will check out a model comparison?
#comparing new models
anova(mod2,mod2.1,mod2.2) #mod 2.2 seems to be the best model in terms of AIC
#it's interesting that there was no effect of year on reproduction per week
# going to look at that now
#plotting with eggs per week####
#ordering "Treatment" and "T.6.comparison"
repro$Treatment.ord<-ordered(repro$Treatment, levels=c("Low","Medium","High"))
repro$T6.comparison.ord<-ordered(repro$T6.comparison, levels=c("Low","Medium","High","Uncaged"))
bargraph.CI(x.factor = Treatment.ord, response = egg.week,
group= Year.fact, legend=TRUE, main="Reproduction per week between years",
data = repro, ylab="egg count per reef per week")
bargraph.CI(x.factor = Treatment.ord, response = egg.week,
group= Trial, legend=TRUE, main="all trials, HR combined grouped by trial",
data = repro)
bargraph.CI(x.factor = avg.inhab, response = egg.week,
group= Year.fact, legend=TRUE, main="weekly reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count per reef per week",
data = repro)
lineplot.CI(avg.inhab,egg.week,group=Year.fact,legend = TRUE,main="reproduction by number of gobies",
xlab="avg.inhab", ylab="egg count", data=repro)
#NOTE: re: lineplot, I went back and checked the raw data and saw that there was one
# reef (reef 7 in trial 3) where I recollected 11 gobies (avg.inhab=15.5, which rounds up to 16)
# but there were no eggs laid. That's why there's a 0 value at 16 for 2017
View(repro)
|
## survey_count() must fail fast when its required site-id and season
## arguments are missing (zero or only one supplied).
test_that("survey_count requires sid and season", {
data("wastd_data")
expect_error(wastd_data$surveys %>% survey_count())
expect_error(wastd_data$surveys %>% survey_count(1L))
})
test_that("survey_count filters surveys to site_id and season", {
data("wastd_data")
one_season <- unique(wastd_data$surveys$season)[1]
one_site_id <- unique(wastd_data$surveys$site_id)[1]
manual_result <- wastd_data$surveys %>%
dplyr::filter(site_id == one_site_id, season == one_season) %>%
nrow()
calculated_result <- survey_count(
wastd_data$surveys,
one_site_id,
one_season
)
expect_equal(calculated_result, manual_result)
})
test_that("survey_ground_covered works", {
data("wastd_data")
one_site_id <- unique(wastd_data$surveys$site_id)[1]
one_season <- unique(wastd_data$surveys$season)[1]
beach_kms <- 2
manual_result <- wastd_data$surveys %>%
dplyr::filter(site_id == one_site_id, season == one_season) %>%
nrow() * beach_kms
calculated_result <- survey_ground_covered(
wastd_data$surveys,
one_site_id,
beach_kms,
one_season
)
expect_equal(calculated_result, manual_result)
})
test_that("surveys_per_site_name_and_date returns a tibble", {
data("wastd_data")
x <- surveys_per_site_name_and_date(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(names(x), c("season", "turtle_date", "site_name", "n"))
})
test_that("survey_hours_per_site_name_and_date returns a tibble", {
data("wastd_data")
x <- survey_hours_per_site_name_and_date(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(
names(x),
c("season", "turtle_date", "site_name", "hours_surveyed")
)
})
test_that("survey_hours_per_person returns a tibble", {
data("wastd_data")
x <- survey_hours_per_person(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(names(x), c("season", "reporter", "hours_surveyed"))
})
test_that("list_survey_count returns a reactable", {
data("wastd_data")
x <- list_survey_count(wastd_data$surveys)
expect_equal(class(x), c("reactable", "htmlwidget"))
})
test_that("list_survey_effort returns a reactable", {
data("wastd_data")
x <- list_survey_effort(wastd_data$surveys)
expect_equal(class(x), c("reactable", "htmlwidget"))
})
test_that("plot_survey_count returns a ggplot", {
data("wastd_data")
t <- tempdir()
fs::dir_ls(t) %>% fs::file_delete()
suppressWarnings(
x <- plot_survey_count(wastd_data$surveys)
)
expect_equal(class(x), c("gg", "ggplot"))
expect_false(fs::file_exists(fs::path(t, "TEST_survey_count_place.png")))
suppressWarnings(
x <- plot_survey_count(wastd_data$surveys,
export = TRUE, local_dir = t,
prefix = "TEST", placename = "PLACE"
)
)
expect_equal(class(x), c("gg", "ggplot"))
expect_true(fs::file_exists(fs::path(t, "TEST_survey_count_place.png")))
})
test_that("plot_survey_effort returns a ggplot", {
data("wastd_data")
t <- tempdir()
fs::dir_ls(t) %>% fs::file_delete()
suppressWarnings(
x <- plot_survey_effort(wastd_data$surveys)
)
expect_equal(class(x), c("gg", "ggplot"))
expect_false(fs::file_exists(fs::path(t, "TEST_survey_effort_place.png")))
suppressWarnings(
x <- plot_survey_effort(wastd_data$surveys,
export = TRUE, local_dir = t,
prefix = "TEST", placename = "PLACE"
)
)
expect_equal(class(x), c("gg", "ggplot"))
expect_true(fs::file_exists(fs::path(t, "TEST_survey_effort_place.png")))
})
test_that("survey_hours_heatmap returns a ggplot", {
data("wastd_data")
t <- tempdir()
fs::dir_ls(t) %>% fs::file_delete()
x <- survey_hours_heatmap(wastd_data$surveys)
expect_equal(class(x), c("gg", "ggplot"))
expect_false(fs::file_exists(fs::path(t, "TEST_survey_hours_heatmap_place.png")))
x <- survey_hours_heatmap(wastd_data$surveys,
export = TRUE, local_dir = t,
prefix = "TEST", placename = "PLACE"
)
expect_equal(class(x), c("gg", "ggplot"))
expect_true(fs::file_exists(fs::path(t, "TEST_survey_hours_heatmap_place.png")))
})
test_that("survey_count_heatmap returns a ggplot", {
data("wastd_data")
t <- tempdir()
fs::dir_ls(t) %>% fs::file_delete()
x <- survey_count_heatmap(wastd_data$surveys)
expect_equal(class(x), c("gg", "ggplot"))
expect_false(fs::file_exists(fs::path(t, "TEST_survey_count_heatmap_place.png")))
x <- survey_count_heatmap(wastd_data$surveys,
export = TRUE, local_dir = t,
prefix = "TEST", placename = "PLACE"
)
expect_equal(class(x), c("gg", "ggplot"))
expect_true(fs::file_exists(fs::path(t, "TEST_survey_count_heatmap_place.png")))
})
## survey_season_stats() should return one well-typed summary tibble.
## NOTE(review): the expect_false(is.na(...)) / class() checks below assume
## the result has exactly one row (single season in the test fixture) —
## confirm against the wastd_data package dataset.
test_that("survey_season_stats returns a tibble", {
data("wastd_data")
x <- survey_season_stats(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_false(is.na(x$season))
expect_false(is.na(x$first_day))
expect_false(is.na(x$last_day))
expect_false(is.na(x$season_length_days))
expect_equal(class(x$season), "numeric")
expect_equal(class(x$first_day), c("POSIXct", "POSIXt"))
expect_equal(class(x$last_day), c("POSIXct", "POSIXt"))
expect_equal(class(x$season_length_days), "numeric")
expect_equal(
names(x),
c(
"season",
"first_day",
"last_day",
"season_length_days",
"number_surveys",
"hours_surveyed"
)
)
})
test_that("survey_season_site_stats returns a tibble", {
data("wastd_data")
x <- survey_season_site_stats(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(
names(x),
c(
"season",
"site_name",
"first_day",
"last_day",
"season_length_days",
"number_surveys",
"hours_surveyed"
)
)
})
test_that("survey_show_detail returns a tibble", {
data("wastd_data")
x <- survey_show_detail(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(
names(x),
c(
"change_url",
"site_name",
"season",
"turtle_date",
"calendar_date_awst",
"is_production",
"start_time",
"end_time",
"duration_hours",
"start_comments",
"end_comments",
"status"
)
)
})
test_that("duplicate_surveys returns a tibble", {
data("wastd_data")
x <- duplicate_surveys(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(
names(x),
c(
"season",
"calendar_date_awst",
"site_name",
"site_id",
"n",
"wastd"
)
)
})
# usethis::use_r("summarise_surveys")
|
/tests/testthat/test-summarise_surveys.R
|
no_license
|
dbca-wa/wastdr
|
R
| false
| false
| 6,501
|
r
|
test_that("survey_count requires sid and season", {
data("wastd_data")
expect_error(wastd_data$surveys %>% survey_count())
expect_error(wastd_data$surveys %>% survey_count(1L))
})
test_that("survey_count filters surveys to site_id and season", {
data("wastd_data")
one_season <- unique(wastd_data$surveys$season)[1]
one_site_id <- unique(wastd_data$surveys$site_id)[1]
manual_result <- wastd_data$surveys %>%
dplyr::filter(site_id == one_site_id, season == one_season) %>%
nrow()
calculated_result <- survey_count(
wastd_data$surveys,
one_site_id,
one_season
)
expect_equal(calculated_result, manual_result)
})
test_that("survey_ground_covered works", {
data("wastd_data")
one_site_id <- unique(wastd_data$surveys$site_id)[1]
one_season <- unique(wastd_data$surveys$season)[1]
beach_kms <- 2
manual_result <- wastd_data$surveys %>%
dplyr::filter(site_id == one_site_id, season == one_season) %>%
nrow() * beach_kms
calculated_result <- survey_ground_covered(
wastd_data$surveys,
one_site_id,
beach_kms,
one_season
)
expect_equal(calculated_result, manual_result)
})
test_that("surveys_per_site_name_and_date returns a tibble", {
data("wastd_data")
x <- surveys_per_site_name_and_date(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(names(x), c("season", "turtle_date", "site_name", "n"))
})
test_that("survey_hours_per_site_name_and_date returns a tibble", {
data("wastd_data")
x <- survey_hours_per_site_name_and_date(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(
names(x),
c("season", "turtle_date", "site_name", "hours_surveyed")
)
})
test_that("survey_hours_per_person returns a tibble", {
data("wastd_data")
x <- survey_hours_per_person(wastd_data$surveys)
expect_true(tibble::is_tibble(x))
expect_equal(names(x), c("season", "reporter", "hours_surveyed"))
})
test_that("list_survey_count returns a reactable", {
data("wastd_data")
x <- list_survey_count(wastd_data$surveys)
expect_equal(class(x), c("reactable", "htmlwidget"))
})
test_that("list_survey_effort returns a reactable", {
data("wastd_data")
x <- list_survey_effort(wastd_data$surveys)
expect_equal(class(x), c("reactable", "htmlwidget"))
})
# Plot helpers: each returns a ggplot object and, when export = TRUE, writes
# "<prefix>_<plot name>_<placename>.png" into local_dir (note the expected
# file names below lower-case the "PLACE" placename to "place").
test_that("plot_survey_count returns a ggplot", {
  data("wastd_data")
  t <- tempdir()
  # Start from an empty temp dir so the file-existence checks are meaningful.
  fs::dir_ls(t) %>% fs::file_delete()
  suppressWarnings(
    x <- plot_survey_count(wastd_data$surveys)
  )
  expect_equal(class(x), c("gg", "ggplot"))
  # Without export = TRUE, no file must be written.
  expect_false(fs::file_exists(fs::path(t, "TEST_survey_count_place.png")))
  suppressWarnings(
    x <- plot_survey_count(wastd_data$surveys,
      export = TRUE, local_dir = t,
      prefix = "TEST", placename = "PLACE"
    )
  )
  expect_equal(class(x), c("gg", "ggplot"))
  expect_true(fs::file_exists(fs::path(t, "TEST_survey_count_place.png")))
})
test_that("plot_survey_effort returns a ggplot", {
  data("wastd_data")
  t <- tempdir()
  fs::dir_ls(t) %>% fs::file_delete()
  suppressWarnings(
    x <- plot_survey_effort(wastd_data$surveys)
  )
  expect_equal(class(x), c("gg", "ggplot"))
  expect_false(fs::file_exists(fs::path(t, "TEST_survey_effort_place.png")))
  suppressWarnings(
    x <- plot_survey_effort(wastd_data$surveys,
      export = TRUE, local_dir = t,
      prefix = "TEST", placename = "PLACE"
    )
  )
  expect_equal(class(x), c("gg", "ggplot"))
  expect_true(fs::file_exists(fs::path(t, "TEST_survey_effort_place.png")))
})
test_that("survey_hours_heatmap returns a ggplot", {
  data("wastd_data")
  t <- tempdir()
  fs::dir_ls(t) %>% fs::file_delete()
  x <- survey_hours_heatmap(wastd_data$surveys)
  expect_equal(class(x), c("gg", "ggplot"))
  expect_false(fs::file_exists(fs::path(t, "TEST_survey_hours_heatmap_place.png")))
  x <- survey_hours_heatmap(wastd_data$surveys,
    export = TRUE, local_dir = t,
    prefix = "TEST", placename = "PLACE"
  )
  expect_equal(class(x), c("gg", "ggplot"))
  expect_true(fs::file_exists(fs::path(t, "TEST_survey_hours_heatmap_place.png")))
})
test_that("survey_count_heatmap returns a ggplot", {
  data("wastd_data")
  t <- tempdir()
  fs::dir_ls(t) %>% fs::file_delete()
  x <- survey_count_heatmap(wastd_data$surveys)
  expect_equal(class(x), c("gg", "ggplot"))
  expect_false(fs::file_exists(fs::path(t, "TEST_survey_count_heatmap_place.png")))
  x <- survey_count_heatmap(wastd_data$surveys,
    export = TRUE, local_dir = t,
    prefix = "TEST", placename = "PLACE"
  )
  expect_equal(class(x), c("gg", "ggplot"))
  expect_true(fs::file_exists(fs::path(t, "TEST_survey_count_heatmap_place.png")))
})
# Season-level summary tables: column contracts and column classes.
test_that("survey_season_stats returns a tibble", {
  data("wastd_data")
  x <- survey_season_stats(wastd_data$surveys)
  expect_true(tibble::is_tibble(x))
  # NOTE(review): these is.na() expectations assume the fixture yields a
  # single season row -- expect_false() needs a length-1 logical; confirm.
  expect_false(is.na(x$season))
  expect_false(is.na(x$first_day))
  expect_false(is.na(x$last_day))
  expect_false(is.na(x$season_length_days))
  expect_equal(class(x$season), "numeric")
  expect_equal(class(x$first_day), c("POSIXct", "POSIXt"))
  expect_equal(class(x$last_day), c("POSIXct", "POSIXt"))
  expect_equal(class(x$season_length_days), "numeric")
  expect_equal(
    names(x),
    c(
      "season",
      "first_day",
      "last_day",
      "season_length_days",
      "number_surveys",
      "hours_surveyed"
    )
  )
})
test_that("survey_season_site_stats returns a tibble", {
  data("wastd_data")
  x <- survey_season_site_stats(wastd_data$surveys)
  expect_true(tibble::is_tibble(x))
  expect_equal(
    names(x),
    c(
      "season",
      "site_name",
      "first_day",
      "last_day",
      "season_length_days",
      "number_surveys",
      "hours_surveyed"
    )
  )
})
test_that("survey_show_detail returns a tibble", {
  data("wastd_data")
  x <- survey_show_detail(wastd_data$surveys)
  expect_true(tibble::is_tibble(x))
  expect_equal(
    names(x),
    c(
      "change_url",
      "site_name",
      "season",
      "turtle_date",
      "calendar_date_awst",
      "is_production",
      "start_time",
      "end_time",
      "duration_hours",
      "start_comments",
      "end_comments",
      "status"
    )
  )
})
test_that("duplicate_surveys returns a tibble", {
  data("wastd_data")
  x <- duplicate_surveys(wastd_data$surveys)
  expect_true(tibble::is_tibble(x))
  expect_equal(
    names(x),
    c(
      "season",
      "calendar_date_awst",
      "site_name",
      "site_id",
      "n",
      "wastd"
    )
  )
})
# usethis::use_r("summarise_surveys")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powerstress.R
\name{enorm}
\alias{enorm}
\title{Explicit Normalization
Normalizes distances}
\usage{
enorm(x, w = 1)
}
\arguments{
\item{x}{numeric matrix}
\item{w}{weight}
}
\value{
a constant
}
\description{
Explicit Normalization
Normalizes distances
}
|
/man/enorm.Rd
|
no_license
|
cran/cops
|
R
| false
| true
| 335
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/powerstress.R
\name{enorm}
\alias{enorm}
\title{Explicit Normalization
Normalizes distances}
\usage{
enorm(x, w = 1)
}
\arguments{
\item{x}{numeric matrix}
\item{w}{weight}
}
\value{
a constant
}
\description{
Explicit Normalization
Normalizes distances
}
|
\name{summ_matches}
\alias{summ_matches}
\title{
Computes table of absolute standardized differences
}
\description{
Computes absolute standardized differences for both
continuous and binary variables. Called by \code{\link{opt_nearfar}} to
summarize results of near-far match.
}
\usage{
summ_matches(dta, iv, covs, match)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dta}{The name of the data frame on which matching was performed}
\item{iv}{The name of the instrumental variable, e.g., iv="QOB"}
\item{covs}{A vector of the names of the covariates to make ``near'',
e.g., covs=c("age", "sex", "race")}
\item{match}{A two-column matrix of row indices of paired matches}
}
\value{
A table of mean variable values for both the ``encouraged'' and
``discouraged'' groups across all variables plus absolute standardized
differences for each variable
}
\author{Joseph Rigdon \email{jrigdon@stanford.edu}}
\seealso{\code{\link{opt_nearfar}}}
\examples{
k2 = matches(dta=mtcars, covs=c("cyl", "disp"), sinks=0.2, iv="carb",
cutpoint=2, imp.var=c("cyl"), tol.var=0.03)
summ_matches(dta=mtcars, iv="carb", covs=c("cyl", "disp"), match=k2)
}
|
/man/summ_matches.Rd
|
no_license
|
cran/nearfar
|
R
| false
| false
| 1,184
|
rd
|
\name{summ_matches}
\alias{summ_matches}
\title{
Computes table of absolute standardized differences
}
\description{
Computes absolute standardized differences for both
continuous and binary variables. Called by \code{\link{opt_nearfar}} to
summarize results of near-far match.
}
\usage{
summ_matches(dta, iv, covs, match)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dta}{The name of the data frame on which matching was performed}
\item{iv}{The name of the instrumental variable, e.g., iv="QOB"}
\item{covs}{A vector of the names of the covariates to make ``near'',
e.g., covs=c("age", "sex", "race")}
\item{match}{A two-column matrix of row indices of paired matches}
}
\value{
A table of mean variable values for both the ``encouraged'' and
``discouraged'' groups across all variables plus absolute standardized
differences for each variable
}
\author{Joseph Rigdon \email{jrigdon@stanford.edu}}
\seealso{\code{\link{opt_nearfar}}}
\examples{
k2 = matches(dta=mtcars, covs=c("cyl", "disp"), sinks=0.2, iv="carb",
cutpoint=2, imp.var=c("cyl"), tol.var=0.03)
summ_matches(dta=mtcars, iv="carb", covs=c("cyl", "disp"), match=k2)
}
|
# Exploratory session: read the Titanic victims text file, strip comment
# lines, and explore the raw lines before building a data frame.
getwd()
# NOTE(review): hard-coded absolute Windows paths -- adjust for your machine.
setwd("C:\\Users\\welcome\\Documents\\ExcelR\\Day 05 Basic Stat _ R\\Data Sets\\") # set a working directory of your choice
victims <- readLines("C:\\Users\\welcome\\Documents\\ExcelR\\Day 05 Basic Stat _ R\\Data Sets\\victims.txt")
victims
?as
# One column named "victims", one row per input line.
df <- as.data.frame (victims)
df
class(df)
?length
length(df$victims)
nrow(df)
ncol(df)
dim(df)
str(df)
#https://rstudio.com/wp-content/uploads/2016/09/RegExCheatsheet.pdf
#https://cran.r-project.org/web/packages/stringr/vignettes/regular-expressions.html
?grepl
# Logical mask of comment lines (those starting with "%").
comments <- grepl("^%", victims)
comments
# NOTE(review): this overwrites `comments` with a has-a-digit mask, so
# `text` below excludes every line containing a digit -- presumably the
# "^%" mask above was intended; verify against the input file.
comments <- grepl("\\d+", victims)
comments
text <- victims[!comments]
text
victims
# grepl() returns a logical vector, one TRUE/FALSE per line ...
comments_grepl <- grepl("^%", victims)
comments_grepl
# ... whereas grep() returns the integer positions of the matches, so they
# must be dropped with NEGATIVE indexing.  The original
# `victims[!comments_grep]` coerced the indices to logicals and selected
# nothing.  Guard the no-match case: `x[-integer(0)]` also selects nothing.
comments_grep <- grep("^%", victims)
comments_grep
text_grep <- if (length(comments_grep) > 0) victims[-comments_grep] else victims
text_grep
text[1]
x <- text[1]
x
# sub() replaces only the FIRST match (here, a single digit) ...
y <- sub ("[[:digit:]]", "", x) # (pattern, replacement, x)
y
text[1]
x <- text[1]
# ... while gsub() replaces EVERY run of digits.
y <- gsub ("\\d+", "", x)
y
text[9]
# regexpr() reports the first match position; gregexpr() reports all of them.
r <- regexpr("9", text[9])
r
r <- gregexpr("9", text[9])
r
text
# Split each "Name,BirthYear,DeathYear" record on commas ...
splitlines <- strsplit(text, split = ",")
splitlines
# ... and reshape the flattened pieces into a 3-column character matrix.
Lines <- matrix (unlist(splitlines), nrow=length(splitlines), byrow = TRUE)
Lines
colnames(Lines) <- c("Name", "BirthYear", "DeathYear")
#in a data frame should be treated as factor variables or as just plain strings
titanic_victims <- as.data.frame(Lines, stringsAsFactors = FALSE)
titanic_victims
class(titanic_victims$BirthYear)
# Rebuild with factors to contrast the two stringsAsFactors behaviours.
titanic_victims <- as.data.frame(Lines, stringsAsFactors = TRUE)
titanic_victims$DeathYear
class(titanic_victims$BirthYear)
# Convert the factor year columns to numeric year values.  as.numeric() on
# a factor returns the internal level codes, not the labels, so the
# conversion must go through as.character() first.
titanic_victims$BirthYear <- as.numeric(as.character(titanic_victims$BirthYear))
titanic_victims$BirthYear
titanic_victims <- transform(titanic_victims,
  BirthYear = as.numeric(as.character(BirthYear)),
  DeathYear = as.numeric(as.character(DeathYear)))
titanic_victims
str(titanic_victims)
mean(titanic_victims$BirthYear)
round(mean(titanic_victims$BirthYear))
#=============================================================================
install.packages("lattice")
library(lattice)
data(barley)
View(barley)
dim(barley)
str(barley)
?lapply
lapply(barley, function(x) length(unique(x)))
sapply(barley, function(x) length(unique(x)))
apply(barley, 2,function(x) length(unique(x)))
?apply
levels(as.factor(barley$site))
tapply (barley$yield, barley$site, sum)
tapply (barley$yield, barley$site, mean)
|
/R code 2.R
|
no_license
|
vaitybharati/R_basics_calc-2
|
R
| false
| false
| 2,568
|
r
|
getwd()
setwd("C:\\Users\\welcome\\Documents\\ExcelR\\Day 05 Basic Stat _ R\\Data Sets\\") # set a working directory of your choice
victims <- readLines("C:\\Users\\welcome\\Documents\\ExcelR\\Day 05 Basic Stat _ R\\Data Sets\\victims.txt")
victims
?as
df <- as.data.frame (victims)
df
class(df)
?length
length(df$victims)
nrow(df)
ncol(df)
dim(df)
str(df)
#https://rstudio.com/wp-content/uploads/2016/09/RegExCheatsheet.pdf
#https://cran.r-project.org/web/packages/stringr/vignettes/regular-expressions.html
?grepl
comments <- grepl("^%", victims)
comments
comments <- grepl("\\d+", victims)
comments
text <- victims[!comments]
text
victims
comments_grepl <- grepl("^%", victims)
comments_grepl
comments_grep <- grep ("^%", victims)
comments_grep
text_grep <- victims[!comments_grep]
text_grep
text[1]
x <- text[1]
x
y <- sub ("[[:digit:]]", "", x) # (pattern, replacement, x)
y
text[1]
x <- text[1]
y <- gsub ("\\d+", "", x)
y
text[9]
r <- regexpr("9", text[9])
r
r <- gregexpr("9", text[9])
r
text
splitlines <- strsplit(text, split = ",")
splitlines
Lines <- matrix (unlist(splitlines), nrow=length(splitlines), byrow = TRUE)
Lines
colnames(Lines) <- c("Name", "BirthYear", "DeathYear")
#in a data frame should be treated as factor variables or as just plain strings
titanic_victims <- as.data.frame(Lines, stringsAsFactors = FALSE)
titanic_victims
class(titanic_victims$BirthYear)
titanic_victims <- as.data.frame(Lines, stringsAsFactors = TRUE)
titanic_victims$DeathYear
class(titanic_victims$BirthYear)
titanic_victims$BirthYear <- as.numeric(titanic_victims$BirthYear)
titanic_victims$BirthYear
titanic_victims <- transform (titanic_victims, BirthYear = as.numeric(BirthYear),
DeathYear = as.numeric(DeathYear))
class(titanic_victims$BirthYear)
class(titanic_victims$DeathYear)
titanic_victims
str(titanic_victims)
mean(titanic_victims$BirthYear)
round(mean(titanic_victims$BirthYear))
#=============================================================================
install.packages("lattice")
library(lattice)
data(barley)
View(barley)
dim(barley)
str(barley)
?lapply
lapply(barley, function(x) length(unique(x)))
sapply(barley, function(x) length(unique(x)))
apply(barley, 2,function(x) length(unique(x)))
?apply
levels(as.factor(barley$site))
tapply (barley$yield, barley$site, sum)
tapply (barley$yield, barley$site, mean)
|
# Programming assignment (Course 2, Week 3): caching the inverse of a matrix.
# makeCacheMatrix() and cacheSolve() work as a pair: the first wraps a matrix
# together with a cache slot, the second computes (or reuses) its inverse.

# Build a cache-aware wrapper around matrix `x`.  Returns a list of four
# accessor closures that share `x` and the cached inverse through the
# enclosing environment:
#   set(y)        - replace the wrapped matrix and invalidate the cache
#   get()         - return the wrapped matrix
#   setinverse(v) - store a computed inverse in the cache
#   getinverse()  - return the cached inverse, or NULL if none is stored
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(new_matrix) {
    # `<<-` writes into the enclosing environment shared by all closures;
    # replacing the matrix must also discard any stale cached inverse.
    x <<- new_matrix
    cached_inverse <<- NULL
  }
  get <- function() x
  setinverse <- function(value) cached_inverse <<- value
  getinverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Return the inverse of a matrix wrapped by makeCacheMatrix().  The first
# call computes the inverse with solve() and stores it in the wrapper's
# cache; later calls reuse the cached value and emit a message instead of
# recomputing.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinverse(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
smx87/C2W3assignment
|
R
| false
| false
| 2,236
|
r
|
# C2W3assignment
## The makeCacheAtrix function and the cachesolve function can only work by pair
## functions do
## this function aims at preparing the cacheSolve function to work.
## It is composed of 4 functions that can be called by name like 'x$getinverse'
# the set function is to reset the concerned matrix without recalling the function
# the get function is to return the concerned matrix
# the setinverse function will be use later to enter the inverse matrix once calculated (to cache it)
# the getinverse is to read the actual inverse if it has been calculated
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
#once you call the matrix you don't know its inverse as you didn't calculated it
set <- function(y) {
x <<- y
i <<- NULL
#of course if you reset the matrix, its inverse is not the same anymore
# the <<- sign is to assign the value outside the environment of this subfunction
# to the parent environment which is the makeCachematrix function.
}
get <- function() x
setinverse <- function(inverse) i <<- inverse
getinverse <- function() i
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
#this writing maybe strange but the 2 objects at the different sides of the = sign are not the same :
# name = function in order to call the function with $
}
## this function can only work with a matrix set by the makeCacheMatrix function.
#It will firs check if the inverse has already been calculated
#if yes it will return the value got from the makeCacheMatrix
#otherwise it will calculate the inverse, return it to the cache and print it
cacheSolve <- function(x, ...) {
i <- x$getinverse()
if(!is.null(i)) {
#first the cachesolve function check wether the inverse is not in the cache memory already
message("getting cached data")
return(i)
#here the inverse is already on the cache (in makeCacheMAtric function)
}
data <- x$get()
i <- solve(data, ...)
#at this step, as i was null, the Solve calculation has to be done
x$setinverse(i)
#the inverse is return to the makeCacheMatrix function :
# the inverse is in the cache
i
#the value of the inverse is printed in the console
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapreduce_spark.R
\name{sparkControl}
\alias{sparkControl}
\title{Specify Control Parameters for Spark Job}
\usage{
sparkControl()
}
\description{
Specify control parameters for a Spark job. See \code{rhwatch} for details about each of the parameters.
}
|
/man/sparkControl.Rd
|
permissive
|
lhsego/datadr
|
R
| false
| true
| 334
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mapreduce_spark.R
\name{sparkControl}
\alias{sparkControl}
\title{Specify Control Parameters for Spark Job}
\usage{
sparkControl()
}
\description{
Specify control parameters for a Spark job. See \code{rhwatch} for details about each of the parameters.
}
|
# Flag near-duplicate points.  dup(i) is set to .true. when point
# (x(i), y(i)) lies within xtol of some EARLIER point in x and within
# ytol in y.  The tolerances are frac times the width (rw(2)-rw(1)) and
# height (rw(4)-rw(3)) of the rectangular window rw.
subroutine inddup(x,y,n,rw,frac,dup)
implicit double precision(a-h,o-z)
logical dup(n)
dimension x(n), y(n), rw(4)
xtol = frac*(rw(2)-rw(1))
ytol = frac*(rw(4)-rw(3))
# The first point has no predecessor, so it can never be a duplicate.
dup(1) = .false.
do i = 2,n {
dup(i) = .false.
# Compare point i against every earlier point j; stop at the first hit.
do j = 1,i-1 {
dx = abs(x(i)-x(j))
dy = abs(y(i)-y(j))
if(dx < xtol & dy < ytol) {
dup(i) = .true.
break
}
}
}
return
end
|
/deldir/code.discarded/inddup.r
|
permissive
|
solgenomics/R_libs
|
R
| false
| false
| 356
|
r
|
subroutine inddup(x,y,n,rw,frac,dup)
implicit double precision(a-h,o-z)
logical dup(n)
dimension x(n), y(n), rw(4)
xtol = frac*(rw(2)-rw(1))
ytol = frac*(rw(4)-rw(3))
dup(1) = .false.
do i = 2,n {
dup(i) = .false.
do j = 1,i-1 {
dx = abs(x(i)-x(j))
dy = abs(y(i)-y(j))
if(dx < xtol & dy < ytol) {
dup(i) = .true.
break
}
}
}
return
end
|
# Of the four types of sources indicated by the type variable, which of these four
# sources have seen decreases in emissions from 1999-2008 for Baltimore City?
# read in data
NEI <- readRDS("summarySCC_PM25.rds")
# NOTE(review): SCC is loaded but never referenced below -- confirm whether
# this read can be dropped from this particular plot script.
SCC <- readRDS("Source_Classification_Code.rds")
# load packages
library(tidyverse)
# convert as factor
NEI$type <- as.factor(NEI$type)
# filter data: fips "24510" selects Baltimore City; total emissions per
# year and source type.
MD_emissions2 <- NEI %>%
  select(fips, Emissions, year, type) %>%
  filter(fips == "24510") %>%
  group_by(year, type) %>%
  summarize(total = sum(Emissions))
# create PNG file
png('plot3.png')
# plot: one coloured series per source type, with a smoothed trend line.
ggplot(MD_emissions2, aes(year, total, color = type)) +
  geom_point(size = 2, alpha = 0.5) +
  geom_smooth() +
  labs(title = "Emissions by Source: 1999 - 2008",
    x = "Year", y = "Emissions (by tons)") +
  theme_bw()
dev.off()
|
/plot3.R
|
no_license
|
byanuaria/Exploratory-Data-Analysis2
|
R
| false
| false
| 842
|
r
|
# Of the four types of sources indicated by the type variable, which of these four
# sources have seen decreases in emissions from 1999-2008 for Baltimore City?
# read in data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# load packages
library(tidyverse)
# convert as factor
NEI$type <- as.factor(NEI$type)
# filter data
MD_emissions2 <- NEI %>%
select(fips, Emissions, year, type) %>%
filter(fips == "24510") %>%
group_by(year, type) %>%
summarize(total = sum(Emissions))
# create PNG file
png('plot3.png')
# plot
ggplot(MD_emissions2, aes(year, total, color = type)) +
geom_point(size = 2, alpha = 0.5) +
geom_smooth() +
labs(title = "Emissions by Source: 1999 - 2008",
x = "Year", y = "Emissions (by tons)") +
theme_bw()
dev.off()
|
# Interactive visual tests for the Heatmap class: each section below builds
# a Heatmap object and draws it so the output can be inspected by eye.
library(circlize)
library(ComplexHeatmap)
library(GetoptLong)
# Fix the RNG so the simulated matrix is reproducible across runs.
set.seed(123)
# Row-group and column-group sizes for the simulated 24 x 24 test matrix.
nr1 = 10; nr2 = 8; nr3 = 6
nc1 = 6; nc2 = 8; nc3 = 10
# Block-structured matrix: each row group has an elevated mean in one column
# group, so clustering and k-means splitting can recover the groups.
mat = cbind(rbind(matrix(rnorm(nr1*nc1, mean = 1, sd = 0.5), nr = nr1),
  matrix(rnorm(nr2*nc1, mean = 0, sd = 0.5), nr = nr2),
  matrix(rnorm(nr3*nc1, mean = 0, sd = 0.5), nr = nr3)),
  rbind(matrix(rnorm(nr1*nc2, mean = 0, sd = 0.5), nr = nr1),
    matrix(rnorm(nr2*nc2, mean = 1, sd = 0.5), nr = nr2),
    matrix(rnorm(nr3*nc2, mean = 0, sd = 0.5), nr = nr3)),
  rbind(matrix(rnorm(nr1*nc3, mean = 0.5, sd = 0.5), nr = nr1),
    matrix(rnorm(nr2*nc3, mean = 0.5, sd = 0.5), nr = nr2),
    matrix(rnorm(nr3*nc3, mean = 1, sd = 0.5), nr = nr3))
)
rownames(mat) = paste0("row", seq_len(nrow(mat)))
# Fix: label columns by ncol(), not nrow() as in the original -- the two
# only coincided because the matrix happens to be square (24 x 24).
colnames(mat) = paste0("column", seq_len(ncol(mat)))
# Basic construction, colour mapping, name, cell border and heatmap border.
ht = Heatmap(mat)
draw(ht, test = TRUE)
ht
ht = Heatmap(mat, col = colorRamp2(c(-3, 0, 3), c("green", "white", "red")))
draw(ht, test = TRUE)
ht = Heatmap(mat, name = "test")
draw(ht, test = TRUE)
ht = Heatmap(mat, rect_gp = gpar(col = "black"))
draw(ht, test = TRUE)
ht = Heatmap(mat, border = "red")
draw(ht, test = TRUE)
######## test title ##########
ht = Heatmap(mat, row_title = "blablabla")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_side = "right")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_gp = gpar(fontsize = 20, font = 2))
draw(ht, test = TRUE)
# ht = Heatmap(mat, row_title = "blablabla", row_title_rot = 45)
# draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_rot = 0)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_gp = gpar(fill = "red", col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_side = "bottom")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_gp = gpar(fontsize = 20, font = 2))
draw(ht, test = TRUE)
# ht = Heatmap(mat, column_title = "blablabla", column_title_rot = 45)
# draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_rot = 90)
draw(ht, test = TRUE)
### test clustering ####
# Row clustering: on/off, distance as string / function of one or two args,
# linkage method, dendrogram side/size/graphics, and precomputed dendrograms.
ht = Heatmap(mat, cluster_rows = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_rows = "pearson")
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_rows = function(x) dist(x))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_rows = function(x, y) 1 - cor(x, y))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_method_rows = "single")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_side = "right")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_width = unit(4, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_gp = gpar(lwd = 2, col = "red"))
draw(ht, test = TRUE)
dend = as.dendrogram(hclust(dist(mat)))
ht = Heatmap(mat, cluster_rows = dend)
draw(ht, test = TRUE)
library(dendextend)
dend = color_branches(dend, k = 3)
ht = Heatmap(mat, cluster_rows = dend)
draw(ht, test = TRUE)
# Same battery of options for column clustering.
ht = Heatmap(mat, cluster_columns = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = "pearson")
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = function(x) dist(x))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = function(x, y) 1 - cor(x, y))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_method_columns = "single")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_side = "bottom")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_height = unit(4, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_gp = gpar(lwd = 2, col = "red"))
draw(ht, test = TRUE)
dend = as.dendrogram(hclust(dist(t(mat))))
ht = Heatmap(mat, cluster_columns = dend)
draw(ht, test = TRUE)
dend = color_branches(dend, k = 3)
ht = Heatmap(mat, cluster_columns = dend)
draw(ht, test = TRUE)
### test row/column order
# od interleaves odd then even indices of the 24 rows/columns.
od = c(seq(1, 24, by = 2), seq(2, 24, by = 2))
ht = Heatmap(mat, row_order = od)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_order = od, cluster_rows = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_order = od)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_order = od, cluster_columns = TRUE)
draw(ht, test = TRUE)
#### test row/column names #####
ht = Heatmap(unname(mat))
draw(ht, test = TRUE)
ht = Heatmap(mat, show_row_names = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_side = "left")
draw(ht, test = TRUE)
# Generate k random lower-case labels of length 5-10 each.
random_str2 = function(k) {
  sapply(1:k, function(i) paste(sample(letters, sample(5:10, 1)), collapse = ""))
}
ht = Heatmap(mat, row_labels = random_str2(24))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_gp = gpar(fontsize = 20))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_gp = gpar(fontsize = 1:24/2 + 5))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_rot = 45)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_rot = 45, row_names_side = "left")
draw(ht, test = TRUE)
ht = Heatmap(mat, show_column_names = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_side = "top")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_labels = random_str2(24))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_gp = gpar(fontsize = 20))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_gp = gpar(fontsize = 1:24/2 + 5))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_rot = 45)
draw(ht, test = TRUE)
### test annotations ####
# A mixed annotation: numeric vector, data frame and a barplot, placed on
# top, bottom, and both sides of the heatmap.
anno = HeatmapAnnotation(
  foo = 1:24,
  df = data.frame(type = c(rep("A", 12), rep("B", 12))),
  bar = anno_barplot(24:1))
ht = Heatmap(mat, top_annotation = anno)
draw(ht, test = TRUE)
ht = Heatmap(mat, bottom_annotation = anno)
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno)
draw(ht, test = TRUE)
### test split ####
# Row splitting: k-means (km / row_km), categorical vectors/factors, data
# frames of several split variables, gap sizes, and %s title templates.
ht = Heatmap(mat, km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("B", "A")))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), 12), row_gap = unit(5, "mm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)),
  row_gap = unit(c(1, 2, 3), "mm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "foo")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s", row_title_rot = 0)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s", row_title_gp = gpar(fill = 2:4, col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = NULL)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_names_gp = gpar(col = 2:4))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)), row_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)), row_km = 3, row_title = "cluster%s,group%s", row_title_rot = 0)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 2, row_title = "foo")
ht = Heatmap(mat, row_split = 2, row_title = "cluster%s")
dend = as.dendrogram(hclust(dist(mat)))
ht = Heatmap(mat, cluster_rows = dend, row_split = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 2, row_names_gp = gpar(col = 2:3))
draw(ht, test = TRUE)
### column split
ht = Heatmap(mat, column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_gap = unit(1, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)),
  column_gap = unit(c(1, 2, 3), "mm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "foo")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s", column_title_rot = 90)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s", column_title_gp = gpar(fill = 2:3, col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = NULL)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_names_gp = gpar(col = 2:3))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("A", "B")), column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("B", "A")), column_km = 2)
ht = Heatmap(mat, column_split = rep(c("A", "B"), times = c(6, 18)), column_km = 2,
  column_title = "cluster%s,group%s", column_title_rot = 90)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = 3)
draw(ht, test = TRUE)
dend = as.dendrogram(hclust(dist(t(mat))))
ht = Heatmap(mat, cluster_columns = dend, column_split = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno, column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno, column_split = 3)
draw(ht, test = TRUE)
### combine row and column split
ht = Heatmap(mat, row_km = 3, column_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 3, column_split = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, column_split = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), 12),
  column_split = rep(c("C", "D"), 12))
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno,
  row_split = rep(c("A", "B"), 12),
  row_names_gp = gpar(col = 2:3), row_gap = unit(2, "mm"),
  column_split = 3,
  column_names_gp = gpar(col = 2:4), column_gap = unit(4, "mm")
)
draw(ht, test = TRUE)
#### character matrix
mat3 = matrix(sample(letters[1:6], 100, replace = TRUE), 10, 10)
# One deliberately over-long row label to exercise label layout.
rownames(mat3) = {x = letters[1:10]; x[1] = "aaaaaaaaaaaaaaaaaaaaaaa";x}
ht = Heatmap(mat3, rect_gp = gpar(col = "white"))
draw(ht, test = TRUE)
### cell_fun
# NOTE: this OVERWRITES `mat` with a 3 x 3 matrix; the "test the size" and
# padding sections below therefore operate on the small matrix.
mat = matrix(1:9, 3, 3)
rownames(mat) = letters[1:3]
colnames(mat) = letters[1:3]
ht = Heatmap(mat, rect_gp = gpar(col = "white"), cell_fun = function(j, i, x, y, width, height, fill) grid.text(mat[i, j], x = x, y = y),
  cluster_rows = FALSE, cluster_columns = FALSE, row_names_side = "left", column_names_side = "top",
  column_names_rot = 0)
draw(ht, test = TRUE)
### test the size
# Inspect computed width/height slots under the various sizing arguments.
ht = Heatmap(mat)
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
ht = Heatmap(mat, width = unit(10, "cm"), height = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, width = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, heatmap_width = unit(10, "cm"), heatmap_height = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, heatmap_width = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, use_raster = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 2, use_raster = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 2, column_km = 2, use_raster = TRUE)
draw(ht, test = TRUE)
#### test global padding
# NOTE(review): `ra` annotates 24 rows but `mat` is now 3 x 3 (see cell_fun
# section above) -- confirm whether this section intended the original
# 24 x 24 matrix.
ra = rowAnnotation(foo = 1:24)
ht = Heatmap(mat, show_column_names = FALSE) + ra
draw(ht)
ht = Heatmap(matrix(rnorm(100), 10), row_km = 2, row_title = "")
draw(ht)
if(0) {
  ht = Heatmap(matrix(rnorm(100), 10), heatmap_width = unit(5, "mm"))
  draw(ht)
}
|
/tests/test-Heatmap-class.R
|
permissive
|
sqjin/ComplexHeatmap
|
R
| false
| false
| 12,137
|
r
|
# Visual regression exercises for ComplexHeatmap::Heatmap().
# Each Heatmap()/draw(..., test = TRUE) pair renders one configuration;
# correctness is judged by looking at the output, not by assertions, so
# the statement order (and hence the RNG stream after set.seed) matters.
library(circlize)
library(ComplexHeatmap)
library(GetoptLong)
set.seed(123)
# block sizes for a 24 x 24 test matrix with 3 row groups x 3 column groups
nr1 = 10; nr2 = 8; nr3 = 6
nc1 = 6; nc2 = 8; nc3 = 10
# each of the 3 x 3 blocks gets its own mean so clustering has structure to find
mat = cbind(rbind(matrix(rnorm(nr1*nc1, mean = 1, sd = 0.5), nr = nr1),
matrix(rnorm(nr2*nc1, mean = 0, sd = 0.5), nr = nr2),
matrix(rnorm(nr3*nc1, mean = 0, sd = 0.5), nr = nr3)),
rbind(matrix(rnorm(nr1*nc2, mean = 0, sd = 0.5), nr = nr1),
matrix(rnorm(nr2*nc2, mean = 1, sd = 0.5), nr = nr2),
matrix(rnorm(nr3*nc2, mean = 0, sd = 0.5), nr = nr3)),
rbind(matrix(rnorm(nr1*nc3, mean = 0.5, sd = 0.5), nr = nr1),
matrix(rnorm(nr2*nc3, mean = 0.5, sd = 0.5), nr = nr2),
matrix(rnorm(nr3*nc3, mean = 1, sd = 0.5), nr = nr3))
)
rownames(mat) = paste0("row", seq_len(nrow(mat)))
# NOTE(review): nrow(mat) is used for the column labels; this only works
# because mat happens to be square (24 x 24) -- presumably ncol(mat) was intended.
colnames(mat) = paste0("column", seq_len(nrow(mat)))
# basic rendering, custom colors, heatmap name, cell borders
ht = Heatmap(mat)
draw(ht, test = TRUE)
ht
ht = Heatmap(mat, col = colorRamp2(c(-3, 0, 3), c("green", "white", "red")))
draw(ht, test = TRUE)
ht = Heatmap(mat, name = "test")
draw(ht, test = TRUE)
ht = Heatmap(mat, rect_gp = gpar(col = "black"))
draw(ht, test = TRUE)
ht = Heatmap(mat, border = "red")
draw(ht, test = TRUE)
######## test title ##########
ht = Heatmap(mat, row_title = "blablabla")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_side = "right")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_gp = gpar(fontsize = 20, font = 2))
draw(ht, test = TRUE)
# ht = Heatmap(mat, row_title = "blablabla", row_title_rot = 45)
# draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_rot = 0)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_title = "blablabla", row_title_gp = gpar(fill = "red", col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_side = "bottom")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_gp = gpar(fontsize = 20, font = 2))
draw(ht, test = TRUE)
# ht = Heatmap(mat, column_title = "blablabla", column_title_rot = 45)
# draw(ht, test = TRUE)
ht = Heatmap(mat, column_title = "blablabla", column_title_rot = 90)
draw(ht, test = TRUE)
### test clustering ####
ht = Heatmap(mat, cluster_rows = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_rows = "pearson")
draw(ht, test = TRUE)
# distance can be a one-argument (dist) or two-argument (pairwise) function
ht = Heatmap(mat, clustering_distance_rows = function(x) dist(x))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_rows = function(x, y) 1 - cor(x, y))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_method_rows = "single")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_side = "right")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_width = unit(4, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_dend_gp = gpar(lwd = 2, col = "red"))
draw(ht, test = TRUE)
# user-supplied dendrograms, plain and colored via dendextend
dend = as.dendrogram(hclust(dist(mat)))
ht = Heatmap(mat, cluster_rows = dend)
draw(ht, test = TRUE)
library(dendextend)
dend = color_branches(dend, k = 3)
ht = Heatmap(mat, cluster_rows = dend)
draw(ht, test = TRUE)
# same battery of options, column-wise
ht = Heatmap(mat, cluster_columns = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = "pearson")
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = function(x) dist(x))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_distance_columns = function(x, y) 1 - cor(x, y))
draw(ht, test = TRUE)
ht = Heatmap(mat, clustering_method_columns = "single")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_side = "bottom")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_height = unit(4, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_dend_gp = gpar(lwd = 2, col = "red"))
draw(ht, test = TRUE)
dend = as.dendrogram(hclust(dist(t(mat))))
ht = Heatmap(mat, cluster_columns = dend)
draw(ht, test = TRUE)
dend = color_branches(dend, k = 3)
ht = Heatmap(mat, cluster_columns = dend)
draw(ht, test = TRUE)
### test row/column order
od = c(seq(1, 24, by = 2), seq(2, 24, by = 2))
ht = Heatmap(mat, row_order = od)
draw(ht, test = TRUE)
# explicit order combined with clustering (clustering should win)
ht = Heatmap(mat, row_order = od, cluster_rows = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_order = od)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_order = od, cluster_columns = TRUE)
draw(ht, test = TRUE)
#### test row/column names #####
ht = Heatmap(unname(mat))
draw(ht, test = TRUE)
ht = Heatmap(mat, show_row_names = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_side = "left")
draw(ht, test = TRUE)
# Generate k random lowercase strings, each 5-10 characters long.
# The draw order matches the original (string length first, then the
# letters), so the RNG stream -- and results under set.seed() -- is unchanged.
random_str2 = function(k) {
  one_string = function(i) {
    len = sample(5:10, 1)
    paste(sample(letters, len), collapse = "")
  }
  unlist(lapply(1:k, one_string))
}
# row/column label customization (continued)
ht = Heatmap(mat, row_labels = random_str2(24))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_gp = gpar(fontsize = 20))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_gp = gpar(fontsize = 1:24/2 + 5))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_rot = 45)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_names_rot = 45, row_names_side = "left")
draw(ht, test = TRUE)
ht = Heatmap(mat, show_column_names = FALSE)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_side = "top")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_labels = random_str2(24))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_gp = gpar(fontsize = 20))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_gp = gpar(fontsize = 1:24/2 + 5))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_names_rot = 45)
draw(ht, test = TRUE)
### test annotations ####
anno = HeatmapAnnotation(
foo = 1:24,
df = data.frame(type = c(rep("A", 12), rep("B", 12))),
bar = anno_barplot(24:1))
ht = Heatmap(mat, top_annotation = anno)
draw(ht, test = TRUE)
ht = Heatmap(mat, bottom_annotation = anno)
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno)
draw(ht, test = TRUE)
### test split ####
# row splitting: k-means, explicit vectors/factors, data frames, gaps
ht = Heatmap(mat, km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("B", "A")))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), 12), row_gap = unit(5, "mm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)),
row_gap = unit(c(1, 2, 3), "mm"))
draw(ht, test = TRUE)
# "%s" in titles is filled with the slice label
ht = Heatmap(mat, row_km = 3, row_title = "foo")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s")
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s", row_title_rot = 0)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = "cluster%s", row_title_gp = gpar(fill = 2:4, col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_title = NULL)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, row_names_gp = gpar(col = 2:4))
draw(ht, test = TRUE)
# combining categorical split with k-means sub-split
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)), row_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), times = c(6, 18)), row_km = 3, row_title = "cluster%s,group%s", row_title_rot = 0)
draw(ht, test = TRUE)
# numeric row_split = cut the dendrogram into that many slices
ht = Heatmap(mat, row_split = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 2, row_title = "foo")
ht = Heatmap(mat, row_split = 2, row_title = "cluster%s")
dend = as.dendrogram(hclust(dist(mat)))
ht = Heatmap(mat, cluster_rows = dend, row_split = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 2, row_names_gp = gpar(col = 2:3))
draw(ht, test = TRUE)
### column split
ht = Heatmap(mat, column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_gap = unit(1, "cm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = rep(c("A", "B"), times = c(6, 18)))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = data.frame(rep(c("A", "B"), 12), rep(c("C", "D"), each = 12)),
column_gap = unit(c(1, 2, 3), "mm"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "foo")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s")
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s", column_title_rot = 90)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = "cluster%s", column_title_gp = gpar(fill = 2:3, col = "white"))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_title = NULL)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_km = 2, column_names_gp = gpar(col = 2:3))
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("A", "B")), column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = factor(rep(c("A", "B"), times = c(6, 18)), levels = c("B", "A")), column_km = 2)
ht = Heatmap(mat, column_split = rep(c("A", "B"), times = c(6, 18)), column_km = 2,
column_title = "cluster%s,group%s", column_title_rot = 90)
draw(ht, test = TRUE)
ht = Heatmap(mat, column_split = 3)
draw(ht, test = TRUE)
dend = as.dendrogram(hclust(dist(t(mat))))
ht = Heatmap(mat, cluster_columns = dend, column_split = 3)
draw(ht, test = TRUE)
# annotations must track the column slices
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno, column_km = 2)
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno, bottom_annotation = anno, column_split = 3)
draw(ht, test = TRUE)
### combine row and column split
ht = Heatmap(mat, row_km = 3, column_km = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = 3, column_split = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 3, column_split = 3)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_split = rep(c("A", "B"), 12),
column_split = rep(c("C", "D"), 12))
draw(ht, test = TRUE)
ht = Heatmap(mat, top_annotation = anno,
row_split = rep(c("A", "B"), 12),
row_names_gp = gpar(col = 2:3), row_gap = unit(2, "mm"),
column_split = 3,
column_names_gp = gpar(col = 2:4), column_gap = unit(4, "mm")
)
draw(ht, test = TRUE)
#### character matrix
mat3 = matrix(sample(letters[1:6], 100, replace = TRUE), 10, 10)
rownames(mat3) = {x = letters[1:10]; x[1] = "aaaaaaaaaaaaaaaaaaaaaaa";x}
ht = Heatmap(mat3, rect_gp = gpar(col = "white"))
draw(ht, test = TRUE)
### cell_fun
# NOTE: 'mat' is rebound to a small 3 x 3 matrix from here on
mat = matrix(1:9, 3, 3)
rownames(mat) = letters[1:3]
colnames(mat) = letters[1:3]
ht = Heatmap(mat, rect_gp = gpar(col = "white"), cell_fun = function(j, i, x, y, width, height, fill) grid.text(mat[i, j], x = x, y = y),
cluster_rows = FALSE, cluster_columns = FALSE, row_names_side = "left", column_names_side = "top",
column_names_rot = 0)
draw(ht, test = TRUE)
### test the size
ht = Heatmap(mat)
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
ht = Heatmap(mat, width = unit(10, "cm"), height = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, width = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, heatmap_width = unit(10, "cm"), heatmap_height = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
ht = Heatmap(mat, heatmap_width = unit(10, "cm"))
ht = prepare(ht)
ht@heatmap_param[c("width", "height")]
ht@matrix_param[c("width", "height")]
draw(ht, test = TRUE)
# rasterized rendering
ht = Heatmap(mat, use_raster = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 2, use_raster = TRUE)
draw(ht, test = TRUE)
ht = Heatmap(mat, row_km = 2, column_km = 2, use_raster = TRUE)
draw(ht, test = TRUE)
#### test global padding
ra = rowAnnotation(foo = 1:24)
ht = Heatmap(mat, show_column_names = FALSE) + ra
draw(ht)
ht = Heatmap(matrix(rnorm(100), 10), row_km = 2, row_title = "")
draw(ht)
# disabled: degenerate (too-small) heatmap width
if(0) {
ht = Heatmap(matrix(rnorm(100), 10), heatmap_width = unit(5, "mm"))
draw(ht)
}
|
##############################################################################
# Copyright (c) 2012-2016 Russell V. Lenth #
# #
# This file is part of the emmeans package for R (*emmeans*) #
# #
# *emmeans* is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# *emmeans* is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with R and *emmeans*. If not, see #
# <https://www.r-project.org/Licenses/> and/or #
# <http://www.gnu.org/licenses/>. #
##############################################################################
# Support for MCMCglmm class and possibly more MCMC-based models
# Method to create a coda 'mcmc' or 'mcmc.list' object from a ref.grid
# (dots not supported, unfortunately)
# If sep.chains is TRUE and there is more than one chain, an mcmc.list is returned
# NOTE: S3 registration of as.mcmc and as.mcmc.list is done dynamically in zzz.R
#' Support for MCMC-based estimation
#'
#' When a model is fitted using Markov chain Monte Carlo (MCMC) methods,
#' its reference grid contains a \code{post.beta} slot. These functions
#' transform those posterior samples to posterior samples of EMMs or
#' related contrasts. They can then be summarized or plotted using,
#' e.g., functions in the \pkg{coda} package.
#'
#' @rdname mcmc-support
#' @aliases mcmc-support
#' @param x An object of class \code{emmGrid}
#' @param names Logical scalar or vector specifying whether variable names are
#' appended to levels in the column labels for the \code{as.mcmc} or
#' \code{as.mcmc.list} result -- e.g., column names of \code{treat A} and
#' \code{treat B} versus just \code{A} and \code{B}. When there is more than
#' one variable involved, the elements of \code{names} are used cyclically.
#' @param sep.chains Logical value. If \code{TRUE}, and there is more than one
#' MCMC chain available, an \code{\link[coda]{mcmc.list}} object is returned
#' by \code{as.mcmc}, with separate EMMs posteriors in each chain.
#' @param likelihood Character value or function. If given, simulations are made from
#' the corresponding posterior predictive distribution. If not given, we obtain
#' the posterior distribution of the parameters in \code{object}. See Prediction
#' section below.
#' @param NE.include Logical value. If \code{TRUE}, non-estimable columns are
#' kept but returned as columns of \code{NA} values (this may create errors or
#' warnings in subsequent analyses using, say, \pkg{coda}). If \code{FALSE},
#' non-estimable columns are dropped, and a warning is issued. (If all are
#' non-estimable, an error is thrown.)
#' @param ... arguments passed to other methods
#'
#' @return An object of class \code{\link[coda]{mcmc}} or \code{\link[coda]{mcmc.list}}.
#'
#' @section Details:
#' When the object's \code{post.beta} slot is non-trivial, \code{as.mcmc} will
#' return an \code{\link[coda]{mcmc}} or \code{\link[coda]{mcmc.list}} object
#' that can be summarized or plotted using methods in the \pkg{coda} package.
#' In these functions, \code{post.beta} is transformed by post-multiplying it by
#' \code{t(linfct)}, creating a sample from the posterior distribution of LS
#' means. In \code{as.mcmc}, if \code{sep.chains} is \code{TRUE} and there is in
#' fact more than one chain, an \code{mcmc.list} is returned with each chain's
#' results. The \code{as.mcmc.list} method is guaranteed to return an
#' \code{mcmc.list}, even if it comprises just one chain.
#'
#' @section Prediction:
#' When \code{likelihood} is specified, it is used to simulate values from the
#' posterior predictive distribution corresponding to the given likelihood and
#' the posterior distribution of parameter values. Denote the likelihood
#' function as \eqn{f(y|\theta,\phi)}, where \eqn{y} is a response, \eqn{\theta}
#' is the parameter estimated in \code{object}, and \eqn{\phi} comprises zero or
#' more additional parameters to be specified. If \code{likelihood} is a
#' function, that function should take as its first argument a vector of
#' \eqn{\theta} values (each corresponding to one row of \code{object@grid}).
#' Any \eqn{\phi} values should be specified as additional named function
#' arguments, and passed to \code{likelihood} via \code{...}. This function should
#' simulate values of \eqn{y}.
#'
#' A few standard likelihoods are available by specifying \code{likelihood} as
#' a character value. They are:
#' \describe{
#' \item{\code{"normal"}}{The normal distribution with mean \eqn{\theta} and
#' standard deviation specified by additional argument \code{sigma}}
#' \item{\code{"binomial"}}{The binomial distribution with success probability
#' \eqn{theta}, and number of trials specified by \code{trials}}
#' \item{\code{"poisson"}}{The Poisson distribution with mean \eqn{theta}
#' (no additional parameters)}
#' \item{\code{"gamma"}}{The gamma distribution with scale parameter \eqn{\theta}
#' and shape parameter specified by \code{shape}}
#' }
#'
#' @method as.mcmc emmGrid
#' @export as.mcmc.emmGrid
#' @examples
#' if(requireNamespace("coda")) {
#' ### A saved reference grid for a mixed logistic model (see lme4::cbpp)
#' cbpp.rg <- do.call(emmobj,
#' readRDS(system.file("extdata", "cbpplist", package = "emmeans")))
#' # Predictive distribution for herds of size 20
#' # (perhaps a bias adjustment should be applied; see "sophisticated" vignette)
#' pred.incidence <- coda::as.mcmc(regrid(cbpp.rg), likelihood = "binomial", trials = 20)
#' }
# Convert the posterior sample in an emmGrid into a coda 'mcmc' object
# (or an 'mcmc.list' when sep.chains = TRUE and multiple chains exist).
# Columns correspond to rows of x@grid; non-estimable cases are either
# dropped with a warning or kept as NA columns, per NE.include.
as.mcmc.emmGrid = function(x, names = TRUE, sep.chains = TRUE,
likelihood, NE.include = FALSE, ...) {
if (is.na(x@post.beta[1])) {
stop("No posterior sample -- can't make an 'mcmc' object")
}
# notes on estimability issues:
# 1. Use @bhat to determine which coefs to use
# 2. @nabasis as in freq models
# 3. @post.beta we will EXCLUDE cols corresp to NAs in @bhat
# See stanreg support for hints/details
use = which(!is.na(x@bhat))
est = estimability::is.estble(x@linfct, x@nbasis)
if (!any(est))
stop("Aborted -- No estimates in the grid are estimable")
else if(!all(est) && !NE.include) {
rows = paste(which(!est), collapse = ", ")
warning("Cases ", rows, " were dropped due to non-estimability", call. = FALSE)
}
# posterior sample of the linear functions: one row per draw, one column per grid point
mat = x@post.beta %*% t(x@linfct[, use, drop = FALSE])
if (NE.include)
mat[, !est] = NA
else {
mat = mat[, est, drop = FALSE]
x@grid = x@grid[est, , drop = FALSE]
}
# add any fixed offsets to every posterior draw
if(!is.null(offset <- x@grid[[".offset."]])) {
n = nrow(mat)
mat = mat + matrix(rep(offset, each = n), nrow = n)
}
if (!missing(likelihood)) {
# replace each draw by a simulated response (posterior predictive distribution);
# extra parameters of the likelihood (sigma, trials, shape) come in via '...'
if (is.character(likelihood)) {
likelihood = match.arg(likelihood, c("normal", "binomial", "poisson", "gamma"))
likelihood = switch(likelihood,
normal = function(theta, sigma, ...)
rnorm(length(theta), mean = theta, sd = sigma),
binomial = function(theta, trials, ...)
rbinom(length(theta), size = trials, prob = theta),
poisson = function(theta, ...)
rpois(length(theta), lambda = theta),
gamma = function(theta, shape, ...)
rgamma(length(theta), scale = theta, shape = shape)
#, stop("There is no predefined likelihood named '", likelihood, "'")
)
}
mat = apply(mat, 2, likelihood, ...)
##! TODO: Add "multinomial" support. This will require a flag to observe
##! the 'by' variable(s), then we get parameter values from the columns
##! corresponding to each 'by' group
}
# build column labels from grid levels; 'names' (recycled over variables)
# controls whether each level is prefixed with its variable name
nm = setdiff(names(x@grid), c(".wgt.",".offset."))
if (any(names)) {
names = rep(names, length(nm))
for (i in seq_along(nm))
if(names[i]) x@grid[nm[i]] = paste(nm[i], x@grid[[nm[i]]])
}
if(is.null(dimnames(mat)))
dimnames(mat) = list(seq_len(nrow(mat)), seq_len(ncol(mat)))
dimnames(mat)[[2]] = do.call(paste, c(unname(x@grid[, nm, drop = FALSE]), sep=", "))
n.chains = attr(x@post.beta, "n.chains")
# single chain (or chains not separated): one mcmc object; otherwise slice
# the stacked draws into equal per-chain pieces and return an mcmc.list
if (!sep.chains || is.null(n.chains) || (n.chains == 1))
coda::mcmc(mat)
else {
n = nrow(mat) / n.chains
seqn = seq_len(n)
chains = lapply(seq_len(n.chains), function(i) coda::mcmc(mat[n*(i - 1) + seqn, , drop = FALSE]))
coda::mcmc.list(chains)
}
}
### as.mcmc.list - guaranteed to return a list
#' @rdname mcmc-support
#' @method as.mcmc.list emmGrid
# Like as.mcmc.emmGrid, but guaranteed to return an 'mcmc.list' even when
# there is only a single chain.
as.mcmc.list.emmGrid = function(x, names = TRUE, ...) {
    out = as.mcmc.emmGrid(x, names = names, sep.chains = TRUE, ...)
    if (inherits(out, "mcmc.list"))
        out
    else
        coda::mcmc.list(out)
}
#' Summarize an emmGrid from a Bayesian model
#'
#' This function computes point estimates and HPD intervals for each
#' factor combination in \code{object@emmGrid}. While this function
#' may be called independently, it is called automatically by the S3 method
#' \code{\link{summary.emmGrid}} when the object is based on a Bayesian model.
#' (Note: the \code{level} argument, or its default, is passed as \code{prob}).
#'
#' @param object an \code{emmGrid} object having a non-missing \code{post.beta} slot
#' @param prob numeric probability content for HPD intervals (note: when not specified,
#' the current \code{level} option is used; see \code{\link{emm_options}})
#' @param by factors to use as \code{by} variables
#' @param type prediction type as in \code{\link{summary.emmGrid}}
#' @param point.est function to use to compute the point estimates from the
#' posterior sample for each grid point
#' @param bias.adjust Logical value for whether to adjust for bias in
#' back-transforming (\code{type = "response"}). This requires a value of
#' \code{sigma} to exist in the object or be specified.
#' @param sigma Error SD assumed for bias correction (when
#' \code{type = "response"}. If not specified,
#' \code{object@misc$sigma} is used, and an error is thrown if it is not found.
#' \emph{Note:} \code{sigma} may be a vector, as long as it conforms to the
#' number of observations in the posterior sample.
#' @param ... required but not used
#'
#' @return an object of class \code{summary_emm}
#'
#' @seealso summary.emmGrid
#'
#' @export
#'
#' @examples
#' if(require("coda")) {
#' # Create an emmGrid object from a system file
#' cbpp.rg <- do.call(emmobj,
#' readRDS(system.file("extdata", "cbpplist", package = "emmeans")))
#' hpd.summary(emmeans(cbpp.rg, "period"))
#' }
#'
hpd.summary = function(object, prob, by, type, point.est = median,
                       bias.adjust = get_emm_option("back.bias.adj"), sigma,
                       ...) {
    ## Summarize a Bayesian emmGrid: point estimates (via 'point.est') and
    ## HPD intervals (via coda::HPDinterval) for each grid point, with
    ## optional back-transformation and bias adjustment.
    if(!is.null(object@misc$.predFlag))
        stop("Prediction intervals for MCMC models should be done using 'frequentist = TRUE'\n",
             "or using 'as.mcmc(object, ..., likelihood = ...)'")
    .requireNS("coda", "Bayesian summary requires the 'coda' package")
    ### require("coda") ### Nope this is a CRAN no-no
    # Steal some init code from summary.emmGrid:
    opt = get_emm_option("summary")
    if(!is.null(opt)) {
        opt$object = object
        object = do.call("update.emmGrid", opt)
    }
    misc = object@misc
    use.elts = if (is.null(misc$display))
        rep(TRUE, nrow(object@grid))
    else
        misc$display
    grid = object@grid[use.elts, , drop = FALSE]
    # fall back to values stored in the object for unspecified arguments
    if(missing(prob))
        prob = misc$level
    if(missing(by))
        by = misc$by.vars
    if (missing(type))
        type = .get.predict.type(misc)
    else
        type = .validate.type(type)
    # if there are two transformations and we want response, then we need to undo both
    if ((type == "response") && (!is.null(misc$tran2)))
        object = regrid(object, transform = "mu")
    if ((type %in% c("mu", "unlink")) && (!is.null(t2 <- misc$tran2))) {
        if (!is.character(t2))
            t2 = "tran"
        object = update(object, inv.lbl = paste0(t2, "(resp)"))
    }
    link = .get.link(misc)
    inv = (type %in% c("response", "mu", "unlink")) # flag to inverse-transform
    if (inv && is.null(link))
        inv = FALSE
    ### OK, finally, here is the real stuff
    pe.lbl = as.character(substitute(point.est))
    if(length(pe.lbl) > 1)
        pe.lbl = "user-supplied function"
    mesg = c(misc$initMesg, paste("Point estimate displayed:", pe.lbl))
    # posterior sample of the linear functions, one column per grid point;
    # non-estimable cases come back as NA columns
    mcmc = as.mcmc.emmGrid(object, names = FALSE, sep.chains = FALSE,
                           NE.include = TRUE, ...)
    mcmc = mcmc[, use.elts, drop = FALSE]
    if (inv) {
        if (bias.adjust) {
            if (missing(sigma))
                # BUG FIX: was 'object@misc@sigma' -- '@' on the list-valued
                # misc slot throws an error; '$' is the correct accessor
                sigma = object@misc$sigma
            link = .make.bias.adj.link(link, sigma)
        }
        for (j in seq_along(mcmc[1, ]))
            mcmc[, j] = with(link, linkinv(mcmc[, j]))
        mesg = c(mesg, paste("Results are back-transformed from the", link$name, "scale"))
        if(bias.adjust)
            mesg = c(mesg, paste("Bias adjustment applied based on sigma =",
                                 .fmt.sigma(sigma)))
    }
    else if(!is.null(link))
        mesg = c(mesg, paste("Results are given on the", link$name, "(not the response) scale."))
    # zero-fill non-estimable columns temporarily so HPDinterval doesn't error
    est = !is.na(mcmc[1, ])
    mcmc[, !est] = 0 # temp so we don't get errors
    mesg = c(mesg, paste("HPD interval probability:", prob))
    pt.est = data.frame(apply(mcmc, 2, point.est))
    names(pt.est) = object@misc$estName
    summ = as.data.frame(coda::HPDinterval(mcmc, prob = prob))[c("lower","upper")]
    names(summ) = cnm = paste0(names(summ), ".HPD")
    lblnms = setdiff(names(grid),
                     c(object@roles$responses, ".offset.", ".wgt."))
    lbls = grid[lblnms]
    if (inv) {
        if (!is.null(misc$inv.lbl)) {
            names(pt.est) = misc$estName = misc$inv.lbl
            if (!is.null(misc$log.contrast)) # contrast of logs - relabel as ratios
                for (ell in seq_along(lbls)){
                    lbls[[ell]] = factor(lbls[[ell]])
                    levels(lbls[[ell]]) = gsub(" - ", " / ", levels(lbls[[ell]]))
                }
        }
        else
            names(pt.est) = misc$estName = "response"
    }
    # restore NAs for the non-estimable cases
    summ[!est, ] = NA
    pt.est[!est, ] = NA
    summ = cbind(lbls, pt.est, summ)
    attr(summ, "estName") = misc$estName
    attr(summ, "clNames") = cnm
    if (is.null(misc$pri.vars) || length(misc$pri.vars) == 0)
        misc$pri.vars = names(object@levels)
    attr(summ, "pri.vars") = setdiff(union(misc$pri.vars, misc$by.vars), by)
    attr(summ, "by.vars") = by
    attr(summ, "mesg") = unique(mesg)
    class(summ) = c("summary_emm", "data.frame")
    summ
}
# Currently, data is required, as call is not stored
# Recover the data used to fit an MCMCglmm model. For multivariate
# responses the data are stacked once per response with a 'trait' factor
# added, mirroring MCMCglmm's internal parameterization.
recover_data.MCMCglmm = function(object, data, trait, ...) {
if (is.null(data) && !is.null(object$data)) # allow for including data in object
data = eval(object$data)
# if a multivariate response, stack the data with `trait` variable
yvars = .all.vars(update(object$Fixed$formula, ". ~ 1"))
if ("trait" %in% names(data)) {
# don't do anything, just use what's provided
}
else if(length(yvars) > 1) {
# for (v in yvars) data[[v]] = NULL
# stack one copy of the data per response, tagged by 'trait'
dat = data
for (i in seq_len(length(yvars) - 1))
data = rbind(data, dat)
data$trait = factor(rep(yvars, each = nrow(dat)))
}
else if(!missing(trait)) {
# we'll create a fake "trait" variable with specified variable
# NOTE(review): assumes data[[trait]] is a factor -- levels() returns
# NULL for a character column; verify against callers
n = nrow(data)
levs = levels(data[[trait]])
attr(data, "misc") = list(resp.levs = levs, trait = trait)
data$trait = rep(levs[-1], n)[1:n] # way overkill, but easy coding
}
# stash the pieces emm_basis.MCMCglmm will need
attr(data, "call") = object$Fixed
attr(data, "terms") = trms = delete.response(terms(object$Fixed$formula))
attr(data, "predictors") = .all.vars(delete.response(trms))
data
}
# misc may be NULL or a list generated by trait spec
# Construct the emm_basis for an MCMCglmm fit: the design matrix for 'grid',
# posterior means/covariance of the fixed effects, and the posterior sample
# itself ('post.beta'). 'misc' may be NULL or a list generated by the 'trait'
# specification in recover_data.MCMCglmm().
emm_basis.MCMCglmm = function(object, trms, xlev, grid, vcov.,
                              mode = c("default", "multinomial"), misc, ...) {
    nobs.MCMCglmm = function(object, ...) 1 # prevents warning about nobs
    m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    X = model.matrix(trms, m, contrasts.arg = NULL)
    # Keep only the fixed-effect columns of the posterior sample.
    # drop = FALSE guards the single-fixed-effect (e.g. intercept-only) case,
    # where the subset would otherwise collapse to a vector and break the
    # column-wise summaries below.
    Sol = as.matrix(object$Sol)[, seq_len(object$Fixed$nfl), drop = FALSE]
    bhat = apply(Sol, 2, mean)
    if (missing(vcov.))
        V = cov(Sol)
    else
        V = .my.vcov(object, vcov.)
    if (is.null(misc))
        misc = list()
    mode = match.arg(mode)
    if (mode == "multinomial") {
        misc$postGridHook = .MCMCglmm.multinom.postGrid
    }
    else { # map the (single) family to a link function, when we know it
        fam = unique(object$family)
        if (length(fam) > 1)
            stop("There is more than one 'family' in this model - too complex for emmeans support")
        link = switch(fam,
                      poisson = "log",
                      multinomial = "log",
                      categorical = "logit",
                      ordinal = "logit") # maybe more later?
        if (!is.null(link))
            misc = .std.link.labels(list(link = link), misc)
    }
    list(X = X, bhat = bhat, nbasis = matrix(NA), V = V,
         dffun = function(k, dfargs) Inf, dfargs = list(),
         misc = misc, post.beta = Sol)
}
# Post-grid hook for MCMCglmm multinomial models: transforms posterior draws
# of the latent linear predictors into posterior draws of category
# probabilities, and rebuilds the grid with the response factor restored
# in place of the internal 'trait' pseudo-factor.
.MCMCglmm.multinom.postGrid = function(object, ...) {
linfct = object@linfct
misc = object@misc
post.lp = object@post.beta %*% t(linfct)
sel = .find.by.rows(object@grid, "trait")
k = length(sel)
cols = unlist(sel)
scal = sqrt(1 + 2 * (16 * sqrt(3) / (15 * pi))^2 / (k + 1)) # scaling const for logistic
# I'm assuming here that diag(IJ) = 2 / (k + 1)
# inverse multinomial-logit, one posterior draw at a time; the reference
# category is pinned at linear predictor 0 (the cbind(0, ...) column)
object@post.beta = post.p = t(apply(post.lp, 1, function(l) {
expX = exp(cbind(0, matrix(l[cols], ncol = k)) / scal)
as.numeric(apply(expX, 1, function(z) z / sum(z)))
})) # These results come out with response levels varying the fastest.
object@bhat = apply(post.p, 2, mean)
object@V = cov(post.p)
# replace 'trait' with the real response factor in roles/levels/grid
preds = c(misc$trait, object@roles$predictors)
object@roles$predictors = preds[preds != "trait"]
object@levels[["trait"]] = NULL
object@levels = c(list(misc$resp.levs), object@levels)
names(object@levels)[1] = misc$trait
object@grid = do.call(expand.grid, object@levels)
# clear one-shot bookkeeping so this hook does not run again
misc$postGridHook = misc$tran = misc$inv.lbl =
misc$trait = misc$resp.levs = NULL
misc$display = object@model.info$nesting = NULL
misc$estName = "prob"
object@linfct = diag(1, ncol(post.p))
object@misc = misc
object
}
### Support for MCMCpack , maybe others that produce mcmc objects
### Whether it works depends on:
### 1. if there is a "call" attribute with a formula or fixed member
### 2. if it's right, even then
### Alternatively, maybe providing formula and data will do the trick
# Recover data for a raw coda 'mcmc' object. Works only if the object carries
# a "call" attribute holding a formula (or 'fixed'), or if both 'formula'
# and 'data' are supplied explicitly. Returns a character message (not an
# error) when recovery is impossible, per the recover_data protocol.
recover_data.mcmc = function(object, formula, data, ...) {
    if (missing(formula)) {
        # try to reconstruct the model call stored on the mcmc object
        cl = attr(object, "call")
        if (is.null(cl$formula))
            cl$formula = cl$fixed
        if (is.null(cl$formula))
            return("No fixed-effects formula found")
        data = NULL
    }
    else {
        # 'formula' was supplied, so 'data' must accompany it.
        # (The original also re-tested missing(formula) here, which is
        # always FALSE in this branch -- dead condition removed.)
        if (missing(data))
            return("Requires both formula and data to proceed")
        cl = call("mcmc.proxy", formula = formula, data = quote(data))
    }
    trms = delete.response(terms(eval(cl$formula, parent.frame())))
    recover_data(cl, trms, NULL, data, ...)
}
# Build the emm_basis from a coda 'mcmc' object: the leading columns of the
# sample are taken as the fixed-effect draws (matched to the design matrix),
# with bhat/V summarized from the sample itself.
emm_basis.mcmc = function(object, trms, xlev, grid, vcov., ...) {
    frame = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    X = model.matrix(trms, frame, contrasts.arg = NULL)
    draws = as.matrix(object)[, seq_len(ncol(X)), drop = FALSE]
    V = if (missing(vcov.)) cov(draws) else .my.vcov(object, vcov.)
    list(X = X,
         bhat = apply(draws, 2, mean),
         nbasis = matrix(NA),
         V = V,
         dffun = function(k, dfargs) Inf,
         dfargs = list(),
         misc = list(),
         post.beta = draws)
}
### Support for mcmc.list
# An mcmc.list shares one model across chains, so recover the data from
# the first chain via the single-chain method (missing arguments propagate).
recover_data.mcmc.list = function(object, formula, data, ...)
    recover_data.mcmc(object[[1]], formula, data, ...)
# Build the basis from the first chain, then append the posterior draws from
# the remaining chains and record the chain count for as.mcmc.emmGrid.
# BUG FIX: the original forwarded 'vcov' (the stats generic found on the
# search path) instead of the 'vcov.' argument, defeating the missing(vcov.)
# check in emm_basis.mcmc. Also guards the single-chain case, where
# 2:length(object) would wrongly iterate over c(2, 1).
emm_basis.mcmc.list = function(object, trms, xlev, grid, vcov., ...) {
    result = emm_basis.mcmc(object[[1]], trms, xlev, grid, vcov., ...)
    cols = seq_len(ncol(result$post.beta))
    if (length(object) > 1)
        for (i in 2:length(object))
            result$post.beta = rbind(result$post.beta,
                                     as.matrix(object[[i]])[, cols, drop = FALSE])
    attr(result$post.beta, "n.chains") = length(object)
    result
}
### support for CARBayes package - currently MUST supply data and have
### default contrasts matching what was used in fitting the mdoel
# Recover data for a CARBayes fit. 'carbayes' objects do not store a call,
# so the data must be supplied -- or be recoverable from the calling frame
# via the stored model formula. Default contrasts must match those used in
# fitting. (Guarding with missing() avoids an error when 'data' is absent
# rather than explicitly NULL.)
recover_data.carbayes = function(object, data, ...) {
    if (missing(data) || is.null(data)) # Try to recover data from parent frame
        data = model.frame(object$formula, data = parent.frame())
    cl = call("carbayes.proxy", formula = object$formula, data = quote(data))
    trms = delete.response(terms(eval(object$formula, parent.frame())))
    recover_data(cl, trms, NULL, data, ...)
}
# Build the emm_basis from a CARBayes fit: design matrix for 'grid' (using
# the contrasts recorded on the fitted model matrix) plus posterior
# mean/covariance and the raw posterior sample of the regression coefficients.
emm_basis.carbayes = function(object, trms, xlev, grid, ...) {
    frame = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    X = model.matrix(trms, frame, contrasts.arg = attr(object$X, "contrasts"))
    draws = as.matrix(object$samples$beta)
    list(X = X,
         bhat = apply(draws, 2, mean),
         nbasis = matrix(NA),
         V = cov(draws),
         dffun = function(k, dfargs) Inf,
         dfargs = list(),
         misc = list(),
         post.beta = draws)
}
### Support for the rstanarm package (stanreg objects)
###
# stanreg fits expose call/terms information compatible with the 'lm'
# recovery method, so simply delegate.
recover_data.stanreg = function(object, ...)
    recover_data.lm(object, ...)
# note: mode and rescale are ignored for some models
# Build the emm_basis for an rstanarm 'stanreg' fit. The design matrix comes
# from rstanarm's internal pp_data(); bhat and V are summarized from the
# posterior sample rather than fixef()/vcov() (see comment below). For
# stan_polr fits (object$zeta present), 'mode' selects among latent /
# linear-predictor / probability scales; otherwise 'mode' and 'rescale'
# are ignored.
emm_basis.stanreg = function(object, trms, xlev, grid, mode, rescale, ...) {
misc = list()
if (!is.null(object$family)) {
if (is.character(object$family)) # work around bug for stan_polr
misc$tran = object$method
else
misc = .std.link.labels(object$family, misc)
}
# Previous code...
### m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
### if(is.null(contr <- object$contrasts))
### contr = attr(model.matrix(object), "contrasts")
### X = model.matrix(trms, m, contrasts.arg = contr)
### bhat = rstanarm::fixef(object)
### nms = intersect(colnames(X), names(bhat))
### bhat = bhat[nms]
### V = vcov(object)[nms, nms, drop = FALSE]
# Instead, use internal routine in rstanarm to get the model matrix
# Later, we'll get bhat and V from the posterior sample because
# the vcov(object) doesn't always jibe with fixef(object)
if(is.null(object$contrasts)) # old version of rstanarm where contrasts may get lost.
object$contrasts = attr(model.matrix(object), "contrasts")
pp_data = get("pp_data", envir = getNamespace("rstanarm"))
X = pp_data(object, newdata = grid, re.form = ~0, ...)[[1]]
nms = colnames(X)
if(!is.null(object$zeta)) { # Polytomous regression model
if (missing(mode))
mode = "latent"
else
mode = match.arg(mode,
c("latent", "linear.predictor", "cum.prob", "exc.prob", "prob", "mean.class"))
# drop the intercept column; the zeta cutpoints play that role
xint = match("(Intercept)", nms, nomatch = 0L)
if (xint > 0L)
X = X[, -xint, drop = FALSE]
k = length(object$zeta)
if (mode == "latent") {
### if (missing(rescale)) #------ (Disabl rescale)
rescale = c(0,1)
X = rescale[2] * cbind(X, matrix(- 1/k, nrow = nrow(X), ncol = k))
### bhat = c(bhat, object$zeta - rescale[1] / rescale[2])
misc = list(offset.mult = rescale[2])
}
else {
### bhat = c(bhat, object$zeta)
# expand X so each grid row is paired with each of the k cutpoints
j = matrix(1, nrow=k, ncol=1)
J = matrix(1, nrow=nrow(X), ncol=1)
X = cbind(kronecker(-j, X), kronecker(diag(1,k), J))
link = object$method
if (link == "logistic") link = "logit"
misc = list(ylevs = list(cut = names(object$zeta)),
tran = link, inv.lbl = "cumprob", offset.mult = -1)
if (mode != "linear.predictor") {
misc$mode = mode
misc$postGridHook = ".clm.postGrid" # we probably need to adapt this
}
}
nms = colnames(X) = c(nms, names(object$zeta))
misc$respName = as.character.default(terms(object))[2]
}
# posterior sample of the coefficients actually present in the model
samp = as.matrix(object$stanfit)[, nms, drop = FALSE]
attr(samp, "n.chains") = object$stanfit@sim$chains
bhat = apply(samp, 2, mean)
V = cov(samp)
# estimability...
# if X has columns with no sampled coefficient (rank deficiency), pad bhat
# with NAs and compute a non-estimability basis from the original data
nbasis = estimability::all.estble
all.nms = colnames(X)
if (length(nms) < length(all.nms)) {
if(is.null(contr <- object$contrasts))
contr = attr(model.matrix(object), "contrasts")
coef = NA * X[1, ]
coef[names(bhat)] = bhat
bhat = coef
mmat = model.matrix(trms, object$data, contrasts.arg = contr)
nbasis = estimability::nonest.basis(mmat)
}
list(X = X, bhat = bhat, nbasis = nbasis, V = V,
dffun = function(k, dfargs) Inf, dfargs = list(),
misc = misc, post.beta = samp)
}
|
/R/MCMC-support.R
|
no_license
|
MatthieuRouland/emmeans
|
R
| false
| false
| 26,534
|
r
|
##############################################################################
# Copyright (c) 2012-2016 Russell V. Lenth #
# #
# This file is part of the emmeans package for R (*emmeans*) #
# #
# *emmeans* is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 2 of the License, or #
# (at your option) any later version. #
# #
# *emmeans* is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with R and *emmeans*. If not, see #
# <https://www.r-project.org/Licenses/> and/or #
# <http://www.gnu.org/licenses/>. #
##############################################################################
# Support for MCMCglmm class and possibly more MCMC-based models
# Method to create a coda 'mcmc' or 'mcmc.list' object from a ref.grid
# (dots not supported, unfortunately)
# If sep.chains is TRUE and there is more than one chain, an mcmc.list is returned
# NOTE: S3 registration of as.mcmc and as.mcmc.list is done dynamically in zzz.R
#' Support for MCMC-based estimation
#'
#' When a model is fitted using Markov chain Monte Carlo (MCMC) methods,
#' its reference grid contains a \code{post.beta} slot. These functions
#' transform those posterior samples to posterior samples of EMMs or
#' related contrasts. They can then be summarized or plotted using,
#' e.g., functions in the \pkg{coda} package.
#'
#' @rdname mcmc-support
#' @aliases mcmc-support
#' @param x An object of class \code{emmGrid}
#' @param names Logical scalar or vector specifying whether variable names are
#' appended to levels in the column labels for the \code{as.mcmc} or
#' \code{as.mcmc.list} result -- e.g., column names of \code{treat A} and
#' \code{treat B} versus just \code{A} and \code{B}. When there is more than
#' one variable involved, the elements of \code{names} are used cyclically.
#' @param sep.chains Logical value. If \code{TRUE}, and there is more than one
#' MCMC chain available, an \code{\link[coda]{mcmc.list}} object is returned
#' by \code{as.mcmc}, with separate EMMs posteriors in each chain.
#' @param likelihood Character value or function. If given, simulations are made from
#' the corresponding posterior predictive distribution. If not given, we obtain
#' the posterior distribution of the parameters in \code{object}. See Prediction
#' section below.
#' @param NE.include Logical value. If \code{TRUE}, non-estimable columns are
#' kept but returned as columns of \code{NA} values (this may create errors or
#' warnings in subsequent analyses using, say, \pkg{coda}). If \code{FALSE},
#' non-estimable columns are dropped, and a warning is issued. (If all are
#' non-estimable, an error is thrown.)
#' @param ... arguments passed to other methods
#'
#' @return An object of class \code{\link[coda]{mcmc}} or \code{\link[coda]{mcmc.list}}.
#'
#' @section Details:
#' When the object's \code{post.beta} slot is non-trivial, \code{as.mcmc} will
#' return an \code{\link[coda]{mcmc}} or \code{\link[coda]{mcmc.list}} object
#' that can be summarized or plotted using methods in the \pkg{coda} package.
#' In these functions, \code{post.beta} is transformed by post-multiplying it by
#' \code{t(linfct)}, creating a sample from the posterior distribution of LS
#' means. In \code{as.mcmc}, if \code{sep.chains} is \code{TRUE} and there is in
#' fact more than one chain, an \code{mcmc.list} is returned with each chain's
#' results. The \code{as.mcmc.list} method is guaranteed to return an
#' \code{mcmc.list}, even if it comprises just one chain.
#'
#' @section Prediction:
#' When \code{likelihood} is specified, it is used to simulate values from the
#' posterior predictive distribution corresponding to the given likelihood and
#' the posterior distribution of parameter values. Denote the likelihood
#' function as \eqn{f(y|\theta,\phi)}, where \eqn{y} is a response, \eqn{\theta}
#' is the parameter estimated in \code{object}, and \eqn{\phi} comprises zero or
#' more additional parameters to be specified. If \code{likelihood} is a
#' function, that function should take as its first argument a vector of
#' \eqn{\theta} values (each corresponding to one row of \code{object@grid}).
#' Any \eqn{\phi} values should be specified as additional named function
#' arguments, and passed to \code{likelihood} via \code{...}. This function should
#' simulate values of \eqn{y}.
#'
#' A few standard likelihoods are available by specifying \code{likelihood} as
#' a character value. They are:
#' \describe{
#' \item{\code{"normal"}}{The normal distribution with mean \eqn{\theta} and
#' standard deviation specified by additional argument \code{sigma}}
#' \item{\code{"binomial"}}{The binomial distribution with success probability
#'     \eqn{\theta}, and number of trials specified by \code{trials}}
#'   \item{\code{"poisson"}}{The Poisson distribution with mean \eqn{\theta}
#' (no additional parameters)}
#' \item{\code{"gamma"}}{The gamma distribution with scale parameter \eqn{\theta}
#' and shape parameter specified by \code{shape}}
#' }
#'
#' @method as.mcmc emmGrid
#' @export as.mcmc.emmGrid
#' @examples
#' if(requireNamespace("coda")) {
#' ### A saved reference grid for a mixed logistic model (see lme4::cbpp)
#' cbpp.rg <- do.call(emmobj,
#' readRDS(system.file("extdata", "cbpplist", package = "emmeans")))
#' # Predictive distribution for herds of size 20
#' # (perhaps a bias adjustment should be applied; see "sophisticated" vignette)
#' pred.incidence <- coda::as.mcmc(regrid(cbpp.rg), likelihood = "binomial", trials = 20)
#' }
as.mcmc.emmGrid = function(x, names = TRUE, sep.chains = TRUE,
                           likelihood, NE.include = FALSE, ...) {
    # Refuse gracefully if the fit carries no posterior sample at all
    if (is.na(x@post.beta[1])) {
        stop("No posterior sample -- can't make an 'mcmc' object")
    }
    # notes on estimability issues:
    #  1. Use @bhat to determine which coefs to use
    #  2. @nbasis as in freq models
    #  3. @post.beta we will EXCLUDE cols corresp to NAs in @bhat
    # See stanreg support for hints/details
    use = which(!is.na(x@bhat))
    est = estimability::is.estble(x@linfct, x@nbasis)
    if (!any(est))
        stop("Aborted -- No estimates in the grid are estimable")
    else if(!all(est) && !NE.include) {
        rows = paste(which(!est), collapse = ", ")
        warning("Cases ", rows, " were dropped due to non-estimability", call. = FALSE)
    }
    # Posterior sample of the linear functions: one column per grid point
    mat = x@post.beta %*% t(x@linfct[, use, drop = FALSE])
    if (NE.include)
        mat[, !est] = NA      # keep non-estimable columns as all-NA
    else {
        mat = mat[, est, drop = FALSE]       # drop them, and their grid rows
        x@grid = x@grid[est, , drop = FALSE]
    }
    # Add any per-row grid offsets to every posterior draw
    if(!is.null(offset <- x@grid[[".offset."]])) {
        n = nrow(mat)
        mat = mat + matrix(rep(offset, each = n), nrow = n)
    }
    # Optionally convert to a posterior *predictive* sample by simulating
    # one response per draw from the requested likelihood (extra likelihood
    # parameters, e.g. sigma/trials/shape, come in via '...')
    if (!missing(likelihood)) {
        if (is.character(likelihood)) {
            likelihood = match.arg(likelihood, c("normal", "binomial", "poisson", "gamma"))
            likelihood = switch(likelihood,
                                normal = function(theta, sigma, ...)
                                    rnorm(length(theta), mean = theta, sd = sigma),
                                binomial = function(theta, trials, ...)
                                    rbinom(length(theta), size = trials, prob = theta),
                                poisson = function(theta, ...)
                                    rpois(length(theta), lambda = theta),
                                gamma = function(theta, shape, ...)
                                    rgamma(length(theta), scale = theta, shape = shape)
                                #, stop("There is no predefined likelihood named '", likelihood, "'")
            )
        }
        mat = apply(mat, 2, likelihood, ...)
        ##! TODO: Add "multinomial" support. This will require a flag to observe
        ##! the 'by' variable(s), then we get parameter values from the columns
        ##! corresponding to each 'by' group
    }
    # Build column labels from the grid levels; 'names' (recycled over the
    # grid variables) controls whether each level is prefixed by its variable
    nm = setdiff(names(x@grid), c(".wgt.",".offset."))
    if (any(names)) {
        names = rep(names, length(nm))
        for (i in seq_along(nm))
            if(names[i]) x@grid[nm[i]] = paste(nm[i], x@grid[[nm[i]]])
    }
    if(is.null(dimnames(mat)))
        dimnames(mat) = list(seq_len(nrow(mat)), seq_len(ncol(mat)))
    dimnames(mat)[[2]] = do.call(paste, c(unname(x@grid[, nm, drop = FALSE]), sep=", "))
    # Return a single 'mcmc' object, or split the (row-stacked, equal-sized)
    # chains into an 'mcmc.list' when requested and available
    n.chains = attr(x@post.beta, "n.chains")
    if (!sep.chains || is.null(n.chains) || (n.chains == 1))
        coda::mcmc(mat)
    else {
        n = nrow(mat) / n.chains
        seqn = seq_len(n)
        chains = lapply(seq_len(n.chains), function(i) coda::mcmc(mat[n*(i - 1) + seqn, , drop = FALSE]))
        coda::mcmc.list(chains)
    }
}
### as.mcmc.list - guaranteed to return a list
#' @rdname mcmc-support
#' @method as.mcmc.list emmGrid
as.mcmc.list.emmGrid = function(x, names = TRUE, ...) {
    # Convert with chains kept separate; that already yields an 'mcmc.list'
    # when several chains exist.  A bare 'mcmc' (single chain, or chain info
    # unavailable) is wrapped so the caller always receives an 'mcmc.list'.
    out = as.mcmc.emmGrid(x, names = names, sep.chains = TRUE, ...)
    if (inherits(out, "mcmc.list"))
        out
    else
        coda::mcmc.list(out)
}
#' Summarize an emmGrid from a Bayesian model
#'
#' This function computes point estimates and HPD intervals for each
#' factor combination in \code{object@emmGrid}. While this function
#' may be called independently, it is called automatically by the S3 method
#' \code{\link{summary.emmGrid}} when the object is based on a Bayesian model.
#' (Note: the \code{level} argument, or its default, is passed as \code{prob}).
#'
#' @param object an \code{emmGrid} object having a non-missing \code{post.beta} slot
#' @param prob numeric probability content for HPD intervals (note: when not specified,
#' the current \code{level} option is used; see \code{\link{emm_options}})
#' @param by factors to use as \code{by} variables
#' @param type prediction type as in \code{\link{summary.emmGrid}}
#' @param point.est function to use to compute the point estimates from the
#' posterior sample for each grid point
#' @param bias.adjust Logical value for whether to adjust for bias in
#' back-transforming (\code{type = "response"}). This requires a value of
#' \code{sigma} to exist in the object or be specified.
#' @param sigma Error SD assumed for bias correction (when
#' \code{type = "response"}. If not specified,
#' \code{object@misc$sigma} is used, and an error is thrown if it is not found.
#' \emph{Note:} \code{sigma} may be a vector, as long as it conforms to the
#' number of observations in the posterior sample.
#' @param ... required but not used
#'
#' @return an object of class \code{summary_emm}
#'
#' @seealso summary.emmGrid
#'
#' @export
#'
#' @examples
#' if(require("coda")) {
#' # Create an emmGrid object from a system file
#' cbpp.rg <- do.call(emmobj,
#' readRDS(system.file("extdata", "cbpplist", package = "emmeans")))
#' hpd.summary(emmeans(cbpp.rg, "period"))
#' }
#'
hpd.summary = function(object, prob, by, type, point.est = median,
                       bias.adjust = get_emm_option("back.bias.adj"), sigma,
                       ...) {
    # Predictions for MCMC fits are handled elsewhere; fail with guidance
    if(!is.null(object@misc$.predFlag))
        stop("Prediction intervals for MCMC models should be done using 'frequentist = TRUE'\n",
             "or using 'as.mcmc(object, ..., likelihood = ...)'")
    .requireNS("coda", "Bayesian summary requires the 'coda' package")
    ### require("coda") ### Nope this is a CRAN no-no
    # Steal some init code from summary.emmGrid: apply any stored summary options
    opt = get_emm_option("summary")
    if(!is.null(opt)) {
        opt$object = object
        object = do.call("update.emmGrid", opt)
    }
    misc = object@misc
    # Restrict to the displayed subset of the grid, if one is recorded
    use.elts = if (is.null(misc$display))
        rep(TRUE, nrow(object@grid))
    else
        misc$display
    grid = object@grid[use.elts, , drop = FALSE]
    # Fill in defaults for prob / by / type from the object's metadata
    if(missing(prob))
        prob = misc$level
    if(missing(by))
        by = misc$by.vars
    if (missing(type))
        type = .get.predict.type(misc)
    else
        type = .validate.type(type)
    # if there are two transformations and we want response, then we need to undo both
    if ((type == "response") && (!is.null(misc$tran2)))
        object = regrid(object, transform = "mu")
    if ((type %in% c("mu", "unlink")) && (!is.null(t2 <- misc$tran2))) {
        if (!is.character(t2))
            t2 = "tran"
        object = update(object, inv.lbl = paste0(t2, "(resp)"))
    }
    link = .get.link(misc)
    inv = (type %in% c("response", "mu", "unlink")) # flag to inverse-transform
    if (inv && is.null(link))
        inv = FALSE
    ### OK, finally, here is the real stuff
    pe.lbl = as.character(substitute(point.est))
    if(length(pe.lbl) > 1)
        pe.lbl = "user-supplied function"
    mesg = c(misc$initMesg, paste("Point estimate displayed:", pe.lbl))
    # Posterior sample of the linear functions (one column per grid row);
    # non-estimable columns are kept as NA and handled below
    mcmc = as.mcmc.emmGrid(object, names = FALSE, sep.chains = FALSE,
                           NE.include = TRUE, ...)
    mcmc = mcmc[, use.elts, drop = FALSE]
    if (inv) {
        if (bias.adjust) {
            if (missing(sigma))
                # BUG FIX: was 'object@misc@sigma' -- '@' on a list errors;
                # misc is a list slot, so '$' is the correct accessor
                # (consistent with the roxygen docs for the 'sigma' argument)
                sigma = object@misc$sigma
            link = .make.bias.adj.link(link, sigma)
        }
        # Back-transform every posterior draw to the response scale
        for (j in seq_along(mcmc[1, ]))
            mcmc[, j] = with(link, linkinv(mcmc[, j]))
        mesg = c(mesg, paste("Results are back-transformed from the", link$name, "scale"))
        if(bias.adjust)
            mesg = c(mesg, paste("Bias adjustment applied based on sigma =",
                                 .fmt.sigma(sigma)))
    }
    else if(!is.null(link))
        mesg = c(mesg, paste("Results are given on the", link$name, "(not the response) scale."))
    est = !is.na(mcmc[1, ])
    mcmc[, !est] = 0 # temp so we don't get errors
    mesg = c(mesg, paste("HPD interval probability:", prob))
    # Point estimates (default: posterior medians) and HPD limits per column
    pt.est = data.frame(apply(mcmc, 2, point.est))
    names(pt.est) = object@misc$estName
    summ = as.data.frame(coda::HPDinterval(mcmc, prob = prob))[c("lower","upper")]
    names(summ) = cnm = paste0(names(summ), ".HPD")
    lblnms = setdiff(names(grid),
                     c(object@roles$responses, ".offset.", ".wgt."))
    lbls = grid[lblnms]
    if (inv) {
        if (!is.null(misc$inv.lbl)) {
            names(pt.est) = misc$estName = misc$inv.lbl
            if (!is.null(misc$log.contrast)) # contrast of logs - relabel as ratios
                for (ell in seq_along(lbls)){
                    lbls[[ell]] = factor(lbls[[ell]])
                    levels(lbls[[ell]]) = gsub(" - ", " / ", levels(lbls[[ell]]))
                }
        }
        else
            names(pt.est) = misc$estName = "response"
    }
    # Restore NAs for the non-estimable cases that were zero-filled above
    summ[!est, ] = NA
    pt.est[!est, ] = NA
    summ = cbind(lbls, pt.est, summ)
    # Attach the attributes that print.summary_emm relies on
    attr(summ, "estName") = misc$estName
    attr(summ, "clNames") = cnm
    if (is.null(misc$pri.vars) || length(misc$pri.vars) == 0)
        misc$pri.vars = names(object@levels)
    attr(summ, "pri.vars") = setdiff(union(misc$pri.vars, misc$by.vars), by)
    attr(summ, "by.vars") = by
    attr(summ, "mesg") = unique(mesg)
    class(summ) = c("summary_emm", "data.frame")
    summ
}
# Currently, data is required, as call is not stored
# recover_data method for MCMCglmm fits.  The model's call is not stored,
# so 'data' must be supplied (or bundled in the object as object$data).
# Multivariate responses are handled by stacking one copy of the data per
# response and adding a 'trait' factor, mimicking MCMCglmm's internal
# reshaping; 'trait' may also be faked from a named factor via the 'trait'
# argument for multinomial-style models.
recover_data.MCMCglmm = function(object, data, trait, ...) {
    if (is.null(data) && !is.null(object$data)) # allow for including data in object
        data = eval(object$data)
    # if a multivariate response, stack the data with `trait` variable
    yvars = .all.vars(update(object$Fixed$formula, ". ~ 1"))
    if ("trait" %in% names(data)) {
        # don't do anything, just use what's provided
    }
    else if(length(yvars) > 1) {
        # for (v in yvars) data[[v]] = NULL
        # One stacked copy of the data per response variable
        dat = data
        for (i in seq_len(length(yvars) - 1))
            data = rbind(data, dat)
        data$trait = factor(rep(yvars, each = nrow(dat)))
    }
    else if(!missing(trait)) {
        # we'll create a fake "trait" variable with specified variable
        n = nrow(data)
        levs = levels(data[[trait]])
        attr(data, "misc") = list(resp.levs = levs, trait = trait)
        data$trait = rep(levs[-1], n)[1:n] # way overkill, but easy coding
    }
    # Attach the metadata emmeans expects on a recovered data frame
    attr(data, "call") = object$Fixed
    attr(data, "terms") = trms = delete.response(terms(object$Fixed$formula))
    attr(data, "predictors") = .all.vars(delete.response(trms))
    data
}
# misc may be NULL or a list generated by trait spec
# emm_basis method for MCMCglmm fits.  Estimates and covariance come from
# the posterior sample of the fixed effects (object$Sol); df are infinite.
# mode = "multinomial" defers post-processing to a postGrid hook; otherwise
# the link is guessed from the (single) 'family' entry.
# 'misc' may be NULL or the list produced by the 'trait' handling in
# recover_data.MCMCglmm.
emm_basis.MCMCglmm = function(object, trms, xlev, grid, vcov.,
                              mode = c("default", "multinomial"), misc, ...) {
    nobs.MCMCglmm = function(object, ...) 1 # prevents warning about nobs
    m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    X = model.matrix(trms, m, contrasts.arg = NULL)
    Sol = as.matrix(object$Sol)[, seq_len(object$Fixed$nfl)] # toss out random effects if included
    bhat = apply(Sol, 2, mean)
    if (missing(vcov.))
        V = cov(Sol)
    else
        V = .my.vcov(object, vcov.)
    if (is.null(misc))
        misc = list()
    mode = match.arg(mode)
    if (mode == "multinomial") {
        misc$postGridHook = .MCMCglmm.multinom.postGrid
    }
    else { # try to figure out the link
        fam = unique(object$family)
        if (length(fam) > 1)
            stop("There is more than one 'family' in this model - too complex for emmeans support")
        # switch() returns NULL for unrecognized families, leaving misc as-is
        link = switch(fam,
                      poisson = "log",
                      multinomial = "log",
                      categorical = "logit",
                      ordinal = "logit") # maybe more later?
        if (!is.null(link))
            misc = .std.link.labels(list(link = link), misc)
    }
    list(X = X, bhat = bhat, nbasis = matrix(NA), V = V,
         dffun = function(k, dfargs) Inf, dfargs = list(),
         misc = misc, post.beta = Sol)
}
# postGrid hook for multinomial MCMCglmm models.
# Converts the posterior sample of linear predictors (grouped by the
# 'trait' pseudo-factor) into a posterior sample of category probabilities
# via a scaled softmax, then rebuilds the grid so the response levels
# replace 'trait' as the first grid variable.
.MCMCglmm.multinom.postGrid = function(object, ...) {
    linfct = object@linfct
    misc = object@misc
    # Each posterior draw's linear predictions for every grid row
    post.lp = object@post.beta %*% t(linfct)
    sel = .find.by.rows(object@grid, "trait")
    k = length(sel)
    cols = unlist(sel)
    scal = sqrt(1 + 2 * (16 * sqrt(3) / (15 * pi))^2 / (k + 1)) # scaling const for logistic
    # I'm assuming here that diag(IJ) = 2 / (k + 1)
    # Softmax per draw: reference category gets linear predictor 0
    object@post.beta = post.p = t(apply(post.lp, 1, function(l) {
        expX = exp(cbind(0, matrix(l[cols], ncol = k)) / scal)
        as.numeric(apply(expX, 1, function(z) z / sum(z)))
    })) # These results come out with response levels varying the fastest.
    object@bhat = apply(post.p, 2, mean)
    object@V = cov(post.p)
    # Replace 'trait' with the actual response-level factor in the grid
    preds = c(misc$trait, object@roles$predictors)
    object@roles$predictors = preds[preds != "trait"]
    object@levels[["trait"]] = NULL
    object@levels = c(list(misc$resp.levs), object@levels)
    names(object@levels)[1] = misc$trait
    object@grid = do.call(expand.grid, object@levels)
    # Clear the hook and transformation metadata: results are now plain probabilities
    misc$postGridHook = misc$tran = misc$inv.lbl =
        misc$trait = misc$resp.levs = NULL
    misc$display = object@model.info$nesting = NULL
    misc$estName = "prob"
    object@linfct = diag(1, ncol(post.p))
    object@misc = misc
    object
}
### Support for MCMCpack , maybe others that produce mcmc objects
### Whether it works depends on:
### 1. if there is a "call" attribute with a formula or fixed member
### 2. if it's right, even then
### Alternatively, maybe providing formula and data will do the trick
# recover_data method for coda 'mcmc' objects (e.g. MCMCpack output).
# With no 'formula' argument, tries the "call" attribute some packages
# attach (using its 'formula' or 'fixed' component); otherwise both a
# formula and data are required, and a proxy call is synthesized.
# Returns a character message (emmeans convention) when recovery fails.
recover_data.mcmc = function(object, formula, data, ...) {
    if (missing(formula)) {
        cl = attr(object, "call")
        if (is.null(cl$formula))
            cl$formula = cl$fixed
        if (is.null(cl$formula))
            return("No fixed-effects formula found")
        data = NULL
    }
    else {
        if (missing(formula) || missing(data))
            return("Requires both formula and data to proceed")
        cl = call("mcmc.proxy", formula = formula, data = quote(data))
    }
    trms = delete.response(terms(eval(cl$formula, parent.frame())))
    recover_data(cl, trms, NULL, data, ...)
}
# emm_basis method for a single coda 'mcmc' chain.  The posterior draws
# themselves supply the coefficient estimates (column means) and their
# covariance, and degrees of freedom are infinite (asymptotic z).
emm_basis.mcmc = function(object, trms, xlev, grid, vcov., ...) {
    mframe = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    X = model.matrix(trms, mframe, contrasts.arg = NULL)
    # Keep only the leading draw columns that line up with the fixed effects
    draws = as.matrix(object)[, seq_len(ncol(X)), drop = FALSE]
    V = if (missing(vcov.)) cov(draws) else .my.vcov(object, vcov.)
    list(X = X,
         bhat = colMeans(draws),
         nbasis = matrix(NA),   # matrix(NA): "all estimable" marker, per convention here
         V = V,
         dffun = function(k, dfargs) Inf,
         dfargs = list(),
         misc = list(),
         post.beta = draws)
}
### Support for mcmc.list
# recover_data method for coda 'mcmc.list' objects: delegate to the
# 'mcmc' method using the first chain.  Passing 'formula'/'data' through
# positionally preserves their missingness for the missing() checks there.
recover_data.mcmc.list = function(object, formula, data, ...) {
    recover_data.mcmc(object[[1]], formula, data, ...)
}
# emm_basis method for coda 'mcmc.list' objects (multiple chains).
# Builds the basis from the first chain, then row-binds the remaining
# chains' draws (same fixed-effect columns) into post.beta, recording the
# chain count so downstream code can split the chains back apart.
emm_basis.mcmc.list = function(object, trms, xlev, grid, vcov., ...) {
    # Forward the 'vcov.' *argument* (previously the stats::vcov function
    # was passed by mistake, defeating the missing(vcov.) check downstream);
    # positional passing preserves missingness when the caller omitted it.
    result = emm_basis.mcmc(object[[1]], trms, xlev, grid, vcov., ...)
    cols = seq_len(ncol(result$post.beta))
    # seq_along(object)[-1] is empty for a single-chain list, unlike
    # 2:length(object), which would wrongly yield c(2, 1) and fail.
    for (i in seq_along(object)[-1])
        result$post.beta = rbind(result$post.beta,
                                 as.matrix(object[[i]])[, cols, drop = FALSE])
    attr(result$post.beta, "n.chains") = length(object)
    result
}
### support for CARBayes package - currently MUST supply data and have
### default contrasts matching what was used in fitting the model
# recover_data method for CARBayes fits.  The fit stores its formula but
# not its data, so 'data' must be supplied -- or recoverable by evaluating
# the model frame in the caller's environment.  A proxy call is synthesized
# because CARBayes objects do not carry one.
recover_data.carbayes = function(object, data, ...) {
    if(is.null(data)) # Try to recover data from parent frame
        data = model.frame(object$formula, data = parent.frame())
    cl = call("carbayes.proxy", formula = object$formula, data = quote(data))
    trms = delete.response(terms(eval(object$formula, parent.frame())))
    recover_data(cl, trms, NULL, data, ...)
}
# emm_basis method for CARBayes fits: the posterior sample of beta supplies
# both the point estimates (column means) and their covariance matrix;
# degrees of freedom are infinite (asymptotic z statistics).
emm_basis.carbayes = function(object, trms, xlev, grid, ...) {
    # Reference-grid design matrix, using the contrasts recorded on the
    # fitted model matrix (object$X)
    mf = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    mm = model.matrix(trms, mf, contrasts.arg = attr(object$X, "contrasts"))
    draws = as.matrix(object$samples$beta)
    list(X = mm,
         bhat = apply(draws, 2, mean),
         nbasis = matrix(NA),
         V = cov(draws),
         dffun = function(k, dfargs) Inf,
         dfargs = list(),
         misc = list(),
         post.beta = draws)
}
### Support for the rstanarm package (stanreg objects)
###
# recover_data method for rstanarm's 'stanreg' objects.
# Simply delegates to the 'lm' method; presumably a stanreg fit stores
# its call and data in the same way an 'lm' fit does -- TODO confirm for
# newer rstanarm releases.
recover_data.stanreg = function(object, ...) {
    recover_data.lm(object, ...)
}
# note: mode and rescale are ignored for some models
# emm_basis method for rstanarm 'stanreg' fits.
# 'mode' and 'rescale' only matter for polytomous (stan_polr-style) models
# (detected via object$zeta); they are ignored otherwise.
# The basis (bhat, V) comes from the posterior sample in object$stanfit
# rather than from fixef()/vcov(), which do not always agree.
emm_basis.stanreg = function(object, trms, xlev, grid, mode, rescale, ...) {
    misc = list()
    # Record the link/transformation so results can be back-transformed later
    if (!is.null(object$family)) {
        if (is.character(object$family)) # work around bug for stan_polr
            misc$tran = object$method
        else
            misc = .std.link.labels(object$family, misc)
    }
    # Previous code...
    ###     m = model.frame(trms, grid, na.action = na.pass, xlev = xlev)
    ###     if(is.null(contr <- object$contrasts))
    ###         contr = attr(model.matrix(object), "contrasts")
    ###     X = model.matrix(trms, m, contrasts.arg = contr)
    ###     bhat = rstanarm::fixef(object)
    ###     nms = intersect(colnames(X), names(bhat))
    ###     bhat = bhat[nms]
    ###     V = vcov(object)[nms, nms, drop = FALSE]
    # Instead, use internal routine in rstanarm to get the model matrix
    # Later, we'll get bhat and V from the posterior sample because
    # the vcov(object) doesn't always jibe with fixef(object)
    if(is.null(object$contrasts)) # old version of rstanarm where contrasts may get lost.
        object$contrasts = attr(model.matrix(object), "contrasts")
    # NOTE(review): reaches into rstanarm's unexported pp_data() -- fragile
    # against upstream refactoring; re.form = ~0 drops the random effects
    pp_data = get("pp_data", envir = getNamespace("rstanarm"))
    X = pp_data(object, newdata = grid, re.form = ~0, ...)[[1]]
    nms = colnames(X)
    if(!is.null(object$zeta)) { # Polytomous regression model
        if (missing(mode))
            mode = "latent"
        else
            mode = match.arg(mode,
                             c("latent", "linear.predictor", "cum.prob", "exc.prob", "prob", "mean.class"))
        # Drop the intercept; the cutpoints (zeta) play that role instead
        xint = match("(Intercept)", nms, nomatch = 0L)
        if (xint > 0L)
            X = X[, -xint, drop = FALSE]
        k = length(object$zeta)
        if (mode == "latent") {
            ### if (missing(rescale)) #------ (Disable rescale)
            rescale = c(0,1)
            # Append k columns of -1/k so the cutpoints average into the latent mean
            X = rescale[2] * cbind(X, matrix(- 1/k, nrow = nrow(X), ncol = k))
            ### bhat = c(bhat, object$zeta - rescale[1] / rescale[2])
            misc = list(offset.mult = rescale[2])
        }
        else {
            ### bhat = c(bhat, object$zeta)
            # Expand rows: one copy of the grid per cutpoint, with an
            # indicator column selecting the corresponding zeta
            j = matrix(1, nrow=k, ncol=1)
            J = matrix(1, nrow=nrow(X), ncol=1)
            X = cbind(kronecker(-j, X), kronecker(diag(1,k), J))
            link = object$method
            if (link == "logistic") link = "logit"
            misc = list(ylevs = list(cut = names(object$zeta)),
                        tran = link, inv.lbl = "cumprob", offset.mult = -1)
            if (mode != "linear.predictor") {
                misc$mode = mode
                misc$postGridHook = ".clm.postGrid" # we probably need to adapt this
            }
        }
        nms = colnames(X) = c(nms, names(object$zeta))
        misc$respName = as.character.default(terms(object))[2]
    }
    # Posterior draws for the needed columns supply the point estimates
    # (column means) and their covariance
    samp = as.matrix(object$stanfit)[, nms, drop = FALSE]
    attr(samp, "n.chains") = object$stanfit@sim$chains
    bhat = apply(samp, 2, mean)
    V = cov(samp)
    # estimability...
    # If some design columns had no sampled coefficient, pad bhat with NAs
    # and compute a non-estimability basis from the original model matrix
    nbasis = estimability::all.estble
    all.nms = colnames(X)
    if (length(nms) < length(all.nms)) {
        if(is.null(contr <- object$contrasts))
            contr = attr(model.matrix(object), "contrasts")
        coef = NA * X[1, ]
        coef[names(bhat)] = bhat
        bhat = coef
        mmat = model.matrix(trms, object$data, contrasts.arg = contr)
        nbasis = estimability::nonest.basis(mmat)
    }
    list(X = X, bhat = bhat, nbasis = nbasis, V = V,
         dffun = function(k, dfargs) Inf, dfargs = list(),
         misc = misc, post.beta = samp)
}
|
# Console summary for a 'tpopt' object: prints the call, the candidate
# models and fixed parameters, the design (support points x with weights w),
# the efficiency trace, and the timing, separated by rule lines.
summary.tpopt <- function(object, ...)
{
    rule <- function()
        cat("\n###############################################################################\n")
    rule()
    cat("Call:\n\n")
    print(object$call)
    rule()
    cat("Models:\n")
    print(object$eta)
    cat("Fixed parameters:\n")
    print(object$theta.fix)
    rule()
    cat("Design:\n")
    print(rbind(x = object$x, w = object$w))
    rule()
    cat("Efficiency by iteration:\n")
    print(object$efficiency)
    rule()
    cat("Time:\n")
    print(object$time)
    rule()
}
|
/rodd/R/summary.tpopt.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 953
|
r
|
# S3 summary method for 'tpopt' objects -- presumably the result of a
# T-optimal design search (the components accessed suggest: matched call,
# candidate models 'eta', fixed parameters, design points 'x' with weights
# 'w', an efficiency-by-iteration trace, and elapsed time).  All output is
# written to the console, separated by rule lines; nothing useful is returned.
summary.tpopt <- function(object, ...)
{
    res <- object
    cat("\n###############################################################################\n")
    cat("Call:\n\n")
    print(res$call)
    cat("\n###############################################################################\n")
    cat("Models:\n")
    print(res$eta)
    cat("Fixed parameters:\n")
    print(res$theta.fix)
    cat("\n###############################################################################\n")
    # Design: support points (x) stacked over their weights (w)
    cat("Design:\n")
    print(rbind(x = res$x, w = res$w))
    cat("\n###############################################################################\n")
    cat("Efficiency by iteration:\n")
    print(res$efficiency)
    cat("\n###############################################################################\n")
    cat("Time:\n")
    print(res$time)
    cat("\n###############################################################################\n")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostic_plots.R
\name{acf_plot}
\alias{acf_plot}
\title{Plot autocorrelations of a single Markov Chain}
\usage{
acf_plot(samples, lag_max = 30, latex = FALSE)
}
\arguments{
\item{samples}{Numeric vector, matrix with a single column or list
containing such a vector or matrix as the only element.}
\item{lag_max}{Positive integer representing the maximum number of lags
that are shown on the x - axis. \cr
Default: 30}
\item{latex}{Logical. If TRUE, mathematical symbols such as greek letters
in the plot title with subscripts and superscripts are properly
rendered.
This option requires the column of the input matrix to be
labeled accordingly (as in the output of the \code{mcmc_ridge()}
function). \cr
Default: FALSE}
}
\value{
Plot object of the class "ggplot".
}
\description{
The \code{acf_plot()} function creates a graphical display of
autocorrelations for various lags of a single Markov Chain.
The lags are mapped to the x - axis in increasing order with the
corresponding autocorrelation values mapped to the y - axis. \cr
This type of plot can be used to analyze the dependence of the
collected samples, e.g., from a posterior distribution.
}
\examples{
fit <- lmls(
location = y ~ x1 + x2 + z1 + z2, scale = ~ z1 + z2,
data = toy_data, light = FALSE
) \%>\%
mcmc_ridge(num_sim = 1000)
# list of 4 matrices with 1000 rows each
samples <- fit$mcmc_ridge$sampling_matrices
# uses default lag_max = 30
acf_plot(samples$scale_prior, latex = TRUE)
# value of lag_max should be adapted depending on the correlation structure
acf_plot(samples$scale[, 1, drop = FALSE], lag_max = 100, latex = TRUE)
}
|
/man/acf_plot.Rd
|
permissive
|
joel-beck/asp21bridge
|
R
| false
| true
| 1,695
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diagnostic_plots.R
\name{acf_plot}
\alias{acf_plot}
\title{Plot autocorrelations of a single Markov Chain}
\usage{
acf_plot(samples, lag_max = 30, latex = FALSE)
}
\arguments{
\item{samples}{Numeric vector, matrix with a single column or list
containing such a vector or matrix as the only element.}
\item{lag_max}{Positive integer representing the maximum number of lags
that are shown on the x - axis. \cr
Default: 30}
\item{latex}{Logical. If TRUE, mathematical symbols such as greek letters
in the plot title with subscripts and superscripts are properly
rendered.
This option requires the column of the input matrix to be
labeled accordingly (as in the output of the \code{mcmc_ridge()}
function). \cr
Default: FALSE}
}
\value{
Plot object of the class "ggplot".
}
\description{
The \code{acf_plot()} function creates a graphical display of
autocorrelations for various lags of a single Markov Chain.
The lags are mapped to the x - axis in increasing order with the
corresponding autocorrelation values mapped to the y - axis. \cr
This type of plot can be used to analyze the dependence of the
collected samples, e.g., from a posterior distribution.
}
\examples{
fit <- lmls(
location = y ~ x1 + x2 + z1 + z2, scale = ~ z1 + z2,
data = toy_data, light = FALSE
) \%>\%
mcmc_ridge(num_sim = 1000)
# list of 4 matrices with 1000 rows each
samples <- fit$mcmc_ridge$sampling_matrices
# uses default lag_max = 30
acf_plot(samples$scale_prior, latex = TRUE)
# value of lag_max should be adapted depending on the correlation structure
acf_plot(samples$scale[, 1, drop = FALSE], lag_max = 100, latex = TRUE)
}
|
/toulouse_appartement_creation.R
|
no_license
|
cregouby/show_me_your_model
|
R
| false
| false
| 583
|
r
| ||
% $Author: sinnwell $
% $Date: 2011/11/10 15:29:41 $
% $Header: /projects/genetics/cvs/cvsroot/haplo.stats/man/print.haplo.group.Rd,v 1.3 2011/11/10 15:29:41 sinnwell Exp $
% $Locker: $
%
%$Log: print.haplo.group.Rd,v $
%Revision 1.3 2011/11/10 15:29:41 sinnwell
%major update to hapglm, minor changes to Rd files, prepare for version 1.5.0 release
%
%Revision 1.2 2008/01/08 20:38:37 sinnwell
%add log
%
%Revision 1.7 2004/06/09 19:01:37 sinnwell
%haplo.group class, not haplo.score
%
%Revision 1.6 2004/04/06 21:30:30 sinnwell
%add nlines parm
%
%Revision 1.5 2004/03/02 15:05:30 sinnwell
%take out example, already shown in haplo.group.sgml
%
%Revision 1.4 2004/02/27 15:33:19 sinnwell
%add entry for '...' arg
%
%Revision 1.3 2003/03/19 16:17:02 sinnwell
%remove spaces after keywords for Rd conversion
%
%Revision 1.2 2002/09/16 14:59:07 sinnwell
%Fix RCs keywords
\name{print.haplo.group}
\alias{print.haplo.group}
\title{
Print a haplo.group object
}
\description{
Method function to print a class of type haplo.group
}
\usage{
\method{print}{haplo.group}(x, digits=max(options()$digits-2, 5), nlines=NULL, ...)
}
\arguments{
\item{x}{
The object returned from haplo.group (which has old class haplo.group).
}
\item{digits}{
Set the number of significant digits to print for haplotype probabilities.
}
\item{nlines}{
For shorter output, print first 1:nlines rows of the large data frame
}
\item{\dots}{
Optional arguments for the print method
}
}
\value{
Nothing is returned.
}
\details{
This is a print method function used to print information from the
haplo.group class, with haplotype-specific information given in a
table. Because haplo.group is a class, the generic print function
can be used, which in turn calls this print.haplo.group function.
}
\section{References}{
Schaid DJ, Rowland CM, Tines DE, Jacobson RM, Poland GA.
Expected haplotype frequencies for association of traits with
haplotypes when linkage phase is ambiguous.
Submitted to Amer J Hum Genet.
}
\seealso{
haplo.score, haplo.group, haplo.em
}
% docclass is function
% Converted by Sd2Rd version 37351.
|
/man/print.haplo.group.Rd
|
no_license
|
cran/haplo.stats
|
R
| false
| false
| 2,115
|
rd
|
% $Author: sinnwell $
% $Date: 2011/11/10 15:29:41 $
% $Header: /projects/genetics/cvs/cvsroot/haplo.stats/man/print.haplo.group.Rd,v 1.3 2011/11/10 15:29:41 sinnwell Exp $
% $Locker: $
%
%$Log: print.haplo.group.Rd,v $
%Revision 1.3 2011/11/10 15:29:41 sinnwell
%major update to hapglm, minor changes to Rd files, prepare for version 1.5.0 release
%
%Revision 1.2 2008/01/08 20:38:37 sinnwell
%add log
%
%Revision 1.7 2004/06/09 19:01:37 sinnwell
%haplo.group class, not haplo.score
%
%Revision 1.6 2004/04/06 21:30:30 sinnwell
%add nlines parm
%
%Revision 1.5 2004/03/02 15:05:30 sinnwell
%take out example, already shown in haplo.group.sgml
%
%Revision 1.4 2004/02/27 15:33:19 sinnwell
%add entry for '...' arg
%
%Revision 1.3 2003/03/19 16:17:02 sinnwell
%remove spaces after keywords for Rd conversion
%
%Revision 1.2 2002/09/16 14:59:07 sinnwell
%Fix RCs keywords
\name{print.haplo.group}
\alias{print.haplo.group}
\title{
Print a haplo.group object
}
\description{
Method function to print a class of type haplo.group
}
\usage{
\method{print}{haplo.group}(x, digits=max(options()$digits-2, 5), nlines=NULL, ...)
}
\arguments{
\item{x}{
The object returned from haplo.group (which has old class haplo.group).
}
\item{digits}{
Set the number of significant digits to print for haplotype probabilities.
}
\item{nlines}{
For shorter output, print first 1:nlines rows of the large data frame
}
\item{\dots}{
Optional arguments for the print method
}
}
\value{
Nothing is returned.
}
\details{
This is a print method function used to print information from the
haplo.group class, with haplotype-specific information given in a
table. Because haplo.group is a class, the generic print function
can be used, which in turn calls this print.haplo.group function.
}
\section{References}{
Schaid DJ, Rowland CM, Tines DE, Jacobson RM, Poland GA.
Expected haplotype frequencies for association of traits with
haplotypes when linkage phase is ambiguous.
Submitted to Amer J Hum Genet.
}
\seealso{
haplo.score, haplo.group, haplo.em
}
% docclass is function
% Converted by Sd2Rd version 37351.
|
## S4 class describing a game character's combat attributes.
setClass("Hero",
         slots = c(HP = "numeric",             # hit points
                   MP = "numeric",             # mana points
                   HRR = "numeric",            # HP regeneration rate
                   MRR = "numeric",            # MP regeneration rate
                   AD = "numeric",             # attack damage
                   AP = "numeric",             # ability power
                   ATKspeed = "numeric",       # attacks per second
                   Armor = "numeric",          # physical damage mitigation
                   MR = "numeric",             # magic resistance
                   SPEED = "numeric",          # movement speed
                   SkillCD = "numeric",        # cooldown length of the basic attack
                   SkillCoolTime = "numeric"), # remaining cooldown before the next attack
         # Use a list for the prototype: c() would collapse the defaults
         # into one named numeric vector instead of per-slot default values.
         prototype = list(HP = 0, MP = 0,
                          HRR = 0, MRR = 0,
                          AD = 0, AP = 0,
                          ATKspeed = 0,
                          SPEED = 0,
                          Armor = 0, MR = 0,
                          SkillCD = 0,
                          SkillCoolTime = 0))

## Slot validation, e.g. mana (MP) must not drop below 0.
## A validity method must return TRUE for valid objects; the original
## implicitly returned NULL, which is not the documented contract.
setValidity("Hero",
            function(object){
              if(object@MP < 0){
                stop("MP 不能低于0")  # user-facing message kept verbatim
              }
              TRUE
            }
)

# 1. Generic interface: basic attack of obj1 against obj2.
#    Returns the updated (damaged) obj2.
setGeneric("CommonAttack",
           function(obj1, obj2, ...){
             standardGeneric("CommonAttack")
           }
)
# Generic interface: advance obj1's cooldown clock; returns the updated obj1.
setGeneric("UpDateCd",
           function(obj1, ...){
             standardGeneric("UpDateCd")
           }
)

# 2. Default methods, used when a subclass does not define its own.
# Default damage formula: attacker's AD minus defender's Armor.
# NOTE: the printed message reads a `name` slot that only the concrete
# subclasses (A, B) declare, so this method assumes such a subclass.
setMethod("CommonAttack", "Hero",
          function(obj1, obj2, cat = TRUE){
            hurt <- obj1@AD - obj2@Armor
            if(cat){  # the logical `cat` does not shadow base::cat in call position
              cat(paste(obj1@name , "Use CommonAttack", obj2@name, "HP: -", hurt), "\n")
            }
            obj2@HP <- obj2@HP - hurt
            return(obj2)
          }
)
# Tick the cooldown down by tStep, clamped at zero.  (The original reset
# the cooldown straight to 0 on every tick, which made SkillCD irrelevant
# and let both heroes attack on every iteration of the battle loop.)
setMethod("UpDateCd", "Hero",
          function(obj1, tStep) {
            obj1@SkillCoolTime <- max(0, obj1@SkillCoolTime - tStep)
            return(obj1)
          })

### Character A, inheriting from Hero, with its own damage formula.
setClass("A", contains = "Hero", slots = list(name = "character"))
# Test sample 1: instantiate A with its base attributes.
A <- new("A",
         name = "a",
         HP = 550,
         MP = 340,
         HRR = 1.8,
         MRR = 12,
         AD = 55,
         AP = 25,
         ATKspeed = 0.63,
         Armor = 25,
         MR = 30,
         SPEED = 400,
         SkillCD = 1/0.63,
         SkillCoolTime = 0)
# A's special basic-attack formula (deliberately ad hoc).
setMethod("CommonAttack", "A",
          function(obj1, obj2, cat = TRUE){
            hurt <- obj1@AD*0.5 + obj1@AP*0.5 - obj2@Armor*0.8
            if(cat){  # optionally print a text description of the hit
              cat(paste(obj1@name , "Use CommonAttack : ", obj2@name, "HP: -", hurt), "\n")
            }
            obj2@HP <- obj2@HP - hurt
            return(obj2)
          }
)

### Character B, inheriting from Hero; uses the default attack method.
setClass("B", contains = "Hero", slots = list(name = "character"))  # test sample 2
B <- new("B",
         name = "b",
         HP = 550,
         MP = 0,
         HRR = 1.8,
         MRR = 12,
         AD = 58,
         AP = 0,
         ATKspeed = 0.70,
         Armor = 25,
         MR = 20,
         SPEED = 400,
         SkillCD = 1/0.70,
         SkillCoolTime = 0)

# Battle simulation: each hero attacks whenever its cooldown reaches 0,
# then time advances by tStep, until one of them is defeated.
t <- 0        # elapsed simulation time
tStep <- 0.1  # simulation time step
while(A@HP > 0 && B@HP > 0){
  if(A@SkillCoolTime == 0){  # A's basic attack is off cooldown: A hits B
    B <- CommonAttack(A, B)
    A@SkillCoolTime <- A@SkillCD
  }
  if(B@SkillCoolTime == 0){  # B's basic attack is off cooldown: B hits A
    A <- CommonAttack(B, A)
    B@SkillCoolTime <- B@SkillCD
  }
  # tick both cooldowns down
  A <- UpDateCd(A, tStep)
  B <- UpDateCd(B, tStep)
  t <- t + tStep  # time passes
}
|
/tests/testS4.R
|
no_license
|
wangdi2014/gfplots
|
R
| false
| false
| 3,821
|
r
|
## S4 class describing a game character's combat attributes.
## NOTE(review): the prototype is built with c(), which yields a named
## numeric vector rather than a list of per-slot defaults -- confirm this
## behaves as intended with setClass().
setClass("Hero",
         slots = c(HP = "numeric",
                   MP = "numeric",
                   HRR = "numeric",
                   MRR = "numeric",
                   AD = "numeric",
                   AP = "numeric",
                   ATKspeed = "numeric",
                   Armor = "numeric",
                   MR = "numeric",
                   SPEED = "numeric",
                   SkillCD = "numeric",
                   SkillCoolTime = "numeric"),
         prototype =c(HP = 0, MP = 0,
                      HRR = 0, MRR = 0,
                      AD = 0, AP = 0,
                      ATKspeed = 0,
                      SPEED = 0,
                      Armor = 0, MR = 0,
                      SkillCD = 0,
                      SkillCoolTime = 0))
## Slot validation, e.g. mana (MP) must not drop below 0.
## NOTE(review): a validity method should return TRUE when the object is
## valid; this one returns NULL implicitly -- confirm validObject() accepts it.
setValidity("Hero",
            function(object){
              if(object@MP < 0){
                stop("MP 不能低于0")
              }
            }
)
# 1. Define the generic interface: basic attack (obj1 attacks obj2).
setGeneric("CommonAttack",
           function(obj1, obj2, ...){ # basic attack
             standardGeneric("CommonAttack")
           }
)
# Generic for advancing a hero's cooldown clock.
setGeneric("UpDateCd",
           function(obj1, ...){ # cooldown update
             standardGeneric("UpDateCd")
           }
)
# 2. Default methods (i.e. the computation used for the parent class Hero).
# 2.1 When a subclass defines no method of its own, dispatch falls back to
#     these parent-class implementations.
setMethod("CommonAttack", "Hero",
          function(obj1, obj2, cat = T){
            hurt <- obj1@AD - obj2@Armor # damage dealt = attacker's AD minus defender's Armor
            if(cat){
              # NOTE(review): reads a `name` slot that only the subclasses
              # A and B declare, so this default assumes such a subclass.
              cat(paste(obj1@name , "Use CommonAttack", obj2@name, "HP: -", hurt), "\n")
            }
            obj2@HP <- obj2@HP - hurt
            return(obj2)
          }
)
# NOTE(review): resets the cooldown straight to 0 and ignores tStep, so
# both heroes attack on every tick; presumably
# max(0, SkillCoolTime - tStep) was intended -- confirm.
setMethod("UpDateCd", "Hero",
          function(obj1, tStep) {
            obj1@SkillCoolTime <- 0
            return(obj1)
          })
### Declare character A, inheriting from the parent class Hero.
setClass("A", contains = "Hero", slots = list(name = "character"))
# Test sample 1: instantiate A with its base attributes.
A <- new("A",
         name = "a",
         HP = 550,
         MP = 340,
         HRR = 1.8,
         MRR = 12,
         AD = 55 ,
         AP = 25,
         ATKspeed = 0.63,
         Armor = 25,
         MR = 30,
         SPEED = 400,
         SkillCD = 1/0.63,
         SkillCoolTime = 0)
# Define A's own basic-attack damage formula:
setMethod("CommonAttack", "A",
          function(obj1, obj2, cat=T){ # A's special damage formula
            hurt <- obj1@AD*0.5 + obj1@AP*0.5 - obj2@Armor*0.8 # arbitrary formula
            if(cat){ # whether to print a text description of the hit
              cat(paste(obj1@name , "Use CommonAttack : ", obj2@name, "HP: -", hurt), "\n")
            }
            obj2@HP <- obj2@HP - hurt
            return(obj2)
          }
)
### Declare character B, inheriting from the parent class Hero.
setClass("B", contains = "Hero", slots = list(name = "character")) # test sample 2
B <- new("B",
         name = "b",
         HP = 550,
         MP = 0,
         HRR = 1.8,
         MRR = 12,
         AD = 58 ,
         AP = 0,
         ATKspeed = 0.70,
         Armor = 25,
         MR = 20,
         SPEED = 400,
         SkillCD = 1/0.70,
         SkillCoolTime = 0)
# B uses the default (parent-class) attack method, so none is defined here.
t <- 0 # initial time counter
tStep <- 0.1 # simulation time step
# Battle loop: fight until one hero's HP reaches 0.
while(A@HP > 0 & B@HP > 0){
  if(A@SkillCoolTime == 0){ # when A's attack cooldown is 0, A attacks B
    B <- CommonAttack(A, B)
    A@SkillCoolTime <- A@SkillCD
  }
  if(B@SkillCoolTime == 0){ # when B's attack cooldown is 0, B attacks A
    A <- CommonAttack(B, A)
    B@SkillCoolTime <- B@SkillCD
  }
  # update cooldowns
  A <- UpDateCd(A, tStep)
  B <- UpDateCd(B, tStep)
  t <- t + tStep # time passes
}
|
# Evaluate `x`, retrying up to `n` times if evaluation throws an error.
#
# Relies on lazy evaluation: a promise whose evaluation errored is left
# unforced, so each loop iteration re-evaluates the original expression.
#
# Returns the value of `x` on the first successful attempt, or the string
# "failed" after `n` failed attempts.
#
# Fixes over the original: the old loop condition compared the result
# against the string "failed", which (a) errors for non-scalar results
# (`&&` rejects length > 1 conditions in R >= 4.3) and (b) mistakes a
# legitimate "failed" return value for an error.
tryTryTry <- function(x, n = 3L) {
  for (attempt in seq_len(n)) {
    print(sprintf("attempt: %s", attempt))
    result <- try(x, silent = TRUE)
    if (!inherits(result, "try-error")) {
      return(result)
    }
  }
  "failed"
}
|
/functions/trytrytry.R
|
no_license
|
uva-bi-sdad/ers_dashboard
|
R
| false
| false
| 251
|
r
|
# Retry evaluating `x` until it stops erroring or `n` attempts are used up.
# Lazy evaluation means the promise for `x` is re-forced on every failed
# attempt; once it succeeds, the cached value short-circuits the loop.
# Returns the value of `x`, or the string "failed" if every attempt errored.
tryTryTry <- function(x, n = 3L) {
  response <- "failed"
  for (attempt in seq_len(n)) {
    if (response != "failed") {
      break
    }
    print(sprintf("attempt: %s", attempt))
    try({ response <- x }, silent = TRUE)
  }
  response
}
|
# Analysis of the Korean scope baseline experiment: stack the raw
# Submiterator output for five rounds, filter participants, and average
# responses per condition.
library(ggplot2)
library(lme4)
library(hydroGOF)
library(dplyr)
library(lmerTest)
# library(tidyr)

# setwd("~/Documents/git/korean-scope/experiments/1-korean-baseline/Submiterator-master/")
setwd("~/git/korean_scope/experiments/1-korean-baseline/Submiterator-master")

#### first run of experiment
num_round_dirs <- 5

# Read each round's CSV and stack them; worker ids are offset by 9 per
# round so they remain unique across rounds.
df1 <- do.call(rbind, lapply(seq_len(num_round_dirs), function(i) {
  read.csv(paste0("round", i, "/korean-baseline.csv"),
           stringsAsFactors = FALSE) %>%
    mutate(workerid = workerid + (i - 1) * 9)
}))
df1$workerid <- paste("vi.", df1$workerid)

# Keep only the columns used downstream.
keep_cols <- c("workerid", "order", "item", "scramble", "scope", "type",
               "response", "school", "lived", "family", "language", "level",
               "gender", "age", "describe", "years", "assess", "classes",
               "college", "education")
d <- df1[, keep_cols]

# re-factorize every column (response is converted back to numeric below)
d[] <- lapply(d, factor)
t <- d

# only look at "both8" for lived
t <- t[t$lived == "both8", ]
# must have provided a native language
t <- t[t$language != "", ]
# no self-described L2 speakers
t <- t[t$describe != "L2", ]
t$response <- as.numeric(as.character(t$response))
# summary(t)
length(unique(t$workerid))  # remaining participants (n = 4)

## eventually want to filter by fillers?
f <- t[t$type == "test", ]
table(f$order, f$scope, f$scramble)

# Mean response per order x scope x scramble cell.
agr <- aggregate(response ~ order * scope * scramble, data = f, FUN = mean)
agr
|
/experiments/1-korean-baseline/results/analysis.R
|
no_license
|
gscontras/korean-scope
|
R
| false
| false
| 1,281
|
r
|
# Analysis script for the Korean scope baseline experiment: reads the raw
# Submiterator output for five rounds, filters participants, and averages
# responses by condition.
library(ggplot2)
library(lme4)
library(hydroGOF)
library(dplyr)
library(lmerTest)
#library(tidyr)
#setwd("~/Documents/git/korean-scope/experiments/1-korean-baseline/Submiterator-master/")
setwd("~/git/korean_scope/experiments/1-korean-baseline/Submiterator-master")
#### first run of experiment
num_round_dirs = 5
# Stack the per-round CSVs; worker ids are offset by 9 per round so they
# stay unique (assumes at most 9 workers per round -- TODO confirm).
df1 = do.call(rbind, lapply(1:num_round_dirs, function(i) {
  return (read.csv(paste(
    'round', i, '/korean-baseline.csv', sep=''),stringsAsFactors=FALSE) %>%
    mutate(workerid = (workerid + (i-1)*9)))}))
# NOTE(review): paste()'s default separator inserts a space ("vi. 3");
# confirm paste0() was not intended.
df1$workerid = paste("vi.",df1$workerid)
d = subset(df1, select=c("workerid","order","item","scramble","scope", "type", "response", "school","lived","family","language","level","gender","age","describe","years","assess","classes","college","education"))
# re-factorize every column (response is converted back to numeric below)
d[] <- lapply( d, factor)
# NOTE(review): `t` shadows base::t() for the rest of the script.
t <- d
# only look at "both8" for lived
t = t[t$lived=="both8",]
# must have provided a native language
t = t[t$language!="",]
# no self-described L2 speakers
t = t[t$describe!="L2",]
t$response = as.numeric(as.character(t$response))
#summary(t)
length(unique(t$workerid))# number of remaining participants (n=4)
## eventually want to filter by fillers?
f = t[t$type=="test",]
table(f$order,f$scope,f$scramble)
# mean response per order x scope x scramble cell
agr = aggregate(response~order*scope*scramble,data=f,FUN=mean)
agr
|
# Smoke-test script for the stat290.finalproject package: opens each help
# page (?fun) and exercises the corresponding function once.
#
# Fixes over the original: the ?history_temp_daily, ?history_temp_month and
# ?history_humidity_daily sections all called history_humidity_month()
# (copy-paste slip); each section now calls the function whose help page it
# opens.  Also renamed `c` (which shadowed base::c) to `city`.
library(stat290.finalproject)

?history_weather
city <- "Alameda"
d1 <- "2018-06-01"
d2 <- "2018-12-31"
history_weather_tbl <- history_weather(city_name = city, start_date = d1, end_date = d2)
head(history_weather_tbl)

?nearby_pws_city
pws_id_selected <- nearby_pws_city(city_name = "Alameda", check_date = "2018-05-05")
head(pws_id_selected)

?nearby_pws_coordinates
l1 <- -122.27999878
l2 <- 37.52000046
dis <- 4000
d <- "2018-05-05"
pws_id_selected <- nearby_pws_coordinates(lon = l1, lat = l2, distance = dis,
                                          data = "pws.rda", check_date = d)
head(pws_id_selected)

?weather_map
pws_id_selected <- nearby_pws_city(city_name = "Alameda", check_date = "2018-05-05")
weather_map(df = pws_id_selected)

?history_temp_daily
d1 <- "2018-06-01"
d2 <- "2018-12-31"
city <- "Alameda"
history_weather_tbl <- history_weather(city_name = city, start_date = d1, end_date = d2)
history_temp_daily(df = history_weather_tbl)

?history_temp_month
d1 <- "2018-06-01"
d2 <- "2018-12-31"
city <- "Alameda"
history_weather_tbl <- history_weather(city_name = city, start_date = d1, end_date = d2)
history_temp_month(df = history_weather_tbl)

?history_humidity_daily
d1 <- "2018-06-01"
d2 <- "2018-12-31"
city <- "Alameda"
history_weather_tbl <- history_weather(city_name = city, start_date = d1, end_date = d2)
history_humidity_daily(df = history_weather_tbl)

?history_humidity_month
d1 <- "2018-06-01"
d2 <- "2018-12-31"
city <- "Alameda"
history_weather_tbl <- history_weather(city_name = city, start_date = d1, end_date = d2)
history_humidity_month(df = history_weather_tbl)

?weather_windrose
df <- history_weather(city_name = "Alameda", start_date = "2018-06-01", end_date = "2018-12-31")
weather_windrose(data = df, spd = "wind_speed", dir = "wind_dir_degrees")

?weather_ui
library(shiny)
runApp('shiny_code.R')
|
/working code/test.R
|
no_license
|
tianjiangw/stats290-final-project
|
R
| false
| false
| 1,669
|
r
|
# Smoke-test script for the stat290.finalproject package: opens each help
# page (?fun) and calls the corresponding function once.
library(stat290.finalproject)
?history_weather
# NOTE(review): `c` shadows base::c() here.
c="Alameda"
d1="2018-06-01"
d2="2018-12-31"
history_weather_tbl=history_weather(city_name = c,start_date=d1,end_date=d2)
head(history_weather_tbl)
?nearby_pws_city
pws_id_selected <- nearby_pws_city(city_name = "Alameda",check_date="2018-05-05")
head(pws_id_selected)
?nearby_pws_coordinates
l1=-122.27999878
l2=37.52000046
dis=4000
d="2018-05-05"
pws_id_selected=nearby_pws_coordinates(lon=l1,lat=l2,distance = dis,data="pws.rda",check_date = d)
head(pws_id_selected)
?weather_map
pws_id_selected <- nearby_pws_city(city_name = "Alameda",check_date="2018-05-05")
weather_map(df=pws_id_selected)
?history_temp_daily
d1="2018-06-01"
d2="2018-12-31"
city="Alameda"
history_weather_tbl=history_weather(city_name = city,start_date=d1,end_date=d2)
# NOTE(review): this section opens ?history_temp_daily but calls
# history_humidity_month() -- looks like a copy-paste slip; confirm.
history_humidity_month(df=history_weather_tbl)
?history_temp_month
d1="2018-06-01"
d2="2018-12-31"
city="Alameda"
history_weather_tbl=history_weather(city_name = city,start_date=d1,end_date=d2)
# NOTE(review): same apparent copy-paste -- expected history_temp_month().
history_humidity_month(df=history_weather_tbl)
?history_humidity_daily
d1="2018-06-01"
d2="2018-12-31"
city="Alameda"
history_weather_tbl=history_weather(city_name = city,start_date=d1,end_date=d2)
# NOTE(review): same apparent copy-paste -- expected history_humidity_daily().
history_humidity_month(df=history_weather_tbl)
?history_humidity_month
d1="2018-06-01"
d2="2018-12-31"
city="Alameda"
history_weather_tbl=history_weather(city_name = city,start_date=d1,end_date=d2)
history_humidity_month(df=history_weather_tbl)
?weather_windrose
df=history_weather(city_name = "Alameda",start_date="2018-06-01",end_date="2018-12-31")
weather_windrose(data = df, spd = "wind_speed", dir = "wind_dir_degrees")
?weather_ui
library(shiny)
runApp('shiny_code.R')
|
# Auto-extracted example code for OpenML::listOMLRuns (from the package's
# Rd examples).  The call itself stays commented out (\dontrun{}) because
# it needs network access to the OpenML server.
library(OpenML)
### Name: listOMLRuns
### Title: List OpenML runs.
### Aliases: listOMLRuns
### ** Examples
# \dontrun{
# runs_ctree = listOMLRuns(flow.id = 2569)
# head(runs_ctree)
# }
|
/data/genthat_extracted_code/OpenML/examples/listOMLRuns.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 197
|
r
|
# Auto-extracted example code for OpenML::listOMLRuns (from the package's
# Rd examples).  The call itself stays commented out (\dontrun{}) because
# it needs network access to the OpenML server.
library(OpenML)
### Name: listOMLRuns
### Title: List OpenML runs.
### Aliases: listOMLRuns
### ** Examples
# \dontrun{
# runs_ctree = listOMLRuns(flow.id = 2569)
# head(runs_ctree)
# }
|
# TidyTuesday 2019-07-09: how far each nation progressed through the
# knockout rounds of every FIFA Women's World Cup.
library(tidyverse)
library(patchwork)
library(paletteer)

wwc_outcomes <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-09/wwc_outcomes.csv")

# Knockout matches only: drop the group stage and the third-place playoff.
wwc_knockout <- wwc_outcomes %>%
  filter(round != "Group", round != "Third Place Playoff")

count(wwc_knockout, team, sort = TRUE)

# One row per (year, team): `round` counts how deep the team went, with an
# extra step awarded for winning the final.
wwc_round <- wwc_knockout %>%
  group_by(year, team) %>%
  add_count() %>%
  mutate(count = if_else(round == "Final" & win_status == "Won",
                         n + 1,
                         as.numeric(n))) %>%
  summarise(round = max(count))

# An 80s-flavoured colour palette, one colour per team, mapped to the bars
# via scale_fill_identity().
colpal <- c(
  palettes_d$NineteenEightyR$sonny,
  palettes_d$NineteenEightyR$miami1,
  palettes_d$NineteenEightyR$miami2,
  palettes_d$NineteenEightyR$sunset1,
  palettes_d$NineteenEightyR$electronic_night)

team_cols <- tibble(team = unique(wwc_round$team),
                    team_col = colpal[1:23])

wwc_round_col <- inner_join(wwc_round, team_cols, by = "team")

# Shared theme for both panels.
theme_wwc <- theme_minimal(base_family = "FuturaBT-Medium") +
  theme(strip.placement = "outside",
        panel.grid = element_blank(),
        strip.background = element_rect(fill = "#110E43", colour = "white"),
        strip.text = element_text(colour = "white", face = "bold", size = 14),
        text = element_text(colour = "white"),
        axis.text = element_text(colour = "white"))

# Panel 1: tournaments before 2015 started the knockouts at the quarter-finals.
p1 <- wwc_round_col %>%
  filter(year < 2015) %>%
  ggplot(aes(x = team, y = -round)) +
  geom_col(aes(fill = team_col), width = 1, show.legend = FALSE) +
  scale_x_discrete(position = "top") +
  scale_y_continuous(breaks = seq(-0.5, -3.5, -1),
                     labels = c("QF", "SF", "F", "W")) +
  scale_fill_identity() +
  facet_wrap(~ year, ncol = 3, scales = "free") +
  labs(x = NULL, y = NULL) +
  theme_wwc +
  theme(panel.spacing.x = unit(3, "lines"))

# Panel 2: from 2015 onwards the field grew and a Round of 16 was added.
p2 <- wwc_round_col %>%
  filter(year >= 2015) %>%
  ggplot(aes(x = team, y = -round)) +
  geom_col(aes(fill = team_col), width = 1, show.legend = FALSE) +
  scale_x_discrete(position = "top") +
  scale_y_continuous(breaks = seq(-0.5, -4.5, -1),
                     labels = c("R16", "QF", "SF", "F", "W")) +
  scale_fill_identity() +
  facet_wrap(~ year, ncol = 2, scales = "free") +
  labs(x = NULL, y = NULL) +
  theme_wwc

# Stack the panels, add shared titles via patchwork, and write the image.
p1 +
  p2 +
  plot_layout(nrow = 2, heights = c(2, 1)) +
  plot_annotation(title = "FIFA Women's World Cup | History of Knockout Stages",
                  subtitle = "The progression of teams through the knockout stages of the Women's World Cup.\nIn 2015 the number of participants increased and a Round of 16 was introduced.",
                  caption = "Source: data.world | Graphic: @committedtotape",
                  theme = theme(plot.title = element_text(hjust = 0.5, colour = "white",
                                                          face = "bold", size = 20,
                                                          family = "FuturaBT-ExtraBlack"),
                                plot.subtitle = element_text(hjust = 0.5, colour = "white",
                                                             family = "FuturaBT-Medium"),
                                plot.background = element_rect(fill = "gray20", colour = "gray20"),
                                plot.caption = element_text(colour = "white", size = 11,
                                                            family = "FuturaBT-BoldCondensed")))

ggsave("womens_world_cup.png", width = 11, height = 8)
|
/2019/week28/wwc_tidytues.R
|
no_license
|
committedtotape/tidy-tuesday
|
R
| false
| false
| 3,842
|
r
|
# TidyTuesday 2019-07-09: how far each nation progressed through the
# knockout rounds of every FIFA Women's World Cup.
library(tidyverse)
library(patchwork)
library(paletteer)
wwc_outcomes <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-07-09/wwc_outcomes.csv")
# remove group stages and third place playoff
wwc_knockout <- wwc_outcomes %>%
  filter(round != "Group", round != "Third Place Playoff")
count(wwc_knockout, team, sort = TRUE)
# Create `round` variable to indicate how far in competition each team got
# (winning the final counts as one extra step).
wwc_round <- wwc_knockout %>%
  group_by(year, team) %>%
  add_count() %>%
  mutate(count = if_else(round == "Final" & win_status == "Won", n + 1, as.numeric(n))) %>%
  summarise(round = max(count))
# create colour palette from the 80s!
colpal <- c(
  palettes_d$NineteenEightyR$sonny,
  palettes_d$NineteenEightyR$miami1,
  palettes_d$NineteenEightyR$miami2,
  palettes_d$NineteenEightyR$sunset1,
  palettes_d$NineteenEightyR$electronic_night)
# join colour palette to teams (one colour per team; 23 teams expected)
team_cols <- bind_cols(team = unique(wwc_round$team),
                       team_col = colpal[1:23])
# add colours to dataset so they can be mapped to plot by identity
wwc_round_col <- wwc_round %>%
  inner_join(team_cols, by = "team")
# define elements of theme shared by both panels
theme_wwc <- theme_minimal(base_family = "FuturaBT-Medium") +
  theme(strip.placement = "outside",
        panel.grid = element_blank(),
        strip.background = element_rect(fill = "#110E43", colour = "white"),
        strip.text = element_text(colour = "white", face = "bold", size = 14),
        text = element_text(colour = "white"),
        axis.text = element_text(colour = "white"))
# plot 1 - tournaments pre 2015 had QF, SF and final
p1 <- wwc_round_col %>%
  filter(year < 2015) %>%
  ggplot(aes(x = team, y = round*-1)) +
  geom_col(aes(fill = team_col), width = 1, show.legend = FALSE) +
  scale_x_discrete(position = "top") +
  scale_y_continuous(breaks = seq(-0.5, -3.5, -1),
                     labels = c("QF", "SF", "F", "W")) +
  scale_fill_identity() +
  facet_wrap(~ year, ncol = 3, scales = "free") +
  labs(x = NULL, y = NULL) +
  theme_wwc +
  theme(panel.spacing.x = unit(3, "lines"))
# plot 2 - tournaments in 2015 and 2019 also have a Round of 16
p2 <- wwc_round_col %>%
  filter(year >= 2015) %>%
  ggplot(aes(x = team, y = round*-1)) +
  geom_col(aes(fill = team_col), width = 1, show.legend = FALSE) +
  scale_x_discrete(position = "top") +
  scale_y_continuous(breaks = seq(-0.5, -4.5, -1),
                     labels = c("R16", "QF", "SF", "F", "W")) +
  scale_fill_identity() +
  facet_wrap(~ year, ncol = 2, scales = "free") +
  labs(x = NULL, y = NULL) +
  theme_wwc
# arrange plots, add overall title and theme using patchwork package,
# then save the final image to disk
p1 +
  p2 +
  plot_layout(nrow = 2, heights = c(2, 1)) +
  plot_annotation(title = "FIFA Women's World Cup | History of Knockout Stages",
                  subtitle = "The progression of teams through the knockout stages of the Women's World Cup.\nIn 2015 the number of participants increased and a Round of 16 was introduced.",
                  caption = "Source: data.world | Graphic: @committedtotape",
                  theme = theme(plot.title = element_text(hjust = 0.5, colour = "white",
                                                          face = "bold", size = 20,
                                                          family = "FuturaBT-ExtraBlack"),
                                plot.subtitle = element_text(hjust = 0.5, colour = "white",
                                                             family = "FuturaBT-Medium"),
                                plot.background = element_rect(fill = "gray20", colour = "gray20"),
                                plot.caption = element_text(colour = "white", size = 11,
                                                            family = "FuturaBT-BoldCondensed")))
ggsave("womens_world_cup.png", width = 11, height = 8)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proxygoodconfirm.R
\name{proxyGoodConfirm}
\alias{proxyGoodConfirm}
\title{confirm good proxy}
\usage{
proxyGoodConfirm(proxy_good, .proxy)
}
\arguments{
\item{proxy_good}{list with ip and port properties}
\item{.proxy}{list of list with ip and port properties, times}
}
\value{
list of list with `ip`, `port` and `times` properties
}
\description{
confirm good proxy
}
\details{
.proxy must be a list; if the matching proxy has no entry in it yet, the
returned times will be 1, otherwise the entry's times value is incremented by 1
}
\examples{
\dontrun{
.proxy <- list(list(ip = '1.1.1.1', port = 111, times = 1),
list(ip = '2.2.2.2', port = 222, times = 2))
proxy_good <- list(ip = '1.1.1.1', port = 111)
.proxy <- proxyGoodConfirm(proxy_good, .proxy)
.proxy
}
}
|
/man/proxyGoodConfirm.Rd
|
permissive
|
ashther/ashr.rogue
|
R
| false
| true
| 827
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/proxygoodconfirm.R
\name{proxyGoodConfirm}
\alias{proxyGoodConfirm}
\title{confirm good proxy}
\usage{
proxyGoodConfirm(proxy_good, .proxy)
}
\arguments{
\item{proxy_good}{list with ip and port properties}
\item{.proxy}{list of list with ip and port properties, times}
}
\value{
list of list with `ip`, `port` and `times` properties
}
\description{
confirm good proxy
}
\details{
.proxy must be a list; if the matching proxy has no entry in it yet, the
returned times will be 1, otherwise the entry's times value is incremented by 1
}
\examples{
\dontrun{
.proxy <- list(list(ip = '1.1.1.1', port = 111, times = 1),
list(ip = '2.2.2.2', port = 222, times = 2))
proxy_good <- list(ip = '1.1.1.1', port = 111)
.proxy <- proxyGoodConfirm(proxy_good, .proxy)
.proxy
}
}
|
# Compare real model-fit metrics against permutation runs and compute
# permutation-based corrected p-values (empirical FDR) per gene and model.
library(tidyverse)
library(reshape2)
library(ggpubr)

data_path <- "Isoform/Permutation/" # path to the per-gene metrics files
args <- commandArgs(trailingOnly = TRUE)
#######################################################################
# Read a set of model-fit metrics files and stack them into one tibble.
#   files: metrics file names (relative to data_path)
#   perm:  batch label (permutation number, or "REAL" for unpermuted fits);
#          stored in the `perm` column and used for the cached .RData name
# Returns the combined tibble; also saves it to data_path/<perm>.RData.
readFiles <- function(files, perm)
{
  # tibble() replaces the deprecated data_frame().
  data <- tibble(filename = files) %>%         # one row per metrics file
    mutate(file_contents = map(filename,       # read each file into a list-column
                               ~ read_delim(file.path(data_path, .), " ", skip=1, col_names = c("gene", "features", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value", "Precision", "Recall", "F1", "Prevalence", "Detection Rate", "Detection Prevalence", "Balanced Accuracy", "Accuracy", "Kappa", "AccuracyLower", "AccuracyUpper", "AccuracyNull", "AccuracyPValue", "McnemarPValue")))
    )
  data2 <- unnest(data, cols = file_contents)  # explicit cols silences the deprecation warning
  # the first field holds "gene model"; split it into two columns
  data2 <- data2 %>% separate(gene, c("gene", "model"), sep=" ")
  data2$perm <- perm
  save(data2, file=paste("Isoform/Permutation/", perm, ".RData", sep=""))
  return(data2)
}
# Read the real model results and the ten permutation runs.
# NOTE(review): a glob like "*1_metrics.txt" also matches names ending in
# 11, 21, ... -- harmless with only ten permutations, but worth checking.
files1 <- dir(data_path, pattern = "*[-|+]_metrics.txt") # real (unpermuted) fits
datR <- readFiles(files1, "REAL")
files1 <- dir(data_path, pattern = "*1_metrics.txt")
dat1 <- readFiles(files1, 1)  # fixed: was `number`, an undefined variable
files2 <- dir(data_path, pattern = "*2_metrics.txt")
dat2 <- readFiles(files2, 2)
files3 <- dir(data_path, pattern = "*3_metrics.txt")
dat3 <- readFiles(files3, 3)
files4 <- dir(data_path, pattern = "*4_metrics.txt")
dat4 <- readFiles(files4, 4)
files5 <- dir(data_path, pattern = "*5_metrics.txt")
dat5 <- readFiles(files5, 5)
files6 <- dir(data_path, pattern = "*6_metrics.txt")
dat6 <- readFiles(files6, 6)
files7 <- dir(data_path, pattern = "*7_metrics.txt")
dat7 <- readFiles(files7, 7)
files8 <- dir(data_path, pattern = "*8_metrics.txt")
dat8 <- readFiles(files8, 8)
files9 <- dir(data_path, pattern = "*9_metrics.txt")
dat9 <- readFiles(files9, 9)
files10 <- dir(data_path, pattern = "*10_metrics.txt")
dat10 <- readFiles(files10, 10)
####################################################################################
# Reload cached results if needed.
# NOTE(review): these caches are named metrics0..metrics10.RData while
# readFiles() above saves REAL.RData / 1.RData ... -- presumably produced
# by an earlier naming scheme; confirm the files match.
setwd("/Users/zhangxiaopu/Desktop/Isoform/02.modelTraining/02.metrics/")
load("metrics0.RData")
datR <- data2
load("metrics1.RData")
dat1 <- data2
load("metrics2.RData")
dat2 <- data2
load("metrics3.RData")
dat3 <- data2
load("metrics4.RData")
dat4 <- data2
load("metrics5.RData")
dat5 <- data2
load("metrics6.RData")
dat6 <- data2
load("metrics7.RData")
dat7 <- data2
load("metrics8.RData")
dat8 <- data2
load("metrics9.RData")
dat9 <- data2
load("metrics10.RData")
dat10 <- data2
data3 <- rbind(datR, dat1, dat2, dat3, dat4, dat5, dat6, dat7, dat8, dat9, dat10)
# Bin the accuracy p-values (width 0.1) and count genes per bin for the
# real fit and each permutation.
breaks <- seq(0, 1, 0.1)
data3$pvalue_bin <- cut(data3$AccuracyPValue, breaks=breaks, include.lowest=TRUE, right=FALSE)
pvalue_counts <- data3 %>% group_by(pvalue_bin, model, perm) %>% tally()
# across()/where() replaces the removed mutate_if()/funs() idiom.
pvalue_counts <- pvalue_counts %>% mutate(across(where(is.numeric), ~ replace_na(.x, 0)))
pvalue_counts <- pvalue_counts %>% spread(perm, n)
# Ratio of real to permuted gene counts per bin (add-one smoothed).
pvalue_counts$ratio1 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics1`+1)
pvalue_counts$ratio2 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics2`+1)
pvalue_counts$ratio3 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics3`+1)
pvalue_counts$ratio4 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics4`+1)
pvalue_counts$ratio5 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics5`+1)
pvalue_counts$ratio6 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics6`+1)
pvalue_counts$ratio7 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics7`+1)
pvalue_counts$ratio8 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics8`+1)
pvalue_counts$ratio9 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics9`+1)
pvalue_counts$ratio10 <- (pvalue_counts$metrics0+1)/(pvalue_counts$`metrics10`+1)
pvalue <- pvalue_counts %>% select(pvalue_bin, model, ratio1, ratio2, ratio3, ratio4,
                                   ratio5, ratio6, ratio7, ratio8, ratio9, ratio10)
pvalue <- melt(pvalue, id.vars = c("pvalue_bin", "model"))
colnames(pvalue) <- c("pvalue bin", "model", "variable", "Num Real / Num Perm")
# Replace numeric model codes with readable names for plotting.
pvalue$model[pvalue$model=="1"] <- "logistic regression"
pvalue$model[pvalue$model=="2"] <- "elastic net"
pvalue$model[pvalue$model=="3"] <- "random forest"
pvalue$model[pvalue$model=="4"] <- "xgboost"
p2 <- ggboxplot(pvalue, x="pvalue bin", y="Num Real / Num Perm", color="model") +
  scale_y_continuous(trans='log2')
# Window-count distribution per window size (panel A of the figure).
win <- read.csv("~/Desktop/Isoform/01.winComparison/Gene.Window", header=T, sep = "\t")
win <- melt(win, id.vars = c("window.region", "GeneID"))
win$`log(win num)` <- log(win$value)
colnames(win) <- c("region","ID", "window size", "value", "log(number of windows)")
p1 <- ggboxplot(win, x="window size", y="log(number of windows)", color="window size")
pdf("/Users/zhangxiaopu/Desktop/Isoform/02.modelTraining/Fig2.pdf")
ggarrange(p1, p2, ncol=1, heights=c(1,1.5), labels=c("A","B"),
          font.label = list(size = 14, color = "black", face = "bold", family = NULL))
dev.off()
# Permutation-corrected p-values: for each model type, count how many
# permutation p-values are <= each real p-value; corrected_p is that count
# divided by the number of permutation p-values (an empirical FDR).
# NOTE(review): the count does not mean the gene was significant in the
# original accuracy p-value -- confirm downstream interpretation.
datALL <- rbind(dat1, dat2, dat3, dat4, dat5, dat6, dat7, dat8, dat9, dat10)
datR$perm_count <- NA
datR$corrected_p <- NA
for(i in as.numeric(unique(datR$model)))  # model codes are "1".."4"; character==numeric works via coercion
{
  datR$perm_count[which(datR$model == i)] <- sapply(datR$AccuracyPValue[which(datR$model == i)], function(x) sum(x >= datALL$AccuracyPValue[which(datALL$model == i)]))
  datR$corrected_p[which(datR$model == i)] <- datR$perm_count[which(datR$model == i)]/length(datALL$AccuracyPValue[which(datALL$model == i)])
}
# write.table(datR,"~/correctP.permutation",row.names = F)
# Count significant genes (corrected_p < 0.05) by model type.
library(tidyverse)
datR <- read.csv("~/Desktop/metrics/correctP.permutation", header=T, sep=" ", stringsAsFactors=F)
a <- datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(model)
write.table(a, "~/Desktop/metrics/FDR.05.fourModels", row.names=F, quote=F)
# Counts of genes significant across model types.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(gene) %>% tally() %>%
  group_by(n) %>% tally()
# Counts of significant genes per model type.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(model) %>% tally()
# Genes significant in all four model types, split back into chr/gene/strand.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(gene) %>% tally() %>%
  filter(n > 3) %>% separate(gene, c("chr", "gene", "strand")) %>% print(n=300)
#datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
#  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% arrange(corrected_p, AccuracyPValue) %>%
#  print(n=100)
|
/dataAnalysis/01.comparePerms.R
|
no_license
|
Aceculuses/IsoformUsage
|
R
| false
| false
| 7,401
|
r
|
library(tidyverse)
library(reshape2)
library(ggpubr)
data_path <- "Isoform/Permutation/" # path to the data
args = commandArgs(trailingOnly=TRUE)
#######################################################################
#this bit reads the output metrics files from the model fits
readFiles<-function(files, perm)
{
data <- data_frame(filename = files) %>% # create a data frame
# holding the file names
mutate(file_contents = map(filename, # read files into
~ read_delim(file.path(data_path, .), " ", skip=1, col_names = c("gene", "features", "Sensitivity", "Specificity", "Pos Pred Value", "Neg Pred Value", "Precision", "Recall", "F1", "Prevalence", "Detection Rate", "Detection Prevalence", "Balanced Accuracy", "Accuracy", "Kappa", "AccuracyLower", "AccuracyUpper", "AccuracyNull", "AccuracyPValue", "McnemarPValue"))) # a new data column
)
data2<-unnest(data)
data2<-data2 %>% separate(gene, c("gene", "model"), sep=" ")
data2$perm<-perm
save(data2, file=paste("Isoform/Permutation/",perm,".RData", sep=""))
return(data2)
}
#currently just reads the real model results and results from just one permutation
#however ideally we would run at least ten permutations
files1 <- dir(data_path, pattern = "*[-|+]_metrics.txt") # get file names
datR<-readFiles(files1, "REAL")
files1 <- dir(data_path, pattern = "*1_metrics.txt") # get file names
dat1<-readFiles(files1, number)
files2 <- dir(data_path, pattern = "*2_metrics.txt") # get file names
dat2<-readFiles(files2, 2)
files3 <- dir(data_path, pattern = "*3_metrics.txt") # get file names
dat3<-readFiles(files3, 3)
files4 <- dir(data_path, pattern = "*4_metrics.txt") # get file names
dat4<-readFiles(files4, 4)
files5 <- dir(data_path, pattern = "*5_metrics.txt") # get file names
dat5<-readFiles(files5, 5)
files6 <- dir(data_path, pattern = "*6_metrics.txt") # get file names
dat6<-readFiles(files6, 6)
files7 <- dir(data_path, pattern = "*7_metrics.txt") # get file names
dat7<-readFiles(files7, 7)
files8 <- dir(data_path, pattern = "*8_metrics.txt") # get file names
dat8<-readFiles(files8, 8)
files9 <- dir(data_path, pattern = "*9_metrics.txt") # get file names
dat9<-readFiles(files9, 9)
files10 <- dir(data_path, pattern = "*10_metrics.txt") # get file names
dat10<-readFiles(files10, 10)
####################################################################################
# Reload cached metrics (if the session was restarted) and build Figure 2:
#   panel A -- distribution of window counts per gene, by window size
#   panel B -- ratio of real-to-permuted gene counts per accuracy-p-value bin
# NOTE(review): the cache file names here ("metricsN.RData", each restoring a
# data frame named `data2`) do not match the paths written by readFiles()
# above ("Isoform/Permutation/<perm>.RData") -- confirm which cache is current.
setwd("/Users/zhangxiaopu/Desktop/Isoform/02.modelTraining/02.metrics/")
load("metrics0.RData")   # metrics0 = real (un-permuted) results
datR <- data2
load("metrics1.RData")
dat1 <- data2
load("metrics2.RData")
dat2 <- data2
load("metrics3.RData")
dat3 <- data2
load("metrics4.RData")
dat4 <- data2
load("metrics5.RData")
dat5 <- data2
load("metrics6.RData")
dat6 <- data2
load("metrics7.RData")
dat7 <- data2
load("metrics8.RData")
dat8 <- data2
load("metrics9.RData")
dat9 <- data2
load("metrics10.RData")
dat10 <- data2
data3 <- rbind(datR, dat1, dat2, dat3, dat4, dat5, dat6, dat7, dat8, dat9, dat10)
# Bin every model's accuracy p-value into [0,0.1), [0.1,0.2), ..., [0.9,1].
breaks <- seq(0, 1, 0.1)
data3$pvalue_bin <- cut(data3$AccuracyPValue, breaks=breaks, include.lowest=TRUE, right=FALSE)
# Count genes per (bin, model, permutation), then widen to one column per run.
pvalue_counts <- data3 %>% group_by(pvalue_bin, model, perm) %>% tally()
pvalue_counts <- pvalue_counts %>% spread(perm, n)
# Zero-fill AFTER spreading: tally() output contains no NAs, so the original
# pre-spread replace_na() was a no-op and the NAs introduced by spread()
# poisoned the ratio columns below. Also: funs() was removed from dplyr,
# so use a lambda instead.
pvalue_counts <- pvalue_counts %>% mutate_if(is.numeric, ~ replace_na(.x, 0))
# Smoothed real/permuted count ratio per bin (+1 avoids division by zero and
# keeps log2 finite in the plot).
pvalue_counts$ratio1 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics1` + 1)
pvalue_counts$ratio2 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics2` + 1)
pvalue_counts$ratio3 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics3` + 1)
pvalue_counts$ratio4 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics4` + 1)
pvalue_counts$ratio5 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics5` + 1)
pvalue_counts$ratio6 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics6` + 1)
pvalue_counts$ratio7 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics7` + 1)
pvalue_counts$ratio8 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics8` + 1)
pvalue_counts$ratio9 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics9` + 1)
pvalue_counts$ratio10 <- (pvalue_counts$metrics0 + 1) / (pvalue_counts$`metrics10` + 1)
# Long format for plotting: one row per (bin, model, permutation ratio).
pvalue <- pvalue_counts %>% select(pvalue_bin, model, ratio1, ratio2, ratio3, ratio4,
                                   ratio5, ratio6, ratio7, ratio8, ratio9, ratio10)
pvalue <- melt(pvalue, id.vars = c("pvalue_bin", "model"))
colnames(pvalue) <- c("pvalue bin", "model", "variable", "Num Real / Num Perm")
# Models are stored as numeric codes; relabel for the figure legend.
pvalue$model[pvalue$model=="1"] <- "logistic regression"
pvalue$model[pvalue$model=="2"] <- "elastic net"
pvalue$model[pvalue$model=="3"] <- "random forest"
pvalue$model[pvalue$model=="4"] <- "xgboost"
p2 <- ggboxplot(pvalue, x="pvalue bin", y="Num Real / Num Perm", color="model") +
  scale_y_continuous(trans='log2')
# Panel A: log number of candidate windows per gene for each window size.
win <- read.csv("~/Desktop/Isoform/01.winComparison/Gene.Window", header=T, sep = "\t")
win <- melt(win, id.vars = c("window.region", "GeneID"))
win$`log(win num)` <- log(win$value)
colnames(win) <- c("region", "ID", "window size", "value", "log(number of windows)")
p1 <- ggboxplot(win, x="window size", y="log(number of windows)", color="window size")
pdf("/Users/zhangxiaopu/Desktop/Isoform/02.modelTraining/Fig2.pdf")
ggarrange(p1, p2, ncol=1, heights=c(1,1.5), labels=c("A","B"),
          font.label = list(size = 14, color = "black", face = "bold", family = NULL))
dev.off()
# Compute an empirical (permutation-based) corrected p-value for each gene:
# for every model type, count how many permutation AccuracyPValues are <= the
# real gene's AccuracyPValue; the corrected p-value is that count divided by
# the total number of permutation p-values for the model (an empirical FDR).
datALL <- rbind(dat1,dat2,dat3,dat4,dat5,dat6,dat7,dat8,dat9,dat10)
datR$perm_count<-NA
datR$corrected_p<-NA
# NOTE(review): assumes datR$model holds the numeric codes "1".."4" -- confirm
# these were not already relabelled to model names elsewhere in the session.
for(i in as.numeric(unique(datR$model)))
{
# perm_count = number of permutation p-values at least as small as this gene's
datR$perm_count[which(datR$model == i)]<- sapply(datR$AccuracyPValue[which(datR$model == i)], function(x) sum(x >= datALL$AccuracyPValue[which(datALL$model == i)]))
# corrected_p = perm_count / total permutation p-values for this model type
datR$corrected_p[which(datR$model == i)]<-datR$perm_count[which(datR$model == i)]/length(datALL$AccuracyPValue[which(datALL$model == i)])
}
# write.table(datR,"~/correctP.permutation",row.names = F)
# Count the number of significant genes by model type.
# Note: corrected_p < 0.05 does not imply the original accuracy p-value was
# itself significant.
library(tidyverse)
datR <- read.csv("~/Desktop/metrics/correctP.permutation", header=T, sep=" ", stringsAsFactors=F)
# Table of significant genes (empirical FDR < 0.05), grouped by model.
a <- datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(model)
write.table(a, "~/Desktop/metrics/FDR.05.fourModels",row.names=F, quote=F)
# How many genes are significant in 1, 2, 3, or 4 model types.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(gene) %>% tally() %>%
group_by(n) %>% tally()
# Number of significant genes per model type.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(model) %>% tally()
# Genes significant in all four model types, with the "chr_gene_strand"
# identifier split into its components.
datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% group_by(gene) %>% tally() %>%
filter(n > 3) %>% separate(gene, c("chr", "gene", "strand")) %>% print(n=300)
#datR %>% filter(corrected_p < 0.05) %>% arrange(corrected_p) %>%
#  select(gene, model, AccuracyPValue, perm_count, corrected_p) %>% arrange(corrected_p, AccuracyPValue) %>%
#  print(n=100)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg-lockfile.R
\name{new_pkg_installation_plan}
\alias{new_pkg_installation_plan}
\alias{pkg_installation_plan}
\title{R6 class for installation from a lock file}
\usage{
new_pkg_installation_plan(lockfile = "pkg.lock", config = list(), ...)
}
\arguments{
\item{lockfile}{Path to the lock file to use.}
\item{config}{Configuration options, a named list. See
\link[=pkgdepends-config]{'Configuration'}. If it does not include \code{library}, then
\code{.libPaths()[1]} is added as \code{library}.}
\item{...}{Additional arguments, passed to
\href{#method-new}{\code{pkg_installation_plan$new()}}.}
}
\value{
\code{new_pkg_installation_plan()} returns a \code{pkg_installation_plan}
object.
}
\description{
An installation plan is similar to an installation proposal
(i.e. \link{pkg_installation_proposal}), but it already contains the solved
dependencies, complete with download URLs.
}
\details{
Typically you create a \code{pkg_installation_plan} object with
\code{new_pkg_installation_plan()} and then call its \verb{$download()} method
to download the packages and then its \verb{$install()} method to install
them.
}
\section{Super class}{
\code{\link[pkgdepends:pkg_installation_proposal]{pkgdepends::pkg_installation_proposal}} -> \code{pkg_installation_plan}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-pkg_installation_plan-new}{\code{pkg_installation_plan$new()}}
\item \href{#method-pkg_installation_plan-resolve}{\code{pkg_installation_plan$resolve()}}
\item \href{#method-pkg_installation_plan-async_resolve}{\code{pkg_installation_plan$async_resolve()}}
\item \href{#method-pkg_installation_plan-get_solve_policy}{\code{pkg_installation_plan$get_solve_policy()}}
\item \href{#method-pkg_installation_plan-set_solve_policy}{\code{pkg_installation_plan$set_solve_policy()}}
\item \href{#method-pkg_installation_plan-solve}{\code{pkg_installation_plan$solve()}}
\item \href{#method-pkg_installation_plan-update}{\code{pkg_installation_plan$update()}}
\item \href{#method-pkg_installation_plan-update_sysreqs}{\code{pkg_installation_plan$update_sysreqs()}}
\item \href{#method-pkg_installation_plan-format}{\code{pkg_installation_plan$format()}}
\item \href{#method-pkg_installation_plan-clone}{\code{pkg_installation_plan$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="async_download"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-async_download'><code>pkgdepends::pkg_installation_proposal$async_download()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="create_lockfile"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-create_lockfile'><code>pkgdepends::pkg_installation_proposal$create_lockfile()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="download"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-download'><code>pkgdepends::pkg_installation_proposal$download()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="draw"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-draw'><code>pkgdepends::pkg_installation_proposal$draw()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_config"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_config'><code>pkgdepends::pkg_installation_proposal$get_config()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_downloads"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_downloads'><code>pkgdepends::pkg_installation_proposal$get_downloads()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_install_plan"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_install_plan'><code>pkgdepends::pkg_installation_proposal$get_install_plan()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_refs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_refs'><code>pkgdepends::pkg_installation_proposal$get_refs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_resolution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_resolution'><code>pkgdepends::pkg_installation_proposal$get_resolution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_solution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_solution'><code>pkgdepends::pkg_installation_proposal$get_solution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_sysreqs'><code>pkgdepends::pkg_installation_proposal$get_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="install"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-install'><code>pkgdepends::pkg_installation_proposal$install()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="install_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-install_sysreqs'><code>pkgdepends::pkg_installation_proposal$install_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="print"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-print'><code>pkgdepends::pkg_installation_proposal$print()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="show_solution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-show_solution'><code>pkgdepends::pkg_installation_proposal$show_solution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="show_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-show_sysreqs'><code>pkgdepends::pkg_installation_proposal$show_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="stop_for_download_error"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-stop_for_download_error'><code>pkgdepends::pkg_installation_proposal$stop_for_download_error()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="stop_for_solution_error"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-stop_for_solution_error'><code>pkgdepends::pkg_installation_proposal$stop_for_solution_error()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-new"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{pkg_installation_plan} object. Consider using
\code{new_pkg_installation_plan()} instead of calling the constructor
directly.
The returned object can be used to download and install
packages, according to the plan.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$new(
lockfile = "pkg.lock",
config = list(),
remote_types = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{lockfile}}{Path to the lock file to use.}
\item{\code{config}}{Configuration options. See
\link[=pkgdepends-config]{'Configuration'}. It needs to include the package
library to install to, in \code{library}.}
\item{\code{remote_types}}{Custom remote ref types, this is for advanced
use, and experimental currently.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-resolve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-resolve}{}}}
\subsection{Method \code{resolve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$resolve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-async_resolve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-async_resolve}{}}}
\subsection{Method \code{async_resolve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$async_resolve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-get_solve_policy"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-get_solve_policy}{}}}
\subsection{Method \code{get_solve_policy()}}{
Installation plans are already solved, and this method will return
\code{NA_character_}, always.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$get_solve_policy()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-set_solve_policy"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-set_solve_policy}{}}}
\subsection{Method \code{set_solve_policy()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$set_solve_policy()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-solve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-solve}{}}}
\subsection{Method \code{solve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$solve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-update"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-update}{}}}
\subsection{Method \code{update()}}{
Update the plan to the current state of the library. If the library
has not changed since the plan was created, then it does nothing.
If new packages have been installed, then it might not be necessary
to download and install all packages in the plan.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$update()}\if{html}{\out{</div>}}
}
\subsection{Details}{
This operation is different than creating a new proposal with the
updated library, because it uses the packages and package
versions of the original plan. E.g. if the library has a newer
version of a package, then \verb{$update()} will downgrade it to the
version in the plan.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-update_sysreqs"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-update_sysreqs}{}}}
\subsection{Method \code{update_sysreqs()}}{
Update information about installed and missing system requirements.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$update_sysreqs()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-format"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-format}{}}}
\subsection{Method \code{format()}}{
Format a \code{pkg_installation_plan} object, typically for printing.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$format(...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{not used currently.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A character vector, each element should be a line in the printout.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-clone"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/pkg_installation_plan.Rd
|
permissive
|
r-lib/pkgdepends
|
R
| false
| true
| 13,508
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pkg-lockfile.R
\name{new_pkg_installation_plan}
\alias{new_pkg_installation_plan}
\alias{pkg_installation_plan}
\title{R6 class for installation from a lock file}
\usage{
new_pkg_installation_plan(lockfile = "pkg.lock", config = list(), ...)
}
\arguments{
\item{lockfile}{Path to the lock file to use.}
\item{config}{Configuration options, a named list. See
\link[=pkgdepends-config]{'Configuration'}. If it does not include \code{library}, then
\code{.libPaths()[1]} is added as \code{library}.}
\item{...}{Additional arguments, passed to
\href{#method-new}{\code{pkg_installation_plan$new()}}.}
}
\value{
\code{new_pkg_installation_plan()} returns a \code{pkg_installation_plan}
object.
}
\description{
An installation plan is similar to an installation proposal
(i.e. \link{pkg_installation_proposal}), but it already contains the solved
dependencies, complete with download URLs.
}
\details{
Typically you create a \code{pkg_installation_plan} object with
\code{new_pkg_installation_plan()} and then call its \verb{$download()} method
to download the packages and then its \verb{$install()} method to install
them.
}
\section{Super class}{
\code{\link[pkgdepends:pkg_installation_proposal]{pkgdepends::pkg_installation_proposal}} -> \code{pkg_installation_plan}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-pkg_installation_plan-new}{\code{pkg_installation_plan$new()}}
\item \href{#method-pkg_installation_plan-resolve}{\code{pkg_installation_plan$resolve()}}
\item \href{#method-pkg_installation_plan-async_resolve}{\code{pkg_installation_plan$async_resolve()}}
\item \href{#method-pkg_installation_plan-get_solve_policy}{\code{pkg_installation_plan$get_solve_policy()}}
\item \href{#method-pkg_installation_plan-set_solve_policy}{\code{pkg_installation_plan$set_solve_policy()}}
\item \href{#method-pkg_installation_plan-solve}{\code{pkg_installation_plan$solve()}}
\item \href{#method-pkg_installation_plan-update}{\code{pkg_installation_plan$update()}}
\item \href{#method-pkg_installation_plan-update_sysreqs}{\code{pkg_installation_plan$update_sysreqs()}}
\item \href{#method-pkg_installation_plan-format}{\code{pkg_installation_plan$format()}}
\item \href{#method-pkg_installation_plan-clone}{\code{pkg_installation_plan$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="async_download"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-async_download'><code>pkgdepends::pkg_installation_proposal$async_download()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="create_lockfile"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-create_lockfile'><code>pkgdepends::pkg_installation_proposal$create_lockfile()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="download"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-download'><code>pkgdepends::pkg_installation_proposal$download()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="draw"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-draw'><code>pkgdepends::pkg_installation_proposal$draw()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_config"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_config'><code>pkgdepends::pkg_installation_proposal$get_config()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_downloads"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_downloads'><code>pkgdepends::pkg_installation_proposal$get_downloads()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_install_plan"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_install_plan'><code>pkgdepends::pkg_installation_proposal$get_install_plan()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_refs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_refs'><code>pkgdepends::pkg_installation_proposal$get_refs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_resolution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_resolution'><code>pkgdepends::pkg_installation_proposal$get_resolution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_solution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_solution'><code>pkgdepends::pkg_installation_proposal$get_solution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="get_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-get_sysreqs'><code>pkgdepends::pkg_installation_proposal$get_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="install"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-install'><code>pkgdepends::pkg_installation_proposal$install()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="install_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-install_sysreqs'><code>pkgdepends::pkg_installation_proposal$install_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="print"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-print'><code>pkgdepends::pkg_installation_proposal$print()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="show_solution"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-show_solution'><code>pkgdepends::pkg_installation_proposal$show_solution()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="show_sysreqs"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-show_sysreqs'><code>pkgdepends::pkg_installation_proposal$show_sysreqs()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="stop_for_download_error"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-stop_for_download_error'><code>pkgdepends::pkg_installation_proposal$stop_for_download_error()</code></a></span></li>
<li><span class="pkg-link" data-pkg="pkgdepends" data-topic="pkg_installation_proposal" data-id="stop_for_solution_error"><a href='../../pkgdepends/html/pkg_installation_proposal.html#method-pkg_installation_proposal-stop_for_solution_error'><code>pkgdepends::pkg_installation_proposal$stop_for_solution_error()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-new"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-new}{}}}
\subsection{Method \code{new()}}{
Create a new \code{pkg_installation_plan} object. Consider using
\code{new_pkg_installation_plan()} instead of calling the constructor
directly.
The returned object can be used to download and install
packages, according to the plan.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$new(
lockfile = "pkg.lock",
config = list(),
remote_types = NULL
)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{lockfile}}{Path to the lock file to use.}
\item{\code{config}}{Configuration options. See
\link[=pkgdepends-config]{'Configuration'}. It needs to include the package
library to install to, in \code{library}.}
\item{\code{remote_types}}{Custom remote ref types, this is for advanced
use, and experimental currently.}
}
\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-resolve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-resolve}{}}}
\subsection{Method \code{resolve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$resolve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-async_resolve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-async_resolve}{}}}
\subsection{Method \code{async_resolve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$async_resolve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-get_solve_policy"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-get_solve_policy}{}}}
\subsection{Method \code{get_solve_policy()}}{
Installation plans are already solved, and this method will return
\code{NA_character_}, always.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$get_solve_policy()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-set_solve_policy"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-set_solve_policy}{}}}
\subsection{Method \code{set_solve_policy()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$set_solve_policy()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-solve"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-solve}{}}}
\subsection{Method \code{solve()}}{
This function is implemented for installation plans, and will error.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$solve()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-update"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-update}{}}}
\subsection{Method \code{update()}}{
Update the plan to the current state of the library. If the library
has not changed since the plan was created, then it does nothing.
If new packages have been installed, then it might not be necessary
to download and install all packages in the plan.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$update()}\if{html}{\out{</div>}}
}
\subsection{Details}{
This operation is different than creating a new proposal with the
updated library, because it uses the packages and package
versions of the original plan. E.g. if the library has a newer
version of a package, then \verb{$update()} will downgrade it to the
version in the plan.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-update_sysreqs"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-update_sysreqs}{}}}
\subsection{Method \code{update_sysreqs()}}{
Update information about installed and missing system requirements.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$update_sysreqs()}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-format"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-format}{}}}
\subsection{Method \code{format()}}{
Format a \code{pkg_installation_plan} object, typically for printing.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$format(...)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{...}}{not used currently.}
}
\if{html}{\out{</div>}}
}
\subsection{Returns}{
A character vector, each element should be a line in the printout.
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-pkg_installation_plan-clone"></a>}}
\if{latex}{\out{\hypertarget{method-pkg_installation_plan-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{pkg_installation_plan$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
# Unit tests for the Cassette class: construction and ejection.
context("Cassette")

test_that("Cassette", {
  # The exported Cassette object is an R6 generator, not an instance.
  expect_is(Cassette, "R6ClassGenerator")
  # Instantiating with only a name yields a Cassette R6 object.
  cassette <- Cassette$new(name = "stuff")
  expect_is(cassette, "R6")
  expect_is(cassette, "Cassette")
  # Clean up so the cassette does not leak into later tests.
  cassette$eject()
})
test_that("Cassette fails well", {
expect_error(Cassette$new(), "\"name\" is missing")
})
test_that("Cassette fails well with invalid record mode", {
expect_error(
Cassette$new(name = "stuff2", record = "asdfadfs"),
"'record' value of 'asdfadfs' is not in the allowed set"
)
})
test_that("Cassette fails well with invalid request matchers", {
expect_error(
Cassette$new(name = "stuff2", match_requests_on = "x"),
"1 or more 'match_requests_on' values \\(x\\) is not in the allowed set"
)
})
test_that("Cassette fails well with unsupported matcher", {
expect_error(Cassette$new("foobar89", match_requests_on = "host"),
"we do not yet support host and path matchers")
})
test_that("make_http_interaction works as expected", {
#### Prepare http responses
# crul_resp1 <- crul::HttpClient$new("https://httpbin.org/get?foo=bar")$get()
# save(crul_resp1, file = "tests/testthat/crul_resp1.rda", version = 2)
# crul_resp2 <- crul::HttpClient$new("https://httpbin.org/image/png")$get()
# save(crul_resp2, file = "tests/testthat/crul_resp2.rda", version = 2)
# httr_resp1 <- httr::GET("https://httpbin.org/get?foo=bar")
# save(httr_resp1, file = "tests/testthat/httr_resp1.rda", version = 2)
# httr_resp2 <- httr::GET("https://httpbin.org/image/png")
# save(httr_resp2, file = "tests/testthat/httr_resp2.rda", version = 2)
# make a cassettes
zz <- Cassette$new(name = "bluecheese")
# crul, with non-image response body
# $response$body should be class `character`
load("crul_resp1.rda")
aa <- zz$make_http_interaction(crul_resp1)
expect_is(aa, "HTTPInteraction")
expect_is(aa$request, "Request")
expect_is(aa$response, "VcrResponse")
expect_is(aa$response$body, "character")
# crul, with image response body
# $response$body should be class `raw`
load("crul_resp2.rda")
bb <- zz$make_http_interaction(crul_resp2)
expect_is(bb, "HTTPInteraction")
expect_is(bb$request, "Request")
expect_is(bb$response, "VcrResponse")
expect_is(bb$response$body, "raw")
# eject cassette
zz$eject()
})
# cleanup
unlink(file.path(vcr_configuration()$dir, "stuff.yml"))
unlink(file.path(vcr_configuration()$dir, "stuff2.yml"))
unlink(file.path(vcr_configuration()$dir, "foobar89.yml"))
unlink(file.path(vcr_configuration()$dir, "bluecheese.yml"))
|
/tests/testthat/test-Cassette.R
|
permissive
|
alex-gable/vcr
|
R
| false
| false
| 2,536
|
r
|
# Tests for the Cassette R6 class: construction, constructor-argument
# validation, and conversion of crul responses into recorded HTTP
# interactions.
context("Cassette")
test_that("Cassette", {
  # The generator itself, then a live instance.
  expect_is(Cassette, "R6ClassGenerator")
  cas <- Cassette$new(name = "stuff")
  expect_is(cas, "R6")
  expect_is(cas, "Cassette")
  # Always eject so the cassette is not left active for later tests.
  cas$eject()
})
test_that("Cassette fails well", {
  # A name is mandatory.
  expect_error(Cassette$new(), "\"name\" is missing")
})
test_that("Cassette fails well with invalid record mode", {
  # Record mode must come from the allowed set.
  expect_error(
    Cassette$new(name = "stuff2", record = "asdfadfs"),
    "'record' value of 'asdfadfs' is not in the allowed set"
  )
})
test_that("Cassette fails well with invalid request matchers", {
  # Matcher names must come from the allowed set.
  expect_error(
    Cassette$new(name = "stuff2", match_requests_on = "x"),
    "1 or more 'match_requests_on' values \\(x\\) is not in the allowed set"
  )
})
test_that("Cassette fails well with unsupported matcher", {
  # "host" (and "path") are recognized but not yet implemented.
  expect_error(Cassette$new("foobar89", match_requests_on = "host"),
    "we do not yet support host and path matchers")
})
test_that("make_http_interaction works as expected", {
  # Fixtures were captured once with the commands below and committed as
  # .rda files so these tests run offline:
  # crul_resp1 <- crul::HttpClient$new("https://httpbin.org/get?foo=bar")$get()
  # save(crul_resp1, file = "tests/testthat/crul_resp1.rda", version = 2)
  # crul_resp2 <- crul::HttpClient$new("https://httpbin.org/image/png")$get()
  # save(crul_resp2, file = "tests/testthat/crul_resp2.rda", version = 2)
  # httr_resp1 <- httr::GET("https://httpbin.org/get?foo=bar")
  # save(httr_resp1, file = "tests/testthat/httr_resp1.rda", version = 2)
  # httr_resp2 <- httr::GET("https://httpbin.org/image/png")
  # save(httr_resp2, file = "tests/testthat/httr_resp2.rda", version = 2)
  cass <- Cassette$new(name = "bluecheese")
  # A crul response with a textual body yields a `character` body.
  load("crul_resp1.rda")
  text_int <- cass$make_http_interaction(crul_resp1)
  expect_is(text_int, "HTTPInteraction")
  expect_is(text_int$request, "Request")
  expect_is(text_int$response, "VcrResponse")
  expect_is(text_int$response$body, "character")
  # A crul response with an image body yields a `raw` body.
  load("crul_resp2.rda")
  img_int <- cass$make_http_interaction(crul_resp2)
  expect_is(img_int, "HTTPInteraction")
  expect_is(img_int$request, "Request")
  expect_is(img_int$response, "VcrResponse")
  expect_is(img_int$response$body, "raw")
  cass$eject()
})
# Remove every cassette file the tests above may have written.
for (cassette_name in c("stuff", "stuff2", "foobar89", "bluecheese")) {
  unlink(file.path(vcr_configuration()$dir, paste0(cassette_name, ".yml")))
}
|
# Script to run CNOGpro: detect copy-number variants (CNVs) from read
# coverage along a bacterial reference genome.
#
# Usage: Rscript <script> <isolate> [windowlength]
#   <isolate>      prefix of the input hits file (<isolate>.hits) and of
#                  every output file written below
#   [windowlength] optional window size passed to CNOGpro; defaults to 1000
#
# NOTE(review): no library(CNOGpro) call is visible here; the package is
# presumably attached before this script runs -- confirm how it is loaded.
args <- commandArgs(trailingOnly = TRUE)
iso <- args[1]
# Optional second CLI argument overrides the window length.
if (length(args) > 1) {
window <- as.numeric(args[2])
} else {
window <- 1000
}
# prob <- as.numeric(args[3])
# HMM change probability; a third CLI argument was considered (commented
# line above) but the value is currently hard-coded.
prob <- 1e-4
# Run CNOGpro: read per-position hit counts against the GenBank reference,
# GC-normalize the coverage, then segment copy number with an HMM.
cnv<- CNOGpro(hitsfile=paste0(iso,".hits"), gbkfile="reference/ref.gbk", windowlength=window)
cnv_norm <- normalizeGC(cnv)
cnv_hmm <- runHMM(cnv_norm, changeprob = prob)
# Persist the full HMM result object for later inspection.
saveRDS(cnv_hmm, file = paste0(iso, ".cnv.Rda"))
# Bootstrap-based alternative to the HMM, currently disabled:
#cnv_bootstrap <- runBootstrap(cnv_norm)
#saveRDS(cnv_bootstrap, file = paste0(iso, ".cnv.Rda"))
# Extract the per-window HMM table and save it.
#cnv_hmm <- cnv_bootstrap
df_cnv <- cnv_hmm$HMMtable
write.table(df_cnv, file = paste0(iso, ".cnv.hmm.tab"))
# Filter regions with > 1 copy and save in BED format. This will raise an
# error and terminate the script if there are no CNVs.
df_cnv_clean <- subset(df_cnv, State > 1)
df_cnv_clean["chrom"] <- cnv_hmm$accession
df_cnv_clean <- df_cnv_clean[c("chrom","Startpos","Endpos","State")]
write.table(df_cnv_clean, file = paste0(iso, ".cnv.bed"), sep = "\t", quote = F, row.names = F, col.names = F)
# Extract the per-gene copy-number table and save it.
df_cnv_genes <- cnv_hmm$genes
write.table(df_cnv_genes, file = paste0(iso, ".cnv.genes.tab"), sep = "\t", quote = F, row.names = F, col.names = T)
# Filter genes with > 1 copy and save in BED format. This will raise an
# error and terminate the script if there are no CNVs.
df_cnv_genes_clean <- subset(df_cnv_genes, CN_HMM > 1)
df_cnv_genes_clean["chrom"] <- cnv_hmm$accession
df_cnv_genes_clean <- df_cnv_genes_clean[c("chrom","Left","Right","Locus","Strand","Type","Length","CN_HMM")]
write.table(df_cnv_genes_clean, file = paste0(iso, ".cnv.genes.bed"), sep = "\t", quote = F, row.names = F, col.names = F)
|
/variants_calling/run-CNOGpro.R
|
no_license
|
stefanogg/staph_adaptation_paper
|
R
| false
| false
| 1,711
|
r
|
# Script to run CNOGpro: detect copy-number variants (CNVs) from read
# coverage along a bacterial reference genome.
#
# Usage: Rscript <script> <isolate> [windowlength]
#   <isolate>      prefix of the input hits file (<isolate>.hits) and of
#                  every output file written below
#   [windowlength] optional window size passed to CNOGpro; defaults to 1000
#
# NOTE(review): no library(CNOGpro) call is visible here; the package is
# presumably attached before this script runs -- confirm how it is loaded.
args <- commandArgs(trailingOnly = TRUE)
iso <- args[1]
# Optional second CLI argument overrides the window length.
if (length(args) > 1) {
window <- as.numeric(args[2])
} else {
window <- 1000
}
# prob <- as.numeric(args[3])
# HMM change probability; a third CLI argument was considered (commented
# line above) but the value is currently hard-coded.
prob <- 1e-4
# Run CNOGpro: read per-position hit counts against the GenBank reference,
# GC-normalize the coverage, then segment copy number with an HMM.
cnv<- CNOGpro(hitsfile=paste0(iso,".hits"), gbkfile="reference/ref.gbk", windowlength=window)
cnv_norm <- normalizeGC(cnv)
cnv_hmm <- runHMM(cnv_norm, changeprob = prob)
# Persist the full HMM result object for later inspection.
saveRDS(cnv_hmm, file = paste0(iso, ".cnv.Rda"))
# Bootstrap-based alternative to the HMM, currently disabled:
#cnv_bootstrap <- runBootstrap(cnv_norm)
#saveRDS(cnv_bootstrap, file = paste0(iso, ".cnv.Rda"))
# Extract the per-window HMM table and save it.
#cnv_hmm <- cnv_bootstrap
df_cnv <- cnv_hmm$HMMtable
write.table(df_cnv, file = paste0(iso, ".cnv.hmm.tab"))
# Filter regions with > 1 copy and save in BED format. This will raise an
# error and terminate the script if there are no CNVs.
df_cnv_clean <- subset(df_cnv, State > 1)
df_cnv_clean["chrom"] <- cnv_hmm$accession
df_cnv_clean <- df_cnv_clean[c("chrom","Startpos","Endpos","State")]
write.table(df_cnv_clean, file = paste0(iso, ".cnv.bed"), sep = "\t", quote = F, row.names = F, col.names = F)
# Extract the per-gene copy-number table and save it.
df_cnv_genes <- cnv_hmm$genes
write.table(df_cnv_genes, file = paste0(iso, ".cnv.genes.tab"), sep = "\t", quote = F, row.names = F, col.names = T)
# Filter genes with > 1 copy and save in BED format. This will raise an
# error and terminate the script if there are no CNVs.
df_cnv_genes_clean <- subset(df_cnv_genes, CN_HMM > 1)
df_cnv_genes_clean["chrom"] <- cnv_hmm$accession
df_cnv_genes_clean <- df_cnv_genes_clean[c("chrom","Left","Right","Locus","Strand","Type","Length","CN_HMM")]
write.table(df_cnv_genes_clean, file = paste0(iso, ".cnv.genes.bed"), sep = "\t", quote = F, row.names = F, col.names = F)
|
<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="Hosting.Azure.PushmarketThree" generation="1" functional="0" release="0" Id="12f10db5-6e6f-406c-98e2-a6a928152e7a" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="Hosting.Azure.PushmarketThreeGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="ArtOfGroundFighting.Web:AogfEndpoint" protocol="http">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:AogfEndpoint" />
</inToChannel>
</inPort>
<inPort name="ArtOfGroundFighting.Web:AogfEndpointAdministrative" protocol="http">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:AogfEndpointAdministrative" />
</inToChannel>
</inPort>
<inPort name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.WebInstances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.WebInstances" />
</maps>
</aCS>
<aCS name="Certificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapCertificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:ArtOfGroundFighting.Web:AogfEndpoint">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/AogfEndpoint" />
</toPorts>
</lBChannel>
<lBChannel name="LB:ArtOfGroundFighting.Web:AogfEndpointAdministrative">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/AogfEndpointAdministrative" />
</toPorts>
</lBChannel>
<lBChannel name="LB:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</toPorts>
</lBChannel>
<sFSwitchChannel name="SW:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</toPorts>
</sFSwitchChannel>
</channels>
<maps>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</setting>
</map>
<map name="MapArtOfGroundFighting.WebInstances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebInstances" />
</setting>
</map>
<map name="MapCertificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" kind="Identity">
<certificate>
<certificateMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</map>
</maps>
<components>
<groupHascomponents>
<role name="ArtOfGroundFighting.Web" generation="1" functional="0" release="0" software="C:\DEV\Pushmarket\Code\Hosting.Azure.PushmarketThree\csx\Debug\roles\ArtOfGroundFighting.Web" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="1792" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="AogfEndpoint" protocol="http" portRanges="80" />
<inPort name="AogfEndpointAdministrative" protocol="http" portRanges="8080" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp" portRanges="3389" />
<outPort name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp">
<outToChannel>
<sFSwitchChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/SW:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</outToChannel>
</outPort>
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="ArtOfGroundFighting.Web" xmlns="urn:azure:m:v1"><r name="ArtOfGroundFighting.Web"><e name="AogfEndpoint" /><e name="AogfEndpointAdministrative" /><e name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" /><e name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
<storedcertificates>
<storedCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateStore="My" certificateLocation="System">
<certificate>
<certificateMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</storedCertificate>
</storedcertificates>
<certificates>
<certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificates>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebInstances" />
<sCSPolicyUpdateDomainMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebUpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebFaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="ArtOfGroundFighting.WebUpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="ArtOfGroundFighting.WebFaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="ArtOfGroundFighting.WebInstances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="22dcb072-47ae-4f98-a661-f7ea7d27d41a" ref="Microsoft.RedDog.Contract\ServiceContract\Hosting.Azure.PushmarketThreeContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="e0416662-5153-4dce-861b-d533d57f0c7a" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:AogfEndpoint@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:AogfEndpoint" />
</inPort>
</interfaceReference>
<interfaceReference Id="e41bdc2f-9df1-428b-9b7d-929acda2927a" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:AogfEndpointAdministrative@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:AogfEndpointAdministrative" />
</inPort>
</interfaceReference>
<interfaceReference Id="5a3b5e02-5d4a-4ae0-8783-4676b5499c87" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel>
|
/Hosting.Azure.PushmarketThree/csx/Debug/ServiceDefinition.rd
|
no_license
|
stackthatcode/Pleiades
|
R
| false
| false
| 14,344
|
rd
|
<?xml version="1.0" encoding="utf-8"?>
<serviceModel xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" name="Hosting.Azure.PushmarketThree" generation="1" functional="0" release="0" Id="12f10db5-6e6f-406c-98e2-a6a928152e7a" dslVersion="1.2.0.0" xmlns="http://schemas.microsoft.com/dsltools/RDSM">
<groups>
<group name="Hosting.Azure.PushmarketThreeGroup" generation="1" functional="0" release="0">
<componentports>
<inPort name="ArtOfGroundFighting.Web:AogfEndpoint" protocol="http">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:AogfEndpoint" />
</inToChannel>
</inPort>
<inPort name="ArtOfGroundFighting.Web:AogfEndpointAdministrative" protocol="http">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:AogfEndpointAdministrative" />
</inToChannel>
</inPort>
<inPort name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp">
<inToChannel>
<lBChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/LB:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inToChannel>
</inPort>
</componentports>
<settings>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</maps>
</aCS>
<aCS name="ArtOfGroundFighting.WebInstances" defaultValue="[1,1,1]">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapArtOfGroundFighting.WebInstances" />
</maps>
</aCS>
<aCS name="Certificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" defaultValue="">
<maps>
<mapMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/MapCertificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</maps>
</aCS>
</settings>
<channels>
<lBChannel name="LB:ArtOfGroundFighting.Web:AogfEndpoint">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/AogfEndpoint" />
</toPorts>
</lBChannel>
<lBChannel name="LB:ArtOfGroundFighting.Web:AogfEndpointAdministrative">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/AogfEndpointAdministrative" />
</toPorts>
</lBChannel>
<lBChannel name="LB:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</toPorts>
</lBChannel>
<sFSwitchChannel name="SW:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp">
<toPorts>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</toPorts>
</sFSwitchChannel>
</channels>
<maps>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" />
</setting>
</map>
<map name="MapArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" kind="Identity">
<setting>
<aCSMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" />
</setting>
</map>
<map name="MapArtOfGroundFighting.WebInstances" kind="Identity">
<setting>
<sCSPolicyIDMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebInstances" />
</setting>
</map>
<map name="MapCertificate|ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" kind="Identity">
<certificate>
<certificateMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</map>
</maps>
<components>
<groupHascomponents>
<role name="ArtOfGroundFighting.Web" generation="1" functional="0" release="0" software="C:\DEV\Pushmarket\Code\Hosting.Azure.PushmarketThree\csx\Debug\roles\ArtOfGroundFighting.Web" entryPoint="base\x64\WaHostBootstrapper.exe" parameters="base\x64\WaIISHost.exe " memIndex="1792" hostingEnvironment="frontendadmin" hostingEnvironmentVersion="2">
<componentports>
<inPort name="AogfEndpoint" protocol="http" portRanges="80" />
<inPort name="AogfEndpointAdministrative" protocol="http" portRanges="8080" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" protocol="tcp" />
<inPort name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp" portRanges="3389" />
<outPort name="ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" protocol="tcp">
<outToChannel>
<sFSwitchChannelMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/SW:ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" />
</outToChannel>
</outPort>
</componentports>
<settings>
<aCS name="Microsoft.WindowsAzure.Plugins.Diagnostics.ConnectionString" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountEncryptedPassword" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountExpiration" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.AccountUsername" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Enabled" defaultValue="" />
<aCS name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.Enabled" defaultValue="" />
<aCS name="__ModelData" defaultValue="<m role="ArtOfGroundFighting.Web" xmlns="urn:azure:m:v1"><r name="ArtOfGroundFighting.Web"><e name="AogfEndpoint" /><e name="AogfEndpointAdministrative" /><e name="Microsoft.WindowsAzure.Plugins.RemoteAccess.Rdp" /><e name="Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" /></r></m>" />
</settings>
<resourcereferences>
<resourceReference name="DiagnosticStore" defaultAmount="[4096,4096,4096]" defaultSticky="true" kind="Directory" />
<resourceReference name="EventStore" defaultAmount="[1000,1000,1000]" defaultSticky="false" kind="LogStore" />
</resourcereferences>
<storedcertificates>
<storedCertificate name="Stored0Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" certificateStore="My" certificateLocation="System">
<certificate>
<certificateMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web/Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificate>
</storedCertificate>
</storedcertificates>
<certificates>
<certificate name="Microsoft.WindowsAzure.Plugins.RemoteAccess.PasswordEncryption" />
</certificates>
</role>
<sCSPolicy>
<sCSPolicyIDMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebInstances" />
<sCSPolicyUpdateDomainMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebUpgradeDomains" />
<sCSPolicyFaultDomainMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.WebFaultDomains" />
</sCSPolicy>
</groupHascomponents>
</components>
<sCSPolicy>
<sCSPolicyUpdateDomain name="ArtOfGroundFighting.WebUpgradeDomains" defaultPolicy="[5,5,5]" />
<sCSPolicyFaultDomain name="ArtOfGroundFighting.WebFaultDomains" defaultPolicy="[2,2,2]" />
<sCSPolicyID name="ArtOfGroundFighting.WebInstances" defaultPolicy="[1,1,1]" />
</sCSPolicy>
</group>
</groups>
<implements>
<implementation Id="22dcb072-47ae-4f98-a661-f7ea7d27d41a" ref="Microsoft.RedDog.Contract\ServiceContract\Hosting.Azure.PushmarketThreeContract@ServiceDefinition">
<interfacereferences>
<interfaceReference Id="e0416662-5153-4dce-861b-d533d57f0c7a" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:AogfEndpoint@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:AogfEndpoint" />
</inPort>
</interfaceReference>
<interfaceReference Id="e41bdc2f-9df1-428b-9b7d-929acda2927a" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:AogfEndpointAdministrative@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:AogfEndpointAdministrative" />
</inPort>
</interfaceReference>
<interfaceReference Id="5a3b5e02-5d4a-4ae0-8783-4676b5499c87" ref="Microsoft.RedDog.Contract\Interface\ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput@ServiceDefinition">
<inPort>
<inPortMoniker name="/Hosting.Azure.PushmarketThree/Hosting.Azure.PushmarketThreeGroup/ArtOfGroundFighting.Web:Microsoft.WindowsAzure.Plugins.RemoteForwarder.RdpInput" />
</inPort>
</interfaceReference>
</interfacereferences>
</implementation>
</implements>
</serviceModel>
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretize_cart.R
\name{step_discretize_cart}
\alias{step_discretize_cart}
\alias{tidy.step_discretize_cart}
\title{Discretize numeric variables with CART}
\usage{
step_discretize_cart(
recipe,
...,
role = NA,
trained = FALSE,
outcome = NULL,
cost_complexity = 0.01,
tree_depth = 10,
min_n = 20,
rules = NULL,
skip = FALSE,
id = rand_id("discretize_cart")
)
\method{tidy}{step_discretize_cart}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{One or more selector functions to choose which variables are
affected by the step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Defaults to \code{"predictor"}.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated.}
\item{outcome}{A call to \code{vars} to specify which variable is used as the
outcome to train CART models in order to discretize explanatory
variables.}
\item{cost_complexity}{The regularization parameter. Any split that does not
decrease the overall lack of fit by a factor of \code{cost_complexity} is not
attempted. Corresponds to \code{cp} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 0.01.}
\item{tree_depth}{The \emph{maximum} depth in the final tree. Corresponds to
\code{maxdepth} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 10.}
\item{min_n}{The number of data points in a node required to continue
splitting. Corresponds to \code{minsplit} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 20.}
\item{rules}{The splitting rules of the best CART tree to retain for
each variable. If length zero, splitting could not be used on that column.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}? While all operations are baked
when \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_discretize_cart} object.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of existing steps (if any).
}
\description{
\code{step_discretize_cart} creates a \emph{specification} of a recipe step that will
discretize numeric data (e.g. integers or doubles) into bins in a
supervised way using a CART model.
}
\details{
\code{step_discretize_cart()} creates non-uniform bins from numerical
variables by utilizing the information about the outcome variable and
applying a CART model.
The best selection of buckets for each variable is selected using
the standard cost-complexity pruning of CART, which makes this
discretization method resistant to overfitting.
This step requires the \pkg{rpart} package. If not installed, the
step will stop with a note about installing the package.
Note that the original data will be replaced with the new bins.
}
\examples{
library(modeldata)
data(credit_data)
library(rsample)
split <- initial_split(credit_data, strata = "Status")
credit_data_tr <- training(split)
credit_data_te <- testing(split)
xgb_rec <-
recipe(Status ~ ., data = credit_data_tr) \%>\%
step_discretize_cart(all_numeric(), outcome = "Status", id = "cart splits")
xgb_rec <- prep(xgb_rec, training = credit_data_tr)
# The splits:
tidy(xgb_rec, id = "cart splits")
xgb_test_bins <- bake(xgb_rec, credit_data_te)
}
\seealso{
\code{\link[recipes:recipe]{recipes::recipe()}} \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}
}
\concept{discretization}
\concept{factors}
\concept{preprocessing}
\keyword{binning}
|
/man/step_discretize_cart.Rd
|
no_license
|
konradsemsch/embed-1
|
R
| false
| true
| 3,996
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/discretize_cart.R
\name{step_discretize_cart}
\alias{step_discretize_cart}
\alias{tidy.step_discretize_cart}
\title{Discretize numeric variables with CART}
\usage{
step_discretize_cart(
recipe,
...,
role = NA,
trained = FALSE,
outcome = NULL,
cost_complexity = 0.01,
tree_depth = 10,
min_n = 20,
rules = NULL,
skip = FALSE,
id = rand_id("discretize_cart")
)
\method{tidy}{step_discretize_cart}(x, ...)
}
\arguments{
\item{recipe}{A recipe object. The step will be added to the sequence of
operations for this recipe.}
\item{...}{One or more selector functions to choose which variables are
affected by the step. See \code{\link[=selections]{selections()}} for more details.}
\item{role}{Defaults to \code{"predictor"}.}
\item{trained}{A logical to indicate if the quantities for preprocessing
have been estimated.}
\item{outcome}{A call to \code{vars} to specify which variable is used as the
outcome to train CART models in order to discretize explanatory
variables.}
\item{cost_complexity}{The regularization parameter. Any split that does not
decrease the overall lack of fit by a factor of \code{cost_complexity} is not
attempted. Corresponds to \code{cp} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 0.01.}
\item{tree_depth}{The \emph{maximum} depth in the final tree. Corresponds to
\code{maxdepth} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 10.}
\item{min_n}{The number of data points in a node required to continue
splitting. Corresponds to \code{minsplit} in \code{\link[rpart:rpart]{rpart::rpart()}}. Defaults to 20.}
\item{rules}{The splitting rules of the best CART tree to retain for
each variable. If length zero, splitting could not be used on that column.}
\item{skip}{A logical. Should the step be skipped when the
recipe is baked by \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}? While all operations are baked
when \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} is run, some operations may not be able to be
conducted on new data (e.g. processing the outcome variable(s)).
Care should be taken when using \code{skip = TRUE} as it may affect
the computations for subsequent operations}
\item{id}{A character string that is unique to this step to identify it.}
\item{x}{A \code{step_discretize_cart} object.}
}
\value{
An updated version of \code{recipe} with the new step added to the
sequence of existing steps (if any).
}
\description{
\code{step_discretize_cart} creates a \emph{specification} of a recipe step that will
discretize numeric data (e.g. integers or doubles) into bins in a
supervised way using a CART model.
}
\details{
\code{step_discretize_cart()} creates non-uniform bins from numerical
variables by utilizing the information about the outcome variable and
applying a CART model.
The best selection of buckets for each variable is selected using
the standard cost-complexity pruning of CART, which makes this
discretization method resistant to overfitting.
This step requires the \pkg{rpart} package. If not installed, the
step will stop with a note about installing the package.
Note that the original data will be replaced with the new bins.
}
\examples{
library(modeldata)
data(credit_data)
library(rsample)
split <- initial_split(credit_data, strata = "Status")
credit_data_tr <- training(split)
credit_data_te <- testing(split)
xgb_rec <-
recipe(Status ~ ., data = credit_data_tr) \%>\%
step_discretize_cart(all_numeric(), outcome = "Status", id = "cart splits")
xgb_rec <- prep(xgb_rec, training = credit_data_tr)
# The splits:
tidy(xgb_rec, id = "cart splits")
xgb_test_bins <- bake(xgb_rec, credit_data_te)
}
\seealso{
\code{\link[recipes:recipe]{recipes::recipe()}} \code{\link[recipes:prep.recipe]{recipes::prep.recipe()}} \code{\link[recipes:bake.recipe]{recipes::bake.recipe()}}
}
\concept{discretization}
\concept{factors}
\concept{preprocessing}
\keyword{binning}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_cplex_solver.R
\name{add_cplex_solver}
\alias{add_cplex_solver}
\title{Add a \emph{CPLEX} solver}
\usage{
add_cplex_solver(
x,
gap = 0.1,
time_limit = .Machine$integer.max,
presolve = TRUE,
threads = 1,
verbose = TRUE
)
}
\arguments{
\item{x}{\code{\link[=problem]{problem()}} (i.e. \code{\linkS4class{ConservationProblem}}) object.}
\item{gap}{\code{numeric} gap to optimality. This gap is relative
and expresses the acceptable deviance from the optimal objective.
For example, a value of 0.01 will result in the solver stopping when
it has found a solution within 1\% of optimality.
Additionally, a value of 0 will result in the solver stopping
when it has found an optimal solution.
The default value is 0.1 (i.e. 10\% from optimality).}
\item{time_limit}{\code{numeric} time limit (seconds) for generating solutions.
The solver will return the current best solution when this time limit is
exceeded. The default value is the largest integer value
(i.e. \code{.Machine$integer.max}), effectively meaning that solver
will keep running until a solution within the optimality gap is found.}
\item{presolve}{\code{logical} attempt to simplify the
problem before solving it? Defaults to \code{TRUE}.}
\item{threads}{\code{integer} number of threads to use for the
optimization algorithm. The default value is 1.}
\item{verbose}{\code{logical} should information be printed while solving
optimization problems? Defaults to \code{TRUE}.}
}
\value{
Object (i.e. \code{\linkS4class{ConservationProblem}}) with the solver
added to it.
}
\description{
Specify that the
\href{https://www.ibm.com/analytics/cplex-optimizer}{\emph{IBM CPLEX}} software
(IBM 2017) should be used to solve a conservation planning \code{\link[=problem]{problem()}}.
This function can also be used to customize the behavior of the solver.
It requires the \pkg{cplexAPI} package to be installed
(see below for installation instructions).
}
\details{
\href{https://www.ibm.com/analytics/cplex-optimizer}{\emph{IBM CPLEX}} is a
commercial optimization software. It is faster than
the available open source solvers (e.g. \code{\link[=add_lpsymphony_solver]{add_lpsymphony_solver()}} and
\code{\link[=add_rsymphony_solver]{add_rsymphony_solver()}}).
Although formal benchmarks examining the performance of this solver for
conservation planning problems have yet to be completed, preliminary
analyses suggest that it performs slightly slower than the \emph{Gurobi}
solver (i.e. \code{\link[=add_gurobi_solver]{add_gurobi_solver()}}).
We recommend using this solver if the \emph{Gurobi} solver is not available.
Licenses are available for the \emph{IBM CPLEX} software to academics at no cost
(see \url{https://www.ibm.com/products/ilog-cplex-optimization-studio}).
}
\section{Installation}{
The \pkg{cplexAPI} package is used to interface with \emph{IBM CPLEX}. To install
this package, the \code{CPLEX_BIN} variable must be set (similar to
the \code{GUROBI_HOME} variable for the \emph{Gurobi} software) to specify
the file path for the \emph{CPLEX} software. For example, on a Linux system,
this variable can be specified by adding the following text to the
\verb{~/.bashrc} file:\preformatted{ export CPLEX_BIN="/opt/ibm/ILOG/CPLEX_Studio128/cplex/bin/x86-64_linux/cplex"
}
Note that you may need to change the version
number in the file path (i.e. \code{"CPLEX_Studio128"}). For more information
on installing the \pkg{cplexAPI} package, please see the
\href{https://CRAN.R-project.org/package=cplexAPI/INSTALL}{official installation instructions for the package}.
}
\examples{
\dontrun{
# load data
data(sim_pu_raster, sim_features)
# create problem
p <- problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.1) \%>\%
add_binary_decisions() \%>\%
add_cplex_solver(gap = 0.1, time_limit = 5, verbose = FALSE)
# generate solution
s <- solve(p)
# plot solution
plot(s, main = "solution", axes = FALSE, box = FALSE)
}
}
\references{
IBM (2017) IBM ILOG CPLEX Optimization Studio CPLEX User's Manual.
Version 12 Release 8. IBM ILOG CPLEX Division, Incline Village, NV.
}
\seealso{
\link{solvers}.
}
|
/man/add_cplex_solver.Rd
|
no_license
|
diminera/prioritizr
|
R
| false
| true
| 4,226
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_cplex_solver.R
\name{add_cplex_solver}
\alias{add_cplex_solver}
\title{Add a \emph{CPLEX} solver}
\usage{
add_cplex_solver(
x,
gap = 0.1,
time_limit = .Machine$integer.max,
presolve = TRUE,
threads = 1,
verbose = TRUE
)
}
\arguments{
\item{x}{\code{\link[=problem]{problem()}} (i.e. \code{\linkS4class{ConservationProblem}}) object.}
\item{gap}{\code{numeric} gap to optimality. This gap is relative
and expresses the acceptable deviance from the optimal objective.
For example, a value of 0.01 will result in the solver stopping when
it has found a solution within 1\% of optimality.
Additionally, a value of 0 will result in the solver stopping
when it has found an optimal solution.
The default value is 0.1 (i.e. 10\% from optimality).}
\item{time_limit}{\code{numeric} time limit (seconds) for generating solutions.
The solver will return the current best solution when this time limit is
exceeded. The default value is the largest integer value
(i.e. \code{.Machine$integer.max}), effectively meaning that solver
will keep running until a solution within the optimality gap is found.}
\item{presolve}{\code{logical} attempt to simplify the
problem before solving it? Defaults to \code{TRUE}.}
\item{threads}{\code{integer} number of threads to use for the
optimization algorithm. The default value is 1.}
\item{verbose}{\code{logical} should information be printed while solving
optimization problems? Defaults to \code{TRUE}.}
}
\value{
Object (i.e. \code{\linkS4class{ConservationProblem}}) with the solver
added to it.
}
\description{
Specify that the
\href{https://www.ibm.com/analytics/cplex-optimizer}{\emph{IBM CPLEX}} software
(IBM 2017) should be used to solve a conservation planning \code{\link[=problem]{problem()}}.
This function can also be used to customize the behavior of the solver.
It requires the \pkg{cplexAPI} package to be installed
(see below for installation instructions).
}
\details{
\href{https://www.ibm.com/analytics/cplex-optimizer}{\emph{IBM CPLEX}} is a
commercial optimization software. It is faster than
the available open source solvers (e.g. \code{\link[=add_lpsymphony_solver]{add_lpsymphony_solver()}} and
\code{\link[=add_rsymphony_solver]{add_rsymphony_solver()}}).
Although formal benchmarks examining the performance of this solver for
conservation planning problems have yet to be completed, preliminary
analyses suggest that it performs slightly slower than the \emph{Gurobi}
solver (i.e. \code{\link[=add_gurobi_solver]{add_gurobi_solver()}}).
We recommend using this solver if the \emph{Gurobi} solver is not available.
Licenses are available for the \emph{IBM CPLEX} software to academics at no cost
(see \url{https://www.ibm.com/products/ilog-cplex-optimization-studio}).
}
\section{Installation}{
The \pkg{cplexAPI} package is used to interface with \emph{IBM CPLEX}. To install
this package, the \code{CPLEX_BIN} variable must be set (similar to
the \code{GUROBI_HOME} variable for the \emph{Gurobi} software) to specify
the file path for the \emph{CPLEX} software. For example, on a Linux system,
this variable can be specified by adding the following text to the
\verb{~/.bashrc} file:\preformatted{ export CPLEX_BIN="/opt/ibm/ILOG/CPLEX_Studio128/cplex/bin/x86-64_linux/cplex"
}
Note that you may need to change the version
number in the file path (i.e. \code{"CPLEX_Studio128"}). For more information
on installing the \pkg{cplexAPI} package, please see the
\href{https://CRAN.R-project.org/package=cplexAPI/INSTALL}{official installation instructions for the package}.
}
\examples{
\dontrun{
# load data
data(sim_pu_raster, sim_features)
# create problem
p <- problem(sim_pu_raster, sim_features) \%>\%
add_min_set_objective() \%>\%
add_relative_targets(0.1) \%>\%
add_binary_decisions() \%>\%
add_cplex_solver(gap = 0.1, time_limit = 5, verbose = FALSE)
# generate solution
s <- solve(p)
# plot solution
plot(s, main = "solution", axes = FALSE, box = FALSE)
}
}
\references{
IBM (2017) IBM ILOG CPLEX Optimization Studio CPLEX User's Manual.
Version 12 Release 8. IBM ILOG CPLEX Division, Incline Village, NV.
}
\seealso{
\link{solvers}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{munck_pmm}
\alias{munck_pmm}
\alias{munck}
\title{Munck Index of Democracy}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 342 rows and 7 columns.}
\source{
Pemstein, Daniel, Stephen A. Meserve, and James Melton. 2013.
"Replication data for: Democratic Compromise: A Latent Variable Analysis of
Ten Measures of Regime Type." In: Harvard Dataverse.
http://hdl.handle.net/1902.1/PMM.
Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press.
}
\usage{
munck_pmm
}
\description{
0-1 index of democracy from Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press. Taken from Pemstein, Daniel, Stephen A. Meserve, and James Melton.
2013. "Replication data for: Democratic Compromise: A Latent Variable
Analysis of Ten Measures of Regime Type." In: Harvard Dataverse.
http://hdl.handle.net/1902.1/PMM. Higher values are more democratic.
}
\section{Variables}{
\describe{
\item{pmm_country}{The original country name in the PMM replication data.}
\item{year}{The calendar year. This is approximate. The surveys cover
specific periods in the original data that do not always overlap with a
single year. In particular, the year 1981 is "skipped" -- a single survey
covers Jan.1981 - Aug. 1982 and its value is assigned to 1982 here.}
\item{munck_pmm}{0-1 index of democracy from Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press. Only available for 18 Latin American countries for 19 years.} }
}
\section{Standard descriptive variables (generated by this package)}{
\describe{
\item{extended_country_name}{The name of the country in the Gleditsch-Ward
system of states, or the official name of the
entity (for non-sovereign entities and states not in the Gleditsch and Ward
system of states) or else a common name for disputed cases that do not have
an official name (e.g., Western Sahara, Hyderabad). The Gleditsch and Ward
scheme sometimes indicates the common name of the country and (in
parentheses) the name of an earlier incarnation of the state: thus, they
have Germany (Prussia), Russia (Soviet Union), Madagascar (Malagasy), etc.
For details, see Gleditsch, Kristian S. & Michael D. Ward. 1999. "Interstate
System Membership: A Revised List of the Independent States since 1816."
International Interactions 25: 393-413. The list can be found at
\url{http://privatewww.essex.ac.uk/~ksg/statelist.html}.}
\item{GWn}{Gleditsch and Ward's numeric country code, from the Gleditsch and
Ward list of independent states.}
\item{cown}{The Correlates of War numeric country code, 2016 version. This
differs from Gleditsch and Ward's numeric country code in a few cases. See
\url{http://www.correlatesofwar.org/data-sets/state-system-membership} for
the full list.}
\item{in_GW_system}{Whether the state is "in system" (that is, is
independent and sovereign), according to Gleditsch and Ward, for this
particular date. Matches at the end of the year; so, for example South
Vietnam 1975 is \code{FALSE} because, according to Gleditsch and Ward, the
country ended on April 1975 (being absorbed by North Vietnam). It is also
\code{TRUE} for dates beyond 2012 for countries that did not end by then, despite
the fact that the Gleditsch and Ward list has not been updated since.} }
}
\seealso{
Other democracy: \code{\link{LIED}}, \code{\link{PIPE}},
\code{\link{anckar}}, \code{\link{arat_pmm}},
\code{\link{blm}}, \code{\link{bmr}}, \code{\link{bnr}},
\code{\link{bollen_pmm}}, \code{\link{doorenspleet}},
\code{\link{download_fh_electoral}},
\code{\link{download_fh}}, \code{\link{download_reign}},
\code{\link{download_wgi_voice_and_accountability}},
\code{\link{eiu}}, \code{\link{fh_pmm}},
\code{\link{gwf_all}}, \code{\link{hadenius_pmm}},
\code{\link{kailitz}}, \code{\link{magaloni}},
\code{\link{mainwaring}}, \code{\link{pacl}},
\code{\link{peps}}, \code{\link{pitf}},
\code{\link{polity_pmm}},
\code{\link{polyarchy_dimensions}},
\code{\link{polyarchy}}, \code{\link{prc_gasiorowski}},
\code{\link{svmdi}}, \code{\link{svolik_regime}},
\code{\link{uds_2014}}, \code{\link{ulfelder}},
\code{\link{utip}}, \code{\link{vanhanen}},
\code{\link{wahman_teorell_hadenius}}
Other PMM replication data: \code{\link{arat_pmm}},
\code{\link{blm}}, \code{\link{bollen_pmm}},
\code{\link{fh_pmm}}, \code{\link{hadenius_pmm}},
\code{\link{mainwaring}}, \code{\link{pacl}},
\code{\link{polity_pmm}}, \code{\link{prc_gasiorowski}}
Other continuous democracy indexes: \code{\link{arat_pmm}},
\code{\link{bollen_pmm}},
\code{\link{download_wgi_voice_and_accountability}},
\code{\link{eiu}}, \code{\link{hadenius_pmm}},
\code{\link{svmdi}}, \code{\link{vanhanen}}
}
\concept{PMM replication data}
\concept{continuous democracy indexes}
\concept{democracy}
\keyword{datasets}
|
/man/munck_pmm.Rd
|
no_license
|
lgillson/democracyData
|
R
| false
| true
| 5,185
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_documentation.R
\docType{data}
\name{munck_pmm}
\alias{munck_pmm}
\alias{munck}
\title{Munck Index of Democracy}
\format{An object of class \code{tbl_df} (inherits from \code{tbl}, \code{data.frame}) with 342 rows and 7 columns.}
\source{
Pemstein, Daniel, Stephen A. Meserve, and James Melton. 2013.
"Replication data for: Democratic Compromise: A Latent Variable Analysis of
Ten Measures of Regime Type." In: Harvard Dataverse.
http://hdl.handle.net/1902.1/PMM.
Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press.
}
\usage{
munck_pmm
}
\description{
0-1 index of democracy from Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press. Taken from Pemstein, Daniel, Stephen A. Meserve, and James Melton.
2013. "Replication data for: Democratic Compromise: A Latent Variable
Analysis of Ten Measures of Regime Type." In: Harvard Dataverse.
http://hdl.handle.net/1902.1/PMM. Higher values are more democratic.
}
\section{Variables}{
\describe{
\item{pmm_country}{The original country name in the PMM replication data.}
\item{year}{The calendar year. This is approximate. The surveys cover
specific periods in the original data that do not always overlap with a
single year. In particular, the year 1981 is "skipped" -- a single survey
covers Jan.1981 - Aug. 1982 and its value is assigned to 1982 here.}
\item{munck_pmm}{0-1 index of democracy from Munck, Gerardo L. 2009. Measuring Democracy: A
Bridge Between Scholarship and Politics. Baltimore: Johns Hopkins University
Press. Only available for 18 Latin American countries for 19 years.} }
}
\section{Standard descriptive variables (generated by this package)}{
\describe{
\item{extended_country_name}{The name of the country in the Gleditsch-Ward
system of states, or the official name of the
entity (for non-sovereign entities and states not in the Gleditsch and Ward
system of states) or else a common name for disputed cases that do not have
an official name (e.g., Western Sahara, Hyderabad). The Gleditsch and Ward
scheme sometimes indicates the common name of the country and (in
parentheses) the name of an earlier incarnation of the state: thus, they
have Germany (Prussia), Russia (Soviet Union), Madagascar (Malagasy), etc.
For details, see Gleditsch, Kristian S. & Michael D. Ward. 1999. "Interstate
System Membership: A Revised List of the Independent States since 1816."
International Interactions 25: 393-413. The list can be found at
\url{http://privatewww.essex.ac.uk/~ksg/statelist.html}.}
\item{GWn}{Gleditsch and Ward's numeric country code, from the Gleditsch and
Ward list of independent states.}
\item{cown}{The Correlates of War numeric country code, 2016 version. This
differs from Gleditsch and Ward's numeric country code in a few cases. See
\url{http://www.correlatesofwar.org/data-sets/state-system-membership} for
the full list.}
\item{in_GW_system}{Whether the state is "in system" (that is, is
independent and sovereign), according to Gleditsch and Ward, for this
particular date. Matches at the end of the year; so, for example South
Vietnam 1975 is \code{FALSE} because, according to Gleditsch and Ward, the
country ended on April 1975 (being absorbed by North Vietnam). It is also
\code{TRUE} for dates beyond 2012 for countries that did not end by then, despite
the fact that the Gleditsch and Ward list has not been updated since.} }
}
\seealso{
Other democracy: \code{\link{LIED}}, \code{\link{PIPE}},
\code{\link{anckar}}, \code{\link{arat_pmm}},
\code{\link{blm}}, \code{\link{bmr}}, \code{\link{bnr}},
\code{\link{bollen_pmm}}, \code{\link{doorenspleet}},
\code{\link{download_fh_electoral}},
\code{\link{download_fh}}, \code{\link{download_reign}},
\code{\link{download_wgi_voice_and_accountability}},
\code{\link{eiu}}, \code{\link{fh_pmm}},
\code{\link{gwf_all}}, \code{\link{hadenius_pmm}},
\code{\link{kailitz}}, \code{\link{magaloni}},
\code{\link{mainwaring}}, \code{\link{pacl}},
\code{\link{peps}}, \code{\link{pitf}},
\code{\link{polity_pmm}},
\code{\link{polyarchy_dimensions}},
\code{\link{polyarchy}}, \code{\link{prc_gasiorowski}},
\code{\link{svmdi}}, \code{\link{svolik_regime}},
\code{\link{uds_2014}}, \code{\link{ulfelder}},
\code{\link{utip}}, \code{\link{vanhanen}},
\code{\link{wahman_teorell_hadenius}}
Other PMM replication data: \code{\link{arat_pmm}},
\code{\link{blm}}, \code{\link{bollen_pmm}},
\code{\link{fh_pmm}}, \code{\link{hadenius_pmm}},
\code{\link{mainwaring}}, \code{\link{pacl}},
\code{\link{polity_pmm}}, \code{\link{prc_gasiorowski}}
Other continuous democracy indexes: \code{\link{arat_pmm}},
\code{\link{bollen_pmm}},
\code{\link{download_wgi_voice_and_accountability}},
\code{\link{eiu}}, \code{\link{hadenius_pmm}},
\code{\link{svmdi}}, \code{\link{vanhanen}}
}
\concept{PMM replication data}
\concept{continuous democracy indexes}
\concept{democracy}
\keyword{datasets}
|
#' Create a conditional inference tree, minify it and cross-check the responses.
#'
#' Fits a \code{party::ctree} on the airquality data, converts it to a minified
#' binary tree map, and validates that (a) replaying predictions through the map
#' and (b) evaluating auto-generated conditional R code both reproduce the
#' model's own predictions.
#' Analysis taken from https://datawookie.netlify.com/blog/2013/05/package-party-conditional-inference-trees/
#'
#' @return \code{TRUE} if both cross-checks agree with the model predictions,
#'   \code{FALSE} otherwise.
#' @importFrom utils data
#' @importFrom stats predict
#' @export
validateBinaryTreeMap <- function(){
  # ctree cannot use rows with a missing response, so drop them up front.
  airQuality <- subset(datasets::airquality, !is.na(datasets::airquality$Ozone))
  airTree <- party::ctree(Ozone ~ ., data = airQuality,
                          controls = party::ctree_control(maxsurrogate = 3))
  ozonePrediction <- predict(airTree, newdata = airQuality)
  treeMap <- createBinaryTreeMap(airTree@tree)
  ozonePredictionMapped <- getBinaryTreeResponse(treeMap, airQuality)
  # BUG FIX: this first cross-check used to be computed and silently discarded,
  # so the function could return TRUE even when the tree map disagreed with the
  # model. Capture it and fold it into the final result.
  mappedOk <- identical(as.numeric(ozonePrediction), ozonePredictionMapped)
  # Auto-generate code containing a function definition for the conditional
  # logic and evaluate it.
  logic <- paste("evalAutoCTree <- function(Temp, Wind){\n",
                 writeTreeMapCondition(treeMap), "}")
  outScript <- paste0(tempfile(), ".R")
  on.exit(unlink(outScript), add = TRUE)  # remove the temporary script on exit
  write(logic, file = outScript)
  # local = TRUE keeps evalAutoCTree inside this function's environment rather
  # than polluting the caller's/global environment.
  source(outScript, local = TRUE)
  autoPredict <- apply(airQuality, 1,
                       function(x) evalAutoCTree(x["Temp"], x["Wind"]))
  # Round to 13 decimal places to absorb floating-point noise in the
  # round-tripped predictions.
  scriptOk <- identical(round(as.numeric(ozonePrediction), 13),
                        round(as.numeric(autoPredict), 13))
  mappedOk && scriptOk
}
|
/R/ValidateBinaryTreeMap.R
|
no_license
|
nikrepj/ctreeMinifyR
|
R
| false
| false
| 1,334
|
r
|
#' Create a conditional inference tree, minify it and cross-check the responses.
#' Analysis taken from https://datawookie.netlify.com/blog/2013/05/package-party-conditional-inference-trees/
#' @importFrom utils data
#' @importFrom stats predict
#' @export
validateBinaryTreeMap <- function(){
  # Keep only the airquality rows with an observed Ozone response.
  aq <- datasets::airquality
  aq <- aq[!is.na(aq$Ozone), ]
  # Fit the conditional inference tree.
  fit <- party::ctree(Ozone ~ ., data = aq,
                      controls = party::ctree_control(maxsurrogate = 3))
  aqNew <- aq
  # Diagnostic peeks at the column classes (results intentionally unused).
  sapply(aq[, -1], class)
  sapply(aqNew, class)
  reference <- predict(fit, newdata = aqNew)
  # Minify the fitted tree and replay the predictions through the map.
  minified <- createBinaryTreeMap(fit@tree)
  replayed <- getBinaryTreeResponse(minified, aq)
  identical(as.numeric(reference), replayed)
  # Auto-generate an R script holding the tree's conditional logic, then load it.
  code <- paste("evalAutoCTree <- function(Temp, Wind){\n",
                writeTreeMapCondition(minified), "}")
  scriptPath <- paste0(tempfile(), ".R")
  write(code, file = scriptPath)
  source(scriptPath)
  generated <- apply(aq, 1, function(row) evalAutoCTree(row["Temp"], row["Wind"]))
  # Compare at 13 decimal places to absorb floating-point noise.
  identical(round(as.numeric(reference), 13), round(as.numeric(generated), 13))
}
|
# Read in the example file:
ex <- readRDS("R_dev/haplotype_frequency_estimation_example.RDS")
# This contains counts of all of the "phenotypes", i.e. the visible genotypes
# of two different haplotypes.
# BUG FIX: the original used ex[, -grep("NN", colnames(ex))]. When NO column
# name matches "NN", grep() returns integer(0) and negative indexing with
# integer(0) drops EVERY column. grepl() with negation behaves identically
# when matches exist and is a no-op when none do.
ex <- ex[, !grepl("NN", colnames(ex))]
x <- ex[1, ]
phenos <- which(x != 0)
x <- x[phenos] # the cleaned version is the data for one row, only the cells that actually have data.
# haptable, containing the tabulated unambiguous haplotypes
hap.ex <- readRDS("R_dev/hapmat_example.RDS")
haptable <- hap.ex[1, ] # these are the counts from the unambiguous loci
haps <- c("AC", "AG", "GC", "GG")
names(haptable) <- haps
# EM estimation of haplotype frequencies from phenotype counts for one row.
# BUG FIX: the double-heterozygote count used to be hard-coded as x[4], which
# only worked for the first row of ex; it now uses the computed doub.het index,
# so any row layout with a single double heterozygote works.
#
# Args:
#   x        named numeric vector of non-zero phenotype counts; names are
#            4-character genotype strings (chars 1-2 = locus 1, chars 3-4 =
#            locus 2), e.g. "AGCG".
#   haptable named numeric vector of unambiguous haplotype counts with names
#            "AC", "AG", "GC", "GG" (these haplotype labels are still
#            hard-coded -- the function is specific to this biallelic pair).
#   sigma    convergence tolerance on the summed absolute change in haplotype
#            frequencies between EM iterations.
#
# Returns: named numeric vector of estimated haplotype frequencies (sums to 1).
single_haplotype_estimation <- function(x, haptable, sigma = 0.0001){
  # Split each phenotype name into its two single-locus genotypes.
  s1 <- substr(names(x), 1, 1)
  s2 <- substr(names(x), 2, 2)
  s3 <- substr(names(x), 3, 3)
  s4 <- substr(names(x), 4, 4)
  het.1 <- s1 != s2
  het.2 <- s3 != s4
  # Identify double heterozygotes: the only phenotypes whose haplotype phase
  # is ambiguous (AC/GG vs AG/GC).
  doub.het <- which(het.1 + het.2 == 2)
  if (length(doub.het) == 0) {
    # No ambiguous phenotype: the unambiguous counts are the whole story.
    return(haptable / sum(haptable))
  }
  # The EM update below assumes a single ambiguous phenotype class.
  stopifnot(length(doub.het) == 1)
  n.doub <- x[doub.het]
  # Starting guess: add the double-het counts to the unambiguous haplotype
  # counts assuming both phasings are equally likely.
  nhap.counts <- haptable
  ehap.counts <- nhap.counts + .5 * n.doub
  shap.freqs <- ehap.counts / sum(ehap.counts)
  # EM loop:
  # 1) Expectation: given current haplotype frequencies, the probability that
  #    a double heterozygote carries the AC/GG phasing (HWE-style draw).
  # 2) Maximization: redistribute the double-het counts accordingly and
  #    re-normalize to get updated haplotype frequencies.
  # 3) Repeat until the frequencies change by less than sigma.
  diff <- sigma + 1 # initialize above sigma so the loop runs at least once
  while (diff > sigma) {
    op1.e <- (2 * shap.freqs["AC"] * shap.freqs["GG"]) /
      ((2 * shap.freqs["AC"] * shap.freqs["GG"]) +
         (2 * shap.freqs["AG"] * shap.freqs["GC"])) # P(AC/GG phasing)
    op2.e <- 1 - op1.e                              # P(AG/GC phasing)
    # Maximization: expected haplotype counts given the phasing probabilities.
    n1hap.freqs <- haptable
    n1hap.freqs[c("AC", "GG")] <- n1hap.freqs[c("AC", "GG")] + (n.doub * op1.e * .5)
    n1hap.freqs[c("AG", "GC")] <- n1hap.freqs[c("AG", "GC")] + (n.doub * op2.e * .5)
    n1hap.freqs <- n1hap.freqs / sum(n1hap.freqs)
    # Convergence check and update.
    diff <- sum(abs(n1hap.freqs - shap.freqs))
    shap.freqs <- n1hap.freqs
  }
  shap.freqs
}
# Run the estimator on the first row of the example data.
out <- single_haplotype_estimation(x, haptable)
# The goal now is to extend this so that the function runs when provided with the full ex and hap.ex files.
# Note that each row will stop at a different point, since the difference in haplotype frequencies between
# iterations will reach sigma at different numbers of iterations. What we can do is either keep going until
# everything hits sigma, or just stop each row when they cross sigma and only iterate the remainder. The latter
# should be faster.
# Paper source for EM:
# Maximum-likelihood estimation of molecular haplotype frequencies in a diploid population. Excoffier, L., and Slatkin, M. (1995)
# The math in this paper IS NOT HELPFUL in general. Here's maybe a better source:
# https://homes.cs.washington.edu/~suinlee/genome541/lecture3-genetics-Lee.pdf
# Note: the haplotype table is provided by the tabulate_haplotypes function in the calc_pairwise_ld function in the package.
# Go to the R/stat_functions.R file, then ctrl + F "tabulate_haplotypes" to find it. That function calls "GtoH", a function that is defined just above
# it in the script!
|
/R_dev/EM_haplotype_example.R
|
no_license
|
michellepepping/snpR
|
R
| false
| false
| 4,691
|
r
|
# Read in the example file:
ex <- readRDS("R_dev/haplotype_frequency_estimation_example.RDS")
# `ex` contains counts of all of the "phenotypes", i.e. the visible
# two-locus genotypes formed by pairs of haplotypes.
# Drop columns whose names contain "NN" (missing-genotype codes).
ex <- ex[,-grep("NN", colnames(ex))]
x <- ex[1,]
phenos <- which(x != 0)
x <- x[phenos] # data for one row, keeping only the cells that actually have counts
# haptable, containing the tabulated unambiguous haplotypes
hap.ex <- readRDS("R_dev/hapmat_example.RDS")
haptable <- hap.ex[1,] # counts from the phase-unambiguous phenotypes
haps <- c("AC", "AG", "GC", "GG")
names(haptable) <- haps
# Example function follows. Right now, it only works for the first row of ex!
# Estimate the four haplotype frequencies ("AC", "AG", "GC", "GG") for a
# pair of biallelic loci with an EM loop, resolving the phase-ambiguous
# double heterozygote. `x` holds named phenotype counts (four-character
# names: chars 1-2 = locus-1 genotype, chars 3-4 = locus-2 genotype),
# `haptable` holds named counts of phase-unambiguous haplotypes, and
# `sigma` is the convergence tolerance. Returns named frequencies summing
# to 1. As in the original example, the M-step reads the double-het count
# from x[4], so this copy only works when the double heterozygote is the
# fourth phenotype (the first row of ex).
single_haplotype_estimation <- function(x, haptable, sigma = 0.0001){
  pheno <- names(x)
  locus1_het <- substr(pheno, 1, 1) != substr(pheno, 2, 2)
  locus2_het <- substr(pheno, 3, 3) != substr(pheno, 4, 4)
  # The sole phase-ambiguous phenotype is heterozygous at both loci.
  doub.het <- which(locus1_het & locus2_het)
  # Initial guess: split the double hets evenly between the two pairings.
  initial_counts <- haptable + .5 * x[doub.het]
  freq_current <- initial_counts / sum(initial_counts)
  repeat {
    # E-step: share of double hets carrying AC/GG rather than AG/GC,
    # assuming random pairing of haplotypes (HWE).
    p_ac_gg <- (2 * freq_current["AC"] * freq_current["GG"]) /
      ((2 * freq_current["AC"] * freq_current["GG"]) +
         (2 * freq_current["AG"] * freq_current["GC"]))
    p_ag_gc <- 1 - p_ac_gg
    # M-step: add the expected double-het contributions to the
    # unambiguous counts and renormalize.
    freq_next <- haptable
    freq_next[c("AC", "GG")] <- freq_next[c("AC", "GG")] + (x[4] * p_ac_gg * .5)
    freq_next[c("AG", "GC")] <- freq_next[c("AG", "GC")] + (x[4] * p_ag_gc * .5)
    freq_next <- freq_next / sum(freq_next)
    change <- sum(abs(freq_next - freq_current))
    freq_current <- freq_next
    if (change <= sigma) {
      break
    }
  }
  freq_current
}
# Run the estimator on the first row of the example data.
out <- single_haplotype_estimation(x, haptable)
# The goal now is to extend this so that the function runs when provided with the full ex and hap.ex files.
# Note that each row will stop at a different point, since the difference in haplotype frequencies between
# iterations will reach sigma at different numbers of iterations. What we can do is either keep going until
# everything hits sigma, or just stop each row when they cross sigma and only iterate the remainder. The latter
# should be faster.
# Paper source for EM:
# Maximum-likelihood estimation of molecular haplotype frequencies in a diploid population. Excoffier, L., and Slatkin, M. (1995)
# The math in this paper IS NOT HELPFUL in general. Here's maybe a better source:
# https://homes.cs.washington.edu/~suinlee/genome541/lecture3-genetics-Lee.pdf
# Note: the haplotype table is provided by the tabulate_haplotypes function in the calc_pairwise_ld function in the package.
# Go to the R/stat_functions.R file, then ctrl + F "tabulate_haplotypes" to find it. That function calls "GtoH", a function that is defined just above
# it in the script!
|
library(stats)
library(pryr)
# Tunable defaults for the m_delta cleaning pipeline below.
DEFAULT_CUTOFF_FRAMES <- 50      # frames-to-end threshold for masking m_delta
DEFAULT_LOESS_FIXED_SPAN <- 10   # frames on either side used for loess smoothing
DEFAULT_NA_REPLACE <- -0.001     # sentinel substituted for missing m_delta
# Mask m_delta values too close to the end of the recording.
#
# d:             data.frame with `m_delta` and `frames_to_end` columns.
# cutoff_frames: rows with fewer than this many frames remaining get
#                m_delta set to NA.
#
# Returns the data frame with the masked column.
#
# BUG FIX: the body previously hard-coded 50 and silently ignored the
# `cutoff_frames` argument; it now honours the parameter (whose default,
# DEFAULT_CUTOFF_FRAMES = 50, preserves the old behaviour).
cut_mdelta <- function(d, cutoff_frames=DEFAULT_CUTOFF_FRAMES) {
  d$m_delta[d$frames_to_end < cutoff_frames] <- NA
  return(d)
}
# Convert a symmetric frame window into a loess `span` fraction.
# A window of k frames on either side of the centre covers 2k + 1 frames;
# the span is that width as a fraction of the series length, capped at 1.
loess_span <- function(n_frames_on_either_side, n_total_frames) {
  window_width <- 2 * n_frames_on_either_side + 1
  min(1.0, window_width / n_total_frames)
}
# Loess-smooth the m_delta series over time, then keep only the
# "positive"-direction rows (dropping the direction column itself).
#
# d:                data.frame with `time`, `m_delta` and `direction` columns.
# loess_fixed_span: frames on either side of the centre used for the
#                   smoothing window (converted to a span via loess_span()).
# col_name:         column the smoothed values are written into.
smooth_mdelta <- function(d, loess_fixed_span=DEFAULT_LOESS_FIXED_SPAN,
                          col_name="m_delta") {
  # Fixed frame window expressed as a fraction of the series.
  fit <- loess(m_delta ~ time, data = d,
               span = loess_span(loess_fixed_span, max(d$time)))
  smoothed <- predict(fit)
  # Re-mask positions that were NA in the raw series.
  smoothed[is.na(d$m_delta)] <- NA
  d[[col_name]] <- smoothed
  keep_rows <- d$direction == "positive"
  d[keep_rows, -which(colnames(d) == "direction")]
}
# TRUE where x lies in the closed interval [start, end] (elementwise;
# NA inputs propagate as NA).
between <- function(x, start, end) {
  !(x < start | x > end)
}
# Complement a set of time intervals within [0, final_time].
#
# intervals:  data.frame with `start` and `end` columns, assumed sorted
#             and non-overlapping.
# final_time: end of the full recording.
#
# Returns a data.frame of the gaps: (0, start1), (end1, start2), ...,
# (endN, final_time).
#
# BUG FIX: the original used the magrittr exposition pipe `%$%`, but this
# file only loads stats and pryr, so `%$%` was undefined at run time; the
# same extraction is now done with plain `$` subsetting.
invert <- function(intervals, final_time) {
  new_start <- c(0.0, intervals$end)
  new_end <- c(intervals$start, final_time)
  data.frame(start = new_start, end = new_end)
}
# NA-removing convenience wrappers around mean() and median().
# Rewritten as plain closures (previously pryr::partial(..., na.rm = T)):
# this drops the pryr dependency for a trivial wrapper and avoids the
# reassignable `T` alias, while keeping the same call behaviour.
mean_na <- function(x, ...) mean(x, na.rm = TRUE, ...)
median_na <- function(x, ...) median(x, na.rm = TRUE, ...)
# Summarise m_delta within each [start, end] interval.
#
# intervals: data.frame with `start` and `end` columns (seconds).
# mdelta_d:  data.frame with `time_sec` and `m_delta` columns.
# stat:      summary function applied to the m_delta values inside each
#            interval (defaults to the NA-removing mean defined above).
# invert:    if TRUE, summarise the gaps BETWEEN the given intervals
#            instead (see invert()).
#
# NOTE(review): the `invert` argument shadows the invert() function; the
# call below still resolves because R skips non-function bindings when
# looking up a call target, but renaming the argument would be clearer.
# NOTE(review): registerDoParallel(), ddply()/.() and %$% come from
# doParallel, plyr and magrittr, none of which are loaded in this file's
# header — presumably attached elsewhere; verify before running standalone.
intervals_average_mdelta <- function(intervals, mdelta_d, stat=mean_na,
                                     invert=FALSE){
  registerDoParallel()
  if (invert) {
    intervals <- invert(intervals, max(mdelta_d$time_sec))
  }
  # For each (start, end) row, take the m_delta values whose time_sec falls
  # in the interval and reduce them with `stat` (in parallel via plyr).
  result <- mdelta_d %$% ddply(intervals, .(start, end), .parallel=TRUE,
                               .fun=function(d) d %$%
                                 data.frame(m_delta=stat(m_delta[between(time_sec, start, end)])))
  return(result)
}
# Summarise m_delta both inside the given intervals and in the gaps
# between them, returning one stacked data frame with an `inverted`
# flag column distinguishing the two sets of rows.
#
# intervals: data.frame with `start` and `end` columns.
# mdelta_d:  data.frame with `time_sec` and `m_delta` columns.
# stat:      summary function forwarded to intervals_average_mdelta().
#
# FIX: replaced the reassignable `T`/`F` aliases with TRUE/FALSE.
intervals_and_inverted <- function(intervals, mdelta_d, stat=mean_na) {
  res <- intervals_average_mdelta(intervals, mdelta_d, stat=stat)
  res$inverted <- FALSE
  res_i <- intervals_average_mdelta(intervals, mdelta_d, stat=stat, invert=TRUE)
  res_i$inverted <- TRUE
  return(rbind(res, res_i))
}
# Substitute a sentinel value for missing m_delta entries.
# na_replace: value written in place of each NA (defaults to the module
#             constant DEFAULT_NA_REPLACE, a small negative sentinel).
replace_na <- function(d, na_replace=DEFAULT_NA_REPLACE) {
  missing_idx <- is.na(d$m_delta)
  d$m_delta[missing_idx] <- na_replace
  d
}
|
/src/analysis/clean.R
|
no_license
|
bootphon/mdelta
|
R
| false
| false
| 2,026
|
r
|
library(stats)
library(pryr)
# Tunable defaults for the m_delta cleaning pipeline.
DEFAULT_CUTOFF_FRAMES <- 50      # frames-to-end threshold for masking m_delta
DEFAULT_LOESS_FIXED_SPAN <- 10   # frames on either side for loess smoothing
DEFAULT_NA_REPLACE <- -0.001     # sentinel written over missing m_delta

# Mask m_delta for rows near the end of the recording.
# NOTE(review): the comparison hard-codes 50, so the `cutoff_frames`
# argument is silently ignored — confirm and use the parameter instead.
cut_mdelta <- function(d, cutoff_frames=DEFAULT_CUTOFF_FRAMES) {
  d$m_delta[d$frames_to_end < 50] <- NA
  return(d)
}

# Turn a symmetric window of frames into a loess `span` fraction, capped at 1.
loess_span <- function(n_frames_on_either_side, n_total_frames) {
  n_span_frames <- 2*n_frames_on_either_side + 1
  result <- min(1.0, n_span_frames/n_total_frames)
  return(result)
}

# Loess-smooth m_delta over time, restore NA positions, then keep only the
# "positive"-direction rows and drop the direction column.
smooth_mdelta <- function(d, loess_fixed_span=DEFAULT_LOESS_FIXED_SPAN,
                          col_name="m_delta") {
  span <- loess_span(loess_fixed_span, max(d$time))
  m <- loess(m_delta ~ time, data=d, span=span)
  smoothed_values <- predict(m)
  # re-mask values that were NA in the raw series
  smoothed_values[is.na(d$m_delta)] <- NA
  d[[col_name]] <- smoothed_values
  d <- d[d$direction=="positive", -which(colnames(d) == "direction")]
  return(d)
}

# Elementwise test for x in the closed interval [start, end].
between <- function(x, start, end) {
  return(x >= start & x <= end)
}

# Complement a set of intervals within [0, final_time].
# NOTE(review): `%$%` is the magrittr exposition pipe, but only stats and
# pryr are loaded in this file — verify magrittr is attached elsewhere.
invert <- function(intervals, final_time) {
  new_start <- intervals %$% c(0.0, end)
  new_end <- intervals %$% c(start, final_time)
  result <- data.frame(start=new_start, end=new_end)
  return(result)
}

# NA-removing wrappers for mean() and median() built with pryr::partial().
# NOTE(review): prefer TRUE over the reassignable alias T.
mean_na <- partial(mean, na.rm=T)
median_na <- partial(median, na.rm=T)

# Apply `stat` to the m_delta values inside each (start, end) interval;
# with invert=TRUE, summarise the gaps between intervals instead.
# NOTE(review): the `invert` argument shadows the invert() function above
# (the call still resolves because R skips non-function bindings when
# looking up a call target); registerDoParallel()/ddply() require
# doParallel and plyr, which are not loaded in this file.
intervals_average_mdelta <- function(intervals, mdelta_d, stat=mean_na,
                                     invert=FALSE){
  registerDoParallel()
  if (invert) {
    intervals <- invert(intervals, max(mdelta_d$time_sec))
  }
  result <- mdelta_d %$% ddply(intervals, .(start, end), .parallel=TRUE,
                               .fun=function(d) d %$%
                                 data.frame(m_delta=stat(m_delta[between(time_sec, start, end)])))
  return(result)
}

# Summaries both inside the intervals and in the gaps, stacked with an
# `inverted` flag column.
intervals_and_inverted <- function(intervals, mdelta_d, stat=mean_na) {
  res <- intervals_average_mdelta(intervals, mdelta_d, stat=stat)
  res$inverted <- F
  res_i <- intervals_average_mdelta(intervals, mdelta_d, stat=stat, invert=T)
  res_i$inverted <- T
  return(rbind(res, res_i))
}

# Replace missing m_delta values with a sentinel.
replace_na <- function(d, na_replace=DEFAULT_NA_REPLACE) {
  d$m_delta[is.na(d$m_delta)] <- na_replace
  return(d)
}
|
\name{parserOutputTFSummary}
\alias{parserOutputTFSummary}
\title{Method "parserOutputTFSummary"}
\description{
Parses the model output summary from the TF framework and returns it in a readable named vector format
}
\usage{
parserOutputTFSummary(linearRegressionOutput)
}
\arguments{
\item{linearRegressionOutput}{linear regression output that comes from the TF method; mandatory argument}
}
\value{
Returns a named vector with model output summary results
}
\references{
Karp N, Melvin D, Sanger Mouse Genetics Project, Mott R (2012): Robust and Sensitive Analysis of Mouse Knockout Phenotypes. \emph{PLoS ONE} \bold{7}(12): e52410. doi:10.1371/journal.pone.0052410
West B, Welch K, Galecki A (2007): Linear Mixed Models: A practical guide using statistical software \emph{New York: Chapman & Hall/CRC} 353 p.
}
\author{Natalja Kurbatova, Natasha Karp, Jeremy Mason}
\seealso{\code{\linkS4class{PhenTestResult}}}
\examples{
file <- system.file("extdata", "test6_RR.csv", package="PhenStat")
test <- PhenStat:::PhenList(dataset=read.csv(file,na.strings = '-'),
testGenotype="Oxr1/Oxr1")
result <- PhenStat:::testDataset(test,
depVariable="Ca",
method="TF",
dataPointsThreshold=2)
linearRegressionOutput <- PhenStat:::analysisResults(result)
PhenStat:::parserOutputTFSummary(linearRegressionOutput)
}
|
/Early adults stats pipeline/PhenStat/PhenStatPackage/PhenStat/man/parserOutputTFSummary.Rd
|
permissive
|
mpi2/impc_stats_pipeline
|
R
| false
| false
| 1,391
|
rd
|
\name{parserOutputTFSummary}
\alias{parserOutputTFSummary}
\title{Method "parserOutputTFSummary"}
\description{
Parses the model output summary from the TF framework and returns it in a readable named vector format
}
\usage{
parserOutputTFSummary(linearRegressionOutput)
}
\arguments{
\item{linearRegressionOutput}{linear regression output that comes from the TF method; mandatory argument}
}
\value{
Returns a named vector with model output summary results
}
\references{
Karp N, Melvin D, Sanger Mouse Genetics Project, Mott R (2012): Robust and Sensitive Analysis of Mouse Knockout Phenotypes. \emph{PLoS ONE} \bold{7}(12): e52410. doi:10.1371/journal.pone.0052410
West B, Welch K, Galecki A (2007): Linear Mixed Models: A practical guide using statistical software \emph{New York: Chapman & Hall/CRC} 353 p.
}
\author{Natalja Kurbatova, Natasha Karp, Jeremy Mason}
\seealso{\code{\linkS4class{PhenTestResult}}}
\examples{
file <- system.file("extdata", "test6_RR.csv", package="PhenStat")
test <- PhenStat:::PhenList(dataset=read.csv(file,na.strings = '-'),
testGenotype="Oxr1/Oxr1")
result <- PhenStat:::testDataset(test,
depVariable="Ca",
method="TF",
dataPointsThreshold=2)
linearRegressionOutput <- PhenStat:::analysisResults(result)
PhenStat:::parserOutputTFSummary(linearRegressionOutput)
}
|
# Manual-debug bindings for stepping through checkDesign() interactively
# (they mirror checkDesignTest()'s defaults below):
# dat = prepDat;
# booklets = inputList$booklets;
# blocks = inputList$blocks;
# rotation = inputList$rotation;
# sysMis = "mbd";
# id = "ID";
# subunits = inputList$subunits;
# verbose = TRUE
data(inputDat)
data(inputList)
# Build the prepared dataset once for all tests: merge and recode only
# (no checking, aggregation, scoring, or SPSS I/O).
prepDat <-
  automateDataPreparation(
    inputList = inputList,
    datList = inputDat,
    readSpss = FALSE,
    checkData = FALSE,
    mergeData = TRUE,
    recodeData = TRUE,
    aggregateData = FALSE,
    scoreData = FALSE,
    writeSpss = FALSE,
    verbose = FALSE
  )
# Function with defaults to expose arguments for manipulation.
# Wrapper around checkDesign() using this file's prepared inputs as
# defaults, so each test below can perturb a single argument (dataset,
# blocks, booklets, rotation, id, or sysMis code) while the rest stay valid.
checkDesignTest <- function(
    dat = prepDat,
    booklets = inputList$booklets,
    blocks = inputList$blocks,
    rotation = inputList$rotation,
    sysMis = "mbd",
    id = "ID",
    subunits = inputList$subunits,
    verbose = TRUE
) {
  checkDesign(
    dat = dat,
    booklets = booklets,
    blocks = blocks,
    rotation = rotation,
    sysMis = sysMis,
    id = id,
    subunits = subunits,
    verbose = verbose
  )
}
# Snapshot/expectation tests for checkDesign(), each perturbing one input
# of checkDesignTest() while keeping the rest valid.
test_that_cli("returns nothing with no problems or only success messages on verbose mode", {
  expect_snapshot(checkDesignTest(verbose = FALSE))
  expect_snapshot(checkDesignTest(verbose = TRUE))
})
test_that_cli("identifies ID variables that cannot be found in the dataset", {
  expect_snapshot(
    error = TRUE,
    checkDesignTest(id = "FalseID")
  )
})
test_that_cli("returns an error if missing variable names in blocks", {
  expect_error(
    checkDesignTest(blocks = within(inputList$blocks, subunit <- NULL))
  )
})
test_that_cli("returns an error if missing variable names in booklets", {
  # Missing booklet column
  expect_error(
    checkDesignTest(booklets = within(inputList$booklets, booklet <- NULL))
  )
  # Missing "block[0-9*]" pattern
  test_booklets <- inputList$booklets
  names(test_booklets) <- c("booklet", "block1", "block2", "part3")
  expect_error(
    checkDesignTest(booklets = test_booklets)
  )
  # Should also throw an error?
  # test_booklets <- inputList$booklets
  # names(test_booklets) <- c("booklet", "block1", "block2", "block__3")
  # expect_error(
  #   checkDesignTest(booklets = test_booklets)
  # )
})
test_that_cli("returns an error if missing variable names in rotation", {
  # Missing booklet column
  expect_error(
    checkDesignTest(rotation = within(inputList$rotation, booklet <- NULL))
  )
})
# NOTE(review): this test block is an exact duplicate of the one above —
# presumably a copy/paste leftover; consider removing one copy.
test_that_cli("returns an error if missing variable names in rotation", {
  # Missing booklet column
  expect_error(
    checkDesignTest(rotation = within(inputList$rotation, booklet <- NULL))
  )
})
test_that_cli("throws danger messages when block names in blocks do not equal those in booklets", {
  # Manipulation: add block in blocks
  test_block_block <- rbind.data.frame(
    inputList$blocks,
    data.frame(
      subunit = "I99",
      block = "bl9",
      subunitBlockPosition = 1
    )
  )
  expect_snapshot(
    checkDesignTest(blocks = test_block_block)
  )
  # Manipulation: rename a block in blocks
  test_block_block <- within(inputList$blocks, {
    block <- ifelse(block == "bl1", "bl9", block)
  })
  expect_snapshot(
    checkDesignTest(blocks = test_block_block)
  )
  # Manipulation: add a booklet referencing existing blocks
  test_booklet_block <- rbind.data.frame(
    inputList$booklets,
    data.frame(
      booklet = "booklet4",
      block1 = "bl2",
      block2 = "bl4",
      block3 = "bl1"
    )
  )
  expect_snapshot(
    checkDesignTest(booklets = test_booklet_block)
  )
  # Manipulation: rename a block in booklets
  test_booklet_block <- within(inputList$booklets, {
    block1 <- ifelse(block1 == "bl1", "bl8", block1)
  })
  expect_snapshot(
    checkDesignTest(booklets = test_booklet_block)
  )
})
test_that_cli("throws danger messages when booklet names in booklets do not equal those in rotation", {
  # Manipulation: rename a booklet in booklets
  test_booklet_booklet <- within(inputList$booklets, {
    booklet <- ifelse(booklet == "booklet1", "booklet9", booklet)
  })
  expect_snapshot(
    checkDesignTest(booklets = test_booklet_booklet)
  )
  # Manipulation: rename a booklet in rotation
  test_rotation_booklet <- within(inputList$rotation, {
    booklet <- ifelse(booklet == "booklet1", "booklet8", booklet)
  })
  expect_snapshot(
    checkDesignTest(rotation = test_rotation_booklet)
  )
})
test_that_cli("throws warning when more variables in dataset available than in blocks$subunit", {
  # Manipulation: delete hisei (default in dataset)
  expect_snapshot(
    checkDesignTest(dat = within(prepDat, hisei <- NULL))
  )
  # Manipulation: add two other variables (hisei is available per default)
  expect_snapshot(
    checkDesignTest(dat = within(prepDat, {
      hisei <- NULL
      testB <- 2
      testA <- 1
    }))
  )
})
test_that_cli("identifies incorrect sysMis codes and allows for user-defined sysMis", {
  # Change vc to sysMis for item I01R
  expect_snapshot(
    checkDesignTest(dat = within(prepDat, I01R <- ifelse(I01R == "mbi", "mbd", I01R)), sysMis = "mbd")
  )
  # Recode "mbd" to NA everywhere to emulate a user-defined sysMis code
  userDefinedSysMis <- as.data.frame(lapply(prepDat, FUN = function(x) ifelse(x == "mbd", NA, x)))
  # Change vc to the user-defined sysMis for item I01R
  expect_snapshot(
    checkDesignTest(dat = within(userDefinedSysMis, I01R <- ifelse(I01R == "mbi", NA, I01R)),
                    sysMis = "NA")
  )
})
test_that_cli("identifies incorrect vc codes", {
  # Change sysMis to vc for item I22R
  expect_snapshot(
    checkDesignTest(dat = within(prepDat, I22R <- ifelse(I22R == "mbd", "mbi", I22R)), sysMis = "mbd")
  )
  # Recode "mbd" to NA everywhere to emulate a user-defined sysMis code
  userDefinedSysMis <- as.data.frame(lapply(prepDat, FUN = function(x) ifelse(x == "mbd", NA, x)))
  # Change sysMis to vc for item I22R
  expect_snapshot(
    checkDesignTest(dat = within(userDefinedSysMis, I22R <- ifelse(is.na(I22R), "mbi", I22R)),
                    sysMis = "NA")
  )
})
|
/tests/testthat/test-checkDesign.R
|
no_license
|
sachseka/eatPrep
|
R
| false
| false
| 5,855
|
r
|
# Manual-debug bindings for stepping through checkDesign() interactively
# (they mirror checkDesignTest()'s defaults below):
# dat = prepDat;
# booklets = inputList$booklets;
# blocks = inputList$blocks;
# rotation = inputList$rotation;
# sysMis = "mbd";
# id = "ID";
# subunits = inputList$subunits;
# verbose = TRUE
data(inputDat)
data(inputList)
# Build the prepared dataset once for all tests: merge and recode only
# (no checking, aggregation, scoring, or SPSS I/O).
prepDat <-
  automateDataPreparation(
    inputList = inputList,
    datList = inputDat,
    readSpss = FALSE,
    checkData = FALSE,
    mergeData = TRUE,
    recodeData = TRUE,
    aggregateData = FALSE,
    scoreData = FALSE,
    writeSpss = FALSE,
    verbose = FALSE
  )
# Function with defaults to expose arguments for manipulation.
# Wrapper around checkDesign() using this file's prepared inputs as
# defaults, so each test can perturb a single argument (dataset, blocks,
# booklets, rotation, id, or sysMis code) while the rest stay valid.
checkDesignTest <- function(
    dat = prepDat,
    booklets = inputList$booklets,
    blocks = inputList$blocks,
    rotation = inputList$rotation,
    sysMis = "mbd",
    id = "ID",
    subunits = inputList$subunits,
    verbose = TRUE
) {
  checkDesign(
    dat = dat,
    booklets = booklets,
    blocks = blocks,
    rotation = rotation,
    sysMis = sysMis,
    id = id,
    subunits = subunits,
    verbose = verbose
  )
}
test_that_cli("returns nothing with no problems or only success messages on verbose mode", {
expect_snapshot(checkDesignTest(verbose = FALSE))
expect_snapshot(checkDesignTest(verbose = TRUE))
})
test_that_cli("identifies ID variables that cannot be found in the dataset", {
expect_snapshot(
error = TRUE,
checkDesignTest(id = "FalseID")
)
})
test_that_cli("returns an error if missing variable names in blocks", {
expect_error(
checkDesignTest(blocks = within(inputList$blocks, subunit <- NULL))
)
})
test_that_cli("returns an error if missing variable names in booklets", {
# Missing booklet column
expect_error(
checkDesignTest(booklets = within(inputList$booklets, booklet <- NULL))
)
# Missing "block[0-9*]" pattern
test_booklets <- inputList$booklets
names(test_booklets) <- c("booklet", "block1", "block2", "part3")
expect_error(
checkDesignTest(booklets = test_booklets)
)
# Should also throw an error?
# test_booklets <- inputList$booklets
# names(test_booklets) <- c("booklet", "block1", "block2", "block__3")
# expect_error(
# checkDesignTest(booklets = test_booklets)
# )
})
test_that_cli("returns an error if missing variable names in rotation", {
# Missing booklet column
expect_error(
checkDesignTest(rotation = within(inputList$rotation, booklet <- NULL))
)
})
test_that_cli("returns an error if missing variable names in rotation", {
# Missing booklet column
expect_error(
checkDesignTest(rotation = within(inputList$rotation, booklet <- NULL))
)
})
test_that_cli("throws danger messages when block names in blocks do not equal those in booklets", {
# Manipulation: add block in blocks
test_block_block <- rbind.data.frame(
inputList$blocks,
data.frame(
subunit = "I99",
block = "bl9",
subunitBlockPosition = 1
)
)
expect_snapshot(
checkDesignTest(blocks = test_block_block)
)
# Manipulation: block names in blocks
test_block_block <- within(inputList$blocks, {
block <- ifelse(block == "bl1", "bl9", block)
})
expect_snapshot(
checkDesignTest(blocks = test_block_block)
)
# Manipulation: block names in booklets
test_booklet_block <- rbind.data.frame(
inputList$booklets,
data.frame(
booklet = "booklet4",
block1 = "bl2",
block2 = "bl4",
block3 = "bl1"
)
)
expect_snapshot(
checkDesignTest(booklets = test_booklet_block)
)
# Manipulation: block names in booklets
test_booklet_block <- within(inputList$booklets, {
block1 <- ifelse(block1 == "bl1", "bl8", block1)
})
expect_snapshot(
checkDesignTest(booklets = test_booklet_block)
)
})
test_that_cli("throws danger messages when booklet names in booklets do not equal those in rotation", {
# Manipulation: block names in blocks
test_booklet_booklet <- within(inputList$booklets, {
booklet <- ifelse(booklet == "booklet1", "booklet9", booklet)
})
expect_snapshot(
checkDesignTest(booklets = test_booklet_booklet)
)
# Manipulation: block names in booklets
test_rotation_booklet <- within(inputList$rotation, {
booklet <- ifelse(booklet == "booklet1", "booklet8", booklet)
})
expect_snapshot(
checkDesignTest(rotation = test_rotation_booklet)
)
})
test_that_cli("throws warning when more variables in dataset available than in blocks$subunit", {
# Manipulation: delete hisei (default in dataset)
expect_snapshot(
checkDesignTest(dat = within(prepDat, hisei <- NULL))
)
# Manipulation: add two other variables (hisei is available per default)
expect_snapshot(
checkDesignTest(dat = within(prepDat, {
hisei <- NULL
testB <- 2
testA <- 1
}))
)
})
test_that_cli("identifies incorrect sysMis codes and allows for user-defined sysMis", {
# Change vc to sysMis for item I01R
expect_snapshot(
checkDesignTest(dat = within(prepDat, I01R <- ifelse(I01R == "mbi", "mbd", I01R)), sysMis = "mbd")
)
# Change vc to user-defined sysMis for I01R
userDefinedSysMis <- as.data.frame(lapply(prepDat, FUN = function(x) ifelse(x == "mbd", NA, x)))
# Change vc to sysMis for item I01R
expect_snapshot(
checkDesignTest(dat = within(userDefinedSysMis, I01R <- ifelse(I01R == "mbi", NA, I01R)),
sysMis = "NA")
)
})
test_that_cli("identifies incorrect vc codes", {
# Change sysMis to vc for item I22R
expect_snapshot(
checkDesignTest(dat = within(prepDat, I22R <- ifelse(I22R == "mbd", "mbi", I22R)), sysMis = "mbd")
)
# Change vc to user-defined sysMis for I01R
userDefinedSysMis <- as.data.frame(lapply(prepDat, FUN = function(x) ifelse(x == "mbd", NA, x)))
# Change sysMis to vc for item I22R
expect_snapshot(
checkDesignTest(dat = within(userDefinedSysMis, I22R <- ifelse(is.na(I22R), "mbi", I22R)),
sysMis = "NA")
)
})
|
\name{rbind.fill}
\alias{rbind.fill}
\title{Combine data.frames by row, filling in missing columns.}
\usage{
rbind.fill(...)
}
\arguments{
\item{...}{input data frames to row bind together. The
first argument can be a list of data frames, in which
case all other arguments are ignored. Any NULL inputs
are silently dropped. If all inputs are NULL, the output
is NULL.}
}
\value{
a single data frame
}
\description{
\code{rbind}s a list of data frames filling missing columns
with NA.
}
\details{
This is an enhancement to \code{\link{rbind}} that adds in
columns that are not present in all inputs, accepts a list
of data frames, and operates substantially faster.
Column names and types in the output will appear in the
order in which they were encountered.
Unordered factor columns will have their levels unified and
character data bound with factors will be converted to
character. POSIXct data will be converted to be in the same
time zone. Array and matrix columns must have identical
dimensions after the row count. Aside from these there are
no general checks that each column is of consistent data
type.
}
\examples{
rbind.fill(mtcars[c("mpg", "wt")], mtcars[c("wt", "cyl")])
}
\seealso{
Other binding functions: \code{\link{rbind.fill.matrix}}
}
\keyword{manip}
|
/man/rbind.fill.Rd
|
no_license
|
baptiste/plyr
|
R
| false
| false
| 1,286
|
rd
|
\name{rbind.fill}
\alias{rbind.fill}
\title{Combine data.frames by row, filling in missing columns.}
\usage{
rbind.fill(...)
}
\arguments{
\item{...}{input data frames to row bind together. The
first argument can be a list of data frames, in which
case all other arguments are ignored. Any NULL inputs
are silently dropped. If all inputs are NULL, the output
is NULL.}
}
\value{
a single data frame
}
\description{
\code{rbind}s a list of data frames filling missing columns
with NA.
}
\details{
This is an enhancement to \code{\link{rbind}} that adds in
columns that are not present in all inputs, accepts a list
of data frames, and operates substantially faster.
Column names and types in the output will appear in the
order in which they were encountered.
Unordered factor columns will have their levels unified and
character data bound with factors will be converted to
character. POSIXct data will be converted to be in the same
time zone. Array and matrix columns must have identical
dimensions after the row count. Aside from these there are
no general checks that each column is of consistent data
type.
}
\examples{
rbind.fill(mtcars[c("mpg", "wt")], mtcars[c("wt", "cyl")])
}
\seealso{
Other binding functions: \code{\link{rbind.fill.matrix}}
}
\keyword{manip}
|
## This file contains the Coursera's Exploratory Data Analysis 2nd Course Project's 4th script with the question and the answer.
## Question: Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
library(dplyr)
library(ggplot2)
## Unzip and read the data
summarySCC <- readRDS(unzip("exdata_data_NEI_data.zip", "summarySCC_PM25.rds"))
sourceCC <- readRDS(unzip("exdata_data_NEI_data.zip", "Source_Classification_Code.rds"))
## Keep only coal-combustion-related source classification codes
sourceCC_Comb_Coal <- sourceCC[grepl("[Cc]omb.*[Cc]oal", sourceCC$Short.Name, ignore.case = TRUE), ]
str(sourceCC_Comb_Coal)
summarySCC_sourceCC_Comb_Coal <- inner_join(summarySCC, sourceCC_Comb_Coal, by = "SCC")
## Build the plot, then save it.
## BUG FIX: ggsave() was previously appended to the ggplot chain with `+`;
## it is not a layer, so it must be called after the plot is built.
## The y values are Emissions / 10^6, so the axis is labelled in millions
## of tons (the old label incorrectly said "tons").
g <- ggplot(summarySCC_sourceCC_Comb_Coal, aes(factor(year), Emissions / 10^6)) +
  geom_bar(stat = "identity", fill = "darkblue") +
  labs(x = "Year",
       y = "Total Emissions of PM2.5 (millions of tons)",
       title = "Total Coal Combustion-related Emissions of PM2.5 \n in the United States (1999-2008)") +
  theme(plot.title = element_text(hjust = 0.5))
print(g)
ggsave("plot4.png", plot = g)
## Answer: The coal combustion-related emissions of PM2.5 decreased in the United States from 1999 to 2008.
|
/plot4.R
|
no_license
|
tamaskjf/Exploratory_Data_Analysis_Course_Project_2
|
R
| false
| false
| 1,291
|
r
|
## This file contains the Coursera's Exploratory Data Analysis 2nd Course Project's 4th script with the question and the answer.
## Question: Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
library(dplyr)
library(ggplot2)
## Unzipping the files
summarySCC <- readRDS(unzip("exdata_data_NEI_data.zip", "summarySCC_PM25.rds"))
sourceCC <- readRDS(unzip("exdata_data_NEI_data.zip", "Source_Classification_Code.rds"))
## Looking for the answer in the data sets
## Keep only source classification codes whose short name matches comb...coal.
sourceCC_Comb_Coal <- sourceCC[grepl("[Cc]omb.*[Cc]oal", sourceCC$Short.Name, ignore.case = TRUE),]
str(sourceCC_Comb_Coal)
summarySCC_sourceCC_Comb_Coal <- inner_join(summarySCC, sourceCC_Comb_Coal, by = "SCC")
## Making a plot and saving to a PNG file
## NOTE(review): ggsave() is not a ggplot layer; adding it to the chain with
## `+` is an error-prone pattern — build the plot, then call ggsave()
## separately. Also, Emissions is divided by 10^6 while the axis label says
## "tons"; the plotted units are actually millions of tons.
g <- ggplot(summarySCC_sourceCC_Comb_Coal, aes(factor(year), Emissions/10^6)) +
  geom_bar(stat = "identity", fill = "darkblue") +
  labs(x = "Year", y = "Total Emissions of PM2.5 in tons", title = "Total Coal Combustion-related Emissions of PM2.5 \n in tons in the United States (1999-2008)") +
  theme(plot.title = element_text(hjust = 0.5)) +
  ggsave("plot4.png")
print(g)
## Answer: The coal combustion-related emissions of PM2.5 decreased in the United States from 1999 to 2008.
|
%do not edit, edit noweb/qmrparser.nw
\name{isNewline}
\alias{isNewline}
\title{
Is it a new line character?
}
\description{
Checks whether a character is a new line character.
}
\usage{
isNewline(ch)
}
\arguments{
\item{ch}{character to be checked}
}
\value{
TRUE/FALSE, depending on character being a newline character
}
\examples{
isNewline(' ')
isNewline('\n')
}
\keyword{set of character}
|
/man/isNewline.Rd
|
no_license
|
cran/qmrparser
|
R
| false
| false
| 395
|
rd
|
%do not edit, edit noweb/qmrparser.nw
\name{isNewline}
\alias{isNewline}
\title{
Is it a new line character?
}
\description{
Checks whether a character is a new line character.
}
\usage{
isNewline(ch)
}
\arguments{
\item{ch}{character to be checked}
}
\value{
TRUE/FALSE, depending on character being a newline character
}
\examples{
isNewline(' ')
isNewline('\n')
}
\keyword{set of character}
|
#!/usr/bin/env Rscript
# Aggregate htseq-count output files into one data frame, discard genes
# with 0 counts across all samples, and save the result as 'my_counts.csv'.
files <- dir(pattern = "_count.txt")
# Sample names are the file names with the "_count.txt" suffix stripped.
# (A single anchored sub() replaces the sapply/strsplit round-trip, which
# also treated the dots in the suffix as regex wildcards.)
names <- sub("_count\\.txt$", "", files)
data <- lapply(files, read.table, sep = "\t", header = FALSE)
# htseq-count appends 5 summary rows (__no_feature etc.); drop them.
data <- lapply(data, function(x) head(x, -5))
names(data) <- names
# Rename each table's count column (V2) to its sample name.
data <- lapply(names, function(x) {
  colnames(data[[x]])[2] <- x
  data[[x]]
})
# Merge all per-sample tables on the gene id column (V1).
data <- Reduce(function(x, y) merge(x, y, by.x = "V1", by.y = "V1"), data, accumulate = FALSE)
names(data)[1] <- "gene_id"
# Drop genes whose counts are zero in every sample (drop = FALSE keeps
# rowSums working even with a single sample column).
data$filter <- rowSums(data[, -1, drop = FALSE])
data <- subset(data, filter != 0)
data$filter <- NULL
write.csv(data, file = "my_counts.csv", quote = FALSE, row.names = FALSE)
|
/counts2df.R
|
no_license
|
kn3in/rnaseq-util
|
R
| false
| false
| 815
|
r
|
#!/usr/bin/env Rscript
# aggregate htseq-counts output into a data frame
# discard genes with 0 counts across all samples
# save resulting dataframe as 'my_counts.csv' file
# Input: one "<sample>_count.txt" file per sample in the working directory,
# tab-separated with the gene id in column 1 (V1) and the count in column 2.
files <- dir(pattern = "_count.txt")
# Sample names: strip the "_count.txt" suffix from each file name.
names <- as.vector(sapply(files, function(x) strsplit(x, "_count.txt")[[1]]))
data <- lapply(files, read.table, sep = "\t", header = FALSE)
# drop last 5 summary rows
# (htseq-count appends __no_feature/__ambiguous/... summary lines, not genes)
data <- lapply(data, function(x) head(x, -5))
names(data) <- names
# Relabel each table's count column (V2) with its sample name.
data <- lapply(names, function(x) {
  colnames(data[[x]])[2] <- x
  data[[x]]
})
# Inner-join all per-sample tables on the gene-id column V1.
data <- Reduce(function(x, y) merge(x, y, by.x = "V1", by.y = "V1"), data, accumulate = FALSE)
names(data)[1] <- "gene_id"
# Temporary per-gene total used only to drop genes with zero counts everywhere.
data$filter <- rowSums(data[ ,-1])
data <- subset(data, filter != 0)
data$filter <- NULL
write.csv(data, file = "my_counts.csv", quote = FALSE, row.names = FALSE)
|
# Data import from Excel .csv export and cleaning for nice handling in R
# 14-Feb-2019
#
# Reads the 2017 Mission RST sockeye TALLY / BIO / ENV / Hope-discharge
# exports, standardizes column names and types, builds unique sampling ids,
# and writes "*_clean.csv" copies alongside the inputs.
# Set WORK working directory
# NOTE(review): the backtick inside this path looks like a typo — confirm.
setwd("~/`Stock assessment/Analysis/Data files")
# Set HOME working directory
# NOTE(review): this second setwd() overrides the one above when the script
# is sourced top-to-bottom (and the first call errors if its path does not
# exist on this machine) — comment out whichever location does not apply.
setwd("~/DFO BI02 Stock assessment data analysis/Analysis/Data files")
# Load important packages to use
library(dplyr)
library(ggplot2)
library(tidyr)
library(magrittr)  # provides the tee pipe %T>% used below to write csv mid-chain
library(lubridate)
#######################
#
# TALLY data cleaning
#
#######################
# Read data
tally.df <- read.csv("2017_MissionRST_Sockeye_20180406_TALLY.csv")
# Rename column headers, reformat data for easier handling in R, create USID. Will be done with pipes for quick running
tally.df <- tally.df %>%
  rename(date = Date, # used this extended way of renaming columns so can see what old column names correspond to in case of error
         survey_type = Survey.Type,
         observers = Observers..Initials.Only.,
         daily_sheet = Daily.Sheet..,
         run = Run..,
         bay = Bay..,
         direction_travel = GPS...Direction.of.Travel,
         dist_travel_m = GPS...Distance.Traveled..m.,
         run_time_sec = GPS...Total.Run.Time..sec.,
         vessel_speed_mps = GPS...Vessel.Speed..m.s.,
         current_speed_mps = GPS...Current.Speed..m.s.,
         time_block = Time.Block..Based.on.Start.Time.,
         set_start = Set.Start.Time..24.hr.Clock.,
         set_end = Set.End.Time..24.hr.Clock.,
         NEW_set_start = NEW.Set.Start.Time,
         NEW_set_end = NEW.Set.End.Time,
         run_time = Total.Run.Time,
         shift_seg = Shift.Segment,
         trap_type = TrapType,
         depth_ft = Depth..ft..Vertical.Only,
         mod_stat = Original.or.Modifed.Nets,
         pink_fry_total = Total.Pink.Fry,
         chum_fry_total = Total.Chum.Fry,
         chinook_fry_total = Total.Chinook.Fry,
         chinook_smolt_total = Total.Chinook.Smolts,
         coho_smolt_total = Total.Coho.Smolts,
         sockeye_smolt_total = Total.Sockeye.Smolts,
         sockeye_smolt_bio = Sockeye.Smolt.Biosampled,
         sockeye_smolt_release = Sockeye.Smolt.Released,
         sockeye_fry_total = Total.Sockeye.Fry,
         sockeye_fry_bio = Sockeye.Fry.Biosampled,
         sockeye_fry_release = Sockeye.Fry.Released,
         coho_fry_total = Total.Coho.fry,
         coho_fry_AFC = Coho.fry..AFC.,
         sth_AFC = STH..AFC.,
         chinook_fry_sampled_total = Total.Chinook.Fry.Retained.For.Samples,
         chinook_fry_release = Total.Chinook.Fry.Released,
         trap_type_old = Trap.Type,
         histo_AFC = Histo..Disease..AFC..RNA,
         net_type = Net.Type,
         comments_tally = Comments) %>%
  # NOTE(review): funs() is deprecated in dplyr >= 0.8; list(~ .fn(.)) or
  # across() is the modern spelling — works as-is on the dplyr version used here.
  mutate_at(vars(c(7:11)), funs(as.character)) %>% # Change columns 7-11 to character before can change to numeric
  # NOTE(review): comment says "columns 8-10" but vars(c(8:11)) converts 8-11 — confirm intent.
  mutate_at(vars(c(8:11)), funs(as.numeric)) %>% # Change columns 8-10 to be numeric
  mutate_at(vars(c(22:34)), funs(as.character)) %>% # Change columns 22-34 to be character before integer to preserve values
  mutate_at(vars(c(22:34)), funs(as.integer)) %>% # Change columns 22-34 to be integer
  mutate_at(vars(c(20)), funs(as.character)) %>% # Change column 20 to be character
  mutate(dist_travel_m = ifelse(dist_travel_m < 0, -dist_travel_m, dist_travel_m)) %>% # Turn negative distances into positives
  mutate(depth_ft = as.integer(replace(depth_ft, depth_ft=="Surface", 0))) %>% # Change depth to be integer and change "surface" to "0"
  mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling and future plotting
  mutate(run = paste("R", run, sep = ""), bay = paste("B", bay, sep = "")) %>% # Add "R" and "B" before run and bay (respectively) to help make USID in next step
  mutate(USID = paste(paste(gsub("-", "", date)), run, bay, trap_type, depth_ft, sep="-")) %>% # Unique sampling id (USID): date-run-bay-trap-depth; matches the USID built in bio.df
  mutate(env_index = paste(paste(gsub("-", "", date)), run, sep="-")) %T>% # Create another unique index (env_index) to link TALLY with ENV dataframe. Just date-run
  write.csv("Mission_Sockeye_TALLY_2017_clean.csv", row.names = F) # Export as .csv (row.names=F so R does not add a row-number column)
# Note: will return warnings. This is only because fish counts include NAs, which R doesn't like when considering data as integers. We will
# likely remove these NAs later, but for now they are kept to preserve original entries.
#####################
#
# BIOSAMPLE data cleaning
#
#####################
# Read original csv data from excel
bio.df <- read.csv("2017_MissionRST_Sockeye_20180406_BIO.csv")
# Rename column headers, reformat data for easier handling in R, create USID and UFID. Will be done with pipes for quick running
bio.df <- bio.df %>%
  rename(date = Date, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
         survey_type = Survey.Type,
         observers = Observers..Initials.Only.,
         daily_sheet = Daily.Sheet..,
         run = Run..,
         bay = Bay..,
         set_start = Run.Start.Time.I...24.hr.Clock.,
         set_end = Run.End.Time.I...24.hr.Clock.,
         NEW_set_start = Run.Start.Time.II,
         NEW_set_end = Run.End.Time.II,
         run_time = Total.Run.Duration..Time.,
         run_time_min = Total.Run.Duration..Minutes.,
         time_block = Time.Block..Based.on.Start.Time.,
         shift_seg = Shift.Segment,
         trap_type = Trap..Type,
         depth_ft = Depth...feet.,
         vt_net_type = Net.Type....VT.Only.,
         species_ID_field = Species.ID.in.the.Field,
         life_stage_field = Species.Life.Stage.ID...Field,
         ID = Fish.ID..,
         DNA_vial = DNA.......vial...,
         fork_mm = Fork.Length..mm.,
         size_class_mm = Size.Class..mm.,
         weight_g = Weight..g.,
         AFC = AFC..y.n.,
         histo_sample = Histo.Sample..y.n.,
         histo_jar = Histo.Jar..,
         spag_tag = Spaghetti.Tag..,
         fish_ID_DNA = Fish.ID.....DNA.Lab,
         GSI_submitted = Was.the.sample.submitted.for.GSI.processing....Yes.or.No.,
         reason_not_submitted = Reason.if.sample.not.submitted,
         GSI_processed = Was.the.sample.GSI.processed....Yes.or.No.,
         reason_not_processed = Reason.if.sample.not.processed,
         GSI_received = Did.we.receive.a.GSI.result...Yes.or.No.,
         reason_not_received = Reason.if.no.GSI.result,
         species_ID_DNA = Species.ID...DNA.Lab,
         ID_error = Was.a.species.ID.error.made....Yes.or.No.,
         ID_current_20180327 = Species.ID...Current...as.of...2018.03.27.,
         tally_df_adj = Has.the.Tally.Sheet.been.adjusted...Yes.or.No.,
         fish_ID_DNA2 = FISH.ID..DNA.LAB,
         CU1_number = CU.Assignment1...CU...with.the.highest.probability,
         CU1_name = CU.Assignment1...CU.Name.with.the.highest.probability,
         CU1_prob = CU.Assignment1...Probability,
         CU2_number = CU.Assignment2...CU...with.the.highest.probability,
         CU2_name = CU.Assignment2...CU.Name.with.the.highest.probability,
         CU2_prob = CU.Assignment2...Probability,
         CU3_number = CU.Assignment3...CU...with.the.highest.probability,
         CU3_name = CU.Assignment3...CU.Name.with.the.highest.probability,
         CU3_prob = CU.Assignment3...Probability,
         CU4_number = CU.Assignment4...CU...with.the.highest.probability,
         CU4_name = CU.Assignment4...CU.Name.with.the.highest.probability,
         CU4_prob = CU.Assignment4...Probability,
         CU5_number = CU.Assignment5...CU...with.the.highest.probability,
         CU5_name = CU.Assignment5...CU.Name.with.the.highest.probability,
         CU5_prob = CU.Assignment5...Probability,
         fish_ID_test1 = Fish.ID.Test.Column.1,
         fish_ID_test2 = Fish.ID.Test.Column.2,
         CU_assgn1_60 = Is.the.1st.CU.assignment.probability...0.600,
         CU_diff_assgn1_2 = The.difference.btw.the.1st.and.2nd.CU.assignments,
         CU_diff_assgn1_2_20 = Is.the.difference.btw.the.1st.and.2nd.CU.assignments...0.200,
         CU_rule = Rule.trigger.used.to.assign.a.CU,
         CU_assigned = Assigned.CU.based.on.GSI.Analysis.I..60.20.Rule.Applied.,
         CU_chilko_combo_method = Combined.Chiko.probability,
         CU_chilko_combo_prob = Combining.Chiko..ES....Chilko..S..probabilities..assignments.used,
         CU_chilko_gr_th_60 = If.the.combined.Chilko.probablility...0.6..is.the.diff.btw.the.combined.and.the.2nd.largest.CU.prob...or...0.2.,
         CU_final = Assigned.CU.based.on.GSI.Analysis.II..60.20...Chilko.Combined.Rules.Applied.,
         comments_bio = Comments) %>%
  # NOTE(review): funs() is deprecated in dplyr >= 0.8 — see TALLY section.
  mutate_at(vars(c(22)), funs(as.numeric)) %>% # Make column 22 numeric
  # NOTE(review): comment says "column 20" but vars(c(16)) converts column 16 — confirm intent.
  mutate_at(vars(c(16)), funs(as.character)) %>% # Change column 20 to be character
  mutate(depth_ft = as.integer(replace(depth_ft, depth_ft=="Surface", 0))) %>% # Change depth to be integer and change "surface" to "0"
  mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
  mutate(run = paste("R", run, sep = ""), bay = paste("B", bay, sep = "")) %>% # Add "R" and "B" before run and bay (respectively) to help make USID in next step
  mutate(USID = paste(paste(gsub("-", "", date)), run, bay, trap_type, depth_ft, sep="-")) %>% # Unique sampling id (USID): date-run-bay-trap-depth; matches the USID built in tally.df
  mutate(UFID = paste("2017", ID, sep="-")) %T>% # Unique fish id (UFID), prefixed "2017" in case biometrics are ever compared across years
  write.csv("Mission_Sockeye_BIO_2017_clean.csv", row.names = F) # Export it all as a .csv
#####################
#
# ENVIRONMENTAL data cleaning
#
#####################
# Read original csv data from excel
env.df <- read.csv("2017_MissionRST_Sockeye_20180406_ENV.csv")
# Rename column headers, reformat data for easier handling in R, create USID and UFID. Will be done with pipes for quick running
env.df <- env.df %>%
  # NOTE(review): this also drops the BAY column, not only the empty flow
  # column as the trailing comment says — confirm that is intended.
  select(-c(Flow.........m.s., BAY)) %>% # Dropped empty flow column
  rename(date = Date, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
         RPM = RPM,
         run = Run..,
         time = Time,
         wx_description = Weather.Description,
         cc_perc = X..Cloud.Cover,
         brightness = Brightness,
         precipitation = Precipitation,
         surface_chop = Surface.Chop,
         water_temp_C = Water.Temp..oC.,
         air_temp_C = Air.Temp..oC.,
         water_clarity_in = Water.Clarity..inches.,
         debris = Debris,
         flow_in = Initial.Flow.Reading,
         flow_out = Secondary.Flow.Reading,
         flow_ms = Flow.Calculation..m.s.,
         flow_diff = difference,
         comments_env = Comments) %>%
  mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
  mutate(run = paste("R", run, sep = "")) %>% # Add "R" before run to help make env_index in next step
  mutate(env_index = paste(paste(gsub("-", "", date)), run, sep="-")) %T>% # Create a unique index ("env_index") to link to TALLY dataframe. Just date-run
  write.csv("Mission_Sockeye_ENV_2017_clean.csv", row.names = F) # Export it all as a .csv
#####################
#
# DISCHARGE @ HOPE data cleaning
#
#####################
# Read original csv data from excel
dis.df <- read.csv("2017_MissionRST_Sockeye_20180406_HOPEDISCHARGE.csv")
# Rename column headers, reformat data for easier handling in R. Will be done with pipes for quick running
dis.df <- dis.df %>%
  rename(date = DATE, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
         discharge_m3s = Discharge..m3.s.) %>%
  mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
  mutate(discharge_m3s = as.numeric(discharge_m3s)) %T>% # Make discharge numerical
  write.csv("Mission_Sockeye_HOPEDISCHARGE_2017_clean.csv", row.names = F) # Export it all as a .csv
## THESE DATAFRAMES STILL ESSENTIALLY REPRESENT THE ORIGINAL EXCEL SPREADSHEETS. FURTHER SELECTION WILL OCCUR IN "data_explr" script.
|
/2017data_cleaning.R
|
permissive
|
khdavidson/chum-et-al
|
R
| false
| false
| 14,250
|
r
|
# Data import from Excel .csv export and cleaning for nice handling in R
# 14-Feb-2019
# Set WORK working directory
setwd("~/`Stock assessment/Analysis/Data files")
# Set HOME working directory
setwd("~/DFO BI02 Stock assessment data analysis/Analysis/Data files")
# Load important packages to use
library(dplyr)
library(ggplot2)
library(tidyr)
library(magrittr)
library(lubridate)
#######################
#
# TALLY data cleaning
#
#######################
# Read data
tally.df <- read.csv("2017_MissionRST_Sockeye_20180406_TALLY.csv")
# Rename column headers, reformat data for easier handling in R, create USID. Will be done with pipes for quick running
tally.df <- tally.df %>%
rename(date = Date, # used this extended way of renaming columns so can see what old column names correspond to in case of error
survey_type = Survey.Type,
observers = Observers..Initials.Only.,
daily_sheet = Daily.Sheet..,
run = Run..,
bay = Bay..,
direction_travel = GPS...Direction.of.Travel,
dist_travel_m = GPS...Distance.Traveled..m.,
run_time_sec = GPS...Total.Run.Time..sec.,
vessel_speed_mps = GPS...Vessel.Speed..m.s.,
current_speed_mps = GPS...Current.Speed..m.s.,
time_block = Time.Block..Based.on.Start.Time.,
set_start = Set.Start.Time..24.hr.Clock.,
set_end = Set.End.Time..24.hr.Clock.,
NEW_set_start = NEW.Set.Start.Time,
NEW_set_end = NEW.Set.End.Time,
run_time = Total.Run.Time,
shift_seg = Shift.Segment,
trap_type = TrapType,
depth_ft = Depth..ft..Vertical.Only,
mod_stat = Original.or.Modifed.Nets,
pink_fry_total = Total.Pink.Fry,
chum_fry_total = Total.Chum.Fry,
chinook_fry_total = Total.Chinook.Fry,
chinook_smolt_total = Total.Chinook.Smolts,
coho_smolt_total = Total.Coho.Smolts,
sockeye_smolt_total = Total.Sockeye.Smolts,
sockeye_smolt_bio = Sockeye.Smolt.Biosampled,
sockeye_smolt_release = Sockeye.Smolt.Released,
sockeye_fry_total = Total.Sockeye.Fry,
sockeye_fry_bio = Sockeye.Fry.Biosampled,
sockeye_fry_release = Sockeye.Fry.Released,
coho_fry_total = Total.Coho.fry,
coho_fry_AFC = Coho.fry..AFC.,
sth_AFC = STH..AFC.,
chinook_fry_sampled_total = Total.Chinook.Fry.Retained.For.Samples,
chinook_fry_release = Total.Chinook.Fry.Released,
trap_type_old = Trap.Type,
histo_AFC = Histo..Disease..AFC..RNA,
net_type = Net.Type,
comments_tally = Comments) %>%
mutate_at(vars(c(7:11)), funs(as.character)) %>% # Change columns 7-11 to character before can change to numeric
mutate_at(vars(c(8:11)), funs(as.numeric)) %>% # Change columns 8-10 to be numeric
mutate_at(vars(c(22:34)), funs(as.character)) %>% # Change columns 22-34 to be character before integer to preserve values
mutate_at(vars(c(22:34)), funs(as.integer)) %>% # Change columns 22-34 to be integer
mutate_at(vars(c(20)), funs(as.character)) %>% # Change column 20 to be character
mutate(dist_travel_m = ifelse(dist_travel_m < 0, -dist_travel_m, dist_travel_m)) %>% # Turn negative distances into positives
mutate(depth_ft = as.integer(replace(depth_ft, depth_ft=="Surface", 0))) %>% # Change depth to be integer and change "surface" to "0"
mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling and future plotting
mutate(run = paste("R", run, sep = ""), bay = paste("B", bay, sep = "")) %>% # Add "R" and "B" before run and bay (respectively) to help make USID in next step
mutate(USID = paste(paste(gsub("-", "", date)), run, bay, trap_type, depth_ft, sep="-")) %>% # Create a unique sampling id (USID) that describes each sampling event over the summer. For simplicity right now, will be combined date-run-bay-trap-depth. This will correspond to the same USID in the bio.df. Note: when pasting date, I copied it over while simulatenously removing the "-" separator for easier reading.
mutate(env_index = paste(paste(gsub("-", "", date)), run, sep="-")) %T>% # Create another unique index (env_index) to link TALLY with ENV dataframe. Just date-run
write.csv("Mission_Sockeye_TALLY_2017_clean.csv", row.names = F) # Export all this as a .csv file. Use row.names=F otherwise R will add unique row entry numbers (not helpful) and your first column will just be numbers
# Note: will return warnings. This is only because fish counts include NAs, which R doesn't like when considering data as integers. We will
# likely remove these NAs later, but for now they are kept to preserve original entries.
#####################
#
# BIOSAMPLE data cleaning
#
#####################
# Read original csv data from excel
bio.df <- read.csv("2017_MissionRST_Sockeye_20180406_BIO.csv")
# Rename column headers, reformat data for easier handling in R, create USID and UFID. Will be done with pipes for quick running
bio.df <- bio.df %>%
rename(date = Date, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
survey_type = Survey.Type,
observers = Observers..Initials.Only.,
daily_sheet = Daily.Sheet..,
run = Run..,
bay = Bay..,
set_start = Run.Start.Time.I...24.hr.Clock.,
set_end = Run.End.Time.I...24.hr.Clock.,
NEW_set_start = Run.Start.Time.II,
NEW_set_end = Run.End.Time.II,
run_time = Total.Run.Duration..Time.,
run_time_min = Total.Run.Duration..Minutes.,
time_block = Time.Block..Based.on.Start.Time.,
shift_seg = Shift.Segment,
trap_type = Trap..Type,
depth_ft = Depth...feet.,
vt_net_type = Net.Type....VT.Only.,
species_ID_field = Species.ID.in.the.Field,
life_stage_field = Species.Life.Stage.ID...Field,
ID = Fish.ID..,
DNA_vial = DNA.......vial...,
fork_mm = Fork.Length..mm.,
size_class_mm = Size.Class..mm.,
weight_g = Weight..g.,
AFC = AFC..y.n.,
histo_sample = Histo.Sample..y.n.,
histo_jar = Histo.Jar..,
spag_tag = Spaghetti.Tag..,
fish_ID_DNA = Fish.ID.....DNA.Lab,
GSI_submitted = Was.the.sample.submitted.for.GSI.processing....Yes.or.No.,
reason_not_submitted = Reason.if.sample.not.submitted,
GSI_processed = Was.the.sample.GSI.processed....Yes.or.No.,
reason_not_processed = Reason.if.sample.not.processed,
GSI_received = Did.we.receive.a.GSI.result...Yes.or.No.,
reason_not_received = Reason.if.no.GSI.result,
species_ID_DNA = Species.ID...DNA.Lab,
ID_error = Was.a.species.ID.error.made....Yes.or.No.,
ID_current_20180327 = Species.ID...Current...as.of...2018.03.27.,
tally_df_adj = Has.the.Tally.Sheet.been.adjusted...Yes.or.No.,
fish_ID_DNA2 = FISH.ID..DNA.LAB,
CU1_number = CU.Assignment1...CU...with.the.highest.probability,
CU1_name = CU.Assignment1...CU.Name.with.the.highest.probability,
CU1_prob = CU.Assignment1...Probability,
CU2_number = CU.Assignment2...CU...with.the.highest.probability,
CU2_name = CU.Assignment2...CU.Name.with.the.highest.probability,
CU2_prob = CU.Assignment2...Probability,
CU3_number = CU.Assignment3...CU...with.the.highest.probability,
CU3_name = CU.Assignment3...CU.Name.with.the.highest.probability,
CU3_prob = CU.Assignment3...Probability,
CU4_number = CU.Assignment4...CU...with.the.highest.probability,
CU4_name = CU.Assignment4...CU.Name.with.the.highest.probability,
CU4_prob = CU.Assignment4...Probability,
CU5_number = CU.Assignment5...CU...with.the.highest.probability,
CU5_name = CU.Assignment5...CU.Name.with.the.highest.probability,
CU5_prob = CU.Assignment5...Probability,
fish_ID_test1 = Fish.ID.Test.Column.1,
fish_ID_test2 = Fish.ID.Test.Column.2,
CU_assgn1_60 = Is.the.1st.CU.assignment.probability...0.600,
CU_diff_assgn1_2 = The.difference.btw.the.1st.and.2nd.CU.assignments,
CU_diff_assgn1_2_20 = Is.the.difference.btw.the.1st.and.2nd.CU.assignments...0.200,
CU_rule = Rule.trigger.used.to.assign.a.CU,
CU_assigned = Assigned.CU.based.on.GSI.Analysis.I..60.20.Rule.Applied.,
CU_chilko_combo_method = Combined.Chiko.probability,
CU_chilko_combo_prob = Combining.Chiko..ES....Chilko..S..probabilities..assignments.used,
CU_chilko_gr_th_60 = If.the.combined.Chilko.probablility...0.6..is.the.diff.btw.the.combined.and.the.2nd.largest.CU.prob...or...0.2.,
CU_final = Assigned.CU.based.on.GSI.Analysis.II..60.20...Chilko.Combined.Rules.Applied.,
comments_bio = Comments) %>%
mutate_at(vars(c(22)), funs(as.numeric)) %>% # Make column 22 numeric
mutate_at(vars(c(16)), funs(as.character)) %>% # Change column 20 to be character
mutate(depth_ft = as.integer(replace(depth_ft, depth_ft=="Surface", 0))) %>% # Change depth to be integer and change "surface" to "0"
mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
mutate(run = paste("R", run, sep = ""), bay = paste("B", bay, sep = "")) %>% # Add "R" and "B" before run and bay (respectively) to help make USID in next step
mutate(USID = paste(paste(gsub("-", "", date)), run, bay, trap_type, depth_ft, sep="-")) %>% # Create a unique sampling id (USID) that describes each sampling event over the summer. For simplicity right now, will be combined date-run-bay-trap-depth. This will correspond to the same USID in the bio.df
mutate(UFID = paste("2017", ID, sep="-")) %T>% # Create a unique fish id (UFID) - this is not so important in this dataset as it is only 2017 fish. However, may want to consider assigning UFID's if planning to compare biometrics across years. Add "2017" as prefix for now
write.csv("Mission_Sockeye_BIO_2017_clean.csv", row.names = F) # Export it all as a .csv
#####################
#
# ENVIRONMENTAL data cleaning
#
#####################
# Read original csv data from excel
env.df <- read.csv("2017_MissionRST_Sockeye_20180406_ENV.csv")
# Rename column headers, reformat data for easier handling in R, create USID and UFID. Will be done with pipes for quick running
env.df <- env.df %>%
select(-c(Flow.........m.s., BAY)) %>% # Dropped empty flow column
rename(date = Date, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
RPM = RPM,
run = Run..,
time = Time,
wx_description = Weather.Description,
cc_perc = X..Cloud.Cover,
brightness = Brightness,
precipitation = Precipitation,
surface_chop = Surface.Chop,
water_temp_C = Water.Temp..oC.,
air_temp_C = Air.Temp..oC.,
water_clarity_in = Water.Clarity..inches.,
debris = Debris,
flow_in = Initial.Flow.Reading,
flow_out = Secondary.Flow.Reading,
flow_ms = Flow.Calculation..m.s.,
flow_diff = difference,
comments_env = Comments) %>%
mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
mutate(run = paste("R", run, sep = "")) %>% # Add "R" before run to help make env_index in next step
mutate(env_index = paste(paste(gsub("-", "", date)), run, sep="-")) %T>% # Create a unique index ("env_index") to link to TALLY dataframe. Just date-run
write.csv("Mission_Sockeye_ENV_2017_clean.csv", row.names = F) # Export it all as a .csv
#####################
#
# DISCHARGE @ HOPE data cleaning
#
#####################
# Read original csv data from excel
dis.df <- read.csv("2017_MissionRST_Sockeye_20180406_HOPEDISCHARGE.csv")
# Rename column headers, reformat data for easier handling in R, create USID and UFID. Will be done with pipes for quick running
dis.df <- dis.df %>%
rename(date = DATE, # Used this extended way of renaming columns so can see what old column names correspond to in case of error
discharge_m3s = Discharge..m3.s.) %>%
mutate(date = lubridate::dmy(date)) %>% # Convert old dd-mmm-yy to yyyy-mm-dd, makes as.Date for better handling.
mutate(discharge_m3s = as.numeric(discharge_m3s)) %T>% # Make discharge numerical
write.csv("Mission_Sockeye_HOPEDISCHARGE_2017_clean.csv", row.names = F) # Export it all as a .csv
## THESE DATAFRAMES STILL ESSENTIALLY REPRESENT THE ORIGINAL EXCEL SPREADSHEETS. FURTHER SELECTION WILL OCCUR IN "data_explr" script.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CONDOP.R
\name{get.intergenic.regions}
\alias{get.intergenic.regions}
\title{Build a data table containing generic information on intergenic regions.}
\usage{
get.intergenic.regions(genes, str = "+")
}
\arguments{
\item{genes}{An annotation data table.}
\item{str}{A given strand. Defaults to "+".}
}
\description{
Internal function to build a data table containing information of the intergenic regions on a given strand.
}
\author{
Vittorio Fortino
get.intergenic.regions()
}
\keyword{internal}
|
/man/get.intergenic.regions.Rd
|
no_license
|
cran/CONDOP
|
R
| false
| true
| 577
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CONDOP.R
\name{get.intergenic.regions}
\alias{get.intergenic.regions}
\title{Build a data table containing generic information on intergenic regions.}
\usage{
get.intergenic.regions(genes, str = "+")
}
\arguments{
\item{genes}{An annotation data table.}
\item{str}{A given strand. Defaults to "+".}
}
\description{
Internal function to build a data table containing information of the intergenic regions on a given strand.
}
\author{
Vittorio Fortino
get.intergenic.regions()
}
\keyword{internal}
|
clikcorr_n <-
function(data, lower1, upper1, lower2, upper2, cp=.95, starVal=NA, nlm=FALSE, ...) {
  # clikcorr_n constructs a confidence interval for the correlation
  # coefficient between two variables. The variables may be censored
  # (left, right, or interval), or missing. The interval is constructed
  # by inverting likelihood ratio tests.
  #
  # Args
  #   data  : A data frame containing the data and censoring status indicators
  #   lower1: Name of the variable giving the lower bound of the first
  #           measurement (NA if missing or left censored)
  #   upper1: Name of the variable giving the upper bound of the first
  #           measurement (NA if missing or right censored)
  #   lower2: Name of the variable giving the lower bound of the second
  #           measurement (NA if missing or left censored)
  #   upper2: Name of the variable giving the upper bound of the second
  #           measurement (NA if missing or right censored)
  #   cp    : Coverage probability of the confidence interval (default 0.95)
  #   starVal: Starting values forwarded to the underlying estimation
  #           routines (NA = let them choose) -- presumably; confirm
  #           against estimate()/profile_ci().
  #   nlm   : Forwarded to the estimation routines; presumably selects
  #           nlm()-based optimization -- confirm against estimate().
  #   ...   : Further arguments forwarded to estimate(), profile_ci()
  #           and correlation_lrt().
  #
  # Returns
  #   A list with Cor (point estimate), Cov, Mean, P0 (p-value for H0: r=0),
  #   LCL/UCL (confidence limits) and Loglike.

  # Censored-data structure reused by the likelihood routines. Renamed from
  # 'F' (the base-R alias for FALSE), which is unsafe to shadow.
  cens <- prepare_data(data, lower1, upper1, lower2, upper2)

  ## Point estimate of the mean vector and covariance matrix.
  m <- suppressWarnings(estimate(cens, starVal=starVal, nlm=nlm, ...))
  r_est <- m$C[1, 2] / sqrt(m$C[1, 1] * m$C[2, 2])

  ## Confidence interval by profiling the likelihood.
  ci <- suppressWarnings(profile_ci(data, lower1, upper1, lower2, upper2, cp,
                                    starVal=starVal, nlm=nlm, ...))

  ## Likelihood-ratio p-value for the null hypothesis r = 0.
  p0 <- suppressWarnings(correlation_lrt(cens, 0, starVal=starVal, ...))

  list(Cor=r_est, Cov=m$C, Mean=m$Mu, P0=p0,
       LCL=ci$lcl, UCL=ci$ucl, Loglike=m$loglike)
}
|
/R/clikcorr_n.R
|
no_license
|
cran/clikcorr
|
R
| false
| false
| 1,840
|
r
|
clikcorr_n <-
function(data, lower1, upper1, lower2, upper2, cp=.95, starVal=NA, nlm=FALSE, ...) {
  # clikcorr constructs a confidence interval for the correlation
  # coefficients between two or more variables. The variables may
  # be censored (left, right, or interval), or missing. The
  # interval is constructed by inverting likelihood ratio tests.
  #
  # Args
  #   data  : A data frame containing the data and censoring status indicators
  #   lower1: The name of the variable giving the lower bound of the first
  #           measurement (NA if missing or left censored)
  #   upper1: The name of the variable giving the upper bound of the first
  #           measurement (NA if missing or left censored)
  #   lower2: The name of the variable giving the lower bound of the second
  #           measurement (NA if missing or left censored)
  #   upper2: The name of the variable giving the upper bound of the second
  #           measurement (NA if missing or left censored)
  #   cp    : The coverage probability of the confidence interval
  #   starVal: starting values forwarded to the estimation routines
  #           (NA = default) -- presumably; confirm against estimate().
  #   nlm   : forwarded to the estimation routines; presumably selects
  #           nlm()-based optimization -- confirm.
  #
  # Returns
  #   A list containing coefficient estimates and inferential quantities.

  # NOTE(review): 'F' shadows the base-R alias for FALSE inside this
  # function; behavior is unaffected here, but renaming it would be safer.
  F <- prepare_data(data, lower1, upper1, lower2, upper2)
  ## Get the point estimate.
  m <- suppressWarnings(estimate(F, starVal=starVal, nlm=nlm, ...))
  r_est <- m$C[1,2] / sqrt(m$C[1,1]*m$C[2,2])
  ## Get the confidence interval.
  ci <- suppressWarnings(profile_ci(data, lower1, upper1, lower2, upper2, cp, starVal=starVal, nlm=nlm, ...))
  ## Get a p-value for the null hypothesis that r=0.
  p0 <- suppressWarnings(correlation_lrt(F, 0, starVal=starVal, ...))
  result <- list(Cor=r_est, Cov=m$C, Mean=m$Mu, P0=p0,
                 LCL=ci$lcl, UCL=ci$ucl, Loglike=m$loglike)
  return(result)
}
|
# Expand a vector of binned values from one bin layout to another (e.g. from
# 7-day bins to 21-day bins), conserving the overall total.
#
# Args:
#   xraw      - input vector of bin values to be rescaled
#   oldIntMax - cumulative upper bound covered by each input bin
#   newIntMax - cumulative upper bound covered by each output bin
#   sMult     - scalar multiplier applied to the fine-grained resolution
#
# Returns a numeric vector of length(newIntMax) holding the regrouped values.
expandBins <- function(xraw, oldIntMax, newIntMax, sMult){
  # Spread any shortfall from 1 evenly across the bins so the grand total is
  # conserved through the transformation. (Assumes the input represents
  # proportions summing to ~1 -- TODO confirm with the data producer.)
  x <- xraw + (1 - sum(xraw)) / length(xraw)
  # Width, in fine units, of each input bin. The first bin is always treated
  # as spanning one unit (times sMult), matching the original implementation.
  widths <- sMult * c(1, diff(oldIntMax))
  # Interpolate: spread each bin's value uniformly over its fine units.
  # (Vectorized rep() replaces the original loop that grew the vector with
  # c() on every iteration, and avoids the 1:length(...) footgun.)
  xnew <- rep(x / widths, times = widths)
  # Regroup the fine units into the requested output bins via cumulative
  # sums: xtrans[i] = sum(xnew[(newIntMax[i-1]+1):newIntMax[i]]).
  diff(c(0, cumsum(xnew)[newIntMax]))
}
|
/prisByRaceInc2004/expandBins.R
|
no_license
|
quasitar/USPrisonData2004
|
R
| false
| false
| 1,497
|
r
|
# Re-bin a vector of values from an initial bin layout to a coarser one
# (e.g. 7-day bins to 21-day bins), keeping the grand total unchanged.
#
# Args:
#   xraw      - vector of bin values to transform
#   oldIntMax - cumulative upper bound of each input bin
#   newIntMax - cumulative upper bound of each output bin
#   sMult     - scalar multiplier for the fine-grained resolution
#
# Returns a numeric vector with one regrouped value per entry of newIntMax.
expandBins <- function(xraw, oldIntMax, newIntMax, sMult){
  # Conserve the total: distribute the shortfall from 1 evenly over the bins.
  adjusted <- xraw + (1 - sum(xraw)) / length(xraw)
  # Fine-grained expansion: each bin's value is spread uniformly over its
  # span (the first bin is treated as spanning one unit, times sMult).
  spans <- sMult * c(1, diff(oldIntMax))
  fine <- rep(adjusted / spans, times = spans)
  # Regroup the fine units into the requested output bins.
  lower <- c(0, newIntMax[-length(newIntMax)])
  vapply(seq_along(newIntMax),
         function(k) sum(fine[(lower[k] + 1):newIntMax[k]]),
         numeric(1))
}
|
# Expand a wide multi-choice survey dataset into long/stacked format: one
# stacked copy of the data per choice column in `choice.var`, keeping only
# choice sets in which exactly one alternative was chosen.
#
# Args:
#   datalong   - input data frame containing the choice indicator columns
#   choice.var - character vector of choice indicator column names
#   cons.var/trip.var/alt.var/chid.var/px.var - names of the id/ordering
#                columns kept in the output
#   view.var   - optional extra column to keep (NA to omit it)
#
# NOTE(review): depends on objects that must exist in the calling
# environment: `betas` (extra columns kept in the output), `nalt` (number of
# alternatives per choice set) and the helper `sort.data.frame()` -- confirm
# they are in scope before calling.
tmp_expand.multichoice.wide<-function(datalong,choice.var,cons.var="cons",trip.var="viaje",alt.var="alt",chid.var="chid",view.var=NA,px.var="preciobase") {
  #browser()
  # NOTE(review): the counter is named 'c', shadowing base c() as a variable
  # (calls to c(...) below still resolve to the function, but it is fragile).
  c<-0
  for (i in choice.var) {
    c<-c+1
    data <-datalong [,c(-which(names(datalong) %in% choice.var))]
    data$choice <- datalong[,i]
    data$choicenum<-c
    # Drop all choice sets that do not have exactly one recorded choice
    data<-data[which(data$chid %in% unique(data$chid)[tapply(data$choice,data$chid,sum)==1]),]
    # Drop rows that were not shown
    data<-subset(data,!is.na(choice))
    # Clear the row names, otherwise the rbind() below fails
    rownames(data)<-NULL
    # NOTE(review): scalar if/else would be clearer than ifelse() here; it
    # works because only the selected branch's assignment is evaluated.
    ifelse (c==1,datafull<-data,datafull<-rbind(datafull,data))
  }
  # Order the columns
  if (is.na(view.var)) {
    datafull<-datafull[,c(cons.var,trip.var,"choicenum",alt.var,chid.var,px.var,betas,"choice")]}
  else {
    datafull<-datafull[,c(cons.var,trip.var,"choicenum",alt.var,chid.var,view.var,px.var,betas,"choice")]}
  # Order the rows
  datafull <- sort.data.frame(~cons+viaje+choicenum+alt,datafull)
  # Add the new chid column (renumber choice sets, nalt rows per set)
  datafull$chid<-rep(1:(nrow(datafull)/nalt),each=nalt)
  rownames(datafull)<-NULL
  datafull
}
|
/R/_expand.multichoice.wide.R
|
no_license
|
mbonoli/funcionesMBO
|
R
| false
| false
| 1,187
|
r
|
# Stack a long-format dataset that has one choice-indicator column per
# choice task (the columns named in `choice.var`) into a single dataset
# with one 'choice' column plus a 'choicenum' index for the task it came
# from. Choice sets without exactly one recorded election are dropped, as
# are alternatives that were not shown (NA choice).
#
# NOTE(review): depends on the globals `betas` (extra covariate columns to
# keep), `nalt` (alternatives per choice set) and the non-standard helper
# `sort.data.frame()` existing in the calling environment -- confirm.
# NOTE(review): the row sort hard-codes cons/viaje/choicenum/alt rather
# than using cons.var/trip.var/alt.var -- confirm this is intended.
tmp_expand.multichoice.wide<-function(datalong,choice.var,cons.var="cons",trip.var="viaje",alt.var="alt",chid.var="chid",view.var=NA,px.var="preciobase") {
  #browser()
  # k counts choice tasks (renamed from `c`, which shadowed base::c).
  k <- 0
  for (i in choice.var) {
    k <- k + 1
    # Drop every choice column, then re-attach the current one as 'choice'.
    data <- datalong[, c(-which(names(datalong) %in% choice.var))]
    data$choice <- datalong[, i]
    data$choicenum <- k
    # Keep only choice sets in which exactly one alternative was chosen.
    data <- data[which(data$chid %in% unique(data$chid)[tapply(data$choice, data$chid, sum) == 1]), ]
    # Drop alternatives that were not shown.
    data <- subset(data, !is.na(choice))
    # Reset row names so rbind() below does not fail on duplicates.
    rownames(data) <- NULL
    # Scalar control flow: was ifelse(), which is meant for vectors.
    if (k == 1) {
      datafull <- data
    } else {
      datafull <- rbind(datafull, data)
    }
  }
  # Order the columns (view.var is optional).
  if (is.na(view.var)) {
    datafull <- datafull[, c(cons.var, trip.var, "choicenum", alt.var, chid.var, px.var, betas, "choice")]
  } else {
    datafull <- datafull[, c(cons.var, trip.var, "choicenum", alt.var, chid.var, view.var, px.var, betas, "choice")]
  }
  # Order the rows.
  datafull <- sort.data.frame(~cons+viaje+choicenum+alt, datafull)
  # Rebuild chid so it is sequential across the stacked choice sets.
  datafull$chid <- rep(1:(nrow(datafull)/nalt), each = nalt)
  rownames(datafull) <- NULL
  datafull
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nWadeableStationsPerTransect.r
\name{nWadeableStations}
\alias{nWadeableStations}
\alias{nWadeableStationsPerTransect}
\title{Estimates the intended number of wadeable thalweg stations to be visited at
an NRSA study site.}
\usage{
nWadeableStations(uid, transect, station, by = c("transect", "site"))
nWadeableStationsPerTransect(uid, transect, station)
}
\arguments{
\item{uid}{a vector of site identifiers}
\item{transect}{a vector of transect identifiers}
\item{station}{a vector of station identifiers}
\item{by}{character; either \code{"transect"} or \code{"site"}, giving the level at which station counts are returned}
}
\description{
Estimates the intended number of wadeable thalweg stations at each transect
which are considered sampled (even if there is no data) for the purposes of
calculating residual pools and channel lengths. The number of stations at
a transect is calculated as the greater of either the number of stations
occurring in the dataframe for that transect, or the most common count of
stations (i.e. station mode) occurring at that site.
}
\details{
It takes advantage of the fact that stations
are numeric, and thus that the last station to be expected for a transect
is the number of transects to be expected at that transect, adding 1 to
the station to account for station numbering starting at 0. Transect K
always has 1 station.
The by option allows you to calculate the number of stations by site or transect.
Doing it by site makes the calculations of reach length (in
\link{calculateWadeableReachLength}) easier.
}
\examples{
d <- expand.grid(uid = 1:10, transects = LETTERS[1:11], station = 0:9)
nWadeableStations(d$uid, d$transects, d$station, by = 'site')
nWadeableStationsPerTransect(d$uid, d$transects, d$station)
}
|
/man/nWadeableStations.Rd
|
no_license
|
jasonelaw/nrsa
|
R
| false
| true
| 1,737
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nWadeableStationsPerTransect.r
\name{nWadeableStations}
\alias{nWadeableStations}
\alias{nWadeableStationsPerTransect}
\title{Estimates the intended number of wadeable thalweg stations to be visited at
an NRSA study site.}
\usage{
nWadeableStations(uid, transect, station, by = c("transect", "site"))
nWadeableStationsPerTransect(uid, transect, station)
}
\arguments{
\item{uid}{a vector of site identifiers}
\item{transect}{a vector of transect identifiers}
\item{station}{a vector of station identifiers}
\item{by}{character; either \code{"transect"} or \code{"site"}, giving the level at which station counts are returned}
}
\description{
Estimates the intended number of wadeable thalweg stations at each transect
which are considered sampled (even if there is no data) for the purposes of
calculating residual pools and channel lengths. The number of stations at
a transect is calculated as the greater of either the number of stations
occurring in the dataframe for that transect, or the most common count of
stations (i.e. station mode) occurring at that site.
}
\details{
It takes advantage of the fact that stations
are numeric, and thus that the last station to be expected for a transect
is the number of transects to be expected at that transect, adding 1 to
the station to account for station numbering starting at 0. Transect K
always has 1 station.
The by option allows you to calculate the number of stations by site or transect.
Doing it by site makes the calculations of reach length (in
\link{calculateWadeableReachLength}) easier.
}
\examples{
d <- expand.grid(uid = 1:10, transects = LETTERS[1:11], station = 0:9)
nWadeableStations(d$uid, d$transects, d$station, by = 'site')
nWadeableStationsPerTransect(d$uid, d$transects, d$station)
}
|
# Emily Linebarger, based on code by Audrey Batzel
# May/June 2019
# Prep activities and outputs data for TB impact model in Guatemala.
# The current working directory should be the root of this repo (set manually by user)
# NOTE(review): this script reads the globals actFile, outputsFile,
# drugComboVars and outputFile2b, and calls data.table (fread, :=),
# readxl (read_excel) and archive() -- presumably defined/loaded by a
# master script that sources this one. Confirm before running stand-alone.
# -----------------------------------------------------------
# ---------------------------------------------------
# Read in data
# ---------------------------------------------------
drc = readRDS("J:/Project/Evaluation/GF/impact_evaluation/cod/prepped_data/outputs_activities_for_pilot_wide.RDS") #For reference
activities = fread(actFile)
outputs = fread(outputsFile)
#Change year and quarter to date
# Map quarter 1-4 onto 0.00/0.25/0.50/0.75 so date = year + quarter gives a
# fractional-year time axis (e.g. 2017 Q3 -> 2017.5).
activities[, quarter:=(quarter/4)-0.25]
activities[, date:=year+quarter]
outputs[, quarter:=(quarter/4)-0.25]
outputs[, date:=year+quarter]
#Add _ to names of data.
# Replace spaces and slashes in column names so they are valid R symbols.
names(activities) = gsub(" ", "_", names(activities))
names(outputs) = gsub(" ", "_", names(outputs))
names(activities) = gsub("/", "_", names(activities))
names(outputs) = gsub("/", "_", names(outputs))
#-------------------------------------------------------
# Before anything is changed, make general variable graphs.
#-------------------------------------------------------
# activities_wide = melt(activities, id.vars = c('date', 'department', 'municipality'))
# pdf(paste0(visIeDir, "raw_activities_plots.pdf"), height=5.5, width=9)
# #Municipality level plots - only do where municipality is not NA
# act_muns = unique(activities_wide$municipality)
# for (m in act_muns){
# plot = ggplot(activities_wide[municipality==m], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all activity vars for municipality ', m), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
#
# #Department-level plots
# act_depts = unique(activities_wide$department)
# activities_wide_d = activities_wide[, .(value = sum(value)), by=c('date', 'department', 'variable')]
# for (d in act_depts){
# plot = ggplot(activities_wide_d[department==d], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all activity vars for department ', d), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
# dev.off()
#
# outputs_wide = melt(outputs, id.vars = c('date', 'department', 'municipality'))
# pdf(paste0(visIeDir, "raw_outputs_plots.pdf"), height=5.5, width=9)
# #Municipality level plots - only do where municipality is not NA
# out_muns = unique(outputs_wide$municipality)
# for (m in out_muns){
# plot = ggplot(outputs_wide[municipality==m], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all output vars for municipality ', m), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
#
# #Department-level plots
# out_depts = unique(outputs_wide$department)
# outputs_wide_d = outputs_wide[, .(value=sum(value)), by=c('date', 'department', 'variable')]
# for (d in out_depts){
# plot = ggplot(outputs_wide_d[department==d], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all output vars for department ', d), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
# dev.off()
#----------------------------------------------------
# Validate files, and subset data.
#----------------------------------------------------
#Replace unknown municipalities
# (These are now hard assertions: fail fast if any row is missing its
# municipality or department.)
stopifnot(nrow(activities[is.na(municipality) | is.na(department)])==0)
stopifnot(nrow(outputs[is.na(municipality) | is.na(department)])==0)
#Drop all 0 departments and municipalities - these are national-level.
# There are 0 of these cases in the 7.15.19 data - EL
activities = activities[!(department==0|municipality==0)]
outputs = outputs[!(department==0 | municipality==0)]
#Make sure that merge below will work - dates.
# The next few bare expressions are interactive diagnostics only; their
# results are not stored or used downstream.
a_dates = unique(activities$date)
o_dates = unique(outputs$date)
a_dates[!a_dates%in%o_dates] #None. EL 8/7/19
o_dates[!o_dates%in%a_dates] #2009 and 2012. EL 8/7/19 2009, EL 8/19/19
#Departments
a_depts = unique(activities$department)
o_depts = unique(outputs$department)
a_depts[!a_depts%in%o_depts] #None.
o_depts[!o_depts%in%a_depts] #None.
#Municipalities
a_mun = unique(activities$municipality)
o_mun = unique(outputs$municipality)
a_mun[!a_mun%in%o_mun] #None. 7.15.19 EL
o_mun[!o_mun%in%a_mun] #None. 7.15.19 EL ==> Changed to several, EL 8/7/19 ==> Changed back to none 8/19/19.
#Subset data to only department-level, because municipalities aren't matching right now.
#Check that data is uniquely identified
activities[duplicated(activities, by=c('municipality', 'department','date')), dup:=TRUE]
if (nrow(activities[dup==TRUE])!=0){
print(paste0("There are ", nrow(activities[dup==TRUE]), " duplicates in municipality and date in the activities data. Review."))
}
outputs[duplicated(outputs, by=c('municipality', 'department', 'date')), dup:=TRUE]
if (nrow(outputs[dup==TRUE])!=0){
print(paste0("There are ", nrow(outputs[dup==TRUE]), " duplicates in municipality and date in the outputs data. Review."))
}
# Check to make sure that the first number of municipality is the department
# (municipality codes appear to be department*100 + municipality number --
# TODO confirm against the codebook).
activities[, mun_start:=floor(municipality/100)]
activities[department!=mun_start, department_error:=TRUE]
activities[department==mun_start, department_error:=FALSE]
if (nrow(activities[department_error==TRUE])!=0){
print(paste0("There are ", nrow(activities[department_error==TRUE]), " cases where the first numbers of municipality don't match department in activities data."))
}
outputs[, mun_start:=floor(municipality/100)]
outputs[department!=mun_start, department_error:=TRUE]
outputs[department==mun_start, department_error:=FALSE]
if (nrow(outputs[department_error==TRUE])!=0){
print(paste0("There are ", nrow(outputs[department_error==TRUE]), " cases where the first numbers of municipality don't match department in outputs data."))
}
#See if there are any NAs in values for municipality, department, or date.
# Flag rows whose key variables are NA (or 0 for the geographic keys) and
# report them; rows are flagged but NOT dropped here.
vars = c('municipality', 'department', 'date')
for (var in vars){
activities[is.na(get(var)), NA_ERROR:=TRUE]
outputs[is.na(get(var)), NA_ERROR:=TRUE]
if (var%in%c('municipality', 'department')){
activities[get(var)==0, NA_ERROR:=TRUE]
outputs[get(var)==0, NA_ERROR:=TRUE]
}
}
if (nrow(activities[NA_ERROR==TRUE])!=0){
print("There are NAs in key variables in activities data")
print(unique(activities[NA_ERROR==TRUE, .(date, department, municipality)]))
}
if (nrow(outputs[NA_ERROR==TRUE])!=0){
print("There are NAs in key variables in outputs data")
print(unique(outputs[NA_ERROR==TRUE, .(date, department, municipality)]))
}
#Drop unneeded names
# Remove the temporary QA flag columns created above.
activities = activities[, -c('dup', 'mun_start', 'department_error', 'NA_ERROR')]
outputs = outputs[, -c('dup', 'mun_start', 'department_error', 'NA_ERROR')]
#------------------------
# Activities
#------------------------
# Collapse the activities data from municipality level to department level.
# Variables constant within a department (dep_vars) are averaged; variables
# that vary by municipality (mun_vars) are summed. Yearly variables are
# then split evenly across the 4 quarters (assumes they are counts).
#Run a check to decide which variables are already at the dept. level and which need to be summed.
vars = names(activities)[!names(activities)%in%c('date', 'department', 'municipality')]
dep_vars = c()
mun_vars = c()
for (var in vars){
dt = unique(activities[, .(date, department, var=get(var))])
dt[duplicated(dt, by=c('date', 'department')), dup:=TRUE]
if (nrow(dt[dup==TRUE])!=0){
mun_vars = c(mun_vars, var)
} else {
dep_vars = c(dep_vars, var)
}
}
#Find out which variables are at the year and quarter level.
year_vars = vars[grepl("yearly", vars)]
year_vars = c(year_vars, "Second_Line_Drugs_Distributed_value_d") #Hard-code second line drugs. EL 10.24.2019
quarter_vars = vars[grepl("quarterly", vars)]
stopifnot(length(year_vars)+length(quarter_vars)==length(vars)-2) #Subtract 2 because year and quarter are still in the data.
# #Go ahead and hard code these variables to be department-level, because there is a data prep error. EL 7.8.19
#This should be removed once new data is sent! REMOVED 8/19/19 EL
# dep_vars = c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d", dep_vars)
# mun_vars = mun_vars[!mun_vars%in%c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d")]
#Flag cases where variables end in _d but are in the mun-level dataset.
# NOTE(review): grepl("_d", ...) matches "_d" anywhere in the name, not
# just as a suffix -- confirm no variable names contain "_d" mid-string.
dept_level_error = mun_vars[grepl("_d", mun_vars)]
if (length(dept_level_error)!=0){
print("ERROR: Some department-level variables are not uniquely identified by department and date!")
print(dept_level_error)
}
#-------------------------------------------------------------------------------------------------------------
# Handle 4 unique cases: department + year, department + quarter, municipality + year, municipality + quarter
case1 = names(activities)[names(activities)%in%dep_vars & names(activities)%in%year_vars]
case2 = names(activities)[names(activities)%in%dep_vars & names(activities)%in%quarter_vars]
case3 = names(activities)[names(activities)%in%mun_vars & names(activities)%in%year_vars]
case4 = names(activities)[names(activities)%in%mun_vars & names(activities)%in%quarter_vars]
# Scaffold of all year x quarter combinations used to expand yearly data.
date_frame = data.table(expand.grid(year=seq(2009, 2018, by=1), quarter=seq(0.0, 0.75, by=0.25)))
date_frame[, date:=year+quarter]
date_frame$quarter = NULL
#-------------------------------
# Case 1 - department and year
#Take the average of the department-level variables by date and department.
# Build the wide table one variable at a time via full outer merges.
dt_case1 = data.table(year=integer(), department=integer())
for (var in case1){
var_subset = activities[, .(var=mean(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case1 = merge(dt_case1, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case1[duplicated(dt_case1, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case1[dup==TRUE])==0)
dt_case1$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case1)
dt_case1 = merge(dt_case1, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case1){
dt_case1[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case1) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case1) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case1))
dt_case1$year <- NULL
#-------------------------------
# Case 2 - department and quarter
#Take the average of the department-level variables by date and department.
dt_case2 = data.table(date=integer(), department=integer())
for (var in case2){
var_subset = activities[, .(var=mean(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case2 = merge(dt_case2, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case2[duplicated(dt_case2, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case2[dup==TRUE])==0)
dt_case2$dup<-NULL
#Fix names.
names(dt_case2) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case2))
#-------------------------------
# Case 3 - municipality and year
#Take the sum of municipality-level variables by date and department.
dt_case3 = data.table(year=integer(), department=integer())
for (var in case3){
var_subset = activities[, .(var=sum(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case3 = merge(dt_case3, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case3[duplicated(dt_case3, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case3[dup==TRUE])==0)
dt_case3$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case3)
dt_case3 = merge(dt_case3, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case3){
dt_case3[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case3) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case3) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case3))
dt_case3$year <- NULL
#-------------------------------
# Case 4 - municipality and quarter
#Take the sum of municipality-level variables by date and department.
dt_case4 = data.table(date=integer(), department=integer())
for (var in case4){
var_subset = activities[, .(var=sum(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case4 = merge(dt_case4, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case4[duplicated(dt_case4, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case4[dup==TRUE])==0)
dt_case4$dup<-NULL
#Fix names.
names(dt_case4) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case4))
#-----------------------------------------
# Merge all of this data together.
activities1 = merge(dt_case1, dt_case2, by=c('date', 'department'), all=T)
activities1 = merge(activities1, dt_case3, by=c('date', 'department'), all=T)
activities1 = merge(activities1, dt_case4, by=c('date', 'department'), all=T)
#Make sure you've accounted for all columns except municipality, year, and quarter
stopifnot(ncol(activities1) == (ncol(activities)-3))
new_names = names(activities1)[3:ncol(activities1)] #Resest the names so they're distinguishable from activity variables.
new_names = paste0(new_names, "_act")
names(activities1)[3:ncol(activities1)] <- new_names
#------------------------
# Outputs
#------------------------
# Same municipality -> department collapse as for the activities data
# above: department-level variables are averaged, municipality-level
# variables are summed, and yearly variables are split evenly across the
# 4 quarters.
#Run a check to decide which variables are already at the dept. level and which need to be summed.
vars = names(outputs)[!names(outputs)%in%c('date', 'department', 'municipality')]
dep_vars = c()
mun_vars = c()
for (var in vars){
dt = unique(outputs[, .(date, department, var=get(var))])
dt[duplicated(dt, by=c('date', 'department')), dup:=TRUE]
if (nrow(dt[dup==TRUE])!=0){
mun_vars = c(mun_vars, var)
} else {
dep_vars = c(dep_vars, var)
}
}
#Find out which variables are at the year and quarter level.
year_vars = vars[grepl("yearly", vars)]
quarter_vars = vars[grepl("quarterly", vars)]
stopifnot(length(year_vars)+length(quarter_vars)==length(vars)-2) #Subtract 2 because year and quarter are still in the data.
# #Go ahead and hard code these variables to be department-level, because there is a data prep error. EL 7.8.19
#This should be removed once new data is sent! REMOVED 8/19/19 EL
# dep_vars = c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d", dep_vars)
# mun_vars = mun_vars[!mun_vars%in%c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d")]
#Flag cases where variables end in _d but are in the mun-level dataset.
# NOTE(review): grepl("_d", ...) matches "_d" anywhere in the name, not
# just as a suffix -- confirm no variable names contain "_d" mid-string.
dept_level_error = mun_vars[grepl("_d", mun_vars)]
if (length(dept_level_error)!=0){
print("ERROR: Some department-level variables are not uniquely identified by department and date!")
print(dept_level_error)
}
#-------------------------------------------------------------------------------------------------------------
# Handle 4 unique cases: department + year, department + quarter, municipality + year, municipality + quarter
case1 = names(outputs)[names(outputs)%in%dep_vars & names(outputs)%in%year_vars]
case2 = names(outputs)[names(outputs)%in%dep_vars & names(outputs)%in%quarter_vars]
case3 = names(outputs)[names(outputs)%in%mun_vars & names(outputs)%in%year_vars]
case4 = names(outputs)[names(outputs)%in%mun_vars & names(outputs)%in%quarter_vars]
# Scaffold of all year x quarter combinations used to expand yearly data.
date_frame = data.table(expand.grid(year=seq(2009, 2018, by=1), quarter=seq(0.0, 0.75, by=0.25)))
date_frame[, date:=year+quarter]
date_frame$quarter = NULL
#-------------------------------
# Case 1 - department and year
#Take the average of the department-level variables by date and department.
# Build the wide table one variable at a time via full outer merges.
dt_case1 = data.table(year=integer(), department=integer())
for (var in case1){
var_subset = outputs[, .(var=mean(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case1 = merge(dt_case1, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case1[duplicated(dt_case1, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case1[dup==TRUE])==0)
dt_case1$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case1)
dt_case1 = merge(dt_case1, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case1){
dt_case1[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case1) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case1) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case1))
dt_case1$year <- NULL
#-------------------------------
# Case 2 - department and quarter
#Take the average of the department-level variables by date and department.
dt_case2 = data.table(date=integer(), department=integer())
for (var in case2){
var_subset = outputs[, .(var=mean(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case2 = merge(dt_case2, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case2[duplicated(dt_case2, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case2[dup==TRUE])==0)
dt_case2$dup<-NULL
#Fix names.
names(dt_case2) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case2))
#-------------------------------
# Case 3 - municipality and year
#Take the sum of municipality-level variables by date and department.
dt_case3 = data.table(year=integer(), department=integer())
for (var in case3){
var_subset = outputs[, .(var=sum(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case3 = merge(dt_case3, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case3[duplicated(dt_case3, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case3[dup==TRUE])==0)
dt_case3$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case3)
dt_case3 = merge(dt_case3, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case3){
dt_case3[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case3) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case3) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case3))
dt_case3$year <- NULL
#-------------------------------
# Case 4 - municipality and quarter
#Take the sum of municipality-level variables by date and department.
dt_case4 = data.table(date=integer(), department=integer())
for (var in case4){
var_subset = outputs[, .(var=sum(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case4 = merge(dt_case4, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case4[duplicated(dt_case4, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case4[dup==TRUE])==0)
dt_case4$dup<-NULL
#Fix names.
names(dt_case4) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case4))
#-----------------------------------------
# Merge all of this data together.
outputs1 = merge(dt_case1, dt_case2, by=c('date', 'department'), all=T)
outputs1 = merge(outputs1, dt_case3, by=c('date', 'department'), all=T)
outputs1 = merge(outputs1, dt_case4, by=c('date', 'department'), all=T)
#Make sure you've accounted for all columns except municipality, year, and quarter
stopifnot(ncol(outputs1) == (ncol(outputs)-3))
new_names = names(outputs1)[3:ncol(outputs1)] #Resest the names so they're distinguishable from activity variables.
new_names = paste0(new_names, "_out")
names(outputs1)[3:ncol(outputs1)] <- new_names
#--------------------------------
# Add "Children <5 referred for tb evaluation" to ACF pathway EL 8/22/19
# Using new extramuros data uploaded from Guillermo
extramuros = data.table(read_excel("J:/Project/Evaluation/GF/impact_evaluation/gtm/raw_data/TB-Extramuros-2017-2018.xlsx"))
names(extramuros) = tolower(names(extramuros))
# Keep only the "children <5 referred for TB evaluation" indicator and sum
# it by department and year.
extramuros = extramuros[variable=="Cantidad de niños <de 5 años referidos para evaluación a través de ruta diagnostica para descartar tuberculosis", .(Children_less5_referred_out=sum(extramuros)), by=c('department', 'year')]
setnames(extramuros, 'year', 'date')
# NOTE(review): extramuros dates are whole years while outputs1 dates are
# fractional-year quarters, so the merge below attaches the yearly value
# only to Q1 rows (date ending in .0) -- confirm this is intended.
#Merge data together
outputs1 = merge(outputs1, extramuros, by=c('department', 'date'), all=T)
#-----------------------------------------------------
# Check to make sure you're still uniquely identifying data
#-----------------------------------------------------
activities1[duplicated(activities1, by=c('department','date')), dup:=TRUE]
if (nrow(activities1[dup==TRUE])!=0){
print(paste0("There are ", nrow(activities1[dup==TRUE]), " duplicates in department and date in the activities data. Review."))
}
outputs1[duplicated(outputs1, by=c('department', 'date')), dup:=TRUE]
if (nrow(outputs1[dup==TRUE])!=0){
print(paste0("There are ", nrow(outputs1[dup==TRUE]), " duplicates in department and date in the outputs data. Review."))
}
activities1 = activities1[, -c('dup')]
outputs1 = outputs1[, -c('dup')]
#-----------------------------------------------------
# Merge data
#-----------------------------------------------------
dt_final = merge(activities1, outputs1, by=c('date', 'department'), all=T) #Save dates and departments from both, in case you have data in one and not the other.
#Replace NaN and NA with 0 - we can assume these actually mean 0.
# cols = 3:ncol(dt_final) #Just don't do this for date and department, the first two columns.
# for (col in cols){
# dt_final[is.na(dt_final[[col]]), (col):=0]
# }
#Generate combined first- and second-line drug activity variables EL 8/22/19
# EDIT FROM DAVID PHILLIPS 9/6/2019 - we need to impute the combination variables for this step right before creating first and second-line variables.
# This imputation code is copied from step 4a.
#--------------------------------------------------
# extrapolate where necessary using GLM (better would be to use multiple imputation)
# For each drug-combination variable and department: if the series is all
# zero/NA, set it to 0; otherwise fill the missing values with a Poisson
# GLM fit on date, capping predictions at (observed max + sd).
# NOTE(review): i starts at 1 and is incremented before any work, so the
# progress display can report slightly over 100% -- cosmetic only.
i=1
for(v in drugComboVars) {
for(h in unique(dt_final$department)) {
i=i+1
#First, check whether all values for this department and this variable are zero.
# if they are, don't backcast.
values = unique(dt_final[department==h, as.vector(get(v))]) #Get a vector of the unique values of the variable.
values[is.na(values)] = 0
zero_compare = rep(0, length(values)) #Get an equal length vector of zeros.
if (all(values==zero_compare)){
print(paste0(v, " is completely zero for department", h, " - making 0 for the entire time series in this department"))
dt_final[department==h, (v):=0]
} else {
#Backcast if it doesn't fall into this category.
# Skip if nothing is missing, or if everything is missing.
if (!any(is.na(dt_final[department==h][[v]]))) next
if (!any(!is.na(dt_final[department==h][[v]]))) next
form = as.formula(paste0(v,'~date'))
lmFit = glm(form, dt_final[department==h], family='poisson')
# predict() on a glm returns the link scale by default; exp() converts
# back to the response (count) scale.
dt_final[department==h, tmp:=exp(predict(lmFit, newdata=dt_final[department==h]))]
# Cap the prediction to avoid runaway extrapolation.
lim = max(dt_final[department==h][[v]], na.rm=T)+sd(dt_final[department==h][[v]], na.rm=T)
dt_final[department==h & tmp>lim, tmp:=lim]
dt_final[department==h & is.na(get(v)), (v):=tmp]
}
pct_complete = floor(i/(length(drugComboVars)*length(unique(dt_final$department)))*100)
cat(paste0('\r', pct_complete, '% Complete'))
flush.console()
}
}
dt_final$tmp = NULL
# Replace NAs with zeros after back-casting DP 8/16/19
for (v in drugComboVars){
dt_final[is.na(get(v)), (v):=0]
}
#-----------------------------------------------------------------------
# Combine first-line (incl. isoniazid) and second-line (incl. MDR) drug
# distribution into single activity variables.
# NOTE(review): sum() adds the two columns within each by-group; assumes
# one row per date x department after the collapse above -- confirm.
dt_final[, Firstline_Distributed_act:=sum(Total_First_Line_Drugs_inusIsonizide__Distributed_act, Isoniazid_Distributed_act, na.rm=T), by=c('date', 'department')]
dt_final[, Secondline_Distributed_act:=sum(Second_Line_Drugs_Distributed_act, Total_MDR_Drugs_Distributed_act, na.rm=T), by=c('date', 'department')]
#-----------------------------------------------------
# Save data
#-----------------------------------------------------
# NOTE(review): outputFile2b and archive() are presumably defined by the
# master script that sources this file -- confirm.
saveRDS(dt_final, outputFile2b)
archive(outputFile2b)
print("Step 2b: Prep activities outputs completed successfully.")
|
/impact_evaluation/gtm/2b_prep_activities_outputs.R
|
no_license
|
ihmeuw/gf
|
R
| false
| false
| 23,950
|
r
|
# Emily Linebarger, based on code by Audrey Batzel
# May/June 2019
# Prep activities and outputs data for TB impact model in Guatemala.
# The current working directory should be the root of this repo (set manually by user)
# -----------------------------------------------------------
# ---------------------------------------------------
# Read in data
# ---------------------------------------------------
drc = readRDS("J:/Project/Evaluation/GF/impact_evaluation/cod/prepped_data/outputs_activities_for_pilot_wide.RDS") #For reference
activities = fread(actFile)
outputs = fread(outputsFile)
#Change year and quarter to date
activities[, quarter:=(quarter/4)-0.25]
activities[, date:=year+quarter]
outputs[, quarter:=(quarter/4)-0.25]
outputs[, date:=year+quarter]
#Add _ to names of data.
names(activities) = gsub(" ", "_", names(activities))
names(outputs) = gsub(" ", "_", names(outputs))
names(activities) = gsub("/", "_", names(activities))
names(outputs) = gsub("/", "_", names(outputs))
#-------------------------------------------------------
# Before anything is changed, make general variable graphs.
#-------------------------------------------------------
# activities_wide = melt(activities, id.vars = c('date', 'department', 'municipality'))
# pdf(paste0(visIeDir, "raw_activities_plots.pdf"), height=5.5, width=9)
# #Municipality level plots - only do where municipality is not NA
# act_muns = unique(activities_wide$municipality)
# for (m in act_muns){
# plot = ggplot(activities_wide[municipality==m], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all activity vars for municipality ', m), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
#
# #Department-level plots
# act_depts = unique(activities_wide$department)
# activities_wide_d = activities_wide[, .(value = sum(value)), by=c('date', 'department', 'variable')]
# for (d in act_depts){
# plot = ggplot(activities_wide_d[department==d], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all activity vars for department ', d), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
# dev.off()
#
# outputs_wide = melt(outputs, id.vars = c('date', 'department', 'municipality'))
# pdf(paste0(visIeDir, "raw_outputs_plots.pdf"), height=5.5, width=9)
# #Municipality level plots - only do where municipality is not NA
# out_muns = unique(outputs_wide$municipality)
# for (m in out_muns){
# plot = ggplot(outputs_wide[municipality==m], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all output vars for municipality ', m), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
#
# #Department-level plots
# out_depts = unique(outputs_wide$department)
# outputs_wide_d = outputs_wide[, .(value=sum(value)), by=c('date', 'department', 'variable')]
# for (d in out_depts){
# plot = ggplot(outputs_wide_d[department==d], aes(y=value, x=date)) +
# geom_line() +
# facet_wrap(~variable, scales='free') +
# labs(title=paste('Time series of all output vars for department ', d), y='Value', x='Date') +
# theme_bw()
# print(plot)
# }
# dev.off()
#----------------------------------------------------
# Validate files, and subset data.
#----------------------------------------------------
# This section assumes `activities` and `outputs` are data.tables (prepared
# earlier in this file) keyed conceptually by municipality, department, date.
# All `:=` assignments below modify those tables by reference.
#Replace unknown municipalities
stopifnot(nrow(activities[is.na(municipality) | is.na(department)])==0)
stopifnot(nrow(outputs[is.na(municipality) | is.na(department)])==0)
#Drop all 0 departments and municipalities - these are national-level.
# There are 0 of these cases in the 7.15.19 data - EL
activities = activities[!(department==0|municipality==0)]
outputs = outputs[!(department==0 | municipality==0)]
#Make sure that merge below will work - dates.
# The bare set-difference expressions below are interactive diagnostics; their
# results at the time of writing are recorded in the trailing comments.
a_dates = unique(activities$date)
o_dates = unique(outputs$date)
a_dates[!a_dates%in%o_dates] #None. EL 8/7/19
o_dates[!o_dates%in%a_dates] #2009 and 2012. EL 8/7/19 2009, EL 8/19/19
#Departments
a_depts = unique(activities$department)
o_depts = unique(outputs$department)
a_depts[!a_depts%in%o_depts] #None.
o_depts[!o_depts%in%a_depts] #None.
#Municipalities
a_mun = unique(activities$municipality)
o_mun = unique(outputs$municipality)
a_mun[!a_mun%in%o_mun] #None. 7.15.19 EL
o_mun[!o_mun%in%a_mun] #None. 7.15.19 EL ==> Changed to several, EL 8/7/19 ==> Changed back to none 8/19/19.
#Subset data to only department-level, because municipalities aren't matching right now.
#Check that data is uniquely identified
# duplicated() flags second-and-later occurrences only, so these counts are
# "extra" rows beyond the first occurrence of each key.
activities[duplicated(activities, by=c('municipality', 'department','date')), dup:=TRUE]
if (nrow(activities[dup==TRUE])!=0){
print(paste0("There are ", nrow(activities[dup==TRUE]), " duplicates in municipality and date in the activities data. Review."))
}
outputs[duplicated(outputs, by=c('municipality', 'department', 'date')), dup:=TRUE]
if (nrow(outputs[dup==TRUE])!=0){
print(paste0("There are ", nrow(outputs[dup==TRUE]), " duplicates in municipality and date in the outputs data. Review."))
}
# Check to make sure that the first number of municipality is the department
# (municipality codes are department*100 + local code, per the floor() check).
activities[, mun_start:=floor(municipality/100)]
activities[department!=mun_start, department_error:=TRUE]
activities[department==mun_start, department_error:=FALSE]
if (nrow(activities[department_error==TRUE])!=0){
print(paste0("There are ", nrow(activities[department_error==TRUE]), " cases where the first numbers of municipality don't match department in activities data."))
}
outputs[, mun_start:=floor(municipality/100)]
outputs[department!=mun_start, department_error:=TRUE]
outputs[department==mun_start, department_error:=FALSE]
if (nrow(outputs[department_error==TRUE])!=0){
print(paste0("There are ", nrow(outputs[department_error==TRUE]), " cases where the first numbers of municipality don't match department in outputs data."))
}
#See if there are any NAs in values for municipality, department, or date.
vars = c('municipality', 'department', 'date')
for (var in vars){
activities[is.na(get(var)), NA_ERROR:=TRUE]
outputs[is.na(get(var)), NA_ERROR:=TRUE]
if (var%in%c('municipality', 'department')){
activities[get(var)==0, NA_ERROR:=TRUE]
outputs[get(var)==0, NA_ERROR:=TRUE]
}
}
if (nrow(activities[NA_ERROR==TRUE])!=0){
print("There are NAs in key variables in activities data")
print(unique(activities[NA_ERROR==TRUE, .(date, department, municipality)]))
}
if (nrow(outputs[NA_ERROR==TRUE])!=0){
print("There are NAs in key variables in outputs data")
print(unique(outputs[NA_ERROR==TRUE, .(date, department, municipality)]))
}
#Drop unneeded names
# NOTE(review): this assumes the flag columns ('dup', 'department_error', etc.)
# were created above even when zero rows matched the conditional `:=` — verify
# with the data.table version in use, otherwise the drop errors on a missing column.
activities = activities[, -c('dup', 'mun_start', 'department_error', 'NA_ERROR')]
outputs = outputs[, -c('dup', 'mun_start', 'department_error', 'NA_ERROR')]
#------------------------
# Activities
#------------------------
# Collapse the activities data to one row per (department, date=year+quarter).
# Each variable is classified on two axes:
#   - level: department-level (value repeats across municipalities) vs
#     municipality-level (values differ within a department), and
#   - frequency: yearly vs quarterly.
# Department-level vars are averaged across municipalities; municipality-level
# vars are summed. Yearly vars are then split evenly into 4 quarters.
#Run a check to decide which variables are already at the dept. level and which need to be summed.
vars = names(activities)[!names(activities)%in%c('date', 'department', 'municipality')]
dep_vars = c()
mun_vars = c()
for (var in vars){
dt = unique(activities[, .(date, department, var=get(var))])
dt[duplicated(dt, by=c('date', 'department')), dup:=TRUE]
if (nrow(dt[dup==TRUE])!=0){
mun_vars = c(mun_vars, var)
} else {
dep_vars = c(dep_vars, var)
}
}
#Find out which variables are at the year and quarter level.
year_vars = vars[grepl("yearly", vars)]
year_vars = c(year_vars, "Second_Line_Drugs_Distributed_value_d") #Hard-code second line drugs. EL 10.24.2019
quarter_vars = vars[grepl("quarterly", vars)]
stopifnot(length(year_vars)+length(quarter_vars)==length(vars)-2) #Subtract 2 because year and quarter are still in the data.
# #Go ahead and hard code these variables to be department-level, because there is a data prep error. EL 7.8.19
#This should be removed once new data is sent! REMOVED 8/19/19 EL
# dep_vars = c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d", dep_vars)
# mun_vars = mun_vars[!mun_vars%in%c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d")]
#Flag cases where variables end in _d but are in the mun-level dataset.
# NOTE(review): grepl("_d", ...) matches "_d" anywhere in the name, not just as
# a suffix — confirm "_d$" was not intended before relying on this diagnostic.
dept_level_error = mun_vars[grepl("_d", mun_vars)]
if (length(dept_level_error)!=0){
print("ERROR: Some department-level variables are not uniquely identified by department and date!")
print(dept_level_error)
}
#-------------------------------------------------------------------------------------------------------------
# Handle 4 unique cases: department + year, department + quarter, municipality + year, municipality + quarter
case1 = names(activities)[names(activities)%in%dep_vars & names(activities)%in%year_vars]
case2 = names(activities)[names(activities)%in%dep_vars & names(activities)%in%quarter_vars]
case3 = names(activities)[names(activities)%in%mun_vars & names(activities)%in%year_vars]
case4 = names(activities)[names(activities)%in%mun_vars & names(activities)%in%quarter_vars]
# Scaffold of every (year, quarter) combination; date encodes quarter as a
# fractional year (e.g. 2010.25 = Q2 2010).
date_frame = data.table(expand.grid(year=seq(2009, 2018, by=1), quarter=seq(0.0, 0.75, by=0.25)))
date_frame[, date:=year+quarter]
date_frame$quarter = NULL
#-------------------------------
# Case 1 - department and year
#Take the average of the department-level variables by date and department.
dt_case1 = data.table(year=integer(), department=integer())
for (var in case1){
var_subset = activities[, .(var=mean(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case1 = merge(dt_case1, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case1[duplicated(dt_case1, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case1[dup==TRUE])==0)
dt_case1$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case1)
dt_case1 = merge(dt_case1, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case1){
dt_case1[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case1) == start_row*4)
#Fix names, and drop unneeded columns.
# NOTE(review): this gsub strips "_m"/"_d" anywhere in a name, not only the
# suffix (e.g. "minusIsonizide" becomes "inusIsonizide"). The combine step at
# the end of this file references the mangled name, so changing this pattern
# requires coordinated edits downstream — do not "fix" it in isolation.
names(dt_case1) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case1))
dt_case1$year <- NULL
#-------------------------------
# Case 2 - department and quarter
#Take the average of the department-level variables by date and department.
dt_case2 = data.table(date=integer(), department=integer())
for (var in case2){
var_subset = activities[, .(var=mean(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case2 = merge(dt_case2, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case2[duplicated(dt_case2, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case2[dup==TRUE])==0)
dt_case2$dup<-NULL
#Fix names.
names(dt_case2) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case2))
#-------------------------------
# Case 3 - municipality and year
#Take the sum of municipality-level variables by date and department.
dt_case3 = data.table(year=integer(), department=integer())
for (var in case3){
var_subset = activities[, .(var=sum(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case3 = merge(dt_case3, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case3[duplicated(dt_case3, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case3[dup==TRUE])==0)
dt_case3$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case3)
dt_case3 = merge(dt_case3, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case3){
dt_case3[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case3) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case3) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case3))
dt_case3$year <- NULL
#-------------------------------
# Case 4 - municipality and quarter
#Take the sum of municipality-level variables by date and department.
dt_case4 = data.table(date=integer(), department=integer())
for (var in case4){
var_subset = activities[, .(var=sum(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case4 = merge(dt_case4, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case4[duplicated(dt_case4, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case4[dup==TRUE])==0)
dt_case4$dup<-NULL
#Fix names.
names(dt_case4) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case4))
#-----------------------------------------
# Merge all of this data together.
activities1 = merge(dt_case1, dt_case2, by=c('date', 'department'), all=T)
activities1 = merge(activities1, dt_case3, by=c('date', 'department'), all=T)
activities1 = merge(activities1, dt_case4, by=c('date', 'department'), all=T)
#Make sure you've accounted for all columns except municipality, year, and quarter
stopifnot(ncol(activities1) == (ncol(activities)-3))
new_names = names(activities1)[3:ncol(activities1)] #Reset the names so they're distinguishable from activity variables.
new_names = paste0(new_names, "_act")
names(activities1)[3:ncol(activities1)] <- new_names
#------------------------
# Outputs
#------------------------
# Mirror of the Activities section above, applied to the `outputs` table:
# classify each variable as department- vs municipality-level and yearly vs
# quarterly, aggregate to (department, date), split yearly values into
# quarters, then merge the four cases and suffix columns with "_out".
# (Unlike activities, no variable is hard-coded into year_vars here.)
#Run a check to decide which variables are already at the dept. level and which need to be summed.
vars = names(outputs)[!names(outputs)%in%c('date', 'department', 'municipality')]
dep_vars = c()
mun_vars = c()
for (var in vars){
dt = unique(outputs[, .(date, department, var=get(var))])
dt[duplicated(dt, by=c('date', 'department')), dup:=TRUE]
if (nrow(dt[dup==TRUE])!=0){
mun_vars = c(mun_vars, var)
} else {
dep_vars = c(dep_vars, var)
}
}
#Find out which variables are at the year and quarter level.
year_vars = vars[grepl("yearly", vars)]
quarter_vars = vars[grepl("quarterly", vars)]
stopifnot(length(year_vars)+length(quarter_vars)==length(vars)-2) #Subtract 2 because year and quarter are still in the data.
# #Go ahead and hard code these variables to be department-level, because there is a data prep error. EL 7.8.19
#This should be removed once new data is sent! REMOVED 8/19/19 EL
# dep_vars = c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d", dep_vars)
# mun_vars = mun_vars[!mun_vars%in%c("Total_Drugs_Distributed_value_d", "Isoniazid_Distributed_value_d")]
#Flag cases where variables end in _d but are in the mun-level dataset.
# NOTE(review): grepl("_d", ...) matches "_d" anywhere in the name, not only as
# a suffix — confirm "_d$" was not intended.
dept_level_error = mun_vars[grepl("_d", mun_vars)]
if (length(dept_level_error)!=0){
print("ERROR: Some department-level variables are not uniquely identified by department and date!")
print(dept_level_error)
}
#-------------------------------------------------------------------------------------------------------------
# Handle 4 unique cases: department + year, department + quarter, municipality + year, municipality + quarter
case1 = names(outputs)[names(outputs)%in%dep_vars & names(outputs)%in%year_vars]
case2 = names(outputs)[names(outputs)%in%dep_vars & names(outputs)%in%quarter_vars]
case3 = names(outputs)[names(outputs)%in%mun_vars & names(outputs)%in%year_vars]
case4 = names(outputs)[names(outputs)%in%mun_vars & names(outputs)%in%quarter_vars]
# Scaffold of all (year, quarter) combinations; quarter is encoded as a
# fractional year in `date`.
date_frame = data.table(expand.grid(year=seq(2009, 2018, by=1), quarter=seq(0.0, 0.75, by=0.25)))
date_frame[, date:=year+quarter]
date_frame$quarter = NULL
#-------------------------------
# Case 1 - department and year
#Take the average of the department-level variables by date and department.
dt_case1 = data.table(year=integer(), department=integer())
for (var in case1){
var_subset = outputs[, .(var=mean(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case1 = merge(dt_case1, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case1[duplicated(dt_case1, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case1[dup==TRUE])==0)
dt_case1$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case1)
dt_case1 = merge(dt_case1, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case1){
dt_case1[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case1) == start_row*4)
#Fix names, and drop unneeded columns.
# NOTE(review): as in the Activities section, this gsub strips "_m"/"_d"
# anywhere in a name, not only the suffix — do not change in isolation.
names(dt_case1) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case1))
dt_case1$year <- NULL
#-------------------------------
# Case 2 - department and quarter
#Take the average of the department-level variables by date and department.
dt_case2 = data.table(date=integer(), department=integer())
for (var in case2){
var_subset = outputs[, .(var=mean(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case2 = merge(dt_case2, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case2[duplicated(dt_case2, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case2[dup==TRUE])==0)
dt_case2$dup<-NULL
#Fix names.
names(dt_case2) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case2))
#-------------------------------
# Case 3 - municipality and year
#Take the sum of municipality-level variables by date and department.
dt_case3 = data.table(year=integer(), department=integer())
for (var in case3){
var_subset = outputs[, .(var=sum(get(var), na.rm=T)), by=c('year', 'department')]
names(var_subset)[3] = var
dt_case3 = merge(dt_case3, var_subset, by=c('year', 'department'), all=T)
}
#Check for uniqueness.
dt_case3[duplicated(dt_case3, by=c('year', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case3[dup==TRUE])==0)
dt_case3$dup<-NULL
# Divide into quarters.
start_row = nrow(dt_case3)
dt_case3 = merge(dt_case3, date_frame, by='year', all.x=T, allow.cartesian=T)
for (var in case3){
dt_case3[, (var):=get(var)/4] #Divide each variable to the quarter-level. This assumes everything is counts!
}
stopifnot(nrow(dt_case3) == start_row*4)
#Fix names, and drop unneeded columns.
names(dt_case3) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case3))
dt_case3$year <- NULL
#-------------------------------
# Case 4 - municipality and quarter
#Take the sum of municipality-level variables by date and department.
dt_case4 = data.table(date=integer(), department=integer())
for (var in case4){
var_subset = outputs[, .(var=sum(get(var), na.rm=T)), by=c('date', 'department')]
names(var_subset)[3] = var
dt_case4 = merge(dt_case4, var_subset, by=c('date', 'department'), all=T)
}
#Check for uniqueness.
dt_case4[duplicated(dt_case4, by=c('date', 'department')), dup:=TRUE]
stopifnot(nrow(dt_case4[dup==TRUE])==0)
dt_case4$dup<-NULL
#Fix names.
names(dt_case4) = gsub("_m|_d|_yearly|_quarterly|_value", "", names(dt_case4))
#-----------------------------------------
# Merge all of this data together.
outputs1 = merge(dt_case1, dt_case2, by=c('date', 'department'), all=T)
outputs1 = merge(outputs1, dt_case3, by=c('date', 'department'), all=T)
outputs1 = merge(outputs1, dt_case4, by=c('date', 'department'), all=T)
#Make sure you've accounted for all columns except municipality, year, and quarter
stopifnot(ncol(outputs1) == (ncol(outputs)-3))
new_names = names(outputs1)[3:ncol(outputs1)] #Reset the names so they're distinguishable from activity variables.
new_names = paste0(new_names, "_out")
names(outputs1)[3:ncol(outputs1)] <- new_names
#--------------------------------
# Add "Children <5 referred for tb evaluation" to ACF pathway EL 8/22/19
# Using new extramuros data uploaded from Guillermo
# The long Spanish string below is a *data value* filter (it must match the
# 'variable' column in the spreadsheet exactly) — do not translate or reformat.
# Note: sum(extramuros) inside j sums the column named 'extramuros' within the
# table that is also named 'extramuros'.
extramuros = data.table(read_excel("J:/Project/Evaluation/GF/impact_evaluation/gtm/raw_data/TB-Extramuros-2017-2018.xlsx"))
names(extramuros) = tolower(names(extramuros))
extramuros = extramuros[variable=="Cantidad de niños <de 5 años referidos para evaluación a través de ruta diagnostica para descartar tuberculosis", .(Children_less5_referred_out=sum(extramuros)), by=c('department', 'year')]
setnames(extramuros, 'year', 'date')
#Merge data together
# NOTE(review): extramuros 'year' (renamed to 'date') is annual, while
# outputs1$date is quarterly (year + fractional quarter) — annual totals will
# only join onto the x.0 (Q1) rows. Confirm this is intended.
outputs1 = merge(outputs1, extramuros, by=c('department', 'date'), all=T)
#-----------------------------------------------------
# Check to make sure you're still uniquely identifying data
#-----------------------------------------------------
activities1[duplicated(activities1, by=c('department','date')), dup:=TRUE]
if (nrow(activities1[dup==TRUE])!=0){
print(paste0("There are ", nrow(activities1[dup==TRUE]), " duplicates in department and date in the activities data. Review."))
}
outputs1[duplicated(outputs1, by=c('department', 'date')), dup:=TRUE]
if (nrow(outputs1[dup==TRUE])!=0){
print(paste0("There are ", nrow(outputs1[dup==TRUE]), " duplicates in department and date in the outputs data. Review."))
}
activities1 = activities1[, -c('dup')]
outputs1 = outputs1[, -c('dup')]
#-----------------------------------------------------
# Merge data
#-----------------------------------------------------
dt_final = merge(activities1, outputs1, by=c('date', 'department'), all=T) #Save dates and departments from both, in case you have data in one and not the other.
#Replace NaN and NA with 0 - we can assume these actually mean 0.
# cols = 3:ncol(dt_final) #Just don't do this for date and department, the first two columns.
# for (col in cols){
# dt_final[is.na(dt_final[[col]]), (col):=0]
# }
#Generate combined first- and second-line drug activity variables EL 8/22/19
# EDIT FROM DAVID PHILLIPS 9/6/2019 - we need to impute the combination variables for this step right before creating first and second-line variables.
# This imputation code is copied from step 4a.
#--------------------------------------------------
# extrapolate where necessary using GLM (better would be to use multiple imputation)
# `drugComboVars` is defined elsewhere in this pipeline (see step 4a) — TODO
# confirm it is in scope before this point when the steps are run standalone.
# For each (variable, department): if the series is entirely zero/NA, set the
# whole series to 0; otherwise fill NAs with a Poisson-GLM trend prediction,
# capped at max + 1 SD of the observed values.
i=1
for(v in drugComboVars) {
for(h in unique(dt_final$department)) {
# `i` is a progress counter only; it is incremented before first use, so the
# displayed percentage starts one step ahead (cosmetic off-by-one).
i=i+1
#First, check whether all values for this department and this variable are zero.
# if they are, don't backcast.
values = unique(dt_final[department==h, as.vector(get(v))]) #Get a vector of the unique values of the variable.
values[is.na(values)] = 0
zero_compare = rep(0, length(values)) #Get an equal length vector of zeros.
if (all(values==zero_compare)){
print(paste0(v, " is completely zero for department", h, " - making 0 for the entire time series in this department"))
dt_final[department==h, (v):=0]
} else {
#Backcast if it doesn't fall into this category.
# Skip if there is nothing to fill (no NAs) or nothing to fit on (all NA;
# defensive — the all-NA case is already caught by the zero branch above).
if (!any(is.na(dt_final[department==h][[v]]))) next
if (!any(!is.na(dt_final[department==h][[v]]))) next
form = as.formula(paste0(v,'~date'))
lmFit = glm(form, dt_final[department==h], family='poisson')
dt_final[department==h, tmp:=exp(predict(lmFit, newdata=dt_final[department==h]))]
# Cap predictions at the observed max plus one SD to prevent runaway
# exponential extrapolation.
lim = max(dt_final[department==h][[v]], na.rm=T)+sd(dt_final[department==h][[v]], na.rm=T)
dt_final[department==h & tmp>lim, tmp:=lim]
dt_final[department==h & is.na(get(v)), (v):=tmp]
}
pct_complete = floor(i/(length(drugComboVars)*length(unique(dt_final$department)))*100)
cat(paste0('\r', pct_complete, '% Complete'))
flush.console()
}
}
dt_final$tmp = NULL
# Replace NAs with zeros after back-casting DP 8/16/19
for (v in drugComboVars){
dt_final[is.na(get(v)), (v):=0]
}
#-----------------------------------------------------------------------
# Combine drug variables. Rows are unique by (date, department) at this point,
# so sum(..., by=...) adds the two columns row-wise.
# NOTE(review): "inusIsonizide" is the artifact of the earlier gsub stripping
# "_m" from "minusIsonizide" — this reference depends on that mangled name.
dt_final[, Firstline_Distributed_act:=sum(Total_First_Line_Drugs_inusIsonizide__Distributed_act, Isoniazid_Distributed_act, na.rm=T), by=c('date', 'department')]
dt_final[, Secondline_Distributed_act:=sum(Second_Line_Drugs_Distributed_act, Total_MDR_Drugs_Distributed_act, na.rm=T), by=c('date', 'department')]
#-----------------------------------------------------
# Save data
#-----------------------------------------------------
# `outputFile2b` and archive() are defined by the pipeline driver script —
# TODO confirm they are in scope when running this step alone.
saveRDS(dt_final, outputFile2b)
archive(outputFile2b)
print("Step 2b: Prep activities outputs completed successfully.")
|
# Example code extracted from the eggCounts package help page (Rd examples);
# the "## Not run:" block is intentionally not executed.
library(eggCounts)
### Name: eggCounts-package
### Title: Hierarchical modelling of faecal egg counts
### Aliases: eggCounts eggCounts-package
### Keywords: package
### ** Examples
## Not run:
##D
##D ## Citations
##D citation('eggCounts')
##D
##D ## History of changes
##D file.show(system.file("NEWS", package = "eggCounts"))
##D
##D ## Demonstration
##D demo("fecm_stan", package = "eggCounts")
##D
##D ## Install eggCountsExtra
##D devtools::install_github("CraigWangUZH/eggCountsExtra")
## End(Not run)
|
/data/genthat_extracted_code/eggCounts/examples/eggCounts-package.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 520
|
r
|
# Example code extracted from the eggCounts package help page (Rd examples);
# the "## Not run:" block is intentionally not executed.
library(eggCounts)
### Name: eggCounts-package
### Title: Hierarchical modelling of faecal egg counts
### Aliases: eggCounts eggCounts-package
### Keywords: package
### ** Examples
## Not run:
##D
##D ## Citations
##D citation('eggCounts')
##D
##D ## History of changes
##D file.show(system.file("NEWS", package = "eggCounts"))
##D
##D ## Demonstration
##D demo("fecm_stan", package = "eggCounts")
##D
##D ## Install eggCountsExtra
##D devtools::install_github("CraigWangUZH/eggCountsExtra")
## End(Not run)
|
\name{smooths.frame}
\alias{smooths.frame}
\alias{smooths.frame-class}
\title{Description of a smooths.frame object}
\description{A \code{data.frame} of S3-class \code{smooths.frame} that stores the
smooths of one or more responses for several sets of smoothing parameters.
\code{\link{as.smooths.frame}} is a function that converts a
\code{\link{data.frame}} to an object of this class.
\code{\link{is.smooths.frame}} is the membership function for this class; it tests
that an object has class \code{smooths.frame}.
\code{\link{validSmoothsFrame}} can be used to test the validity of a
\code{smooths.frame}.
}
\value{A \code{\link{data.frame}} that also inherits the S3-class
\code{\link{smooths.frame}}. It contains the results of smoothing a response
over time from a set of \code{individuals}, the data being arranged in long
format both with respect to the times and the smoothing-parameter values used in
the smoothing. That is, each response occupies a single column. The
\code{\link{smooths.frame}} must include the columns \code{Type}, \code{TunePar},
\code{TuneVal}, \code{Tuning} (the combination of \code{TunePar} and
\code{TuneVal}) and \code{Method}, and the columns that would be nominated using
the \code{\link{probeSmooths}} arguments \code{individuals}, the \code{plots} and
\code{facet} arguments, \code{times}, \code{response}, \code{response.smoothed}, and,
if requested, the AGR and the RGR of the \code{response} and \code{response.smoothed}.
The names of the growth rates should be formed from \code{response} and
\code{response.smoothed} by adding \code{.AGR} and \code{.RGR} to both of them.
The function \code{\link{probeSmooths}} produces a \code{\link{smooths.frame}}
for a response.
A \code{\link{smooths.frame}} has the following attributes:
\enumerate{
\item \code{individuals}, the \code{\link{character}} giving the name of the
\code{\link{factor}} that define the subsets of the \code{data}
for which each subset corresponds to the \code{response} values for
an individual;
\item \code{n}, the number of unique \code{individuals};
\item \code{times}, the \code{\link{character}} giving the name of the
\code{\link{numeric}}, or \code{\link{factor}} with numeric levels, that
contains the values of the predictor variable plotted on the x-axis;
\item \code{t}, the number of unique values in the \code{times};
\item \code{nschemes}, the number of unique combinations of the
smoothing-parameter values in the \code{smooths.frame}. }
}
\examples{
dat <- read.table(header = TRUE, text = "
Type TunePar TuneVal Tuning Method ID DAP PSA sPSA
NCSS df 4 df-4 direct 045451-C 28 57.446 51.18456
NCSS df 4 df-4 direct 045451-C 30 89.306 87.67343
NCSS df 7 df-7 direct 045451-C 28 57.446 57.01589
NCSS df 7 df-7 direct 045451-C 30 89.306 87.01316
")
dat[1:7] <- lapply(dat[1:6], factor)
dat <- as.smooths.frame(dat, individuals = "ID", times = "DAP")
is.smooths.frame(dat)
validSmoothsFrame(dat)
data(exampleData)
vline <- list(ggplot2::geom_vline(xintercept=29, linetype="longdash", size=1))
smths <- probeSmooths(data = longi.dat,
response = "PSA", response.smoothed = "sPSA",
times = "DAP",
smoothing.args =
args4smoothing(smoothing.methods = "direct",
spline.types = "NCSS",
df = c(4,7), lambdas = NULL),
profile.plot.args =
args4profile_plot(plots.by = NULL,
facet.x = "Tuning",
facet.y = "Treatment.1",
include.raw = "no",
ggplotFuncs = vline))
is.smooths.frame(smths)
validSmoothsFrame(smths)
}
\author{Chris Brien}
\seealso{\code{\link{probeSmooths}}, \code{\link{is.smooths.frame}},
\code{\link{as.smooths.frame}}, \code{\link{validSmoothsFrame}}, \code{\link{args4smoothing}}}
\keyword{asreml}
\keyword{htest}
|
/man/smooths.frame.Rd
|
no_license
|
cran/growthPheno
|
R
| false
| false
| 4,549
|
rd
|
\name{smooths.frame}
\alias{smooths.frame}
\alias{smooths.frame-class}
\title{Description of a smooths.frame object}
\description{A \code{data.frame} of S3-class \code{smooths.frame} that stores the
smooths of one or more responses for several sets of smoothing parameters.
\code{\link{as.smooths.frame}} is a function that converts a
\code{\link{data.frame}} to an object of this class.
\code{\link{is.smooths.frame}} is the membership function for this class; it tests
that an object has class \code{smooths.frame}.
\code{\link{validSmoothsFrame}} can be used to test the validity of a
\code{smooths.frame}.
}
\value{A \code{\link{data.frame}} that also inherits the S3-class
\code{\link{smooths.frame}}. It contains the results of smoothing a response
over time from a set of \code{individuals}, the data being arranged in long
format both with respect to the times and the smoothing-parameter values used in
the smoothing. That is, each response occupies a single column. The
\code{\link{smooths.frame}} must include the columns \code{Type}, \code{TunePar},
\code{TuneVal}, \code{Tuning} (the combination of \code{TunePar} and
\code{TuneVal}) and \code{Method}, and the columns that would be nominated using
the \code{\link{probeSmooths}} arguments \code{individuals}, the \code{plots} and
\code{facet} arguments, \code{times}, \code{response}, \code{response.smoothed}, and,
if requested, the AGR and the RGR of the \code{response} and \code{response.smoothed}.
The names of the growth rates should be formed from \code{response} and
\code{response.smoothed} by adding \code{.AGR} and \code{.RGR} to both of them.
The function \code{\link{probeSmooths}} produces a \code{\link{smooths.frame}}
for a response.
A \code{\link{smooths.frame}} has the following attributes:
\enumerate{
\item \code{individuals}, the \code{\link{character}} giving the name of the
\code{\link{factor}} that define the subsets of the \code{data}
for which each subset corresponds to the \code{response} values for
an individual;
\item \code{n}, the number of unique \code{individuals};
\item \code{times}, the \code{\link{character}} giving the name of the
\code{\link{numeric}}, or \code{\link{factor}} with numeric levels, that
contains the values of the predictor variable plotted on the x-axis;
\item \code{t}, the number of unique values in the \code{times};
\item \code{nschemes}, the number of unique combinations of the
smoothing-parameter values in the \code{smooths.frame}. }
}
\examples{
dat <- read.table(header = TRUE, text = "
Type TunePar TuneVal Tuning Method ID DAP PSA sPSA
NCSS df 4 df-4 direct 045451-C 28 57.446 51.18456
NCSS df 4 df-4 direct 045451-C 30 89.306 87.67343
NCSS df 7 df-7 direct 045451-C 28 57.446 57.01589
NCSS df 7 df-7 direct 045451-C 30 89.306 87.01316
")
dat[1:7] <- lapply(dat[1:6], factor)
dat <- as.smooths.frame(dat, individuals = "ID", times = "DAP")
is.smooths.frame(dat)
validSmoothsFrame(dat)
data(exampleData)
vline <- list(ggplot2::geom_vline(xintercept=29, linetype="longdash", size=1))
smths <- probeSmooths(data = longi.dat,
response = "PSA", response.smoothed = "sPSA",
times = "DAP",
smoothing.args =
args4smoothing(smoothing.methods = "direct",
spline.types = "NCSS",
df = c(4,7), lambdas = NULL),
profile.plot.args =
args4profile_plot(plots.by = NULL,
facet.x = "Tuning",
facet.y = "Treatment.1",
include.raw = "no",
ggplotFuncs = vline))
is.smooths.frame(smths)
validSmoothsFrame(smths)
}
\author{Chris Brien}
\seealso{\code{\link{probeSmooths}}, \code{\link{is.smooths.frame}},
\code{\link{as.smooths.frame}}, \code{\link{validSmoothsFrame}}, \code{\link{args4smoothing}}}
\keyword{asreml}
\keyword{htest}
|
## code to prepare `plastic_samples` datasets

# Choy et al. sampling stations (depth samples).
choy_lon <- c(-121.82, -122.05)
choy_lat <- c(36.8, 36.7)
Choy_sam_loc <- dplyr::tibble(lon = choy_lon, lat = choy_lat, depth = "depth")

# Box & Kashiwabara sampling stations (surface samples).
boxkash_lon <- c(-122.9, -122.6, -123.4,
-121.83595, -122.0129, -122.3055, -122.7199333)
boxkash_lat <- c(37.45, 37.65, 37.98,
36.73223333, 36.95426667, 36.39216667, 35.75838333)
BoxKash_sam_loc <- dplyr::tibble(lon = boxkash_lon, lat = boxkash_lat, depth = "surface")

# Lattin & Doyle single sampling station (surface sample).
LattinDoyle_sam_loc <- dplyr::tibble(lon = c(-118.5), lat = c(33.9), depth = "surface")

# Write each dataset into the package's data/ directory.
usethis::use_data(Choy_sam_loc, overwrite = TRUE)
usethis::use_data(BoxKash_sam_loc, overwrite = TRUE)
usethis::use_data(LattinDoyle_sam_loc, overwrite = TRUE)
|
/data-raw/plastic_samples.R
|
no_license
|
FlukeAndFeather/plasticmaps
|
R
| false
| false
| 642
|
r
|
## code to prepare `plastic_samples` datasets
# Each tibble holds sampling-station coordinates (lon/lat) with a depth label;
# the scalar `depth` value is recycled across all rows of the tibble.
# Choy et al. stations (depth samples).
Choy_sam_loc <- dplyr::tibble(
lon = c(-121.82,-122.05),
lat = c(36.8,36.7),
depth = "depth"
)
# Box & Kashiwabara stations (surface samples).
BoxKash_sam_loc <- dplyr::tibble(
lon = c(-122.9,-122.6,-123.4,
-121.83595,-122.0129,-122.3055,-122.7199333),
lat = c(37.45,37.65,37.98,
36.73223333,36.95426667, 36.39216667, 35.75838333),
depth = "surface"
)
# Lattin & Doyle single station (surface sample).
LattinDoyle_sam_loc <- dplyr::tibble(
lon = c(-118.5),
lat = c(33.9),
depth = "surface"
)
# Write each dataset into the package's data/ directory.
usethis::use_data(Choy_sam_loc, overwrite = TRUE)
usethis::use_data(BoxKash_sam_loc, overwrite = TRUE)
usethis::use_data(LattinDoyle_sam_loc, overwrite = TRUE)
|
# Build a one-column data frame of file IDs found in the data directory.
files <- data.frame(file_id = list.files("BreastMaleData"))
# Tab-separated lookup table mapping file IDs to sample metadata.
fm <- read.table("file_matching.txt", header = TRUE, sep = "\t")
# Inner join: keep only files present in both listings.
merged <- merge(files, fm, by = "file_id")
|
/mergeData.R
|
no_license
|
federicocozza/BioInformaticsGlioma
|
R
| false
| false
| 191
|
r
|
# One-column data frame of file names found in the data directory.
files <- as.data.frame(list.files("BreastMaleData"))
# Tab-separated lookup table mapping file IDs to sample metadata.
fm <- read.table("file_matching.txt", header = TRUE, sep = "\t")
# Rename the single column so it matches the join key in `fm`.
names(files) <- "file_id"
# Inner join: keep only files present in both listings.
merged <- merge(files, fm, by = "file_id")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miss_val_est.R
\name{mve.rcbd}
\alias{mve.rcbd}
\title{Estimation of missing values for a RCBD}
\usage{
mve.rcbd(trait, treat, rep, data, maxp = 0.1, tol = 1e-06)
}
\arguments{
\item{trait}{The trait to estimate missing values.}
\item{treat}{The treatments.}
\item{rep}{The replications.}
\item{data}{The name of the data frame.}
\item{maxp}{Maximum allowed proportion of missing values to estimate, defaults to 10\%.}
\item{tol}{Tolerance for the convergence of the iterative estimation process.}
}
\value{
It returns a data frame with the experimental layout and columns \code{trait}
and \code{trait.est} with the original data and the original data plus the estimated values.
}
\description{
Function to estimate missing values for a Randomized Complete Block Design (RCBD) by
the least squares method.
}
\details{
A \code{data.frame} with data for a RCBD with at least two replications
and at least one datum for each treatment must be loaded. Experimental data
with only one replication, any treatment without data, or more missing values than
specified in \code{maxp} will generate an error message.
}
\examples{
temp <- subset(met8x12, env == "TM80N")
mve.rcbd("y", "geno", "rep", temp)
}
\author{
Raul Eyzaguirre.
}
|
/man/mve.rcbd.Rd
|
no_license
|
CIP-RIU/st4gi
|
R
| false
| true
| 1,307
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/miss_val_est.R
\name{mve.rcbd}
\alias{mve.rcbd}
\title{Estimation of missing values for a RCBD}
\usage{
mve.rcbd(trait, treat, rep, data, maxp = 0.1, tol = 1e-06)
}
\arguments{
\item{trait}{The trait to estimate missing values.}
\item{treat}{The treatments.}
\item{rep}{The replications.}
\item{data}{The name of the data frame.}
\item{maxp}{Maximum allowed proportion of missing values to estimate, defaults to 10\%.}
\item{tol}{Tolerance for the convergence of the iterative estimation process.}
}
\value{
It returns a data frame with the experimental layout and columns \code{trait}
and \code{trait.est} with the original data and the original data plus the estimated values.
}
\description{
Function to estimate missing values for a Randomized Complete Block Design (RCBD) by
the least squares method.
}
\details{
A \code{data.frame} with data for a RCBD with at least two replications
and at least one datum for each treatment must be loaded. Experimental data
with only one replication, any treatment without data, or more missing values than
specified in \code{maxp} will generate an error message.
}
\examples{
temp <- subset(met8x12, env == "TM80N")
mve.rcbd("y", "geno", "rep", temp)
}
\author{
Raul Eyzaguirre.
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
# Note: the original script loaded these four packages twice; loading a
# package twice is a no-op, so the exact duplicates were removed.
library(tidyverse)
library(shiny)
library(shinydashboard)
library(ggthemes)
# Long-format attendance data: one row per (year, month, attendance_statistic).
attrates_long <- readr::read_csv("attrateslong.csv")
# --- UI ---------------------------------------------------------------------
ui <- dashboardPage(skin = "black",
dashboardHeader(title = "Attendance Data"),
dashboardSidebar(disable = TRUE),
dashboardBody(
fluidRow(
box(title = "Plot Options", width = 3,
# Which statistic to plot; values match attrates_long$attendance_statistic.
selectInput("x", "Select Term", choices = c("attendance_rate_percent", "student_add", "student_drop"),
selected = "attendance_rate_percent")
), # close the first box
# Fixed trailing comma after selectInput() above: it left an empty
# argument in box(), which errors under older htmltools versions.
box(title = "Attendance Rates, Enrollment and Withdrawal Data by Month", width=8,
plotOutput("plot", width = "700px", height = "500px")
) # close the second box
) # close the row
) # close the dashboard body
) # close the ui
# --- Server -----------------------------------------------------------------
server <- function(input, output, session) {
# Bar chart of the selected statistic by month, faceted by year.
output$plot <- renderPlot({
attrates_long %>%
filter(attendance_statistic==input$x) %>%
ggplot(aes(x=month, y=value)) +
geom_col(color="black", fill="darkcyan", alpha=.5)+
facet_wrap(~year)+
theme_gdocs()+
labs(x="Month",y="Count")
})
# stop the app when we close it
session$onSessionEnded(stopApp)
}
shinyApp(ui, server)
|
/group project/Attendance_Rates_Shiny/app.R
|
no_license
|
jnrico33/groupwork
|
R
| false
| false
| 1,664
|
r
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
# Dashboard of monthly school attendance statistics: attendance rate,
# enrollments (adds) and withdrawals (drops), faceted by year.

# Each package is loaded exactly once (the original loaded all four twice).
library(tidyverse)
library(shiny)
library(shinydashboard)
library(ggthemes)

# Long-format data; columns used below: month, year, attendance_statistic, value.
attrates_long <- readr::read_csv("attrateslong.csv")

ui <- dashboardPage(
  skin = "black",
  dashboardHeader(title = "Attendance Data"),
  dashboardSidebar(disable = TRUE),
  dashboardBody(
    fluidRow(
      box(
        title = "Plot Options", width = 3,
        selectInput(
          "x", "Select Term",
          choices = c("attendance_rate_percent", "student_add", "student_drop"),
          selected = "attendance_rate_percent"
        )
      ), # close the first box
      box(
        title = "Attendance Rates, Enrollment and Withdrawal Data by Month", width = 8,
        plotOutput("plot", width = "700px", height = "500px")
      ) # close the second box
    ) # close the row
  ) # close the dashboard body
) # close the ui

server <- function(input, output, session) {
  # Bar chart of the selected statistic by month, one facet panel per year.
  output$plot <- renderPlot({
    attrates_long %>%
      filter(attendance_statistic == input$x) %>%
      ggplot(aes(x = month, y = value)) +
      geom_col(color = "black", fill = "darkcyan", alpha = .5) +
      facet_wrap(~year) +
      theme_gdocs() +
      labs(x = "Month", y = "Count")
  })
  # Stop the R process when the browser session closes.
  session$onSessionEnded(stopApp)
}

shinyApp(ui, server)
|
# TP/FP threshold-sweep plots for uncorrected vs covariate-corrected
# correlation results (groups A, B, and A+B combined).
#
# "Ground truth" positives are the corrected-analysis hits passing
# PValue < 0.01 AND FDR < 0.05; the uncorrected analysis is scored against
# that set over a fine sweep of p-value thresholds.
setwd("~/ferdig_rotation/gabes_CRC/")

# Read one uncorrected/corrected result pair and sweep p-value thresholds.
# Returns a list with the threshold grid plus:
#   tp_count_vec / tp_vec: uncorrected hits that are ground-truth hits
#     (count, and fraction of all ground-truth hits)
#   fp_count_vec / fp_vec: uncorrected hits that are NOT ground-truth hits
#     (count, and fraction of all uncorrected hits at that threshold)
#   tp_count_corrected / tp_vec_corrected: ground-truth hits recovered by the
#     corrected analysis itself at that threshold
compute.tpfp = function(uncorrected.file, corrected.file, drop.uncor = c(1, 2),
		drop.cor = c(1, 2, 3, 4), p.cut = 0.01, fdr.cut = 0.05,
		thresholds = seq(0, 0.01, 0.000001)) {
	uncorrected = read.csv(uncorrected.file, stringsAsFactors = FALSE, header = TRUE, row.names = 1)
	uncorrected = uncorrected[-drop.uncor, ]
	corrected = read.csv(corrected.file, stringsAsFactors = FALSE, header = TRUE, row.names = 1)
	corrected = corrected[-drop.cor, ]
	# keep anything with P-value < 0.01 AND FDR < 0.05
	corrected_filt = corrected[corrected$PValue < p.cut & corrected$FDR < fdr.cut, ]
	uncorrected_filt = uncorrected[uncorrected$PValue < p.cut & uncorrected$FDR < fdr.cut, ]
	ground_truth = row.names(corrected_filt)	# the total set of true positives
	# preallocate: the original grew these with c() inside the loop (O(n^2))
	n = length(thresholds)
	tp_count = integer(n); fp_count = integer(n); tp_count_cor = integer(n)
	for (k in seq_len(n)) {
		all_pos = row.names(uncorrected_filt)[uncorrected_filt$PValue < thresholds[k]]
		tp_count[k] = length(intersect(ground_truth, all_pos))
		fp_count[k] = length(all_pos) - tp_count[k]
		tp_count_cor[k] = sum(corrected_filt$PValue < thresholds[k])
	}
	list(my_seq = thresholds,
		tp_count_vec = tp_count, fp_count_vec = fp_count, tp_count_corrected = tp_count_cor,
		tp_vec = tp_count / length(ground_truth),
		fp_vec = fp_count / (tp_count + fp_count),
		tp_vec_corrected = tp_count_cor / length(ground_truth))
}

# Dual-axis plot: TP curve (blue, left axis) layered over the FP curve
# (green, right axis); optionally overlays the corrected TP curve (orange)
# and a legend. Callers drop the first sweep point (threshold 0).
plot.tpfp = function(x, tp, fp, tp.cor = NULL, ylim = NULL,
		tp.lab = "True Positives (%)", fp.lab = "False Positives (%)",
		fp.at = c(0, .05, 0.1, 0.15), with.legend = FALSE) {
	par(mar = c(5, 4, 4, 4) + 0.5)	# leave space for the right-hand axis
	if (is.null(ylim))
		plot(x, tp, xlab = "P-value", ylab = tp.lab, type = "o", col = "blue", lwd = 3)
	else
		plot(x, tp, xlab = "P-value", ylab = tp.lab, type = "o", col = "blue", lwd = 3, ylim = ylim)
	if (!is.null(tp.cor)) lines(x, tp.cor, type = "o", col = "orange", lwd = 3)
	par(new = TRUE)	# overlay the FP curve on the same plotting region
	plot(x, fp, type = "o", col = "green", axes = FALSE, bty = "n", xlab = "", ylab = "", lwd = 3)
	axis(side = 4, at = fp.at)
	mtext(fp.lab, side = 4, line = 3)
	if (with.legend)
		legend("bottomright", legend = c("TP uncorrected", "TP corrected", "FP uncorrected"),
			col = c("blue", "orange", "green"), pch = c(19, 19, 19), lwd = c(3, 3, 3))
}

i = -1	# index used to drop the threshold-0 point from every curve

######################## Group A ###############################
a = compute.tpfp("GrpAuncorrectedcorr.csv", "GrpACorrectedCorr.csv")
# counts
plot.tpfp(a$my_seq[i], a$tp_count_vec[i], a$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
# percentages
plot.tpfp(a$my_seq[i], a$tp_vec[i], a$fp_vec[i])
# counts again, with the corrected TP curve overlaid
plot.tpfp(a$my_seq[i], a$tp_count_vec[i], a$fp_count_vec[i], tp.cor = a$tp_count_corrected[i],
	ylim = c(500, 1400), tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
# percentages with the corrected TP curve and a legend
plot.tpfp(a$my_seq[i], a$tp_vec[i], a$fp_vec[i], tp.cor = a$tp_vec_corrected[i],
	ylim = c(0.37, 1), with.legend = TRUE)

################################### Group B ################################
b = compute.tpfp("GrpBuncorrectedcorr.csv", "GrpBCorrectedCorr.csv")
plot.tpfp(b$my_seq[i], b$tp_count_vec[i], b$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
plot.tpfp(b$my_seq[i], b$tp_vec[i], b$fp_vec[i])
plot.tpfp(b$my_seq[i], b$tp_vec[i], b$fp_vec[i], tp.cor = b$tp_vec_corrected[i],
	ylim = c(0.17, 1), fp.at = c(0.50, 0.55, 0.6, 0.65, 0.70), with.legend = TRUE)

############################# Group AB ###############################
ab = compute.tpfp("GrpAandBuncorrectedcorr.csv", "GrpAandBCorrectedCorr.csv")
plot.tpfp(ab$my_seq[i], ab$tp_count_vec[i], ab$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
plot.tpfp(ab$my_seq[i], ab$tp_vec[i], ab$fp_vec[i])
plot.tpfp(ab$my_seq[i], ab$tp_vec[i], ab$fp_vec[i], tp.cor = ab$tp_vec_corrected[i],
	ylim = c(0.40, 1), fp.at = c(0.3, 0.35, 0.4, 0.45, 0.50), with.legend = TRUE)
|
/crcovariates/Final_TPFP_plotMike.R
|
no_license
|
katiemeis/code_gradlab
|
R
| false
| false
| 11,264
|
r
|
# TP/FP threshold-sweep plots for uncorrected vs covariate-corrected
# correlation results (groups A, B, and A+B combined).
#
# "Ground truth" positives are the corrected-analysis hits passing
# PValue < 0.01 AND FDR < 0.05; the uncorrected analysis is scored against
# that set over a fine sweep of p-value thresholds.
setwd("~/ferdig_rotation/gabes_CRC/")

# Read one uncorrected/corrected result pair and sweep p-value thresholds.
# Returns a list with the threshold grid plus:
#   tp_count_vec / tp_vec: uncorrected hits that are ground-truth hits
#     (count, and fraction of all ground-truth hits)
#   fp_count_vec / fp_vec: uncorrected hits that are NOT ground-truth hits
#     (count, and fraction of all uncorrected hits at that threshold)
#   tp_count_corrected / tp_vec_corrected: ground-truth hits recovered by the
#     corrected analysis itself at that threshold
compute.tpfp = function(uncorrected.file, corrected.file, drop.uncor = c(1, 2),
		drop.cor = c(1, 2, 3, 4), p.cut = 0.01, fdr.cut = 0.05,
		thresholds = seq(0, 0.01, 0.000001)) {
	uncorrected = read.csv(uncorrected.file, stringsAsFactors = FALSE, header = TRUE, row.names = 1)
	uncorrected = uncorrected[-drop.uncor, ]
	corrected = read.csv(corrected.file, stringsAsFactors = FALSE, header = TRUE, row.names = 1)
	corrected = corrected[-drop.cor, ]
	# keep anything with P-value < 0.01 AND FDR < 0.05
	corrected_filt = corrected[corrected$PValue < p.cut & corrected$FDR < fdr.cut, ]
	uncorrected_filt = uncorrected[uncorrected$PValue < p.cut & uncorrected$FDR < fdr.cut, ]
	ground_truth = row.names(corrected_filt)	# the total set of true positives
	# preallocate: the original grew these with c() inside the loop (O(n^2))
	n = length(thresholds)
	tp_count = integer(n); fp_count = integer(n); tp_count_cor = integer(n)
	for (k in seq_len(n)) {
		all_pos = row.names(uncorrected_filt)[uncorrected_filt$PValue < thresholds[k]]
		tp_count[k] = length(intersect(ground_truth, all_pos))
		fp_count[k] = length(all_pos) - tp_count[k]
		tp_count_cor[k] = sum(corrected_filt$PValue < thresholds[k])
	}
	list(my_seq = thresholds,
		tp_count_vec = tp_count, fp_count_vec = fp_count, tp_count_corrected = tp_count_cor,
		tp_vec = tp_count / length(ground_truth),
		fp_vec = fp_count / (tp_count + fp_count),
		tp_vec_corrected = tp_count_cor / length(ground_truth))
}

# Dual-axis plot: TP curve (blue, left axis) layered over the FP curve
# (green, right axis); optionally overlays the corrected TP curve (orange)
# and a legend. Callers drop the first sweep point (threshold 0).
plot.tpfp = function(x, tp, fp, tp.cor = NULL, ylim = NULL,
		tp.lab = "True Positives (%)", fp.lab = "False Positives (%)",
		fp.at = c(0, .05, 0.1, 0.15), with.legend = FALSE) {
	par(mar = c(5, 4, 4, 4) + 0.5)	# leave space for the right-hand axis
	if (is.null(ylim))
		plot(x, tp, xlab = "P-value", ylab = tp.lab, type = "o", col = "blue", lwd = 3)
	else
		plot(x, tp, xlab = "P-value", ylab = tp.lab, type = "o", col = "blue", lwd = 3, ylim = ylim)
	if (!is.null(tp.cor)) lines(x, tp.cor, type = "o", col = "orange", lwd = 3)
	par(new = TRUE)	# overlay the FP curve on the same plotting region
	plot(x, fp, type = "o", col = "green", axes = FALSE, bty = "n", xlab = "", ylab = "", lwd = 3)
	axis(side = 4, at = fp.at)
	mtext(fp.lab, side = 4, line = 3)
	if (with.legend)
		legend("bottomright", legend = c("TP uncorrected", "TP corrected", "FP uncorrected"),
			col = c("blue", "orange", "green"), pch = c(19, 19, 19), lwd = c(3, 3, 3))
}

i = -1	# index used to drop the threshold-0 point from every curve

######################## Group A ###############################
a = compute.tpfp("GrpAuncorrectedcorr.csv", "GrpACorrectedCorr.csv")
# counts
plot.tpfp(a$my_seq[i], a$tp_count_vec[i], a$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
# percentages
plot.tpfp(a$my_seq[i], a$tp_vec[i], a$fp_vec[i])
# counts again, with the corrected TP curve overlaid
plot.tpfp(a$my_seq[i], a$tp_count_vec[i], a$fp_count_vec[i], tp.cor = a$tp_count_corrected[i],
	ylim = c(500, 1400), tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
# percentages with the corrected TP curve and a legend
plot.tpfp(a$my_seq[i], a$tp_vec[i], a$fp_vec[i], tp.cor = a$tp_vec_corrected[i],
	ylim = c(0.37, 1), with.legend = TRUE)

################################### Group B ################################
b = compute.tpfp("GrpBuncorrectedcorr.csv", "GrpBCorrectedCorr.csv")
plot.tpfp(b$my_seq[i], b$tp_count_vec[i], b$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
plot.tpfp(b$my_seq[i], b$tp_vec[i], b$fp_vec[i])
plot.tpfp(b$my_seq[i], b$tp_vec[i], b$fp_vec[i], tp.cor = b$tp_vec_corrected[i],
	ylim = c(0.17, 1), fp.at = c(0.50, 0.55, 0.6, 0.65, 0.70), with.legend = TRUE)

############################# Group AB ###############################
ab = compute.tpfp("GrpAandBuncorrectedcorr.csv", "GrpAandBCorrectedCorr.csv")
plot.tpfp(ab$my_seq[i], ab$tp_count_vec[i], ab$fp_count_vec[i],
	tp.lab = "True Positives (count)", fp.lab = "False Positives (count)",
	fp.at = c(0, 50, 100, 150, 200, 250))
plot.tpfp(ab$my_seq[i], ab$tp_vec[i], ab$fp_vec[i])
plot.tpfp(ab$my_seq[i], ab$tp_vec[i], ab$fp_vec[i], tp.cor = ab$tp_vec_corrected[i],
	ylim = c(0.40, 1), fp.at = c(0.3, 0.35, 0.4, 0.45, 0.50), with.legend = TRUE)
|
#
# Rlibraries.R
#Wed Oct 31 19:00:40 CET 2012
# Attach the packages this analysis depends on. library() raises an error
# immediately when a package is missing, whereas the original require() calls
# silently returned FALSE and let failures surface much later.
loadLibraries = function() {
	library('geepack');
	library('glmnet');
	library('ggplot2');
	#library('foreign');
}
#
# Rdata.R
#Mon 27 Jun 2005 10:49:06 AM CEST
#system("cd ~/src/Rprivate ; ./exportR.sh");
#system("cd ~/src/Rprivate ; ./exportR.sh"); source("RgenericAll.R"); source("Rgenetics.R"); loadLibraries();
#
# <§> abstract data functions
#
defined = function(x) exists(as.character(substitute(x)));
defined.by.name = function(name) { class(try(get(name), silent = T)) != 'try-error' }
# equivalent to i %in % v
is.in = function(i, v)(length((1:length(v))[v == i])>0)
# Look up `name` in `envir`, falling back to `default` when it is not bound.
# Extra arguments are forwarded to get().
rget = function(name, default = NULL, ..., pos = -1, envir = as.environment(pos)) {
	if (!exists(name, envir = envir)) return(default);
	get(name, ..., envir = envir)
}
# Return the first argument that is defined: not NULL and, when
# .fdIgnoreErrors is set, not a try-error. With .fdInterpolate the arguments
# are flattened with c() instead of being treated as separate candidates.
# inherits() replaces class(i) != 'try-error', which yields a length > 1
# condition (an error in modern R) for multi-class objects.
firstDef = function(..., .fdInterpolate = F, .fdIgnoreErrors = F) {
	l = if (.fdInterpolate) c(...) else list(...);
	for (i in l) { if (!is.null(i) && (!.fdIgnoreErrors || !inherits(i, 'try-error'))) return(i)};
	NULL
}
# Return the first argument that is not NA (judged by its first element).
# The length guard keeps NULL elements and multi-element vectors from
# producing a zero-length or length > 1 condition in the scalar if().
firstDefNA = function(..., .fdInterpolate = F){
	l = if (.fdInterpolate) c(...) else list(...);
	for (i in l) { if (length(i) > 0 && !is.na(i[1])) return(i)};
	NULL
}
# <N> NULL behaviour
# Normalize ... into a list (NULL stays NULL; an existing list is copied via
# c(); anything else is wrapped with list()). With .remove.factors, factor
# elements are meant to be converted to their character labels.
# NOTE(review): ifelse() here has a scalar condition, so it returns a
# length-1 result — non-factor vector elements appear to be truncated to
# their first element, and sapply() may simplify the list to a vector.
# Looks unintended; confirm against callers before changing.
to.list = function(..., .remove.factors = T){
	r = if(is.null(...)) NULL else if (is.list(...)) c(...) else list(...);
	if (.remove.factors) {
		r = sapply(r, function(e)ifelse(is.factor(e), levels(e)[e], e));
	}
	r
}
# pretty much force to vector
#avu = function(v)as.vector(unlist(v))
# Flatten v into a plain vector. For lists, recurses element-wise; with
# toNA = TRUE, NULL elements become NA (plain unlist() would silently drop
# them). Returns NULL when the flattened result is empty.
avu = function(v, recursive = T, toNA = T) {
	# transform(e, condition): condition marks elements that were NULL
	transform = if (toNA)
		function(e, condition)(if (condition) NA else avu(e, toNA = T, recursive = T)) else
		function(e, ...)avu(e, toNA = F, recursive = T);
	r = if (is.list(v)) {
		nls = sapply(v, is.null);	# detects nulls
		# unlist removes NULL values -> NA
		unlist(sapply(seq_along(v), function(i)transform(v[[i]], nls[i])));
	} else as.vector(v);
	if (!length(r)) return(NULL);
	r
}
pop = function(v)rev(rev(v)[-1]);
# Assign every named element of list l as a variable of that name in the
# target environment; remaining arguments are forwarded to assign().
assign.list = function(l, pos = -1, envir = as.environment(pos), inherits = FALSE, immediate = TRUE) {
	for (nm in names(l)) assign(nm, l[[nm]], pos, envir, inherits, immediate)
}
eval.text = function(text, envir = parent.frame())eval(parse(text = c[1]), envir= envir);
# replace elements base on list
# l may be a list of lists with elements f (from) and t (to), when f is replaced with t
# if both, f and t arguments are not NULL, l will be ignored and f is replaced with t
# Replace elements of v according to a named mapping. With regex = TRUE the
# names of the mapping are treated as patterns (first matching pattern wins,
# replacement via gsub); otherwise replacement is by exact lookup.
# NOTE(review): depends on project helpers listKeyValue() and fetchRegexpr(),
# not shown alongside this function — verify their semantics before editing.
vector.replace = function(v, l, regex = F, ..., f = NULL, t = NULL) {
#	if (!is.null(f) & !is.null(t)) l = list(list(f = f, t = t));
#	# replacments are given in f/t pairs
#	if (all(sapply(l, length) == 2)) {
#		from = list.key(l, "f");
#		to = list.key(l, "t");
#	} else {
#		from = names(l);
#		to = unlist(l);
#	}
#	for (i in 1:length(from)) {
#		if (regex) {
#			idcs = which(sapply(v, function(e)(length(fetchRegexpr(from[i], e, ...)) > 0)));
#			v[idcs] = sapply(v[idcs], function(e)gsub(from[i], to[i], e));
#		} else v[which(v == from[i])] = to[i];
#	}
	repl = if (!is.null(f) & !is.null(t)) listKeyValue(f, t) else l;
	# <!> tb tested
	v = if (!regex) {
		# elements without a mapping entry (NULL lookup) are kept as-is
		raw = repl[v];
		unlist(ifelse(sapply(repl[v], is.null), v, raw))
	} else {
		sapply(v, function(e){
			# first match takes precedent
			j = which(sapply(names(repl), function(f)length(fetchRegexpr(f, e, ...)) > 0))[1];
			if (is.na(j)) e else gsub(names(repl)[j], repl[[j]], e)
		})
	}
	v
}
# Expand v onto the full name set all_names: the result has one entry per
# name in all_names, filled with `default` where v has no matching entry.
# NOTE(review): which.indeces() is a project helper (not shown here);
# presumably it maps names(v) to positions in all_names with NA for misses —
# confirm before changing.
vector.with.names = function(v, all_names, default = 0) {
	r = rep(default, length(all_names));
	names(r) = all_names;
	is = which.indeces(names(v), all_names, ret.na = T);
	r[is[!is.na(is)]] = v[!is.na(is)];
	r
}
# dir: direction of selection: 1: select rows, 2: select columns
# Pick one entry per row/column of matrix m: with dir = 1 the result is
# m[v[i], i] for each column i; with dir = 2 it is m[i, v[i]] for each row i.
# seq_along() replaces 1:length(v), which misbehaves for empty v.
mat.sel = function(m, v, dir = 1) {
	r = if (dir == 1)
		sapply(seq_along(v), function(i)m[v[i], i]) else
		sapply(seq_along(v), function(i)m[i, v[i]]);
	r
}
# rbind on list
# Apply the identity over a list via sapply, letting sapply's simplification
# turn a list of equal-length vectors into a matrix (one column per element).
sapplyId = function(l) sapply(l, function(e) e);
# Search the list-of-lists `lsed` for entries whose key/value pairs match the
# template `lsee`, returning the matching entries (flattened one level).
# NOTE(review): list.key() and unlist.n() are project helpers not shown here;
# presumably list.key(lsed, n) collects element n from every entry — confirm.
listFind = function(lsed, lsee) {
	values = sapply(names(lsee), function(n)list.key(lsed, n), simplify = F, USE.NAMES = F);
	values = sapply(values, identity);
	# one row per entry of lsed; a row matches when all template values agree
	found = apply(values, 1, function(r) all(r == lsee));
	r = unlist.n(lsed[found], 1);
	r
}
same.vector = function(v)all(v == v[1])
#
# <§> string manipulation
#
# Print the arguments followed by a newline.
say = function(...)cat(..., "\n");
# C-style formatted printing to stdout.
printf = function(fmt, ...)cat(sprintf(fmt, ...));
# Collapse a vector into a single string with the given separator.
join = function(v, sep = " ")paste(v, collapse = sep);
# Concatenate without separator; paste0() is the idiomatic form of
# paste(..., sep = "").
con = function(...)paste0(...);
# pastem = function(a, b, ..., revsort = T) {
#	if (revsort)
#		as.vector(apply(merge(data.frame(a = b), data.frame(b = a), sort = F), 1,
#			function(e)paste(e[2], e[1], ...))) else
#		as.vector(apply(merge(data.frame(a = a), data.frame(b = b), sort = F), 1,
#			function(e)paste(e[1], e[2], ...)))
# }
# Paste all combinations of a and b element-wise after forming their cross
# product; revsort controls which input varies fastest.
# NOTE(review): merge.multi.list() and Df() are project helpers not shown
# here — confirm their ordering semantics before modifying.
pastem = function(a, b, ..., revsort = T) {
	df = merge.multi.list(list(Df(a = a), Df(b = b)), .first.constant = revsort);
	paste(df[, 1], df[, 2], ...)
}
# Parse the integers out of printed R output, skipping the "[n]" index
# prefixes (digits preceded by '[' or another digit are not match starts).
# regmatches() replaces the manual substr() loop, which produced NA plus a
# warning when the input contained no number; now integer(0) is returned.
r.output.to.vector.int = function(s) {
	matches = gregexpr("(?<![\\[\\d])\\d+", s, perl=T);
	as.integer(regmatches(s, matches)[[1]])
}
# Parse the decimal numbers (digits around a '.') out of printed R output.
# regmatches() replaces the manual substr() loop, which produced NA plus a
# warning when the input contained no number; now numeric(0) is returned.
r.output.to.vector.numeric = function(s) {
	matches = gregexpr("\\d*\\.\\d+", s, perl=T);
	as.numeric(regmatches(s, matches)[[1]])
}
readFile = function(path) { join(scan(path, what = "raw", sep = "\n", quiet = T), sep = "\n") };
# Wrap each non-empty string in `pre`/`post`; empty strings pass through and
# empty/NULL input collapses to ''. (The former call to con() is inlined as
# paste(..., sep = ''), which drops NULL pre/post exactly as before.)
circumfix = function(s, post = NULL, pre = NULL) {
	if (is.null(s) || length(s) == 0) return('');
	sapply(s, function(e) { if (e == '') e else paste(pre, e, post, sep = '') })
}
# Truncate strings longer than Nchar, appending an ellipsis so the total
# length is exactly Nchar; shorter strings are returned unchanged.
abbr = function(s, Nchar = 20, ellipsis = '...') {
	keep = Nchar - nchar(ellipsis);
	ifelse(nchar(s) > Nchar, paste0(substr(s, 1, keep), ellipsis), s)
}
# Index of the maximum; with last.max the LAST occurrence of the maximum is
# returned. An all-FALSE logical vector has no meaningful maximum and yields
# `default`.
Which.max = function(l, last.max = T, default = NA) {
	if (is.logical(l) && !any(l)) return(default);
	if (last.max) length(l) - which.max(rev(l)) + 1 else which.max(l)
}
# Index of the minimum; with last.min the LAST occurrence of the minimum is
# returned. An all-FALSE logical vector yields `default` (mirrors Which.max).
Which.min = function(l, last.min = F, default = NA) {
	if (is.logical(l) && !any(l)) return(default);
	if (last.min) length(l) - which.min(rev(l)) + 1 else which.min(l)
}
# capturesN: named captures; for each name in captureN put the captured value assuming names to be ordered
# captures: fetch only first capture per match <!> deprecated
# capturesAll: fetch all caputers for each match
# Extract regular-expression matches from a SINGLE string `str`, optionally
# pulling out capture groups. Returns NULL when nothing matches; with
# returnMatchPositions the raw (g)regexpr result is returned alongside.
# NOTE(review): depends on the sibling Which.max() for trimming capturesAll
# results.
fetchRegexpr = function(re, str, ..., ret.all = F, globally = T, captures = F, captureN = c(),
	capturesAll = F, maxCaptures = 9, returnMatchPositions = F) {
	if (length(re) == 0) return(c());
	# r holds match start positions with a "match.length" attribute
	r = if (globally)
		gregexpr(re, str, perl = T, ...)[[1]] else
		regexpr(re, str, perl = T, ...);
	if (all(r < 0)) return(NULL);
	# cut the matched substrings out of str
	l = sapply(1:length(r), function(i)substr(str, r[i], r[i] + attr(r, "match.length")[i] - 1));
	if (captures) {
		# keep only the first capture group of each match
		l = sapply(l, function(e)gsub(re, '\\1', e, perl = T, fixed = F));
	} else if (length(captureN) > 0) {
		# one named list per match: captureN[i] -> i-th capture group
		l = lapply(l, function(e) {
			r = sapply(1:length(captureN), function(i) {
				list(gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F))
			});
			names(r) = captureN;
			r
		});
	} else if (capturesAll) {
		# collect up to maxCaptures groups per match
		l = lapply(l, function(e) {
			cs = c();	# captures
			# <!> hack to remove zero-width assertions (no nested grouping!)
			#re = gsub('(\\(\\?<=.*?\\))|(\\(\\?=.*?\\))', '', re, perl = T, fixed = F);
			for (i in 1:maxCaptures) {
				n = gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F);
				cs = c(cs, n);
			}
			cs
		});
		# trim list
		#maxEls = maxCaptures - min(c(maxCaptures + 1, sapply(l, function(e)Which.max(rev(e != ''))))
		#	, na.rm = T) + 1;
		maxEls = max(c(sapply(l, function(e)Which.max(e != '', default = 1)), 1));
		l = lapply(l, function(e)(if (maxEls > 0) e[1:maxEls] else NULL));
	}
	if (!ret.all) l = l[l != ""];
	ret = if (returnMatchPositions) list(match = l, positions = r) else l;
	ret
}
# improved multistring version
# Like fetchRegexpr(), but `str` may be a vector of strings: matching is done
# per string and the results are gathered per input. Returns NULL when no
# string matches at all.
# NOTE(review): depends on the sibling Which.max() for trimming capturesAll
# results.
FetchRegexpr = function(re, str, ..., ret.all = F, globally = T, captures = F, captureN = c(),
	capturesAll = F, maxCaptures = 9, returnMatchPositions = F) {
	if (length(re) == 0) return(c());
	# one (g)regexpr result per input string
	r = if (globally)
		gregexpr(re, str, perl = T, ...) else
		list(regexpr(re, str, perl = T, ...));
	if (all(unlist(r) < 0)) return(NULL);
	# cut the matched substrings out of each input string
	l = sapply(seq_along(r),
		function(j) {
			r0 = r[[j]];
			sapply(1:length(r0),
				function(i)substr(str[j], r0[i], r0[i] + attr(r0, "match.length")[i] - 1))
	});
	if (captures) {
		# keep only the first capture group of each match
		l = sapply(l, function(e)gsub(re, '\\1', e, perl = T, fixed = F));
		#print(l);
	} else if (length(captureN) > 0) {
		# one named list per match: captureN[i] -> i-th capture group
		l = lapply(l, function(e) {
			r = sapply(1:length(captureN), function(i) {
				list(gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F))
			});
			names(r) = captureN;
			r
		});
	} else if (capturesAll) {
		# collect up to maxCaptures groups per match
		l = lapply(l, function(e) {
			cs = c();	# captures
			# <!> hack to remove zero-width assertions (no nested grouping!)
			#re = gsub('(\\(\\?<=.*?\\))|(\\(\\?=.*?\\))', '', re, perl = T, fixed = F);
			for (i in 1:maxCaptures) {
				n = gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F);
				cs = c(cs, n);
			}
			cs
		});
		# trim list
		#maxEls = maxCaptures - min(c(maxCaptures + 1, sapply(l, function(e)Which.max(rev(e != ''))))
		#	, na.rm = T) + 1;
		maxEls = max(c(sapply(l, function(e)Which.max(e != '', default = 1)), 1));
		l = lapply(l, function(e)(if (maxEls > 0) e[1:maxEls] else NULL));
	}
	if (!ret.all) l = l[l != ""];
	ret = if (returnMatchPositions) list(match = l, positions = r) else l;
	ret
}
# Vectorized wrappers: regex vectorizes fetchRegexpr over str, Regex
# vectorizes FetchRegexpr over the pattern re.
regex = Vectorize(fetchRegexpr, 'str', SIMPLIFY = T, USE.NAMES = T);
Regex = Vectorize(FetchRegexpr, 're', SIMPLIFY = T, USE.NAMES = T);
# Indeces of elements of s matching re (vectorIdcs: project helper).
regexIdcs = function(re, s, ...)vectorIdcs(regex(re, s, ...), is.null, not = T)
# unify capture extraction for gregexpr, regexpr
# pos == NULL: reg stems from g/regexpr over all of str; otherwise pos indexes
# into str and selects the corresponding match positions in reg.
# Returns a named list (capture.names) of capture-group substrings, or NULL
# when reg carries no capture information (regex compiled without perl = T).
matchRegexCapture = function(reg, str, pos = NULL) {
	if (is.null(attr(reg, 'capture.start'))) return(NULL);
	if (!is.null(pos)) str = str[pos] else pos = seq_along(reg);
	captures = lapply(1:ncol(attr(reg, 'capture.start')), function(i) {
		# Substr(s, start, length): project-style substring by start/length
		sapply(pos, function(j)Substr(str,
			attr(reg, 'capture.start')[j, i], attr(reg, 'capture.length')[j, i]))
	});
	names(captures) = attr(reg, 'capture.names');
	captures
}
# Extract the matched substrings from a regexpr/gregexpr result reg over str.
# NOTE(review): ifelse() vectorizes; for a negative (non-matching) position
# the yes-branch character(0) is indexed per element and yields NA rather
# than an empty vector -- confirm this is the intended behavior.
matchRegexExtract = function(reg, str, pos = NULL) {
	if (!is.null(pos)) str = str[pos] else pos = seq_along(reg);
	matches = ifelse(reg[pos] < 0, character(0),
		sapply(pos, function(i)Substr(str, reg[i], attr(reg, 'match.length')[i])));
	matches
}
# <i> re nested list with sub-res for named captures
# <!> globally == FALSE, removeNonMatch == FALSE
# Match re against the strings str; returns list(match = , capture = ) and,
# with positions = TRUE, also the raw match objects.
#	globally: all matches per string vs. first match only
#	simplify: for a single input string, unwrap the outer per-string list
#	removeNonMatch: drop strings without any match
matchRegex = function(re, str, ..., globally = TRUE, simplify = TRUE,
	positions = FALSE, removeNonMatch = FALSE) {
	if (length(re) == 0) return(NULL);
	reg = if (globally) gregexpr(re, str, perl = T, ...) else regexpr(re, str, perl = T, ...);
	ms = if (globally)
		lapply(seq_along(reg), function(i)matchRegexExtract(reg[[i]], str[i])) else
		lapply(seq_along(str), function(i)matchRegexExtract(reg, str, pos = i));
	#	regmatches(str, reg);
	captures = if (globally)
		lapply(seq_along(reg), function(i)matchRegexCapture(reg[[i]], str[i])) else
		lapply(seq_along(str), function(i)matchRegexCapture(reg, str, pos = i));
	if (removeNonMatch) {
		nonmatch = sapply(ms, length) == 0 | is.na(ms);
		ms = ms[!nonmatch];
		captures = captures[!nonmatch];
		reg = reg[!nonmatch];
	}
	if (simplify && length(str) == 1) {
		ms = ms[[1]];
		captures = captures[[1]];
		reg = reg[[1]];
	}
	r = if(positions) list(match = ms, capture = captures, positions = reg) else
		list(match = ms, capture = captures);
	r
}
#
# <p> final interface as of 2016/04
#
# Match re against str (first match per element).
# mode == 'return': return the elements of str that matched;
# any other mode: return the raw regexpr() result.
MatchRegex = function(re, str, mode = 'return') {
	positions = regexpr(re, str);
	if (mode != 'return') return(positions);
	str[which(positions > 0)]
}
# Split each element of str at matches of the perl regex re.
# A string without any separator is returned unchanged; with simplify and a
# single input string, the bare piece vector is returned instead of a
# one-element list.
splitString = function(re, str, ..., simplify = T) {
	parts = lapply(str, function(s) {
		m = gregexpr(re, s, perl = T, ...)[[1]];
		if (m[1] < 0) return(s);	# no separator found
		ends = m - 1;	# last character before each separator
		starts = m + attr(m, "match.length");	# first character after each separator
		sapply(1:(length(m) + 1), function(i)
			substr(s, if (i == 1) 1 else starts[i - 1], if (i > length(m)) nchar(s) else ends[i]))
	});
	if (length(parts) == 1 && simplify) parts = parts[[1]];
	parts
}
# Wrap each string in double quotes (no escaping of embedded quotes).
quoteString = function(s) paste0('"', s, '"')
# Trim leading/trailing whitespace from each element of s; NA stays NA.
# NOTE(review): FetchRegexpr filters empty results (ret.all = F), so an
# all-whitespace string trims to "" and is then dropped, yielding a
# zero-length element (unlike base trimws) -- confirm intended before
# simplifying.
trimString = function(s) {
	sapply(s, function(e)
		if (is.na(e)) NA else FetchRegexpr('^\\s*(.*?)\\s*$', e, captures = T)
	)
}
# Substitute every key of dict d occurring in string s by its (mapped) value.
#	valueMapper: maps the raw value prior to substitution; the default closes
#		over the loop variable n and marks missing values for LaTeX output
#	iterative: rescan the result until no substitution applies any more
#		(bounded by maxIterations and maxLength)
#	re: keys are regular expressions instead of literal strings
#	doOrderKeys: substitute longer keys first so overlapping keys behave
#		predictably
mergeDictToString = function(d, s, valueMapper = function(s)
	ifelse(is.na(d[[n]]), '{\\bf Value missing}', d[[n]]),
	iterative = F, re = F, maxIterations = 100, doApplyValueMap = T, doOrderKeys = T, maxLength = 1e7) {
	ns = names(d);
	# proceed in order of decreasing key lengthes
	if (doOrderKeys) ns = ns[rev(order(sapply(ns, nchar)))];
	for (i in 1:maxIterations) {
		s0 = s;
		for (n in ns) {
			# counteract undocumented string interpolation
			subst = if (doApplyValueMap)
				gsub("[\\\\]", "\\\\\\\\", valueMapper(d[[n]]), perl = T)
				else d[[n]];
			# <!> quoting: literal keys are protected with \Q...\E
			if (!re) n = sprintf("\\Q%s\\E", n);
			s = gsub(n, firstDef(subst, ""), s, perl = T, fixed = F);
			# <A> if any substitution was made, it is nescessary to reiterate ns to preserver order
			# of substitutions
			if (iterative && s != s0) break;
		}
		if (!iterative || s == s0 || nchar(s) > maxLength) break;
	}
	s
}
# Vectorized mergeDictToString over the template string s.
mergeDictToStringV = Vectorize(mergeDictToString, 's', SIMPLIFY = T, USE.NAMES = T);
# Map elements of v through dict d; elements without a dict entry pass through.
mergeDictToVector = function(d, v) { unlist(ifelse(is.na(names(d[v])), v, d[v])) }
# Apply mergeDictToString to every character value of dValues; with
# recursive = TRUE sub-lists are descended into. Non-character, non-list
# values pass through unchanged.
mergeDictToDict = function(dMap, dValues, ..., recursive = T) {
	r = lapply(dValues, function(v) {
		r = if (class(v) == 'list') {
			if (recursive) mergeDictToDict(dMap, v, ...) else v
		} else if (class(v) == 'character') mergeDictToString(dMap, v, ...) else v;
		r
	});
	r
}
# double quote if needed
# Shell-quote a single string with double quotes when it contains whitespace
# or shell metacharacters (or when forced); otherwise single quotes escaped
# via $'...' syntax if present.
qsSingle = function(s, force = F) {
	# <N> better implementation possible: detect unquoted white-space
	if (force || length(fetchRegexpr('[ \t"()\\[\\]:,]', s)) > 0) {
		s = gsub('([\\"])', '\\\\\\1', s);
		s = sprintf('"%s"', s);
	} else {
		s0 = gsub("([\\'])", '\\\\\\1', s);
		if (s0 != s) s = sprintf("$'%s'", s0);
	}
	s
}
# Vectorized double-quoting wrapper.
qs = function(s, ...)sapply(s, qsSingle, ...)
# single quote if needed
# Shell-quote a single string with single quotes when it contains whitespace
# or shell metacharacters (or when forced); embedded single quotes are closed,
# double-quoted and re-opened ('"'"').
qssSingle = function(s, force = F) {
	# <N> better implementation possible: detect unquoted white-space
	if (force || length(fetchRegexpr("[ \t'()\\[\\]:,]", s)) > 0) {
		s = gsub("(['])", "'\"'\"'", s);
		s = sprintf("'%s'", s);
	}
	s
}
# Vectorized single-quoting wrapper.
qss = function(s, ...)sapply(s, qssSingle, ...)
#' Return sub-strings indicated by positions or produce a string by substituting
#' those sub-strings with replacements
#'
#' Without \code{replacement}, the sub-strings of \code{s} described by
#' \code{start}/\code{length} are returned. With \code{replacement}, \code{s}
#' is returned with each of these sub-strings replaced by the corresponding
#' replacement (positions must be ascending and non-overlapping).
#'
#' @param s template string
#' @param start vector of start positions of substrings
#' @param length vector of lengthes of these substrings
#' @param replacement optional vector of strings to substitute
#'
#' @examples
#' print(Substr("abc", c(2, 3), c(1, 1), c("def", 'jkl')));
#' print(Substr("abcdef", c(2, 3, 5), c(1, 1, 1), c("123", '456', '789')));
#' print(Substr("abcdef", c(1, 3, 5), c(1, 1, 1), c("123", '456', '789')));
#' print(Substr("abcdef", c(1, 3, 5), c(0, 1, 0), c("123", '456', '789')));
Substr = function(s, start, length, replacement) {
	if (missing(replacement)) return(substr(s, start, start + length - 1));
	bounds = c(start, nchar(s) + 1);	# sentinel covers the tail gap
	# interleave each replacement with the untouched gap following it
	pieces = sapply(seq_along(replacement), function(i)c(
		replacement[i],
		substr(s, bounds[i] + length[i], bounds[i + 1] - 1)
	));
	pieces = c(substr(s, 1, bounds[1] - 1), as.vector(pieces));
	join(as.vector(pieces), sep = '')
}
# <!> quoting
#' Produce string by substituting placeholders
#'
#' The function behaves similar to sprintf, except that character sequences to be substituted are
#' indicated by name. To be implemented: *-specifications
#'
#' @param fmt template string with placeholders of the form \code{\%{key}spec}
#' @param values values to substitute into \code{fmt}: named entries and lists
#'   become the dictionary; unnamed entries fill anonymous placeholders in order
#' @param sprintf_cartesian produce all value combinations (cartesian product)
#'   instead of pairing values positionally
#' @param envir environment searched for keys not present in \code{values}
#'
#' Extra conversion letters: D = current date, Q/q = shell double/single
#' quoting, u = upper-case first letter (uc.first).
#' NOTE(review): depends on project helpers (merge.lists, fetchRegexpr,
#' Substr, nlapply, rget, List_, Df_, merge.multi.list, Seq, qs, qss,
#' uc.first) -- behavior documented from this code only.
#'
#' @examples
#' Sprintf('These are N %{N} characters.', list(N = 10));
#' Sprintf('These are N %{N}d characters.', list(N = 10));
#' Sprintf('These are N %{N}02d characters.', list(N = 10));
Sprintfl = function(fmt, values, sprintf_cartesian = FALSE, envir = parent.frame()) {
	# split values into named dictionary entries and anonymous extras
	dict = extraValues = list();
	for (i in seq_along(values)) {
		if (is.list(values[[i]]))
			dict = merge.lists(dict, values[[i]]) else
		if (!is.null(names(values)[i]) && names(values)[i] != '')
			dict = merge.lists(dict, values[i]) else
			extraValues = c(extraValues, values[i]);
	}
#	re = '(?x)(?:
#		(?:^|[^%]|(?:%%)+)\\K
#		[%]
#		(?:[{]([^{}\\*\'"]*)[}])?
#		((?:[-]?[*\\d]*[.]?[*\\d]*)?(?:[sdfegG]|))(?=[^%sdfegG]|$)
#	)';
	# <!> new, untested regexpr as of 22.5.2014
	# un-interpolated formats do no longer work
	re = '(?x)(?:
		(?:[^%]+|(?:%%)+)*\\K
		[%]
		(?:[{]([^{}\\*\'"]*)[}])?
		((?:[-]?[*\\d]*[.]?[*\\d]*)?(?:[sdfegGDQqu]|))(?=[^sdfegGDQqu]|$)
	)';
	r = fetchRegexpr(re, fmt, capturesAll = T, returnMatchPositions = T);
	# <p> nothing to format
	if (length(r$match) == 0) return(fmt);
	# per placeholder: raw conversion letter, effective sprintf type, format
	typesRaw = sapply(r$match, function(m)ifelse(m[2] == '', 's', m[2]));
	types = ifelse(typesRaw %in% c('D', 'Q'), 's', typesRaw);
	fmts = sapply(r$match, function(m)sprintf('%%%s',
		ifelse(m[2] %in% c('', 'D', 'Q', 'q', 'u'), 's', m[2])));
	# rewrite fmt into a plain sprintf format string
	fmt1 = Substr(fmt, r$positions, attr(r$positions, 'match.length'), fmts);
	keys = sapply(r$match, function(i)i[1]);
	nonKeysI = cumsum(keys == '');	# indeces of values not passed by name
	nonKeysIdcs = which(keys == '');
	# <p> collect all values
	allValues = c(extraValues, dict);
	# get interpolation variables from envir for keys missing in allValues
	interpolation = nlapply(keys[keys != ''], function(k)
		if (!is.null(allValues[[k]])) NULL else rget(k, default = NA, envir = envir)
	);
	# <p> handle %D: current day
	keys[typesRaw == 'D'] = '..Sprintf.date..';
	dateValue = if (sum(typesRaw == 'D'))
		list(`..Sprintf.date..` = format(Sys.time(), '%Y%d%m')) else
		list();
	allValues = c(allValues, dateValue, List_(interpolation, rm.null = T));
	# 14.9.2015 -> convert to indeces
	# build value combinations
	listedValues = lapply(keys, function(k)allValues[[k]]);
	dictDf = if (!sprintf_cartesian) Df_(listedValues) else merge.multi.list(listedValues);
	#if (substr(fmt, 0, 5) == '%{wel') browser();
	# fill names of anonymous formats
	keys[keys == ''] = names(dictDf)[Seq(1, sum(nonKeysI != 0))];
	# due to repeat rules of R vectors might have been converted to factors
	#dictDf = Df_(dictDf, as_character = unique(keys[types == 's']));
	dictDf = Df_(dictDf, as_character = which(types == 's'));
	# <p> conversion <i>: new function
	#colsQ = keys[typesRaw == 'Q'];
	# <!> switch to index based transformation on account of duplicate keys
	colsQ = which(typesRaw == 'Q');
	dictDf[, colsQ] = apply(dictDf[, colsQ, drop = F], 2, qs);
	#colsq = keys[typesRaw == 'q'];
	colsq = which(typesRaw == 'q');;
	dictDf[, colsq] = apply(dictDf[, colsq, drop = F], 2, qss);
	colsu = which(typesRaw == 'u');;
	dictDf[, colsu] = apply(dictDf[, colsu, drop = F], 2, uc.first);
	colsd = which(typesRaw == 'd');;
	dictDf[, colsd] = apply(dictDf[, colsd, drop = F], 2, as.integer);
	# one output string per value combination (row of dictDf)
	s = sapply(1:nrow(dictDf), function(i) {
		valueDict = as.list(dictDf[i, , drop = F]);
#		sprintfValues = lapply(seq_along(keys), function(i)
#			ifelse(keys[i] == '', extraValues[[nonKeysI[i]]],
#			firstDef(valueDict[[keys[i]]], rget(keys[i], default = '__no value__'), pos = -2)));
#		sprintfValues = lapply(seq_along(keys), function(i)
#			firstDef(valueDict[[keys[i]]], rget(keys[i], default = '__no value__', envir = envir)));
		#sprintfValues = lapply(seq_along(keys), function(i)valueDict[[keys[i]]]);
		#do.call(sprintf, c(list(fmt = fmt1), sprintfValues))
		# <!> simplify above two lines, now robust against duplicated entries -> <i> needs unit tests
		names(valueDict) = NULL;
		do.call(sprintf, c(list(fmt = fmt1), valueDict))
	});
	s
}
# sprintf with named %{key}fmt placeholders (see Sprintfl); values as ...
Sprintf = sprintd = function(fmt, ..., sprintf_cartesian = FALSE, envir = parent.frame()) {
	Sprintfl(fmt, list(...), sprintf_cartesian = sprintf_cartesian, envir = envir);
}
#r = getPatternFromStrings(DOC, '(?:\\nDOCUMENTATION_BEGIN:)([^\\n]+)\\n(.*?)(?:\\nDOCUMENTATION_END\\n)');
# For each string, extract all occurrences of pattern (capturesAll) and build
# a dict: capture group keyIndex -> remaining capture groups of that match.
getPatternFromStrings = function(strings, pattern, keyIndex = 1) {
	r = lapply(strings, function(s) {
		ps = fetchRegexpr(pattern, s, capturesAll = T);
		listKeyValue(sapply(ps, function(e)e[[keyIndex]]), sapply(ps, function(e)e[-keyIndex]));
	});
	r
}
# Read each file (readFile: project helper; locations are searched as path
# prefixes) and extract patterns via getPatternFromStrings.
getPatternFromFiles = function(files, locations = NULL, ...) {
	strings = sapply(files, function(f)readFile(f, prefixes = locations));
	getPatternFromStrings(strings, ...);
}
#
# hex strings
#
# Byte codes of x: one integer (0..255) per byte of the string.
asc = function(x) strtoi(as.character(charToRaw(x)), 16L)
# Split each string of str into its individual characters.
# For a single string a (1-column, via sapply simplification) vector of
# characters results; for several equal-length strings, one column per string.
character.as.characters = function(str) {
	# <!> fix: substr must address the current element s, not the whole vector
	# str (the old code mixed characters across elements for vector input)
	sapply(str, function(s) sapply(1:nchar(s), function(i)substr(s, i, i)));
}
# bit_most_sig in bits
# Parse a hex string as a signed integer, treating bit bit_most_sig as the
# sign bit (two's complement); the minimal representable value maps to NA.
hex2int = function(str, bit_most_sig = 32) {
	# digits, least significant first
	cs = rev(sapply(character.as.characters(tolower(str)), asc));
	cms = bit_most_sig / 4; # character containing most significant bit
	is = ifelse(cs >= asc('a'), cs - asc('a') + 10, cs - asc('0'));
	flipSign = (length(is) >= cms && is[cms] >= 8);
	if (flipSign) is[cms] = is[cms] - 8;	# clear the sign bit
	r = sum(sapply(1:length(is), function(i)(is[i] * 16^(i-1))));
	if (flipSign) r = r - 2^(bit_most_sig - 1);
	r = if (r == - 2^(bit_most_sig - 1)) NA else as.integer(r);
	r
}
# chunk_size in bits
# Split a long hex string into chunks of chunk_size bits and parse each with
# hex2int (left-to-right; a trailing partial chunk is allowed).
hex2ints = function(str, chunk_size = 32) {
	l = nchar(str);
	csc = chunk_size / 4; # chunk_size in characters
	chunks = (l + csc - 1) %/% csc;	# ceiling division
	r = sapply(1:chunks, function(i)hex2int(substr(str, (i - 1)*csc + 1, min(l, i*csc))));
	r
}
#
# <§> binary numbers/n-adic numbers
#
# Digits of o in the given base, least significant digit first, over exactly
# 'digits' positions (higher digits are truncated).
ord2base = dec2base = function(o, digits = 5, base = 2) {
	sapply(1:digits, function(pos) (o %/% base^(pos - 1)) %% base)
}
# Inverse of ord2base: combine base-digits (least significant first) into a
# number.
base2ord = base2dec = function(v, base = 2) {
	sum(v * base^(seq_along(v) - 1))
}
# Binary (base 2) convenience wrappers around the generic converters above.
ord2bin = dec.to.bin = function(number, digits = 5) ord2base(number, digits, base = 2);
bin2ord = bin.to.dec = function(bin) base2ord(bin, base = 2);
#
# <Par> sequences
#
#' Produce constrained sequences
#'
#' This is a wrapper around seq that adds constraints. Setting ascending, descending to NA reverts to
#' standard \code{seq} behaviour.
#'
#' @param ascending restrict sequences to be ascending; return empty list if to < from
#' @param descending restrict sequences to be descending; return empty list if from < to
#' @param neg negate the sequence; constraint violations then return TRUE
#' @examples
#' Seq(1, 10, ascending = T)
#' Seq(1, 10, descending = T)
#' Seq(10, 1, ascending = NA)
Seq = function(from, to, ..., ascending = T, descending = !ascending, neg = F) {
	# <!> order matters: if called with only descending == T
	# NOTE(review): nif() is a project helper (presumably NA-tolerant truth
	# test) -- confirm before refactoring
	if (nif(descending) && to > from) return(if (neg) T else c()) else
	if (nif(ascending) && from > to) return(if (neg) T else c());
	s = seq(from, to, ...);
	r = if (neg) -s else s;
	r
}
#' Produce index pairs for vector of counts
#'
#' @param counts vector of integers specifying counts
#' @return vector alternating first and last index of each consecutive block
#'   of the sizes given by \code{counts}
#' @examples
#' count2blocks(c(1, 5, 3))
count2blocks = function(counts) {
	ends = cumsum(counts);
	starts = ends - counts + 1;
	as.vector(rbind(starts, ends))
}
#
# expand a block list - for example as from count2blocks - to a list of integers
#
# Each (from, to) pair becomes the sequence from:to; apply() yields a matrix
# when all blocks have equal length, otherwise a list.
expandBlocks = function(blks) {
	pairs = matrix(blks, ncol = 2, byrow = T);
	apply(pairs, 1, function(bounds) bounds[1]:bounds[2])
}
# Split 1:M into N contiguous index ranges; returns a matrix with one row per
# part and columns (from, to).
#	.compact: all parts of size ceiling(M/N) except a possibly shorter tail
#		(default: part sizes differ by at most one, larger parts first)
#	.truncate: cap N at M so that no empty parts arise
splitListIndcs = function(M, N = 1, .compact = F, .truncate = T) {
	# <!> fix: scalar condition, use && instead of bitwise &
	if (.truncate && M < N) N = M;
	if (.compact) {
		n = rep(ceiling(M / N), N); # size of parts
		idcs = c(0, cumsum(n));
		idcs = idcs[idcs < M];
		idcs = c(idcs, M);
	} else {
		n = rep(floor(M / N), N); # size of parts
		R = M - n[1] * N;	# remainder, distributed one-each to the first R parts
		n = n + c(rep(1, R), rep(0, N - R));
		idcs = c(0, cumsum(n));
	}
	idcs = cbind(idcs + 1, c(idcs[-1], 0))[-length(idcs), ]; # from:to in a row
	# <!> usual R degeneracy: a single row collapses to a vector
	if (!is.matrix(idcs)) idcs = matrix(idcs, nrow = 1);
	idcs
}
# Split list l into N roughly equal consecutive parts (via splitListIndcs).
# returnElements: return the elements themselves instead of their indeces.
splitListEls = function(l, N, returnElements = FALSE) {
	idcs = splitListIndcs(length(l), N);
	li = apply(idcs, 1, function(r)(if (returnElements) l[r[1]:r[2]] else r[1]:r[2]));
	# <!> R ambiguity of apply return type: normalize matrix/vector to list
	if (is.matrix(li)) li = lapply(1:(dim(li)[2]), function(i)li[, i]);
	if (is.vector(li)) li = as.list(li);;
	li
}
# @arg l list of index positions from another object
# @return return vector indicating to which list element an index was assigned
# Example: glmnet accepts fold numbers per index (as opposed to a partitioning of elements)
# NOTE(review): vector.assign is a project helper (assign values at given
# positions into a template vector) -- confirm semantics before refactoring.
index2listPosition = function(l) {
	N = sum(sapply(l, length));
	na = rep(NA, N);
	m = sapply(1:length(l), function(i)vector.assign(na, l[[i]], i, na.rm = NA));
	r = apply(m, 1, na.omit);
	r
}
# splitting based on fractions
# voting percentages to seats
# Apportion Nseats according to the vote fractions: every party gets at least
# one seat, otherwise counts are rounded to nearest; a remaining surplus or
# deficit is corrected on the seats with the most favorable rounding residuals.
splitSeatsForFractions = function(Nseats, fractions) {
	Nparties = length(fractions);	# number of parties
	exact = fractions * Nseats;	# fractional seat counts
	seats = ifelse(exact < 1, 1, round(exact));	# guarantee one seat, else round
	surplus = sum(seats) - Nseats;	# mismatch after rounding
	if (surplus != 0) {
		# residuals; parties pushed up to their guaranteed seat become
		# least eligible (residual forced to 1)
		resid = sapply(exact - seats, function(d) ifelse(d < 0, 1, d));
		adjust = order(resid, decreasing = surplus < 0)[1:abs(surplus)];
		# assume one round of correction is always sufficient <!>
		seats[adjust] = seats[adjust] - sign(surplus);
	}
	seats
}
# tranform number of elements (as from splitSeatsForFractions) into from:to per row in a matrix
# (a single count degenerates to a plain (from, to) vector, as elsewhere)
counts2idcs = function(counts) {
	bounds = c(0, cumsum(counts));
	cbind(bounds + 1, c(bounds[-1], 0))[-length(bounds), ]
}
# N is partitioned into fractions from p, where each element of p partitions the remaining part of N
# procedure makes sure to leave space for length(p) elements
# Returns the integer cut points (cumulative indeces within 1:N).
# NOTE(review): ifelse() is used on scalar conditions here; plain if/else
# would be clearer -- behavior as written is kept.
cumpartition = function(N, p) {
	I = c(); # indeces within 1:N
	for (i in 1:length(p)) {
		# partition remaining space (ifelse), leave room for subsequent indeces
		Ii = floor(p[i] * (ifelse(i == 1, N, N - I[i - 1]) - (length(p) - i))) + 1;
		I = c(I, ifelse(i == 1, Ii, I[i - 1] + Ii));
	}
	as.integer(I)
}
#' Extract parts of a nested structure based on the range from..to
#'
#' @param Ns Vector of integers that specify the size of the substructure
#' @param from first element (counted across all segments) to include
#' @param to last element to include; defaults to the total element count
#' @return Return list of list, where each basic list contains key \code{segment}
#' (which of the elements of Ns) and key \code{range}, a list with elements \code{from} and \code{to},
#' specifying which elements to use from
#' that segment.
subListFromRaggedIdcs = function(Ns, from = 1, to = sum(Ns)) {
	# <!> fix: default was sum(segments), referencing a body-local variable
	# whose own computation reads 'to' -- the default could never be evaluated
	NsCS = cumsum(Ns);
	NsCSs = c(0, pop(NsCS)); # shifted cumsum (pop: project helper, drop last)
	# segments overlapping the requested range
	segments = which(from <= NsCS & to > NsCSs);
	r = lapply(segments, function(segment){
		N = Ns[segment];	# list-call
		from_ = 1;
		to_ = N;
		# clip the first and last overlapping segment to the range bounds
		if (segment == segments[1]) from_ = from - NsCSs[segment];
		if (segment == rev(segments)[1]) to_ = to - NsCSs[segment];
		r = list(segment = segment, range = list(from = from_, to = to_));
		r
	});
	r
}
#' Extract parts of nested lists based on the range from..to
#'
#' @param ls nested list structure (currently only two levels supported)
#' @param from first element (counted across all sub-lists) to include
#' @param to last element to include; defaults to the total element count
#' @return flat list of the selected elements (one level unlisted)
subListFromRaggedLists = function(ls, from = 1, to = sum(sapply(ls, length))) {
	sl = subListFromRaggedIdcs(sapply(ls, length), from = from, to = to);
	r = lapply(sl, function(s) with(s, {
		r = ls[[segment]][range$from: range$to];
		r
	}));
	r = unlist.n(r, 1);
	r
}
#
# <§> vector functions
#
# TRUE iff v is a vector with a non-NA entry at position i.
exists.pos = function(v, i) {
	if (!is.vector(v)) return(FALSE);
	!is.na(v[i])
}
#
# <par> lists
#
# Merge any number of (named) lists left to right; later values override
# earlier ones.
#	ignore.nulls: skip keys whose new value is NULL (keeps the old value)
#	listOfLists: ... is a single list of lists instead of separate arguments
#	concat: concatenate old and new value instead of replacing
#	useIndeces: merge by position instead of by name
merge.lists = function(..., ignore.nulls = TRUE, listOfLists = FALSE, concat = FALSE, useIndeces = FALSE) {
	lists = if (listOfLists) c(...) else list(...);
	l1 = lists[[1]];
	if (length(lists) > 1) for (i in 2:length(lists)) {
		l2 = lists[[i]];
		# <!> fix: seq_along instead of 1L:length(l2), which yielded c(1, 0)
		# for an empty list
		ns = if (useIndeces) seq_along(l2) else names(l2);
		for(n in ns) {
			if (is.null(n)) print("Warning: tried to merge NULL key");
			if (!is.null(n) & (!ignore.nulls | !is.null(l2[[n]]))) {
				if (concat) l1[[n]] = c(l1[[n]], l2[[n]]) else l1[[n]] = l2[[n]];
			}
		}
	}
	l1
}
# Recursive variant of merge.lists: when the accumulated value under a key is
# itself a list, the incoming value is merged into it recursively; otherwise
# the later value replaces the earlier one.
merge.lists.recursive = function(..., ignore.nulls = TRUE, listOfLists = F) {
	lists = if (listOfLists) c(...) else list(...);
	merged = lists[[1]];
	if (length(lists) > 1) for (i in 2:length(lists)) {
		other = lists[[i]];
		for (key in names(other)) {
			if (is.null(key)) print("Warning: tried to merge NULL key");
			if (is.null(key) || (ignore.nulls && is.null(other[[key]]))) next;
			merged[[key]] = if (is.list(merged[[key]]))
				merge.lists.recursive(merged[[key]], other[[key]]) else
				other[[key]];
		}
	}
	merged
}
# Split off the first element of each list:
# returns list(elements = first elements, remainder = rests).
# listOfList = FALSE treats l itself as the single list to split.
unshift = function(l, listOfList = T) {
	if (!listOfList) l = list(l);
	heads = lapply(l, function(x) if (is.list(x)) x[[1]] else x[1]);
	tails = lapply(l, function(x) x[-1]);
	list(elements = heads, remainder = tails)
}
# Merge a list of lists; with recursive = TRUE, sub-lists are merged
# recursively, optionally restricted to the key pathes given by 'keys'
# (a nested structure as produced by unshift; NULL = all keys).
Merge.lists.raw = function(lists, ignore.nulls = TRUE, recursive = FALSE, keys = NULL) {
	# advance one level in the key-path structure
	if (!is.null(keys)) keys = unshift(keys);
	l1 = lists[[1]];
	if (length(lists) > 1) for (i in 2:length(lists)) {
		l2 = lists[[i]];
		for(n in names(l2)) {
			if (is.null(n)) print("Warning: tried to merge NULL key");
			if (!is.null(n) & (!ignore.nulls | !is.null(l2[[n]])))
				l1[[n]] = if (recursive && is.list(l1[[n]]) && (is.null(keys) || n %in% keys$elements))
					Merge.lists.raw(list(l1[[n]], l2[[n]]), ignore.nulls, recursive,
						if (is.null(keys)) NULL else keys$remainder) else
					l2[[n]]
		}
	}
	l1
}
# Front end for Merge.lists.raw; keyPathes are '$'-separated key path strings
# restricting which sub-lists are merged recursively.
Merge.lists = function(..., ignore.nulls = TRUE, listOfLists = F, recursive = F, keyPathes = NULL) {
	lists = if (listOfLists) c(...) else list(...);
	keys = if (!is.null(keyPathes)) splitString("[$]", keyPathes, simplify = F) else NULL;
	l = Merge.lists.raw(lists, ignore.nulls = ignore.nulls, recursive = recursive, keys = keys);
	l
}
# Compare result r against expectation e (package 'compare'); on mismatch,
# print both. Returns the comparison result (logical).
# NOTE(review): require() on a third-party package at call time -- consider
# requireNamespace() with an informative error instead.
compare_print = function(r, e) {
	require('compare');
	cmp = compare(model = r, comparison = e);
	if (!cmp$result) {
		print("Expectation not met (result != expectation):");
		print(r);
		print(e);
	}
	cmp$result
}
# use.names preserves names and concatenates with lower level names
# reset sets names to top level names
# Unlist exactly n levels deep (n <= 0 returns l unchanged).
unlist.n = function(l, n = 1, use.names = T, reset = F) {
	if (n > 0) for (level in 1:n) {
		savedNames = names(l);
		l = unlist(l, recursive = F, use.names = use.names);
		if (reset) names(l) = savedNames;
	}
	l
}
# <N> obsolete, better: with(l, { ...})
# Define each element of l as a variable in the calling frame (n frames up).
# NOTE(review): uses eval.parent(parse(...)) on deparsed values -- fragile
# for values that do not round-trip through deparse; prefer assign()/with().
instantiate.list = function(l, n = 1) {
	for (nm in names(l)) {
		eval.parent(parse(file = "", text = sprintf("%s = %s", nm, deparse(l[[nm]]))), n = n);
#		if (is.integer(l[[nm]])) {
#			eval.parent(parse(file = "", text = sprintf("%s = %d", nm, l[[nm]])), n = n);
#		} else if (is.numeric(l[[nm]])) {
#			eval.parent(parse(file = "", text = sprintf("%s = %f", nm, l[[nm]])), n = n);
#		} else {
#			eval.parent(parse(file = "", text = sprintf("%s = \"%s\"", nm, l[[nm]])), n = n);
#		};
	}
}
# for use in testing code
# Assign every element of l plus any extra named arguments as variables in
# envir; returns the combined list invisibly.
instantiate = function(l, ..., envir = parent.frame()) {
	values = c(l, list(...));
	for (i in seq_along(values)) assign(names(values)[i], values[[i]], envir = envir);
	invisible(values)
}
# assume a list of lists (aka vector of dicts) and extract a certain key from each of the lists
# null2na: map missing keys to NA; template: value used for non-list elements
list.key = function(v, key, unlist = T, template = NULL, null2na = F) {
	extracted = lapply(v, function(item) {
		if (!is.list(item)) return(template);
		value = item[[key]];
		if (is.null(value)) { if (null2na) NA else NULL } else value
	});
	if (unlist) extracted = unlist(extracted);
	extracted
}
# extract key path from list, general, recursive version
# key path recursive worker
# keys: vector of path components; '*' iterates all elements, '[[i]]' selects
# by index; unlist.pats: per-level unlist flags; test: existence check only.
list.kprw = function(l, keys, unlist.pats, template, null2na, carryNames, test) {
	key = keys[1];
	# <p> extract key
	r = if (key != "*") {
		# numeric index written as [[i]]
		index = fetchRegexpr("\\A\\[\\[(\\d+)\\]\\]\\Z", key, captures = T);
		if (length(index) > 0) key = as.integer(index[[1]]);
		if (is.list(l)) {
			r = if (is.null(l[[key]])) {
				if (null2na) NA else NULL
			} else l[[key]];
			if (length(keys) > 1)
				list.kprw(r, keys[-1], unlist.pats[-1], template, null2na, carryNames, test) else
				if (test) !(is.null(r) || all(is.na(r))) else r;
		} else if (class(l) %in% c('character')) {
			l[names(l) %in% key];
		} else if (class(l) %in% c('data.frame', 'matrix')) {
			l[, key]
		} else return(template)
	} else {
		# wildcard: recurse into every element
		if (length(keys) > 1)
			lapply(l, function(sl)
				list.kprw(sl, keys[-1], unlist.pats[-1], template, null2na, carryNames, test)
			) else l;
	}
	# <p> unlisting
	if (!is.null(unlist.pats)) if (unlist.pats[1]) r = unlist.n(r, 1, reset = carryNames);
	r
}
# wrapper for list.kprw
# keyPath obeys EL1 $ EL2 $ ..., where ELn is '*' or a literal
# unlist.pat is pattern of truth values TR1 $ TR2 $..., where TRn is in 'T|F' and specifies unlist actions
# carryNames determines names to be carried over from the top level in case of unlist
list.kpr = function(l, keyPath, do.unlist = F, template = NULL,
	null2na = F, unlist.pat = NULL, carryNames = T, as.matrix = F, test = F) {
	keys = fetchRegexpr("[^$]+", keyPath);
	unlist.pats = if (!is.null(unlist.pat)) as.logical(fetchRegexpr("[^$]+", unlist.pat)) else NULL;
	r = list.kprw(l, keys, unlist.pats, template, null2na, carryNames, test = test);
	if (do.unlist) { r = unlist(r); }
	if (as.matrix) r = t(sapply(r, function(e)e));
	r
}
# extract key path from list
# <!> interface change: unlist -> do.unlist (Wed Sep 29 18:16:05 2010)
# test: test existance instead of returning value
# Applies keyPath to every element of l (prefixes the path with '*$').
list.kp = function(l, keyPath, do.unlist = F, template = NULL, null2na = F, test = F) {
	r = list.kpr(l, sprintf("*$%s", keyPath), do.unlist = do.unlist, template, null2na = null2na, test = test);
	r
}
# Look up several keys in l, substituting 'default' for missing ones.
list.keys = function(l, keys, default = NA) {
	l = as.list(l);
	lapply(unlist(keys), function(k) if (is.null(l[[k]])) default else l[[k]])
}
# return list without listed keys
# NOTE(review): which.indeces is a project helper; when no key matches,
# l[-integer(0)] drops ALL elements -- confirm callers guarantee matches.
list.min = function(l, keys) {
	l[-which.indeces(keys, names(l))]
}
# list generation on steroids (wraps other functions)
# .min: keys to remove from l (see list.min)
.list = function(l, .min = NULL) {
	if (!is.null(.min)) l = list.min(l, .min);
	l
}
# get apply
# Shorthand: extract 'key' from each element of l (see list.key).
gapply = function(l, key, unlist = F)list.key(l, key, unlist)
# construct list as a dictionary for given keys and values
listKeyValue = function(keys, values) {
	if (length(keys) != length(values))
		stop("listKeyValue: number of provided keys does not match that of values");
	setNames(as.list(values), keys)
}
# Attach names to v; errors when more names than elements are supplied.
vectorNamed = function(v, names) {
	if (length(v) < length(names)) stop("vectorNamed: more names than vector elements");
	setNames(v, names)
}
#listInverse = function(l)listKeyValue(avu(l), names(l));
# Invert a dict: values become keys, each mapping back to its original key
# (multi-valued entries are expanded). rep.each/avu: project helpers.
listInverse = function(l, toNA = F) {
	n = sapply(l, length);
	# <p> values of inverse map
	vs = rep.each(names(l), n);
	# <p> construct list
	r = listKeyValue(avu(l, recursive = F, toNA = toNA), vs);
	r
}
# name the list elements by the iterated vector elements ns (names)
# (a list argument is iterated by its names)
nlapply = function(ns, f, ...) {
	if (is.list(ns)) ns = names(ns);
	setNames(lapply(ns, f, ...), ns)
}
# Apply f(name, value, ...) over the elements of l; result keeps l's names.
nelapply = function(l, f, ...) {
	ns = names(l);
	# <!> fix: forward ... to f (it was accepted but silently dropped)
	r = lapply(ns, function(n, ...)f(n, l[[n]], ...), ...);
	names(r) = ns;
	r
}
# Apply f(element, index, ...) over l; keeps names. Empty l yields list().
ilapply = function(l, f, ...) {
	# <!> fix: seq_along instead of 1:length(l), which broke for empty lists
	r = lapply(seq_along(l), function(i)f(l[[i]], i, ...));
	if (!is.null(names(l))) names(r) = names(l);
	r
}
# pass element, index, name
# Apply f(element, index, name, ...) over l; result keeps l's names.
einlapply = function(l, f, ...) {
	ns = names(l);
	# <!> fix: seq_along instead of 1:length(l), which broke for empty lists
	r = lapply(seq_along(l), function(i)f(l[[i]], i, ns[i], ...));
	names(r) = ns;
	r
}
# Apply f(key, value, ...) over l; result keeps l's names.
kvlapply = function(l, f, ...) {
	ns = names(l);
	# <!> fix: seq_along instead of 1:length(l), which broke for empty lists
	r = lapply(seq_along(l), function(i)f(ns[i], l[[i]], ...));
	names(r) = ns;
	r
}
# USE.NAMES logic reversed for sapply
sapplyn = function(l, f, ...) sapply(l, f, ..., USE.NAMES = FALSE)
# Build a list from the named arguments, additionally storing each element's
# name inside the element itself under key .key.
list.with.names = function(..., .key = 'name') {
	l = list(...);
	ns = names(l);
	r = nlapply(l, function(n) c(l[[n]], listKeyValue(.key, n)));
	r
}
#
# <par> data type conversions
#
# assure m has at least 1 column: dimensionless input becomes a column matrix
to.col = function(m) {
	if (!is.null(dim(m))) return(m);
	t(t(m))
}
# Build a transposed one-row data frame from list l (row names from col.name).
#	minus: keys to drop; plus: keys to process instead of all keys
#	ignore.null: drop NULL entries first
#	do.paste: collapse vector values into one string with this separator,
#		formatting numerics to 'digits' decimals when do.format is set
col.frame = function(l, col.name = 'value', minus = NULL, ignore.null = TRUE,
	do.paste = NULL, do.format = T, digits = 3, plus = NULL) {
	if (ignore.null) { for (n in names(l)) { if (is.null(l[[n]])) l[[n]] = NULL; } }
	if (!is.null(minus)) { for (n in minus) { l[[n]] = NULL; } }
	my.names = if (!is.null(plus)) plus else names(l);
	digits = if (length(digits) > 1) digits else rep(digits, length(l));
	if (!is.null(do.paste)) {
		if (do.format) {
			i = 1;
			for (n in my.names) { if (is.vector(l[[n]])) {
				l[[n]] = paste(sapply(l[[n]],
					function(e){if (is.numeric(e)) sprintf("%.*f", digits[i], e) else e}
				), collapse = do.paste)
				i = i + 1;
			}}
		} else {
			for (n in my.names) { if (is.vector(l[[n]])) l[[n]] = paste(l[[n]], collapse = do.paste) }
		}
	}
	f = as.data.frame(l);
	if (dim(f)[2] > length(col.name) && length(col.name) == 1)
		row.names(f) = paste(col.name, 1:dim(f)[1], sep = "")
	else row.names(f) = c(col.name);
	t(f)
}
# <i> collect recursively until list or data.frame
# convert list of lists to data frame (assuming identical keys for each sub list)
# also works on list of vectors
# idColumn: name for a leading column holding the list names (NULL to omit);
# .names: column names to use (defaults to the collected keys)
listOfLists2data.frame = function(l, idColumn = "id", .names = NULL) {
	# collect keys: union of sub-list names, or positions for vectors
	keys = if (is.list(l[[1]]))
		sort(unique(as.vector(unlist(sapply(l, function(e)names(e)))))) else 1:length(l[[1]]);
	if (is.null(.names)) .names = keys;
	# row names
	rows = names(l);
	if (is.null(rows)) rows = 1:length(l);
	# build df
	#df = t(sapply(rows, function(r) { unlist(l[[r]][keys]) }));
	df = t(sapply(rows, function(r)list2df(l[[r]], keys)));
	df = if (!is.null(idColumn)) {
		data.frame.types(data.frame(..idColumn.. = rows, df),
			row.names = 1:length(rows), names = c(idColumn, .names));
	} else {
		data.frame.types(df, row.names = rows, names = .names);
	}
	df
}
# resetColNames: reset column names to names of first data frame
# colsFromFirstDf: take columns from the first data frame
# <i> improved algorithm: unlist everything, bind together: cave: data types,
# strictly valid only for matrices
# Use cases:
# list with named vectors: get data frame that contains all vectors with all possible names represented
# listOfDataFrames2data.frame(cfs, colsFromUnion = T, do.transpose = T, idColumn = NULL);
# Bind a list of data frames (or coercibles) into one data frame.
#	idColumn: leading column holding the originating list name (NULL to omit)
#	direction: rbind (stack) or cbind (join columns)
#	colsFromUnion: homogenize to the union of all columns, filling NA
listOfDataFrames2data.frame = function(l, idColumn = "id", do.unlist = T, direction = rbind,
	resetColNames = T, colsFromFirstDf = F, colsFromUnion = F, do.transpose = F, idAsFactor = F) {
	# row names
	# <!> 2009-11-20 changed from: rows = firstDef(names(l), list(1:length(l)));
	rows = firstDef(names(l), 1:length(l));
	# columns
	ns = NULL;
	if (colsFromUnion) {
		ns = unique(unlist(lapply(l, names)));
		# get data.frame names
		ns = names(do.call(data.frame, listKeyValue(ns, rep(NA, length(ns)))));
		resetColNames = F;	# <!> mutually exclusive
	}
	# build df
	df = NULL;
	for (i in 1:length(rows)) {
		if (is.null(l[[i]])) next;	# ignore empty entries
		# <p> force to data frame
		df0 = if (do.transpose) as.data.frame(t(l[[i]])) else as.data.frame(l[[i]]);
		# <p> homogenize columns
		if (colsFromUnion) {
			# add missing columns
			ns0 = setdiff(ns, names(df0));
			df0 = do.call(data.frame, c(list(df0), listKeyValue(ns0, rep(NA, length(ns0)))));
			# correct order of columns
			df0 = df0[, ns];
		}
		if (!is.null(df)) {
			if (colsFromFirstDf) df0 = df0[, names(df)] else
			if (resetColNames) {
				names(df0) = if (is.null(idColumn)) names(df) else names(df)[-1];
			}
		}
		# <p> add id column
		df0 = if (is.null(idColumn)) df0 else cbind(rep(rows[i], dim(df0)[1]), df0);
		# <A> case differentiation should not me necessary
		df = if (i == 1) df0 else direction(df, df0);
	}
	if (!is.null(idColumn)) names(df)[1] = idColumn;
	if (do.unlist) for (n in names(df)) { df[[n]] = unlist(df[[n]]); }
	if (idAsFactor) df[[idColumn]] = as.factor(df[[idColumn]]);
	row.names(df) = NULL;
	df
}
# Column-bind a list of data frames (see listOfDataFrames2data.frame).
cbindDataFrames = function(l, do.unlist = F) {
	listOfDataFrames2data.frame(l, idColumn = NULL, do.unlist = do.unlist, direction = cbind,
		resetColNames = F)
}
# Row-bind a list of data frames; with useDisk the frames are streamed through
# a temporary table file instead of being bound in memory.
rbindDataFrames = function(l, do.unlist = F, useDisk = F, idColumn = NULL, transpose = F,
	resetColNames = F, colsFromFirstDf = F, idAsFactor = F) {
	r = if (useDisk) {
		tempTable = tempfile();
		for (i in 1:length(l)) {
			d0 = l[[i]];
			if (class(d0) != 'data.frame') d0 = as.data.frame(d0);
			if (transpose) d0 = t(d0);
			if (!is.null(idColumn)) {
				d0 = data.frame(idColumn = names(l)[i], d0);
				names(d0)[1] = idColumn;
			}
			# first frame writes the header, subsequent frames append
			write.table(d0, file = tempTable, col.names = i == 1, append = i != 1, row.names = F);
		}
		read.table(tempTable, header = T, as.is = T);
	} else {
		listOfDataFrames2data.frame(l, idColumn = idColumn, do.unlist = do.unlist,
			direction = rbind, resetColNames = resetColNames, colsFromFirstDf = colsFromFirstDf,
			idAsFactor = idAsFactor)
	}
	r
}
# names2col assigns names of the list to a column of the data frame and values to the valueCol
# Convert list l to a one-row data frame over columns 'cols' (missing keys
# become NA); with names2col, reshape into a (name, value) two-column frame.
list2df = function(l, cols = names(l), row.name = NULL, names2col = NULL, valueCol = 'value') {
	idcs = if (is.null(cols)) 1:length(l) else
		if (all(is.integer(cols))) cols else which.indeces(names(l), cols);
	if (is.null(cols) || all(is.integer(cols))) cols = paste('C', 1:length(l), sep = '');
	r = as.list(rep(NA, length(cols)));
	names(r) = cols;
	r[idcs] = l;
	r = as.data.frame(r, stringsAsFactors = F);
	if (!is.null(row.name)) row.names(r)[1] = row.name;
	if (!is.null(names2col)) {
		r = data.frame(name = names(r), value = unlist(r[1, ]), row.names = NULL, stringsAsFactors = F);
		names(r) = c(names2col, valueCol);
	}
	r
}
# TRUE per element iff it looks like a decimal number (optional sign, decimal
# point, exponent). The result is named by the input for character vectors,
# matching the previous sapply-based behavior.
be.numeric = function(v) {
	# grepl is already vectorized; the former per-element sapply additionally
	# returned an empty *list* for empty input
	r = grepl('^-?\\d*(\\.\\d+)?(e-?\\d+)?$', v, ignore.case = T, perl = T);
	if (is.character(v)) names(r) = v;
	r
}
# Print list l as a (name, value) table; numeric-looking values are formatted
# with 'digits' decimals, switching to scientific notation when the magnitude
# exceeds 10^scientific. Entries with empty names and NA values render as
# separator rows. NOTE(review): log10range is computed but unused (superseded
# by the per-value format choice below).
list2df.print = function(l, valueCol = 'value', names2col = NULL, ..., digits = 3, scientific = 3) {
	l1 = list2df(l, valueCol = valueCol, names2col = names2col, ...);
	numericRows = be.numeric(l1[[valueCol]]);
	numbers = as.numeric(l1[[valueCol]][numericRows]);
	log10range = max(floor(log10(numbers))) - min(floor(log10(numbers)));
	#fmt = if (log10range > digits + 1) '%.*e' else '%.*f';
	numbers = sprintf(ifelse(abs(floor(log10(numbers))) > scientific, '%.*e', '%.*f'), digits, numbers);
	#numbers = sapply(numbers, function(n)sprintf(fmt, digits, n));
	separators = as.vector(names(l) == '' & is.na(l));
	l1[separators, names2col] = '-';
	l1[separators, valueCol] = '';
	l1[numericRows, valueCol] = numbers;
	print(l1);
}
rbind.list2df = function(d, l, row.name = NULL) {
d = as.data.frame(d);
r = list2df(l, names(d), row.name);
r0 = rbind(d, r);
r0
}
# take list of lists
# names of list elements become column-names
# Each inner list supplies one row; columns are gathered per name via list.kp.
listOfLists2df = function(l, columnNames = names(l[[1]])) {
	colV = lapply(columnNames, function(n)Df_(list.kp(l, n, do.unlist = T)));
	r = Df_(do.call(cbind, colV), names = columnNames);
	r
}
# d: data frame, l: list with names corresponding to cols, values to be searched for in columns
# Returns indeces of rows of d whose values equal l on the columns names(l).
searchDataFrame = function(d, l, .remove.factors = T) {
	ns = names(l);
	d = d[, ns, drop = F];
	if (.remove.factors) {
		# NOTE(review): ifelse() is vectorized on a scalar condition here, so only
		# the first element of a vector-valued e would be kept — assumes the
		# entries of l are scalars; TODO confirm
		l = sapply(l, function(e)ifelse(is.factor(e), levels(e)[e], e));
		#d = apply(d, 2, function(col)(if (is.factor(col)) levels(col)[col] else col));
	}
	rs = which(as.vector(apply(apply(d, 1, function(r)(r == l)), 2, all)));
	rs
}
# Map a column specification (numbers or names/regexes) to integer indeces of d.
# NOTE(review): is.numeric()/is.character() are scalar tests on the whole vector,
# so this relies on 'cols' being homogeneous (which atomic vectors always are).
.df.cols = which.cols = function(d, cols, regex = F) {
	cols[is.numeric(cols)] = as.integer(cols[is.numeric(cols)]);
	cols[is.character(cols)] = which.indeces(cols[is.character(cols)], names(d), regex = regex);
	as.integer(cols)
}
# select columns by name
.df = function(d, names, regex = T, as.matrix = F) {
	cols = which.indeces(names, names(d), regex = regex);
	d0 = d[, cols, drop = F];
	# <t> simpler version:
	# d0 = d[, .df.cols(d, names, regex)];
	if (as.matrix) d0 = as.matrix(d0);
	d0
}
# move the given columns to the front, keeping the remaining ones behind them
.df.reorder = function(d, names, regex = T) {
	cols = .df.cols(d, names, regex);
	d0 = d[, c(cols, setdiff(1:dim(d)[2], cols))];
	d0
}
# remove columns by name
.dfm = function(d, names, regex = F, as.matrix = F) {
	cols = if (all(is.numeric(names))) as.integer(names) else which.indeces(names, names(d), regex = regex);
	d0 = d[, -cols, drop = F];
	if (as.matrix) d0 = as.matrix(d0);
	d0
}
# remove rows by name
.dfrmr = function(d, names, regex = F, as.matrix = F) {
	rows = if (all(is.numeric(names)))
		as.integer(names) else
		which.indeces(names, row.names(d), regex = regex);
	d0 = d[-rows, , drop = F];
	if (as.matrix) d0 = as.matrix(d0);
	d0
}
# remove rows/columns by name
.dfrm = function(d, rows = NULL, cols = NULL, regex = F, as.matrix = F) {
	d = as.data.frame(d);	# enforce data frame
	# NULL keeps everything; otherwise the resolved indeces are negated
	rows = if (is.null(rows)) 1:dim(d)[1] else
		-(if (all(is.numeric(rows))) as.integer(rows) else which.indeces(rows, row.names(d), regex = regex));
	cols = if (is.null(cols)) 1:dim(d)[2] else
		-(if (all(is.numeric(cols))) as.integer(cols) else which.indeces(cols, names(d), regex = regex));
	d0 = d[rows, cols, drop = F];
	if (as.matrix) d0 = as.matrix(d0);
	d0
}
# convert strings to data frame names
# <i> create a data frame and extract names
# ':' is not a syntactic name character; replace it with '.'
.dfns = function(ns) gsub(':', '.', ns, fixed = TRUE);
# manipulate list of vectors
# interleave equally long vectors i = 1,..,n with entries v_ij into
# v_11, ..., v_n1, v_12, ...; accepts either individual vectors or one list
vector.intercalate = meshVectors = function(...) {
	vs = list(...);
	# a single list argument is taken to be the list of vectors itself
	if (length(vs) == 1) vs = vs[[1]];
	# columns = input vectors; transposing and flattening interleaves them
	rows = t(sapply(vs, function(v)unlist(v)));
	as.vector(rows)
}
# TRUE iff the argument is sorted non-decreasingly (negated is.unsorted)
is.sorted = function(...)(!is.unsorted(...))
# TRUE iff v is strictly increasing; vectors shorter than two are trivially ascending
is.ascending = function(v) {
	if (length(v) >= 2) for (j in 2:length(v)) {
		if (v[j] <= v[j - 1]) return(FALSE);
	}
	TRUE
}
# pad a vector to length N by appending 'value' (errors if length(v) > N)
pad = function(v, N, value = NA) append(v, rep(value, N - length(v)));
#
# <par> number sequences
#
# Repeat each element of l: n may be a scalar (every element n times) or a
# vector of per-element counts.
rep.each = function(l, n) {
	l = avu(l);
	if (length(n) == 1) as.vector(sapply(l, function(e)rep(e, n))) else
		avu(sapply(seq_along(l), function(i)rep(l[i], n[i])))
}
# Repeat each row of matrix/data frame m (scalar or per-row counts n);
# data frame input is returned as data frame with the original column names.
rep.each.row = function(m, n) {
	r = matrix(rep.each(m, n), ncol = ncol(m));
	if (class(m) == 'data.frame') r = Df_(r, names = names(m));
	r
}
rep.list = function(l, n) lapply(1:length(l), function(e)l);
# Interleave equally-dimensioned matrices/data frames row-wise (direction = 1)
# or column-wise (direction = 2): result rows/cols alternate between inputs.
matrix.intercalate = function(..., direction = 1) {
	l = list(...);
	# <!> assume same dimension
	d = dim(l[[1]]);
	N = prod(d);
	# <p> create new matrix
	# flatten all inputs (transposed for column interleaving) ...
	v = c(if (direction == 1) sapply(l, as.vector) else sapply(sapply(l, t), as.vector) , recursive = T);
	# ... and interleave them element-wise via a byrow matrix
	vN = as.vector(matrix(v, ncol = N, byrow = T));
	r = if (direction == 1)
		matrix(vN, nrow = d[1] * length(l)) else
		matrix(vN, ncol = d[2] * length(l), byrow = T);
	# <p> return value
	if (class(l[[1]]) == 'data.frame') r = Df_(r, names = names(l[[1]]));
	r
}
# Expand a weighted data frame into unweighted rows: each row is replicated
# according to its (integer) count in the 'weights' column, which is dropped.
# NOTE(review): the name carries a typo ('Weigths'); kept for compatibility.
data.frame.expandWeigths = function(data, weights = 'weights') {
	w = data[[weights]];
	weightsCol = which(names(data) == weights);
	df0 = lapply(1:length(w), function(i) {
		# zero-weight rows vanish
		if (w[i] > 0) rep.each.row(data[i, -weightsCol], w[i]) else list();
	});
	df1 = rbindDataFrames(df0);
	df1
}
# spread/fill vector to indeces
# place the values of v at positions idcs of a length-N vector filled with 'default'
vector.spread = function(v, idcs, N, default = 0) {
	out = rep(default, N);
	out[idcs] = v;
	out
}
# create new vector with length == length(v) + length(idcs)
# idcs are positions in the final vector (idcsResult = T) or positions within v
# (idcsResult = F); e supplies the values inserted at idcs
vector.embed = function(v, idcs, e, idcsResult = T) {
	if (!idcsResult) idcs = idcs + seq_along(idcs) - 1;
	total = length(v) + length(idcs);
	out = rep(NA, total);
	out[setdiff(1:total, idcs)] = v;
	out[idcs] = e;
	out
}
# set values at idcs
# afterwards any NA in the result is replaced by na.rm (skip via na.rm = NA)
vector.assign = function(v, idcs, e, na.rm = 0) {
	out = replace(v, idcs, e);
	if (!is.na(na.rm)) out = replace(out, which(is.na(out)), na.rm);
	out
}
# assign e into matrix m: idcs may be a two-column (row, col) index matrix,
# row indeces (byrow = T) or column indeces (byrow = F)
matrix.assign = function(m, idcs, e, byrow = T) {
	if (length(dim(idcs)) > 1) {
		# element-wise assignment by (row, col) pairs
		m[as.matrix(idcs)] = e;
	} else {
		if (byrow) m[idcs, ] = e else m[, idcs] = e;
	}
	m
}
# are columns/rows same values in matrix
# TRUE per row (direction = 1) or column (direction = 2) iff all entries equal the first
matrix.same = function(m, direction = 1) {
	apply(m, direction, function(x)all(x[1] == x))
}
# indeces of the elements of v for which predicate f(., ...) holds;
# not = T returns the complementary indeces
vectorIdcs = function(v, f, ..., not = F) {
	hits = sapply(v, f, ...);
	if (not) which(!hits) else which(hits)
}
# produce indeces for indeces positioned into blocks of blocksize of which count units exists
# example: expand.block(2, 10, 1:2) == c(1, 2, 11, 12)
expand.block = function(count, blocksize, indeces) {
	# offset the requested indeces into each successive block
	as.vector(apply(to.col(1:count), 1,
		function(i){ (i - 1) * blocksize + t(to.col(indeces)) }
	));
}
# indeces of the non-overlapping, length(s)-sized blocks of l that equal s exactly
# <!> fixed: 1:(length(l)/b.sz) evaluated to c(1, 0) when no complete block
# exists (length(l) < length(s)), which produced a bogus match; seq_len is safe.
search.block = function(l, s) {
	b.sz = length(s);
	which(vapply(
		seq_len(length(l) %/% b.sz),
		function(i) all(l[((i - 1) * b.sz + 1):(i * b.sz)] == s),
		logical(1)
	));
}
#
# <par> matrix functions
#
# <!> assumes same indeces for rows/columns
# Build a square matrix from long-format (row-id, col-id, value) records;
# missing pairs become NA. referenceOrder optionally fixes the id order.
matrixFromIndexedDf = function(df, idx.r = 'idx.r', idx.c = 'idx.c', value = 'value', referenceOrder = NULL) {
	id = unique(c(df[[idx.r]], df[[idx.c]]));
	# matrix indeces
	# <A> canonical order is by repeating vector id for row index, constant for columns within repetition
	# -> matrix filled by columns
	midcs = merge(data.frame(id = id), data.frame(id = id), by = NULL);
	midcs = data.frame(midcs, mfid.i = 1:nrow(midcs));
	# all.y keeps unobserved (row, col) pairs as NA values
	map = merge(df[, c(idx.r, idx.c, value)], midcs,
		by.x = c(idx.r, idx.c), by.y = c('id.x', 'id.y'), all.y = T);
	# return to midcs order
	map = map[order(map$mfid.i), ];
	# filled by rows
	m = matrix(map[[value]], nrow = length(id));
	# reorder matrix
	o = order_align(firstDef(referenceOrder, id), id);
	# reorder in two steps -> out of mem otherwise
	m1 = m[o, ];
	m2 = m1[, o];
	m2
}
# fill NA entries from the transposed position
# (completes a symmetric matrix stored as one triangle)
symmetrizeMatrix = function(m) {
	holes = is.na(m);
	m[holes] = t(m)[holes];
	m
}
# indeces of the rows of m equal to 'row'; comparison is restricted to
# names(row) when row is named, otherwise all columns are compared
which.row = function(m, row) {
	cmpCols = names(as.list(row));
	if (is.null(cmpCols)) cmpCols = 1:length(row);
	allRows = 1:(dim(m)[1]);
	allRows[sapply(allRows, function(r)all(m[r, cmpCols] == row))]
}
# lsee: list with searchees
# lsed: list with searched objects
# inverse: lsed are regexes matched against lsee; pre-condition: length(lsee) == 1
# ret.list: for match.multi return list by lsee
# <!><t> cave: semantics changed as of 17.8.2009: return NA entries for unfound lsee-entries
# <!> match multi only implemented for merge = T
# Core lookup helper: positions of lsee within lsed (names(lsed) for lists);
# regex = T matches lsee as patterns, merge = T uses a merge-based join,
# match.multi returns all matches instead of the first per searchee.
which.indeces = function(lsee, lsed, regex = F, ret.na = F, merge = T, match.multi = F, ...,
	inverse = F, ret.list = FALSE) {
	if (!length(lsed) || !length(lsee)) return(c());
	v = if (is.list(lsed)) names(lsed) else lsed;
	idcs = if (regex) {
		which(sapply(lsed, function(e)(
			if (inverse) length(fetchRegexpr(e, lsee, ...)) > 0 else
			any(sapply(lsee, function(see)(length(fetchRegexpr(see, e, ...)) > 0)))
		)))
	} else if (merge) {
		# join searchees against searched values, keeping searchee order (iy)
		d0 = merge(
			data.frame(d = lsed, ix = 1:length(lsed)),
			data.frame(d = lsee, iy = 1:length(lsee)), all.y = T);
		d0 = d0[order(d0$iy), ];
		idcs = if (match.multi) {
			#d0$ix[unlist(sapply(lsee, function(e)which(d0$d == e)))]
			#na.omit(sort(d0$ix))
			r = if (ret.list)
				unlist.n(by(d0, d0$d, function(d)list(na.omit(d$ix)), simplify = FALSE)) else
				na.omit(d0$ix);
			r
		} else {
			# first match per searchee: detect iy transitions
			d0$ix[pop(which(c(d0$iy, 0) - c(0, d0$iy) != 0))];
		}
		# less efficient version
		# } else d0$ix[unlist(sapply(lsee, function(e)which(d0$d == e)[1]))];
		# } else d0$ix[order(d0$iy)]
		if (!ret.na) idcs = idcs[!is.na(idcs)];
		idcs
	} else {
		unlist(as.vector(sapply(lsee, function(e){
			w = which(e == v);
			if (!ret.na) return(w);
			ifelse(length(w), w, NA)
		})))
	};
	r = if (ret.list) idcs else as.integer(idcs);
	r
}
# return the matching elements of lsed rather than their indeces
grep.vector = function(lsee, lsed, regex = F, ret.na = F, merge = T, match.multi = F, ..., inverse = F) {
	lsed[which.indeces(lsee, lsed, regex, ret.na, merge, match.multi, ..., inverse = inverse)]
}
# elements of lsed that start with any of the prefixes in lsee
grep.infixes = function(lsee, lsed, ...) {
	r = grep.vector(sapply(lsee, function(v)sprintf('^%s.*', v)), lsed, regex = T, inverse = F, ... );
	r
}
# force structure to be matrix (arrange vector into a single row)
MR = function(m) {
	if (is.matrix(m)) m else matrix(m, nrow = 1)
}
# force structure to be matrix (arrange vector into a single column)
MC = function(m) {
	if (is.matrix(m)) m else matrix(m, ncol = 1)
}
#
# <par> data processing
#
# like table but produce counts for all expected categories (not only for counts > 0)
# cats are the expected categories (default min:n); counts are returned in
# sorted category order, as before.
# <!> fixed: values of v outside 'categories' previously enlarged the table so
# that rep(1, length(categories)) was recycled during subtraction, silently
# producing garbage counts; such values are now ignored.
table.n = function(v, n, min = 1, categories = NULL) {
	if (is.null(categories)) categories = min:n;
	as.vector(table(factor(v, levels = sort(unique(categories)))))
}
# relative frequencies of the values of v
table.freq = function(v) {
	counts = table(v);
	counts / sum(counts)
}
# relative frequencies over fixed categories (arguments as for table.n)
table.n.freq = function(...) {
	counts = table.n(...);
	counts / sum(counts)
}
#
# <par> data types
#
# as.numeric without the 'NAs introduced by coercion' warning
to.numeric = function(x) suppressWarnings(as.numeric(x))
# set types for columns: numeric: as.numeric
# Normalize a list/matrix into a data frame, optionally rbinding ragged list
# elements (NA-padded), transposing, renaming columns (vector or replacement
# list) and coercing the named columns to the requested types.
data.frame.types = function(df, numeric = c(), character = c(), factor = c(), integer = c(),
	do.unlist = T, names = NULL, row.names = NULL, reset.row.names = F, do.rbind = F, do.transpose = F,
	stringsAsFactors = F) {
	if (do.rbind) {
		#old code: df = t(sapply(df, function(e)e));
		# NA-pad ragged elements to the maximal length before binding
		lengthes = sapply(df, length);
		maxL = max(lengthes);
		df = t(sapply(1:length(df), function(i)c(df[[i]], rep(NA, maxL - lengthes[i]))));
	}
	if (do.transpose) df = t(df);
	df = as.data.frame(df, stringsAsFactors = stringsAsFactors);
	# set or replace column names
	if (!is.null(names)) {
		if (class(names) == "character") names(df)[1:length(names)] = names;
		if (class(names) == "list") names(df) = vector.replace(names(df), names);
	}
	if (do.unlist) for (n in names(df)) { df[[n]] = unlist(df[[n]]); }
	for (n in numeric) { df[[n]] = as.numeric(df[[n]]); }
	for (n in integer) { df[[n]] = as.integer(df[[n]]); }
	for (n in character) { df[[n]] = as.character(df[[n]]); }
	for (n in factor) { df[[n]] = as.factor(df[[n]]); }
	if (reset.row.names) row.names(df) = NULL;
	if (length(row.names) > 0) row.names(df) = row.names;
	df
}
# per-column classes of a data frame, as a named list
DfClasses = function(dataFrame)nlapply(dataFrame, function(n)class(dataFrame[[n]]));
# coerce the named columns of dataFrame to integer
DfAsInteger = function(dataFrame, as_integer) {
	#dfn = apply(dataFrame[, as_integer, drop = F], 2, function(col)as.integer(avu(col)));
	# <!> 6.6.2016 as.integer first needed to retain factor status on factors
	dfn = nlapply(as_integer, function(col)avu(as.integer(dataFrame[[col]])));
	dataFrame[, as_integer] = do.call(cbind, dfn);
	dataFrame
}
# coerce the named columns of dataFrame to character
DfAsCharacter = function(dataFrame, as_character) {
	#dfn = apply(dataFrame[, as_character, drop = F], 2, function(col)as.character(avu(col)));
	#dataFrame[, as_character] = as.data.frame(dfn, stringsAsFactors = FALSE);
	dfn = nlapply(as_character, function(col)avu(as.character(dataFrame[[col]])));
	dataFrame[, as_character] = do.call(cbind, dfn);
	dataFrame
}
# as of 22.7.2013 <!>: min_ applied before names/headerMap
# as of 19.12.2013 <!>: as.numeric -> as_numeric
# as of 22.5.2014 <!>: t -> t_
# as of 13.11.2014 <!>: sapply -> simplify_
#' Create data frames with more options than \code{data.frame}
#'
#' Swiss-army constructor: optional transposition (t_), column removal (min_),
#' renaming (names/headerMap), value recoding (valueMap), per-column type
#' coercions (as_numeric/as_integer/as_character/as_factor) and simple
#' transformations (log, minus one).
Df_ = function(df0, headerMap = NULL, names = NULL, min_ = NULL,
	as_numeric = NULL, as_character = NULL, as_factor = NULL, as_integer = NULL,
	row.names = NA, valueMap = NULL, Df_as_is = TRUE, simplify_ = FALSE,
	deep_simplify_ = FALSE, t_ = FALSE, unlist_cols = F, transf_log = NULL, transf_m1 = NULL,
	Df_doTrimValues = FALSE, Df_mapping_value = '__df_mapping_value__') {
	#r = as.data.frame(df0);
	if (t_) df0 = t(df0);
	r = data.frame(df0, stringsAsFactors = !Df_as_is);
	# drop unwanted columns first (see change log above)
	if (!is.null(min_)) {
		is = which.indeces(min_, names(r));
		if (length(is) > 0) r = r[, -is, drop = F];
	}
	if (simplify_) r = sapply(r, identity);
	if (deep_simplify_) r = as.data.frame(
		nlapply(r, function(col)sapply(r[[col]], unlist)), stringsAsFactors = !Df_as_is
	);
	# rename columns: character vector replaces by position, list by mapping
	if (!is.null(names)) {
		if (class(names) == 'character') names(r)[1:length(names)] = names;
		if (class(names) == 'list') names(r) = vector.replace(names(r), names);
	}
	if (!is.null(headerMap)) names(r) = vector.replace(names(r), headerMap);
	if (!is.null(valueMap)) {
		# recode column values; Df_mapping_value marks unmapped entries,
		# which are left unchanged
		for (n in names(valueMap)) {
			vs = if (Df_doTrimValues)
				nina(trimString(as.character(r[[n]])), Df_mapping_value) else
				as.character(r[[n]]);
			vs = nina(valueMap[[n]][vs], Df_mapping_value);
			r[[n]] = ifelse(vs == Df_mapping_value, as.character(r[[n]]), vs);
		}
	}
	if (!is.null(as_numeric)) {
		dfn = apply(r[, as_numeric, drop = F], 2, function(col)as.numeric(avu(col)));
		r[, as_numeric] = as.data.frame(dfn);
	}
	if (!is.null(as_integer)) r = DfAsInteger(r, as_integer);
	if (!is.null(as_character)) r = DfAsCharacter(r, as_character);
	if (!is.null(as_factor)) {
		# <N> does not work
		#dfn = apply(r[, as_factor, drop = F], 2, function(col)as.factor(col));
		#r[, as_factor] = dfn;
		for (f in as_factor) r[, f] = as.factor(r[[f]]);
	}
	#
	# <p> transformations
	#
	if (!is.null(transf_log)) r[, transf_log] = log(r[, transf_log, drop = F]);
	if (!is.null(transf_m1)) r[, transf_m1] = r[, transf_m1, drop = F] - 1;
	if (!all(is.na(row.names))) row.names(r) = row.names;
	if (unlist_cols) for (n in names(r)) r[[n]] = avu(r[[n]]);
	r
}
# data.frame() front-end for Df_: builds the frame from ... and forwards the
# most common post-processing options
Df = function(..., headerMap = NULL, names = NULL, min_ = NULL, row.names = NA, Df_as_is = TRUE,
	as_numeric = NULL, as_character = NULL, as_factor = NULL, t_ = F, unlist_cols = F) {
	r = data.frame(...);
	Df_(r, headerMap = headerMap, names = names, min_ = min_, row.names = row.names,
		as_numeric = as_numeric,
		as_character = as_character,
		as_factor = as_factor,
		Df_as_is = Df_as_is,
		t_ = t_,
		unlist_cols = unlist_cols
	);
}
# convert a data frame into a named list of its columns
Df2list = function(df) {
	df = as.data.frame(df);
	nlapply(names(df), function(n)df[[n]]);
}
# select rows of data for which every column named in l equals the given value;
# na.rm maps NA comparison results (default nif: NA counts as no match)
Dfselect = function(data, l, na.rm = nif) {
	sel = apply(sapply(nlapply(l, function(n)data[[n]] == l[[n]]), identity), 1, all);
	r = data[na.rm(sel), ];
	r
}
# List post-processor: remove (min_) or keep (sel_) elements by name, drop or
# NA-replace NULL elements, rename, optionally simplify via sapply.
List_ = .List = function(l, min_ = NULL, sel_ = NULL,
	rm.null = F, names_ = NULL, null2na = F, simplify_ = F) {
	if (!is.null(min_)) {
		i = which.indeces(min_, names(l));
		if (length(i) > 0) l = l[-i];
	}
	if (!is.null(sel_)) {
		i = which.indeces(sel_, names(l));
		if (length(i) > 0) l = l[i];
	}
	if (rm.null) {
		remove = -which(sapply(l, is.null));
		if (length(remove) > 0) l = l[remove];
	}
	if (null2na) {
		nullI = which(sapply(l, is.null));
		l[nullI] = NA;
	}
	if (!is.null(names_)) names(l)[Seq(1, length(names_))] = names_;
	if (simplify_) l = sapply(l, identity);
	l
}
# list() front-end for .List; arguments are evaluated in 'envir'
List = function(..., min_ = NULL, envir = parent.frame(), names_ = NULL) {
	l = eval(list(...), envir = envir);
	.List(l, min_ = min_, names_ = names_);
}
# unlist that can first map NULL elements to NA so they are not silently dropped
Unlist = function(l, ..., null2na_ = FALSE) {
	if (null2na_) {
		isNull = sapply(l, is.null);
		l[isNull] = NA;
	}
	unlist(l, ...)
}
# final element of a vector
last = function(x)(x[length(x)])
# vector without its final element
pop = function(x)(x[-length(x)])
# differences between successive elements; the first difference is taken against 'start'
vectorLag = function(x, start = 0)pop(c(x, start) - c(start, x))
# partition N into 'by' near-equal integer parts that sum to N
splitN = function(N, by = 4) vectorLag(round(cumsum(rep(N/by, by))));
# partition N into near-equal parts of at most 'max' each
splitToMax = function(N, max = 4) vectorLag(round(cumsum(rep(N/ceiling(N/max), ceiling(N/max)))));
# cumsum returning indeces for numbers given in Ns
cumsumI = function(Ns, offset = 1, do.pop = FALSE) {
	# start indeces of consecutive blocks of sizes Ns; final entry 'N' is one
	# past the end (dropped when do.pop = TRUE)
	cs = vectorNamed(c(0, cumsum(Ns)) + offset, c(names(Ns), 'N'));
	if (do.pop) cs = pop(cs);
	cs
}
# recursive cumsum (one level)
cumsumR = function(l, offset = 1) {
	cs0 = if (is.list(l)) lapply(l, cumsumR, offset = 0) else rev(cumsum(l))[1];
	cs = vectorNamed(c(0, pop(unlist(cs0))) + offset, names(cs0));
	cs
}
#
# <par> sets and permutations
#
#' @title wrapper for order to allow multivariate ordering
#'
#' Accepts a plain vector, a list of key vectors, or a data frame whose columns
#' act as (tie-breaking) sort keys; '...' is forwarded to \code{order}.
Order = function(v, ...) {
	# <!> fixed: '...' was previously passed as a positional argument of
	# do.call() (landing on its 'quote'/'envir' parameters), so e.g.
	# decreasing = TRUE failed for data-frame/list input; keys are unnamed to
	# avoid partial matching of column names against order() arguments.
	if (is.data.frame(v)) do.call(order, c(unname(as.list(v)), list(...))) else
	if (is.list(v)) do.call(order, c(unname(v), list(...))) else
	order(v, ...)
}
#' @title Return all value combinations appearing in a data frame
#'
#' @examples
#' combs = valueCombinations(iris);
valueCombinations = function(d) merge.multi.list(dimnames(table(d)));
#' @title Computes order so that inverseOrder after order is the identity
#'
#' @examples
#' v = runif(1e2);
#' print(all(sort(v)[inverseOrder(v)] == v))
Rank = inverseOrder = inversePermutation = function(p) {
	## <p> naive version
	# o = order(p);
	# i = rep(NA, length(o));
	# for (j in 1:length(o)) { i[o[j]] = j};
	# i
	## <p> build-in version (not working for multivariate case)
	#rank(v, ties.method = 'first')
	## <p> better version
	# position of each original index inside the ordering permutation
	which.indeces(1:(if (class(p) == 'data.frame') nrow(p) else length(p)), Order(p))
}
#' @title Calculates inverseOrder, assuming that the argument is already an \code{order}-vector.
inverseOrder_fromOrder = function(p)which.indeces(1:length(p), p)
#' @title Return vector that reorders v to equal reference.
#'
#' Assuming that two arguments are permutaions of each other, return a vector of indeces such that \code{all(reference == v[order_align(reference, v)]) == T} for all vectors \code{reference, v}.
#'
#' @examples
#' sapply(1:10, function(i){v = sample(1:5); v[order_align(5:1, v)]})
#' sapply(1:10, function(i){v = runif(1e2); v1 = sample(v, length(v)); all(v1[order_align(v, v1)] == v)})
order_align = function(reference, v)Order(v)[inverseOrder(reference)];
#' Calculates \code{order_align}, assuming that the both arguments are already orders.
#' sapply(1:40, function(i){v = runif(1e2); v1 = sample(v, length(v)); all(v1[order_align_fromOrder(order(v), order(v1))] == v)})
order_align_fromOrder = function(reference, v)v[inverseOrder_fromOrder(reference)];
# permutation is in terms of elements of l (not indeces)
# Map l through the permutation given as a (from, to) table; returnIndeces = F
# returns the permuted elements instead of their indeces.
applyPermutation = function(l, perm, from = 'from', to = 'to', returnIndeces = T) {
	# 1. bring perm[[from]] in the same order as l
	# 2. apply this order to perm[[to]]
	r0 = perm[[to]][order(perm[[from]])[inverseOrder(l)]];
	# 3. determine permutation going from l to r0
	r = order(l)[inverseOrder(r0)]
	if (!returnIndeces) r = l[r];
	r
}
# Order the rows of a data frame by the given columns (indeces or names;
# default: all columns, left to right as tie-breakers).
# <!> fixed: replaced eval(parse(text = ...)) string construction by do.call().
order.df = function(df, cols = NULL, decreasing = F, na.last = F) {
	if (is.null(cols)) cols = 1:ncol(df);
	if (!is.numeric(cols)) cols = which.indeces(cols, names(df));
	# one key vector per selected column, in the requested precedence
	keys = lapply(cols, function(i)df[, i]);
	do.call(order, c(keys, list(decreasing = decreasing, na.last = na.last)))
}
# Order data frame d by the columns named in 'maps'; a list-valued map recodes
# the column values (optionally by regex) into temporary sort keys before
# ordering, a non-list entry orders by the raw column.
order.df.maps = function(d, maps, ..., regex = F) {
	cols = NULL;
	for (i in 1:length(maps)) {
		m = names(maps)[i];
		map = maps[[i]];
		keys = names(map);
		cols = c(cols, if (is.list(map)) {
			# recoded values go into a temporary column used only for sorting
			tempColName = sprintf("..order.df.maps.%04d", i);
			col = if (regex)
				sapply(d[[m]], function(e){ j = which.indeces(e, keys, regex = T, inverse = T)
					if (length(j) == 0) NA else map[[j]]
				}) else as.character(map[d[[m]]]);
			col[col == "NULL"] = NA;
			d = data.frame(col, d, stringsAsFactors = F);
			names(d)[1] = tempColName;
		} else { m });
	}
	o = order.df(d, cols, ...);
	o
}
# Stack a named list of data frames on top of each other; a column 'factor'
# records the list name each row originated from. Returns NULL for empty input.
# <!> fixed: result was grown by rbind() inside a loop (quadratic); all parts
# are now bound in a single do.call(rbind, ...).
data.frame.union = function(l) {
	parts = lapply(names(l), function(n) {
		df = l[[n]];
		cbind(df, factor = rep(n, dim(df)[1]))
	});
	do.call(rbind, parts)
}
# levels: take levels in that order, unmentioned levels are appended
# setLevels: set to these levels, else to NA
# Recode factor/vector values via 'map' (unmapped values -> NA when others2na)
# and/or impose a level order; the input's integer/factor type is preserved.
recodeLevels = function(f, map = NULL, others2na = TRUE, levels = NULL, setLevels = NULL) {
	r = f;
	if (!is.null(map)) {
		# map others to NA
		if (others2na) {
			nonmentioned = setdiff(if (is.factor(f)) levels(f) else unique(f), names(map));
			map = c(map, listKeyValue(nonmentioned, rep(NA, length(nonmentioned))));
		}
		v = vector.replace(as.character(f), map);
		if (is.integer(f)) v = as.integer(v);
		if (is.factor(f)) v = as.factor(v);
		r = v;
	}
	if (!is.null(levels) || !is.null(setLevels)) {
		# <p> preparation
		fact0 = as.factor(r);
		levls = levels(fact0);
		r = levls[fact0];
		# <p> new levels
		levlsN0 = firstDef(setLevels, levels, levls);
		levlsN = c(levlsN0, setdiff(levls, levlsN0));
		# <p> remove unwanted factors
		if (!is.null(setLevels)) r = ifelse(r %in% setLevels, r, NA);
		r = factor(r, levels = if (!is.null(setLevels)) levlsN0 else levlsN);
	}
	r
}
# Union of several sets given as separate arguments, or as one list
# (as.list = TRUE, or auto-detected via .drop for a single list argument).
Union = function(..., .drop = T, as.list = FALSE) {
	l = if (as.list) list(...)[[1]] else list(...);
	# <!> fixed: a stray second 'l = list(...)' previously overwrote the
	# 'as.list' handling above, making 'as.list = TRUE' a no-op
	# (compare the parallel implementation in Intersect).
	# auto-detect list of values
	if (.drop && length(l) == 1 && is.list(l[[1]])) l = l[[1]];
	r = NULL;
	for (e in l) { r = union(r, e); }
	r
}
# Intersection of several sets given as separate arguments, or as one list
# (as.list = TRUE, or auto-detected via .drop for a single list argument).
Intersect = function(..., .drop = T, as.list = FALSE) {
	sets = if (as.list) list(...)[[1]] else list(...);
	# auto-detect a single list argument holding the sets
	if (.drop && length(sets) == 1 && is.list(sets[[1]])) sets = sets[[1]];
	common = sets[[1]];
	for (s in sets[-1]) common = intersect(common, s);
	common
}
# Pairwise intersection sizes of a named list of sets, returned as a square
# matrix with the set names on both dimensions.
intersectSetsCount = function(sets) {
	i = iterateModels(list(s1 = names(sets), s2 = names(sets)), function(s1, s2) {
		length(intersect(sets[[s1]], sets[[s2]]))
	}, lapply__ = lapply);
	#r = reshape.wide(Df(i$models_symbolic, count = unlist(i$results)), 's1', 's2');
	rM = matrix(i$results, nrow = length(sets), byrow = T);
	dimnames(rM) = list(names(sets), names(sets));
	rM
}
# Cumulative unions: element i of the result is the union of the first i sets.
unionCum = function(..., .drop = T) {
	l = list(...);
	# auto-detect list of values
	if (.drop && length(l) == 1 && is.list(l[[1]])) l = l[[1]];
	r = l[1];
	if (length(l) > 1)
		for (n in names(l)[-1]) { r = c(r, List(union(r[[length(r)]], l[[n]]), names_ = n)); }
	r
}
# row bind of data.frames/matrices with equal number of cols
lrbind = function(l, as.data.frame = F, names = NULL) {
	ncols = dim(l[[1]])[2];
	# flatten each element row-major, then refill row by row
	flat = unlist(sapply(l, function(m) unlist(t(m))));
	stacked = matrix(flat, byrow = T, ncol = ncols);
	dimnames(stacked) = list(NULL, names(l[[1]]));
	if (as.data.frame) {
		stacked = data.frame(stacked);
		if (!is.null(names)) names(stacked) = names;
	}
	stacked
}
#
# logic arrays/function on list properties
#
# old versions:
# if (na.rm) v = v[!is.na(v)];
# sum(v)	# old version: length((1:length(v))[v])
# same as in Rlab
# number of TRUE entries of a logical vector
count = function(v, na.rm = T)sum(v, na.rm = na.rm)
# old versions:
# if (na.rm) v = v[!is.na(v)]; (sum(v)/length(v))
# { length(v[v]) / length(v) }
# v assumed to be logical
# fraction of TRUE entries
fraction = function(v, na.rm = T)mean(v, na.rm = na.rm);
# treat v as set
set.card = function(v)count(unique(v))
# cardinality of a set
size = function(set)length(unique(set));
# null is false
#nif = function(b)(!(is.null(b) | is.na(b) | !b))
#nif = function(b)sapply(b, function(b)(!(is.null(b) || is.na(b) || !b)))
# NULL/NA-tolerant truth test: NULL and NA count as FALSE
nif = function(b) {
	if (length(b) == 0) return(F);
	!(is.null(b) | is.na(b) | !b)
}
# null is true
#nit = function(b)(is.null(b) | is.na (b) | b)
#nit = function(b)sapply(b, function(b)(is.null(b) || is.na (b) || b))
# NULL/NA-tolerant truth test: NULL and NA count as TRUE
nit = function(b) {
	if (length(b) == 0) return(T);
	is.null(b) | is.na (b) | b
}
# null is zero
#niz = function(e)ifelse(is.null(e) | is.na(e), 0, e)
niz = function(e)ifelse(is.null(e) | is.na(e), 0, e)
# null is na (or other special value
#niz = function(e)ifelse(is.null(e) | is.na(e), 0, e)
nina = function(e, value = NA)sapply(e, function(e)ifelse(is.null(e), value, e))
#
# <p> complex structures
#
#
# Averaging a list of data frames per entry over list elements
#
# meanMatrices = function(d) {
# 	df = as.data.frame(d[[1]]);
# 	ns = names(df);
# 	# iterate columns
# 	dfMean = sapply(ns, function(n) {
# 		m = sapply(d, function(e)as.numeric(as.data.frame(e)[[n]]));
# 		mn = apply(as.matrix(m), 1, mean, na.rm = T);
# 		mn
# 	});
# 	dfMean
# }
# Element-wise mean over a list of equally-dimensioned matrices; matrices with
# deviating dimensions are dropped with a warning. Dimnames come from the first.
meanMatrices = function(d) {
	dm = dim(d[[1]]);
	good = sapply(d, function(m)(length(dim(m)) == 2 && all(dim(m) == dm)));
	if (any(!good)) warning('meanMatrices: malformed/incompatible matrices in list, ignored');
	d = d[good];
	m0 = sapply(d, function(e)avu(e));
	m1 = apply(m0, 1, mean, na.rm = T);
	r = matrix(m1, ncol = dm[2], dimnames = dimnames(d[[1]]));
	r
}
# Element-wise mean over a list of equally long (optionally named) vectors,
# ignoring NAs per position; names are taken from the first vector.
# <!> fixed: removed unused local 'ns'; rowMeans replaces the apply/mean call.
meanVectors = function(d) {
	rowMeans(as.matrix(sapply(d, function(e)e)), na.rm = T)
}
# mean of a list of scalars
meanList = function(l)mean(as.numeric(l));
# Average a list of homogeneous structures component-wise: matrices, vectors
# and scalars are dispatched to the matching mean helper per component name.
meanStructure = function(l) {
	r = nlapply(names(l[[1]]), function(n) {
		# choose the aggregator from the shape of the first element
		meanFct =
			if (is.matrix(l[[1]][[n]])) meanMatrices else
			if (length(l[[1]][[n]]) > 1) meanVectors else
			meanList;
		meanFct(list.key(l, n, unlist = F));
	});
	r
}
# Center matrix columns (direction = 2) or rows (direction = 1) by the
# statistic 'centerBy'; returns the centered matrix and the centers used.
matrixCenter = function(m, direction = 2, centerBy = median) {
	shift = apply(m, direction, centerBy, na.rm = T);
	centered = if (direction == 1) m - shift else t(t(m) - shift);
	list(matrix = centered, center = shift)
}
# Inverse of matrixCenter: add the centers back per row (direction = 1) or
# per column (direction = 2).
# <!> fixed: the two directions were swapped relative to matrixCenter, so a
# center/de-center round trip with the same 'direction' did not restore m.
matrixDeCenter = function(m, center, direction = 2) {
	m = if (direction == 1) (m + center) else t(t(m) + center);
	m
}
#
# <p> combinatorial functions
#
# form all combinations of input arguments as after being constraint to lists
# .first.constant designates whether the first list changes slowest (T) or fastest (F)
# in the resulting data frame,
# i.e. all other factors are iterated for a fixed value of l[[1]] (T) or not
# .constraint provides a function to filter the resulting data frame
merge.multi.list = function(l, .col.names = NULL, .col.names.prefix = "X",
	.return.lists = F, .first.constant = T, stringsAsFactors = F, .cols.asAre = F, .constraint = NULL, ...) {
	# <p> determine column names of final data frame
	# fall back to generic X1, X2, ... for missing names
	.col.names.generic = paste(.col.names.prefix, 1:length(l), sep = "");
	if (is.null(.col.names)) .col.names = names(l);
	if (is.null(.col.names)) .col.names = .col.names.generic;
	.col.names[.col.names == ""] = .col.names.generic[.col.names == ""];
	names(l) = .col.names;	# overwrite names
	# <p> construct combinations
	if (.first.constant) l = rev(l);
	df0 = data.frame();
	# successive cross joins (merge with by = intersect -> cartesian on disjoint cols)
	if (length(l) >= 1) for (i in 1:length(l)) {
		newNames = if (.cols.asAre) names(l[[i]]) else names(l)[i];
		# <p> prepare data.frame: handle lists as well as data.frames
		# <!> changed 22.3.2016
		#dfi = if (is.list(l[[i]])) unlist(l[[i]]) else l[[i]];
		dfi = if (!is.data.frame(l[[i]])) unlist(l[[i]]) else l[[i]];
		df1 = data.frame.types(dfi, names = newNames, stringsAsFactors = stringsAsFactors);
		# <p> perform merge
		df0 = if (i > 1) merge(df0, df1, ...) else df1;
	}
	if (.first.constant) df0 = df0[, rev(names(df0)), drop = F];
	if (.return.lists) df0 = apply(df0, 1, as.list);
	if (!is.null(.constraint)) {
		df0 = df0[apply(df0, 1, function(r).do.call(.constraint, as.list(r))), ];
	}
	df0
}
# analysis pattern using merge.multi.list
# i needs not to be an argument to f as .do.call strips excess arguments
# Apply f to every parameter combination from modelList (one call per row of
# the combination frame), optionally in parallel via lapply__.
iterateModels_old = function(modelList, f, ...,
	.constraint = NULL, .clRunLocal = T, .resultsOnly = F, .unlist = 0, lapply__ = clapply) {
	models = merge.multi.list(modelList, .constraint = .constraint);
	r = lapply__(1:dim(models)[1], function(i, ..., f__, models__) {
		# pass the row index plus the row's parameter values as named arguments
		args = c(list(i = i), as.list(models__[i, , drop = F]), list(...));
		.do.call(f__, args)
	}, ..., f__ = f, models__ = models);
	r = if (.resultsOnly) r else list(models = models, results = r);
	r = unlist.n(r, .unlist);
	r
}
# list of list, vector contains index for each of these lists to select elements from
# these elements are merged and return
# if sub-element is not a list, take name of sub-element and contruct list therefrom
# namesOfLists controls whether, if a selected element is a list, its name is used instead
# can be used to produce printable summaries
list.takenFrom = function(listOfLists, v) {
	ns = names(listOfLists);
	# align the index vector with the list order
	if (any(ns != names(v))) v = v[order_align(ns, names(v))];
	l = lapply(1:length(v), function(i) {
		new = if (!is.list(listOfLists[[i]]))
			listKeyValue(ns[i], listOfLists[[i]][v[i]]) else {
			t = listOfLists[[i]][[v[i]]];
			# list of vectors
			t = (if (!is.list(t)) {
				# define name from higher level
				listKeyValue(firstDef(
					names(listOfLists[[i]])[v[i]], ns[i]
				), list(t))
				# <A> probably better and correct
				#listKeyValue(ns[i], list(t))
			} else if (is.null(names(t))) listKeyValue(ns[i], t) else t);
			t
		}
	});
	names(l) = names(v);
	l
}
# as list.takenFrom, but merge the selected sub-lists into a single list
merge.lists.takenFrom = function(listOfLists, v) {
	merge.lists(list.takenFrom(listOfLists, v), listOfLists = TRUE);
}
# superseded loop-based version of merge.lists.takenFrom (kept for reference)
merge.lists.takenFrom_old = function(listOfLists, v) {
	l = list();
	ns = names(listOfLists);
	if (any(ns != names(v))) v = v[order_align(ns, names(v))];
	for (i in 1:length(v)) {
		new = if (!is.list(listOfLists[[i]]))
			listKeyValue(ns[i], listOfLists[[i]][v[i]]) else {
			t = listOfLists[[i]][[v[i]]];
			# list of vectors
			t = (if (!is.list(t)) {
				# define name from higher level
				listKeyValue(firstDef(
					names(listOfLists[[i]])[v[i]], ns[i]
				), list(t))
				# <A> probably better and correct
				#listKeyValue(ns[i], list(t))
			} else if (is.null(names(t))) listKeyValue(ns[i], t) else t);
			t
		}
		l = merge.lists(l, new);
	}
	l
}
# take indeces given by v from a nested list
# namesOfLists: take the name of the list at the position in v
#	if null, take first element or leave aggregation to the function aggregator
# aggregator: called with the final result, should flatten existing lists into characters
lists.splice = function(listOfLists, v, namesOfLists = F, aggregator = NULL, null2na = T) {
	ns = names(listOfLists);
	l = lapply(1:length(ns), function(i) {
		name = ns[i];
		e = listOfLists[[i]][v[i]];
		r = if (!is.list(e)) e else {
			f = if (namesOfLists) {
				g = names(e)[1];
				# handle name == NULL
				if (is.null(g)) {
					# make an attempt later to print element
					#if (!is.null(aggregator)) e[[1]] else e[[1]][[1]]
					if (!is.null(aggregator))
						e[[1]] else
						join(as.character(e[[1]][[1]]), ", ")
				} else g
			} else e[[1]];
		}
		r
	});
	if (null2na) l = lapply(l, function(e)ifelse(is.null(e), NA, e));
	if (!is.null(aggregator)) l = aggregator(listKeyValue(ns, l), v, l);
	l
}
# dictionary produced by lists.splice, v: splice vector, l: aggregated list (w/o names)
# Default aggregator: flatten one nesting level of the spliced dictionary.
merge.multi.symbolizer = function(d, v, l)unlist.n(d, 1);
# Build a data frame of symbolic (printable) representations for all model
# combinations in modelList; one row per combination.
# NOTE(review): merge.multi.list/Df_ are project helpers defined elsewhere.
merge.multi.list.symbolic = function(modelList, ..., symbolizer = NULL) {
	# index grid: one index range per modelList entry
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, ...);
	namesDf = if (is.null(symbolizer)) names(modelList) else NULL;
	df0 = sapply(1:nrow(models), function(i, ...) {
		r = lists.splice(modelList, unlist(models[i, ]),
			namesOfLists = T, aggregator = symbolizer);
		r
	});
	# transpose: sapply produced one column per combination
	r = Df_(df0, t_ = T, names = namesDf);
	r
}
# Wrap every element of a list in its own one-element list.
inlist = function(l) lapply(l, list);
# Variadic convenience wrapper: wrap each argument in a one-element list.
Inlist = function(...) inlist(list(...));
# Dispatch a model-iteration callback im__f according to callMode:
#  'inlist': pass args (a list of lists) as individual arguments via .do.call
#  'list':   pass the args list itself as the single first argument
#  'inline': flatten one level of arg lists, then call via .do.call
# NOTE(review): .do.call/merge.lists are project helpers defined elsewhere.
Do.callIm = function(im__f, args, ..., restrictArgs = TRUE, callMode = 'inline') {
	if (callMode == 'inlist') {
		.do.call(im__f, c(args, list(...)), restrictArgs = restrictArgs)
	} else if (callMode == 'list') {
		im__f(args, ...)
	} else if (callMode == 'inline') {
		# flatten one level: each element of args contributes its members as arguments
		args = c(merge.lists(args, listOfLists = TRUE), list(...));
		.do.call(im__f, args, restrictArgs = restrictArgs)
	} else stop('Unknown call mode');
}
# <!> should be backwards compatible with iterateModels_old, not tested
# modelList: list of lists/vectors; encapuslate blocks of parameters in another level of lists
# Example:
#
#' Iterate combinations of parameters
#'
#' This function takes a list of parameters for which several values are to be evaluated. These values can be vectors of numbers or lists that contain blocks of parameters. All combinations are formed and passed to a user supplied function \code{f}. This functions takes an index of the combination together with parameter values. Argument \code{callWithList} controls whether there is exactly one argument per parameter position or wether one more step of unlisting takes place. In case that a block of parameters is supplied, all values of the block are passed as individual arguments to \code{f} in case \code{callWithList == F}.
#'
#' @param selectIdcs restrict models to the given indeces
#'
#' @examples
#' modelList = list(global = list(list(a=1, b=2)), N = c(1, 2, 3));
#' print(iterateModels(modelList));
#' modelList = list(N = c(1, 2, 3), parsAsBlock = list(list(list(c = 1, d = 2)), list(list(c = 3, d = 4))));
#' print(iterateModels(modelList));
#' # ensure elements on A are given as a block (list)
#' A = list(list(a = 1, b = 2), list(a = 3, b = 5));
#' modelList = list(N = inlist(A), parsAsBlock = list(list(list(c = 1, d = 2)), list(list(c = 3, d = 4))));
#' print(iterateModels(modelList));
#' # shorter version of the above
#' modelList = list(N = Inlist(list(a = 1, b = 2), list(a = 3, b = 5)), parsAsBlock = Inlist(list(c = 1, d = 2), list(c = 3, d = 4)));
#' print(iterateModels(modelList));
#' # inline calling
#' modelList = list(N = list(list(a = 1, b = 2), list(a = 3, b = 5)), parsAsBlock = list(list(c = 1, d = 2), list(c = 3, d = 4)));
#' print(iterateModels(modelList));
#'
#'
#'
#' callMode: 'inline', 'list', 'inlist'
# Core iteration: call f_iterate once per row of models (an index grid into
# modelList), passing i (original row name as integer) plus the selected values.
# NOTE(review): when parallel is TRUE, Lapply is never assigned locally and is
# resolved from the enclosing/global environment; the lapply__ parameter is
# accepted but unused here — TODO confirm intended wiring.
iterateModels_raw = function(modelList, models, f_iterate = function(...)list(...), ...,
	callWithList = F, callMode = NULL, restrictArgs = T, parallel = F, lapply__) {
	if (!parallel) Lapply = lapply;
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	# model indeces contains the original positions in models
	# this allows reordering of execution, eg with reverseEvaluationOrder
	r = Lapply(1:nrow(models), function(i, ..., im__f, im__model_idcs) {
		args = c(Inlist(i = im__model_idcs[i]), list.takenFrom(modelList, unlist(models[i, ])));
		Do.callIm(im__f, args, ..., restrictArgs = restrictArgs, callMode = callMode);
	}, ..., im__f = f_iterate, im__model_idcs = as.integer(row.names(models)));
	r
}
# <i> refactor iterateModels to use iterateModels_prepare
# Expand all parameter combinations of modelList into an index grid and apply
# constraint (.constraint callback) and explicit index (selectIdcs) selection.
# Returns list(modelsRaw, selection, models).
# Fix: the body forwarded `...` to iterateModels_raw although the signature did
# not declare `...`; in R this raises "'...' used in an incorrect context" as
# soon as the constraint branch is evaluated. `...` is now part of the
# signature (appended last, hence backward compatible).
iterateModels_prepare = function(modelList, .constraint = NULL,
	callWithList = FALSE, callMode = NULL, restrictArgs = T, selectIdcs = NULL, .first.constant = T, ...) {
	# <p> preparation
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	# index grid: one index range per modelList entry
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .first.constant = .first.constant);
	# <p> handle constraints: .constraint is called per combination, returns logical
	selC = if (is.null(.constraint)) T else
		unlist(iterateModels_raw(modelList, models, f_iterate = .constraint,
			parallel = FALSE, callMode = callMode, restrictArgs = restrictArgs, ...));
	selI = if (is.null(selectIdcs)) T else 1:nrow(models) %in% selectIdcs;
	# apply constraints
	models = models[selC & selI, , drop = F];
	r = list(
		modelsRaw = models,
		selection = selC & selI,
		models = models
	);
	r
}
# Default symbolizer: map every model parameter to the character form of its
# first atomic value; i (the combination index) is accepted but not used.
iterateModelsDefaultSymbolizer = function(i, ...) {
	params = list(...);
	lapply(params, function(param) {
		firstValue = unlist(param)[1];
		unlist(as.character(firstValue))
	})
}
# Combine the default symbolization with a user supplied symbolizer
# (im_symbolizer); user-produced entries override the defaults.
# NOTE(review): im_symbolizerMode is accepted but unused here — TODO confirm.
iterateModelsSymbolizer = function(i, ..., im_symbolizer, im_symbolizerMode) {
	l = list(...);
	l0 = iterateModelsDefaultSymbolizer(i, ...);
	l1 = .do.call(im_symbolizer, c(list(i = i), list(...)), restrictArgs = TRUE);
	r = merge.lists(l0, l1);
	r
}
# Iterate all combinations of parameters in modelList (see roxygen docs on
# iterateModels_raw), calling f per combination. Returns list(models, results,
# models_symbolic) or just the results when .resultsOnly is TRUE.
# reverseEvaluationOrder: evaluate combinations last-to-first (results are
# re-reversed afterwards so output order is unaffected).
# NOTE(review): lapply__ is accepted but not forwarded to iterateModels_raw —
# TODO confirm how parallel execution obtains its apply function.
iterateModels = function(modelList, f = function(...)list(...), ...,
	.constraint = NULL, .clRunLocal = TRUE, .resultsOnly = FALSE, .unlist = 0,
	callWithList = FALSE, callMode = NULL,
	symbolizer = iterateModelsDefaultSymbolizer, symbolizerMode = 'inlist',
	restrictArgs = T, selectIdcs = NULL,
	.first.constant = TRUE, parallel = FALSE, lapply__, reverseEvaluationOrder = TRUE) {
	# <p> pre-conditions: names in modelList must be unique
	nsDupl = duplicated(names(modelList));
	if (any(nsDupl))
		stop(con('iterateModels: duplicated modelList entries: ', join(names(modelList)[nsDupl], ', ')));
	# <p> preparation
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	# <p> produce raw combinations (index grid into modelList)
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .first.constant = .first.constant);
#	models_symbolic = merge.multi.list.symbolic(modelList,
#		symbolizer = symbolizer, .first.constant = .first.constant);
	# printable per-combination representation, one row per combination
	models_symbolic = do.call(rbind, iterateModels_raw(modelList, models, iterateModelsSymbolizer,
		callMode = 'inlist', parallel = F,
		im_symbolizerMode = symbolizerMode, im_symbolizer = symbolizer));
	# <p> handle constraints
	selC = if (is.null(.constraint)) T else
		unlist(iterateModels_raw(modelList, models, f_iterate = .constraint,
			callMode = callMode, restrictArgs = restrictArgs, ..., parallel = F));
	selI = if (is.null(selectIdcs)) T else 1:nrow(models) %in% selectIdcs;
	# <p> apply constraints
	models = models[selC & selI, , drop = F];
	models_symbolic = models_symbolic[selC & selI, , drop = F];
	# <p> models to be iterated
	modelsIt = if (reverseEvaluationOrder) models[rev(1:nrow(models)), , drop = F] else models;
	r = iterateModels_raw(modelList, modelsIt, f_iterate = f,
		callMode = callMode, restrictArgs = restrictArgs, ..., parallel = parallel);
	if (reverseEvaluationOrder) r = rev(r);
	r = if (.resultsOnly) r else list(
		models = models,
		results = r,
		models_symbolic = models_symbolic
	);
	r = unlist.n(r, .unlist);
	r
}
# Expand modelList into its index grid and symbolic representation without
# evaluating any callback; returns list(models, models_symbolic).
iterateModelsExpand = function(modelList, .constraint = NULL) {
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .constraint = .constraint);
	r = list(
		models = models,
		models_symbolic = merge.multi.list.symbolic(modelList, .constraint = .constraint)
	);
	r
}
# reverse effect of .return.lists = T
# list.to.df(merge.multi.list(..., .return.lists = T)) === merge.multi.list(..., .return.lists = F)
# Bind a list of equal-length vectors into a matrix, one list element per row.
list.to.df = function(l) t(sapply(l, identity))
# Variadic front-end to merge.multi.list: form all combinations of the given
# vectors/lists.
# NOTE(review): .col.names.prefix is accepted but not forwarded to
# merge.multi.list — TODO confirm whether that is intentional.
merge.multi = function(..., .col.names = NULL, .col.names.prefix = "X",
	.return.lists = F, stringsAsFactors = F, .constraint = NULL, .first.constant = T) {
	merge.multi.list(list(...), .col.names = .col.names, .return.lists = .return.lists,
		stringsAsFactors = stringsAsFactors, .constraint = .constraint, .first.constant = .first.constant)
}
# Successively merge() a list of data frames (default: full outer join).
# .first.constant reverses the merge order and restores column order afterwards.
# NOTE(review): for an empty list l, df0 is never assigned and the final access
# errors — TODO confirm callers guarantee length(l) >= 1.
merge.multi.dfs = function(l, .first.constant = T, all = T, stringsAsFactors = F, ...) {
	if (.first.constant) l = rev(l);
	if (length(l) >= 1) for (i in 1:length(l)) {
		df1 = data.frame.types(l[[i]], stringsAsFactors = stringsAsFactors);
		df0 = if (i > 1) merge(df0, df1, all = all, ...) else df1;
	}
	# undo the reversal in the column order
	if (.first.constant) df0 = df0[, rev(names(df0)), drop = F];
	df0
}
# merge() wrapper: errors when no common columns exist (safemerge) and can
# preserve the row order of x (stableByX) by tagging x's rows with a temporary
# index column that is removed again after merging.
Merge = function(x, y, by = intersect(names(x), names(y)), ..., safemerge = T, stableByX = FALSE) {
	# tag x rows so the original order can be restored after merge() sorts
	if (stableByX) x = data.frame(x, MergeStableByX = 1:nrow(x));
	# `by` is evaluated lazily, i.e. after the tag column was added; y never
	# contains MergeStableByX, so the intersection is unaffected
	if (safemerge && length(by) == 0) {
		stop(sprintf('Merge: safemerge triggered. No common columns between "%s" and "%s"',
			join(names(x), sep = ','), join(names(y), sep = ',')))
	}
	merged = merge(x = x, y = y, by = by, ...);
	if (!stableByX) return(merged);
	# restore x's row order and drop the helper column
	tagCol = which(names(merged) == 'MergeStableByX');
	merged[order(merged$MergeStableByX), -tagCol, drop = FALSE]
}
# ids: variables identifying rows in final table
# vars: each combination of vars gets transformed to an own column
# <!> not tested for length(ids) > 1 || ength(rvars) > 1
# blockVars: should the repeated vars go in blocks or be meshed for vars
#
# Examples:
# intersection table
# i = intersectSetsCount(sets);
# reshape.wide(Df(i$models_symbolic, count = unlist(i$results)), 's1', 's2');
# Long-to-wide reshape: one output row per unique ids-combination, one output
# column per (remaining var x vars-level) combination; missing cells become NA.
reshape.wide = function(d, ids, vars, blockVars = F, reverseNames = F, sort.by.ids = T) {
	# remaining vars
	rvars = setdiff(names(d), union(ids, vars));
	# levels of variables used in the long expansion
	levls = lapply(vars, function(v)unique(as.character(d[[v]])));
	# combinations at the varying vars as passed to vars
	cbs = merge.multi.list(levls, .col.names = vars, .first.constant = !blockVars);
	# repvars: repeated variables
	repvars = merge.multi.list(c(list(rvars), levls),
		.first.constant = !blockVars, .col.names = c("..var", vars));
	varnames = apply(repvars, 1, function(r)join(if (reverseNames) rev(r) else r, "."));
	r0 = data.frame.types(unique(d[, ids], drop = F), names = ids);
	r1 = data.frame.types(apply(r0, 1, function(r) {
		# <p> isolate rows which match to current id columns
		# NOTE(review): the local `ids` shadows the parameter after this line;
		# the RHS still sees the outer (column-name) value
		ids = which(apply(d[, ids, drop = F], 1, function(id)all(id == r)));
		d1 = d[ids, ];
		# <p> construct vector of repeated values
		vs = sapply(1:dim(cbs)[1], function(i) {
			# <A> should be equal to one
			row = which(apply(d1[, vars, drop = F], 1, function(r)all(r == cbs[i, ])));
			v = if (length(row) != 1) rep(NA, length(rvars)) else d1[row, rvars];
			v
		});
		# heed blockVars
		vs = as.vector(unlist(if (!blockVars) t(vs) else vs));
		vs
	}), do.transpose = T, names = varnames);
	r = data.frame(r0, r1);
	if (sort.by.ids) r = r[order.df(r, ids), ];
	row.names(r) = NULL;
	r
}
#' Convert data in wide format to long format
#'
#' Long format duplicates certain columns and adds rows for which one new column hold values coming
#' from a set of columns in wide format.
#'
#' @param d data frame with columns in wide format
#' @param vars columns in wide format by name or index
#' @param factors \code{vars} can be grouped. For each level of \code{factor} a new row is created. Implies
#'	that \code{length(vars)} is a multiple of \code{length(levels(factor))}
#' @param factorColumn name of the column to be created for the factor
#' @param valueColumn name of the new column of values that were in wide format
# factors: provide factor combinations explicitly for vars (otherwise split by '.', <i>)
#' @param useDisk passed through to rbindDataFrames (spill intermediate frames to disk)
#' @param rowNamesAs if non-NULL, row names become a column of this name
#' @examples
#' #reshape variables 2:9 (forming two groups: case/ctr), value of which is named 'group'
#' # the shortened columns will get names valueColumn
#' d0 = reshape.long(d, vars = 2:9, factors = c('case', 'ctr'), factorColumn = 'group',
#'	valueColumn = c('AA', 'AG', 'GG', 'tot'));
reshape.long = function(d, vars = NULL, factorColumn = 'factor', valueColumn = 'value',
	factors = as.factor(vars), useDisk = F, rowNamesAs = NULL) {
	if (is.null(vars)) vars = names(d);
	# make rownames an extra column
	if (!is.null(rowNamesAs)) {
		d = data.frame(reshape_row_names__ = rownames(d), d);
		names(d)[1] = rowNamesAs;
	}
	# indeces of columns vars
	Ivars = .df.cols(d, vars);
	# remaining vars
	rvars = setdiff(1:length(names(d)), Ivars);
	# names thereof
	Nrvars = names(d)[rvars];
	# how wide are the blocks?
	S = length(vars) / length(factors);
	# columns of intermediate data.frame
	N = length(rvars);
	# create list of data frames, one per input row (one output row per factor level)
	dfs = lapply(1:nrow(d), function(i) {
		st = d[i, rvars];	# start of the new row
		df0 = data.frame(factors, value = matrix(d[i, vars], nrow = length(factors), byrow = T));
		df1 = data.frame(st, df0, row.names = NULL);
		names(df1) = c(Nrvars, factorColumn, valueColumn);
		df1
	});
	r = rbindDataFrames(dfs, do.unlist = T, useDisk = useDisk);
	r
}
#' Reduce data frame by picking the first row of blocks for which \code{cols} has the same values
uniqueByCols = function(d, cols) {
	# renumber rows so that row names equal row positions
	row.names(d) = NULL;
	# unique() keeps the first row of every duplicate block; its row names
	# (positions, see above) select those rows from the full data frame
	firstRows = as.integer(row.names(unique(d[, cols, drop = F])));
	d[firstRows, ]
}
#
# <p> string functions
#
# Upper-case the first character of every element of s, then concatenate all
# elements into a single string (note: a vector argument collapses to one string).
uc.first = firstUpper = function(s) {
	initials = toupper(substring(s, 1, 1));
	remainders = substring(s, 2);
	paste0(initials, remainders, collapse = "")
}
#
# <p> factor transformations for data frames
#
# Column names after dummy-expansion of factors: a factor with K levels
# contributes K - 1 names ("col1" .. "col<K-1>"), other columns keep their name.
dataExpandedNames = function(data) {
	expandOne = function(column) {
		value = data[[column]];
		if (!is.factor(value)) return(column);
		paste0(column, 1:(length(levels(value)) - 1))
	};
	unlist(lapply(names(data), expandOne))
}
# model.matrix removes missing columns and could not be tweaked into working
# Hand-rolled dummy coding: each factor column is replaced by K - 1 indicator
# columns (reference level = all zeros); NA observations yield NA indicators.
# NOTE(review): relies on sapply simplifying the per-column results into a
# matrix; behavior for single-observation data is untested — TODO confirm.
dataExpandFactors = function(data, vars = NULL) {
	if (is.null(vars)) vars = names(data);
	d0 = lapply(vars, function(v) {
		if (is.factor(data[[v]])) {
			ls = levels(data[[v]]);
			dcNa = rep(NA, length(ls) - 1);	# missing data coding
			dc = rep(0, length(ls) - 1);	# dummy coding
			sapply(data[[v]], function(e) {
				if (is.na(e)) return(dcNa);
				i = which(e == ls);
				# reference (first) level: all-zero coding
				if (i == 1) return(dc);
				dc[i - 1] = 1;
				return(dc);
			});
		} else data[[v]];
	});
	d0names = dataExpandedNames(data[, vars]);
	# re-transform data: flatten per-column matrices back into a data frame
	d1 = data.frame(matrix(unlist(lapply(d0, function(e)t(e))), ncol = length(d0names), byrow = F));
	names(d1) = d0names;
	d1
}
# Map variable (prefix) names to the coefficient names produced by factor
# expansion (see dataExpandedNames); vars are treated as regular expressions.
# NOTE(review): which.indeces is a project helper defined elsewhere.
coefficientNamesForData = function(vars, data) {
	lnames = dataExpandedNames(data);	# names of levels of factors
	cnames = lnames[unlist(sapply(vars, function(v)which.indeces(v, lnames, regex = T)))];
	cnames
}
#
# <p> statistic oriented data frame manipulation
#
# Column indices of d matching vars; with varsArePrefixes, each var matches any
# column starting with it (turned into the regex 'var.*').
variableIndecesForData = function(d, vars, varsArePrefixes = T) {
	if (varsArePrefixes) vars = sapply(vars, function(e)sprintf('%s.*', e));
	which.indeces(vars, names(d), regex = T, match.multi = T)
}
# Column names of d matching vars (see variableIndecesForData).
variablesForData = function(d, vars, varsArePrefixes = T) {
	names(d)[variableIndecesForData(d, vars, varsArePrefixes)]
}
# Sub-data.frame of d restricted to the columns matching vars.
subData = function(d, vars, varsArePrefixes = T) {
	dfr = d[, variableIndecesForData(d, vars, varsArePrefixes), drop = F];
	dfr
}
# Split d into response and covariate sub-frames according to formula;
# returns list(response = ..., covariate = ...).
subDataFromFormula = function(d, formula, responseIsPrefix = T, covariateIsPrefix = T) {
	resp = formula.response(formula);
	cov = formula.covariates(formula);
	ns = names(d);
	r = list(
		response = subData(d, resp, responseIsPrefix),
		covariate = subData(d, cov, covariateIsPrefix)
	);
	r
}
#
# <p> graph functions
#
# One closure step for sub.graph: add transitive edges (via a relational join
# on 'follower'), close symmetrically and re-assign the minimal cluster id per
# unique edge. df has columns follower/leader/cluster.
sub.graph.merge = function(df, leader, follower) {
	# next transitive step
	r0 = merge(df, data.frame(leader = leader, follower = follower), by = 'follower');
	# add new connections
	r1 = rbind(df, data.frame(follower = r0$leader.y, leader = r0$leader.x, cluster = r0$cluster));
	# symmetric closure
	r1 = rbind(r1, data.frame(follower = r1$leader, leader = r1$follower, cluster = r1$cluster))
	# form clusters by selecting min cluster number per connection
	r1 = r1[order(r1$cluster), ];
	row.names(r1) = 1:dim(r1)[1];
	r2 = unique(r1[, c('leader', 'follower')]);
	# select unique rows (first occurunce selects cluster)
	r = r1[as.integer(row.names(r2)), ];
	# pretty sort data frame
	r = r[order(r$cluster), ];
	r
}
# form clusters from a relationally defined hierarchy
# df: first two columns are interpreted as (follower, leader) edges; returns a
# list of node sets, one per connected component (transitive + symmetric closure).
# Fix: the fixed-point test read df0$clusters — a nonexistent column, hence
# always NULL, which made all(NULL == NULL) vacuously TRUE and reduced the
# convergence check to row-count stability. It now reads df0$cluster as
# intended; the unused loop counter i was removed.
sub.graph = function(df) {
	df = as.data.frame(df);
	names(df)[1:2] = c('follower', 'leader');
	df = df[order(df$follower), ];
	# seed clusters: every follower id starts out as its own cluster
	ids = sort(unique(df$follower));
	idsC = as.character(ids);
	counts = lapply(ids, function(id)sum(df$follower == id));
	names(counts) = idsC;
	clusters = unlist(sapply(idsC, function(id){ rep(as.integer(id), counts[[id]]) }));
	df = cbind(df, data.frame(cluster = rep(clusters, 2)));
	# symmetric closure of the seed edges
	df = unique(rbind(df, data.frame(follower = df$leader, leader = df$follower, cluster = df$cluster)));
	# receiving frame
	df0 = df;
	# iterate transitive closure until edges and cluster assignment stabilize
	repeat {
		Nrows = dim(df0)[1];
		cls = df0$cluster;
		# add transitive connections
		df0 = sub.graph.merge(df0, follower = df0$leader, leader = df0$follower);
		if (dim(df0)[1] == Nrows && all(cls == df0$cluster)) break();
	}
	df0 = df0[order(df0$cluster), ];
	cIds = unique(df0$cluster);
	cls = lapply(cIds, function(id)unique(avu(df0[df0$cluster == id, c('follower', 'leader')])));
	cls
}
#
# <p> formulas
#
# formula: formula as a character string with wildcard character '%'
# <!>: assume whitespace separation in formula between terms
# <!>: write interaction with spaces <!> such as in:
#	f = 'MTOTLOS_binair ~ ZRES% + sq(ZRes%) + ( ZRES% )^2';
# Expand '%' wildcards in a textual formula against the column names of data;
# each wildcard term (optionally wrapped in a function call) is replaced by the
# '+'-joined list of matching columns. Returns a formula object.
# NOTE(review): fetchRegexpr/nlapply/mergeDictToString are project helpers.
formula.re = function(formula, data, ignore.case = F, re.string = '.*') {
	vars = names(data);
	#regex = '(?:([A-Za-z_.]+[A-Za-z0-9_.]*)[(])?([A-Za-z.]+[%][A-Za-z0-9.%_]*)(?:[)])?';
	# function names ( regex )
	#regex = '(?:([A-Za-z_.]+[A-Za-z0-9_.]*)[(])?([A-Za-z%.]+[A-Za-z0-9.%_]*)(?:[)])?';
	# allow backslash quoting
	regex = '(?:([A-Za-z_.\\\\]+[A-Za-z0-9_.\\\\]*)[(])?([A-Za-z%.\\\\]+[A-Za-z0-9.%_\\\\]*)(?:[)])?';
	patterns = unique(fetchRegexpr(regex, formula, ignore.case = ignore.case));
	subst = nlapply(patterns, function(p) {
		comps = fetchRegexpr(regex, p, captureN = c('fct', 'var'), ignore.case = ignore.case)[[1]];
		# '%' becomes re.string, anchored to the full column name
		p = sprintf("^%s$", gsub('%', re.string, comps$var));
		mvars = vars[sapply(vars, function(v)regexpr(p, v, perl = T, ignore.case = ignore.case)>=0)];
		if (comps$fct != '') {
			# re-wrap each matching column in the captured function call
			varf = sprintf('%s', paste(sapply(mvars, function(v)sprintf('%s(%s)', comps$fct, v)),
				collapse = " + "));
		} else {
			varf = sprintf('%s', paste(mvars, collapse = " + "));
		}
		varf
	});
	formulaExp = as.formula(mergeDictToString(subst, formula));
	formulaExp
}
# Extract the response (left-hand side) of a formula as a character string;
# accepts a formula object or its character representation.
# Fix: class(f) == 'formula' replaced by inherits(f, 'formula'), the correct
# test for objects that may carry more than one class.
formula.response = function(f) {
	#r = fetchRegexpr('[^\\s~][^~]*?(?=\\s*~)', if (is.formula(f)) deparse(f) else f);
	f = if (inherits(f, 'formula')) join(deparse(f), '') else f;
	r = as.character(fetchRegexpr('^\\s*([^~]*?)(?:\\s*~)', f, captures = T));
	# <p> version 2
	#fs = as.character(as.formula(as.character(f)));	# "~" "response" "covs"
	#r = fs[2];
	# <p> version 1
	#f = as.formula(f);
	#r = all.vars(f)[attr(terms(f), "response")];	# fails to work on 'response ~ .'
	r
}
# Right-hand side of a formula; returned as a one-sided formula, or as plain
# text (without the '~') when noTilde is TRUE.
formula.rhs = function(f, noTilde = FALSE) {
	rhs = fetchRegexpr('[~](.*)', if (!is.character(f)) formula.to.character(f) else f, captures = T);
	if (noTilde) rhs else as.formula(con('~', rhs))
}
# All covariate (RHS) variable names of a formula.
formula.covariates = function(f) {
	covs = all.vars(formula.rhs(f));
	#covs = setdiff(all.vars(as.formula(f)), formula.response(f));
	covs
}
# All variable names of a formula (response first).
formula.vars = function(f)union(formula.response(f), formula.covariates(f));
# Intercept-only ('response ~ 1') version of a formula.
formula.nullModel = function(f) {
	r = formula.response(f);
	fn = as.formula(sprintf("%s ~ 1", r));
	fn
}
# Deparse a formula into a single character string (long formulas that deparse
# to several lines are re-joined).
formula.to.character = function(f)join(deparse(as.formula(f)), '');
# Coerce f to character: pass character input through unchanged, deparse
# formula input via formula.to.character.
# Fix: the original used ifelse() on a scalar condition, which returns a
# length-1 result and would truncate a character vector argument to its first
# element (and strip attributes); plain if/else preserves the full value.
Formula.to.character = function(f) if (is.character(f)) f else formula.to.character(f);
# Turn a formula (or its deparsed parts) into a filesystem-friendly name:
# whitespace removed, '_' -> '-', Surv(...) -> 'surv', MARKER -> 'snp'.
formula2filename = function(f) {
	fs = join(f, sep = '');
	filename = mergeDictToString(list(
		`\\s+` = '',
		`_` = '-',
		`Surv\\(.*\\)` = 'surv',
		MARKER = 'snp'
		# other components
	), fs, re = T, doApplyValueMap = F, doOrderKeys = F);
	filename
}
# Variable names of a wildcard formula after expansion against data
# (see formula.re).
data.vars = function(data, formula, re.string = '.*', ignore.case = F) {
	all.vars(formula.re(formula = formula, data = data, re.string = re.string, ignore.case = ignore.case));
}
# Append the RHS terms of f1 to formula f0 (joined with '+').
formula.add.rhs = function(f0, f1) {
	as.formula(join(c(formula.to.character(f0), formula.rhs(f1, noTilde = TRUE)), '+'))
}
# Prepend the response of f1 to the (one-sided) RHS of f0.
formula.add.response = function(f0, f1) {
	formula = join(c(formula.response(f1), formula.rhs(f0, noTilde = FALSE)), ' ');
	as.formula(formula)
}
# Names of the design-matrix columns (predictors incl. factor dummies) implied
# by formula f on data; '(Intercept)' for an intercept-only model.
# NOTE(review): 'formula.rhs(f) == ~ 1' compares two formula objects with '==',
# which is not a defined comparison for language objects and likely errors —
# TODO verify and consider identical(deparse(...), "~1") instead.
formula.predictors = function(f, data, dataFrameNames = TRUE) {
	if (formula.rhs(f) == ~ 1) return('(Intercept)');
	mm = model.matrix(model.frame(formula.rhs(f), data), data);
	ns = dimnames(mm)[[2]];
	# <p> create data frame to extract proper names
# 	if (dataFrameNames) {
# 		df0 = as.data.frame(t(rep(1, length(ns))));
# 		names(df0) = ns;
# 		ns = names(df0);
# 	}
	ns
}
# <!> cave survival
# Replace a transformed response (e.g. log(y)) by the bare response variable.
# NOTE(review): assumes exactly one response variable; Sprintf is a project helper.
formulaRemoveTransformation = function(model) {
	respVar = setdiff(all.vars(model), all.vars(formula.rhs(model)));
	formula.add.response(formula.rhs(model), as.formula(Sprintf('%{respVar}s ~ 1')))
}
# Predictor names present in f1 but not in f0 (nested-model comparison).
formulas.free = function(f1, f0, data) {
	setdiff(formula.predictors(f1, data), formula.predictors(f0, data))
}
# <i> use terms.formula from a (a + ... + z)^2 formula
# <i> merge.multi.list(rep.list(covs, 2), .constraint = is.ascending)
# All unordered pairs of distinct covariates as a data frame with columns
# c1/c2, where c1 carries the covariate with the larger index.
covariatePairs = function(covs) {
	idcs = 1:length(covs);
	# cartesian product of index vectors via merge (no common columns)
	grid = merge(data.frame(c1 = idcs), data.frame(c2 = idcs));
	lower = grid[grid[, 1] > grid[, 2], ];
	data.frame(c1 = covs[lower[, 1]], c2 = covs[lower[, 2]])
}
# Build a formula 'response ~ cov1 + cov2 + ...' from character arguments.
# (parameter name 'repsonse' [sic] kept for backward compatibility with callers)
formulaWith = function(repsonse = "y", covariates = "x") {
	rhs = paste(covariates, collapse = "+");
	as.formula(sprintf("%s ~ %s", repsonse, rhs))
}
#
# <p> set operations
#
# Clamp the values of v elementwise into [min, max]; NA elements stay NA.
# (Values below min win over the upper bound, mirroring the original nesting.)
minimax = function(v, min = -Inf, max = Inf) {
	upperClamped = ifelse(v > max, max, v);
	ifelse(v < min, min, upperClamped)
}
#
# Rsystem.R
#Mon 27 Jun 2005 10:51:30 AM CEST
#
# <par> file handling
#
# <!><N> works only on atomic path
# <!> 5.1.2016: trailing slash leads to basename of ""
# Decompose a path (optionally with '[qualifier]:' prefix and ssh-style
# 'user@host:' prefix) into dir/base/ext/file components plus filesystem
# properties (exists/nonempty touch the disk unless skipExists).
splitPath = function(path, removeQualifier = T, ssh = F, skipExists = F) {
	if (is.null(path)) return(NULL);
	if (removeQualifier) {
		# strip a leading '[...]:' qualifier
		q = fetchRegexpr('(?<=^\\[).*?(?=\\]:)', path);
		if (length(q) > 0) path = substr(path, nchar(q) + 4, nchar(path));
	}
	sshm = list(user = '', host = '', userhost = '');
	if (ssh) {
		# split off an optional 'user@host:' remote prefix
		sshm = fetchRegexpr('^(?:(?:([a-z]\\w*)(?:@))?([a-z][\\w.]*):)?(.*)', path,
			ignore.case = T, captureN = c('user', 'host', 'path'))[[1]];
		sshm$userhost = if (sshm$user != '') sprintf('%s@%s', sshm$user, sshm$host) else sshm$host;
		path = sshm$path;
	}
	#path = "abc/def.ext";
	#r.base = basename(path);
	#re = "([^.]*$)";
	#r = gregexpr(re, r.base)[[1]];
	#ext = substr(r.base, r[1], r[1] + attr(r, "match.length")[1] - 1);
	#ext = firstDef(fetchRegexpr('(?<=\\.)[^/.]+\\Z', path), '');
	ext = fetchRegexpr('(?<=\\.)[^/.]+\\Z', path);
	# take everything before ext and handle possible absence of '.'
	#base = substr(r.base, 1, r[1] - 1 - (ifelse(substr(r.base, r[1] - 1, r[1] - 1) == '.', 1, 0)));
	# reduce to file.ext
	Nchar = nchar(path);
	# trailing slash: empty base, dir without the slash
	if (Nchar != 0 && substr(path, Nchar, Nchar) == '/') {
		base = '';
		dir = substr(path, 1, Nchar - 1);
	} else {
		base = basename(path);
		dir = dirname(path);
	}
	# base as yet still contains the file extension
	file = base;
	# chop off extension if present
	if (length(fetchRegexpr('\\.', base)) > 0) base = fetchRegexpr('\\A.*(?=\\.)', base);
	#pieces = regexpr(re, path, perl = T);
	pieces = fetchRegexpr('([^.]+)', path);
	isAbsolute = Nchar != 0 && substr(path, 1, 1) == '/';
	# <N> disk is accessed
	exists = if (!skipExists) File.exists(path, host = sshm$userhost, ssh = F) else NA;
	nonempty = exists && (file.info(path)$size > 0);
	ret = list(
		dir = dir,
		base = base,
		path = path,
		fullbase = sprintf("%s/%s", dir, base),
		ext = ext,
		file = file,
		isAbsolute = isAbsolute,
		absolute = if (isAbsolute) path else sprintf('%s/%s', getwd(), path),
		# fs properties
		exists = exists, nonempty = nonempty,
		# remote
		is.remote = !(sshm$user == '' && sshm$host == ''),
		user = sshm$user, host = sshm$host, userhost = sshm$userhost
	);
	ret
}
# Absolute version of path: expands '~/' against $HOME (home.dir) and prefixes
# the current working directory for relative paths.
path.absolute = absolutePath = function(path, home.dir = T, ssh = T) {
	path = splitPath(path, ssh = ssh)$path;
	if (home.dir && nchar(path) >= 2 && substr(path, 1, 2) == "~/")
		path = sprintf("%s/%s", Sys.getenv('HOME'), substr(path, 3, nchar(path)));
	if (nchar(path) > 0 && substr(path, 1, 1) == "/") path else sprintf("%s/%s", getwd(), path)
}
# Generate a fresh file name 'prefix<random digits>[.extension]'; retries on
# collisions and by default touches the file to lock the name.
# NOTE(review): touch-then-use still leaves a small race window (see below).
tempFileName = function(prefix, extension = NULL, digits = 6, retries = 5, inRtmp = F,
	createDir = F, home.dir = T, doNotTouch = F) {
	ext = if (is.null(extension)) '' else sprintf('.%s', extension);
	path = NULL;
	if (inRtmp) prefix = sprintf('%s/%s', tempdir(), prefix);
	if (home.dir) prefix = path.absolute(prefix, home.dir = home.dir);
	for (i in 1:retries) {
		path = sprintf('%s%0*d%s', prefix, digits, floor(runif(1) * 10^digits), ext);
		if (!File.exists(path)) break;
	}
	if (File.exists(path))
		stop(sprintf('Could not create tempfile with prefix "%s" after %d retries', prefix, retries));
	# potential race condition <N>
	if (createDir)
		Dir.create(path, recursive = T) else
		if (!doNotTouch) writeFile(path, '', mkpath = T, ssh = T);
	#	# old implementation
	#path = tempfile(prefix);
	#cat('', file = path);	# touch path to lock name
	#path = sprintf("%s%s%s", path, ifelse(is.null(extension), "", "."),
	#	ifelse(is.null(extension), "", extension));
	Log(sprintf('Tempfilename:%s', path), 5);
	path
}
# List files in the directory part of dir; with regex, the file part of dir is
# used as a (perl) filter pattern; case toggles case sensitivity.
dirList = function(dir, regex = T, case = T) {
	base = splitPath(dir)$dir;
	files = list.files(base);
	if (regex) {
		re = splitPath(dir)$file;
		files = files[grep(re, files, perl = T, ignore.case = !case)];
	}
	files
}
# Write table t both as a standard CSV and as a semicolon/comma-decimal
# variant (path with semAppend inserted before the extension).
write.csvs = function(t, path, semAppend = "-sem", ...) {
	s = splitPath(path);
	write.csv(t, path);
	pathSem = sprintf("%s%s.%s", s$fullbase, semAppend, s$ext);
	# make sure t is a data.frame or dec option will not take effect <A>
	#write.csv2(t, pathSem);
	write.table(t, file = pathSem, row.names = F, col.names = T, dec = ",", sep = ";");
}
#
# <p> file manipulation
#
# file.exists() generalized to remote '[user@]host:path' locations: remote
# existence is probed by running 'stat' through the given agent (ssh).
File.exists = function(path, host = '', agent = 'ssh', ssh = T) {
	if (ssh) {
		sp = splitPath(path, skipExists = T, ssh = T);
		host = sp$userhost;
		path = sp$path;
	}
	r = if (!is.null(host) && host != '') {
		# exit status 0 of 'stat' indicates existence
		ret = system(sprintf('%s %s stat %s >/dev/null 2>&1', agent, host, qs(path)));
		ret == 0
	} else file.exists(path);
	r
}
# Copy a single from/to pair: purely local transfers use file.symlink (default)
# or file.copy, anything involving a remote side goes through the copy agent
# (scp).
# Fix: the local/remote flags were computed as "is local" but named
# is.remote.*, and the branch condition negated them — so local copies were
# routed through scp and remote copies through local file.symlink/file.copy.
# Renamed to is.local.* and corrected the branch.
File.copy_raw = function(from, to, ..., recursive = F, agent = 'scp', logLevel = 6, ignore.shell = T,
	symbolicLinkIfLocal = T) {
	spF = splitPath(from, ssh = T);
	spT = splitPath(to, ssh = T);
	# a path is local when it carries no remote qualifier or points at localhost
	is.local.f = !spF$is.remote || spF$host == 'localhost';
	is.local.t = !spT$is.remote || spT$host == 'localhost';
	r = if (is.local.f && is.local.t) {
		if (symbolicLinkIfLocal) {
			file.symlink(spF$path, spT$path, ...);
		} else file.copy(spF$path, spT$path, recursive = recursive, ...);
	} else {
		# <A> assume 'to' to be atomic
		System(sprintf('%s %s %s %s %s',
			agent,
			ifelse(recursive, '-r', ''),
			paste(sapply(from, qs), collapse = ' '),
			qs(to),
			ifelse(ignore.shell, '>/dev/null', '')
		), logLevel);
	}
	r
}
# Vectorized front-end to File.copy_raw: from/to are paired via cbind
# (standard recycling rules apply); returns the per-pair results.
File.copy = function(from, to, ..., recursive = F, agent = 'scp', logLevel = 6, ignore.shell = T,
	symbolicLinkIfLocal = T) {
	if (is.null(from)) return(NULL);
	pairs = cbind(from, to);
	r = apply(pairs, 1, function(r) {
		File.copy_raw(r[1], r[2], ...,
			recursive = recursive, agent = agent, logLevel = logLevel,
			ignore.shell = ignore.shell, symbolicLinkIfLocal = symbolicLinkIfLocal)
	})
	r
}
# Remove a (possibly remote) file; with ssh, the removal is executed on the
# remote host via the agent, otherwise file.remove is used locally.
File.remove = function(path, ..., agent = 'ssh', ssh = T, logLevel = 6) {
	r = if (ssh) {
		sp = splitPath(path, skipExists = T, ssh = T);
		host = sp$userhost;
		rpath = sp$path;
		if (File.exists(path, ssh = T))
			System(sprintf('rm %s', join(sapply(rpath, qs))), pattern = agent,
				ssh_host = host, logLevel = logLevel);
	} else if (file.exists(path)) file.remove(path, ...);
	r
}
# <i> remote operations
# Create a symbolic link from -> to; an existing target is removed first when
# replace is TRUE. The remote (ssh) branch is not implemented yet.
# Fix: corrected the typo in the error message ('not implmenented').
# NOTE(review): file.exists(to) is FALSE for a dangling symlink, in which case
# the replace step is skipped and file.symlink fails — TODO confirm acceptable.
File.symlink = function(from, to, replace = T, agent = 'ssh', ssh = F, logLevel = 6) {
	r = if (ssh) {
		sp = splitPath(from, skipExists = T, ssh = T);
		host = sp$userhost;
		rpath = sp$path;
		# <!><i>
		stop('not implemented');
	} else {
		Log(sprintf('symlink %s -> %s', qs(from), qs(to)), logLevel);
		if (replace && file.exists(to)) file.remove(to);
		file.symlink(from, to);
	}
	r
}
# <!> only atomic path
# treatAsFile: causes Dir.create to split off last path-component
# dir.create generalized to remote paths (mkdir over ssh).
# NOTE(review): the remote branch uses 'mkdir --parents' (GNU long option) —
# may fail on non-GNU remote hosts; TODO confirm target platforms.
Dir.create = function(path, ..., recursive = F, agent = 'ssh', logLevel = 6,
	ignore.shell = T, allow.exists = T, treatPathAsFile = F) {
	sp = splitPath(path, ssh = T);
	# ignore last path-component
	if (treatPathAsFile) {
		sp$path = sp$dir;
		Log(sprintf('creating path %s', sp$path), 4);
	}
	if (sp$is.remote) {
		System(sprintf('ssh %s mkdir %s %s %s',
			sp$userhost,
			if (recursive) '--parents' else '',
			paste(sapply(sp$path, qs), collapse = ' '),
			if (ignore.shell) '2>/dev/null' else ''
		), logLevel);
	} else {
		if (allow.exists && !file.exists(sp$path)) dir.create(sp$path, ..., recursive = recursive);
	}
}
# save() generalized to remote destinations: writes locally (to a tempfile for
# remote targets) and copies the result over; mkpath creates the target
# directory. symbolsAsVectors: symbol names are given as character vectors.
Save = function(..., file = NULL, symbolsAsVectors = F, mkpath = T, envir = parent.frame(1)) {
	sp = splitPath(file, ssh = T);
	localPath = if (sp$is.remote) tempfile() else file;
	if (mkpath) { Dir.create(file, recursive = T, treatPathAsFile = T); }
	r = if (symbolsAsVectors) {
		do.call('save', c(as.list(c(...)), list(file = localPath)), envir = envir);
	} else save(..., file = localPath, envir = envir);
	if (sp$is.remote) File.copy(localPath, file);
	r
}
# load() generalized to remote sources ([user@host:]path): remote files are
# copied to a tempfile first; up to Load_retries attempts with Load_sleep
# seconds between them (e.g. to wait for files still being produced).
# Fix: try-errors are now detected with inherits() instead of
# class(r) == 'try-error', the correct test for multi-class objects.
Load = function(..., file = NULL, Load_sleep = 0, Load_retries = 3, envir = parent.frame(1), logLevel = 6) {
	sp = splitPath(file, ssh = T);
	localPath = if (sp$is.remote) tempfile() else file;
	r = NULL;
	for (i in 1:Load_retries) {
		if (sp$is.remote) {
			# wait for the remote file to appear
			if (!File.exists(file)) {
				Sys.sleep(Load_sleep);
				next;
			}
			File.copy(file, localPath, logLevel = logLevel);
		}
		r = try(load(..., file = localPath, envir = envir));
		if (inherits(r, 'try-error') && Load_sleep > 0) Sys.sleep(Load_sleep) else break;
	}
	# r stays NULL when a remote file never appeared
	if (is.null(r)) stop(sprintf('could not Load %s', file));
	if (inherits(r, 'try-error')) stop(r[1]);
	r
}
#
# create output file names
# output = list(prefix = "results/pch", extension = "pdf", tag = "20100727");
# Compose 'prefix-[subtype-]tag.extension' from an output configuration list;
# returns NULL when no prefix is configured.
fileName = function(output, extension = NULL, subtype = NULL) {
	if (is.null(output)) return(NULL);
	if (is.null(output$prefix)) return(NULL);
	subtype = firstDef(subtype, output$subtype, "");
	if (subtype != "") subtype = sprintf("%s-", subtype);
	r = sprintf("%s-%s%s.%s", output$prefix, subtype, output$tag,
		firstDef(extension, output$extension, ""));
	Log(r, 4);
	r
}
#.globalOutput = list(prefix = 'results/20120126-');
#save(r, file = .fn('simulation', 'RData'))
# Defaults for the global output-file configuration used by .fn and friends.
.globalOutputDefault = .globalOutput = list(prefix = '', tag = NULL, tagFirst = F);
# Private environment holding the mutable .globalOutput state.
GlobalOutput_env__ = new.env();
# .fn.set(prefix = 'results/predictionTesting-')
# Replace the global output configuration with the defaults overridden by ... .
.fn.set = function(...) {
	.globalOutput = merge.lists(.globalOutputDefault, list(...));
	assign('.globalOutput', .globalOutput, envir = GlobalOutput_env__);
}
# create output file name on globalOptions
# Build output path(s) 'prefix + name [+ .extension]', inserting the
# configured tag before or after the base name (tagFirst); creates the target
# directory as a side effect. Vectorized over name.
.fn = function(name, extension = '', options = NULL) {
	o = merge.lists(.globalOutputDefault, .globalOutput,
		get('.globalOutput', envir = GlobalOutput_env__), options);
	# construct plain filename
	pathes = sprintf('%s%s%s%s', o$prefix, name, ifelse(extension == '', '', '.'), extension);
	fn = sapply(pathes, function(path) {
		sp = splitPath(path);
		# <p> dir
		if (!file.exists(sp$dir)) dir.create(sp$dir);
		# <p> tag
		ext = firstDef(sp$ext, '');
		fn = if (!is.null(o$tag)) {
			if (o$tagFirst) {
				sprintf('%s/%s-%s%s%s', sp$dir, o$tag, sp$base, ifelse(ext == '', '', '.'), ext)
			} else { sprintf('%s/%s-%s%s%s', sp$dir, sp$base, o$tag, ifelse(ext == '', '', '.'), ext) };
		} else sprintf('%s/%s%s%s', sp$dir, sp$base, ifelse(ext == '', '', '.'), ext);
		fn
	});
	avu(fn)
}
# Append prefix to the global output prefix (stored in GlobalOutput_env__).
# NOTE(review): reads the top-level .globalOutput binding rather than the one
# in GlobalOutput_env__ — TODO confirm both stay in sync via .fn.set.
.fn.pushPrefix = function(prefix) {
	output = merge.lists(.globalOutput, list(prefix = sprintf('%s%s', .globalOutput$prefix, prefix)));
	assign('.globalOutput', output, envir = GlobalOutput_env__);
	.globalOutput
}
# Drop the last path component of the global output prefix.
# NOTE(review): the prefix parameter is accepted but unused.
.fn.popPrefix = function(prefix) {
	output = merge.lists(.globalOutput, list(prefix = sprintf('%s/', splitPath(.globalOutput$prefix)$dir)));
	assign('.globalOutput', output, envir = GlobalOutput_env__);
	.globalOutput
}
#
# command argument handling
#
# default args: command line call minus command
# Evaluate the first non-option command line argument as R code in the caller
# and return it re-parsed as a list (';' separators turned into ',').
# NOTE(review): sapply over grep yields a list when matching and non-matching
# elements are mixed; as.integer() on such a list errors — TODO confirm the
# expected argument shapes.
evaluateArgs = function(c = commandArgs()[-1]) {
	# options start with '--'; grep returns integer(0) for non-matches -> NA
	is.no.option = is.na(as.integer(sapply(c, function(a)grep("^--", a))));
	#c = c[!(c == "--vanilla")];	# eliminate '--vanilla' arguments
	c = c[is.no.option];
	if (length(c) > 0) {
		eval.parent(parse(text = c[1]));
		argListString = gsub(";", ",", gsub(";$", "", c[1]));
		print(argListString);
		return(eval(parse(text = sprintf("list(%s)", argListString))));
	}
	return(NULL);
}
# default args: command line call minus command
# Evaluate every non-option argument in its own scratch scope and collect the
# variables it defines into a single flat option list.
getCommandOptions = function(c = commandArgs()[-1]) {
	is.no.option = is.na(as.integer(sapply(c, function(a)grep("^--", a))));
	#c = c[!(c == "--vanilla")];	# eliminate '--vanilla' arguments
	c = c[is.no.option];
	o = lapply(c, function(e) {
		# side effect: creates bindings in this local frame
		eval(parse(text = e));
		nlapply(setdiff(ls(), 'e'), function(n)get(n))
	});
	o = unlist.n(o, 1);
	o
}
# R.pl interface
# Dispatch trigger callbacks: for every name in `triggerDefinition` that is
# also present (non-NULL) in the option list `o`, call the associated handler
# with (o$args, o). Falls back to the global `.globalTriggers` definition.
# Silently does nothing when there are no triggers or `o` is not a list.
handleTriggers = function(o, triggerDefinition = NULL) {
	if (is.null(triggerDefinition)) triggerDefinition = rget('.globalTriggers');
	if (!is.list(o) || is.null(triggerDefinition)) return(NULL);
	for (n in names(triggerDefinition)) {
		if (!is.null(o[[n]])) triggerDefinition[[n]](o$args, o);
	}
}
#
# level dependent logging
#
# Logging state lives in its own environment (Log_env__) instead of globals;
# earlier global-variable based variants kept below for reference.
#Global..Log..Level = 4;
#Default..Log..Level = 4;
#assign(Default..Log..Level, 4, envir = .GlobalEnv);
Log_env__ <- new.env();
# DefaultLogLevel: level used by Log() calls that do not pass one explicitly
assign('DefaultLogLevel', 4, envir = Log_env__);
#' Log a message to stderr.
#'
#' Log a message to stderr. Indicate a logging level to control verbosity.
#'
#' This function prints a message to stderr if the condition is met that a
#' global log-level is set to greater or equal the value indicated by
#' \code{level}. \code{Log.level} returns the current logging level.
#'
#' @aliases Log Log.setLevel Log.level
#' @param o Message to be printed.
#' @param level If \code{Log.setLevel} was called with this value, subsequent
#' calls to \code{Log} with values of \code{level} smaller or equal to this
#' value will be printed.
#' @author Stefan Böhringer <r-packages@@s-boehringer.org>
#' @seealso \code{\link{Log.setLevel}}, ~~~
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' Log.setLevel(4);
#' Log('hello world', 4);
#' Log.setLevel(3);
#' Log('hello world', 4);
#'
# Print message `o` (with a timestamp) when `level` is at or below the global
# log level. NOTE(review): the roxygen doc above says "stderr", but cat()
# without a file argument writes to stdout — confirm intended behavior.
Log = function(o, level = get('DefaultLogLevel', envir = Log_env__)) {
	if (level <= get('GlobalLogLevel', envir = Log_env__)) {
		cat(sprintf("R %s: %s\n", date(), as.character(o)));
	}
}
# Sprintf-interpolating variant of Log(): `o` is a template expanded against
# `...` and `envir` (caller's frame by default) before being logged.
Logs = function(o, level = get('DefaultLogLevel', envir = Log_env__), ..., envir = parent.frame()) {
	Log(Sprintf(o, ..., envir = envir), level = level);
}
# Current global log level.
Log.level = function()get('GlobalLogLevel', envir = Log_env__);
# Set the global log level; calling without argument keeps the current level.
Log.setLevel = function(level = get('GlobalLogLevel', envir = Log_env__)) {
	assign("GlobalLogLevel", level, envir = Log_env__);
}
Log.setLevel(4); # default
# Default (local) file-system accessors used by System() patterns; patterns
# such as 'ssh' substitute remote-aware versions via their `fs` mapper.
.System.fileSystem = list(
	#tempfile = function(prefix, ...)tempfile(splitPath(prefix)$base, tmpdir = splitPath(prefix)$dir, ...),
	tempfile = function(prefix, ...)tempFileName(prefix, ...),
	readFile = function(...)readFile(...)
);
# Command-wrapping patterns for System(). Each entry provides:
#   pre(cmd, spec, ...)  -> list with at least $cmd, the wrapped command
#   post(spec, ret, ...) -> list merged into System()'s return value
#   fs (optional)        -> maps the file-system accessors (e.g. to remote)
.System.patterns = list(
	default = list(pre = function(cmd, ...)cmd, post = function(spec, ret, ...)list() ),
	# qsub: submit via qsub.pl; the job id is written to jidFile and returned
	# by post() as $jid. waitForJids chains onto previously submitted jobs.
	qsub = list(pre = function(cmd, spec,
		jidFile = spec$fs$tempfile(sprintf('/tmp/R_%s/qsub_pattern', Sys.getenv('USER'))),
		qsubOptions = '',
		waitForJids = NULL, ...) {
		Dir.create(jidFile, treatPathAsFile = TRUE);
		waitOption = if (is.null(waitForJids)) '' else
			sprintf('--waitForJids %s', join(waitForJids, sep = ','));
		print(cmd);
		ncmd = sprintf('qsub.pl --jidReplace %s %s --unquote %s -- %s',
			jidFile, waitOption, qsubOptions, qs(cmd));
		print(ncmd);
		spec = list(cmd = ncmd, jidFile = jidFile);
		spec
	},
		post = function(spec, ret, ...) { list(jid = as.integer(spec$fs$readFile(spec$jidFile))) }
	),
	# cwd: run the command after changing into directory `cwd`
	cwd = list(pre = function(cmd, spec, cwd = '.', ...) {
		ncmd = sprintf('cd %s ; %s', qs(cwd), cmd);
		spec = list(cmd = ncmd);
		spec
	},
		post = function(spec, ret, ...) { list() }
	),
	# <i> stdout/stderr handling
	# ssh: run the command on ssh_host, optionally sourcing ssh_source_file
	# first; fs() redirects tempfile/readFile to the remote host.
	ssh = list(pre = function(cmd, spec, ssh_host = 'localhost', ssh_source_file = NULL, ...,
		ssh_single_quote = T) {
		if (!is.null(ssh_source_file)) {
			cmd = sprintf('%s ; %s',
				join(paste('source', qs(ssh_source_file), sep = ' '), ' ; '), cmd);
		}
		fmt = if (ssh_single_quote) 'ssh %{ssh_host}s %{cmd}q' else 'ssh %{ssh_host}s %{cmd}Q';
		spec = list(cmd = Sprintf(fmt));
		spec
	},
		fs = function(fs, ..., ssh_host) {
			list(
				tempfile = function(prefix, ...) {
					Log(sprintf('tempfile ssh:%s', prefix), 1);
					r = splitPath(tempFileName(sprintf('%s:%s', ssh_host, prefix), ...), ssh = T)$path;
					Log(sprintf('tempfile ssh-remote:%s', r), 1);
					r
				},
				readFile = function(path, ...)readFile(sprintf('%s:%s', ssh_host, path), ..., ssh = T)
			);
		},
		post = function(spec, ret, ...) { list() }
	)
);
#
# a system call (c.f. privatePerl/TempFilenames::System)
#
# Global state for System(): .system.doLogOnly == TRUE turns all System()
# calls into dry runs (commands are logged but not executed).
System_env__ <- new.env();
assign(".system.doLogOnly", FALSE, envir = System_env__);
# Run a shell command with logging, optional dry-run, optional output capture
# and optional command wrapping through .System.patterns ('qsub', 'ssh',
# 'cwd', ...). With `patterns` (a vector) commands are wrapped outermost-last.
# Returns the exit status, or a list (error/output/jid/command) when
# return.output, return.cmd or a pattern is in play.
System = function(cmd, logLevel = get('DefaultLogLevel', envir = Log_env__),
	doLog = TRUE, printOnly = NULL, return.output = F,
	pattern = NULL, patterns = NULL, ..., return.cmd = F) {
	# prepare: printOnly overrides the global dry-run flag for this call
	if (!exists(".system.doLogOnly", envir = System_env__))
		assign(".system.doLogOnly", F, envir = System_env__);
	doLogOnly = ifelse (!is.null(printOnly), printOnly, get('.system.doLogOnly', envir = System_env__));
	# pattern mapping
	fs = .System.fileSystem;
	if (!is.null(patterns)) {
		spec = list();
		# map file accesses; iterate reversed so fs mappers compose inner-first
		for (pattern in rev(patterns)) {
			fsMapper = .System.patterns[[pattern]]$fs;
			if (!is.null(fsMapper)) fs = fsMapper(fs, ...);
			spec[[length(spec) + 1]] = list(fs = fs);
		}
		# wrap commands into each other (pattern i wraps the result of i-1)
		for (i in 1:length(patterns)) {
			spec[[i]] = merge.lists(spec[[i]], .System.patterns[[patterns[[i]]]]$pre(cmd, spec[[i]], ...));
			cmd = spec[[i]]$cmd;
		}
	} else if (!is.null(pattern)) {
		spec = .System.patterns[[pattern]]$pre(cmd, list(fs = fs), ...);
		spec$fs = fs;	# manually install fs
		cmd = spec$cmd;
	}
	# redirection (after patterns) <A>: capture stdout into a temp file
	if (return.output & !doLogOnly) {
		tmpOutput = tempfile();
		cmd = sprintf("%s > %s", cmd, tmpOutput);
	}
	# logging
	if (doLog){ Log(sprintf("system: %s", cmd), logLevel); }
	# system call (skipped entirely in dry-run mode)
	ret = NULL;
	if (!doLogOnly) ret = system(cmd);
	# return value
	r = list(error = ret);
	if (return.output & !doLogOnly) {
		r = merge.lists(r, list(error = ret, output = readFile(tmpOutput)));
	}
	# postprocess: run pattern post() hooks outermost-first
	if (!doLogOnly) if (!is.null(patterns)) {
		for (i in rev(1:length(patterns))) {
			r = merge.lists(r, .System.patterns[[patterns[[i]]]]$post(spec[[i]], ret, ...));
		}
	} else if (!is.null(pattern)) {
		r = merge.lists(r, .System.patterns[[pattern]]$post(spec, ret, ...));
	}
	if (return.cmd) r$command = cmd;
	# simplified output: plain exit status when nothing else was requested
	if (!return.output && !return.cmd && is.null(pattern)) r = r$error;
	r
}
# wait on job submitted by system
# Per-pattern wait handlers: given the return value of System(), block until
# the submitted job(s) finish. 'qsub' extracts job ids (from a single result
# or a list of results) and delegates to qwait.pl.
.System.wait.patterns = list(
	default = function(r, ...)(NULL),
	qsub = function(r, ...) {
		ids = if (is.list(r[[1]]) & !is.null(r[[1]]$jid)) list.kp(r, 'jid', do.unlist = T) else r$jid;
		idsS = if (length(ids) == 0) '' else paste(ids, collapse = ' ');
		System(sprintf('qwait.pl %s', idsS), ...);
	}
);
# Wait for completion of a System() result `rsystem` according to `pattern`;
# no-op when pattern is NULL.
System.wait = function(rsystem, pattern = NULL, ...) {
	r = if (!is.null(pattern)) .System.wait.patterns[[pattern]](rsystem, ...) else NULL;
	r
}
# Toggle global dry-run mode for System() (log commands instead of running).
System.SetDoLogOnly = function(doLogOnly = F) {
	assign(".system.doLogOnly", doLogOnly, envir = System_env__);
}
# Return the IPv4 address of `interface` by parsing ifconfig output.
# NOTE(review): the 'inet addr:' pattern matches the old net-tools ifconfig
# format only — verify on modern systems.
ipAddress = function(interface = "eth0") {
	o = System(sprintf("/sbin/ifconfig %s", interface), logLevel = 6, return.output = T);
	ip = fetchRegexpr("(?<=inet addr:)[^ ]+", o$output);
	ip
}
#
# <p> cluster abstraction
#
# Example:
#specifyCluster(localNodes = 8, sourceFiles = c('RgenericAll.R', 'dataPreparation.R'));
#.clRunLocal = F;
#data.frame.types(clapply(l, f, arg1 = 1), rbind = T, do.transpose = T);
# default cluster configuration for clapply()/clapply_cluster()
.defaultClusterConfig = list(
	hosts = list(list(host = "localhost", count = 2, type = "PSOCK")), local = F,
	provideChunkArgument = F, reverseEvaluationOrder = T, splitN = 4, reuseCluster = F,
	nestingLevel = 0,	# records the nesting of clapply calls
	splittingLevel = 1,	# specifies at which level clapply should parallelize
	evalEnvironment = F	# call environment_eval on function before passing on
);
# Cluster state (config + reusable cluster object) lives in this environment.
Snow_cluster_env__ = new.env();
# Record the global cluster configuration used by subsequent clapply() calls.
# Optionally copies `sourceFiles` to remote hosts (.doCopy) and/or sources
# them locally (.doSourceLocally). `hosts` overrides the default
# localhost/PSOCK setup.
specifyCluster = function(localNodes = 8, sourceFiles = NULL, cfgDict = list(), hosts = NULL,
	.doSourceLocally = F, .doCopy = T, splitN = NULL, reuseCluster = F, libraries = NULL,
	evalEnvironment = F) {
	cfg = merge.lists(.defaultClusterConfig,
		cfgDict,
		list(splitN = splitN, reuseCluster = reuseCluster, evalEnvironment = evalEnvironment),
		list(local = F, source = sourceFiles, libraries = libraries, hosts = (if(is.null(hosts))
			list(list(host = "localhost", count = localNodes, type = "PSOCK", environment = list())) else
			hosts)
	));
	assign(".globalClusterSpecification", cfg, envir = Snow_cluster_env__);
	.globalClusterSpecification = get('.globalClusterSpecification', envir = Snow_cluster_env__);
	if (.doCopy) {
		# push source files to each remote host's working directory via scp
		for (h in .globalClusterSpecification$hosts) {
			if (h$host != "localhost" & !is.null(h$env$setwd)) {
				System(sprintf("ssh %s mkdir '%s' 2>/dev/null", h$host, h$env$setwd), 5);
				System(sprintf("scp '%s' %s:'%s' >/dev/null", paste(sourceFiles, collapse = "' '"),
					h$host, h$env$setwd), 5);
			}
		}
	}
	if (.doSourceLocally) {
		sourceFiles = setdiff(sourceFiles, "RgenericAll.R");	# assume we have been sourced
		eval(parse(text =
			paste(sapply(sourceFiles, function(s)sprintf("source('%s', chdir = TRUE);", s)), collapse = "")));
	}
}
#<!> might not be available/outdated
library('parallel');
# l: list, f: function, c: config
# <i><!> test clCfg$reverseEvaluationOrder before uncommenting
# Parallel lapply over a snow/parallel PSOCK cluster described by clCfg.
# Builds (or reuses) the cluster, establishes the per-node environment
# (setwd, source files, libraries), splits `l` into splitN-per-host chunks
# and load-balances them with clusterApplyLB. Per-element errors are caught
# with try(); a failed chunk yields NULL. Returns a flat list of results.
clapply_cluster = function(l, .f, ..., clCfg = NULL) {
	#if (clCfg$reverseEvaluationOrder) l = rev(l);
	# only support SOCK type right now <!><i>
	hosts = unlist(sapply(clCfg$hosts, function(h){
		if (h$type == "PSOCK") rep(h$host, h$count) else NULL}));
	# remote hosts need a routable master address; localhost-only does not
	master = ifelse(all(hosts == "localhost"), "localhost", ipAddress("eth0"));
	establishEnvironment = T;
	cl = if (clCfg$reuseCluster) {
		# create the shared cluster object once; skip env setup on reuse
		if (!exists(".globalClusterObject")) {
			assign(".globalClusterObject", makeCluster(hosts, type = "PSOCK", master = master),
				envir = Snow_cluster_env__);
		} else establishEnvironment = FALSE;
		get('.globalClusterObject', envir = Snow_cluster_env__)
	} else makeCluster(hosts, type = "PSOCK", master = master);
	#clusterSetupRNG(cl); # snow
	clusterSetRNGStream(cl, iseed = NULL);	# parallel
	clusterExport(cl, clCfg$vars);
	# <p> establish node environment (cwd, sourced files, libraries)
	envs = listKeyValue(list.key(clCfg$hosts, "host"), list.key(clCfg$hosts, "environment", unlist = F));
	Log(clCfg, 7);
	if (establishEnvironment) r = clusterApply(cl, hosts, function(host, environments, cfg){
		env = environments[[host]];
		if (!is.null(env$setwd)) setwd(env$setwd);
		if (!is.null(cfg$source)) for (s in cfg$source) source(s, chdir = TRUE);
		if (!is.null(cfg$libraries)) for (package in cfg$libraries) library(package, character.only = TRUE);
		# <!> as of 3.4.2013: stop support of exporting global variables to enable CRAN submission
		#if (!is.null(env$globalVars))
		#	for (n in names(env$globalVars)) assign(n, env$globalVars[[n]], pos = .GlobalEnv);
		#sprintf("%s - %s - %s", host, hapmap, getwd());
		NULL
	}, environments = envs, cfg = clCfg);
	# <p> iterate: one task per index-range chunk of l
	N = clCfg$splitN * length(hosts);	# No of splits
	idcs = splitListIndcs(length(l), N);
	exportNames = c();
	iterator__ = if (clCfg$provideChunkArgument) {
		# variant passing the chunk index .i as second argument to .f
		function(.i, ...) {
			r = lapply(idcs[.i, 1]:idcs[.i, 2], function(j)try(.f(l[[j]], .i, ...)));
			if (class(r) == "try-error") r = NULL;
			r
		}
	} else {
		function(.i, ...){
			r = lapply(idcs[.i, 1]:idcs[.i, 2], function(j)try(.f(l[[j]], ...)));
			if (class(r) == "try-error") r = NULL;
			r
		}
	}
	if (clCfg$evalEnvironment) {
		iterator__ = environment_eval(iterator__, functions = T);
		#clusterExport(cl, varlist = names(as.list(environment(iterator__))), envir = environment(iterator__));
	}
	r = clusterApplyLB(cl, 1:dim(idcs)[1], iterator__, ...);
	# <p> finish up: tear down unless the cluster object is shared
	if (!clCfg$reuseCluster) stopCluster(cl)
	r = unlist(r, recursive = F);
	#if (clCfg$reverseEvaluationOrder) r = rev(r);
	r
}
# wrapper (as of 3.12.8: I seem to have lost a previous change)
# Cluster-aware lapply: runs locally (plain lapply) when .clRunLocal or
# clCfg$local is set, or when the current clapply nesting level is not the
# configured splittingLevel; otherwise delegates to clapply_cluster().
# The nesting level is tracked in the stored global cluster specification.
clapply = function(l, .f, ..., clCfg = NULL, .clRunLocal = rget(".clRunLocal", F, envir = .GlobalEnv)) {
	# <p> get cluster specification (global config overridden by clCfg)
	clCfg = merge.lists(
		rget(".globalClusterSpecification", default = list(), envir = Snow_cluster_env__),
		firstDef(clCfg, list())
	);
	# <p> update cluster specification: entering one nesting level
	clCfg$nestingLevel = clCfg$nestingLevel + 1;
	assign(".globalClusterSpecification", clCfg, envir = Snow_cluster_env__);
	# <p> choose/decline parallelization
	r = if (firstDef(.clRunLocal, clCfg$local, F) || clCfg$nestingLevel != clCfg$splittingLevel) {
		# local fallback; provideChunkArgument passes a constant chunk index 1
		if (clCfg$provideChunkArgument) lapply(X = l, FUN = .f, 1, ...)
		else lapply(X = l, FUN = .f, ...)
	} else {
		clapply_cluster(l, .f, ..., clCfg = clCfg);
	};
	# <p> update cluster specification: leaving the nesting level
	clCfg$nestingLevel = clCfg$nestingLevel - 1;
	assign(".globalClusterSpecification", clCfg, envir = Snow_cluster_env__);
	r
}
# Evaluate a call object produced elsewhere (presumably by callWithArgs-style
# helpers — callEvalArgs is defined outside this chunk): first resolve the
# argument expressions, then dispatch via do.call in the call's environment.
evalCall = function(call) {
	call = callEvalArgs(call);
	do.call(call$f, call$args, envir = call$envir)
}
# do.call wrapper with an explicit fallback environment and optional
# pre-evaluation of the argument list in that environment.
# envirArgs: non-functional, deprecated
Do.call = function(what, args, quote = FALSE, envir = parent.frame(),
	defaultEnvir = .GlobalEnv, envirArgs = NULL, do_evaluate_args = F) {
	if (is.null(envir)) envir = defaultEnvir;
	if (do_evaluate_args) args = nlapply(args, function(e)eval(args[[e]], envir = envir));
	do.call(what = what, args = args, quote = quote, envir = envir)
}
#
# <p> file operations
#
#' Return absolute path for name searched in search-pathes
#'
#' Search for pathes.
#'
#' @param as.dirs assume that prefixes are pathes, i.e. a slash will be put between path and prefix
#' @param force enforces that path and prefix are always joined, otherwise if path is absolute no prefixing is performed
# Returns the first prefix/path combination that exists on disk, NULL when
# none does. Absolute pathes ('/...') and home pathes ('~...') short-circuit
# unless `force` is set.
file.locate = function(path, prefixes = NULL, normalize = T, as.dirs = T, force = F, home = T) {
	if (!force && substr(path, 1, 1) == '/') return(path);
	if (substr(path, 1, 1) == '~' && home) {
		path = path.absolute(path, home = TRUE);
		if (!force) return(path);
	}
	# default search path: current dir (as.dirs) or empty prefix
	if (is.null(prefixes)) prefixes = if (as.dirs) '.' else '';
	sep = ifelse(as.dirs, '/', '');
	for (prefix in prefixes) {
		npath = sprintf('%s%s%s', prefix, sep, path);
		if (normalize) npath = path.absolute(npath);
		if (file.exists(npath)) return(npath);
	}
	NULL
}
#' Read content of file and return as character object.
#'
#' Read content of file and return as character object.
#'
#' Read content of file and return as character object.
#'
#' @param path Path to the file to be read.
#' @param prefixes Search for file by prepending character strings from
#' prefixes.
#' @param normalize Standardize pathes.
#' @param ssh Allow pathes to remote files in \code{scp} notation.
#' @author Stefan Böhringer <r-packages@@s-boehringer.org>
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' parallel8 = function(e) log(1:e) %*% log(1:e);
#' cat(readFile(tempcodefile(parallel8)));
#'
# prefixes only supported locally <!>
readFile = function(path, prefixes = NULL, normalize = T, ssh = F) {
	s = splitPath(path, ssh = ssh);
	r = if (s$is.remote) {
		# remote (scp notation): copy to a local temp file first, then slurp
		tf = tempfile();
		File.copy(path, tf);
		readChar(tf, nchars = as.list(file.info(tf)[1,])$size);
	} else {
		# local: optionally resolve through search prefixes, then slurp whole file
		if (!is.null(prefixes)) path = file.locate(path, prefixes, normalize);
		readChar(path, nchars = as.list(file.info(path)[1,])$size);
	}
	r
}
# Write string `str` to `path` (UTF-8, no trailing separator added).
# With ssh = TRUE, pathes in scp notation are written via a local temp file
# and File.copy; mkpath creates missing directories first. Returns `path`.
writeFile = function(path, str, mkpath = F, ssh = F) {
	s = splitPath(path, ssh = ssh);
	if (s$is.remote) {
		# remote: ensure target dir exists, write locally, then copy over
		Dir.create(sprintf('%s:%s', s$userhost, s$dir), recursive = mkpath);
		tf = tempfile();
		out = file(description = tf, open = 'w', encoding='UTF-8');
		cat(str, file = out, sep = "");
		close(con = out);
		File.copy(tf, path);
	} else {
		if (mkpath) {
			if (!file.exists(s$dir)) dir.create(s$dir, recursive = T);
		}
		out = file(description = path, open = 'w', encoding='UTF-8');
		cat(str, file = out, sep = "");
		close(con = out);
	}
	path
}
# TRUE when `path` (any element, if a vector) starts with a known URL scheme.
isURL = function(path) {
	matched = grep("^(ftp|http|https|file)://", path);
	length(matched) > 0L
}
# Download `url` (following redirects, RCurl) into a temp file and source it.
Source_url = function(url, ...) {
	require('RCurl');
	request = getURL(url, followlocation = TRUE,
		cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"));
	tf = tempfile();
	writeFile(tf, request);
	source(tf, ...)
}
# <!> local = T does not work
# source() replacement accepting URLs and searching local files in
# `locations`; `file` may be a vector.
Source = function(file, ...,
	locations = c('', '.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	sapply(file, function(file) {
		if (isURL(file)) Source_url(file, ...) else {
			file0 = file.locate(file, prefixes = locations);
			source(file = file0, ...)
		}
	})
}
#
# <p> helper functions readTable/writeTable
#
# Compress `pathRaw` into `path` with bzip2 -9 via the shell; removes the
# original unless doRemoveOrig is FALSE or System() is in dry-run mode.
compressPathBz2 = function(pathRaw, path, doRemoveOrig = TRUE) {
	cmd = Sprintf("cat %{pathRaw}q | bzip2 -9 > %{path}q");
	r = System(cmd, 2);
	if (doRemoveOrig && !get('.system.doLogOnly', envir = System_env__)) file.remove(pathRaw);
	r
}
# Dispatch to compressPath<Extension> (e.g. compressPathBz2) by name;
# NULL extension means no compression (returns `path` unchanged).
compressPath = function(pathRaw, path, extension = NULL, doRemoveOrig = TRUE) {
	if (is.null(extension)) return(path);
	compressor = get(Sprintf('compressPath%{extension}u'));
	r = compressor(pathRaw, path, doRemoveOrig = doRemoveOrig);
	r
}
# Decompress the bzip2 file `path` into `pathTmp` via the shell.
# doRemoveOrig: remove the compressed source afterwards (skipped in
# System() dry-run mode). Returns the System() result.
decompressPathBz2 = function(path, pathTmp, doRemoveOrig = FALSE) {
	cmd = Sprintf("cat %{path}q | bunzip2 > %{pathTmp}q");
	r = System(cmd, 2);
	# fixed: previously referenced the undefined variable `pathRaw`, which
	# raised an error whenever doRemoveOrig was TRUE; the file to remove is
	# the compressed source `path` (mirrors compressPathBz2)
	if (doRemoveOrig && !get('.system.doLogOnly', envir = System_env__)) file.remove(path);
	r
}
# Dispatch to decompressPath<Extension> by name; NULL extension returns
# `path` unchanged. Otherwise returns list(destination, pathOrig, return).
decompressPath = function(path, pathTmp, extension = NULL, doRemoveOrig = FALSE) {
	if (is.null(extension)) return(path);
	decompressor = get(Sprintf('decompressPath%{extension}u'));
	r0 = decompressor(path, pathTmp, doRemoveOrig = doRemoveOrig);
	r = list(destination = pathTmp, pathOrig = path, return = r0);
	r
}
# Create a bzip2 connection for `path`; mode '' yields an unopened connection.
compressedConnectionBz2 = function(path, mode = '') {
	conn = bzfile(description = path, open = mode);
	conn
}
# Create a gzip connection for `path`; mode '' yields an unopened connection.
compressedConnectionGz = function(path, mode = '') {
	conn = gzfile(description = path, open = mode);
	conn
}
# Dispatch to compressedConnection<Extension> (Bz2/Gz) by name; NULL
# extension means plain file access — `path` is returned unchanged.
compressedConnection = function(path, extension = NULL, mode = '') {
	if (is.null(extension)) return(path);
	compressor = get(Sprintf('compressedConnection%{extension}u'));
	compressor(path, mode = mode)
}
# Recover the file path behind `conn`: the connection's description for
# connection objects, or the argument itself when it is already a path.
compressedConnectionPath = function(conn) {
	if (inherits(conn, 'connection')) return(summary(conn)$description);
	conn
}
#
# <p> readTable
#
# complete: return only complete data with respect to specified colums
# NA: specify 'NA'-values
# Separator codes usable in extended pathes: T=tab, S=space, C=comma, ';',
# 'S+' = any whitespace (read.table sep = '').
readTableSepMap = list(T = "\t", S = ' ', C = ',', `;` = ';', `S+` = '');
# Parsers for the KEY=VALUE options embedded in extended pathes
# ('[KEY=VAL,...]:path'); each maps the raw option string to its R value.
optionParser = list(
	SEP = function(e)readTableSepMap[[e]],
	QUOTE = function(e)(if (e == 'F') '' else e),
	HEADER = function(e)list(T = T, F = F)[[e]],
	ROW.NAMES = function(e)list(T = T, F = F)[[e]],
	NAMES = function(e)splitString(';', e),
	FACTORS = function(e)splitString(';', e),
	PROJECT = function(e)splitString(';', e),
	`NA` = function(e)splitString(';', e),
	complete = function(e)splitString(';', e),
	# CONST: 'col:value;...' — constant columns to add; integer-looking
	# values are coerced to integer
	CONST = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			v = if (length(fetchRegexpr('^\\d+$', r[2])) > 0) r[2] = as.integer(r[2]) else r[2];
			listKeyValue(r[1], v)
		});
		unlist.n(r, 1)
	},
	# HEADERMAP: 'old:new;...' — column renames applied after reading
	HEADERMAP = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			listKeyValue(r[1], r[2])
		});
		unlist.n(r, 1)
	},
	# tb implemented: <i>: merge.lists recursive
	VALUEMAP = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			listKeyValue(r[1], r[2])
		});
		unlist.n(r, 1)
	},
	COLNAMESFILE = identity,
	SHEET = as.integer
);
# Split an extended path '[KEY=VAL,...]:realpath' into list(path, options),
# running each option value through its optionParser entry. A path without
# the '[...]:' prefix yields empty options.
splitExtendedPath = function(path) {
	q = fetchRegexpr('(?<=^\\[).*?(?=\\]:)', path);
	options = list();
	if (length(q) > 0 && nchar(q) > 0) {
		# strip '[' + options + ']:' (nchar(q) + 3 chars) off the front
		path = substr(path, nchar(q) + 4, nchar(path));
		os = sapply(splitString(',', q), function(e)splitString('=', e));
		os = listKeyValue(os[1, ], os[2, ]);
		os = nlapply(names(os), function(n)optionParser[[n]](os[[n]]));
		options = merge.lists(options, os);
	}
	r = list(path = path, options = options)
}
# Reader: OpenDocument spreadsheet; options$SHEET selects the sheet (1-based).
readTable.ods = function(path, options = NULL) {
	require('readODS');
	sheet = firstDef(options$SHEET, 1);
	read.ods(path)[[sheet]];
}
# <!> changed SEP default "\t" -> ",", 20.5.2015
#readTable.csv.defaults = list(HEADER = T, SEP = "\t", `NA` = c('NA'), QUOTE = '"');
readTable.csv.defaults = list(HEADER = T, SEP = ",", `NA` = c('NA'), QUOTE = '"');
# Reader: delimited text (also used for .txt). Applies extended-path options
# (HEADER/SEP/NA/QUOTE/NAMES/FACTORS), an optional column rename map and an
# optional replacement for the leading column names.
readTable.txt = readTable.csv = function(
	path, options = readTable.csv.defaults, headerMap = NULL, setHeader = NULL, ...) {
	options = merge.lists(readTable.csv.defaults, options);
	t = read.table(path, header = options$HEADER, sep = options$SEP, as.is = T,
		na.strings = options$`NA`, comment.char = '', quote = options$QUOTE, ...);
	if (!is.null(options$NAMES)) names(t)[1:length(options$NAMES)] = options$NAMES;
	if (!is.null(headerMap)) names(t) = vector.replace(names(t), headerMap);
	if (!is.null(setHeader)) names(t) = c(setHeader, names(t)[(length(setHeader)+1): length(names(t))]);
	if (!is.null(options$FACTORS)) t = Df_(t, as_factor = options$FACTORS);
	t
}
# Reader: SPSS .sav via foreign::read.spss.
# NOTE(review): options/headerMap parameters are accepted but unused here.
readTable.sav = function(path, options = NULL, headerMap = NULL, stringsAsFactors = F) {
	require('foreign');
	# read file
	r = read.spss(path);
	as.data.frame(r, stringsAsFactors = stringsAsFactors)
}
# Reader: first object of an .RData file, coerced to data.frame.
readTable.RData = function(path, options = NULL, headerMap = NULL) {
	t = as.data.frame(get(load(path)[1]), stringsAsFactors = F);
	#print(t);
	t
}
# Reader: Excel via gdata::read.xls; `sheet` selects the worksheet.
readTable.xls = function(path, options = NULL, ..., sheet = 1) {
	require('gdata');
	read.xls(path, sheet = sheet, verbose = FALSE);
}
# Formats whose compressed files can be read straight through a connection
# (no explicit decompression-to-temp-file step needed).
tableFunctionConnect = c('csv', 'RData');
# Resolve the reader/writer function for `path` by extension: the function
# named sprintf(template, ext) if it exists, else `default`. Recognizes a
# trailing .bz2/.gz as compression and peels it off to find the real
# extension, allocating a temp file for later decompression.
# Returns list(fct, name, ext, compression, tempfile, path).
tableFunctionForPathMeta = function(path, template = 'readTable.%{ext}s', default = readTable.csv,
	forceReader = NULL) {
	sp = splitPath(path);
	compression = NULL;
	tmpFile = NULL;
	if (firstDef(forceReader, sp$ext) %in% c('bz2', 'gz')) {
		compression = sp$ext;
		sp = splitPath(sp$fullbase);	# extension underneath the compression suffix
		tmpFile = Sprintf('%{file}s.%{ext}s', file = tempfile(), ext = sp$ext);
	}
	name = Sprintf(template, ext = firstDef(forceReader, sp$ext));
	f = if (exists(name)) get(name) else default;
	r = list(
		fct = f, name = name, ext = sp$ext,
		compression = compression, tempfile = tmpFile, path = path
	);
	r
}
# Convenience: just the resolved function from tableFunctionForPathMeta.
tableFunctionForPath = function(path, template = 'readTable.%{ext}s',
	default = readTable.csv, forceReader = NULL) {
	tableFunctionForPathMeta(path, template, default, forceReader)$fct
}
# forceReader: force readerFunction
# Resolve reader metadata for `path` and, for compressed input, replace the
# path with either a compressed connection (formats in tableFunctionConnect)
# or the decompressed temp-file path. Returns the (possibly updated) meta list.
tableFunctionForPathReader = function(path, template = 'readTable.%{ext}s', default = readTable.csv,
	forceReader = NULL) {
	# fixed: forward the caller's `template` (was hard-coded to
	# 'readTable.%{ext}s', silently ignoring the parameter)
	m = m0 = tableFunctionForPathMeta(path, template = template, default = default,
		forceReader = forceReader);
	if (!is.null(m$compression)) {
		path = if (m0$compression %in% tableFunctionConnect)
			compressedConnection(m0$path, m0$compression) else
			decompressPath(m0$path, m0$tempfile, m0$compression)$destination
		m = merge.lists(m0, list(path = path));
	}
	m
}
# <!> as of 23.5.2014: headerMap after o$NAMES assignment
# <i> use tableFunctionForPath
# Read a tabular file, auto-selecting the reader from the extension
# (csv/txt/ods/sav/RData/xls, optionally .bz2/.gz compressed) and applying
# options embedded in an extended path '[KEY=VAL,...]:path': column renames
# (NAMES/HEADERMAP/COLNAMESFILE), projection (PROJECT), row filtering on
# complete columns (complete), constant columns (CONST) and factor coercion.
readTable = function(path, autodetect = T, headerMap = NULL, extendedPath = T, colnamesFile = NULL, ...,
	as_factor = NULL, stringsAsFactors = F, defaultReader = readTable.csv, doRemoveTempFile = TRUE,
	forceReader = NULL) {
	# <p> preparation: flatten path, split off embedded options
	path = join(path, '');
	o = list();
	if (extendedPath) {
		r = splitExtendedPath(path);
		path = r$path;
		o = r$options;
	}
	# <p> read table raw
	sp = splitPath(path);
	reader = if (autodetect && !is.null(sp$ext))
		tableFunctionForPathReader(path, 'readTable.%{ext}s', readTable.csv, forceReader) else
		list(fct = defaultReader, path = path);
	r = reader$fct(reader$path, options = o, ...);
	# <p> cleanup: drop decompression temp file (not in System dry-run mode)
	if (doRemoveTempFile && !get('.system.doLogOnly', envir = System_env__) && !is.null(reader$tempfile))
		file.remove(reader$tempfile);
	# <p> table transformations (order matters: NAMES before headerMap)
	if (!is.null(o$NAMES) && length(o$NAMES) <= ncol(r)) names(r)[1:length(o$NAMES)] = o$NAMES;
	colnamesFile = firstDef(o$COLNAMESFILE, colnamesFile);
	headerMap = c(headerMap, o$HEADERMAP);
	if (!is.null(headerMap)) names(r) = vector.replace(names(r), headerMap);
	if (!is.null(colnamesFile)) {
		ns = read.table(colnamesFile, header = F, as.is = T)[, 1];
		names(r)[1:length(ns)] = ns;
	}
	if (!is.null(o$PROJECT)) r = r[, o$PROJECT];
	if (!is.null(o$complete)) r = r[apply(r[, o$complete], 1, function(e)!any(is.na(e))), ];
	if (!is.null(o$CONST)) { for (n in names(o$CONST)) r[[n]] = o$CONST[[n]]; }
	if (!is.null(as_factor)) r = Df_(r, as_factor = as_factor);
	r
}
#
# <p> writeTable
#
# Defaults for writeTable.table, overridable via extended-path options.
writeTable.defaults = list(
	SEP = ' ',
	ROW.NAMES = FALSE,
	HEADER = TRUE,
	QUOTE = TRUE
);
# Writer: write.table with options taken from writeTable.defaults merged
# with the call's row.names and extended-path options; output optionally
# compressed (doCompress = 'bz2'/'gz').
writeTable.table = function(dataFrame, path, ..., doCompress = NULL, row.names = TRUE, options = list()) {
	o = merge.lists(writeTable.defaults, list(ROW.NAMES = row.names), options);
	conn = compressedConnection(path, doCompress, mode = 'w');
	# with() exposes o's upper-case option names directly to write.table
	with(o, write.table(dataFrame, file = conn, ...,
		row.names = ROW.NAMES, col.names = HEADER, sep = SEP, quote = (QUOTE != '')));
}
# Writer: Excel via WriteXLS. When doCompress is set, the sheet is first
# written to '<path>_raw_' and then compressed into `path` (removing the
# raw file unless doRemoveOrig = FALSE). Returns the WriteXLS result.
writeTable.xls = function(object, path, doCompress = NULL, row.names = TRUE,
	doRemoveOrig = TRUE, options = list()) {
	require('WriteXLS');
	dataFrame = as.data.frame(object);
	pathRaw = if (!is.null(doCompress)) Sprintf('%{path}s_raw_') else path;
	r0 = WriteXLS(dataFrame, ExcelFileName = pathRaw, row.names = row.names);
	# side effect only: compress pathRaw into path (no-op when doCompress NULL)
	compressPath(pathRaw, path, doCompress, doRemoveOrig);
	r0
}
# Writer: write.csv, optionally through a compressed connection.
writeTable.csv = function(dataFrame, path, ..., doCompress = NULL, row.names = TRUE, options = list()) {
	conn = compressedConnection(path, doCompress, mode = 'w');
	write.csv(dataFrame, file = conn, ..., row.names = row.names);
}
# doCompress = 'bz2' to write bz2
# <i><!> determine from path
# Write `object` to a single `path`, auto-selecting the writer from the
# extension (writeTable.<ext>, fallback `defaultWriter`); a .bz2/.gz
# extension implies compression. Returns list(path, return).
writeTableRaw = function(object, path, ..., doCompress = NULL, row.names = TRUE, autodetect = TRUE,
	defaultWriter = writeTable.csv, options = list()) {
	sp = splitPath(path);
	if (!is.null(doCompress) && sp$ext %in% c('bz2', 'gz')) doCompress = sp$ext;
	writer = if (autodetect && !is.null(sp$ext))
		tableFunctionForPath(path, 'writeTable.%{ext}s', writeTable.csv) else defaultWriter;
	if (is.null(writer))
		stop(Sprintf("Writing table to extension '%{ext}s' not supported", ext = sp$ext));
	r0 = writer(object, path = path, ..., doCompress = doCompress, row.names = row.names, options = options);
	r = list(path = path, return = r0);
	r
}
# Public entry: write `object` to one or several pathes; extended pathes
# ('[KEY=VAL,...]:path') carry writer options and force writeTable.table as
# the default writer. With a single path and simplify = TRUE the bare
# writeTableRaw result is returned instead of a one-element list.
writeTable = function(object, path, ..., doCompress = NULL, row.names = TRUE, autodetect = TRUE,
	defaultWriter = writeTable.csv, simplify = TRUE, extendedPath = TRUE) {
	o = list();
	if (extendedPath) {
		r = splitExtendedPath(path);
		path = r$path;
		o = r$options;
		defaultWriter = writeTable.table;
	}
	r = lapply(path, function(p)
		writeTableRaw(object, p, ...,
			doCompress = doCompress, row.names = row.names, autodetect = autodetect,
			defaultWriter = defaultWriter, options = o)
	);
	if (simplify && length(path) == 1) r = r[[1]];
	r
}
#
# <p> swig
#
# Generate, compile and load an R binding for C code via SWIG: writes an
# interface (.i) and source (.c) file to tempdir(), runs swig + gcc to build
# a shared object, dyn.load()s it and sources the generated R wrapper.
# `interface` is a vector of C declarations to expose; `moduleName` defaults
# to a fresh temp-based name. Side effects only (compiler/loader state).
# NOTE(review): include/gcc pathes are hard-coded for a specific host setup.
swigIt = function(interface, code, moduleName = NULL) {
	dir = tempdir();	# will be constant across calls
	if (is.null(moduleName)) {
		t = tempFileName("swig");
		moduleName = splitPath(t)$base;
	}
	# build the SWIG interface file from the declarations
	# (fixed: removed a duplicated, dead `ifile = sprintf(...)` assignment
	# that preceded this block)
	interface = sprintf("
		%%module %s
		%%inline %%{
			%s;
		%%}
	", moduleName, paste(interface, collapse = ";\n\t\t\t"));
	ifile = sprintf("%s/%s.%s", dir, moduleName, "i");
	base = splitPath(ifile)$fullbase;
	writeFile(ifile, interface);
	cfile = sprintf("%s.c", base);
	writeFile(cfile, code);
	#print(list(i = ifile, c = cfile, so = sprintf("%s.so", base)));
	system(sprintf("swig -r %s", ifile));
	#cat(code);
	system(sprintf("cd %s ; gcc -O2 -D__USE_BSD -D__USE_GNU -std=c99 -c -fpic %s.c %s_wrap.c -I/usr/local/lib64/R/include -lm ",
		splitPath(ifile)$dir, base, base));
	system(sprintf("cd %s ; gcc -shared %s.o %s_wrap.o -o %s.so", splitPath(ifile)$dir, base, base, base));
	#dyn.unload(sprintf("%s.so", base));
	dyn.load(sprintf("%s.so", base));
	source(sprintf("%s/%s.R", splitPath(ifile)$dir, moduleName));
}
#
# <p> print
#
# print() wrapper optionally diverting output to `file` via sink().
# Returns print()'s return value (the printed object for default methods).
# Fixed: the diversion is now removed via on.exit(), so an error inside
# print() can no longer leave the sink active.
fprint = function(..., file = NULL, append = F) {
	if (!is.null(file)) {
		sink(file = file, append = append);
		on.exit(sink(), add = TRUE);
	}
	r = print(...);
	r
}
# Capture the stdout produced by evaluating `call_` two frames up and return
# it as a string (via readFile). Fixed: the sink is now guarded by on.exit()
# so a failing evaluation cannot leave the diversion active.
stdOutFromCall = function(call_) {
	tf = tempfile();
	nSink = sink.number();	# diversions already active before ours
	sink(tf);
	# remove (only) our diversion even if evaluation throws
	on.exit(while (sink.number() > nSink) sink(), add = TRUE);
	eval.parent(call_, n = 2);
	# close/flush the diversion before reading the captured output
	while (sink.number() > nSink) sink();
	readFile(tf)
}
#
# cryptography/checksumming
#
# MD5 digest of string `s`, computed by writing it to a temp file and
# hashing with tools::md5sum. `prefix` names the temp file.
md5sumString = function(s, prefix = 'md5generator') {
	require('tools');
	# fixed: honor the `prefix` argument (was hard-coded to 'md5generator')
	path = tempfile(prefix);
	writeFile(path, s);
	md5 = avu(md5sum(path));
	md5
}
#
# <p> package documentation
#
# docFile = sprintf('%s/tmp/docOut.Rd', Sys.getenv('HOME'));
# docDir = sprintf('%s/src/Rpackages/parallelize.dynamic/parallelize.dynamic/man', Sys.getenv('HOME'));
# docs = RdocumentationSkeleton('Rparallel.back.R', 'parallelize.dynamic', output = docFile);
# writeRdocumentationToDir(docFile, docDir);
# Generate Rd skeletons for `items` (names of objects in `envir`): `unparser`
# writes one Rd file per item and returns its path; the files are slurped
# into a named list of Rd texts keyed by file base name.
RdocumentationForObjects = function(items, envir, unparser = function(item, envir)item) {
	files = suppressMessages({
		sapply(items, function(item)unparser(item, envir));
	});
	docs = lapply(files, readFile);
	names(docs) = sapply(files, function(f)splitPath(f)$base);
	docs
}
# Rd skeletons for plain functions via prompt().
RdocumentationForFunctions = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s.Rd", item));
		prompt(get(item, envir = envir), name = item, filename = file);
		file
	});
	docs
}
# Rd skeletons for S4 classes via methods::promptClass().
RdocumentationForClasses = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s-class.Rd", item));
		methods::promptClass(item, filename = file, where = envir);
		file
	});
	docs
}
# Rd skeletons for S4 generics/methods via methods::promptMethods().
RdocumentationForMethods = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s-methods.Rd", item));
		methods::promptMethods(item, filename = file, findMethods(item, where = envir));
		file
	});
	docs
}
# code from packages.skeleton
# Source `R_files` into a fresh environment and classify the resulting
# objects: S4 classes, S4 generics and everything else (non-dot names).
# Returns list(envir, classes, methods, others).
objectsFromCodeFiles = function(R_files, packageName = 'generic') {
	e = new.env(hash = T);
	methods::setPackageName(packageName, e);
	for (f in R_files) sys.source(f, envir = e);
	classes = getClasses(e);
	methods = getGenerics(e);
	others = ls(e, all.names = T);
	others = others[grep('^\\.', others, invert = T)];	# skip dot-names
	r = list(envir = e, classes = classes, methods = methods,
		others = setdiff(setdiff(others, classes), methods));
	r
}
# Build one concatenated Rd skeleton document for all objects defined in
# `R_files`, each section delimited by DOCUMENTATION_BEGIN:<name> /
# DOCUMENTATION_END markers. Optionally written to `output` (existing files
# are never overwritten — a log message asks to move them away first).
RdocumentationSkeleton = function(R_files, output = NULL, packageName = 'generic') {
	os = objectsFromCodeFiles(R_files, packageName = packageName);
	docs = c(
		RdocumentationForFunctions(os$others, os$envir),
		RdocumentationForClasses(os$classes, os$envir),
		RdocumentationForMethods(os$methods, os$envir)
	);
	doc = join(nlapply(docs, function(n) {
		sprintf("\nDOCUMENTATION_BEGIN:%s\n%s\nDOCUMENTATION_END\n", n, docs[[n]])
	}), "\n");
	if (!is.null(output)) {
		if (File.exists(output)) {
			Log(sprintf("Move away file '%s' before writing new skeleton", output), 2);
		} else {
			writeFile(output, doc);
		}
	}
	doc
}
# Split marker-delimited Rd documents (see RdocumentationSkeleton) back into
# individual <name>.Rd files under `pathOut`; cleanOut first removes all
# existing .Rd files there. Returns the written names.
writeRdocumentationToDir = function(pathesIn, pathOut, cleanOut = F) {
	doc = sapply(pathesIn, readFile, USE.NAMES = F);
	r = unlist.n(getPatternFromStrings(doc, '(?s)(?:\\nDOCUMENTATION_BEGIN:)([^\\n]+)\\n(.*?)(?:\\nDOCUMENTATION_END\\n)'), 1);
	Dir.create(pathOut, recursive = T);
	if (cleanOut) {
		files = list_files_with_exts(pathOut, 'Rd');
		file.remove(files);
	}
	nlapply(r, function(n) {
		output = file.path(pathOut, sprintf('%s.Rd', n));
		Log(sprintf('Writing to %s', output), 3);
		writeFile(output, r[[n]]);
	});
	names(r)
}
# Regenerate a package's man/ directory from its combined doc file and
# re-install the package from the local source tree.
reDoc = function(package = 'parallelize.dynamic',
	docFile = sprintf('./%s.doc.Rd', package), docDir = sprintf('./%s/man', package)) {
	writeRdocumentationToDir(docFile, docDir, cleanOut = T);
	install.packages(sprintf('./%s', package), repos = NULL);
	#detach(package);
	#library(package)
}
#
# <p> Rcpp helpers
#
# Compile an inline Rcpp module from marked sections in C++ header files and
# optionally persist the shared objects plus a descriptor to `output`
# (see activateModule for reloading).
# @par name: Rcpp module name as declared in the C++ sources
# @par libpathes: paths to pre-built shared libraries (lib<name>.so) to link
# @par headers: header files containing
#   "// -- begin inline Rcpp" ... "// -- end inline Rcpp" sections
# NOTE(review): mutates process state (.libPaths, PKG_LIBS/PKG_CXXFLAGS env
#   vars, dyn.load) without restoring it — confirm this is intended.
createModule = function(name, libpathes = c(), headers = c(), output = NULL) {
	require('Rcpp');
	require('inline');
	dirs = sapply(libpathes, function(e)splitPath(e)$dir);
	libs = sapply(libpathes, function(e)fetchRegexpr('(?<=lib)(.*)(?=.so)', splitPath(e)$file));
	.libPaths(c(.libPaths(), dirs));
	# build -L/-l linker flags for each supplied library
	libincludes = join(sapply(seq_along(dirs), function(i)sprintf('-L"%s" -l%s', splitPath(dirs[i])$absolute, libs[i])), ' ');
	Sys.setenv(`PKG_LIBS` = sprintf('%s %s', Sys.getenv('PKG_LIBS'), libincludes));
	Sys.setenv(`PKG_CXXFLAGS` = sprintf('%s %s', Sys.getenv('PKG_LIBS'), stdOutFromCall(Rcpp:::CxxFlags())));
	for (lib in libpathes) { dyn.load(lib, local = F) }
	# extract the inline Rcpp sections from the headers
	moduleRegex = '(?s:(?<=// -- begin inline Rcpp\n)(.*?)(?=// -- end inline Rcpp))';
	inc = join(sapply(headers, function(f) fetchRegexpr(moduleRegex, readFile(f))), "\n");
	rcpp = cxxfunction( signature(), '' , includes = inc, plugin = 'Rcpp', verbose = T );
	mod = Module( name, getDynLib(rcpp) );
	if (!is.null(output)) {
		# persist shared objects + descriptor for later activateModule()
		Dir.create(output, recursive = T);
		libfiles = sapply(libpathes, function(lib) {
			File.copy(lib, sprintf('%s/%s', output, splitPath(lib)$file));
			splitPath(lib)$file
		});
		glue = sprintf('%s/%s.so', output, name);
		File.copy(getDynLib(rcpp)[['path']], glue);
		module_descriptor = list(
			name = name,
			libs = c(libfiles, splitPath(glue)$file)
		);
		save(module_descriptor, file = sprintf('%s/module.RData', output));
	}
	mod
}
# Reload an Rcpp module previously persisted by createModule(): read the
# descriptor, (re-)load the shared objects and instantiate the module from
# the last library loaded (the glue .so).
activateModule = function(path) {
	require('Rcpp');
	module_descriptor = get(load(sprintf('%s/module.RData', path))[1]);
	# unload first so repeated activation does not fail; errors are ignored
	r = lapply(module_descriptor$libs, function(lib)try(dyn.unload(sprintf('%s/%s', path, lib)), silent = T));
	r = lapply(module_descriptor$libs, function(lib)dyn.load(sprintf('%s/%s', path, lib), local = F));
	mod = Module( module_descriptor$name, rev(r)[[1]] );
	mod
}
#
# <p> sqlite
#
# Build the SQL creating an sqlite table `data` with the given columns and
# optional indeces; optionally execute it via the sqlite3 command line tool.
# @par columns: character vector of column names
# @par types: named list column -> sqlite type; unspecified columns get 'text'
# @par index: list of character vectors; one CREATE INDEX per element
# @par createAt: path to an sqlite database file; if given, the SQL is run
# @return the generated SQL as a single string
sqlCreateTable = function(columns, types = list(), index = NULL, createAt = NULL) {
	# <fix> default was `types = list` (the builtin function), which would
	# break merge.lists below; an empty list is the intended default
	# <p> create database
	types = merge.lists(listKeyValue(columns, rep('text', length(columns))), types);
	createDbSql = join(sep = "\n", c(
		sprintf('CREATE TABLE data (%s);',
			join(sep = ', ', sapply(columns, function(e)sprintf('%s %s', e, types[e])))),
		if (is.null(index)) c() else sapply(1:length(index), function(i)
			sprintf('CREATE INDEX index_%d ON data (%s);', i, join(index[[i]], sep = ', '))),
		'.quit', ''
	));
	if (!is.null(createAt)) System(sprintf('echo \'%s\' | sqlite3 %s', createDbSql, qs(createAt)), 1);
	createDbSql
}
# Create sqlite database with contents of csv-file
# @par index: list of columns to index
# @par type: sqlite types: integer, real, text, blob, not specified assumes text
# separator-code lookup used by csv2sqlite: presumably maps the single-letter
# codes below to read.table() separators — TODO confirm readTableSepMap
csv2sqlitSepMap = readTableSepMap;
# separator codes (T=tab, S=space, C=comma, `S+`=whitespace run) mapped to
# sqlite `.separator` strings and to `cut -d` arguments, respectively
sepMap = list(T = '\\t', S = ' ', C = ',', `;` = ';', `S+` = '');
sepMapCut = list(T = '\\t', S = '" "', C = ',', `;` = ';', `S+` = '');
# Import a (possibly gzipped) delimited text file into a fresh sqlite
# database via a shell pipeline (cat/zcat | cut | perl | sqlite3).
# @par columnsNames: column names; read from the first line if inputHeader
#   is FALSE and none are given
# @par columnsSelect: subset of columns to import (implemented via `cut`)
# @par NULLs: strings to blank out so sqlite stores empty fields
# NOTE(review): when inputHeader is TRUE and columnsNames is NULL, `columns`
#   ends up NULL — confirm callers always pass names in that case.
csv2sqlite = function(path, output = tempfile(),
	columnsNames = NULL, columnsSelect = NULL,
	index = NULL,
	inputSep = 'T', inputHeader = T, inputSkip = NULL,
	NULLs = NULL, types = list()) {
	# <!> cave: does not heed skip
	if (!inputHeader && is.null(columnsNames)) {
		columnsNames = read.table(path, header = F, nrows = 1, sep = csv2sqlitSepMap[[inputSep]]);
	}
	# <p> select columns
	cut = if (!is.null(columnsSelect)) {
		skipColumnsIds = which.indeces(columnsSelect, columnsNames);
		sprintf('| cut %s -f %s ',
			if (inputSep == 'T') '' else sprintf('-d %s', sepMapCut[[inputSep]]),
			join(skipColumnsIds, ',')
		)
	} else '';
	columns = if (is.null(columnsSelect)) columnsNames else columnsSelect;
	types = merge.lists(listKeyValue(columns, rep('text', length(columns))), types);
	sqlCreateTable(columns, types, index, createAt = output);
	# <p> import data
	skipCommand = if (is.null(inputSkip)) '' else sprintf('| tail -n +%d ', inputSkip + 1);
	reader = if (splitPath(path)$ext == 'gz') 'zcat' else 'cat';
	# sqlite3 init script: set separator, then read table data from stdin
	importSql = writeFile(tempfile(), join(sep = "\n", c(
		sprintf(".separator %s\n", sepMap[[inputSep]]),
		sprintf(".import \"/dev/stdin\" data")
	)));
	sepText = sepMap[[inputSep]];
	# blank out NULL markers so sqlite stores empty fields
	filter = if (is.null(NULLs)) '' else
		sprintf("| perl -pe 's/((?<=%s)(?:%s)(?=%s|$)|(?<=^)(?:%s)(?=%s|$))//g'",
			sepText, join(NULLs, '|'), sepText, sepText, sepText);
	cmd = Sprintf(con(
		"%{reader}s %{path}Q %{skipCommand}s %{cut}s %{filter}s",
		" | sqlite3 -init %{importSql}Q %{output}Q"));
	System(cmd, 1);
	output
}
# <!> unfinished, siphones code from old csv2sqlite function
# Download a delimited file from `url` and import it into an sqlite database;
# unfinished (see note above): only download + header-detection scaffolding.
# @par method: download method forwarded to download.file ('auto' = default)
url2sqlite = function(url, output = tempfile(), header = NULL, skip = NULL, selectColumns = NULL,
	index = NULL, sep = 'T',
	NULLs = NULL, types = list(), method = 'auto') {
	# <p> determine header
	tmp1 = tempfile();
	# <fix> `method` was referenced but never defined; it is now a parameter
	# with the download.file default
	ret = download.file(url, tmp1, method, quiet = FALSE, mode = "w", cacheOK = TRUE);
	#if (ret) stop(sprintf("Download of '%s' failed.", url));
	if (is.null(header)) {
		tmpHeader = tempfile();
	}
}
# <!> 7.1.2015: was qq, but conflicts with QQ-plot function
qquote = function(s)as.character(fetchRegexpr('([^ ]+)', s, captures = T))
# Copy the result of `query` run against sqlite database dbS into a freshly
# created table `data` in database dbD (csv is piped between two sqlite3
# command line processes).
# @par cols/types/index: schema of the destination table (see sqlCreateTable)
# @return path of the destination database dbD
sqlite2sqlite = function(dbS, dbD, query, cols, types = list(), index = NULL) {
	sqlCreateTable(cols, types, index, createAt = dbD);
	cmd = sprintf("echo %s | sqlite3 -init %s %s | sqlite3 -init %s %s",
		qs(query),
		qs(writeFile(tempfile(), ".mode csv")),
		qs(dbS),
		qs(writeFile(tempfile(), ".separator ,\n.import \"/dev/stdin\" data")),
		qs(dbD)
	);
	System(cmd, 1);
	dbD
}
# Open an RSQLite connection to the database file at `path`.
sqliteOpen = function(path) {
	require('RSQLite');
	dbConnect(SQLite(), dbname = path);
}
# Run a SELECT against `table` (default: first table in the database), with
# `query` given as a named list of column -> value equality conditions.
# NOTE(review): conditions appear to be AND-ed via con() — confirm.
sqliteQuery = function(db, query, table = NULL) {
	if (is.null(table)) table = dbListTables(db)[1];
	query = con(sapply(names(query), function(n)Sprintf('%{n}Q = %{v}s', v = qs(query[[n]], force = T))));
	query1 = Sprintf('SELECT * FROM %{table}Q WHERE %{query}s');
	Log(query1, 5);
	dbGetQuery(db, query1);
}
#
# <p> publishing
#
# if (1) {
# .fn.set(prefix = 'results/201404/expressionMonocytes-')
# initPublishing('expressionMonocytes201404', '201405');
# publishFile('results/expressionMonocytesReportGO.pdf');
# }
# module-level state for the file-publishing helpers below
Publishing_env__ <- new.env();
# Initialize the publishing system: remember project name (plus its md5 used
# as folder name), iteration tag and destination root in Publishing_env__.
initPublishing = function(project, currentIteration, publicationPath = '/home/Library/ProjectPublishing') {
	assign('project', project, Publishing_env__);
	assign('projectMd5', md5sumString(project), Publishing_env__);
	assign('currentIteration', currentIteration, Publishing_env__);
	assign('publicationPath', publicationPath, Publishing_env__);
}
# Compute destination paths for publishing `path` under the current project/
# iteration (see initPublishing).
# @par into: optional subdirectory below the iteration folder
# @par as: optional name to publish under (default: basename of path)
# @return list(projectFolder, prefix, destination, destinationPrefix)
# NOTE(review): exists('project') searches the calling frames, not the
#   attached Publishing_env__ list — confirm the guard fires as intended.
publishFctEnv = function(path, into = NULL, as = NULL) with(as.list(Publishing_env__), {
	if (!exists('project')) stop('Publishing system not yet initialized.');
	projectFolder = Sprintf('%{publicationPath}s/%{projectMd5}s');
	prefix = if (is.null(into)) '' else Sprintf('%{into}s/');
	destinationPrefix = Sprintf('%{projectFolder}s/%{currentIteration}s/%{prefix}s');
	destination = Sprintf('%{destinationPrefix}s%{path}s',
		path = if (is.null(as)) splitPath(path)$file else as);
	r = list(projectFolder = projectFolder, prefix = prefix, destination = destination,
		destinationPrefix = destinationPrefix);
	r
})
# Publish a single file into the project's publication folder, creating the
# destination directory and making the project tree world-readable.
# @return the destination path
publishFile = function(file, into = NULL, as = NULL) with(publishFctEnv(file, into, as), {
	if (!is.null(into)) Dir.create(destination, treatPathAsFile = T);
	# <fix> removed stray double quote from the log template (cf. publishDir)
	Logs('Publishing %{file} --> %{destination}s', 3);
	Dir.create(splitPath(destination)$dir, recursive = T);
	System(Sprintf("chmod -R a+rX %{dir}s", dir = qs(projectFolder)), 4);
	file.copy(file, destination, overwrite = T);
	Sys.chmod(destination, mode = '0755', use_umask = F);
	destination
})
# Write `table` to a temporary csv file and publish it under the name `as`;
# `...` is forwarded to write.csv.
publishCsv = function(table, as, ..., into = NULL) {
	# <fix> fileext must include the dot: tempfile() appends fileext verbatim,
	# so 'csv' produced names without an extension separator
	file = tempfile('publish', fileext = '.csv');
	write.csv(table, file = file, ...);
	publishFile(file, into, as);
}
# Publish a whole directory (contents copied recursively) into the project's
# publication folder; see publishFile for the single-file variant.
# @par asSubdir: if TRUE, publish under a subdirectory named after `dir`
# @return the destination path
publishDir = function(dir, into = NULL, as = NULL, asSubdir = FALSE) with(publishFctEnv('', into, as), {
	if (asSubdir) into = splitPath(dir)$file;
	if (!is.null(into)) {
		destination = splitPath(Sprintf('%{destination}s/%{into}s/'))$fullbase;	# remove trailing slash
	}
	Dir.create(destination);
	Logs('Publishing %{dir} --> %{destination}s', 3);
	Dir.create(destination, recursive = T);
	# make tree world-readable before and after copying
	System(Sprintf("chmod -R a+rX %{projectFolder}Q"), 4);
	System(Sprintf("cp -r %{dir}Q/* %{destination}Q"), 4);
	System(Sprintf("chmod -R a+rX %{projectFolder}Q"), 4);
	destination
})
# Bundle `files` into a zip archive (via symlinks in a scratch directory,
# zip -j flattens paths) and publish the archive under the name `as`.
publishAsZip = function(files, as, into = NULL, recursive = FALSE) {
	tmp = tempFileName('publishAsZip', createDir = T, inRtmp = T);
	output = tempFileName('publishAsZip', 'zip', inRtmp = T, doNotTouch = T);
	sapply(files, function(file) {
		File.symlink(splitPath(file)$absolute, Sprintf("%{tmp}s"), replace = F);
		NULL
	});
	recursiveOption = ifelse(recursive, '-r', '');
	System(Sprintf("zip -j %{recursiveOption}s %{output}s %{tmp}s/*"), 2);
	publishFile(output, into = into, as = as);
}
#
# <p> quick pdf generation
#
# Pretty-print a named list of R objects to a pdf: capture print() output in
# a temp file and render it via a2ps | ps2pdf.
# NOTE(review): if print() errors, the sink is not restored (no on.exit) —
#   consider wrapping; left unchanged here.
print2pdf = function(elements, file) {
	es = elements;
	tf = tempfile();
	sink(tf);
	nlapply(es, function(n) {
		cat(n);
		cat('\n-------------------------------------------\n');
		print(es[[n]]);
		cat('\n\n');
	})
	sink();
	System(Sprintf('a2ps %{tf}s --columns 1 --portrait --o - | ps2pdf - - > %{output}s', output = qs(file)));
}
#
# <p> workarounds
#
# fix broken install from dir: create tarball -> install_local
# Work around broken install from a directory: tar the package directory into
# a temp tarball, then devtools::install_local() that tarball.
Install_local = function(path, ...) {
	pkgPath = Sprintf('%{dir}Q/%{base}Q.tgz', dir = tempdir(), base = splitPath(path)$base);
	System(Sprintf('tar czf %{pkgPath}Q %{path}Q'), 2);
	install_local(pkgPath, ...);
}
# misc
# Clear the stored warnings; NOTE(review): baseenv() is locked in recent R
# versions — confirm this assignment still succeeds.
clearWarnings = function()assign('last.warning', NULL, envir = baseenv())#
# Rmeta.R
#Wed Jun 3 15:11:27 CEST 2015
#
# <p> Meta-functions
#
#
# Environments
#
# copy functions code adapted from restorepoint R package
# Deep-copy an object: environments and plain lists are copied recursively,
# everything else is returned as-is (R value semantics suffice).
object.copy = function(obj) {
	# Dealing with missing values
	if (is.name(obj)) return(obj);
	obj_class = class(obj);
	copy =
		if ('environment' %in% obj_class) environment.copy(obj) else
		if (all('list' == class(obj))) list.copy(obj) else
		#if (is.list(obj) && !(is.data.frame(obj))) list.copy(obj) else
		obj;
	return(copy)
}
# Deep-copy each element of a list.
list.copy = function(l)lapply(l, object.copy);
# Restrict an environment to the names given in restrict__ (NULL = keep all).
environment.restrict = function(envir__, restrict__= NULL) {
	if (!is.null(restrict__)) {
		envir__ = as.environment(List_(as.list(envir__), min_ = restrict__));
	}
	envir__
}
# Deep-copy an environment, optionally restricted to the given names.
environment.copy = function(envir__, restrict__= NULL) {
	as.environment(eapply(environment.restrict(envir__, restrict__), object.copy));
}
# Names of free variables referenced by function f (from default arguments
# and from the body), excluding f's own formals.
# @par functions: if TRUE, drop names that resolve to functions in f's
#   enclosing environment
bound_vars = function(f, functions = F) {
	fms = formals(f);
	# variables bound in default arguments
	vars_defaults = unique(unlist(sapply(fms, function(e)all.vars(as.expression(e)))));
	# variables used in the body
	vars_body = setdiff(all.vars(body(f)), names(fms));
	vars = setdiff(unique(c(vars_defaults, vars_body)), c('...', '', '.GlobalEnv'));
	if (functions) {
		vars = vars[!sapply(vars, function(v)is.function(rget(v, envir = environment(f))))];
	}
	vars
}
# names never treated as dependencies of a function
bound_fcts_std_exceptions = c('Lapply', 'Sapply', 'Apply');
# Names of free *functions* called by f that are defined in the global
# environment (i.e. not provided by an attached package and not primitive).
bound_fcts = function(f, functions = F, exceptions = bound_fcts_std_exceptions) {
	fms = formals(f);
	# functions bound in default arguments
	fcts_defaults = unique(unlist(sapply(fms, function(e)all.vars(as.expression(e), functions = T))));
	# functions bound in body
	fcts = union(fcts_defaults, all.vars(body(f), functions = T));
	# remove variables
	#fcts = setdiff(fcts, c(bound_vars(f, functions), names(fms), '.GlobalEnv', '...'));
	fcts = setdiff(fcts, c(bound_vars(f, functions = functions), names(fms), '.GlobalEnv', '...'));
	# remove functions from packages
	fcts = fcts[
		sapply(fcts, function(e) {
			f_e = rget(e, envir = environment(f));
			!is.null(f_e) && environmentName(environment(f_e)) %in% c('R_GlobalEnv', '') && !is.primitive(f_e)
		})];
	fcts = setdiff(fcts, exceptions);
	fcts
}
# Build a fresh environment containing the current values of all free
# variables of f (and, optionally, of its global-environment function
# dependencies, recursively), parented to .GlobalEnv. Used to make a
# function self-contained for serialization (see freeze/thaw below).
environment_evaled = function(f, functions = FALSE, recursive = FALSE) {
	vars = bound_vars(f, functions);
	e = nlapply(vars, function(v) rget(v, envir = environment(f)));
	#Log(sprintf('environment_evaled: vars: %s', join(vars, ', ')), 7);
	#Log(sprintf('environment_evaled: functions: %s', functions), 7);
	if (functions) {
		# also capture dependent functions, each itself made self-contained
		fcts = bound_fcts(f, functions = TRUE);
		fcts_e = nlapply(fcts, function(v){
			#Log(sprintf('environment_evaled: fct: %s', v), 7);
			v = rget(v, envir = environment(f));
			#if (!(environmentName(environment(v)) %in% c('R_GlobalEnv')))
			v = environment_eval(v, functions = TRUE);
		});
		#Log(sprintf('fcts: %s', join(names(fcts_e))));
		e = c(e, fcts_e);
	}
	#Log(sprintf('evaled: %s', join(names(e))));
	r = new.env();
	lapply(names(e), function(n)assign(n, e[[n]], envir = r));
	#r = if (!length(e)) new.env() else as.environment(e);
	parent.env(r) = .GlobalEnv;
	#Log(sprintf('evaled: %s', join(names(as.list(r)))));
	r
}
# Replace f's closure environment by the self-contained snapshot produced by
# environment_evaled().
environment_eval = function(f, functions = FALSE, recursive = FALSE) {
	environment(f) = environment_evaled(f, functions = functions, recursive = recursive);
	f
}
#
# Freeze/thaw
#
# cache environment for objects loaded from delayed-load proxies
delayed_objects_env = new.env();
# Put/remove the cache on the search path.
delayed_objects_attach = function() {
	attach(delayed_objects_env);
}
delayed_objects_detach = function() {
	detach(delayed_objects_env);
}
# Thaw every element of a list (recursively).
thaw_list = function(l)lapply(l, thaw_object, recursive = T);
# Thaw every binding of an environment, keeping its parent.
thaw_environment = function(e) {
	p = parent.env(e);
	r = as.environment(thaw_list(as.list(e)));
	parent.env(r) = p;
	r
}
# <i> sapply
# Thaw a single object: delayed-load proxies are realized via thaw(), lists
# are thawed element-wise (when recursive), anything else passes through.
thaw_object_internal = function(o, recursive = T, envir = parent.frame()) {
	# <fix> class(o) == '...' errors in if() for objects with more than one
	# class (R >= 4.2); inherits() is the robust equivalent
	r = if (inherits(o, 'ParallelizeDelayedLoad')) thaw(o) else
		#if (recursive && class(o) == 'environment') thaw_environment(o) else
		if (recursive && inherits(o, 'list')) thaw_list(o) else o;
	r
}
# Public entry point: ensure the delayed-object cache is attached, then thaw.
thaw_object = function(o, recursive = T, envir = parent.frame()) {
	if (all(search() != 'delayed_objects_env')) delayed_objects_attach();
	thaw_object_internal(o, recursive = recursive, envir = envir);
}
#
# <p> backend classes
#
# Generic: realize a lazily-stored object (see ParallelizeDelayedLoad).
setGeneric('thaw', function(self, which = NA) standardGeneric('thaw'));
# Proxy holding only the path of an RData file; the payload is read on thaw().
setClass('ParallelizeDelayedLoad',
	representation = list(
		path = 'character'
	),
	prototype = list(path = NULL)
);
setMethod('initialize', 'ParallelizeDelayedLoad', function(.Object, path) {
	.Object@path = path;
	.Object
});
# thaw: load the stored file and return the object named by `which`
# (default: first object in the file).
setMethod('thaw', 'ParallelizeDelayedLoad', function(self, which = NA) {
	# dead code kept for reference: caching variant via delayed_objects_env
	if (0) {
	key = sprintf('%s%s', self@path, ifelse(is.na(which), '', which));
	if (!exists(key, envir = delayed_objects_env)) {
		Log(sprintf('Loading: %s; key: %s', self@path, key), 4);
		ns = load(self@path);
		object = get(if (is.na(which)) ns[1] else which);
		assign(key, object, envir = delayed_objects_env);
		gc();
	} else {
		#Log(sprintf('Returning existing object: %s', key), 4);
	}
	#return(get(key, envir = delayed_objects_env));
	# assume delayed_objects_env to be attached
	return(as.symbol(key));
	}
	# NOTE(review): return(r) forces the promise immediately — confirm that
	# the delayedAssign indirection is still needed here.
	delayedAssign('r', {
		gc();
		ns = load(self@path);
		object = get(if (is.na(which)) ns[1] else which);
		object
	});
	return(r);
});
# Derive a deterministic RNG seed from `tag` (mixed with the current
# .Random.seed when one exists); returns list(kind, seed).
RNGuniqueSeed = function(tag) {
	if (exists('.Random.seed')) tag = c(.Random.seed, tag);
	md5 = md5sumString(join(tag, ''));
	r = list(
		kind = RNGkind(),
		seed = hex2int(substr(md5, 1, 8))
	);
	r
}
# Restore RNG kind and seed from a list as produced by RNGuniqueSeed().
RNGuniqueSeedSet = function(seed) {
	RNGkind(seed$kind[1], seed$kind[2]);
	#.Random.seed = freeze_control$rng$seed;
	set.seed(seed$seed);
}
# default options for the freeze/thaw machinery below
FreezeThawControlDefaults = list(
	dir = '.', sourceFiles = c(), libraries = c(), objects = c(), saveResult = T,
	freeze_relative = F, freeze_ssh = T, logLevel = Log.level()
);
# Execute a frozen call specification (see freezeCallEncapsulated): load it
# from freeze_file, attach libraries/sources, restore log level and RNG
# state, then invoke the stored function on its (transformed) arguments.
thawCall = function(
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag)) {
	load(freeze_file, envir = .GlobalEnv);
	r = with(callSpecification, {
		for (library in freeze_control$libraries) {
			eval(parse(text = sprintf('library(%s)', library)));
		}
		for (s in freeze_control$sourceFiles) source(s, chdir = T);
		Log.setLevel(freeze_control$logLevel);
		# <fix> restore the stored RNG state: RNGuniqueSeed() only *computes*
		# a seed and left the RNG untouched; RNGuniqueSeedSet() applies it
		if (!is.null(freeze_control$rng)) RNGuniqueSeedSet(freeze_control$rng);
		if (is.null(callSpecification$freeze_envir)) freeze_envir = .GlobalEnv;
		# <!> freeze_transformation must be defined by the previous source/library calls
		transformation = eval(parse(text = freeze_control$thaw_transformation));
		r = do.call(eval(parse(text = f)), transformation(args), envir = freeze_envir);
		#r = do.call(f, args);
		if (!is.null(freeze_control$output)) save(r, file = freeze_control$output);
		r
	});
	r
}
# Build the shell command line that re-executes a frozen call via R.pl.
# @return the command string
frozenCallWrap = function(freeze_file, freeze_control = FreezeThawControlDefaults,
	logLevel = Log.level(), remoteLogLevel = logLevel)
	with(merge.lists(FreezeThawControlDefaults, freeze_control), {
	sp = splitPath(freeze_file, ssh = freeze_ssh);
	file = if (freeze_relative) sp$file else sp$path;
	# <fix> removed leftover browser() debugging statement
	#wrapperPath = sprintf("%s-wrapper.RData", splitPath(file)$fullbase);
	r = sprintf("R.pl --template raw --no-quiet --loglevel %d --code 'eval(get(load(\"%s\")[[1]]))' --",
		logLevel, file);
	r
})
# Load the result of a thawed call: read the call specification from `file`,
# then load and return the saved output object.
frozenCallResults = function(file) {
	callSpecification = NULL;	# define callSpecification
	load(file);
	get(load(callSpecification$freeze_control$output)[[1]]);
}
# Serialize a matched call (as built by callWithFunctionArgs/encapsulateCall)
# to an RData file so it can later be executed by thawCall(), possibly on a
# different host.
# @par freeze_save_output: if TRUE, thawCall will save the result next to
#   the freeze file
# @par thaw_transformation: function applied to the argument list at thaw
#   time (stored deparsed; must be resolvable after sources/libraries load)
# @return the path of the freeze file
freezeCallEncapsulated = function(call_,
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag),
	freeze_save_output = F, freeze_objects = NULL, thaw_transformation = identity)
	with(merge.lists(FreezeThawControlDefaults, freeze_control), {
	sp = splitPath(freeze_file, ssh = freeze_ssh);
	outputFile = if (freeze_save_output)
		sprintf("%s_result.RData", if (freeze_relative) sp$base else sp$fullbase) else
		NULL;
	# function is stored deparsed; arguments are stored as values
	callSpecification = list(
		f = deparse(call_$fct),
		#f = freeze_f,
		args = call_$args,
		freeze_envir = if (is.null(call_$envir)) new.env() else call_$envir,
		freeze_control = list(
			sourceFiles = sourceFiles,
			libraries = libraries,
			output = outputFile,
			rng = freeze_control$rng,
			logLevel = freeze_control$logLevel,
			thaw_transformation = deparse(thaw_transformation)
		)
	);
	thawFile = if (freeze_relative) sp$file else sp$path;
	callWrapper = call('thawCall', freeze_file = thawFile);
	#Save(callWrapper, callSpecification, thawCall, file = file);
	#Save(c('callWrapper', 'callSpecification', 'thawCall', objects),
	#	file = freeze_file, symbolsAsVectors = T);
	#Save(c(c('callWrapper', 'callSpecification', 'thawCall'), objects),
	Save(c('callWrapper', 'callSpecification', 'thawCall', freeze_objects),
		file = freeze_file, symbolsAsVectors = T);
	freeze_file
})
# <!> assume matched call
# <A> we only evaluate named args
# Evaluate the (named) arguments of an encapsulated call in its stored
# environment, so the call becomes self-contained; optionally also snapshot
# the function's closure environment (env_eval).
callEvalArgs = function(call_, env_eval = FALSE) {
	#if (is.null(call_$envir__) || is.null(names(call_$args))) return(call_);
	#if (is.null(call_$envir) || !length(call_$args)) return(call_);
	# <p> evaluate args
	if (length(call_$args)) {
		args = call_$args;
		callArgs = lapply(1:length(args), function(i)eval(args[[i]], envir = call_$envir));
		# <i> use match.call instead
		names(callArgs) = setdiff(names(call_$args), '...');
		call_$args = callArgs;
	}
	if (env_eval) {
		call_$fct = environment_eval(call_$fct, functions = FALSE, recursive = FALSE);
	}
	# <p> construct return value
	#callArgs = lapply(call_$args, function(e){eval(as.expression(e), call_$envir)});
	call_
}
#callWithFunctionArgs = function(f, args, envir__ = parent.frame(), name = NULL) {
# Package a function and an argument list into the call_ structure used by
# freezeCallEncapsulated(); env_eval snapshots the closure environment first.
callWithFunctionArgs = function(f__, args__, envir__ = environment(f__), name = NULL, env_eval = FALSE) {
	# <fix> the environment_eval() result was previously assigned to an
	# unused local `f`, making env_eval = TRUE a silent no-op
	if (env_eval) f__ = environment_eval(f__, functions = FALSE, recursive = FALSE);
	# NOTE(review): envir__ is accepted but environment(f__) is used below,
	# matching the original behavior — confirm this is intended.
	call_ = list(
		fct = f__,
		envir = environment(f__),
		args = args__,
		name = name
	);
	call_
}
# Convenience wrapper: freeze a direct function call `freeze_f(...)` to an
# RData file (see freezeCallEncapsulated for the parameters).
freezeCall = function(freeze_f, ...,
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag),
	freeze_save_output = F, freeze_envir = parent.frame(), freeze_objects = NULL, freeze_env_eval = F,
	thaw_transformation = identity) {
	# args = eval(list(...), envir = freeze_envir)
	# sys.call()[[2]] recovers the name of the function expression passed in
	call_ = callWithFunctionArgs(f = freeze_f, args = list(...),
		envir__ = freeze_envir, name = as.character(sys.call()[[2]]), env_eval = freeze_env_eval);
	freezeCallEncapsulated(call_,
		freeze_control = freeze_control, freeze_tag = freeze_tag,
		freeze_file = freeze_file, freeze_save_output = freeze_save_output, freeze_objects = freeze_objects,
		thaw_transformation = thaw_transformation
	);
}
# Turn an unevaluated call object into the call_ structure (fct, envir,
# args, name) used by the freeze/thaw machinery; arguments are matched to
# the function's formals and optionally evaluated in envir__.
# <A> assumes .call's function can be found by name via get()
encapsulateCall = function(.call, ..., envir__ = environment(.call), do_evaluate_args__ = FALSE,
	unbound_functions = F) {
	# function body of call
	name = as.character(.call[[1]]);
	fct = get(name);
	# primitives cannot be match.call'ed; take raw argument list instead
	callm = if (!is.primitive(fct)) {
		callm = match.call(definition = fct, call = .call);
		as.list(callm)[-1]
	} else as.list(.call)[-1];
	args = if (do_evaluate_args__) {
		nlapply(callm, function(e)eval(callm[[e]], envir = envir__))
	} else nlapply(callm, function(e)callm[[e]])
	# unbound variables in body fct
	#unbound_vars =
	call_ = list(
		fct = fct,
		envir = envir__,
		#args = as.list(sys.call()[[2]])[-1],
		args = args,
		name = name
	);
	call_
}
#
# </p> freeze/thaw functions
#
#
# Rgraphics.R
#Mon 27 Jun 2005 10:52:17 AM CEST
require('grid');
#
# <p> unit model
#
# base unit is cm
# S4 unit model: each unit class stores a numeric value plus a conversion
# factor to the base unit (cm); fromUnitToUnit converts between any two.
setGeneric("factorToBase", function(this) standardGeneric("factorToBase"));
setGeneric("fromUnitToUnit", function(thisA, thisB) standardGeneric("fromUnitToUnit"));
setClass('unitGeneric', representation = list(value = 'numeric'), prototype = list(value = as.numeric(NA)));
setMethod('initialize', 'unitGeneric', function(.Object, value = as.numeric(NA)) {
	.Object@value = value;
	.Object
});
# convert via the base unit: value * factorA / factorB
setMethod('fromUnitToUnit', c('unitGeneric', 'unitGeneric'), function(thisA, thisB)
	new(class(thisB), value = thisA@value * factorToBase(thisA) / factorToBase(thisB)));
setClass('unitCm', contains = 'unitGeneric');
setMethod('initialize', 'unitCm', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitCm', function(this)1);
setClass('unitInch', contains = 'unitGeneric');
setMethod('initialize', 'unitInch', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitInch', function(this)cm(1));
# dpi units: one dot at the given resolution
setClass('unitDpi150', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi150', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi150', function(this)cm(1)/150);
setClass('unitDpi200', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi200', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi200', function(this)cm(1)/200);
setClass('unitDpi300', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi300', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi300', function(this)cm(1)/300);
# typographic points (72 per inch)
setClass('unitPoints', contains = 'unitGeneric');
setMethod('initialize', 'unitPoints', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitPoints', function(this)cm(1)/72);
# Construct a united value, e.g. valueU(21, 'Cm') -> unitCm instance.
valueU = valueUnited = function(value, unit) {
	class = getClass(Sprintf('unit%{unit}u'));
	new(class, value = value)
}
# Convert a united value to another unit (object / bare numeric variants).
toUnit = function(value, unit)fromUnitToUnit(value, valueU(as.numeric(NA), unit));
ToUnit = function(value, unit)toUnit(value, unit)@value;
#
# </p> unit model
#
cm2in = function(i) (i/2.54)
# Sample function f on an interval and return the points as a data frame.
# Either `count` (number of grid steps) or `steps` (step width) fixes the
# grid; the right interval end is always appended as the last point.
# `...` is accepted for interface compatibility but not forwarded to f.
plotPoints = function(f=sin, interval=c(0,1), count = 1e2, steps = NULL, ...) {
	lo = interval[1];
	hi = interval[2];
	if (is.null(steps)) {
		steps = (hi - lo) / (count + 1);
	} else {
		count = as.integer((hi - lo) / steps);
	}
	xs = c(lo + seq(0, count - 1) * steps, hi);
	ys = sapply(xs, function(x) f(x));
	data.frame(x = xs, y = ys)
}
# Sample f via plotPoints() and draw it as a line, either into a fresh plot
# or added via points() (points = TRUE).
# NOTE(review): `points` is passed positionally into plotPoints' `...` and
#   is ignored there — confirm this is intended.
plotRobust = function(f=sin, interval=c(0,1), count = 1e2, steps = NULL, points = F, ...) {
	pts = plotPoints(f, interval, count, steps, points, ...);
	if (points) {
		points(pts$x, pts$y, type="l");
	} else {
		plot(pts$x, pts$y, type="l");
	}
}
# Variant of plotRobust parameterized by step width instead of count.
robustPlot = function(f=sin, interval=c(0,1), steps = 0.05, points = F, ...) {
	plotRobust(f, interval, steps = steps, points = points, ...);
}
#
# <p> vector functions
#
# Euclidean norm of a numeric vector.
vNorm = function(v) sqrt(sum(v * v))
# Scale v to unit length; returns NA for the zero vector.
vToNorm = toNorm = function(v) {
	len = vNorm(v);
	if (len == 0) return(NA);
	v / len
}
# orthogonal vector in 2D
vPerp = function(v) rev(v) * c(-1, 1)
# the normal of a vector (in 2D), i.e. the perpendicular unit vector
vNormal = function(v) vToNorm(vPerp(v))
#
# <p> graph drawing
#
# draw wedges
# x: x-coordinates
# y: y-coordinates
# w: widthes
# Draw wedges (quadrilaterals tapering between two widths) with grid:
# each row of (x0,y0)-(x1,y1) is a segment, expanded perpendicular to its
# direction by width[,1] at the start and width[,2] at the end.
# @par x0: either a vector (with y0/x1/y1 given) or a data frame of all four
wedge = function(x0, y0 = NULL, x1 = NULL, y1 = NULL, width = NULL, col = "black", ..., defaultWidth = .1) {
	d = if (!is.null(y0)) data.frame(x0, y0, x1, y1) else x0;
	if (is.null(width)) width = matrix(defaultWidth, ncol = 2, nrow = dim(x0)[1]);
	# four corner points per segment: start +/- n*w1/2, end -/+ n*w2/2
	pts = matrix(sapply(1:dim(d)[1], function(i) {
		p1 = d[i, c("x0", "y0")];
		p2 = d[i, c("x1", "y1")];
		w = width[i, ];
		n = vNormal(p2 - p1);	# normal of line
		c(p1 + n * w[1]/2, p1 - n * w[1]/2, p2 - n * w[2]/2, p2 + n * w[2]/2)
	}), ncol = 2, byrow = T);
	grid.polygon(x = pts[, 1], y = pts[, 2], id.lengths = rep(4, dim(d)[1]), gp = gpar(fill=1, col = col))
}
#
# <p> ggplot2
#
#library('ggplot2');
# Plot function f over [from, to], faceted by the variables of `facets`:
# for each combination of facet levels in `data`, the facet values are bound
# into f's environment before evaluation (so f may depend on them).
# NOTE(review): qplotArgs is computed but unused — confirm `...`-forwarding
#   via qplot() below is the intended mechanism.
qplotFaceted = function(f, from = 0, to = 1, data, facets, geom = 'line', ..., by = 0.02) {
	qplot.call = match.call(qplot);
	vars = formula.vars(facets);
	varLevels = unique(data[, vars, drop = F]);
	print(varLevels);
	xs = seq(from, to, by = by);
	r = apply(varLevels, 1, function(r) {
		# bind facet values into a child environment of f's closure
		environment(f) = f.env = new.env(parent = environment(f));
		fl = as.list(r);
		for (n in names(fl)) assign(n, fl[[n]], envir = f.env);
		ys = f(xs);
		d = data.frame(x = xs, y = ys, fl);
		d
	});
	d = rbindDataFrames(r);
	qplotArgs = c(as.list(qplot.call[-1]));
	p = qplot(x, y, data = d, facets = facets, geom = geom, ...);
	p
}
#
# plot to file
#
# default device options for plot_file (inches / dpi, passed to ggsave)
plot_file_DefaultOptions = list(width = 12, height = 12, dpi = 200);
# Render either a ggplot object (via ggsave) or an unevaluated plotting
# expression (via the device matching the file extension) to `file`.
# @return the ggplot object, or an encapsulated call for the expression case
plot_file = function(code_or_object, file = NULL, options = list(), ..., envir = parent.frame()) {
	# the unevaluated first argument, needed for the expression case
	call = sys.call()[[2]];
	if (is.null(file)) file = tempFileName('plot_file', 'pdf', inRtmp = T);
	p = if (any(class(code_or_object) == 'ggplot')) {
		o = merge.lists(plot_file_DefaultOptions, options, list(...));
		with(o, { ggsave(code_or_object, file = file, width = width, height = height, dpi = dpi) });
		code_or_object
	} else {
		# device function is looked up by file extension (e.g. pdf, png)
		device = get(splitPath(file)$ext);
		device(file, ...);
		eval(call, envir = envir);
		dev.off();
		encapsulateCall(call, envir__ = envir);
	}
	p
}
#
# <p> special plots
#
# QQ-plot of p-values against the uniform distribution (on -log10 scale by
# default), with a pointwise (1 - alpha) confidence band derived from the
# beta distribution of uniform order statistics.
ggplot_qqunif = function(p.values, alpha = .05, fontsize = 6,
	tr = function(x)-log(x, 10), trName = '-log10(P-value)', colorCI = "#000099") {
	p.values = tr(sort(p.values));
	N = length(p.values);
	Ns = 1:N;
	# j-th order statistic from a uniform(0,1) sample has beta(j,n-j+1) distribution
	# (Casella & Berger, 2002, 2nd edition, pg 230, Duxbury)
	ciU = tr(qbeta(1 - alpha/2, Ns, N - Ns + 1));
	ciL = tr(qbeta(    alpha/2, Ns, N - Ns + 1));
	d = data.frame(theoretical = tr(Ns/N), ciU = ciU, ciL = ciL, p.value = p.values, colorCI = colorCI);
	p = ggplot(d) +
		geom_line(aes(x = theoretical, y = ciU, colour = colorCI)) +
		geom_line(aes(x = theoretical, y = ciL, colour = colorCI)) +
		geom_point(aes(x = theoretical, y = p.value), size = 1) +
		theme_bw() + theme(legend.position = 'none') + coord_cartesian(ylim = c(0, max(p.values)*1.1)) +
		scale_y_continuous(name = trName) +
		theme(text = element_text(size = fontsize));
	p
}
#ggplot_qqunif(seq(1e-2, 3e-2, length.out = 1e2))
# viewport addressing cell (row x, column y) of a grid layout
vp_at = function(x, y)viewport(layout.pos.row = x, layout.pos.col = y);
# Print a list of grid/ggplot objects onto one page, placed according to a
# two-column matrix of (row, col) coordinates.
plot_grid_grid = function(plots, coords) {
	# <p> do plotting
	grid.newpage();
	# <!> layout might not be respected
	nrow = max(coords[, 1]);
	ncol = max(coords[, 2]);
	pushViewport(viewport(layout = grid.layout(nrow, ncol)));
	sapply(1:length(plots), function(i) {
		print(plots[[i]], vp = vp_at(coords[i, 1], coords[i, 2]));
	});
}
# Base-graphics analogue of plot_grid_grid: each element of `plots` is an
# environment holding an unevaluated plotting expression under
# `envirPlotVar`; cells are arranged via layout().
plot_grid_base = function(plots, coords, envirPlotVar = 'plot') {
	# <p> do plotting
	coordMat0 = matrix(0, nrow = max(coords[, 1]), ncol = max(coords[, 2]));
	coordMat = matrix.assign(coordMat0, coords, 1:length(plots));
	layout(coordMat);
	sapply(1:length(plots), function(i) {
		eval(get(envirPlotVar, plots[[i]]));
	});
#		if (is.environment(plots[[i]])) eval(get(envirPlotVar, plots[[i]])) else print(plots[[i]]);
}
# Arrange a list of plots on an nrow x ncol grid; missing dimensions are
# derived from the number of plots. Dispatches to the base-graphics or the
# grid backend depending on the type of the first plot.
# @par mapper: optional function mapping plot indices to (row, col) coords
plot_grid = function(plots, nrow, ncol, byrow = T, mapper = NULL, envirPlotVar = 'plot') {
	if (missing(nrow)) {
		if (missing(ncol)) {
			ncol = 1;
			nrow = length(plots);
		} else {
			nrow = ceiling(length(plots) / ncol);
		}
	} else if (missing(ncol)) ncol = ceiling(length(plots) / nrow);
	coords = if (is.null(mapper))
		merge.multi(1:nrow, 1:ncol, .first.constant = byrow) else
		mapper(1:length(plots));
	if (is.environment(plots[[1]]))
		plot_grid_base(plots, coords, envirPlotVar) else
		plot_grid_grid(plots, coords)
}
# Write a list of plots to a multi-page pdf at `path`, NperPage plots per
# page (all on one page if NULL); width/height accept united values (see the
# unit model above), bare numerics are interpreted as inches.
plot_grid_to_path = function(plots, ..., path,
	width = valueU(21, 'cm'), height = valueU(29.7, 'cm'), NperPage = NULL, pdfOptions = list(paper = 'a4')) {
	if (class(width) == 'numeric') width = valueU(width, 'inch');
	if (class(height) == 'numeric') height = valueU(height, 'inch');
	Nplots = length(plots);
	# index ranges per page
	pages = if (!is.null(NperPage)) {
		Npages = ceiling(Nplots / NperPage);
		lapply(1:Npages, function(i) {
			Istrt = (i - 1) * NperPage + 1;
			Istop = min(i * NperPage, Nplots);
			Istrt:Istop
		})
	} else list(1:length(plots));
	pdfArgs = c(list(
		file = path, onefile = TRUE, width = ToUnit(width, 'inch'), height = ToUnit(height, 'inch')
	), pdfOptions);
	do.call(pdf, pdfArgs);
	lapply(pages, function(plotIdcs) {
		plot_grid(plots[plotIdcs], ...);
	});
	dev.off();
}
# Scatter plots of adjacent column pairs of `fts` (column i vs i+1),
# colored by `factor`; returns the list of qplot objects.
# NOTE(review): `eval({...})` around the column extraction appears redundant
#   — confirm before simplifying.
plot_adjacent = function(fts, factor, N = ncol(fts)) {
	ns = names(fts);
	ps = lapply(1:(N - 1), function(i){
		x = eval({fts[, i]});
		y = eval({fts[, i + 1]});
		qplot(x, y, color = as.factor(factor), xlab = ns[i], ylab = ns[i + 1]);
	});
}
# Write plots to a multi-page pdf, NperPage per page arranged nrow x ncol;
# any two of (nrow, ncol, NperPage) determine the third.
# NOTE(review): if only one of nrow/ncol is supplied together with a missing
#   NperPage, the derivations below reference the missing value — confirm
#   supported argument combinations.
plot_grid_pdf = function(plots, file, nrow, ncol, NperPage, byrow = T, mapper = NULL,
	pdfOptions = list(paper = 'a4')) {
	Nplots = length(plots);
	if (missing(nrow)) nrow = NperPage / ncol;
	if (missing(ncol)) ncol = NperPage / nrow;
	if (missing(NperPage)) NperPage = ncol * nrow;
	Npages = ceiling(Nplots / NperPage);
	do.call(pdf, c(list(file = file), pdfOptions));
	sapply(1:Npages, function(i) {
		Istrt = (i - 1) * NperPage + 1;
		Istop = min(i * NperPage, Nplots);
		plot_grid(plots[Istrt:Istop], nrow, ncol, byrow = byrow, mapper = mapper);
	});
	dev.off();
}
#
# <p> Kaplan-Meier with ggplot
#
# stolen from the internet
# Convert a survival::survfit object into a plain data frame suitable for
# ggplot (see qplot_survival): one row per event time with n.risk/n.event/
# n.censor/surv/upper/lower, plus a synthetic (time = 0, surv = 1) start row
# per stratum; a `strata` factor column is added when strata are present.
createSurvivalFrame <- function(f.survfit){
	# initialise frame variable
	f.frame <- NULL
	# check if more then one strata
	if(length(names(f.survfit$strata)) == 0){
	# create data.frame with data from survfit
	f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event,
		n.censor = f.survfit$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower)
	# create first two rows (start at 1)
	f.start <- data.frame(time=c(0, f.frame$time[1]), n.risk=c(f.survfit$n, f.survfit$n), n.event=c(0,0),
		n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1))
	# add first row to dataset
	f.frame <- rbind(f.start, f.frame)
	# remove temporary data
	rm(f.start)
	} else {
		# create vector for strata identification
		f.strata <- NULL
		for(f.i in 1:length(f.survfit$strata)){
			# add vector for one strata according to number of rows of strata
			f.strata <- c(f.strata, rep(names(f.survfit$strata)[f.i], f.survfit$strata[f.i]))
		}
		# create data.frame with data from survfit (create column for strata)
		f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event, n.censor = f.survfit
			$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower, strata=factor(f.strata))
		# remove temporary data
		rm(f.strata)
		# create first two rows (start at 1) for each strata
		for(f.i in 1:length(f.survfit$strata)){
			# take only subset for this strata from data
			f.subset <- subset(f.frame, strata==names(f.survfit$strata)[f.i])
			f.start <- data.frame(time=c(0, f.subset$time[1]), n.risk=rep(f.survfit[f.i]$n, 2), n.event=c(0,0), n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1), strata=rep(names(f.survfit$strata)[f.i], 2))
			# add first two rows to dataset
			f.frame <- rbind(f.start, f.frame)
			# remove temporary data
			rm(f.start, f.subset)
		}
		# reorder data
		f.frame <- f.frame[order(f.frame$strata, f.frame$time), ]
		# rename row.names
		rownames(f.frame) <- NULL
	}
	# return frame
	return(f.frame)
}
# define custom function to draw kaplan-meier curve with ggplot
# Draw a Kaplan-Meier curve with ggplot from a frame produced by
# createSurvivalFrame(); strata (if present) are distinguished by color.
# @par f.CI: "default"/TRUE/FALSE — whether to draw confidence-band steps
# @par f.shape: point shape used for censoring marks
# @par layers: extra ggplot layers (e.g. theme_bw()) appended to the plot
qplot_survival = function(f.frame, f.CI = "default", f.shape = 3, ..., title = NULL, layers = NULL){
	# use different plotting commands dependig whether or not strata's are given
	p = if("strata" %in% names(f.frame) == FALSE) {
		# confidence intervals are drawn if not specified otherwise
		if(f.CI == "default" | f.CI == TRUE ){
			# create plot with 4 layers (first 3 layers only events, last layer only censored)
			# hint: censoring data for multiple censoring events at timepoint are overplotted
			# (unlike in plot.survfit in survival package)
			# <fix> geom_step's argument is `direction`, not `directions`
			ggplot(data=f.frame, ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_step(aes(x=time, y=upper), direction="hv", linetype=2) +
				geom_step(aes(x=time,y=lower), direction="hv", linetype=2) +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		} else {
			# create plot without confidence intervalls
			ggplot(data=f.frame) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		}
	} else {
		# without CI
		if(f.CI == "default" | f.CI == FALSE){
			ggplot(data=f.frame, aes(group=strata, colour=strata), ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		} else {
			ggplot(data=f.frame, aes(colour=strata, group=strata), ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_step(aes(x=time, y=upper), direction="hv", linetype=2, alpha=0.5) +
				geom_step(aes(x=time,y=lower), direction="hv", linetype=2, alpha=0.5) +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		}
	}
	if (!is.null(title)) p = p + labs(title = title);
	if (!is.null(layers)) p = p + layers;
	p
}
# Bin x into Nbins groups of (approximately) equal size, using empirical
# quantiles as break points.
# @par x: numeric vector
# @par Nbins: number of bins
# @return factor with levels 1..Nbins
quantileBinning = function(x, Nbins) {
	# <fix> spelled out `length.out` (was partial-matched as `length`)
	cut(x, quantile(x, seq(0, 1, length.out = Nbins + 1)),
		labels = seq_len(Nbins), include.lowest = TRUE)
}
# Kaplan-Meier plot stratified by the (single) right-hand-side variable of
# formula f1, with the log-rank test p-value in the title; a numeric
# stratification variable can be quantile-binned via `levels`.
# @return list(plot = ggplot object, level = strata levels)
# NOTE(review): the Sprintf branch mixes %{title}s templating with a plain
#   %.2e conversion — confirm Sprintf supports this mixture.
kaplanMeierStrat = function(d1, f1, levels = NULL, title = NULL) {
	# <i> only allow one covariate
	stratVar = all.vars(formula.rhs(f1))[1];
	if (!is.null(levels)) {
		d1[[stratVar]] = as.factor(quantileBinning(d1[[stratVar]], levels));
	}
	stratValue = levels(d1[[stratVar]]);
	# <p> log-rank test
	lr = survdiff(as.formula(f1), data = d1);
	p.lr = pchisq(lr$chisq, df = dim(lr$n) - 1, lower.tail = F)
	# <p> kaplan-meyer
	fit = survfit(as.formula(f1), data = d1);
	fit.frame = createSurvivalFrame(fit);
	titleCooked = if (is.null(title))
		sprintf('%s, [P = %.2e]', stratVar, p.lr) else
		Sprintf('%{title}s, [P = %.2e]', p.lr)
	p = qplot_survival(fit.frame, F, 20, title = titleCooked,
		layers = theme_bw());
	list(plot = p, level = stratValue)
}
# Kaplan-Meier plots of formula f1, one per combination of the values of the
# `strata` columns of d; returns a list of kaplanMeierStrat() results
# (NULL for empty strata).
kaplanMeierNested = function(d, f1, strata, combine = FALSE) {
	dStrat = d[, strata];
	cbs = valueCombinations(dStrat);
	plots = apply(cbs, 1, function(r) {
		sel = nif(sapply(1:nrow(d), function(i)all(dStrat[i,] == r)));
		# <fix> replaced leftover browser() by a warning + NULL for empty
		# strata; also dropped an unused duplicate selection vector
		if (sum(sel) == 0) {
			warning(sprintf('kaplanMeierNested: empty stratum %s',
				paste(names(r), r, sep = '=', collapse = ', ')), call. = FALSE);
			return(NULL);
		}
		dSel = d[sel, , drop = F];
		N = sum(sel);
		title = Sprintf('Stratum: %{stratum}s, N = %{N}d',
			stratum = paste(names(r), r, sep = '=', collapse = ', '));
		kaplanMeierStrat(dSel, f1, title = title);
	});
	plots
}
#
# <p> histograms
#
# Colour-blind-friendly palette (hex values); NOTE(review): dead assignment —
# immediately overwritten by the next line.
histogram_colors = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7");
# Effective default palette used by the histogram helpers below.
histogram_colors = c('red', 'blue', 'green', 'yellow');
#dayColor = list(`0` = 'red', `1` = 'blue', `3` = 'green', `8` = 'yellow');
# Overlay per-group histograms of one variable in a single ggplot.
#	data:  data.frame; f1: formula response ~ group (group must be a factor)
#	log10: log-scale the y axis; alpha: transparency of overlaid histograms
# The legend is produced via pseudo rectangles drawn out of view (mx + 1),
# because per-layer fills below do not generate a legend on their own.
histogram_overlayed = function(data, f1,
	groupNames = levels(groups), palette = histogram_colors, log10 = T,
	x_lab = formula.response(f1), title = 'histogram', alpha = .3, breaks = 30) {
	# <p> column names, range
	xn = formula.response(f1);
	gn = formula.covariates(f1);
	lvls = levels(data[[gn]]);
	# pooled binning only to determine the y-range for the legend placement
	tab = table(cut(data[[xn]], breaks));
	#mx = if (log10) 10^ceiling(log10(max(tab))) else max(tab);
	mx = max(tab);
	# <p> create legend using pseudo data (shifted out of view)
	dp = Df(x = rep(0, length(lvls)), y = rep(mx + 1, length(lvls)), group = lvls);
	p = ggplot(dp, aes(x = x)) +
		geom_rect(data = dp, aes(xmin = x, xmax = x + 1, ymin = y, ymax = y + 1, fill = group)) +
		scale_fill_manual(name = gn, values = palette);
	# <p> histograms
	# one layer per group, fixed fill colour; layer order follows factor levels
	for (i in 1:length(lvls)) {
		p = p + geom_histogram(data = data.frame(x = data[[xn]][data[[gn]] == lvls[i]]),
			fill = palette[i], alpha = alpha);
	}
	# <p> log transform
	if (log10) p = p + scale_y_continuous(trans = 'log10') + coord_cartesian(ylim = c(1, mx));
	# <p> final formatting
	p = p + ggtitle(title) + xlab(x_lab);
	p
}
#'@param data: data frame or list
# Overlay one transparent histogram per column/list element of `data`.
#	relative: plot each histogram as fractions of its own counts
#	origin/binwidth: forwarded to geom_histogram
#		(NOTE(review): `origin` is a legacy ggplot2 argument — verify against the
#		ggplot2 version in use)
#	textsize: base text size of the bw theme
histograms_alpha = function(data, palette = histogram_colors, log10 = F,
	x_lab = '', title = 'histogram', alpha = .3, origin = NULL, binwidth = NULL, relative = FALSE,
	textsize = 20) {
	# <p> preparation
	N = length(as.list(data));
	columns = names(data);
	mx = max(unlist(as.list(data)), na.rm = T);
	mn = min(unlist(as.list(data)), na.rm = T);
	# <p> create legend using pseudo data (shifted out of view)
	# pseudo rectangles placed at 2*mx + 2, i.e. beyond the later x-range clip
	dp = Df(x = rep(2*mx + 2, N), y = rep(0, N), group = columns);
	p = ggplot(dp, aes(x = x)) +
		geom_rect(data = dp, aes(xmin = x, xmax = x + .01, ymin = y, ymax = y + .01, fill = group)) +
		scale_fill_manual(name = dp$group, values = palette);
	# <p> histograms
	for (i in 1:N) {
		col = columns[i];
		dfH = data.frame(x = data[[col]]);
		p = p + if (relative)
			geom_histogram(data = dfH, aes(y=..count../sum(..count..)),
				fill = palette[i], alpha = alpha, binwidth = binwidth, origin = origin
			) else
			geom_histogram(data = dfH, fill = palette[i], alpha = alpha, binwidth = binwidth, origin = origin)
	}
	# <p> log transform
	if (log10) p = p + scale_y_continuous(trans = 'log10') + coord_cartesian(ylim = c(1, mx));
	# <p> final formatting
	p = p + coord_cartesian(xlim = c(mn - 1, mx + 1)) + ggtitle(title) + xlab(x_lab) + theme_bw() +
		theme(text = element_text(size = textsize));
	if (relative) p = p + ylab('percentage');
	p
}
#
# <p> saving of plots
#
# base unit is 600dpi
# Unit conversions relative to the 600dpi base unit:
#	`from` maps a value in the named unit into base pixels,
#	`to` maps base pixels back into the named unit.
units_conv = list(
	cm = list(from = function(cm)(cm/2.54*600), to = function(b)(b/600*2.54)),
	points = list(from = function(points)(points/72*600), to = function(b)(b/600*72)),
	inch = list(from = function(i)(i*600), to = function(b)(b/600)),
	dpi150 = list(from = function(dpi)(dpi/150*600), to = function(b)(b*150/600))
);
# Default output unit per graphics device type (see plot_save_raw).
units_default = list(jpeg = 'dpi150', pdf = 'cm', png = 'points');
# Low-level plot saving: open the device named by `type`, render `object`
#	(a ggplot object is print()ed, anything else is evaluated in `envir`),
#	then close the device.
#	width/height: unit-tagged values converted via toUnit into the device's unit
#	unit_out: target unit; defaults per device type (units_default)
plot_save_raw = function(object, ..., width = 20, height = 20, plot_path = NULL,
	type = NULL, options = list(), unit = 'cm', unit_out = NULL, envir = parent.frame()) {
	# look up the device function (pdf, jpeg, png, ...) by name
	device = get(type);
	if (is.null(unit_out)) unit_out = units_default[[type]];
	# NOTE(review): assumes width/height are unit objects with a @value slot (toUnit)
	width = toUnit(width, unit_out)@value;
	height = toUnit(height, unit_out)@value;
	Log(Sprintf('Saving %{type}s to "%{plot_path}s" [width: %{width}f %{height}f]'), 5);
	device(plot_path, width = width, height = height, ...);
	#ret = eval(object, envir = envir);
	ret = if (any(class(object) %in% c('ggplot', 'plot'))) {
		print(object)
	} else {
		eval(object, envir = envir);
	}
	dev.off();
}
# Map file extensions to R graphics device names where they differ.
plot_typeMap = list(jpg = 'jpeg');
# Save a plot object (ggplot or evaluatable expression) to one or several files.
#	The device type is derived from each file extension unless given explicitly;
#	width/height may be plain numbers (interpreted as cm) or unit-tagged values.
# Returns list(path = plot_path, ret = per-file results) (ret unwrapped for a
# single path when simplify = T).
plot_save = function(object, ..., width = valueU(20, 'cm'), height = valueU(20, 'cm'), plot_path = NULL,
	type = NULL,
	envir = parent.frame(), options = list(), simplify = T, unit_out = NULL, createDir = TRUE) {
	# accept raw numbers (incl. integers) as centimeters
	if (is.numeric(width)) width = valueU(width, 'cm');
	if (is.numeric(height)) height = valueU(height, 'cm');
	# bug fix: previously assigned to an unused variable `file`, leaving plot_path NULL
	if (is.null(plot_path)) plot_path = tempFileName('plot_save', 'pdf', inRtmp = T);
	ret = lapply(plot_path, function(plot_path) {
		if (createDir) Dir.create(plot_path, recursive = T, treatPathAsFile = T);
		if (is.null(type) && !is.null(plot_path)) {
			ext = splitPath(plot_path)$ext;
			type = firstDef(plot_typeMap[[ext]], ext);
		}
		Logs("plot_path: %{plot_path}s, device: %{type}s", logLevel = 5);
		plot_save_raw(object, ..., type = type, width = width, height = height, plot_path = plot_path,
			options = options, unit_out = unit_out, envir = envir);
	});
	if (length(plot_path) == 1 && simplify) ret = ret[[1]];
	r = list(path = plot_path, ret = ret);
	r
}
# USAGE:
# plts = exprR1$Eapply(function(data, probe_name) {
# delayedPlot({
# boxplot(model, data, main = main);
# beeswarm(model, data, add = T)
# })
# });
# eval(plts[[1]])
# Wrap a plotting expression into an environment with a lazily-evaluated `plot`
#	binding: the expression only runs when `plot` is first accessed (forced),
#	e.g. via eval() of the returned environment's `plot`.
delayedPlot = function(plotExpr, envir = parent.frame()) {
	e = new.env(parent = envir);
	# promise: plotExpr is captured unevaluated and forced on first access
	delayedAssign('plot', plotExpr, assign.env = e)
	e
}
#
# Rreporting.R
#Mon 06 Feb 2006 11:41:43 AM EST
#
# <p> documentation (by example)
#
# Example:
# create a Reporter instance to report to LaTeX
# r = new("Rreporter", final.path = "/tmp/output.pdf", patterns = "latex");
#
# </p> end documentation
#
#
# <p> generic reporting functions
#
# Format a single table cell value according to a `digits` specification:
#	NA or 'p...'     : return the value verbatim (pass-through / paragraph column)
#	'#k'             : scientific notation with k digits
#	'%k'             : percentage with k digits (the value is multiplied by 100)
#	negative number  : significant digits for small values, fixed otherwise
#	non-negative     : fixed number of decimal places
row.standardFormatter = function(e, digits = NA) {
	if (is.na(digits) || substring(digits, 1, 1) == 'p') return(e);
	value = as.numeric(e);
	spec = substring(digits, 1, 1);
	if (spec == "#") return(sprintf("%.*e", as.numeric(substring(digits, 2)), value));
	if (spec == "%") return(sprintf('%.*f\\%%', as.numeric(substring(digits, 2)), value * 100));
	if (as.numeric(digits) < 0) {
		precision = as.integer(digits);
		# small magnitudes get significant digits, larger ones fixed decimals
		return(ifelse(floor(log10(abs(value))) <= precision,
			sprintf("%.*g", -precision, value),
			sprintf("%.*f", -precision, value)));
	}
	sprintf("%.*f", as.integer(digits), value)
}
# LaTeX backend pattern set for the reporting functions below: longtable-based
#	table fragments, quoting rules, document skeleton and figure-table helpers.
#	Placeholders in UPPER_CASE are later interpolated via mergeDictToString.
latex = list(
	# table patterns
	header = "{LONGTABLESTARTFMT\\begin{longtable}{COLUMN_FORMAT}\nLONGTABLECAPTION",
	columnNames = "%s%s %s\\hline\n",
	separator = " & ",
	hline = "\\hline\n",
	lineEnd = " \\\\\n",
	subHeading = function(h, rowSpan)
		sprintf("\\hline\n & \\multicolumn{%d}{l}{\\bf %s}\\\\\\hline\n", rowSpan, h),
	footer = "\\end{longtable}}\n\n",
	# Derive the longtable column format from digits/alignment/bars and
	# interpolate it (plus caption) into the table string s.
	postProcess = function(s, df, row.formatters, digits, caption, na.value, subHeadings,
		ignoreRowNames, patterns, alignment, startFmt, bars) {
		if (is.null(alignment)) alignment = rep(NA, dim(df)[2]);
		alignment[is.na(alignment) & !is.na(digits)] = 'r';
		alignment[is.na(alignment)] = 'l';
		paragraphs = !is.na(digits) & substring(digits, 1, 1) == 'p';
		alignment[paragraphs] = digits[paragraphs];
		bars = c(bars, rep(F, length(alignment) - length(bars)));
		alignment = ifelse(!bars, alignment, paste(alignment, '|', sep = ''));
		colFmt = sprintf("%s%s", ifelse(ignoreRowNames, "", "r|"),
			paste(alignment, collapse = ""));
		captionPt = if (caption == '') list(LONGTABLECAPTION = '') else
			list(LONGTABLECAPTION = '\\caption{CAPTION}\\\\\n', CAPTION = caption)
		s = mergeDictToString(merge.lists(
			list(COLUMN_FORMAT = colFmt, LONGTABLESTARTFMT = startFmt),
			captionPt), s);
		s
	},
	# Escape LaTeX special characters in plain text.
	quote = function(s, detectFormula = T) {
		s = gsub('_', '\\\\_', s, perl = T);
		s = gsub('&', '\\\\&', s, perl = T);
		s = gsub('~', '$\\\\sim$', s, perl = T);
		s = gsub('([<>])', '$\\1$', s, perl = T);
		s = gsub('\\^2', '$^2$', s, perl = T);
		#ifelse(length(grep('_', s)) > 0, gsub('_', '\\\\_', s, perl = T), s)
		s
	},
	# general text formatting
	newpage = "\n\n\\newpage\n\n",
	section = "\\section{SECTION_NAME}\n\n",
	subsection = "\\subsection{SECTION_NAME}\n\n",
	paragraph = "PARAGRAPH_TEXT\\par\n\n",
	# finalize
	document = "HEADER\n\\begin{document}\nDOC_HERE\n\\end{document}\n",
	docHeader = "\\documentclass[a4paper,oneside,11pt]{article}\n\\usepackage{setspace,amsmath,amssymb, amsthm, epsfig, epsf, amssymb, amsfonts, latexsym, rotating, longtable, setspace, natbib, a4wide,verbatim, caption}\n\\usepackage[utf8x]{inputenc}",
	docCmd = "cd TMP_DIR ; pdflatex TMP_FILE_BASE 1&>/dev/null ; cp TMP_FILE_BASE.pdf OUTPUT_FILE",
	# figure table
	figureTable = list(
		table = "\\begin{center}\\begin{tabular}{COLS}\nROWS\\end{tabular}\\end{center}",
		figure = '\\includegraphics[width=%.3f\\textwidth]{%s}',
		figureCaption = "\\begin{minipage}[b]{%.3f\\linewidth}\\centering
	\\begin{tabular}{c}
	%s\\\\
	\\includegraphics[width=\\textwidth]{%s}
	\\end{tabular}\\end{minipage}\n",
		formatTable = function(rows, cols = 2, template = latex$figureTable$table) {
			mergeDictToString(list(COLS = join(rep('c', cols), ''), ROWS = rows), template)
		},
		# Interleave figure fragments with column separators / row terminators.
		formatRows = function(rows, cols = 2) {
			sep = c(rep(' & ', cols - 1), "\\\\\n");
			seps = rep(sep, (length(rows) + cols - 1) %/% cols);
			seps = seps[1:length(rows)];
			rs = meshVectors(rows, seps);
			r = join(c(pop(rs), "\n"), '');
			#browser();
	#		texRows = sapply(1:(length(rows) - 1), function(i)sprintf('%s%s', rows[i],
	#			ifelse(i %% cols == 1, ' & ', "\\\\\n")));
	#		rs = join(c(texRows, rev(rows)[1], "\n"), '');
	#		rs
		},
		# Render one figure, optionally wrapped into a captioned minipage.
		formatFigure = function(figure, cols = 2, width = 1/cols - 0.05,
			template = latex$figureTable$figure, templateCaption = latex$figureTable$figureCaption,
			caption = '') {
			if (File.exists(figure)) figure = path.absolute(figure);
			caption = if (firstDef(caption, '') != '')
				sprintf(templateCaption, width, caption, figure) else
				sprintf(template, width, figure)
		}
	)
);
# bars: parallel structure to digits: where to insert vertical bars
# Render a data.frame into a LaTeX longtable string using the given pattern set.
#	row.formatters: cycled over rows; digits: per-column format spec
#	(see row.standardFormatter); subHeadings: list(indeces, headings) inserted
#	before the given row numbers. All table fragments come from `patterns`
#	(default: latex), whose names are brought into scope via with().
report.data.frame.toString = function(df = NULL,
	row.formatters = c(row.standardFormatter), digits = NA, caption = "", na.value = "-",
	subHeadings = NULL, ignoreRowNames = F, patterns = latex, names.as = NULL, alignment = NULL,
	quoteHeader = T, quoteRows = T, quoteRowNames = quoteHeader, startFmt = '', bars = NULL) {
	with(patterns, {
		# <p> initialize
		rFmtC = length(row.formatters);
		if (length(digits) == 1) digits = rep(digits, dim(df)[2]);
		t = header; # the nascent table as string
		if (!is.null(names.as)) names(df) = names.as;
		# <p> complete header
		header = if (quoteHeader) sapply(dimnames(df)[[2]], quote) else dimnames(df)[[2]];
		t = con(t, sprintf("%s%s%s%s", ifelse(!ignoreRowNames, separator, ""),
			paste(header, collapse = separator), lineEnd, hline));
		# <p> append rows
		for (i in Seq(1, nrow(df))) {
			# cycle through the supplied row formatters
			row.fmt = row.formatters[[((i - 1) %% rFmtC) + 1]];
			if (i %in% subHeadings$indeces) { # insert subheading
				j = which(subHeadings$indeces == i);
				t = con(t, subHeading(subHeadings$headings[j], dim(df)[2] - ignoreRowNames));
			}
			if (!ignoreRowNames) {
				rowName = dimnames(df)[[1]][i];
				t = con(t, sprintf("%s%s", if (quoteRowNames) quote(rowName) else rowName, separator));
			}
			# <p> formatting and quoting
			values = sapply(1:ncol(df), function(j)
				if (is.na(df[i, j])) na.value else row.fmt(as.character(df[i, j]), digits[j])
			);
			if (quoteRows) values = sapply(values, quote);
			t = con(t, sprintf("%s%s", paste(values, collapse = separator), lineEnd));
		}
		t = con(t, footer);
		t = postProcess(t, df, row.formatters, digits, caption, na.value, subHeadings,
			ignoreRowNames, patterns, alignment, startFmt, bars);
	})
}
# Render a set of figure files as a single LaTeX tabular with `cols` columns.
#	captions: optional vector of per-figure captions, parallel to `figures`.
report.figure.tableSingle = function(figures, cols = 2, width = 1/cols - 0.05, patterns = latex, captions = NULL)
	with(patterns, with(figureTable, {
	# seq_along is robust for empty input (1:length(figures) would yield c(1, 0))
	figs = sapply(seq_along(figures), function(i){
		formatFigure(figures[i], cols = cols, width = width, caption = captions[i])
	});
	rows = formatRows(figs, cols = cols);
	table = formatTable(rows, cols = cols);
	table
}))
# Render figures as one or several LaTeX tabulars, splitting into multiple
#	tables when more than maxRows rows of `cols` columns would be needed.
report.figure.table = function(figures, cols = 2, width = 1/cols - 0.05, patterns = latex,
	captions = NULL, maxRows = 5) with(patterns, {
	perPage = maxRows * cols;	# figures per table
	Npages = ceiling(ceiling(length(figures)/cols) / maxRows);
	r = if (Npages > 1) {
		pages = sapply(1:Npages, function(page) {
			idcs = ((page - 1)*perPage + 1): min((page*perPage), length(figures));
			report.figure.tableSingle(figures[idcs], cols, width, patterns, captions[idcs])
		});
		join(pages, "\n")
	} else report.figure.tableSingle(figures, cols, width, patterns, captions);
	r
})
#
# <p> Rreporter (based on S4 methods)
#
# S4 reporter class: accumulates report fragments in a temp file (tmp.path),
#	finalizes to final.path using the pattern set (default: latex).
setClass("Rreporter",
	representation(tmp.path = "character", final.path = "character", patterns = "list"),
	prototype(tmp.path = sprintf("%s.rr", tempfile()), final.path = NULL, patterns = latex)
);
# Constructor: patterns may be given by name (string) or as a pattern list.
setMethod("initialize", "Rreporter", function(.Object, final.path, patterns = latex) {
	.Object@final.path = final.path;
	.Object@patterns = if (is.character(patterns)) get(patterns) else patterns;
	# create temp file
	cat("", file = .Object@tmp.path);
	.Object
});
# <p> generic methods
# Append a data.frame, rendered via report.data.frame.toString with the
#	reporter's pattern set, to the report file; returns the reporter.
report.data.frame = function(self, df = NULL, row.formatters = c(row.standardFormatter),
	digits = NA, caption = "", na.value = "-", subHeadings = NULL, ignoreRowNames = F, verbose = T) {
	formatted = report.data.frame.toString(df, row.formatters , digits, caption, na.value,
		subHeadings, ignoreRowNames, self@patterns);
	cat(formatted, file = self@tmp.path, append = T);
	if (verbose) cat(formatted);
	self
}
# Emit a page break (per the backend's newpage pattern) into the report file.
report.newpage = function(self) {
	txt = self@patterns$newpage;
	cat(txt, file = self@tmp.path, append = T);
}
# Start a new top-level section with the given name.
report.newsection = function(self, name) {
	txt = mergeDictToString(list(SECTION_NAME = name), self@patterns$section);
	cat(txt, file = self@tmp.path, append = T);
}
# Start a new subsection with the given name.
report.newsubsection = function(self, name) {
	txt = mergeDictToString(list(SECTION_NAME = name), self@patterns$subsection);
	cat(txt, file = self@tmp.path, append = T);
}
# Append a paragraph of text to the report.
report.paragraph = function(self, text) {
	txt = mergeDictToString(list(PARAGRAPH_TEXT = text), self@patterns$paragraph);
	cat(txt, file = self@tmp.path, append = T);
}
# Copy the accumulated report verbatim to its final destination.
#	NOTE: also bound to the global name `finalize` (kept for compatibility).
report.finalize = finalize = function(self) {
	destination = absolutePath(self@final.path);
	System(sprintf("cp \"%s\" \"%s\"", self@tmp.path, destination));
}
# Wrap the accumulated report fragment into a complete LaTeX document,
#	compile it (patterns$docCmd, typically pdflatex) and copy the resulting
#	pdf to final.path.
report.finalizeAsDocument = function(self) {
	# <p> read document to string
	doc = readFile(self@tmp.path);
	# <p> write processed document
	sp = splitPath(self@tmp.path);
	writeFile(sprintf("%s.tex", sp$fullbase),
		mergeDictToString(list(
			# reuse the already-read content (previously read a second time)
			HEADER = self@patterns$docHeader, DOC_HERE = doc
		), self@patterns$document)
	);
	cmd = mergeDictToString(
		list(
			TMP_DIR = sp$dir,
			TMP_FILE = sp$path,
			TMP_FILE_BASE = sp$fullbase,
			OUTPUT_FILE = absolutePath(self@final.path)
		)
	, self@patterns$docCmd)
	System(cmd);
}
#
# <p> end Rreporter (based on S4 methods)
#
#
# <p> convenience methods
#
# Convenience: render a single data.frame into a standalone pdf document.
reportDataFrame2pdf = function(df, file = tempfile(), row.formatters = c(row.standardFormatter),
	digits = NA, caption = "", na.value = "-", subHeadings = NULL, ignoreRowNames = F, verbose = T) {
	reporter = new("Rreporter", final.path = file);
	report.data.frame(reporter, df,
		row.formatters, digits, caption, na.value, subHeadings, ignoreRowNames, verbose);
	report.finalizeAsDocument(reporter);
}
#
# <p> sweave
#
# Run Sweave on <file>.Rnw and compile the result N times with pdflatex.
#	(function name kept as-is — misspelling of "sweave" — for compatibility)
swaeveIt = function(file = NULL, N = 1) {
	System(sprintf("R CMD Sweave '%s.Rnw'", file));
	latexCall = sprintf("sh -c 'pdflatex \"./%s\"'", file);
	for (pass in 1:N) System(latexCall);
}
#
# <p> Sweave replacement
#
# Default document skeleton: SETUP is interpolated with the setup .tex file,
#	TEMPLATE_MAIN with the first extracted template (see .REP.getTemplates).
.REP.standardTemplate = '\\input{SETUP}
\\begin{document}
TEMPLATE_MAIN
\\end{document}
';
# REP.plot('Tag', Qplot(rate, geom = 'histogram', xlab = 'heterocygosity', file = 'dest'));
# REP.plot('Tag', Qplot(sample = ps, dist = qunif, file = 'results/qc-markers-hweQQ.jpg'));
# Defaults for Qplot: output size/resolution and the axis ranges used for the
#	empty placeholder plot emitted on all-NA input.
Qplot_defaults = list(
	width = 5, height = 5, dpi = 150,
	dimx = c(0, 1), dimy = c(0, 100)
);
# qplot wrapper that saves the result to `file` and tolerates all-NA input by
#	emitting an empty placeholder plot (so batch reporting does not abort).
#	A ready-made ggplot object as first argument is saved as-is.
Qplot = function(..., file = NULL, pp = Qplot_defaults) {
	pp = merge.lists(Qplot_defaults, pp);
	args = list(...);
	geom = firstDef(args$geom, 'default');
	# <b> workaround for QQ-plot instead of the expected qplot(...)
	p = if (any(class(args[[1]]) == 'ggplot')) {
		args[[1]]
	} else if (
		# histogram
		(all(is.na(args[[1]])) && geom == 'histogram')
		# xy-plot
		|| (all(is.na(args[[1]]) | is.na(args[[2]])))) {
		ggplot(data = data.frame()) + geom_point() +
			xlim(pp$dimx[1], pp$dimx[2]) +
			ylim(pp$dimy[1], pp$dimy[2]);	# bug fix: upper y-limit used pp$dimx[2]
	} else do.call(qplot, list(...));
	ggsave(p, file = file, width = pp$width, height = pp$height, dpi = pp$dpi);
	file
}
# Save a ready-made ggplot object to file; returns the file path.
#	pp: device parameters; encoding is forwarded by ggsave to the pdf device.
GGplot = function(p, file = NULL, pp = list(width = 5, height = 5, dpi = 150)) {
	ggsave(p, file = file,
		width = pp$width, height = pp$height, dpi = pp$dpi,
		encoding = 'AdobeStd');
	file
}
# Default device options per plot type for Plot() below (passed to pdf()/jpeg()).
PlotDefaults = list(
	pdf = list(width = 6, height = 6),
	jpeg = list(width = 2048, height = 2048)
);
# Render via an arbitrary base-graphics plot function into a file.
#	.plotType selects the device (pdf/jpeg/...); o overrides the device
#	defaults (PlotDefaults); f overrides the plotting function (default: plot).
# Returns the output file path.
Plot = function(..., file = NULL, .plotType = 'pdf', o = NULL, f = NULL) {
	if (is.null(file)) file = tempFileName('reporter', .plotType);
	device = get(.plotType);
	plotFunction = firstDef(f, plot);
	deviceOptions = merge.lists(PlotDefaults[[.plotType]], o);
	do.call(device, c(list(file = file), deviceOptions));
	do.call(plotFunction, list(...));
	dev.off();
	file
}
# Read template files and extract named sub-templates delimited by the regex
#	`re` (default: TEMPLATE_BEGIN ... TEMPLATE_END). Each extracted chunk is
#	expected to start with ':name\n'; files without markers become a single
#	template under an auto-generated name (TEMPL_<i> or the supplied name).
#	locations: search path prefixes for readFile.
# Returns a named list of template strings.
.REP.extractFromTemplates = function(templates, re = '(?s)(?<=TEMPLATE_BEGIN).*?(?=TEMPLATE_END)',
	locations = c('.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	nst = names(templates);
	# <p> set empty template names
	if (is.null(nst)) nst = rep('', length(templates));
	nst[nst == ''] = paste('TEMPL_', 1:sum(nst == ''), sep = '');
	# <p> parse template definitions
	ts = lapply(1:length(templates), function(i) {
		# raw read templates
		templ = readFile(templates[[i]], prefixes = locations);
		tsRaw = fetchRegexpr(re, templ);
		# inline templates
		r = if (length(tsRaw) != 0) {
			# leading ':name' line names each inline template
			ns = sapplyn(tsRaw, function(e)fetchRegexpr('(?<=^:).*?(?=\\n)', e, globally = F));
			# colon, new-line
			ts = sapply(1:length(ns), function(i)substr(tsRaw[i], nchar(ns[i]) + 3, nchar(tsRaw[i])));
			listKeyValue(ns, ts);
		} else {
			listKeyValue(nst[i], templ);
		}
		r
	});
	#r = unlist.n(ts, 1);
	r = merge.lists(ts, listOfLists = T);
	r
}
# Read template files and return a named list of template strings.
#	The extraction logic is shared with .REP.extractFromTemplates (its default
#	regex is the TEMPLATE_BEGIN/TEMPLATE_END marker pair used here); the
#	previously duplicated ~20 lines were removed.
#	Backward compatibility: if the first template carries an auto-generated
#	name it is assumed to be a full document; otherwise it is wrapped into
#	.REP.standardTemplate.
.REP.getTemplates = function(templates, locations = c('.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	r = .REP.extractFromTemplates(templates, locations = locations);
	# backward compatibility: determine whether default template should be used
	if (length(r) > 0) {
		if (names(r)[1] != 'TEMPL_1') { # expect full document template tb specified otherwise
			# interpolate first template into standard template
			r[[1]] = mergeDictToString(list(TEMPLATE_MAIN = r[[1]]), .REP.standardTemplate);
		}
	}
	r
}
# Extract KEY_BEGIN/KEY_END-delimited pattern definitions from template files.
.REP.getPatterns = function(templates) {
	.REP.extractFromTemplates(templates, re = '(?s)(?<=KEY_BEGIN).*?(?=KEY_END)')
}
# Default reporter parameters: files copied next to the compiled document,
#	the LaTeX setup include, the latex binary and template handling flags.
.REP.defaultParameters = list(
	copy.files = 'setup.tex',
	setup = 'setup.tex',
	latex = 'pdflatex',
	useDefaultTemplate = T
);
# create new, global reporter
# Initialize the global reporter state (.REPORTER.ITEMS in .GlobalEnv):
#	templates: path(s) to the main template file(s); cache: optional path to a
#	cache file restored instead of re-parsing (unless resetCache).
#	The state bundles templates, patterns, sub-template bookkeeping, parameters
#	and the derived default output path. Returns NULL; all effects are global.
REP.new = function(templates = NULL, cache = NULL, parameters = list(), resetCache = F,
	latex = 'pdflatex', setup = 'setup.tex') {
	# the setup file is always added to the list of files to copy
	copy.files = merge.lists(.REP.defaultParameters['copy.files'], list(copy.files = setup), concat = TRUE);
	parameters = merge.lists(.REP.defaultParameters,
		parameters,
		list(latex = latex, setup = setup),
		copy.files,
		concat = FALSE);
	if (!is.null(cache) && file.exists(cache) && !resetCache) {
		# restore cached state; only SETUP/parameters are refreshed
		REP.tex('SETUP', setup);
		REP.setParameters(parameters);
		load(file = cache, envir = .GlobalEnv);
	} else {
		templatePathes = c(as.list(templates), parameters$subTemplates);
		ts = .REP.getTemplates(templatePathes);
		ps = merge.lists(
			list(SETUP = setup),
			.REP.getPatterns(templatePathes)
		);
		mainPath = splitPath(as.vector(templates)[1]);
		assign('.REPORTER.ITEMS', list(
			# list of named templates
			templates = ts,
			# patterns to be interpolated
			patterns = ps,
			# housekeeping: tags for consecutively reported subtemplates
			templateTags = list(),
			# parameters passed in
			parameters = parameters,
			# path to the cache file
			cache = cache,
			# create default output name
			output = sprintf('%s.pdf', mainPath$fullbase),
			# name of the template to be used for the global, final document
			mainTemplate = names(ts)[1],
			templatePathes = templatePathes,
			# conditionals
			conditionals = list()
		), pos = .GlobalEnv
		);
	}
	NULL
}
# Re-read templates/patterns into an existing reporter state (no-op when
#	REP.new was never called). Modifying .REPORTER.ITEMS creates a local copy
#	of the global, which is written back explicitly via assign().
REP.refreshTemplates = function(templates) {
	if (!exists('.REPORTER.ITEMS')) return();
	templatePathes = templates;
	ts = .REP.getTemplates(as.list(templates));
	ps = .REP.getPatterns(templatePathes);
	.REPORTER.ITEMS$templates = ts;
	.REPORTER.ITEMS$mainTemplate = names(ts)[1];
	.REPORTER.ITEMS$templatePathes = templatePathes;
	# merge so previously reported patterns survive the refresh
	.REPORTER.ITEMS$patterns = merge.lists(.REPORTER.ITEMS$patterns, ps);
	assign('.REPORTER.ITEMS', .REPORTER.ITEMS, pos = .GlobalEnv);
	REP.save();
}
# Persist the global reporter state to its cache file (if one was configured),
#	creating the cache directory on demand. Returns NULL.
REP.save = function() {
	cache = .REPORTER.ITEMS$cache;
	if (!is.null(cache)) {
		cacheDir = splitPath(cache)$dir;
		if (!file.exists(cacheDir)) dir.create(cacheDir, recursive = T);
		save(.REPORTER.ITEMS, file = cache);
	}
	NULL
}
# Overwrite the reporter parameters (merged onto the defaults) and persist.
REP.setParameters = function(parameters = .REP.defaultParameters) {
	items = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	items$parameters = merge.lists(.REP.defaultParameters, parameters);
	assign('.REPORTER.ITEMS', items, pos = .GlobalEnv);
	REP.save();
}
# Remove previously reported pattern entries (by key) from the reporter state.
REP.unreport = function(keys) {
	items = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	idcs = which.indeces(keys, names(items$patterns));
	if (length(idcs) == 0) return(NULL);
	items$patterns = items$patterns[-idcs];
	assign('.REPORTER.ITEMS', items, pos = .GlobalEnv);
	REP.save();
}
# Set a single pattern entry, creating the global reporter state on demand,
#	and persist the state.
setREPentry = function(key, value) {
	if (!exists('.REPORTER.ITEMS')) assign('.REPORTER.ITEMS', list(), pos = .GlobalEnv);
	items = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	items$patterns[[key]] = value;
	assign('.REPORTER.ITEMS', items, pos = .GlobalEnv);
	REP.save();
}
# Replace the global reporter state wholesale.
setRI = function(ri) {
	assign('.REPORTER.ITEMS', ri, pos = .GlobalEnv)
}
# Set a named conditional flag (used by IF_.../END_IF blocks at finalize time).
REP.setConditional = function(name, v) {
	items = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	if (is.null(items$conditionals)) items$conditionals = list();
	items$conditionals[[name]] = v;
	assign('.REPORTER.ITEMS', items, pos = .GlobalEnv);
	REP.save();
}
# Capture the console output of evaluating `code` and return it as a string.
#	print = T additionally print()s the value (as at the interactive prompt).
#	The sink is now released even if evaluation fails (the previous version
#	left a dangling sink diverting all further output on error).
outputOf = function(code, print = T, envir = parent.frame()) {
	tempFile = tempFileName('reporter', inRtmp = T);
	sink(tempFile);
	tryCatch(
		{ if (print) print(eval(code, envir = envir)) else eval(code, envir = envir) },
		finally = sink()
	);
	output = readFile(tempFile);
	output
}
# Deparse an (unevaluated) expression into its source text, newline-terminated.
#	removeBraces = T strips the surrounding { } lines of a braced block.
expression2str = function(exp, removeBraces = T) {
	strs = deparse(exp);
	if (removeBraces) strs = strs[2:(length(strs) - 1)];
	# stdlib paste() instead of the project-local join() helper (same semantics)
	sprintf("%s\n", paste(strs, collapse = "\n"))
}
# Normalize code given either as a string or as an unevaluated expression into
#	list(code = parsed/quoted expression, text = source text).
codeRepresentation = function(code) {
	r = if (is.character(code)) {
		list(
			code = parse(text = code),
			text = gsub('^\n?(.*)', '\\1', code)	# strip a single leading newline
		)
	} else {
		list(code = code, text = expression2str(code))
	};
	r
}
# Format a number (given as string or numeric) in LaTeX scientific notation;
#	a mantissa that rounds to 1 is dropped, leaving a pure power of ten.
REP.format.sci = function(s, digits = 1) {
	value = as.numeric(s);
	exponent = floor(log10(value));
	mantissa = value * 10^(-exponent);
	r = if (round(mantissa, digits) == 1)
		sprintf("$10^{%d}$", exponent) else
		sprintf("$%.*f \\times 10^{%d}$", digits, mantissa, exponent);
	r
}
# Named formatters available to REP.tex via its fmt argument: font-size
#	wrappers, fixed-decimal/percentage formats, scientific notation and a
#	`file` formatter that symlinks a figure into a persistent /tmp location.
REP.formats = list(
	small = function(s)sprintf("{\n\\small %s\n}", s),
	tiny = function(s)sprintf("{\n\\tiny %s\n}", s),
	percent = function(s)sprintf("%.1f", 100 * as.numeric(s)),
	`.1` = function(s)sprintf("%.1f", as.numeric(s)),
	`.2` = function(s)sprintf("%.2f", as.numeric(s)),
	`.3` = function(s)sprintf("%.3f", as.numeric(s)),
	`.4` = function(s)sprintf("%.4f", as.numeric(s)),
	sci0 = function(s) REP.format.sci(s, 0),
	sci1 = function(s) REP.format.sci(s, 1),
	sci2 = function(s) REP.format.sci(s, 2),
	file = function(f) {
		ri = .REPORTER.ITEMS;
		# due to caching choose a persistent location <!> uniqueness
		tdir = sprintf('/tmp/%s/Rpreporting/%s', Sys.getenv('USER'), names(ri$templates)[1]);
		if (!file.exists(tdir)) dir.create(tdir, recursive = T);
		tf = sprintf('%s/%s', tdir, splitPath(f)$file);
		unlink(tf); # overwrite previous version
		# <!> expect relative filename, spaces in file name not eliminated
		file.symlink(sprintf('%s/%s', getwd(), f), tf);
		tf
	}
);
# Register a text fragment under `name` for later template interpolation.
#	fmt: either a key into REP.formats or an sprintf format string;
#	quote: LaTeX-quote the string first. (`print` is accepted but unused.)
# Returns the (formatted/quoted) string.
REP.tex = function(name, str, print = T, quote = F, fmt = NULL) {
	if (!is.null(fmt) && !is.na(fmt)) {
		formatter = REP.formats[[fmt]];
		str = if (is.null(formatter)) sprintf(fmt, str) else formatter(str);
	}
	if (quote) str = latex$quote(str);	#<i> use backend quoting
	setREPentry(sprintf('%s', name), str);
	str
}
# Like REP.tex, but with LaTeX quoting enabled by default.
REP.texq = function(name, str, print = T, quote = T, fmt = NULL) {
	REP.tex(name, str, print, quote, fmt)
}
# Register a vector as a `sep`-separated (optionally \texttt-wrapped) list,
#	truncated to at most `max` elements with a trailing '...' when truncated.
REP.vector = function(name, v, print = T, quote = T, typewriter = T, sep = ', ', max = 50) {
	# bug fix: remember whether truncation occurs BEFORE truncating — the old
	# code tested length(v) > max afterwards, so '...' was never emitted
	truncated = max > 0 && length(v) > max;
	if (max > 0) v = v[seq_len(min(max, length(v)))];	# seq_len: safe for empty v
	if (typewriter) {
		v = sapply(v, function(s)sprintf('\\texttt{%s}', s));
	}
	REP.tex(name, sprintf('%s%s', join(v, sep), if (truncated) '...' else ''), quote = quote);
}
# Register the SOURCE TEXT of `code` (as <name>_code) and, when execute = T,
#	its captured console output (as <name>_out).
#	NOTE(review): sys.call()[[3]] hard-codes that `code` is the second call
#	element — callers must pass name and code in this positional order.
REP = function(name, code, print = T, execute = T, envir = parent.frame()) {
	c = codeRepresentation(as.list(sys.call())[[3]]);
	setREPentry(sprintf('%s_code', name), c$text);
	if (execute) {
		output = outputOf(c$code, envir = envir);
		setREPentry(sprintf('%s_out', name), output);
		if (print) cat(output);
	}
	NULL
}
# Default ggsave options for REP.plot's ggplot branch.
REP.plotDefaultOptions = list(width = 5, height = 5, dpi = 150);
# Render a plot and register its file path (<name>_plot) and source text
#	(<name>_code). `code` may be a ggplot object (type = 'ggplot'), a file path
#	string (used as-is) or a plotting expression evaluated on a fresh device.
#	copyToTmp: register a temp copy instead of the original file.
#	NOTE(review): like REP, relies on `code` being the second call element.
REP.plot = function(name, code, ..., file = NULL, type = 'pdf', envir = parent.frame(),
	options = list(), copyToTmp = F) {
	#c = codeRepresentation(as.list(sys.call())[[3]]);
	c = codeRepresentation(sys.call()[[3]]); # as of version R 3.0.1
	if (is.null(file)) file = tempFileName('reporter', 'pdf', inRtmp = T);
	if (type == 'ggplot') {
		o = merge.lists(REP.plotDefaultOptions, options, list(...));
		with(o, { ggsave(code, file = file, width = width, height = height, dpi = dpi) });
	} else if (is.character(code)) {
		# a string is taken to be an already-rendered plot file
		file = code;
	} else {
		device = get(type);
		device(file, ...);
		eval(c$code, envir = envir);
		dev.off();
	}
	pathToFile = path.absolute(file);
	if (copyToTmp) {
		fileTmp = tempFileName('reporter', splitPath(pathToFile)$ext, inRtmp = T);
		file.copy(pathToFile, fileTmp, overwrite = T);
		pathToFile = fileTmp;
	}
	# an empty file signals a failed render; register an empty path instead
	if (file.info(pathToFile)$size == 0) {
		pathToFile = '';
	}
	setREPentry(sprintf('%s_plot', name), pathToFile);
	setREPentry(sprintf('%s_code', name), c$text);
	NULL
}
# tag allows to search for overloading templates (_tag). This can be used in reportSubTemplate to
# conditionally report templates
# Interpolate all registered patterns into the named template (iteratively) and
#	resolve IF_<name>...END_IF conditional blocks. Conditionals are processed
#	longest-name-first so a name does not shadow names it is a prefix of.
.REP.interpolateTemplate = function(templName, conditionals = list(), tag = NULL) {
	ri = .REPORTER.ITEMS;
	if (!is.null(tag) && !is.null(ri$templates[[sprintf('%s_%s', templName, tag)]]))
		templName = sprintf('%s_%s', templName, tag);
	s = ri$templates[[templName]]
	#s = readFile(tpath);
	s = mergeDictToString(.REPORTER.ITEMS$patterns, s, iterative = T);
	lengths = sapply(names(conditionals), nchar);
	for (n in names(conditionals)[rev(order(lengths))]) {
		# keep the block body when the conditional is TRUE, drop it otherwise
		s = gsub(sprintf('IF_%s(.*?)END_IF', n), if (conditionals[[n]]) '\\1' else '', s);
	}
	s
}
# initialize a series of reportSubTemplate calls followed by a finalizeSubTemplate call
# Clears any previously accumulated sub-template instance list for `subTemplate`.
REP.reportSubTemplateInitialize = function(subTemplate) {
	# (removed unused local copy of .REPORTER.ITEMS$patterns)
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	REP.unreport(subPatterns);
}
# Report one instance of a sub-template: interpolate its current pattern values
#	under a unique tag (auto-incremented per sub-template when not supplied)
#	and append its key to the TEMPLATE:<name>:subTemplates accumulator, which
#	REP.finalizeSubTemplate later collapses.
REP.reportSubTemplate = function(subTemplate, tag = NULL, conditionals = list()) {
	ri = .REPORTER.ITEMS;
	# tag
	if (is.null(tag)) {
		tt = ri$templateTags;
		# auto-increment a per-sub-template counter kept in the global state
		tag = ri$templateTags[[subTemplate]] =
			ifelse (is.null(tt[[subTemplate]]), 0, tt[[subTemplate]]) + 1;
		setRI(ri);
	}
	# finalize subTemplates
	patterns = ri$patterns;
	subPattern = sprintf('TEMPLATE:%s_%s', subTemplate, as.character(tag));
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	# set own entry
	setREPentry(subPattern, .REP.interpolateTemplate(subTemplate, tag = tag));
	# collect all subTemplates
#	for (st in names(ri$parameters$subTemplates)) {
#		i = which.indeces(sprintf('TEMPLATE:%s_.*', st), names(.REPORTER.ITEMS$patterns), regex = T);
#		setREPentry(sprintf('TEMPLATE:%s:subTemplates', st), join(unlist(names(patterns[i])), "\n"));
#	}
	#i = which.indeces(sprintf('TEMPLATE:%s_.*', subTemplate), names(.REPORTER.ITEMS$patterns), regex = T);
	# append new element
	setREPentry(subPatterns, join(c(patterns[[subPatterns]], subPattern), "\n"));
	REP.save();
}
# Interpolate all accumulated instances of a sub-template into its final
#	TEMPLATE:<name> entry and remove the per-instance bookkeeping entries.
REP.finalizeSubTemplate = function(subTemplate) {
	# finalize subTemplates
	patterns = .REPORTER.ITEMS$patterns;
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	# bug fix: guard on the stored accumulator entry — the old check
	# is.null(subPatterns) tested the sprintf result, which is never NULL;
	# without reported instances there is nothing to interpolate or clean up
	if (is.null(patterns[[subPatterns]])) return(NULL);
	text = mergeDictToString(patterns, patterns[[subPatterns]], iterative = T);
	setREPentry(sprintf('TEMPLATE:%s', subTemplate), text);
	# remove trail
	subPattern = splitString("\n", .REPORTER.ITEMS$patterns[[subPatterns]]);
	#print(c(subPatterns, subPattern));
	REP.unreport(c(subPatterns, subPattern));
	REP.save();
}
# Produce the final document: copy/symlink auxiliary files into a scratch
#	directory, interpolate the main template (resolving conditionals), run
#	latex `cycles` times and copy the resulting pdf (and .tex) to the output.
#	conditionals also become a suffix of the default output file name.
REP.finalize = function(conditionals = list(), verbose = FALSE, cycles = 1, output = NULL) {
	# <p> vars
	ri = .REPORTER.ITEMS;
	# <p> prepare
	# fresh scratch dir: claim a temp name, then turn it into a directory
	dir = tempFileName('rreporter', inRtmp = T);
	file.remove(dir);
	dir.create(dir);
	# <!> assume relative pathes
	for (cpath in .REPORTER.ITEMS$parameters$copy.files) {
		if (splitPath(cpath)$isAbsolute) {
			dest = sprintf('%s/%s', dir, splitPath(cpath)$file);
			Log(sprintf('Reporting: symlinking %s -> %s', cpath, dest), 4);
			file.symlink(cpath, dest);
		} else {
			# search cwd and the template directories; first hit wins
			for (sdir in c('', getwd(), sapply(ri$templatePathes, function(tp)splitPath(tp)$dir))) {
				source = sprintf('%s/%s/%s', getwd(), sdir, cpath);
				Log(sprintf('Reporting: dir %s', sdir), 4);
				if (file.exists(source)) {
					dest = sprintf('%s/%s', dir, cpath);
					Log(sprintf('Reporting: symlinking %s -> %s', source, dest), 4);
					file.symlink(source, dest);
					break;
				}
			}
		}
	}
	# <p> create final document
	tn = names(ri$templates)[1];
	allConditionals = merge.lists(ri$conditionals, conditionals);
	s = .REP.interpolateTemplate(ri$mainTemplate, allConditionals);
	# <p> run latex to produce temp file
	tmpPath = sprintf('%s/%s.tex', dir, tn);
	writeFile(tmpPath, s);
	Log(readFile(tmpPath), 5)
	latexCmd = firstDef(ri$parameters$latex, 'pdflatex');
	# multiple cycles resolve cross-references/longtable layout
	for (i in 1:cycles) {
		r = System(Sprintf('cd %{dir}s ; %{latexCmd}s -interaction=nonstopmode \"%{tn}s\"'),
			4, return.output = T);
		if (r$error > 0) Log(Sprintf("%{latexCmd}s exited with error."), 1);
		if (r$error > 0 || (verbose && i == 1)) Log(r$output, 1);
		#if (r$error > 0) break;
	}
	# <p> output
	# append active conditional names to the default output file name
	postfix = join(names(conditionals[unlist(conditionals)]), '-');
	if (postfix != '') postfix = sprintf('-%s', postfix);
	#fileOut = sprintf('%s%s%s.pdf', splitPath(tpath)$base, if (postfix == '') '' else '-', postfix);
	#fileOut = sprintf('%s%s%s.pdf', tn, if (postfix == '') '' else '-', postfix);
	if (is.null(output))
		output = if (exists('.globalOutput'))
			.fn(sprintf('%s%s', splitPath(ri$output)$base, postfix), 'pdf') else ri$output;
	Log(sprintf('Writing to output %s', output), 4);
	file.copy(sprintf('%s.pdf', splitPath(tmpPath)$fullbase), output, overwrite = T);
	file.copy(sprintf('%s.tex', splitPath(tmpPath)$fullbase),
		sprintf('%s.tex', splitPath(output)$fullbase), overwrite = T);
}
#
# <p> helpers
#
# Register a figure table under nameTag; ggplot objects are first saved to
#	temporary pdf files, plain path strings are used as-is.
REP.reportFigureTable = function(nameTag, namesPlots, cols = 2, captions = NULL) {
	paths = sapply(namesPlots, function(p) {
		if ('ggplot' %in% class(p)) {
			tmp = tempfile(fileext = '.pdf');
			ggsave(tmp, plot = p);
			tmp
		} else p
	});
	REP.tex(nameTag, report.figure.table(paths, cols = cols, captions = captions));
}
#
# Example code
#
# # refresh only valid after a REP.new call
# REP.refreshTemplates('gwas/reportGwas.tex')
# REP.new(
# 'gwas/reportGwas.tex',
# cache = sprintf('%s/reportGWAS_cache', outputDir),
# resetCache = resetCache
# );
# # reporting
# REP.tex('G:DESCRIPTION', firstDef(o$studyDescription, ''));
# REP.tex('G:ROUNDNAME', firstDef(o$runName, 'unnamed'));
# REP.finalize(verbose = T, output = sprintf('%s/reportGwas-%s.pdf', outputDir, o$runName), cycles = 3);
# # reporting patterns
# REP.tex('ASS:TABLE', report.data.frame.toString(
# psTop,
# digits = c(rep(NA, length(varsMap)), '#2', rep(2, length(Evars)), '#2', 2),
# names.as = rep.names, quoteHeader = F,
# caption = caption
# ), fmt = 'tiny');
# REP.tex('ASS:QQ:INFLATION', inflation, fmt = '.2');
# REP.plot('ASS:QQ:ASSOCIATION', Qplot(sample = ps$P, dist = qunif,
# file = sprintf('%s/ass-QQ-%s.jpg', outputDir, tag2fn(tag))));
# REP.tex('QC:SAMPLE:MDS:Outlier', fraction(qcMdsOutliers), fmt = 'percent');
#
# # sub-templates
# REP.reportSubTemplateInitialize('association');
# for (m in expandedModels$models) with(m, {
# REP.tex('ABC', 2);
# REP.reportSubTemplate('association', tag);
# });
# REP.finalizeSubTemplate('association');
#
# Rfunctions.R
#Tue 14 Aug 2007 01:39:42 PM CEST
#
# <§> abstract data functions
#
# Numerically invert function f on `interval`: returns a vectorized function
# mapping y to the x minimizing (y - f(x))^2; extra arguments are forwarded
# to f and to optimize().
inverse = function(f, interval = c(-Inf, Inf)) {
	Vectorize(function(y, ...) {
		residual2 = function(x, ...) (y - f(x, ...))^2;
		optimize(residual2, interval = interval, ...)$minimum
	})
}
#
# <p> meta functions
#
# Call the function named `fctName` with the named argument list `args`.
# Previous implementation built "f(name1, name2)" as text and eval(parse())'d
# it, so only the argument *names* were passed and their values were looked up
# in the wrong environment; do.call passes the supplied values directly (the
# originally intended name = value call, cf. the old commented-out code).
callWithArgs = function(fctName, args) {
	do.call(fctName, as.list(args))
}
# do.call variant: with restrictArgs = T, entries of `args` whose names are not
# formal arguments of f are dropped before the call — unless f accepts `...`,
# in which case all arguments are forwarded unchanged.
# NOTE(review): which.indeces is a project helper (not base R).
.do.call = function(f, args, restrictArgs = T) {
	if (restrictArgs) {
		# formal argument names of f; drop the empty trailing name from args()
		fargs = names(as.list(args(f)));
		fargs = fargs[fargs != ''];
		if (all(fargs != '...')) args = args[which.indeces(fargs, names(args))];
	}
	do.call(f, args)
}
#
# <p> benchmarking
#
# Run .f(...) N__ times and report the mean wall-clock time per call; prints
# the per-call time and start/end timestamps, and returns them together with
# the result of the last call.
benchmark.timed = function(.f, ..., N__ = 1e1) {
	started = Sys.time();
	for (rep__ in 1:N__) lastResult = .f(...);
	stopped = Sys.time();
	timing = list(time = (stopped - started)/N__, lastResult = lastResult, t0 = started, t1 = stopped);
	print(timing$time);
	print(timing$t0);
	print(timing$t1);
	timing
}
#
# Rstatistic.R
#Fri 19 Jan 2007 11:06:44 PM CET
# contains simple statistics to evaluate consulting questions
# One-row descriptive summary (mean, median, sd, quantiles) of a numeric
# sample, formatted via the project helper col.frame.
sizesDesc = function(s) {
	col.frame(list(
		mean = mean(s),
		median = median(s),
		stddev = sqrt(var(s)),
		quantiles = quantile(s)
	), do.paste = " ", digits = 1)
}
# Print descriptive summaries for all samples in list l and two-sample tests
# (t-test, Wilcoxon) comparing the first two. Side effects only (printing).
compareSamples = function(l) {
	desc = data.frame(lapply(l, function(e)sizesDesc(e)));
	print(desc);
	tests = col.frame(list(
		test.t = t.test(l[[1]], l[[2]])$p.value,
		test.wilcoxon = wilcox.test(l[[1]], l[[2]])$p.value
	));
	print(tests);
}
# Coerce every column of a data frame to numeric; returns a matrix.
df2numeric = function(df) apply(df, 2, function(column) as.numeric(as.vector(column)));
# Expand a (value, count) table row-wise into a vector repeating each value.
expandCounts = function(tab) unlist(apply(tab, 1, function(row) { rep(row[1], row[2]) }));
# Chi-square test robust against degenerate tables: all-zero columns/rows are
# removed first; if the table collapses to a vector the test is undefined and
# list(p.value = NA) is returned. When any remaining cell is below
# bootstrapCellCount, the p-value is simulated with B replicates.
chisq.test.robust = function(tab, bootstrapCellCount = 5, B = 5e3) {
	emptyCols = apply(tab, 2, function(column) all(column == 0));
	tab = tab[, !emptyCols];
	if (is.vector(tab)) return(list(p.value = NA));
	emptyRows = apply(tab, 1, function(row) all(row == 0));
	tab = tab[!emptyRows, ];
	if (is.vector(tab)) return(list(p.value = NA));
	# small expected counts -> simulate the p-value instead of asymptotics
	if (any(tab < bootstrapCellCount))
		return(chisq.test(tab, simulate.p.value = T, B = B));
	chisq.test(tab)
}
# depends on coin package <!>, unfinished
# Cochran-Armitage style trend test via coin::independence_test, robust
# against all-zero table rows: scores of empty rows are dropped so the score
# vector matches the reduced table. <!> unfinished; depends on package `coin`.
armitage.test.robust = function(formula, df, scores) {
	tab = table(df);
	# only eliminate 0-rows of table from score vector
	zRows = sapply(1:dim(tab)[1], function(i){ all(tab[i,] == 0) });
	scores[[1]] = scores[[1]][!zRows];
	r = independence_test(formula, df, teststat = "quad", scores = scores);
	r
}
# Numerically stable log(sum(exp(v))) variants
# (used in simulations in project 2014-02-Microsatellites).
logSumExpRaw = function(v, pivot = median(v)) (log(sum(exp(v - pivot))) + pivot)
logSumExpPivot = logSumExpMax = function(v) logSumExpRaw(v, pivot = max(v))
logSumExp = function(x) {
	# pivot on the maximum; log1p handles the remaining small terms accurately
	iMax = which.max(x);
	log1p(sum(exp(x[-iMax] - x[iMax]))) + x[iMax]
}
# Fraction of (p-)values at or below alpha; NAs are ignored.
rejFrac = function(x, alpha = 0.05) mean(x <= alpha, na.rm = T);
# Rescale v to sum to C on the linear scale / the log scale, respectively.
vector.std = function(v, C = 1) (C * v / sum(v));
vector.std.log = function(v, C = 0) (v - (logSumExp(v) - C));
#
# <p> ml methods
#
# Names of the functions constituting a likelihood wrapper; lhGetWrapper
# resolves "<prefix>.<name>" in the global environment for each of these.
lhWrapperFunctions = c("initialize",
	"parsScale", "parsMap", "parsMapInv", "parsStart", "parsNames", "lh", "null2alt", "alt2null"
);
# <!> transition to S4-objects
# Assemble a likelihood wrapper: for each name in lhWrapperFunctions, look up
# "<prefix>.<name>" in the global environment (missing pieces become NULL).
# A prefix ending in "null" yields a null-hypothesis wrapper derived from the
# alternative wrapper by composing with null2alt/alt2null.
# self: wrapper state object; when NULL, it is built via <prefix>.initialize(...)
#	(or from the raw ellipsis arguments if no initializer exists).
lhGetWrapper = function(prefix, self, ...) {
	createNullWrapper = F;
	f = list();
	# suffix "null" requests the reduced (null) parametrization; strip ".null"
	if (substr(prefix, nchar(prefix) - 3, nchar(prefix)) == "null") {
		createNullWrapper = T;
		prefix = substr(prefix, 1, nchar(prefix) - 5);
	}
	for (n in lhWrapperFunctions) {
		f[[n]] = mget(sprintf("%s.%s", prefix, n), envir = globalenv(), ifnotfound=list(NULL))[[1]];
	}
	f$self = if (is.null(self)) { if (is.null(f$initialize)) list(...) else f$initialize(...) } else self;
	if (createNullWrapper) {
		# wrap parameter functions so they operate in the null parameter space
		f1 = f;
		self = f1$self = f$self;
		f1$parsStart = function(self){ f$alt2null(self, f$parsStart(self)) };
		f1$parsScale = function(self){ f$alt2null(self, f$parsScale(self)) };
		f1$parsMap = function(self, p){ f$alt2null(self, f$parsMap(self, f$null2alt(self, p))) };
		f1$parsMapInv = function(self, p){ f$alt2null(self, f$parsMapInv(self, f$null2alt(self, p))) };
		f1$lh = function(self){ lhRaw = f$lh(self); function(p)lhRaw(f$null2alt(self, p)) };
		return(f1);
	}
	f
}
# Copy an entire likelihood wrapper: for each wrapper function that exists for
# `template`, create an alias "<name>.<fn> = <template>.<fn>" in the caller's
# environment (via eval.parent on generated code).
lhCopyWrapper = function(name, template) {
	for (f in lhWrapperFunctions) {
		g = mget(sprintf("%s.%s", template, f), envir = globalenv(), ifnotfound=list(NULL))[[1]];
		if (!is.null(g)) eval.parent(parse(text = sprintf("%s.%s = %s.%s;", name, f, template, f)));
	}
}
# Placeholder initializer for likelihood wrappers (intentionally a no-op).
lhInit = function(lhWrapper) {
}
# Map the unit interval to the real line (logit form) and back (expit form);
# used to run unconstrained optimization over a probability parameter.
mapU = function(y) -log(1/y - 1)
map2U = function(z) 1/(1 + exp(-z))
# one-dimensional estimation
# One-dimensional ML estimation: optimize the wrapped likelihood over a single
# parameter via optimize() on (0,1), mapping through mapU to the unconstrained
# scale. Returns par (natural scale), par.os (optimization scale) and value.
# NOTE(review): a failed optimize() leaves `o` as a try-error, so o$maximum
# below would error rather than return NA — confirm intended.
lhMlEstimatorOD = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	if (is.null(c)) c = list(tol = .Machine$double.eps^0.25);
	f = lhGetWrapper(lhWrapper, c$self, ...);
	lhRaw = f$lh(f$self);
	lh = function(p) { lhRaw(mapU(f$parsMap(f$self, p))) }
	o = try(optimize(lh, lower = 0, upper = 1, tol = c$tol, maximum = T));
	r = list(par = mapU(f$parsMap(f$self, o$maximum)), par.os = o$maximum, value = o$objective);
	r
}
# multi-dimensional estimation
# Multi-dimensional ML estimation with multiple start values: each row of
# `starts` is mapped to optimization space and maximized by Nelder-Mead; the
# best result (maximal likelihood value) is returned. SANN refinement is
# currently disabled (if (0) branch).
lhMlEstimatorMD = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	if (is.null(c)) c = list(do.sann = F, sann.cycles = 1000);
	f = lhGetWrapper(lhWrapper, c$self, ...);
	eps = 1e-5;
	#if (!is.null(start)) { starts = matrix(start, nrow = 1); }
	if (is.null(start)) start = f$parsStart(f$self);
	starts = if (!is.matrix(start)) matrix(as.numeric(unlist(start)), nrow = 1) else start;
	parscale = f$parsScale(f$self);
	lhRaw = f$lh(f$self);
	lh = function(p) { lhRaw(f$parsMap(f$self, p)) }
	os = apply(starts, 1, function(s) {
		s = f$parsMapInv(f$self, s);
		o = try(optim(s, lh, method = "Nelder-Mead",
			control = list(fnscale = -1, parscale = parscale, maxit = 1000),
		));
		if (class(o) == "try-error") return(NA);
		if (0) { # if (o$convergence > 0 || c$do.sann) { # Nelder-Mead failed to converged
			o1 = try(optim(s, lh, method = "SANN",
				control = list(fnscale = -1, parscale = parscale, maxit = c$sann.cycles),
			));
			#if (class(o1) == "try-error") return(NA);
			if (o$convergence > 0 || o1$value > o$value) o = o1;
		}
		o$par.os = o$par; # parameter values in optimization space
		o$par = f$parsMap(f$self, o$par);
		o
	});
	if (all(is.na(os))) return(NA);
	vs = sapply(os, function(o){o$value});
	arg.max = which.max(vs);
	estimate = os[[arg.max[1]]];
	fisher = list();
	#if (!is.null(c$computeFisher) & c$computeFisher)
	# NOTE(review): `d` below is not defined in this scope; this branch would
	# fail whenever c$computeFisher is set — verify intended data argument.
	if (!is.null(c$computeFisher)) fisher = estimate.fisher(d, estimate, fisher.eps = 1e-1);
	r = c(estimate, fisher);
	r
}
# Dispatch ML estimation by dimensionality of the start vector: multi-dim,
# one-dim, or (no nuisance parameters) a direct likelihood evaluation.
lhMlEstimator = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	f = lhGetWrapper(lhWrapper, c$self, ...);
	r = if (length(f$parsStart(f$self)) > 1) {
		lhMlEstimatorMD(lhWrapper, start, c, ...);
	} else if (length(f$parsStart(f$self)) == 1) {
		lhMlEstimatorOD(lhWrapper, start, c, ...);
	} else { # null hypothesis w/o nuisance parameters
		r = f$lh(f$self)();
	}
	r
}
# Likelihood-ratio test based on a named likelihood wrapper: fit the null
# model ("<lhWrapper>.null"), reuse its fit as an extra start value for the
# alternative, and compare via a chi-square test with df = difference in the
# number of free parameters.
# NOTE(review): lhGetWrapper has signature (prefix, self, ...); here NULL fills
# `self` and c$self lands in `...` — confirm this is intended.
lhLRtest = function(lhWrapper = NULL, start = NULL, c = list(do.sann = F, sann.cycles = 1000), ...) {
	f = lhGetWrapper(lhWrapper, NULL, c$self, ...); # f$self is likelihood object and absorbs ellipsis parameters
	self = f$self;
	if (is.null(start)) start = f$parsStart(self);
	startNull = if (is.matrix(start))
		t(apply(start, 1, function(r)f$alt2null(self, r))) else
		f$alt2null(self, start);
	e.null = lhMlEstimator(sprintf("%s.%s", lhWrapper, "null"), startNull, c(c, list(self = self)));
	start = rbind(start, f$null2alt(self, e.null$par));
	e.alt = lhMlEstimator(lhWrapper, start, c(c, list(self = self)));
	# <p> calculate degrees of freedom
	st = f$parsStart(self);
	df = length(st) - length(f$alt2null(self, st));
	stat = 2 * (e.alt$value - e.null$value);
	list(ll.null = e.null$value, ll.alt = e.alt$value,
		test.stat = stat, p = 1 - pchisq(stat, df), df = df, par.null = e.null$par, par.alt = e.alt$par
	)
}
#
# lh-functions based on likelihood specification
#
# Example: see dataAnalysis.R in hwe project
# Example: binomial distribution
# lhBin = function(p, k, N)dbinom(k, N, p)
# spec_lhBin = list(
# ll = "lhBin",
# alt = list(
# start = c(.5), # also specifies number of parameters
# pars = list(list(name = "rho", type = "freq"))
# ),
# null = list(
# start = c(.5), # assume same likelihood and therefore #pars from alternative
# parsFree = 0 # alternative: list free parameters or specify tail from alt
# )
# );
# r = lhMl(spec_lhBin)
# Build a function from an unevaluated expression and a formal-argument list
# (args: named list, NULL values meaning "no default"); used below to turn
# symbolically derived D() expressions into callable derivative functions.
toF = function(expr, args, env = parent.frame()) {
	as.function(c(args, expr), env)
}
# Inverse logit (expit), optionally rescaled to [min, max], and its derivative.
logitI = expit = function(x, min = 0, max = 1) { (max - min)/(1 + exp(-x)) + min }
expitD = toF(D(expression((max - min)/(1 + exp(-x)) + min), 'x'), list(x = NULL, min = 0, max = 1));
# Logit mapping [min, max] onto the real line, and its derivative.
logit = function(x, min = 0, max = 1) { log((x - min)/(max - x)) }
logitD = toF(D(expression(log((x - min)/(max - x))), 'x'), list(x = NULL, min = 0, max = 1));
# Code templates for mapping individual likelihood parameters between the
# unconstrained optimization scale and their natural scale; "X" is substituted
# with the parameter access expression, `p` is the parameter description list.
lhArgMappers = list(
	freq = "expit(X)",
	int = "expit(X, min, max)",
	real = "X",
	positive = "log1p(exp(X))"
);
# Derivatives of the forward mappers (freq entry not yet implemented).
lhArgMappersD = list(
	freq = NULL, #D(expression(expit(x), 'x')),
	int = "expit(X, min, max)",
	real = "X",
	positive = "log1p(exp(X))"
);
# Inverse mappers: natural scale -> optimization scale.
lhArgMappersI = list(
	freq = "logit(X)",
	int = "logit(X, min, max)",
	real = "X",
	positive = "log(expm1(X))"
);
lhSpecificationDefaults = list(
	# embed null-parameter into alt-parameter space: variables: npars, parsFree, s (specification),
	# p: input parameters
	# <i>: optimization: substitute literals from start
	default = list(mapper = 'c(c(ARGS_FREE), c(ARGS_BOUND))', mapperInv = 'c(ARGS_FREE)')
);
# richest: richest parametrization of the likelihood
# lhInterface: call the likelihood function with a vector (vector) or with separate arguments from
#	the parameters (inline)
lhSpecificationDefault = list(richest = 'alt', lhInterface = 'vector');
# Wrapper templates for the two likelihood calling conventions; huge mapped
# values are rejected with -Inf to keep the optimizer in a sane region.
lhSpecificationInterfaces = list(
	vector = 'function(p, ...) { pm = mapper(p); if (any(abs(pm) > 1e10)) return(-Inf); lf(pm, ...) }',
	inline = 'function(p, ...) { pm = mapper(p); if (any(abs(pm) > 1e10)) return(-Inf); lf(ARGS_INLINE, ...) }'
);
#
# <p> logit derivatives
#simulations in 2014-07-Borstkanker/src/borstKankerExp.R
# Stable compositions of log/logit/expit (simulations in
# 2014-07-Borstkanker/src/borstKankerExp.R). The "*1" variants are naive
# reference implementations; the unnumbered/"*2" forms are numerically stable.
logExpit1 = function(x)log(expit(x))
logExpit = logExpit2 = function(x)-log1p(exp(-x))
logitExp1 = function(x)logit(exp(x))
logitExp = logitExp2 = function(x)-log(expm1(-x))
logExpit1m1 = function(x)log(1 - expit(x))
logExpit1m = logExpit1m2 = function(x)-log1p(exp(x))
logit1mExp1 = function(x)logit(1 - exp(x))
logit1mExp = logit1mExp2 = function(x)log(expm1(-x))
#
# <p> helper functions
#
# mappers for individual parameters
# ps: list of parameters
# mappers: mapper templates to used
# target: name of variable on which to apply
# idcs: indeces to iterate
# Generate mapper code for individual parameters: for each index in idcs,
# substitute the access expression "target[entries-or-index]" for "X" in the
# mapper template of the parameter's type, then substitute any per-parameter
# args; the pieces are joined with ", " into a single code fragment.
# NOTE(review): mergeDictToString is a project helper.
lhMapperPars = function(ps, mappers, target = 'p', idcs = 1:length(ps)) {
	maps = if (length(idcs) == 0) c() else sapply(idcs, function(i) {
		p = ps[[i]];
		a = gsub("X", sprintf("%s[%s]", target,
			deparse(if (length(p$entries)) p$entries else i)), mappers[[p$type]]);
		a = mergeDictToString(ps[[i]]$args, a);
		a
	});
	r = paste(maps, collapse = ", ");
	r
}
# <!> auto inverse mapping has to heed mapperPost time of application
# mappers map individual arguments, mapper sub-sequently maps the whole vector
# Build a parameter-vector mapping function from code templates: free
# parameters are mapped from the optimization vector `p`, bound parameters are
# substituted from `start`; the assembled source is eval'd within the
# specification s.
# NOTE(review): with parsFree == 0, `1:s$parsFree` yields c(1, 0) rather than
# an empty index vector — verify this case cannot occur here.
lhMapperFunction = function(s, mappers, mapper) {
	free = 1:s$parsFree; # idcs of free variables
	bound = if(s$parsFree < s$npars) (s$parsFree + 1):s$npars else c(); # idcs of bound variables
	mStr = sprintf('function(p){%s}',
		mergeDictToString(list(
			ARGS_FREE = lhMapperPars(s$pars, mappers, 'p', free),
			ARGS_BOUND = lhMapperPars(s$pars, mappers, 'start', bound)
		), mapper));
	mf = with(s, eval(parse(text = mStr)));
	mf
}
# Build both directions of the parameter mapping for a specification:
# forward (optimization -> natural scale) and inverse.
lhMapperFunctions = function(s) {
	r = list(
		mapper = lhMapperFunction(s, lhArgMappers, s$mapper),
		mapperInv = lhMapperFunction(s, lhArgMappersI, s$mapperInv)
	);
	r
}
#' Build wrapper function around likelihood
#'
#' @param template parameter specification used as template (usually richest parametrization tb reduced
#' for other hypotheses)
# Prepare one hypothesis' parameter specification: fill defaults, derive the
# free-parameter list from the template (richest parametrization), and build
# the wrapped likelihood function from the interface template (string code
# assembled via mergeDictToString and eval'd with the prepared bindings).
lhPreparePars = function(pars, defaults = lhSpecificationDefaults$default, spec = lhSpecificationDefault,
	template = pars) {
	# <p> determine free parameters
	t = merge.lists(defaults, pars);
	npars = length(template$pars);
	# NOTE(review): (npars - t$parsFree):npars selects parsFree + 1 elements —
	# looks like an off-by-one ((npars - parsFree + 1):npars?); verify.
	if (!is.null(t$parsFree)) {
		t$pars = if(t$parsFree == 0) list() else template$pars[(npars - t$parsFree): npars];
	}
	if (is.null(t$start)) t$start = template$start;
	if (is.null(t$parsFree)) t$parsFree = length(t$pars);
	# <p> construct mapped likelihood function
	fs = mergeDictToString(
		list(ARGS_INLINE =
			paste(sapply(1:npars, function(i) { sprintf("pm[%s]",
				deparse(if (length(template$pars[[i]]$entries)) template$pars[[i]]$entries else i)) }
			), collapse = ', ')),
		lhSpecificationInterfaces[[spec$lhInterface]]
	);
	t = merge.lists(t, list(npars = npars));
	t = merge.lists(t, lhMapperFunctions(t), list(lf = get(spec$ll)));
	f = with(t, eval(parse(text = fs)));
	t = merge.lists(t, list(npars = npars, lh = f));
	t
}
# types: names of specifications for which to define wrapped functions
# richest: name of specification for model that includes a superset of parameters of all other types
# Prepare a full likelihood specification: fill global defaults and build a
# wrapped sub-specification for each hypothesis in `types`, using the richest
# parametrization (s$richest) as the template.
lhPrepare = function(s, types = c('null', 'alt')) {
	# <p> preparation
	s = merge.lists(lhSpecificationDefault, s);
	ri = s[[s$richest]];
	# number of parameter groups
	npars = length(ri$pars);
	# number of parameters of the likelihood function
	#Npar = sum(list.kp(ri$pars, 'entries', template = 1));
	# <p> build wrappers
	m = nlapply(types, function(type) {
		defaults = merge.lists(lhSpecificationDefaults$default, lhSpecificationDefaults[[type]]);
		lhPreparePars(s[[type]], defaults, s, template = ri)
	});
	m = merge.lists(s, m);
	m
}
# <N> free parameters come first
# Extract the free-parameter subvector from a full parameter vector p,
# according to specification s (free parameters come first; their positions
# are taken from the 'entries' slots when present).
lhFreePars = function(s, p)with(s, {
	r = if (parsFree > 0) {
		idcs = unlist(list.kp(s$pars[1:parsFree], 'entries'));
		if (length(idcs) == 0) idcs = 1:parsFree;
		p[idcs]
	} else c();
	r
})
# second numeric derivative of x
Dn2f = function(f, x, ..., eps = 1e-5) {
(f(x + 2*eps, ...) + f(x - 2*eps, ...) - 2*f(x, ...))/(4*eps^2)
}
..OptimizeControl = list(fnscale = -1, tol = .Machine$double.eps^0.25);
# assume unconstraint arguments
# Unified optimizer over unconstrained arguments: optim() for dim > 1,
# optimize() on (0,1) (via logit transform) for dim 1, direct evaluation for
# empty parameter vectors. Optionally derives Wald confidence intervals from
# the Hessian (Cramer-Rao assumption).
# NOTE(review): for the multi-parameter branch r$hessian is a matrix, so
# `!is.na(r$hessian)` is vector-valued inside `&&`; for the empty branch
# r$hessian is NULL — verify these condition edge cases behave as intended.
# NOTE(review): `.list` is a project helper.
Optimize = function(p, f, method = 'BFGS', control = ..OptimizeControl, ...,
	hessian = T, ci = T, alpha = 5e-2) {
	r = if (length(p) > 1) {
		control = .list(control, .min = 'tol');
		o = optim(p, f, method = method, control = control, hessian = hessian, ...);
	} else if (length(p) == 1) {
		# search the whole real line through the inverse-logit reparametrization
		f0 = function(p, ...) { f(logit(p), ...) };
		o0 = try(optimize(f0, lower = 0, upper = 1,
			tol = control$tol, maximum = control$fnscale < 0, ...));
		o = if (class(o0) == 'try-error') list(par = NA, value = NA, hessian = NA) else
			list(par = logit(o0$maximum), value = o0$objective,
				hessian = if(hessian) matrix(Dn2f(f, logit(o0$maximum), ...)/o0$objective) else NA);
	} else {
		o = list(par = c(), value = f(...));
	}
	if (ci && hessian && !is.na(r$hessian)) {
		var = -1/diag(r$hessian); # assume sharp cramer-rao bound
		sd = sqrt(var);
		r = c(r, list(ci = list(
			ciL = qnorm(alpha/2, r$par, sd, lower.tail = T),
			ciU = qnorm(alpha/2, r$par, sd, lower.tail = F), level = alpha, var = var)));
	}
	r
}
# p: matrix of row-wise start values
# Run Optimize from multiple start values (rows of matrix p) and return the
# result with the maximal value; scalar/NULL p degenerates to a single run.
OptimizeMultiStart = function(p, f, method = 'BFGS', control = ..OptimizeControl, ...) {
	r = if (is.null(p)) { # special case of degenerate matrix (does not work in R)
		Optimize(c(), f, method = method, control = control, ...)
	} else if (!is.matrix(p)) {
		Optimize(p, f, method = method, control = control, ...)
	} else {
		os = apply(p, 1, function(s)Optimize(s, f, method = method, control = control, ...));
		# find maximum
		if (all(is.na(os))) return(NA);
		vs = list.key(os, 'value');
		arg.max = which.max(vs);
		r = os[[arg.max[1]]];
	}
	r
}
# ML estimation for a prepared sub-specification t: try each optimizer method
# in turn until one succeeds, then map parameters and CI bounds back to the
# natural scale.
# NOTE(review): if every method fails, `o` remains a try-error and o$par below
# errors; also `break()` (with parentheses) — verify both are intended.
lhEstMLRaw = function(t, start = NULL, ..., optim_method = 'BFGS') {
	if (is.null(start)) start = t$start;
	for (method in optim_method) {
		o = try(OptimizeMultiStart(t$mapperInv(start), t$lh, method = method, ...));
		if (!('try-error' %in% class(o))) break();
	}
	o$par = t$mapper(o$par);
	o$ci$ciL = t$mapper(o$ci$ciL);
	o$ci$ciU = t$mapper(o$ci$ciU);
	o
}
# Specification-based ML estimation: prepare the requested hypothesis type and
# delegate to lhEstMLRaw. Alias: lhMl.
lhEstML = lhMl = function(s, start = NULL, type = 'alt', ..., optim_method = 'BFGS') {
	# <p> mapping of parameters
	s = lhPrepare(s, types = type);
	lhEstMLRaw(s[[type]], start = start, ..., optim_method = optim_method)
}
# User hook for likelihood-data preparation: if a function named
# "<s$ll>prepare" or "<s$ll>_prepare" exists, call it on the ellipsis
# arguments; otherwise pass the raw arguments through unchanged.
# Note: min(which(...)) on an empty match yields Inf (with a warning), which
# is used here as the "no prepare function" sentinel.
lfPrepare = function(s, ...) {
	lhParsOrig = list(...);
	prepare = sprintf('%s%s', s$ll, c('prepare', '_prepare'));
	prepareExists = min(which(sapply(prepare, exists)));
	lhPars = if (prepareExists < Inf) get(prepare[prepareExists])(...) else lhParsOrig;
	lhPars
}
# specification based LR-test
# Specification-based likelihood-ratio test: fit null and alternative models
# described by specification `s` and compare via a chi-square LR test.
# startNull/startAlt: optional start values (row-matrix of starts); types:
# names of the two sub-specifications (null first); addTypeArg: pass
# lh_type__ to the likelihood so it can distinguish the two fits.
# Returns log-likelihoods, LR statistic, df, p-value, parameter estimates and
# the (prepared and raw) likelihood arguments.
# <!> fixed: lhParsOrig was referenced in the return list but only defined
# inside lfPrepare's scope; it is now captured locally.
lhTestLR = function(s, startNull = NULL, startAlt = NULL, types = c('null', 'alt'), ...,
	optim_method = 'BFGS', addTypeArg = F) {
	# <p> general preparation
	s = lhPrepare(s, types = types);
	null = s[[types[1]]];
	alt = s[[types[2]]];
	# <p> specific preparation (user defined)
	lhParsOrig = list(...);	# raw ellipsis arguments, returned for reference
	lhPars = lfPrepare(s, ...);
	# <p> null hypothesis
	if (is.null(startNull))
		startNull = if(null$parsFree == 0) NULL else matrix(lhFreePars(null, null$start), nrow = 1);
	lhEstMLRawArgs = c(list(t = null, start = startNull), lhPars, list(optim_method = optim_method));
	if (addTypeArg) lhEstMLRawArgs = c(lhEstMLRawArgs, list(lh_type__ = 'null'));
	o0 = do.call(lhEstMLRaw, lhEstMLRawArgs);
	# <p> alternative hypothesis
	if (is.null(startAlt)) {
		# build from fit under the null
		parNull = lhFreePars(null, o0$par);
		startAlt = matrix(c(parNull, alt$start[(length(parNull) + 1):length(alt$start)]), nrow = 1);
	}
	lhEstMLRawArgs = c(list(t = alt, start = startAlt), lhPars, list(optim_method = optim_method));
	if (addTypeArg) lhEstMLRawArgs = c(lhEstMLRawArgs, list(lh_type__ = 'alt'));
	o1 = do.call(lhEstMLRaw, lhEstMLRawArgs);
	# <p> calculate degrees of freedom
	df = length(alt$start) - length(lhFreePars(null, o0$par));
	stat = 2 * (o1$value - o0$value);
	r = list(ll.null = o0$value, ll.alt = o1$value,
		test.stat = stat, p = 1 - pchisq(stat, df), df = df, par.null = o0$par, par.alt = o1$par,
		lh.pars = lhPars, lh.pars.orig = lhParsOrig
	);
	r
}
#
# <p> latest iteration of LH wrapper
#
# Prepare a specification whose parameter vector includes regression
# coefficients defined by a formula: the parameter named 'formula' is expanded
# to one 'beta' per design-matrix column, parameter slots ('entries') and
# start values are laid out accordingly, then the mapped likelihood is built.
lhPrepareFormula = function(s, type, formula, data, ...) {
	# <o> compute on subset of data <N> cave: missingness
	X = model.matrix(model.frame(formula, data = data), data = data);
	# <p> expand parameters
	t = s[[type]];
	ps = t$pars;
	fparsI = which(list.key(ps, 'name') == 'formula');
	fpars = ps[[fparsI]]; # formula pars
	ps[[fparsI]] = merge.lists(ps[[fparsI]], list(name = 'beta', count = ncol(X)));
	# <p> determine slots
	counts = cumsum(list.key(ps, 'count'));
	countsStart = pop(c(1, counts + 1));
	ps = lapply(seq_along(ps), function(i)merge.lists(ps[[i]], list(entries = countsStart[i]:counts[i])));
	# <p> determine start
	start = avu(sapply(ps, function(p)rep(p$start, p$count)));
	# <p> map pars
	t$pars = ps;
	t = lhPreparePars(t, spec = merge.lists(lhSpecificationDefault, s));
	t$start = start;
	t
}
# ML estimation for a formula-based likelihood specification; on optimizer
# failure a result with NA parameters and convergence = 1 is returned.
# NOTE(review): print(r) looks like leftover debug output — confirm wanted.
lhMlFormula = function(s, formula, data, type = 'formula', ..., optim_method = 'BFGS') {
	# <p> mapping of parameters
	t = lhPrepareFormula(s, type, formula, data, ...);
	# <p> extra args
	lhPars = lfPrepare(s, formula = formula, data = data, ...);
	# <p> call optimizer
	lhEstMLRawArgs = c(list(t = t, start = s$start), lhPars, list(optim_method = optim_method));
	r = try(do.call(lhEstMLRaw, lhEstMLRawArgs), silent = T);
	print(r);
	if (class(r) == 'try-error') r = list(par = rep(NA, length(t$start)), value = NA, convergence = 1);
	r
}
#
# <p> model manipulation
#
# TRUE iff r contains exactly the two values 0 and 1.
response.is.binary = function(r) {
	values = sort(unique(r));
	length(values) == 2 && all(values == c(0, 1))
}
#
# <p> clustered data
#
#
# <p> describe relationships (genetic) given a relational (database) model
#
# given relatedness in a data frame of ids and clusterIds, return a list of clusters containing ids
# clusterRelation2list_old = function(r, idName = "id", idClusterName = "idFam", byIndex = T) {
# r = r[, c(idName, idClusterName)];
# ns = sort(unique(r[, 2]));
# # <p> build clusters
# clusters = sapply(ns, function(e)list()); # holds members of clusters
# names(clusters) = ns;
# # <!> we can iterate the list, given it is ordered lexicographically
# for (i in 1:(dim(r)[1])) {
# clN = as.character(r[i, 2]);
# clusters[[clN]] = unlist(c(clusters[[clN]], ifelse(byIndex, i, as.character(r[i, 1]))));
# }
# clusters
# }
# Convert relational cluster membership (data frame with id and cluster-id
# columns) into a named list of clusters; each cluster holds row indeces
# (byIndex = T) or the member ids themselves.
# NOTE(review): nlapply is a project helper (lapply with names).
clusterRelation2list = function(r, idName = "id", idClusterName = "idFam", byIndex = T) {
	r = r[, c(idName, idClusterName)];
	clusters = nlapply(sort(unique(r[[idClusterName]])), function(n) {
		idcs = which(r[[idClusterName]] == n);
		c = if (byIndex) idcs else r[[idName]][idcs];
		c
	});
	clusters
}
# permute clusters of identical size and within clusters
# cluster specification as given by clusterRelation2list assuming byIndex = T
# returned permutation is relative to refIds
# Permute clustered observations: clusters of identical size are permuted
# against each other, and members are permuted within clusters.
# cls: list of clusters (id vectors) as produced by clusterRelation2list with
#	byIndex = T; refIds: reference id order the returned permutation refers to
#	(default: sorted union of all cluster ids); selectIds: optional id filter.
# Returns a permutation `perm` such that refIds[perm] yields the new order.
# <!> fixed: (1) emptied clusters were dropped via the undefined name
# `clusters` instead of `cls`; (2) `refIds = if (is.null(refIds)) ...` (an
# if-without-else assignment) reset a user-supplied refIds to NULL.
permuteClusters = function(cls, refIds = NULL, selectIds = NULL) {
	# allow to filter ids from cluster specification
	if (!is.null(selectIds)) {
		cls = lapply(cls, function(cl)intersect(cl, selectIds));
		cls = cls[sapply(cls, length) > 0];	# drop clusters emptied by the filter
	}
	cSizes = sapply(cls, function(e)length(e));
	# which cluster sizes are present in the data set?
	sizes = unique(cSizes);
	# indexable list of ids
	if (is.null(refIds)) refIds = sort(unlist(cls));
	# final permutation of refIds, such that refIds[perm] gives new order
	perm = 1:length(refIds);
	for (s in sort(sizes, decreasing = T)) { # permute cluster of same size, permute within cluster
		clsS = which(cSizes == s);
		p1 = sample(1:length(clsS)); # permute clusters
		for (i in 1:length(clsS)) {
			p2 = sample(1:s);
			# <p> indeces that are to be replaced
			indT = which.indeces(cls[[clsS[i]]], refIds);
			# <p> indeces where the replacement comes from
			indF = which.indeces(cls[[clsS[p1[i]]]][p2], refIds);
			# <p> save partial permutation
			perm[indT] = indF;
		}
	}
	perm
}
# clusters is a vector with cluster ids
# Convenience wrapper: permute observations given only a vector of cluster ids
# (one entry per observation), building the cluster list on the fly.
clustersPermute = function(cls) {
	clusterMap = data.frame(id = 1:length(cls), idFam = cls);
	permuteClusters(clusterRelation2list(clusterMap))
}
#
# <p> wrap model fitting for lm/glm/gee fitters
#
#library("geepack"); # <i> move to init method
# Dispatch table for model fitting/comparison: each entry provides
#	fit(formula, data, clusterCol, ...) -> fitted model, and
#	compare(m1, m0) -> list with anova table, p.value, and coefficient
#	estimates/standard errors of both models.
# glmr/glmrcl require lme4::glmer, gee requires geepack::geeglm.
regressionMethods = list(
	# assume formula to contain random effect
	glmr = list(
		fit = function(formula, data, clusterCol = NULL, ...) {
			glmer(formula, data = data, ...)
		},
		compare = function(m1, m0){
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chisq)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	# use cluster column <!> untested
	glmrcl = list(
		fit = function(formula, data, clusterCol = NULL, ...) {
			# add a random intercept per cluster to the formula
			f = update(formula, as.formula(Sprintf('~ . + (1|%{clusterCol}s)')));
			glmer(f, data = data, ...)
		},
		compare = function(m1, m0){
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chisq)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	glm = list(
		fit = function(formula, data, clusterCol = NULL, ...)glm(formula, data = data, ...),
		compare = function(m1, m0){
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chi)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	lm = list(
		fit = function(formula, data, clusterCol = NULL, ...)lm(formula, data = data, ...),
		compare = function(m1, m0){
			a = anova(m0$r, m1$r);
			list(anova = a, m0 = m0, m1 = m1, p.value = a[["Pr(>F)"]][2],
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	gee = list(
		fit = function(formula, data, clusterCol, ...) {
			if (!length(formula.covariates(formula))) return(NULL);
			# geeglm needs ordered clusterIds <!>
			data = data[order(data[[clusterCol]]), ];
			names(data)[which(names(data) == clusterCol)] = "..gee.clusters"; # hack to make geeglm work
			r = geeglm(formula, data = data, id = ..gee.clusters, ...);
			r
		},
		compare = function(m1, m0){
			a = if (is.null(m0)) anova(m1$r) else anova.geeglm(m0$r, m1$r);
			list(anova = a, m0 = m0, m1 = m1, p.value = a[["P(>|Chi|)"]][1],
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std.err"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std.err"]
			)
		}
	)
);
# Indeces of rows of `data` that are complete (no NA) in every variable
# appearing in formula f1.
completeRows = function(f1, data) {
	usedVars = all.vars(as.formula(f1));
	ok = apply(data[, usedVars, drop = F], 1, function(row) !any(is.na(row)));
	which(ok)
}
# <!> clusterIds is needed as argument although just forwarded
# Fit a model of the given `type` via the regressionMethods dispatch table;
# result wraps the fit together with its type for later comparison.
regressionFit = function(f, data, type, ...) {
	fit = regressionMethods[[type]]$fit(f, data, ...);
	list(type = type, r = fit)
}
# Compare two fits of the same type via that type's compare method.
regressionCompare = function(m1, m0) {
	regressionMethods[[m1$type]]$compare(m1, m0)
}
# Fit nested models f1 (alternative) and f0 (null) on the jointly complete
# cases of f1's variables and compare them via the type's anova-based test.
regressionCompareModelsRaw = function(f1, f0, data, type = "lm", clusterCol = NULL, ...) {
	# <p> jointly trim data according to missing data
	#rows = which(apply(data[, c(formula.vars(f1), clusterCol)], 1, function(r)all(!is.na(r))));
	# more robust version
	row.names(data) = NULL;
	#rows = as.integer(row.names(model.frame(f1, data = data)));
	# robust for random effects
	rows = apply(data[, all.vars(as.formula(f1)), drop = F], 1, function(r)!any(is.na(r)));
	d0 = data[rows, ];
	# <p> fit and compare models
	m1 = regressionFit(as.formula(f1), data = d0, type = type, clusterCol = clusterCol, ...);
	m0 = regressionFit(as.formula(f0), data = d0, type = type, clusterCol = clusterCol, ...);
	a = regressionCompare(m1, m0);
	a
}
# Default permutation settings for regressionCompareModels*: p.value =
# threshold below which permutation is triggered; sdev.rel = target relative
# sdev of the empirical p-value; Nchunk = permutations per batch.
permuteDefault = list(
	p.value = 0, sdev.rel = .3, Nchunk = 1e3,
	nuisanceCovariates = NULL, .clRunLocal = T
);
# idCol: used for permutation: column specifying identiy of individuals: could be filled automatically <i>
# permute:
# sdev.rel: sdev relative to p.value to decide how often to permute
# Compare nested models f1 (alternative) vs f0 (null); when the asymptotic
# p-value falls below permute$p.value, refine it by cluster permutation.
regressionCompareModels = function(f1, f0, data, type = "lm", clusterCol = NULL, ...,
	permute = permuteDefault) {
	permute = merge.lists(permuteDefault, permute);
	result = regressionCompareModelsRaw(f1, f0, data, type, clusterCol, ...);
	needsPermutation = !is.null(result) && !is.null(result$p.value) &&
		!is.na(result$p.value) && result$p.value < permute$p.value;
	if (needsPermutation)
		result = regressionCompareModelsPermuted(f1, f0, data, type, clusterCol, ...,
			permute = permute);
	result
}
#
# <p> permuted cluster regression
#
# Adaptive permutation p-value for comparing nested models: covariates of f1
# (minus nuisance covariates) are permuted cluster-wise; batches of Nchunk
# permutations are added until the relative sdev of the empirical p-value
# falls below permute$sdev.rel or Nmax permutations are reached.
# NOTE(review): the clapply worker closure reads d0 from this frame; when
# .clRunLocal is FALSE (remote execution) d0 must be exported — verify.
regressionCompareModelsPermuted = function(f1, f0, data, type = "lm", clusterCol = "cluster", ...,
	idCol = "id", permute = permuteDefault, Nmax = 1e5) {
	# <p> data p-value
	a.data = regressionCompareModelsRaw(f1, f0, data, type, clusterCol = clusterCol, ...);
	p.data = a.data$p.value;
	# <p> logging
	Log(sprintf("Permuting Regression: %s [p = %.2e]", paste(as.character(f1), collapse = " "), p.data), 4);
	# <p> permutation variables indeces
	pvs = setdiff(formula.covariates(f1), permute$nuisanceCovariates);
	# <p> precompute cluster data structure
	cls = clusterRelation2list(data.frame(id = 1:length(data[[clusterCol]]), idFam = data[[clusterCol]]))
	ps = NULL;
	d0 = data;
	# adaptive permutation
	repeat {
		ps0 = clapply(1:permute$Nchunk, function(i, f1, f0, data, type, clusterCol, cls, pvs){
			d0[, pvs] = if (is.null(clusterCol)) d0[sample(1:(dim(data)[1])), pvs] else
				d0[permuteClusters(cls), pvs];
			r = regressionCompareModelsRaw(f1, f0, d0, type, clusterCol, ...);
			r$p.value
		}, f1 = f1, f0 = f0, data = data, type = type, clusterCol = clusterCol, cls = cls, pvs = pvs,
			.clRunLocal = permute$.clRunLocal);
		ps0 = na.exclude(as.numeric(ps0));
		ps = c(ps, ps0);
		#print(ps[1:100]);
		p.emp = fraction(ps <= p.data);
		# <p> stopping criterion
		p.break = if (p.emp == 0) 1 / length(ps) else p.emp;
		sdev.rel = sqrt(p.break * (1 - p.break) / length(ps)) / p.break;
		#print(list(sd = sdev.rel * p.break, sd.rel = sdev.rel, p = p.emp));
		if (sdev.rel <= permute$sdev.rel) break;
		# <p> final stop
		if (length(ps) >= Nmax) break;
	};
	r = list(f1 = f1, f0 = f0, p.value = p.emp, p.data = p.data, anova = a.data$anova, ps = ps);
	r
}
# permute covariates in order to obtain empirical p-values
# f1: model formula alternative
# f0: model formula hypothesis
# M: number of permutations
# Empirical (permutation) p-value for comparing nested models f1 vs f0 on
# `data`; M permutations; nuisanceCovariates are left unpermuted.
# <!> fixed: `data` was not forwarded, so `type` was passed positionally into
# the data slot of regressionCompareModelsPermuted.
regressionCompareModelsEmp = function(f1, f0, data, nuisanceCovariates = c(), type = "lm", M = 1e3, ...,
	idName = "id", idClusterName = "cluster", .clRunLocal = T) {
	r = regressionCompareModelsPermuted(f1, f0, data, type, ..., clusterCol = idClusterName, idCol = idName,
		permute = list(Nchunk = M, nuisanceCovariates = nuisanceCovariates, .clRunLocal = .clRunLocal));
	r
}
# data: data.frame
# stat: function computing test statistic
# vars: formula for permuation
# Nperm: number of permutations
# Pvalue: c('upper', 'lower', 'two.tailed')
# Generic permutation test: compute statistic `stat` on `data` and on Nperm
# copies with the columns named in formula `vars` jointly row-permuted.
# Pvalue: 'lower'/'upper'/'two.tailed' tail of the permutation distribution;
# fracBadStatThres: maximal tolerated fraction of failed (NA) permutation
#	statistics before returning p.value = NA; returnT: also return statistics.
# <!> fixed: removed the dead local `f = function(i, ...) {}` (never used).
# NOTE(review): Sapply is a project helper (sapply variant).
permute = function(data, stat, vars, ..., Nperm = 5e3, Pvalue = 'lower', na.rm = T, fracBadStatThres = .01,
	returnT = TRUE) {
	perm.vars = all.vars(as.formula(vars));
	# i == 0 evaluates the statistic on the unpermuted data
	Ts = Sapply(0:Nperm, function(i, data, ...) {
		if (i > 0) data[, perm.vars] = data[sample(nrow(data)), perm.vars];
		stat(data, ...)
	}, data = data, ...);
	fracBadStatistics = mean(is.na(Ts[-1]));
	if (is.na(Ts[1]) || fracBadStatistics >= fracBadStatThres) return(list(p.value = NA));
	Ts = Ts[!is.na(Ts)];
	Tdata = Ts[1];
	Ts = Ts[-1];
	# add-one estimator avoids empirical p-values of exactly zero
	Plower = (1 + sum(Ts <= Tdata)) / Nperm;
	Pupper = (1 + sum(Ts >= Tdata)) / Nperm;
	p.value = switch(Pvalue,
		lower = Plower,
		upper = Pupper,
		two.tailed = 2 * min(Plower, Pupper)
	);
	r = if (returnT)
		list(p.value = p.value, t.data = Tdata, t.perm = Ts) else
		list(p.value = p.value, t.data = Tdata);
	r
}
#
# <p> error propagation
#
# as derived from the RB project and tested therein
# Gaussian error propagation for a product x*y (first-order delta method).
errProd = function(x, sdx, y, sdy, covxy = 0) {
	(x * y) * sqrt((sdx/x)^2 + (sdy/y)^2 + 2 * sdx * sdy * covxy)
}
# Error propagation for a ratio x/y.
errFrac = function(x, sdx, y, sdy, covxy = 0) {
	(x / y) * sqrt((sdx/x)^2 + (sdy/y)^2 - 2 * sdx * sdy * covxy)
}
# Error propagation for a weighted sum cx*x + cy*y.
errSum = function(sdx, cx = 1, sdy = 0, cy = 1, covxy = 0) {
	sqrt((cx * sdx)^2 + (cy * sdy)^2 + 2 * cx * cy * covxy)
}
#
# <§> some general statistical transformations
#
# convert confidence interval to standard dev based on a normality assumption
# Convert a confidence interval to a standard deviation under a normality
# assumption: numerically invert qnorm for the centered upper limit.
# NOTE(review): `inverse` already returns a Vectorize'd function; the outer
# Vectorize is redundant (though harmless) — confirm.
ciToSd = function(ci.lo, ci.up, level = .95) {
	# upper centered limit
	ciU = ci.up - mean(c(ci.lo, ci.up));
	span = ci.up - ci.lo;
	# corresponding sd
	sd = Vectorize(inverse(function(s)qnorm(1 - (1 - level)/2, 0, s), interval = c(0, span * 8)))(ciU);
	sd
}
# p-value implied by a confidence interval (normality assumption): recover the
# sd from the CI, take the midpoint as the effect, and test against `against`.
ciToP = function(ci.lo, ci.up, level = .95, one.sided = F, against = 0) {
	sd = ciToSd(ci.lo, ci.up, level)
	P = peSdToP((ci.lo + ci.up)/2 - against, sd, one.sided);
	P
}
# convert point estimate and SD to p-value (assuming normality)
# Wald p-value from a point estimate and its standard error (normality
# assumption): lower tail of |beta| under N(0, sd), doubled unless one-sided.
peSdToP = function(beta, sd, one.sided = F) {
	tailP = pnorm(-abs(beta), 0, sd, lower.tail = T);
	tailP * ifelse(one.sided, 1, 2)
}
# Normal-theory confidence interval from a point estimate and standard error.
ciFromBetaSdev = function(beta, sdev, level = .95) {
	tailP = (1 - level) / 2;
	list(effect = beta,
		lower = qnorm(tailP, beta, sdev, lower.tail = T),
		upper = qnorm(tailP, beta, sdev, lower.tail = F)
	)
}
# Confidence interval for coefficient `var` taken from a model summary
# (lm/glm-style coefficient table with Estimate / Std. Error columns).
ciFromSummary = function(s, var, level = .95) {
	coefRow = coefficients(s)[var, ];
	ciFromBetaSdev(coefRow[["Estimate"]], coefRow[["Std. Error"]], level = level)
}
# One-sided p-value of effect beta against `null` under N(|beta|, sd).
pFromBetaSd = function(beta, sd, null = 0) pnorm(null, abs(beta), sd)
# Numerically invert peSdToP: the standard error yielding p-value p at beta.
sdFromBetaP = function(beta, p) Vectorize(inverse(function(s) peSdToP(beta, s), interval = c(0, 10)))(p);
# Reconstruct a confidence interval from an effect estimate and its p-value.
betaPtoCi = function(beta, p) {
	sdev = sdFromBetaP(beta, p);
	ciFromBetaSdev(beta, sdev)
}
#
# meta analysis
#
# meta analysis row-wise
# Fisher's method, row-wise: combine each row of p-values into one p-value via
# the statistic -2*sum(log(p)) ~ chi-square with 2*k degrees of freedom.
metaPvalue = function(ps) {
	if (!is.matrix(ps)) ps = matrix(ps, nrow = 1);
	if (!all(is.numeric(ps))) ps = apply(ps, 1:2, as.numeric);
	chiStats = apply(ps, 1, function(row) -2 * sum(log(row)));
	pchisq(chiStats, 2 * ncol(ps), lower.tail = F)
}
#
# data imputation
#
# sample() that treats a length-1 x as the single-element population (base
# sample(5) would instead draw from 1:5).
Sample = function(x, ...)if (length(x) == 1)x else sample(x, ...);
# Simple hot-deck multiple imputation: create n.imp copies of `data`, each
# with NAs per column replaced by values drawn from that column's observed
# values. Returns a list of imputed data frames.
# NOTE(review): `count` is a project helper — presumably the number of TRUEs;
# verify.
mi.simple = function(data, n.imp = 20) {
	r = lapply(1:n.imp, function(i) {
		for (v in names(data)) {
			data[is.na(data[, v]), v] = Sample(na.omit(data[, v]), count(is.na(data[, v])));
		}
		data
	})
	r
}
# Build an imputer closure: the returned function fills NAs in its argument by
# sampling observed values from `imputationData` (cross-imputation), column by
# column; optionally factors are expanded afterwards.
# NOTE(review): `vars` at the dataExpandFactors line is not defined in this
# scope (free variable) — likely should be imputationVars or the expanded
# column names; verify against dataExpandFactors' output.
cross.imputer = function(imputationData, imputationVars = NULL, doExpandFactors = T) {
	if (is.null(imputationVars)) imputationVars = names(imputationData);
	f = function(data) {
		d0 = data;
		for (v in imputationVars) { # cross impute from imputationData
			d0[is.na(d0[, v]), v] = Sample(na.omit(imputationData[[v]]), count(is.na(d0[, v])));
		}
		if (doExpandFactors) d0 = dataExpandFactors(d0)[, vars];
		d0
	};
	f
}
# Replace NAs in a numeric vector by the mean of the observed values.
imputeMeanVar = function(col) {
	replace(col, is.na(col), mean(col, na.rm = T))
}
# Column-wise mean imputation of a matrix/data frame; returns a matrix.
imputeMean = function(data) {
	apply(data, 2, imputeMeanVar)
}
#
# <p> cross validation
#
# cross validation partitions for classification data
# K-fold cross-validation partitions for classification data: random
# partitions are re-drawn (up to maxTries) until every training fold contains
# at least minEls observations of every response category; NULL on failure.
# NOTE(review): splitListEls/data.frame.types/table.n/Log are project helpers.
# NOTE(review): success on the final attempt (i == maxTries) is treated as
# failure by `i < maxTries` — possible off-by-one; verify.
crossValidationPartitionsClassification = function(responses, K = 15, minEls = 3, maxTries = 15) {
	N = length(responses);
	cats = unique(responses);
	for (i in 1:maxTries) {
		# random permutation
		perm = sample(1:N, N);
		# compute partitions
		parts = splitListEls(perm, K, returnElements = T);
		counts = data.frame.types(
			lapply(parts, function(p)table.n(responses[-p], categories = cats)),
			names = cats, do.rbind = T
		);
		doReject = any(apply(counts, 1, function(r)any(r < minEls)));
		if (!doReject) break;
	}
	r = if (i < maxTries) parts else {
		Log("Error: failed to find suitable cross validation partition!");
		NULL
	}
	r
}
# cross validation parititions for clustered data
# return indeces into cluster vector (cluster identities assumed to be given by integers)
# so far do not heed cluster sizes
# K-fold cross-validation partitions for clustered data: whole clusters are
# assigned to folds (cluster sizes not yet balanced); returns per-fold indeces
# into the `clusters` vector.
crossValidationPartitionsClusters = function(clusters, K = 20) {
	N = length(clusters);
	# unique cluster ids
	cls = unique(clusters);
	# random permutation
	perm = Sample(cls, length(cls));
	# compute partitions
	parts = splitListEls(perm, K, returnElements = T);
	r = lapply(parts, function(p)which.indeces(p, clusters, match.multi = T));
	r
}
#
# <p> optimization
#
# Nested grid search: evaluate f on an Ngrid^d grid over `parameters` (list of
# c(lower, upper) ranges), shrink the ranges around the optimum, and repeat
# for `steps` refinement levels. key: result component holding the objective;
# rscale: sign/scale applied before which.min (use -1 to maximize);
# par.as.vector: pass the grid point as one vector instead of named arguments.
# NOTE(review): clapply/merge.multi.list/list.kp/rget are project helpers.
nested.search = function(f, ..., key = NULL, parameters = list(p1 = c(0, 10)),
	steps = 3, Ngrid = 4, rscale = 1, return.grid = F, par.as.vector = F, .clRunLocal = rget('.clRunLocal')) {
	ps = ps0 = parameters;
	for (i in 1:steps) {
		# <p> create search grid
		pars = lapply(ps, function(p)seq(p[1], p[2], length.out = Ngrid));
		grid = merge.multi.list(pars);
		# <p> apply function
		r = clapply(1:dim(grid)[1], function(j, grid, ...) {
			args = if (par.as.vector) list(as.vector(grid[j, ]), ...) else c(as.list(grid[j, ]), list(...));
			do.call(f, args);
		}, grid = grid, ..., .clRunLocal = .clRunLocal);
		# <p> search optimum
		values = if (is.null(key)) r else list.kp(r, key, do.unlist = T);
		opt = which.min(values * rscale);
		pt = grid[opt, ]; # optimal point in the grid search
		# shrink each range around the optimum, clipped to the original bounds
		ps = lapply(1:length(ps), function(j){
			from = max(pt[j] - (ps[[j]][2] - ps[[j]][1])/Ngrid, ps0[[j]][1]);
			to = min(pt[j] + (ps[[j]][2] - ps[[j]][1])/Ngrid, ps0[[j]][2]);
			c(from, to)
		});
		names(ps) = names(ps0);
	}
	r = if (return.grid) list(value = values[[opt]], par = pt, grid = r) else
		list(value = values[[opt]], par = pt, r = r[[opt]]);
	r
}
optim.nested.defaults = list(steps = 5, Ngrid = 4, rscale = 1, return.grid = F);
# optim()-like interface to nested.search.
#
# @param par unused (kept for optim-compatible signature)
# @param f objective function, called with the parameter vector followed by ...
# @param lower,upper vectors of box constraints, one entry per parameter
# @param control list overriding optim.nested.defaults
# @return list(value, par, ...) as returned by nested.search
optim.nested = function(par = NULL, f, ..., lower = -Inf, upper = Inf, control = list())
	with(merge.lists(optim.nested.defaults, control), {
	# one c(lower, upper) interval per parameter; NB: the former
	# apply(..., function(r)list(r)) produced doubly-nested lists that broke
	# seq(p[1], p[2]) inside nested.search
	bounds = cbind(lower, upper);
	parameters = lapply(seq_len(nrow(bounds)), function(i) bounds[i, ]);
	# NB: `parameters` must be passed by name — an unnamed argument after `...`
	# would be swallowed into `...` and forwarded to f
	r = nested.search(f, ..., parameters = parameters,
		steps = steps, Ngrid = Ngrid, rscale = rscale, return.grid = return.grid, par.as.vector = T);
	r
})
#
# <p> correlation in data
#
# Identify clusters of (almost) perfectly correlated columns in a data frame.
#
# @param df numeric data/matrix passed to rcorr (Hmisc)
# @param eps correlation tolerance: |r| > 1 - eps counts as redundant
# @return list(clusters, cols.remove): connected components of the high-correlation
#	graph and column indeces suggested for removal (all but the first per cluster)
Df.corr = function(df, eps = 1e-2) {
	N = dim(df)[2];
	rc = rcorr(df);
	# convert linear indeces of high-correlation entries to (row, col) pairs
	# NOTE(review): %/% and %% look swapped wrt R's column-major layout, but since
	# the correlation matrix is symmetric the resulting pair set coincides — verify
	pairs = t(sapply(which(abs(rc$r) > (1 - eps)), function(e) {
		row = ((e - 1) %/% N) + 1;
		col = ((e - 1) %% N) + 1;
		r = c(row, col);
		r
	}));
	# keep strict upper triangle (drops the diagonal and duplicates)
	pairs = pairs[pairs[, 1] < pairs[, 2], ];
	clusters = sub.graph(pairs);	# connected components (project helper)
	remove = unlist(lapply(clusters, function(e)e[-1]));
	r = list(clusters = clusters, cols.remove = remove);
	r
}
# identity function; NB: shadows base::identity with identical semantics (harmless)
identity = function(e)e
# Sequence that is equidistant on a transformed scale (log by default):
# endpoints are nudged inwards by eps, mapped by transf, interpolated linearly,
# and mapped back by transfI.
seq.transf = function(from = 0, to = 1, length.out = 1e1, ..., transf = log, transfI = exp, eps = 1e-5) {
	lower = transf(from + eps);
	upper = transf(to - eps);
	transfI(seq(from = lower, to = upper, length.out = length.out, ...))
}
#
# <p> bug fixes for packages
#
# Build a model matrix from a formula, restricted to complete cases.
#
# @param f formula, interpolated against data by project helper formula.re
# @param offset optional offset vector, subsetted to complete cases
# @param remove.intercept drop the '(Intercept)' column from the model matrix
# @return list(mm, response, offset, indeces): model matrix, response values,
#	offset and the original row indeces of the complete cases
model_matrix_from_formula = function(f, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	# <p> prepare data matrices
	f1 = formula.re(f, data = data, ignore.case = ignore.case);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	responseValues = if (length(response) > 0) data[[response]] else NULL;
	# reset row names so that row.names(data) later yields original positions
	row.names(data) = NULL;
	complete = !apply(data[, f1vars, drop = F], 1, function(r)any(is.na(r)));
	data = droplevels(data[complete, ]);
	responseValues = responseValues[complete];
	offset = if (!is.null(offset)) offset[complete] else NULL;
	mm = model.matrix(f1, model.frame(f1, data = data));
	if (remove.intercept) mm = mm[, !(dimnames(mm)[[2]] == '(Intercept)')];
	r = list(mm = mm, response = responseValues, offset = offset, indeces = as.integer(row.names(data)));
	r
}
# Row indeces of complete cases wrt the variables of formula f.
complete_from_formula = function(f, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	model_matrix_from_formula(f, data, offset, ignore.case, remove.intercept)$indeces
}
# Row indeces of complete cases wrt a character vector of variable names.
complete_from_vars = function(vars, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	f = as.formula(Sprintf('~ %{vars}s', vars = join(vars, ' + ')));
	model_matrix_from_formula(f, data, offset, ignore.case, remove.intercept)$indeces
}
# Fit a (cross-validated) glmnet model from a formula interface.
#
# @param f formula (interpolated via formula.re inside model_matrix_from_formula)
# @param lambdas optional lambda sequence; NULL lets glmnet choose
# @param cv if TRUE, select lambda by cv.glmnet and refit at lambda.min
# @return glmnet fit augmented with the element `formula`
glmnet_re = function(f, data, ..., offset = NULL, ignore.case = F, remove.intercept = F,
	lambdas = NULL, cv = T) {
	d = model_matrix_from_formula(f, data, offset, ignore.case, remove.intercept);
	# <p> fit model
	r = if (cv) {
		r0 = cv.glmnet(x = d$mm, y = d$response, lambda = lambdas, ..., offset = d$offset);
		# refit at the CV-optimal lambda; List(..., min_) strips CV-only arguments
		args = c(List(..., min_ = c('foldid', 'nfolds', 'grouped')),
			list(x = d$mm, y = d$response, lambda = r0$lambda.min, offset = d$offset));
#			list(x = d$mm, y = d$response, lambda = (3*r0$lambda.min + r0$lambda.1se)/4, offset = d$offset));
#			list(x = d$mm, y = d$response, lambda = (r0$lambda.min), offset = d$offset));
		do.call('glmnet', args);
	} else glmnet(x = d$mm, y = d$response, lambda = lambdas, ..., offset = d$offset);
	r = c(r, list(formula = f));
	r
}
# Refit a glmnet model as an unpenalized glm on the selected variables.
#
# @param model fit as returned by glmnet_re (must carry $formula, $df, $beta)
# @param data data frame used for refitting
# @param ... passed on to glm (e.g. family)
# @param var_cutoff absolute coefficient threshold below which variables count as unselected
# @param intercept intercept term pasted into the refit formula ('1' or '0')
# @param impute if 'mean', impute missing values of selected variables by column means
# @return list(glm, score) with the refitted model and linear predictor,
#	or empty list when <= 1 degree of freedom
glmnet_re_refit = function(model, data, ..., var_cutoff = 1e-6, intercept = '1', impute = NULL) {
	response = formula.response(model$formula);
	if (model$df <= 1) return(list());
	# <p> scrutinize model: variables with non-negligible coefficients
	coefs = model$beta;
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	if (!is.null(impute) && impute == 'mean') {
		# <!> use model matrix <i>
		# NB: return the imputed column explicitly — the former version returned
		# the value of the assignment (the column mean), not the imputed column
		d0 = sapply(varsSel, function(var) {
			column = data[[var]];
			column[is.na(column)] = mean(column, na.rm = T);
			column
		});
		data[, varsSel] = d0;
	}
	# <p> refit
	f = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f, data = data, ...);
	r0 = list(glm = glm1, score = as.vector(predict(glm1, data, type = 'link')))
	r0
}
#library('glmnet');
# Call a glmnet fitting function, retrying on errors (cv.glmnet uses random folds
# and can fail sporadically).
#
# @param ... arguments forwarded to glmnet.f
# @param glmnet.f fitting function (default cv.glmnet)
# @param max.tries maximal number of attempts before giving up
# @return first successful fit; stops with the last error message otherwise
grid.glmnet.raw = function(..., glmnet.f = cv.glmnet, max.tries = 3) {
	fit = NULL;
	for (i in 1:max.tries) {
		fit = try(glmnet.f(...), silent = T);
		# inherits() is the robust class test (class() may return a vector)
		if (!inherits(fit, 'try-error')) break;
	}
	if (inherits(fit, 'try-error')) stop(fit[1]);
	fit
}
# default control settings for grid.glmnet: refinement steps, grid size, lambda
# range and optional scale transformation of the lambda grid
grid.glmnet.control = list(steps = 4, Ngrid = 50, from = .01, to = .8, eps = 1e-5,
	transf = identity, transfI = identity);
# Cross-validated glmnet with iteratively refined lambda grid: repeatedly narrow
# the lambda interval around lambda.min, then run a final cv.glmnet over the full
# original range plus the refined lambda.min, and a plain glmnet fit at lambda.min.
# @return cv.glmnet fit whose $glmnet.fit is replaced by the single-lambda glmnet fit
grid.glmnet = function(..., control = grid.glmnet.control)
	with (merge.lists(grid.glmnet.control, control), {
	# initialize
	fit = NULL;
	fromO = from;
	toO = to;
	# NOTE(review): warnings are disabled here and reset to the *default* (warn = 0)
	# below — a caller's non-default warn option is clobbered; verify intent
	options(warn = -1);
	for (i in 1:steps) {
		lambda = seq.transf(from, to, length.out = Ngrid + 1, eps = eps,
			transf = transf, transfI = transfI);
		fit = grid.glmnet.raw(..., lambda = sort(lambda, decreasing = T));
		# shrink interval around the CV-optimal lambda
		from = max(fit$lambda.min - (to - from)/Ngrid, 0);
		to = fit$lambda.min + (to - from)/Ngrid;
	}
	options(warn = 0);
	# choose lambdas to contain lambda.min also covering the range between from and to
	lambda = c(
		seq.transf(fromO, toO, length.out = Ngrid + 1, eps = eps,
			transf = transf, transfI = transfI),
		fit$lambda.min
	);
	fit0 = do.call('grid.glmnet.raw', c(list(...), list(lambda = sort(lambda, decreasing = T))));
	# final plain glmnet fit at lambda.min (CV-only args stripped via List/min_)
	args = List(..., min_ = c('nfolds', 'grouped'));
	fit1 = do.call('grid.glmnet.raw', c(args, list(lambda = fit$lambda.min, glmnet.f = glmnet)));
	r = fit0;
	r$glmnet.fit = fit1;
	r
})
# f: formula, passed through formula.re
# data: data frame
# f: formula, passed through formula.re
# data: data frame
# Formula interface to grid.glmnet: builds the model matrix on complete cases and
# delegates to grid.glmnet; attaches the interpolated formula to the result.
grid.glmnet.re = function(f, data, ..., offset = NULL, control = grid.glmnet.control,
	ignore.case = F, remove.intercept = T)
	with (merge.lists(grid.glmnet.control, control), {
	# <p> prepare data matrices
	f1 = formula.re(f, data = data, ignore.case = ignore.case);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	# restrict to complete cases wrt the formula variables
	complete = !apply(data[, f1vars], 1, function(r)any(is.na(r)));
	d1 = data[complete, ];
	if (!is.null(offset)) offset = offset[complete];
	mm = model.matrix(f1, model.frame(f1, data = d1));
	if (remove.intercept) mm = mm[, !(dimnames(mm)[[2]] == '(Intercept)')];
	# <p> fit model
	r = grid.glmnet(x = mm, y = d1[[response]], ..., offset = offset, control = control);
	r = c(r, list(formula = f1));
	r
})
# Refit a grid.glmnet.re model as an unpenalized glm on the selected variables
# (analogous to glmnet_re_refit, but reading coefficients from $glmnet.fit).
#
# @param model fit as returned by grid.glmnet.re (carries $glmnet.fit, $formula)
# @param var_cutoff absolute coefficient threshold for variable selection
# @param intercept intercept term pasted into the refit formula ('1' or '0')
# @param impute if 'mean', impute missing values of selected variables by column means
# @return list(glm, score) with refitted model and linear predictor
grid_glmnet_re_refit = function(model, data, ..., var_cutoff = 1e-6, intercept = '1', impute = NULL) {
	# <p> scrutinize model
	coefs = coefficients(model$glmnet.fit);
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	response = formula.response(model$formula);
	if (!is.null(impute) && impute == 'mean') {
		# <!> use model matrix <i>
		# NB: return the imputed column explicitly — the former version returned
		# the value of the assignment (the column mean), not the imputed column
		d0 = sapply(varsSel, function(var) {
			column = data[[var]];
			column[is.na(column)] = mean(column, na.rm = T);
			column
		});
		data[, varsSel] = d0;
	}
	# <p> refit
	f = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f, data = data, ...);
	r0 = list(glm = glm1, score = as.vector(predict(glm1, data, type = 'link')))
	r0
}
# Refit a penalized model as nested glms (full model f1 vs null model f0 restricted
# to selected covariates), compare them by anova and report pseudo-R^2 measures.
#
# @param model fitted model whose coefficients() determine the selected variables
# @param f1 full formula, f0 null formula (both interpolated via formula.re)
# @param var_cutoff absolute coefficient threshold for variable selection
# @param intercept intercept term pasted into the refit formulas ('0' or '1')
# @return list(coefficients, anova, r2, r20, raw, model1, model0); NA fields when
#	no selected variable overlaps the null-model covariates
refitModel = function(model, f1, f0, data, ..., var_cutoff = 1e-6, ignore.case = F, intercept = '0') {
	# <p> prepare formula and data set
	f1 = formula.re(f1, data = data, ignore.case = ignore.case);
	f0 = formula.re(f0, data = data, ignore.case = ignore.case);
	f0covs = formula.covariates(f0);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	complete = complete.cases(data[, f1vars]);	#!apply(data[, f1vars], 1, function(r)(any(is.na(r))));
	d1 = data[complete, ];
	# <p> extract data set according to model
	coefs = coefficients(model);
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	varsSel0 = intersect(varsSel, f0covs);	# selected vars also present in null model
	if (!length(varsSel0)) return(
		list(coefficients = coefs, anova = NA, r2 = NA, r20 = NA, raw = NA, model1 = NA, model0 = NA)
	);
	# <p> re-fit glm
	f1 = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f1, data = d1, ...);
	f0 = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel0), collapse = ' + ')));
	glm0 = glm(f0, data = d1, ...);
	# <p> anova: likelihood ratio style comparison of nested refits
	a = anova(glm0, glm1, test = 'Chisq');
	# <p> R^2 as correlation of predictions with the response
	mn = mean(d1[[response]]);	# NB: unused since the centered variant is commented out
	#mm = model.matrix(f1, model.frame(f1, data = d1));
	pr = as.vector(predict(glm1, d1, type = 'response'));
	#r2 = cor((pr - mn), (d1[[response]] - mn));
	r2 = cor(pr, d1[[response]]);
	pr0 = as.vector(predict(glm0, d1, type = 'response'));
	#r20 = cor((pr0 - mn), (d1[[response]] - mn));
	r20 = cor(pr0, d1[[response]]);
	# <p> raw-model fit: response regressed on the full-model score plus null covariates
	fScore = as.formula(sprintf('y ~ score + %s', paste(c(intercept, varsSel0), collapse = ' + ')));
	d2 = data.frame(
		d1[, varsSel0], y = d1[[response]], score = as.vector(predict(glm1, d1))
	);
	if (length(varsSel0)) names(d2)[1:length(varsSel0)] = varsSel0;
	raw = glm(fScore, data = d2, ...);
	r = list(coefficients = coefs, anova = a, r2 = r2, r20 = r20,
		raw = summary(raw), model1 = glm1, model0 = glm0);
	r
}
#
# <p> crossvalidation
#
# <!> tb implemented
# Crossvalidation summary for linear models: mean squared error of predictions.
# NB: a stray `summary(r0)$fstatistic[1]` line referencing an undefined object r0
# (development leftover) has been removed — it errored at runtime.
#
# @param model fitted model (unused; kept for the cv_* callback interface)
# @param pred numeric vector of predictions
# @param data numeric vector of observed values
# @return mean squared error
cv_summary_lm = function(model, pred, data, ...) {
	r = mean( (pred - data)^2 );
	r
}
# Crossvalidation test callback for glm models: sum of log predicted responses.
# NOTE(review): responseD (the observed response) is extracted but never used;
# a Bernoulli log-likelihood would read
# sum(responseD*log(responseP) + (1-responseD)*log(1-responseP)) — verify intent.
cv_test_glm = function(model, formula, data, ...) {
	response = formula.response(formula);
	responseP = predict(model, data, type = 'response');
	responseD = data[[response]];
	ll = sum(log(responseP));
	ll
}
# cv_prepare = function(data, argsFrom...)
# cv_train = function(data, argsFrom...)
# cv_test = function(model, data, argsFrom...)
# @arg cv_fold number of crossvalidation folds, denotes leave -cv_fold out if negative
# Generic K-fold crossvalidation driver: per repeat, partition `data` into folds,
# train via cv_train on each fold's complement, evaluate via cv_test on the fold,
# and (optionally) re-assemble results in the original row order.
crossvalidate = function(cv_train, cv_test, cv_prepare = function(data, ...)list(),
	data, cv_fold = 20, cv_repeats = 1, ..., parallel = F, align_order = TRUE) {
	if (cv_fold == 0) stop('crossvalidate: cv_fold must be an integer != 0');
	# NOTE(review): when parallel == TRUE a parallel `Lapply` is assumed to exist
	# in the enclosing environment — verify
	if (!parallel) Lapply = lapply;
	N = nrow(data);
	r = with(cv_prepare(data = data, ...), {
		Lapply(1:cv_repeats, function(i, ...) {
			perm = Sample(1:N, N);
			# compute partitions
			fold = if (cv_fold > 0) cv_fold else as.integer(N/-cv_fold);
			parts = splitListEls(perm, fold, returnElements = T);
			o = order(unlist(parts));	# inverse permutation to restore input order
			r = Lapply(parts, function(part, cv_train, cv_test, data, cv_repeats, ...) {
				d0 = data[-part, , drop = F];	# training data
				d1 = data[part, , drop = F];	# held-out test data
				model = cv_train(..., data = d0);
				r = cv_test(model = model, ..., data = d1);
				gc();
				r
			}, cv_train = cv_train, cv_test = cv_test,
				data = data, cv_repeats = cv_repeats, ...);
			# re-establish order
			r = if (align_order
				&& all(sapply(r, class) %in% c('numeric', 'integer'))
				&& all(sapply(r, length) == 1)) {
				unlist(r)[o];
			} else if (align_order && all(sapply(r, class) == 'data.frame') &&
				sum(sapply(r, nrow)) == nrow(data)) {
				#<!> untested
				#r = rbindDataFrames(r, colsFromFirstDf = T);
				r = do.call(rbind, r);
				r[o, ]
			} else if (align_order) stop("Crossvalidate: didn't know how to align order.") else r;
			gc();
			r
		}, ...)});
	r
}
#
# <p> data standardization
#
# Scale a vector to unit standard deviation (no centering).
standardize = function(v) {
	v / sd(v)
}
# Z-standardize the given columns of a data frame (subtract mean, divide by sd,
# NAs ignored); non-listed columns are carried over unchanged.
df2z = function(data, vars = names(as.data.frame(data))) {
	data = as.data.frame(data);
	# sapply yields a matrix (rows x vars); data.frame.types is a project helper
	df = data.frame.types(sapply(vars, function(v) {
		(data[[v]] - mean(data[[v]], na.rm = T)) / sd(data[[v]], na.rm = T)
	}), do.transpose = F);
	# replace original columns by their standardized versions
	i = which.indeces(vars, names(data));
	d0 = data.frame(data[, -i], df);
	d0
}
# Lump rare factor levels: repeatedly merge the two smallest levels into a new
# synthetic level until every level has at least minN observations (or a single
# level remains).
#
# @param factor factor (or vector coercible to factor)
# @param minFreq minimal relative frequency per level (overrides minN if given)
# @param minN minimal absolute count per level
# @param levelPrefix prefix for synthetic merged-level names ('l1', 'l2', ...)
# @return list(map, factor): per-original-level mapping to final levels and the
#	lumped factor (as character vector)
lumpFactor = function(factor, minFreq = NULL, minN = 20, levelPrefix = 'l') {
	# <p> preparation
	f0 = as.factor(factor);
	t0 = table(f0);
	ls = levels(f0);
	N = length(f0);
	if (!is.null(minFreq)) minN = as.integer(minFreq * N + 0.5);
	# <p> lumping; at most length(t0) - 1 merges are possible
	map = listKeyValue(ls, ls);
	for (i in 1:length(t0)) {
		t0 = table(factor);
		if (all(t0 >= minN) || length(t0) < 2) break;
		# combine two smallest groups
		t1 = sort(t0);
		newLevel = sprintf('%s%d', levelPrefix, i);
		factor = as.character(factor);
		factor[factor == names(t1)[1] | factor == names(t1)[2]] = newLevel;
		map[[names(t1)[1]]] = map[[names(t1)[2]]] = newLevel;
		map[[newLevel]] = newLevel;
	}
	# <p> normalize map: follow merge chains until the mapping is stable
	lsNew = as.character(ls);
	repeat {
		lsNew0 = lsNew;
		lsNew = as.character(map[lsNew]);
		if (all(lsNew == lsNew0)) break;
	}
	return(list(map = listKeyValue(ls, lsNew), factor = factor));
}
# lump a variable after checking other variables for non-missingness
# For each variable in vars, lump the levels of data[[var]] based on the subset of
# rows where that variable is non-missing; returns one lumped column per element
# of vars, named '<var>_<v><postfix>'.
lumpVariableOnVariables = function(data, var, vars, postfix = '_lump', minN = 20) {
	# prepare confounder afkomst
	lump = sapply(vars, function(v) {
		dvar = data[[var]][!is.na(data[[v]])];
		lump = lumpFactor(dvar, minN = minN);
		# NOTE(review): indexing the map list with a factor may use integer level
		# codes rather than level names — verify this resolves as intended
		dvarNew = as.character(lump$map[as.factor(data[[var]])]);
		dvarNew[dvarNew == 'NULL'] = NA;	# unmapped levels come back as 'NULL'
		as.factor(dvarNew)
	});
	d = data.frame(lump);
	names(d) = paste(var, paste(vars, postfix, sep = ''), sep = '_');
	d
}
#
# <p> descriptive
#
# Tabulate set relationships between named vectors: per-vector size, unique count,
# NA count, plus pairwise intersection, union and both set differences.
#
# @param l named list of vectors
# @return data frame with columns (type, count)
compareVectors = function(l) {
	sets = names(l);
	# marginals: total length, unique elements, missing values
	r0 = nlapply(sets, function(n)c(n, length(l[[n]])));
	r1 = nlapply(sets, function(n)c(sprintf('%s-unique', n), length(unique(l[[n]]))));
	r2 = nlapply(sets, function(n)c(sprintf("%s-NA", n), sum(is.na(l[[n]]))));
	# pairwise comparisons over unordered pairs (constraint A < B avoids duplicates)
	modelList = list(A = sets, B = sets);
	r3 = iterateModels(modelList, .constraint = function(A, B)(A < B), function(i, A, B) {
		r = list(
			c(sprintf("%s inter %s", A, B), length(intersect(l[[A]], l[[B]]))),
			c(sprintf("%s union %s", A, B), length(union(l[[A]], l[[B]]))),
			c(sprintf("%s min %s", A, B), length(setdiff(l[[A]], l[[B]]))),
			c(sprintf("%s min %s", B, A), length(setdiff(l[[B]], l[[A]])))
		);
		r
	}, lapply__ = lapply)$results;
	r = c(r0, r1, r2, unlist.n(r3, 1));
	r = data.frame.types(r, do.rbind = T, names = c('type', 'count'));
	r
}
# Diagonal panel for pairs(): histogram of the variable, scaled to panel height.
pairs_std.panel.hist <- function(x, ...) {
	usr <- par("usr"); on.exit(par(usr))
	par(usr = c(usr[1:2], 0, 1.5) )
	h <- hist(x, plot = FALSE)
	breaks <- h$breaks; nB <- length(breaks)
	y <- h$counts; y <- y/max(y)
	rect(breaks[-nB], 0, breaks[-1], y, col = "grey", ...)
}
# Upper panel for pairs(): correlation estimate with confidence interval and P-value.
pairs_std.panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
	usr <- par("usr"); on.exit(par(usr))
	par(usr = c(0, 1, 0, 1))
	c0 = cor.test(x, y);
	txt <- paste0(prefix,
		sprintf("Cor: %.2f (%.2f, %.2f)", c0$estimate, c0$conf.int[1], c0$conf.int[2]), "\n",
		sprintf("P-value: %.2e", c0$p.value)
	)
	if(missing(cex.cor)) cex.cor <- 0.8/strwidth(txt)
	text(0.5, 0.5, txt, cex = cex.cor * 1) # used tb cex.cor * r
}
# Scatterplot matrix with histograms on the diagonal and correlation stats above.
pairs_std = function(...) {
	pairs(..., diag.panel = pairs_std.panel.hist, upper.panel = pairs_std.panel.cor)
}
#
# <p> omics data
#
# Empirical p-quantile of d: the element at rank ceiling(p * n) of the sorted data.
quantileData = function(d, p) {
	sorted = sort(d);
	sorted[ceiling(p * length(sorted))]
}
# Build a reference distribution vector from a matrix of reference samples.
# With center = TRUE each row/column (per `direction`) is centered and then shifted
# back by the mean of all centers, removing per-sample location differences.
quantileReference = function(reference, direction = 2, center = TRUE) {
	if (is.matrix(reference) && center) {
		# matrixCenter/matrixDeCenter: project helpers returning $matrix and $center
		refC = matrixCenter(reference, direction);
		reference = matrixDeCenter(refC$matrix, mean(refC$center), direction);
	}
	# flatten to a single NA-free vector of reference values
	ref = na.omit(as.vector(as.matrix(reference)));
	ref
}
# Sample skewness with small-sample correction factor (1 - 1/N)^(3/2).
Skewness = function(x, na.rm = T) {
	values = if (na.rm) na.omit(x) else x;
	N = length(values);
	centered = values - mean(values)
	g1 = sqrt(N) * sum(centered^3) / (sum(centered^2)^(3/2))
	g1 * (1 - 1/N)^(3/2)
}
# Number of boxplot outliers: points beyond coef * IQR from the box.
Noutliers = function(x, coef = 1.5) {
	length(boxplot.stats(x, coef = coef)$out)
}
#' Quantile normalization of frame/matrix with respect to reference distribution
#'
#' Distribution to be normalized are represented as columns or rows of a matrix/data frame.
#' Each value is replaced by the quantile of the reference distribution as given by the value of the
#' empirical distribution function of the given value.
#'
#' @param reference numeric vector with realizations from the target distribution
#' @param data data frame or matrix with data to be normalized
#' @param direction is \code{data} organized per row or column?
#'
#' @examples
#' d = sapply(1:20, function(i) rnorm(1e4));
#' dNorm = quantileNormalization(as.vector(d), d)
quantileNormalization = function(reference, data, direction = 2,
	impute = TRUE, ties = 'random', center = TRUE, referenceDirection = direction) {
	ref = quantileReference(reference, referenceDirection, center);
	# temporary median imputation so that rank/quantile see no NAs
	if (impute) mns = apply(data, 3 - direction, median, na.rm = T);
	dN = apply(data, direction, function(d) {
		d0 = d;
		if (impute) d[is.na(d0)] = mns[is.na(d0)];
		# NOTE(review): `ties` relies on partial matching of rank's `ties.method`
		# argument — verify
		r = quantile(ref, probs = rank(d, na.last = 'keep', ties = ties)/length(na.omit(d)))
		if (impute) r[is.na(d0)] = NA;	# restore original missingness
		r
	});
	if (direction == 1) dN = t(dN);
	dimnames(dN) = dimnames(data);
	dN
}
# quantile normalization based on samples picked on the basis of their medians (around the medians)
# Nqn: number of reference samples
# The Nqn samples whose medians are closest to the median-of-medians are rescaled
# to a common median/IQR and pooled into the reference distribution.
quantileNormalizationMedians = function(data, direction = 2, Nqn = 5, impute = TRUE) {
	# <p> determine median of medians, corresponding median, IQR
	medians = apply(data, direction, median);
	mediansO = order(medians);
	medianOI = as.integer(length(mediansO)/2 + .5);	# rank of the median sample
	medianI = mediansO[medianOI];	# index of the sample with the median median
	refMed = summary(data[, medianI]);
	refIQR = refMed[['3rd Qu.']] - refMed[['1st Qu.']];
	# <p> reference samples: Nqn samples centered on the median rank
	refL = as.integer(medianOI - Nqn/2 + .5);
	refU = refL + Nqn - 1;
	refSamples = mediansO[refL:refU];
	#print(refSamples)
	# <p> standardize reference samples wrt median, IQR
	refSampleValues = sapply(refSamples, function(i) {
		refI = summary(data[, i]);
		refIIQR = refI[['3rd Qu.']] - refI[['1st Qu.']];
		E = (data[, i] - refI[['Median']]) * refIQR/refIIQR + refMed[['Median']];
		#refIE = summary(E);
		#refIEIQR = refIE[['3rd Qu.']] - refIE[['1st Qu.']];
		#print(list(refI = refI, refIIQR = refIIQR, refIE = refIE, refIEIQR = refIEIQR));
		E
	});
	# center = FALSE: samples were already aligned above
	eQn = quantileNormalization(refSampleValues, data,
		direction = direction, impute = impute, center = FALSE);
	eQn
}
# Center each column of a matrix/data frame at zero mean.
dataCentered = function(d, na.rm = T) {
	apply(d, 2, function(column) column - mean(column, na.rm = na.rm))
}
#
# <p> distributions
#
# Quantile function of the normal distribution truncated to [lower, upper]:
# rescale p into the untruncated CDF range, then invert via qnorm.
qtruncnorm = function(p, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
	pLower = pnorm(lower, mean, sd);
	pUpper = pnorm(upper, mean, sd);
	qnorm(pLower + p * (pUpper - pLower), mean, sd = sd)
}
# Draw N samples from the truncated normal via inverse-CDF sampling.
rtruncnorm = function(N, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
	qtruncnorm(runif(N), mean, sd = sd, lower, upper)
}
# QQ-plot of observed values against a theoretical quantile function.
# NB: the body formerly used `Nqtls` (undefined; typo of the parameter Nqts) and
# the free global `rtn`; the latter is now an explicit argument that still
# defaults to `rtn` for backward compatibility.
#
# @param Nqts number of quantile points
# @param qdist theoretical quantile function, called as qdist(probs, ...)
# @param obs observed sample to compare (default: global `rtn` — TODO confirm)
# @return ggplot object
qqDist = function(Nqts = 1e2, qdist, ..., obs = rtn) {
	qtls = (1:Nqts)/(Nqts + 1);
	qtlsExp = qdist(qtls, ...);
	qtlsObs = quantile(obs, qtls);
	qq = qplot(qtlsExp, qtlsObs) + theme_bw();
	qq
}
# Simulate from distribution `dist` and QQ-plot the sample against its own
# theoretical quantiles. NB: made self-contained — the former version generated
# the sample but passed nothing to qqDist, which then read an undefined global.
#
# @param Nsim number of simulated values
# @param dist distribution name; functions r<dist>/q<dist> must exist
# @param Nqts number of quantile points for the plot
# @return ggplot object
qqSim = function(Nsim, dist = 'truncnorm', Nqts = Nsim/10, ...) {
	require('ggplot2');
	rdist = get(Sprintf('r%{dist}s'));
	r = rdist(Nsim, ...);
	qdist = get(Sprintf('q%{dist}s'));
	qtls = (1:Nqts)/(Nqts + 1);
	qq = qplot(qdist(qtls, ...), quantile(r, qtls)) + theme_bw();
	qq
}
#
# <p> entropy
#
# Shannon entropy (natural log) of the empirical distribution of d.
# @param d data tabulated by table.freq (relative frequencies; project helper)
# @return H = -sum(p * log(p)), zero-probability cells excluded
table.entropy = function(d) {
	p = table.freq(d);
	p = p[p != 0];
	H = - sum(p * log(p));
	# return H visibly (an assignment as last expression returns invisibly)
	H
}
#
# <p> qvalue
#
# Wrapper around qvalue::qvalue that tolerates NAs in the input P-value vector:
# q-values are computed on the non-missing entries and re-embedded.
Qvalue = function(P.value, ...) {
	require('qvalue');
	P.valuesNotNA = na.omit(P.value);
	qv = qvalue(P.valuesNotNA, ...);
	r = qv;
	# NOTE(review): rep(NA, sum(is.na(P.value))) has length = #missing, not
	# length(P.value); semantics of project helper vector.embed are not visible
	# here — verify the result aligns with P.value
	r$qvalue = vector.embed(rep(NA, sum(is.na(P.value))), which(!is.na(P.value)), qv$qvalue);
	r
}
#
# Rpatches.R
#Fri Nov 20 17:18:37 CET 2009
# geepack patch
# Patched geepack Wald-test anova for two GEE models: checks nestedness via
# projections of the model matrices, re-fits the larger model on an orthonormal
# basis that spans both, and tests the extra coefficients with a Wald chi-square.
# Kept byte-identical to the upstream patch apart from comments.
anovageePrim2 = function (m1, m2, ...)
{
    mm1 <- model.matrix(m1)
    mm2 <- model.matrix(m2)
    # projection matrices onto the column spaces of mm1 and mm2
    P1 <- mm1 %*% solve(t(mm1) %*% mm1) %*% t(mm1)
    P2 <- mm2 %*% solve(t(mm2) %*% mm2) %*% t(mm2)
    # residuals of each design wrt the other: zero variance <=> nested
    e2 <- mm2 - P1 %*% mm2
    e1 <- mm1 - P2 %*% mm1
    m2inm1 <- all(apply(e2, 2, var) < 1e-10)
    m1inm2 <- all(apply(e1, 2, var) < 1e-10)
    if (!any(c(m2inm1, m1inm2)))
        cat("Models not nested\n")
    else if (all(c(m2inm1, m1inm2)))
        cat("Models are identical\n")
    else {
        # ensure m1 is the larger model
        if (m1inm2) {
            tmp <- m1
            m1 <- m2
            m2 <- tmp
        }
        mm1 <- model.matrix(m1)
        mm2 <- model.matrix(m2)
        mf1 <- paste(paste(formula(m1))[c(2, 1, 3)], collapse = " ")
        mf2 <- paste(paste(formula(m2))[c(2, 1, 3)], collapse = " ")
        # orthonormal basis (QR) of the joint design, smaller model first
        mm <- cbind(mm2, mm1)
        qmm <- qr(mm)
        qmmq <- qr.Q(qmm)
        nymm1 <- as.data.frame(qmmq[, 1:qmm$rank])
        colnames(nymm1) <- paste("parm", 1:ncol(nymm1), sep = ".")
        nymm2 <- nymm1[, 1:ncol(mm2), drop = FALSE]
        formula1 <- formula(paste(formula(m1)[[2]], formula(m1)[[1]],
            paste(c("-1", colnames(nymm1)), collapse = "+"),
            collapse = ""))
        # re-fit the larger model on the orthonormalized design
        m1call <- m1$call
        nymm1[, paste(formula(m1)[[2]])] <- m1$y
        nymm1[, paste(m1call$id)] <- m1$id
        m1call$offset <- m1$offset
        m1call$weights <- m1$weights
        m1call$formula <- formula1
        m1call$data <- nymm1
        m1ny <- eval(m1call)
        beta <- coef(m1ny)
        vbeta <- summary(m1ny)$cov.unscaled
        # Wald statistic on the df extra coefficients (the trailing basis columns)
        df <- dim(mm1)[2] - dim(mm2)[2]
        rbeta <- rep(1, length(beta))
        rbeta[1:df] <- 0
        beta0 <- rev(rbeta)
        zeroidx <- beta0 == 0
        X2 <- t(beta[zeroidx]) %*% solve(vbeta[zeroidx, zeroidx,
            drop = FALSE]) %*% beta[zeroidx]
        topnote <- paste("Model 1", mf1, "\nModel 2", mf2)
        title <- "Analysis of 'Wald statistic' Table\n"
        table <- data.frame(Df = df, X2 = X2, p = 1 - pchisq(X2,
            df))
        dimnames(table) <- list("1", c("Df", "X2", "P(>|Chi|)"))
        val <- structure(table, heading = c(title, topnote),
            class = c("anova", "data.frame"))
        return(val)
    }
}
#
# Rdataset.R
#Tue Sep 28 14:53:47 2010
# a dataset is a list with two data.frames
# data: contains "data"
# meta: contains meta information about "data"
# meta data frame
# name string/re to describe variable
# type (admin|var|unknown)
# fullType (admin:cluster|id|idM|idF)
# index index of column
# Build the meta data frame for a dataset: match each column name of d against
# the `name` patterns in metaTemplate; a unique match contributes that template's
# type/fullType, otherwise the column is classified 'unknown'.
#
# @param d data frame
# @param metaTemplate list of lists with elements name (regex), type, fullType
# @return data frame with columns name, type, fullType, index
metaData = function(d, metaTemplate, ignore.case = T) {
	ns = names(d);
	dm = listOfLists2data.frame(lapply(1:length(ns), function(i) {
		n = ns[i];
		# logical mask: which templates match this column name
		m = sapply(metaTemplate, function(mt)(length(grep(mt$name, n, ignore.case = ignore.case)) > 0));
		r = metaTemplate[m];
		# ambiguous or absent matches fall back to 'unknown'
		r = if (length(r) != 1) list(name = n, type = 'unknown', fullType = 'unknown') else
			merge.lists(r[[1]], list(name = n))[c('name', 'type', 'fullType')];
		r = c(r, list(index = i));
		r
	}), idColumn = NULL);
	dm
}
# Apply per-column transformations described by metaTemplate: a column whose name
# uniquely matches a template's `name` pattern is replaced by template$transf(column).
transformData = function(d, metaTemplate, ..., ignore.case = T) {
	for (varName in names(d)) {
		matches = sapply(metaTemplate, function(template) {
			length(grep(template$name, varName, ignore.case = ignore.case)) > 0
		});
		if (sum(matches) != 1) next;	# only unique matches are transformed
		template = metaTemplate[matches][[1]];
		if (!is.null(template$transf)) d[[varName]] = template$transf(d[[varName]]);
	}
	d
}
# Names of dataset columns whose meta fullType equals `type`.
columnsOfType = function(d, type) {
	meta = d$meta;
	meta$name[meta$fullType == type]
}
#
# Rsimulation.R
#Mon 07 Jan 2008 06:56:12 PM CET
#
# <§> setup
#
#library(MASS);
#source(sprintf("%s/Rgeneric.R", Sys.getenv("MYRLIB")), chdir=TRUE);
#library(ggplot2); #firstUpper
#
# <§> implementation
#
#
# <p> helper methods
#
# Index combinations for 'twinned' parameters: parameters named in `twins`
# (dot-separated) are varied in lock-step, i.e. each row uses the same level
# index for all of them. All twinned parameters are assumed to share the length
# of the first one.
parameterCombinationsTwins = function(specification, parameters, twins) {
	parNames = strsplit(twins, ".", fixed = T)[[1]];
	Npars = length(parNames);
	Nlevels = length(parameters[[parNames[1]]]);	# assume equal length here
	combinations = data.frame(matrix(1:Nlevels, ncol = Npars, nrow = Nlevels));
	names(combinations) = parNames;
	combinations
}
# Build the data frame of parameter index combinations: constrained parameter
# groups (specification$constraints, dispatched by type to
# parameterCombinations<Type>) are joined with the full cross product of the
# remaining parameters' level indeces.
parameterCombinations = function(specification, parameters) {
	# <p> initialization
	parCnts = lapply(parameters, length);	# NB: unused, kept as in original
	# <p> handle constraints (<A> must not overlap)
	if (!is.null(specification$constraints)) {
		parsC = lapply(names(specification$constraints), function(c) {
			# dispatch by constraint type, e.g. parameterCombinationsTwins
			fn = get(con("parameterCombinations", firstUpper(specification$constraints[[c]]$type)));
			cs = fn(specification, parameters, c);
			cs
		})
		names(parsC) = names(specification$constraints);
	} else parsC = list();
	# <p> add remaining parameters (those not covered by any constraint)
	parsF = if (!is.null(specification$constraints)) {
		parameters[-unlist(sapply(names(specification$constraints), function(p) {
			pars = strsplit(p, ".", fixed = T)[[1]];
			idcs = which.indeces(pars, parameters);
			idcs
		}))]
	} else parameters;
	parsF = lapply(parsF, function(p)1:length(p));
	parsA = c(parsC, parsF);
	# <p> construct natural joint: unconstraint combinations
	df = data.frame(..dummy = 1);
	for (i in 1:length(parsA)) {
		df = merge(df, parsA[i]);
	}
	df = df[, -1];	# drop the ..dummy seed column
	# <p> cleanup (names of df)
	ns = unlist(lapply(parsC, function(p)names(p)));
	ns = c(ns, names(parsF));
	names(df) = ns;
	df
}
# gIndex: global index for reference purposes
# lists are interpolated with arrays such that the name of the array
# becomes embedded as list element
# Assemble the concrete parameter set for one combination: for each collapsing
# group, look up the selected level of every member parameter (wrapping scalars
# into named lists) and flatten one level.
collapseParameters = function(collapsingGroups, parameters, indeces, gIndex) {
	iNs = names(indeces);	# NB: unused, kept as in original
	pars = lapply(collapsingGroups, function(g) {
#		p = unlist.n(sapply(g$names, function(nm){
#			as.list(parameters[[nm]][indeces[[nm]]])
#		}), firstDef(g$collapse, 0));
		p = unlist.n(lapply(g, function(nm){
			po = parameters[[nm]][[indeces[[nm]]]]; # parameter object
			if (!is.list(po)) {
				po = list(po);
				names(po) = nm;
			}
			po
		}), 1);
		p
	});
	#if (is.list(pars$system)) pars$system$globalIndex = gIndex;
	pars
}
#
# <p> generic methods
#
# Enumerate all parameter sets of a simulation object in the given order.
# @return list(parameters, order): collapsed parameter lists and the iteration order
parameterIteration = function(s, order = NULL, reverse = F) {
	o = firstDef(order, 1:dim(s@combinations)[1], .dfInterpolate = F);
	#order.df(s@combinations, names(s@parameters), reverse);
	ps = lapply(o, function(i) {
		p = collapseParameters(s@specification$collapse, s@parameters, as.list(s@combinations[i, ]), i);
		p
	});
	i = list(parameters = ps, order = o);
	i
}
# i is given in canonical ordering of parameters
# Path of the RData results file for parameter combination i, zero-padded so
# files sort in iteration order.
simulationFile = function(s, i) {
	spec = s@specification;
	pars = parameterIteration(s); # canonical ordering
	digits = ceiling(log10(length(pars$order))); # digits needed for enumeration
	filename = sprintf("%s/%s-%0*d.RData", spec$resultsDir, spec$name, digits, i);
	filename
}
# needs: spec$cluster(hosts, type), spec$resultsFile|spec$name, spec$simulationFunction
# Run the simulation iteration on a snow cluster (or locally when spec$local).
# Each parameter combination is evaluated by spec$simulationFunction; results are
# saved to simulationFile immediately (doSave, !delaySave) or after the iteration
# (delaySave).
# Fixes wrt the original: the delaySave branch called simulationFile(s, i,
# pars$order[i]) — three arguments to a two-argument function with `pars`
# undefined; and ifelse() on a scalar condition truncated the stored simulation
# result to its first element.
runIterationCluster = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = merge.lists(list(doSave = T, delaySave = F, local = F), s@specification);
	simulationPars = parameterIteration(s, order = order, reverse = reverse);
	# <p> initialize
	if (!is.null(spec$init)) { eval(parse(text = spec$init)); }
	f = get(spec$simulationFunction);
	# <p> iteration function, run once per parameter combination
	clf = function(i, simulationPars, ...){
		p = simulationPars$parameters[[i]];
		t0 = sum(proc.time()[3]);
		sim = try(f(p, ...));
		t1 = sum(proc.time()[3]) - t0;
		if (!inherits(sim, "try-error") && spec$doSave && !spec$delaySave) {
			save(sim, file = simulationFile(s, simulationPars$order[i]));
		}
		# if/else instead of ifelse(): ifelse(TRUE, sim, .) returns only sim[1]
		r = list(
			time = t1,
			parameters = p,
			result = if (spec$delaySave) sim else !inherits(sim, "try-error")
		);
		r
	};
	if (!spec$local) {
		# <p> make cluster
		library("snow");
		c = spec$cluster;
		hosts = if (is.null(c$hosts)) rep("localhost", 8) else c$hosts;	#<A> cave vectors
		cl = makeCluster(hosts, type = firstDef(c$type, "SOCK"));
		clusterSetupRNG(cl);
		# <p> cluster intitalizations
		if (!is.null(c$source)) {
			textSource = sprintf("clusterEvalQ(cl, { %s })",
				paste(c(sapply(c$source, function(s)sprintf("source(\"%s\")", s)), ""), collapse = "; ")
			);
			eval(parse(text = textSource));
		}
		clusterExport(cl, spec$simulationFunction);
	}
	# <p> iterate (built as text so spec$args can be spliced into the call)
	textExec = sprintf(
		"%s 1:length(simulationPars$parameters), clf, simulationPars = simulationPars, %s%s;",
		ifelse(spec$local, "lapply(", "clusterApplyLB(cl,"), paste(spec$args, collapse = ", "), ")"
	);
	print(textExec);
	simulations = eval(parse(text = textExec));
	#print(simulations);
	# <p> finish up
	if (!spec$local) stopCluster(cl)
	if (spec$delaySave) for (i in 1:length(simulations)) {
		sim = simulations[[i]]$result;
		if (!inherits(sim, "try-error") && spec$doSave)
			save(sim, file = simulationFile(s, simulationPars$order[i]));
	}
	simulationPars
}
# Run the simulation iteration sequentially in the current process.
# Fixes wrt the original: the delaySave branch called simulationFile(s, i,
# pars$order[i]) — three arguments to a two-argument function; and ifelse() on a
# scalar condition truncated the stored simulation result to its first element.
runIterationPlain = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = s@specification;
	pars = parameterIteration(s, order = order, reverse = reverse);
	f = get(spec$simulationFunction);
	# <p> iterate
	simulations = lapply(1:length(pars$parameters), function(i){
		p = pars$parameters[[i]];
		t0 = sum(proc.time()[1:2]);
		sim = try(f(p));
		t1 = sum(proc.time()[1:2]) - t0;
		if (!inherits(sim, "try-error") && spec$doSave && !spec$delaySave) {
			save(sim, file = simulationFile(s, pars$order[i]));
		}
		# if/else instead of ifelse(): ifelse(TRUE, sim, .) returns only sim[1]
		r = list(
			time = t1,
			parameters = p,
			result = if (spec$delaySave) sim else !inherits(sim, "try-error"));
		r
	});
	if (spec$delaySave) for (i in 1:length(simulations)) {
		sim = simulations[[i]]$result;
		if (!inherits(sim, "try-error") && spec$doSave)
			save(sim, file = simulationFile(s, pars$order[i]));
	}
	pars
}
# Summarize a completed simulation iteration: load each saved result file, apply
# spec$summaryFunctionSingle per simulation (if configured), then aggregate with
# spec$summaryFunction (if configured).
# NB: a debug `print(pars)` leftover (dumping the full parameter list to the
# console) has been removed.
summarizeIteration = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = s@specification;
	pars = parameterIteration(s, order = order, reverse = reverse);
	f = if (is.null(spec$summaryFunctionSingle)) NULL else get(spec$summaryFunctionSingle);
	simulations = lapply(1:length(pars$order), function(i) {
		parIndex = pars$order[i];
		file = simulationFile(s, parIndex);
		# missing result files yield NULL (e.g. failed simulations)
		sim = if (file.exists(file)) { get(load(file)[1]) } else NULL;
		# <%><N> interpolate old simulations
		#if (length(sim) == 1) sim = sim[[1]];
		r = if (is.null(f)){ NA } else f(s, sim, pars$parameters[[parIndex]]);
		r
	});
	r = NULL;
	if (!is.null(spec$summaryFunction)) {
		summary = get(spec$summaryFunction);
		r = summary(s, simulations, pars$order, pars);
	}
	r
}
# Dispatch the simulation run to runIteration<IterationMethod> (default 'plain',
# alternative 'cluster'), based on spec$iterationMethod.
runIteration = function(s, order = NULL, reverse = F) {
	spec = s@specification;
	methodName = sprintf("runIteration%s", firstUpper(firstDef(spec$iterationMethod, "plain")));
	method = get(methodName);
	Log(sprintf('Rsimulation: %s', methodName), 2);
	method(s, order, reverse);
}
#
# <p> class
#
# specification contains restrictions on parameter combinations, grouping
# restrictions:
# twins: pair parameters as listed (e.g. model simulation, estimation)
# grouping: build final parameters by merging sublists
# conventional group:
# system: parameters other than involved in statistical concepts
# model: specification of the model
# parameters: model parameters
# S4 class holding a simulation study: its specification, the raw parameter
# lists and the precomputed data frame of parameter index combinations.
setClass("Rsimulation",
	representation(specification = "list", parameters = "list", combinations = "data.frame",
		mode = "character"),
	prototype(specification = list(), parameters = list(), combinations = data.frame(), mode = NULL)
);
# Initialize from a globally defined simulation description (list with elements
# $specification and $parameters), optionally specialized by a named mode.
setMethod("initialize", "Rsimulation", function(.Object, simulationName, mode = NULL) {
	s = get(simulationName);
	specification = merge.lists(list(doSave = T, delaySave = F), s$specification);
	specification$name = simulationName;
	parameters = s$parameters;
	# NOTE(review): scalar `&` here; errors if needsMode is NULL — verify
	if (specification$needsMode & is.null(mode)) {
		stop(con("Need simulation mode [",
			paste(names(specification$mode), collapse = ", "), "]"));
	}
	if (!is.null(mode)) {
		# mode-specific settings override the base specification
		specification = merge.lists(specification, specification$mode[[mode]]);
	}
	.Object@mode = mode;
	.Object@specification = specification;
	.Object@parameters = parameters;
	.Object@combinations = parameterCombinations(specification, parameters);
	.Object
});
#
# RpropertyList.R
#Fri Jan 7 17:40:12 2011
# wrap string for property list
# Plain identifier-like strings pass through unquoted; anything else is wrapped
# in double quotes with backslashes and quotes escaped.
ws = function(s) {
	isPlain = length(grep('^([_/\\a-zA-Z0-9.]+)$', s)) > 0;
	if (isPlain) return(s);
	escaped = gsub('([\\"])', '\\\\\\1', s);
	sprintf('"%s"', escaped)
}
# can a string be condensed into a single line
# Width heuristic: estimated width = nchar(s) + indentation, minus a small
# correction when s contains a tab. NB: the original used nchar(grep("\t", s)),
# which yields numeric(0) for tab-free strings and made the `if` error;
# length(grep(...)) gives the same value (1) when a tab is present and 0 otherwise.
condense = function(s, ident, o) {
	if (nchar(s) + ident * o$tabWidth - length(grep("\t", s)) < o$screenWidth) {
		s = gsub("\n", ' ', s);
		s = gsub("\t", '', s);
	}
	s
}
# Recursive worker for stringFromProperty: serialize an R object into a
# NeXT-style property list string. Functions are deparsed into quoted strings,
# named lists become { key = value; ... } dictionaries, vectors/unnamed lists
# become ( elem, ... ) arrays, scalars are quoted via ws().
# o carries formatting options plus kp, the current key path (used by forceVectors).
stringFromPropertyI = function(obj, ident, o) {
	str = '';
	inS = join(rep("\t", ident), '');	# indentation for the closing delimiter
	in1S = join(rep("\t", ident + 1), '');	# indentation for the elements
	if ( class(obj) == 'function' ) {
		str = sprintf('%s%s', str, ws(join(deparse(obj), "\n")))
	} else if ( class(obj) != 'list' & length(obj) == 1 & !(o$kp %in% o$forceVectors)) {
		# <i> data support
		str = sprintf('%s%s', str, ws(obj));
	} else if (class(obj) == 'list' && !is.null(names(obj))) {
		# named list -> dictionary
		hash = sprintf("{\n%s%s;\n%s}", in1S, paste(sapply(names(obj), function(k) {
			o = merge.lists(o, list(kp = sprintf('%s.%s', o$kp, k)));
			r = sprintf('%s = %s', ws(k), stringFromPropertyI(obj[[k]], ident+1, o))
			r
		}), collapse = sprintf(";\n%s", in1S)), inS);
		if (!o$noFormatting) hash = condense(hash, ident, o);
		str = sprintf('%s%s', str, hash);
	} else { # vector or anonymous list
		obj = as.list(obj);
		array = sprintf("(\n%s%s\n%s)", in1S, if (length(obj) < 1) '' else paste(
			sapply(1:length(obj), function(i) {
				e = obj[[i]];
				o = merge.lists(o, list(kp = sprintf('%s.[%d]', o$kp, i)));
				stringFromPropertyI(e, ident+1, o)
			}), collapse = sprintf(",\n%s", in1S)), inS);
		if (!o$noFormatting) array = condense(array, ident, o);
		str = sprintf('%s%s', str, array);
	}
	str
}
# default formatting options for stringFromProperty
# NOTE(review): `defaults` is a very generic global name — collision-prone
defaults = list(screenWidth = 80, tabWidth = 4, noFormatting = F, kp = '');
# Serialize an R object to a property list string (see stringFromPropertyI).
stringFromProperty = function(obj, o = list()) {
	o = merge.lists(defaults, o);
	s = stringFromPropertyI(obj, 0, o);
	if (o$noFormatting) {
		s = gsub("[\n\t]", '', s);
	}
	s
}
# tokens: character vector of tokens
# ti: current token cursor (token index)
# Recursive-descent parser for property list tokens: '(' starts an array,
# '{' a dictionary, anything else is a (possibly quoted) string scalar.
# @return list(pl = parsed value, ti = index of the last consumed token)
propertyFromStringRaw = function(tokens, ti = 1) {
	if (length(tokens) < 1) stop("propertyFromString: out of tokens");
	pl = if (tokens[ti] == '(') {	# we have an array here # ')' (bracket)
		a = NULL;
		repeat {
			ti = ti + 1;	# advance to next token
			if (ti > length(tokens) || tokens[ti] == ')') break;	# <A> empty list
			r = propertyFromStringRaw(tokens, ti);	# sub propertyList
			if (is.list(r$pl)) r$pl = list(r$pl);	# <A> concatanating of lists
			a = c(a, r$pl);
			ti = r$ti + 1;
			if (ti > length(tokens) || tokens[ti] == ')') break;	# <A> returning to list end
			if (tokens[ti] != ',') stop("propertyFromString: expected ',' or ')'");
		}
		if (ti > length(tokens) || tokens[ti] != ')') stop("propertyFromString: no array termination");
		a
	} else if (tokens[ti] == '{') {
		dict = list();
		repeat {
			ti = ti + 1;	# advance to next token
			if (ti > length(tokens) || tokens[ti] == '}') break;
			key = tokens[ti];
			if (tokens[ti + 1] != '=') stop("propertyFromString: expected '='");
			r = propertyFromStringRaw(tokens, ti + 2);
			dict[[key]] = r$pl;
			ti = r$ti + 1;
			# NOTE(review): this access may index past the end of tokens — verify
			if (tokens[ti] != ';') stop("propertyFromString: expected ';'");
		}
		if (ti > length(tokens) || tokens[ti] != '}') stop("propertyFromString: no dict termination");;
		dict
	#} elsif ($token =~ /^<(.*)>$/so) { # we encountered data
	# <N> data not supported
	} else {	# string
		s = tokens[ti];
		if (substr(s, 1, 1) == '"') s = substr(s, 2, nchar(s) - 1);	# strip quotes
		s
	}
	r = list(pl = pl, ti = ti);
	r
}
# token regexes: bare identifiers or double-quoted strings; block comments
plStringRE = '(?:(?:[_\\/\\-a-zA-Z0-9.]+)|(?:\"(?:(?:\\\\.)*(?:[^"\\\\]+(?:\\\\.)*)*)\"))';
plCommentRE = '(?:/\\*(?:.*?)\\*/)';
# Parse a property list string into R data structures: strip comments, tokenize
# (fetchRegexpr: project helper), delegate to propertyFromStringRaw.
propertyFromString = function(plistString, o = list()) {
	plistString = gsub(plCommentRE, '', plistString, perl = T);
	tokens = fetchRegexpr(sprintf('%s|[(]|[)]|[{]|[}]|[=]|[,]|[;]|<.*?>', plStringRE), plistString);
	pl = propertyFromStringRaw(tokens);
	pl$pl
}
#
# Rlinux.R
#Tue May 8 18:05:44 2012
#
# <p> RsowReap.R
#Wed May 7 18:16:23 CEST 2014
# <p> Design
# These classes are meant to implement several Sow/Reap patterns
# Standard Pattern
# r = Reap(expression, returnResult = T);
# print(r$result);
# print(r$yield);
#
# AutoPrint sowed values, reap later
# SowerAddReaper(auto_reaper = printRepeaper, logLevel = 4);
# { Sow(my_tag = 4, logLevel = 3); }
# r = Reap();
#
# for (i in 1:10) {
# Sow(my_number = i);
# Sow(my_greeting = 'hello world');
# }
# # prints list of list w/ each entry being list(my_number = i, my_greeting = ..)
# print(Reap(stacked = T));
#
# Sow to different categories
# SowerSetCatcher(default = StackingSowCatcherClass);
# SowerSetCatcher(exclusions = SowCatcherClass);
# Sow(1);
# Sow(individuals = 1:10, sow_field = 'exclusions');
# Collect(union, sow_field = 'exclusions'); # do not remove
# Abstract base class for reapers: objects that receive values 'sown'
# elsewhere (see the Sow/Reap pattern documented above). Subclasses
# override reap() to process each sown value.
ReaperAbstractClass = setRefClass('ReaperAbstract',
	fields = list(),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		.self$initFields(...);
		.self
	},
	# receive one sown value; no-op in the abstract base class
	reap = function(...) { }
	#
	#	</p> methods
	#
	)
);
#ReaperAbstractClass$accessors(names(ReaperAbstractClass$fields()));
# Concrete in-memory sow-catcher: collects sown values ('seeds') in a list
# and forwards every seed to any registered auto-reapers.
SowCatcherClass = setRefClass('SowCatcher', contains = 'ReaperAbstract',
	fields = list(
		auto_reapers = 'list',	# extra ReaperAbstract objects notified on every sow
		seeds = 'list'	# collected values, in sow order, named by seed key
	),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		auto_reapers <<- list();
		seeds <<- list();
		.self$initFields(...);
		.self
	},
	# distribute one seed to this catcher and all auto-reapers
	sow_raw = function(seed) {
		for (r in c(.self, auto_reapers)) r$reap(seed);
	},
	# Sow(key = value): only the first (named) argument is kept
	sow = function(...) {
		.self$sow_raw(list(...)[1]);
	},
	# default reap: append the seed to the collection
	reap = function(seed) {
		seeds <<- c(seeds, seed);
	},
	last_seed = function() {
		seeds[length(seeds)];
	},
	seed_count = function()length(seeds),
	# all seeds, or those selected by name (which.indeces: package helper)
	Seeds = function(fields = NULL) {
		if (is.null(fields)) seeds else seeds[which.indeces(fields, names(seeds))]
	},
	# overwrite the seed (value and name) stored at position pos
	set_seed_at = function(seed, pos) {
		seeds[pos] <<- seed;
		names(seeds)[pos] <<- names(seed);
		NULL
	},
	push_reaper = function(r) {
		auto_reapers <<- c(auto_reapers, r);
		NULL
	},
	# ensemble hooks; the in-memory catcher needs no registration/teardown
	register = function(ensemble, field)NULL,
	# <p> end a global SowReap session
	conclude = function()NULL
	#
	#	</p> methods
	#
	)
);
SowCatcherClass$accessors(names(SowCatcherClass$fields()));
# Sow-catcher that persists every seed to disk (one .RData file per seed
# under 'path'), so an interrupted session can resume: 'cursor' tracks the
# position in the previously recorded seed sequence and re-sown seeds
# overwrite their old files instead of appending new ones.
SowCatcherPersistentClass = setRefClass('SowCatcherPersistent', contains = 'SowCatcher',
	fields = list(
		path = 'character',	# base directory for seed files
		splitRe = 'character',	# if non-empty, split seed names into path components
		cursor = 'integer'	# next expected position in the recorded sequence
	),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		splitRe <<- '';
		callSuper(...);
		cursor <<- 1L;
		.self
	},
	# file path for seed name n at sequence position i; i is zero-padded
	# into the first path component to keep files ordered on disk
	seed_path_name = function(n, i = length(seeds) + 1) {
		key = if (splitRe != '') splitString(splitRe, n) else n;
		key[1] = Sprintf('%{i}03d_%{k}s', k = key[1]);
		seedPath = Sprintf('%{path}s/%{keyComponents}s.RData', keyComponents = join(key, '/'));
	},
	seed_path = function(seed, i = length(seeds) + 1) .self$seed_path_name(names(seed), i),
	# save the seed's value into its per-position file (Save: package helper)
	seed_save = function(seed, i = length(seeds) + 1) {
		seedPath = .self$seed_path(seed, i);
		s = seed[[1]];
		Save(s, file = seedPath);
	},
	set_seed_at = function(seed, i) {
		.self$seed_save(seed, i);
		if (names(seeds)[i] != names(seed))
			# NOTE(review): k2 = names(seeds) looks like it was meant to be
			# names(seed) (the new seed's key) — affects the log text only; verify
			Logs('SowCatcherPersistent: Warning: seed key %{k2}s does not match seed slot %{k1}s',
				k1 = names(seeds)[i], k2 = names(seeds), logLevel = 3);
	},
	# append a new seed: save the value, record its name, persist the name index
	reap_raw = function(seed) {
		.self$seed_save(seed);
		seeds <<- c(seeds, listKeyValue(names(seed), NA));
		save(seeds, file = .self$seed_path_name('__seed_names', 0));
		NULL
	},
	# resume-aware reap: beyond the recorded sequence -> append; otherwise
	# skip forward to the recorded occurrence of this seed name and
	# overwrite that slot in place
	reap = function(seed) {
		if (cursor > .self$seed_count()) {
			.self$reap_raw(seed);
			.self$setCursor(cursor + 1L);
			return(NULL);
		}
		seed_nm = names(seed);
		# <p> locate previous position
		ns = names(.self$getSeeds());
		occs = which(seed_nm == ns[Seq(1, cursor - 1, neg = T)]);
		if (length(occs) == 0) {
			Logs('SowCatcherPersistent: adding seed %{seed_nm}s of class %{cl}s not seen before.',
				cl = class(seed[[1]]), 3);
			.self$reap_raw(seed);
			return(NULL);
		}
		new_cursor = cursor + min(occs) - 1L;
		# NOTE(review): the bare 5 below is passed positionally to Logs —
		# presumably intended as logLevel = 5; confirm Logs' signature
		Logs('SowCatcherPersistent: Skipping to cursor %{new_cursor}s.', 5);
		.self$set_seed_at(seed, new_cursor);
		.self$setCursor(new_cursor + 1L);
	},
	# load seed values back from their files (only names live in 'seeds')
	Seeds = function(fields = NULL) {
		idcs = if (is.null(fields)) Seq(1, length(seeds)) else which.indeces(fields, names(seeds));
		r = lapply(idcs, function(i)get(load(.self$seed_path(seeds[i], i))[1]));
		names(r) = names(seeds)[idcs];
		r
	},
	# attach to an ensemble: derive the storage path, restore the recorded
	# seed-name index, and optionally wipe previous state (doReset)
	register = function(ensemble, field, doReset = F) {
		# <N> if path was not specified yet, try to query from ensemble, should exit on NULL
		if (!length(.self$getPath())) {
			.self$setPath(ensemble$getPath());
			# <p> subpath for this field
			path <<- Sprintf('%{path}s/%{field}s');
		}
		# <p> keep track of seeds
		seedsPath = .self$seed_path_name('__seed_names', 0);
		if (file.exists(seedsPath)) seeds <<- get(load(seedsPath)[1]);
		if (doReset) {
			unlink(sapply(Seq(1, length(seeds)), function(i).self$seed_path(seeds[i], i)));
			if (file.exists(seedsPath)) unlink(seedsPath);
			seeds <<- list();
		}
		NULL
	}
	#
	#	</p> methods
	#
	)
);
SowCatcherPersistentClass$accessors(names(SowCatcherPersistentClass$fields()));
# Stack of sow-catchers for one sow-field: Sow() always feeds the top
# catcher; Reap(expr) pushes/pops a catcher to scope seeds to an expression.
SowCatcherStackClass = setRefClass('SowCatcherStack',
	fields = list(
		sowCatchers = 'list',	# the stack; the last element is the top
		sowCatcherClass = 'character'	# class used for autovivified catchers
	),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		sowCatchers <<- list();
		sowCatcherClass <<- 'SowCatcher';
		.self$initFields(...);
		.self
	},
	push = function(sowCatcher = getRefClass(.self$sowCatcherClass)$new(), ...) {
		sowCatchers[[length(sowCatchers) + 1]] <<- sowCatcher;
	},
	pop = function() {
		currentCatcher = sowCatchers[[length(sowCatchers)]];
		sowCatchers <<- sowCatchers[-length(sowCatchers)];
		currentCatcher
	},
	# top of the stack; creates a default catcher on first use
	sowCatcher = function() {
		if (!length(sowCatchers)) .self$push();	# autovivify
		sowCatchers[[length(sowCatchers)]]
	},
	# seeds of every catcher on the stack (bottom to top)
	reap = function(fields = NULL) {
		r = lapply(sowCatchers, function(sc)sc$Seeds(fields))
	},
	register = function(ensemble, sow_field, ...)
		lapply(sowCatchers, function(sc)sc$register(ensemble, sow_field, ...)),
	# conclude top-down (reverse of push order)
	conclude = function()lapply(rev(sowCatchers), function(sc)sc$conclude())
	#
	#	</p> methods
	#
	)
);
SowCatcherStackClass$accessors(names(SowCatcherStackClass$fields()));
# Ensemble of named sow-fields, each backed by its own catcher stack;
# an instance of this class is held in the module-global SowReap state.
SowCatcherEnsembleClass = setRefClass('SowCatcherEnsemble',
	fields = list(
		sowers = 'list',	# sow_field name -> SowCatcherStack
		sowCatcherClass = 'character'	# default catcher class used by Reap(expr)
	),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		sowers <<- list();
		sowCatcherClass <<- 'SowCatcher';
		.self$initFields(...);
		.self
	},
	# push a catcher onto sow_field's stack (the stack is created lazily);
	# the catcher is given a chance to register with this ensemble
	push = function(sowCatcher = SowCatcherStackClass$new(), sow_field = 'default', ...) {
		# <b> default argument mechanism does not work
		#if (is.null(sowCatcher)) sowCatcher = getRefClass('SowCatcher')$new();
		if (is.null(sowers[[sow_field]])) sowers[[sow_field]] <<- SowCatcherStackClass$new();
		sowers[[sow_field]]$push(sowCatcher)
		sowCatcher$register(.self, sow_field, ...);
	},
	pop = function(sow_field = 'default')sowers[[sow_field]]$pop(),
	sowCatcher = function(sow_field = 'default')sowers[[sow_field]]$sowCatcher(),
	reap = function(sow_field = 'default', fields = NULL) sowers[[sow_field]]$reap(fields),
	conclude = function() sapply(sowers, function(sower)sower$conclude())
	#
	#	</p> methods
	#
	)
);
SowCatcherEnsembleClass$accessors(names(SowCatcherEnsembleClass$fields()));
# Ensemble variant that freezes itself to '<path>/000_ensemble.RData' after
# every structural change (push/pop), so a later session can thaw() it and
# resume a persisted Sow/Reap run.
SowCatcherEnsemblePersistentClass = setRefClass('SowCatcherEnsemblePersistent',
	contains = 'SowCatcherEnsemble',
	fields = list(
		path = 'character'	# base directory for the frozen ensemble and its seeds
	),
	methods = list(
	#
	#	<p> methods
	#
	initialize = function(...) {
		callSuper(...)
		.self
	},
	push = function(sowCatcher = SowCatcherStackClass$new(), sow_field = 'default', ...) {
		r = callSuper(sowCatcher, sow_field, ...);
		.self$freeze();	# persist the structural change
		r
	},
	pop = function(sow_field = 'default') {
		r = callSuper(sow_field);
		.self$freeze();	# persist the structural change
		r
	},
	freeze_path = function()Sprintf('%{path}s/000_ensemble.RData'),
	# serialize this ensemble object itself (Save: package helper)
	freeze = function() {
		Save(.self, file = freeze_path());
		NULL
	},
	# restore a previously frozen ensemble; each catcher re-registers so it
	# can recover its own on-disk state
	thaw = function() {
		e = get(load(freeze_path())[1]);
		# SowCatchers have to recover their own state
		lapply(names(e$sowers), function(n)e$sowers[[n]]$register(e, n));
		e
	}
	#
	#	</p> methods
	#
	)
);
SowCatcherEnsemblePersistentClass$accessors(names(SowCatcherEnsemblePersistentClass$fields()));
# Module-level environment holding the SowReap singleton state (the
# 'sowEnsemble' managed by SowReapInit/sowReapEnsemble). The guard keeps an
# existing ensemble alive when this file is re-sourced; the previous code
# re-created the environment unconditionally on the next line, which
# defeated the guard and silently discarded any live session state.
if (!exists('SowReap_env__')) SowReap_env__ = new.env();
# Start a global SowReap session: create a fresh ensemble of the given
# class and install it as the module-wide singleton.
# ensembleClass: name of a SowCatcherEnsemble(-derived) reference class
# ...: forwarded to the ensemble constructor (e.g. path = for persistence)
SowReapInit = function(ensembleClass = 'SowCatcherEnsemble', ...) {
	ensemble = getRefClass(ensembleClass)$new(...);
	assign('sowEnsemble', ensemble, envir = SowReap_env__);
	ensemble
}
# Finish the global SowReap session by running every registered
# sow-catcher's conclude() hook.
SowReapConclude = function() sowReapEnsemble()$conclude()
# Fetch the global SowReap ensemble singleton, lazily creating a default
# one on first use.
sowReapEnsemble = function() {
	if (!exists('sowEnsemble', envir = SowReap_env__)) SowReapInit();
	get('sowEnsemble', envir = SowReap_env__)
}
# Create one sow-field (named channel) per entry of sow_field, each backed
# by a fresh catcher of class sowCatcherClass pushed onto the ensemble.
# ...: forwarded to ensemble$push (e.g. doReset = for persistent catchers)
SowReapCreateField = function(sow_field, sowCatcherClass = 'SowCatcher', ...) {
	e = sowReapEnsemble();
	for (sf in sow_field) {
		catcher = getRefClass(sowCatcherClass)$new();
		e$push(catcher, sow_field = sf, ...);
	}
	NULL
}
# Remove the top catcher of the given sow-field and return its seeds.
SowReapReapField = function(sow_field) {
	ensemble = sowReapEnsemble();
	ensemble$pop(sow_field)$getSeeds()
}
# Sow one named value into the current (top) catcher of the given sow-field.
Sow = function(..., sow_field = 'default') {
	sowReapEnsemble()$sowCatcher(sow_field = sow_field)$sow(...)
}
# Reap sown values. Two modes:
#  - Reap(): collect seeds already sown to sow_field across its catcher
#    stack; with auto_unlist a single catcher's seeds are returned directly.
#  - Reap(expr): push a fresh catcher, evaluate expr, pop the catcher and
#    return the seeds sown during the evaluation.
# fields: restrict the returned seeds to these names; vivify adds NULL
# entries for requested fields that were never sown.
# NOTE(review): eval(expr, ...) forces the promise 'expr', so evaluation
# effectively happens in the caller's frame regardless of 'envir'.
Reap = function(expr, sow_field = 'default', fields = NULL, envir = parent.frame(), auto_unlist = T,
	vivify = F) {
	e = sowReapEnsemble();
	r = if (missing(expr)) {
		r = e$reap(sow_field, fields = fields);
		if (vivify) {
			r = lapply(r, function(e) {
				tbVivified = setdiff(fields, names(e));
				e = c(e, unlist.n(lapply(tbVivified, function(n)List(NULL, names_ = n)), 1));
				e
			});
		}
		if (auto_unlist && length(r) == 1) r = r[[1]];
		r
	} else {
		catcher = getRefClass(e$getSowCatcherClass())$new();
		e$push(catcher, sow_field = sow_field);
		eval(expr, envir = envir);
		e$pop(sow_field)$Seeds(fields);
	}
	r
}
# Reap seeds recorded by a persistent ensemble directly from disk: thaw the
# ensemble frozen under 'path' and collect the seeds of sow_field.
# NOTE(review): unlike Reap(), the vivify branch wraps NULL placeholders
# with a plain lapply (no unlist.n), yielding one extra level of list
# nesting — confirm which shape downstream code expects.
ReapFromDisk = function(path, sow_field = 'default', fields = NULL, auto_unlist = T,
	ensembleClass = 'SowCatcherEnsemblePersistent', vivify = F) {
	e = getRefClass(ensembleClass)$new(path = path);
	e = e$thaw();
	r = e$reap(sow_field, fields = fields);
	if (vivify) {
		r = lapply(r, function(e) {
			tbVivified = setdiff(fields, names(e));
			e = c(e, lapply(tbVivified, function(n)List(NULL, names_ = n)));
			e
		});
	}
	if (auto_unlist && length(r) == 1) r = r[[1]];
	r
}
#
# RparallelTools.R
#Fri Jul 26 09:13:16 2013
#
# <p> interface functions
#
#' Create a new environment, optionally pre-populated with values
#'
#' @param hash passed through to new.env
#' @param parent parent environment of the new environment
#' @param size initial size hint, passed through to new.env
#' @param content named list; each named element is assigned into the new
#'   environment under its name (unnamed elements are silently ignored, as
#'   in the previous per-name loop)
#' @return the new environment
Env.new = function(hash = T, parent = parent.frame(), size = 29L, content = list()) {
	e = new.env(hash = hash, parent = parent, size = size);
	# list2env is the stdlib idiom replacing a hand-rolled assignment loop
	named = content[names(content) != ''];
	if (length(named) > 0) list2env(named, envir = e);
	e
}
#' Create a placeholder for an object to be loaded later
#'
#' @param path File system path to the file containing a saved R data structure
#' @return a 'ParallelizeDelayedLoad' S4 object wrapping the path
#'   (the class is declared elsewhere in this package)
delayed_load = function(path) {
	new('ParallelizeDelayedLoad', path)
}
delayed_load_dummy = function(path) get(load(path)[1])
|
/src/R/Rprivate/RgenericAllRaw.R
|
no_license
|
wzuhou/ImputationPipeline
|
R
| false
| false
| 287,473
|
r
|
#
# Rlibraries.R
#Wed Oct 31 19:00:40 CET 2012
# Attach the third-party packages used by this code base.
# NOTE(review): require() returns FALSE instead of erroring when a package
# is missing, and the results are not checked here, so a missing package
# only surfaces later — consider library() if a hard failure is preferred.
loadLibraries = function() {
	require('geepack');
	require('glmnet');
	require('ggplot2');
	#library('foreign');
}
#
# Rdata.R
#Mon 27 Jun 2005 10:49:06 AM CEST
#system("cd ~/src/Rprivate ; ./exportR.sh");
#system("cd ~/src/Rprivate ; ./exportR.sh"); source("RgenericAll.R"); source("Rgenetics.R"); loadLibraries();
#
# <§> abstract data functions
#
defined = function(x) exists(as.character(substitute(x)));
defined.by.name = function(name) { class(try(get(name), silent = T)) != 'try-error' }
# equivalent to i %in% v
# Fix: the previous index-based implementation ((1:length(v))[v == i])
# produced NA indices for NA elements of v and so misreported membership;
# %in% is the exact behavior the comment always promised.
is.in = function(i, v) i %in% v
# get() with a default: return the value bound to 'name' if one is visible
# from envir, otherwise return 'default'.
rget = function(name, default = NULL, ..., pos = -1, envir = as.environment(pos)) {
	if (!exists(name, envir = envir)) return(default);
	get(name, ..., envir = envir)
}
# Return the first argument that is not NULL (and, with .fdIgnoreErrors,
# not a try-error); NULL if none qualifies.
# .fdInterpolate: flatten arguments with c() instead of treating each
# argument as one candidate.
firstDef = function(..., .fdInterpolate = F, .fdIgnoreErrors = F) {
	candidates = if (.fdInterpolate) c(...) else list(...);
	for (candidate in candidates) {
		if (is.null(candidate)) next;
		if (.fdIgnoreErrors && class(candidate) == 'try-error') next;
		return(candidate);
	}
	NULL
}
# Return the first argument that is not NA; NULL if none qualifies.
# .fdInterpolate: flatten arguments with c() instead of one candidate each.
firstDefNA = function(..., .fdInterpolate = F){
	candidates = if (.fdInterpolate) c(...) else list(...);
	for (candidate in candidates) if (!is.na(candidate)) return(candidate);
	NULL
}
# <N> NULL behaviour
# Coerce arguments into a flat list; with .remove.factors, factor elements
# are mapped back to their character levels.
# NOTE(review): the factor conversion goes through sapply/ifelse, which
# inspects and returns only the first element of multi-element entries —
# confirm intended use is scalar-per-element lists before relying on it.
to.list = function(..., .remove.factors = T){
	r = if(is.null(...)) NULL else if (is.list(...)) c(...) else list(...);
	if (.remove.factors) {
		r = sapply(r, function(e)ifelse(is.factor(e), levels(e)[e], e));
	}
	r
}
# pretty much force to vector
#avu = function(v)as.vector(unlist(v))
# Flatten v into a plain vector; with toNA, NULL list elements become NA
# instead of being dropped by unlist. Empty results collapse to NULL.
avu = function(v, recursive = T, toNA = T) {
	transform = if (toNA)
		function(e, condition)(if (condition) NA else avu(e, toNA = T, recursive = T)) else
		function(e, ...)avu(e, toNA = F, recursive = T);
	r = if (is.list(v)) {
		nls = sapply(v, is.null);	# detects nulls
		# unlist removes NULL values -> NA
		unlist(sapply(seq_along(v), function(i)transform(v[[i]], nls[i])));
	} else as.vector(v);
	if (!length(r)) return(NULL);
	r
}
pop = function(v)rev(rev(v)[-1]);
# Assign every named element of l into an environment.
# l: named list of values; pos/envir/inherits/immediate: as for assign().
# Fix: with all defaults, the lazily evaluated as.environment(pos) resolved
# to THIS function's own evaluation frame, so all assignments were silently
# discarded when the function returned; the caller's frame is the intended
# default target.
assign.list = function(l, pos = -1, envir = as.environment(pos), inherits = FALSE, immediate = TRUE) {
	if (missing(envir) && pos == -1) envir = parent.frame();
	for (n in names(l)) {
		assign(n, l[[n]], pos = pos, envir = envir, inherits = inherits, immediate = immediate);
	}
	invisible(NULL)
}
eval.text = function(text, envir = parent.frame())eval(parse(text = c[1]), envir= envir);
# replace elements base on list
# l may be a list of lists with elements f (from) and t (to), when f is replaced with t
# if both, f and t arguments are not NULL, l will be ignored and f is replaced with t
# With regex = FALSE, l acts as a lookup table name -> replacement (unmapped
# elements pass through); with regex = TRUE, names(l) are patterns and the
# FIRST matching pattern wins per element (gsub applied with extra args ...).
vector.replace = function(v, l, regex = F, ..., f = NULL, t = NULL) {
#	if (!is.null(f) & !is.null(t)) l = list(list(f = f, t = t));
#	# replacments are given in f/t pairs
#	if (all(sapply(l, length) == 2)) {
#		from = list.key(l, "f");
#		to = list.key(l, "t");
#	} else {
#		from = names(l);
#		to = unlist(l);
#	}
#	for (i in 1:length(from)) {
#		if (regex) {
#			idcs = which(sapply(v, function(e)(length(fetchRegexpr(from[i], e, ...)) > 0)));
#			v[idcs] = sapply(v[idcs], function(e)gsub(from[i], to[i], e));
#		} else v[which(v == from[i])] = to[i];
#	}
	repl = if (!is.null(f) & !is.null(t)) listKeyValue(f, t) else l;
	# <!> tb tested
	v = if (!regex) {
		raw = repl[v];
		unlist(ifelse(sapply(repl[v], is.null), v, raw))
	} else {
		sapply(v, function(e){
			# first match takes precedence
			j = which(sapply(names(repl), function(f)length(fetchRegexpr(f, e, ...)) > 0))[1];
			if (is.na(j)) e else gsub(names(repl)[j], repl[[j]], e)
		})
	}
	v
}
# Expand v to cover all_names: the result has one entry per element of
# all_names, taking values from v where names match and 'default'
# elsewhere. (which.indeces: package helper mapping names to positions.)
vector.with.names = function(v, all_names, default = 0) {
	r = rep(default, length(all_names));
	names(r) = all_names;
	is = which.indeces(names(v), all_names, ret.na = T);
	r[is[!is.na(is)]] = v[!is.na(is)];
	r
}
# dir: direction of selection: 1: select rows, 2: select columns
# Select one matrix element per position of v: with dir == 1 take
# m[v[i], i] (v picks the row for each column), with dir == 2 take
# m[i, v[i]]. Uses matrix indexing instead of a 1:length() sapply loop,
# which also mis-handled empty v (1:0 iterates over c(1, 0)).
mat.sel = function(m, v, dir = 1) {
	if (dir == 1) m[cbind(v, seq_along(v))] else m[cbind(seq_along(v), v)]
}
# Simplify a list via sapply's default rules: equal-length vectors become
# the columns of a matrix (a column-wise 'rbind' over the list).
sapplyId = function(l) {
	sapply(l, function(element) element)
}
# Find entries of lsed (a list of record-like lists) whose values match
# all key/value pairs given in lsee; the matching record(s) are returned
# flattened by one level. (list.key / unlist.n: package helpers.)
listFind = function(lsed, lsee) {
	values = sapply(names(lsee), function(n)list.key(lsed, n), simplify = F, USE.NAMES = F);
	values = sapply(values, identity);
	found = apply(values, 1, function(r) all(r == lsee));
	r = unlist.n(lsed[found], 1);
	r
}
same.vector = function(v)all(v == v[1])
#
# <§> string manipulation
#
say = function(...)cat(..., "\n");
printf = function(fmt, ...)cat(sprintf(fmt, ...));
join = function(v, sep = " ")paste(v, collapse = sep);
con = function(...)paste(..., sep="");
# pastem = function(a, b, ..., revsort = T) {
# 	if (revsort)
# 	as.vector(apply(merge(data.frame(a = b), data.frame(b = a), sort = F), 1,
# 		function(e)paste(e[2], e[1], ...))) else
# 	as.vector(apply(merge(data.frame(a = a), data.frame(b = b), sort = F), 1,
# 		function(e)paste(e[1], e[2], ...)))
# }
# Paste all combinations of a and b element-wise; with revsort the first
# vector varies slowest. (merge.multi.list / Df: package helpers; ... is
# forwarded to paste, e.g. sep =.)
pastem = function(a, b, ..., revsort = T) {
	df = merge.multi.list(list(Df(a = a), Df(b = b)), .first.constant = revsort);
	paste(df[, 1], df[, 2], ...)
}
# Recover the integer values from printed R vector output, skipping the
# leading "[k]" indices (digits preceded by '[' or by another digit).
# Fixes: returns integer(0) when nothing matches (the 1:length() loop used
# to fabricate a bogus element from gregexpr's -1) and uses vectorized
# substring() instead of an sapply/substr loop.
r.output.to.vector.int = function(s) {
	matches = gregexpr("(?<![\\[\\d])\\d+", s, perl = T)[[1]];
	if (matches[1] < 0) return(integer(0));
	starts = as.vector(matches);
	lengthes = attr(matches, "match.length");
	as.integer(substring(s, starts, starts + lengthes - 1))
}
# Recover decimal numbers (digits '.' digits) from printed R output.
# Same fixes as r.output.to.vector.int: empty result when nothing matches,
# vectorized substring() extraction.
r.output.to.vector.numeric = function(s) {
	matches = gregexpr("\\d*\\.\\d+", s, perl = T)[[1]];
	if (matches[1] < 0) return(numeric(0));
	starts = as.vector(matches);
	lengthes = attr(matches, "match.length");
	as.numeric(substring(s, starts, starts + lengthes - 1))
}
readFile = function(path) { join(scan(path, what = "raw", sep = "\n", quiet = T), sep = "\n") };
# Wrap every non-empty element of s as <pre><element><post>; empty strings
# stay empty, and NULL or zero-length input collapses to ''.
circumfix = function(s, post = NULL, pre = NULL) {
	if (is.null(s) || length(s) == 0) return('');
	sapply(s, function(element) if (element == '') element else paste0(pre, element, post))
}
# Truncate strings longer than Nchar, terminating them with 'ellipsis' so
# the result is exactly Nchar characters; shorter strings pass through.
abbr = function(s, Nchar = 20, ellipsis = '...') {
	keep = Nchar - nchar(ellipsis);
	ifelse(nchar(s) > Nchar, paste0(substr(s, 1, keep), ellipsis), s)
}
# which.max with a tie-break choice: last.max selects the LAST maximal
# position; an all-FALSE logical vector yields 'default'.
Which.max = function(l, last.max = T, default = NA) {
	if (is.logical(l) && all(!l)) return(default);
	if (!last.max) return(which.max(l));
	length(l) - which.max(rev(l)) + 1
}
# which.min with a tie-break choice: last.min selects the LAST minimal
# position; an all-FALSE logical vector yields 'default'.
Which.min = function(l, last.min = F, default = NA) {
	if (is.logical(l) && all(!l)) return(default);
	if (!last.min) return(which.min(l));
	length(l) - which.min(rev(l)) + 1
}
# capturesN: named captures; for each name in captureN put the captured value assuming names to be ordered
# captures: fetch only first capture per match <!> deprecated
# capturesAll: fetch all captures for each match
#
# Find matches of perl regex 're' in the single string 'str'. The return
# shape depends on the flags:
#  - default: character vector of whole matches
#  - captures: first capture group per match
#  - captureN: per match, a named list of capture groups
#  - capturesAll: per match, all capture groups (up to maxCaptures, trimmed)
#  - returnMatchPositions: list(match = ..., positions = (g)regexpr result)
# ret.all keeps empty matches; globally toggles gregexpr vs regexpr.
# Returns NULL when nothing matches.
fetchRegexpr = function(re, str, ..., ret.all = F, globally = T, captures = F, captureN = c(),
	capturesAll = F, maxCaptures = 9, returnMatchPositions = F) {
	if (length(re) == 0) return(c());
	r = if (globally)
		gregexpr(re, str, perl = T, ...)[[1]] else
		regexpr(re, str, perl = T, ...);
	if (all(r < 0)) return(NULL);
	l = sapply(1:length(r), function(i)substr(str, r[i], r[i] + attr(r, "match.length")[i] - 1));
	if (captures) {
		l = sapply(l, function(e)gsub(re, '\\1', e, perl = T, fixed = F));
	} else if (length(captureN) > 0) {
		l = lapply(l, function(e) {
			r = sapply(1:length(captureN), function(i) {
				list(gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F))
			});
			names(r) = captureN;
			r
		});
	} else if (capturesAll) {
		l = lapply(l, function(e) {
			cs = c();	# captures
			# <!> hack to remove zero-width assertions (no nested grouping!)
			#re = gsub('(\\(\\?<=.*?\\))|(\\(\\?=.*?\\))', '', re, perl = T, fixed = F);
			for (i in 1:maxCaptures) {
				n = gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F);
				cs = c(cs, n);
			}
			cs
		});
		# trim list
		#maxEls = maxCaptures - min(c(maxCaptures + 1, sapply(l, function(e)Which.max(rev(e != ''))))
		#	, na.rm = T) + 1;
		maxEls = max(c(sapply(l, function(e)Which.max(e != '', default = 1)), 1));
		l = lapply(l, function(e)(if (maxEls > 0) e[1:maxEls] else NULL));
	}
	if (!ret.all) l = l[l != ""];
	ret = if (returnMatchPositions) list(match = l, positions = r) else l;
	ret
}
# improved multistring version
# Like fetchRegexpr, but str may be a vector: matches are gathered per
# element, then the capture-processing flags operate on the combined
# result. See fetchRegexpr above for the flag semantics.
FetchRegexpr = function(re, str, ..., ret.all = F, globally = T, captures = F, captureN = c(),
	capturesAll = F, maxCaptures = 9, returnMatchPositions = F) {
	if (length(re) == 0) return(c());
	r = if (globally)
		gregexpr(re, str, perl = T, ...) else
		list(regexpr(re, str, perl = T, ...));
	if (all(unlist(r) < 0)) return(NULL);
	l = sapply(seq_along(r),
		function(j) {
			r0 = r[[j]];
			sapply(1:length(r0),
				function(i)substr(str[j], r0[i], r0[i] + attr(r0, "match.length")[i] - 1))
	});
	if (captures) {
		l = sapply(l, function(e)gsub(re, '\\1', e, perl = T, fixed = F));
		#print(l);
	} else if (length(captureN) > 0) {
		l = lapply(l, function(e) {
			r = sapply(1:length(captureN), function(i) {
				list(gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F))
			});
			names(r) = captureN;
			r
		});
	} else if (capturesAll) {
		l = lapply(l, function(e) {
			cs = c();	# captures
			# <!> hack to remove zero-width assertions (no nested grouping!)
			#re = gsub('(\\(\\?<=.*?\\))|(\\(\\?=.*?\\))', '', re, perl = T, fixed = F);
			for (i in 1:maxCaptures) {
				n = gsub(re, sprintf('\\%d', i), e, perl = T, fixed = F);
				cs = c(cs, n);
			}
			cs
		});
		# trim list
		#maxEls = maxCaptures - min(c(maxCaptures + 1, sapply(l, function(e)Which.max(rev(e != ''))))
		#	, na.rm = T) + 1;
		maxEls = max(c(sapply(l, function(e)Which.max(e != '', default = 1)), 1));
		l = lapply(l, function(e)(if (maxEls > 0) e[1:maxEls] else NULL));
	}
	if (!ret.all) l = l[l != ""];
	ret = if (returnMatchPositions) list(match = l, positions = r) else l;
	ret
}
# vectorized wrappers: regex() maps fetchRegexpr over multiple strings,
# Regex() maps FetchRegexpr over multiple patterns
regex = Vectorize(fetchRegexpr, 'str', SIMPLIFY = T, USE.NAMES = T);
Regex = Vectorize(FetchRegexpr, 're', SIMPLIFY = T, USE.NAMES = T);
# indices of the elements of s matching re (vectorIdcs: package helper)
regexIdcs = function(re, s, ...)vectorIdcs(regex(re, s, ...), is.null, not = T)
# unify capture extraction for gregexpr, regexpr
# pos == 0: grexepr, regexpr else by iterating pos as index into str
# Pull capture groups out of a (g)regexpr result produced with perl = TRUE;
# returns a list keyed by the pattern's capture names, or NULL when the
# pattern had no capture groups (no 'capture.start' attribute).
matchRegexCapture = function(reg, str, pos = NULL) {
	if (is.null(attr(reg, 'capture.start'))) return(NULL);
	if (!is.null(pos)) str = str[pos] else pos = seq_along(reg);
	captures = lapply(1:ncol(attr(reg, 'capture.start')), function(i) {
		sapply(pos, function(j)Substr(str,
			attr(reg, 'capture.start')[j, i], attr(reg, 'capture.length')[j, i]))
	});
	names(captures) = attr(reg, 'capture.names');
	captures
}
# Extract the matched substrings for a (g)regexpr result 'reg' over str;
# pos selects elements, otherwise all positions are used.
# NOTE(review): ifelse vectorizes over reg[pos] with a character(0) 'yes'
# branch — non-matches therefore come back as NA after recycling rather
# than as empty vectors; confirm callers rely on this shape.
matchRegexExtract = function(reg, str, pos = NULL) {
	if (!is.null(pos)) str = str[pos] else pos = seq_along(reg);
	matches = ifelse(reg[pos] < 0, character(0),
		sapply(pos, function(i)Substr(str, reg[i], attr(reg, 'match.length')[i])));
	matches
}
# <i> re nested list with sub-res for named captures
# <!> globally == FALSE, removeNonMatch == FALSE
# Match perl regex re against a vector of strings and return both whole
# matches and (named) captures: list(match, capture[, positions]).
# removeNonMatch drops strings without any match; simplify unwraps the
# single-string case to bare elements.
matchRegex = function(re, str, ..., globally = TRUE, simplify = TRUE,
	positions = FALSE, removeNonMatch = FALSE) {
	if (length(re) == 0) return(NULL);
	reg = if (globally) gregexpr(re, str, perl = T, ...) else regexpr(re, str, perl = T, ...);
	ms = if (globally)
		lapply(seq_along(reg), function(i)matchRegexExtract(reg[[i]], str[i])) else
		lapply(seq_along(str), function(i)matchRegexExtract(reg, str, pos = i));
	#	regmatches(str, reg);
	captures = if (globally)
		lapply(seq_along(reg), function(i)matchRegexCapture(reg[[i]], str[i])) else
		lapply(seq_along(str), function(i)matchRegexCapture(reg, str, pos = i));
	if (removeNonMatch) {
		nonmatch = sapply(ms, length) == 0 | is.na(ms);
		ms = ms[!nonmatch];
		captures = captures[!nonmatch];
		reg = reg[!nonmatch];
	}
	if (simplify && length(str) == 1) {
		ms = ms[[1]];
		captures = captures[[1]];
		reg = reg[[1]];
	}
	r = if(positions) list(match = ms, capture = captures, positions = reg) else
		list(match = ms, capture = captures);
	r
}
#
#	<p> final interface as of 2016/04
#
# Match re against str: with mode == 'return' give back the matching
# elements of str, otherwise the raw regexpr() result.
MatchRegex = function(re, str, mode = 'return') {
	matchPositions = regexpr(re, str);
	if (mode != 'return') return(matchPositions);
	str[which(matchPositions > 0)]
}
# Split each element of str at matches of (perl) regex re; an element
# without a match is returned whole. With simplify, a single input string
# yields the bare character vector instead of a one-element list.
splitString = function(re, str, ..., simplify = T) {
	l = lapply(str, function(str) {
		r = gregexpr(re, str, perl = T, ...)[[1]];
		if (r[1] < 0) return(str);
		# piece i runs from just after match i-1 to just before match i;
		# the final piece extends to the end of the string
		l = sapply(1:(length(r) + 1), function(i) {
			substr(str, ifelse(i == 1, 1, r[i - 1] + attr(r, "match.length")[i - 1]),
				ifelse(i > length(r), nchar(str), r[i] - 1))
		});
	});
	if (length(l) == 1 && simplify) l = l[[1]];
	l
}
quoteString = function(s)sprintf('"%s"', s)
# Trim leading/trailing whitespace from every element of s; NA stays NA.
# Uses the stdlib trimws() instead of a hand-rolled regex helper; character
# input without names keeps the previous sapply() behavior of naming each
# result element by its original value.
trimString = function(s) {
	r = trimws(s);
	if (is.character(s) && is.null(names(s))) names(r) = s;
	r
}
# Repeatedly substitute dictionary keys in string s by their values.
# d: named list; the names are literal patterns (or regexes with re = T)
# valueMapper: maps each value before substitution (doApplyValueMap)
# iterative: restart the key loop after every successful substitution so
#   longer keys keep precedence; maxIterations/maxLength bound the loop
mergeDictToString = function(d, s, valueMapper = function(s)
	ifelse(is.na(d[[n]]), '{\\bf Value missing}', d[[n]]),
	iterative = F, re = F, maxIterations = 100, doApplyValueMap = T, doOrderKeys = T, maxLength = 1e7) {
	ns = names(d);
	# proceed in order of decreasing key lengthes
	if (doOrderKeys) ns = ns[rev(order(sapply(ns, nchar)))];
	for (i in 1:maxIterations) {
		s0 = s;
		for (n in ns) {
			# counteract undocumented string interpolation
			subst = if (doApplyValueMap)
				gsub("[\\\\]", "\\\\\\\\", valueMapper(d[[n]]), perl = T)
			else d[[n]];
			# <!> quoting
			if (!re) n = sprintf("\\Q%s\\E", n);
			s = gsub(n, firstDef(subst, ""), s, perl = T, fixed = F);
			# <A> if any substitution was made, it is necessary to reiterate ns to preserve order
			# of substitutions
			if (iterative && s != s0) break;
		}
		if (!iterative || s == s0 || nchar(s) > maxLength) break;
	}
	s
}
# vectorized over the subject string s
mergeDictToStringV = Vectorize(mergeDictToString, 's', SIMPLIFY = T, USE.NAMES = T);
# map elements of v through dict d, leaving unmapped elements unchanged
mergeDictToVector = function(d, v) { unlist(ifelse(is.na(names(d[v])), v, d[v])) }
# Recursively apply mergeDictToString to every character value of dValues;
# list values recurse (when recursive = T), all other values pass through.
# Fix: class(v) == 'cls' yields a length > 1 condition for multi-class
# objects (an error in modern R); inherits() is the robust idiom.
mergeDictToDict = function(dMap, dValues, ..., recursive = T) {
	r = lapply(dValues, function(v) {
		r = if (inherits(v, 'list')) {
			if (recursive) mergeDictToDict(dMap, v, ...) else v
		} else if (inherits(v, 'character')) mergeDictToString(dMap, v, ...) else v;
		r
	});
	r
}
# double quote if needed
# Shell-quote a single string: double-quote (escaping embedded \ and ")
# when it contains whitespace or shell metacharacters (or force = T);
# otherwise strings containing single quotes get the bash $'...' form.
qsSingle = function(s, force = F) {
	# <N> better implementation possible: detect unquoted white-space
	if (force || length(fetchRegexpr('[ \t"()\\[\\]:,]', s)) > 0) {
		s = gsub('([\\"])', '\\\\\\1', s);
		s = sprintf('"%s"', s);
	} else {
		s0 = gsub("([\\'])", '\\\\\\1', s);
		if (s0 != s) s = sprintf("$'%s'", s0);
	}
	s
}
# vectorized qsSingle
qs = function(s, ...)sapply(s, qsSingle, ...)
# single quote if needed
# Shell-quote with single quotes: an embedded ' is closed/escaped/reopened
# ('"'"') per POSIX shell convention.
qssSingle = function(s, force = F) {
	# <N> better implementation possible: detect unquoted white-space
	if (force || length(fetchRegexpr("[ \t'()\\[\\]:,]", s)) > 0) {
		s = gsub("(['])", "'\"'\"'", s);
		s = sprintf("'%s'", s);
	}
	s
}
# vectorized qssSingle
qss = function(s, ...)sapply(s, qssSingle, ...)
#' Extract sub-strings or splice replacements into a string
#'
#' Without \code{replacement}, behaves like
#' \code{substr(s, start, start + length - 1)} (vectorized extraction).
#' With \code{replacement}, returns one string in which each region
#' \code{[start[i], start[i] + length[i])} of \code{s} is replaced by
#' \code{replacement[i]}; regions are expected in ascending order.
#'
#' @param s template string
#' @param start vector of start positions of substrings to substitute
#' @param length vector of lengthes of substrings to substitute
#' @param replacement vector of strings to substitute; if missing,
#'   sub-strings are extracted instead
#'
#' @examples
#' print(Substr("abc", c(2, 3), c(1, 1), c("def", 'jkl')));
#' print(Substr("abcdef", c(1, 3, 5), c(0, 1, 0), c("123", '456', '789')));
Substr = function(s, start, length, replacement) {
	# extraction mode
	if (missing(replacement)) return(substr(s, start, start + length - 1));
	# a sentinel bound just past the end of s closes the final kept piece
	bounds = c(start, nchar(s) + 1);
	# for each region: its replacement, then the untouched text up to the
	# start of the next region
	pieces = sapply(seq_along(replacement), function(k) c(
		replacement[k],
		substr(s, bounds[k] + length[k], bounds[k + 1] - 1)
	));
	paste(c(substr(s, 1, bounds[1] - 1), as.vector(pieces)), collapse = '')
}
# <!> quoting
#' Produce string by substituting placeholders
#'
#' The function behaves similar to sprintf, except that character sequences to be substituted are
#' indicated by name. To be implemented: *-specifications
#'
#' @param s template string
#' @param d values to substitute into \code{s}
#' @param template template for substitution pattern. Within this pattern \code{__DICT_KEY__} is
#'   substituted for a key in \code{d}. This string \code{k} is substituted in \code{s} with \code{d[[k]]}.
#'
#' @examples
#' Sprintf('These are N %{N} characters.', list(N = 10));
#' Sprintf('These are N %{N}d characters.', list(N = 10));
#' Sprintf('These are N %{N}02d characters.', list(N = 10));
# Workhorse behind Sprintf: 'values' mixes named values (dict entries) and
# positional extras; unknown keys are interpolated from 'envir'. Extra
# conversions: %D current date, %Q/%q shell quoting, %u upper-case first.
Sprintfl = function(fmt, values, sprintf_cartesian = FALSE, envir = parent.frame()) {
	# split 'values' into named entries (dict) and positional extras
	dict = extraValues = list();
	for (i in seq_along(values)) {
		if (is.list(values[[i]]))
			dict = merge.lists(dict, values[[i]]) else
		if (!is.null(names(values)[i]) && names(values)[i] != '')
			dict = merge.lists(dict, values[i]) else
			extraValues = c(extraValues, values[i]);
	}
#	re = '(?x)(?:
#		(?:^|[^%]|(?:%%)+)\\K
#		[%]
#		(?:[{]([^{}\\*\'"]*)[}])?
#		((?:[-]?[*\\d]*[.]?[*\\d]*)?(?:[sdfegG]|))(?=[^%sdfegG]|$)
#	)';
	# <!> new, untested regexpr as of 22.5.2014
	# un-interpolated formats do no longer work
	re = '(?x)(?:
		(?:[^%]+|(?:%%)+)*\\K
		[%]
		(?:[{]([^{}\\*\'"]*)[}])?
		((?:[-]?[*\\d]*[.]?[*\\d]*)?(?:[sdfegGDQqu]|))(?=[^sdfegGDQqu]|$)
	)';
	r = fetchRegexpr(re, fmt, capturesAll = T, returnMatchPositions = T);
	# <p> nothing to format
	if (length(r$match) == 0) return(fmt);
	typesRaw = sapply(r$match, function(m)ifelse(m[2] == '', 's', m[2]));
	types = ifelse(typesRaw %in% c('D', 'Q'), 's', typesRaw);
	fmts = sapply(r$match, function(m)sprintf('%%%s',
		ifelse(m[2] %in% c('', 'D', 'Q', 'q', 'u'), 's', m[2])));
	fmt1 = Substr(fmt, r$positions, attr(r$positions, 'match.length'), fmts);
	keys = sapply(r$match, function(i)i[1]);
	nonKeysI = cumsum(keys == '');	# indeces of values not passed by name
	nonKeysIdcs = which(keys == '');
	# <p> collect all values
	allValues = c(extraValues, dict);
	# get interpolation variables
	interpolation = nlapply(keys[keys != ''], function(k)
		if (!is.null(allValues[[k]])) NULL else rget(k, default = NA, envir = envir)
	);
	# <p> handle %D: current day
	keys[typesRaw == 'D'] = '..Sprintf.date..';
	dateValue = if (sum(typesRaw == 'D'))
		list(`..Sprintf.date..` = format(Sys.time(), '%Y%d%m')) else
		list();
	allValues = c(allValues, dateValue, List_(interpolation, rm.null = T));
	# 14.9.2015 -> convert to indeces
	# build value combinations
	listedValues = lapply(keys, function(k)allValues[[k]]);
	dictDf = if (!sprintf_cartesian) Df_(listedValues) else merge.multi.list(listedValues);
	#if (substr(fmt, 0, 5) == '%{wel') browser();
	# fill names of anonymous formats
	keys[keys == ''] = names(dictDf)[Seq(1, sum(nonKeysI != 0))];
	# due to repeat rules of R vectors might have been converted to factors
	#dictDf = Df_(dictDf, as_character = unique(keys[types == 's']));
	dictDf = Df_(dictDf, as_character = which(types == 's'));
	# <p> conversion <i>: new function
	#colsQ = keys[typesRaw == 'Q'];
	# <!> switch to index based transformation on account of duplicate keys
	colsQ = which(typesRaw == 'Q');
	dictDf[, colsQ] = apply(dictDf[, colsQ, drop = F], 2, qs);
	#colsq = keys[typesRaw == 'q'];
	colsq = which(typesRaw == 'q');;
	dictDf[, colsq] = apply(dictDf[, colsq, drop = F], 2, qss);
	colsu = which(typesRaw == 'u');;
	dictDf[, colsu] = apply(dictDf[, colsu, drop = F], 2, uc.first);
	colsd = which(typesRaw == 'd');;
	dictDf[, colsd] = apply(dictDf[, colsd, drop = F], 2, as.integer);
	# one output string per row of the value-combination data frame
	s = sapply(1:nrow(dictDf), function(i) {
		valueDict = as.list(dictDf[i, , drop = F]);
#		sprintfValues = lapply(seq_along(keys), function(i)
#			ifelse(keys[i] == '', extraValues[[nonKeysI[i]]],
#			firstDef(valueDict[[keys[i]]], rget(keys[i], default = '__no value__'), pos = -2)));
#		sprintfValues = lapply(seq_along(keys), function(i)
#			firstDef(valueDict[[keys[i]]], rget(keys[i], default = '__no value__', envir = envir)));
		#sprintfValues = lapply(seq_along(keys), function(i)valueDict[[keys[i]]]);
		#do.call(sprintf, c(list(fmt = fmt1), sprintfValues))
		# <!> simplify above two lines, now robust against duplicated entries -> <i> needs unit tests
		names(valueDict) = NULL;
		do.call(sprintf, c(list(fmt = fmt1), valueDict))
	});
	s
}
# Convenience wrapper: Sprintf/sprintd interpolate ... (named values and
# positional extras) into fmt via Sprintfl; 'envir' supplies variables
# referenced by name in %{...} that are not passed explicitly.
Sprintf = sprintd = function(fmt, ..., sprintf_cartesian = FALSE, envir = parent.frame()) {
	Sprintfl(fmt, list(...), sprintf_cartesian = sprintf_cartesian, envir = envir);
}
#r = getPatternFromStrings(DOC, '(?:\\nDOCUMENTATION_BEGIN:)([^\\n]+)\\n(.*?)(?:\\nDOCUMENTATION_END\\n)');
# Extract regex matches from each string: the capture at keyIndex becomes the
# list key, the remaining captures the value (one named list per input string).
# NOTE(review): fetchRegexpr is a project helper; assumed to return a list of
# capture vectors when called with capturesAll = T - confirm.
getPatternFromStrings = function(strings, pattern, keyIndex = 1) {
	r = lapply(strings, function(s) {
		ps = fetchRegexpr(pattern, s, capturesAll = T);
		listKeyValue(sapply(ps, function(e)e[[keyIndex]]), sapply(ps, function(e)e[-keyIndex]));
	});
	r
}
# Read each file (optionally resolving against the given location prefixes)
# and extract pattern matches via getPatternFromStrings.
# NOTE(review): readFile is a project helper; assumed to return each file's
# content as a single string - confirm.
getPatternFromFiles = function(files, locations = NULL, ...) {
	strings = sapply(files, function(f)readFile(f, prefixes = locations));
	getPatternFromStrings(strings, ...);
}
#
# hex strings
#
asc = function(x)strtoi(charToRaw(x), 16L);
# Split each string into its individual characters.
#
# @param str character vector
# @return for a single string: vector of single characters; for several strings
#	of equal length: matrix with one column per input string (sapply simplification)
character.as.characters = function(str) {
	# <!> fixed: the inner substr previously indexed `str` (the whole vector)
	# instead of the current element `s`, breaking multi-element inputs;
	# seq_len also handles the empty string without producing indices 1:0
	sapply(str, function(s) sapply(seq_len(nchar(s)), function(i)substr(s, i, i)));
}
# bit_most_sig in bits
# Convert a hex string to a signed integer of bit_most_sig bits
# (two's complement: the top bit of the most significant nibble is the sign bit).
hex2int = function(str, bit_most_sig = 32) {
	# nibble byte codes, least significant character first
	cs = rev(sapply(character.as.characters(tolower(str)), asc));
	cms = bit_most_sig / 4; # character containing most significant bit
	# map 'a'..'f'/'0'..'9' byte codes to nibble values 0..15
	is = ifelse(cs >= asc('a'), cs - asc('a') + 10, cs - asc('0'));
	flipSign = (length(is) >= cms && is[cms] >= 8);
	# clear the sign bit, accumulate magnitude, then subtract the sign weight
	if (flipSign) is[cms] = is[cms] - 8;
	r = sum(sapply(1:length(is), function(i)(is[i] * 16^(i-1))));
	if (flipSign) r = r - 2^(bit_most_sig - 1);
	# the minimal representable value maps to NA (mirrors R's NA_integer_ encoding)
	r = if (r == - 2^(bit_most_sig - 1)) NA else as.integer(r);
	r
}
# chunk_size in bits
# Split a hex string into chunks of chunk_size bits and convert each chunk
# to a signed integer via hex2int (the last chunk may be shorter).
hex2ints = function(str, chunk_size = 32) {
	nChars = nchar(str);
	charsPerChunk = chunk_size / 4;
	nChunks = (nChars + charsPerChunk - 1) %/% charsPerChunk;
	sapply(1:nChunks, function(chunk) {
		from = (chunk - 1) * charsPerChunk + 1;
		to = min(nChars, chunk * charsPerChunk);
		hex2int(substr(str, from, to))
	})
}
#
# <§> binary numbers/n-adic numbers
#
# Digits of o in the given base, least significant digit first,
# padded (or truncated) to `digits` positions.
ord2base = dec2base = function(o, digits = 5, base = 2) {
	digitAt = function(pos) (o %/% base^(pos - 1)) %% base;
	sapply(1:digits, digitAt)
}
# Inverse of ord2base: combine base-digits (least significant first)
# into the represented number.
base2ord = base2dec = function(v, base = 2) {
	weights = base^(seq_along(v) - 1);
	sum(v * weights)
}
# binary convenience wrappers around ord2base/base2ord (least significant bit first)
ord2bin = dec.to.bin = function(number, digits = 5) ord2base(number, digits, base = 2);
bin2ord = bin.to.dec = function(bin) base2ord(bin, base = 2);
#
# <Par> sequences
#
#' Produce constrained sequences
#'
#' This is a wrapper around seq that adds constraints. Setting ascending, descending to NA reverts to
#' standard \code{seq} behaviour.
#'
#' @param ascending restrict sequences to be ascending; return empty list if to < from
#' @param descending restrict sequences to be descending; return empty list if from < to
#' @examples
#' Seq(1, 10, ascending = T)
#' Seq(1, 10, descending = T)
#' Seq(10, 1, ascending = NA)
Seq = function(from, to, ..., ascending = T, descending = !ascending, neg = F) {
	# <!> order matters: if called with only descending == T
	# violated constraint: return empty sequence (or T when neg, usable as a guard)
	if (nif(descending) && to > from) return(if (neg) T else c()) else
	if (nif(ascending) && from > to) return(if (neg) T else c());
	# NOTE(review): nif is a project helper, presumably NA-safe truth test
	# (NA -> FALSE, reverting to plain seq behaviour) - confirm
	s = seq(from, to, ...);
	# neg: negate the produced sequence
	r = if (neg) -s else s;
	r
}
#' Produce index pairs for vector of counts
#'
#' @param counts vector of integers specifying counts
#' @return vector of pairs of indeces indicating the first and last element in a vector for the blocks
#' specified by \code{counts}
#' @examples
#' count2blocks(c(1, 5, 3))
# Turn a vector of block sizes into interleaved (first, last) index pairs
# locating each block in the concatenated vector.
count2blocks = function(counts) {
	ends = cumsum(counts);
	starts = ends - counts + 1;
	# interleave: start1, end1, start2, end2, ...
	as.vector(rbind(starts, ends))
}
#
# expand a block list - for example as from count2blocks - to a list of integers
#
# Expand a flat (from, to, from, to, ...) block list - as produced by
# count2blocks - into the full integer ranges.
expandBlocks = function(blks) {
	pairs = matrix(blks, ncol = 2, byrow = T);
	apply(pairs, 1, function(pair) pair[1]:pair[2])
}
# Partition 1:M into N parts; returns a matrix with one (from, to) row per part.
# .compact: equally sized parts of ceiling(M/N), last part truncated;
# otherwise the remainder is spread over the first parts.
# .truncate: never produce more parts than elements.
splitListIndcs = function(M, N = 1, .compact = F, .truncate = T) {
	if (.truncate & M < N) N = M;
	if (.compact) {
		sizes = rep(ceiling(M / N), N);
		bounds = c(0, cumsum(sizes));
		bounds = c(bounds[bounds < M], M);
	} else {
		sizes = rep(floor(M / N), N);
		remainder = M - sizes[1] * N;
		sizes = sizes + c(rep(1, remainder), rep(0, N - remainder));
		bounds = c(0, cumsum(sizes));
	}
	# from:to per row
	idcs = cbind(bounds + 1, c(bounds[-1], 0))[-length(bounds), ];
	# R degeneracy: a single row collapses to a vector
	if (!is.matrix(idcs)) idcs = matrix(idcs, nrow = 1);
	idcs
}
# Split list l into N parts; returns a list of index vectors
# (or of element sub-lists when returnElements is set).
splitListEls = function(l, N, returnElements = FALSE) {
	ranges = splitListIndcs(length(l), N);
	parts = apply(ranges, 1, function(range) {
		idx = range[1]:range[2];
		if (returnElements) l[idx] else idx
	});
	# apply may simplify its result; normalize back to a list
	if (is.matrix(parts)) parts = lapply(1:(dim(parts)[2]), function(i)parts[, i]);
	if (is.vector(parts)) parts = as.list(parts);
	parts
}
# @arg l list of index positions from another object
# @return return vector indicating to which list element an index was assigned
# Example: glmnet accepts fold numbers per index (as opposed to a partitioning of elements)
index2listPosition = function(l) {
	# total number of indexed elements across all list entries
	N = sum(sapply(l, length));
	na = rep(NA, N);
	# one column per list entry: positions claimed by entry i are marked with i
	m = sapply(1:length(l), function(i)vector.assign(na, l[[i]], i, na.rm = NA));
	# collapse columns; assumes each index is claimed by exactly one entry
	r = apply(m, 1, na.omit);
	r
}
# splitting based on fractions
# voting percentages to seats
# simple algorithm based on size of residuals
# Distribute Nseats according to voting fractions: round to nearest, guarantee
# one seat per party, then correct any mismatch by the size of the residuals.
splitSeatsForFractions = function(Nseats, fractions) {
	# ideal (fractional) seat counts
	ideal = fractions * Nseats;
	# guarantee one seat, otherwise round to nearest
	seats = ifelse (ideal < 1, 1, round(ideal));
	overshoot = sum(seats) - Nseats;
	if (overshoot != 0) {
		# guaranteed seats count as maximal residual 1
		residuals = sapply(ideal - seats, function(d)ifelse(d < 0, 1, d));
		# correct the parties with the largest/smallest residuals
		adjust = order(residuals, decreasing = overshoot < 0)[1:abs(overshoot)];
		# assume one round of correction is always sufficient <!>
		seats[adjust] = seats[adjust] - sign(overshoot);
	}
	seats
}
# tranform number of elements (as from splitSeatsForFractions) into from:to per row in a matrix
# Transform element counts (as from splitSeatsForFractions) into a matrix
# with one (from, to) row per block.
counts2idcs = function(counts) {
	bounds = c(0, cumsum(counts));
	cbind(bounds + 1, c(bounds[-1], 0))[-length(bounds), ]
}
# N is partitioned into fractions from p, where each element of p partitions the remaining part of N
# procedure makes sure to leave space for length(p) elements
# N is partitioned into fractions from p, where each element of p partitions the
# remaining part of N; the procedure leaves space for length(p) elements.
#
# @param N total size to be partitioned
# @param p vector of fractions, each applied to the space remaining after the previous cut
# @return integer vector of cumulative upper indeces within 1:N
cumpartition = function(N, p) {
	I = integer(0);	# cumulative indeces within 1:N
	for (i in seq_along(p)) {
		# remaining space: full N for the first fraction, the rest afterwards
		# (plain if/else instead of the former scalar misuse of ifelse())
		remaining = if (i == 1) N else N - I[i - 1];
		# leave room for the subsequent indeces
		Ii = floor(p[i] * (remaining - (length(p) - i))) + 1;
		I = c(I, if (i == 1) Ii else I[i - 1] + Ii);
	}
	as.integer(I)
}
#' Extract parts of a nested structure based on the range from..to
#'
#'
#' @param Ns Vector of integers that specify the size of the substructure
#' @return Return list of list, where each basic list contains key \code{segment}
#' (which of the elements of Ns) and key \code{range}, a list with elements \code{from} and \code{to},
#' specifying which elements to use from
#' that segment.
subListFromRaggedIdcs = function(Ns, from = 1, to = sum(Ns)) {
	# <!> fixed: the default for `to` was sum(segments), a recursive reference
	# to a variable only defined later in the body, making the default unusable
	NsCS = cumsum(Ns);
	# shifted cumsum: cumulative count before each segment (drop last element)
	NsCSs = c(0, NsCS[-length(NsCS)]);
	# segments overlapping the requested range
	segments = which(from <= NsCS & to > NsCSs);
	r = lapply(segments, function(segment){
		N = Ns[segment];	# size of this segment
		from_ = 1;
		to_ = N;
		# clip first/last segment to the requested range
		if (segment == segments[1]) from_ = from - NsCSs[segment];
		if (segment == rev(segments)[1]) to_ = to - NsCSs[segment];
		r = list(segment = segment, range = list(from = from_, to = to_));
		r
	});
	r
}
#' Extract parts of nested lists based on the range from..to
#'
#'
#' @param ls nested list structure (currently only two levels supported)
#' @return Return list of list, where each basic list contains key \code{segment}
#' (which of the elements of Ns) and key \code{range}, a list with elements \code{from} and \code{to},
#' specifying which elements to use from
#' that segment.
subListFromRaggedLists = function(ls, from = 1, to = sum(sapply(ls, length))) {
	# map the flat range from..to onto per-segment ranges
	sl = subListFromRaggedIdcs(sapply(ls, length), from = from, to = to);
	# extract the selected slice from each segment
	r = lapply(sl, function(s) with(s, {
		r = ls[[segment]][range$from: range$to];
		r
	}));
	# flatten one level: plain list of selected elements
	r = unlist.n(r, 1);
	r
}
#
# <§> vector functions
#
# does the position exists in vector v
exists.pos = function(v, i)(is.vector(v) && !is.na(v[i]))
#
# <par> lists
#
# Merge lists left to right: entries from later lists overwrite earlier ones.
# ignore.nulls: skip NULL values instead of letting them delete entries;
# concat: concatenate values instead of overwriting; useIndeces: merge by
# position instead of by name; listOfLists: a single list argument holds the lists.
merge.lists = function(..., ignore.nulls = TRUE, listOfLists = FALSE, concat = FALSE, useIndeces = FALSE) {
	lists = if (listOfLists) c(...) else list(...);
	result = lists[[1]];
	if (length(lists) > 1) for (j in 2:length(lists)) {
		other = lists[[j]];
		keys = if (useIndeces) 1L:length(other) else names(other);
		for (key in keys) {
			if (is.null(key)) print("Warning: tried to merge NULL key");
			skip = is.null(key) || (ignore.nulls && is.null(other[[key]]));
			if (skip) next;
			result[[key]] = if (concat) c(result[[key]], other[[key]]) else other[[key]];
		}
	}
	result
}
# Merge lists left to right, descending into sub-lists instead of
# overwriting them wholesale; scalar values are overwritten.
merge.lists.recursive = function(..., ignore.nulls = TRUE, listOfLists = F) {
	lists = if (listOfLists) c(...) else list(...);
	acc = lists[[1]];
	if (length(lists) > 1) for (j in 2:length(lists)) {
		overlay = lists[[j]];
		for (key in names(overlay)) {
			if (is.null(key)) print("Warning: tried to merge NULL key");
			if (!is.null(key) & (!ignore.nulls | !is.null(overlay[[key]]))) {
				# recurse into existing sub-lists, otherwise overwrite
				acc[[key]] = if (is.list(acc[[key]]))
					merge.lists.recursive(acc[[key]], overlay[[key]]) else
					overlay[[key]];
			}
		}
	}
	acc
}
# Split each element of a list of lists/vectors into its first element and the
# rest; returns list(elements = first elements, remainder = tails).
unshift = function(l, listOfList = T) {
	if (!listOfList) l = list(l);
	heads = lapply(l, function(el) if (is.list(el)) el[[1]] else el[1]);
	tails = lapply(l, function(el) el[-1]);
	list(elements = heads, remainder = tails)
}
# Merge a list of lists left to right; later values overwrite earlier ones.
# recursive: descend into sub-lists instead of overwriting them wholesale;
# keys: key pathes (see unshift) restricting recursion, one level consumed
# per recursion step.
Merge.lists.raw = function(lists, ignore.nulls = TRUE, recursive = FALSE, keys = NULL) {
	if (!is.null(keys)) keys = unshift(keys);
	l1 = lists[[1]];
	if (length(lists) > 1) for (i in 2:length(lists)) {
		l2 = lists[[i]];
		for(n in names(l2)) {
			if (is.null(n)) print("Warning: tried to merge NULL key");
			# skip NULL values unless ignore.nulls is disabled
			if (!is.null(n) & (!ignore.nulls | !is.null(l2[[n]])))
				l1[[n]] = if (recursive && is.list(l1[[n]]) && (is.null(keys) || n %in% keys$elements))
					Merge.lists.raw(list(l1[[n]], l2[[n]]), ignore.nulls, recursive,
						if (is.null(keys)) NULL else keys$remainder) else
					l2[[n]]
		}
	}
	l1
}
# Convenience wrapper around Merge.lists.raw; keyPathes: '$'-separated key
# pathes (e.g. "a$b") restricting recursive merging to those branches.
Merge.lists = function(..., ignore.nulls = TRUE, listOfLists = F, recursive = F, keyPathes = NULL) {
	lists = if (listOfLists) c(...) else list(...);
	# NOTE(review): splitString is a project helper; assumed to split each path at '$' - confirm
	keys = if (!is.null(keyPathes)) splitString("[$]", keyPathes, simplify = F) else NULL;
	l = Merge.lists.raw(lists, ignore.nulls = ignore.nulls, recursive = recursive, keys = keys);
	l
}
# Compare result r against expectation e (package 'compare'); print both on
# mismatch and return the comparison outcome as a logical.
compare_print = function(r, e) {
	# NOTE(review): require() returns FALSE instead of erroring when the
	# package is missing; failure would surface later at the compare() call
	require('compare');
	cmp = compare(model = r, comparison = e);
	if (!cmp$result) {
		print("Expectation not met (result != expectation):");
		print(r);
		print(e);
	}
	cmp$result
}
# use.names preserves names and concatenates with lower level names
# reset sets names to top level names
# Unlist exactly n levels deep (unlist(recursive = FALSE), n times).
# use.names: keep names, concatenating with lower level names;
# reset: restore the pre-flattening top level names after each step.
unlist.n = function(l, n = 1, use.names = T, reset = F) {
	if (n > 0) for (level in 1:n) {
		topNames = names(l);
		l = unlist(l, recursive = F, use.names = use.names);
		if (reset) names(l) = topNames;
	}
	l
}
# <N> obsolete, better: with(l, { ...})
# Assign each entry of l as a variable of its own name n frames up the call
# stack. <N> obsolete, prefer with(l, {...}); uses eval(parse()) which is
# fragile - kept for backward compatibility.
instantiate.list = function(l, n = 1) {
	for (nm in names(l)) {
		# deparse produces a parseable text representation of arbitrary values
		eval.parent(parse(file = "", text = sprintf("%s = %s", nm, deparse(l[[nm]]))), n = n);
#		if (is.integer(l[[nm]])) {
#			eval.parent(parse(file = "", text = sprintf("%s = %d", nm, l[[nm]])), n = n);
#		} else if (is.numeric(l[[nm]])) {
#			eval.parent(parse(file = "", text = sprintf("%s = %f", nm, l[[nm]])), n = n);
#		} else {
#			eval.parent(parse(file = "", text = sprintf("%s = \"%s\"", nm, l[[nm]])), n = n);
#		};
	}
}
# for use in testing code
# Bind every entry of l (plus any named ... arguments) as a variable in envir;
# intended for use in testing code. Returns the combined bindings invisibly.
instantiate = function(l, ..., envir = parent.frame()) {
	bindings = c(l, list(...));
	for (name in names(bindings)) assign(name, bindings[[name]], envir = envir);
	invisible(bindings)
}
# assume a list of lists (aka vector of dicts) and extract a certain key from each of the lists
# From a list of lists (vector of dicts), extract `key` from each element.
# Non-list elements yield `template`; missing keys yield NULL (or NA with null2na).
list.key = function(v, key, unlist = T, template = NULL, null2na = F) {
	values = lapply(v, function(item) {
		if (!is.list(item)) return(template);
		value = item[[key]];
		if (is.null(value)) { if (null2na) NA else NULL } else value
	});
	if (unlist) values = unlist(values);
	values
}
# extract key path from list, general, recursive version
# key path recursive worker
# Key path recursive worker for list.kpr: walk `keys` into nested structure l.
# '*' iterates all elements; '[[i]]' addresses by position; other keys by name.
# test: report existence instead of the value; unlist.pats: per-level unlist flags.
list.kprw = function(l, keys, unlist.pats, template, null2na, carryNames, test) {
	key = keys[1];
	# <p> extract key
	r = if (key != "*") {
		# positional addressing: '[[<digits>]]'
		index = fetchRegexpr("\\A\\[\\[(\\d+)\\]\\]\\Z", key, captures = T);
		if (length(index) > 0) key = as.integer(index[[1]]);
		if (is.list(l)) {
			r = if (is.null(l[[key]])) {
				if (null2na) NA else NULL
			} else l[[key]];
			# descend while keys remain; at the leaf either test or return the value
			if (length(keys) > 1)
				list.kprw(r, keys[-1], unlist.pats[-1], template, null2na, carryNames, test) else
				if (test) !(is.null(r) || all(is.na(r))) else r;
		} else if (class(l) %in% c('character')) {
			l[names(l) %in% key];
		} else if (class(l) %in% c('data.frame', 'matrix')) {
			l[, key]
		} else return(template)
	} else {
		# '*': apply the remaining path to every element
		if (length(keys) > 1)
			lapply(l, function(sl)
				list.kprw(sl, keys[-1], unlist.pats[-1], template, null2na, carryNames, test)
			) else l;
	}
	# <p> unlisting (per-level flag; carryNames keeps the top level names)
	if (!is.null(unlist.pats)) if (unlist.pats[1]) r = unlist.n(r, 1, reset = carryNames);
	r
}
# wrapper for list.kprw
# keyPath obeys EL1 $ EL2 $ ..., where ELn is '*' or a literal
# unlist.pat is pattern of truth values TR1 $ TR2 $..., where TRn is in 'T|F' and specifies unlist actions
# carryNames determines names to be carried over from the top level in case of unlist
# Extract a key path from nested lists; keyPath obeys EL1$EL2$..., where ELn is
# '*' (all elements) or a literal key; unlist.pat: '$'-separated T/F flags
# controlling per-level unlisting (see list.kprw).
list.kpr = function(l, keyPath, do.unlist = F, template = NULL,
	null2na = F, unlist.pat = NULL, carryNames = T, as.matrix = F, test = F) {
	keys = fetchRegexpr("[^$]+", keyPath);
	unlist.pats = if (!is.null(unlist.pat)) as.logical(fetchRegexpr("[^$]+", unlist.pat)) else NULL;
	r = list.kprw(l, keys, unlist.pats, template, null2na, carryNames, test = test);
	if (do.unlist) { r = unlist(r); }
	# as.matrix: one row per extracted element
	if (as.matrix) r = t(sapply(r, function(e)e));
	r
}
# extract key path from list
# <!> interface change: unlist -> do.unlist (Wed Sep 29 18:16:05 2010)
# test: test existance instead of returning value
# Extract keyPath from every element of list l (prefixes the path with '*').
# <!> interface change: unlist -> do.unlist (Wed Sep 29 18:16:05 2010)
# test: test existance instead of returning value
list.kp = function(l, keyPath, do.unlist = F, template = NULL, null2na = F, test = F) {
	r = list.kpr(l, sprintf("*$%s", keyPath), do.unlist = do.unlist, template, null2na = null2na, test = test);
	r
}
# Extract several keys from l; keys missing from l yield `default`.
# Returns an unnamed list in key order.
list.keys = function(l, keys, default = NA) {
	l = as.list(l);
	lapply(unlist(keys), function(key) {
		value = l[[key]];
		if (is.null(value)) default else value
	})
}
# return list without listed keys
# return list without the listed keys
#
# @param l list
# @param keys names of the entries to remove
# @return l without the entries named in keys; l unchanged when nothing matches
list.min = function(l, keys) {
	idcs = which.indeces(keys, names(l));
	# <!> fixed: l[-integer(0)] selects the EMPTY list, so a non-matching keys
	# argument used to wipe the whole list instead of leaving it unchanged
	if (length(idcs) == 0) l else l[-idcs]
}
# list generation on steroids (wraps other functions)
# list generation on steroids (wraps other functions);
# .min: keys to strip from l (delegated to list.min)
.list = function(l, .min = NULL) {
	if (is.null(.min)) l else list.min(l, .min)
}
# get apply: extract key `key` from each element of l (shortcut for list.key;
# unlist: simplify the result to a vector)
gapply = function(l, key, unlist = F)list.key(l, key, unlist)
# construct list as a dictionary for given keys and values
# Construct a list as a dictionary from parallel key and value vectors.
listKeyValue = function(keys, values) {
	if (length(keys) != length(values))
		stop("listKeyValue: number of provided keys does not match that of values");
	setNames(as.list(values), keys)
}
# Attach names to a vector; more names than elements is an error,
# fewer names leaves the tail unnamed (NA names).
vectorNamed = function(v, names) {
	if (length(names) > length(v)) stop("vectorNamed: more names than vector elements");
	setNames(v, names)
}
#listInverse = function(l)listKeyValue(avu(l), names(l));
# Invert a map given as list: values become keys, keys become values;
# multi-valued entries expand to one inverted entry per value.
listInverse = function(l, toNA = F) {
	n = sapply(l, length);
	# <p> values of inverse map (each name repeated per value it owns)
	vs = rep.each(names(l), n);
	# <p> construct list
	# NOTE(review): avu is a project helper, presumably as.vector(unlist(.)) - confirm
	r = listKeyValue(avu(l, recursive = F, toNA = toNA), vs);
	r
}
# name the list elements by the iterated vector elements ns (names)
# lapply that names the result by the iterated elements;
# a list argument stands for its names.
nlapply = function(ns, f, ...) {
	if (is.list(ns)) ns = names(ns);
	setNames(lapply(ns, f, ...), ns)
}
# Iterate (name, element) pairs of l with f(name, element); result keeps l's names.
nelapply = function(l, f, ...) {
	keys = names(l);
	results = lapply(keys, function(key, ...) f(key, l[[key]]), ...);
	setNames(results, keys)
}
# Iterate (element, index) pairs of l with f(element, index); names are preserved.
ilapply = function(l, f, ...) {
	result = lapply(seq_along(l), function(i) f(l[[i]], i, ...));
	if (!is.null(names(l))) names(result) = names(l);
	result
}
# pass element, index, name
# Iterate l passing element, index and name to f; result keeps l's names.
einlapply = function(l, f, ...) {
	keys = names(l);
	result = lapply(seq_along(l), function(i) f(l[[i]], i, keys[i], ...));
	setNames(result, keys)
}
# Iterate (key, value) pairs of l with f(key, value); result keeps l's names.
kvlapply = function(l, f, ...) {
	keys = names(l);
	result = lapply(seq_along(l), function(i) f(keys[i], l[[i]], ...));
	setNames(result, keys)
}
# USE.NAMES logic reversed for sapply
# (sapply over a character vector without using its values as result names)
sapplyn = function(l, f, ...)sapply(l, f, ..., USE.NAMES = F);
# Build a named list of lists where each element additionally carries its own
# name as entry .key (default 'name').
list.with.names = function(..., .key = 'name') {
	l = list(...);
	ns = names(l);
	# append each element's name to its own content
	r = nlapply(l, function(n) c(l[[n]], listKeyValue(.key, n)));
	r
}
#
# <par> data type conversions
#
# assure m has at least 1 column
# (a bare vector becomes a one-column matrix; t(t(.)) keeps names as row names)
to.col = function(m) { if (is.null(dim(m))) t(t(m)) else m }
# Turn a named list into a transposed one-column data frame (keys as row names).
# minus: keys to drop; plus: keys to format; do.paste: collapse vector values
# into a single string using this separator; do.format/digits: numeric formatting.
col.frame = function(l, col.name = 'value', minus = NULL, ignore.null = TRUE,
	do.paste = NULL, do.format = T, digits = 3, plus = NULL) {
	if (ignore.null) { for (n in names(l)) { if (is.null(l[[n]])) l[[n]] = NULL; } }
	if (!is.null(minus)) { for (n in minus) { l[[n]] = NULL; } }
	my.names = if (!is.null(plus)) plus else names(l);
	# recycle digits to one entry per list element
	digits = if (length(digits) > 1) digits else rep(digits, length(l));
	if (!is.null(do.paste)) {
		if (do.format) {
			i = 1;
			# format numeric entries to the requested precision before collapsing
			for (n in my.names) { if (is.vector(l[[n]])) {
				l[[n]] = paste(sapply(l[[n]],
					function(e){if (is.numeric(e)) sprintf("%.*f", digits[i], e) else e}
				), collapse = do.paste)
				i = i + 1;
			}}
		} else {
			for (n in my.names) { if (is.vector(l[[n]])) l[[n]] = paste(l[[n]], collapse = do.paste) }
		}
	}
	f = as.data.frame(l);
	# several rows: enumerate col.name; otherwise use col.name(s) directly
	if (dim(f)[2] > length(col.name) && length(col.name) == 1)
		row.names(f) = paste(col.name, 1:dim(f)[1], sep = "")
	else row.names(f) = c(col.name);
	t(f)
}
# <i> collect recursively until list or data.frame
# convert list of lists to data frame (assuming identical keys for each sub list)
# also works on list of vectors
# Convert a list of lists (or vectors) to a data frame: union of keys becomes
# the columns, list names become the id column/row names.
listOfLists2data.frame = function(l, idColumn = "id", .names = NULL) {
	# collect keys
	keys = if (is.list(l[[1]]))
		sort(unique(as.vector(unlist(sapply(l, function(e)names(e)))))) else 1:length(l[[1]]);
	if (is.null(.names)) .names = keys;
	# row names
	rows = names(l);
	if (is.null(rows)) rows = 1:length(l);
	# build df
	#df = t(sapply(rows, function(r) { unlist(l[[r]][keys]) }));
	df = t(sapply(rows, function(r)list2df(l[[r]], keys)));
	# idColumn: prepend the element names/indeces as first column
	df = if (!is.null(idColumn)) {
		data.frame.types(data.frame(..idColumn.. = rows, df),
			row.names = 1:length(rows), names = c(idColumn, .names));
	} else {
		data.frame.types(df, row.names = rows, names = .names);
	}
	df
}
# resetColNames: reset column names to names of first data frame
# colsFromFirstDf: take columns from the first data frame
# <i> improved algorithm: unlist everything, bind together: cave: data types,
# strictly valid only for matrices
# Use cases:
# list with named vectors: get data frame that contains all vectors with all possible names represented
# listOfDataFrames2data.frame(cfs, colsFromUnion = T, do.transpose = T, idColumn = NULL);
# Bind a list of data frames into one (rbind by default); list names populate
# the id column. colsFromUnion: homogenize to the union of all column names;
# colsFromFirstDf: restrict to the first data frame's columns.
listOfDataFrames2data.frame = function(l, idColumn = "id", do.unlist = T, direction = rbind,
	resetColNames = T, colsFromFirstDf = F, colsFromUnion = F, do.transpose = F, idAsFactor = F) {
	# row names
	# <!> 2009-11-20 changed from: rows = firstDef(names(l), list(1:length(l)));
	rows = firstDef(names(l), 1:length(l));
	# columns
	ns = NULL;
	if (colsFromUnion) {
		ns = unique(unlist(lapply(l, names)));
		# get data.frame names
		ns = names(do.call(data.frame, listKeyValue(ns, rep(NA, length(ns)))));
		resetColNames = F;	# <!> mutually exclusive
	}
	# build df
	df = NULL;
	for (i in 1:length(rows)) {
		if (is.null(l[[i]])) next;	# ignore empty entries
		# <p> force to data frame
		df0 = if (do.transpose) as.data.frame(t(l[[i]])) else as.data.frame(l[[i]]);
		# <p> homogenize columns
		if (colsFromUnion) {
			# add missing columns
			ns0 = setdiff(ns, names(df0));
			df0 = do.call(data.frame, c(list(df0), listKeyValue(ns0, rep(NA, length(ns0)))));
			# correct order of columns
			df0 = df0[, ns];
		}
		if (!is.null(df)) {
			if (colsFromFirstDf) df0 = df0[, names(df)] else
			if (resetColNames) {
				names(df0) = if (is.null(idColumn)) names(df) else names(df)[-1];
			}
		}
		# <p> add id column
		df0 = if (is.null(idColumn)) df0 else cbind(rep(rows[i], dim(df0)[1]), df0);
		# <A> case differentiation should not me necessary
		df = if (i == 1) df0 else direction(df, df0);
	}
	if (!is.null(idColumn)) names(df)[1] = idColumn;
	if (do.unlist) for (n in names(df)) { df[[n]] = unlist(df[[n]]); }
	if (idAsFactor) df[[idColumn]] = as.factor(df[[idColumn]]);
	row.names(df) = NULL;
	df
}
# column-bind a list of data frames (no id column, column names kept)
cbindDataFrames = function(l, do.unlist = F) {
	listOfDataFrames2data.frame(l, idColumn = NULL, do.unlist = do.unlist, direction = cbind,
		resetColNames = F)
}
# Row-bind a list of data-frame-like objects into one data frame.
#
# @param l list of data frames (or objects coercible to data frames)
# @param useDisk stream the pieces through a temporary file (memory saving)
# @param idColumn if non-null, name of a column receiving the list names
# @param transpose transpose each element before binding
# @return combined data frame
rbindDataFrames = function(l, do.unlist = F, useDisk = F, idColumn = NULL, transpose = F,
	resetColNames = F, colsFromFirstDf = F, idAsFactor = F) {
	r = if (useDisk) {
		tempTable = tempfile();
		on.exit(unlink(tempTable), add = TRUE);	# do not leak the temporary file
		for (i in 1:length(l)) {
			d0 = l[[i]];
			# <!> fixed: class(d0) != 'data.frame' yields a length > 1 condition
			# for matrices (class c('matrix', 'array')) and errors in if() on
			# R >= 4.2; is.data.frame is the robust test
			if (!is.data.frame(d0)) d0 = as.data.frame(d0);
			if (transpose) d0 = t(d0);
			if (!is.null(idColumn)) {
				d0 = data.frame(idColumn = names(l)[i], d0);
				names(d0)[1] = idColumn;
			}
			# first piece writes the header, subsequent pieces append
			write.table(d0, file = tempTable, col.names = i == 1, append = i != 1, row.names = F);
		}
		read.table(tempTable, header = T, as.is = T);
	} else {
		listOfDataFrames2data.frame(l, idColumn = idColumn, do.unlist = do.unlist,
			direction = rbind, resetColNames = resetColNames, colsFromFirstDf = colsFromFirstDf,
			idAsFactor = idAsFactor)
	}
	r
}
# names2col assigns names of the list to a column of the data frame and values to the valueCol
# Convert a list to a one-row data frame with columns `cols` (missing entries NA).
# names2col: pivot to a two-column (name, value) data frame instead, with the
# list names in column names2col and the values in valueCol.
list2df = function(l, cols = names(l), row.name = NULL, names2col = NULL, valueCol = 'value') {
	# map list entries onto the requested columns
	idcs = if (is.null(cols)) 1:length(l) else
		if (all(is.integer(cols))) cols else which.indeces(names(l), cols);
	# unnamed/positional case: generate column names C1, C2, ...
	if (is.null(cols) || all(is.integer(cols))) cols = paste('C', 1:length(l), sep = '');
	r = as.list(rep(NA, length(cols)));
	names(r) = cols;
	r[idcs] = l;
	r = as.data.frame(r, stringsAsFactors = F);
	if (!is.null(row.name)) row.names(r)[1] = row.name;
	if (!is.null(names2col)) {
		r = data.frame(name = names(r), value = unlist(r[1, ]), row.names = NULL, stringsAsFactors = F);
		names(r) = c(names2col, valueCol);
	}
	r
}
# Per-element test whether a string looks like a number:
# optionally signed decimal with optional fraction and exponent (e.g. "-1.5e-3").
be.numeric = function(v) {
	numberRe = '^-?\\d*(\\.\\d+)?(e-?\\d+)?$';
	sapply(v, function(el) grepl(numberRe, el, ignore.case = T, perl = T))
}
# Pretty-print a list as a (name, value) data frame: numeric-looking values are
# formatted to `digits`, switching to scientific notation beyond 10^scientific;
# entries with empty name and NA value act as separator rows ('-').
list2df.print = function(l, valueCol = 'value', names2col = NULL, ..., digits = 3, scientific = 3) {
	l1 = list2df(l, valueCol = valueCol, names2col = names2col, ...);
	numericRows = be.numeric(l1[[valueCol]]);
	numbers = as.numeric(l1[[valueCol]][numericRows]);
	log10range = max(floor(log10(numbers))) - min(floor(log10(numbers)));
	#fmt = if (log10range > digits + 1) '%.*e' else '%.*f';
	# choose fixed vs scientific per number by order of magnitude
	numbers = sprintf(ifelse(abs(floor(log10(numbers))) > scientific, '%.*e', '%.*f'), digits, numbers);
	#numbers = sapply(numbers, function(n)sprintf(fmt, digits, n));
	separators = as.vector(names(l) == '' & is.na(l));
	l1[separators, names2col] = '-';
	l1[separators, valueCol] = '';
	l1[numericRows, valueCol] = numbers;
	print(l1);
}
# Append list l as a new row to data frame d (columns matched by name via list2df).
rbind.list2df = function(d, l, row.name = NULL) {
	d = as.data.frame(d);
	r = list2df(l, names(d), row.name);
	r0 = rbind(d, r);
	r0
}
# take list of lists
# names of list elements become column-names
# Convert a list of lists to a data frame: each requested key becomes one
# column extracted across all elements (via list.kp).
listOfLists2df = function(l, columnNames = names(l[[1]])) {
	colV = lapply(columnNames, function(n)Df_(list.kp(l, n, do.unlist = T)));
	r = Df_(do.call(cbind, colV), names = columnNames);
	r
}
# d: data frame, l: list with names corresponding to cols, values to be searched for in columns
# d: data frame, l: named list, names correspond to columns of d, values are
#	the row values searched for (all must match)
# @return indeces of the rows of d matching all values in l
searchDataFrame = function(d, l, .remove.factors = T) {
	ns = names(l);
	d = d[, ns, drop = F];
	if (.remove.factors) {
		# <!> fixed: the former scalar use of ifelse() returned only the first
		# element of levels(e)[e]; if/else keeps the complete value
		l = sapply(l, function(e) if (is.factor(e)) as.character(e) else e);
	}
	# compare each row against the search values; keep rows matching all of them
	rs = which(as.vector(apply(apply(d, 1, function(r)(r == l)), 2, all)));
	rs
}
# Resolve a column specification (all-numeric or all-character) to integer
# column indeces of d.
# NOTE(review): is.numeric/is.character test the WHOLE vector, so
# `cols[is.numeric(cols)]` selects all or nothing - mixed numeric/character
# specifications are not actually supported; confirm intent.
.df.cols = which.cols = function(d, cols, regex = F) {
	cols[is.numeric(cols)] = as.integer(cols[is.numeric(cols)]);
	cols[is.character(cols)] = which.indeces(cols[is.character(cols)], names(d), regex = regex);
	as.integer(cols)
}
# select columns by name
# select columns by name (regex matching against column names by default)
.df = function(d, names, regex = T, as.matrix = F) {
	cols = which.indeces(names, names(d), regex = regex);
	d0 = d[, cols, drop = F];
	# <t> simpler version:
	# d0 = d[, .df.cols(d, names, regex)];
	if (as.matrix) d0 = as.matrix(d0);
	d0
}
# move the given columns to the front, keeping the relative order of the rest
.df.reorder = function(d, names, regex = T) {
	cols = .df.cols(d, names, regex);
	d0 = d[, c(cols, setdiff(1:dim(d)[2], cols))];
	d0
}
# remove columns by name
# remove columns by name or numeric index
.dfm = function(d, names, regex = F, as.matrix = F) {
	# numeric selectors are column indeces, others are resolved by name
	cols = if (all(is.numeric(names))) as.integer(names) else which.indeces(names, names(d), regex = regex);
	r = d[, -cols, drop = F];
	if (as.matrix) r = as.matrix(r);
	r
}
# remove rows by name
# remove rows by name or numeric index
.dfrmr = function(d, names, regex = F, as.matrix = F) {
	# numeric selectors are row indeces, others are resolved against row names
	rows = if (all(is.numeric(names)))
		as.integer(names) else
		which.indeces(names, row.names(d), regex = regex);
	r = d[-rows, , drop = F];
	if (as.matrix) r = as.matrix(r);
	r
}
# remove rows/columns by name
# remove rows and/or columns by name or numeric index (NULL keeps everything)
.dfrm = function(d, rows = NULL, cols = NULL, regex = F, as.matrix = F) {
	d = as.data.frame(d);	# enforce data frame
	rowSel = if (is.null(rows)) 1:dim(d)[1] else
		-(if (all(is.numeric(rows))) as.integer(rows) else which.indeces(rows, row.names(d), regex = regex));
	colSel = if (is.null(cols)) 1:dim(d)[2] else
		-(if (all(is.numeric(cols))) as.integer(cols) else which.indeces(cols, names(d), regex = regex));
	r = d[rowSel, colSel, drop = F];
	if (as.matrix) r = as.matrix(r);
	r
}
# convert strings to data frame names
# <i> create a data frame and extract names
# (mimics the ':' -> '.' substitution performed by data.frame/make.names)
.dfns = function(ns)gsub(':', '.', ns);
# manipulate list of vectors
# vectors i = 1,.., n with entries v_ij are represented as vector v_11, ..., v_n1, v_21, ...
# Interleave vectors element-wise: v_11, v_21, ..., v_12, v_22, ...
# Accepts either several vector arguments or a single list of vectors.
vector.intercalate = meshVectors = function(...) {
	vectors = list(...);
	if (length(vectors) == 1) vectors = vectors[[1]];
	# one row per vector; column-major flattening interleaves the entries
	rows = t(sapply(vectors, function(vec) unlist(vec)));
	as.vector(rows)
}
is.sorted = function(...)(!is.unsorted(...))
# is v strictly ascending?
#
# @param v vector with an order relation (numeric or character)
# @return TRUE if every element is strictly greater than its predecessor
is.ascending = function(v) {
	# vectorized pairwise comparison replaces the former element-wise loop;
	# all() over an empty comparison yields TRUE for vectors of length < 2
	all(v[-1] > v[-length(v)])
}
# pad vector v to length N by appending `value`
pad = function(v, N, value = NA) {
	filler = rep(value, N - length(v));
	c(v, filler)
}
#
# <par> number sequences
#
# Repeat each element of l: n times each (scalar n), or n[i] times for element i.
# NOTE(review): avu is a project helper, presumably as.vector(unlist(.)) - confirm.
rep.each = function(l, n) {
	l = avu(l);
	if (length(n) == 1) as.vector(sapply(l, function(e)rep(e, n))) else
		avu(sapply(seq_along(l), function(i)rep(l[i], n[i])))
}
# Repeat each row of m: n times (see rep.each); data frame inputs keep their names.
rep.each.row = function(m, n) {
	r = matrix(rep.each(m, n), ncol = ncol(m));
	# NOTE(review): class(m) == 'data.frame' only works for plain data frames
	if (class(m) == 'data.frame') r = Df_(r, names = names(m));
	r
}
rep.list = function(l, n) lapply(1:length(l), function(e)l);
# Interleave matrices row-wise (direction = 1) or column-wise (direction = 2);
# all arguments are assumed to share the same dimensions.
matrix.intercalate = function(..., direction = 1) {
	l = list(...);
	# <!> assume same dimension
	d = dim(l[[1]]);
	N = prod(d);
	# <p> create new matrix
	v = c(if (direction == 1) sapply(l, as.vector) else sapply(sapply(l, t), as.vector) , recursive = T);
	# one row per input matrix, then flatten column-major to interleave
	vN = as.vector(matrix(v, ncol = N, byrow = T));
	r = if (direction == 1)
		matrix(vN, nrow = d[1] * length(l)) else
		matrix(vN, ncol = d[2] * length(l), byrow = T);
	# <p> return value
	if (class(l[[1]]) == 'data.frame') r = Df_(r, names = names(l[[1]]));
	r
}
# Expand rows according to an integer weights column: a row with weight w is
# repeated w times (weight 0 drops the row); the weights column is removed.
data.frame.expandWeigths = function(data, weights = 'weights') {
	w = data[[weights]];
	weightsCol = which(names(data) == weights);
	df0 = lapply(1:length(w), function(i) {
		if (w[i] > 0) rep.each.row(data[i, -weightsCol], w[i]) else list();
	});
	df1 = rbindDataFrames(df0);
	df1
}
# spread/fill vector to indeces
# Spread v to positions idcs of a new length-N vector; remaining entries
# are filled with `default`.
vector.spread = function(v, idcs, N, default = 0) {
	result = rep(default, N);
	result[idcs] = v;
	result
}
# create new vector with length == length(v) + length(idcs)
# idcs are positions in the final vector
vector.embed = function(v, idcs, e, idcsResult = T) {
	# translate insertion positions given relative to the input vector
	if (!idcsResult) idcs = idcs + seq_along(idcs) - 1;
	N = length(v) + length(idcs);
	result = rep(NA, N);
	# original elements keep their relative order around the insertions
	keep = setdiff(1:N, idcs);
	result[keep] = v;
	result[idcs] = e;
	result
}
# set values at idcs
# Set values e at positions idcs; na.rm: value substituted for any remaining
# NA entries afterwards (pass NA to keep them).
vector.assign = function(v, idcs, e, na.rm = 0) {
	result = replace(v, idcs, e);
	if (!is.na(na.rm)) result = replace(result, is.na(result), na.rm);
	result
}
# Assign e into matrix m: an index matrix addresses individual cells,
# a plain vector addresses whole rows (byrow) or columns.
matrix.assign = function(m, idcs, e, byrow = T) {
	if (length(dim(idcs)) > 1) {
		m[as.matrix(idcs)] = e;
	} else if (byrow) {
		m[idcs, ] = e;
	} else {
		m[, idcs] = e;
	}
	m
}
# are columns/rows same values in matrix
# Per row (direction = 1) or column (direction = 2): are all entries identical?
matrix.same = function(m, direction = 1) {
	apply(m, direction, function(slice) all(slice == slice[1]))
}
# Indeces of elements of v for which predicate f holds (or fails, with not = T).
vectorIdcs = function(v, f, ..., not = F) {
	matches = sapply(v, f, ...);
	if (not) matches = !matches;
	which(matches)
}
# produce indeces for indeces positioned into blocks of blocksize of which count units exists
# example: expand.block(2, 10, 1:2) == c(1, 2, 11, 12)
expand.block = function(count, blocksize, indeces) {
	# offset the index template into each of the `count` consecutive blocks
	as.vector(apply(to.col(1:count), 1,
		function(i){ (i - 1) * blocksize + t(to.col(indeces)) }
	));
}
# Partition l into consecutive blocks of length(s) and return the (block)
# indeces of blocks equal to s.
search.block = function(l, s) {
	blockSize = length(s);
	nBlocks = length(l) / blockSize;
	matchesBlock = function(i) {
		block = l[((i - 1) * blockSize + 1):(i * blockSize)];
		all(block == s)
	};
	which(sapply(1:nBlocks, matchesBlock))
}
#
# <par> matrix functions
#
# <!> assumes same indeces for rows/columns
# Build a square matrix from a long-format data frame with row index, column
# index and value columns. <!> assumes the same index set for rows and columns.
# referenceOrder: desired order of the indeces in the resulting matrix.
matrixFromIndexedDf = function(df, idx.r = 'idx.r', idx.c = 'idx.c', value = 'value', referenceOrder = NULL) {
	id = unique(c(df[[idx.r]], df[[idx.c]]));
	# matrix indeces
	# <A> canonical order is by repeating vector id for row index, constant for columns within repetition
	# -> matrix filled by columns
	midcs = merge(data.frame(id = id), data.frame(id = id), by = NULL);
	midcs = data.frame(midcs, mfid.i = 1:nrow(midcs));
	# join values onto the full index grid (missing combinations become NA)
	map = merge(df[, c(idx.r, idx.c, value)], midcs,
		by.x = c(idx.r, idx.c), by.y = c('id.x', 'id.y'), all.y = T);
	# return to midcs order
	map = map[order(map$mfid.i), ];
	# filled by rows
	m = matrix(map[[value]], nrow = length(id));
	# reorder matrix
	# NOTE(review): order_align/firstDef are project helpers - semantics assumed
	o = order_align(firstDef(referenceOrder, id), id);
	# reorder in two steps -> out of mem otherwise
	m1 = m[o, ];
	m2 = m1[, o];
	m2
}
# Fill NA cells of a square matrix from the mirrored (transposed) position.
symmetrizeMatrix = function(m) {
	missing = is.na(m);
	m[missing] = t(m)[missing];
	m
}
# Row indeces of m whose entries equal `row` (columns matched by the names of
# row when present, by position otherwise).
which.row = function(m, row) {
	cols = names(as.list(row));
	if (is.null(cols)) cols = 1:length(row);
	matchesRow = function(i) all(m[i, cols] == row);
	candidates = 1:(dim(m)[1]);
	candidates[sapply(candidates, matchesRow)]
}
# lsee: list with searchees
# lsed: list with searched objects
# inverse: lsed are regexes matched against lsee; pre-condition: length(lsee) == 1
# ret.list: for match.multi return list by lsee
# <!><t> cave: semantics changed as of 17.8.2009: return NA entries for unfound lsee-entries
# <!> match multi only implemented for merge = T
# lsee: list with searchees
# lsed: list with searched objects
# inverse: lsed are regexes matched against lsee; pre-condition: length(lsee) == 1
# ret.list: for match.multi return list by lsee
# <!><t> cave: semantics changed as of 17.8.2009: return NA entries for unfound lsee-entries
# <!> match multi only implemented for merge = T
# @return integer indeces of lsee-entries within lsed (a list of index vectors with ret.list)
which.indeces = function(lsee, lsed, regex = F, ret.na = F, merge = T, match.multi = F, ...,
	inverse = F, ret.list = FALSE) {
	if (!length(lsed) || !length(lsee)) return(c());
	# lists are matched by their names
	v = if (is.list(lsed)) names(lsed) else lsed;
	idcs = if (regex) {
		# regex matching: keep lsed-entries matched by any searchee
		which(sapply(lsed, function(e)(
			if (inverse) length(fetchRegexpr(e, lsee, ...)) > 0 else
			any(sapply(lsee, function(see)(length(fetchRegexpr(see, e, ...)) > 0)))
		)))
	} else if (merge) {
		# merge-based lookup (vectorized); all.y keeps unmatched searchees as NA
		d0 = merge(
			data.frame(d = lsed, ix = 1:length(lsed)),
			data.frame(d = lsee, iy = 1:length(lsee)), all.y = T);
		d0 = d0[order(d0$iy), ];
		idcs = if (match.multi) {
			#d0$ix[unlist(sapply(lsee, function(e)which(d0$d == e)))]
			#na.omit(sort(d0$ix))
			r = if (ret.list)
				unlist.n(by(d0, d0$d, function(d)list(na.omit(d$ix)), simplify = FALSE)) else
				na.omit(d0$ix);
			r
		} else {
			# keep only the first match per searchee
			d0$ix[pop(which(c(d0$iy, 0) - c(0, d0$iy) != 0))];
		}
		# less efficient version
		#	} else d0$ix[unlist(sapply(lsee, function(e)which(d0$d == e)[1]))];
		#	} else d0$ix[order(d0$iy)]
		if (!ret.na) idcs = idcs[!is.na(idcs)];
		idcs
	} else {
		# plain per-searchee lookup
		unlist(as.vector(sapply(lsee, function(e){
			w = which(e == v);
			if (!ret.na) return(w);
			ifelse(length(w), w, NA)
		})))
	};
	r = if (ret.list) idcs else as.integer(idcs);
	r
}
# like which.indeces, but return the matching elements of lsed instead of their indeces
grep.vector = function(lsee, lsed, regex = F, ret.na = F, merge = T, match.multi = F, ..., inverse = F) {
	lsed[which.indeces(lsee, lsed, regex, ret.na, merge, match.multi, ..., inverse = inverse)]
}
# return the elements of lsed that start with any of the prefixes in lsee
grep.infixes = function(lsee, lsed, ...) {
	r = grep.vector(sapply(lsee, function(v)sprintf('^%s.*', v)), lsed, regex = T, inverse = F, ... );
	r
}
# force structure to be matrix (arrange vector into a row)
# Force m to matrix form, arranging a vector as a single row.
MR = function(m) {
	if (is.matrix(m)) return(m);
	matrix(m, byrow = T, ncol = length(m))
}
# force structure to be matrix (arrange vector into a columns)
# Force m to matrix form, arranging a vector as a single column.
MC = function(m) {
	if (is.matrix(m)) return(m);
	matrix(m, byrow = F, nrow = length(m))
}
#
# <par> data processing
#
# like table but produce columns for all numbers 1..n (not only for counts > 0)
# cats are the expected categories
# Like table(), but with a count (possibly 0) for EVERY expected category
# min..n (or the explicit `categories`); implemented by seeding the table with
# one occurrence per category and subtracting the seed afterwards.
table.n = function(v, n, min = 1, categories = NULL) {
	if (is.null(categories)) categories = min:n;
	counts = table(c(categories, v)) - rep(1, length(categories));
	as.vector(counts)
}
# relative frequencies of the values of v
table.freq = function(v) {
	counts = table(v);
	counts / sum(counts)
}
# relative frequencies over the fixed category range of table.n
table.n.freq = function(...) {
	t0 = table.n(...);
	r = t0 / sum(t0);
	r
}
#
# <par> data types
#
to.numeric = function(x) { suppressWarnings(as.numeric(x)) }
# set types for columns: numeric: as.numeric
# Coerce df (possibly a list of rows) to a data frame and enforce column types:
# the numeric/character/factor/integer arguments list column names to convert.
# do.rbind: treat df as a list of row vectors (padded to equal length with NA);
# names: character vector of replacement names, or list of old -> new mappings.
data.frame.types = function(df, numeric = c(), character = c(), factor = c(), integer = c(),
	do.unlist = T, names = NULL, row.names = NULL, reset.row.names = F, do.rbind = F, do.transpose = F,
	stringsAsFactors = F) {
	if (do.rbind) {
		#old code: df = t(sapply(df, function(e)e));
		lengthes = sapply(df, length);
		maxL = max(lengthes);
		# pad ragged rows with NA to the maximal length
		df = t(sapply(1:length(df), function(i)c(df[[i]], rep(NA, maxL - lengthes[i]))));
	}
	if (do.transpose) df = t(df);
	df = as.data.frame(df, stringsAsFactors = stringsAsFactors);
	# set or replace column names
	if (!is.null(names)) {
		if (class(names) == "character") names(df)[1:length(names)] = names;
		if (class(names) == "list") names(df) = vector.replace(names(df), names);
	}
	if (do.unlist) for (n in names(df)) { df[[n]] = unlist(df[[n]]); }
	for (n in numeric) { df[[n]] = as.numeric(df[[n]]); }
	for (n in integer) { df[[n]] = as.integer(df[[n]]); }
	for (n in character) { df[[n]] = as.character(df[[n]]); }
	for (n in factor) { df[[n]] = as.factor(df[[n]]); }
	if (reset.row.names) row.names(df) = NULL;
	if (length(row.names) > 0) row.names(df) = row.names;
	df
}
# Named list of the class of every column of dataFrame.
DfClasses = function(dataFrame)nlapply(dataFrame, function(n)class(dataFrame[[n]]));
# Coerce the columns named in as_integer to integer (in place, returns the frame).
DfAsInteger = function(dataFrame, as_integer) {
	#dfn = apply(dataFrame[, as_integer, drop = F], 2, function(col)as.integer(avu(col)));
	# <!> 6.6.2016 as.integer first needed to retain factor status on factors
	dfn = nlapply(as_integer, function(col)avu(as.integer(dataFrame[[col]])));
	dataFrame[, as_integer] = do.call(cbind, dfn);
	dataFrame
}
# Coerce the columns named in as_character to character (in place, returns the frame).
DfAsCharacter = function(dataFrame, as_character) {
	#dfn = apply(dataFrame[, as_character, drop = F], 2, function(col)as.character(avu(col)));
	#dataFrame[, as_character] = as.data.frame(dfn, stringsAsFactors = FALSE);
	dfn = nlapply(as_character, function(col)avu(as.character(dataFrame[[col]])));
	dataFrame[, as_character] = do.call(cbind, dfn);
	dataFrame
}
# as of 22.7.2013 <!>: min_ applied before names/headerMap
# as of 19.12.2013 <!>: as.numeric -> as_numeric
# as of 22.5.2014 <!>: t -> t_
# as of 13.11.2014 <!>: sapply -> simplify_
#' Create data frames with more options than \code{data.frame}
#'
#' Processing order: transpose (t_) -> drop columns (min_) -> simplify ->
#' rename (names, headerMap) -> recode values (valueMap) -> type coercions ->
#' transformations (log, minus one) -> row names -> unlist columns.
#' \code{Df_as_is} controls stringsAsFactors (TRUE keeps strings as character).
Df_ = function(df0, headerMap = NULL, names = NULL, min_ = NULL,
	as_numeric = NULL, as_character = NULL, as_factor = NULL, as_integer = NULL,
	row.names = NA, valueMap = NULL, Df_as_is = TRUE, simplify_ = FALSE,
	deep_simplify_ = FALSE, t_ = FALSE, unlist_cols = F, transf_log = NULL, transf_m1 = NULL,
	Df_doTrimValues = FALSE, Df_mapping_value = '__df_mapping_value__') {
	#r = as.data.frame(df0);
	if (t_) df0 = t(df0);
	r = data.frame(df0, stringsAsFactors = !Df_as_is);
	# drop unwanted columns before any renaming (see note above)
	if (!is.null(min_)) {
		is = which.indeces(min_, names(r));
		if (length(is) > 0) r = r[, -is, drop = F];
	}
	if (simplify_) r = sapply(r, identity);
	if (deep_simplify_) r = as.data.frame(
		nlapply(r, function(col)sapply(r[[col]], unlist)), simplify_stringsAsFactors_marker_ = NULL, stringsAsFactors = !Df_as_is
	);
	# rename: character vector renames positionally, list maps old -> new
	if (!is.null(names)) {
		if (class(names) == 'character') names(r)[1:length(names)] = names;
		if (class(names) == 'list') names(r) = vector.replace(names(r), names);
	}
	if (!is.null(headerMap)) names(r) = vector.replace(names(r), headerMap);
	# recode values per column; Df_mapping_value marks unmapped entries,
	# which are then left at their original value
	if (!is.null(valueMap)) {
		for (n in names(valueMap)) {
			vs = if (Df_doTrimValues)
				nina(trimString(as.character(r[[n]])), Df_mapping_value) else
				as.character(r[[n]]);
			vs = nina(valueMap[[n]][vs], Df_mapping_value);
			r[[n]] = ifelse(vs == Df_mapping_value, as.character(r[[n]]), vs);
		}
	}
	if (!is.null(as_numeric)) {
		dfn = apply(r[, as_numeric, drop = F], 2, function(col)as.numeric(avu(col)));
		r[, as_numeric] = as.data.frame(dfn);
	}
	if (!is.null(as_integer)) r = DfAsInteger(r, as_integer);
	if (!is.null(as_character)) r = DfAsCharacter(r, as_character);
	if (!is.null(as_factor)) {
		# <N> does not work
		#dfn = apply(r[, as_factor, drop = F], 2, function(col)as.factor(col));
		#r[, as_factor] = dfn;
		for (f in as_factor) r[, f] = as.factor(r[[f]]);
	}
	#
	# <p> transformations
	#
	if (!is.null(transf_log)) r[, transf_log] = log(r[, transf_log, drop = F]);
	if (!is.null(transf_m1)) r[, transf_m1] = r[, transf_m1, drop = F] - 1;
	if (!all(is.na(row.names))) row.names(r) = row.names;
	if (unlist_cols) for (n in names(r)) r[[n]] = avu(r[[n]]);
	r
}
# Convenience wrapper around Df_: build a data frame from ... and post-process
# it (rename, drop, coerce, transpose; see Df_ for the option semantics).
Df = function(..., headerMap = NULL, names = NULL, min_ = NULL, row.names = NA, Df_as_is = TRUE,
	as_numeric = NULL, as_character = NULL, as_factor = NULL, t_ = F, unlist_cols = F) {
	df0 = data.frame(...);
	Df_(df0,
		headerMap = headerMap, names = names, min_ = min_, row.names = row.names,
		as_numeric = as_numeric, as_character = as_character, as_factor = as_factor,
		Df_as_is = Df_as_is, t_ = t_, unlist_cols = unlist_cols
	)
}
# Decompose a data frame into a named list of its columns.
Df2list = function(df) {
	d = as.data.frame(df);
	nlapply(names(d), function(column) d[[column]])
}
# Select the rows of `data` that match every name -> value pair in `l`.
# na.rm post-processes the logical row mask (default nif maps NA to FALSE).
Dfselect = function(data, l, na.rm = nif) {
	# one equality vector per constraint, combined row-wise with `all`
	sel = apply(sapply(nlapply(l, function(n)data[[n]] == l[[n]]), identity), 1, all);
	r = data[na.rm(sel), ];
	r
}
# Post-process a list, applying the steps in this order:
#   min_:    drop elements by name      sel_:    keep elements by name
#   rm.null: remove NULL elements       null2na: replace NULL elements by NA
#   names_:  overwrite the leading names
#   simplify_: sapply-style simplification of the result
List_ = .List = function(l, min_ = NULL, sel_ = NULL,
	rm.null = F, names_ = NULL, null2na = F, simplify_ = F) {
	if (!is.null(min_)) {
		i = which.indeces(min_, names(l));
		if (length(i) > 0) l = l[-i];
	}
	if (!is.null(sel_)) {
		i = which.indeces(sel_, names(l));
		if (length(i) > 0) l = l[i];
	}
	if (rm.null) {
		remove = -which(sapply(l, is.null));
		if (length(remove) > 0) l = l[remove];
	}
	if (null2na) {
		nullI = which(sapply(l, is.null));
		l[nullI] = NA;
	}
	if (!is.null(names_)) names(l)[Seq(1, length(names_))] = names_;
	if (simplify_) l = sapply(l, identity);
	l
}
# Build a list from ... and post-process it with .List (see List_).
# NOTE(review): the arguments in ... are forced when list(...) is built, so
# `envir` appears to have no effect on their evaluation -- confirm before relying on it.
List = function(..., min_ = NULL, envir = parent.frame(), names_ = NULL) {
	l = eval(list(...), envir = envir);
	.List(l, min_ = min_, names_ = names_);
}
# unlist wrapper that can first replace NULL entries by NA (null2na_), so they
# are not silently dropped by unlist.
Unlist = function(l, ..., null2na_ = FALSE) {
	if (null2na_) {
		isNull = sapply(l, is.null);
		l[isNull] = NA;
	}
	unlist(l, ...)
}
# last/first-less helpers
last = function(v) v[length(v)]
pop = function(v) v[-length(v)]
# differences between successive elements, first diff is first element with start
vectorLag = function(v, start = 0) {
	shifted = c(start, v);
	pop(c(v, start) - shifted)
}
# split N into `by` nearly equal integer parts that sum to N
splitN = function(N, by = 4) vectorLag(round(cumsum(rep(N/by, by))))
# split N into as few parts of size at most `max` as possible, summing to N
splitToMax = function(N, max = 4) {
	parts = ceiling(N/max);
	vectorLag(round(cumsum(rep(N/parts, parts))))
}
# cumsum returning indeces for numbers given in Ns
# i.e. start offsets of consecutive blocks of sizes Ns (first block at `offset`);
# the trailing element 'N' is the one-past-the-end offset (dropped via do.pop).
cumsumI = function(Ns, offset = 1, do.pop = FALSE) {
	cs = vectorNamed(c(0, cumsum(Ns)) + offset, c(names(Ns), 'N'));
	if (do.pop) cs = pop(cs);
	cs
}
# recursive cumsum (one level)
# totals of sub-lists are computed first, then turned into start offsets
cumsumR = function(l, offset = 1) {
	cs0 = if (is.list(l)) lapply(l, cumsumR, offset = 0) else rev(cumsum(l))[1];
	cs = vectorNamed(c(0, pop(unlist(cs0))) + offset, names(cs0));
	cs
}
#
# <par> sets and permutations
#
#' @title wrapper for order to allow multivariate ordering
#'
#' Accepts a vector, a list of vectors or a data frame (columns act as
#' successive sort keys). Extra arguments (e.g. decreasing) are passed to order.
Order = function(v, ...) {
	# <!> bug fix: `...` used to be passed to do.call itself (raising an
	# 'unused argument' error); it is now merged into order's argument list.
	# Columns are unnamed so that column names cannot partially match
	# order() parameters such as `decreasing` or `na.last`.
	if (is.data.frame(v)) return(do.call(order, c(unname(as.list(v)), list(...))));
	if (is.list(v)) return(do.call(order, c(unname(v), list(...))));
	order(v, ...)
}
#' @title Return all value combinations appearing in a data frame
#'
#' Builds the cross product of the observed levels per column
#' (taken from the dimnames of \code{table(d)}).
#'
#' @examples
#' combs = valueCombinations(iris);
valueCombinations = function(d) merge.multi.list(dimnames(table(d)));
#' @title Computes order so that inverseOrder after order is the identity
#'
#' Returns the inverse of the permutation produced by \code{Order(p)}, i.e. the
#' rank vector: \code{sort(v)[inverseOrder(v)] == v}. Accepts vectors, lists
#' and data frames (multivariate ordering via \code{Order}).
#'
#' @examples
#' v = runif(1e2);
#' print(all(sort(v)[inverseOrder(v)] == v))
Rank = inverseOrder = inversePermutation = function(p) {
	## <p> build-in version (not working for multivariate case)
	#rank(v, ties.method = 'first')
	# number of elements to rank: rows for data frames, length otherwise
	# <!> idiom fix: was `class(p) == 'data.frame'` (fails for multi-class objects)
	N = if (is.data.frame(p)) nrow(p) else length(p);
	which.indeces(1:N, Order(p))
}
#' @title Calculates inverseOrder, assuming that the argument is already an \code{order}-vector.
inverseOrder_fromOrder = function(p)which.indeces(1:length(p), p)
#' @title Return vector that reorders v to equal reference.
#'
#' Assuming that two arguments are permutaions of each other, return a vector of indeces such that \code{all(reference == v[order_align(reference, v)]) == T} for all vectors \code{reference, v}.
#'
#' @examples
#' sapply(1:10, function(i){v = sample(1:5); v[order_align(5:1, v)]})
#' sapply(1:10, function(i){v = runif(1e2); v1 = sample(v, length(v)); all(v1[order_align(v, v1)] == v)})
order_align = function(reference, v)Order(v)[inverseOrder(reference)];
#' Calculates \code{order_align}, assuming that the both arguments are already orders.
#' sapply(1:40, function(i){v = runif(1e2); v1 = sample(v, length(v)); all(v1[order_align_fromOrder(order(v), order(v1))] == v)})
order_align_fromOrder = function(reference, v)v[inverseOrder_fromOrder(reference)];
# permutation is in terms of elements of l (not indeces)
# Given a mapping perm[[from]] -> perm[[to]] defined on the elements of l,
# return the permutation that carries l onto its image under the map
# (indices into l, or the permuted elements with returnIndeces = F).
applyPermutation = function(l, perm, from = 'from', to = 'to', returnIndeces = T) {
	# 1. bring perm[[from]] in the same order as l
	# 2. apply this order to perm[[to]]
	r0 = perm[[to]][order(perm[[from]])[inverseOrder(l)]];
	# 3. determine permutation going from l to r0
	r = order(l)[inverseOrder(r0)]
	if (!returnIndeces) r = l[r];
	r
}
# Row permutation ordering a data frame by the given columns (names or
# indices; default: all columns). decreasing/na.last as in base order,
# except na.last defaults to FALSE here (NAs first).
order.df = function(df, cols = NULL, decreasing = F, na.last = F) {
	if (is.null(cols)) cols = 1:ncol(df);
	if (!is.numeric(cols)) cols = which.indeces(cols, names(df));
	# <!> previously built the order() call as text and ran eval(parse(...));
	# pass the selected columns directly instead (unnamed, so that column
	# names cannot partially match order() parameters)
	o = do.call(order,
		c(unname(as.list(df[, cols, drop = F])), list(decreasing = decreasing, na.last = na.last)));
	o
}
# Order a data frame by columns whose values are first recoded through `maps`:
# maps is a named list (column name -> map); a list-valued map translates the
# column into a temporary sort key (regex keys with regex = T), any other
# entry sorts by the column itself. Extra arguments go to order.df.
order.df.maps = function(d, maps, ..., regex = F) {
	cols = NULL;
	for (i in 1:length(maps)) {
		m = names(maps)[i];
		map = maps[[i]];
		keys = names(map);
		cols = c(cols, if (is.list(map)) {
			# synthesize a temporary key column holding the mapped values
			tempColName = sprintf("..order.df.maps.%04d", i);
			col = if (regex)
				sapply(d[[m]], function(e){ j = which.indeces(e, keys, regex = T, inverse = T)
					if (length(j) == 0) NA else map[[j]]
				}) else as.character(map[d[[m]]]);
			col[col == "NULL"] = NA;
			d = data.frame(col, d, stringsAsFactors = F);
			names(d)[1] = tempColName;
		} else { m });
	}
	o = order.df(d, cols, ...);
	o
}
# Stack a named list of data frames row-wise, adding a column `factor` that
# holds, for every row, the name of the list element it came from.
# Returns NULL for an empty (or unnamed) list, as before.
data.frame.union = function(l) {
	# build all parts first, then bind once (avoids quadratic rbind-in-loop)
	parts = lapply(names(l), function(n) {
		df = l[[n]];
		factor = rep(n, dim(df)[1]);
		cbind(df, factor)
	});
	dfu = do.call(rbind, parts);
	dfu
}
# levels: take levels in that order, unmentioned levels are appended
# setLevels: set to these levels, else to NA
# Recode the values/levels of f via `map` (old -> new); with others2na,
# unmapped values become NA. Preserves integer/factor type of the input.
recodeLevels = function(f, map = NULL, others2na = TRUE, levels = NULL, setLevels = NULL) {
	r = f;
	if (!is.null(map)) {
		# map others to NA
		if (others2na) {
			nonmentioned = setdiff(if (is.factor(f)) levels(f) else unique(f), names(map));
			map = c(map, listKeyValue(nonmentioned, rep(NA, length(nonmentioned))));
		}
		v = vector.replace(as.character(f), map);
		if (is.integer(f)) v = as.integer(v);
		if (is.factor(f)) v = as.factor(v);
		r = v;
	}
	if (!is.null(levels) || !is.null(setLevels)) {
		# <p> preparation
		fact0 = as.factor(r);
		levls = levels(fact0);
		r = levls[fact0];
		# <p> new levels
		levlsN0 = firstDef(setLevels, levels, levls);
		levlsN = c(levlsN0, setdiff(levls, levlsN0));
		# <p> remove unwanted factors
		if (!is.null(setLevels)) r = ifelse(r %in% setLevels, r, NA);
		r = factor(r, levels = if (!is.null(setLevels)) levlsN0 else levlsN);
	}
	r
}
# Union of several sets given as separate arguments or, with as.list = TRUE,
# as a single list in the first argument. With .drop, a single list argument
# is auto-detected and its elements are treated as the sets.
Union = function(..., .drop = T, as.list = FALSE) {
	l = if (as.list) list(...)[[1]] else list(...);
	# <!> bug fix: a stray `l = list(...)` used to immediately override the
	# as.list branch above, making as.list = TRUE ineffective
	# auto-detect list of values
	if (.drop && length(l) == 1 && is.list(l[[1]])) l = l[[1]];
	r = NULL;
	for (e in l) { r = union(r, e); }
	r
}
# Intersection of several sets; accepts separate arguments, a single list
# (auto-detected via .drop) or, with as.list = TRUE, a list as first argument.
Intersect = function(..., .drop = T, as.list = FALSE) {
	l = if (as.list) list(...)[[1]] else list(...);
	# auto-detect list of values
	if (.drop && length(l) == 1 && is.list(l[[1]])) l = l[[1]];
	Reduce(intersect, l[-1], l[[1]])
}
# Pairwise intersection sizes of a named list of sets, returned as a
# symmetric matrix with the set names on both dimensions.
intersectSetsCount = function(sets) {
	i = iterateModels(list(s1 = names(sets), s2 = names(sets)), function(s1, s2) {
		length(intersect(sets[[s1]], sets[[s2]]))
	}, lapply__ = lapply);
	#r = reshape.wide(Df(i$models_symbolic, count = unlist(i$results)), 's1', 's2');
	rM = matrix(i$results, nrow = length(sets), byrow = T);
	dimnames(rM) = list(names(sets), names(sets));
	rM
}
# Cumulative union: element k of the result is the union of the first k sets;
# result elements are named after the inputs.
unionCum = function(..., .drop = T) {
	l = list(...);
	# auto-detect list of values
	if (.drop && length(l) == 1 && is.list(l[[1]])) l = l[[1]];
	r = l[1];
	if (length(l) > 1)
		for (n in names(l)[-1]) { r = c(r, List(union(r[[length(r)]], l[[n]]), names_ = n)); }
	r
}
# row bind of data.frames/matrices with equal number of cols
# (flattens each element row-major, then reshapes once; result is a matrix
# unless as.data.frame is requested; column names come from the first element)
lrbind = function(l, as.data.frame = F, names = NULL) {
	ncols = dim(l[[1]])[2];
	flat = unlist(sapply(l, function(block) unlist(t(block))));
	res = matrix(flat, byrow = T, ncol = ncols);
	dimnames(res) = list(NULL, names(l[[1]]));
	if (as.data.frame) {
		res = data.frame(res);
		if (!is.null(names)) names(res) = names;
	}
	res
}
#
# logic arrays/function on list properties
#
# number of TRUEs in a logical vector (same as in Rlab)
count = function(v, na.rm = T) sum(v, na.rm = na.rm)
# proportion of TRUEs in a logical vector
fraction = function(v, na.rm = T) mean(v, na.rm = na.rm)
# treat v as set
# NOTE(review): this sums (not counts) the unique values since count() is
# sum(); for the number of distinct elements use size() -- verify intent
set.card = function(v) count(unique(v))
# cardinality of a set
size = function(set) length(unique(set))
# null is false: NULL/NA/FALSE map to FALSE, elementwise for vectors
nif = function(b) {
	if (length(b) == 0) return(F);
	!(is.null(b) | is.na(b) | !b)
}
# null is true: NULL/NA/TRUE map to TRUE, elementwise for vectors
nit = function(b) {
	if (length(b) == 0) return(T);
	is.null(b) | is.na(b) | b
}
# null is zero: replace NULL/NA entries by 0
niz = function(e) ifelse(is.null(e) | is.na(e), 0, e)
# null is na (or other special value): replace NULL elements of a list/vector
nina = function(e, value = NA) sapply(e, function(el) ifelse(is.null(el), value, el))
#
# <p> complex structures
#
#
# Averaging a list of data frames per entry over list elements
#
# meanMatrices = function(d) {
# 	df = as.data.frame(d[[1]]);
# 	ns = names(df);
# 	# iterate columns
# 	dfMean = sapply(ns, function(n) {
# 		m = sapply(d, function(e)as.numeric(as.data.frame(e)[[n]]));
# 		mn = apply(as.matrix(m), 1, mean, na.rm = T);
# 		mn
# 	});
# 	dfMean
# }
# Entry-wise mean over a list of same-dimension matrices; matrices whose
# dimensions disagree with the first are dropped with a warning.
meanMatrices = function(d) {
	dm = dim(d[[1]]);
	good = sapply(d, function(m)(length(dim(m)) == 2 && all(dim(m) == dm)));
	if (any(!good)) warning('meanMatrices: malformed/incompatible matrices in list, ignored');
	d = d[good];
	# flatten each matrix to a column, average across list elements per entry
	m0 = sapply(d, function(e)avu(e));
	m1 = apply(m0, 1, mean, na.rm = T);
	r = matrix(m1, ncol = dm[2], dimnames = dimnames(d[[1]]));
	r
}
# Elementwise mean over a list of equally long vectors (NA entries ignored).
meanVectors = function(d) {
	stacked = as.matrix(sapply(d, function(v) v));
	apply(stacked, 1, mean, na.rm = T)
}
# Mean of a list of scalar numbers.
meanList = function(l) mean(as.numeric(l))
# Entry-wise mean over a list of identically structured lists: for every
# component of the first element, picks the matching mean function
# (matrix / vector / scalar) and averages that component across list elements.
meanStructure = function(l) {
	r = nlapply(names(l[[1]]), function(n) {
		meanFct =
			if (is.matrix(l[[1]][[n]])) meanMatrices else
			if (length(l[[1]][[n]]) > 1) meanVectors else
			meanList;
		meanFct(list.key(l, n, unlist = F));
	});
	r
}
# Center a matrix along rows (direction = 1) or columns (direction = 2) by
# subtracting a per-slice statistic (default: median).
# Returns list(matrix = centered matrix, center = subtracted values).
matrixCenter = function(m, direction = 2, centerBy = median) {
	center = apply(m, direction, centerBy, na.rm = T);
	centered = if (direction == 1) m - center else t(t(m) - center);
	list(matrix = centered, center = center)
}
# Undo matrixCenter: add the per-slice centers back.
# direction as in matrixCenter: 1 = row centers, 2 = column centers.
matrixDeCenter = function(m, center, direction = 2) {
	# <!> bug fix: the directions were swapped relative to matrixCenter, so
	# matrixDeCenter did not invert it (direction == 2 must add the centers
	# along columns via the double transpose, direction == 1 plain addition)
	m = if (direction == 1) (m + center) else t(t(m) + center);
	m
}
#
# <p> combinatorial functions
#
# form all combinations of input arguments as after being constraint to lists
# .first.constant designates whether the first list changes slowest (T) or fastest (F)
# in the resulting data frame,
# i.e. all other factors are iterated for a fixed value of l[[1]] (T) or not
# .constraint provides a function to filter the resulting data frame
# Returns a data frame of combinations (or a list of row-lists with
# .return.lists); column names come from names(l), .col.names or are generated
# from .col.names.prefix.
merge.multi.list = function(l, .col.names = NULL, .col.names.prefix = "X",
	.return.lists = F, .first.constant = T, stringsAsFactors = F, .cols.asAre = F, .constraint = NULL, ...) {
	# <p> determine column names of final data frame
	.col.names.generic = paste(.col.names.prefix, 1:length(l), sep = "");
	if (is.null(.col.names)) .col.names = names(l);
	if (is.null(.col.names)) .col.names = .col.names.generic;
	.col.names[.col.names == ""] = .col.names.generic[.col.names == ""];
	names(l) = .col.names; # overwrite names
	# <p> construct combinations
	if (.first.constant) l = rev(l);
	df0 = data.frame();
	if (length(l) >= 1) for (i in 1:length(l)) {
		newNames = if (.cols.asAre) names(l[[i]]) else names(l)[i];
		# <p> prepare data.frame: handle lists as well as data.frames
		# <!> changed 22.3.2016
		#dfi = if (is.list(l[[i]])) unlist(l[[i]]) else l[[i]];
		dfi = if (!is.data.frame(l[[i]])) unlist(l[[i]]) else l[[i]];
		df1 = data.frame.types(dfi, names = newNames, stringsAsFactors = stringsAsFactors);
		# <p> perform merge
		# cross join: merge without common columns forms the cartesian product
		df0 = if (i > 1) merge(df0, df1, ...) else df1;
	}
	if (.first.constant) df0 = df0[, rev(names(df0)), drop = F];
	if (.return.lists) df0 = apply(df0, 1, as.list);
	if (!is.null(.constraint)) {
		df0 = df0[apply(df0, 1, function(r).do.call(.constraint, as.list(r))), ];
	}
	df0
}
# analysis pattern using merge.multi.list
# i needs not to be an argument to f as .do.call strips excess arguments
# Calls f once per parameter combination (rows of merge.multi.list(modelList));
# superseded by iterateModels.
iterateModels_old = function(modelList, f, ...,
	.constraint = NULL, .clRunLocal = T, .resultsOnly = F, .unlist = 0, lapply__ = clapply) {
	models = merge.multi.list(modelList, .constraint = .constraint);
	r = lapply__(1:dim(models)[1], function(i, ..., f__, models__) {
		# f receives the row index plus one named argument per model column
		args = c(list(i = i), as.list(models__[i, , drop = F]), list(...));
		.do.call(f__, args)
	}, ..., f__ = f, models__ = models);
	r = if (.resultsOnly) r else list(models = models, results = r);
	r = unlist.n(r, .unlist);
	r
}
# list of list, vector contains index for each of these lists to select elements from
# these elements are merged and return
# if sub-element is not a list, take name of sub-element and contruct list therefrom
# namesOfLists controls whether, if a selected element is a list, its name is used instead
# can be used to produce printable summaries
# v is aligned to names(listOfLists) first; the result carries names(v).
list.takenFrom = function(listOfLists, v) {
	ns = names(listOfLists);
	if (any(ns != names(v))) v = v[order_align(ns, names(v))];
	l = lapply(1:length(v), function(i) {
		new = if (!is.list(listOfLists[[i]]))
			listKeyValue(ns[i], listOfLists[[i]][v[i]]) else {
			t = listOfLists[[i]][[v[i]]];
			# list of vectors
			t = (if (!is.list(t)) {
				# define name from higher level
				listKeyValue(firstDef(
					names(listOfLists[[i]])[v[i]], ns[i]
				), list(t))
				# <A> probably better and correct
				#listKeyValue(ns[i], list(t))
			} else if (is.null(names(t))) listKeyValue(ns[i], t) else t);
			t
		}
	});
	names(l) = names(v);
	l
}
# Select one element per sub-list (as list.takenFrom) and merge the picked
# pieces into a single flat list.
merge.lists.takenFrom = function(listOfLists, v) {
	merge.lists(list.takenFrom(listOfLists, v), listOfLists = TRUE);
}
# Superseded loop-based version of merge.lists.takenFrom (kept for reference).
merge.lists.takenFrom_old = function(listOfLists, v) {
	l = list();
	ns = names(listOfLists);
	if (any(ns != names(v))) v = v[order_align(ns, names(v))];
	for (i in 1:length(v)) {
		new = if (!is.list(listOfLists[[i]]))
			listKeyValue(ns[i], listOfLists[[i]][v[i]]) else {
			t = listOfLists[[i]][[v[i]]];
			# list of vectors
			t = (if (!is.list(t)) {
				# define name from higher level
				listKeyValue(firstDef(
					names(listOfLists[[i]])[v[i]], ns[i]
				), list(t))
				# <A> probably better and correct
				#listKeyValue(ns[i], list(t))
			} else if (is.null(names(t))) listKeyValue(ns[i], t) else t);
			t
		}
		l = merge.lists(l, new);
	}
	l
}
# take indeces given by v from a nested list
# namesOfLists: take the name of the list at the position in v
#	if null, take first element or leave aggregation to the function aggregator
# aggregator: called with the final result, should flatten existing lists into characters
# null2na: replace NULL selections by NA in the result
lists.splice = function(listOfLists, v, namesOfLists = F, aggregator = NULL, null2na = T) {
	ns = names(listOfLists);
	l = lapply(1:length(ns), function(i) {
		name = ns[i];
		e = listOfLists[[i]][v[i]];
		r = if (!is.list(e)) e else {
			f = if (namesOfLists) {
				g = names(e)[1];
				# handle name == NULL
				if (is.null(g)) {
					# make an attempt later to print element
					#if (!is.null(aggregator)) e[[1]] else e[[1]][[1]]
					if (!is.null(aggregator))
						e[[1]] else
						join(as.character(e[[1]][[1]]), ", ")
				} else g
			} else e[[1]];
		}
		r
	});
	if (null2na) l = lapply(l, function(e)ifelse(is.null(e), NA, e));
	if (!is.null(aggregator)) l = aggregator(listKeyValue(ns, l), v, l);
	l
}
# dictionary produced by lists.splice, v: splice vector, l: aggregated list (w/o names)
merge.multi.symbolizer = function(d, v, l)unlist.n(d, 1);
# Like merge.multi.list, but represents each combination symbolically
# (human-readable labels obtained via lists.splice / symbolizer) rather than
# by the parameter values themselves.
merge.multi.list.symbolic = function(modelList, ..., symbolizer = NULL) {
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, ...);
	namesDf = if (is.null(symbolizer)) names(modelList) else NULL;
	df0 = sapply(1:nrow(models), function(i, ...) {
		r = lists.splice(modelList, unlist(models[i, ]),
			namesOfLists = T, aggregator = symbolizer);
		r
	});
	r = Df_(df0, t_ = T, names = namesDf);
	r
}
# Wrap every element of l in its own single-element list.
inlist = function(l) lapply(l, list)
# Like inlist, but takes the elements as separate arguments.
Inlist = function(...) inlist(list(...))
# Invoke im__f on a prepared argument list, according to callMode:
#   'inlist': pass args + ... via .do.call (excess arguments are stripped)
#   'list':   pass args as a single list argument
#   'inline': flatten args (merge.lists over the list of lists) into
#             individual named arguments, then .do.call
Do.callIm = function(im__f, args, ..., restrictArgs = TRUE, callMode = 'inline') {
	if (callMode == 'inlist') {
		.do.call(im__f, c(args, list(...)), restrictArgs = restrictArgs)
	} else if (callMode == 'list') {
		im__f(args, ...)
	} else if (callMode == 'inline') {
		args = c(merge.lists(args, listOfLists = TRUE), list(...));
		.do.call(im__f, args, restrictArgs = restrictArgs)
	} else stop('Unknown call mode');
}
# <!> should be backwards compatible with iterateModels_old, not tested
# modelList: list of lists/vectors; encapuslate blocks of parameters in another level of lists
# Example:
#
#' Iterate combinations of parameters
#'
#' This function takes a list of parameters for which several values are to be evaluated. These values can be vectors of numbers or lists that contain blocks of parameters. All combinations are formed and passed to a user supplied function \code{f}. This functions takes an index of the combination together with parameter values. Argument \code{callWithList} controls whether there is exactly one argument per parameter position or wether one more step of unlisting takes place. In case that a block of parameters is supplied, all values of the block are passed as individual arguments to \code{f} in case \code{callWithList == F}.
#'
#' @param selectIdcs restrict models to the given indeces
#'
#' @examples
#' modelList = list(global = list(list(a=1, b=2)), N = c(1, 2, 3));
#' print(iterateModels(modelList));
#' modelList = list(N = c(1, 2, 3), parsAsBlock = list(list(list(c = 1, d = 2)), list(list(c = 3, d = 4))));
#' print(iterateModels(modelList));
#' # ensure elements on A are given as a block (list)
#' A = list(list(a = 1, b = 2), list(a = 3, b = 5));
#' modelList = list(N = inlist(A), parsAsBlock = list(list(list(c = 1, d = 2)), list(list(c = 3, d = 4))));
#' print(iterateModels(modelList));
#' # shorter version of the above
#' modelList = list(N = Inlist(list(a = 1, b = 2), list(a = 3, b = 5)), parsAsBlock = Inlist(list(c = 1, d = 2), list(c = 3, d = 4)));
#' print(iterateModels(modelList));
#' # inline calling
#' modelList = list(N = list(list(a = 1, b = 2), list(a = 3, b = 5)), parsAsBlock = list(list(c = 1, d = 2), list(c = 3, d = 4)));
#' print(iterateModels(modelList));
#'
#'
#'
#' callMode: 'inline', 'list', 'inlist'
iterateModels_raw = function(modelList, models, f_iterate = function(...)list(...), ...,
	callWithList = F, callMode = NULL, restrictArgs = T, parallel = F, lapply__) {
	# <!> bug fix: Lapply was only assigned in the sequential case, leaving it
	# undefined for parallel = TRUE; lapply__ (the parallel apply) is now used
	Lapply = if (parallel) lapply__ else lapply;
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	# model indeces contains the original positions in models
	# this allows reordering of execution, eg with reverseEvaluationOrder
	r = Lapply(1:nrow(models), function(i, ..., im__f, im__model_idcs) {
		args = c(Inlist(i = im__model_idcs[i]), list.takenFrom(modelList, unlist(models[i, ])));
		Do.callIm(im__f, args, ..., restrictArgs = restrictArgs, callMode = callMode);
	}, ..., im__f = f_iterate, im__model_idcs = as.integer(row.names(models)));
	r
}
# <i> refactor iterateModels to use iterateModels_prepare
# Expand modelList into its index data frame and apply .constraint/selectIdcs
# filters. Returns list(modelsRaw, selection, models).
iterateModels_prepare = function(modelList, .constraint = NULL,
	callWithList = FALSE, callMode = NULL, restrictArgs = T, selectIdcs = NULL, .first.constant = T,
	...) {
	# <!> bug fix: `...` was forwarded to iterateModels_raw below but missing
	# from the signature (error whenever .constraint was non-NULL)
	# <p> preparation
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .first.constant = .first.constant);
	# <p> handle constraints
	selC = if (is.null(.constraint)) T else
		unlist(iterateModels_raw(modelList, models, f_iterate = .constraint,
			parallel = FALSE, callMode = callMode, restrictArgs = restrictArgs, ...));
	selI = if (is.null(selectIdcs)) T else 1:nrow(models) %in% selectIdcs;
	# apply constraints
	models = models[selC & selI, , drop = F];
	# NOTE(review): modelsRaw equals the filtered models here (models is
	# reassigned above before r is built) -- confirm this is intended
	r = list(
		modelsRaw = models,
		selection = selC & selI,
		models = models
	);
	r
}
# Default symbolizer: represent each model parameter by the character value of
# its first (unlisted) element; the model index i is ignored.
iterateModelsDefaultSymbolizer = function(i, ...) {
	pars = list(...);
	lapply(pars, function(p) unlist(as.character(unlist(p)[1])))
}
# Combine the default symbolization with a user-supplied symbolizer; the user
# result overrides default entries of the same name.
# NOTE(review): im_symbolizerMode is accepted but unused here -- confirm.
iterateModelsSymbolizer = function(i, ..., im_symbolizer, im_symbolizerMode) {
	l = list(...);
	l0 = iterateModelsDefaultSymbolizer(i, ...);
	l1 = .do.call(im_symbolizer, c(list(i = i), list(...)), restrictArgs = TRUE);
	r = merge.lists(l0, l1);
	r
}
# Iterate all combinations of modelList parameters (see the roxygen block
# above iterateModels_raw), calling f per combination.
# Returns list(models, results, models_symbolic), or just the results with
# .resultsOnly; .unlist flattens the result that many levels.
iterateModels = function(modelList, f = function(...)list(...), ...,
	.constraint = NULL, .clRunLocal = TRUE, .resultsOnly = FALSE, .unlist = 0,
	callWithList = FALSE, callMode = NULL,
	symbolizer = iterateModelsDefaultSymbolizer, symbolizerMode = 'inlist',
	restrictArgs = T, selectIdcs = NULL,
	.first.constant = TRUE, parallel = FALSE, lapply__, reverseEvaluationOrder = TRUE) {
	# <p> pre-conditions
	nsDupl = duplicated(names(modelList));
	if (any(nsDupl))
		stop(con('iterateModels: duplicated modelList entries: ', join(names(modelList)[nsDupl], ', ')));
	# <p> preparation
	if (is.null(callMode)) callMode = if (callWithList) 'list' else 'inline';
	# <p> produce raw combinations
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .first.constant = .first.constant);
#	models_symbolic = merge.multi.list.symbolic(modelList,
#		symbolizer = symbolizer, .first.constant = .first.constant);
	# symbolic (printable) representation of each combination
	models_symbolic = do.call(rbind, iterateModels_raw(modelList, models, iterateModelsSymbolizer,
		callMode = 'inlist', parallel = F,
		im_symbolizerMode = symbolizerMode, im_symbolizer = symbolizer));
	# <p> handle constraints
	selC = if (is.null(.constraint)) T else
		unlist(iterateModels_raw(modelList, models, f_iterate = .constraint,
			callMode = callMode, restrictArgs = restrictArgs, ..., parallel = F));
	selI = if (is.null(selectIdcs)) T else 1:nrow(models) %in% selectIdcs;
	# <p> apply constraints
	models = models[selC & selI, , drop = F];
	models_symbolic = models_symbolic[selC & selI, , drop = F];
	# <p> models to be iterated
	# evaluation order may be reversed (e.g. to surface errors early);
	# the result list is re-reversed below so output order matches models
	modelsIt = if (reverseEvaluationOrder) models[rev(1:nrow(models)), , drop = F] else models;
	r = iterateModels_raw(modelList, modelsIt, f_iterate = f,
		callMode = callMode, restrictArgs = restrictArgs, ..., parallel = parallel);
	if (reverseEvaluationOrder) r = rev(r);
	r = if (.resultsOnly) r else list(
		models = models,
		results = r,
		models_symbolic = models_symbolic
	);
	r = unlist.n(r, .unlist);
	r
}
# Expand modelList into its combination index data frame plus the symbolic
# representation, without evaluating any function on the combinations.
iterateModelsExpand = function(modelList, .constraint = NULL) {
	modelSize = lapply(modelList, function(m)1:length(m));
	models = merge.multi.list(modelSize, .constraint = .constraint);
	r = list(
		models = models,
		models_symbolic = merge.multi.list.symbolic(modelList, .constraint = .constraint)
	);
	r
}
# reverse effect of .return.lists = T:
# list.to.df(merge.multi.list(..., .return.lists = T)) === merge.multi.list(..., .return.lists = F)
list.to.df = function(l) t(sapply(l, identity))
# Form all combinations of the given arguments (see merge.multi.list).
merge.multi = function(..., .col.names = NULL, .col.names.prefix = "X",
	.return.lists = F, stringsAsFactors = F, .constraint = NULL, .first.constant = T) {
	# <!> bug fix: .col.names.prefix was accepted but never forwarded
	merge.multi.list(list(...), .col.names = .col.names, .col.names.prefix = .col.names.prefix,
		.return.lists = .return.lists,
		stringsAsFactors = stringsAsFactors, .constraint = .constraint, .first.constant = .first.constant)
}
# Successively merge a list of data frames (outer join by default via all = T);
# .first.constant controls which frame varies slowest in the result.
merge.multi.dfs = function(l, .first.constant = T, all = T, stringsAsFactors = F, ...) {
	if (.first.constant) l = rev(l);
	if (length(l) >= 1) for (i in 1:length(l)) {
		df1 = data.frame.types(l[[i]], stringsAsFactors = stringsAsFactors);
		df0 = if (i > 1) merge(df0, df1, all = all, ...) else df1;
	}
	# restore original column order reversed above
	if (.first.constant) df0 = df0[, rev(names(df0)), drop = F];
	df0
}
# merge wrapper:
#   safemerge: stop instead of producing a silent cross join when x and y
#              share no columns (merge's default behavior)
#   stableByX: preserve the row order of x in the result (merge reorders)
Merge = function(x, y, by = intersect(names(x), names(y)), ..., safemerge = T, stableByX = FALSE) {
	# temporary index column used to restore x's row order afterwards
	if (stableByX) x = data.frame(x, MergeStableByX = 1:nrow(x));
	if (safemerge && length(by) == 0) {
		stop(sprintf('Merge: safemerge triggered. No common columns between "%s" and "%s"',
			join(names(x), sep = ','), join(names(y), sep = ',')))
	}
	r = merge(x = x, y = y, by = by, ...);
	if (stableByX) {
		indexCol = which(names(r) == 'MergeStableByX');
		r = r[order(r$MergeStableByX), -indexCol, drop = FALSE];
	}
	r
}
# ids: variables identifying rows in final table
# vars: each combination of vars gets transformed to an own column
# <!> not tested for length(ids) > 1 || ength(rvars) > 1
# blockVars: should the repeated vars go in blocks or be meshed for vars
#
# Examples:
# intersection table
# i = intersectSetsCount(sets);
# reshape.wide(Df(i$models_symbolic, count = unlist(i$results)), 's1', 's2');
# Long -> wide reshape: for every id combination, one row with one column per
# (remaining variable x vars-level combination); missing cells become NA.
reshape.wide = function(d, ids, vars, blockVars = F, reverseNames = F, sort.by.ids = T) {
	# remaining vars
	rvars = setdiff(names(d), union(ids, vars));
	# levels of variables used in the long expansion
	levls = lapply(vars, function(v)unique(as.character(d[[v]])));
	# combinations at the varying vars as passed to vars
	cbs = merge.multi.list(levls, .col.names = vars, .first.constant = !blockVars);
	# repvars: repeated variables
	repvars = merge.multi.list(c(list(rvars), levls),
		.first.constant = !blockVars, .col.names = c("..var", vars));
	varnames = apply(repvars, 1, function(r)join(if (reverseNames) rev(r) else r, "."));
	r0 = data.frame.types(unique(d[, ids], drop = F), names = ids);
	r1 = data.frame.types(apply(r0, 1, function(r) {
		# <p> isolate rows which match to current id columns
		ids = which(apply(d[, ids, drop = F], 1, function(id)all(id == r)));
		d1 = d[ids, ];
		# <p> construct vector of repeated values
		vs = sapply(1:dim(cbs)[1], function(i) {
			# <A> should be equal to one
			row = which(apply(d1[, vars, drop = F], 1, function(r)all(r == cbs[i, ])));
			v = if (length(row) != 1) rep(NA, length(rvars)) else d1[row, rvars];
			v
		});
		# heed blockVars
		vs = as.vector(unlist(if (!blockVars) t(vs) else vs));
		vs
	}), do.transpose = T, names = varnames);
	r = data.frame(r0, r1);
	if (sort.by.ids) r = r[order.df(r, ids), ];
	row.names(r) = NULL;
	r
}
#' Convert data in wide format to long format
#'
#' Long format duplicates certain columns and adds rows for which one new column hold values coming
#' from a set of columns in wide format.
#'
#' @param d data frame with columns in wide format
#' @param vars columns in wide format by name or index
#' @param factors \code{vars} can be grouped. For each level of \code{factor} a new row is created. Implies
#'	that \code{length(vars)} is a multiple of \code{length(levels(factor))}
#' @param factorColumn name of the column to be created for the factor
#' @param valueColumn name of the new column of values that were in wide format
#' @param rowNamesAs if given, the row names are carried over as a column of that name
# factors: provide factor combinations explicitly for vars (otherwise split by '.', <i>)
#' @examples
#' #reshape variables 2:9 (forming two groups: case/ctr), value of which is named 'group'
#' # the shortened columns will get names valueColumn
#' d0 = reshape.long(d, vars = 2:9, factors = c('case', 'ctr'), factorColumn = 'group',
#'	valueColumn = c('AA', 'AG', 'GG', 'tot'));
reshape.long = function(d, vars = NULL, factorColumn = 'factor', valueColumn = 'value',
	factors = as.factor(vars), useDisk = F, rowNamesAs = NULL) {
	if (is.null(vars)) vars = names(d);
	# make rownames an extra column
	if (!is.null(rowNamesAs)) {
		d = data.frame(reshape_row_names__ = rownames(d), d);
		names(d)[1] = rowNamesAs;
	}
	# indeces of columns vars
	Ivars = .df.cols(d, vars);
	# remaining vars
	rvars = setdiff(1:length(names(d)), Ivars);
	# names thereof
	Nrvars = names(d)[rvars];
	# how wide are the blocks?
	S = length(vars) / length(factors);
	# columns of intermediate data.frame
	N = length(rvars);
	# create list of data frames: one frame per input row, one row per factor level
	dfs = lapply(1:nrow(d), function(i) {
		st = d[i, rvars]; # start of the new row
		df0 = data.frame(factors, value = matrix(d[i, vars], nrow = length(factors), byrow = T));
		df1 = data.frame(st, df0, row.names = NULL);
		names(df1) = c(Nrvars, factorColumn, valueColumn);
		df1
	});
	r = rbindDataFrames(dfs, do.unlist = T, useDisk = useDisk);
	r
}
#' Reduce data frame to the first row of each block of rows agreeing on the values of \code{cols}
uniqueByCols = function(d, cols) {
	# reset row names so the deduplication indexes cleanly into d
	row.names(d) = NULL;
	keep = !duplicated(d[, cols, drop = F]);
	d[keep, ]
}
#
# <p> string functions
#
# Capitalize the first character of each element of s; note that multiple input
# elements are concatenated into a single string (collapse = "").
uc.first = firstUpper = function(s) {
	paste0(toupper(substring(s, 1, 1)), substring(s, 2), collapse = "");
}
#
# <p> factor transformations for data frames
#
# Column names after dummy-expansion of factors: a factor column with k levels
# contributes names "name1" .. "name<k-1>" (first level = reference), any other
# column keeps its own name.
dataExpandedNames = function(data) {
	expandOne = function(v) {
		col = data[[v]];
		if (!is.factor(col)) return(v);
		paste0(v, 1:(nlevels(col) - 1))
	}
	unlist(lapply(names(data), expandOne))
}
# model.matrix removes missing columns and could not be tweaked into working
# Expand factor columns of data into 0/1 dummy columns (reference coding, first
# level is the reference); an NA observation yields NA in all its dummy columns.
dataExpandFactors = function(data, vars = NULL) {
	if (is.null(vars)) vars = names(data);
	d0 = lapply(vars, function(v) {
		if (is.factor(data[[v]])) {
			ls = levels(data[[v]]);
			dcNa = rep(NA, length(ls) - 1); # missing data coding
			dc = rep(0, length(ls) - 1); # dummy coding
			# per observation: vector of k-1 dummies (sapply returns them column-wise)
			sapply(data[[v]], function(e) {
				if (is.na(e)) return(dcNa);
				i = which(e == ls);
				if (i == 1) return(dc);
				dc[i - 1] = 1;
				return(dc);
			});
		} else data[[v]];
	});
	d0names = dataExpandedNames(data[, vars]);
	# re-transform data: transpose per-variable blocks back to row-major observations
	d1 = data.frame(matrix(unlist(lapply(d0, function(e)t(e))), ncol = length(d0names), byrow = F));
	names(d1) = d0names;
	d1
}
# Map variable names to the coefficient names produced by dummy-expansion.
# which.indeces is a project helper; vars are matched as regular expressions.
coefficientNamesForData = function(vars, data) {
	lnames = dataExpandedNames(data); # names of levels of factors
	cnames = lnames[unlist(sapply(vars, function(v)which.indeces(v, lnames, regex = T)))];
	cnames
}
#
# <p> statistic oriented data frame manipulation
#
# Column indeces of d matching vars; with varsArePrefixes = T each var is treated
# as a name prefix (converted to the regex 'var.*').
variableIndecesForData = function(d, vars, varsArePrefixes = T) {
	if (varsArePrefixes) vars = sapply(vars, function(e)sprintf('%s.*', e));
	which.indeces(vars, names(d), regex = T, match.multi = T)
}
# Column names of d matching vars (see variableIndecesForData).
variablesForData = function(d, vars, varsArePrefixes = T) {
	names(d)[variableIndecesForData(d, vars, varsArePrefixes)]
}
# Sub-data frame of d restricted to the columns matching vars.
subData = function(d, vars, varsArePrefixes = T) {
	dfr = d[, variableIndecesForData(d, vars, varsArePrefixes), drop = F];
	dfr
}
# Split d into response and covariate sub-data frames according to formula;
# response/covariate names may be treated as column-name prefixes.
subDataFromFormula = function(d, formula, responseIsPrefix = T, covariateIsPrefix = T) {
	resp = formula.response(formula);
	cov = formula.covariates(formula);
	ns = names(d);
	r = list(
		response = subData(d, resp, responseIsPrefix),
		covariate = subData(d, cov, covariateIsPrefix)
	);
	r
}
#
# <p> graph functions
#
# One step of transitive closure on the edge list df (columns: follower, leader,
# cluster): adds edges reachable in one more step, takes the symmetric closure
# and relabels each connection with the minimal cluster number seen so far.
sub.graph.merge = function(df, leader, follower) {
	# next transitive step
	r0 = merge(df, data.frame(leader = leader, follower = follower), by = 'follower');
	# add new connections
	r1 = rbind(df, data.frame(follower = r0$leader.y, leader = r0$leader.x, cluster = r0$cluster));
	# symmetric closure
	r1 = rbind(r1, data.frame(follower = r1$leader, leader = r1$follower, cluster = r1$cluster))
	# form clusters by selecting min cluster number per connection
	r1 = r1[order(r1$cluster), ];
	row.names(r1) = 1:dim(r1)[1];
	r2 = unique(r1[, c('leader', 'follower')]);
	# select unique rows (first occurrence selects cluster)
	r = r1[as.integer(row.names(r2)), ];
	# pretty sort data frame
	r = r[order(r$cluster), ];
	r
}
# form clusters from a relationally defined hierarchy
# Computes the connected components of the follower/leader relation in df
# (first two columns): seeds each node with its own cluster label, then iterates
# sub.graph.merge until a fixed point is reached. Returns a list of node-id
# vectors, one per component.
sub.graph = function(df) {
	df = as.data.frame(df);
	names(df)[1:2] = c('follower', 'leader');
	df = df[order(df$follower), ];
	# seed clusters: every follower id starts out as its own cluster label
	ids = sort(unique(df$follower));
	idsC = as.character(ids);
	counts = lapply(ids, function(id)sum(df$follower == id));
	names(counts) = idsC;
	clusters = unlist(sapply(idsC, function(id){ rep(as.integer(id), counts[[id]]) }));
	df = cbind(df, data.frame(cluster = rep(clusters, 2)));
	df = unique(rbind(df, data.frame(follower = df$leader, leader = df$follower, cluster = df$cluster)));
	# receiving frame
	df0 = df;
	# results with clusters
	i = 1;
	repeat {
		Nrows = dim(df0)[1];
		# fixed: was df0$clusters — no such column ($ cannot partial-match a longer
		# name), so the label-stability test below was vacuously TRUE
		cls = df0$cluster;
		# add transitive connections
		df0 = sub.graph.merge(df0, follower = df0$leader, leader = df0$follower);
		# fixed point: no new edges AND no cluster label changed
		if (dim(df0)[1] == Nrows && all(cls == df0$cluster)) break;
	}
	df0 = df0[order(df0$cluster), ];
	cIds = unique(df0$cluster);
	cls = lapply(cIds, function(id)unique(avu(df0[df0$cluster == id, c('follower', 'leader')])));
	cls
}
#
# <p> formulas
#
# formula: formula as a character string with wildcard character '%'
# <!>: assume whitespace separation in formula between terms
# <!>: write interaction with spaces <!> such as in:
#	f = 'MTOTLOS_binair ~ ZRES% + sq(ZRes%) + ( ZRES% )^2';
# Expand wildcard terms ('%' -> re.string) against the column names of data and
# return the resulting formula object; a function wrapper fct(term%) is applied
# to every matching column. fetchRegexpr/nlapply/mergeDictToString are project helpers.
formula.re = function(formula, data, ignore.case = F, re.string = '.*') {
	vars = names(data);
	#regex = '(?:([A-Za-z_.]+[A-Za-z0-9_.]*)[(])?([A-Za-z.]+[%][A-Za-z0-9.%_]*)(?:[)])?';
	# function names ( regex )
	#regex = '(?:([A-Za-z_.]+[A-Za-z0-9_.]*)[(])?([A-Za-z%.]+[A-Za-z0-9.%_]*)(?:[)])?';
	# allow backslash quoting
	regex = '(?:([A-Za-z_.\\\\]+[A-Za-z0-9_.\\\\]*)[(])?([A-Za-z%.\\\\]+[A-Za-z0-9.%_\\\\]*)(?:[)])?';
	patterns = unique(fetchRegexpr(regex, formula, ignore.case = ignore.case));
	subst = nlapply(patterns, function(p) {
		# split pattern into optional function name and variable wildcard
		comps = fetchRegexpr(regex, p, captureN = c('fct', 'var'), ignore.case = ignore.case)[[1]];
		p = sprintf("^%s$", gsub('%', re.string, comps$var));
		mvars = vars[sapply(vars, function(v)regexpr(p, v, perl = T, ignore.case = ignore.case)>=0)];
		if (comps$fct != '') {
			# wrap every matched column into the function call fct(col)
			varf = sprintf('%s', paste(sapply(mvars, function(v)sprintf('%s(%s)', comps$fct, v)),
				collapse = " + "));
		} else {
			varf = sprintf('%s', paste(mvars, collapse = " + "));
		}
		varf
	});
	formulaExp = as.formula(mergeDictToString(subst, formula));
	formulaExp
}
# Extract the response (left-hand side) of formula f as a character string;
# f may be a formula object or a character representation thereof.
formula.response = function(f) {
	# deparse may split long formulas over several lines; join into one string first
	# (inherits instead of class() ==: robust against multi-class objects)
	f = if (inherits(f, 'formula')) join(deparse(f), '') else f;
	# everything before the first '~', with surrounding whitespace trimmed
	r = as.character(fetchRegexpr('^\\s*([^~]*?)(?:\\s*~)', f, captures = T));
	r
}
# Right-hand side of formula f: a character string (noTilde = TRUE) or a
# one-sided formula '~ rhs' (noTilde = FALSE, the default). con is a project helper.
formula.rhs = function(f, noTilde = FALSE) {
	rhs = fetchRegexpr('[~](.*)', if (!is.character(f)) formula.to.character(f) else f, captures = T);
	if (noTilde) rhs else as.formula(con('~', rhs))
}
# Names of all covariate variables appearing on the right-hand side of f.
formula.covariates = function(f) {
	covs = all.vars(formula.rhs(f));
	#covs = setdiff(all.vars(as.formula(f)), formula.response(f));
	covs
}
# All variable names of formula f (response and covariates).
formula.vars = function(f)union(formula.response(f), formula.covariates(f));
# Null model 'response ~ 1' corresponding to formula f.
formula.nullModel = function(f) {
	r = formula.response(f);
	fn = as.formula(sprintf("%s ~ 1", r));
	fn
}
# Deparse formula f into a single character string (deparse may split long formulas).
formula.to.character = function(f)join(deparse(as.formula(f)), '');
# Coerce f to its character representation; character input passes through unchanged.
# Fixed: previously used ifelse() on a scalar condition, which truncated multi-element
# results to the first element and eagerly evaluated BOTH branches (so character input
# not parseable as a formula raised an error).
Formula.to.character = function(f) if (is.character(f)) f else formula.to.character(f);
# Turn a formula (or its character form) into a filename-safe string:
# whitespace removed, '_' -> '-', 'Surv(...)' -> 'surv', 'MARKER' -> 'snp'.
# mergeDictToString is a project helper applying the replacement dictionary.
formula2filename = function(f) {
	fs = join(f, sep = '');
	filename = mergeDictToString(list(
		`\\s+` = '',
		`_` = '-',
		`Surv\\(.*\\)` = 'surv',
		MARKER = 'snp'
		# other components
	), fs, re = T, doApplyValueMap = F, doOrderKeys = F);
	filename
}
# All variable names of a wildcard formula after expansion against data (see formula.re).
data.vars = function(data, formula, re.string = '.*', ignore.case = F) {
	all.vars(formula.re(formula = formula, data = data, re.string = re.string, ignore.case = ignore.case));
}
# Append the right-hand side terms of f1 to formula f0 ('f0 + rhs(f1)').
formula.add.rhs = function(f0, f1) {
	as.formula(join(c(formula.to.character(f0), formula.rhs(f1, noTilde = TRUE)), '+'))
}
# Combine the response of f1 with the right-hand side of f0 into a new formula.
formula.add.response = function(f0, f1) {
	formula = join(c(formula.response(f1), formula.rhs(f0, noTilde = FALSE)), ' ');
	as.formula(formula)
}
# Names of the design-matrix columns (predictors) implied by f on data;
# an intercept-only model yields '(Intercept)'.
formula.predictors = function(f, data, dataFrameNames = TRUE) {
	if (formula.rhs(f) == ~ 1) return('(Intercept)');
	mm = model.matrix(model.frame(formula.rhs(f), data), data);
	ns = dimnames(mm)[[2]];
	# <p> create data frame to extract proper names
	# if (dataFrameNames) {
	# 	df0 = as.data.frame(t(rep(1, length(ns))));
	# 	names(df0) = ns;
	# 	ns = names(df0);
	# }
	ns
}
# <!> cave survival
# Strip any transformation from the response of model, keeping the rhs
# (e.g. 'log(y) ~ x' -> 'y ~ x'). NOTE(review): assumes a single response variable.
formulaRemoveTransformation = function(model) {
	respVar = setdiff(all.vars(model), all.vars(formula.rhs(model)));
	formula.add.response(formula.rhs(model), as.formula(Sprintf('%{respVar}s ~ 1')))
}
# Predictors present in f1 but not in f0 (the 'free' parameters of the larger model).
formulas.free = function(f1, f0, data) {
	setdiff(formula.predictors(f1, data), formula.predictors(f0, data))
}
# <i> use terms.formula from a (a + ... + z)^2 formula
# <i> merge.multi.list(rep.list(covs, 2), .constraint = is.ascending)
# All unordered pairs of distinct covariates as a two-column data frame (c1, c2).
covariatePairs = function(covs) {
	grid = expand.grid(c1 = seq_along(covs), c2 = seq_along(covs));
	grid = grid[grid$c1 > grid$c2, ];
	data.frame(c1 = covs[grid$c1], c2 = covs[grid$c2])
}
# Build the formula 'repsonse ~ cov1+cov2+...' from character components.
# (parameter name 'repsonse' kept as-is for backward compatibility with named calls)
formulaWith = function(repsonse = "y", covariates = "x")
	as.formula(paste(repsonse, "~", paste(covariates, collapse = "+")))
#
# <p> set operations
#
# Clamp the values of v elementwise into the interval [min, max].
minimax = function(v, min = -Inf, max = Inf) {
	ifelse(v >= min, ifelse(v > max, max, v), min)
}
#
# Rsystem.R
#Mon 27 Jun 2005 10:51:30 AM CEST
#
# <par> file handling
#
# <!><N> works only on atomic path
# <!> 5.1.2016: trailing slash leads to basename of ""
# Decompose a path (optionally with ssh qualifier 'user@host:path' or a leading
# '[qualifier]:') into its components. Returns a list with dir, base (filename
# without extension), ext, file (with extension), fullbase, absolute/isAbsolute,
# exists/nonempty (disk is accessed unless skipExists) and remote-host fields.
splitPath = function(path, removeQualifier = T, ssh = F, skipExists = F) {
	if (is.null(path)) return(NULL);
	if (removeQualifier) {
		# strip a leading '[qualifier]:' prefix
		q = fetchRegexpr('(?<=^\\[).*?(?=\\]:)', path);
		if (length(q) > 0) path = substr(path, nchar(q) + 4, nchar(path));
	}
	sshm = list(user = '', host = '', userhost = '');
	if (ssh) {
		# split 'user@host:path' into its parts; user and host are optional
		sshm = fetchRegexpr('^(?:(?:([a-z]\\w*)(?:@))?([a-z][\\w.]*):)?(.*)', path,
			ignore.case = T, captureN = c('user', 'host', 'path'))[[1]];
		sshm$userhost = if (sshm$user != '') sprintf('%s@%s', sshm$user, sshm$host) else sshm$host;
		path = sshm$path;
	}
	#path = "abc/def.ext";
	#r.base = basename(path);
	#re = "([^.]*$)";
	#r = gregexpr(re, r.base)[[1]];
	#ext = substr(r.base, r[1], r[1] + attr(r, "match.length")[1] - 1);
	#ext = firstDef(fetchRegexpr('(?<=\\.)[^/.]+\\Z', path), '');
	# extension: everything after the last dot of the last path component
	ext = fetchRegexpr('(?<=\\.)[^/.]+\\Z', path);
	# take everything before ext and handle possible absence of '.'
	#base = substr(r.base, 1, r[1] - 1 - (ifelse(substr(r.base, r[1] - 1, r[1] - 1) == '.', 1, 0)));
	# reduce to file.ext
	Nchar = nchar(path);
	if (Nchar != 0 && substr(path, Nchar, Nchar) == '/') {
		# trailing slash: treat whole path as directory
		base = '';
		dir = substr(path, 1, Nchar - 1);
	} else {
		base = basename(path);
		dir = dirname(path);
	}
	# base as yet still contains the file extension
	file = base;
	# chop off extension if present
	if (length(fetchRegexpr('\\.', base)) > 0) base = fetchRegexpr('\\A.*(?=\\.)', base);
	#pieces = regexpr(re, path, perl = T);
	pieces = fetchRegexpr('([^.]+)', path);
	isAbsolute = Nchar != 0 && substr(path, 1, 1) == '/';
	# <N> disk is accessed
	exists = if (!skipExists) File.exists(path, host = sshm$userhost, ssh = F) else NA;
	nonempty = exists && (file.info(path)$size > 0);
	ret = list(
		dir = dir,
		base = base,
		path = path,
		fullbase = sprintf("%s/%s", dir, base),
		ext = ext,
		file = file,
		isAbsolute = isAbsolute,
		absolute = if (isAbsolute) path else sprintf('%s/%s', getwd(), path),
		# fs properties
		exists = exists, nonempty = nonempty,
		# remote
		is.remote = !(sshm$user == '' && sshm$host == ''),
		user = sshm$user, host = sshm$host, userhost = sshm$userhost
	);
	ret
}
# Absolute version of path: '~/' is expanded against $HOME, relative pathes are
# prefixed with the current working directory.
path.absolute = absolutePath = function(path, home.dir = T, ssh = T) {
	path = splitPath(path, ssh = ssh)$path;
	if (home.dir && nchar(path) >= 2 && substr(path, 1, 2) == "~/")
		path = sprintf("%s/%s", Sys.getenv('HOME'), substr(path, 3, nchar(path)));
	if (nchar(path) > 0 && substr(path, 1, 1) == "/") path else sprintf("%s/%s", getwd(), path)
}
# Generate a not-yet-existing file name 'prefix<digits random digits>.extension';
# the name is touched (or the directory created) to lock it unless doNotTouch.
# NOTE(review): race condition between existence check and creation is acknowledged below.
tempFileName = function(prefix, extension = NULL, digits = 6, retries = 5, inRtmp = F,
	createDir = F, home.dir = T, doNotTouch = F) {
	ext = if (is.null(extension)) '' else sprintf('.%s', extension);
	path = NULL;
	if (inRtmp) prefix = sprintf('%s/%s', tempdir(), prefix);
	if (home.dir) prefix = path.absolute(prefix, home.dir = home.dir);
	for (i in 1:retries) {
		path = sprintf('%s%0*d%s', prefix, digits, floor(runif(1) * 10^digits), ext);
		if (!File.exists(path)) break;
	}
	if (File.exists(path))
		stop(sprintf('Could not create tempfile with prefix "%s" after %d retries', prefix, retries));
	# potential race condition <N>
	if (createDir)
		Dir.create(path, recursive = T) else
		if (!doNotTouch) writeFile(path, '', mkpath = T, ssh = T);
	# # old implementation
	#path = tempfile(prefix);
	#cat('', file = path);	# touch path to lock name
	#path = sprintf("%s%s%s", path, ifelse(is.null(extension), "", "."),
	#	ifelse(is.null(extension), "", extension));
	Log(sprintf('Tempfilename:%s', path), 5);
	path
}
# List files in the directory part of dir; with regex = T the file part of dir
# is used as a perl regular expression filter (case toggles case sensitivity).
dirList = function(dir, regex = T, case = T) {
	sp = splitPath(dir);
	files = list.files(sp$dir);
	if (regex) {
		files = files[grep(sp$file, files, perl = T, ignore.case = !case)];
	}
	files
}
# Write table t to path twice: once as a regular csv and once with ','-decimal /
# ';'-separator convention into '<base><semAppend>.<ext>'.
write.csvs = function(t, path, semAppend = "-sem", ...) {
	s = splitPath(path);
	write.csv(t, path);
	pathSem = sprintf("%s%s.%s", s$fullbase, semAppend, s$ext);
	# make sure t is a data.frame or dec option will not take effect <A>
	#write.csv2(t, pathSem);
	write.table(t, file = pathSem, row.names = F, col.names = T, dec = ",", sep = ";");
}
#
# <p> file manipulation
#
# Existence test for local and remote ('user@host:path') pathes; remote pathes
# are probed with '<agent> host stat path'. qs is a project shell-quoting helper.
File.exists = function(path, host = '', agent = 'ssh', ssh = T) {
	if (ssh) {
		sp = splitPath(path, skipExists = T, ssh = T);
		host = sp$userhost;
		path = sp$path;
	}
	r = if (!is.null(host) && host != '') {
		# exit status 0 of stat indicates existence
		ret = system(sprintf('%s %s stat %s >/dev/null 2>&1', agent, host, qs(path)));
		ret == 0
	} else file.exists(path);
	r
}
# Copy a single path from -> to; pathes may carry ssh qualifiers ('user@host:path').
# Purely local transfers are symlinked (symbolicLinkIfLocal = T) or copied via
# file.copy; anything involving a remote side is delegated to the copy agent (scp).
File.copy_raw = function(from, to, ..., recursive = F, agent = 'scp', logLevel = 6, ignore.shell = T,
	symbolicLinkIfLocal = T) {
	spF = splitPath(from, ssh = T);
	spT = splitPath(to, ssh = T);
	# a path counts as local when it has no remote qualifier or targets localhost
	# <!> fixed inverted condition: these flags hold the is-LOCAL predicate, but the
	# local branch was previously taken only when both flags were FALSE (both remote)
	is.local.f = !spF$is.remote || spF$host == 'localhost';
	is.local.t = !spT$is.remote || spT$host == 'localhost';
	r = if (is.local.f && is.local.t) {
		if (symbolicLinkIfLocal) {
			file.symlink(spF$path, spT$path, ...);
		} else file.copy(spF$path, spT$path, recursive = recursive, ...);
	} else {
		# <A> assume 'to' to be atomic
		System(sprintf('%s %s %s %s %s',
			agent,
			ifelse(recursive, '-r', ''),
			paste(sapply(from, qs), collapse = ' '),
			qs(to),
			ifelse(ignore.shell, '>/dev/null', '')
		), logLevel);
	}
	r
}
# Vectorized wrapper around File.copy_raw: from/to are recycled pairwise via cbind
# and each pair is copied individually.
File.copy = function(from, to, ..., recursive = F, agent = 'scp', logLevel = 6, ignore.shell = T,
	symbolicLinkIfLocal = T) {
	if (is.null(from)) return(NULL);
	pairs = cbind(from, to);
	r = apply(pairs, 1, function(r) {
		File.copy_raw(r[1], r[2], ...,
			recursive = recursive, agent = agent, logLevel = logLevel,
			ignore.shell = ignore.shell, symbolicLinkIfLocal = symbolicLinkIfLocal)
	})
	r
}
# Remove path; remote pathes ('user@host:path') are removed through the ssh agent,
# local pathes with file.remove. Non-existing pathes are silently ignored.
File.remove = function(path, ..., agent = 'ssh', ssh = T, logLevel = 6) {
	r = if (ssh) {
		sp = splitPath(path, skipExists = T, ssh = T);
		host = sp$userhost;
		rpath = sp$path;
		if (File.exists(path, ssh = T))
			System(sprintf('rm %s', join(sapply(rpath, qs))), pattern = agent,
				ssh_host = host, logLevel = logLevel);
	} else if (file.exists(path)) file.remove(path, ...);
	r
}
# <i> remote operations
# Create a symbolic link from -> to, optionally replacing an existing target.
# The remote variant (ssh = T) is not implemented yet and raises an error.
File.symlink = function(from, to, replace = T, agent = 'ssh', ssh = F, logLevel = 6) {
	r = if (ssh) {
		sp = splitPath(from, skipExists = T, ssh = T);
		host = sp$userhost;
		rpath = sp$path;
		# <!><i>
		stop('not implemented');	# fixed typo in error message
	} else {
		Log(sprintf('symlink %s -> %s', qs(from), qs(to)), logLevel);
		if (replace && file.exists(to)) file.remove(to);
		file.symlink(from, to);
	}
	r
}
# <!> only atomic path
# treatAsFile: causes Dir.create to split off last path-component
# Create a directory locally or remotely (ssh-qualified path); with allow.exists
# an already existing local directory is not an error.
Dir.create = function(path, ..., recursive = F, agent = 'ssh', logLevel = 6,
	ignore.shell = T, allow.exists = T, treatPathAsFile = F) {
	sp = splitPath(path, ssh = T);
	# ignore last path-component
	if (treatPathAsFile) {
		sp$path = sp$dir;
		Log(sprintf('creating path %s', sp$path), 4);
	}
	if (sp$is.remote) {
		System(sprintf('ssh %s mkdir %s %s %s',
			sp$userhost,
			if (recursive) '--parents' else '',
			paste(sapply(sp$path, qs), collapse = ' '),
			if (ignore.shell) '2>/dev/null' else ''
		), logLevel);
	} else {
		if (allow.exists && !file.exists(sp$path)) dir.create(sp$path, ..., recursive = recursive);
	}
}
# save() wrapper transparently handling remote ('user@host:path') destinations:
# saves to a local tempfile first and copies it over; mkpath creates the target
# directory. symbolsAsVectors: ... are taken as character names of objects to save.
Save = function(..., file = NULL, symbolsAsVectors = F, mkpath = T, envir = parent.frame(1)) {
	sp = splitPath(file, ssh = T);
	localPath = if (sp$is.remote) tempfile() else file;
	if (mkpath) { Dir.create(file, recursive = T, treatPathAsFile = T); }
	r = if (symbolsAsVectors) {
		do.call('save', c(as.list(c(...)), list(file = localPath)), envir = envir);
	} else save(..., file = localPath, envir = envir);
	if (sp$is.remote) File.copy(localPath, file);
	r
}
# load() wrapper transparently handling remote ('user@host:path') sources and
# retrying up to Load_retries times with Load_sleep seconds between attempts
# (useful when the file is being produced concurrently). Loads into envir.
Load = function(..., file = NULL, Load_sleep = 0, Load_retries = 3, envir = parent.frame(1), logLevel = 6) {
	sp = splitPath(file, ssh = T);
	localPath = if (sp$is.remote) tempfile() else file;
	r = NULL;
	for (i in 1:Load_retries) {
		if (sp$is.remote) {
			if (!File.exists(file)) {
				Sys.sleep(Load_sleep);
				next;
			}
			File.copy(file, localPath, logLevel = logLevel);
		}
		r = try(load(..., file = localPath, envir = envir));
		# inherits() instead of class() ==: robust against multi-class objects
		if (inherits(r, 'try-error') && Load_sleep > 0) Sys.sleep(Load_sleep) else break;
	}
	# r stays NULL when a remote file never appeared within the retry budget
	if (is.null(r)) stop(sprintf('could not Load %s', file));
	if (inherits(r, 'try-error')) stop(r[1]);
	r
}
#
# create output file names
# output = list(prefix = "results/pch", extension = "pdf", tag = "20100727");
# Compose '<prefix>-<subtype->-<tag>.<extension>' from an output specification list.
fileName = function(output, extension = NULL, subtype = NULL) {
	if (is.null(output)) return(NULL);
	if (is.null(output$prefix)) return(NULL);
	subtype = firstDef(subtype, output$subtype, "");
	if (subtype != "") subtype = sprintf("%s-", subtype);
	r = sprintf("%s-%s%s.%s", output$prefix, subtype, output$tag,
		firstDef(extension, output$extension, ""));
	Log(r, 4);
	r
}
#.globalOutput = list(prefix = 'results/20120126-');
#save(r, file = .fn('simulation', 'RData'))
# global output-name configuration; the live copy is kept in GlobalOutput_env__
.globalOutputDefault = .globalOutput = list(prefix = '', tag = NULL, tagFirst = F);
GlobalOutput_env__ = new.env();
# .fn.set(prefix = 'results/predictionTesting-')
# Overwrite the global output configuration with the given key/value pairs.
.fn.set = function(...) {
	.globalOutput = merge.lists(.globalOutputDefault, list(...));
	assign('.globalOutput', .globalOutput, envir = GlobalOutput_env__);
}
# create output file name on globalOptions
# Build 'dir/[tag-]base[-tag][.ext]' pathes from name/extension according to the
# global configuration; missing directories are created as a side effect.
.fn = function(name, extension = '', options = NULL) {
	o = merge.lists(.globalOutputDefault, .globalOutput,
		get('.globalOutput', envir = GlobalOutput_env__), options);
	# construct plain filename
	pathes = sprintf('%s%s%s%s', o$prefix, name, ifelse(extension == '', '', '.'), extension);
	fn = sapply(pathes, function(path) {
		sp = splitPath(path);
		# <p> dir
		if (!file.exists(sp$dir)) dir.create(sp$dir);
		# <p> tag
		ext = firstDef(sp$ext, '');
		fn = if (!is.null(o$tag)) {
			if (o$tagFirst) {
				sprintf('%s/%s-%s%s%s', sp$dir, o$tag, sp$base, ifelse(ext == '', '', '.'), ext)
			} else { sprintf('%s/%s-%s%s%s', sp$dir, sp$base, o$tag, ifelse(ext == '', '', '.'), ext) };
		} else sprintf('%s/%s%s%s', sp$dir, sp$base, ifelse(ext == '', '', '.'), ext);
		fn
	});
	avu(fn)
}
# Append prefix to the global output prefix (nested output scopes).
.fn.pushPrefix = function(prefix) {
	output = merge.lists(.globalOutput, list(prefix = sprintf('%s%s', .globalOutput$prefix, prefix)));
	assign('.globalOutput', output, envir = GlobalOutput_env__);
	.globalOutput
}
# Drop the last component of the global output prefix (argument is unused).
.fn.popPrefix = function(prefix) {
	output = merge.lists(.globalOutput, list(prefix = sprintf('%s/', splitPath(.globalOutput$prefix)$dir)));
	assign('.globalOutput', output, envir = GlobalOutput_env__);
	.globalOutput
}
#
# command argument handling
#
# default args: command line call minus command
# Evaluate the first non-option command line argument as R code in the caller and
# additionally return it re-parsed as a named list (deliberate eval(parse) on CLI input).
evaluateArgs = function(c = commandArgs()[-1]) {
	is.no.option = is.na(as.integer(sapply(c, function(a)grep("^--", a))));
	#c = c[!(c == "--vanilla")];	# eliminate '--vanilla' arguments
	c = c[is.no.option];
	if (length(c) > 0) {
		eval.parent(parse(text = c[1]));
		argListString = gsub(";", ",", gsub(";$", "", c[1]));
		print(argListString);
		return(eval(parse(text = sprintf("list(%s)", argListString))));
	}
	return(NULL);
}
# default args: command line call minus command
# Evaluate each non-option argument as R assignments and collect the resulting
# name/value pairs into a flat list (deliberate eval(parse) on CLI input).
getCommandOptions = function(c = commandArgs()[-1]) {
	is.no.option = is.na(as.integer(sapply(c, function(a)grep("^--", a))));
	#c = c[!(c == "--vanilla")];	# eliminate '--vanilla' arguments
	c = c[is.no.option];
	o = lapply(c, function(e) {
		eval(parse(text = e));
		nlapply(setdiff(ls(), 'e'), function(n)get(n))
	});
	o = unlist.n(o, 1);
	o
}
# R.pl interface
# Call the trigger handler from triggerDefinition (default: global .globalTriggers)
# for every option name present in o; handlers receive (o$args, o).
handleTriggers = function(o, triggerDefinition = NULL) {
	if (is.null(triggerDefinition)) triggerDefinition = rget('.globalTriggers');
	if (!is.list(o) || is.null(triggerDefinition)) return(NULL);
	for (n in names(triggerDefinition)) {
		if (!is.null(o[[n]])) triggerDefinition[[n]](o$args, o);
	}
}
#
# level dependend logging
#
#Global..Log..Level = 4;
#Default..Log..Level = 4;
#assign(Default..Log..Level, 4, envir = .GlobalEnv);
# logging state (default and global level) lives in this private environment
Log_env__ <- new.env();
assign('DefaultLogLevel', 4, envir = Log_env__);
#' Log a message to stderr.
#'
#' Log a message to stderr. Indicate a logging level to control verbosity.
#'
#' This function prints a message to stderr if the condition is met that a
#' global log-level is set to greater or equal the value indicated by
#' \code{level}. \code{Log.level} returns the current logging level.
#'
#' @aliases Log Log.setLevel Log.level
#' @param o Message to be printed.
#' @param level If \code{Log.setLevel} was called with this value, subsequent
#' calls to \code{Log} with values of \code{level} smaller or equal to this
#' value will be printed.
#' @author Stefan Böhringer <r-packages@@s-boehringer.org>
#' @seealso \code{\link{Log.setLevel}}, ~~~
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' Log.setLevel(4);
#' Log('hello world', 4);
#' Log.setLevel(3);
#' Log('hello world', 4);
#'
Log = function(o, level = get('DefaultLogLevel', envir = Log_env__)) {
	if (level <= get('GlobalLogLevel', envir = Log_env__)) {
		# NOTE(review): cat without file argument writes to stdout, not stderr as
		# the roxygen text claims — confirm intended destination
		cat(sprintf("R %s: %s\n", date(), as.character(o)));
	}
}
# sprintf-style logging: o is interpolated via project helper Sprintf in envir.
Logs = function(o, level = get('DefaultLogLevel', envir = Log_env__), ..., envir = parent.frame()) {
	Log(Sprintf(o, ..., envir = envir), level = level);
}
# Current global log level.
Log.level = function()get('GlobalLogLevel', envir = Log_env__);
# Set the global log level (messages with level <= this value are printed).
Log.setLevel = function(level = get('GlobalLogLevel', envir = Log_env__)) {
	assign("GlobalLogLevel", level, envir = Log_env__);
}
Log.setLevel(4);	# default
# file-system accessors used by System(); patterns may wrap these (e.g. ssh)
.System.fileSystem = list(
	#tempfile = function(prefix, ...)tempfile(splitPath(prefix)$base, tmpdir = splitPath(prefix)$dir, ...),
	tempfile = function(prefix, ...)tempFileName(prefix, ...),
	readFile = function(...)readFile(...)
);
# command-wrapping patterns for System(): each pattern supplies pre (transform the
# command before execution) and post (harvest results afterwards); 'fs' optionally
# remaps file accessors for remote execution.
.System.patterns = list(
	default = list(pre = function(cmd, ...)cmd, post = function(spec, ret, ...)list() ),
	# submit via qsub.pl; post reads the job id from jidFile
	qsub = list(pre = function(cmd, spec,
		jidFile = spec$fs$tempfile(sprintf('/tmp/R_%s/qsub_pattern', Sys.getenv('USER'))),
		qsubOptions = '',
		waitForJids = NULL, ...) {
			Dir.create(jidFile, treatPathAsFile = TRUE);
			waitOption = if (is.null(waitForJids)) '' else
				sprintf('--waitForJids %s', join(waitForJids, sep = ','));
			print(cmd);
			ncmd = sprintf('qsub.pl --jidReplace %s %s --unquote %s -- %s',
				jidFile, waitOption, qsubOptions, qs(cmd));
			print(ncmd);
			spec = list(cmd = ncmd, jidFile = jidFile);
			spec
		},
		post = function(spec, ret, ...) { list(jid = as.integer(spec$fs$readFile(spec$jidFile))) }
	),
	# run the command after changing into directory cwd
	cwd = list(pre = function(cmd, spec, cwd = '.', ...) {
			ncmd = sprintf('cd %s ; %s', qs(cwd), cmd);
			spec = list(cmd = ncmd);
			spec
		},
		post = function(spec, ret, ...) { list() }
	),
	# <i> stdout/stderr handling
	# run the command on ssh_host, optionally sourcing ssh_source_file first;
	# fs remaps tempfile/readFile to operate on the remote host
	ssh = list(pre = function(cmd, spec, ssh_host = 'localhost', ssh_source_file = NULL, ...,
		ssh_single_quote = T) {
			if (!is.null(ssh_source_file)) {
				cmd = sprintf('%s ; %s',
					join(paste('source', qs(ssh_source_file), sep = ' '), ' ; '), cmd);
			}
			fmt = if (ssh_single_quote) 'ssh %{ssh_host}s %{cmd}q' else 'ssh %{ssh_host}s %{cmd}Q';
			spec = list(cmd = Sprintf(fmt));
			spec
		},
		fs = function(fs, ..., ssh_host) {
			list(
				tempfile = function(prefix, ...) {
					Log(sprintf('tempfile ssh:%s', prefix), 1);
					r = splitPath(tempFileName(sprintf('%s:%s', ssh_host, prefix), ...), ssh = T)$path;
					Log(sprintf('tempfile ssh-remote:%s', r), 1);
					r
				},
				readFile = function(path, ...)readFile(sprintf('%s:%s', ssh_host, path), ..., ssh = T)
			);
		},
		post = function(spec, ret, ...) { list() }
	)
);
#
# a system call (c.f. privatePerl/TempFilenames::System)
#
System_env__ <- new.env();
assign(".system.doLogOnly", FALSE, envir = System_env__);
# Central shell-command dispatcher: optionally wraps cmd through one pattern
# ('pattern') or a chain of patterns ('patterns', applied innermost-first), logs
# the final command, executes it (unless dry-run via printOnly/global flag) and
# returns the exit status, optionally with captured output and pattern results.
System = function(cmd, logLevel = get('DefaultLogLevel', envir = Log_env__),
	doLog = TRUE, printOnly = NULL, return.output = F,
	pattern = NULL, patterns = NULL, ..., return.cmd = F) {
	# prepare
	if (!exists(".system.doLogOnly", envir = System_env__))
		assign(".system.doLogOnly", F, envir = System_env__);
	# printOnly overrides the global dry-run flag
	doLogOnly = ifelse (!is.null(printOnly), printOnly, get('.system.doLogOnly', envir = System_env__));
	# pattern mapping
	fs = .System.fileSystem;
	if (!is.null(patterns)) {
		spec = list();
		# map file accesses
		for (pattern in rev(patterns)) {
			fsMapper = .System.patterns[[pattern]]$fs;
			if (!is.null(fsMapper)) fs = fsMapper(fs, ...);
			spec[[length(spec) + 1]] = list(fs = fs);
		}
		# wrap commands into each other
		for (i in 1:length(patterns)) {
			spec[[i]] = merge.lists(spec[[i]], .System.patterns[[patterns[[i]]]]$pre(cmd, spec[[i]], ...));
			cmd = spec[[i]]$cmd;
		}
	} else if (!is.null(pattern)) {
		spec = .System.patterns[[pattern]]$pre(cmd, list(fs = fs), ...);
		spec$fs = fs;	# manually install fs
		cmd = spec$cmd;
	}
	# redirection (after patterns) <A>
	if (return.output & !doLogOnly) {
		tmpOutput = tempfile();
		cmd = sprintf("%s > %s", cmd, tmpOutput);
	}
	# logging
	if (doLog){ Log(sprintf("system: %s", cmd), logLevel); }
	# system call
	ret = NULL;
	if (!doLogOnly) ret = system(cmd);
	# return value
	r = list(error = ret);
	if (return.output & !doLogOnly) {
		r = merge.lists(r, list(error = ret, output = readFile(tmpOutput)));
	}
	# postprocess: harvest pattern results (e.g. qsub job ids), outermost-first
	if (!doLogOnly) if (!is.null(patterns)) {
		for (i in rev(1:length(patterns))) {
			r = merge.lists(r, .System.patterns[[patterns[[i]]]]$post(spec[[i]], ret, ...));
		}
	} else if (!is.null(pattern)) {
		r = merge.lists(r, .System.patterns[[pattern]]$post(spec, ret, ...));
	}
	if (return.cmd) r$command = cmd;
	# simplified output: plain exit status when no extras were requested
	if (!return.output && !return.cmd && is.null(pattern)) r = r$error;
	r
}
# wait on job submitted by system
# per-pattern wait handlers; qsub blocks via qwait.pl on the collected job ids
.System.wait.patterns = list(
	default = function(r, ...)(NULL),
	qsub = function(r, ...) {
		# r may be a single System() result or a list of them (list.kp: project helper)
		ids = if (is.list(r[[1]]) & !is.null(r[[1]]$jid)) list.kp(r, 'jid', do.unlist = T) else r$jid;
		idsS = if (length(ids) == 0) '' else paste(ids, collapse = ' ');
		System(sprintf('qwait.pl %s', idsS), ...);
	}
);
# Block until the job(s) described by rsystem (a System() result) have finished.
System.wait = function(rsystem, pattern = NULL, ...) {
	r = if (!is.null(pattern)) .System.wait.patterns[[pattern]](rsystem, ...) else NULL;
	r
}
# Toggle the global dry-run flag: when set, System() only logs commands.
System.SetDoLogOnly = function(doLogOnly = F) {
	assign(".system.doLogOnly", doLogOnly, envir = System_env__);
}
# IP address of the given network interface, scraped from ifconfig output.
# NOTE(review): the 'inet addr:' pattern matches older net-tools output only;
# modern ifconfig/ip print 'inet ' — confirm on target systems.
ipAddress = function(interface = "eth0") {
	o = System(sprintf("/sbin/ifconfig %s", interface), logLevel = 6, return.output = T);
	ip = fetchRegexpr("(?<=inet addr:)[^ ]+", o$output);
	ip
}
#
# <p> cluster abstraction
#
# Example:
#specifyCluster(localNodes = 8, sourceFiles = c('RgenericAll.R', 'dataPreparation.R'));
#.clRunLocal = F;
#data.frame.types(clapply(l, f, arg1 = 1), rbind = T, do.transpose = T);
# default cluster configuration
.defaultClusterConfig = list(
	hosts = list(list(host = "localhost", count = 2, type = "PSOCK")), local = F,
	provideChunkArgument = F, reverseEvaluationOrder = T, splitN = 4, reuseCluster = F,
	nestingLevel = 0,	# records the nesting of clapply calls
	splittingLevel = 1,	# specifies at which level clapply should parallelize
	evalEnvironment = F	# call environment_eval on function before passing on
);
# cluster state (specification, reusable cluster object) lives here
Snow_cluster_env__ = new.env();
# Store the global cluster specification used by clapply; optionally copies
# sourceFiles to remote hosts (.doCopy) and/or sources them locally (.doSourceLocally).
specifyCluster = function(localNodes = 8, sourceFiles = NULL, cfgDict = list(), hosts = NULL,
	.doSourceLocally = F, .doCopy = T, splitN = NULL, reuseCluster = F, libraries = NULL,
	evalEnvironment = F) {
	cfg = merge.lists(.defaultClusterConfig,
		cfgDict,
		list(splitN = splitN, reuseCluster = reuseCluster, evalEnvironment = evalEnvironment),
		list(local = F, source = sourceFiles, libraries = libraries, hosts = (if(is.null(hosts))
			list(list(host = "localhost", count = localNodes, type = "PSOCK", environment = list())) else
			hosts)
	));
	assign(".globalClusterSpecification", cfg, envir = Snow_cluster_env__);
	.globalClusterSpecification = get('.globalClusterSpecification', envir = Snow_cluster_env__);
	if (.doCopy) {
		# mirror source files into each remote host's working directory
		for (h in .globalClusterSpecification$hosts) {
			if (h$host != "localhost" & !is.null(h$env$setwd)) {
				System(sprintf("ssh %s mkdir '%s' 2>/dev/null", h$host, h$env$setwd), 5);
				System(sprintf("scp '%s' %s:'%s' >/dev/null", paste(sourceFiles, collapse = "' '"),
					h$host, h$env$setwd), 5);
			}
		}
	}
	if (.doSourceLocally) {
		sourceFiles = setdiff(sourceFiles, "RgenericAll.R");	# assume we have been sourced
		eval(parse(text =
			paste(sapply(sourceFiles, function(s)sprintf("source('%s', chdir = TRUE);", s)), collapse = "")));
	}
}
#<!> might not be available/outdated
library('parallel');
# l: list, f: function, c: config
# <i><!> test clCfg$reverseEvaluationOrder before uncommenting
# Parallel lapply over a PSOCK cluster described by clCfg: builds (or reuses) the
# cluster, establishes the per-node environment (cwd, sourced files, libraries),
# splits l into splitN chunks per node and load-balances them via clusterApplyLB.
clapply_cluster = function(l, .f, ..., clCfg = NULL) {
	#if (clCfg$reverseEvaluationOrder) l = rev(l);
	# only support SOCK type right now <!><i>
	hosts = unlist(sapply(clCfg$hosts, function(h){
		if (h$type == "PSOCK") rep(h$host, h$count) else NULL}));
	master = ifelse(all(hosts == "localhost"), "localhost", ipAddress("eth0"));
	establishEnvironment = T;
	cl = if (clCfg$reuseCluster) {
		if (!exists(".globalClusterObject")) {
			assign(".globalClusterObject", makeCluster(hosts, type = "PSOCK", master = master),
				envir = Snow_cluster_env__);
		} else establishEnvironment = FALSE;
		get('.globalClusterObject', envir = Snow_cluster_env__)
	} else makeCluster(hosts, type = "PSOCK", master = master);
	#clusterSetupRNG(cl);	# snow
	clusterSetRNGStream(cl, iseed = NULL);	# parallel
	clusterExport(cl, clCfg$vars);
	# <p> establish node environment
	envs = listKeyValue(list.key(clCfg$hosts, "host"), list.key(clCfg$hosts, "environment", unlist = F));
	Log(clCfg, 7);
	if (establishEnvironment) r = clusterApply(cl, hosts, function(host, environments, cfg){
		env = environments[[host]];
		if (!is.null(env$setwd)) setwd(env$setwd);
		if (!is.null(cfg$source)) for (s in cfg$source) source(s, chdir = TRUE);
		if (!is.null(cfg$libraries)) for (package in cfg$libraries) library(package, character.only = TRUE);
		# <!> as of 3.4.2013: stop support of exporting global variables to enable CRAN submission
		#if (!is.null(env$globalVars))
		#	for (n in names(env$globalVars)) assign(n, env$globalVars[[n]], pos = .GlobalEnv);
		#sprintf("%s - %s - %s", host, hapmap, getwd());
		NULL
	}, environments = envs, cfg = clCfg);
	# <p> iterate
	N = clCfg$splitN * length(hosts);	# No of splits
	idcs = splitListIndcs(length(l), N);
	exportNames = c();
	# NOTE(review): class(r) == "try-error" below tests the chunk list, not its
	# elements — lapply always returns class "list", so failures pass through as
	# try-error elements rather than NULL chunks; confirm intended semantics
	iterator__ = if (clCfg$provideChunkArgument) {
		function(.i, ...) {
			r = lapply(idcs[.i, 1]:idcs[.i, 2], function(j)try(.f(l[[j]], .i, ...)));
			if (class(r) == "try-error") r = NULL;
			r
		}
	} else {
		function(.i, ...){
			r = lapply(idcs[.i, 1]:idcs[.i, 2], function(j)try(.f(l[[j]], ...)));
			if (class(r) == "try-error") r = NULL;
			r
		}
	}
	if (clCfg$evalEnvironment) {
		iterator__ = environment_eval(iterator__, functions = T);
		#clusterExport(cl, varlist = names(as.list(environment(iterator__))), envir = environment(iterator__));
	}
	r = clusterApplyLB(cl, 1:dim(idcs)[1], iterator__, ...);
	# <p> finish up
	if (!clCfg$reuseCluster) stopCluster(cl)
	r = unlist(r, recursive = F);
	#if (clCfg$reverseEvaluationOrder) r = rev(r);
	r
}
# wrapper (as of 3.12.8: I seem to have lost a previous change)
# Parallel lapply dispatcher: merges the global cluster specification with clCfg,
# tracks the call-nesting level in Snow_cluster_env__, and either evaluates .f
# sequentially or delegates to clapply_cluster at the configured splittingLevel.
# NOTE(review): relies on rget, merge.lists, firstDef and Snow_cluster_env__
# defined elsewhere in this file.
clapply = function(l, .f, ..., clCfg = NULL, .clRunLocal = rget(".clRunLocal", F, envir = .GlobalEnv)) {
	# <p> get cluster specification
	clCfg = merge.lists(
		rget(".globalClusterSpecification", default = list(), envir = Snow_cluster_env__),
		firstDef(clCfg, list())
	);
	# <p> update cluster specification (entering one nesting level deeper)
	clCfg$nestingLevel = clCfg$nestingLevel + 1;
	assign(".globalClusterSpecification", clCfg, envir = Snow_cluster_env__);
	# <p> choose/decline parallelization: run locally when explicitly requested
	# or when the current nesting level is not the one chosen for splitting
	r = if (firstDef(.clRunLocal, clCfg$local, F) || clCfg$nestingLevel != clCfg$splittingLevel) {
		if (clCfg$provideChunkArgument) lapply(X = l, FUN = .f, 1, ...)
		else lapply(X = l, FUN = .f, ...)
	} else {
		clapply_cluster(l, .f, ..., clCfg = clCfg);
	};
	# <p> update cluster specification (restore nesting level on the way out)
	clCfg$nestingLevel = clCfg$nestingLevel - 1;
	assign(".globalClusterSpecification", clCfg, envir = Snow_cluster_env__);
	r
}
evalCall = function(call) {
call = callEvalArgs(call);
do.call(call$f, call$args, envir = call$envir)
}
# envirArgs: non-functional, deprecated; kept only for interface compatibility.
# Thin wrapper around do.call that tolerates envir = NULL (falls back to
# defaultEnvir) and can optionally pre-evaluate the arguments in that
# environment before dispatch.
Do.call = function(what, args, quote = FALSE, envir = parent.frame(),
	defaultEnvir = .GlobalEnv, envirArgs = NULL, do_evaluate_args = F) {
	targetEnv = if (is.null(envir)) defaultEnvir else envir;
	callArgs = if (do_evaluate_args)
		nlapply(args, function(e)eval(args[[e]], envir = targetEnv)) else
		args;
	do.call(what = what, args = callArgs, quote = quote, envir = targetEnv)
}
#
# <p> file operations
#
#' Return absolute path for name searched in search-pathes
#'
#' Search for pathes.
#'
#' @param as.dirs assume that prefixes are pathes, i.e. a slash will be put between path and prefix
#' @param force enforces that path and prefix are always joined, otherwise if path is absolute no prefixing is performed
# Locate 'path' by trying each prefix in turn; returns the first existing
# candidate (absolute if 'normalize'), or NULL if none exists.
# Absolute pathes ('/...') and home-relative pathes ('~...') short-circuit
# unless 'force' requires prefixing anyway.
file.locate = function(path, prefixes = NULL, normalize = T, as.dirs = T, force = F, home = T) {
	if (!force && substr(path, 1, 1) == '/') return(path);
	if (substr(path, 1, 1) == '~' && home) {
		path = path.absolute(path, home = TRUE);
		if (!force) return(path);
	}
	# default search path: current dir when prefixes are directories, else bare concatenation
	if (is.null(prefixes)) prefixes = if (as.dirs) '.' else '';
	sep = ifelse(as.dirs, '/', '');
	for (prefix in prefixes) {
		npath = sprintf('%s%s%s', prefix, sep, path);
		if (normalize) npath = path.absolute(npath);
		if (file.exists(npath)) return(npath);
	}
	NULL
}
#' Read content of file and return as character object.
#'
#' Read content of file and return as character object.
#'
#' Read content of file and return as character object.
#'
#' @param path Path to the file to be read.
#' @param prefixes Search for file by prepending character strings from
#' prefixes.
#' @param normalize Standardize pathes.
#' @param ssh Allow pathes to remote files in \code{scp} notation.
#' @author Stefan Böhringer <r-packages@@s-boehringer.org>
#' @keywords ~kwd1 ~kwd2
#' @examples
#'
#' parallel8 = function(e) log(1:e) %*% log(1:e);
#' cat(readFile(tempcodefile(parallel8)));
#'
# prefixes only supported locally <!>
# prefixes only supported locally <!>
# Slurp a file into a single character string; remote ('scp'-style) pathes are
# first copied to a temp file via File.copy.
readFile = function(path, prefixes = NULL, normalize = T, ssh = F) {
	s = splitPath(path, ssh = ssh);
	r = if (s$is.remote) {
		tf = tempfile();
		File.copy(path, tf);
		# read exactly the file size in bytes/chars
		readChar(tf, nchars = as.list(file.info(tf)[1,])$size);
	} else {
		if (!is.null(prefixes)) path = file.locate(path, prefixes, normalize);
		readChar(path, nchars = as.list(file.info(path)[1,])$size);
	}
	r
}
# Write character 'str' verbatim (no separator) to 'path' as UTF-8; supports
# remote ('scp'-style) destinations by staging through a temp file.
# @param mkpath create the destination directory if missing
# @return the path written to (useful for chaining)
writeFile = function(path, str, mkpath = F, ssh = F) {
	s = splitPath(path, ssh = ssh);
	if (s$is.remote) {
		Dir.create(sprintf('%s:%s', s$userhost, s$dir), recursive = mkpath);
		tf = tempfile();
		out = file(description = tf, open = 'w', encoding='UTF-8');
		cat(str, file = out, sep = "");
		close(con = out);
		File.copy(tf, path);
	} else {
		if (mkpath) {
			if (!file.exists(s$dir)) dir.create(s$dir, recursive = T);
		}
		out = file(description = path, open = 'w', encoding='UTF-8');
		cat(str, file = out, sep = "");
		close(con = out);
	}
	path
}
# TRUE when any element of 'path' looks like an URL (ftp/http/https/file scheme).
isURL = function(path) {
	any(grepl("^(ftp|http|https|file)://", path))
}
# Fetch an URL (following redirects, with CA bundle from RCurl) into a temp
# file and source() it.
Source_url = function(url, ...) {
	require('RCurl');
	request = getURL(url, followlocation = TRUE,
		cainfo = system.file("CurlSSL", "cacert.pem", package = "RCurl"));
	tf = tempfile();
	writeFile(tf, request);
	source(tf, ...)
}
# <!> local = T does not work
# source() wrapper that accepts URLs and searches local files in 'locations'.
Source = function(file, ...,
	locations = c('', '.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	sapply(file, function(file) {
		if (isURL(file)) Source_url(file, ...) else {
			file0 = file.locate(file, prefixes = locations);
			source(file = file0, ...)
		}
	})
}
#
# <p> helper functions readTable/writeTable
#
# Compress 'pathRaw' to 'path' with bzip2 -9 via a shell pipe; optionally
# removes the uncompressed original (skipped in log-only dry runs).
compressPathBz2 = function(pathRaw, path, doRemoveOrig = TRUE) {
	cmd = Sprintf("cat %{pathRaw}q | bzip2 -9 > %{path}q");
	r = System(cmd, 2);
	if (doRemoveOrig && !get('.system.doLogOnly', envir = System_env__)) file.remove(pathRaw);
	r
}
# Dispatch to compressPath<Extension> (e.g. compressPathBz2) based on the
# 'extension' tag; NULL extension means no compression (returns 'path' as-is).
compressPath = function(pathRaw, path, extension = NULL, doRemoveOrig = TRUE) {
	if (is.null(extension)) return(path);
	compressor = get(Sprintf('compressPath%{extension}u'));
	r = compressor(pathRaw, path, doRemoveOrig = doRemoveOrig);
	r
}
# Decompress bz2 file 'path' into 'pathTmp' via a shell pipe; optionally
# removes the compressed original (skipped in log-only dry runs).
# Fix: the cleanup branch referenced 'pathRaw', which is not defined in this
# function (copy-paste from compressPathBz2); the compressed source is 'path'.
decompressPathBz2 = function(path, pathTmp, doRemoveOrig = FALSE) {
	cmd = Sprintf("cat %{path}q | bunzip2 > %{pathTmp}q");
	r = System(cmd, 2);
	if (doRemoveOrig && !get('.system.doLogOnly', envir = System_env__)) file.remove(path);
	r
}
# Dispatch to decompressPath<Extension> based on 'extension'; NULL extension
# means nothing to do (returns 'path' unchanged, NOT the result list).
decompressPath = function(path, pathTmp, extension = NULL, doRemoveOrig = FALSE) {
	if (is.null(extension)) return(path);
	decompressor = get(Sprintf('decompressPath%{extension}u'));
	r0 = decompressor(path, pathTmp, doRemoveOrig = doRemoveOrig);
	r = list(destination = pathTmp, pathOrig = path, return = r0);
	r
}
# Connection factories for transparently reading/writing compressed files.
compressedConnectionBz2 = function(path, mode = '') {
	#r = Sprintf('%{path}s.bz2');
	bzfile(path, open = mode)
}
compressedConnectionGz = function(path, mode = '') {
	gzfile(path, open = mode)
}
# Dispatch on 'extension' (e.g. 'bz2', 'gz'); NULL extension returns the raw
# path so callers can pass the result straight to read/write functions.
compressedConnection = function(path, extension = NULL, mode = '') {
	if (is.null(extension)) return(path);
	compressor = get(Sprintf('compressedConnection%{extension}u'));
	compressor(path, mode = mode)
}
# Recover the file path behind a connection (or return a plain path unchanged).
compressedConnectionPath = function(conn) {
	if ('connection' %in% class(conn)) summary(conn)$description else conn
}
#
# <p> readTable
#
# complete: return only complete data with respect to specified colums
# NA: specify 'NA'-values
# Map of single-letter separator codes (used in extended path options) to the
# actual separator passed to read.table; 'S+' maps to '' (any whitespace).
readTableSepMap = list(T = "\t", S = ' ', C = ',', `;` = ';', `S+` = '');
# Parsers for the KEY=value options embedded in extended pathes
# ('[KEY=val,...]:path'); each entry converts the raw option string into the
# value consumed by readTable and friends.
optionParser = list(
	SEP = function(e)readTableSepMap[[e]],
	QUOTE = function(e)(if (e == 'F') '' else e),
	HEADER = function(e)list(T = T, F = F)[[e]],
	ROW.NAMES = function(e)list(T = T, F = F)[[e]],
	NAMES = function(e)splitString(';', e),
	FACTORS = function(e)splitString(';', e),
	PROJECT = function(e)splitString(';', e),
	`NA` = function(e)splitString(';', e),
	complete = function(e)splitString(';', e),
	# CONST: 'col:value;...' -> named list of constants (digits become integers)
	CONST = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			v = if (length(fetchRegexpr('^\\d+$', r[2])) > 0) r[2] = as.integer(r[2]) else r[2];
			listKeyValue(r[1], v)
		});
		unlist.n(r, 1)
	},
	# HEADERMAP: 'old:new;...' -> named list for renaming columns
	HEADERMAP = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			listKeyValue(r[1], r[2])
		});
		unlist.n(r, 1)
	},
	# tb implemented: <i>: merge.lists recursive
	VALUEMAP = function(e){ r = lapply(splitString(';', e), function(e){
			r = splitString(':', e);
			listKeyValue(r[1], r[2])
		});
		unlist.n(r, 1)
	},
	COLNAMESFILE = identity,
	SHEET = as.integer
);
# Split an extended path '[KEY=val,...]:real/path' into list(path, options),
# parsing each option through optionParser. A plain path yields empty options.
splitExtendedPath = function(path) {
	# leading '[...]:' option block, if present
	q = fetchRegexpr('(?<=^\\[).*?(?=\\]:)', path);
	options = list();
	if (length(q) > 0 && nchar(q) > 0) {
		# strip '[', options, ']:' (hence + 4) from the front of the path
		path = substr(path, nchar(q) + 4, nchar(path));
		os = sapply(splitString(',', q), function(e)splitString('=', e));
		os = listKeyValue(os[1, ], os[2, ]);
		os = nlapply(names(os), function(n)optionParser[[n]](os[[n]]));
		options = merge.lists(options, os);
	}
	r = list(path = path, options = options)
}
# Read one sheet (options$SHEET, default 1) of an OpenDocument spreadsheet.
readTable.ods = function(path, options = NULL) {
	require('readODS');
	sheet = firstDef(options$SHEET, 1);
	read.ods(path)[[sheet]];
}
# <!> changed SEP default "\t" -> ",", 20.5.2015
#readTable.csv.defaults = list(HEADER = T, SEP = "\t", `NA` = c('NA'), QUOTE = '"');
readTable.csv.defaults = list(HEADER = T, SEP = ",", `NA` = c('NA'), QUOTE = '"');
# Generic delimited-text reader; options (SEP/HEADER/NA/QUOTE/NAMES/FACTORS)
# override readTable.csv.defaults. headerMap renames columns, setHeader
# replaces the leading column names wholesale.
readTable.txt = readTable.csv = function(
	path, options = readTable.csv.defaults, headerMap = NULL, setHeader = NULL, ...) {
	options = merge.lists(readTable.csv.defaults, options);
	t = read.table(path, header = options$HEADER, sep = options$SEP, as.is = T,
		na.strings = options$`NA`, comment.char = '', quote = options$QUOTE, ...);
	if (!is.null(options$NAMES)) names(t)[1:length(options$NAMES)] = options$NAMES;
	if (!is.null(headerMap)) names(t) = vector.replace(names(t), headerMap);
	if (!is.null(setHeader)) names(t) = c(setHeader, names(t)[(length(setHeader)+1): length(names(t))]);
	if (!is.null(options$FACTORS)) t = Df_(t, as_factor = options$FACTORS);
	t
}
# Read an SPSS .sav file as a data.frame (options/headerMap currently unused).
readTable.sav = function(path, options = NULL, headerMap = NULL, stringsAsFactors = F) {
	require('foreign');
	# read file
	r = read.spss(path);
	as.data.frame(r, stringsAsFactors = stringsAsFactors)
}
# Load the first object stored in an .RData file and return it as a
# data.frame (character columns are kept as character, not factors).
readTable.RData = function(path, options = NULL, headerMap = NULL) {
	loadedNames = load(path);
	as.data.frame(get(loadedNames[1]), stringsAsFactors = F)
}
# Read one sheet of an Excel file via gdata (options/headerMap currently unused).
readTable.xls = function(path, options = NULL, ..., sheet = 1) {
	require('gdata');
	read.xls(path, sheet = sheet, verbose = FALSE);
}
# Extensions whose readers accept a connection directly (no temp decompression).
tableFunctionConnect = c('csv', 'RData');
# Resolve the reader/writer function for a path by its extension, detecting
# bz2/gz compression (in which case 'ext' refers to the inner file and a temp
# file name is prepared). Falls back to 'default' when no readTable.<ext>
# function exists.
tableFunctionForPathMeta = function(path, template = 'readTable.%{ext}s', default = readTable.csv,
	forceReader = NULL) {
	sp = splitPath(path);
	compression = NULL;
	tmpFile = NULL;
	if (firstDef(forceReader, sp$ext) %in% c('bz2', 'gz')) {
		compression = sp$ext;
		sp = splitPath(sp$fullbase);	# peel off the compression extension
		tmpFile = Sprintf('%{file}s.%{ext}s', file = tempfile(), ext = sp$ext);
	}
	name = Sprintf(template, ext = firstDef(forceReader, sp$ext));
	f = if (exists(name)) get(name) else default;
	r = list(
		fct = f, name = name, ext = sp$ext,
		compression = compression, tempfile = tmpFile, path = path
	);
	r
}
# Convenience: only the resolved function.
tableFunctionForPath = function(path, template = 'readTable.%{ext}s',
	default = readTable.csv, forceReader = NULL) {
	tableFunctionForPathMeta(path, template, default, forceReader)$fct
}
# forceReader: force readerFunction
# Resolve reader function and effective path for 'path'; compressed files are
# either wrapped in a connection (extensions in tableFunctionConnect) or
# decompressed into the prepared temp file first.
# Fix: forward the caller-supplied 'template' argument -- it was shadowed by
# the hard-coded 'readTable.%{ext}s' literal and silently ignored.
# NOTE(review): the connection branch tests m0$compression (bz2/gz) against
# tableFunctionConnect (csv/RData), so it can never match -- confirm whether
# the inner extension m0$ext was intended; behavior left unchanged here.
tableFunctionForPathReader = function(path, template = 'readTable.%{ext}s', default = readTable.csv,
	forceReader = NULL) {
	m = m0 = tableFunctionForPathMeta(path, template = template, default = default, forceReader);
	if (!is.null(m$compression)) {
		path = if (m0$compression %in% tableFunctionConnect)
			compressedConnection(m0$path, m0$compression) else
			decompressPath(m0$path, m0$tempfile, m0$compression)$destination
		m = merge.lists(m0, list(path = path));
	}
	m
}
# <!> as of 23.5.2014: headerMap after o$NAMES assignment
# <i> use tableFunctionForPath
# Universal table reader: parses extended-path options, dispatches to a
# readTable.<ext> reader (with transparent bz2/gz decompression), then applies
# the post-processing options (NAMES/HEADERMAP/COLNAMESFILE/PROJECT/complete/
# CONST/FACTORS) to the resulting data.frame.
readTable = function(path, autodetect = T, headerMap = NULL, extendedPath = T, colnamesFile = NULL, ...,
	as_factor = NULL, stringsAsFactors = F, defaultReader = readTable.csv, doRemoveTempFile = TRUE,
	forceReader = NULL) {
	# <p> preparation
	path = join(path, '');
	o = list();
	if (extendedPath) {
		r = splitExtendedPath(path);
		path = r$path;
		o = r$options;
	}
	# <p> read table raw
	sp = splitPath(path);
	reader = if (autodetect && !is.null(sp$ext))
		tableFunctionForPathReader(path, 'readTable.%{ext}s', readTable.csv, forceReader) else
		list(fct = defaultReader, path = path);
	r = reader$fct(reader$path, options = o, ...);
	# <p> cleanup: drop the decompression temp file unless in log-only dry run
	if (doRemoveTempFile && !get('.system.doLogOnly', envir = System_env__) && !is.null(reader$tempfile))
		file.remove(reader$tempfile);
	# <p> table transformations
	if (!is.null(o$NAMES) && length(o$NAMES) <= ncol(r)) names(r)[1:length(o$NAMES)] = o$NAMES;
	colnamesFile = firstDef(o$COLNAMESFILE, colnamesFile);
	headerMap = c(headerMap, o$HEADERMAP);
	if (!is.null(headerMap)) names(r) = vector.replace(names(r), headerMap);
	if (!is.null(colnamesFile)) {
		# external file supplies column names (one per row, first column)
		ns = read.table(colnamesFile, header = F, as.is = T)[, 1];
		names(r)[1:length(ns)] = ns;
	}
	if (!is.null(o$PROJECT)) r = r[, o$PROJECT];
	# keep only rows complete w.r.t. the columns named in o$complete
	if (!is.null(o$complete)) r = r[apply(r[, o$complete], 1, function(e)!any(is.na(e))), ];
	if (!is.null(o$CONST)) { for (n in names(o$CONST)) r[[n]] = o$CONST[[n]]; }
	if (!is.null(as_factor)) r = Df_(r, as_factor = as_factor);
	r
}
#
# <p> writeTable
#
# Default formatting options for the generic write.table-based writer.
writeTable.defaults = list(
	SEP = ' ',
	ROW.NAMES = FALSE,
	HEADER = TRUE,
	QUOTE = TRUE
);
# Writer backend using write.table; 'options' (SEP/ROW.NAMES/HEADER/QUOTE)
# override writeTable.defaults; doCompress selects a compressed connection.
writeTable.table = function(dataFrame, path, ..., doCompress = NULL, row.names = TRUE, options = list()) {
	o = merge.lists(writeTable.defaults, list(ROW.NAMES = row.names), options);
	conn = compressedConnection(path, doCompress, mode = 'w');
	with(o, write.table(dataFrame, file = conn, ...,
		row.names = ROW.NAMES, col.names = HEADER, sep = SEP, quote = (QUOTE != '')));
}
# Excel writer backend: writes to a staging path when compression is requested,
# then compresses into 'path'.
writeTable.xls = function(object, path, doCompress = NULL, row.names = TRUE,
	doRemoveOrig = TRUE, options = list()) {
	require('WriteXLS');
	r = path;
	dataFrame = as.data.frame(object);
	pathRaw = if (!is.null(doCompress)) Sprintf('%{path}s_raw_') else path;
	r0 = WriteXLS(dataFrame, ExcelFileName = pathRaw, row.names = row.names);
	r1 = compressPath(pathRaw, path, doCompress, doRemoveOrig);
	r0
}
# CSV writer backend (write.csv), optionally through a compressed connection.
writeTable.csv = function(dataFrame, path, ..., doCompress = NULL, row.names = TRUE, options = list()) {
	conn = compressedConnection(path, doCompress, mode = 'w');
	write.csv(dataFrame, file = conn, ..., row.names = row.names);
}
# doCompress = 'bz2' to write bz2
# <i><!> determine from path
# Write 'object' to a single path: autodetect the writeTable.<ext> backend from
# the extension, inferring compression from bz2/gz suffixes.
writeTableRaw = function(object, path, ..., doCompress = NULL, row.names = TRUE, autodetect = TRUE,
	defaultWriter = writeTable.csv, options = list()) {
	sp = splitPath(path);
	if (!is.null(doCompress) && sp$ext %in% c('bz2', 'gz')) doCompress = sp$ext;
	writer = if (autodetect && !is.null(sp$ext))
		tableFunctionForPath(path, 'writeTable.%{ext}s', writeTable.csv) else defaultWriter;
	if (is.null(writer))
		stop(Sprintf("Writing table to extension '%{ext}s' not supported", ext = sp$ext));
	r0 = writer(object, path = path, ..., doCompress = doCompress, row.names = row.names, options = options);
	r = list(path = path, return = r0);
	r
}
# Front-end: supports extended pathes (embedded options force the generic
# write.table backend) and vectorized 'path' (one write per element).
writeTable = function(object, path, ..., doCompress = NULL, row.names = TRUE, autodetect = TRUE,
	defaultWriter = writeTable.csv, simplify = TRUE, extendedPath = TRUE) {
	o = list();
	if (extendedPath) {
		r = splitExtendedPath(path);
		path = r$path;
		o = r$options;
		defaultWriter = writeTable.table;
	}
	r = lapply(path, function(p)
		writeTableRaw(object, p, ...,
			doCompress = doCompress, row.names = row.names, autodetect = autodetect,
			defaultWriter = defaultWriter, options = o)
	);
	# single destination: return its result directly rather than a 1-list
	if (simplify && length(path) == 1) r = r[[1]];
	r
}
#
# <p> swig
#
# Compile and load C code into R on the fly via SWIG: writes an interface file
# and C source into the session temp dir, runs swig/gcc, dyn.load()s the
# resulting shared object and source()s the generated R wrapper.
# NOTE(review): compiler flags and the R include path are hard-coded for a
# specific Linux setup -- confirm before reuse.
swigIt = function(interface, code, moduleName = NULL) {
	dir = tempdir(); # will be constant across calls
	if (is.null(moduleName)) {
		t = tempFileName("swig");
		moduleName = splitPath(t)$base;
	}
	ifile = sprintf("%s/%s.%s", dir, moduleName, "i");
	interface = sprintf("
		%%module %s
		%%inline %%{
		%s;
		%%}
	", moduleName, paste(interface, collapse = ";\n\t\t\t"));
	ifile = sprintf("%s/%s.%s", dir, moduleName, "i");
	base = splitPath(ifile)$fullbase;
	writeFile(ifile, interface);
	cfile = sprintf("%s.c", base);
	writeFile(cfile, code);
	#print(list(i = ifile, c = cfile, so = sprintf("%s.so", base)));
	system(sprintf("swig -r %s", ifile));
	#cat(code);
	system(sprintf("cd %s ; gcc -O2 -D__USE_BSD -D__USE_GNU -std=c99 -c -fpic %s.c %s_wrap.c -I/usr/local/lib64/R/include -lm ",
		splitPath(ifile)$dir, base, base));
	system(sprintf("cd %s ; gcc -shared %s.o %s_wrap.o -o %s.so", splitPath(ifile)$dir, base, base, base));
	#dyn.unload(sprintf("%s.so", base));
	dyn.load(sprintf("%s.so", base));
	source(sprintf("%s/%s.R", splitPath(ifile)$dir, moduleName));
}
#
# <p> print
#
# print(...) with optional redirection of the printed output into 'file'
# (via sink); returns print's return value.
fprint = function(..., file = NULL, append = F) {
	redirect = !is.null(file);
	if (redirect) sink(file = file, append = append);
	res = print(...);
	if (redirect) sink();
	res
}
# Evaluate a call in the caller's caller frame, capturing everything it prints
# to stdout; returns the captured text as a single string.
stdOutFromCall = function(call_) {
	tf = tempfile();
	sink(tf);
	eval.parent(call_, n = 2);	# n = 2: evaluate where the call was built
	sink();
	readFile(tf)
}
#
# crypotgraphy/checksumming
#
# MD5 checksum of a character string: the string is written byte-for-byte to a
# temp file and hashed with tools::md5sum.
# Fixes: 'prefix' was previously ignored (temp name was hard-coded), and the
# temp file was never removed. NOTE(review): writes via cat() in the native
# encoding; confirm if UTF-8 must be forced for non-ASCII input.
md5sumString = function(s, prefix = 'md5generator') {
	require('tools');
	path = tempfile(prefix);
	on.exit(unlink(path), add = TRUE);
	cat(s, file = path, sep = "");
	md5 = unname(md5sum(path));
	md5
}
#
# <p> package documentation
#
# docFile = sprintf('%s/tmp/docOut.Rd', Sys.getenv('HOME'));
# docDir = sprintf('%s/src/Rpackages/parallelize.dynamic/parallelize.dynamic/man', Sys.getenv('HOME'));
# docs = RdocumentationSkeleton('Rparallel.back.R', 'parallelize.dynamic', output = docFile);
# writeRdocumentationToDir(docFile, docDir);
# Generate Rd skeletons for a set of items: 'unparser' turns each item into an
# Rd file path, whose contents are slurped and keyed by file basename.
RdocumentationForObjects = function(items, envir, unparser = function(item, envir)item) {
	files = suppressMessages({
		sapply(items, function(item)unparser(item, envir));
	});
	docs = lapply(files, readFile);
	names(docs) = sapply(files, function(f)splitPath(f)$base);
	docs
}
# Rd skeletons for plain functions via prompt().
RdocumentationForFunctions = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s.Rd", item));
		prompt(get(item, envir = envir), name = item, filename = file);
		file
	});
	docs
}
# Rd skeletons for S4 classes via promptClass().
RdocumentationForClasses = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s-class.Rd", item));
		methods::promptClass(item, filename = file, where = envir);
		file
	});
	docs
}
# Rd skeletons for S4 generics/methods via promptMethods().
RdocumentationForMethods = function(items, envir) {
	docs = RdocumentationForObjects(items, envir, unparser = function(item, envir) {
		file = file.path(tempdir(), sprintf("%s-methods.Rd", item));
		methods::promptMethods(item, filename = file, findMethods(item, where = envir));
		file
	});
	docs
}
# code from packages.skeleton
# code from packages.skeleton
# Source R files into a fresh package-tagged environment and classify the
# defined objects into S4 classes, generics and everything else (dot-prefixed
# names excluded).
objectsFromCodeFiles = function(R_files, packageName = 'generic') {
	e = new.env(hash = T);
	methods::setPackageName(packageName, e);
	for (f in R_files) sys.source(f, envir = e);
	classes = getClasses(e);
	methods = getGenerics(e);
	others = ls(e, all.names = T);
	others = others[grep('^\\.', others, invert = T)];
	r = list(envir = e, classes = classes, methods = methods,
		others = setdiff(setdiff(others, classes), methods));
	r
}
# Build one combined Rd-skeleton document for all objects in R_files, wrapping
# each skeleton in DOCUMENTATION_BEGIN/END markers; optionally write it to
# 'output' (refusing to overwrite an existing file).
RdocumentationSkeleton = function(R_files, output = NULL, packageName = 'generic') {
	os = objectsFromCodeFiles(R_files, packageName = packageName);
	docs = c(
		RdocumentationForFunctions(os$others, os$envir),
		RdocumentationForClasses(os$classes, os$envir),
		RdocumentationForMethods(os$methods, os$envir)
	);
	doc = join(nlapply(docs, function(n) {
		sprintf("\nDOCUMENTATION_BEGIN:%s\n%s\nDOCUMENTATION_END\n", n, docs[[n]])
	}), "\n");
	if (!is.null(output)) {
		if (File.exists(output)) {
			Log(sprintf("Move away file '%s' before writing new skeleton", output), 2);
		} else {
			writeFile(output, doc);
		}
	}
	doc
}
# Split combined skeleton files (DOCUMENTATION_BEGIN/END markers) back into
# individual man/*.Rd files; cleanOut removes pre-existing Rd files first.
# @return the names of the documented objects written
writeRdocumentationToDir = function(pathesIn, pathOut, cleanOut = F) {
	doc = sapply(pathesIn, readFile, USE.NAMES = F);
	r = unlist.n(getPatternFromStrings(doc, '(?s)(?:\\nDOCUMENTATION_BEGIN:)([^\\n]+)\\n(.*?)(?:\\nDOCUMENTATION_END\\n)'), 1);
	Dir.create(pathOut, recursive = T);
	if (cleanOut) {
		files = list_files_with_exts(pathOut, 'Rd');
		file.remove(files);
	}
	nlapply(r, function(n) {
		output = file.path(pathOut, sprintf('%s.Rd', n));
		Log(sprintf('Writing to %s', output), 3);
		writeFile(output, r[[n]]);
	});
	names(r)
}
# Regenerate man pages from the combined doc file and reinstall the package.
reDoc = function(package = 'parallelize.dynamic',
	docFile = sprintf('./%s.doc.Rd', package), docDir = sprintf('./%s/man', package)) {
	writeRdocumentationToDir(docFile, docDir, cleanOut = T);
	install.packages(sprintf('./%s', package), repos = NULL);
	#detach(package);
	#library(package)
}
#
# <p> Rcpp helpers
#
# Build an Rcpp module from inline C++ (extracted from 'headers' between
# '// -- begin/end inline Rcpp' markers), linking against the shared libraries
# in 'libpathes'; optionally persist the module (libs + glue .so + descriptor)
# into 'output' for later activateModule().
# Fix: PKG_CXXFLAGS was being seeded from Sys.getenv('PKG_LIBS') (copy-paste
# from the line above) instead of the existing PKG_CXXFLAGS value.
createModule = function(name, libpathes = c(), headers = c(), output = NULL) {
	require('Rcpp');
	require('inline');
	dirs = sapply(libpathes, function(e)splitPath(e)$dir);
	libs = sapply(libpathes, function(e)fetchRegexpr('(?<=lib)(.*)(?=.so)', splitPath(e)$file));
	.libPaths(c(.libPaths(), dirs));
	libincludes = join(sapply(seq_along(dirs), function(i)sprintf('-L"%s" -l%s', splitPath(dirs[i])$absolute, libs[i])), ' ');
	Sys.setenv(`PKG_LIBS` = sprintf('%s %s', Sys.getenv('PKG_LIBS'), libincludes));
	Sys.setenv(`PKG_CXXFLAGS` = sprintf('%s %s', Sys.getenv('PKG_CXXFLAGS'), stdOutFromCall(Rcpp:::CxxFlags())));
	for (lib in libpathes) { dyn.load(lib, local = F) }
	# extract the inline C++ between the marker comments from each header
	moduleRegex = '(?s:(?<=// -- begin inline Rcpp\n)(.*?)(?=// -- end inline Rcpp))';
	inc = join(sapply(headers, function(f) fetchRegexpr(moduleRegex, readFile(f))), "\n");
	rcpp = cxxfunction( signature(), '' , includes = inc, plugin = 'Rcpp', verbose = T );
	mod = Module( name, getDynLib(rcpp) );
	if (!is.null(output)) {
		# persist libraries, glue object and a descriptor for activateModule()
		Dir.create(output, recursive = T);
		libfiles = sapply(libpathes, function(lib) {
			File.copy(lib, sprintf('%s/%s', output, splitPath(lib)$file));
			splitPath(lib)$file
		});
		glue = sprintf('%s/%s.so', output, name);
		File.copy(getDynLib(rcpp)[['path']], glue);
		module_descriptor = list(
			name = name,
			libs = c(libfiles, splitPath(glue)$file)
		);
		save(module_descriptor, file = sprintf('%s/module.RData', output));
	}
	mod
}
# Re-load a module persisted by createModule(): dyn.load all recorded shared
# objects (unloading stale copies first) and instantiate the Rcpp Module from
# the last one loaded (the glue library).
activateModule = function(path) {
	require('Rcpp');
	module_descriptor = get(load(sprintf('%s/module.RData', path))[1]);
	r = lapply(module_descriptor$libs, function(lib)try(dyn.unload(sprintf('%s/%s', path, lib)), silent = T));
	r = lapply(module_descriptor$libs, function(lib)dyn.load(sprintf('%s/%s', path, lib), local = F));
	mod = Module( module_descriptor$name, rev(r)[[1]] );
	mod
}
#
# <p> sqlite
#
# Generate SQL creating a 'data' table with the given columns, plus optional
# CREATE INDEX statements; if 'createAt' is given, pipe the SQL into
# 'sqlite3 <createAt>' to actually create the database.
# @param types named list column -> sqlite type; unspecified columns get 'text'.
#   Fix: the default was 'types = list' (the *function* object list, missing
#   parentheses), which corrupted the merge.lists call; now an empty list.
sqlCreateTable = function(columns, types = list(), index = NULL, createAt = NULL) {
	# <p> create database
	types = merge.lists(listKeyValue(columns, rep('text', length(columns))), types);
	createDbSql = join(sep = "\n", c(
		sprintf('CREATE TABLE data (%s);',
			join(sep = ', ', sapply(columns, function(e)sprintf('%s %s', e, types[e])))),
		if (is.null(index)) c() else sapply(1:length(index), function(i)
			sprintf('CREATE INDEX index_%d ON data (%s);', i, join(index[[i]], sep = ', '))),
		'.quit', ''
	));
	if (!is.null(createAt)) System(sprintf('echo \'%s\' | sqlite3 %s', createDbSql, qs(createAt)), 1);
	createDbSql
}
# Create sqlite database with contents of csv-file
# @par index: list of columns to index
# @par type: sqlite types: integer, real, text, blob, not specified assumes text
# Separator maps: R-side (csv2sqlitSepMap), sqlite '.separator' directive
# (sepMap) and the shell 'cut -d' variant (sepMapCut).
csv2sqlitSepMap = readTableSepMap;
sepMap = list(T = '\\t', S = ' ', C = ',', `;` = ';', `S+` = '');
sepMapCut = list(T = '\\t', S = '" "', C = ',', `;` = ';', `S+` = '');
# Import a (possibly gzipped) delimited file into a fresh sqlite database:
# builds the schema with sqlCreateTable, then streams the file through an
# optional tail/cut/NULL-stripping perl pipeline into 'sqlite3 .import'.
csv2sqlite = function(path, output = tempfile(),
	columnsNames = NULL, columnsSelect = NULL,
	index = NULL,
	inputSep = 'T', inputHeader = T, inputSkip = NULL,
	NULLs = NULL, types = list()) {
	# <!> cave: does not heed skip
	if (!inputHeader && is.null(columnsNames)) {
		columnsNames = read.table(path, header = F, nrows = 1, sep = csv2sqlitSepMap[[inputSep]]);
	}
	# <p> select columns (shell 'cut' restricted to the chosen columns)
	cut = if (!is.null(columnsSelect)) {
		skipColumnsIds = which.indeces(columnsSelect, columnsNames);
		sprintf('| cut %s -f %s ',
			if (inputSep == 'T') '' else sprintf('-d %s', sepMapCut[[inputSep]]),
			join(skipColumnsIds, ',')
		)
	} else '';
	columns = if (is.null(columnsSelect)) columnsNames else columnsSelect;
	types = merge.lists(listKeyValue(columns, rep('text', length(columns))), types);
	sqlCreateTable(columns, types, index, createAt = output);
	# <p> import data
	skipCommand = if (is.null(inputSkip)) '' else sprintf('| tail -n +%d ', inputSkip + 1);
	reader = if (splitPath(path)$ext == 'gz') 'zcat' else 'cat';
	importSql = writeFile(tempfile(), join(sep = "\n", c(
		sprintf(".separator %s\n", sepMap[[inputSep]]),
		sprintf(".import \"/dev/stdin\" data")
	)));
	sepText = sepMap[[inputSep]];
	# strip NULL-tokens so sqlite imports them as empty fields
	filter = if (is.null(NULLs)) '' else
		sprintf("| perl -pe 's/((?<=%s)(?:%s)(?=%s|$)|(?<=^)(?:%s)(?=%s|$))//g'",
			sepText, join(NULLs, '|'), sepText, sepText, sepText);
	cmd = Sprintf(con(
		"%{reader}s %{path}Q %{skipCommand}s %{cut}s %{filter}s",
		" | sqlite3 -init %{importSql}Q %{output}Q"));
	System(cmd, 1);
	output
}
# <!> unfinished, siphones code from old csv2sqlite function
# <!> unfinished, siphones code from old csv2sqlite function
# NOTE(review): incomplete -- 'method' is undefined in the download.file call
# and the header branch is a stub; do not use as-is.
url2sqlite = function(url, output = tempfile(), header = NULL, skip = NULL, selectColumns = NULL,
	index = NULL, sep = 'T',
	NULLs = NULL, types = list()) {
	# <p> determine header
	tmp1 = tempfile();
	ret = download.file(url, tmp1, method, quiet = FALSE, mode = "w", cacheOK = TRUE);
	#if (ret) stop(sprintf("Download of '%s' failed.", url));
	if (is.null(header)) {
		tmpHeader = tempfile();
	}
}
# <!> 7.1.2015: was qq, but conflicts with QQ-plot function
# Split a string on whitespace into a character vector of its tokens.
qquote = function(s)as.character(fetchRegexpr('([^ ]+)', s, captures = T))
# Copy the result of 'query' on source db 'dbS' into a freshly created table
# in destination db 'dbD' by piping csv output of one sqlite3 process into the
# .import of a second.
sqlite2sqlite = function(dbS, dbD, query, cols, types = list(), index = NULL) {
	sqlCreateTable(cols, types, index, createAt = dbD);
	cmd = sprintf("echo %s | sqlite3 -init %s %s | sqlite3 -init %s %s",
		qs(query),
		qs(writeFile(tempfile(), ".mode csv")),
		qs(dbS),
		qs(writeFile(tempfile(), ".separator ,\n.import \"/dev/stdin\" data")),
		qs(dbD)
	);
	System(cmd, 1);
	dbD
}
# Open an RSQLite connection to the database file at 'path'.
sqliteOpen = function(path) {
	require('RSQLite');
	dbConnect(SQLite(), dbname = path);
}
# Run a SELECT * with an equality WHERE clause built from the named list
# 'query'; defaults to the first table of the database.
sqliteQuery = function(db, query, table = NULL) {
	if (is.null(table)) table = dbListTables(db)[1];
	query = con(sapply(names(query), function(n)Sprintf('%{n}Q = %{v}s', v = qs(query[[n]], force = T))));
	query1 = Sprintf('SELECT * FROM %{table}Q WHERE %{query}s');
	Log(query1, 5);
	dbGetQuery(db, query1);
}
#
# <p> publishing
#
# if (1) {
# .fn.set(prefix = 'results/201404/expressionMonocytes-')
# initPublishing('expressionMonocytes201404', '201405');
# publishFile('results/expressionMonocytesReportGO.pdf');
# }
# Module-private state for the publishing helpers below.
Publishing_env__ <- new.env();
# Initialize publishing: remember project name (and its md5, used as the
# project folder name), the current iteration and the publication root.
initPublishing = function(project, currentIteration, publicationPath = '/home/Library/ProjectPublishing') {
	assign('project', project, Publishing_env__);
	assign('projectMd5', md5sumString(project), Publishing_env__);
	assign('currentIteration', currentIteration, Publishing_env__);
	assign('publicationPath', publicationPath, Publishing_env__);
}
# Compute publication destinations for 'path': project folder, optional
# subdirectory 'into' and target file name 'as' (defaults to path's basename).
publishFctEnv = function(path, into = NULL, as = NULL) with(as.list(Publishing_env__), {
	if (!exists('project')) stop('Publishing system not yet initialized.');
	projectFolder = Sprintf('%{publicationPath}s/%{projectMd5}s');
	prefix = if (is.null(into)) '' else Sprintf('%{into}s/');
	destinationPrefix = Sprintf('%{projectFolder}s/%{currentIteration}s/%{prefix}s');
	destination = Sprintf('%{destinationPrefix}s%{path}s',
		path = if (is.null(as)) splitPath(path)$file else as);
	r = list(projectFolder = projectFolder, prefix = prefix, destination = destination,
		destinationPrefix = destinationPrefix);
	r
})
# Copy 'file' into the publication tree (see publishFctEnv) and make it
# world-readable; returns the destination path.
# NOTE(review): the Logs format string has an unterminated quote and
# '%{file}' lacks a conversion suffix -- confirm against Logs semantics.
publishFile = function(file, into = NULL, as = NULL) with(publishFctEnv(file, into, as), {
	if (!is.null(into)) Dir.create(destination, treatPathAsFile = T);
	Logs('Publishing %{file} --> "%{destination}s', 3);
	Dir.create(splitPath(destination)$dir, recursive = T);
	System(Sprintf("chmod -R a+rX %{dir}s", dir = qs(projectFolder)), 4);
	file.copy(file, destination, overwrite = T);
	Sys.chmod(destination, mode = '0755', use_umask = F);
	destination
})
# Publish a data.frame as a csv file named 'as'; extra arguments are passed to
# write.csv.
# Fix: tempfile()'s 'fileext' is appended verbatim, so it must include the
# dot; 'csv' produced file names like 'publishXXXXcsv'.
publishCsv = function(table, as, ..., into = NULL) {
	file = tempfile('publish', fileext = '.csv');
	write.csv(table, file = file, ...);
	publishFile(file, into, as);
}
# Recursively copy a directory into the publication tree and make it
# world-readable; asSubdir publishes it under its own basename.
publishDir = function(dir, into = NULL, as = NULL, asSubdir = FALSE) with(publishFctEnv('', into, as), {
	if (asSubdir) into = splitPath(dir)$file;
	if (!is.null(into)) {
		destination = splitPath(Sprintf('%{destination}s/%{into}s/'))$fullbase; # remove trailing slash
	}
	Dir.create(destination);
	Logs('Publishing %{dir} --> %{destination}s', 3);
	Dir.create(destination, recursive = T);
	System(Sprintf("chmod -R a+rX %{projectFolder}Q"), 4);
	System(Sprintf("cp -r %{dir}Q/* %{destination}Q"), 4);
	System(Sprintf("chmod -R a+rX %{projectFolder}Q"), 4);
	destination
})
# Bundle 'files' into a zip (symlinked into a staging dir, zipped with -j to
# flatten pathes) and publish the archive as 'as'.
publishAsZip = function(files, as, into = NULL, recursive = FALSE) {
	tmp = tempFileName('publishAsZip', createDir = T, inRtmp = T);
	output = tempFileName('publishAsZip', 'zip', inRtmp = T, doNotTouch = T);
	sapply(files, function(file) {
		File.symlink(splitPath(file)$absolute, Sprintf("%{tmp}s"), replace = F);
		NULL
	});
	recursiveOption = ifelse(recursive, '-r', '');
	System(Sprintf("zip -j %{recursiveOption}s %{output}s %{tmp}s/*"), 2);
	publishFile(output, into = into, as = as);
}
#
# <p> quick pdf generation
#
# Print a named list of R objects (each under a header) into a pdf via
# sink -> a2ps -> ps2pdf. Requires a2ps and ps2pdf on the PATH.
print2pdf = function(elements, file) {
	es = elements;
	tf = tempfile();
	sink(tf);
	nlapply(es, function(n) {
		cat(n);
		cat('\n-------------------------------------------\n');
		print(es[[n]]);
		cat('\n\n');
	})
	sink();
	System(Sprintf('a2ps %{tf}s --columns 1 --portrait --o - | ps2pdf - - > %{output}s', output = qs(file)));
}
#
# <p> workarounds
#
# fix broken install from dir: create tarball -> install_local
# fix broken install from dir: create tarball -> install_local
Install_local = function(path, ...) {
	pkgPath = Sprintf('%{dir}Q/%{base}Q.tgz', dir = tempdir(), base = splitPath(path)$base);
	System(Sprintf('tar czf %{pkgPath}Q %{path}Q'), 2);
	install_local(pkgPath, ...);
}
# misc
# Reset base::last.warning. NOTE(review): baseenv() bindings are locked in
# recent R versions, so this assign may fail -- confirm intended behavior.
clearWarnings = function()assign('last.warning', NULL, envir = baseenv())#
# Rmeta.R
#Wed Jun 3 15:11:27 CEST 2015
#
# <p> Meta-functions
#
#
# Environments
#
# copy functions code adapted from restorepoint R package
# Deep-copy an object: environments and plain lists are copied recursively
# (so later mutation of environments does not alias); everything else is
# returned as-is (R's copy-on-modify covers atomic values).
object.copy = function(obj) {
	# Dealing with missing values
	if (is.name(obj)) return(obj);
	obj_class = class(obj);
	copy =
		if ('environment' %in% obj_class) environment.copy(obj) else
		if (all('list' == class(obj))) list.copy(obj) else
		#if (is.list(obj) && !(is.data.frame(obj))) list.copy(obj) else
		obj;
	return(copy)
}
# Element-wise deep copy of a list.
list.copy = function(l)lapply(l, object.copy);
# Restrict an environment to the names in restrict__ (NULL keeps everything).
environment.restrict = function(envir__, restrict__= NULL) {
	if (!is.null(restrict__)) {
		envir__ = as.environment(List_(as.list(envir__), min_ = restrict__));
	}
	envir__
}
# Deep-copy an environment, optionally restricted to the given names.
environment.copy = function(envir__, restrict__= NULL) {
	as.environment(eapply(environment.restrict(envir__, restrict__), object.copy));
}
# Names of free (non-parameter) variables referenced by function 'f', gathered
# from both default argument expressions and the body; with functions = TRUE,
# names that resolve to functions in f's environment are excluded.
bound_vars = function(f, functions = F) {
	fms = formals(f);
	# variables bound in default arguments
	vars_defaults = unique(unlist(sapply(fms, function(e)all.vars(as.expression(e)))));
	# variables used in the body
	vars_body = setdiff(all.vars(body(f)), names(fms));
	vars = setdiff(unique(c(vars_defaults, vars_body)), c('...', '', '.GlobalEnv'));
	if (functions) {
		vars = vars[!sapply(vars, function(v)is.function(rget(v, envir = environment(f))))];
	}
	vars
}
# Functions never treated as free dependencies (handled by the framework).
bound_fcts_std_exceptions = c('Lapply', 'Sapply', 'Apply');
# Names of free user-defined functions called by 'f': everything that is a
# function from the global environment (not a package, not a primitive),
# minus parameters, plain variables and the exceptions list.
bound_fcts = function(f, functions = F, exceptions = bound_fcts_std_exceptions) {
	fms = formals(f);
	# functions bound in default arguments
	fcts_defaults = unique(unlist(sapply(fms, function(e)all.vars(as.expression(e), functions = T))));
	# functions bound in body
	fcts = union(fcts_defaults, all.vars(body(f), functions = T));
	# remove variables
	#fcts = setdiff(fcts, c(bound_vars(f, functions), names(fms), '.GlobalEnv', '...'));
	fcts = setdiff(fcts, c(bound_vars(f, functions = functions), names(fms), '.GlobalEnv', '...'));
	# remove functions from packages
	fcts = fcts[
		sapply(fcts, function(e) {
			f_e = rget(e, envir = environment(f));
			!is.null(f_e) && environmentName(environment(f_e)) %in% c('R_GlobalEnv', '') && !is.primitive(f_e)
		})];
	fcts = setdiff(fcts, exceptions);
	fcts
}
# Build a self-contained environment for 'f' by resolving all its free
# variables (and, optionally, free user functions -- themselves recursively
# closed over) to their current values; the result is parented to .GlobalEnv.
environment_evaled = function(f, functions = FALSE, recursive = FALSE) {
	vars = bound_vars(f, functions);
	e = nlapply(vars, function(v) rget(v, envir = environment(f)));
	#Log(sprintf('environment_evaled: vars: %s', join(vars, ', ')), 7);
	#Log(sprintf('environment_evaled: functions: %s', functions), 7);
	if (functions) {
		fcts = bound_fcts(f, functions = TRUE);
		fcts_e = nlapply(fcts, function(v){
			#Log(sprintf('environment_evaled: fct: %s', v), 7);
			v = rget(v, envir = environment(f));
			#if (!(environmentName(environment(v)) %in% c('R_GlobalEnv')))
			# close each dependency over its own free variables as well
			v = environment_eval(v, functions = TRUE);
		});
		#Log(sprintf('fcts: %s', join(names(fcts_e))));
		e = c(e, fcts_e);
	}
	#Log(sprintf('evaled: %s', join(names(e))));
	r = new.env();
	lapply(names(e), function(n)assign(n, e[[n]], envir = r));
	#r = if (!length(e)) new.env() else as.environment(e);
	parent.env(r) = .GlobalEnv;
	#Log(sprintf('evaled: %s', join(names(as.list(r)))));
	r
}
# Return 'f' with its environment replaced by the self-contained one above
# (makes the closure serializable / shippable to cluster nodes).
environment_eval = function(f, functions = FALSE, recursive = FALSE) {
	environment(f) = environment_evaled(f, functions = functions, recursive = recursive);
	f
}
#
# Freeze/thaw
#
# Cache/search-path environment used by the freeze/thaw machinery below.
delayed_objects_env = new.env();
delayed_objects_attach = function() {
	attach(delayed_objects_env);
}
delayed_objects_detach = function() {
	detach(delayed_objects_env);
}
# Recursively thaw all elements of a list.
thaw_list = function(l)lapply(l, thaw_object, recursive = T);
# Thaw all bindings of an environment, preserving its parent.
thaw_environment = function(e) {
	p = parent.env(e);
	r = as.environment(thaw_list(as.list(e)));
	parent.env(r) = p;
	r
}
# <i> sapply
# Thaw a single object: ParallelizeDelayedLoad proxies are loaded, lists are
# thawed recursively, anything else passes through unchanged.
# Fix: 'class(o) == ...' inside if() fails for objects with a multi-element
# class vector (an error since R 4.2); inherits() tests class membership
# safely and is equivalent for single-class objects.
thaw_object_internal = function(o, recursive = T, envir = parent.frame()) {
	r = if (inherits(o, 'ParallelizeDelayedLoad')) thaw(o) else
		#if (recursive && inherits(o, 'environment')) thaw_environment(o) else
		if (recursive && inherits(o, 'list')) thaw_list(o) else o;
	r
}
# Public thaw entry point: ensures the shared cache environment is on the
# search path, then delegates to thaw_object_internal.
thaw_object = function(o, recursive = T, envir = parent.frame()) {
	# attach the cache once; search() lists it as 'delayed_objects_env'
	if (all(search() != 'delayed_objects_env')) delayed_objects_attach();
	thaw_object_internal(o, recursive = recursive, envir = envir);
}
#
# <p> backend classes
#
# Generic: materialize a lazily referenced object; `which` optionally names
# the object to extract from a multi-object store.
setGeneric('thaw', function(self, which = NA) standardGeneric('thaw'));
# Placeholder for an object stored in an RData file at `path`; loading is
# deferred until thaw() is called.
# NOTE(review): prototype sets path = NULL for a 'character' slot - confirm
# this validates on all supported R versions.
setClass('ParallelizeDelayedLoad',
	representation = list(
		path = 'character'
	),
	prototype = list(path = NULL)
);
setMethod('initialize', 'ParallelizeDelayedLoad', function(.Object, path) {
	.Object@path = path;
	.Object
});
# Load the RData file referenced by self@path and return the stored object;
# with `which` = NA the first object in the file is returned, otherwise the
# object named `which`.
# <!> fix: removed a large `if (0) {...}` block of dead caching/debugging code
# that could never execute.
setMethod('thaw', 'ParallelizeDelayedLoad', function(self, which = NA) {
	# delayedAssign followed by an immediate return forces `r` right away;
	# kept for parity with the original promise-based implementation
	delayedAssign('r', {
		gc();
		ns = load(self@path);
		object = get(if (is.na(which)) ns[1] else which);
		object
	});
	return(r);
});
# Derive a reproducible RNG seed from an arbitrary tag: the tag (mixed with
# the current .Random.seed, when one exists) is MD5-hashed and the first 8 hex
# digits become the integer seed. Returns list(kind = RNGkind(), seed = int);
# apply it with RNGuniqueSeedSet(). This function has no side effects.
RNGuniqueSeed = function(tag) {
	# mix in the current RNG state so repeated calls with the same tag differ
	if (exists('.Random.seed')) tag = c(.Random.seed, tag);
	md5 = md5sumString(join(tag, ''));
	r = list(
		kind = RNGkind(),
		seed = hex2int(substr(md5, 1, 8))
	);
	r
}
# Restore an RNG configuration produced by RNGuniqueSeed(): re-establish the
# generator and normal-transform kinds, then seed the generator.
RNGuniqueSeedSet = function(seed) {
	kinds = seed$kind;
	RNGkind(kinds[1], kinds[2]);
	set.seed(seed$seed);
}
# Default options for the freeze/thaw helpers below: output directory, files
# to source and libraries to load on thaw, extra objects to serialize, and
# path handling (freeze_relative/freeze_ssh).
FreezeThawControlDefaults = list(
	dir = '.', sourceFiles = c(), libraries = c(), objects = c(), saveResult = T,
	freeze_relative = F, freeze_ssh = T, logLevel = Log.level()
);
# Thaw and execute a call frozen by freezeCallEncapsulated: load the stored
# callSpecification from freeze_file into the global environment, re-establish
# libraries / sourced files / log level / RNG state, then invoke the stored
# function on the (transformed) argument list. Returns the call's result,
# which is also saved to freeze_control$output when configured.
thawCall = function(
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag)) {
	load(freeze_file, envir = .GlobalEnv);
	r = with(callSpecification, {
		for (library in freeze_control$libraries) {
			eval(parse(text = sprintf('library(%s)', library)));
		}
		for (s in freeze_control$sourceFiles) source(s, chdir = T);
		Log.setLevel(freeze_control$logLevel);
		# <!> fix: RNGuniqueSeed() only computes a seed and has no side effect;
		# restoring the stored RNG state requires RNGuniqueSeedSet()
		if (!is.null(freeze_control$rng)) RNGuniqueSeedSet(freeze_control$rng);
		if (is.null(callSpecification$freeze_envir)) freeze_envir = .GlobalEnv;
		# <!> freeze_transformation must be defined by the previous source/library calls
		transformation = eval(parse(text = freeze_control$thaw_transformation));
		r = do.call(eval(parse(text = f)), transformation(args), envir = freeze_envir);
		#r = do.call(f, args);
		if (!is.null(freeze_control$output)) save(r, file = freeze_control$output);
		r
	});
	r
}
# Build the shell command line that executes a frozen call remotely via R.pl:
# it loads the frozen wrapper from `freeze_file` and evaluates it.
# <!> fix: removed a leftover browser() debugging call that suspended every
# invocation.
# NOTE(review): `remoteLogLevel` is accepted but unused - confirm intent.
frozenCallWrap = function(freeze_file, freeze_control = FreezeThawControlDefaults,
	logLevel = Log.level(), remoteLogLevel = logLevel)
	with(merge.lists(FreezeThawControlDefaults, freeze_control), {
	sp = splitPath(freeze_file, ssh = freeze_ssh);
	file = if (freeze_relative) sp$file else sp$path;
	#wrapperPath = sprintf("%s-wrapper.RData", splitPath(file)$fullbase);
	r = sprintf("R.pl --template raw --no-quiet --loglevel %d --code 'eval(get(load(\"%s\")[[1]]))' --",
		logLevel, file);
	r
})
# Fetch the result of a previously executed frozen call: `file` holds the
# callSpecification, whose freeze_control$output names the saved result file.
frozenCallResults = function(file) {
	callSpecification = NULL;	# define callSpecification
	load(file);
	get(load(callSpecification$freeze_control$output)[[1]]);
}
# Serialize ("freeze") an encapsulated call (as produced by
# callWithFunctionArgs / encapsulateCall) into an RData file that thawCall can
# later execute, possibly on another host. Returns the path written.
# `thaw_transformation` is deparsed here and re-parsed at thaw time, then
# applied to the argument list before the call.
freezeCallEncapsulated = function(call_,
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag),
	freeze_save_output = F, freeze_objects = NULL, thaw_transformation = identity)
	with(merge.lists(FreezeThawControlDefaults, freeze_control), {
	sp = splitPath(freeze_file, ssh = freeze_ssh);
	# result file placed next to the frozen call (relative or absolute path)
	outputFile = if (freeze_save_output)
		sprintf("%s_result.RData", if (freeze_relative) sp$base else sp$fullbase) else
		NULL;
	callSpecification = list(
		f = deparse(call_$fct),
		#f = freeze_f,
		args = call_$args,
		freeze_envir = if (is.null(call_$envir)) new.env() else call_$envir,
		freeze_control = list(
			sourceFiles = sourceFiles,
			libraries = libraries,
			output = outputFile,
			rng = freeze_control$rng,
			logLevel = freeze_control$logLevel,
			thaw_transformation = deparse(thaw_transformation)
		)
	);
	thawFile = if (freeze_relative) sp$file else sp$path;
	# wrapper call evaluated remotely; see frozenCallWrap
	callWrapper = call('thawCall', freeze_file = thawFile);
	#Save(callWrapper, callSpecification, thawCall, file = file);
	#Save(c('callWrapper', 'callSpecification', 'thawCall', objects),
	#	file = freeze_file, symbolsAsVectors = T);
	#Save(c(c('callWrapper', 'callSpecification', 'thawCall'), objects),
	Save(c('callWrapper', 'callSpecification', 'thawCall', freeze_objects),
		file = freeze_file, symbolsAsVectors = T);
	freeze_file
})
# <!> assume matched call
# <A> we only evaluate named args
# Force the arguments of an encapsulated call in its captured environment so
# the call can later run detached from that environment. With env_eval the
# function's closure is additionally flattened via environment_eval.
callEvalArgs = function(call_, env_eval = FALSE) {
	#if (is.null(call_$envir__) || is.null(names(call_$args))) return(call_);
	#if (is.null(call_$envir) || !length(call_$args)) return(call_);
	# <p> evaluate args
	if (length(call_$args)) {
		args = call_$args;
		callArgs = lapply(1:length(args), function(i)eval(args[[i]], envir = call_$envir));
		# <i> use match.call instead
		# NOTE(review): setdiff() may drop/reorder names relative to the
		# evaluated values when '...' is not the last argument - confirm callers
		names(callArgs) = setdiff(names(call_$args), '...');
		call_$args = callArgs;
	}
	if (env_eval) {
		call_$fct = environment_eval(call_$fct, functions = FALSE, recursive = FALSE);
	}
	# <p> construct return value
	#callArgs = lapply(call_$args, function(e){eval(as.expression(e), call_$envir)});
	call_
}
#callWithFunctionArgs = function(f, args, envir__ = parent.frame(), name = NULL) {
# Wrap a function and an argument list into the canonical encapsulated-call
# form (list(fct, envir, args, name)) used by the freeze/thaw machinery.
# NOTE(review): `envir__` is accepted but the captured environment is always
# environment(f__) - confirm intent.
callWithFunctionArgs = function(f__, args__, envir__ = environment(f__), name = NULL, env_eval = FALSE) {
	# <!> fix: the evaluated copy must replace f__; the original assigned it to
	# an unused local `f`, silently making env_eval a no-op
	if (env_eval) f__ = environment_eval(f__, functions = FALSE, recursive = FALSE);
	call_ = list(
		fct = f__,
		envir = environment(f__),
		args = args__,
		name = name
	);
	call_
}
# Convenience front end: freeze a direct function call `freeze_f(...)`.
# Captures the argument list, wraps it via callWithFunctionArgs and delegates
# to freezeCallEncapsulated; returns the path of the frozen RData file.
# NOTE(review): `name` is taken from sys.call()[[2]], i.e. the expression of
# the first argument - confirm this is the intended label for indirect calls.
freezeCall = function(freeze_f, ...,
	freeze_control = FreezeThawControlDefaults,
	freeze_tag = 'frozenFunction', freeze_file = sprintf('%s/%s.RData', freeze_control$dir, freeze_tag),
	freeze_save_output = F, freeze_envir = parent.frame(), freeze_objects = NULL, freeze_env_eval = F,
	thaw_transformation = identity) {
	# args = eval(list(...), envir = freeze_envir)
	call_ = callWithFunctionArgs(f = freeze_f, args = list(...),
		envir__ = freeze_envir, name = as.character(sys.call()[[2]]), env_eval = freeze_env_eval);
	freezeCallEncapsulated(call_,
		freeze_control = freeze_control, freeze_tag = freeze_tag,
		freeze_file = freeze_file, freeze_save_output = freeze_save_output, freeze_objects = freeze_objects,
		thaw_transformation = thaw_transformation
	);
}
# Turn an unevaluated call object into the canonical encapsulated form
# (list(fct, envir, args, name)). Arguments are matched against the callee's
# formals (except for primitives, which match.call cannot handle) and are
# optionally evaluated in `envir__`.
# NOTE(review): `unbound_functions` is accepted but unused - confirm intent.
encapsulateCall = function(.call, ..., envir__ = environment(.call), do_evaluate_args__ = FALSE,
	unbound_functions = F) {
	# function body of call
	name = as.character(.call[[1]]);
	# NOTE(review): get(name) resolves in the current scope, not in envir__ -
	# confirm this is intended for locally shadowed functions
	fct = get(name);
	callm = if (!is.primitive(fct)) {
		callm = match.call(definition = fct, call = .call);
		as.list(callm)[-1]
	} else as.list(.call)[-1];
	args = if (do_evaluate_args__) {
		nlapply(callm, function(e)eval(callm[[e]], envir = envir__))
	} else nlapply(callm, function(e)callm[[e]])
	# unbound variables in body fct
	#unbound_vars =
	call_ = list(
		fct = fct,
		envir = envir__,
		#args = as.list(sys.call()[[2]])[-1],
		args = args,
		name = name
	);
	call_
}
#
# </p> freeze/thaw functions
#
#
# Rgraphics.R
#Mon 27 Jun 2005 10:52:17 AM CEST
require('grid');
#
# <p> unit model
#
# base unit is cm
# Generic: conversion factor of a unit to the base unit (cm).
setGeneric("factorToBase", function(this) standardGeneric("factorToBase"));
# Generic: convert a united value from unit A into unit B.
setGeneric("fromUnitToUnit", function(thisA, thisB) standardGeneric("fromUnitToUnit"));
# Abstract base class for united values; `value` holds the magnitude.
setClass('unitGeneric', representation = list(value = 'numeric'), prototype = list(value = as.numeric(NA)));
setMethod('initialize', 'unitGeneric', function(.Object, value = as.numeric(NA)) {
	.Object@value = value;
	.Object
});
# Generic conversion: scale by the ratio of the two units' base factors; the
# second argument only supplies the target class.
setMethod('fromUnitToUnit', c('unitGeneric', 'unitGeneric'), function(thisA, thisB)
	new(class(thisB), value = thisA@value * factorToBase(thisA) / factorToBase(thisB)));
# Concrete units: centimetres (base), inches, dots at 150/200/300 dpi, points.
# NOTE(review): cm(1) presumably is graphics::cm (cm per inch = 2.54) - confirm.
setClass('unitCm', contains = 'unitGeneric');
setMethod('initialize', 'unitCm', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitCm', function(this)1);
setClass('unitInch', contains = 'unitGeneric');
setMethod('initialize', 'unitInch', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitInch', function(this)cm(1));
setClass('unitDpi150', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi150', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi150', function(this)cm(1)/150);
setClass('unitDpi200', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi200', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi200', function(this)cm(1)/200);
setClass('unitDpi300', contains = 'unitGeneric');
setMethod('initialize', 'unitDpi300', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitDpi300', function(this)cm(1)/300);
setClass('unitPoints', contains = 'unitGeneric');
setMethod('initialize', 'unitPoints', function(.Object, value)callNextMethod(.Object, value = value));
setMethod('factorToBase', 'unitPoints', function(this)cm(1)/72);
# Construct a united value, e.g. valueU(21, 'cm') -> unitCm instance.
# Bound under both names: valueU (short) and valueUnited.
valueU = valueUnited = function(value, unit) {
	class = getClass(Sprintf('unit%{unit}u'));
	new(class, value = value)
}
# Convert a united value into another unit; ToUnit returns the bare magnitude.
toUnit = function(value, unit)fromUnitToUnit(value, valueU(as.numeric(NA), unit));
ToUnit = function(value, unit)toUnit(value, unit)@value;
#
# </p> unit model
#
# Convert centimetres to inches (1 in = 2.54 cm).
cm2in = function(i) i / 2.54
# Sample function `f` on a regular grid over `interval`. Either `count`
# (number of grid points before the right endpoint) or `steps` (grid spacing)
# may be supplied; the other is derived. Returns data.frame(x, y).
# NOTE: `...` is accepted but not forwarded to `f` (kept for compatibility).
plotPoints = function(f=sin, interval=c(0,1), count = 1e2, steps = NULL, ...) {
	span = interval[2] - interval[1];
	if (is.null(steps)) {
		steps = span / (count + 1);
	} else {
		count = as.integer(span / steps);
	}
	xs = c(interval[1] + (0:(count - 1)) * steps, interval[2]);
	ys = Vectorize(function(x)f(x))(xs);
	data.frame(x = xs, y = ys)
}
# Sample `f` via plotPoints and draw the result as a line; with points = TRUE
# the curve is added to an existing plot (points(..., type = "l")), otherwise
# a new plot is started.
plotRobust = function(f=sin, interval=c(0,1), count = 1e2, steps = NULL, points = F, ...) {
	# <!> fix: `points` is a display flag for this function only; it was passed
	# positionally into plotPoints' `...`
	pts = plotPoints(f, interval, count, steps, ...);
	if (points) {
		points(pts$x, pts$y, type="l");
	} else {
		plot(pts$x, pts$y, type="l");
	}
}
# Thin wrapper around plotRobust using a step-size based grid (default 0.05).
robustPlot = function(f=sin, interval=c(0,1), steps = 0.05, points = F, ...) {
	plotRobust(f = f, interval = interval, steps = steps, points = points, ...);
}
#
# <p> vector functions
#
# Euclidean (L2) norm of a vector.
vNorm = function(v) sqrt(sum(v * v))
# Scale `v` to unit length; returns NA for the zero vector.
# Bound under both historical names (vToNorm, toNorm).
vToNorm = toNorm = function(v) {
	len = vNorm(v);
	if (len == 0) NA else v / len
}
# Perpendicular vector in 2D: (x, y) -> (-y, x).
vPerp = function(v) {
	w = rev(v);
	w * c(-1, 1)
}
# Unit normal of a 2D vector, i.e. the normalized perpendicular.
vNormal = function(v) vToNorm(vPerp(v))
#
# <p> graph drawing
#
# draw wedges
# x: x-coordinates
# y: y-coordinates
# w: widthes
# Draw wedges (tapered quadrilaterals) between point pairs using grid.
# Either pass x0 as a data.frame with columns x0,y0,x1,y1, or supply the four
# coordinate vectors separately. `width` is a 2-column matrix of start/end
# widths per wedge; defaults to `defaultWidth` everywhere.
wedge = function(x0, y0 = NULL, x1 = NULL, y1 = NULL, width = NULL, col = "black", ..., defaultWidth = .1) {
	d = if (!is.null(y0)) data.frame(x0, y0, x1, y1) else x0;
	if (is.null(width)) width = matrix(defaultWidth, ncol = 2, nrow = dim(x0)[1]);
	pts = matrix(sapply(1:dim(d)[1], function(i) {
		p1 = d[i, c("x0", "y0")];
		p2 = d[i, c("x1", "y1")];
		w = width[i, ];
		n = vNormal(p2 - p1); # normal of line
		# four corners: offset each endpoint by half its width along the normal
		c(p1 + n * w[1]/2, p1 - n * w[1]/2, p2 - n * w[2]/2, p2 + n * w[2]/2)
	}), ncol = 2, byrow = T);
	grid.polygon(x = pts[, 1], y = pts[, 2], id.lengths = rep(4, dim(d)[1]), gp = gpar(fill=1, col = col))
}
#
# <p> ggplot2
#
#library('ggplot2');
# Plot the function `f` over [from, to], faceted by the variables of the
# `facets` formula: for each combination of facet-variable levels found in
# `data`, the levels are injected into f's environment before evaluation.
# NOTE(review): `qplot.call`/`qplotArgs` are computed but never used - likely
# a leftover from argument forwarding; confirm before removing.
qplotFaceted = function(f, from = 0, to = 1, data, facets, geom = 'line', ..., by = 0.02) {
	qplot.call = match.call(qplot);
	vars = formula.vars(facets);
	varLevels = unique(data[, vars, drop = F]);
	print(varLevels);
	xs = seq(from, to, by = by);
	r = apply(varLevels, 1, function(r) {
		# fresh child environment so facet variables do not leak between levels
		environment(f) = f.env = new.env(parent = environment(f));
		fl = as.list(r);
		for (n in names(fl)) assign(n, fl[[n]], envir = f.env);
		ys = f(xs);
		d = data.frame(x = xs, y = ys, fl);
		d
	});
	d = rbindDataFrames(r);
	qplotArgs = c(as.list(qplot.call[-1]));
	p = qplot(x, y, data = d, facets = facets, geom = geom, ...);
	p
}
#
# plot to file
#
# Default sizing for plot_file (width/height in inches, dpi for raster output).
plot_file_DefaultOptions = list(width = 12, height = 12, dpi = 200);
# Write a plot to `file`: ggplot objects go through ggsave(); anything else is
# treated as an unevaluated plotting expression, run between device(file) and
# dev.off(). The device function is looked up from the file extension.
# Returns the ggplot object, or an encapsulated copy of the call expression.
plot_file = function(code_or_object, file = NULL, options = list(), ..., envir = parent.frame()) {
	# capture the caller's first argument expression for deferred evaluation
	call = sys.call()[[2]];
	if (is.null(file)) file = tempFileName('plot_file', 'pdf', inRtmp = T);
	p = if (any(class(code_or_object) == 'ggplot')) {
		o = merge.lists(plot_file_DefaultOptions, options, list(...));
		with(o, { ggsave(code_or_object, file = file, width = width, height = height, dpi = dpi) });
		code_or_object
	} else {
		device = get(splitPath(file)$ext);
		device(file, ...);
		eval(call, envir = envir);
		dev.off();
		encapsulateCall(call, envir__ = envir);
	}
	p
}
#
# <p> special plots
#
# Uniform QQ plot of p-values on a transformed scale (default -log10) with a
# pointwise (1 - alpha) confidence band from the beta distribution of uniform
# order statistics. Returns a ggplot object.
ggplot_qqunif = function(p.values, alpha = .05, fontsize = 6,
	tr = function(x)-log(x, 10), trName = '-log10(P-value)', colorCI = "#000099") {
	p.values = tr(sort(p.values));
	N = length(p.values);
	Ns = 1:N;
	# j-th order statistic from a uniform(0,1) sample has beta(j,n-j+1) distribution
	# (Casella & Berger, 2002, 2nd edition, pg 230, Duxbury)
	ciU = tr(qbeta(1 - alpha/2, Ns, N - Ns + 1));
	ciL = tr(qbeta(	alpha/2, Ns, N - Ns + 1));
	d = data.frame(theoretical = tr(Ns/N), ciU = ciU, ciL = ciL, p.value = p.values, colorCI = colorCI);
	p = ggplot(d) +
		geom_line(aes(x = theoretical, y = ciU, colour = colorCI)) +
		geom_line(aes(x = theoretical, y = ciL, colour = colorCI)) +
		geom_point(aes(x = theoretical, y = p.value), size = 1) +
		theme_bw() + theme(legend.position = 'none') + coord_cartesian(ylim = c(0, max(p.values)*1.1)) +
		scale_y_continuous(name = trName) +
		theme(text = element_text(size = fontsize));
	p
}
#ggplot_qqunif(seq(1e-2, 3e-2, length.out = 1e2))
# Viewport positioned at row x / column y of the current grid layout.
vp_at = function(x, y)viewport(layout.pos.row = x, layout.pos.col = y);
# Arrange grid/ggplot objects on a fresh page; `coords` is a 2-column matrix
# of (row, column) positions, one row per plot.
plot_grid_grid = function(plots, coords) {
	# <p> do plotting
	grid.newpage();
	# <!> layout might not be respected
	nrow = max(coords[, 1]);
	ncol = max(coords[, 2]);
	pushViewport(viewport(layout = grid.layout(nrow, ncol)));
	sapply(1:length(plots), function(i) {
		print(plots[[i]], vp = vp_at(coords[i, 1], coords[i, 2]));
	});
}
# Arrange base-graphics plots via layout(); each element of `plots` is an
# environment carrying a delayed plotting expression under `envirPlotVar`
# (see delayedPlot).
plot_grid_base = function(plots, coords, envirPlotVar = 'plot') {
	# <p> do plotting
	coordMat0 = matrix(0, nrow = max(coords[, 1]), ncol = max(coords[, 2]));
	coordMat = matrix.assign(coordMat0, coords, 1:length(plots));
	layout(coordMat);
	sapply(1:length(plots), function(i) {
		# forcing the promise draws the plot into the current layout cell
		eval(get(envirPlotVar, plots[[i]]));
	});
#	if (is.environment(plots[[i]])) eval(get(envirPlotVar, plots[[i]])) else print(plots[[i]]);
}
# Dispatcher: lay `plots` out on an nrow x ncol grid; a missing dimension is
# derived from the plot count (default: one column). Environments (delayed
# base plots) are routed to plot_grid_base, everything else to plot_grid_grid.
# `mapper` may supply custom (row, col) coordinates per plot index.
plot_grid = function(plots, nrow, ncol, byrow = T, mapper = NULL, envirPlotVar = 'plot') {
	if (missing(nrow)) {
		if (missing(ncol)) {
			ncol = 1;
			nrow = length(plots);
		} else {
			nrow = ceiling(length(plots) / ncol);
		}
	} else if (missing(ncol)) ncol = ceiling(length(plots) / nrow);
	coords = if (is.null(mapper))
		merge.multi(1:nrow, 1:ncol, .first.constant = byrow) else
		mapper(1:length(plots));
	if (is.environment(plots[[1]]))
		plot_grid_base(plots, coords, envirPlotVar) else
		plot_grid_grid(plots, coords)
}
# Render `plots` into a (possibly multi-page) PDF at `path`. Width/height may
# be bare numbers (interpreted as inches) or united values (valueU).
# NperPage splits the plot list into pages; other arguments go to plot_grid.
plot_grid_to_path = function(plots, ..., path,
	width = valueU(21, 'cm'), height = valueU(29.7, 'cm'), NperPage = NULL, pdfOptions = list(paper = 'a4')) {
	# <!> fix: is.numeric() instead of class(.) == 'numeric' so integer sizes
	# (e.g. 21L) are also wrapped into units instead of failing later in ToUnit()
	if (is.numeric(width)) width = valueU(width, 'inch');
	if (is.numeric(height)) height = valueU(height, 'inch');
	Nplots = length(plots);
	# pages: list of plot-index vectors, one element per output page
	pages = if (!is.null(NperPage)) {
		Npages = ceiling(Nplots / NperPage);
		lapply(1:Npages, function(i) {
			Istrt = (i - 1) * NperPage + 1;
			Istop = min(i * NperPage, Nplots);
			Istrt:Istop
		})
	} else list(1:length(plots));
	pdfArgs = c(list(
		file = path, onefile = TRUE, width = ToUnit(width, 'inch'), height = ToUnit(height, 'inch')
	), pdfOptions);
	do.call(pdf, pdfArgs);
	lapply(pages, function(plotIdcs) {
		plot_grid(plots[plotIdcs], ...);
	});
	dev.off();
}
# One scatter plot per pair of adjacent columns of `fts`, coloured by
# `factor`; returns the list of ggplot objects.
# <!> fix: removed redundant eval({...}) wrappers around plain column access.
plot_adjacent = function(fts, factor, N = ncol(fts)) {
	ns = names(fts);
	ps = lapply(1:(N - 1), function(i){
		x = fts[, i];
		y = fts[, i + 1];
		qplot(x, y, color = as.factor(factor), xlab = ns[i], ylab = ns[i + 1]);
	});
	ps
}
# Render plots into a multi-page PDF with NperPage plots per page.
# Two of nrow/ncol/NperPage should be supplied; the third is derived.
# NOTE(review): if fewer than two of the three are given the missing()
# cascade fails - confirm callers always pass two.
plot_grid_pdf = function(plots, file, nrow, ncol, NperPage, byrow = T, mapper = NULL,
	pdfOptions = list(paper = 'a4')) {
	Nplots = length(plots);
	if (missing(nrow)) nrow = NperPage / ncol;
	if (missing(ncol)) ncol = NperPage / nrow;
	if (missing(NperPage)) NperPage = ncol * nrow;
	Npages = ceiling(Nplots / NperPage);
	do.call(pdf, c(list(file = file), pdfOptions));
	sapply(1:Npages, function(i) {
		Istrt = (i - 1) * NperPage + 1;
		Istop = min(i * NperPage, Nplots);
		plot_grid(plots[Istrt:Istop], nrow, ncol, byrow = byrow, mapper = mapper);
	});
	dev.off();
}
#
# <p> Kaplan-Meier with ggplot
#
# stolen from the internet
# Convert a survival::survfit object into a plain data.frame suitable for
# ggplot: one row per event time plus synthetic rows at time 0 with survival 1
# so curves start in the top-left corner. Handles single-group and stratified
# fits (adding a `strata` factor column for the latter).
# NOTE(review): f.survfit[f.i]$n relies on survfit's `[` subsetting by stratum
# - confirm against the installed survival version.
createSurvivalFrame <- function(f.survfit){
	# initialise frame variable
	f.frame <- NULL
	# check if more then one strata
	if(length(names(f.survfit$strata)) == 0){
		# create data.frame with data from survfit
		f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event,
			n.censor = f.survfit$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower)
		# create first two rows (start at 1)
		f.start <- data.frame(time=c(0, f.frame$time[1]), n.risk=c(f.survfit$n, f.survfit$n), n.event=c(0,0),
			n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1))
		# add first row to dataset
		f.frame <- rbind(f.start, f.frame)
		# remove temporary data
		rm(f.start)
	} else {
		# create vector for strata identification
		f.strata <- NULL
		for(f.i in 1:length(f.survfit$strata)){
			# add vector for one strata according to number of rows of strata
			f.strata <- c(f.strata, rep(names(f.survfit$strata)[f.i], f.survfit$strata[f.i]))
		}
		# create data.frame with data from survfit (create column for strata)
		f.frame <- data.frame(time=f.survfit$time, n.risk=f.survfit$n.risk, n.event=f.survfit$n.event, n.censor = f.survfit
$n.censor, surv=f.survfit$surv, upper=f.survfit$upper, lower=f.survfit$lower, strata=factor(f.strata))
		# remove temporary data
		rm(f.strata)
		# create first two rows (start at 1) for each strata
		for(f.i in 1:length(f.survfit$strata)){
			# take only subset for this strata from data
			f.subset <- subset(f.frame, strata==names(f.survfit$strata)[f.i])
			f.start <- data.frame(time=c(0, f.subset$time[1]), n.risk=rep(f.survfit[f.i]$n, 2), n.event=c(0,0), n.censor=c(0,0), surv=c(1,1), upper=c(1,1), lower=c(1,1), strata=rep(names(f.survfit$strata)[f.i], 2))
			# add first two rows to dataset
			f.frame <- rbind(f.start, f.frame)
			# remove temporary data
			rm(f.start, f.subset)
		}
		# reorder data
		f.frame <- f.frame[order(f.frame$strata, f.frame$time), ]
		# rename row.names
		rownames(f.frame) <- NULL
	}
	# return frame
	return(f.frame)
}
# define custom function to draw kaplan-meier curve with ggplot
# Draw Kaplan-Meier curves from a frame produced by createSurvivalFrame.
# f.CI: "default" draws confidence bands only for unstratified data;
# TRUE/FALSE force them on/off. Censored observations are marked by points
# (shape f.shape); extra `layers` and a `title` may be appended.
qplot_survival = function(f.frame, f.CI = "default", f.shape = 3, ..., title = NULL, layers = NULL){
	# use different plotting commands dependig whether or not strata's are given
	p = if("strata" %in% names(f.frame) == FALSE) {
		# confidence intervals are drawn if not specified otherwise
		if(f.CI == "default" | f.CI == TRUE ){
			# create plot with 4 layers (first 3 layers only events, last layer only censored)
			# hint: censoring data for multiple censoring events at timepoint are overplotted
			# (unlike in plot.survfit in survival package)
			# <!> fix: geom_step's argument is `direction`; the misspelt
			# `directions` was silently swallowed by `...` (the intended "hv"
			# happens to be the default, so rendering is unchanged)
			ggplot(data=f.frame, ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_step(aes(x=time, y=upper), direction="hv", linetype=2) +
				geom_step(aes(x=time,y=lower), direction="hv", linetype=2) +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		} else {
			# create plot without confidence intervalls
			ggplot(data=f.frame) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		}
	} else {
		# without CI
		if(f.CI == "default" | f.CI == FALSE){
			ggplot(data=f.frame, aes(group=strata, colour=strata), ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		} else {
			ggplot(data=f.frame, aes(colour=strata, group=strata), ...) +
				geom_step(aes(x=time, y=surv), direction="hv") +
				geom_step(aes(x=time, y=upper), direction="hv", linetype=2, alpha=0.5) +
				geom_step(aes(x=time,y=lower), direction="hv", linetype=2, alpha=0.5) +
				geom_point(data=subset(f.frame, n.censor==1), aes(x=time, y=surv), shape=f.shape)
		}
	}
	if (!is.null(title)) p = p + labs(title = title);
	if (!is.null(layers)) p = p + layers;
	p
}
# Bin `x` into Nbins equal-frequency (quantile-based) bins; returns a factor
# with labels 1..Nbins. Heavily tied data yielding duplicate quantiles will
# make cut() fail, as in the original.
quantileBinning = function(x, Nbins) {
	binBreaks = quantile(x, probs = seq(0, 1, length.out = Nbins + 1));
	cut(x, breaks = binBreaks, labels = seq_len(Nbins), include.lowest = TRUE)
}
# Kaplan-Meier plot for formula `f1` on `d1`, stratified by the (single)
# right-hand-side variable; a continuous stratifier may first be binned into
# `levels` quantile bins. The title carries the log-rank test P-value.
# Returns list(plot = ggplot, level = stratum levels).
kaplanMeierStrat = function(d1, f1, levels = NULL, title = NULL) {
	# <i> only allow one covariate
	stratVar = all.vars(formula.rhs(f1))[1];
	if (!is.null(levels)) {
		d1[[stratVar]] = as.factor(quantileBinning(d1[[stratVar]], levels));
	}
	stratValue = levels(d1[[stratVar]]);
	# <p> log-rank test
	lr = survdiff(as.formula(f1), data = d1);
	p.lr = pchisq(lr$chisq, df = dim(lr$n) - 1, lower.tail = F)
	# <p> kaplan-meyer
	fit = survfit(as.formula(f1), data = d1);
	fit.frame = createSurvivalFrame(fit);
	titleCooked = if (is.null(title))
		sprintf('%s, [P = %.2e]', stratVar, p.lr) else
		Sprintf('%{title}s, [P = %.2e]', p.lr)
	p = qplot_survival(fit.frame, F, 20, title = titleCooked,
		layers = theme_bw());
	list(plot = p, level = stratValue)
}
# One Kaplan-Meier plot per combination of the stratification variables in
# `strata`; returns the list of plots from kaplanMeierStrat.
# NOTE(review): `combine` is accepted but unused - confirm intent.
kaplanMeierNested = function(d, f1, strata, combine = FALSE) {
	dStrat = d[, strata];
	cbs = valueCombinations(dStrat);
	plots = apply(cbs, 1, function(r) {
		# rows of d matching this stratum combination
		# <!> fix: removed duplicate, unused selection `sel1`
		sel = nif(sapply(1:nrow(d), function(i)all(dStrat[i,] == r)));
		# <!> fix: fail loudly on an empty stratum instead of entering the
		# debugger (leftover browser() removed)
		if (sum(sel) == 0) stop(sprintf('kaplanMeierNested: empty stratum [%s]',
			paste(names(r), r, sep = '=', collapse = ', ')));
		#if (sum(sel) == 0) return(NULL);
		dSel = d[sel, , drop = F];
		N = sum(sel);
		title = Sprintf('Stratum: %{stratum}s, N = %{N}d',
			stratum = paste(names(r), r, sep = '=', collapse = ', '));
		kaplanMeierStrat(dSel, f1, title = title);
	});
	plots
}
#
# <p> histograms
#
# Colour-blind friendly (Okabe-Ito style) palette.
histogram_colors = c("#000000", "#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2", "#D55E00", "#CC79A7");
# NOTE(review): this second assignment overrides the palette above - confirm
# which palette is meant to be in effect.
histogram_colors = c('red', 'blue', 'green', 'yellow');
#dayColor = list(`0` = 'red', `1` = 'blue', `3` = 'green', `8` = 'yellow');
# Overlay per-group histograms of the response of `f1` (the grouping variable
# is the formula's first covariate) with translucent fills; the legend is
# built manually from off-screen pseudo data since fixed-fill geom_histogram
# layers generate none.
# NOTE(review): the default for `groupNames` references `groups`, which is not
# defined in this scope, and the argument appears unused - confirm.
histogram_overlayed = function(data, f1,
	groupNames = levels(groups), palette = histogram_colors, log10 = T,
	x_lab = formula.response(f1), title = 'histogram', alpha = .3, breaks = 30) {
	# <p> column names, range
	xn = formula.response(f1);
	gn = formula.covariates(f1);
	lvls = levels(data[[gn]]);
	tab = table(cut(data[[xn]], breaks));
	#mx = if (log10) 10^ceiling(log10(max(tab))) else max(tab);
	mx = max(tab);
	# <p> create legend using pseudo data (shifted out of view)
	dp = Df(x = rep(0, length(lvls)), y = rep(mx + 1, length(lvls)), group = lvls);
	p = ggplot(dp, aes(x = x)) +
		geom_rect(data = dp, aes(xmin = x, xmax = x + 1, ymin = y, ymax = y + 1, fill = group)) +
		scale_fill_manual(name = gn, values = palette);
	# <p> histograms
	for (i in 1:length(lvls)) {
		p = p + geom_histogram(data = data.frame(x = data[[xn]][data[[gn]] == lvls[i]]),
			fill = palette[i], alpha = alpha);
	}
	# <p> log transform
	if (log10) p = p + scale_y_continuous(trans = 'log10') + coord_cartesian(ylim = c(1, mx));
	# <p> final formatting
	p = p + ggtitle(title) + xlab(x_lab);
	p
}
#'@param data: data frame or list
# Overlayed translucent histograms, one per column/element of `data`, with a
# manual legend built from off-screen pseudo data. `relative` plots fractions
# instead of counts.
# NOTE(review): geom_histogram's `origin` argument and the ..count.. notation
# are deprecated in recent ggplot2 (boundary= / after_stat(count)) - confirm
# the targeted ggplot2 version.
histograms_alpha = function(data, palette = histogram_colors, log10 = F,
	x_lab = '', title = 'histogram', alpha = .3, origin = NULL, binwidth = NULL, relative = FALSE,
	textsize = 20) {
	# <p> preparation
	N = length(as.list(data));
	columns = names(data);
	mx = max(unlist(as.list(data)), na.rm = T);
	mn = min(unlist(as.list(data)), na.rm = T);
	# <p> create legend using pseudo data (shifted out of view)
	dp = Df(x = rep(2*mx + 2, N), y = rep(0, N), group = columns);
	p = ggplot(dp, aes(x = x)) +
		geom_rect(data = dp, aes(xmin = x, xmax = x + .01, ymin = y, ymax = y + .01, fill = group)) +
		scale_fill_manual(name = dp$group, values = palette);
	# <p> histograms
	for (i in 1:N) {
		col = columns[i];
		dfH = data.frame(x = data[[col]]);
		p = p + if (relative)
			geom_histogram(data = dfH, aes(y=..count../sum(..count..)),
				fill = palette[i], alpha = alpha, binwidth = binwidth, origin = origin
			) else
			geom_histogram(data = dfH, fill = palette[i], alpha = alpha, binwidth = binwidth, origin = origin)
	}
	# <p> log transform
	if (log10) p = p + scale_y_continuous(trans = 'log10') + coord_cartesian(ylim = c(1, mx));
	# <p> final formatting
	p = p + coord_cartesian(xlim = c(mn - 1, mx + 1)) + ggtitle(title) + xlab(x_lab) + theme_bw() +
		theme(text = element_text(size = textsize));
	if (relative) p = p + ylab('percentage');
	p
}
#
# <p> saving of plots
#
# base unit is 600dpi
# Conversion table between physical units; the internal base unit is dots at
# 600 dpi (`from`: unit -> base, `to`: base -> unit).
units_conv = list(
	cm = list(from = function(cm)(cm/2.54*600), to = function(b)(b/600*2.54)),
	points = list(from = function(points)(points/72*600), to = function(b)(b/600*72)),
	inch = list(from = function(i)(i*600), to = function(b)(b/600)),
	dpi150 = list(from = function(dpi)(dpi/150*600), to = function(b)(b*150/600))
);
# Preferred output unit per graphics device type.
units_default = list(jpeg = 'dpi150', pdf = 'cm', png = 'points');
# Low-level plot writer: open the device named by `type`, render `object`
# (a ggplot/plot object is print()ed; anything else is treated as an
# expression and evaluated in `envir`), then close the device. Width/height
# are united values converted into the device's preferred unit (units_default).
# NOTE(review): `options` and `unit` parameters are currently unused - confirm.
plot_save_raw = function(object, ..., width = 20, height = 20, plot_path = NULL,
	type = NULL, options = list(), unit = 'cm', unit_out = NULL, envir = parent.frame()) {
	device = get(type);
	if (is.null(unit_out)) unit_out = units_default[[type]];
	width = toUnit(width, unit_out)@value;
	height = toUnit(height, unit_out)@value;
	Log(Sprintf('Saving %{type}s to "%{plot_path}s" [width: %{width}f %{height}f]'), 5);
	device(plot_path, width = width, height = height, ...);
	#ret = eval(object, envir = envir);
	ret = if (any(class(object) %in% c('ggplot', 'plot'))) {
		print(object)
	} else {
		eval(object, envir = envir);
	}
	dev.off();
}
# Map file extensions to device function names where they differ.
plot_typeMap = list(jpg = 'jpeg');
# Save plot `object` to one or more files; the device is derived from each
# file's extension (via plot_typeMap) unless `type` is given. Width/height may
# be bare numbers (interpreted as cm) or united values.
# Returns list(path = plot_path, ret = per-file results).
plot_save = function(object, ..., width = valueU(20, 'cm'), height = valueU(20, 'cm'), plot_path = NULL,
	type = NULL,
	envir = parent.frame(), options = list(), simplify = T, unit_out = NULL, createDir = TRUE) {
	# is.numeric() also covers integer sizes (class(.) == 'numeric' did not)
	if (is.numeric(width)) width = valueU(width, 'cm');
	if (is.numeric(height)) height = valueU(height, 'cm');
	# <!> fix: the default path was assigned to an unused local `file`, leaving
	# plot_path NULL so that lapply(NULL, ...) saved nothing at all
	if (is.null(plot_path)) plot_path = tempFileName('plot_save', 'pdf', inRtmp = T);
	ret = lapply(plot_path, function(plot_path) {
		if (createDir) Dir.create(plot_path, recursive = T, treatPathAsFile = T);
		if (is.null(type) && !is.null(plot_path)) {
			ext = splitPath(plot_path)$ext;
			type = firstDef(plot_typeMap[[ext]], ext);
		}
		Logs("plot_path: %{plot_path}s, device: %{type}s", logLevel = 5);
		plot_save_raw(object, ..., type = type, width = width, height = height, plot_path = plot_path,
			options = options, unit_out = unit_out, envir = envir);
	});
	if (length(plot_path) == 1 && simplify) ret = ret[[1]];
	r = list(path = plot_path, ret = ret);
	r
}
# USAGE:
# plts = exprR1$Eapply(function(data, probe_name) {
# delayedPlot({
# boxplot(model, data, main = main);
# beeswarm(model, data, add = T)
# })
# });
# eval(plts[[1]])
# Capture a plotting expression without evaluating it: the expression is
# stored as promise `plot` in a fresh environment (a child of `envir`) and
# runs on first access, e.g. eval(get('plot', e)). See plot_grid_base.
delayedPlot = function(plotExpr, envir = parent.frame()) {
	holder = new.env(parent = envir);
	delayedAssign('plot', plotExpr, assign.env = holder)
	holder
}
#
# Rreporting.R
#Mon 06 Feb 2006 11:41:43 AM EST
#
# <p> documentation (by example
#
# Example:
# create a Reporter instance to report to LaTeX
# r = new("Rreporter", final.path = "/tmp/output.pdf", patterns = "latex");
#
# </p> end documentation
#
#
# <p> generic reporting functions
#
# Format a single table cell according to a `digits` specifier:
#   NA or 'p...'   : value passed through unchanged (text / paragraph column)
#   '#k'           : scientific notation with k digits ("%.ke")
#   '%k'           : value * 100 as a percentage with k digits, LaTeX-escaped
#   negative k     : significant digits for small magnitudes, fixed otherwise
#   non-negative k : fixed-point with k digits
row.standardFormatter = function(e, digits = NA) {
	if (is.na(digits) || substring(digits, 1, 1) == 'p') return(e);
	value = as.numeric(e);
	prefix = substring(digits, 1, 1);
	if (prefix == "#") return(sprintf("%.*e", as.numeric(substring(digits, 2)), value));
	if (prefix == "%") return(sprintf('%.*f\\%%', as.numeric(substring(digits, 2)), value * 100));
	nd = as.numeric(digits);
	if (nd < 0) {
		nd = as.integer(nd);
		# switch to significant-digit format once magnitude drops below 10^nd
		return(ifelse(floor(log10(abs(value))) <= nd,
			sprintf("%.*g", -nd, value),
			sprintf("%.*f", -nd, value)));
	}
	sprintf("%.*f", as.integer(digits), value)
}
# Pattern/formatter bundle describing how report.data.frame.toString renders a
# data.frame as a LaTeX longtable, plus general document templates (header,
# sectioning, quoting rules) and figure-table helpers. Placeholders in ALL
# CAPS (COLUMN_FORMAT, SECTION_NAME, ...) are substituted via mergeDictToString.
latex = list(
	# table patterns
	header = "{LONGTABLESTARTFMT\\begin{longtable}{COLUMN_FORMAT}\nLONGTABLECAPTION",
	columnNames = "%s%s %s\\hline\n",
	separator = " & ",
	hline = "\\hline\n",
	lineEnd = " \\\\\n",
	subHeading = function(h, rowSpan)
		sprintf("\\hline\n & \\multicolumn{%d}{l}{\\bf %s}\\\\\\hline\n", rowSpan, h),
	footer = "\\end{longtable}}\n\n",
	# fill in the column-format/caption placeholders once the table is built
	postProcess = function(s, df, row.formatters, digits, caption, na.value, subHeadings,
		ignoreRowNames, patterns, alignment, startFmt, bars) {
		if (is.null(alignment)) alignment = rep(NA, dim(df)[2]);
		alignment[is.na(alignment) & !is.na(digits)] = 'r';
		alignment[is.na(alignment)] = 'l';
		paragraphs = !is.na(digits) & substring(digits, 1, 1) == 'p';
		alignment[paragraphs] = digits[paragraphs];
		bars = c(bars, rep(F, length(alignment) - length(bars)));
		alignment = ifelse(!bars, alignment, paste(alignment, '|', sep = ''));
		colFmt = sprintf("%s%s", ifelse(ignoreRowNames, "", "r|"),
			paste(alignment, collapse = ""));
		captionPt = if (caption == '') list(LONGTABLECAPTION = '') else
			list(LONGTABLECAPTION = '\\caption{CAPTION}\\\\\n', CAPTION = caption)
		s = mergeDictToString(merge.lists(
			list(COLUMN_FORMAT = colFmt, LONGTABLESTARTFMT = startFmt),
			captionPt), s);
		s
	},
	# escape characters that are special in LaTeX table cells
	quote = function(s, detectFormula = T) {
		s = gsub('_', '\\\\_', s, perl = T);
		s = gsub('&', '\\\\&', s, perl = T);
		s = gsub('~', '$\\\\sim$', s, perl = T);
		s = gsub('([<>])', '$\\1$', s, perl = T);
		s = gsub('\\^2', '$^2$', s, perl = T);
		#ifelse(length(grep('_', s)) > 0, gsub('_', '\\\\_', s, perl = T), s)
		s
	},
	# general text formatting
	newpage = "\n\n\\newpage\n\n",
	section = "\\section{SECTION_NAME}\n\n",
	subsection = "\\subsection{SECTION_NAME}\n\n",
	paragraph = "PARAGRAPH_TEXT\\par\n\n",
	# finalize
	document = "HEADER\n\\begin{document}\nDOC_HERE\n\\end{document}\n",
	docHeader = "\\documentclass[a4paper,oneside,11pt]{article}\n\\usepackage{setspace,amsmath,amssymb, amsthm, epsfig, epsf, amssymb, amsfonts, latexsym, rotating, longtable, setspace, natbib, a4wide,verbatim, caption}\n\\usepackage[utf8x]{inputenc}",
	docCmd = "cd TMP_DIR ; pdflatex TMP_FILE_BASE 1&>/dev/null ; cp TMP_FILE_BASE.pdf OUTPUT_FILE",
	# figure table
	figureTable = list(
		table = "\\begin{center}\\begin{tabular}{COLS}\nROWS\\end{tabular}\\end{center}",
		figure = '\\includegraphics[width=%.3f\\textwidth]{%s}',
		figureCaption = "\\begin{minipage}[b]{%.3f\\linewidth}\\centering
		\\begin{tabular}{c}
		%s\\\\
		\\includegraphics[width=\\textwidth]{%s}
		\\end{tabular}\\end{minipage}\n",
		formatTable = function(rows, cols = 2, template = latex$figureTable$table) {
			mergeDictToString(list(COLS = join(rep('c', cols), ''), ROWS = rows), template)
		},
		# interleave row strings with ' & ' / line-end separators, cols per line
		formatRows = function(rows, cols = 2) {
			sep = c(rep(' & ', cols - 1), "\\\\\n");
			seps = rep(sep, (length(rows) + cols - 1) %/% cols);
			seps = seps[1:length(rows)];
			rs = meshVectors(rows, seps);
			r = join(c(pop(rs), "\n"), '');
			#browser();
#		texRows = sapply(1:(length(rows) - 1), function(i)sprintf('%s%s', rows[i],
#			ifelse(i %% cols == 1, ' & ', "\\\\\n")));
#		rs = join(c(texRows, rev(rows)[1], "\n"), '');
#		rs
		},
		formatFigure = function(figure, cols = 2, width = 1/cols - 0.05,
			template = latex$figureTable$figure, templateCaption = latex$figureTable$figureCaption,
			caption = '') {
			if (File.exists(figure)) figure = path.absolute(figure);
			caption = if (firstDef(caption, '') != '')
				sprintf(templateCaption, width, caption, figure) else
				sprintf(template, width, figure)
		}
	)
);
# bars: parallel structure to digits: where to insert vertical bars
# Render data frame `df` as a table string using the pattern set `patterns`
# (by default the LaTeX `latex` list defined above). All unqualified names
# used inside the with() block - header, footer, separator, lineEnd, hline,
# con, quote, subHeading, postProcess - resolve from `patterns`, NOT from
# the enclosing environment, so the output format is fully pattern-driven.
# row.formatters: cycled across rows (e.g. alternating row styles).
# digits: per-column digit specification; a scalar is recycled per column.
# subHeadings: list(indeces = ..., headings = ...) - before each row index
#   in `indeces` the corresponding heading line is inserted.
# bars: forwarded to postProcess; parallels `digits` (vertical bar columns).
# Returns the finished table string as produced by patterns$postProcess.
report.data.frame.toString = function(df = NULL,
	row.formatters = c(row.standardFormatter), digits = NA, caption = "", na.value = "-",
	subHeadings = NULL, ignoreRowNames = F, patterns = latex, names.as = NULL, alignment = NULL,
	quoteHeader = T, quoteRows = T, quoteRowNames = quoteHeader, startFmt = '', bars = NULL) {
	with(patterns, {
		# <p> initialize
		rFmtC = length(row.formatters);
		if (length(digits) == 1) digits = rep(digits, dim(df)[2]);
		t = header;	# the nascent table as string
		if (!is.null(names.as)) names(df) = names.as;
		# <p> complete header; note: `quote` here is patterns$quote (LaTeX
		# escaping), not base::quote
		header = if (quoteHeader) sapply(dimnames(df)[[2]], quote) else dimnames(df)[[2]];
		t = con(t, sprintf("%s%s%s%s", ifelse(!ignoreRowNames, separator, ""),
			paste(header, collapse = separator), lineEnd, hline));
		# <p> append rows
		for (i in Seq(1, nrow(df))) {
			# pick row formatter cyclically
			row.fmt = row.formatters[[((i - 1) %% rFmtC) + 1]];
			if (i %in% subHeadings$indeces) {	# insert subheading
				j = which(subHeadings$indeces == i);
				t = con(t, subHeading(subHeadings$headings[j], dim(df)[2] - ignoreRowNames));
			}
			if (!ignoreRowNames) {
				rowName = dimnames(df)[[1]][i];
				t = con(t, sprintf("%s%s", if (quoteRowNames) quote(rowName) else rowName, separator));
			}
			# <p> formatting and quoting
			values = sapply(1:ncol(df), function(j)
				if (is.na(df[i, j])) na.value else row.fmt(as.character(df[i, j]), digits[j])
			);
			if (quoteRows) values = sapply(values, quote);
			t = con(t, sprintf("%s%s", paste(values, collapse = separator), lineEnd));
		}
		t = con(t, footer);
		t = postProcess(t, df, row.formatters, digits, caption, na.value, subHeadings,
			ignoreRowNames, patterns, alignment, startFmt, bars);
	})
}
# Lay out `figures` (vector of image file paths) as a single LaTeX tabular
# with `cols` columns; helper names (formatFigure, formatRows, formatTable)
# resolve from patterns$figureTable via the nested with() calls.
# captions: optional per-figure captions (parallel to `figures`).
report.figure.tableSingle = function(figures, cols = 2, width = 1/cols - 0.05, patterns = latex, captions = NULL)
	with(patterns, with(figureTable, {
	figs = sapply(1:length(figures), function(i){
		formatFigure(figures[i], cols = cols, width = width, caption = captions[i])
	});
	rows = formatRows(figs, cols = cols);
	table = formatTable(rows, cols = cols);
	table
}))
# Lay out `figures` as one or more tabulars, starting a new table after
# maxRows rows (i.e. maxRows * cols figures) so long figure lists paginate.
# Returns the concatenated table markup.
report.figure.table = function(figures, cols = 2, width = 1/cols - 0.05, patterns = latex,
	captions = NULL, maxRows = 5) with(patterns, {
	NfiguresPerPage = maxRows * cols;
	# number of tables needed (despite the name, this counts tables/pages,
	# not figures)
	Nfigures = ceiling(ceiling(length(figures)/cols) / maxRows);
	if (Nfigures > 1) {
		tables = sapply(1:Nfigures, function(i) {
			Is = ((i - 1)*NfiguresPerPage + 1): min((i*NfiguresPerPage), length(figures));
			report.figure.tableSingle(figures[Is], cols, width, patterns, captions[Is])
		});
		join(tables, "\n")
	} else report.figure.tableSingle(figures, cols, width, patterns, captions)
})
#
# <p> Rreporter (base on S4 methods)
#
# S4 reporter class: accumulates report markup in a temporary file
# (tmp.path) and copies it to final.path on finalization; `patterns`
# holds the output format definition (default: the `latex` list).
# NOTE(review): prototype(final.path = NULL) assigns NULL to a "character"
# slot - confirm this validates under current R versions.
setClass("Rreporter",
	representation(tmp.path = "character", final.path = "character", patterns = "list"),
	prototype(tmp.path = sprintf("%s.rr", tempfile()), final.path = NULL, patterns = latex)
);
# Initializer: record destination path, resolve `patterns` given as a name
# (string) via get(), and create the empty temporary report file.
setMethod("initialize", "Rreporter", function(.Object, final.path, patterns = latex) {
	.Object@final.path = final.path;
	.Object@patterns = if (is.character(patterns)) get(patterns) else patterns;
	# create temp file
	cat("", file = .Object@tmp.path);
	.Object
});
# <p> generic methods
# Render `df` with this reporter's pattern set and append the resulting
# table markup to the reporter's temporary file; optionally echo it.
# Returns the reporter object to allow chaining.
report.data.frame = function(self, df = NULL, row.formatters = c(row.standardFormatter),
	digits = NA, caption = "", na.value = "-", subHeadings = NULL, ignoreRowNames = F, verbose = T) {
	rendered = report.data.frame.toString(df, row.formatters, digits, caption, na.value,
		subHeadings, ignoreRowNames, self@patterns);
	cat(rendered, file = self@tmp.path, append = T);
	if (verbose) cat(rendered);
	self
}
# Append a page break (patterns$newpage) to the report body.
report.newpage = function(self) {
	newpageMarkup = self@patterns$newpage;
	cat(newpageMarkup, file = self@tmp.path, append = T);
}
# Append a section heading; SECTION_NAME in the pattern is replaced by `name`.
report.newsection = function(self, name) {
	markup = mergeDictToString(list(SECTION_NAME = name), self@patterns$section);
	cat(markup, file = self@tmp.path, append = T);
}
# Append a subsection heading; SECTION_NAME in the pattern is replaced by `name`.
report.newsubsection = function(self, name) {
	markup = mergeDictToString(list(SECTION_NAME = name), self@patterns$subsection);
	cat(markup, file = self@tmp.path, append = T);
}
# Append a paragraph; PARAGRAPH_TEXT in the pattern is replaced by `text`.
report.paragraph = function(self, text) {
	markup = mergeDictToString(list(PARAGRAPH_TEXT = text), self@patterns$paragraph);
	cat(markup, file = self@tmp.path, append = T);
}
# Publish the report: copy the accumulated temporary file verbatim to its
# final destination. Bound under BOTH names (report.finalize and finalize)
# for backward compatibility - do not remove either binding.
report.finalize = finalize = function(self) {
	cmd = sprintf("cp \"%s\" \"%s\"", self@tmp.path, absolutePath(self@final.path));
	System(cmd);
}
# Wrap the accumulated report body in the document template (docHeader +
# document patterns), run the LaTeX command configured in patterns$docCmd
# and deposit the resulting PDF at final.path.
report.finalizeAsDocument = function(self) {
	# <p> read document body once (previously the file was read twice and
	# the first read was discarded)
	doc = readFile(self@tmp.path);
	# <p> write processed document
	sp = splitPath(self@tmp.path);
	writeFile(sprintf("%s.tex", sp$fullbase),
		mergeDictToString(list(
			HEADER = self@patterns$docHeader, DOC_HERE = doc
		), self@patterns$document)
	);
	# build the external latex command from the docCmd template
	cmd = mergeDictToString(
		list(
			TMP_DIR = sp$dir,
			TMP_FILE = sp$path,
			TMP_FILE_BASE = sp$fullbase,
			OUTPUT_FILE = absolutePath(self@final.path)
		)
	, self@patterns$docCmd)
	System(cmd);
}
#
# <p> end Rreporter (base on S4 methods)
#
#
# <p> convenience methods
#
# Convenience wrapper: render a single data frame as a stand-alone PDF
# document at `file` using a throwaway Rreporter instance.
reportDataFrame2pdf = function(df, file = tempfile(), row.formatters = c(row.standardFormatter),
	digits = NA, caption = "", na.value = "-", subHeadings = NULL, ignoreRowNames = F, verbose = T) {
	reporter = new("Rreporter", final.path = file);
	report.data.frame(reporter, df,
		row.formatters, digits, caption, na.value, subHeadings, ignoreRowNames, verbose);
	report.finalizeAsDocument(reporter);
}
#
# <p> sweave
#
# Run Sweave on `file`.Rnw, then pdflatex N times (multiple passes resolve
# cross-references). NOTE(review): the name 'swaeveIt' is a typo for
# 'sweaveIt' but is kept as-is since external callers may rely on it.
swaeveIt = function(file = NULL, N = 1) {
	System(sprintf("R CMD Sweave '%s.Rnw'", file));
	cmd = sprintf("sh -c 'pdflatex \"./%s\"'", file);
	for (i in 1:N) System(cmd);
}
#
# <p> Sweave replacement
#
# Default document skeleton used by .REP.getTemplates when the first
# template is unnamed: SETUP and TEMPLATE_MAIN are interpolated later.
.REP.standardTemplate = '\\input{SETUP}
\\begin{document}
TEMPLATE_MAIN
\\end{document}
';
# REP.plot('Tag', Qplot(rate, geom = 'histogram', xlab = 'heterocygosity', file = 'dest'));
# REP.plot('Tag', Qplot(sample = ps, dist = qunif, file = 'results/qc-markers-hweQQ.jpg'));
# Default device/plot parameters for Qplot: output size and resolution,
# plus the axis limits used for the degenerate (all-NA) placeholder plot.
Qplot_defaults = list(
	width = 5, height = 5, dpi = 150,
	dimx = c(0, 1), dimy = c(0, 100)
);
# Wrapper around ggplot2::qplot that always writes the resulting plot to
# `file`. Ready-made ggplot objects are passed through unchanged; degenerate
# inputs (all-NA data) yield an empty placeholder plot with fixed limits
# instead of failing inside qplot.
# pp: plot parameters (device size/resolution, placeholder axis limits).
# Returns `file` so the call can be used directly inside REP.plot().
Qplot = function(..., file = NULL, pp = Qplot_defaults) {
	pp = merge.lists(Qplot_defaults, pp);
	args = list(...);
	geom = firstDef(args$geom, 'default');
	# <b> workaround for QQ-plot instead of the expected qplot(...)
	p = if (any(class(args[[1]]) == 'ggplot')) {
		args[[1]]
	} else if (
		# histogram
		(all(is.na(args[[1]])) && geom == 'histogram')
		# xy-plot
		|| (all(is.na(args[[1]]) | is.na(args[[2]])))) {
		# empty placeholder plot
		# <!> bug fix: upper y-limit was pp$dimx[2] (copy-paste typo),
		# clipping the y-axis to the x-range
		ggplot(data = data.frame()) + geom_point() +
			xlim(pp$dimx[1], pp$dimx[2]) +
			ylim(pp$dimy[1], pp$dimy[2]);
	} else do.call(qplot, list(...));
	ggsave(p, file = file, width = pp$width, height = pp$height, dpi = pp$dpi);
	file
}
# Save a ready-made ggplot object `p` to `file` with the given device
# parameters; returns `file`. The `encoding` argument is forwarded by
# ggsave to the graphics device (presumably for PDF font embedding -
# TODO confirm against the ggsave/device in use).
GGplot = function(p, file = NULL, pp = list(width = 5, height = 5, dpi = 150)) {
	ggsave(p, file = file, width = pp$width, height = pp$height, dpi = pp$dpi, encoding = 'AdobeStd');
	file
}
# Per-device default options for Plot(): sizes are in inches for pdf and
# pixels for jpeg (the units the respective devices expect).
PlotDefaults = list(
	pdf = list(width = 6, height = 6),
	jpeg = list(width = 2048, height = 2048)
);
# Render a base-graphics plot to `file` using the device named by
# .plotType ('pdf', 'jpeg', ...). `f` overrides the plotting function
# (default: plot); `o` overrides device options (merged over PlotDefaults).
# Returns the output file path.
Plot = function(..., file = NULL, .plotType = 'pdf', o = NULL, f = NULL) {
	if (is.null(file)) file = tempFileName('reporter', .plotType);
	device = get(.plotType);
	plotFunction = firstDef(f, plot);
	o = merge.lists(PlotDefaults[[.plotType]], o);
	do.call(device, c(list(file = file), o));
	# close the device even when the plot function throws, so a failed
	# plot no longer leaks an open graphics device
	on.exit(dev.off(), add = TRUE);
	do.call(plotFunction, list(...));
	file
}
# Read template files (searched for in `locations`) and extract the
# sections matched by regex `re` (default: TEMPLATE_BEGIN/TEMPLATE_END
# pairs). Inline sections carry their own name as ':name\n' on the first
# line; files without inline sections are stored whole under their list
# name (or a generated TEMPL_<i> name). Returns a named list of strings.
.REP.extractFromTemplates = function(templates, re = '(?s)(?<=TEMPLATE_BEGIN).*?(?=TEMPLATE_END)',
	locations = c('.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	nst = names(templates);
	# <p> set empty template names
	if (is.null(nst)) nst = rep('', length(templates));
	nst[nst == ''] = paste('TEMPL_', 1:sum(nst == ''), sep = '');
	# <p> parse template definitions
	ts = lapply(1:length(templates), function(i) {
		# raw read templates
		templ = readFile(templates[[i]], prefixes = locations);
		tsRaw = fetchRegexpr(re, templ);
		# inline templates
		r = if (length(tsRaw) != 0) {
			# leading ':name' line of each section carries its name
			ns = sapplyn(tsRaw, function(e)fetchRegexpr('(?<=^:).*?(?=\\n)', e, globally = F));
			# colon, new-line
			ts = sapply(1:length(ns), function(i)substr(tsRaw[i], nchar(ns[i]) + 3, nchar(tsRaw[i])));
			listKeyValue(ns, ts);
		} else {
			listKeyValue(nst[i], templ);
		}
		r
	});
	#r = unlist.n(ts, 1);
	r = merge.lists(ts, listOfLists = T);
	r
}
# Read template files and extract named sub-templates (TEMPLATE_BEGIN/END
# sections). The parsing is delegated to .REP.extractFromTemplates - this
# function previously contained a verbatim copy of that code (same default
# regex and search locations). Additionally, when the first template was
# given without an explicit name (name != 'TEMPL_1'), it is wrapped into
# the standard document skeleton (.REP.standardTemplate).
.REP.getTemplates = function(templates, locations = c('.', sprintf('%s/src/Rscripts', Sys.getenv('HOME')))) {
	r = .REP.extractFromTemplates(templates, locations = locations);
	# backward compatibility: determine wether default template should be used
	if (length(r) > 0) {
		if (names(r)[1] != 'TEMPL_1') {	# expect full document template tb specified otherwise
			# interpolate first template into standard template
			r[[1]] = mergeDictToString(list(TEMPLATE_MAIN = r[[1]]), .REP.standardTemplate);
		}
	}
	r
}
# Extract KEY_BEGIN/KEY_END sections (predefined interpolation patterns)
# from the given template files.
.REP.getPatterns = function(templates) {
	keyRe = '(?s)(?<=KEY_BEGIN).*?(?=KEY_END)';
	.REP.extractFromTemplates(templates, keyRe)
}
# Defaults for the global reporter: files symlinked next to the LaTeX run
# (copy.files), the setup include, the latex binary and whether the
# standard document template may be used.
.REP.defaultParameters = list(
	copy.files = 'setup.tex',
	setup = 'setup.tex',
	latex = 'pdflatex',
	useDefaultTemplate = T
);
# create new, global reporter
# Initialize the global reporter state (.REPORTER.ITEMS in .GlobalEnv).
# If a cache file exists (and resetCache is FALSE) the previously saved
# state is load()ed instead of re-parsing the templates; otherwise the
# templates are read, sub-templates and patterns extracted, and a fresh
# state list is installed. Returns NULL; all effects are global.
REP.new = function(templates = NULL, cache = NULL, parameters = list(), resetCache = F,
	latex = 'pdflatex', setup = 'setup.tex') {
	copy.files = merge.lists(.REP.defaultParameters['copy.files'], list(copy.files = setup), concat = TRUE);
	parameters = merge.lists(.REP.defaultParameters,
		parameters,
		list(latex = latex, setup = setup),
		copy.files,
		concat = FALSE);
	if (!is.null(cache) && file.exists(cache) && !resetCache) {
		# restore cached state; SETUP/parameters are refreshed first
		REP.tex('SETUP', setup);
		REP.setParameters(parameters);
		load(file = cache, envir = .GlobalEnv);
	} else {
		templatePathes = c(as.list(templates), parameters$subTemplates);
		ts = .REP.getTemplates(templatePathes);
		ps = merge.lists(
			list(SETUP = setup),
			.REP.getPatterns(templatePathes)
		);
		mainPath = splitPath(as.vector(templates)[1]);
		assign('.REPORTER.ITEMS', list(
			# list of named templates
			templates = ts,
			# patterns to be interpolated
			patterns = ps,
			# housekeeping: tags for consecutively reported subtemplates
			templateTags = list(),
			# parameters passed in
			parameters = parameters,
			# path to the cache file
			cache = cache,
			# create default output name
			output = sprintf('%s.pdf', mainPath$fullbase),
			# name of the template to be used for the global, final document
			mainTemplate = names(ts)[1],
			templatePathes = templatePathes,
			# conditionals
			conditionals = list()
		), pos = .GlobalEnv
		);
	}
	NULL
}
# Re-read templates/patterns into an existing reporter state (no-op when
# no reporter was created yet); freshly parsed patterns override cached
# ones, and the state is persisted to the cache.
REP.refreshTemplates = function(templates) {
	if (!exists('.REPORTER.ITEMS')) return();
	templatePathes = templates;
	ts = .REP.getTemplates(as.list(templates));
	ps = .REP.getPatterns(templatePathes);
	.REPORTER.ITEMS$templates = ts;
	.REPORTER.ITEMS$mainTemplate = names(ts)[1];
	.REPORTER.ITEMS$templatePathes = templatePathes;
	.REPORTER.ITEMS$patterns = merge.lists(.REPORTER.ITEMS$patterns, ps);
	assign('.REPORTER.ITEMS', .REPORTER.ITEMS, pos = .GlobalEnv);
	REP.save();
}
# Persist the global reporter state to its cache file (if one is
# configured), creating the cache directory on demand. Returns NULL.
REP.save = function() {
	cachePath = .REPORTER.ITEMS$cache;
	if (!is.null(cachePath)) {
		cacheDir = splitPath(cachePath)$dir;
		if (!file.exists(cacheDir)) dir.create(cacheDir, recursive = T);
		save(.REPORTER.ITEMS, file = cachePath);
	}
	NULL
}
# Replace the reporter's parameter list, filling missing entries from the
# defaults, and persist the state.
REP.setParameters = function(parameters = .REP.defaultParameters) {
	merged = merge.lists(.REP.defaultParameters, parameters);
	.REPORTER.ITEMS$parameters = merged;
	assign('.REPORTER.ITEMS', .REPORTER.ITEMS, pos = .GlobalEnv);
	REP.save();
}
# Remove previously reported patterns (identified by `keys`) from the
# global reporter state; no-op when none of the keys are present.
REP.unreport = function(keys) {
	state = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	matched = which.indeces(keys, names(state$patterns));
	if (length(matched) == 0) return(NULL);
	state$patterns = state$patterns[-matched];
	assign('.REPORTER.ITEMS', state, pos = .GlobalEnv);
	REP.save();
}
# Set a single interpolation pattern `key` to `value` in the global
# reporter state, creating the state container on first use.
setREPentry = function(key, value) {
	if (!exists('.REPORTER.ITEMS')) assign('.REPORTER.ITEMS', list(), pos = .GlobalEnv);
	state = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	state$patterns[[key]] = value;
	assign('.REPORTER.ITEMS', state, pos = .GlobalEnv);
	REP.save();
}
# Overwrite the global reporter state wholesale.
setRI = function(ri) {
	assign('.REPORTER.ITEMS', ri, pos = .GlobalEnv)
}
# Record a named conditional flag (used by IF_<name>.../END_IF sections
# during template interpolation) in the global reporter state.
REP.setConditional = function(name, v) {
	state = get('.REPORTER.ITEMS', pos = .GlobalEnv);
	if (is.null(state$conditionals)) state$conditionals = list();
	state$conditionals[[name]] = v;
	assign('.REPORTER.ITEMS', state, pos = .GlobalEnv);
	REP.save();
}
# Capture the console output of evaluating `code` in `envir` and return it
# as a string.
# print: if TRUE, print() the result explicitly (top-level auto-printing
#   does not happen inside eval()).
outputOf = function(code, print = T, envir = parent.frame()) {
	tempFile = tempFileName('reporter', inRtmp = T);
	sink(tempFile);
	# guarantee the diversion is removed even when eval() throws; a stuck
	# sink() would silently swallow all subsequent console output
	tryCatch({
		if (print) print(eval(code, envir = envir)) else eval(code, envir = envir);
	}, finally = sink());
	readFile(tempFile)
}
# Deparse an expression into a single newline-terminated string.
# removeBraces: drop the first and last deparsed lines (the surrounding
#   '{' and '}' of a braced expression).
expression2str = function(exp, removeBraces = T) {
	lines = deparse(exp);
	if (removeBraces) {
		lines = lines[2:(length(lines) - 1)];
	}
	sprintf("%s\n", join(lines, "\n"))
}
# Normalize `code` - either a character string or an unevaluated
# expression - into a list holding both representations:
#   $code: the parsed/unevaluated expression, $text: the source text.
codeRepresentation = function(code) {
	if (is.character(code)) {
		return(list(
			code = parse(text = code),
			text = gsub('^\n?(.*)', '\\1', code)	# remove leading \n
		));
	}
	list(code = code, text = expression2str(code))
}
# Format a (positive) number in LaTeX scientific notation, e.g.
# "$2.5 \times 10^{3}$"; a mantissa that rounds to 1 is suppressed,
# yielding just "$10^{3}$".
REP.format.sci = function(s, digits = 1) {
	value = as.numeric(s);
	exponent = floor(log10(value));
	mantissa = value * 10^(-exponent);
	if (round(mantissa, digits) == 1) return(sprintf("$10^{%d}$", exponent));
	sprintf("$%.*f \\times 10^{%d}$", digits, mantissa, exponent)
}
# Named value formatters for REP.tex's `fmt` argument. Each takes the raw
# value and returns formatted LaTeX text; 'file' instead symlinks a file
# into a persistent per-user tmp location and returns the new path.
REP.formats = list(
	small = function(s)sprintf("{\n\\small %s\n}", s),
	tiny = function(s)sprintf("{\n\\tiny %s\n}", s),
	percent = function(s)sprintf("%.1f", 100 * as.numeric(s)),
	`.1` = function(s)sprintf("%.1f", as.numeric(s)),
	`.2` = function(s)sprintf("%.2f", as.numeric(s)),
	`.3` = function(s)sprintf("%.3f", as.numeric(s)),
	`.4` = function(s)sprintf("%.4f", as.numeric(s)),
	sci0 = function(s) REP.format.sci(s, 0),
	sci1 = function(s) REP.format.sci(s, 1),
	sci2 = function(s) REP.format.sci(s, 2),
	file = function(f) {
		ri = .REPORTER.ITEMS;
		# due to caching choose a persistent location <!> uniqueness
		tdir = sprintf('/tmp/%s/Rpreporting/%s', Sys.getenv('USER'), names(ri$templates)[1]);
		if (!file.exists(tdir)) dir.create(tdir, recursive = T);
		tf = sprintf('%s/%s', tdir, splitPath(f)$file);
		unlink(tf);	# overwrite previous version
		# <!> expect relative filename, spaces in file name not eliminated
		file.symlink(sprintf('%s/%s', getwd(), f), tf);
		tf
	}
);
# Register pattern `name` with value `str` in the reporter state.
# fmt: a key into REP.formats, or (when unknown there) an sprintf format.
# quote: apply LaTeX escaping (latex$quote) before registering.
# Returns the (formatted/quoted) string.
REP.tex = function(name, str, print = T, quote = F, fmt = NULL) {
	if (!is.null(fmt) && !is.na(fmt)) {
		str = if (is.null(REP.formats[[fmt]])) sprintf(fmt, str) else REP.formats[[fmt]](str);
	}
	if (quote) { #<i> use backend quoting
		#str = gsub('_', '\\\\_', str, perl = T); # replace _
		str = latex$quote(str);
	}
	setREPentry(sprintf('%s', name), str);
	str
}
# Like REP.tex, but with LaTeX quoting enabled by default.
REP.texq = function(name, str, print = T, quote = T, fmt = NULL) {
	REP.tex(name, str, print, quote, fmt)
}
# Report vector `v` as a separator-joined LaTeX string under pattern
# `name`, truncated to at most `max` elements; truncation is marked with a
# trailing '...'. typewriter: wrap each element in \texttt{}.
REP.vector = function(name, v, print = T, quote = T, typewriter = T, sep = ', ', max = 50) {
	# <!> bug fix: remember the pre-truncation length - the original
	# compared the ALREADY truncated length against max, so the '...'
	# marker was never appended
	Nv = length(v);
	if (max > 0) v = v[1:min(max, Nv)];
	if (typewriter) {
		v = sapply(v, function(s)sprintf('\\texttt{%s}', s));
	}
	REP.tex(name, sprintf('%s%s', join(v, sep), ifelse(max > 0 && Nv > max, '...', '')), quote = quote);
}
# Report a code snippet: registers its source text under '<name>_code'
# and (when execute is TRUE) its captured console output under
# '<name>_out'. The second call argument is taken UNEVALUATED via
# sys.call(), so `code` must be passed literally, not via a variable.
REP = function(name, code, print = T, execute = T, envir = parent.frame()) {
	c = codeRepresentation(as.list(sys.call())[[3]]);
	setREPentry(sprintf('%s_code', name), c$text);
	if (execute) {
		output = outputOf(c$code, envir = envir);
		setREPentry(sprintf('%s_out', name), output);
		if (print) cat(output);
	}
	NULL
}
# Default ggsave options for REP.plot with type = 'ggplot'.
REP.plotDefaultOptions = list(width = 5, height = 5, dpi = 150);
# Report a plot: evaluates/saves the plot expression and registers the
# resulting file path under '<name>_plot' plus the source under
# '<name>_code'. `code` may be a ggplot object (type = 'ggplot'), a
# ready-made file path (character), or a base-graphics expression that is
# evaluated against the device named by `type`. Like REP(), the plot
# argument is captured unevaluated via sys.call(). An empty output file is
# registered as '' (missing figure).
REP.plot = function(name, code, ..., file = NULL, type = 'pdf', envir = parent.frame(),
	options = list(), copyToTmp = F) {
	#c = codeRepresentation(as.list(sys.call())[[3]]);
	c = codeRepresentation(sys.call()[[3]]);	# as of version R 3.0.1
	if (is.null(file)) file = tempFileName('reporter', 'pdf', inRtmp = T);
	if (type == 'ggplot') {
		o = merge.lists(REP.plotDefaultOptions, options, list(...));
		with(o, { ggsave(code, file = file, width = width, height = height, dpi = dpi) });
	} else if (is.character(code)) {
		file = code;
	} else {
		device = get(type);
		device(file, ...);
		eval(c$code, envir = envir);
		dev.off();
	}
	pathToFile = path.absolute(file);
	if (copyToTmp) {
		# detach from the original location (e.g. for cached reruns)
		fileTmp = tempFileName('reporter', splitPath(pathToFile)$ext, inRtmp = T);
		file.copy(pathToFile, fileTmp, overwrite = T);
		pathToFile = fileTmp;
	}
	if (file.info(pathToFile)$size == 0) {
		pathToFile = '';
	}
	setREPentry(sprintf('%s_plot', name), pathToFile);
	setREPentry(sprintf('%s_code', name), c$text);
	NULL
}
# tag allows to search for overloading templates (_tag). This can be used in reportSubTemplate to
# conditionally report templates
# Interpolate template `templName`: look up an overloading '<name>_<tag>'
# variant if one exists, substitute all registered patterns (iteratively,
# so patterns may reference other patterns), then resolve
# IF_<name>.../END_IF sections from `conditionals` (longest names first,
# so IF_ab is not clobbered by IF_a).
.REP.interpolateTemplate = function(templName, conditionals = list(), tag = NULL) {
	ri = .REPORTER.ITEMS;
	if (!is.null(tag) && !is.null(ri$templates[[sprintf('%s_%s', templName, tag)]]))
		templName = sprintf('%s_%s', templName, tag);
	s = ri$templates[[templName]]
	#s = readFile(tpath);
	s = mergeDictToString(.REPORTER.ITEMS$patterns, s, iterative = T);
	lengths = sapply(names(conditionals), nchar);
	for (n in names(conditionals)[rev(order(lengths))]) {
		s = gsub(sprintf('IF_%s(.*?)END_IF', n), if (conditionals[[n]]) '\\1' else '', s);
	}
	s
}
# initialize a series of reportSubTemplate calls followed by a finalizeSubTemplate call
# Discards the accumulated sub-template list for `subTemplate` from a
# previous run so a fresh series can be reported.
REP.reportSubTemplateInitialize = function(subTemplate) {
	# (removed: unused local copy of .REPORTER.ITEMS$patterns)
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	REP.unreport(subPatterns);
}
# Report one instance of sub-template `subTemplate`: interpolate it with
# the currently registered patterns, store the result under
# 'TEMPLATE:<name>_<tag>' and append that key to the running list
# 'TEMPLATE:<name>:subTemplates' (consumed by REP.finalizeSubTemplate).
# When no tag is given, an auto-incremented counter per sub-template is
# used and persisted via templateTags.
REP.reportSubTemplate = function(subTemplate, tag = NULL, conditionals = list()) {
	ri = .REPORTER.ITEMS;
	# tag
	if (is.null(tag)) {
		tt = ri$templateTags;
		tag = ri$templateTags[[subTemplate]] =
			ifelse (is.null(tt[[subTemplate]]), 0, tt[[subTemplate]]) + 1;
		setRI(ri);
	}
	# finalize subTemplates
	patterns = ri$patterns;
	subPattern = sprintf('TEMPLATE:%s_%s', subTemplate, as.character(tag));
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	# set own entry
	setREPentry(subPattern, .REP.interpolateTemplate(subTemplate, tag = tag));
	# collect all subTemplates
#	for (st in names(ri$parameters$subTemplates)) {
#		i = which.indeces(sprintf('TEMPLATE:%s_.*', st), names(.REPORTER.ITEMS$patterns), regex = T);
#		setREPentry(sprintf('TEMPLATE:%s:subTemplates', st), join(unlist(names(patterns[i])), "\n"));
#	}
	#i = which.indeces(sprintf('TEMPLATE:%s_.*', subTemplate), names(.REPORTER.ITEMS$patterns), regex = T);
	# append new element
	setREPentry(subPatterns, join(c(patterns[[subPatterns]], subPattern), "\n"));
	REP.save();
}
# Collapse all sub-template instances reported for `subTemplate` into a
# single 'TEMPLATE:<name>' pattern and remove the per-instance entries.
REP.finalizeSubTemplate = function(subTemplate) {
	# finalize subTemplates
	patterns = .REPORTER.ITEMS$patterns;
	subPatterns = sprintf('TEMPLATE:%s:subTemplates', subTemplate);
	# <!> bug fix: the original guard tested is.null(subPatterns) - a
	# sprintf() result that can never be NULL - instead of the actual
	# pattern entry; without any reported instances there is nothing to do
	if (is.null(patterns[[subPatterns]])) return(NULL);
	text = mergeDictToString(patterns, patterns[[subPatterns]], iterative = T);
	setREPentry(sprintf('TEMPLATE:%s', subTemplate), text);
	# remove trail of per-instance entries
	subPattern = splitString("\n", .REPORTER.ITEMS$patterns[[subPatterns]]);
	#print(c(subPatterns, subPattern));
	REP.unreport(c(subPatterns, subPattern));
	REP.save();
}
# Produce the final document: assemble the main template with all
# registered patterns and conditionals, run latex `cycles` times in a
# scratch directory (support files from parameters$copy.files are
# symlinked there) and copy the resulting .pdf/.tex to `output`.
# conditionals: override/extend the globally registered conditionals; the
#   names of TRUE conditionals are appended to the default output name.
REP.finalize = function(conditionals = list(), verbose = FALSE, cycles = 1, output = NULL) {
	# <p> vars
	ri = .REPORTER.ITEMS;
	# <p> prepare scratch build directory
	dir = tempFileName('rreporter', inRtmp = T);
	file.remove(dir);
	dir.create(dir);
	# <!> assume relative pathes
	for (cpath in .REPORTER.ITEMS$parameters$copy.files) {
		if (splitPath(cpath)$isAbsolute) {
			dest = sprintf('%s/%s', dir, splitPath(cpath)$file);
			Log(sprintf('Reporting: symlinking %s -> %s', cpath, dest), 4);
			file.symlink(cpath, dest);
		} else {
			# search the working dir and the template directories; first hit wins
			for (sdir in c('', getwd(), sapply(ri$templatePathes, function(tp)splitPath(tp)$dir))) {
				source = sprintf('%s/%s/%s', getwd(), sdir, cpath);
				Log(sprintf('Reporting: dir %s', sdir), 4);
				if (file.exists(source)) {
					dest = sprintf('%s/%s', dir, cpath);
					Log(sprintf('Reporting: symlinking %s -> %s', source, dest), 4);
					file.symlink(source, dest);
					break;
				}
			}
		}
	}
	# <p> create final document
	tn = names(ri$templates)[1];
	allConditionals = merge.lists(ri$conditionals, conditionals);
	s = .REP.interpolateTemplate(ri$mainTemplate, allConditionals);
	# <p> run latex to produce temp file
	tmpPath = sprintf('%s/%s.tex', dir, tn);
	writeFile(tmpPath, s);
	Log(readFile(tmpPath), 5)
	latexCmd = firstDef(ri$parameters$latex, 'pdflatex');
	for (i in 1:cycles) {
		r = System(Sprintf('cd %{dir}s ; %{latexCmd}s -interaction=nonstopmode \"%{tn}s\"'),
			4, return.output = T);
		if (r$error > 0) Log(Sprintf("%{latexCmd}s exited with error."), 1);
		if (r$error > 0 || (verbose && i == 1)) Log(r$output, 1);
		#if (r$error > 0) break;
	}
	# <p> output: suffix the names of TRUE conditionals to the file name
	postfix = join(names(conditionals[unlist(conditionals)]), '-');
	if (postfix != '') postfix = sprintf('-%s', postfix);
	#fileOut = sprintf('%s%s%s.pdf', splitPath(tpath)$base, if (postfix == '') '' else '-', postfix);
	#fileOut = sprintf('%s%s%s.pdf', tn, if (postfix == '') '' else '-', postfix);
	if (is.null(output))
		output = if (exists('.globalOutput'))
			.fn(sprintf('%s%s', splitPath(ri$output)$base, postfix), 'pdf') else ri$output;
	Log(sprintf('Writing to output %s', output), 4);
	file.copy(sprintf('%s.pdf', splitPath(tmpPath)$fullbase), output, overwrite = T);
	file.copy(sprintf('%s.tex', splitPath(tmpPath)$fullbase),
		sprintf('%s.tex', splitPath(output)$fullbase), overwrite = T);
}
#
# <p> helpers
#
# Report a table of figures under pattern `nameTag`. Elements of
# `namesPlots` may be image file paths or ggplot objects; ggplots are
# first rendered into temporary PDF files.
REP.reportFigureTable = function(nameTag, namesPlots, cols = 2, captions = NULL) {
	paths = sapply(namesPlots, function(p) {
		if (!('ggplot' %in% class(p))) return(p);
		tmpPdf = tempfile(fileext = '.pdf');
		ggsave(tmpPdf, plot = p);
		tmpPdf
	});
	REP.tex(nameTag, report.figure.table(paths, cols = cols, captions = captions));
}
#
# Example code
#
# # refresh only valid after a REP.new call
# REP.refreshTemplates('gwas/reportGwas.tex')
# REP.new(
# 'gwas/reportGwas.tex',
# cache = sprintf('%s/reportGWAS_cache', outputDir),
# resetCache = resetCache
# );
# # reporting
# REP.tex('G:DESCRIPTION', firstDef(o$studyDescription, ''));
# REP.tex('G:ROUNDNAME', firstDef(o$runName, 'unnamed'));
# REP.finalize(verbose = T, output = sprintf('%s/reportGwas-%s.pdf', outputDir, o$runName), cycles = 3);
# # reporting patterns
# REP.tex('ASS:TABLE', report.data.frame.toString(
# psTop,
# digits = c(rep(NA, length(varsMap)), '#2', rep(2, length(Evars)), '#2', 2),
# names.as = rep.names, quoteHeader = F,
# caption = caption
# ), fmt = 'tiny');
# REP.tex('ASS:QQ:INFLATION', inflation, fmt = '.2');
# REP.plot('ASS:QQ:ASSOCIATION', Qplot(sample = ps$P, dist = qunif,
# file = sprintf('%s/ass-QQ-%s.jpg', outputDir, tag2fn(tag))));
# REP.tex('QC:SAMPLE:MDS:Outlier', fraction(qcMdsOutliers), fmt = 'percent');
#
# # sub-templates
# REP.reportSubTemplateInitialize('association');
# for (m in expandedModels$models) with(m, {
# REP.tex('ABC', 2);
# REP.reportSubTemplate('association', tag);
# });
# REP.finalizeSubTemplate('association');
#
# Rfunctions.R
#Tue 14 Aug 2007 01:39:42 PM CEST
#
# <§> abstract data functions
#
# Numerically invert function `f` on `interval`: returns a vectorized
# function mapping y to the x that minimizes (y - f(x))^2 via optimize().
# Note: optimize() needs a finite interval in practice.
inverse = function(f, interval = c(-Inf, Inf)) {
	invertOne = function(y, ...) {
		objective = function(x, ...) (y - f(x, ...))^2;
		optimize(objective, interval = interval, ...)$minimum
	}
	Vectorize(invertOne)
}
#
# <p> meta functions
#
# Call function `fctName` passing the NAMES of `args` as arguments, e.g.
# callWithArgs("f", list(a = 1, b = 2)) evaluates f(a, b). The symbols a,
# b are resolved by eval()'s default environment lookup - presumably so
# they are found lazily up the call chain; TODO confirm intended scoping
# before relying on this from new code.
callWithArgs = function(fctName, args) {
	#arguments = paste(sapply(names(args), function(n)sprintf("%s = %s", n, args[[n]])), collapse = ", ");
	fhead = sprintf("%s(%s)", fctName, paste(names(args), collapse = ", "));
	eval(parse(text = fhead))
}
# do.call variant that, for functions without '...', drops entries of
# `args` that do not match a formal argument of `f` (avoids "unused
# argument" errors when forwarding an over-full argument list).
.do.call = function(f, args, restrictArgs = T) {
	if (restrictArgs) {
		fargs = names(as.list(args(f)));
		fargs = fargs[fargs != ''];
		if (all(fargs != '...')) args = args[which.indeces(fargs, names(args))];
	}
	do.call(f, args)
}
#
# <p> benchmarking
#
# Time `.f(...)` over N__ repetitions. Prints and returns the mean wall
# clock time per call ($time), the last result ($lastResult) and the
# start/end timestamps ($t0/$t1).
benchmark.timed = function(.f, ..., N__ = 1e1) {
	started = Sys.time();
	for (rep__ in 1:N__) {
		lastResult = .f(...);
	}
	finished = Sys.time();
	r = list(time = (finished - started)/N__, lastResult = lastResult, t0 = started, t1 = finished);
	print(r$time);
	print(r$t0);
	print(r$t1);
	r
}
#
# Rstatistic.R
#Fri 19 Jan 2007 11:06:44 PM CET
# contains simple statistics to evaluate consulting questions
# Descriptive statistics (mean, median, sd, quantiles) for sample `s`,
# formatted as a single column via the project helper col.frame.
sizesDesc = function(s) {
	col.frame(list(
		mean = mean(s),
		median = median(s),
		stddev = sqrt(var(s)),
		quantiles = quantile(s)
	), do.paste = " ", digits = 1)
}
# Print descriptive statistics for each sample in list `l` and compare
# the FIRST TWO samples with a t-test and a Wilcoxon rank-sum test
# (p-values printed; nothing returned).
compareSamples = function(l) {
	print(data.frame(lapply(l, sizesDesc)));
	pValues = list(
		test.t = t.test(l[[1]], l[[2]])$p.value,
		test.wilcoxon = wilcox.test(l[[1]], l[[2]])$p.value
	);
	print(col.frame(pValues));
}
# Coerce all columns of `df` to numeric; returns a numeric MATRIX (apply
# coerces the data frame to a matrix first - factors go through their
# character labels).
df2numeric = function(df) apply(df, 2, function(col)as.numeric(as.vector(col)));
# Expand a (value, count) table into raw observations: each row's first
# entry is repeated by its second entry.
expandCounts = function(tab) unlist(apply(tab, 1, function(r){rep(r[1], r[2])}));
# Chi-squared test tolerant of degenerate tables: all-zero columns and
# rows are removed first; if the table collapses to a single row/column
# the test is undefined and list(p.value = NA) is returned. Cell counts
# below bootstrapCellCount trigger a simulated p-value (B replicates).
chisq.test.robust = function(tab, bootstrapCellCount = 5, B = 5e3) {
	zeroCol = apply(tab, 2, function(col) all(col == 0));
	tab = tab[, !zeroCol];
	if (is.vector(tab)) return(list(p.value = NA));
	zeroRow = apply(tab, 1, function(row) all(row == 0));
	tab = tab[!zeroRow, ];
	if (is.vector(tab)) return(list(p.value = NA));
	# small counts: fall back to Monte-Carlo p-value
	useSimulation = any(tab < bootstrapCellCount);
	if (useSimulation) chisq.test(tab, simulate.p.value = T, B = B) else chisq.test(tab)
}
# depends on coin package <!>, unfinished
# Cochran-Armitage style trend test via coin::independence_test, with
# all-zero table rows removed from the score vector beforehand (only the
# first score component is adjusted).
armitage.test.robust = function(formula, df, scores) {
	tab = table(df);
	# only eliminate 0-rows of table from score vector
	zRows = sapply(1:dim(tab)[1], function(i){ all(tab[i,] == 0) });
	scores[[1]] = scores[[1]][!zRows];
	r = independence_test(formula, df, teststat = "quad", scores = scores);
	r
}
# simulations in project 2014-02-Microsatellites
# Numerically stable log(sum(exp(v))) via a shift by `pivot`.
logSumExpRaw = function(v, pivot = median(v))(log(sum(exp(v - pivot))) + pivot)
# Variant pivoting on the maximum (the classic log-sum-exp trick).
logSumExpPivot = logSumExpMax = function(v)logSumExpRaw(v, pivot = max(v))
# Stable log(sum(exp(x))) using log1p around the largest element.
logSumExp = function(x) {
	Imx = which.max(x);
	# edge case: all elements -Inf means sum(exp(x)) == 0, i.e. -Inf;
	# without this guard the subtraction -Inf - -Inf would yield NaN
	if (x[Imx] == -Inf) return(-Inf);
	log1p(sum(exp(x[-Imx] - x[Imx]))) + x[Imx]
}
# rejFrac = function(x, alpha = 0.05) {
# 	x = na.omit(x);
# 	f = count(x <= alpha) / length(x);
# 	f
# }
# Fraction of (non-NA) p-values at or below the rejection level alpha.
rejFrac = function(x, alpha = 0.05) {
	mean(x <= alpha, na.rm = T)
}
# Rescale v so that its elements sum to C.
vector.std = function(v, C = 1) {
	C * v / sum(v)
}
# Log-space analogue: shift v so that logSumExp(v) equals C.
vector.std.log = function(v, C = 0) {
	v - (logSumExp(v) - C)
}
#
# <p> ml methods
#
# Names of the functions that make up a likelihood wrapper; for a wrapper
# 'prefix', implementations are looked up as global functions
# '<prefix>.<name>' (missing entries become NULL).
lhWrapperFunctions = c("initialize",
	"parsScale", "parsMap", "parsMapInv", "parsStart", "parsNames", "lh", "null2alt", "alt2null"
);
# <!> transition to S4-objects
# Assemble a likelihood wrapper (a list of functions + $self state) from
# globally defined '<prefix>.<fn>' functions. `self` is either passed in
# or built via <prefix>.initialize(...). A prefix ending in ".null"
# returns a null-hypothesis wrapper: the alternative's functions are
# re-wrapped so parameter vectors live in the null space (mapped through
# alt2null/null2alt).
lhGetWrapper = function(prefix, self, ...) {
	createNullWrapper = F;
	f = list();
	if (substr(prefix, nchar(prefix) - 3, nchar(prefix)) == "null") {
		createNullWrapper = T;
		prefix = substr(prefix, 1, nchar(prefix) - 5);
	}
	for (n in lhWrapperFunctions) {
		f[[n]] = mget(sprintf("%s.%s", prefix, n), envir = globalenv(), ifnotfound=list(NULL))[[1]];
	}
	f$self = if (is.null(self)) { if (is.null(f$initialize)) list(...) else f$initialize(...) } else self;
	if (createNullWrapper) {
		f1 = f;
		self = f1$self = f$self;
		f1$parsStart = function(self){ f$alt2null(self, f$parsStart(self)) };
		f1$parsScale = function(self){ f$alt2null(self, f$parsScale(self)) };
		f1$parsMap = function(self, p){ f$alt2null(self, f$parsMap(self, f$null2alt(self, p))) };
		f1$parsMapInv = function(self, p){ f$alt2null(self, f$parsMapInv(self, f$null2alt(self, p))) };
		f1$lh = function(self){ lhRaw = f$lh(self); function(p)lhRaw(f$null2alt(self, p)) };
		return(f1);
	}
	f
}
# Copy all wrapper functions '<template>.<fn>' to '<name>.<fn>' in the
# caller's environment (textual aliasing via eval.parent/parse).
lhCopyWrapper = function(name, template) {
	for (f in lhWrapperFunctions) {
		g = mget(sprintf("%s.%s", template, f), envir = globalenv(), ifnotfound=list(NULL))[[1]];
		if (!is.null(g)) eval.parent(parse(text = sprintf("%s.%s = %s.%s;", name, f, template, f)));
	}
}
# Placeholder; intentionally empty.
lhInit = function(lhWrapper) {
}
# Logit map (0, 1) -> R.
mapU = function(p) {
	-log(1/p - 1)
}
# Inverse logit (logistic) map R -> (0, 1).
map2U = function(x) {
	1/(1 + exp(-x))
}
# one-dimensional estimation
# Maximize a one-parameter likelihood with optimize() over (0, 1); the raw
# likelihood is evaluated at mapU(parsMap(p)), so the optimizer works on
# the bounded interval while the likelihood sees the unbounded scale.
# Returns $par (mapped maximum), $par.os (optimizer-space maximum) and
# $value (maximized objective).
lhMlEstimatorOD = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	if (is.null(c)) c = list(tol = .Machine$double.eps^0.25);
	f = lhGetWrapper(lhWrapper, c$self, ...);
	lhRaw = f$lh(f$self);
	lh = function(p) { lhRaw(mapU(f$parsMap(f$self, p))) }
	o = try(optimize(lh, lower = 0, upper = 1, tol = c$tol, maximum = T));
	r = list(par = mapU(f$parsMap(f$self, o$maximum)), par.os = o$maximum, value = o$objective);
	r
}
# multi-dimensional estimation
# Maximize the likelihood from `lhWrapper` with Nelder-Mead, one optim()
# run per starting point (row of `starts`); the best run is returned with
# parameters mapped back from optimization space ($par.os -> $par).
# Returns NA when every optimization attempt failed.
lhMlEstimatorMD = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	if (is.null(c)) c = list(do.sann = F, sann.cycles = 1000);
	f = lhGetWrapper(lhWrapper, c$self, ...);
	eps = 1e-5;
	#if (!is.null(start)) { starts = matrix(start, nrow = 1); }
	if (is.null(start)) start = f$parsStart(f$self);
	starts = if (!is.matrix(start)) matrix(as.numeric(unlist(start)), nrow = 1) else start;
	parscale = f$parsScale(f$self);
	lhRaw = f$lh(f$self);
	lh = function(p) { lhRaw(f$parsMap(f$self, p)) }
	os = apply(starts, 1, function(s) {
		s = f$parsMapInv(f$self, s);
		o = try(optim(s, lh, method = "Nelder-Mead",
			control = list(fnscale = -1, parscale = parscale, maxit = 1000),
		));
		# use inherits(): class(o) may have length > 1, making '==' fragile
		if (inherits(o, "try-error")) return(NA);
		if (0) { # if (o$convergence > 0 || c$do.sann) { # Nelder-Mead failed to converged
			# disabled simulated-annealing fallback
			o1 = try(optim(s, lh, method = "SANN",
				control = list(fnscale = -1, parscale = parscale, maxit = c$sann.cycles),
			));
			#if (inherits(o1, "try-error")) return(NA);
			if (o$convergence > 0 || o1$value > o$value) o = o1;
		}
		o$par.os = o$par;	# parameter values in optimiztation space
		o$par = f$parsMap(f$self, o$par);
		o
	});
	if (all(is.na(os))) return(NA);
	vs = sapply(os, function(o){o$value});
	arg.max = which.max(vs);
	estimate = os[[arg.max[1]]];
	fisher = list();
	#if (!is.null(c$computeFisher) & c$computeFisher)
	# NOTE(review): `d` below is not defined in this scope - this branch
	# errors whenever c$computeFisher is set; presumably f$self (the data)
	# was intended. TODO confirm and fix the intended argument.
	if (!is.null(c$computeFisher)) fisher = estimate.fisher(d, estimate, fisher.eps = 1e-1);
	r = c(estimate, fisher);
	r
}
# Dispatch to the matching ML estimator by the dimension of the start
# parameter vector: multi-dimensional, one-dimensional, or - with no free
# parameters - simply evaluate the likelihood.
lhMlEstimator = function(lhWrapper = NULL, start = NULL, c = NULL, ...) {
	f = lhGetWrapper(lhWrapper, c$self, ...);
	r = if (length(f$parsStart(f$self)) > 1) {
		lhMlEstimatorMD(lhWrapper, start, c, ...);
	} else if (length(f$parsStart(f$self)) == 1) {
		lhMlEstimatorOD(lhWrapper, start, c, ...);
	} else {	# null hypothesis w/o nuisance parameters
		r = f$lh(f$self)();
	}
	r
}
# Likelihood-ratio test: fit the null model (wrapper '<name>.null'), then
# the alternative (seeding it additionally with the null estimate mapped
# into the alternative space); df is the difference in free parameter
# counts. Returns log-likelihoods, the LR statistic, its chi-squared
# p-value and both parameter estimates.
lhLRtest = function(lhWrapper = NULL, start = NULL, c = list(do.sann = F, sann.cycles = 1000), ...) {
	f = lhGetWrapper(lhWrapper, NULL, c$self, ...);	# f$self is likelihood object and absorbs ellipsis parameters
	self = f$self;
	if (is.null(start)) start = f$parsStart(self);
	startNull = if (is.matrix(start))
		t(apply(start, 1, function(r)f$alt2null(self, r))) else
		f$alt2null(self, start);
	e.null = lhMlEstimator(sprintf("%s.%s", lhWrapper, "null"), startNull, c(c, list(self = self)));
	start = rbind(start, f$null2alt(self, e.null$par));
	e.alt = lhMlEstimator(lhWrapper, start, c(c, list(self = self)));
	# <p> calculate degrees of freedom
	st = f$parsStart(self);
	df = length(st) - length(f$alt2null(self, st));
	stat = 2 * (e.alt$value - e.null$value);
	list(ll.null = e.null$value, ll.alt = e.alt$value,
		test.stat = stat, p = 1 - pchisq(stat, df), df = df, par.null = e.null$par, par.alt = e.alt$par
	)
}
#
# lh-functions based on likelihood specification
#
# Example: see dataAnalysis.R in hwe project
# Example: binomial distribution
# lhBin = function(p, k, N)dbinom(k, N, p)
# spec_lhBin = list(
# ll = "lhBin",
# alt = list(
# start = c(.5), # also specifies number of parameters
# pars = list(list(name = "rho", type = "freq"))
# ),
# null = list(
# start = c(.5), # assume same likelihood and therefore #pars from alternative
# parsFree = 0 # alternative: list free parameters or specify tail from alt
# )
# );
# r = lhMl(spec_lhBin)
# Build a function from an unevaluated body expression plus a named list
# of formal arguments (list values become the defaults), closing over
# environment `env`.
toF = function(expr, args, env = parent.frame()) {
	formalsAndBody = c(args, expr);
	as.function(formalsAndBody, env)
}
# Inverse logit (logistic) mapped onto (min, max); bound as both logitI
# and expit.
logitI = expit = function(x, min = 0, max = 1) { (max - min)/(1 + exp(-x)) + min }
# Symbolic derivative of expit w.r.t. x, built via D() and toF().
expitD = toF(D(expression((max - min)/(1 + exp(-x)) + min), 'x'), list(x = NULL, min = 0, max = 1));
# Logit generalized to the interval (min, max).
logit = function(x, min = 0, max = 1) { log((x - min)/(max - x)) }
# Symbolic derivative of logit w.r.t. x.
logitD = toF(D(expression(log((x - min)/(max - x))), 'x'), list(x = NULL, min = 0, max = 1));
# templates assuming X as argument, p as parameter description list
# Forward mapper templates: unconstrained optimizer scale -> natural parameter scale.
lhArgMappers = list(
	freq = "expit(X)",
	int = "expit(X, min, max)",
	real = "X",
	positive = "log1p(exp(X))"
);
# Derivatives of the forward mappers.
# NOTE(review): entries other than 'freq' duplicate lhArgMappers rather than being
# derivatives — TODO confirm whether this table is actually used anywhere
lhArgMappersD = list(
	freq = NULL, #D(expression(expit(x), 'x')),
	int = "expit(X, min, max)",
	real = "X",
	positive = "log1p(exp(X))"
);
# Inverse mapper templates: natural parameter scale -> unconstrained optimizer scale.
lhArgMappersI = list(
	freq = "logit(X)",
	int = "logit(X, min, max)",
	real = "X",
	positive = "log(expm1(X))"
);
lhSpecificationDefaults = list(
	# embed null-parameter into alt-parameter space: variables: npars, parsFree, s (specification),
	# p: input parameters
	# <i>: optimization: substitute literals from start
	# default templates: free parameters come first, bound (fixed) parameters appended;
	# the inverse mapping only needs to recover the free parameters
	default = list(mapper = 'c(c(ARGS_FREE), c(ARGS_BOUND))', mapperInv = 'c(ARGS_FREE)')
);
# richest: richest parametrization of the likelihood
# lhInterface: call the likelihood function with a vector (vector) or with separate arguments formula
# the paramters (inline)
# Defaults: the 'alt' specification carries the richest parametrization; the
# likelihood is called with a single parameter vector ('vector' interface).
lhSpecificationDefault = list(richest = 'alt', lhInterface = 'vector');
# Source templates of the wrapped likelihood; `mapper` and `lf` are bound when the
# template is evaluated (see lhPreparePars). Mapped values beyond ~1e10 are treated
# as diverged and yield -Inf.
lhSpecificationInterfaces = list(
	vector = 'function(p, ...) { pm = mapper(p); if (any(abs(pm) > 1e10)) return(-Inf); lf(pm, ...) }',
	inline = 'function(p, ...) { pm = mapper(p); if (any(abs(pm) > 1e10)) return(-Inf); lf(ARGS_INLINE, ...) }'
);
#
# <p> logit derivatives
#simulations in 2014-07-Borstkanker/src/borstKankerExp.R
# Compositions of log/logit with expit/exp. Each comes in two algebraically
# equivalent forms; the short name is bound to the log1p/expm1 variant.
logExpit1 = function(x)log(expit(x))
logExpit = logExpit2 = function(x)-log1p(exp(-x))
logitExp1 = function(x)logit(exp(x))
logitExp = logitExp2 = function(x)-log(expm1(-x))
logExpit1m1 = function(x)log(1 - expit(x))
logExpit1m = logExpit1m2 = function(x)-log1p(exp(x))
logit1mExp1 = function(x)logit(1 - exp(x))
logit1mExp = logit1mExp2 = function(x)log(expm1(-x))
#
# <p> helper functions
#
# mappers for individual parameters
# ps: list of parameters
# mappers: mapper templates to used
# target: name of variable on which to apply
# idcs: indeces to iterate
# Render the mapper expressions for the selected parameters as a comma-separated
# string. For each parameter, the placeholder "X" in the type's template is replaced
# by the indexed target variable (e.g. p[1] or p[c(2, 3)]); any literal arguments of
# the parameter (e.g. min/max bounds) are substituted afterwards.
lhMapperPars = function(ps, mappers, target = 'p', idcs = 1:length(ps)) {
	maps = if (length(idcs) == 0) c() else sapply(idcs, function(i) {
		p = ps[[i]];
		# multi-slot parameters index by their `entries` vector, others by position
		a = gsub("X", sprintf("%s[%s]", target,
			deparse(if (length(p$entries)) p$entries else i)), mappers[[p$type]]);
		a = mergeDictToString(ps[[i]]$args, a);
		a
	});
	r = paste(maps, collapse = ", ");
	r
}
# <!> auto inverse mapping has to heed mapperPost time of application
# mappers map individual arguments, mapper sub-sequently maps the whole vector
# Build one direction of the parameter mapping for specification `s`: free
# parameters are read from the optimizer vector `p`, bound parameters are taken
# from `start` (fixed under the current hypothesis). The function source is
# assembled from the `mapper` template and evaluated within `s`.
lhMapperFunction = function(s, mappers, mapper) {
	# NOTE(review): parsFree == 0 yields free = c(1, 0) via 1:0 — TODO confirm this
	# case cannot reach lhMapperPars with a non-empty index vector
	free = 1:s$parsFree; # idcs of free variables
	bound = if(s$parsFree < s$npars) (s$parsFree + 1):s$npars else c(); # idcs of bound variables
	mStr = sprintf('function(p){%s}',
		mergeDictToString(list(
			ARGS_FREE = lhMapperPars(s$pars, mappers, 'p', free),
			ARGS_BOUND = lhMapperPars(s$pars, mappers, 'start', bound)
		), mapper));
	# evaluate within `s` so fields such as `start` are visible to the closure
	mf = with(s, eval(parse(text = mStr)));
	mf
}
# Construct both directions of the parameter transformation for specification `s`:
# `mapper` (unconstrained -> natural scale) and `mapperInv` (natural -> unconstrained).
lhMapperFunctions = function(s) {
	forward = lhMapperFunction(s, lhArgMappers, s$mapper);
	backward = lhMapperFunction(s, lhArgMappersI, s$mapperInv);
	list(mapper = forward, mapperInv = backward)
}
#' Build wrapper function around likelihood
#'
#' @param pars parameter specification of the hypothesis to prepare
#' @param defaults default mapper templates (see lhSpecificationDefaults)
#' @param spec overall specification; supplies the likelihood name (ll) and interface
#' @param template parameter specification used as template (usually richest parametrization tb reduced
#'	for other hypotheses)
#' @return `pars` augmented with npars, mapper/mapperInv and the wrapped likelihood `lh`
lhPreparePars = function(pars, defaults = lhSpecificationDefaults$default, spec = lhSpecificationDefault,
	template = pars) {
	# <p> determine free parameters
	t = merge.lists(defaults, pars);
	npars = length(template$pars);
	if (!is.null(t$parsFree)) {
		# NOTE(review): (npars - parsFree):npars selects parsFree + 1 elements —
		# TODO confirm intended (vs. (npars - parsFree + 1):npars)
		t$pars = if(t$parsFree == 0) list() else template$pars[(npars - t$parsFree): npars];
	}
	if (is.null(t$start)) t$start = template$start;
	if (is.null(t$parsFree)) t$parsFree = length(t$pars);
	# <p> construct mapped likelihood function
	# ARGS_INLINE expands to the mapped parameter slots (pm[...]) for the 'inline' interface
	fs = mergeDictToString(
		list(ARGS_INLINE =
			paste(sapply(1:npars, function(i) { sprintf("pm[%s]",
				deparse(if (length(template$pars[[i]]$entries)) template$pars[[i]]$entries else i)) }
			), collapse = ', ')),
		lhSpecificationInterfaces[[spec$lhInterface]]
	);
	t = merge.lists(t, list(npars = npars));
	# bind mappers and the raw likelihood function (looked up by name from spec$ll)
	t = merge.lists(t, lhMapperFunctions(t), list(lf = get(spec$ll)));
	# evaluate the wrapper template within t so that `mapper` and `lf` are bound
	f = with(t, eval(parse(text = fs)));
	t = merge.lists(t, list(npars = npars, lh = f));
	t
}
# types: names of specifications for which to define wrapped functions
# richest: name of specification for model that includes a superset of parameters of all other types
# types: names of specifications for which to define wrapped functions
# richest: name of specification for model that includes a superset of parameters of all other types
# For each requested type, build the wrapped likelihood and parameter mappers
# (lhPreparePars), using the richest specification as parameter template; the
# per-type results are merged back into `s` under their type names.
lhPrepare = function(s, types = c('null', 'alt')) {
	# <p> preparation
	s = merge.lists(lhSpecificationDefault, s);
	ri = s[[s$richest]];
	# number of parameter groups
	npars = length(ri$pars);
	# number of parameters of the likelihood function
	#Npar = sum(list.kp(ri$pars, 'entries', template = 1));
	# <p> build wrappers
	m = nlapply(types, function(type) {
		# per-type defaults may override the generic mapper templates
		defaults = merge.lists(lhSpecificationDefaults$default, lhSpecificationDefaults[[type]]);
		lhPreparePars(s[[type]], defaults, s, template = ri)
	});
	m = merge.lists(s, m);
	m
}
# <N> free parameters come first
# <N> free parameters come first
# Extract the free-parameter values from the full parameter vector `p` according to
# specification `s`: explicit `entries` slots are used when present, positional
# indices 1:parsFree otherwise; empty vector if there are no free parameters.
lhFreePars = function(s, p)with(s, {
	r = if (parsFree > 0) {
		idcs = unlist(list.kp(s$pars[1:parsFree], 'entries'));
		if (length(idcs) == 0) idcs = 1:parsFree;
		p[idcs]
	} else c();
	r
})
# second numeric derivative of x
# Numerical second derivative of f at x: central difference with step 2*eps,
# i.e. (f(x + h) - 2 f(x) + f(x - h)) / h^2 with h = 2*eps.
Dn2f = function(f, x, ..., eps = 1e-5) {
	h = 2 * eps;
	(f(x + h, ...) - 2 * f(x, ...) + f(x - h, ...)) / h^2
}
# default optimizer control: maximize (fnscale = -1) at standard tolerance
..OptimizeControl = list(fnscale = -1, tol = .Machine$double.eps^0.25);
# assume unconstraint arguments
# Optimize f over an unconstrained parameter vector p:
#	length(p) > 1: optim(); length(p) == 1: optimize() on (0, 1), pulled back
#	through logit; length(p) == 0: just evaluate f. Optionally derive Wald-type
#	confidence intervals from the hessian (Cramer-Rao assumption).
Optimize = function(p, f, method = 'BFGS', control = ..OptimizeControl, ...,
	hessian = T, ci = T, alpha = 5e-2) {
	r = if (length(p) > 1) {
		# .list is a project helper; presumably strips 'tol' (not understood by optim) — TODO confirm
		control = .list(control, .min = 'tol');
		o = optim(p, f, method = method, control = control, hessian = hessian, ...);
	} else if (length(p) == 1) {
		# optimize() needs a finite interval: search on (0, 1), map back via logit
		f0 = function(p, ...) { f(logit(p), ...) };
		o0 = try(optimize(f0, lower = 0, upper = 1,
			tol = control$tol, maximum = control$fnscale < 0, ...));
		o = if (class(o0) == 'try-error') list(par = NA, value = NA, hessian = NA) else
			list(par = logit(o0$maximum), value = o0$objective,
			# numerical second derivative, scaled by the objective value
			hessian = if(hessian) matrix(Dn2f(f, logit(o0$maximum), ...)/o0$objective) else NA);
	} else {
		o = list(par = c(), value = f(...));
	}
	# NOTE(review): r$hessian is a matrix in the multi-parameter case (non-scalar
	# `if` condition) and NULL in the 0-parameter case — TODO confirm intended
	if (ci && hessian && !is.na(r$hessian)) {
		var = -1/diag(r$hessian); # assume sharp cramer-rao bound
		sd = sqrt(var);
		r = c(r, list(ci = list(
			ciL = qnorm(alpha/2, r$par, sd, lower.tail = T),
			ciU = qnorm(alpha/2, r$par, sd, lower.tail = F), level = alpha, var = var)));
	}
	r
}
# p: matrix of row-wise start values
# p: matrix of row-wise start values
# Run Optimize from each start value (one row of p per start) and return the fit
# with the maximal objective value; scalar/NULL p degrade to a single Optimize call.
OptimizeMultiStart = function(p, f, method = 'BFGS', control = ..OptimizeControl, ...) {
	r = if (is.null(p)) { # special case of degenerate matrix (does not work in R)
		Optimize(c(), f, method = method, control = control, ...)
	} else if (!is.matrix(p)) {
		Optimize(p, f, method = method, control = control, ...)
	} else {
		os = apply(p, 1, function(s)Optimize(s, f, method = method, control = control, ...));
		# find maximum
		# NOTE(review): is.na() on a list of result lists operates elementwise on the
		# list, not on the fits — TODO confirm this detects all-failed runs
		if (all(is.na(os))) return(NA);
		vs = list.key(os, 'value');
		arg.max = which.max(vs);
		r = os[[arg.max[1]]];
	}
	r
}
# Maximum-likelihood estimation for a prepared specification `t` (see
# lhPreparePars): start values are pulled back to the unconstrained scale,
# optimized, and the best fit (including CI bounds) is mapped forward again.
# Several optim methods may be given and are tried in order until one succeeds.
lhEstMLRaw = function(t, start = NULL, ..., optim_method = 'BFGS') {
	if (is.null(start)) start = t$start;
	for (method in optim_method) {
		o = try(OptimizeMultiStart(t$mapperInv(start), t$lh, method = method, ...));
		if (!('try-error' %in% class(o))) break();
	}
	# NOTE(review): if every method fails, `o` is still a try-error here and the
	# $-accesses below raise a secondary error — TODO add explicit error handling
	o$par = t$mapper(o$par);
	o$ci$ciL = t$mapper(o$ci$ciL);
	o$ci$ciU = t$mapper(o$ci$ciU);
	o
}
# ML estimation for one hypothesis `type` of a likelihood specification `s`:
# prepare the parameter mappers for that type, then delegate to lhEstMLRaw.
lhEstML = lhMl = function(s, start = NULL, type = 'alt', ..., optim_method = 'BFGS') {
	prepared = lhPrepare(s, types = type);
	lhEstMLRaw(prepared[[type]], start = start, ..., optim_method = optim_method)
}
# Run the user-defined preparation hook of the likelihood, if any.
# For a likelihood named `NAME` (s$ll), a function `NAMEprepare` or `NAME_prepare`
# is looked up; if found it transforms the ellipsis arguments, otherwise the raw
# arguments are returned unchanged.
lfPrepare = function(s, ...) {
	lhParsOrig = list(...);
	prepare = sprintf('%s%s', s$ll, c('prepare', '_prepare'));
	# index of the first existing hook, NA if none; Position() avoids the
	# min(which(...)) warning on an empty match
	prepareExists = Position(exists, prepare);
	lhPars = if (!is.na(prepareExists)) get(prepare[prepareExists])(...) else lhParsOrig;
	lhPars
}
# specification based LR-test
# specification based LR-test
# Likelihood-ratio test between two hypotheses of specification `s`.
# startNull/startAlt: optional matrices of row-wise start values; by default the
# alternative is started from the fit under the null. Returns log-likelihoods,
# chi-square statistic, df, p-value and the parameter estimates.
lhTestLR = function(s, startNull = NULL, startAlt = NULL, types = c('null', 'alt'), ...,
	optim_method = 'BFGS', addTypeArg = F) {
	# <!> fix: capture the raw ellipsis arguments up front; they were previously
	# referenced below as `lhParsOrig` without ever being defined in this scope
	lhParsOrig = list(...);
	# <p> general preparation
	s = lhPrepare(s, types = types);
	null = s[[types[1]]];
	alt = s[[types[2]]];
	# <p> specific preparation (user defined)
	lhPars = lfPrepare(s, ...);
	# <p> null hypothesis
	if (is.null(startNull))
		startNull = if(null$parsFree == 0) NULL else matrix(lhFreePars(null, null$start), nrow = 1);
	lhEstMLRawArgs = c(list(t = null, start = startNull), lhPars, list(optim_method = optim_method));
	if (addTypeArg) lhEstMLRawArgs = c(lhEstMLRawArgs, list(lh_type__ = 'null'));
	o0 = do.call(lhEstMLRaw, lhEstMLRawArgs);
	# <p> alternative hypothesis
	if (is.null(startAlt)) {
		# build from fit under the null: free pars from the null fit, remaining
		# parameters from the alternative's start values
		parNull = lhFreePars(null, o0$par);
		startAlt = matrix(c(parNull, alt$start[(length(parNull) + 1):length(alt$start)]), nrow = 1);
	}
	lhEstMLRawArgs = c(list(t = alt, start = startAlt), lhPars, list(optim_method = optim_method));
	if (addTypeArg) lhEstMLRawArgs = c(lhEstMLRawArgs, list(lh_type__ = 'alt'));
	o1 = do.call(lhEstMLRaw, lhEstMLRawArgs);
	# <p> calculate degrees of freedom
	df = length(alt$start) - length(lhFreePars(null, o0$par));
	stat = 2 * (o1$value - o0$value);
	r = list(ll.null = o0$value, ll.alt = o1$value,
		test.stat = stat, p = 1 - pchisq(stat, df), df = df, par.null = o0$par, par.alt = o1$par,
		lh.pars = lhPars, lh.pars.orig = lhParsOrig
	);
	r
}
#
# <p> latest iteration of LH wrapper
#
# Expand the 'formula' parameter group of specification `s` into one 'beta'
# coefficient per column of the design matrix implied by `formula`/`data`,
# assign consecutive entry slots to all parameter groups and build the wrapped
# likelihood via lhPreparePars.
lhPrepareFormula = function(s, type, formula, data, ...) {
	# <o> compute on subset of data <N> cave: missingness
	X = model.matrix(model.frame(formula, data = data), data = data);
	# <p> expand paramters
	t = s[[type]];
	ps = t$pars;
	fparsI = which(list.key(ps, 'name') == 'formula');
	fpars = ps[[fparsI]]; # formula pars
	# the 'formula' group becomes ncol(X) regression coefficients
	ps[[fparsI]] = merge.lists(ps[[fparsI]], list(name = 'beta', count = ncol(X)));
	# <p> determine slots
	# consecutive index ranges (entries) per parameter group
	counts = cumsum(list.key(ps, 'count'));
	countsStart = pop(c(1, counts + 1));
	ps = lapply(seq_along(ps), function(i)merge.lists(ps[[i]], list(entries = countsStart[i]:counts[i])));
	# <p> determine start
	# replicate each group's start value across its slots
	start = avu(sapply(ps, function(p)rep(p$start, p$count)));
	# <p> map pars
	t$pars = ps;
	t = lhPreparePars(t, spec = merge.lists(lhSpecificationDefault, s));
	t$start = start;
	t
}
# ML estimation for a formula-based likelihood specification: expand the formula
# parameters (lhPrepareFormula), run the user preparation hook (lfPrepare) and
# optimize. On failure, a result with NA parameters and convergence = 1 is returned.
lhMlFormula = function(s, formula, data, type = 'formula', ..., optim_method = 'BFGS') {
	# <p> mapping of parameters
	t = lhPrepareFormula(s, type, formula, data, ...);
	# <p> extra args
	lhPars = lfPrepare(s, formula = formula, data = data, ...);
	# <p> call optimizer
	lhEstMLRawArgs = c(list(t = t, start = s$start), lhPars, list(optim_method = optim_method));
	r = try(do.call(lhEstMLRaw, lhEstMLRawArgs), silent = T);
	# <!> fixes: removed leftover debug print(r); inherits() instead of the fragile
	# class(r) == 'try-error' comparison (class() may return several classes)
	if (inherits(r, 'try-error')) r = list(par = rep(NA, length(t$start)), value = NA, convergence = 1);
	r
}
#
# <p> model manipulation
#
# TRUE iff the response vector takes exactly the two values 0 and 1.
response.is.binary = function(r) {
	levels = sort(unique(r));
	length(levels) == 2 && all(levels == c(0, 1))
}
#
# <p> clustered data
#
#
# <p> describe relationships (genetic) given a relational (database) model
#
# given relatedness in a data frame of ids and clusterIds, return a list of clusters containing ids
# clusterRelation2list_old = function(r, idName = "id", idClusterName = "idFam", byIndex = T) {
# r = r[, c(idName, idClusterName)];
# ns = sort(unique(r[, 2]));
# # <p> build clusters
# clusters = sapply(ns, function(e)list()); # holds members of clusters
# names(clusters) = ns;
# # <!> we can iterate the list, given it is ordered lexicographically
# for (i in 1:(dim(r)[1])) {
# clN = as.character(r[i, 2]);
# clusters[[clN]] = unlist(c(clusters[[clN]], ifelse(byIndex, i, as.character(r[i, 1]))));
# }
# clusters
# }
# Convert a relational (id, clusterId) data frame into a named list of clusters.
# Each element holds the cluster's row indices (byIndex = T) or the ids themselves.
clusterRelation2list = function(r, idName = "id", idClusterName = "idFam", byIndex = T) {
	r = r[, c(idName, idClusterName)];
	clusterIds = sort(unique(r[[idClusterName]]));
	nlapply(clusterIds, function(cl) {
		members = which(r[[idClusterName]] == cl);
		if (byIndex) members else r[[idName]][members]
	})
}
# permute clusters of identical size and within clusters
# cluster specification as given by clusterRelation2list assuming byIndex = T
# returned permutation is relative to refIds
# Permute clusters of identical size and observations within clusters.
# cls: cluster specification as returned by clusterRelation2list (byIndex = T);
# refIds: reference id vector the permutation is relative to (defaults to all ids);
# selectIds: optional filter restricting the permutation to these ids.
# Returns `perm` such that refIds[perm] gives the new order.
permuteClusters = function(cls, refIds = NULL, selectIds = NULL) {
	# allow to filter ids from cluster specification
	if (!is.null(selectIds)) {
		cls = lapply(cls, function(cl)intersect(cl, selectIds));
		# <!> fix: was `clusters[...]` (undefined name); drop clusters emptied by the filter
		cls = cls[sapply(cls, length) > 0];
	}
	cSizes = sapply(cls, function(e)length(e));
	# which cluster sizes are present in the data set?
	sizes = unique(cSizes);
	# indexable list of ids
	# <!> fix: `refIds = if (...) ...` without else overwrote a user-supplied refIds with NULL
	if (is.null(refIds)) refIds = sort(unlist(cls));
	# final permutation of refIds, such that refIds[perm] gives new order
	perm = 1:length(refIds);
	for (s in sort(sizes, decreasing = T)) { # permute cluster of same size, permute within cluster
		clsS = which(cSizes == s);
		p1 = sample(1:length(clsS)); # permute clusters
		for (i in 1:length(clsS)) {
			p2 = sample(1:s);
			# <p> indeces that are to be replaced
			indT = which.indeces(cls[[clsS[i]]], refIds);
			# <p> indeces where the replacement comes from
			indF = which.indeces(cls[[clsS[p1[i]]]][p2], refIds);
			# <p> save partial permutation
			perm[indT] = indF;
		}
	}
	perm
}
# clusters is a vector with cluster ids
# Convenience wrapper: permute a plain vector of cluster ids (one entry per
# observation) by building the relational form and delegating to permuteClusters.
clustersPermute = function(cls) {
	rel = data.frame(id = 1:length(cls), idFam = cls);
	permuteClusters(clusterRelation2list(rel))
}
#
# <p> wrap model fitting for lm/glm/gee fitters
#
#library("geepack"); # <i> move to init method
# Registry of model fitters: each entry provides fit(formula, data, clusterCol, ...)
# and compare(m1, m0) returning the anova, p-value and coefficient summaries of the
# nested-model comparison. Dispatch happens in regressionFit/regressionCompare.
regressionMethods = list(
	# assume formula to contain random effect
	glmr = list(
		fit = function(formula, data, clusterCol = NULL, ...) {
			glmer(formula, data = data, ...)
		},
		compare = function(m1, m0){
			# likelihood-ratio comparison of the two (g)lmer fits
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chisq)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	# use cluster column <!> untested
	# as glmr, but a random intercept per clusterCol is added to the formula
	glmrcl = list(
		fit = function(formula, data, clusterCol = NULL, ...) {
			f = update(formula, as.formula(Sprintf('~ . + (1|%{clusterCol}s)')));
			glmer(f, data = data, ...)
		},
		compare = function(m1, m0){
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chisq)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	# generalized linear model (stats::glm); clusterCol is accepted but ignored
	glm = list(
		fit = function(formula, data, clusterCol = NULL, ...)glm(formula, data = data, ...),
		compare = function(m1, m0){
			a = anova(m0$r, m1$r, test = "Chisq");
			list(anova = a, m0 = m0, m1 = m1,
				#p.value = a[["P(>|Chi|)"]][2],
				p.value = a[['Pr(>Chi)']][2], # as of R 2.15.1
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	# linear model (stats::lm); compared via F-test
	lm = list(
		fit = function(formula, data, clusterCol = NULL, ...)lm(formula, data = data, ...),
		compare = function(m1, m0){
			a = anova(m0$r, m1$r);
			list(anova = a, m0 = m0, m1 = m1, p.value = a[["Pr(>F)"]][2],
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std. Error"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std. Error"]
			)
		}
	),
	# generalized estimating equations (geepack::geeglm)
	gee = list(
		fit = function(formula, data, clusterCol, ...) {
			if (!length(formula.covariates(formula))) return(NULL);
			# geeglm needs ordered clusterIds <!>
			data = data[order(data[[clusterCol]]), ];
			names(data)[which(names(data) == clusterCol)] = "..gee.clusters"; # hack to make geeglm work
			r = geeglm(formula, data = data, id = ..gee.clusters, ...);
			r
		},
		compare = function(m1, m0){
			a = if (is.null(m0)) anova(m1$r) else anova.geeglm(m0$r, m1$r);
			list(anova = a, m0 = m0, m1 = m1, p.value = a[["P(>|Chi|)"]][1],
				effects0 = coefficients(summary(m0$r))[, "Estimate"],
				sdevs0 = coefficients(summary(m0$r))[, "Std.err"],
				effects1 = coefficients(summary(m1$r))[, "Estimate"],
				sdevs1 = coefficients(summary(m1$r))[, "Std.err"]
			)
		}
	)
);
# Row indices of `data` that are complete (no NA) in all variables of formula `f1`.
completeRows = function(f1, data) {
	vars = all.vars(as.formula(f1));
	# complete.cases() is the idiomatic, vectorized replacement for the previous
	# row-wise apply (which also coerced the data frame to a matrix); the result
	# is the same index vector (without row-name labels)
	which(complete.cases(data[, vars, drop = F]))
}
# <!> clusterIds is needed as argument although just forwarded
# Dispatch model fitting to the fitter registered under `type` in regressionMethods;
# the type is carried along so regressionCompare can dispatch later.
regressionFit = function(f, data, type, ...) {
	fitter = regressionMethods[[type]]$fit;
	list(type = type, r = fitter(f, data, ...))
}
# Compare two fitted models using the comparison function of m1's method type.
regressionCompare = function(m1, m0) {
	regressionMethods[[m1$type]]$compare(m1, m0)
}
# Fit the nested models f0 (null) and f1 (alternative) on the jointly complete
# subset of `data` (completeness is judged on f1's variables; f0 is assumed nested)
# and compare them with the method registered under `type`.
regressionCompareModelsRaw = function(f1, f0, data, type = "lm", clusterCol = NULL, ...) {
	# <p> jointly trim data according to missing data
	#rows = which(apply(data[, c(formula.vars(f1), clusterCol)], 1, function(r)all(!is.na(r))));
	# more robust version
	row.names(data) = NULL;
	#rows = as.integer(row.names(model.frame(f1, data = data)));
	# robust for random effects
	rows = apply(data[, all.vars(as.formula(f1)), drop = F], 1, function(r)!any(is.na(r)));
	d0 = data[rows, ];
	# <p> fit and compare models
	m1 = regressionFit(as.formula(f1), data = d0, type = type, clusterCol = clusterCol, ...);
	m0 = regressionFit(as.formula(f0), data = d0, type = type, clusterCol = clusterCol, ...);
	a = regressionCompare(m1, m0);
	a
}
# defaults for permutation-based p-values:
#	p.value: analytic p-value threshold below which permutation is triggered
#	sdev.rel: target relative sd of the empirical p-value (stopping criterion)
#	Nchunk: permutations per batch
permuteDefault = list(
	p.value = 0, sdev.rel = .3, Nchunk = 1e3,
	nuisanceCovariates = NULL, .clRunLocal = T
);
# idCol: used for permutation: column specifying identiy of individuals: could be filled automatically <i>
# permute:
#	sdev.rel: sdev relative to p.value to decide how often to permute
# Compare nested models; if the analytic p-value falls below permute$p.value the
# result is replaced by a cluster-permutation p-value.
regressionCompareModels = function(f1, f0, data, type = "lm", clusterCol = NULL, ...,
	permute = permuteDefault) {
	permute = merge.lists(permuteDefault, permute);
	r = regressionCompareModelsRaw(f1, f0, data, type, clusterCol, ...);
	if (!is.null(r) && !is.null(r$p.value) && !is.na(r$p.value) && r$p.value < permute$p.value)
		r = regressionCompareModelsPermuted(f1, f0, data, type, clusterCol, ..., permute = permute);
	r
}
#
# <p> permuted cluster regression
#
# Empirical p-value by permuting covariates across clusters: runs batches of
# Nchunk permutations until the relative sd of the empirical p-value falls below
# permute$sdev.rel (or Nmax permutations have been drawn).
regressionCompareModelsPermuted = function(f1, f0, data, type = "lm", clusterCol = "cluster", ...,
	idCol = "id", permute = permuteDefault, Nmax = 1e5) {
	# <p> data p-value
	a.data = regressionCompareModelsRaw(f1, f0, data, type, clusterCol = clusterCol, ...);
	p.data = a.data$p.value;
	# <p> logging
	Log(sprintf("Permuting Regression: %s [p = %.2e]", paste(as.character(f1), collapse = " "), p.data), 4);
	# <p> permutation variables indeces
	pvs = setdiff(formula.covariates(f1), permute$nuisanceCovariates);
	# <p> precompute cluster data structure
	cls = clusterRelation2list(data.frame(id = 1:length(data[[clusterCol]]), idFam = data[[clusterCol]]))
	ps = NULL;
	d0 = data;
	# adaptive permutation
	repeat {
		# NOTE(review): the closure permutes the captured copy d0, not its `data`
		# argument — TODO confirm this behaves as intended under remote clapply execution
		ps0 = clapply(1:permute$Nchunk, function(i, f1, f0, data, type, clusterCol, cls, pvs){
			d0[, pvs] = if (is.null(clusterCol)) d0[sample(1:(dim(data)[1])), pvs] else
				d0[permuteClusters(cls), pvs];
			r = regressionCompareModelsRaw(f1, f0, d0, type, clusterCol, ...);
			r$p.value
		}, f1 = f1, f0 = f0, data = data, type = type, clusterCol = clusterCol, cls = cls, pvs = pvs,
			.clRunLocal = permute$.clRunLocal);
		ps0 = na.exclude(as.numeric(ps0));
		ps = c(ps, ps0);
		#print(ps[1:100]);
		# empirical p-value across all permutations so far
		p.emp = fraction(ps <= p.data);
		# <p> stopping criterion
		# guard p == 0 with the add-one style lower bound 1/length(ps)
		p.break = if (p.emp == 0) 1 / length(ps) else p.emp;
		# relative sd of the binomial estimate of the p-value
		sdev.rel = sqrt(p.break * (1 - p.break) / length(ps)) / p.break;
		#print(list(sd = sdev.rel * p.break, sd.rel = sdev.rel, p = p.emp));
		if (sdev.rel <= permute$sdev.rel) break;
		# <p> final stop
		if (length(ps) >= Nmax) break;
	};
	r = list(f1 = f1, f0 = f0, p.value = p.emp, p.data = p.data, anova = a.data$anova, ps = ps);
	r
}
# permute covariates in order to obtain empirical p-values
# f1: model formula alternative
# f0: model formula hypothesis
# M: number of permutations
# Empirical p-value interface with a fixed number M of permutations.
# f1: alternative model formula; f0: null model formula; M: number of permutations.
regressionCompareModelsEmp = function(f1, f0, data, nuisanceCovariates = c(), type = "lm", M = 1e3, ...,
	idName = "id", idClusterName = "cluster", .clRunLocal = T) {
	# <!> fix: `data` was previously omitted from the call, so `type` was matched
	# positionally to the data argument of regressionCompareModelsPermuted
	r = regressionCompareModelsPermuted(f1, f0, data, type = type, ...,
		clusterCol = idClusterName, idCol = idName,
		permute = list(Nchunk = M, nuisanceCovariates = nuisanceCovariates, .clRunLocal = .clRunLocal));
	r
}
# data: data.frame
# stat: function computing test statistic
# vars: formula for permuation
# Nperm: number of permutations
# Pvalue: c('upper', 'lower', 'two.tailed')
# Generic permutation test.
# data: data.frame; stat: function computing the test statistic from data;
# vars: formula naming the variables to be row-permuted; Nperm: number of permutations;
# Pvalue: one of c('upper', 'lower', 'two.tailed').
# Index 0 of the loop computes the statistic on the observed data, 1..Nperm on
# permuted data. Returns NA if the data statistic failed or too many permutation
# statistics are NA (fracBadStatThres).
# NOTE(review): the na.rm argument is currently unused — TODO confirm intended.
permute = function(data, stat, vars, ..., Nperm = 5e3, Pvalue = 'lower', na.rm = T, fracBadStatThres = .01,
	returnT = TRUE) {
	perm.vars = all.vars(as.formula(vars));
	# (an empty, unused local helper `f` left over from refactoring was removed here)
	Ts = Sapply(0:Nperm, function(i, data, ...) {
		if (i > 0) data[, perm.vars] = data[sample(nrow(data)), perm.vars];
		stat(data, ...)
	}, data = data, ...);
	fracBadStatistics = mean(is.na(Ts[-1]));
	if (is.na(Ts[1]) || fracBadStatistics >= fracBadStatThres) return(list(p.value = NA));
	Ts = Ts[!is.na(Ts)];
	Tdata = Ts[1];
	Ts = Ts[-1];
	# add-one correction guards against an empirical p-value of exactly 0
	Plower = (1 + sum(Ts <= Tdata)) / Nperm;
	Pupper = (1 + sum(Ts >= Tdata)) / Nperm;
	p.value = switch(Pvalue,
		lower = Plower,
		upper = Pupper,
		two.tailed = 2 * min(Plower, Pupper)
	);
	r = if (returnT)
		list(p.value = p.value, t.data = Tdata, t.perm = Ts) else
		list(p.value = p.value, t.data = Tdata);
	r
}
#
# <p> error propagation
#
# as derived from the RB project and tested therein
# Propagated standard error of a product x*y (first-order delta method).
# covxy enters as 2*sdx*sdy*covxy — presumably a correlation rather than a
# covariance; TODO confirm against the original derivation.
errProd = function(x, sdx, y, sdy, covxy = 0) {
	relVar = (sdx / x)^2 + (sdy / y)^2 + 2 * sdx * sdy * covxy;
	(x * y) * sqrt(relVar)
}
# Propagated standard error of a ratio x/y; the covariance term is subtracted here.
errFrac = function(x, sdx, y, sdy, covxy = 0) {
	relVar = (sdx / x)^2 + (sdy / y)^2 - 2 * sdx * sdy * covxy;
	(x / y) * sqrt(relVar)
}
# Propagated standard error of a weighted sum cx*x + cy*y.
errSum = function(sdx, cx = 1, sdy = 0, cy = 1, covxy = 0) {
	sqrt((cx * sdx)^2 + (cy * sdy)^2 + 2 * cx * cy * covxy)
}
#
# <§> some general statistical transformations
#
# convert confidence interval to standard dev based on a normality assumption
# convert confidence interval to standard dev based on a normality assumption
ciToSd = function(ci.lo, ci.up, level = .95) {
	# upper centered limit
	ciU = ci.up - mean(c(ci.lo, ci.up));
	span = ci.up - ci.lo;
	# corresponding sd: numerically invert qnorm as a function of the sd
	# (inverse() is a project helper; search interval (0, 8 * span))
	sd = Vectorize(inverse(function(s)qnorm(1 - (1 - level)/2, 0, s), interval = c(0, span * 8)))(ciU);
	sd
}
# P-value implied by a confidence interval (normality assumption): recover the sd
# from the interval width, then test the interval midpoint against `against`.
ciToP = function(ci.lo, ci.up, level = .95, one.sided = F, against = 0) {
	sd = ciToSd(ci.lo, ci.up, level);
	pe = (ci.lo + ci.up) / 2 - against;	# centered point estimate
	peSdToP(pe, sd, one.sided)
}
# convert point estimate and SD to p-value (assuming normality)
# Convert a point estimate and its SD to a p-value, assuming normality;
# two-sided by default, one-sided when requested.
peSdToP = function(beta, sd, one.sided = F) {
	tail = pnorm(abs(beta), 0, sd, lower.tail = F);	# one-sided tail probability
	tail * ifelse(one.sided, 1, 2)
}
# Normal-theory confidence interval from a point estimate and its standard error.
ciFromBetaSdev = function(beta, sdev, level = .95) {
	alpha2 = (1 - level) / 2;	# probability mass in each tail
	list(effect = beta,
		lower = qnorm(alpha2, beta, sdev, lower.tail = T),
		upper = qnorm(alpha2, beta, sdev, lower.tail = F)
	)
}
# Confidence interval for coefficient `var` of a model summary object (expects the
# standard "Estimate"/"Std. Error" columns of coefficients(s)).
ciFromSummary = function(s, var, level = .95) {
	row = coefficients(s)[var, ];
	ciFromBetaSdev(row[["Estimate"]], row[["Std. Error"]], level = level);
}
# One-sided tail probability of `null` under N(|beta|, sd).
pFromBetaSd = function(beta, sd, null = 0)pnorm(null, abs(beta), sd)
# Numerically invert peSdToP: the sd that yields p-value `p` for effect `beta`
# (inverse() is a project helper; search interval (0, 10)).
sdFromBetaP = function(beta, p)Vectorize(inverse(function(s)peSdToP(beta, s), interval = c(0, 10)))(p);
# Reconstruct a confidence interval from an effect estimate and its p-value.
betaPtoCi = function(beta, p) {
	sd = sdFromBetaP(beta, p);
	ciFromBetaSdev(beta, sd)
}
#
# meta analysis
#
# meta analysis row-wise
# Fisher's method for combining p-values, applied row-wise: each row of `ps`
# yields -2 * sum(log(p)) ~ chisq(2k) under the global null (k = number of columns).
metaPvalue = function(ps) {
	if (!is.matrix(ps)) ps = matrix(ps, nrow = 1);
	if (!all(is.numeric(ps))) ps = apply(ps, 1:2, as.numeric);
	stats = apply(ps, 1, function(p)sum(-2 * log(p)));
	pchisq(stats, 2 * ncol(ps), lower.tail = F)
}
#
# data imputation
#
Sample = function(x, ...)if (length(x) == 1)x else sample(x, ...);
# Simple multiple imputation: produce n.imp copies of `data`, each with NAs
# replaced by values drawn (without replacement) from the observed values of the
# same column.
mi.simple = function(data, n.imp = 20) {
	r = lapply(1:n.imp, function(i) {
		for (v in names(data)) {
			# count() presumably tallies TRUE values, i.e. the number of NAs — TODO confirm
			data[is.na(data[, v]), v] = Sample(na.omit(data[, v]), count(is.na(data[, v])));
		}
		data
	})
	r
}
# Build an imputation closure: the returned function fills missing values of
# `imputationVars` in its argument by sampling from the observed values in
# `imputationData`; optionally expands factors afterwards.
cross.imputer = function(imputationData, imputationVars = NULL, doExpandFactors = T) {
	if (is.null(imputationVars)) imputationVars = names(imputationData);
	f = function(data) {
		d0 = data;
		for (v in imputationVars) { # cross impute from imputationData
			d0[is.na(d0[, v]), v] = Sample(na.omit(imputationData[[v]]), count(is.na(d0[, v])));
		}
		# <!> fix: subset used `vars`, an undefined name; imputationVars is the
		# plausible intent — TODO confirm expanded factor columns keep these names
		if (doExpandFactors) d0 = dataExpandFactors(d0)[, imputationVars];
		d0
	};
	f
}
# Replace NAs in a numeric vector by the mean of its observed values.
imputeMeanVar = function(col) {
	missing = is.na(col);
	col[missing] = mean(col, na.rm = T);
	col
}
# Column-wise mean imputation; note that apply() yields a matrix, not a data frame.
imputeMean = function(data) {
	apply(data, 2, imputeMeanVar)
}
#
# <p> cross validation
#
# cross validation partitions for classification data
# cross validation partitions for classification data
# Draw K random partitions such that every training fold (all data minus the
# held-out partition) retains at least minEls observations per response category;
# retry up to maxTries random permutations, NULL on failure.
crossValidationPartitionsClassification = function(responses, K = 15, minEls = 3, maxTries = 15) {
	N = length(responses);
	cats = unique(responses);
	for (i in 1:maxTries) {
		# random permutation
		perm = sample(1:N, N);
		# compute partitions
		parts = splitListEls(perm, K, returnElements = T);
		# category counts per training fold
		counts = data.frame.types(
			lapply(parts, function(p)table.n(responses[-p], categories = cats)),
			names = cats, do.rbind = T
		);
		doReject = any(apply(counts, 1, function(r)any(r < minEls)));
		if (!doReject) break;
	}
	# NOTE(review): a success found in the final iteration (i == maxTries) is
	# reported as failure by this condition — TODO test doReject instead
	r = if (i < maxTries) parts else {
		Log("Error: failed to find suitable cross validation partition!");
		NULL
	}
	r
}
# cross validation parititions for clustered data
# return indeces into cluster vector (cluster identities assumed to be given by integers)
# so far do not heed cluster sizes
# cross validation partitions for clustered data
# return indeces into cluster vector (cluster identities assumed to be given by integers)
# so far do not heed cluster sizes
crossValidationPartitionsClusters = function(clusters, K = 20) {
	N = length(clusters);
	# unique cluster ids
	cls = unique(clusters);
	# random permutation
	perm = Sample(cls, length(cls));
	# compute partitions: clusters are split across folds, never within
	parts = splitListEls(perm, K, returnElements = T);
	# map cluster ids back to observation indices
	r = lapply(parts, function(p)which.indeces(p, clusters, match.multi = T));
	r
}
#
# <p> optimization
#
# Iteratively refined grid search: evaluate f on an Ngrid-per-dimension grid over
# `parameters` (list of c(lower, upper) intervals), zoom in around the optimum and
# repeat `steps` times. rscale = 1 minimizes, rscale = -1 maximizes; `key` names a
# result component to use as the objective.
nested.search = function(f, ..., key = NULL, parameters = list(p1 = c(0, 10)),
	steps = 3, Ngrid = 4, rscale = 1, return.grid = F, par.as.vector = F, .clRunLocal = rget('.clRunLocal')) {
	ps = ps0 = parameters;
	for (i in 1:steps) {
		# <p> create search grid
		pars = lapply(ps, function(p)seq(p[1], p[2], length.out = Ngrid));
		grid = merge.multi.list(pars);
		# <p> apply function
		r = clapply(1:dim(grid)[1], function(j, grid, ...) {
			args = if (par.as.vector) list(as.vector(grid[j, ]), ...) else c(as.list(grid[j, ]), list(...));
			do.call(f, args);
		}, grid = grid, ..., .clRunLocal = .clRunLocal);
		# <p> search optimum
		# objective values: raw results or the `key` component thereof
		values = if (is.null(key)) r else list.kp(r, key, do.unlist = T);
		opt = which.min(values * rscale);
		pt = grid[opt, ]; # optimal point in the grid search
		# shrink each interval around the optimum, clipped to the original bounds
		ps = lapply(1:length(ps), function(j){
			from = max(pt[j] - (ps[[j]][2] - ps[[j]][1])/Ngrid, ps0[[j]][1]);
			to = min(pt[j] + (ps[[j]][2] - ps[[j]][1])/Ngrid, ps0[[j]][2]);
			c(from, to)
		});
		names(ps) = names(ps0);
	}
	r = if (return.grid) list(value = values[[opt]], par = pt, grid = r) else
		list(value = values[[opt]], par = pt, r = r[[opt]]);
	r
}
optim.nested.defaults = list(steps = 5, Ngrid = 4, rscale = 1, return.grid = F);
# optim()-like interface on top of nested.search; lower/upper give per-dimension
# box constraints. NOTE(review): `par` is accepted for interface compatibility but
# unused; the apply(..., list) construction of `parameters` wraps each interval in
# an extra list — TODO confirm nested.search unwraps this as intended.
optim.nested = function(par = NULL, f, ..., lower = -Inf, upper = Inf, control = list())
	with(merge.lists(optim.nested.defaults, control), {
	parameters = apply(cbind(lower, upper), 1, function(r)list(r));
	# <!> fix: `parameters` must be passed by name — an unnamed argument appearing
	# after `...` in the call is absorbed into `...` and never reaches the formal
	r = nested.search(f, ..., parameters = parameters,
		steps = steps, Ngrid = Ngrid, rscale = rscale, return.grid = return.grid, par.as.vector = T);
	r
})
#
# <p> correlation in data
#
# Identify groups of (nearly) perfectly correlated columns (|r| > 1 - eps) and
# propose all but the first column of each group for removal.
# Requires rcorr (Hmisc) and the project helper sub.graph (connected components).
Df.corr = function(df, eps = 1e-2) {
	N = dim(df)[2];
	rc = rcorr(df);
	# linear indices of high-correlation entries -> (row, col) pairs
	# NOTE(review): %/% and %% are swapped for R's column-major indexing; harmless
	# here since rc$r is symmetric — TODO confirm
	pairs = t(sapply(which(abs(rc$r) > (1 - eps)), function(e) {
		row = ((e - 1) %/% N) + 1;
		col = ((e - 1) %% N) + 1;
		r = c(row, col);
		r
	}));
	# keep each unordered pair once, dropping the diagonal
	pairs = pairs[pairs[, 1] < pairs[, 2], ];
	# connected components of the high-correlation graph
	clusters = sub.graph(pairs);
	# keep the first column of each component, remove the rest
	remove = unlist(lapply(clusters, function(e)e[-1]));
	r = list(clusters = clusters, cols.remove = remove);
	r
}
# <!> shadows base::identity; kept for backward compatibility with local callers
identity = function(e) e
# Sequence equally spaced on a transformed scale (default log); eps keeps the
# endpoints strictly inside the domain of `transf`.
seq.transf = function(from = 0, to = 1, length.out = 1e1, ..., transf = log, transfI = exp, eps = 1e-5) {
	lo = transf(from + eps);
	hi = transf(to - eps);
	transfI(seq(from = lo, to = hi, length.out = length.out, ...))
}
#
# <p> bug fixes for packages
#
# Build a model matrix for formula `f` on the complete cases of `data`; returns
# the matrix, the response values, the subset offset and the surviving row indices.
model_matrix_from_formula = function(f, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	# <p> prepare data matrices
	f1 = formula.re(f, data = data, ignore.case = ignore.case);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	responseValues = if (length(response) > 0) data[[response]] else NULL;
	# reset row names so that row.names() below yields positional indices
	row.names(data) = NULL;
	complete = !apply(data[, f1vars, drop = F], 1, function(r)any(is.na(r)));
	data = droplevels(data[complete, ]);
	responseValues = responseValues[complete];
	offset = if (!is.null(offset)) offset[complete] else NULL;
	mm = model.matrix(f1, model.frame(f1, data = data));
	if (remove.intercept) mm = mm[, !(dimnames(mm)[[2]] == '(Intercept)')];
	r = list(mm = mm, response = responseValues, offset = offset, indeces = as.integer(row.names(data)));
	r
}
# Row indices surviving NA-filtering for formula `f` (see model_matrix_from_formula).
complete_from_formula = function(f, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	r = model_matrix_from_formula(f, data, offset, ignore.case, remove.intercept);
	r$indeces
}
# As complete_from_formula, but starting from a character vector of variable names.
complete_from_vars = function(vars, data, offset = NULL, ignore.case = F, remove.intercept = F) {
	f = as.formula(Sprintf('~ %{vars}s', vars = join(vars, ' + ')));
	complete_from_formula(f, data, offset, ignore.case, remove.intercept)
}
# Formula interface to (cv.)glmnet: build the design matrix, optionally select
# the penalty by cross-validation, then refit a single model at lambda.min.
glmnet_re = function(f, data, ..., offset = NULL, ignore.case = F, remove.intercept = F,
	lambdas = NULL, cv = T) {
	d = model_matrix_from_formula(f, data, offset, ignore.case, remove.intercept);
	# <p> fit model
	r = if (cv) {
		r0 = cv.glmnet(x = d$mm, y = d$response, lambda = lambdas, ..., offset = d$offset);
		# strip CV-only arguments (project helper List) before calling plain glmnet
		args = c(List(..., min_ = c('foldid', 'nfolds', 'grouped')),
			list(x = d$mm, y = d$response, lambda = r0$lambda.min, offset = d$offset));
#		list(x = d$mm, y = d$response, lambda = (3*r0$lambda.min + r0$lambda.1se)/4, offset = d$offset));
#		list(x = d$mm, y = d$response, lambda = (r0$lambda.min), offset = d$offset));
		do.call('glmnet', args);
	} else glmnet(x = d$mm, y = d$response, lambda = lambdas, ..., offset = d$offset);
	r = c(r, list(formula = f));
	r
}
# Refit the variables selected by a glmnet model with an unpenalized glm;
# optionally mean-impute missing values of the selected variables first.
# Returns the glm fit and the linear predictor scores (empty list if <= 1 df).
glmnet_re_refit = function(model, data, ..., var_cutoff = 1e-6, intercept = '1', impute = NULL) {
	response = formula.response(model$formula);
	if (model$df <= 1) return(list());
	# <p> scrutinize model: variables with |coefficient| above the cutoff
	coefs = model$beta;
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	if (!is.null(impute) && impute == 'mean') {
		# <!> use model matrix <i>
		# <!> fix: the sapply body previously returned only the scalar mean (value of
		# the inner assignment), so whole columns were overwritten with the mean;
		# return the imputed column instead
		d0 = sapply(varsSel, function(var) {
			col = data[[var]];
			col[is.na(col)] = mean(col, na.rm = T);
			col
		});
		data[, varsSel] = d0;
	}
	# <p> refit
	f = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f, data = data, ...);
	r0 = list(glm = glm1, score = as.vector(predict(glm1, data, type = 'link')))
	r0
}
#library('glmnet');
grid.glmnet.raw = function(..., glmnet.f = cv.glmnet, max.tries = 3) {
for (i in 1:max.tries) {
fit = try(glmnet.f(...), silent = T);
if (all(class(fit) != 'try-error')) break();
}
if (any(class(fit) == 'try-error')) stop(fit[1]);
fit
}
# control parameters for the nested lambda grid search (see grid.glmnet)
grid.glmnet.control = list(steps = 4, Ngrid = 50, from = .01, to = .8, eps = 1e-5,
	transf = identity, transfI = identity);
# Nested grid search over the glmnet penalty: repeatedly zoom the lambda grid
# around lambda.min, then fit a final cv.glmnet over the full original range plus
# the optimum, and a plain glmnet at lambda.min (attached as $glmnet.fit).
grid.glmnet = function(..., control = grid.glmnet.control)
	with (merge.lists(grid.glmnet.control, control), {
	# initialize
	fit = NULL;
	fromO = from;
	toO = to;
	# NOTE(review): warnings are disabled here and later reset to 0, not to the
	# caller's previous setting — TODO save and restore options
	options(warn = -1);
	for (i in 1:steps) {
		lambda = seq.transf(from, to, length.out = Ngrid + 1, eps = eps,
			transf = transf, transfI = transfI);
		fit = grid.glmnet.raw(..., lambda = sort(lambda, decreasing = T));
		# zoom the search interval around the current optimum (clipped at 0)
		from = max(fit$lambda.min - (to - from)/Ngrid, 0);
		to = fit$lambda.min + (to - from)/Ngrid;
	}
	options(warn = 0);
	# choose lambdas to contain lambda.min also covering the range between from and to
	lambda = c(
		seq.transf(fromO, toO, length.out = Ngrid + 1, eps = eps,
			transf = transf, transfI = transfI),
		fit$lambda.min
	);
	fit0 = do.call('grid.glmnet.raw', c(list(...), list(lambda = sort(lambda, decreasing = T))));
	# strip CV-only arguments before the plain glmnet refit at lambda.min
	args = List(..., min_ = c('nfolds', 'grouped'));
	fit1 = do.call('grid.glmnet.raw', c(args, list(lambda = fit$lambda.min, glmnet.f = glmnet)));
	r = fit0;
	r$glmnet.fit = fit1;
	r
})
# f: formula, passed through formula.re
# data: data frame
# Fit grid.glmnet from a formula interface: expand f via formula.re, restrict
# to complete cases, build the model matrix (optionally dropping the
# intercept column) and return the fit together with the expanded formula.
grid.glmnet.re = function(f, data, ..., offset = NULL, control = grid.glmnet.control,
	ignore.case = F, remove.intercept = T)
	with (merge.lists(grid.glmnet.control, control), {
	# <p> prepare data matrices
	f1 = formula.re(f, data = data, ignore.case = ignore.case);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	# keep only rows without missing values in any model variable
	complete = !apply(data[, f1vars], 1, function(r)any(is.na(r)));
	d1 = data[complete, ];
	if (!is.null(offset)) offset = offset[complete];
	mm = model.matrix(f1, model.frame(f1, data = d1));
	if (remove.intercept) mm = mm[, !(dimnames(mm)[[2]] == '(Intercept)')];
	# <p> fit model
	r = grid.glmnet(x = mm, y = d1[[response]], ..., offset = offset, control = control);
	r = c(r, list(formula = f1));
	r
})
# Refit a cross-validated glmnet model (as returned by grid.glmnet*) as an
# ordinary glm: keep variables with |coefficient| > var_cutoff (taken from
# model$glmnet.fit) and fit response ~ intercept + selected variables.
# Returns list(glm = refitted model, score = linear predictor per row).
grid_glmnet_re_refit = function(model, data, ..., var_cutoff = 1e-6, intercept = '1', impute = NULL) {
	# <p> scrutinize model
	coefs = coefficients(model$glmnet.fit);
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	response = formula.response(model$formula);
	if (!is.null(impute) && impute == 'mean') {
		# NOTE(fix): return the whole imputed column from the callback; the
		# previous sub-assignment form returned only the scalar mean, so
		# data[, varsSel] was overwritten with recycled means.
		d0 = sapply(varsSel, function(var) {
			col = data[[var]];
			col[is.na(col)] = mean(col, na.rm = TRUE);
			col
		});
		data[, varsSel] = d0;
	}
	# <p> refit
	f = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f, data = data, ...);
	r0 = list(glm = glm1, score = as.vector(predict(glm1, data, type = 'link')))
	r0
}
# Refit a penalized variable selection as plain glm and test it: variables
# with |coefficient| > var_cutoff in `model` are kept; glm1 (selected full
# model) is compared against glm0 (selected null covariates from f0 only) via
# anova; correlation-based R^2 values and a 'raw' regression of the response
# on the glm1 score plus null covariates are also reported.
refitModel = function(model, f1, f0, data, ..., var_cutoff = 1e-6, ignore.case = F, intercept = '0') {
	# <p> prepare formula and data set
	f1 = formula.re(f1, data = data, ignore.case = ignore.case);
	f0 = formula.re(f0, data = data, ignore.case = ignore.case);
	f0covs = formula.covariates(f0);
	f1vars = all.vars(f1);
	response = formula.response(f1);
	complete = complete.cases(data[, f1vars]);	#!apply(data[, f1vars], 1, function(r)(any(is.na(r))));
	d1 = data[complete, ];
	# <p> extract data set according to model
	coefs = coefficients(model);
	varsSel = row.names(coefs)[abs(as.vector(coefs)) > var_cutoff];
	varsSel = setdiff(varsSel, '(Intercept)');
	varsSel0 = intersect(varsSel, f0covs);
	# no null covariate survived the selection: nothing to compare against
	if (!length(varsSel0)) return(
		list(coefficients = coefs, anova = NA, r2 = NA, r20 = NA, raw = NA, model1 = NA, model0 = NA)
	);
	# <p> re-fit glm
	f1 = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel), collapse = ' + ')));
	glm1 = glm(f1, data = d1, ...);
	f0 = as.formula(sprintf('%s ~ %s', response, paste(c(intercept, varsSel0), collapse = ' + ')));
	glm0 = glm(f0, data = d1, ...);
	# <p> anova
	a = anova(glm0, glm1, test = 'Chisq');
	# <p> R^2 (as correlation between prediction and response)
	mn = mean(d1[[response]]);
	#mm = model.matrix(f1, model.frame(f1, data = d1));
	pr = as.vector(predict(glm1, d1, type = 'response'));
	#r2 = cor((pr - mn), (d1[[response]] - mn));
	r2 = cor(pr, d1[[response]]);
	pr0 = as.vector(predict(glm0, d1, type = 'response'));
	#r20 = cor((pr0 - mn), (d1[[response]] - mn));
	r20 = cor(pr0, d1[[response]]);
	# <p> raw-model fit: response regressed on score of glm1 plus null covariates
	fScore = as.formula(sprintf('y ~ score + %s', paste(c(intercept, varsSel0), collapse = ' + ')));
	d2 = data.frame(
		d1[, varsSel0], y = d1[[response]], score = as.vector(predict(glm1, d1))
	);
	if (length(varsSel0)) names(d2)[1:length(varsSel0)] = varsSel0;
	raw = glm(fScore, data = d2, ...);
	r = list(coefficients = coefs, anova = a, r2 = r2, r20 = r20,
		raw = summary(raw), model1 = glm1, model0 = glm0);
	r
}
#
# <p> crossvalidation
#
# <!> tb implemented
# Crossvalidation summary for linear models: mean squared error between
# predictions `pred` and observed values `data`.
# NOTE(fix): removed a stray `summary(r0)$fstatistic[1]` line that referenced
# the undefined variable r0 and would have raised an error at run time
# (the block was marked "tb implemented"). `model` is accepted for interface
# compatibility but unused.
cv_summary_lm = function(model, pred, data, ...) {
	mean( (pred - data)^2 )
}
# Crossvalidation test function for glm models: sum of log predicted
# response values on the held-out data.
# NOTE(review): responseD (the observed response) is computed but never
# used; for a binomial model one would expect
# sum(y*log(p) + (1-y)*log(1-p)) - confirm the intended likelihood before
# relying on this score.
cv_test_glm = function(model, formula, data, ...) {
	response = formula.response(formula);
	responseP = predict(model, data, type = 'response');
	responseD = data[[response]];
	ll = sum(log(responseP));
	ll
}
# cv_prepare = function(data, argsFrom...)
# cv_train = function(data, argsFrom...)
# cv_test = function(model, data, argsFrom...)
# @arg cv_fold number of crossvalidation folds, denotes leave -cv_fold out if negative
# Generic (repeated) K-fold crossvalidation.
# cv_prepare(data, ...) returns a list of bindings made available to the run;
# cv_train(data = training set, ...) returns a model;
# cv_test(model, data = test set, ...) returns a per-fold result.
# cv_fold > 0: number of folds; cv_fold < 0: leave -cv_fold out.
# align_order = TRUE re-establishes the original row order of per-fold
# results (scalars or data frames with one row per observation).
# NOTE(review): parallel = TRUE assumes a parallel `Lapply` is defined in
# the enclosing environment - confirm before enabling.
crossvalidate = function(cv_train, cv_test, cv_prepare = function(data, ...)list(),
	data, cv_fold = 20, cv_repeats = 1, ..., parallel = F, align_order = TRUE) {
	if (cv_fold == 0) stop('crossvalidate: cv_fold must be an integer != 0');
	if (!parallel) Lapply = lapply;
	N = nrow(data);
	r = with(cv_prepare(data = data, ...), {
	Lapply(1:cv_repeats, function(i, ...) {
		perm = Sample(1:N, N);
		# compute partitions
		fold = if (cv_fold > 0) cv_fold else as.integer(N/-cv_fold);
		parts = splitListEls(perm, fold, returnElements = T);
		o = order(unlist(parts));
		r = Lapply(parts, function(part, cv_train, cv_test, data, cv_repeats, ...) {
			d0 = data[-part, , drop = F];
			d1 = data[part, , drop = F];
			model = cv_train(..., data = d0);
			r = cv_test(model = model, ..., data = d1);
			gc();
			r
		}, cv_train = cv_train, cv_test = cv_test,
			data = data, cv_repeats = cv_repeats, ...);
		# re-establish order
		r = if (align_order
			&& all(sapply(r, class) %in% c('numeric', 'integer'))
			&& all(sapply(r, length) == 1)) {
			unlist(r)[o];
		} else if (align_order && all(sapply(r, class) == 'data.frame') &&
			sum(sapply(r, nrow)) == nrow(data)) {
			#<!> untested
			#r = rbindDataFrames(r, colsFromFirstDf = T);
			r = do.call(rbind, r);
			r[o, ]
		} else if (align_order) stop("Crossvalidate: didn't know how to align order.") else r;
		gc();
		r
	}, ...)});
	r
}
#
# <p> data standardization
#
standardize = function(v)(v / sd(v));
# Z-transform the given columns of a data frame (subtract mean, divide by
# sd; NAs ignored). Untransformed columns are kept and come first in the
# returned data frame.
# NOTE(review): if vars covers all columns, data[, -i] is empty - confirm
# data.frame() handles this as intended for all inputs.
df2z = function(data, vars = names(as.data.frame(data))) {
	data = as.data.frame(data);
	df = data.frame.types(sapply(vars, function(v) {
		(data[[v]] - mean(data[[v]], na.rm = T)) / sd(data[[v]], na.rm = T)
	}), do.transpose = F);
	i = which.indeces(vars, names(data));
	d0 = data.frame(data[, -i], df);
	d0
}
# Iteratively merge the two smallest levels of a factor into fresh levels
# ('l1', 'l2', ...) until every remaining level has at least minN
# observations or only one level remains; minFreq (a proportion of the total
# count) overrides minN. Returns list(map = original level -> final level,
# factor = lumped factor as character vector).
lumpFactor = function(factor, minFreq = NULL, minN = 20, levelPrefix = 'l') {
	# <p> preparation
	f0 = as.factor(factor);
	t0 = table(f0);
	ls = levels(f0);
	N = length(f0);
	if (!is.null(minFreq)) minN = as.integer(minFreq * N + 0.5);
	# <p> lumping
	map = listKeyValue(ls, ls);
	for (i in 1:length(t0)) {
		# recompute level counts after the previous merge
		t0 = table(factor);
		if (all(t0 >= minN) || length(t0) < 2) break;
		# combine two smallest groups
		t1 = sort(t0);
		newLevel = sprintf('%s%d', levelPrefix, i);
		factor = as.character(factor);
		factor[factor == names(t1)[1] | factor == names(t1)[2]] = newLevel;
		map[[names(t1)[1]]] = map[[names(t1)[2]]] = newLevel;
		map[[newLevel]] = newLevel;
	}
	# <p> normalize map: follow merge chains until each level maps to its
	# final lumped level
	lsNew = as.character(ls);
	repeat {
		lsNew0 = lsNew;
		lsNew = as.character(map[lsNew]);
		if (all(lsNew == lsNew0)) break;
	}
	return(list(map = listKeyValue(ls, lsNew), factor = factor));
}
# lump a variable after checking other variables for non-missingness
# For each variable v in vars, lump data[[var]] based on its distribution on
# the subset where v is non-missing; returns a data frame with one lumped
# column per element of vars, named <var>_<v><postfix>.
# NOTE(review): lump$map[as.factor(...)] subsets the list by integer level
# codes, not by names - confirm that listKeyValue preserves level order so
# the codes line up.
lumpVariableOnVariables = function(data, var, vars, postfix = '_lump', minN = 20) {
	# prepare confounder afkomst
	lump = sapply(vars, function(v) {
		dvar = data[[var]][!is.na(data[[v]])];
		lump = lumpFactor(dvar, minN = minN);
		dvarNew = as.character(lump$map[as.factor(data[[var]])]);
		dvarNew[dvarNew == 'NULL'] = NA;
		as.factor(dvarNew)
	});
	d = data.frame(lump);
	names(d) = paste(var, paste(vars, postfix, sep = ''), sep = '_');
	d
}
#
# <p> descriptive
#
# Descriptive comparison of a named list of vectors: per-set counts (size,
# unique values, NAs) plus intersection/union/set-difference sizes for every
# unordered pair of sets. Returns a data frame with columns type and count.
compareVectors = function(l) {
	sets = names(l);
	# marginals
	r0 = nlapply(sets, function(n)c(n, length(l[[n]])));
	r1 = nlapply(sets, function(n)c(sprintf('%s-unique', n), length(unique(l[[n]]))));
	r2 = nlapply(sets, function(n)c(sprintf("%s-NA", n), sum(is.na(l[[n]]))));
	# pairwise set relations; constraint A < B avoids duplicate pairs
	modelList = list(A = sets, B = sets);
	r3 = iterateModels(modelList, .constraint = function(A, B)(A < B), function(i, A, B) {
		r = list(
			c(sprintf("%s inter %s", A, B), length(intersect(l[[A]], l[[B]]))),
			c(sprintf("%s union %s", A, B), length(union(l[[A]], l[[B]]))),
			c(sprintf("%s min %s", A, B), length(setdiff(l[[A]], l[[B]]))),
			c(sprintf("%s min %s", B, A), length(setdiff(l[[B]], l[[A]])))
		);
		r
	}, lapply__ = lapply)$results;
	r = c(r0, r1, r2, unlist.n(r3, 1));
	r = data.frame.types(r, do.rbind = T, names = c('type', 'count'));
	r
}
# Diagonal panel for pairs(): draw a histogram of x scaled to panel height.
pairs_std.panel.hist <- function(x, ...) {
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(usr[1:2], 0, 1.5))
    histo <- hist(x, plot = FALSE)
    bks <- histo$breaks
    heights <- histo$counts / max(histo$counts)
    rect(bks[-length(bks)], 0, bks[-1], heights, col = "grey", ...)
}
# Upper panel for pairs(): print the Pearson correlation with its 95%
# confidence interval and p-value, centered in the panel.
pairs_std.panel.cor <- function(x, y, digits = 2, prefix = "", cex.cor, ...) {
    usr <- par("usr"); on.exit(par(usr))
    par(usr = c(0, 1, 0, 1))
    ct = cor.test(x, y);
    label <- paste0(prefix,
        sprintf("Cor: %.2f (%.2f, %.2f)", ct$estimate, ct$conf.int[1], ct$conf.int[2]), "\n",
        sprintf("P-value: %.2e", ct$p.value)
    )
    if (missing(cex.cor)) cex.cor <- 0.8/strwidth(label)
    text(0.5, 0.5, label, cex = cex.cor * 1) # used tb cex.cor * r
}
# Scatterplot matrix with histograms on the diagonal and correlation
# summaries in the upper triangle.
pairs_std = function(...) {
	pairs(..., upper.panel = pairs_std.panel.cor, diag.panel = pairs_std.panel.hist)
}
#
# <p> omics data
#
# Empirical p-quantile of d, defined as the ceiling(p * N)-th order statistic.
quantileData = function(d, p) {
	ordered = sort(d);
	ordered[ceiling(p * length(ordered))]
}
# Flatten a reference data set into a vector of non-missing values. If given
# a matrix with center = TRUE, rows/columns (per `direction`) are first
# centered and then shifted back by the grand mean of the centers (via the
# helpers matrixCenter/matrixDeCenter).
quantileReference = function(reference, direction = 2, center = TRUE) {
	if (center && is.matrix(reference)) {
		centered = matrixCenter(reference, direction);
		reference = matrixDeCenter(centered$matrix, mean(centered$center), direction);
	}
	na.omit(as.vector(as.matrix(reference)))
}
# Sample skewness: sqrt(N)-scaled third central moment over the 3/2 power of
# the sum of squares, with the (1 - 1/N)^(3/2) adjustment factor.
Skewness = function(x, na.rm = T) {
	v = if (na.rm) na.omit(x) else x;
	n = length(v);
	centered = v - mean(v);
	g1 = sqrt(n) * sum(centered^3) / sum(centered^2)^(3/2);
	g1 * (1 - 1/n)^(3/2)
}
Noutliers = function(x, coef = 1.5)length(boxplot.stats(x, coef = coef)$out)
#' Quantile normalization of frame/matrix with respect to reference distribution
#'
#' Distribution to be normalized are represented as columns or rows of a matrix/data frame.
#' Each value is replaced by the quantile of the reference distribution as given by the value of the
#' empirical distribution function of the given value.
#'
#' @param reference numeric vector with realizations from the target distribution
#' @param data data frame or matrix with data to be normalized
#' @param direction is \code{data} organized per row (1) or column (2)?
#' @param impute temporarily replace missing values by the complementary
#'   row/column median so ranks are computed on complete vectors; the NAs
#'   are restored in the result
#' @param ties tie-breaking method forwarded to \code{rank} (\code{ties.method})
#' @param center forwarded to \code{quantileReference}
#' @param referenceDirection direction used when centering a matrix reference
#'
#' @examples
#' d = sapply(1:20, function(i) rnorm(1e4));
#' dNorm = quantileNormalization(as.vector(d), d)
quantileNormalization = function(reference, data, direction = 2,
	impute = TRUE, ties = 'random', center = TRUE, referenceDirection = direction) {
	ref = quantileReference(reference, referenceDirection, center);
	# medians over the complementary direction, used for temporary imputation
	if (impute) mns = apply(data, 3 - direction, median, na.rm = T);
	dN = apply(data, direction, function(d) {
		d0 = d;
		if (impute) d[is.na(d0)] = mns[is.na(d0)];
		# NOTE(fix): spell out ties.method - `ties =` relied on partial
		# argument matching into rank()'s ties.method argument.
		r = quantile(ref, probs = rank(d, na.last = 'keep', ties.method = ties)/length(na.omit(d)))
		if (impute) r[is.na(d0)] = NA;
		r
	});
	if (direction == 1) dN = t(dN);
	dimnames(dN) = dimnames(data);
	dN
}
# quantile normalization based on samples picked on the basis of their medians (around the medians)
# Nqn: number of reference samples
quantileNormalizationMedians = function(data, direction = 2, Nqn = 5, impute = TRUE) {
# <p> determine median of medians, corresponding median, IQR
medians = apply(data, direction, median);
mediansO = order(medians);
medianOI = as.integer(length(mediansO)/2 + .5);
medianI = mediansO[medianOI];
refMed = summary(data[, medianI]);
refIQR = refMed[['3rd Qu.']] - refMed[['1st Qu.']];
# <p> reference samples
refL = as.integer(medianOI - Nqn/2 + .5);
refU = refL + Nqn - 1;
refSamples = mediansO[refL:refU];
#print(refSamples)
# <p> standardize reference samples wrt median, IQR
refSampleValues = sapply(refSamples, function(i) {
refI = summary(data[, i]);
refIIQR = refI[['3rd Qu.']] - refI[['1st Qu.']];
E = (data[, i] - refI[['Median']]) * refIQR/refIIQR + refMed[['Median']];
#refIE = summary(E);
#refIEIQR = refIE[['3rd Qu.']] - refIE[['1st Qu.']];
#print(list(refI = refI, refIIQR = refIIQR, refIE = refIE, refIEIQR = refIEIQR));
E
});
eQn = quantileNormalization(refSampleValues, data,
direction = direction, impute = impute, center = FALSE);
eQn
}
# Center each column of d at zero by subtracting its column mean.
dataCentered = function(d, na.rm = T) {
	apply(d, 2, function(column) column - mean(column, na.rm = na.rm))
}
#
# <p> distributions
#
# Quantile function of the normal distribution truncated to [lower, upper]:
# map p into the probability mass between the bounds, then invert.
qtruncnorm = function(p, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
	pLower = pnorm(lower, mean, sd);
	pUpper = pnorm(upper, mean, sd);
	qnorm(pLower + p * (pUpper - pLower), mean, sd = sd)
}
# Draw N samples from the truncated normal via inverse-transform sampling.
rtruncnorm = function(N, mean = 0, sd = 1, lower = -Inf, upper = Inf) {
	u = runif(N);
	qtruncnorm(u, mean, sd = sd, lower, upper)
}
# QQ-plot of the observed quantiles of a sample against the theoretical
# quantiles of the distribution given by quantile function qdist (extra
# arguments are forwarded to qdist). Returns a ggplot object.
# NOTE(fix): the body previously used `Nqtls` although the parameter is named
# `Nqts`, and read the sample from the undefined free variable `rtn`. A
# `data` argument now supplies the sample; the default preserves the legacy
# lookup of `rtn` for backward compatibility.
qqDist = function(Nqts = 1e2, qdist, ..., data = NULL) {
	if (is.null(data)) data = get('rtn');	# legacy behavior - prefer passing data explicitly
	qtls = (1:Nqts)/(Nqts + 1);
	qtlsExp = qdist(qtls, ...);
	qtlsObs = quantile(data, qtls);
	qq = qplot(qtlsExp, qtlsObs) + theme_bw();
	qq
}
# Simulate Nsim draws from distribution `dist` (via the r<dist> function)
# and QQ-plot them against the theoretical quantiles (via q<dist>).
# NOTE(review): the simulated sample r is never passed on - qqDist reads the
# free variable `rtn` instead; this looks broken and should be confirmed.
qqSim = function(Nsim, dist = 'truncnorm', Nqts = Nsim/10, ...) {
	require('ggplot2');
	rdist = get(Sprintf('r%{dist}s'));
	r = rdist(Nsim, ...);
	qdist = get(Sprintf('q%{dist}s'));
	qq = qqDist(Nqts, qdist, ...);
	qq
}
#
# <p> entropy
#
# Shannon entropy (natural log) of the empirical distribution of d
# (frequencies via table.freq; zero-probability cells are dropped).
# NOTE(fix): the function previously ended in an assignment, whose value is
# returned invisibly; the entropy is now returned explicitly/visibly.
table.entropy = function(d) {
	p = table.freq(d);
	p = p[p != 0];
	H = - sum(p * log(p));
	H
}
#
# <p> qvalue
#
# Compute q-values (qvalue package) for a p-value vector that may contain
# NAs: q-values are computed on the non-missing p-values and re-embedded so
# the result aligns with the input positions.
# NOTE(review): rep(NA, sum(is.na(P.value))) as the embedding template looks
# suspicious - one would expect a vector of length(P.value); verify against
# vector.embed's semantics.
Qvalue = function(P.value, ...) {
	require('qvalue');
	P.valuesNotNA = na.omit(P.value);
	qv = qvalue(P.valuesNotNA, ...);
	r = qv;
	r$qvalue = vector.embed(rep(NA, sum(is.na(P.value))), which(!is.na(P.value)), qv$qvalue);
	r
}
#
# Rpatches.R
#Fri Nov 20 17:18:37 CET 2009
# geepack patch
# Wald-test comparison of two nested geeglm models (patch of geepack's
# internal anova routine): nesting is detected by projecting each model
# matrix onto the other's column space; the joint matrix is orthogonalized
# via QR, the larger model refitted in the new parameterization, and the
# extra coefficients tested with a Wald chi-square statistic.
anovageePrim2 = function (m1, m2, ...)
{
    mm1 <- model.matrix(m1)
    mm2 <- model.matrix(m2)
    # projection matrices onto the column spaces of mm1/mm2
    P1 <- mm1 %*% solve(t(mm1) %*% mm1) %*% t(mm1)
    P2 <- mm2 %*% solve(t(mm2) %*% mm2) %*% t(mm2)
    e2 <- mm2 - P1 %*% mm2
    e1 <- mm1 - P2 %*% mm1
    # a model is nested in the other iff its residual after projection vanishes
    m2inm1 <- all(apply(e2, 2, var) < 1e-10)
    m1inm2 <- all(apply(e1, 2, var) < 1e-10)
    if (!any(c(m2inm1, m1inm2)))
        cat("Models not nested\n")
    else if (all(c(m2inm1, m1inm2)))
        cat("Models are identical\n")
    else {
        # ensure m1 is the larger model, m2 the smaller
        if (m1inm2) {
            tmp <- m1
            m1 <- m2
            m2 <- tmp
        }
        mm1 <- model.matrix(m1)
        mm2 <- model.matrix(m2)
        mf1 <- paste(paste(formula(m1))[c(2, 1, 3)], collapse = " ")
        mf2 <- paste(paste(formula(m2))[c(2, 1, 3)], collapse = " ")
        # orthonormal reparameterization of the joint design matrix
        mm <- cbind(mm2, mm1)
        qmm <- qr(mm)
        qmmq <- qr.Q(qmm)
        nymm1 <- as.data.frame(qmmq[, 1:qmm$rank])
        colnames(nymm1) <- paste("parm", 1:ncol(nymm1), sep = ".")
        nymm2 <- nymm1[, 1:ncol(mm2), drop = FALSE]
        formula1 <- formula(paste(formula(m1)[[2]], formula(m1)[[1]],
            paste(c("-1", colnames(nymm1)), collapse = "+"),
            collapse = ""))
        # refit the larger model in the reparameterized design
        m1call <- m1$call
        nymm1[, paste(formula(m1)[[2]])] <- m1$y
        nymm1[, paste(m1call$id)] <- m1$id
        m1call$offset <- m1$offset
        m1call$weights <- m1$weights
        m1call$formula <- formula1
        m1call$data <- nymm1
        m1ny <- eval(m1call)
        beta <- coef(m1ny)
        vbeta <- summary(m1ny)$cov.unscaled
        # Wald statistic on the df extra coefficients of the larger model
        df <- dim(mm1)[2] - dim(mm2)[2]
        rbeta <- rep(1, length(beta))
        rbeta[1:df] <- 0
        beta0 <- rev(rbeta)
        zeroidx <- beta0 == 0
        X2 <- t(beta[zeroidx]) %*% solve(vbeta[zeroidx, zeroidx,
            drop = FALSE]) %*% beta[zeroidx]
        topnote <- paste("Model 1", mf1, "\nModel 2", mf2)
        title <- "Analysis of 'Wald statistic' Table\n"
        table <- data.frame(Df = df, X2 = X2, p = 1 - pchisq(X2,
            df))
        dimnames(table) <- list("1", c("Df", "X2", "P(>|Chi|)"))
        val <- structure(table, heading = c(title, topnote),
            class = c("anova", "data.frame"))
        return(val)
    }
}
#
# Rdataset.R
#Tue Sep 28 14:53:47 2010
# a dataset is a list with two data.frames
# data: contains "data"
# meta: contains meta information about "data"
# meta data frame
# name string/re to describe variable
# type (admin|var|unknown)
# fullType (admin:cluster|id|idM|idF)
# index index of column
# Build a meta-information data frame for the columns of d by matching each
# column name against the `name` regex of the metaTemplate entries; columns
# with no unique template match get type/fullType 'unknown'.
# Returns a data frame with columns name, type, fullType, index.
metaData = function(d, metaTemplate, ignore.case = T) {
	ns = names(d);
	dm = listOfLists2data.frame(lapply(1:length(ns), function(i) {
		n = ns[i];
		m = sapply(metaTemplate, function(mt)(length(grep(mt$name, n, ignore.case = ignore.case)) > 0));
		r = metaTemplate[m];
		# require a unique template match, otherwise classify as 'unknown'
		r = if (length(r) != 1) list(name = n, type = 'unknown', fullType = 'unknown') else
			merge.lists(r[[1]], list(name = n))[c('name', 'type', 'fullType')];
		r = c(r, list(index = i));
		r
	}), idColumn = NULL);
	dm
}
# Apply per-variable transformations from a meta template: every column of d
# whose name matches exactly one template's `name` regex is replaced by
# template$transf(column), if that template defines a transform.
transformData = function(d, metaTemplate, ..., ignore.case = T) {
	for (column in names(d)) {
		matches = sapply(metaTemplate,
			function(mt)(length(grep(mt$name, column, ignore.case = ignore.case)) > 0));
		if (sum(matches) != 1) next;
		template = metaTemplate[matches][[1]];
		if (!is.null(template$transf)) d[[column]] = template$transf(d[[column]]);
	}
	d
}
columnsOfType = function(d, type)d$meta$name[d$meta$fullType == type];
#
# Rsimulation.R
#Mon 07 Jan 2008 06:56:12 PM CET
#
# <§> setup
#
#library(MASS);
#source(sprintf("%s/Rgeneric.R", Sys.getenv("MYRLIB")), chdir=TRUE);
#library(ggplot2); #firstUpper
#
# <§> implementation
#
#
# <p> helper methods
#
# Constraint handler for 'twins' parameter groups: the parameters named in
# `twins` (dot-separated) are varied in lock-step, i.e. the i-th level of
# each twin is always combined with the i-th level of the others.
# Returns a data frame of level indices, one column per twin parameter.
parameterCombinationsTwins = function(specification, parameters, twins) {
	twinNames = strsplit(twins, ".", fixed = T)[[1]];
	Nlevels = length(parameters[[twinNames[1]]]);	# assume equal length here
	indexDf = data.frame(matrix(1:Nlevels, ncol = length(twinNames), nrow = Nlevels));
	names(indexDf) = twinNames;
	indexDf
}
# Enumerate all parameter combinations (as level indices) for a simulation
# specification: constrained groups (specification$constraints, handled by
# parameterCombinations<Type> functions) are combined with the cartesian
# product of the remaining free parameters via iterated merge().
parameterCombinations = function(specification, parameters) {
	# <p> initialization
	parCnts = lapply(parameters, length);
	# <p> handle constraints (<A> must not overlap)
	if (!is.null(specification$constraints)) {
		parsC = lapply(names(specification$constraints), function(c) {
			# dispatch on constraint type, e.g. parameterCombinationsTwins
			fn = get(con("parameterCombinations", firstUpper(specification$constraints[[c]]$type)));
			cs = fn(specification, parameters, c);
			cs
		})
		names(parsC) = names(specification$constraints);
	} else parsC = list();
	# <p> add remaining parameters (those not bound by any constraint)
	parsF = if (!is.null(specification$constraints)) {
		parameters[-unlist(sapply(names(specification$constraints), function(p) {
			pars = strsplit(p, ".", fixed = T)[[1]];
			idcs = which.indeces(pars, parameters);
			idcs
		}))]
	} else parameters;
	parsF = lapply(parsF, function(p)1:length(p));
	parsA = c(parsC, parsF);
	# <p> construct natural joint: unconstraint combinations
	df = data.frame(..dummy = 1);
	for (i in 1:length(parsA)) {
		df = merge(df, parsA[i]);
	}
	df = df[, -1];
	# <p> cleanup (names of df)
	ns = unlist(lapply(parsC, function(p)names(p)));
	ns = c(ns, names(parsF));
	names(df) = ns;
	df
}
# gIndex: global index for reference purposes
# lists are interpolated with arrays such that the name of the array
# becomes embedded as list element
# gIndex: global index for reference purposes
# lists are interpolated with arrays such that the name of the array
# becomes embedded as list element
# Assemble the concrete parameter set for one combination: for each
# collapsing group, pick the levels selected by `indeces` from `parameters`
# and flatten them one level; scalar (non-list) parameter values are wrapped
# into a named list so their name survives the flattening.
collapseParameters = function(collapsingGroups, parameters, indeces, gIndex) {
	iNs = names(indeces);
	pars = lapply(collapsingGroups, function(g) {
#		p = unlist.n(sapply(g$names, function(nm){
#			as.list(parameters[[nm]][indeces[[nm]]])
#		}), firstDef(g$collapse, 0));
		p = unlist.n(lapply(g, function(nm){
			po = parameters[[nm]][[indeces[[nm]]]];	# parameter object
			if (!is.list(po)) {
				po = list(po);
				names(po) = nm;
			}
			po
		}), 1);
		p
	});
	#if (is.list(pars$system)) pars$system$globalIndex = gIndex;
	pars
}
#
# <p> generic methods
#
# Materialize the parameter sets for all combinations of a simulation s, in
# the given order (default: canonical row order of s@combinations).
# Returns list(parameters = list of collapsed parameter sets, order = the
# combination indices in iteration order).
parameterIteration = function(s, order = NULL, reverse = F) {
	o = firstDef(order, 1:dim(s@combinations)[1], .dfInterpolate = F);
	#order.df(s@combinations, names(s@parameters), reverse);
	ps = lapply(o, function(i) {
		p = collapseParameters(s@specification$collapse, s@parameters, as.list(s@combinations[i, ]), i);
		p
	});
	i = list(parameters = ps, order = o);
	i
}
# i is given in canonical ordering of parameters
# Path of the results file for parameter combination i (canonical ordering),
# zero-padded so the files sort in combination order.
# NOTE(review): re-derives the full parameter iteration on every call just
# to count combinations - fine for small studies, wasteful for large ones.
simulationFile = function(s, i) {
	spec = s@specification;
	pars = parameterIteration(s); # canonical ordering
	digits = ceiling(log10(length(pars$order))); # digits needed for enumeration
	filename = sprintf("%s/%s-%0*d.RData", spec$resultsDir, spec$name, digits, i);
	filename
}
# needs: spec$cluster(hosts, type), spec$resultsFile|spec$name, spec$simulationFunction
# Run all parameter combinations of simulation s on a snow cluster (or
# locally if spec$local): spec$simulationFunction is applied to each
# parameter set; results are saved per combination (unless delaySave, in
# which case they are saved after the run).
# needs: spec$cluster(hosts, type), spec$resultsFile|spec$name, spec$simulationFunction
runIterationCluster = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = merge.lists(list(doSave = T, delaySave = F, local = F), s@specification);
	simulationPars = parameterIteration(s, order = order, reverse = reverse);
	# <p> initialize
	if (!is.null(spec$init)) { eval(parse(text = spec$init)); }
	f = get(spec$simulationFunction);
	# <p> iteration function: run one parameter set, time it, save on success
	clf = function(i, simulationPars, ...){
		p = simulationPars$parameters[[i]];
		t0 = sum(proc.time()[3]);
		sim = try(f(p, ...));
		t1 = sum(proc.time()[3]) - t0;
		if (class(sim) != "try-error" & spec$doSave & !spec$delaySave) {
			save(sim, file = simulationFile(s, simulationPars$order[i]));
		}
		# NOTE(fix): was ifelse(spec$delaySave, sim, ...); ifelse on a scalar
		# condition returns only the first element of a list result.
		r = list(
			time = t1,
			parameters = p,
			result = if (spec$delaySave) sim else (class(sim) != "try-error")
		);
		r
	};
	if (!spec$local) {
		# <p> make cluster
		library("snow");
		c = spec$cluster;
		hosts = if (is.null(c$hosts)) rep("localhost", 8) else c$hosts; #<A> cave vectors
		cl = makeCluster(hosts, type = firstDef(c$type, "SOCK"));
		clusterSetupRNG(cl);
		# <p> cluster intitalizations: source files on all nodes
		if (!is.null(c$source)) {
			textSource = sprintf("clusterEvalQ(cl, { %s })",
				paste(c(sapply(c$source, function(s)sprintf("source(\"%s\")", s)), ""), collapse = "; ")
			);
			eval(parse(text = textSource));
		}
		clusterExport(cl, spec$simulationFunction);
	}
	# <p> iterate (load-balanced over the cluster, or plain lapply locally)
	textExec = sprintf(
		"%s 1:length(simulationPars$parameters), clf, simulationPars = simulationPars, %s%s;",
		ifelse(spec$local, "lapply(", "clusterApplyLB(cl,"), paste(spec$args, collapse = ", "), ")"
	);
	print(textExec);
	simulations = eval(parse(text = textExec));
	#print(simulations);
	# <p> finish up
	if (!spec$local) stopCluster(cl)
	if (spec$delaySave) for (i in 1:length(simulations)) {
		sim = simulations[[i]];
		# NOTE(fix): was simulationFile(s, i, pars$order[i]) - three
		# arguments to a two-argument function, and `pars` is undefined here
		# (the variable is simulationPars).
		if (class(sim) != "try-error" & spec$doSave) save(sim, file = simulationFile(s, simulationPars$order[i]));
	}
	simulationPars
}
# Run all parameter combinations of simulation s sequentially in the current
# process: spec$simulationFunction is applied to each parameter set; results
# are saved per combination (or after the run if spec$delaySave).
runIterationPlain = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = s@specification;
	pars = parameterIteration(s, order = order, reverse = reverse);
	f = get(spec$simulationFunction);
	# <p> iterate
	simulations = lapply(1:length(pars$parameters), function(i){
		p = pars$parameters[[i]];
		t0 = sum(proc.time()[1:2]);
		sim = try(f(p));
		t1 = sum(proc.time()[1:2]) - t0;
		if (class(sim) != "try-error" & spec$doSave & !spec$delaySave) {
			save(sim, file = simulationFile(s, pars$order[i]));
		}
		# NOTE(fix): was ifelse(spec$delaySave, sim, ...); ifelse on a scalar
		# condition returns only the first element of a list result.
		r = list(
			time = t1,
			parameters = p,
			result = if (spec$delaySave) sim else (class(sim) != "try-error"));
		r
	});
	if (spec$delaySave) for (i in 1:length(simulations)) {
		sim = simulations[[i]];
		# NOTE(fix): was simulationFile(s, i, pars$order[i]) - three
		# arguments passed to the two-argument simulationFile().
		if (class(sim) != "try-error" & spec$doSave) save(sim, file = simulationFile(s, pars$order[i]));
	}
	pars
}
# Summarize a completed simulation run: load each saved result file, apply
# spec$summaryFunctionSingle per result (if defined), then aggregate via
# spec$summaryFunction (if defined). Missing result files yield NULL/NA.
summarizeIteration = function(s, order = NULL, reverse = F) {
	# <p> initialization
	spec = s@specification;
	pars = parameterIteration(s, order = order, reverse = reverse);
	print(pars);
	f = if (is.null(spec$summaryFunctionSingle)) NULL else get(spec$summaryFunctionSingle);
	simulations = lapply(1:length(pars$order), function(i) {
		parIndex = pars$order[i];
		file = simulationFile(s, parIndex);
		sim = if (file.exists(file)) { get(load(file)[1]) } else NULL;
		# <%><N> interpolate old simulations
		#if (length(sim) == 1) sim = sim[[1]];
		r = if (is.null(f)){ NA } else f(s, sim, pars$parameters[[parIndex]]);
		r
	});
	r = NULL;
	if (!is.null(spec$summaryFunction)) {
		summary = get(spec$summaryFunction);
		r = summary(s, simulations, pars$order, pars);
	}
	r
}
# Dispatch to runIteration<Method> (runIterationPlain/runIterationCluster)
# according to specification$iterationMethod (default: 'plain').
runIteration = function(s, order = NULL, reverse = F) {
	iterMethod = firstDef(s@specification$iterationMethod, "plain");
	methodName = sprintf("runIteration%s", firstUpper(iterMethod));
	Log(sprintf('Rsimulation: %s', methodName), 2);
	get(methodName)(s, order, reverse);
}
#
# <p> class
#
# specification contains restrictions on parameter combinations, grouping
# restrictions:
# twins: pair parameters as listed (e.g. model simulation, estimation)
# grouping: build final parameters by merging sublists
# conventional group:
# system: parameters other than involved in statistical concepts
# model: specification of the model
# parameters: model parameters
# S4 container for a simulation study: `specification` (options, constraints,
# iteration method), `parameters` (per-parameter value lists), `combinations`
# (index data frame of parameter combinations), `mode` (selected
# specification mode).
# NOTE(review): prototype mode = NULL for a 'character' slot relies on
# NULL coercion - confirm intended.
setClass("Rsimulation",
	representation(specification = "list", parameters = "list", combinations = "data.frame",
		mode = "character"),
	prototype(specification = list(), parameters = list(), combinations = data.frame(), mode = NULL)
);
# Construct an Rsimulation from a global variable named `simulationName`
# (a list with $specification and $parameters); an optional `mode` overlays
# specification$mode[[mode]] onto the specification.
setMethod("initialize", "Rsimulation", function(.Object, simulationName, mode = NULL) {
	s = get(simulationName);
	specification = merge.lists(list(doSave = T, delaySave = F), s$specification);
	specification$name = simulationName;
	parameters = s$parameters;
	# NOTE(review): `&` errors if needsMode is NULL - presumably every
	# specification defines needsMode; confirm.
	if (specification$needsMode & is.null(mode)) {
		stop(con("Need simulation mode [",
			paste(names(specification$mode), collapse = ", "), "]"));
	}
	if (!is.null(mode)) {
		specification = merge.lists(specification, specification$mode[[mode]]);
	}
	.Object@mode = mode;
	.Object@specification = specification;
	.Object@parameters = parameters;
	.Object@combinations = parameterCombinations(specification, parameters);
	.Object
});
#
# RpropertyList.R
#Fri Jan 7 17:40:12 2011
# wrap string for property list
# Quote a string for use in a property list: bare words (only characters
# from the safe set) pass through unchanged; everything else is wrapped in
# double quotes with quotes/backslashes backslash-escaped.
ws = function(s) {
	isBareWord = length(grep('^([_/\\a-zA-Z0-9.]+)$', s)) > 0;
	if (isBareWord) return(s);
	escaped = gsub('([\\"])', '\\\\\\1', s);
	sprintf('"%s"', escaped)
}
# can a string be condensed into a single line
# Collapse a formatted (multi-line, tab-indented) property-list fragment
# into a single line if the condensed text fits into o$screenWidth,
# accounting for the current indentation depth `ident`.
# NOTE(fix): the width estimate previously used nchar(grep("\t", s)), which
# is the character count of the match *index* (and is zero-length on
# tab-free input, crashing the `if`); it now subtracts the tabs themselves,
# which are removed on condensing.
condense = function(s, ident, o) {
	widthCondensed = nchar(gsub("\t", '', s)) + ident * o$tabWidth;
	if (widthCondensed < o$screenWidth) {
		s = gsub("\n", ' ', s);
		s = gsub("\t", '', s);
	}
	s
}
# Recursive serializer behind stringFromProperty: functions are deparsed and
# quoted; scalars are quoted via ws(); named lists become { key = value; ... }
# dictionaries; vectors/anonymous lists become ( e1, e2, ... ) arrays.
# o$kp tracks the current key path (keys joined by '.', array elements as
# [i]); paths listed in o$forceVectors are always serialized as arrays.
stringFromPropertyI = function(obj, ident, o) {
	str = '';
	# indentation strings for the current and the next nesting level
	inS = join(rep("\t", ident), '');
	in1S = join(rep("\t", ident + 1), '');
	if ( class(obj) == 'function' ) {
		str = sprintf('%s%s', str, ws(join(deparse(obj), "\n")))
	} else if ( class(obj) != 'list' & length(obj) == 1 & !(o$kp %in% o$forceVectors)) {
		# <i> data support
		str = sprintf('%s%s', str, ws(obj));
	} else if (class(obj) == 'list' && !is.null(names(obj))) {
		# named list -> dictionary
		hash = sprintf("{\n%s%s;\n%s}", in1S, paste(sapply(names(obj), function(k) {
			o = merge.lists(o, list(kp = sprintf('%s.%s', o$kp, k)));
			r = sprintf('%s = %s', ws(k), stringFromPropertyI(obj[[k]], ident+1, o))
			r
		}), collapse = sprintf(";\n%s", in1S)), inS);
		if (!o$noFormatting) hash = condense(hash, ident, o);
		str = sprintf('%s%s', str, hash);
	} else {	# vector or anonymous list
		obj = as.list(obj);
		array = sprintf("(\n%s%s\n%s)", in1S, if (length(obj) < 1) '' else paste(
			sapply(1:length(obj), function(i) {
				e = obj[[i]];
				o = merge.lists(o, list(kp = sprintf('%s.[%d]', o$kp, i)));
				stringFromPropertyI(e, ident+1, o)
			}), collapse = sprintf(",\n%s", in1S)), inS);
		if (!o$noFormatting) array = condense(array, ident, o);
		str = sprintf('%s%s', str, array);
	}
	str
}
defaults = list(screenWidth = 80, tabWidth = 4, noFormatting = F, kp = '');
# Serialize an R object (lists, vectors, functions, scalars) into NeXT-style
# property-list text; o overrides the formatting `defaults`.
stringFromProperty = function(obj, o = list()) {
	opts = merge.lists(defaults, o);
	text = stringFromPropertyI(obj, 0, opts);
	if (opts$noFormatting) text = gsub("[\n\t]", '', text);
	text
}
# tokens: character vector of tokens
# ti: current token cursor (token index)
# tokens: character vector of tokens
# ti: current token cursor (token index)
# Recursive-descent parser for property-list token streams: '(' ... ')'
# arrays, '{' key = value; ... '}' dictionaries, and (possibly quoted)
# strings. Returns list(pl = parsed value, ti = index of the last token
# consumed), so callers can continue after the parsed value.
propertyFromStringRaw = function(tokens, ti = 1) {
	if (length(tokens) < 1) stop("propertyFromString: out of tokens");
	pl = if (tokens[ti] == '(') {	# we have an array here # ')' (bracket)
		a = NULL;
		repeat {
			ti = ti + 1;	# advance to next token
			if (ti > length(tokens) || tokens[ti] == ')') break;	# <A> empty list
			r = propertyFromStringRaw(tokens, ti);	# sub propertyList
			if (is.list(r$pl)) r$pl = list(r$pl);	# <A> concatanating of lists
			a = c(a, r$pl);
			ti = r$ti + 1;
			if (ti > length(tokens) || tokens[ti] == ')') break;	# <A> returning to list end
			if (tokens[ti] != ',') stop("propertyFromString: expected ',' or ')'");
		}
		if (ti > length(tokens) || tokens[ti] != ')') stop("propertyFromString: no array termination");
		a
	} else if (tokens[ti] == '{') {
		dict = list();
		repeat {
			ti = ti + 1;	# advance to next token
			if (ti > length(tokens) || tokens[ti] == '}') break;
			key = tokens[ti];
			if (tokens[ti + 1] != '=') stop("propertyFromString: expected '='");
			r = propertyFromStringRaw(tokens, ti + 2);
			dict[[key]] = r$pl;
			ti = r$ti + 1;
			if (tokens[ti] != ';') stop("propertyFromString: expected ';'");
		}
		if (ti > length(tokens) || tokens[ti] != '}') stop("propertyFromString: no dict termination");;
		dict
	#} elsif ($token =~ /^<(.*)>$/so) { # we encountered data
	# <N> data not supported
	} else {	# string
		s = tokens[ti];
		# strip surrounding quotes from quoted strings
		# NOTE(review): backslash escapes inside quoted strings are not
		# unescaped here, although ws() escapes them on serialization -
		# confirm round-trip behavior.
		if (substr(s, 1, 1) == '"') s = substr(s, 2, nchar(s) - 1);
		s
	}
	r = list(pl = pl, ti = ti);
	r
}
# Token regex for property-list strings: a bare word (safe characters only)
# or a double-quoted string with backslash escapes.
plStringRE = '(?:(?:[_\\/\\-a-zA-Z0-9.]+)|(?:\"(?:(?:\\\\.)*(?:[^"\\\\]+(?:\\\\.)*)*)\"))';
# C-style /* ... */ comments (non-greedy).
plCommentRE = '(?:/\\*(?:.*?)\\*/)';
# Parse property-list text into an R structure: strip /* */ comments,
# tokenize (strings, brackets, punctuation, <data>), and hand the token
# stream to propertyFromStringRaw.
propertyFromString = function(plistString, o = list()) {
	stripped = gsub(plCommentRE, '', plistString, perl = T);
	tokenRe = sprintf('%s|[(]|[)]|[{]|[}]|[=]|[,]|[;]|<.*?>', plStringRE);
	tokens = fetchRegexpr(tokenRe, stripped);
	propertyFromStringRaw(tokens)$pl
}
#
# Rlinux.R
#Tue May 8 18:05:44 2012
#
# <p> RsowReap.R
#Wed May 7 18:16:23 CEST 2014
# <p> Design
# These classes are meant to implement several Sow/Reap patterns
# Standard Pattern
# r = Reap(expression, returnResult = T);
# print(r$result);
# print(r$yield);
#
# AutoPrint sowed values, reap later
# SowerAddReaper(auto_reaper = printRepeaper, logLevel = 4);
# { Sow(my_tag = 4, logLevel = 3); }
# r = Reap();
#
# for (i in 1:10) {
# Sow(my_number = i);
# Sow(my_greeting = 'hello world');
# }
# # prints list of list w/ each entry beting list(my_number = i, my_greeting = ..)
# print(Reap(stacked = T));
#
# Sow to different categories
# SowerSetCatcher(default = StackingSowCatcherClass);
# SowerSetCatcher(exclusions = SowCatcherClass);
# Sow(1);
# Sow(individuals = 1:10, sow_field = 'exclusions');
# Collect(union, sow_field = 'exclusions'); # do not remove
# Abstract base for reapers in the Sow/Reap framework: defines the reap()
# interface (a no-op here) that concrete catchers/auto-reapers implement.
ReaperAbstractClass = setRefClass('ReaperAbstract',
	fields = list(),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		.self$initFields(...);
		.self
	},
	# receive a sowed seed; overridden by subclasses
	reap = function(...) { }
	#
	# </p> methods
	#
	)
);
#ReaperAbstractClass$accessors(names(ReaperAbstractClass$fields()));
# In-memory sow catcher: accumulates sowed key/value pairs ('seeds') in a
# list and forwards every seed to any registered auto-reapers.
SowCatcherClass = setRefClass('SowCatcher', contains = 'ReaperAbstract',
	fields = list(
		auto_reapers = 'list',
		seeds = 'list'
	),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		auto_reapers <<- list();
		seeds <<- list();
		.self$initFields(...);
		.self
	},
	# deliver a seed to this catcher and all auto-reapers
	sow_raw = function(seed) {
		for (r in c(.self, auto_reapers)) r$reap(seed);
	},
	# Sow(key = value): only the first named argument is used as the seed
	sow = function(...) {
		.self$sow_raw(list(...)[1]);
	},
	# append the seed to the in-memory store
	reap = function(seed) {
		seeds <<- c(seeds, seed);
	},
	last_seed = function() {
		seeds[length(seeds)];
	},
	seed_count = function()length(seeds),
	# all seeds, or the subset with the given names
	Seeds = function(fields = NULL) {
		if (is.null(fields)) seeds else seeds[which.indeces(fields, names(seeds))]
	},
	# overwrite the seed (value and name) at position pos
	set_seed_at = function(seed, pos) {
		seeds[pos] <<- seed;
		names(seeds)[pos] <<- names(seed);
		NULL
	},
	push_reaper = function(r) {
		auto_reapers <<- c(auto_reapers, r);
	 	NULL
	},
	# hooks used by ensembles/persistent subclasses; no-ops here
	register = function(ensemble, field)NULL,
	# <p> end a global SowReap session
	conclude = function()NULL
	#
	# </p> methods
	#
	)
);
SowCatcherClass$accessors(names(SowCatcherClass$fields()));
# Persistent sow catcher: each seed is saved to its own RData file under
# `path` (seed names optionally split into subdirectories via splitRe, files
# prefixed by a zero-padded position); the in-memory `seeds` list only
# tracks the names. A cursor supports re-running a session: seeds seen
# before are overwritten in place instead of appended.
SowCatcherPersistentClass = setRefClass('SowCatcherPersistent', contains = 'SowCatcher',
	fields = list(
		path = 'character',
		splitRe = 'character',
		cursor = 'integer'
	),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		splitRe <<- '';
		callSuper(...);
		cursor <<- 1L;
		.self
	},
	# file path for seed name n at position i (i = 0 for bookkeeping files)
	seed_path_name = function(n, i = length(seeds) + 1) {
		key = if (splitRe != '') splitString(splitRe, n) else n;
		key[1] = Sprintf('%{i}03d_%{k}s', k = key[1]);
		seedPath = Sprintf('%{path}s/%{keyComponents}s.RData', keyComponents = join(key, '/'));
	},
	seed_path = function(seed, i = length(seeds) + 1) .self$seed_path_name(names(seed), i),
	# persist the seed's value to its file
	seed_save = function(seed, i = length(seeds) + 1) {
		seedPath = .self$seed_path(seed, i);
		s = seed[[1]];
		Save(s, file = seedPath);
	},
	# overwrite the persisted seed at position i
	set_seed_at = function(seed, i) {
		.self$seed_save(seed, i);
		if (names(seeds)[i] != names(seed))
			Logs('SowCatcherPersistent: Warning: seed key %{k2}s does not match seed slot %{k1}s',
				k1 = names(seeds)[i], k2 = names(seeds), logLevel = 3);
	},
	# append a new seed: save it, record its name (value NA in memory) and
	# persist the name index under the reserved '__seed_names' entry
	reap_raw = function(seed) {
		.self$seed_save(seed);
		seeds <<- c(seeds, listKeyValue(names(seed), NA));
		save(seeds, file = .self$seed_path_name('__seed_names', 0));
		NULL
	},
	# cursor-aware reap: re-use the slot of a previously seen seed with the
	# same name (resuming a session), otherwise append
	reap = function(seed) {
		if (cursor > .self$seed_count()) {
			.self$reap_raw(seed);
			.self$setCursor(cursor + 1L);
			return(NULL);
		}
		seed_nm = names(seed);
		# <p> locate previous position
		ns = names(.self$getSeeds());
		occs = which(seed_nm == ns[Seq(1, cursor - 1, neg = T)]);
		if (length(occs) == 0) {
			Logs('SowCatcherPersistent: adding seed %{seed_nm}s of class %{cl}s not seen before.',
				cl = class(seed[[1]]), 3);
			.self$reap_raw(seed);
			return(NULL);
		}
		new_cursor = cursor + min(occs) - 1L;
		Logs('SowCatcherPersistent: Skipping to cursor %{new_cursor}s.', 5);
		.self$set_seed_at(seed, new_cursor);
		.self$setCursor(new_cursor + 1L);
	},
	# load seed values back from their RData files
	Seeds = function(fields = NULL) {
		idcs = if (is.null(fields)) Seq(1, length(seeds)) else which.indeces(fields, names(seeds));
		r = lapply(idcs, function(i)get(load(.self$seed_path(seeds[i], i))[1]));
		names(r) = names(seeds)[idcs];
		r
	},
	# attach this catcher to an ensemble: derive the storage path, restore
	# the persisted seed-name index, optionally wipe existing seed files
	register = function(ensemble, field, doReset = F) {
		# <N> if path was not specified yet, try to query from ensemble, should exit on NULL
		if (!length(.self$getPath())) {
			.self$setPath(ensemble$getPath());
			# <p> subpath for this field
			path <<- Sprintf('%{path}s/%{field}s');
		}
		# <p> keep track of seeds
		seedsPath = .self$seed_path_name('__seed_names', 0);
		if (file.exists(seedsPath)) seeds <<- get(load(seedsPath)[1]);
		if (doReset) {
			unlink(sapply(Seq(1, length(seeds)), function(i).self$seed_path(seeds[i], i)));
			if (file.exists(seedsPath)) unlink(seedsPath);
			seeds <<- list();
		}
		NULL
	}
	#
	# </p> methods
	#
	)
);
SowCatcherPersistentClass$accessors(names(SowCatcherPersistentClass$fields()));
# Stack of sow catchers for one sow field: Sow() always targets the topmost
# catcher; push/pop allow nesting of sow/reap scopes.
SowCatcherStackClass = setRefClass('SowCatcherStack',
	fields = list(
		sowCatchers = 'list',
		sowCatcherClass = 'character'
	),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		sowCatchers <<- list();
		sowCatcherClass <<- 'SowCatcher';
		.self$initFields(...);
		.self
	},
	push = function(sowCatcher = getRefClass(.self$sowCatcherClass)$new(), ...) {
		sowCatchers[[length(sowCatchers) + 1]] <<- sowCatcher;
	},
	pop = function() {
		currentCatcher = sowCatchers[[length(sowCatchers)]];
		sowCatchers <<- sowCatchers[-length(sowCatchers)];
		currentCatcher
	},
	# topmost catcher; an empty stack autovivifies a default catcher
	sowCatcher = function() {
		if (!length(sowCatchers)) .self$push();	# autovivify
		sowCatchers[[length(sowCatchers)]]
	},
	# collect seeds from all catchers on the stack
	reap = function(fields = NULL) {
		r = lapply(sowCatchers, function(sc)sc$Seeds(fields))
	},
	register = function(ensemble, sow_field, ...)
		lapply(sowCatchers, function(sc)sc$register(ensemble, sow_field, ...)),
	# conclude in reverse (LIFO) order
	conclude = function()lapply(rev(sowCatchers), function(sc)sc$conclude())
	#
	# </p> methods
	#
	)
);
SowCatcherStackClass$accessors(names(SowCatcherStackClass$fields()));
# Ensemble of sow-catcher stacks keyed by sow field name ('default',
# 'exclusions', ...): routes push/pop/sowCatcher/reap to the per-field stack,
# autovivifying stacks on first push.
SowCatcherEnsembleClass = setRefClass('SowCatcherEnsemble',
	fields = list(
		sowers = 'list',
		sowCatcherClass = 'character'
	),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		sowers <<- list();
		sowCatcherClass <<- 'SowCatcher';
		.self$initFields(...);
		.self
	},
	push = function(sowCatcher = SowCatcherStackClass$new(), sow_field = 'default', ...) {
		# <b> default argument mechanism does not work
		#if (is.null(sowCatcher)) sowCatcher = getRefClass('SowCatcher')$new();
		if (is.null(sowers[[sow_field]])) sowers[[sow_field]] <<- SowCatcherStackClass$new();
		sowers[[sow_field]]$push(sowCatcher)
		sowCatcher$register(.self, sow_field, ...);
	},
	pop = function(sow_field = 'default')sowers[[sow_field]]$pop(),
	sowCatcher = function(sow_field = 'default')sowers[[sow_field]]$sowCatcher(),
	reap = function(sow_field = 'default', fields = NULL) sowers[[sow_field]]$reap(fields),
	conclude = function() sapply(sowers, function(sower)sower$conclude())
	#
	# </p> methods
	#
	)
);
SowCatcherEnsembleClass$accessors(names(SowCatcherEnsembleClass$fields()));
# Persistent variant of SowCatcherEnsemble: every structural change
# (push/pop) is immediately frozen to disk under 'path' so that an
# interrupted session can later be thawed. Serialization goes through the
# project helpers Sprintf()/Save() -- presumably wrappers around
# sprintf()/save(); confirm in the surrounding file.
SowCatcherEnsemblePersistentClass = setRefClass('SowCatcherEnsemblePersistent',
	contains = 'SowCatcherEnsemble',
	fields = list(
		path = 'character'	# directory holding the frozen ensemble
	),
	methods = list(
	#
	# <p> methods
	#
	initialize = function(...) {
		callSuper(...)
		.self
	},
	# Push, then persist the updated ensemble state.
	push = function(sowCatcher = SowCatcherStackClass$new(), sow_field = 'default', ...) {
		r = callSuper(sowCatcher, sow_field, ...);
		.self$freeze();
		r
	},
	# Pop, then persist the updated ensemble state.
	pop = function(sow_field = 'default') {
		r = callSuper(sow_field);
		.self$freeze();
		r
	},
	# Location of the serialized ensemble inside 'path'.
	freeze_path = function()Sprintf('%{path}s/000_ensemble.RData'),
	# Serialize the whole ensemble to disk.
	freeze = function() {
		Save(.self, file = freeze_path());
		NULL
	},
	# Restore a frozen ensemble from disk; each per-field stack recovers
	# its own state by re-registering with the thawed ensemble.
	thaw = function() {
		e = get(load(freeze_path())[1]);
		# SowCatchers have to recover their own state
		lapply(names(e$sowers), function(n)e$sowers[[n]]$register(e, n));
		e
	}
	#
	# </p> methods
	#
	)
);
# Generate get/set accessors for all declared fields.
SowCatcherEnsemblePersistentClass$accessors(names(SowCatcherEnsemblePersistentClass$fields()));
# Create the shared Sow/Reap registry environment exactly once. The guard
# ensures that re-sourcing this file does not discard catchers already
# registered in a running session. (The original code followed this guard
# with an unconditional 'SowReap_env__ = new.env()' that reset the registry
# on every source and made the guard dead code; that line is removed.)
if (!exists('SowReap_env__')) SowReap_env__ = new.env();
# Create a fresh sow/reap ensemble of the given class and install it as the
# session-wide ensemble inside SowReap_env__. Returns the new ensemble.
SowReapInit = function(ensembleClass = 'SowCatcherEnsemble', ...) {
	generator = getRefClass(ensembleClass);
	newEnsemble = generator$new(...);
	assign('sowEnsemble', newEnsemble, envir = SowReap_env__);
	newEnsemble
}
# Conclude the session-wide ensemble (delegates to every registered catcher).
SowReapConclude = function() {
	ensemble = sowReapEnsemble();
	ensemble$conclude()
}
# Fetch the session-wide ensemble, initializing it lazily on first access.
sowReapEnsemble = function() {
	if (!exists('sowEnsemble', envir = SowReap_env__)) SowReapInit();
	get('sowEnsemble', envir = SowReap_env__)
}
# Register a fresh catcher of class 'sowCatcherClass' for every name in
# 'sow_field' on the session-wide ensemble. Returns NULL.
SowReapCreateField = function(sow_field, sowCatcherClass = 'SowCatcher', ...) {
	ensemble = sowReapEnsemble();
	generator = getRefClass(sowCatcherClass);	# class lookup hoisted out of the loop
	for (fieldName in sow_field) {
		ensemble$push(generator$new(), sow_field = fieldName, ...);
	}
	NULL
}
# Pop the active catcher for 'sow_field' and return its accumulated seeds.
SowReapReapField = function(sow_field) {
	sowReapEnsemble()$pop(sow_field)$getSeeds()
}
# Sow values into the active catcher of 'sow_field' on the session ensemble.
Sow = function(..., sow_field = 'default') {
	ensemble = sowReapEnsemble();
	ensemble$sowCatcher(sow_field = sow_field)$sow(...)
}
# Collect values sown via Sow(). Two modes:
#  - without 'expr': reap the seeds accumulated so far on the current catcher
#    stack of 'sow_field' (optionally restricted to 'fields');
#  - with 'expr': push a fresh catcher, evaluate 'expr' in 'envir', then pop
#    the catcher and return only the seeds sown while 'expr' ran.
# 'auto_unlist' unwraps the result when the stack holds a single catcher;
# 'vivify' pads requested-but-never-sown fields with NULL entries.
# Relies on the project helpers unlist.n() and List() -- defined elsewhere.
Reap = function(expr, sow_field = 'default', fields = NULL, envir = parent.frame(), auto_unlist = T,
	vivify = F) {
	e = sowReapEnsemble();
	r = if (missing(expr)) {
		r = e$reap(sow_field, fields = fields);
		if (vivify) {
			r = lapply(r, function(e) {
				# fields that were requested but never sown become NULL entries
				tbVivified = setdiff(fields, names(e));
				e = c(e, unlist.n(lapply(tbVivified, function(n)List(NULL, names_ = n)), 1));
				e
			});
		}
		if (auto_unlist && length(r) == 1) r = r[[1]];
		r
	} else {
		# scoped mode: catch only the seeds sown while evaluating 'expr'
		catcher = getRefClass(e$getSowCatcherClass())$new();
		e$push(catcher, sow_field = sow_field);
		eval(expr, envir = envir);
		e$pop(sow_field)$Seeds(fields);
	}
	r
}
# Reap seeds previously frozen to disk by a SowCatcherEnsemblePersistent.
# 'path' is the directory holding 000_ensemble.RData; the ensemble is thawed
# (catchers re-register) before the seeds of 'sow_field' are collected.
ReapFromDisk = function(path, sow_field = 'default', fields = NULL, auto_unlist = T,
	ensembleClass = 'SowCatcherEnsemblePersistent', vivify = F) {
	e = getRefClass(ensembleClass)$new(path = path);
	e = e$thaw();
	r = e$reap(sow_field, fields = fields);
	if (vivify) {
		r = lapply(r, function(e) {
			tbVivified = setdiff(fields, names(e));
			# NOTE(review): unlike Reap(), the vivified entries are not passed
			# through unlist.n(), so they remain nested one level deeper --
			# confirm whether this asymmetry is intended.
			e = c(e, lapply(tbVivified, function(n)List(NULL, names_ = n)));
			e
		});
	}
	if (auto_unlist && length(r) == 1) r = r[[1]];
	r
}
#
# RparallelTools.R
#Fri Jul 26 09:13:16 2013
#
# <p> interface functions
#
#' Create a new environment, optionally pre-populated with named values.
#'
#' @param hash Use hashing for the environment (passed to \code{new.env}).
#' @param parent Parent of the new environment (defaults to the caller's frame).
#' @param size Initial hash-table size hint (passed to \code{new.env}).
#' @param content Named list; each element is assigned into the new
#'   environment under its name. Assumes all elements are named.
#' @return The newly created environment.
Env.new = function(hash = TRUE, parent = parent.frame(), size = 29L, content = list()) {
	e = new.env(hash = hash, parent = parent, size = size);
	# list2env() assigns every named element of 'content' into 'e' in one
	# call, replacing the previous per-name loop over a project helper.
	if (length(content) > 0) list2env(content, envir = e);
	e
}
#' Create a placeholder for an object to be loaded later
#'
#' @param path File system path to the file containing a saved R data structure
#'
#' @return A 'ParallelizeDelayedLoad' S4 object (class declared elsewhere in
#'   this file) wrapping \code{path}; the data itself is only read when the
#'   placeholder is resolved, e.g. via \code{delayed_load_dummy}.
delayed_load = function(path) {
	new('ParallelizeDelayedLoad', path)
}
# Eagerly load a saved R object: load() restores the file's objects into the
# function frame and returns their names; the first named object is returned.
delayed_load_dummy = function(path) {
	restoredNames = load(path)
	get(restoredNames[1])
}
|
# Example for rEDM::ccm_means(): run convergent cross mapping (CCM) of
# anchovy landings against sea-surface temperature, then average the
# cross-map output over samples for each library size.
library(rEDM)
### Name: ccm_means
### Title: Take output from ccm and compute means as a function of library
### size.
### Aliases: ccm_means
### ** Examples
data("sardine_anchovy_sst")
# Cross-map "anchovy" onto "np_sst" at embedding dimension E = 3,
# drawing 100 random libraries for each library size in 10..80.
anchovy_xmap_sst <- ccm(sardine_anchovy_sst, E = 3,
    lib_column = "anchovy", target_column = "np_sst",
    lib_sizes = seq(10, 80, by = 10), num_samples = 100)
# Mean cross-map statistics per library size.
a_xmap_t_means <- ccm_means(anchovy_xmap_sst)
|
/data/genthat_extracted_code/rEDM/examples/ccm_means.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 404
|
r
|
# Example for rEDM::ccm_means(): run convergent cross mapping (CCM) of
# anchovy landings against sea-surface temperature, then average the
# cross-map output over samples for each library size.
library(rEDM)
### Name: ccm_means
### Title: Take output from ccm and compute means as a function of library
### size.
### Aliases: ccm_means
### ** Examples
data("sardine_anchovy_sst")
# Cross-map "anchovy" onto "np_sst" at embedding dimension E = 3,
# drawing 100 random libraries for each library size in 10..80.
anchovy_xmap_sst <- ccm(sardine_anchovy_sst, E = 3,
    lib_column = "anchovy", target_column = "np_sst",
    lib_sizes = seq(10, 80, by = 10), num_samples = 100)
# Mean cross-map statistics per library size.
a_xmap_t_means <- ccm_means(anchovy_xmap_sst)
|
# "Exercise"
# 1. Create a vector named 'ventas' with the values 120, 140, 90
# 2. Assign "enero", "febrero", "marzo" as the element names of the vector
# 3. Compute the MEDIAN of sales over the 3 months
# 4. Create a new vector with the elements greater than 100
# 5. Find which month has the highest sales
# SOLUTION
# 1.
ventas <- c(120, 140, 90)
# 2.
meses <- c('enero', 'febrero', 'marzo')
names(ventas) <- meses
print(ventas)
# 3. FIX: the exercise asks for the median, but mean() was used originally.
mediana <- median(ventas)
print(mediana)
# 4. logical filter keeps only months with sales above 100
filtro <- ventas > 100
ventasMayyores100 <- ventas[filtro]
print(ventasMayyores100)
# 5. the month whose sales equal the maximum
filtroMax <- ventas == max(ventas)
print(ventas[filtroMax])
|
/ejercicio1.R
|
no_license
|
BOTOOM/learn-R
|
R
| false
| false
| 638
|
r
|
# "Exercise"
# 1. Create a vector named 'ventas' with the values 120, 140, 90
# 2. Assign "enero", "febrero", "marzo" as the element names of the vector
# 3. Compute the MEDIAN of sales over the 3 months
# 4. Create a new vector with the elements greater than 100
# 5. Find which month has the highest sales
# SOLUTION
# 1.
ventas <- c(120, 140, 90)
# 2.
meses <- c('enero', 'febrero', 'marzo')
names(ventas) <- meses
print(ventas)
# 3. FIX: the exercise asks for the median, but mean() was used originally.
mediana <- median(ventas)
print(mediana)
# 4. logical filter keeps only months with sales above 100
filtro <- ventas > 100
ventasMayyores100 <- ventas[filtro]
print(ventasMayyores100)
# 5. the month whose sales equal the maximum
filtroMax <- ventas == max(ventas)
print(ventas[filtroMax])
|
# Scrape the reference list of state primary schools (SD) in Kota Bekasi from
# the Indonesian Ministry of Education site, geocode each school with the
# Google Geocoding API, and render 1 km school-zoning circles on a leaflet map.
rm(list = ls())
library(rvest)
library(dplyr) # FIX: filter()/mutate() below are dplyr verbs; dplyr was never loaded
setwd('D:/Project_R/Kamis Data/SD Bekasi')
# Collect links to the sub-district (kecamatan) pages of Kota Bekasi
url = 'https://referensi.data.kemdikbud.go.id/index11.php?kode=026500&level=2'
text = read_html(url) %>% html_nodes('a') %>% html_text()
links = read_html(url) %>% html_nodes('a') %>% html_attr('href')
data = data.frame(text, links)
data = data %>% filter(grepl('kec.', text, ignore.case = T)) %>%
  mutate(links = paste('https://referensi.data.kemdikbud.go.id/', links, sep = ''))
# Scrape the school table of every sub-district page and stack them
i = 1
data.new = read_html(data$links[i]) %>% html_table(fill = T)
data.new = data.new[[2]]
for (i in 2:length(data$links)) {
  dummy = read_html(data$links[i]) %>% html_table(fill = T)
  dummy = dummy[[2]]
  data.new = rbind(data.new, dummy)
}
# Pull helper functions (column-name cleaner) from GitHub
library(devtools)
url = 'https://raw.githubusercontent.com/ikanx101/belajaR/master/All%20Func.R'
source_url(url)
# Clean up column names
colnames(data.new) = tolong.bersihin.judul.donk(data.new)
# Keep state (non-"SWASTA") primary schools only
data.new = data.new %>% filter(status != 'SWASTA') %>% filter(grepl('sd', nama.satuan.pendidikan, ignore.case = T))
# Build a full address string per school for geocoding
alamat = paste(data.new$nama.satuan.pendidikan, data.new$alamat, data.new$kelurahan, ' kota bekasi, indonesia', sep = ',')
# Geocode every school
library(googleway)
lat = c()
lng = c() # FIX: was 'long', but the loop and the data.frame below use 'lng'
key = ''  # ENTER YOUR KEY HERE (FIX: a bare 'key =' would have consumed the following for-loop as its value)
for (i in 1:length(alamat)) {
  hasil = google_geocode(address = alamat[i], key = key)
  if (length(hasil$results) != 0) {
    lat[i] = hasil$results$geometry$location$lat
    lng[i] = hasil$results$geometry$location$lng
  } else {
    lat[i] = NA
    lng[i] = NA
  }
}
# Combine the results
result = data.frame(data.new$nama.satuan.pendidikan, alamat, lat, lng)
# Build the map: a 1 km zoning circle around every school
library(leaflet)
map = leaflet() %>% addTiles() %>% addCircles(lat = result$lat, lng = result$lng, radius = 1000)
library(htmlwidgets)
saveWidget(map, 'Zonasi di Bekasi.html')
#passingthroughresearcher.wordpress.com
|
/Bukan Infografis/Infografis/Zonasi SD di Bekasi.R
|
no_license
|
ikanx101/belajaR
|
R
| false
| false
| 1,850
|
r
|
# Scrape the reference list of state primary schools (SD) in Kota Bekasi from
# the Indonesian Ministry of Education site, geocode each school with the
# Google Geocoding API, and render 1 km school-zoning circles on a leaflet map.
rm(list = ls())
library(rvest)
library(dplyr) # FIX: filter()/mutate() below are dplyr verbs; dplyr was never loaded
setwd('D:/Project_R/Kamis Data/SD Bekasi')
# Collect links to the sub-district (kecamatan) pages of Kota Bekasi
url = 'https://referensi.data.kemdikbud.go.id/index11.php?kode=026500&level=2'
text = read_html(url) %>% html_nodes('a') %>% html_text()
links = read_html(url) %>% html_nodes('a') %>% html_attr('href')
data = data.frame(text, links)
data = data %>% filter(grepl('kec.', text, ignore.case = T)) %>%
  mutate(links = paste('https://referensi.data.kemdikbud.go.id/', links, sep = ''))
# Scrape the school table of every sub-district page and stack them
i = 1
data.new = read_html(data$links[i]) %>% html_table(fill = T)
data.new = data.new[[2]]
for (i in 2:length(data$links)) {
  dummy = read_html(data$links[i]) %>% html_table(fill = T)
  dummy = dummy[[2]]
  data.new = rbind(data.new, dummy)
}
# Pull helper functions (column-name cleaner) from GitHub
library(devtools)
url = 'https://raw.githubusercontent.com/ikanx101/belajaR/master/All%20Func.R'
source_url(url)
# Clean up column names
colnames(data.new) = tolong.bersihin.judul.donk(data.new)
# Keep state (non-"SWASTA") primary schools only
data.new = data.new %>% filter(status != 'SWASTA') %>% filter(grepl('sd', nama.satuan.pendidikan, ignore.case = T))
# Build a full address string per school for geocoding
alamat = paste(data.new$nama.satuan.pendidikan, data.new$alamat, data.new$kelurahan, ' kota bekasi, indonesia', sep = ',')
# Geocode every school
library(googleway)
lat = c()
lng = c() # FIX: was 'long', but the loop and the data.frame below use 'lng'
key = ''  # ENTER YOUR KEY HERE (FIX: a bare 'key =' would have consumed the following for-loop as its value)
for (i in 1:length(alamat)) {
  hasil = google_geocode(address = alamat[i], key = key)
  if (length(hasil$results) != 0) {
    lat[i] = hasil$results$geometry$location$lat
    lng[i] = hasil$results$geometry$location$lng
  } else {
    lat[i] = NA
    lng[i] = NA
  }
}
# Combine the results
result = data.frame(data.new$nama.satuan.pendidikan, alamat, lat, lng)
# Build the map: a 1 km zoning circle around every school
library(leaflet)
map = leaflet() %>% addTiles() %>% addCircles(lat = result$lat, lng = result$lng, radius = 1000)
library(htmlwidgets)
saveWidget(map, 'Zonasi di Bekasi.html')
#passingthroughresearcher.wordpress.com
|
# Process the Kleijn 2015 database entry "42_MiaParkApple_2009" (Mia Park,
# apple orchards, New York, USA, 2009) into the OBservData standard format:
# one insect_sampling CSV and one field_level_data CSV.
library(tidyverse)
library(sp) #Transforming latitude and longitude
library("iNEXT")
library(openxlsx)
#library(readxl)
library(parzer) #parse coordinates
dir_ini <- getwd()
##########################
#Data: 42_MiaParkApple_2009
##########################
data_raw <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/ForSTEP_AppleNY2009_11.xlsx",
                      sheet = "2009 spp by site")
# Fix organisms' names: the first sheet rows hold species/subgenus labels
# that belong to the genus column headers.
initial_column_names <- names(data_raw)
genus_names <- initial_column_names[7:length(initial_column_names)]
species_names <- as.character(data_raw[1,7:ncol(data_raw)])
subgenus_names <- as.character(data_raw[2,7:ncol(data_raw)])
full_name <- genus_names
for (i in 1:length(genus_names)){
  full_name[i] <- paste(full_name[i],species_names[i],sep = " ")
}
# Re-read, skipping the label rows, and apply the reconstructed names
data_raw <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/ForSTEP_AppleNY2009_11.xlsx",
                      sheet = "2009 spp by site", startRow = 3)
# Replace the old column names
colnames(data_raw) <- c(c("crop","sampling_year","latitude","longitude","site_id","aux"),
                        full_name)
data_raw <- as_tibble(data_raw)
# Filter data by year
data_raw <- data_raw %>% filter(sampling_year==2009)
# There should be 12 sites
data_raw %>% group_by(site_id) %>% count()
##############
# Data site
##############
data.site <- data_raw %>% select("site_id","crop","sampling_year","latitude","longitude")
# We add data site ID
data.site$study_id <- "Mia_Park_Malus_domestica_USA_2009"
data.site$crop <- "Malus domestica"
data.site$variety <- NA
data.site$management <- NA
data.site$country <- "USA"
data.site$X_UTM <- NA
data.site$Y_UTM <- NA
data.site$zone_UTM <- NA
data.site$sampling_start_month <- 4
data.site$sampling_end_month <- 5
data.site$sampling_year <- 2009
data.site$field_size <- NA
data.site$yield <- NA
data.site$yield_units <- NA
data.site$yield2 <- NA
data.site$yield2_units <- NA
data.site$yield_treatments_no_pollinators <- NA
# FIX: this line originally duplicated the no_pollinators assignment; the
# pollen_supplement column was never initialized.
data.site$yield_treatments_pollen_supplement <- NA
data.site$yield_treatments_no_pollinators2 <- NA
data.site$yield_treatments_pollen_supplement2 <- NA
data.site$fruits_per_plant <- NA
data.site$fruit_weight <- NA
data.site$plant_density <- NA
data.site$seeds_per_fruit <- NA
data.site$seeds_per_plant <- NA
data.site$seed_weight <- NA
data.site$Publication <- "10.1038/ncomms8414"
data.site$Credit <- "Mia Park"
data.site$Email_contact <- "mia.park@ndsu.edu"
# Field size and management come from a separate spreadsheet
data.area_mng <-
  read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/Park_field size mgmt.xlsx") %>%
  select(orchard,ha,management) %>% rename(site_id=orchard,field_size=ha)
data.area_mng$management[data.area_mng$management=="Organic"] <- "organic"
data.site <- data.site %>% left_join(data.area_mng,by="site_id") %>%
  rename(management=management.y,field_size=field_size.y) %>%
  select(-management.x,-field_size.x)
###########################
# SAMPLING DATA
###########################
data_raw_obs <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/Park_Temporal variation meta-data template v2020.xlsx",
                          sheet = "Abundance data",startRow = 3)
# Extract sampling months
data_raw_obs$Sampling_Date <- openxlsx::convertToDate(data_raw_obs$Sampling_Date)
data_raw_obs %>% filter(Year==2009) %>% select(Site,Sampling_Date,Number_Censuses) %>% unique()
# Each census covers 100 m2 in 15 min; totals scale with the census count
data_raw_obs_year <- data_raw_obs %>% filter(Year==2009) %>%
  mutate(total_sampled_area=100*Number_Censuses,
         total_sampled_time=Number_Censuses*15,
         total_sampled_flowers=Number_Censuses*Floral.abundance) %>%
  select(Site,Species,Abundance,total_sampled_area,
         total_sampled_time,total_sampled_flowers) %>%
  rename(site_id=Site,Organism_ID=Species,abundance=Abundance)
data_raw_obs_year %>% group_by(site_id,Organism_ID) %>% count() %>% filter(n>1)
#Add guild via guild list
gild_list_raw <- read_csv("Processing_files/Thesaurus_Pollinators/Table_organism_guild_META.csv")
gild_list <- gild_list_raw %>% select(-Family) %>% unique()
list_organisms <- select(data_raw_obs_year,Organism_ID) %>% unique() %>% filter(!is.na(Organism_ID))
list_organisms_guild <- list_organisms %>% left_join(gild_list,by=c("Organism_ID"))
#Check NA's in guild
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
library(taxize)
# Organisms missing from the thesaurus: pattern-match the obvious genera,
# everything else defaults to other_wild_bees (bee-only sampling).
list_organisms_guild$Guild[grepl("bombus",list_organisms_guild$Organism_ID,ignore.case = TRUE)] <- "bumblebees"
list_organisms_guild$Guild[grepl("mellif",list_organisms_guild$Organism_ID,ignore.case = TRUE)] <- "honeybees"
list_organisms_guild$Guild[is.na(list_organisms_guild$Guild)] <- "other_wild_bees"
#Sanity Checks
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
#Add guild to observations
data_obs_guild <- data_raw_obs_year %>% left_join(list_organisms_guild, by = "Organism_ID")
#######################
# INSECT SAMPLING
#######################
# Remove entries with zero abundance
data_obs_guild <- data_obs_guild %>% filter(abundance>0)
data.sampling <- data_obs_guild %>% select(site_id, total_sampled_time, total_sampled_area) %>%
  unique()
data.site <- data.site %>% left_join(data.sampling,by="site_id")
insect_sampling <- tibble(
  study_id = "Mia_Park_Malus_domestica_USA_2009",
  site_id = data_obs_guild$site_id,
  pollinator = data_obs_guild$Organism_ID,
  guild = data_obs_guild$Guild,
  sampling_method = "netting",
  abundance = data_obs_guild$abundance,
  total_sampled_area = data_obs_guild$total_sampled_area,
  total_sampled_time = data_obs_guild$total_sampled_time,
  total_sampled_flowers = data_obs_guild$total_sampled_flowers,
  Description = "Multiple 15-minute (aerial netting) transects were conducted at each site along blooming tree rows. During each survey, collectors walked a steady pace along 50 m of each side of two-adjacent tree rows and netted all bees observed to be visiting apple blossoms."
)
#MIA: Please delete the following field sites from our data set: 2009 Hemlock01
insect_sampling <- insect_sampling %>% filter(!site_id %in% c("Hemlock01"))
#sanity check
insect_sampling$site_id %>% unique()
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
write_csv(insect_sampling, "Processing_files/Datasets_storage/insect_sampling_Mia_Park_Malus_domestica_USA_2009.csv")
#setwd(dir_ini)
#######################################
# ABUNDANCE
#######################################
# Add site observations
data_obs_guild_2 <- data_obs_guild %>%
  group_by(site_id,Organism_ID,Guild) %>% summarise_all(sum,na.rm=TRUE)
abundance_aux <- data_obs_guild_2 %>%
  group_by(site_id,Guild) %>% count(wt=abundance) %>%
  spread(key=Guild, value=n)
names(abundance_aux)
# There are "bumblebees" "other_wild_bees" "honeybees"
# GUILDS:honeybees, bumblebees, other wild bees, syrphids, humbleflies,
# other flies, beetles, non-bee hymenoptera, lepidoptera, and other
abundance_aux <- abundance_aux %>% mutate(lepidoptera=0,beetles=0,other_flies=0,
                                          syrphids=0,other=0,humbleflies=0,
                                          non_bee_hymenoptera=0,
                                          total=0)
abundance_aux[is.na(abundance_aux)] <- 0
abundance_aux$total <- rowSums(abundance_aux[,c(2:ncol(abundance_aux))])
data.site <- data.site %>% left_join(abundance_aux, by = "site_id")
######################################################
# ESTIMATING CHAO INDEX
######################################################
abundace_field <- data_obs_guild %>%
  select(site_id,Organism_ID,abundance)%>%
  group_by(site_id,Organism_ID) %>% count(wt=abundance)
abundace_field <- abundace_field %>% spread(key=Organism_ID,value=n)
abundace_field[is.na(abundace_field)] <- 0
abundace_field$r_obser <- 0
abundace_field$r_chao <- 0
# Per-site observed richness and Chao1 estimate over the species columns
for (i in 1:nrow(abundace_field)) {
  x <- as.numeric(abundace_field[i,2:(ncol(abundace_field)-2)])
  chao <- ChaoRichness(x, datatype = "abundance", conf = 0.95)
  abundace_field$r_obser[i] <- chao$Observed
  abundace_field$r_chao[i] <- chao$Estimator
}
# Load our estimation for taxonomic resolution
percentage_species_morphos <- 0.9
richness_aux <- abundace_field %>% select(site_id,r_obser,r_chao)
richness_aux <- richness_aux %>% rename(observed_pollinator_richness=r_obser,
                                        other_pollinator_richness=r_chao) %>%
  mutate(other_richness_estimator_method="Chao1",richness_restriction="only bees")
# Discard richness estimates when taxonomic resolution is too coarse
if (percentage_species_morphos < 0.8){
  richness_aux[,2:ncol(richness_aux)] <- NA
}
data.site <- data.site %>% left_join(richness_aux, by = "site_id")
###############################
# FIELD LEVEL DATA
###############################
field_level_data <- tibble(
  study_id = data.site$study_id,
  site_id = data.site$site_id,
  crop = data.site$crop,
  variety = data.site$variety,
  management = data.site$management,
  country = data.site$country,
  latitude = data.site$latitude,
  longitude = data.site$longitude,
  X_UTM=data.site$X_UTM,
  Y_UTM=data.site$Y_UTM,
  zone_UTM=data.site$zone_UTM,
  sampling_start_month = data.site$sampling_start_month,
  sampling_end_month = data.site$sampling_end_month,
  sampling_year = data.site$sampling_year,
  field_size = data.site$field_size,
  yield=data.site$yield,
  yield_units=data.site$yield_units,
  yield2=data.site$yield2,
  yield2_units=data.site$yield2_units,
  yield_treatments_no_pollinators=data.site$yield_treatments_no_pollinators,
  # FIX: pollen_supplement was copy-pasted from the no_pollinators column
  yield_treatments_pollen_supplement=data.site$yield_treatments_pollen_supplement,
  yield_treatments_no_pollinators2=data.site$yield_treatments_no_pollinators2,
  yield_treatments_pollen_supplement2=data.site$yield_treatments_pollen_supplement2,
  fruits_per_plant=data.site$fruits_per_plant,
  fruit_weight= data.site$fruit_weight,
  plant_density=data.site$plant_density,
  seeds_per_fruit=data.site$seeds_per_fruit,
  seeds_per_plant=data.site$seeds_per_plant,
  seed_weight=data.site$seed_weight,
  observed_pollinator_richness=data.site$observed_pollinator_richness,
  other_pollinator_richness=data.site$other_pollinator_richness,
  other_richness_estimator_method=data.site$other_richness_estimator_method,
  richness_restriction = data.site$richness_restriction,
  abundance = data.site$total,
  ab_honeybee = data.site$honeybees,
  ab_bombus = data.site$bumblebees,
  ab_wildbees = data.site$other_wild_bees,
  ab_syrphids = data.site$syrphids,
  ab_humbleflies= data.site$humbleflies,
  ab_other_flies= data.site$other_flies,
  ab_beetles=data.site$beetles,
  ab_lepidoptera=data.site$lepidoptera,
  ab_nonbee_hymenoptera=data.site$non_bee_hymenoptera,
  ab_others = data.site$other,
  total_sampled_area = data.site$total_sampled_area,
  total_sampled_time = data.site$total_sampled_time,
  visitation_rate_units = NA,
  visitation_rate = NA,
  visit_honeybee = NA,
  visit_bombus = NA,
  visit_wildbees = NA,
  visit_syrphids = NA,
  visit_humbleflies = NA,
  visit_other_flies = NA,
  visit_beetles = NA,
  visit_lepidoptera = NA,
  visit_nonbee_hymenoptera = NA,
  visit_others = NA,
  Publication = data.site$Publication,
  Credit = data.site$Credit,
  Email_contact = data.site$Email_contact
)
#MIA: Please delete the following field sites from our data set: 2009 Hemlock01
field_level_data <- field_level_data %>% filter(!site_id %in% c("Hemlock01"))
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
write_csv(field_level_data, "Processing_files/Datasets_storage/field_level_data_Mia_Park_Malus_domestica_USA_2009.csv")
#setwd(dir_ini)
|
/Processing_files/Datasets_Processing/KLEIJN 2015 DATABASE/Mia_Park_Malus_domestica_USA_2009_NEW2.R
|
permissive
|
ibartomeus/OBservData
|
R
| false
| false
| 11,671
|
r
|
library(tidyverse)
library(sp) #Transforming latitude and longitude
library("iNEXT")
library(openxlsx)
#library(readxl)
library(parzer) #parse coordinates
dir_ini <- getwd()
##########################
#Data: 42_MiaParkApple_2009
##########################
data_raw <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/ForSTEP_AppleNY2009_11.xlsx",
sheet = "2009 spp by site")
# Fix organisms' names
initial_column_names <- names(data_raw)
genus_names <- initial_column_names[7:length(initial_column_names)]
species_names <- as.character(data_raw[1,7:ncol(data_raw)])
subgenus_names <- as.character(data_raw[2,7:ncol(data_raw)])
full_name <- genus_names
for (i in 1:length(genus_names)){
full_name[i] <- paste(full_name[i],species_names[i],sep = " ")
}
data_raw <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/ForSTEP_AppleNY2009_11.xlsx",
sheet = "2009 spp by site", startRow = 3)
# Replace the old column names
colnames(data_raw) <- c(c("crop","sampling_year","latitude","longitude","site_id","aux"),
full_name)
data_raw <- as_tibble(data_raw)
# Filter data by year
data_raw <- data_raw %>% filter(sampling_year==2009)
# There should be 12 sites
data_raw %>% group_by(site_id) %>% count()
##############
# Data site
##############
data.site <- data_raw %>% select("site_id","crop","sampling_year","latitude","longitude")
# We add data site ID
data.site$study_id <- "Mia_Park_Malus_domestica_USA_2009"
data.site$crop <- "Malus domestica"
data.site$variety <- NA
data.site$management <- NA
data.site$country <- "USA"
data.site$X_UTM <- NA
data.site$Y_UTM <- NA
data.site$zone_UTM <- NA
data.site$sampling_start_month <- 4
data.site$sampling_end_month <- 5
data.site$sampling_year <- 2009
data.site$field_size <- NA
data.site$yield <- NA
data.site$yield_units <- NA
data.site$yield2 <- NA
data.site$yield2_units <- NA
data.site$yield_treatments_no_pollinators <- NA
data.site$yield_treatments_no_pollinators <- NA
data.site$yield_treatments_no_pollinators2 <- NA
data.site$yield_treatments_pollen_supplement2 <- NA
data.site$fruits_per_plant <- NA
data.site$fruit_weight <- NA
data.site$plant_density <- NA
data.site$seeds_per_fruit <- NA
data.site$seeds_per_plant <- NA
data.site$seed_weight <- NA
data.site$Publication <- "10.1038/ncomms8414"
data.site$Credit <- "Mia Park"
data.site$Email_contact <- "mia.park@ndsu.edu"
data.area_mng <-
read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/Park_field size mgmt.xlsx") %>%
select(orchard,ha,management) %>% rename(site_id=orchard,field_size=ha)
data.area_mng$management[data.area_mng$management=="Organic"] <- "organic"
data.site <- data.site %>% left_join(data.area_mng,by="site_id") %>%
rename(management=management.y,field_size=field_size.y) %>%
select(-management.x,-field_size.x)
###########################
# SAMPLING DATA
###########################
data_raw_obs <- read.xlsx("Processing_files/Datasets_processing/KLEIJN 2015 DATABASE/42_44_MiaParkApple_2009_2010_2011/Park_Temporal variation meta-data template v2020.xlsx",
sheet = "Abundance data",startRow = 3)
# Extract sampling months
data_raw_obs$Sampling_Date <- openxlsx::convertToDate(data_raw_obs$Sampling_Date)
data_raw_obs %>% filter(Year==2009) %>% select(Site,Sampling_Date,Number_Censuses) %>% unique()
data_raw_obs_year <- data_raw_obs %>% filter(Year==2009) %>%
mutate(total_sampled_area=100*Number_Censuses,
total_sampled_time=Number_Censuses*15,
total_sampled_flowers=Number_Censuses*Floral.abundance) %>%
select(Site,Species,Abundance,total_sampled_area,
total_sampled_time,total_sampled_flowers) %>%
rename(site_id=Site,Organism_ID=Species,abundance=Abundance)
data_raw_obs_year %>% group_by(site_id,Organism_ID) %>% count() %>% filter(n>1)
#Add guild via guild list
gild_list_raw <- read_csv("Processing_files/Thesaurus_Pollinators/Table_organism_guild_META.csv")
gild_list <- gild_list_raw %>% select(-Family) %>% unique()
list_organisms <- select(data_raw_obs_year,Organism_ID) %>% unique() %>% filter(!is.na(Organism_ID))
list_organisms_guild <- list_organisms %>% left_join(gild_list,by=c("Organism_ID"))
#Check NA's in guild
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
library(taxize)
list_organisms_guild$Guild[grepl("bombus",list_organisms_guild$Organism_ID,ignore.case = TRUE)] <- "bumblebees"
list_organisms_guild$Guild[grepl("mellif",list_organisms_guild$Organism_ID,ignore.case = TRUE)] <- "honeybees"
list_organisms_guild$Guild[is.na(list_organisms_guild$Guild)] <- "other_wild_bees"
#Sanity Checks
list_organisms_guild %>% filter(is.na(Guild)) %>% group_by(Organism_ID) %>% count()
#Add guild to observations
data_obs_guild <- data_raw_obs_year %>% left_join(list_organisms_guild, by = "Organism_ID")
#######################
# INSECT SAMPLING
#######################
# Remove entries with zero abundance
data_obs_guild <- data_obs_guild %>% filter(abundance>0)
data.sampling <- data_obs_guild %>% select(site_id, total_sampled_time, total_sampled_area) %>%
unique()
data.site <- data.site %>% left_join(data.sampling,by="site_id")
insect_sampling <- tibble(
study_id = "Mia_Park_Malus_domestica_USA_2009",
site_id = data_obs_guild$site_id,
pollinator = data_obs_guild$Organism_ID,
guild = data_obs_guild$Guild,
sampling_method = "netting",
abundance = data_obs_guild$abundance,
total_sampled_area = data_obs_guild$total_sampled_area,
total_sampled_time = data_obs_guild$total_sampled_time,
total_sampled_flowers = data_obs_guild$total_sampled_flowers,
Description = "Multiple 15-minute (aerial netting) transects were conducted at each site along blooming tree rows. During each survey, collectors walked a steady pace along 50 m of each side of two-adjacent tree rows and netted all bees observed to be visiting apple blossoms."
)
#MIA: Please delete the following field sites from our data set: 2009 Hemlock01
insect_sampling <- insect_sampling %>% filter(!site_id %in% c("Hemlock01"))
#sanity check
insect_sampling$site_id %>% unique()
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
write_csv(insect_sampling, "Processing_files/Datasets_storage/insect_sampling_Mia_Park_Malus_domestica_USA_2009.csv")
#setwd(dir_ini)
#######################################
# ABUNDANCE
#######################################
# Add site observations
data_obs_guild_2 <- data_obs_guild %>%
group_by(site_id,Organism_ID,Guild) %>% summarise_all(sum,na.rm=TRUE)
abundance_aux <- data_obs_guild_2 %>%
group_by(site_id,Guild) %>% count(wt=abundance) %>%
spread(key=Guild, value=n)
names(abundance_aux)
# There are "bumblebees" "other_wild_bees" "honeybees"
# GUILDS:honeybees, bumblebees, other wild bees, syrphids, humbleflies,
# other flies, beetles, non-bee hymenoptera, lepidoptera, and other
abundance_aux <- abundance_aux %>% mutate(lepidoptera=0,beetles=0,other_flies=0,
syrphids=0,other=0,humbleflies=0,
non_bee_hymenoptera=0,
total=0)
abundance_aux[is.na(abundance_aux)] <- 0
abundance_aux$total <- rowSums(abundance_aux[,c(2:ncol(abundance_aux))])
data.site <- data.site %>% left_join(abundance_aux, by = "site_id")
######################################################
# ESTIMATING CHAO INDEX
######################################################
abundace_field <- data_obs_guild %>%
select(site_id,Organism_ID,abundance)%>%
group_by(site_id,Organism_ID) %>% count(wt=abundance)
abundace_field <- abundace_field %>% spread(key=Organism_ID,value=n)
abundace_field[is.na(abundace_field)] <- 0
abundace_field$r_obser <- 0
abundace_field$r_chao <- 0
for (i in 1:nrow(abundace_field)) {
x <- as.numeric(abundace_field[i,2:(ncol(abundace_field)-2)])
chao <- ChaoRichness(x, datatype = "abundance", conf = 0.95)
abundace_field$r_obser[i] <- chao$Observed
abundace_field$r_chao[i] <- chao$Estimator
}
# Load our estimation for taxonomic resolution
percentage_species_morphos <- 0.9
richness_aux <- abundace_field %>% select(site_id,r_obser,r_chao)
richness_aux <- richness_aux %>% rename(observed_pollinator_richness=r_obser,
other_pollinator_richness=r_chao) %>%
mutate(other_richness_estimator_method="Chao1",richness_restriction="only bees")
if (percentage_species_morphos < 0.8){
richness_aux[,2:ncol(richness_aux)] <- NA
}
data.site <- data.site %>% left_join(richness_aux, by = "site_id")
###############################
# FIELD LEVEL DATA
###############################
field_level_data <- tibble(
study_id = data.site$study_id,
site_id = data.site$site_id,
crop = data.site$crop,
variety = data.site$variety,
management = data.site$management,
country = data.site$country,
latitude = data.site$latitude,
longitude = data.site$longitude,
X_UTM=data.site$X_UTM,
Y_UTM=data.site$Y_UTM,
zone_UTM=data.site$zone_UTM,
sampling_start_month = data.site$sampling_start_month,
sampling_end_month = data.site$sampling_end_month,
sampling_year = data.site$sampling_year,
field_size = data.site$field_size,
yield=data.site$yield,
yield_units=data.site$yield_units,
yield2=data.site$yield2,
yield2_units=data.site$yield2_units,
yield_treatments_no_pollinators=data.site$yield_treatments_no_pollinators,
yield_treatments_pollen_supplement=data.site$yield_treatments_no_pollinators,
yield_treatments_no_pollinators2=data.site$yield_treatments_no_pollinators2,
yield_treatments_pollen_supplement2=data.site$yield_treatments_pollen_supplement2,
fruits_per_plant=data.site$fruits_per_plant,
fruit_weight= data.site$fruit_weight,
plant_density=data.site$plant_density,
seeds_per_fruit=data.site$seeds_per_fruit,
seeds_per_plant=data.site$seeds_per_plant,
seed_weight=data.site$seed_weight,
observed_pollinator_richness=data.site$observed_pollinator_richness,
other_pollinator_richness=data.site$other_pollinator_richness,
other_richness_estimator_method=data.site$other_richness_estimator_method,
richness_restriction = data.site$richness_restriction,
abundance = data.site$total,
ab_honeybee = data.site$honeybees,
ab_bombus = data.site$bumblebees,
ab_wildbees = data.site$other_wild_bees,
ab_syrphids = data.site$syrphids,
ab_humbleflies= data.site$humbleflies,
ab_other_flies= data.site$other_flies,
ab_beetles=data.site$beetles,
ab_lepidoptera=data.site$lepidoptera,
ab_nonbee_hymenoptera=data.site$non_bee_hymenoptera,
ab_others = data.site$other,
total_sampled_area = data.site$total_sampled_area,
total_sampled_time = data.site$total_sampled_time,
visitation_rate_units = NA,
visitation_rate = NA,
visit_honeybee = NA,
visit_bombus = NA,
visit_wildbees = NA,
visit_syrphids = NA,
visit_humbleflies = NA,
visit_other_flies = NA,
visit_beetles = NA,
visit_lepidoptera = NA,
visit_nonbee_hymenoptera = NA,
visit_others = NA,
Publication = data.site$Publication,
Credit = data.site$Credit,
Email_contact = data.site$Email_contact
)
#MIA: Please delete the following field sites from our data set: 2009 Hemlock01
# Drop the Hemlock01 site as requested by the data contributor (Mia) above.
field_level_data <- field_level_data %>% filter(!site_id %in% c("Hemlock01"))
#setwd("C:/Users/USUARIO/Desktop/OBservData/Datasets_storage")
# Export the cleaned field-level data for this study to the shared storage folder.
write_csv(field_level_data, "Processing_files/Datasets_storage/field_level_data_Mia_Park_Malus_domestica_USA_2009.csv")
#setwd(dir_ini)
|
#'Get hyperparameter values
#'
#'@param models which algorithms?
#'@param n Number observations
#'@param k Number features
#'@param model_class "classification" or "regression"
#'
#'@return Named list of data frames. Each data frame corresponds to an
#'  algorithm, and each column in each data frame corresponds to a hyperparameter
#'  for that algorithm. This is the same format that should be provided to
#'  \code{tune_models(hyperparameters = )} to specify hyperparameter values.
#'
#'@export
#'@aliases hyperparameters
#'@seealso \code{\link{models}} for model and hyperparameter details
#'@details Get hyperparameters for model training.
#'  \code{get_hyperparameter_defaults} returns a list of 1-row data frames
#'  (except for glm, which is a 10-row data frame) with default hyperparameter
#'  values that are used by \code{flash_models}.
#'  \code{get_random_hyperparameters} returns a list of data frames with
#'  combinations of random values of hyperparameters to tune over in
#'  \code{tune_models}; the number of rows in the data frames is given by
#'  `tune_depth`.
#'
#'  For \code{get_hyperparameter_defaults}
#'  XGBoost defaults are from caret and XGBoost documentation:
#'  eta = 0.3, gamma = 0, max_depth = 6, subsample = 0.7,
#'  colsample_bytree = 0.8, min_child_weight = 1, and nrounds = 50.
#'  Random forest defaults are from Intro to
#'  Statistical Learning and caret: mtry = sqrt(k), splitrule = "extratrees",
#'  min.node.size = 1 for classification, 5 for regression.
#'  glm defaults are
#'  from caret: alpha = 1, and because glmnet fits sequences of lambda nearly as
#'  fast as an individual value, lambda is a sequence of 10 values from
#'  2^-10 (~1e-3) to 2^3 = 8.
get_hyperparameter_defaults <- function(models = get_supported_models(),
                                        n = 100,
                                        k = 10,
                                        model_class = "classification") {
  # Note: `n` is accepted for signature parity with
  # get_random_hyperparameters() but is not referenced below; only `k` and
  # `model_class` influence the defaults.
  defaults <-
    list(
      rf = tibble::tibble(
        mtry = floor(sqrt(k)),
        splitrule = "extratrees",
        min.node.size = if (model_class == "classification") 1L else 5L),
      xgb = tibble::tibble(
        eta = .3,
        gamma = 0,
        max_depth = 6,
        subsample = .7,
        colsample_bytree = .8,
        min_child_weight = 1,
        nrounds = 50
      ),
      # For glmnet, fitting 10 lambdas is only ~30% slower than an individual
      # value, and it's so important for performance, so go ahead and fit 10
      glm = tibble::tibble(
        alpha = 1,
        lambda = 2 ^ seq(-10, 3, len = 10)
      )
    )
  # Subset to the requested algorithms, preserving the list-of-data-frames
  # shape expected by tune_models(hyperparameters = ).
  return(defaults[models])
}
#' @param tune_depth How many combinations of hyperparameter values?
#' @export
#' @importFrom stats runif
#' @rdname get_hyperparameter_defaults
get_random_hyperparameters <- function(models = get_supported_models(),
                                       n = 100,
                                       k = 10,
                                       tune_depth = 5,
                                       model_class = "classification") {
  # Removed unused local `replace_ks` (was computed but never referenced).
  grids <- list()
  if ("rf" %in% models) {
    # ranger's split-rule candidates differ between classification and
    # regression.
    split_rules <-
      if (model_class == "classification") {
        c("gini", "extratrees")
      } else {
        c("variance", "extratrees")
      }
    grids$rf <-
      tibble::tibble(
        # Sample mtry from 1..k with weights favoring small values.
        mtry = sample(seq_len(k), tune_depth, TRUE, prob = 1 / seq_len(k)),
        splitrule = sample(split_rules, tune_depth, TRUE),
        # Minimum node size drawn from 1..min(n, 20).
        min.node.size = sample(min(n, 20), tune_depth, TRUE)
      )
  }
  if ("xgb" %in% models) {
    grids$xgb <-
      tibble::tibble(
        eta = runif(tune_depth, 0.001, .5),
        gamma = runif(tune_depth, 0, 10),
        max_depth = sample(10, tune_depth, replace = TRUE),
        subsample = runif(tune_depth, .35, 1),
        colsample_bytree = runif(tune_depth, .5, .9),
        min_child_weight = stats::rexp(tune_depth, .2),
        # Favor smaller nrounds: sampling weight is inverse to the value.
        nrounds = sample(25:1000, tune_depth, prob = 1 / (25:1000))
      )
  }
  if ("glm" %in% models) {
    # Cross ridge (alpha = 0) and lasso (alpha = 1) with random lambdas,
    # yielding 2 * tune_depth rows.
    grids$glm <-
      expand.grid(
        alpha = c(0, 1),
        lambda = 2 ^ runif(tune_depth, -10, 3)
      ) %>%
      dplyr::arrange(alpha) %>%
      tibble::as_tibble()
  }
  return(grids)
}
|
/R/setup_hyperparameters.R
|
permissive
|
reloadbrain/healthcareai-r
|
R
| false
| false
| 4,197
|
r
|
#'Get hyperparameter values
#'
#'@param models which algorithms?
#'@param n Number observations
#'@param k Number features
#'@param model_class "classification" or "regression"
#'
#'@return Named list of data frames. Each data frame corresponds to an
#'  algorithm, and each column in each data frame corresponds to a hyperparameter
#'  for that algorithm. This is the same format that should be provided to
#'  \code{tune_models(hyperparameters = )} to specify hyperparameter values.
#'
#'@export
#'@aliases hyperparameters
#'@seealso \code{\link{models}} for model and hyperparameter details
#'@details Get hyperparameters for model training.
#'  \code{get_hyperparameter_defaults} returns a list of 1-row data frames
#'  (except for glm, which is a 10-row data frame) with default hyperparameter
#'  values that are used by \code{flash_models}.
#'  \code{get_random_hyperparameters} returns a list of data frames with
#'  combinations of random values of hyperparameters to tune over in
#'  \code{tune_models}; the number of rows in the data frames is given by
#'  `tune_depth`.
#'
#'  For \code{get_hyperparameter_defaults}
#'  XGBoost defaults are from caret and XGBoost documentation:
#'  eta = 0.3, gamma = 0, max_depth = 6, subsample = 0.7,
#'  colsample_bytree = 0.8, min_child_weight = 1, and nrounds = 50.
#'  Random forest defaults are from Intro to
#'  Statistical Learning and caret: mtry = sqrt(k), splitrule = "extratrees",
#'  min.node.size = 1 for classification, 5 for regression.
#'  glm defaults are
#'  from caret: alpha = 1, and because glmnet fits sequences of lambda nearly as
#'  fast as an individual value, lambda is a sequence of 10 values from
#'  2^-10 (~1e-3) to 2^3 = 8.
get_hyperparameter_defaults <- function(models = get_supported_models(),
                                        n = 100,
                                        k = 10,
                                        model_class = "classification") {
  # Note: `n` is accepted for signature parity with
  # get_random_hyperparameters() but is not referenced below; only `k` and
  # `model_class` influence the defaults.
  defaults <-
    list(
      rf = tibble::tibble(
        mtry = floor(sqrt(k)),
        splitrule = "extratrees",
        min.node.size = if (model_class == "classification") 1L else 5L),
      xgb = tibble::tibble(
        eta = .3,
        gamma = 0,
        max_depth = 6,
        subsample = .7,
        colsample_bytree = .8,
        min_child_weight = 1,
        nrounds = 50
      ),
      # For glmnet, fitting 10 lambdas is only ~30% slower than an individual
      # value, and it's so important for performance, so go ahead and fit 10
      glm = tibble::tibble(
        alpha = 1,
        lambda = 2 ^ seq(-10, 3, len = 10)
      )
    )
  # Subset to the requested algorithms, preserving the list-of-data-frames
  # shape expected by tune_models(hyperparameters = ).
  return(defaults[models])
}
#' @param tune_depth How many combinations of hyperparameter values?
#' @export
#' @importFrom stats runif
#' @rdname get_hyperparameter_defaults
get_random_hyperparameters <- function(models = get_supported_models(),
                                       n = 100,
                                       k = 10,
                                       tune_depth = 5,
                                       model_class = "classification") {
  # Removed unused local `replace_ks` (was computed but never referenced).
  grids <- list()
  if ("rf" %in% models) {
    # ranger's split-rule candidates differ between classification and
    # regression.
    split_rules <-
      if (model_class == "classification") {
        c("gini", "extratrees")
      } else {
        c("variance", "extratrees")
      }
    grids$rf <-
      tibble::tibble(
        # Sample mtry from 1..k with weights favoring small values.
        mtry = sample(seq_len(k), tune_depth, TRUE, prob = 1 / seq_len(k)),
        splitrule = sample(split_rules, tune_depth, TRUE),
        # Minimum node size drawn from 1..min(n, 20).
        min.node.size = sample(min(n, 20), tune_depth, TRUE)
      )
  }
  if ("xgb" %in% models) {
    grids$xgb <-
      tibble::tibble(
        eta = runif(tune_depth, 0.001, .5),
        gamma = runif(tune_depth, 0, 10),
        max_depth = sample(10, tune_depth, replace = TRUE),
        subsample = runif(tune_depth, .35, 1),
        colsample_bytree = runif(tune_depth, .5, .9),
        min_child_weight = stats::rexp(tune_depth, .2),
        # Favor smaller nrounds: sampling weight is inverse to the value.
        nrounds = sample(25:1000, tune_depth, prob = 1 / (25:1000))
      )
  }
  if ("glm" %in% models) {
    # Cross ridge (alpha = 0) and lasso (alpha = 1) with random lambdas,
    # yielding 2 * tune_depth rows.
    grids$glm <-
      expand.grid(
        alpha = c(0, 1),
        lambda = 2 ^ runif(tune_depth, -10, 3)
      ) %>%
      dplyr::arrange(alpha) %>%
      tibble::as_tibble()
  }
  return(grids)
}
|
# Extracted example from the rosr package help files.
library(rosr)
### Name: sub_projects
### Title: Display available sub projects
### Aliases: sub_projects
### ** Examples
# List the sub-project templates shipped with rosr.
sub_projects()
|
/data/genthat_extracted_code/rosr/examples/sub_projects.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 143
|
r
|
# Extracted example from the rosr package help files.
library(rosr)
### Name: sub_projects
### Title: Display available sub projects
### Aliases: sub_projects
### ** Examples
# List the sub-project templates shipped with rosr.
sub_projects()
|
## Cache the inverse of a matrix so it is computed at most once.
## makeCacheMatrix() wraps a matrix in a list of closures; cacheSolve()
## returns the (possibly cached) inverse.

## Build a caching wrapper around matrix `x`.
##   set(z)            replace the matrix and invalidate the cached inverse
##   get()             return the wrapped matrix
##   setInverse(inv)   store a computed inverse
##   getInverse()      return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL until first computed
  set <- function(z){
    x <<- z
    m <<- NULL  # matrix changed, so the old inverse is no longer valid
  }
  get <- function()x
  setInverse <- function(inverse) m <<- inverse
  getInverse <- function() m
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix.
## Serves the cached inverse when available; otherwise computes it with
## solve() and stores it for subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  if(!is.null(m)){
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Bug fix: previously called solve(mat, ...) where `mat` was undefined;
  # the fetched matrix must be inverted.
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
/cachematrix.R
|
no_license
|
BioVAp/ProgrammingAssignment2
|
R
| false
| false
| 796
|
r
|
## Cache the inverse of a matrix so it is computed at most once.
## makeCacheMatrix() wraps a matrix in a list of closures; cacheSolve()
## returns the (possibly cached) inverse.

## Build a caching wrapper around matrix `x`.
##   set(z)            replace the matrix and invalidate the cached inverse
##   get()             return the wrapped matrix
##   setInverse(inv)   store a computed inverse
##   getInverse()      return the cached inverse, or NULL if none
makeCacheMatrix <- function(x = matrix()) {
  m <- NULL  # cached inverse; NULL until first computed
  set <- function(z){
    x <<- z
    m <<- NULL  # matrix changed, so the old inverse is no longer valid
  }
  get <- function()x
  setInverse <- function(inverse) m <<- inverse
  getInverse <- function() m
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}

## Return the inverse of the special "matrix" created by makeCacheMatrix.
## Serves the cached inverse when available; otherwise computes it with
## solve() and stores it for subsequent calls.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getInverse()
  if(!is.null(m)){
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # Bug fix: previously called solve(mat, ...) where `mat` was undefined;
  # the fetched matrix must be inverted.
  m <- solve(data, ...)
  x$setInverse(m)
  m
}
|
# Extracted example from the dlm package help files.
library(dlm)
### Name: mcmc
### Title: Utility functions for MCMC output analysis
### Aliases: mcmcMean mcmcMeans mcmcSD ergMean
### Keywords: misc
### ** Examples
# Simulate 1000 exponential draws arranged as 4 named columns.
x <- matrix(rexp(1000), nc=4)
dimnames(x) <- list(NULL, LETTERS[1:NCOL(x)])
mcmcSD(x)    # Monte Carlo standard deviations per column
mcmcMean(x)  # column means with Monte Carlo standard errors
# Ergodic (running) means starting at iteration 51, then plot them.
em <- ergMean(x, m = 51)
plot(ts(em, start=51), xlab="Iteration", main="Ergodic means")
|
/data/genthat_extracted_code/dlm/examples/mcmc.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 357
|
r
|
# Extracted example from the dlm package help files.
library(dlm)
### Name: mcmc
### Title: Utility functions for MCMC output analysis
### Aliases: mcmcMean mcmcMeans mcmcSD ergMean
### Keywords: misc
### ** Examples
# Simulate 1000 exponential draws arranged as 4 named columns.
x <- matrix(rexp(1000), nc=4)
dimnames(x) <- list(NULL, LETTERS[1:NCOL(x)])
mcmcSD(x)    # Monte Carlo standard deviations per column
mcmcMean(x)  # column means with Monte Carlo standard errors
# Ergodic (running) means starting at iteration 51, then plot them.
em <- ergMean(x, m = 51)
plot(ts(em, start=51), xlab="Iteration", main="Ergodic means")
|
# Filter the full Yelp academic review dump down to Las Vegas businesses
# and write the result back out as JSON.
las_business_ids <- jsonlite::read_json(("data/Las Vegas.json"))
las_business_ids <- sapply(las_business_ids, function(x) x$business_id)
# Closure used as a stream_in page handler: each page of parsed reviews is
# filtered to Las Vegas business ids and appended to `ldf`. Calling the
# handler with no argument returns the accumulated list of data frames.
page_handler <- local({
  ldf <- list()
  function(x) {
    if (missing(x)) {
      return(ldf)
    }
    x <- x[x$business_id %in% las_business_ids, ]
    ldf <<- append(ldf, list(x))
    ldf
  }
})
# Stream the (large) review file in 50k-record pages to bound memory use.
jsonlite::stream_in(
  file("yelp_dataset/yelp_academic_dataset_review.json"),
  handler = page_handler,
  pagesize = 50000
)
# Combine the accumulated pages into one data frame of reviews.
reviews <- do.call(rbind, page_handler())
library(tibble)
reviews <- as_tibble(reviews)
library(dplyr)
# Sort for reproducible output ordering before export.
reviews <- dplyr::arrange(reviews, business_id, user_id)
reviews
jsonlite::write_json(reviews, "data/Las Vegas_reviews.json")
|
/data/code/data_processing.R
|
no_license
|
meibanfa/CSE6242_Team34
|
R
| false
| false
| 735
|
r
|
# Filter the full Yelp academic review dump down to Las Vegas businesses
# and write the result back out as JSON.
las_business_ids <- jsonlite::read_json(("data/Las Vegas.json"))
las_business_ids <- sapply(las_business_ids, function(x) x$business_id)
# Closure used as a stream_in page handler: each page of parsed reviews is
# filtered to Las Vegas business ids and appended to `ldf`. Calling the
# handler with no argument returns the accumulated list of data frames.
page_handler <- local({
  ldf <- list()
  function(x) {
    if (missing(x)) {
      return(ldf)
    }
    x <- x[x$business_id %in% las_business_ids, ]
    ldf <<- append(ldf, list(x))
    ldf
  }
})
# Stream the (large) review file in 50k-record pages to bound memory use.
jsonlite::stream_in(
  file("yelp_dataset/yelp_academic_dataset_review.json"),
  handler = page_handler,
  pagesize = 50000
)
# Combine the accumulated pages into one data frame of reviews.
reviews <- do.call(rbind, page_handler())
library(tibble)
reviews <- as_tibble(reviews)
library(dplyr)
# Sort for reproducible output ordering before export.
reviews <- dplyr::arrange(reviews, business_id, user_id)
reviews
jsonlite::write_json(reviews, "data/Las Vegas_reviews.json")
|
## Two functions that together cache the inverse of a matrix.

## `makeCacheMatrix`: wrap a matrix in an object (a list of closures) that
## can remember its inverse once it has been computed.
makeCacheMatrix <- function(x = matrix()) {
  invr <- NULL  # cache slot for the inverse; starts empty on creation
  get <- function() { x }                      # return the wrapped matrix
  setinvr <- function(solve) { invr <<- solve }  # store a computed inverse
  getinvr <- function() { invr }               # cached inverse, or NULL
  # Only the functions listed here are reachable from outside the closure.
  list(get = get,
       setinvr = setinvr,
       getinvr = getinvr)
}

## `cacheSolve`: return the inverse of the special "matrix" produced by
## `makeCacheMatrix`. The inverse is computed with solve() on the first
## call and served from the cache on every later call.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinvr()
  if (is.null(cached)) {
    # First access: invert the wrapped matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinvr(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
Dermot-/ProgrammingAssignment2
|
R
| false
| false
| 2,667
|
r
|
## Two functions that together cache the inverse of a matrix.

## `makeCacheMatrix`: wrap a matrix in an object (a list of closures) that
## can remember its inverse once it has been computed.
makeCacheMatrix <- function(x = matrix()) {
  invr <- NULL  # cache slot for the inverse; starts empty on creation
  get <- function() { x }                      # return the wrapped matrix
  setinvr <- function(solve) { invr <<- solve }  # store a computed inverse
  getinvr <- function() { invr }               # cached inverse, or NULL
  # Only the functions listed here are reachable from outside the closure.
  list(get = get,
       setinvr = setinvr,
       getinvr = getinvr)
}

## `cacheSolve`: return the inverse of the special "matrix" produced by
## `makeCacheMatrix`. The inverse is computed with solve() on the first
## call and served from the cache on every later call.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinvr()
  if (is.null(cached)) {
    # First access: invert the wrapped matrix and remember the result.
    cached <- solve(x$get(), ...)
    x$setinvr(cached)
  } else {
    message("getting cached data")
  }
  cached
}
|
\name{nbMult}
\alias{nbMult}
\alias{nbMult}
\title{ convert a spatial nb object to a matching STF object }
\description{ convert a spatial nb object to a matching STF object }
\usage{
nbMult(nb, st, addT = TRUE, addST = FALSE)
}
\arguments{
\item{nb}{ object of class nb (see package spdep), which is valid for
the spatial slot of object \code{st}: \code{length(nb)} should equal
\code{length(st@sp)}}
\item{st}{ object of class STF }
\item{addT}{ logical; should temporal neighbours be added? }
\item{addST}{ logical; should spatio-temporal neighbours be added? }
}
\value{ object of class \code{nb} }
\details{ if both \code{addT} and \code{addST} are false, only
spatial neighbours are added for each time replicate.
details are found in
Giovana M. de Espindola, Edzer Pebesma, Gilberto
Câmara, 2011. Spatio-temporal regression
models for deforestation in the Brazilian Amazon.
STDM 2011, The International Symposium on Spatial-Temporal Analysis
and Data Mining, University College London - 18th-20th July 2011.
}
\author{Edzer Pebesma}
\keyword{manip}
|
/man/nbmult.Rd
|
no_license
|
cran/spacetime
|
R
| false
| false
| 1,063
|
rd
|
\name{nbMult}
\alias{nbMult}
\alias{nbMult}
\title{ convert a spatial nb object to a matching STF object }
\description{ convert a spatial nb object to a matching STF object }
\usage{
nbMult(nb, st, addT = TRUE, addST = FALSE)
}
\arguments{
\item{nb}{ object of class nb (see package spdep), which is valid for
the spatial slot of object \code{st}: \code{length(nb)} should equal
\code{length(st@sp)}}
\item{st}{ object of class STF }
\item{addT}{ logical; should temporal neighbours be added? }
\item{addST}{ logical; should spatio-temporal neighbours be added? }
}
\value{ object of class \code{nb} }
\details{ if both \code{addT} and \code{addST} are false, only
spatial neighbours are added for each time replicate.
details are found in
Giovana M. de Espindola, Edzer Pebesma, Gilberto
Câmara, 2011. Spatio-temporal regression
models for deforestation in the Brazilian Amazon.
STDM 2011, The International Symposium on Spatial-Temporal Analysis
and Data Mining, University College London - 18th-20th July 2011.
}
\author{Edzer Pebesma}
\keyword{manip}
|
#' Z-Score applied to seasonal data divergence
#' @description
#' Supports analytics and display of seasonal data. Z-Score is
#' computed on residuals conditional on their seasonal period.
#' Beware that most seasonal charts in industry e.g. (NG Storage)
#' is not de-trended so results once you apply an STL decomposition
#' will vary from the unadjusted seasonal plot.
#' @param df Long data frame with columns series, date and value. `tibble`
#' @param title Default is a blank space returning the unique value in df$series. `character`
#' @param per
#' Frequency of seasonality "yearweek" (DEFAULT). "yearmonth", "yearquarter" `character`
#' @param output
#' "stl" for STL decomposition chart,
#' "stats" for STL fitted statistics.
#' "res" for STL fitted data.
#' "zscore" for residuals Z-score,
#' "seasonal" for standard seasonal chart.
#' @param chart
#' "seasons" for feasts::gg_season() (DEFAULT)
#' "series" for feasts::gg_subseries()
#' @returns Time series of STL decomposition residuals Z-Scores, or
#' standard seasonal chart with the feasts package.
#' @importFrom tsibble as_tsibble index_by group_by_key
#' @export chart_zscore
#' @author Philippe Cote
#' @examples
#' \dontrun{
#' df <- eiaStocks %>% dplyr::filter(series == "NGLower48")
#' title <- "NGLower48"
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "stl", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "stats", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "res", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "zscore", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "seasonal", chart = "seasons")
#' }
chart_zscore <- function(df = df, title = "NG Storage Z Score", per = "yearweek", output = "zscore", chart = "seasons") {
  # Fail early with informative messages if optional dependencies are absent.
  if (!requireNamespace("feasts", quietly = TRUE)) {stop("Package \"feasts\" needed for this function to work. Please install it.", call. = FALSE)}
  if (!requireNamespace("fabletools", quietly = TRUE)) {stop("Package \"fabletools\" needed for this function to work. Please install it.", call. = FALSE)}
  # An empty title falls back to the series name(s) present in the data.
  if (nchar(title) == 0) {
    title <- unique(df$series)
  }
  # Validate the enumerated arguments before doing any work.
  if (!output %in% c("zscore", "seasonal", "stats", "stl", "res")) {
    stop("Incorrect output parameter")
  }
  if (!per %in% c("yearweek", "yearmonth", "yearquarter")) {
    stop("Incorrect period parameter")
  }
  # s/e are character positions used later by stringr::str_sub() to pull the
  # period number out of the printed tsibble index.
  # NOTE(review): this assumes tsibble's index print format (e.g. chars 7-8
  # of "2020 W05") - confirm positions if tsibble changes its formatting.
  if (per %in% c("yearweek", "yearquarter")) {
    s <- 7
    e <- 8
  }
  if (per == "yearmonth") {
    s <- 6
    e <- 8
  }
  # s <- list(freq = ~yearweek(.))
  if (output == "stl") {
    # STL decomposition chart: aggregate to the requested period, fit STL,
    # and autoplot the components.
    x <- df %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key()
    # Re-index to the requested seasonal period (one branch per choice).
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    # tsibble::index_by(freq = ~do.call(per,args=list(.))) %>%
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
      fabletools::components() %>%
      ggplot2::autoplot() + ggplot2::ggtitle(title)
    return(x)
  }
  if (output == "stats") {
    # STL feature statistics. The data is rbind-ed with a copy whose series
    # is renamed to `title` - presumably so results also appear under the
    # chart title; verify against callers.
    x <- rbind(df, df %>% dplyr::mutate(series = title)) %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key() # %>%
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::features(value, feasts::feat_stl)
    return(x)
  }
  if (output == "res") {
    # Fitted STL data (trend/season/remainder components) as a table.
    x <- rbind(df, df %>% dplyr::mutate(series = title)) %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key() # %>%
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
      fabletools::components()
    return(x)
  }
  # Remaining outputs ("seasonal", "zscore") share this aggregated tsibble.
  df <- df %>%
    tsibble::as_tsibble(key = series, index = date) %>%
    tsibble::group_by_key() # %>%
  if (per %in% c("yearweek")) {
    df <- df %>% tsibble::index_by(freq = ~ yearweek(.))
  }
  if (per %in% c("yearmonth")) {
    df <- df %>% tsibble::index_by(freq = ~ yearmonth(.))
  }
  if (per %in% c("yearquarter")) {
    df <- df %>% tsibble::index_by(freq = ~ yearquarter(.))
  }
  df <- df %>%
    # tsibble::index_by(freq = ~do.call(per,args=list(.))) %>%
    dplyr::summarise(value = mean(value))
  # z: per-period mean and sd of the STL remainders, keyed by the period
  # number parsed from the printed index via str_sub(freq, s, e).
  z <- df %>%
    fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
    fabletools::components() %>%
    dplyr::transmute(
      per = as.numeric(do.call(stringr::str_sub, args = list(freq, start = s, end = e))),
      year = lubridate::year(freq),
      value = remainder
    ) %>%
    dplyr::as_tibble() %>%
    dplyr::group_by(per) %>%
    dplyr::summarise(u = mean(value), sigma = stats::sd(value))
  # x: STL components joined with the per-period moments, then standardized:
  # z.score = (remainder - period mean) / period sd.
  x <- df %>%
    fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
    fabletools::components() %>%
    dplyr::mutate(per = as.numeric(do.call(stringr::str_sub, args = list(freq, start = s, end = e)))) %>%
    dplyr::left_join(z, by = c("per")) %>%
    dplyr::mutate(z.score = (remainder - u) / sigma)
  if (output == "seasonal") {
    # Standard (non-detrended) seasonal chart on the aggregated data.
    if (chart == "seasons") {
      x <- df %>% feasts::gg_season(value) + ggplot2::ggtitle(title)
    }
    if (chart == "series") {
      x <- df %>% feasts::gg_subseries(value) + ggplot2::ggtitle(title)
    }
  }
  if (output == "zscore") {
    # Bar chart of z-scores for the last 10 years, colored by magnitude.
    pal <- c("red", "orange", "green", "orange", "red")
    x <- x %>%
      dplyr::mutate(date = as.Date(freq)) %>%
      #dplyr::filter(date > ) %>%
      plotly::plot_ly(x = ~date, y = ~z.score, color = ~z.score, colors = pal) %>%
      plotly::add_bars() %>%
      plotly::layout(
        title = list(text = title, x = 0),
        # NOTE(review): yaxis range is c(3, -3), i.e. deliberately(?)
        # inverted - confirm the intended axis orientation.
        xaxis = list(title = "", range = c(Sys.Date() - months(120), Sys.Date())),
        yaxis = list(title = "Z Score of Seasonally-Adjusted Residuals", range = c(3, -3), separators = ".,", tickformat = ".2f")
      )
  }
  return(x)
}
|
/R/chart_zscore.R
|
permissive
|
risktoollib/RTL
|
R
| false
| false
| 6,714
|
r
|
#' Z-Score applied to seasonal data divergence
#' @description
#' Supports analytics and display of seasonal data. Z-Score is
#' computed on residuals conditional on their seasonal period.
#' Beware that most seasonal charts in industry e.g. (NG Storage)
#' is not de-trended so results once you apply an STL decomposition
#' will vary from the unadjusted seasonal plot.
#' @param df Long data frame with columns series, date and value. `tibble`
#' @param title Default is a blank space returning the unique value in df$series. `character`
#' @param per
#' Frequency of seasonality "yearweek" (DEFAULT). "yearmonth", "yearquarter" `character`
#' @param output
#' "stl" for STL decomposition chart,
#' "stats" for STL fitted statistics.
#' "res" for STL fitted data.
#' "zscore" for residuals Z-score,
#' "seasonal" for standard seasonal chart.
#' @param chart
#' "seasons" for feasts::gg_season() (DEFAULT)
#' "series" for feasts::gg_subseries()
#' @returns Time series of STL decomposition residuals Z-Scores, or
#' standard seasonal chart with the feasts package.
#' @importFrom tsibble as_tsibble index_by group_by_key
#' @export chart_zscore
#' @author Philippe Cote
#' @examples
#' \dontrun{
#' df <- eiaStocks %>% dplyr::filter(series == "NGLower48")
#' title <- "NGLower48"
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "stl", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "stats", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "res", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "zscore", chart = "seasons")
#' chart_zscore(df = df, title = " ", per = "yearweek", output = "seasonal", chart = "seasons")
#' }
chart_zscore <- function(df = df, title = "NG Storage Z Score", per = "yearweek", output = "zscore", chart = "seasons") {
  # Fail early with informative messages if optional dependencies are absent.
  if (!requireNamespace("feasts", quietly = TRUE)) {stop("Package \"feasts\" needed for this function to work. Please install it.", call. = FALSE)}
  if (!requireNamespace("fabletools", quietly = TRUE)) {stop("Package \"fabletools\" needed for this function to work. Please install it.", call. = FALSE)}
  # An empty title falls back to the series name(s) present in the data.
  if (nchar(title) == 0) {
    title <- unique(df$series)
  }
  # Validate the enumerated arguments before doing any work.
  if (!output %in% c("zscore", "seasonal", "stats", "stl", "res")) {
    stop("Incorrect output parameter")
  }
  if (!per %in% c("yearweek", "yearmonth", "yearquarter")) {
    stop("Incorrect period parameter")
  }
  # s/e are character positions used later by stringr::str_sub() to pull the
  # period number out of the printed tsibble index.
  # NOTE(review): this assumes tsibble's index print format (e.g. chars 7-8
  # of "2020 W05") - confirm positions if tsibble changes its formatting.
  if (per %in% c("yearweek", "yearquarter")) {
    s <- 7
    e <- 8
  }
  if (per == "yearmonth") {
    s <- 6
    e <- 8
  }
  # s <- list(freq = ~yearweek(.))
  if (output == "stl") {
    # STL decomposition chart: aggregate to the requested period, fit STL,
    # and autoplot the components.
    x <- df %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key()
    # Re-index to the requested seasonal period (one branch per choice).
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    # tsibble::index_by(freq = ~do.call(per,args=list(.))) %>%
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
      fabletools::components() %>%
      ggplot2::autoplot() + ggplot2::ggtitle(title)
    return(x)
  }
  if (output == "stats") {
    # STL feature statistics. The data is rbind-ed with a copy whose series
    # is renamed to `title` - presumably so results also appear under the
    # chart title; verify against callers.
    x <- rbind(df, df %>% dplyr::mutate(series = title)) %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key() # %>%
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::features(value, feasts::feat_stl)
    return(x)
  }
  if (output == "res") {
    # Fitted STL data (trend/season/remainder components) as a table.
    x <- rbind(df, df %>% dplyr::mutate(series = title)) %>%
      tsibble::as_tsibble(key = series, index = date) %>%
      tsibble::group_by_key() # %>%
    if (per %in% c("yearweek")) {
      x <- x %>% tsibble::index_by(freq = ~ yearweek(.))
    }
    if (per %in% c("yearmonth")) {
      x <- x %>% tsibble::index_by(freq = ~ yearmonth(.))
    }
    if (per %in% c("yearquarter")) {
      x <- x %>% tsibble::index_by(freq = ~ yearquarter(.))
    }
    x <- x %>%
      dplyr::summarise(value = mean(value)) %>%
      fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
      fabletools::components()
    return(x)
  }
  # Remaining outputs ("seasonal", "zscore") share this aggregated tsibble.
  df <- df %>%
    tsibble::as_tsibble(key = series, index = date) %>%
    tsibble::group_by_key() # %>%
  if (per %in% c("yearweek")) {
    df <- df %>% tsibble::index_by(freq = ~ yearweek(.))
  }
  if (per %in% c("yearmonth")) {
    df <- df %>% tsibble::index_by(freq = ~ yearmonth(.))
  }
  if (per %in% c("yearquarter")) {
    df <- df %>% tsibble::index_by(freq = ~ yearquarter(.))
  }
  df <- df %>%
    # tsibble::index_by(freq = ~do.call(per,args=list(.))) %>%
    dplyr::summarise(value = mean(value))
  # z: per-period mean and sd of the STL remainders, keyed by the period
  # number parsed from the printed index via str_sub(freq, s, e).
  z <- df %>%
    fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
    fabletools::components() %>%
    dplyr::transmute(
      per = as.numeric(do.call(stringr::str_sub, args = list(freq, start = s, end = e))),
      year = lubridate::year(freq),
      value = remainder
    ) %>%
    dplyr::as_tibble() %>%
    dplyr::group_by(per) %>%
    dplyr::summarise(u = mean(value), sigma = stats::sd(value))
  # x: STL components joined with the per-period moments, then standardized:
  # z.score = (remainder - period mean) / period sd.
  x <- df %>%
    fabletools::model(feasts::STL(value ~ season(window = Inf))) %>%
    fabletools::components() %>%
    dplyr::mutate(per = as.numeric(do.call(stringr::str_sub, args = list(freq, start = s, end = e)))) %>%
    dplyr::left_join(z, by = c("per")) %>%
    dplyr::mutate(z.score = (remainder - u) / sigma)
  if (output == "seasonal") {
    # Standard (non-detrended) seasonal chart on the aggregated data.
    if (chart == "seasons") {
      x <- df %>% feasts::gg_season(value) + ggplot2::ggtitle(title)
    }
    if (chart == "series") {
      x <- df %>% feasts::gg_subseries(value) + ggplot2::ggtitle(title)
    }
  }
  if (output == "zscore") {
    # Bar chart of z-scores for the last 10 years, colored by magnitude.
    pal <- c("red", "orange", "green", "orange", "red")
    x <- x %>%
      dplyr::mutate(date = as.Date(freq)) %>%
      #dplyr::filter(date > ) %>%
      plotly::plot_ly(x = ~date, y = ~z.score, color = ~z.score, colors = pal) %>%
      plotly::add_bars() %>%
      plotly::layout(
        title = list(text = title, x = 0),
        # NOTE(review): yaxis range is c(3, -3), i.e. deliberately(?)
        # inverted - confirm the intended axis orientation.
        xaxis = list(title = "", range = c(Sys.Date() - months(120), Sys.Date())),
        yaxis = list(title = "Z Score of Seasonally-Adjusted Residuals", range = c(3, -3), separators = ".,", tickformat = ".2f")
      )
  }
  return(x)
}
|
/股價檔案/API 視覺化.R
|
no_license
|
qf108071601/Others
|
R
| false
| false
| 1,458
|
r
| ||
#' A meta-analysis approach with filtering for identifying gene-level
#' gene-environment interactions with genetic association data
#'
#' This function first conducts a meta-filtering test to filter out unpromising
#' SNPs. It then runs a test of omnibus-filtering-based GxE meta-analysis
#' (ofGEM) that combines the strengths of the fixed- and random-effects
#' meta-analysis with meta-filtering. It can also analyze data from multiple
#' ethnic groups. The p-values are calculated using a sequential sampling
#' approach.
#'
#'
#' @param Z a matrix of test statistics for gene-environment interactions (GxE)
#' from consortium data. Each row corresponds to a SNP in a set (e.g., a gene),
#' and each column represents a study. For multi-ethnic groups, Z is a list
#' with each element being the matrix for each ethnic group.
#' @param X a matrix of filtering statistics for GxE. Each row corresponds to a
#' SNP in a set, and each column represents a study. For multi-ethnic groups, X
#' is a list with each element being the matrix for each ethnic group.
#' @param R the correlation matrix of test statistics for SNPs in a set. One
#' may use the genotype LD matrix for the set of SNPs to approximate it. This
#' matrix is used when sampling correlated testing and filtering statistics
#' under the null hypothesis and to obtain the null meta-analysis statistics.
#' For multi-ethnic groups, R is a list with each element being the correlation
#' matrix for each ethnic group.
#' @param weight the weight vector for each study, or the weight matrix for
#' each SNP and each study. If the weight is the same across SNPs, it is a
#' vector with length equaling to the number of studies. If the weight is
#' different for different SNPs, it is a matrix with each row corresponding to
#' each SNP and each column representing each study.
#' @param threshold a fixed p-value threshold for filtering test. The default
#' is 0.1.
#' @param maxSim the maximum number of samplings performed in obtaining the
#' sets of correlated testing and filtering statistics under the null. The
#' default is 1e6. This number determines the precision of the p-value
#' calculation.
#' @param tol the tolerance number to stop the sequential sampling procedure.
#' The default is 10. We count the number of sampling-based null
#' meta-statistics that is more extreme than the observed meta-statistics. We
#' sequentially increase the number of sampling with an increment of 100. The
#' sequential sampling will stop if the cumulative count reaches tol. The idea
#' is to stop pursuing a higher precision with more sampling of null if the
#' p-value appears to be not significant. If tol = 0, the number of samplings
#' equals to maxSim.
#' @return A list containing \item{pval_random_mf}{the p-value based on the
#' random-effects meta-analysis test with its corresponding meta-filtering.}
#' \item{pval_fixed_mf}{the p-value based on the fixed-effects meta-analysis
#' test with its corresponding meta-filtering.} \item{pval_ofGEM}{the p-value
#' based on using Fisher's method to aggregating the p-values of fixed- and
#' random-effects meta-analysis tests with meta-filtering} \item{nsim}{the
#' number of samplings being performed.}
#' @references Wang, Jiebiao, Qianying Liu, Brandon L. Pierce, Dezheng Huo,
#' Olufunmilayo I. Olopade, Habibul Ahsan, and Lin S. Chen.
#' "A meta-analysis approach with filtering for identifying gene-level gene-environment interactions."
#' Genetic epidemiology (2018). https://doi.org/10.1002/gepi.22115
#' @examples
#'
#'
#' data(sim_dat)
#'
#' pval = ofGEM(Z = sim_dat$Z, X = sim_dat$X, R = sim_dat$R, weight = rep(1/6, 6))
#'
#'
#' @export ofGEM
ofGEM = function(Z, X, R, weight, threshold = 0.1, maxSim = 1e6, tol = 10) {
  # Compute the observed fixed-effects (T_fixed_mf) and random-effects
  # (T_random_mf) meta-analysis statistics with meta-filtering, then obtain
  # p-values by sequential sampling of null statistics via sim().
  #
  # BUG FIX: the original code called sapply(Nsnp, ...), which evaluated the
  # davies() filtering p-value only at the single index q = Nsnp (the last
  # SNP) and recycled that one logical across all SNPs.  The filter must be
  # evaluated for every SNP, i.e. over seq_len(Nsnp).
  if (is.list(Z)) {
    # multi-ethnic input: accumulate the statistics over ethnic groups
    Nsnp = nrow(X[[1]])
    T_fixed_mf = T_random_mf = 0
    for (i in 1:length(Z)) {
      # per-SNP x per-study weight matrix for this ethnic group
      weight_mat = matrix(rep(weight[[i]], Nsnp), byrow = TRUE, nrow = Nsnp)
      Z_weighted = Z[[i]] * weight_mat
      # MF_fixed: fixed-effects meta-filtering statistic (weighted row sums)
      MF_fixed = rowSums(X[[i]] * weight_mat)
      # MF_random: random-effects meta-filtering statistic (weighted sums of squares)
      MF_random = rowSums(X[[i]]^2 * weight_mat^2)
      ## test statistics, filtered per SNP
      T_fixed_mf = T_fixed_mf + sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
      T_random_mf = T_random_mf + sum(rowSums(Z_weighted^2) *
        (sapply(seq_len(Nsnp), function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
    }
  } else {
    Nsnp = nrow(X)
    weight_mat = matrix(rep(weight, Nsnp), byrow = TRUE, nrow = Nsnp)
    Z_weighted = Z * weight_mat
    # MF_fixed: fixed-effects meta-filtering statistic
    MF_fixed = rowSums(X * weight_mat)
    # MF_random: random-effects meta-filtering statistic
    MF_random = rowSums(X^2 * weight_mat^2)
    T_fixed_mf = sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
    ## BUG FIX: seq_len(Nsnp) rather than Nsnp (see header comment)
    T_random_mf = sum(rowSums(Z_weighted^2) *
      (sapply(seq_len(Nsnp), function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
  }
  ## Sequential sampling: draw null statistics in batches of 100 until each
  ## exceedance count reaches `tol` or the sampling budget maxSim is spent.
  pval = rep(1, 2)
  nsim = 0
  if (!all(c(T_random_mf, T_fixed_mf) == 0, na.rm = TRUE)) {
    nsim = 100
    count = sim(n = nsim, R, weight, threshold, T_random_mf, T_fixed_mf)
    while (!all(count >= tol, na.rm = TRUE) && nsim < maxSim) {
      count = count + sim(n = 100, R, weight, threshold, T_random_mf, T_fixed_mf)
      nsim = nsim + 100
    }
    pval = count / nsim
  }
  # Combine the two filtered meta-tests into one omnibus p-value (ofGEM)
  return(list(nsim = nsim, pval_random_mf = pval[1], pval_fixed_mf = pval[2], pval_ofGEM = Fisher.test(pval)$p.value))
}
# Simulate null testing (Z) and filtering (X) statistics with the LD
# structure R, recompute the filtered meta-statistics, and count how often
# the null statistics equal or exceed the observed T_random_mf / T_fixed_mf.
sim = function(n, R, weight, threshold, T_random_mf, T_fixed_mf) {
  # preallocate instead of growing the vectors inside the loop
  T_random_mf_null = numeric(n)
  T_fixed_mf_null = numeric(n)
  for (i in 1:n) {
    # if multi-ethnic
    if (is.list(R)) {
      T_fixed_mf_null[i] = T_random_mf_null[i] = 0
      for (j in 1:length(R)) {
        ## BUG FIX: use the j-th ethnic group's weights (weight[[j]]), not
        ## always the first group's (weight[[1]]), to determine the number
        ## of studies sampled for this group.
        nstudy = length(weight[[j]])
        weight_mat = matrix(rep(weight[[j]], ncol(R[[j]])), byrow = TRUE, nrow = ncol(R[[j]]))
        # draw null filtering (X) and testing (Z) statistics, SNPs in rows
        X = t(mvrnorm(nstudy, rep(0, ncol(R[[j]])), R[[j]]))
        Z = t(mvrnorm(nstudy, rep(0, ncol(R[[j]])), R[[j]]))
        Z_weighted = Z * weight_mat
        # MF_fixed: fixed-effects meta-filtering statistic
        MF_fixed = rowSums(X * weight_mat)
        # MF_random: random-effects meta-filtering statistic
        MF_random = rowSums(X^2 * weight_mat^2)
        T_fixed_mf_null[i] = T_fixed_mf_null[i] + sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
        ## BUG FIX: filter every SNP (seq_along) instead of only the last one
        T_random_mf_null[i] = T_random_mf_null[i] + sum(rowSums(Z_weighted^2) *
          (sapply(seq_along(MF_random),
                  function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
      }
    } else {
      nstudy = length(weight)
      weight_mat = matrix(rep(weight, ncol(R)), byrow = TRUE, nrow = ncol(R))
      X = t(mvrnorm(nstudy, rep(0, ncol(R)), R))
      Z = t(mvrnorm(nstudy, rep(0, ncol(R)), R))
      Z_weighted = Z * weight_mat
      # MF_fixed: fixed-effects meta-filtering statistic
      MF_fixed = rowSums(X * weight_mat)
      # MF_random: random-effects meta-filtering statistic
      MF_random = rowSums(X^2 * weight_mat^2)
      T_fixed_mf_null[i] = sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
      ## BUG FIX: seq_along(MF_random) instead of length(MF_random)
      T_random_mf_null[i] = sum(rowSums(Z_weighted^2) *
        (sapply(seq_along(MF_random),
                function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
    }
  }
  # exceedance counts for the (random-effects, fixed-effects) statistics
  return(c(sum(T_random_mf_null >= T_random_mf, na.rm = TRUE), sum(T_fixed_mf_null >= T_fixed_mf, na.rm = TRUE)))
}
## Combine p-values from independent tests with Fisher's method: the combined
## statistic -2 * sum(log(p)) follows a chi-squared distribution with
## 2 * length(p) degrees of freedom under the global null.
Fisher.test = function(p) {
  df_chisq <- 2 * length(p)
  stat <- -2 * sum(log(p))
  list(Xsq = stat,
       p.value = pchisq(stat, df = df_chisq, lower.tail = FALSE))
}
|
/R/ofGEM.r
|
no_license
|
cran/ofGEM
|
R
| false
| false
| 8,548
|
r
|
#' A meta-analysis approach with filtering for identifying gene-level
#' gene-environment interactions with genetic association data
#'
#' This function first conducts a meta-filtering test to filter out unpromising
#' SNPs. It then runs a test of omnibus-filtering-based GxE meta-analysis
#' (ofGEM) that combines the strengths of the fixed- and random-effects
#' meta-analysis with meta-filtering. It can also analyze data from multiple
#' ethnic groups. The p-values are calculated using a sequential sampling
#' approach.
#'
#'
#' @param Z a matrix of test statistics for gene-environment interactions (GxE)
#' from consortium data. Each row corresponds to a SNP in a set (e.g., a gene),
#' and each column represents a study. For multi-ethnic groups, Z is a list
#' with each element being the matrix for each ethnic group.
#' @param X a matrix of filtering statistics for GxE. Each row corresponds to a
#' SNP in a set, and each column represents a study. For multi-ethnic groups, X
#' is a list with each element being the matrix for each ethnic group.
#' @param R the correlation matrix of test statistics for SNPs in a set. One
#' may use the genotype LD matrix for the set of SNPs to approximate it. This
#' matrix is used when sampling correlated testing and filtering statistics
#' under the null hypothesis and to obtain the null meta-analysis statistics.
#' For multi-ethnic groups, R is a list with each element being the correlation
#' matrix for each ethnic group.
#' @param weight the weight vector for each study, or the weight matrix for
#' each SNP and each study. If the weight is the same across SNPs, it is a
#' vector with length equaling to the number of studies. If the weight is
#' different for different SNPs, it is a matrix with each row corresponding to
#' each SNP and each column representing each study.
#' @param threshold a fixed p-value threshold for filtering test. The default
#' is 0.1.
#' @param maxSim the maximum number of samplings performed in obtaining the
#' sets of correlated testing and filtering statistics under the null. The
#' default is 1e6. This number determines the precision of the p-value
#' calculation.
#' @param tol the tolerance number to stop the sequential sampling procedure.
#' The default is 10. We count the number of sampling-based null
#' meta-statistics that is more extreme than the observed meta-statistics. We
#' sequentially increase the number of sampling with an increment of 100. The
#' sequential sampling will stop if the cumulative count reaches tol. The idea
#' is to stop pursuing a higher precision with more sampling of null if the
#' p-value appears to be not significant. If tol = 0, the number of samplings
#' equals to maxSim.
#' @return A list containing \item{pval_random_mf}{the p-value based on the
#' random-effects meta-analysis test with its corresponding meta-filtering.}
#' \item{pval_fixed_mf}{the p-value based on the fixed-effects meta-analysis
#' test with its corresponding meta-filtering.} \item{pval_ofGEM}{the p-value
#' based on using Fisher's method to aggregating the p-values of fixed- and
#' random-effects meta-analysis tests with meta-filtering} \item{nsim}{the
#' number of samplings being performed.}
#' @references Wang, Jiebiao, Qianying Liu, Brandon L. Pierce, Dezheng Huo,
#' Olufunmilayo I. Olopade, Habibul Ahsan, and Lin S. Chen.
#' "A meta-analysis approach with filtering for identifying gene-level gene-environment interactions."
#' Genetic epidemiology (2018). https://doi.org/10.1002/gepi.22115
#' @examples
#'
#'
#' data(sim_dat)
#'
#' pval = ofGEM(Z = sim_dat$Z, X = sim_dat$X, R = sim_dat$R, weight = rep(1/6, 6))
#'
#'
#' @export ofGEM
ofGEM = function(Z, X, R, weight, threshold = 0.1, maxSim = 1e6, tol = 10) {
  # Compute the observed fixed-effects (T_fixed_mf) and random-effects
  # (T_random_mf) meta-analysis statistics with meta-filtering, then obtain
  # p-values by sequential sampling of null statistics via sim().
  #
  # BUG FIX: the original code called sapply(Nsnp, ...), which evaluated the
  # davies() filtering p-value only at the single index q = Nsnp (the last
  # SNP) and recycled that one logical across all SNPs.  The filter must be
  # evaluated for every SNP, i.e. over seq_len(Nsnp).
  if (is.list(Z)) {
    # multi-ethnic input: accumulate the statistics over ethnic groups
    Nsnp = nrow(X[[1]])
    T_fixed_mf = T_random_mf = 0
    for (i in 1:length(Z)) {
      # per-SNP x per-study weight matrix for this ethnic group
      weight_mat = matrix(rep(weight[[i]], Nsnp), byrow = TRUE, nrow = Nsnp)
      Z_weighted = Z[[i]] * weight_mat
      # MF_fixed: fixed-effects meta-filtering statistic (weighted row sums)
      MF_fixed = rowSums(X[[i]] * weight_mat)
      # MF_random: random-effects meta-filtering statistic (weighted sums of squares)
      MF_random = rowSums(X[[i]]^2 * weight_mat^2)
      ## test statistics, filtered per SNP
      T_fixed_mf = T_fixed_mf + sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
      T_random_mf = T_random_mf + sum(rowSums(Z_weighted^2) *
        (sapply(seq_len(Nsnp), function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
    }
  } else {
    Nsnp = nrow(X)
    weight_mat = matrix(rep(weight, Nsnp), byrow = TRUE, nrow = Nsnp)
    Z_weighted = Z * weight_mat
    # MF_fixed: fixed-effects meta-filtering statistic
    MF_fixed = rowSums(X * weight_mat)
    # MF_random: random-effects meta-filtering statistic
    MF_random = rowSums(X^2 * weight_mat^2)
    T_fixed_mf = sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
    ## BUG FIX: seq_len(Nsnp) rather than Nsnp (see header comment)
    T_random_mf = sum(rowSums(Z_weighted^2) *
      (sapply(seq_len(Nsnp), function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
  }
  ## Sequential sampling: draw null statistics in batches of 100 until each
  ## exceedance count reaches `tol` or the sampling budget maxSim is spent.
  pval = rep(1, 2)
  nsim = 0
  if (!all(c(T_random_mf, T_fixed_mf) == 0, na.rm = TRUE)) {
    nsim = 100
    count = sim(n = nsim, R, weight, threshold, T_random_mf, T_fixed_mf)
    while (!all(count >= tol, na.rm = TRUE) && nsim < maxSim) {
      count = count + sim(n = 100, R, weight, threshold, T_random_mf, T_fixed_mf)
      nsim = nsim + 100
    }
    pval = count / nsim
  }
  # Combine the two filtered meta-tests into one omnibus p-value (ofGEM)
  return(list(nsim = nsim, pval_random_mf = pval[1], pval_fixed_mf = pval[2], pval_ofGEM = Fisher.test(pval)$p.value))
}
# Simulate null testing (Z) and filtering (X) statistics with the LD
# structure R, recompute the filtered meta-statistics, and count how often
# the null statistics equal or exceed the observed T_random_mf / T_fixed_mf.
sim = function(n, R, weight, threshold, T_random_mf, T_fixed_mf) {
  # preallocate instead of growing the vectors inside the loop
  T_random_mf_null = numeric(n)
  T_fixed_mf_null = numeric(n)
  for (i in 1:n) {
    # if multi-ethnic
    if (is.list(R)) {
      T_fixed_mf_null[i] = T_random_mf_null[i] = 0
      for (j in 1:length(R)) {
        ## BUG FIX: use the j-th ethnic group's weights (weight[[j]]), not
        ## always the first group's (weight[[1]]), to determine the number
        ## of studies sampled for this group.
        nstudy = length(weight[[j]])
        weight_mat = matrix(rep(weight[[j]], ncol(R[[j]])), byrow = TRUE, nrow = ncol(R[[j]]))
        # draw null filtering (X) and testing (Z) statistics, SNPs in rows
        X = t(mvrnorm(nstudy, rep(0, ncol(R[[j]])), R[[j]]))
        Z = t(mvrnorm(nstudy, rep(0, ncol(R[[j]])), R[[j]]))
        Z_weighted = Z * weight_mat
        # MF_fixed: fixed-effects meta-filtering statistic
        MF_fixed = rowSums(X * weight_mat)
        # MF_random: random-effects meta-filtering statistic
        MF_random = rowSums(X^2 * weight_mat^2)
        T_fixed_mf_null[i] = T_fixed_mf_null[i] + sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
        ## BUG FIX: filter every SNP (seq_along) instead of only the last one
        T_random_mf_null[i] = T_random_mf_null[i] + sum(rowSums(Z_weighted^2) *
          (sapply(seq_along(MF_random),
                  function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
      }
    } else {
      nstudy = length(weight)
      weight_mat = matrix(rep(weight, ncol(R)), byrow = TRUE, nrow = ncol(R))
      X = t(mvrnorm(nstudy, rep(0, ncol(R)), R))
      Z = t(mvrnorm(nstudy, rep(0, ncol(R)), R))
      Z_weighted = Z * weight_mat
      # MF_fixed: fixed-effects meta-filtering statistic
      MF_fixed = rowSums(X * weight_mat)
      # MF_random: random-effects meta-filtering statistic
      MF_random = rowSums(X^2 * weight_mat^2)
      T_fixed_mf_null[i] = sum(rowSums(Z_weighted)^2 * (abs(MF_fixed) > qnorm((1 - threshold/2))))
      ## BUG FIX: seq_along(MF_random) instead of length(MF_random)
      T_random_mf_null[i] = sum(rowSums(Z_weighted^2) *
        (sapply(seq_along(MF_random),
                function(q) davies(MF_random[q], lambda = weight_mat[q, ]^2)$Qq) < threshold))
    }
  }
  # exceedance counts for the (random-effects, fixed-effects) statistics
  return(c(sum(T_random_mf_null >= T_random_mf, na.rm = TRUE), sum(T_fixed_mf_null >= T_fixed_mf, na.rm = TRUE)))
}
## Combine p-values from independent tests with Fisher's method: the combined
## statistic -2 * sum(log(p)) follows a chi-squared distribution with
## 2 * length(p) degrees of freedom under the global null.
Fisher.test = function(p) {
  df_chisq <- 2 * length(p)
  stat <- -2 * sum(log(p))
  list(Xsq = stat,
       p.value = pchisq(stat, df = df_chisq, lower.tail = FALSE))
}
|
# Build the HTML for one quiz question: validate the question text, wrap the
# body and heading in <p> paragraphs, and assemble them into a three-part
# table which is returned to the caller.
write_quiz_html_alph = function(question) {
  # sanity-check the raw question html before rendering anything
  simple_html_checker(question$text)
  # question body, wrapped in <p> tags
  body_html <- write_in_wrapper(question$text, "p")
  # table heading: question title plus its type annotation
  heading_html <- write_in_wrapper(
    c(question$q, sprintf("(question type: %s)", question$type)), "p"
  )
  write_three_part_table(heading_html, body_html)
}
|
/engine/write_quiz_html_alph.R
|
no_license
|
alucas69/CanvasQuizR
|
R
| false
| false
| 518
|
r
|
# Build the HTML for one quiz question: validate the question text, wrap the
# body and heading in <p> paragraphs, and assemble them into a three-part
# table which is returned to the caller.
write_quiz_html_alph = function(question) {
  # sanity-check the raw question html before rendering anything
  simple_html_checker(question$text)
  # question body, wrapped in <p> tags
  body_html <- write_in_wrapper(question$text, "p")
  # table heading: question title plus its type annotation
  heading_html <- write_in_wrapper(
    c(question$q, sprintf("(question type: %s)", question$type)), "p"
  )
  write_three_part_table(heading_html, body_html)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairingGame.R
\name{pairingGame}
\alias{pairingGame}
\title{Play Pairing Game}
\usage{
pairingGame(a, b, strata = pickerFirst, stratb = pickerFirst)
}
\arguments{
\item{a}{numeric vector of player a pieces}
\item{b}{numeric vector of player b pieces of length a}
\item{strata}{picker function to select between two numbers for player a}
\item{stratb}{picker function to select between two numbers for player b}
}
\value{
length 2 vector number of wins for player a and player b
}
\description{
Play a game by providing a sequence of numbers (typically 5).
The game is played in the following sequence:\enumerate{
\item player a reveals her first number
\item player b reveals his first two numbers
\item player a selects one of player b's revealed numbers to match with her revealed number
\item player a reveals her next two numbers
\item player b selects one of player a's revealed numbers to match with his revealed number
\item player b reveals his next two numbers
\item player a selects one of player b's revealed numbers to match with her revealed number
\item player a reveals her next two numbers
\item player b selects one of player a's revealed numbers to match with his revealed number
\item the remaining two numbers are matched.
}
For each match, the player with the higher number wins one point.
The player with the most points wins the game.
The picker functions must have arguments
active, revealed, type and exclude.
active and revealed must be numeric vectors.
type must be a length 1 character vector with value "reveal" or "pair".
If type is "reveal", the function must pick the index of active to reveal.
If type is "pair", the function must pick index of revealed to pair.
exclude must be NA, or a numeric vector,
which is the index of revealed to ignore when type is "reveal".
}
\examples{
pairingGame(a = 1:5, b = 5:1)
pairingGame(a = 10:14, b = 10:14)
pairingGame(a = 1:5, b = c(5, 1:4))
pairingGame(a = rep(1, 5), b = rep(1, 5),
strata = pickerFirst, stratb = pickerFirst)
pairingGame(a = 1:5, b = 5:1, strata = pickerRandom)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerRandom)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerMax)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerJustMax)
set.seed(22532)
pairingGame(a = sample(1:5, size = 5), b = sample(1:5, size = 5),
strata = pickerRandom, stratb = pickerRandom)
}
|
/man/pairingGame.Rd
|
no_license
|
CSJCampbell/throwdown
|
R
| false
| true
| 2,578
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pairingGame.R
\name{pairingGame}
\alias{pairingGame}
\title{Play Pairing Game}
\usage{
pairingGame(a, b, strata = pickerFirst, stratb = pickerFirst)
}
\arguments{
\item{a}{numeric vector of player a pieces}
\item{b}{numeric vector of player b pieces of length a}
\item{strata}{picker function to select between two numbers for player a}
\item{stratb}{picker function to select between two numbers for player b}
}
\value{
length 2 vector number of wins for player a and player b
}
\description{
Play a game by providing a sequence of numbers (typically 5).
The game is played in the following sequence:\enumerate{
\item player a reveals her first number
\item player b reveals his first two numbers
\item player a selects one of player b's revealed numbers to match with her revealed number
\item player a reveals her next two numbers
\item player b selects one of player a's revealed numbers to match with his revealed number
\item player b reveals his next two numbers
\item player a selects one of player b's revealed numbers to match with her revealed number
\item player a reveals her next two numbers
\item player b selects one of player a's revealed numbers to match with his revealed number
\item the remaining two numbers are matched.
}
For each match, the player with the higher number wins one point.
The player with the most points wins the game.
The picker functions must have arguments
active, revealed, type and exclude.
active and revealed must be numeric vectors.
type must be a length 1 character vector with value "reveal" or "pair".
If type is "reveal", the function must pick the index of active to reveal.
If type is "pair", the function must pick index of revealed to pair.
exclude must be NA, or a numeric vector,
which is the index of revealed to ignore when type is "reveal".
}
\examples{
pairingGame(a = 1:5, b = 5:1)
pairingGame(a = 10:14, b = 10:14)
pairingGame(a = 1:5, b = c(5, 1:4))
pairingGame(a = rep(1, 5), b = rep(1, 5),
strata = pickerFirst, stratb = pickerFirst)
pairingGame(a = 1:5, b = 5:1, strata = pickerRandom)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerRandom)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerMax)
pairingGame(a = 1:5, b = c(5, 1:4),
strata = pickerRandom, stratb = pickerJustMax)
set.seed(22532)
pairingGame(a = sample(1:5, size = 5), b = sample(1:5, size = 5),
strata = pickerRandom, stratb = pickerRandom)
}
|
context("pgFull class methods")

# Example pangenome with gene groups and paralogue links defined.
# Style fix: TRUE instead of the reassignable shorthand T.
pg <- .loadPgExample(withGroups = TRUE, withParalogues = TRUE)

# Unpack the bundled genomes into a temporary directory and reload the raw
# gene sequences so getter output can be compared against them.
location <- tempdir()
unzip(system.file('extdata', 'Mycoplasma.zip', package='FindMyFriends'),
      exdir=location)
# BUG FIX: list.files(pattern=) expects a regular expression, not a shell
# glob; '*.fasta' has an invalid leading '*'.  Anchor on the extension.
genomeFiles <- list.files(location, full.names=TRUE, pattern='\\.fasta$')[1:5]
realGenes <- Biostrings::readAAStringSet(genomeFiles)

test_that("genes getter works", {
  expect_equal(genes(pg), realGenes)
  subset <- sample(nGenes(pg), 10)
  expect_equal(genes(pg, subset=subset), realGenes[subset])
  expect_is(genes(pg, split = 'organism'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='organism')), nOrganisms(pg))
  expect_equal(genes(pg, split='organism')[3], genes(pg, split='organism', subset=3))
  expect_equal(genes(pg, split='organism')[[3]], realGenes[seqToOrg(pg)==3])
  expect_is(genes(pg, split = 'group'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='group')), nGeneGroups(pg))
  expect_equal(genes(pg, split='group')[3], genes(pg, split='group', subset=3))
  expect_equal(genes(pg, split='group')[[3]], realGenes[seqToGeneGroup(pg)==3])
  expect_is(genes(pg, split = 'paralogue'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='paralogue')), length(unique(groupInfo(pg)$paralogue)))
  expect_equal(genes(pg, split='paralogue')[3], genes(pg, split='paralogue', subset=3))
  expect_equal(genes(pg, split='paralogue')[[3]], realGenes[seqToGeneGroup(pg) %in% which(groupInfo(pg)$paralogue == 3)])
})

test_that("Gene names setter and getter works", {
  expect_equal(geneNames(pg), names(realGenes))
  geneNames(pg) <- as.character(1:nGenes(pg))
  expect_equal(geneNames(pg), as.character(1:nGenes(pg)))
  geneNames(pg) <- names(realGenes)
})

test_that("Gene width works", {
  expect_equal(geneWidth(pg), Biostrings::width(realGenes))
})
|
/tests/testthat/test-pgFull.R
|
no_license
|
thomasp85/FindMyFriends
|
R
| false
| false
| 1,832
|
r
|
context("pgFull class methods")

# Example pangenome with gene groups and paralogue links defined.
# Style fix: TRUE instead of the reassignable shorthand T.
pg <- .loadPgExample(withGroups = TRUE, withParalogues = TRUE)

# Unpack the bundled genomes into a temporary directory and reload the raw
# gene sequences so getter output can be compared against them.
location <- tempdir()
unzip(system.file('extdata', 'Mycoplasma.zip', package='FindMyFriends'),
      exdir=location)
# BUG FIX: list.files(pattern=) expects a regular expression, not a shell
# glob; '*.fasta' has an invalid leading '*'.  Anchor on the extension.
genomeFiles <- list.files(location, full.names=TRUE, pattern='\\.fasta$')[1:5]
realGenes <- Biostrings::readAAStringSet(genomeFiles)

test_that("genes getter works", {
  expect_equal(genes(pg), realGenes)
  subset <- sample(nGenes(pg), 10)
  expect_equal(genes(pg, subset=subset), realGenes[subset])
  expect_is(genes(pg, split = 'organism'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='organism')), nOrganisms(pg))
  expect_equal(genes(pg, split='organism')[3], genes(pg, split='organism', subset=3))
  expect_equal(genes(pg, split='organism')[[3]], realGenes[seqToOrg(pg)==3])
  expect_is(genes(pg, split = 'group'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='group')), nGeneGroups(pg))
  expect_equal(genes(pg, split='group')[3], genes(pg, split='group', subset=3))
  expect_equal(genes(pg, split='group')[[3]], realGenes[seqToGeneGroup(pg)==3])
  expect_is(genes(pg, split = 'paralogue'), 'AAStringSetList')
  expect_equal(length(genes(pg, split='paralogue')), length(unique(groupInfo(pg)$paralogue)))
  expect_equal(genes(pg, split='paralogue')[3], genes(pg, split='paralogue', subset=3))
  expect_equal(genes(pg, split='paralogue')[[3]], realGenes[seqToGeneGroup(pg) %in% which(groupInfo(pg)$paralogue == 3)])
})

test_that("Gene names setter and getter works", {
  expect_equal(geneNames(pg), names(realGenes))
  geneNames(pg) <- as.character(1:nGenes(pg))
  expect_equal(geneNames(pg), as.character(1:nGenes(pg)))
  geneNames(pg) <- names(realGenes)
})

test_that("Gene width works", {
  expect_equal(geneWidth(pg), Biostrings::width(realGenes))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{e_inspect}
\alias{e_inspect}
\alias{echarts_from_json}
\title{To & From JSON}
\usage{
e_inspect(e, json = FALSE, ...)
echarts_from_json(txt)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{json}{Whether to return the JSON, otherwise returns a \code{list}.}
\item{...}{Additional options to pass to \link[=jsonlite]{toJSON}.}
\item{txt}{JSON character string, url, or file.}
}
\value{
\code{e_inspect} Returns a \code{list} if \code{json} is \code{FALSE} and a
JSON string otherwise. \code{echarts_from_json} returns an object of class \code{echarts4r}.
}
\description{
Get JSON options from an echarts4r object and build one from JSON.
}
\details{
\code{txt} should contain the full list of options required to build a chart.
This is subsequently passed to the \code{setOption} ECharts (JavaScript) function.
}
\note{
Must be passed as last option.
}
\examples{
p <- cars |>
e_charts(dist) |>
e_scatter(speed, symbol_size = 10)
p # plot
# extract the JSON
json <- p |>
e_inspect(
json = TRUE,
pretty = TRUE
)
# print json
json
# rebuild plot
echarts_from_json(json) |>
e_theme("dark") # modify
}
|
/man/echartsNJSON.Rd
|
no_license
|
cran/echarts4r
|
R
| false
| true
| 1,319
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/helpers.R
\name{e_inspect}
\alias{e_inspect}
\alias{echarts_from_json}
\title{To & From JSON}
\usage{
e_inspect(e, json = FALSE, ...)
echarts_from_json(txt)
}
\arguments{
\item{e}{An \code{echarts4r} object as returned by \code{\link{e_charts}} or
a proxy as returned by \code{\link{echarts4rProxy}}.}
\item{json}{Whether to return the JSON, otherwise returns a \code{list}.}
\item{...}{Additional options to pass to \link[=jsonlite]{toJSON}.}
\item{txt}{JSON character string, url, or file.}
}
\value{
\code{e_inspect} Returns a \code{list} if \code{json} is \code{FALSE} and a
JSON string otherwise. \code{echarts_from_json} returns an object of class \code{echarts4r}.
}
\description{
Get JSON options from an echarts4r object and build one from JSON.
}
\details{
\code{txt} should contain the full list of options required to build a chart.
This is subsequently passed to the \code{setOption} ECharts (JavaScript) function.
}
\note{
Must be passed as last option.
}
\examples{
p <- cars |>
e_charts(dist) |>
e_scatter(speed, symbol_size = 10)
p # plot
# extract the JSON
json <- p |>
e_inspect(
json = TRUE,
pretty = TRUE
)
# print json
json
# rebuild plot
echarts_from_json(json) |>
e_theme("dark") # modify
}
|
# Load libraries required for script
library(lubridate)
library(dplyr)

# Read the full power-consumption data set from the working directory.
# '?' marks missing values in the source file.
# Style fix: spell out TRUE/FALSE instead of the reassignable T/F.
power <- read.table(
  './household_power_consumption.txt',
  header = TRUE, sep = ";", stringsAsFactors = FALSE, na.strings = "?")

# Combine the separate Date and Time columns into one character column
power.1 <- mutate(power, DateTime = paste(Date, Time))

# Parse the combined column into POSIXct date-times
power.1[, "DateTime"] <- dmy_hms(power.1[, "DateTime"])

# Keep the two target days, 2007-02-01 and 2007-02-02.
# BUG FIX: use >= so the observation at exactly 2007-02-01 00:00:00 is
# included; the original strict '>' silently dropped it.
power.2 <- subset(power.1, DateTime >= ymd("2007-02-01") & DateTime < ymd("2007-02-03"))

# Plot 3: overlaid line graphs of the three sub-metering series with a legend
png(filename = "plot3.png", width = 480, height = 480, units = "px")
par(mfrow = c(1, 1))
with(power.2, plot(DateTime, Sub_metering_1, type = "n", xlab = "",
                   ylab = "Energy sub metering"))
with(power.2, lines(DateTime, Sub_metering_1))
with(power.2, lines(DateTime, Sub_metering_2, col = "red"))
with(power.2, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
/plot3.R
|
no_license
|
jplaxton/ExData_Plotting1
|
R
| false
| false
| 1,145
|
r
|
# Load libraries required for script
library(lubridate)
library(dplyr)

# Read the full power-consumption data set from the working directory.
# '?' marks missing values in the source file.
# Style fix: spell out TRUE/FALSE instead of the reassignable T/F.
power <- read.table(
  './household_power_consumption.txt',
  header = TRUE, sep = ";", stringsAsFactors = FALSE, na.strings = "?")

# Combine the separate Date and Time columns into one character column
power.1 <- mutate(power, DateTime = paste(Date, Time))

# Parse the combined column into POSIXct date-times
power.1[, "DateTime"] <- dmy_hms(power.1[, "DateTime"])

# Keep the two target days, 2007-02-01 and 2007-02-02.
# BUG FIX: use >= so the observation at exactly 2007-02-01 00:00:00 is
# included; the original strict '>' silently dropped it.
power.2 <- subset(power.1, DateTime >= ymd("2007-02-01") & DateTime < ymd("2007-02-03"))

# Plot 3: overlaid line graphs of the three sub-metering series with a legend
png(filename = "plot3.png", width = 480, height = 480, units = "px")
par(mfrow = c(1, 1))
with(power.2, plot(DateTime, Sub_metering_1, type = "n", xlab = "",
                   ylab = "Energy sub metering"))
with(power.2, lines(DateTime, Sub_metering_1))
with(power.2, lines(DateTime, Sub_metering_2, col = "red"))
with(power.2, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_heatmap_with_sideplots.R
\name{h.cluster_data}
\alias{h.cluster_data}
\title{clust data.table accessor}
\usage{
h.cluster_data(ssvH2)
}
\arguments{
\item{ssvH2}{output from ssvHeatmap2()}
}
\value{
data supplied to geom_raster() with cluster assignments
}
\description{
clust data.table accessor
}
\examples{
h.cluster_data(ssvHeatmap2(heatmap_demo_matrix))
}
|
/man/h.cluster_data.Rd
|
no_license
|
hjanime/ssvRecipes
|
R
| false
| true
| 447
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function_heatmap_with_sideplots.R
\name{h.cluster_data}
\alias{h.cluster_data}
\title{clust data.table accessor}
\usage{
h.cluster_data(ssvH2)
}
\arguments{
\item{ssvH2}{output from ssvHeatmap2()}
}
\value{
data supplied to geom_raster() with cluster assignments
}
\description{
clust data.table accessor
}
\examples{
h.cluster_data(ssvHeatmap2(heatmap_demo_matrix))
}
|
# Column names shared by every iperf-style throughput file.
cnames <- c("time", "srcip", "srcport", "dstip",
            "dstport", "id", "interval", "data",
            "tput")

# Read one throughput file and tag it with its experiment settings.
# Replaces nine near-identical copy-paste stanzas that grew `all` with rbind.
read_run <- function(file, case, bandwidth, delay, drop) {
  dat <- read.csv(paste0("el6383-project/el6383-project/", file))
  names(dat) <- cnames
  dat$case <- case
  dat$bandwidth <- bandwidth
  dat$delay <- delay
  dat$drop <- drop
  dat
}

# One entry per experiment: file, case label, bandwidth (Mbps), delay (ms), loss.
runs <- list(
  list("tcp_0delay_100mBW-1.txt",          "case1",  100,  0, 0),
  list("tcp_0delay_500mBW-1.txt",          "case1",  500,  0, 0),
  list("tcp_0delay_1000mBW-1.txt",         "case1", 1000,  0, 0),
  list("tcp_50delay_100mBW_0loss.txt",     "case2",  100, 50, 0),
  list("tcp_50delay_500mBW_0loss.txt",     "case2",  500, 50, 0),
  list("tcp_50delay_1000mBW_0loss.txt",    "case2", 1000, 50, 0),
  list("tcp_0delay_100mBW_0.01loss.txt",   "case3",  100,  0, 0.01),
  list("tcp_0delay_500mBW_0.01loss.txt",   "case3",  500,  0, 0.01),
  list("tcp_0delay_1000mBW_0.01loss.txt",  "case3", 1000,  0, 0.01)
)

# Bind all runs at once instead of rbind-ing incrementally (same row order).
all <- do.call(rbind, lapply(runs, function(r) do.call(read_run, r)))
all$case <- as.factor(all$case)

library(reshape2)
# Split the "begin-end" interval string into two numeric columns.
all <- transform(all, interval = colsplit(interval, "-", names = c('begin', 'end')))
all$interval.begin <- all$interval$begin
all$interval.end <- all$interval$end
all$interval <- NULL

# A row spanning the whole 0-10s run is the per-run total; the rest are
# the per-second detail samples.
totals <- all[all$interval.begin <= 1 & all$interval.end >= 10, ]
details <- all[!(all$interval.begin <= 1 & all$interval.end >= 10), ]

library(ggplot2)
q <- ggplot(details)
q <- q + geom_point(aes(x = bandwidth, y = tput, colour = case))
q <- q + geom_line(aes(x = bandwidth, y = tput, colour = case, linetype = case))
q <- q + scale_y_continuous("Throughput (bps)")
q <- q + ggtitle("Boxplot of TCP for different cases")
q
|
/review/projects/0002/el6383-project/Rscript/Rscript_tcp_part1.R
|
no_license
|
Roshni-Natarajan/HSN-Lab
|
R
| false
| false
| 2,509
|
r
|
# Load TCP throughput measurements for nine experimental runs and plot
# throughput vs. link bandwidth for each case.
#
# Cases:
#   case1 - varying bandwidth, no added delay, no loss
#   case2 - varying bandwidth, 50 ms added delay, no loss
#   case3 - varying bandwidth, no added delay, 0.01 packet loss

# Column names for the measurement CSV files (iperf-style report fields).
cnames <- c("time", "srcip", "srcport", "dstip",
            "dstport", "id", "interval", "data",
            "tput")

data_dir <- "el6383-project/el6383-project"

# One entry per run: file name plus the conditions it was collected under.
# Replaces nine copy-pasted read/annotate/rbind stanzas.
runs <- data.frame(
  file = c("tcp_0delay_100mBW-1.txt",
           "tcp_0delay_500mBW-1.txt",
           "tcp_0delay_1000mBW-1.txt",
           "tcp_50delay_100mBW_0loss.txt",
           "tcp_50delay_500mBW_0loss.txt",
           "tcp_50delay_1000mBW_0loss.txt",
           "tcp_0delay_100mBW_0.01loss.txt",
           "tcp_0delay_500mBW_0.01loss.txt",
           "tcp_0delay_1000mBW_0.01loss.txt"),
  case = rep(c("case1", "case2", "case3"), each = 3),
  bandwidth = rep(c(100, 500, 1000), times = 3),
  delay = rep(c(0, 50, 0), each = 3),
  drop = rep(c(0, 0, 0.01), each = 3),
  stringsAsFactors = FALSE
)

# Read one run's CSV and tag it with its experimental conditions.
read_run <- function(file, case, bandwidth, delay, drop) {
  dat <- read.csv(file.path(data_dir, file))
  names(dat) <- cnames
  dat$case <- case
  dat$bandwidth <- bandwidth
  dat$delay <- delay
  dat$drop <- drop
  dat
}

# Stack all runs into a single data frame, in the same order as before.
all <- do.call(rbind, Map(read_run, runs$file, runs$case,
                          runs$bandwidth, runs$delay, runs$drop))
rownames(all) <- NULL
all$case <- as.factor(all$case)

# Split the "begin-end" interval string into numeric begin/end columns.
library(reshape2)
all <- transform(all, interval = colsplit(interval, "-", names = c("begin", "end")))
all$interval.begin <- all$interval$begin
all$interval.end <- all$interval$end
all$interval <- NULL

# Rows spanning the whole 0-10 s run are the per-run totals; the remaining
# per-interval rows are what gets plotted.
totals <- all[all$interval.begin <= 1 & all$interval.end >= 10, ]
details <- all[!(all$interval.begin <= 1 & all$interval.end >= 10), ]

# Throughput vs. bandwidth, one colored line per case.
library(ggplot2)
q <- ggplot(details)
q <- q + geom_point(aes(x = bandwidth, y = tput, colour = case))
q <- q + geom_line(aes(x = bandwidth, y = tput, colour = case, linetype = case))
q <- q + scale_y_continuous("Throughput (bps)")
# Title fixed: the figure is a point/line plot, not a boxplot.
q <- q + ggtitle("TCP throughput for different cases")
q
|
\encoding{UTF-8}
\name{note_for_ChiValss_input}
\alias{note_for_ChiValss_input}
\title{Note for child valproate input information}
\description{
Input information of child valproate including individual information, dosing history and sampling history.
}
\keyword{misc}
|
/man/note_for_ChiValss_input.Rd
|
no_license
|
cran/tdm
|
R
| false
| false
| 287
|
rd
|
\encoding{UTF-8}
\name{note_for_ChiValss_input}
\alias{note_for_ChiValss_input}
\title{Note for child valproate input information}
\description{
Input information of child valproate including individual information, dosing history and sampling history.
}
\keyword{misc}
|
# Keep only the FIPS code and CI bounds, clamping negative lower bounds
# to zero, then save each table to disk as an .Rda file.
prep_ci_bounds <- function(df) {
  df %>%
    select(FIPS, ci_l, ci_u) %>%
    mutate(ci_l = if_else(ci_l < 0, 0, ci_l))
}
IR_save <- prep_ci_bounds(IR_state_new)
PR_save <- prep_ci_bounds(PR_state_new_r)
save(IR_save, file = "IR_save.Rda")
save(PR_save, file = "PR_save.Rda")
|
/Script/Testing_weights_7b.R
|
no_license
|
raedkm/TTI-Astham-CI
|
R
| false
| false
| 281
|
r
|
# Keep only the FIPS code and CI bounds, clamping negative lower bounds
# to zero, then save each table to disk as an .Rda file.
prep_ci_bounds <- function(df) {
  df %>%
    select(FIPS, ci_l, ci_u) %>%
    mutate(ci_l = if_else(ci_l < 0, 0, ci_l))
}
IR_save <- prep_ci_bounds(IR_state_new)
PR_save <- prep_ci_bounds(PR_state_new_r)
save(IR_save, file = "IR_save.Rda")
save(PR_save, file = "PR_save.Rda")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwr_prop_2pop.R
\name{pwr_prop_2pop}
\alias{pwr_prop_2pop}
\title{Power and sample size for t-test for two proportions.}
\usage{
pwr_prop_2pop(p1, p2, delta0 = 0, n1 = NULL, n2 = NULL, pwr = NULL,
alternative = "two.sided", sig_level = 0.05)
}
\arguments{
\item{p1}{proportion for the first population}
\item{p2}{proportion for the second population}
\item{delta0}{difference of proportions}
\item{n1}{number of observations (sample size) for the first population}
\item{n2}{number of observations (sample size) for the second population}
\item{pwr}{power of test \eqn{1 - \beta} (1 minus type II error probability)}
\item{alternative}{a character string specifying the alternative hypothesis,
must be one of "two.sided" (default), "greater" or "less"}
\item{sig_level}{significance level (Type I error probability)}
}
\value{
\code{pwr_prop_2pop} returns a list with the following
components:
\describe{
\item{p1}{proportion for the first population}
\item{p2}{proportion for the second population}
\item{sig_level}{significance level}
\item{power_sampleSize}{A \code{tibble} with sample size, \code{n1} for the
first population and \code{n2} for the second population}
}
}
\description{
\code{pwr_prop_2pop} computes the power and the sample size for testing
two proportions.
}
\details{
Exactly one of the parameters samples sizes ('n1' and 'n2')
and 'pwr' must be passed as NULL, and that parameter is determined from
the other. Notice that the last one has non-NULL default so NULL must be
explicitly passed if you want to compute it.
The parameters 'p1' and 'p2' are required.
The effect size is computed internally.
}
\examples{
# Power
pwr_prop_2pop(p1 = 0.3, p2 = 0.15, n1 = 10, n2 = 10, pwr = NULL,
alternative = "two.sided", sig_level = 0.05)
# Sample size
pwr_prop_2pop(p1 = 0.3, p2 = 0.15, n1 = NULL, n2 = NULL, pwr = 0.99,
alternative = "two.sided", sig_level = 0.05)
}
\keyword{difference}
\keyword{hypothesis}
\keyword{level,}
\keyword{of}
\keyword{populations,}
\keyword{power,}
\keyword{proportions,}
\keyword{sample}
\keyword{significance}
\keyword{size}
\keyword{test,}
\keyword{testing,}
\keyword{two}
\keyword{variables,}
\keyword{z}
|
/man/pwr_prop_2pop.Rd
|
no_license
|
gilberto-sassi/power
|
R
| false
| true
| 2,245
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pwr_prop_2pop.R
\name{pwr_prop_2pop}
\alias{pwr_prop_2pop}
\title{Power and sample size for t-test for two proportions.}
\usage{
pwr_prop_2pop(p1, p2, delta0 = 0, n1 = NULL, n2 = NULL, pwr = NULL,
alternative = "two.sided", sig_level = 0.05)
}
\arguments{
\item{p1}{proportion for the first population}
\item{p2}{proportion for the second population}
\item{delta0}{difference of proportions}
\item{n1}{number of observations (sample size) for the first population}
\item{n2}{number of observations (sample size) for the second population}
\item{pwr}{power of test \eqn{1 - \beta} (1 minus type II error probability)}
\item{alternative}{a character string specifying the alternative hypothesis,
must be one of "two.sided" (default), "greater" or "less"}
\item{sig_level}{significance level (Type I error probability)}
}
\value{
\code{pwr_prop_2pop} returns a list with the following
components:
\describe{
\item{p1}{proportion for the first population}
\item{p2}{proportion for the second population}
\item{sig_level}{significance level}
\item{power_sampleSize}{A \code{tibble} with sample size, \code{n1} for the
first population and \code{n2} for the second population}
}
}
\description{
\code{pwr_prop_2pop} computes the power and the sample size for testing
two proportions.
}
\details{
Exactly one of the parameters samples sizes ('n1' and 'n2')
and 'pwr' must be passed as NULL, and that parameter is determined from
the other. Notice that the last one has non-NULL default so NULL must be
explicitly passed if you want to compute it.
The parameters 'p1' and 'p2' are required.
The effect size is computed internally.
}
\examples{
# Power
pwr_prop_2pop(p1 = 0.3, p2 = 0.15, n1 = 10, n2 = 10, pwr = NULL,
alternative = "two.sided", sig_level = 0.05)
# Sample size
pwr_prop_2pop(p1 = 0.3, p2 = 0.15, n1 = NULL, n2 = NULL, pwr = 0.99,
alternative = "two.sided", sig_level = 0.05)
}
\keyword{difference}
\keyword{hypothesis}
\keyword{level,}
\keyword{of}
\keyword{populations,}
\keyword{power,}
\keyword{proportions,}
\keyword{sample}
\keyword{significance}
\keyword{size}
\keyword{test,}
\keyword{testing,}
\keyword{two}
\keyword{variables,}
\keyword{z}
|
# NOTE(review): appears to be an auto-generated fuzz/regression input
# (AFL + valgrind style) for the internal function multivariance:::match_rows.
# A: 5x1 double matrix with extreme magnitudes (values near the double
#    overflow range plus one denormal-range value and zero).
# B: 1x1 zero matrix.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.11099476730851e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
# Call the function under test with the generated argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Print the result's structure so the run leaves an inspectable trace.
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613114514-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 251
|
r
|
# NOTE(review): appears to be an auto-generated fuzz/regression input
# (AFL + valgrind style) for the internal function multivariance:::match_rows.
# A: 5x1 double matrix with extreme magnitudes (values near the double
#    overflow range plus one denormal-range value and zero).
# B: 1x1 zero matrix.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.11099476730851e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
# Call the function under test with the generated argument list.
result <- do.call(multivariance:::match_rows,testlist)
# Print the result's structure so the run leaves an inspectable trace.
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EvaluatePlp.R
\name{specificity}
\alias{specificity}
\title{Calculate the specificity}
\usage{
specificity(TP, TN, FN, FP)
}
\arguments{
\item{TP}{Number of true positives}
\item{TN}{Number of true negatives}
\item{FN}{Number of false negatives}
\item{FP}{Number of false positives}
}
\value{
specificity value
}
\description{
Calculate the specificity
}
\details{
Calculate the specificity
}
|
/man/specificity.Rd
|
permissive
|
schuemie/PatientLevelPrediction
|
R
| false
| true
| 474
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EvaluatePlp.R
\name{specificity}
\alias{specificity}
\title{Calculate the specificity}
\usage{
specificity(TP, TN, FN, FP)
}
\arguments{
\item{TP}{Number of true positives}
\item{TN}{Number of true negatives}
\item{FN}{Number of false negatives}
\item{FP}{Number of false positives}
}
\value{
specificity value
}
\description{
Calculate the specificity
}
\details{
Calculate the specificity
}
|
# Scatterplot of fuel economy (mpg) against weight (wt) for the built-in
# mtcars data, with a fitted least-squares regression line overlaid.
# attach()/detach() is avoided: attach() modifies the search path and can
# silently mask same-named objects; explicit data references are safer.
# xlab/ylab reproduce the short axis labels attach() used to give.
plot(mtcars$wt, mtcars$mpg, xlab = "wt", ylab = "mpg")
abline(lm(mpg ~ wt, data = mtcars))
title("regression of MPG on Weight")
|
/src/dg/r-mon/courseware/script1.R
|
no_license
|
xenron/sandbox-da-r
|
R
| false
| false
| 99
|
r
|
# Scatterplot of fuel economy (mpg) against weight (wt) for the built-in
# mtcars data, with a fitted least-squares regression line overlaid.
# attach()/detach() is avoided: attach() modifies the search path and can
# silently mask same-named objects; explicit data references are safer.
# xlab/ylab reproduce the short axis labels attach() used to give.
plot(mtcars$wt, mtcars$mpg, xlab = "wt", ylab = "mpg")
abline(lm(mpg ~ wt, data = mtcars))
title("regression of MPG on Weight")
|
# Figure 2: mariculture results -- historical progress (production, producing
# countries, cultured species), FIFO trends, and projected profitable area /
# production potential by climate scenario. Reads FAO aquaculture data, FIFO
# projections, and forage-fish availability; writes
# figures/Fig2_mariculture_results.pdf.
# Clear workspace
# NOTE(review): rm(list = ls()) only clears the global environment (not
# loaded packages or options); prefer running this in a fresh R session.
rm(list = ls())
# Setup
################################################################################
# Packages
library(tidyverse)
# Directories
plotdir <- "figures"
datadir <- "data/capture_projections/data"
# NOTE(review): absolute, user/machine-specific paths below -- the script is
# only runnable on the author's machine as written.
outputdir <- "/Volumes/GoogleDrive/Shared drives/emlab/projects/current-projects/blue-paper-2/data/output/processed/"
# Read FAO data
fao_orig <- readRDS("/Users/cfree/Dropbox/Chris/UCSB/data/fao/aquaculture/processed/1950_2017_fao_aquaculture_data.Rds")
# Read FIFO data
# (assumed to load fifo_proj_g, fifos_g, and fifo_preds used below --
# TODO confirm the .Rdata file's full contents)
load("data/feed_params/processed/fifo_trends_projections.Rdata")
# Read forage fish availability data
ffdata <- read.csv("data/feed_params/processed/forage_fish_availability.csv", as.is=T)
# Calculate ecological limits for finfish mariculture
################################################################################
# Period key
# (maps individual years into the three decade-long reporting periods)
period_key <- tibble(year=c(2021:2030, 2051:2060, 2091:2100),
                     period=sort(rep(c("2021-2030", "2051-2060", "2091-2100"), 10)))
# FIFO averages
# ("fish in, fish out" ratio averaged across groups; projection years
# 2030/2050 are recoded as the two big-picture scenarios)
fifo_avgs <- fifo_proj_g %>%
  group_by(year) %>%
  summarize(fifo_avg=mean(fifo)) %>%
  rename(scenario=year) %>%
  mutate(scenario=recode(scenario,
                         "2030"="Business-as-usual",
                         "2050"="Progressive reforms"))
# Calculate ecological limits of FAQ based on FF in period, climate, mgmt, scenario
faq_limits <- ffdata %>%
  # Add period
  left_join(period_key) %>%
  filter(!is.na(period)) %>%
  # Summarize
  group_by(rcp, mgmt_scenario, feed_scenario, period) %>%
  summarize(across(.cols=catch_mt:catch_ff_mt_maq, .fns = mean)) %>%
  ungroup() %>%
  # Add big-picture scenarios
  mutate(scenario=ifelse(mgmt_scenario=="BAU fisheries management" & feed_scenario=="BAU feed use", "Business-as-usual", NA),
         scenario=ifelse(mgmt_scenario=="Reformed fisheries management" & feed_scenario=="Reformed feed use", "Progressive reforms", scenario)) %>%
  filter(!is.na(scenario)) %>%
  # Add mean FIFO for scenario
  left_join(fifo_avgs) %>%
  mutate(meat_mt=catch_ff_mt_maq / fifo_avg) %>%
  # Add sector
  mutate(sector="Finfish mariculture") %>%
  mutate(sector=factor(sector, levels=c("Finfish mariculture", "Bivalve mariculture")))
# Setup
################################################################################
# COME BACK TO THIS
# BUILD KEY TO IDENTIFY SPECIES THAT ARE FAIR TO COUNT
# Build mariculture progress data
maq_nonzero <- fao_orig %>%
  # Marine/brackish finfish/bivalves
  filter(environment %in% c("Marine", "Brackishwater") & major_group %in% c("Pisces", "Mollusca")) %>%
  # Remove stragglers
  filter(!isscaap %in% c("Squids, cuttlefishes, octopuses", "Miscellaneous freshwater fishes", "Freshwater molluscs", "Pearls, mother-of-pearl, shells")) %>%
  # Remove zero records
  filter(quantity_mt>0)
# Inspect countries and species
sort(unique(maq_nonzero$species_orig))
# Production stats
stats <- maq_nonzero %>%
  # Calculate annual stats
  group_by(year, major_group) %>%
  summarise(ncountries=n_distinct(country_orig),
            prod_mt=sum(quantity_mt),
            value_usd_t=sum(value_usd_t),
            nspp=n_distinct(species_orig)) %>%
  ungroup() %>%
  # Reclass type
  rename(type=major_group) %>%
  mutate(type=recode_factor(type,
                            "Pisces"="Finfish mariculture",
                            "Mollusca"="Bivalve mariculture"))
# Plot data
################################################################################
# Small plot theme
small_plot_theme <- theme(axis.text=element_text(size=5),
                          axis.title=element_text(size=7),
                          axis.title.x=element_blank(),
                          axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
                          plot.title=element_blank(),
                          plot.tag=element_text(size=8, face="bold"),
                          legend.title=element_blank(),
                          legend.text = element_text(size=5),
                          legend.background = element_rect(fill=alpha('blue', 0)),
                          panel.grid.major = element_blank(),
                          panel.grid.minor = element_blank(),
                          panel.background = element_blank(),
                          axis.line = element_line(colour = "black"))
# Big plot theme (original comment said "Small" -- this one is for the
# larger faceted panels e and f)
big_plot_theme <- theme(axis.text=element_text(size=5),
                        axis.title=element_text(size=7),
                        axis.title.x=element_blank(),
                        # axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
                        plot.title=element_blank(),
                        strip.text=element_text(size=7),
                        plot.tag=element_text(size=8, face="bold"),
                        legend.title=element_text(size=7),
                        legend.text = element_text(size=5),
                        legend.background = element_rect(fill=alpha('blue', 0)),
                        panel.grid.major = element_blank(),
                        panel.grid.minor = element_blank(),
                        panel.background = element_blank(),
                        axis.line = element_line(colour = "black"),
                        legend.margin=unit(0, "cm"))
# Mariculture progress plots
############################################
# MAQ production
g1 <- ggplot(stats, aes(x=year, y=prod_mt/1e6, color=type)) +
  geom_line() +
  labs(x="", y="Production\n(millions of mt)", tag="a") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  annotate(geom="text", x=1950, y=13.8, hjust=0, label="Bivalve mariculture", color="navy", inherit.aes = F, size=1.9) +
  annotate(geom="text", x=1950, y=15.5, hjust=0, label="Finfish mariculture", color="salmon", inherit.aes = F, size=1.9) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g1
# MAQ producing countries
g2 <- ggplot(stats, aes(x=year, y=ncountries, color=type)) +
  geom_line() +
  labs(x="", y="Number of\nproducing countries", tag="b") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g2
# MAQ species
g3 <- ggplot(stats, aes(x=year, y=nspp, color=type)) +
  geom_line() +
  labs(x="", y="Number of\ncultured species", tag="c") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g3
# FIFO progress plots
############################################
# Format data
# (order groups by their fitted FIFO in 2000, highest first, so the text
# labels stack in the same order as the curves)
group_order <- fifo_preds %>%
  filter(year==2000) %>%
  arrange(desc(fifo)) %>%
  pull(group)
fifos_g <- fifos_g %>%
  mutate(group=factor(group, levels=group_order))
fifo_preds <- fifo_preds %>%
  mutate(group=factor(group, levels=group_order))
# FIFO trends
groups <-tibble(group=group_order)
g4 <- ggplot(fifos_g, aes(x=year, y=fifo, color=group)) +
  # Plot exponential decline fits
  geom_line(data=fifo_preds, mapping=aes(x=year, y=fifo, color=group), lwd=0.3) +
  # Plot points
  geom_point(size=0.3) +
  # Plot text labels
  geom_text(data=groups, mapping=aes(x=2050, y=seq(4.6, 2.4, length.out = nrow(groups)), hjust=1, label=group, color=group), size=1.9) +
  # Limits
  scale_x_continuous(breaks=seq(2000,2050, 10), limits = c(2000, 2050)) +
  # Labels
  labs(x="", y='FIFO ratio\n("fish in, fish out")', tag="d") +
  # Reference line
  geom_hline(yintercept=1, linetype="dashed", lwd=0.4) +
  # Theme
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g4
# Number of countries
# g7 <- ggplot(pdata_use, aes(x=period, y=ncountries, fill=rcp)) +
#   facet_wrap(~sector, scales="free") +
#   labs(x="Period", y="Number of countries") +
#   geom_bar(stat="identity", position="dodge")
# g7
# Aquaculture results plots
############################################
# Build data
# (one .Rds per RCP/sector combination; summarize each into per-period stats)
aqfiles <- list.files(outputdir, pattern="rational_use_new_costs1.Rds")
pdata <- purrr::map_df(aqfiles , function(x){
  # Read file
  sdata <- readRDS(file.path(outputdir, x))
  # Data info
  type <- ifelse(grepl("Bivalve", x), "Bivalve mariculture", "Finfish mariculture")
  rcp <- paste("RCP", substr(x, 4, 5))
  # Calculate statistics
  pstats <- sdata %>%
    # Calc stats
    group_by(period) %>%
    summarize(ncells=n(),
              area_sqkm=ncells*100,
              ncountries=n_distinct(ter1_name),
              prod_mt=sum(prod_mt_yr)) %>%
    # Add columns
    # (catch2meat: 0.17 / 0.87 presumably live-weight to edible-meat
    # conversion factors per sector -- TODO confirm source)
    mutate(sector=type,
           rcp=rcp,
           catch2meat=ifelse(type=="Bivalve mariculture", 0.17, 0.87),
           meat_mt=prod_mt*catch2meat) %>%
    # Arrange
    select(sector, rcp, period, everything())
})
# Format
pdata_use <- pdata %>%
  mutate(rcp=recode(rcp,
                    "RCP 26"="RCP 2.6",
                    "RCP 45"="RCP 4.5",
                    "RCP 60"="RCP 6.0",
                    "RCP 85"="RCP 8.5"),
         sector=factor(sector, levels=c("Finfish mariculture",
                                        "Bivalve mariculture")))
# Suitable area
g5 <- ggplot(pdata_use, aes(x=period, y=area_sqkm/1e6, fill=rcp)) +
  facet_wrap(~sector, scales="free") +
  geom_bar(stat="identity", position="dodge") +
  # Labels
  labs(x="Period", y="Profitable area\n(millions of sq. km)", tag="e") +
  # Legend
  scale_fill_manual(name="Climate scenario", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(),
                    guide = guide_legend(title.position = "top")) +
  # Theme
  theme_bw() + big_plot_theme +
  theme(legend.position = "bottom")
g5
# Demand
# (103*1e6 appears to be a total demand figure in mt scaled by the per-sector
# meat conversion factors -- TODO confirm against the paper's demand numbers)
demand_df <- tibble(type="Demand limit",
                    sector=c("Bivalve mariculture", "Finfish mariculture"),
                    meat_mt_demand=c(103*1e6*0.17, 103*1e6*0.27)) %>%
  mutate(sector=factor(sector, levels=c("Finfish mariculture", "Bivalve mariculture")))
# Production potential (not log scale)
# g6 <- ggplot(pdata_use, aes(x=period, y=meat_mt/1e9, fill=rcp)) +
#   facet_wrap(~sector, scales="free") +
#   geom_bar(stat="identity", position="dodge", show.legend = F) +
#   # Axes
#   # Reference line
#   geom_hline(data=demand_df, mapping=aes(yintercept=meat_mt_demand/1e9, linetype=type), lwd=0.5, show.legend = T) +
#   # Labels
#   labs(x="Period", y="Production potential\n(billions of mt of meat)", tag="f") +
#   # Legend
#   scale_fill_manual(name="", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(), guide="none") +
#   scale_linetype_manual(name="", values="dotted") +
#   # Theme
#   theme_bw() + big_plot_theme +
#   theme(legend.position = "bottom")
# g6
# Production potential (log scale)
g6 <- ggplot(pdata_use, aes(x=period, y=meat_mt/1e6, fill=rcp)) +
  facet_wrap(~sector, scales="free") +
  geom_bar(stat="identity", position="dodge", show.legend = F) +
  # Axes
  scale_y_log10(limit=c(1,25000), breaks=c(1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 25000)) + # millions
  # scale_y_log10(limit=c(0.0001,25), breaks=c(0.5, 1, 2, 5, 10, 15, 25)) + # billions
  # Reference line
  geom_hline(data=demand_df, mapping=aes(yintercept=meat_mt_demand/1e6, linetype=type), lwd=0.4, show.legend = F) +
  # Add reference points
  geom_point(data=faq_limits, mapping=aes(x=period, y=meat_mt/1e6, shape=scenario, group=rcp), position=position_dodge(width=0.9), size=0.8) +
  # Labels
  labs(x="Period", y="Production potential\n(millions of mt of meat per year)", tag="f") +
  # Legend
  scale_fill_manual(name="", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(), guide="none") +
  scale_linetype_manual(name="", values="dotted") +
  scale_shape_manual(name="Feed limitation under:", guide = guide_legend(title.position = "top"), values=c(1,19)) +
  # Theme
  theme_bw() + big_plot_theme +
  theme(legend.position = "bottom")
g6
# Merge and export
############################################
# Empty plot
gfill <- ggplot() + theme_void()
# Merge
# (top row: four small panels a-d; bottom row: the two wide panels e and f)
layout_matrix <- matrix(data=c(1, 2, 3, 4,
                               5, 5, 6, 6), ncol=4, byrow=T)
g <- gridExtra::grid.arrange(g1, g2, g3, g4, g5, g6,
                             heights=c(0.4, 0.6),
                             layout_matrix=layout_matrix)
g
# Export figure
ggsave(g, filename=file.path(plotdir, "Fig2_mariculture_results.pdf"),
       width=6.5, height=3.5, units="in", dpi=600)
|
/code/ms_figures/Fig2_mariculture_results.R
|
no_license
|
cfree14/aquacast
|
R
| false
| false
| 12,505
|
r
|
# Figure 2: mariculture results -- historical progress (production, producing
# countries, cultured species), FIFO trends, and projected profitable area /
# production potential by climate scenario. Reads FAO aquaculture data, FIFO
# projections, and forage-fish availability; writes
# figures/Fig2_mariculture_results.pdf.
# Clear workspace
# NOTE(review): rm(list = ls()) only clears the global environment (not
# loaded packages or options); prefer running this in a fresh R session.
rm(list = ls())
# Setup
################################################################################
# Packages
library(tidyverse)
# Directories
plotdir <- "figures"
datadir <- "data/capture_projections/data"
# NOTE(review): absolute, user/machine-specific paths below -- the script is
# only runnable on the author's machine as written.
outputdir <- "/Volumes/GoogleDrive/Shared drives/emlab/projects/current-projects/blue-paper-2/data/output/processed/"
# Read FAO data
fao_orig <- readRDS("/Users/cfree/Dropbox/Chris/UCSB/data/fao/aquaculture/processed/1950_2017_fao_aquaculture_data.Rds")
# Read FIFO data
# (assumed to load fifo_proj_g, fifos_g, and fifo_preds used below --
# TODO confirm the .Rdata file's full contents)
load("data/feed_params/processed/fifo_trends_projections.Rdata")
# Read forage fish availability data
ffdata <- read.csv("data/feed_params/processed/forage_fish_availability.csv", as.is=T)
# Calculate ecological limits for finfish mariculture
################################################################################
# Period key
# (maps individual years into the three decade-long reporting periods)
period_key <- tibble(year=c(2021:2030, 2051:2060, 2091:2100),
                     period=sort(rep(c("2021-2030", "2051-2060", "2091-2100"), 10)))
# FIFO averages
# ("fish in, fish out" ratio averaged across groups; projection years
# 2030/2050 are recoded as the two big-picture scenarios)
fifo_avgs <- fifo_proj_g %>%
  group_by(year) %>%
  summarize(fifo_avg=mean(fifo)) %>%
  rename(scenario=year) %>%
  mutate(scenario=recode(scenario,
                         "2030"="Business-as-usual",
                         "2050"="Progressive reforms"))
# Calculate ecological limits of FAQ based on FF in period, climate, mgmt, scenario
faq_limits <- ffdata %>%
  # Add period
  left_join(period_key) %>%
  filter(!is.na(period)) %>%
  # Summarize
  group_by(rcp, mgmt_scenario, feed_scenario, period) %>%
  summarize(across(.cols=catch_mt:catch_ff_mt_maq, .fns = mean)) %>%
  ungroup() %>%
  # Add big-picture scenarios
  mutate(scenario=ifelse(mgmt_scenario=="BAU fisheries management" & feed_scenario=="BAU feed use", "Business-as-usual", NA),
         scenario=ifelse(mgmt_scenario=="Reformed fisheries management" & feed_scenario=="Reformed feed use", "Progressive reforms", scenario)) %>%
  filter(!is.na(scenario)) %>%
  # Add mean FIFO for scenario
  left_join(fifo_avgs) %>%
  mutate(meat_mt=catch_ff_mt_maq / fifo_avg) %>%
  # Add sector
  mutate(sector="Finfish mariculture") %>%
  mutate(sector=factor(sector, levels=c("Finfish mariculture", "Bivalve mariculture")))
# Setup
################################################################################
# COME BACK TO THIS
# BUILD KEY TO IDENTIFY SPECIES THAT ARE FAIR TO COUNT
# Build mariculture progress data
maq_nonzero <- fao_orig %>%
  # Marine/brackish finfish/bivalves
  filter(environment %in% c("Marine", "Brackishwater") & major_group %in% c("Pisces", "Mollusca")) %>%
  # Remove stragglers
  filter(!isscaap %in% c("Squids, cuttlefishes, octopuses", "Miscellaneous freshwater fishes", "Freshwater molluscs", "Pearls, mother-of-pearl, shells")) %>%
  # Remove zero records
  filter(quantity_mt>0)
# Inspect countries and species
sort(unique(maq_nonzero$species_orig))
# Production stats
stats <- maq_nonzero %>%
  # Calculate annual stats
  group_by(year, major_group) %>%
  summarise(ncountries=n_distinct(country_orig),
            prod_mt=sum(quantity_mt),
            value_usd_t=sum(value_usd_t),
            nspp=n_distinct(species_orig)) %>%
  ungroup() %>%
  # Reclass type
  rename(type=major_group) %>%
  mutate(type=recode_factor(type,
                            "Pisces"="Finfish mariculture",
                            "Mollusca"="Bivalve mariculture"))
# Plot data
################################################################################
# Small plot theme
small_plot_theme <- theme(axis.text=element_text(size=5),
                          axis.title=element_text(size=7),
                          axis.title.x=element_blank(),
                          axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
                          plot.title=element_blank(),
                          plot.tag=element_text(size=8, face="bold"),
                          legend.title=element_blank(),
                          legend.text = element_text(size=5),
                          legend.background = element_rect(fill=alpha('blue', 0)),
                          panel.grid.major = element_blank(),
                          panel.grid.minor = element_blank(),
                          panel.background = element_blank(),
                          axis.line = element_line(colour = "black"))
# Big plot theme (original comment said "Small" -- this one is for the
# larger faceted panels e and f)
big_plot_theme <- theme(axis.text=element_text(size=5),
                        axis.title=element_text(size=7),
                        axis.title.x=element_blank(),
                        # axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1),
                        plot.title=element_blank(),
                        strip.text=element_text(size=7),
                        plot.tag=element_text(size=8, face="bold"),
                        legend.title=element_text(size=7),
                        legend.text = element_text(size=5),
                        legend.background = element_rect(fill=alpha('blue', 0)),
                        panel.grid.major = element_blank(),
                        panel.grid.minor = element_blank(),
                        panel.background = element_blank(),
                        axis.line = element_line(colour = "black"),
                        legend.margin=unit(0, "cm"))
# Mariculture progress plots
############################################
# MAQ production
g1 <- ggplot(stats, aes(x=year, y=prod_mt/1e6, color=type)) +
  geom_line() +
  labs(x="", y="Production\n(millions of mt)", tag="a") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  annotate(geom="text", x=1950, y=13.8, hjust=0, label="Bivalve mariculture", color="navy", inherit.aes = F, size=1.9) +
  annotate(geom="text", x=1950, y=15.5, hjust=0, label="Finfish mariculture", color="salmon", inherit.aes = F, size=1.9) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g1
# MAQ producing countries
g2 <- ggplot(stats, aes(x=year, y=ncountries, color=type)) +
  geom_line() +
  labs(x="", y="Number of\nproducing countries", tag="b") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g2
# MAQ species
g3 <- ggplot(stats, aes(x=year, y=nspp, color=type)) +
  geom_line() +
  labs(x="", y="Number of\ncultured species", tag="c") +
  lims(y=c(0,NA)) +
  scale_x_continuous(breaks=seq(1950,2020,10)) +
  scale_color_manual(name="", values=c("salmon", "navy")) +
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g3
# FIFO progress plots
############################################
# Format data
# (order groups by their fitted FIFO in 2000, highest first, so the text
# labels stack in the same order as the curves)
group_order <- fifo_preds %>%
  filter(year==2000) %>%
  arrange(desc(fifo)) %>%
  pull(group)
fifos_g <- fifos_g %>%
  mutate(group=factor(group, levels=group_order))
fifo_preds <- fifo_preds %>%
  mutate(group=factor(group, levels=group_order))
# FIFO trends
groups <-tibble(group=group_order)
g4 <- ggplot(fifos_g, aes(x=year, y=fifo, color=group)) +
  # Plot exponential decline fits
  geom_line(data=fifo_preds, mapping=aes(x=year, y=fifo, color=group), lwd=0.3) +
  # Plot points
  geom_point(size=0.3) +
  # Plot text labels
  geom_text(data=groups, mapping=aes(x=2050, y=seq(4.6, 2.4, length.out = nrow(groups)), hjust=1, label=group, color=group), size=1.9) +
  # Limits
  scale_x_continuous(breaks=seq(2000,2050, 10), limits = c(2000, 2050)) +
  # Labels
  labs(x="", y='FIFO ratio\n("fish in, fish out")', tag="d") +
  # Reference line
  geom_hline(yintercept=1, linetype="dashed", lwd=0.4) +
  # Theme
  theme_bw() + small_plot_theme +
  theme(legend.position = "none")
g4
# Number of countries
# g7 <- ggplot(pdata_use, aes(x=period, y=ncountries, fill=rcp)) +
#   facet_wrap(~sector, scales="free") +
#   labs(x="Period", y="Number of countries") +
#   geom_bar(stat="identity", position="dodge")
# g7
# Aquaculture results plots
############################################
# Build data
# (one .Rds per RCP/sector combination; summarize each into per-period stats)
aqfiles <- list.files(outputdir, pattern="rational_use_new_costs1.Rds")
pdata <- purrr::map_df(aqfiles , function(x){
  # Read file
  sdata <- readRDS(file.path(outputdir, x))
  # Data info
  type <- ifelse(grepl("Bivalve", x), "Bivalve mariculture", "Finfish mariculture")
  rcp <- paste("RCP", substr(x, 4, 5))
  # Calculate statistics
  pstats <- sdata %>%
    # Calc stats
    group_by(period) %>%
    summarize(ncells=n(),
              area_sqkm=ncells*100,
              ncountries=n_distinct(ter1_name),
              prod_mt=sum(prod_mt_yr)) %>%
    # Add columns
    # (catch2meat: 0.17 / 0.87 presumably live-weight to edible-meat
    # conversion factors per sector -- TODO confirm source)
    mutate(sector=type,
           rcp=rcp,
           catch2meat=ifelse(type=="Bivalve mariculture", 0.17, 0.87),
           meat_mt=prod_mt*catch2meat) %>%
    # Arrange
    select(sector, rcp, period, everything())
})
# Format
pdata_use <- pdata %>%
  mutate(rcp=recode(rcp,
                    "RCP 26"="RCP 2.6",
                    "RCP 45"="RCP 4.5",
                    "RCP 60"="RCP 6.0",
                    "RCP 85"="RCP 8.5"),
         sector=factor(sector, levels=c("Finfish mariculture",
                                        "Bivalve mariculture")))
# Suitable area
g5 <- ggplot(pdata_use, aes(x=period, y=area_sqkm/1e6, fill=rcp)) +
  facet_wrap(~sector, scales="free") +
  geom_bar(stat="identity", position="dodge") +
  # Labels
  labs(x="Period", y="Profitable area\n(millions of sq. km)", tag="e") +
  # Legend
  scale_fill_manual(name="Climate scenario", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(),
                    guide = guide_legend(title.position = "top")) +
  # Theme
  theme_bw() + big_plot_theme +
  theme(legend.position = "bottom")
g5
# Demand
# (103*1e6 appears to be a total demand figure in mt scaled by the per-sector
# meat conversion factors -- TODO confirm against the paper's demand numbers)
demand_df <- tibble(type="Demand limit",
                    sector=c("Bivalve mariculture", "Finfish mariculture"),
                    meat_mt_demand=c(103*1e6*0.17, 103*1e6*0.27)) %>%
  mutate(sector=factor(sector, levels=c("Finfish mariculture", "Bivalve mariculture")))
# Production potential (not log scale)
# g6 <- ggplot(pdata_use, aes(x=period, y=meat_mt/1e9, fill=rcp)) +
#   facet_wrap(~sector, scales="free") +
#   geom_bar(stat="identity", position="dodge", show.legend = F) +
#   # Axes
#   # Reference line
#   geom_hline(data=demand_df, mapping=aes(yintercept=meat_mt_demand/1e9, linetype=type), lwd=0.5, show.legend = T) +
#   # Labels
#   labs(x="Period", y="Production potential\n(billions of mt of meat)", tag="f") +
#   # Legend
#   scale_fill_manual(name="", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(), guide="none") +
#   scale_linetype_manual(name="", values="dotted") +
#   # Theme
#   theme_bw() + big_plot_theme +
#   theme(legend.position = "bottom")
# g6
# Production potential (log scale)
g6 <- ggplot(pdata_use, aes(x=period, y=meat_mt/1e6, fill=rcp)) +
  facet_wrap(~sector, scales="free") +
  geom_bar(stat="identity", position="dodge", show.legend = F) +
  # Axes
  scale_y_log10(limit=c(1,25000), breaks=c(1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 25000)) + # millions
  # scale_y_log10(limit=c(0.0001,25), breaks=c(0.5, 1, 2, 5, 10, 15, 25)) + # billions
  # Reference line
  geom_hline(data=demand_df, mapping=aes(yintercept=meat_mt_demand/1e6, linetype=type), lwd=0.4, show.legend = F) +
  # Add reference points
  geom_point(data=faq_limits, mapping=aes(x=period, y=meat_mt/1e6, shape=scenario, group=rcp), position=position_dodge(width=0.9), size=0.8) +
  # Labels
  labs(x="Period", y="Production potential\n(millions of mt of meat per year)", tag="f") +
  # Legend
  scale_fill_manual(name="", values=RColorBrewer::brewer.pal(4, name="RdBu") %>% rev(), guide="none") +
  scale_linetype_manual(name="", values="dotted") +
  scale_shape_manual(name="Feed limitation under:", guide = guide_legend(title.position = "top"), values=c(1,19)) +
  # Theme
  theme_bw() + big_plot_theme +
  theme(legend.position = "bottom")
g6
# Merge and export
############################################
# Empty plot
gfill <- ggplot() + theme_void()
# Merge
# (top row: four small panels a-d; bottom row: the two wide panels e and f)
layout_matrix <- matrix(data=c(1, 2, 3, 4,
                               5, 5, 6, 6), ncol=4, byrow=T)
g <- gridExtra::grid.arrange(g1, g2, g3, g4, g5, g6,
                             heights=c(0.4, 0.6),
                             layout_matrix=layout_matrix)
g
# Export figure
ggsave(g, filename=file.path(plotdir, "Fig2_mariculture_results.pdf"),
       width=6.5, height=3.5, units="in", dpi=600)
|
# sunagriAPI
#
# An instance of OpenSILEX WebService
#
# OpenAPI spec version: 3.3.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' PropertyDTO Class
#'
#' Data-transfer object holding a single property as a
#' (rdfType, relation, value) triple of character scalars.
#'
#' @field rdfType RDF type of the property (character scalar, or NULL).
#' @field relation Relation linking the property to its subject (character scalar, or NULL).
#' @field value Value of the property (character scalar, or NULL).
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PropertyDTO <- R6::R6Class(
  'PropertyDTO',
  public = list(
    `rdfType` = NULL,
    `relation` = NULL,
    `value` = NULL,
    # Create a PropertyDTO. Every argument is optional; when supplied it
    # must be a length-1 character vector (enforced via stopifnot).
    initialize = function(`rdfType`, `relation`, `value`){
      if (!missing(`rdfType`)) {
        stopifnot(is.character(`rdfType`), length(`rdfType`) == 1)
        self$`rdfType` <- `rdfType`
      }
      if (!missing(`relation`)) {
        stopifnot(is.character(`relation`), length(`relation`) == 1)
        self$`relation` <- `relation`
      }
      if (!missing(`value`)) {
        stopifnot(is.character(`value`), length(`value`) == 1)
        self$`value` <- `value`
      }
    },
    # Return a named list containing only the non-NULL fields,
    # suitable for serialization with jsonlite::toJSON().
    toJSON = function() {
      PropertyDTOObject <- list()
      if (!is.null(self$`rdfType`)) {
        PropertyDTOObject[['rdfType']] <- self$`rdfType`
      }
      if (!is.null(self$`relation`)) {
        PropertyDTOObject[['relation']] <- self$`relation`
      }
      if (!is.null(self$`value`)) {
        PropertyDTOObject[['value']] <- self$`value`
      }
      PropertyDTOObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave
    # the current field values untouched. Delegates to fromJSONObject()
    # so the per-field guards are written only once.
    fromJSON = function(PropertyDTOJson) {
      PropertyDTOObject <- jsonlite::fromJSON(PropertyDTOJson)
      self$fromJSONObject(PropertyDTOObject)
    },
    # Populate fields from an already-parsed list; keys absent from the
    # list leave the current field values untouched.
    fromJSONObject = function(PropertyDTOObject) {
      if (!is.null(PropertyDTOObject$`rdfType`)) {
        self$`rdfType` <- PropertyDTOObject$`rdfType`
      }
      if (!is.null(PropertyDTOObject$`relation`)) {
        self$`relation` <- PropertyDTOObject$`relation`
      }
      if (!is.null(PropertyDTOObject$`value`)) {
        self$`value` <- PropertyDTOObject$`value`
      }
    },
    # Serialize to a JSON string; NULL fields are emitted as JSON null.
    toJSONString = function() {
      sprintf(
        '{
           "rdfType": %s,
           "relation": %s,
           "value": %s
        }',
        jsonlite::toJSON(self$`rdfType`,auto_unbox=TRUE, null = "null"),
        jsonlite::toJSON(self$`relation`,auto_unbox=TRUE, null = "null"),
        jsonlite::toJSON(self$`value`,auto_unbox=TRUE, null = "null")
      )
    },
    # Populate fields from a JSON string. NOTE: unlike fromJSON(), this
    # assigns unconditionally, so keys absent from the JSON reset the
    # corresponding fields to NULL (behavior kept from the generated code).
    fromJSONString = function(PropertyDTOJson) {
      PropertyDTOObject <- jsonlite::fromJSON(PropertyDTOJson)
      self$`rdfType` <- PropertyDTOObject$`rdfType`
      self$`relation` <- PropertyDTOObject$`relation`
      self$`value` <- PropertyDTOObject$`value`
    }
  )
)
|
/R/PropertyDTO.r
|
no_license
|
OpenSILEX/phis-ws-client-r-tool
|
R
| false
| false
| 2,835
|
r
|
# sunagriAPI
#
# An instance of OpenSILEX WebService
#
# OpenAPI spec version: 3.3.0
#
# Generated by: https://github.com/swagger-api/swagger-codegen.git
#' PropertyDTO Class
#'
#' Data-transfer object holding a single property as a
#' (rdfType, relation, value) triple of character scalars.
#'
#' @field rdfType RDF type of the property (character scalar, or NULL).
#' @field relation Relation linking the property to its subject (character scalar, or NULL).
#' @field value Value of the property (character scalar, or NULL).
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
PropertyDTO <- R6::R6Class(
  'PropertyDTO',
  public = list(
    `rdfType` = NULL,
    `relation` = NULL,
    `value` = NULL,
    # Create a PropertyDTO. Every argument is optional; when supplied it
    # must be a length-1 character vector (enforced via stopifnot).
    initialize = function(`rdfType`, `relation`, `value`){
      if (!missing(`rdfType`)) {
        stopifnot(is.character(`rdfType`), length(`rdfType`) == 1)
        self$`rdfType` <- `rdfType`
      }
      if (!missing(`relation`)) {
        stopifnot(is.character(`relation`), length(`relation`) == 1)
        self$`relation` <- `relation`
      }
      if (!missing(`value`)) {
        stopifnot(is.character(`value`), length(`value`) == 1)
        self$`value` <- `value`
      }
    },
    # Return a named list containing only the non-NULL fields,
    # suitable for serialization with jsonlite::toJSON().
    toJSON = function() {
      PropertyDTOObject <- list()
      if (!is.null(self$`rdfType`)) {
        PropertyDTOObject[['rdfType']] <- self$`rdfType`
      }
      if (!is.null(self$`relation`)) {
        PropertyDTOObject[['relation']] <- self$`relation`
      }
      if (!is.null(self$`value`)) {
        PropertyDTOObject[['value']] <- self$`value`
      }
      PropertyDTOObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave
    # the current field values untouched. Delegates to fromJSONObject()
    # so the per-field guards are written only once.
    fromJSON = function(PropertyDTOJson) {
      PropertyDTOObject <- jsonlite::fromJSON(PropertyDTOJson)
      self$fromJSONObject(PropertyDTOObject)
    },
    # Populate fields from an already-parsed list; keys absent from the
    # list leave the current field values untouched.
    fromJSONObject = function(PropertyDTOObject) {
      if (!is.null(PropertyDTOObject$`rdfType`)) {
        self$`rdfType` <- PropertyDTOObject$`rdfType`
      }
      if (!is.null(PropertyDTOObject$`relation`)) {
        self$`relation` <- PropertyDTOObject$`relation`
      }
      if (!is.null(PropertyDTOObject$`value`)) {
        self$`value` <- PropertyDTOObject$`value`
      }
    },
    # Serialize to a JSON string; NULL fields are emitted as JSON null.
    toJSONString = function() {
      sprintf(
        '{
           "rdfType": %s,
           "relation": %s,
           "value": %s
        }',
        jsonlite::toJSON(self$`rdfType`,auto_unbox=TRUE, null = "null"),
        jsonlite::toJSON(self$`relation`,auto_unbox=TRUE, null = "null"),
        jsonlite::toJSON(self$`value`,auto_unbox=TRUE, null = "null")
      )
    },
    # Populate fields from a JSON string. NOTE: unlike fromJSON(), this
    # assigns unconditionally, so keys absent from the JSON reset the
    # corresponding fields to NULL (behavior kept from the generated code).
    fromJSONString = function(PropertyDTOJson) {
      PropertyDTOObject <- jsonlite::fromJSON(PropertyDTOJson)
      self$`rdfType` <- PropertyDTOObject$`rdfType`
      self$`relation` <- PropertyDTOObject$`relation`
      self$`value` <- PropertyDTOObject$`value`
    }
  )
)
\name{make.time.periods}
\alias{make.time.periods}
\title{make a time.periods object}
\usage{
make.time.periods(start, durations, names)
}
\arguments{
\item{start}{the start of the time of interest}
\item{durations}{the durations of the subsequent time
periods}
\item{names}{the names of the time periods}
}
\value{
An object (a list) giving the widths, names, and number of
time periods, together with a matrix called \code{template}
that holds the start and end of each time period. The
intervals in \code{template} are closed on the left and open
on the right; that is, a start of 1900 and an end of 1910
means [1900, 1910) in terms of exact times.
}
\description{
make a time.periods object
}
|
/man/make.time.periods.Rd
|
no_license
|
smorisseau/dhstools
|
R
| false
| false
| 695
|
rd
|
\name{make.time.periods}
\alias{make.time.periods}
\title{make a time.periods object}
\usage{
make.time.periods(start, durations, names)
}
\arguments{
\item{start}{the start of the time of interest}
\item{durations}{the durations of the subsequent time
periods}
\item{names}{the names of the time periods}
}
\value{
An object (a list) giving the widths, names, and number of
time periods, together with a matrix called \code{template}
that holds the start and end of each time period. The
intervals in \code{template} are closed on the left and open
on the right; that is, a start of 1900 and an end of 1910
means [1900, 1910) in terms of exact times.
}
\description{
make a time.periods object
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{DiagnoseClusterRequest}
\alias{DiagnoseClusterRequest}
\title{DiagnoseClusterRequest Object}
\usage{
DiagnoseClusterRequest()
}
\value{
DiagnoseClusterRequest object
}
\description{
DiagnoseClusterRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A request to collect cluster diagnostic information.
}
\seealso{
Other DiagnoseClusterRequest functions: \code{\link{projects.regions.clusters.diagnose}}
}
|
/googledataprocv1.auto/man/DiagnoseClusterRequest.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false
| true
| 551
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataproc_objects.R
\name{DiagnoseClusterRequest}
\alias{DiagnoseClusterRequest}
\title{DiagnoseClusterRequest Object}
\usage{
DiagnoseClusterRequest()
}
\value{
DiagnoseClusterRequest object
}
\description{
DiagnoseClusterRequest Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A request to collect cluster diagnostic information.
}
\seealso{
Other DiagnoseClusterRequest functions: \code{\link{projects.regions.clusters.diagnose}}
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.