content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Precipitation and wind plots for the Pszczyna IMGW weather station.
# Expects `imgw_pszczyna` (daily data with columns `data` [date],
# `rr_type` [precipitation type], `rr_daily`, `ws_mean_daily`) to be
# supplied by data/imgw.RData. data/pm10.RData is loaded but not
# referenced anywhere in this script.
library(tidyverse)
load("data/pm10.RData")
load("data/imgw.RData")
# Days per date/precipitation-type pair, dropping rows with no recorded type.
opad <- imgw_pszczyna %>%
count(data,rr_type) %>%
filter(rr_type!="")
# Number of precipitation observations per date, coloured by type.
ggplot(opad, aes(x=data, y=n, fill=rr_type)) +
geom_col()
# Daily precipitation totals per date, coloured by type.
ggplot(imgw_pszczyna, aes(x=data, y=rr_daily, fill=rr_type)) +
geom_col()
# wiatr (wind): daily mean wind speed.
ggplot(imgw_pszczyna, aes(x=data, y=ws_mean_daily)) +
geom_col()
| /03_opad.R | no_license | lwawrowski/smog | R | false | false | 360 | r | library(tidyverse)
# Precipitation and wind plots for the Pszczyna IMGW weather station.
# Expects `imgw_pszczyna` (daily data with columns `data` [date],
# `rr_type` [precipitation type], `rr_daily`, `ws_mean_daily`) to be
# supplied by data/imgw.RData. data/pm10.RData is loaded but not
# referenced anywhere in this script.
load("data/pm10.RData")
load("data/imgw.RData")
# Days per date/precipitation-type pair, dropping rows with no recorded type.
opad <- imgw_pszczyna %>%
count(data,rr_type) %>%
filter(rr_type!="")
# Number of precipitation observations per date, coloured by type.
ggplot(opad, aes(x=data, y=n, fill=rr_type)) +
geom_col()
# Daily precipitation totals per date, coloured by type.
ggplot(imgw_pszczyna, aes(x=data, y=rr_daily, fill=rr_type)) +
geom_col()
# wiatr (wind): daily mean wind speed.
ggplot(imgw_pszczyna, aes(x=data, y=ws_mean_daily)) +
geom_col()
|
\name{plot.Maeforecast}
\alias{plot.Maeforecast}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot Results from Maeforecast Functions
%% ~~function to do ... ~~
}
\description{This function plots the point forecasts alongside the realized values using \code{ggplot}.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
## S3 method for class 'Maeforecast'
plot(forecasts, start=NULL, frequency='month',
forecast.lab="Forecasts", true.lab="Realized",
x.lab="Time", y.lab="Value", title=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{forecasts}{an object of class "Maeforecast". Can be returned by functions \code{\link{maeforecast}} and \code{\link{Bagging}}.}
\item{start}{the starting time of the forecasts. Should have the format \code{"\%Y-\%m-\%d"}. If omitted, time will be shown as integers starting from 1.}
\item{frequency}{if \code{start} is not omitted, \code{frequency} is used to construct the time sequence, which indicates the frequency of the time series. Options include \code{"year"}, \code{"month"}, \code{"week"}, and \code{"day"}.}
\item{forecast.lab}{character, label of the forecasts. Default is \code{"Forecasts"}.}
\item{true.lab}{character, label of the realized values. Default is \code{"Realized"}.}
\item{x.lab}{character, label of the x-axis. Default is \code{"Time"}.}
\item{y.lab}{character, label of the y-axis. Default is \code{"Value"}.}
\item{title}{character, title of the plot.}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
AR.For<-maeforecast(mydata, w_size=72, window="recursive",
model="ar")
plot(AR.For, start="2010-02-01", frequency="month")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/plot.Maeforecast.Rd | permissive | google-trends-v1/gtm | R | false | false | 2,372 | rd | \name{plot.Maeforecast}
\alias{plot.Maeforecast}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{Plot Results from Maeforecast Functions
%% ~~function to do ... ~~
}
\description{This function plots the point forecasts alongside the realized values using \code{ggplot}.
%% ~~ A concise (1-5 lines) description of what the function does. ~~
}
\usage{
## S3 method for class 'Maeforecast'
plot(forecasts, start=NULL, frequency='month',
forecast.lab="Forecasts", true.lab="Realized",
x.lab="Time", y.lab="Value", title=NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{forecasts}{an object of class "Maeforecast". Can be returned by functions \code{\link{maeforecast}} and \code{\link{Bagging}}.}
\item{start}{the starting time of the forecasts. Should have the format \code{"\%Y-\%m-\%d"}. If omitted, time will be shown as integers starting from 1.}
\item{frequency}{if \code{start} is not omitted, \code{frequency} is used to construct the time sequence, which indicates the frequency of the time series. Options include \code{"year"}, \code{"month"}, \code{"week"}, and \code{"day"}.}
\item{forecast.lab}{character, label of the forecasts. Default is \code{"Forecasts"}.}
\item{true.lab}{character, label of the realized values. Default is \code{"Realized"}.}
\item{x.lab}{character, label of the x-axis. Default is \code{"Time"}.}
\item{y.lab}{character, label of the y-axis. Default is \code{"Value"}.}
\item{title}{character, title of the plot.}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
AR.For<-maeforecast(mydata, w_size=72, window="recursive",
model="ar")
plot(AR.For, start="2010-02-01", frequency="month")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
# lab5.r script file for lab5 calculations
#
# author: Eric Zivot
# created: October 20, 2003
# revised: July 12, 2012
#
# comments:
# Data for the lab are
# monthly continuously compounded returns on Vanguard long term bond index fund
#(VBLTX), Fidelity Magellan stock mutual fund (FMAGX), and Starbucks stock (SBUX)
#
# This lab requires the following packages
# PerformanceAnalytics return and risk analytics
# zoo Zeilie's ordered observations
# tseries various time series functions
# make sure you install these packages before you load them.
options(digits=4, width=70)
library(PerformanceAnalytics)
library(zoo)
library(tseries)
# get monthly adjusted closing price data on VBLTX, FMAGX and SBUX from Yahoo
# using the tseries function get.hist.quote(). Set sample to Jan 1998 through
# Dec 2009. Note: if you are not careful with the start and end dates
# or if you set the retclass to "ts" then results might look weird
# look at help on get.hist.quote
?get.hist.quote
# get the adjusted closing prices from Yahoo!
VBLTX.prices = get.hist.quote(instrument="vbltx", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
# change class of time index to yearmon which is appropriate for monthly data
# index() and as.yearmon() are functions in the zoo package
#
index(VBLTX.prices) = as.yearmon(index(VBLTX.prices))
class(VBLTX.prices)
colnames(VBLTX.prices)
start(VBLTX.prices)
end(VBLTX.prices)
FMAGX.prices = get.hist.quote(instrument="fmagx", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
index(FMAGX.prices) = as.yearmon(index(FMAGX.prices))
SBUX.prices = get.hist.quote(instrument="sbux", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
index(SBUX.prices) = as.yearmon(index(SBUX.prices))
# create merged price data
lab5Prices.z = merge(VBLTX.prices, FMAGX.prices, SBUX.prices)
# rename columns
colnames(lab5Prices.z) = c("VBLTX", "FMAGX", "SBUX")
# calculate cc returns as difference in log prices
lab5Returns.z = diff(log(lab5Prices.z))
#
# See the document "Working with Time Series in R" on the
# class webpage for more details on zoo objects
#
# look at the return data
start(lab5Returns.z)
end(lab5Returns.z)
colnames(lab5Returns.z)
head(lab5Returns.z)
################################################################################
# Part I
################################################################################
#
# 3. Create time plots of data
#
# 3 panel plot (each y axis has different scale)
# note: here, the generic plot() function invokes the plot method for objects
# of class zoo. See the help on plot.zoo
#
plot(lab5Returns.z,col="blue", lwd=2, main="Monthly cc returns on 3 assets")
# all on the same graph
plot(lab5Returns.z, plot.type="single", col=c("black","blue","red"), lwd=2,
main="Monthly cc returns on 3 assets",
ylab="Return")
legend(x="bottom", legend=colnames(lab5Returns.z), col=c("black","blue","red"), lwd=2)
abline(h=0)
# plot returns using the PerformanceAnalytics function chart.TimeSeries()
# this creates a slightly nicer looking plot than plot.zoo()
?chart.TimeSeries
chart.TimeSeries(lab5Returns.z, legend.loc="bottom", main="")
# the previous charts are a bit hard to read. the PerformanceAnalytics function
# chart.Bar makes it easier to compare the returns of different assets on the
# same plot
?chart.Bar
chart.Bar(lab5Returns.z, legend.loc="bottom", main="")
# cumulative return plot - must use simple returns and not cc returns for this
# use PerformanceAnalytics function chart.CumReturns()
?chart.CumReturns
chart.CumReturns(diff(lab5Prices.z)/lag(lab5Prices.z, k=-1),
legend.loc="topleft", wealth.index=TRUE,
main="Future Value of $1 invested")
#
# 4. Create matrix of return data. some core R functions don't work
# correctly with zoo objects
#
ret.mat = coredata(lab5Returns.z)
class(ret.mat)
colnames(ret.mat)
head(ret.mat)
#
# 5. Create graphical summaries of each data series
#
# online help on hist, boxplot, density, qqnorm
?hist
?boxplot
?density
?qqnorm
# here are the 4 panel plots
par(mfrow=c(2,2))
hist(ret.mat[,"VBLTX"],main="VBLTX monthly returns",
xlab="VBLTX", probability=T, col="slateblue1")
boxplot(ret.mat[,"VBLTX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"VBLTX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"VBLTX"], col="slateblue1")
qqline(ret.mat[,"VBLTX"])
par(mfrow=c(1,1))
par(mfrow=c(2,2))
hist(ret.mat[,"FMAGX"],main="FMAGX monthly returns",
xlab="FMAGX", probability=T, col="slateblue1")
boxplot(ret.mat[,"FMAGX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"FMAGX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"FMAGX"], col="slateblue1")
qqline(ret.mat[,"FMAGX"])
par(mfrow=c(1,1))
par(mfrow=c(2,2))
hist(ret.mat[,"SBUX"],main="SBUX monthly returns",
xlab="SBUX", probability=T, col="slateblue1")
boxplot(ret.mat[,"SBUX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"SBUX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"SBUX"], col="slateblue1")
qqline(ret.mat[,"SBUX"])
par(mfrow=c(1,1))
# show boxplot of three series on one plot
boxplot(ret.mat[,"VBLTX"], ret.mat[,"FMAGX"], ret.mat[,"SBUX"],
names=colnames(ret.mat), col="slateblue1")
# do the same thing using the PerformanceAnalytics function chart.Boxplot
chart.Boxplot(lab5Returns.z)
#
# 6. Compute univariate descriptive statistics
#
summary(ret.mat)
# compute descriptive statistics by column using the base R function apply()
# note: skewness and kurtosis are in the package PerformanceAnalytics
# note: kurtosis returns excess kurtosis
?apply
args(apply)
apply(ret.mat, 2, mean)
apply(ret.mat, 2, var)
apply(ret.mat, 2, sd)
apply(ret.mat, 2, skewness)
apply(ret.mat, 2, kurtosis)
# A nice PerformanceAnalytics function that computes all of the relevant
# descriptive statistics is table.Stats
?table.Stats
table.Stats(lab5Returns.z)
#
# 7. Annualize monthly estimates
#
# annualized cc mean
12*apply(ret.mat, 2, mean)
# annualized simple mean
exp(12*apply(ret.mat, 2, mean)) - 1
# annualized sd values
sqrt(12)*apply(ret.mat, 2, sd)
#
# 8. Compute bivariate descriptive statistics
#
# online help on pairs
?pairs
pairs(ret.mat, col="slateblue1", pch=16)
# online help on var and cor
?var
?cor
# compute 3 x 3 covariance and correlation matrices
var(ret.mat)
cor(ret.mat)
#
# 9. Compute time series diagnostics
#
# autocorrelations
# online help on acf
?acf
par(mfrow=c(3,1))
acf.msft = acf(ret.mat[,"VBLTX"], main="VBLTX")
acf.sbux = acf(ret.mat[,"FMAGX"], main="FMAGX")
acf.sp500 = acf(ret.mat[,"SBUX"], main="SBUX")
par(mfrow=c(1,1))
| /Week_5/week5script.R | no_license | mamsdiallo/CompFinance | R | false | false | 7,489 | r | # lab5.r script file for lab5 calculations
#
# author: Eric Zivot
# created: October 20, 2003
# revised: July 12, 2012
#
# comments:
# Data for the lab are
# monthly continuously compounded returns on Vanguard long term bond index fund
#(VBLTX), Fidelity Magellan stock mutual fund (FMAGX), and Starbucks stock (SBUX)
#
# This lab requires the following packages
# PerformanceAnalytics return and risk analytics
# zoo Zeilie's ordered observations
# tseries various time series functions
# make sure you install these packages before you load them.
options(digits=4, width=70)
library(PerformanceAnalytics)
library(zoo)
library(tseries)
# get monthly adjusted closing price data on VBLTX, FMAGX and SBUX from Yahoo
# using the tseries function get.hist.quote(). Set sample to Jan 1998 through
# Dec 2009. Note: if you are not careful with the start and end dates
# or if you set the retclass to "ts" then results might look weird
# look at help on get.hist.quote
?get.hist.quote
# get the adjusted closing prices from Yahoo!
VBLTX.prices = get.hist.quote(instrument="vbltx", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
# change class of time index to yearmon which is appropriate for monthly data
# index() and as.yearmon() are functions in the zoo package
#
index(VBLTX.prices) = as.yearmon(index(VBLTX.prices))
class(VBLTX.prices)
colnames(VBLTX.prices)
start(VBLTX.prices)
end(VBLTX.prices)
FMAGX.prices = get.hist.quote(instrument="fmagx", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
index(FMAGX.prices) = as.yearmon(index(FMAGX.prices))
SBUX.prices = get.hist.quote(instrument="sbux", start="1998-01-01",
end="2009-12-31", quote="AdjClose",
provider="yahoo", origin="1970-01-01",
compression="m", retclass="zoo")
index(SBUX.prices) = as.yearmon(index(SBUX.prices))
# create merged price data
lab5Prices.z = merge(VBLTX.prices, FMAGX.prices, SBUX.prices)
# rename columns
colnames(lab5Prices.z) = c("VBLTX", "FMAGX", "SBUX")
# calculate cc returns as difference in log prices
lab5Returns.z = diff(log(lab5Prices.z))
#
# See the document "Working with Time Series in R" on the
# class webpage for more details on zoo objects
#
# look at the return data
start(lab5Returns.z)
end(lab5Returns.z)
colnames(lab5Returns.z)
head(lab5Returns.z)
################################################################################
# Part I
################################################################################
#
# 3. Create time plots of data
#
# 3 panel plot (each y axis has different scale)
# note: here, the generic plot() function invokes the plot method for objects
# of class zoo. See the help on plot.zoo
#
plot(lab5Returns.z,col="blue", lwd=2, main="Monthly cc returns on 3 assets")
# all on the same graph
plot(lab5Returns.z, plot.type="single", col=c("black","blue","red"), lwd=2,
main="Monthly cc returns on 3 assets",
ylab="Return")
legend(x="bottom", legend=colnames(lab5Returns.z), col=c("black","blue","red"), lwd=2)
abline(h=0)
# plot returns using the PerformanceAnalytics function chart.TimeSeries()
# this create a slightly nicer looking plot that plot.zoo()
?chart.TimeSeries
chart.TimeSeries(lab5Returns.z, legend.loc="bottom", main="")
# the previous charts are a bit hard to read. the PerformanceAnalytics function
# chart.Bar makes it easier to compare the returns of different assets on the
# same plot
?chart.Bar
chart.Bar(lab5Returns.z, legend.loc="bottom", main="")
# cumulative return plot - must use simple returns and not cc returns for this
# use PerformanceAnalytics function chart.CumReturns()
?chart.CumReturns
chart.CumReturns(diff(lab5Prices.z)/lag(lab5Prices.z, k=-1),
legend.loc="topleft", wealth.index=TRUE,
main="Future Value of $1 invested")
#
# 4. Create matrix of return data. some core R functions don't work
# correctly with zoo objects
#
ret.mat = coredata(lab5Returns.z)
class(ret.mat)
colnames(ret.mat)
head(ret.mat)
#
# 5. Create graphical summaries of each data series
#
# online help on hist, boxplot, density, qqnorm
?hist
?boxplot
?density
?qqnorm
# here are the 4 panel plots
par(mfrow=c(2,2))
hist(ret.mat[,"VBLTX"],main="VBLTX monthly returns",
xlab="VBLTX", probability=T, col="slateblue1")
boxplot(ret.mat[,"VBLTX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"VBLTX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"VBLTX"], col="slateblue1")
qqline(ret.mat[,"VBLTX"])
par(mfrow=c(1,1))
par(mfrow=c(2,2))
hist(ret.mat[,"FMAGX"],main="FMAGX monthly returns",
xlab="FMAGX", probability=T, col="slateblue1")
boxplot(ret.mat[,"FMAGX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"FMAGX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"FMAGX"], col="slateblue1")
qqline(ret.mat[,"FMAGX"])
par(mfrow=c(1,1))
par(mfrow=c(2,2))
hist(ret.mat[,"SBUX"],main="SBUX monthly returns",
xlab="SBUX", probability=T, col="slateblue1")
boxplot(ret.mat[,"SBUX"],outchar=T, main="Boxplot", col="slateblue1")
plot(density(ret.mat[,"SBUX"]),type="l", main="Smoothed density",
xlab="monthly return", ylab="density estimate", col="slateblue1")
qqnorm(ret.mat[,"SBUX"], col="slateblue1")
qqline(ret.mat[,"SBUX"])
par(mfrow=c(1,1))
# show boxplot of three series on one plot
boxplot(ret.mat[,"VBLTX"], ret.mat[,"FMAGX"], ret.mat[,"SBUX"],
names=colnames(ret.mat), col="slateblue1")
# do the same thing using the PerformanceAnalytics function chart.Boxplot
chart.Boxplot(lab5Returns.z)
#
# 6. Compute univariate descriptive statistics
#
summary(ret.mat)
# compute descriptive statistics by column using the base R function apply()
# note: skewness and kurtosis are in the package PerformanceAnalytics
# note: kurtosis returns excess kurtosis
?apply
args(apply)
apply(ret.mat, 2, mean)
apply(ret.mat, 2, var)
apply(ret.mat, 2, sd)
apply(ret.mat, 2, skewness)
apply(ret.mat, 2, kurtosis)
# A nice PerformanceAnalytics function that computes all of the relevant
# descriptive statistics is table.Stats
?table.Stats
table.Stats(lab5Returns.z)
#
# 7. Annualize monthly estimates
#
# annualized cc mean
12*apply(ret.mat, 2, mean)
# annualized simple mean
exp(12*apply(ret.mat, 2, mean)) - 1
# annualized sd values
sqrt(12)*apply(ret.mat, 2, sd)
#
# 8. Compute bivariate descriptive statistics
#
# online help on pairs
?pairs
pairs(ret.mat, col="slateblue1", pch=16)
# online help on var and cor
?var
?cor
# compute 3 x 3 covariance and correlation matrices
var(ret.mat)
cor(ret.mat)
#
# 9. Compute time series diagnostics
#
# autocorrelations
# online help on acf
?acf
par(mfrow=c(3,1))
acf.msft = acf(ret.mat[,"VBLTX"], main="VBLTX")
acf.sbux = acf(ret.mat[,"FMAGX"], main="FMAGX")
acf.sp500 = acf(ret.mat[,"SBUX"], main="SBUX")
par(mfrow=c(1,1))
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
library(randomForest)
# Regression test for H2O DRF: with a 26-level categorical predictor whose
# levels perfectly determine the binary response, a single depth-1 tree that
# group-splits the categories should achieve perfect prediction (AUC == 1).
test.DRF.smallcat <- function() {
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
Log.info("Importing alphabet_cattest.csv data...\n")
alphabet.hex <- h2o.uploadFile(locate("smalldata/gbm_test/alphabet_cattest.csv"), destination_frame = "alphabet.hex")
# Response must be a factor so the model is trained as a classifier.
alphabet.hex$y <- as.factor(alphabet.hex$y)
Log.info("Summary of alphabet_cattest.csv from H2O:\n")
print(summary(alphabet.hex))
# Import CSV data for R to use in comparison
alphabet.data <- read.csv(locate("smalldata/gbm_test/alphabet_cattest.csv"), header = TRUE)
alphabet.data$y <- as.factor(alphabet.data$y)
Log.info("Summary of alphabet_cattest.csv from R:\n")
print(summary(alphabet.data))
# Train H2O DRF Model: a single stump (ntrees = 1, max_depth = 1).
Log.info("H2O DRF (Group Split) with parameters:\nclassification = TRUE, ntree = 1, depth = 1, nbins = 100\n")
drfmodel <- h2o.randomForest(x = "X", y = "y", training_frame = alphabet.hex,
ntrees = 1, max_depth = 1, min_rows = 100)
print(drfmodel)
# Check AUC and overall prediction error at least as good with group split as without
Log.info("Expect DRF with Group Split to give Perfect Prediction in Single Iteration")
drfperf <- h2o.performance(drfmodel)
print(h2o.confusionMatrix(drfmodel,alphabet.hex))
expect_equal(h2o.auc(drfperf), 1)
# No errors off the diagonal
default_cm <- h2o.confusionMatrix(drfmodel,alphabet.hex)[[1]]
# NOTE(review): the off-diagonal assertions below are disabled (commented
# out); the first originally carried a stray "i" typo in its name.
#expect_equal(default_cm[1,2], 0)
#expect_equal(default_cm[2,1], 0)
# Train R DRF Model (disabled comparison against randomForest):
# Log.info("R DRF with same parameters:")
# drfmodel.r <- randomForest(y ~ ., data = alphabet.data, ntree = 1, nodesize = 1)
# drfmodel.r.pred <- predict(drfmodel.r, alphabet.data, type = "response")
# Compute confusion matrices
# Log.info("R Confusion Matrix:"); print(drfmodel.r$confusion)
# Log.info("H2O (Group Split) Confusion Matrix:"); print(drfmodel.grpsplit@model$confusion)
# Compute the AUC - need to convert factors back to numeric
# actual <- ifelse(alphabet.data$y == "0", 0, 1)
# pred <- ifelse(drfmodel.r.pred == "0", 0, 1)
# R.auc = gbm.roc.area(actual, pred)
# Log.info(paste("R AUC:", R.auc, "\tH2O (Group Split) AUC:", drfmodel.grpsplit@model$AUC))
}
doTest("DRF Test: Classification with 26 categorical level predictor", test.DRF.smallcat)
| /h2o-r/tests/testdir_algos/randomforest/runit_RF_smallcat.R | permissive | h2oai/h2o-3 | R | false | false | 2,537 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
library(randomForest)
# Regression test for H2O DRF: with a 26-level categorical predictor whose
# levels perfectly determine the binary response, a single depth-1 tree that
# group-splits the categories should achieve perfect prediction (AUC == 1).
test.DRF.smallcat <- function() {
# Training set has 26 categories from A to Z
# Categories A, C, E, G, ... are perfect predictors of y = 1
# Categories B, D, F, H, ... are perfect predictors of y = 0
Log.info("Importing alphabet_cattest.csv data...\n")
alphabet.hex <- h2o.uploadFile(locate("smalldata/gbm_test/alphabet_cattest.csv"), destination_frame = "alphabet.hex")
# Response must be a factor so the model is trained as a classifier.
alphabet.hex$y <- as.factor(alphabet.hex$y)
Log.info("Summary of alphabet_cattest.csv from H2O:\n")
print(summary(alphabet.hex))
# Import CSV data for R to use in comparison
alphabet.data <- read.csv(locate("smalldata/gbm_test/alphabet_cattest.csv"), header = TRUE)
alphabet.data$y <- as.factor(alphabet.data$y)
Log.info("Summary of alphabet_cattest.csv from R:\n")
print(summary(alphabet.data))
# Train H2O DRF Model: a single stump (ntrees = 1, max_depth = 1).
Log.info("H2O DRF (Group Split) with parameters:\nclassification = TRUE, ntree = 1, depth = 1, nbins = 100\n")
drfmodel <- h2o.randomForest(x = "X", y = "y", training_frame = alphabet.hex,
ntrees = 1, max_depth = 1, min_rows = 100)
print(drfmodel)
# Check AUC and overall prediction error at least as good with group split as without
Log.info("Expect DRF with Group Split to give Perfect Prediction in Single Iteration")
drfperf <- h2o.performance(drfmodel)
print(h2o.confusionMatrix(drfmodel,alphabet.hex))
expect_equal(h2o.auc(drfperf), 1)
# No errors off the diagonal
default_cm <- h2o.confusionMatrix(drfmodel,alphabet.hex)[[1]]
# NOTE(review): the off-diagonal assertions below are disabled (commented
# out); the first originally carried a stray "i" typo in its name.
#expect_equal(default_cm[1,2], 0)
#expect_equal(default_cm[2,1], 0)
# Train R DRF Model (disabled comparison against randomForest):
# Log.info("R DRF with same parameters:")
# drfmodel.r <- randomForest(y ~ ., data = alphabet.data, ntree = 1, nodesize = 1)
# drfmodel.r.pred <- predict(drfmodel.r, alphabet.data, type = "response")
# Compute confusion matrices
# Log.info("R Confusion Matrix:"); print(drfmodel.r$confusion)
# Log.info("H2O (Group Split) Confusion Matrix:"); print(drfmodel.grpsplit@model$confusion)
# Compute the AUC - need to convert factors back to numeric
# actual <- ifelse(alphabet.data$y == "0", 0, 1)
# pred <- ifelse(drfmodel.r.pred == "0", 0, 1)
# R.auc = gbm.roc.area(actual, pred)
# Log.info(paste("R AUC:", R.auc, "\tH2O (Group Split) AUC:", drfmodel.grpsplit@model$AUC))
}
doTest("DRF Test: Classification with 26 categorical level predictor", test.DRF.smallcat)
|
# Auto-extracted example code for e1071::hanning.window.
library(e1071)
### Name: hanning.window
### Title: Computes the Coefficients of a Hanning Window.
### Aliases: hanning.window
### Keywords: ts
### ** Examples
# Coefficients of a length-10 Hanning window.
hanning.window(10)
# Short-time Fourier transform of Gaussian noise using a Hanning window.
x<-rnorm(500)
y<-stft(x, wtype="hanning.window")
plot(y)
| /data/genthat_extracted_code/e1071/examples/hanning.window.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 242 | r | library(e1071)
# Auto-extracted example code for e1071::hanning.window.
### Name: hanning.window
### Title: Computes the Coefficients of a Hanning Window.
### Aliases: hanning.window
### Keywords: ts
### ** Examples
# Coefficients of a length-10 Hanning window.
hanning.window(10)
# Short-time Fourier transform of Gaussian noise using a Hanning window.
x<-rnorm(500)
y<-stft(x, wtype="hanning.window")
plot(y)
|
#:::::::::::
# deval
#:::::::::::
subroutine deval (vmu, s, lds, nint, qraux, nobs, nnull, tol, jpvt, M,
*ldq, nq, q, z, y, low, upp, nlaht, score,
*varht,info, work, twk, twk2, qwork)
character vmu
integer ldq, n, info, jpvt(*), lds,
*nnull, nobs, nq, n, nint
double precision q(ldq,*), M(ldq,*), tol, z(*), y(*), low,
*upp, nlaht, score, varht, twk(2,*), twk2(*), work(*),
*qwork(ldq,*), qraux(*)
# Purpose: To evaluate GCV/GML function based on tridiagonal form and to
# search minimum on an interval by equally spaced (in log10 scale) grid
# search.
character*1 vmu
integer ldq, n, nint, info
double precision q(ldq,*), z(*), low, upp, nlaht, score(*), varht,_
twk(2,*), work(*)
# On entry:
# vmu 'v': GCV criterion.
# 'm': GML criterion.
# 'u': unbiased risk estimate.
# q tidiagonal matrix in diagonal and super diagonal.
# ldq leading dimension of Q.
# n size of the matrix.
# z U^{T} F_{2}^{T} y.
# nint number of intervals (number of grids minus 1).
# low lower limit of log10(n*lambda).
# upp upper limit of log10(n*lambda).
# varht known variance if vmu=='u'.
# On exit:
# nlaht the estimated log10(n*lambda).
# score the GCV/GML/URE score vector on grid points.
# varht the variance estimate at the estimated n*lambda.
# info 0: normal termination.
# -1: dimension error.
# -2: tridiagonal form is not non-negative definite.
# -3: vmu or nint is out of scope.
# Work arrays:
# twk array of length at least (2,n).
# work array of length at least (n).
# Routines called directly:
# Fortran -- dfloat
# Blas -- daxpy, dcopy
# Rkpack -- dtrev
# Other -- dset
# Written: Chong Gu, Statistics, Purdue, 12/29/91 latest version.
double precision tmp, minscr, mlo, varhtwk
integer j
info = 0
# interchange boundaries if necessary
if ( upp < low ) {
mlo = low
low = upp
upp = mlo
}
# check job requests
if ( (vmu != 'v' & vmu != 'm' & vmu != 'u') | nint < 1 ) {
info = -3
return
}
# check dimension
if ( 1 > n | n > ldq ) {
info = -1
return
}
# evaluation
for (j=1;j<=nint+1;j=j+1) {
tmp = low + dfloat (j-1) * ( upp - low ) / dfloat (nint)
call dgstup ( s, M, lds, nobs, nnull, qraux, q, ldq, nobs,
*nq, info, work, qwork, mlo)
call dsytr (qwork(n0+1,n0+1), ldq, n, tol, info, work)
if ( info != 0 ) return
##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## z(lambda) : = U(lambda)^{T} z_{2}
## copy lower triangle of U T U^T into work
call dcopy (n-2, qwork(n0+2,n0+1), ldq+1, work, 1)
## z(n0+2) := U^T F_2^T y(n0+2)
call dqrsl (qwork(n0+2,n0+1), ldq, n-1, n-2, work, y(n0+2), dum, z(n0+2),dum, dum, dum, 01000, info)
call dggold(vmu, M, q(n0+1,n0+1), ldq, n, z(n0+1), low, upp, mlo, tmpl,
*varht, info, twk, work)
###-----------
if ( score(j) <= minscr | j == 1 ) {
minscr = score(j)
nlaht = tmp
varhtwk = varht
}
}
varht = varhtwk
return
end
| /src/ratfor/deval.r | no_license | weirichd/cautious-guacamole | R | false | false | 3,361 | r |
#:::::::::::
# deval
#:::::::::::
subroutine deval (vmu, s, lds, nint, qraux, nobs, nnull, tol, jpvt, M,
*ldq, nq, q, z, y, low, upp, nlaht, score,
*varht,info, work, twk, twk2, qwork)
character vmu
integer ldq, n, info, jpvt(*), lds,
*nnull, nobs, nq, n, nint
double precision q(ldq,*), M(ldq,*), tol, z(*), y(*), low,
*upp, nlaht, score, varht, twk(2,*), twk2(*), work(*),
*qwork(ldq,*), qraux(*)
# Purpose: To evaluate GCV/GML function based on tridiagonal form and to
# search minimum on an interval by equally spaced (in log10 scale) grid
# search.
character*1 vmu
integer ldq, n, nint, info
double precision q(ldq,*), z(*), low, upp, nlaht, score(*), varht,_
twk(2,*), work(*)
# On entry:
# vmu 'v': GCV criterion.
# 'm': GML criterion.
# 'u': unbiased risk estimate.
# q tidiagonal matrix in diagonal and super diagonal.
# ldq leading dimension of Q.
# n size of the matrix.
# z U^{T} F_{2}^{T} y.
# nint number of intervals (number of grids minus 1).
# low lower limit of log10(n*lambda).
# upp upper limit of log10(n*lambda).
# varht known variance if vmu=='u'.
# On exit:
# nlaht the estimated log10(n*lambda).
# score the GCV/GML/URE score vector on grid points.
# varht the variance estimate at the estimated n*lambda.
# info 0: normal termination.
# -1: dimension error.
# -2: tridiagonal form is not non-negative definite.
# -3: vmu or nint is out of scope.
# Work arrays:
# twk array of length at least (2,n).
# work array of length at least (n).
# Routines called directly:
# Fortran -- dfloat
# Blas -- daxpy, dcopy
# Rkpack -- dtrev
# Other -- dset
# Written: Chong Gu, Statistics, Purdue, 12/29/91 latest version.
double precision tmp, minscr, mlo, varhtwk
integer j
info = 0
# interchange boundaries if necessary
if ( upp < low ) {
mlo = low
low = upp
upp = mlo
}
# check job requests
if ( (vmu != 'v' & vmu != 'm' & vmu != 'u') | nint < 1 ) {
info = -3
return
}
# check dimension
if ( 1 > n | n > ldq ) {
info = -1
return
}
# evaluation
for (j=1;j<=nint+1;j=j+1) {
tmp = low + dfloat (j-1) * ( upp - low ) / dfloat (nint)
call dgstup ( s, M, lds, nobs, nnull, qraux, q, ldq, nobs,
*nq, info, work, qwork, mlo)
call dsytr (qwork(n0+1,n0+1), ldq, n, tol, info, work)
if ( info != 0 ) return
##++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## z(lambda) : = U(lambda)^{T} z_{2}
## copy lower triangle of U T U^T into work
call dcopy (n-2, qwork(n0+2,n0+1), ldq+1, work, 1)
## z(n0+2) := U^T F_2^T y(n0+2)
call dqrsl (qwork(n0+2,n0+1), ldq, n-1, n-2, work, y(n0+2), dum, z(n0+2),dum, dum, dum, 01000, info)
call dggold(vmu, M, q(n0+1,n0+1), ldq, n, z(n0+1), low, upp, mlo, tmpl,
*varht, info, twk, work)
###-----------
if ( score(j) <= minscr | j == 1 ) {
minscr = score(j)
nlaht = tmp
varhtwk = varht
}
}
varht = varhtwk
return
end
|
\name{identifyDirectory}
\alias{identifyDirectory}
\title{
Identify Working Directory}
\description{
This function identifies the path to the folder where data files and logs are saved. Results files will also be saved to this folder}
\usage{
identifyDirectory(path)
}
\arguments{
\item{path}{
A character string identifying the path to the working directory}
}
\value{
A character string identifying the path to the working directory}
\author{
Kate Lyden}
\examples{
\dontrun{
identifyDirectory("/Users/jsmith/Documents/...")
identifyDirectory("C:/Documents and Settings/...")
}
}
\keyword{ library }
| /man/identifyDirectory.Rd | no_license | cran/activpalProcessing | R | false | false | 609 | rd | \name{identifyDirectory}
\alias{identifyDirectory}
\title{
Identify Working Directory}
\description{
This function identifies the path to the folder where data files and logs are saved. Results files will also be saved to this folder}
\usage{
identifyDirectory(path)
}
\arguments{
\item{path}{
A character string identifying the path to the working directory}
}
\value{
A character string identifying the path to the working directory}
\author{
Kate Lyden}
\examples{
\dontrun{
identifyDirectory("/Users/jsmith/Documents/...")
identifyDirectory("C:/Documents and Settings/...")
}
}
\keyword{ library }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simDRM.R
\name{simDRM}
\alias{simDRM}
\title{simulate data according to Rasch model}
\usage{
simDRM(itempar, persons = 500, seed = NULL)
}
\arguments{
\item{itempar}{a vector with item difficulty parameters}
\item{persons}{number of persons for the generated data set}
\item{seed}{a seed for the random number generator can optionally be set}
}
\value{
\item{datmat}{simulated data set} \item{true_itempar}{the fixed item
parameters according to the input} \item{true_perspar}{the fixed person
parameters}
}
\description{
With this function data sets according to the dichotomous
Rasch model (DRM) are simulated
}
\details{
Data are generated with category values 0 and 1.
Person parameters are generated by a standard normal distribution.
}
\examples{
#set item parameters
item_p <- c(-1.5,-0.3,0,0.3,1.5)
#number of persons
pn <- 500
#simulate data set
simdatD <- simDRM(item_p, pn)
}
\references{
Fischer, G. H. (1974). Einfuehrung in die Theorie
psychologischer Tests [Introduction to test theory]. Bern: Huber.
}
\seealso{
\code{\link{simMPRM}}, \code{\link{simCRSM}}
}
\author{
Christine Hohensinn
}
| /pcIRT/man/simDRM.Rd | no_license | akhikolla/InformationHouse | R | false | true | 1,190 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simDRM.R
\name{simDRM}
\alias{simDRM}
\title{simulate data according to Rasch model}
\usage{
simDRM(itempar, persons = 500, seed = NULL)
}
\arguments{
\item{itempar}{a vector with item difficulty parameters}
\item{persons}{number of persons for the generated data set}
\item{seed}{a seed for the random number generator; it can optionally be set}
}
\value{
\item{datmat}{simulated data set} \item{true_itempar}{the fixed item
parameters according to the input} \item{true_perspar}{the fixed person
parameters}
}
\description{
With this function data sets according to the dichotomous
Rasch model (DRM) are simulated
}
\details{
Data are generated with category values 0 and 1.
Person parameters are generated by a standard normal distribution.
}
\examples{
#set item parameters
item_p <- c(-1.5,-0.3,0,0.3,1.5)
#number of persons
pn <- 500
#simulate data set
simdatD <- simDRM(item_p, pn)
}
\references{
Fischer, G. H. (1974). Einfuehrung in die Theorie
psychologischer Tests [Introduction to test theory]. Bern: Huber.
}
\seealso{
\code{\link{simMPRM}}, \code{\link{simCRSM}}
}
\author{
Christine Hohensinn
}
|
##############################################################################
## Title: CFS Composite Phenotype File Creation for Encore GWAS Analysis
## Version: 1
## Author: Regina Manansala
## Date Created: 02-March-2020
## Date Modified: 30-March-2020
##############################################################################
library(lavaan)
library(data.table)
library(dplyr)
library(stringr)
## -- Paths and raw inputs ------------------------------------------------
## NOTE(review): setwd() in a script is fragile; prefer absolute paths.
data_dir <- "~/Documents/Inflammation_Data/"
setwd(data_dir)
## Import Freeze8 Sample Data
samples <- fread(paste0(data_dir,"freeze8_sample_annot_2019-03-28.txt"))
## Get list of sub-directories named for each study included
study <- list.dirs(path = data_dir, full.names = F, recursive = F)
## Import principal component data
pcs <- fread(paste0(data_dir,"pcair_results.txt"), header=F)
## IMPORT INFLAMMATION AND DEMOGRAPHICS DATA ##
## Per study sub-directory: TOPMed-DCC-distributed studies ship separate
## inflammation (v1) and demographic (v3) files, read into <STUDY>_inf and
## <STUDY>_dem in the workspace; other studies ship a single CSV, except
## FHS, whose two CSVs are joined on 'shareid' into FHS_inf.
for(i in 1:length(study)){
## Get files within the study subdirectories
sub_dir <- list.files(paste0(data_dir, study[i]))
if(length(grep('topmed_dcc_inflammation_v1', sub_dir)) > 0){
inf_path <- paste0(data_dir, "/", study[i], "/", sub_dir[grep('topmed_dcc_inflammation_v1', sub_dir)])
inf_files <- list.files(path = inf_path)
assign(paste0(study[i], "_inf"), fread(file = paste0(inf_path, "/", inf_files[grep("topmed_dcc_inflammation_v1.txt", inf_files)])))
dem_path <- paste0(data_dir, "/", study[i], "/", sub_dir[grep('topmed_dcc_demographic_v3', sub_dir)])
dem_files <- list.files(path = dem_path)
assign(paste0(study[i], "_dem"), fread(file = paste0(dem_path, "/", dem_files[grep("topmed_dcc_demographic_v3.txt", dem_files)])))
}
if(length(grep('topmed_dcc_inflammation_v1', sub_dir)) == 0){
if(study[i] != "FHS"){
assign(paste0(study[i], "_inf"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep(".csv", sub_dir, ignore.case = T)])))
}
if(study[i] == "FHS"){
assign(paste0(study[i], "_og"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep("inflammation.*.csv", sub_dir, ignore.case = T)])))
assign(paste0(study[i], "_add"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep("additional.*.csv", sub_dir, ignore.case = T)])))
FHS_inf <- left_join(FHS_og, FHS_add, by = "shareid")
}
}
}
## Update Study List
## Rebuild the study vector from the <STUDY>_inf objects actually created.
study <- sub("_inf", "", ls(pattern="_inf"))
## COMBINE INFLAMMATION AND DEMOGRAPHICS DATA ##
## FILTER BY SAMPLE ID ##
## For every study: merge inflammation with demographics (when shipped
## separately), restrict to subjects present in the freeze8 sample
## annotation, and store the result in the workspace under the study name.
inf <- ls(pattern = "_inf")
dem <- ls(pattern = "_dem")
for(i in 1:length(study)){
## Remove observations with 'DS' in consent column
## (GeneSTAR and CFS rows are exempt from this consent-based exclusion)
samples <- samples[samples$study %in% c("GeneSTAR", "CFS") | grepl("(-DS|-DS-|DS-)", samples$consent) == FALSE,]
## Get all studies with separate demographic data. Combine with associated inflammation data using the unique subject key
if(length(grep(study[i], dem)) > 0){
inf_dat <- grep(study[i], inf, value = TRUE)
dem_dat <- grep(study[i], dem, value = TRUE)
foo <- left_join(get(inf_dat), get(dem_dat), by = "unique_subject_key")
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
assign(study[i], foo2)
}
## Get all studies with inflammation and demographic data combined (except WHI) and create a unique subject key using study name and ID
if(length(grep(study[i], dem)) == 0 & study[i] != "WHI"){
foo <- get(paste0(study[i], "_inf"))
foo$unique_subject_key <- paste(study[i], foo[[grep("(_id|shareid)", colnames(foo), ignore.case = T, value = T)]], sep = "_") #### HOW TO DEAL WITH UCASE/LCASE IN COLNAME
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
assign(study[i], foo2)
}
## For WHI, create unique subject key using study name and ID
if(length(grep(study[i], dem)) == 0 & study[i] == "WHI"){
foo <- get(paste0(study[i], "_inf"))
foo$unique_subject_key <- paste(study[i], foo[[grep("(_id|shareid)", colnames(foo), ignore.case = T, value = T)]], sep = "_") #### HOW TO DEAL WITH UCASE/LCASE IN COLNAME
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
# Find duplicate measures and drop based on assay type frequency
## ASSAY_NUM = record count per CRP assay type; rows from the Dade Behring
## nephelometry assay are dropped, then one record per subject is kept
## (the last after sorting by ID and assay frequency).
foo2 <- foo2 %>%
group_by(CRP_ASSAY) %>%
mutate(ASSAY_NUM = table(CRP_ASSAY)) %>%
ungroup() %>%
arrange(dbGaP_ID, ASSAY_NUM) %>%
subset(CRP_ASSAY != "Latex-enhanced nephelometry (N High Sensitivity CRP assay) on BN II nephelometer (Dade Behring, Inc.)")
# dim(foo2[duplicated(foo2$unique_subject_key) | duplicated(foo2$unique_subject_key, fromLast = TRUE), ])
foo3 <- foo2[!duplicated(foo2$unique_subject_key, fromLast = TRUE),]
assign(study[i], foo3)
}
}
## ADD SD AND RACE EXCLUSION CRITERIA
#FHS 554 vars
## Select the inflammation phenotype columns: baseline or visit-1 ("_1")
## versions of the biomarkers enumerated in the regex below.
varlist <- grep("(^crp($|_1)|^il6($|_1)|^il8($|_1)|^il10($|_1)|^il18($|_1)|^icam($|_1)|^tnfa($|_1)|^pselectin($|_1)|^eselectin($|_1)|^l1_beta($|_1)|^tnfa_r1($|_1)|^mmp1($|_1)|^mmp9($|_1)|^cd40($|_1)|^isoprostane_8_epi_pgf2a($|_1)|^lppla2_act($|_1)|^lppla2_mass($|_1)|^mcp1($|_1)|^mpo($|_1)|^opg($|_1)|^tnfr2($|_1))",
colnames(FHS), value = TRUE, ignore.case = TRUE)
## Drop phenotypes that are entirely missing in FHS.
for(j in 1:length(varlist)){
# varcol <- get(study[i])[[varlist[j]]] %>% as.numeric()
if(sum(is.na(FHS[, varlist[j]])) == nrow(FHS)){
varlist[j] <- NA
}
}
varlist <- varlist[!is.na(varlist)]
## Transform inflammation phenotypes
## int() is a rank-based inverse-normal transform: qnorm((rank - 0.5) / n),
## with n the number of non-missing values and NAs kept in place.
## NOTE(review): mutate_at(varlist, int) re-applies the transform to ALL
## phenotype columns on every pass of the loop; a rank-based transform
## should be idempotent so results look unchanged, but the call could be
## hoisted out of the loop -- confirm.
trans.df <- FHS
for(k in 1:length(varlist)){
if(is.numeric(trans.df[[varlist[k]]]) == FALSE){
trans.df <- trans.df %>% mutate_at(varlist[k], as.numeric)
}
int <- function(x, na.rm = FALSE) (qnorm((rank(x,na.last="keep")-0.5)/sum(!is.na(x))))
trans.df <- trans.df %>% mutate_at(varlist, int)
}
## Attach the TOPMed sample IDs and expose the result as FHS_trans.
trans.df <- left_join(trans.df, samples[!duplicated(unique_subject_key), c("sample.id", "unique_subject_key")], by = "unique_subject_key")
assign(paste("FHS", "trans", sep = "_"), trans.df)
# FHS_cc <- FHS_trans[complete.cases(FHS_trans[,c("CRP","IL6","IL18","ICAM","TNFA","PSELECTIN","MMP9")]),]
summary(FHS_trans[, c("IL6", "CRP", "IL18", "ICAM", "TNFA", "PSELECTIN", "MMP9", "OPG", "MCP1", "LPPLA2_ACT", "LPPLA2_MASS")])
## Calculate composite phenotype
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + PSELECTIN + MMP9 + OPG + MCP1 + LPPLA2_ACT + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + PSELECTIN + MMP9 + OPG + MCP1 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG + MCP1 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG + MCP1"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + MCP1"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9 + LPPLA2_ACT"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
## Final CFA measurement model (the uncommented choice among the
## alternatives tried above): one latent inflammation factor loading on
## IL6, CRP, IL18, TNFA and MMP9; missing = "ml" requests full-information
## maximum likelihood so incomplete rows still contribute.
mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9"
fit <- cfa(mod, data=FHS_trans,missing = "ml")
summary(fit, standardized = T, fit.measures = T)
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + ICAM + PSELECTIN + MMP9"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + PSELECTIN + MMP9"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + ICAM + PSELECTIN + MMP9"
# # RMSEA 0.125
# # Comparative Fit Index (CFI) 0.847
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
# fit_id <- fit@Data@case.idx[[1]]
# pred <- data.frame(predict(fit), id = fit_id)
# # https://groups.google.com/forum/#!msg/lavaan/UPrU8qG5nOs/70OyCU-1u4EJ
# FHS_lv <- tibble::rownames_to_column(trans.df, "id") %>% mutate(id = as.numeric(id)) %>% left_join(., pred, by = "id") %>% dplyr::select(-1)
## Format and export
# FHS_encore_prelim <- left_join(FHS_lv[, c("sample.id", "comp.pheno", "SEX", "ANCESTRY")], pcs, by = c("sample.id"="V1"))
# names(FHS_encore_prelim)[5:15] <- c("PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7", "PC8", "PC9", "PC10", "PC11")
# write.table(FHS_encore_prelim, "../Inflammation_SEM/FHS_encore_prelim.txt", sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
| /Latent_Variable_Analysis/LV_Analysis_FHS.R | no_license | rgnmnl/Inflammation | R | false | false | 8,993 | r | ##############################################################################
## Title: CFS Composite Phenotype File Creation for Encore GWAS Analysis
## Version: 1
## Author: Regina Manansala
## Date Created: 02-March-2020
## Date Modified: 30-March-2020
##############################################################################
library(lavaan)
library(data.table)
library(dplyr)
library(stringr)
data_dir <- "~/Documents/Inflammation_Data/"
setwd(data_dir)
## Import Freeze8 Sample Data
samples <- fread(paste0(data_dir,"freeze8_sample_annot_2019-03-28.txt"))
## Get list of sub-directories named for each study included
study <- list.dirs(path = data_dir, full.names = F, recursive = F)
## Import principal component data
pcs <- fread(paste0(data_dir,"pcair_results.txt"), header=F)
## IMPORT INFLAMMATION AND DEMOGRAPHICS DATA ##
for(i in 1:length(study)){
## Get files within the study subdirectories
sub_dir <- list.files(paste0(data_dir, study[i]))
if(length(grep('topmed_dcc_inflammation_v1', sub_dir)) > 0){
inf_path <- paste0(data_dir, "/", study[i], "/", sub_dir[grep('topmed_dcc_inflammation_v1', sub_dir)])
inf_files <- list.files(path = inf_path)
assign(paste0(study[i], "_inf"), fread(file = paste0(inf_path, "/", inf_files[grep("topmed_dcc_inflammation_v1.txt", inf_files)])))
dem_path <- paste0(data_dir, "/", study[i], "/", sub_dir[grep('topmed_dcc_demographic_v3', sub_dir)])
dem_files <- list.files(path = dem_path)
assign(paste0(study[i], "_dem"), fread(file = paste0(dem_path, "/", dem_files[grep("topmed_dcc_demographic_v3.txt", dem_files)])))
}
if(length(grep('topmed_dcc_inflammation_v1', sub_dir)) == 0){
if(study[i] != "FHS"){
assign(paste0(study[i], "_inf"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep(".csv", sub_dir, ignore.case = T)])))
}
if(study[i] == "FHS"){
assign(paste0(study[i], "_og"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep("inflammation.*.csv", sub_dir, ignore.case = T)])))
assign(paste0(study[i], "_add"), fread(file = paste0(data_dir, study[i], "/", sub_dir[grep("additional.*.csv", sub_dir, ignore.case = T)])))
FHS_inf <- left_join(FHS_og, FHS_add, by = "shareid")
}
}
}
## Update Study List
study <- sub("_inf", "", ls(pattern="_inf"))
## COMBINE INFLAMMATION AND DEMOGRAPHICS DATA ##
## FILTER BY SAMPLE ID ##
inf <- ls(pattern = "_inf")
dem <- ls(pattern = "_dem")
for(i in 1:length(study)){
## Remove observations with 'DS' in consent column
samples <- samples[samples$study %in% c("GeneSTAR", "CFS") | grepl("(-DS|-DS-|DS-)", samples$consent) == FALSE,]
## Get all studies with separate demographic data. Combine with associated inflammation data using the unique subject key
if(length(grep(study[i], dem)) > 0){
inf_dat <- grep(study[i], inf, value = TRUE)
dem_dat <- grep(study[i], dem, value = TRUE)
foo <- left_join(get(inf_dat), get(dem_dat), by = "unique_subject_key")
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
assign(study[i], foo2)
}
## Get all studies with inflammation and demographic data combined (except WHI) and create a unique subject key using study name and ID
if(length(grep(study[i], dem)) == 0 & study[i] != "WHI"){
foo <- get(paste0(study[i], "_inf"))
foo$unique_subject_key <- paste(study[i], foo[[grep("(_id|shareid)", colnames(foo), ignore.case = T, value = T)]], sep = "_") #### HOW TO DEAL WITH UCASE/LCASE IN COLNAME
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
assign(study[i], foo2)
}
## For WHI, create unique subject key using study name and ID
if(length(grep(study[i], dem)) == 0 & study[i] == "WHI"){
foo <- get(paste0(study[i], "_inf"))
foo$unique_subject_key <- paste(study[i], foo[[grep("(_id|shareid)", colnames(foo), ignore.case = T, value = T)]], sep = "_") #### HOW TO DEAL WITH UCASE/LCASE IN COLNAME
foo2 <- foo[foo$unique_subject_key %in% samples$unique_subject_key,]
# Find duplicate measures and drop based on assay type frequency
foo2 <- foo2 %>%
group_by(CRP_ASSAY) %>%
mutate(ASSAY_NUM = table(CRP_ASSAY)) %>%
ungroup() %>%
arrange(dbGaP_ID, ASSAY_NUM) %>%
subset(CRP_ASSAY != "Latex-enhanced nephelometry (N High Sensitivity CRP assay) on BN II nephelometer (Dade Behring, Inc.)")
# dim(foo2[duplicated(foo2$unique_subject_key) | duplicated(foo2$unique_subject_key, fromLast = TRUE), ])
foo3 <- foo2[!duplicated(foo2$unique_subject_key, fromLast = TRUE),]
assign(study[i], foo3)
}
}
## ADD SD AND RACE EXCLUSION CRITERIA
#FHS 554 vars
varlist <- grep("(^crp($|_1)|^il6($|_1)|^il8($|_1)|^il10($|_1)|^il18($|_1)|^icam($|_1)|^tnfa($|_1)|^pselectin($|_1)|^eselectin($|_1)|^l1_beta($|_1)|^tnfa_r1($|_1)|^mmp1($|_1)|^mmp9($|_1)|^cd40($|_1)|^isoprostane_8_epi_pgf2a($|_1)|^lppla2_act($|_1)|^lppla2_mass($|_1)|^mcp1($|_1)|^mpo($|_1)|^opg($|_1)|^tnfr2($|_1))",
colnames(FHS), value = TRUE, ignore.case = TRUE)
for(j in 1:length(varlist)){
# varcol <- get(study[i])[[varlist[j]]] %>% as.numeric()
if(sum(is.na(FHS[, varlist[j]])) == nrow(FHS)){
varlist[j] <- NA
}
}
varlist <- varlist[!is.na(varlist)]
## Transform inflammation phenotypes
trans.df <- FHS
for(k in 1:length(varlist)){
if(is.numeric(trans.df[[varlist[k]]]) == FALSE){
trans.df <- trans.df %>% mutate_at(varlist[k], as.numeric)
}
int <- function(x, na.rm = FALSE) (qnorm((rank(x,na.last="keep")-0.5)/sum(!is.na(x))))
trans.df <- trans.df %>% mutate_at(varlist, int)
}
trans.df <- left_join(trans.df, samples[!duplicated(unique_subject_key), c("sample.id", "unique_subject_key")], by = "unique_subject_key")
assign(paste("FHS", "trans", sep = "_"), trans.df)
# FHS_cc <- FHS_trans[complete.cases(FHS_trans[,c("CRP","IL6","IL18","ICAM","TNFA","PSELECTIN","MMP9")]),]
summary(FHS_trans[, c("IL6", "CRP", "IL18", "ICAM", "TNFA", "PSELECTIN", "MMP9", "OPG", "MCP1", "LPPLA2_ACT", "LPPLA2_MASS")])
## Calculate composite phenotype
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + PSELECTIN + MMP9 + OPG + MCP1 + LPPLA2_ACT + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + PSELECTIN + MMP9 + OPG + MCP1 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG + MCP1 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG + MCP1"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + MCP1"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ CRP + IL6 + IL18 + ICAM + TNFA + MMP9 + OPG"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9 + LPPLA2_MASS"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9 + LPPLA2_ACT"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
mod <- "comp.pheno =~ IL6 + CRP + IL18 + TNFA + MMP9"
fit <- cfa(mod, data=FHS_trans,missing = "ml")
summary(fit, standardized = T, fit.measures = T)
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + ICAM + PSELECTIN + MMP9"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + IL18 + PSELECTIN + MMP9"
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
#
# mod <- "comp.pheno =~ IL6 + CRP + ICAM + PSELECTIN + MMP9"
# # RMSEA 0.125
# # Comparative Fit Index (CFI) 0.847
# fit <- cfa(mod, data=FHS_trans,missing = "ml")
# summary(fit, standardized = T, fit.measures = T)
# fit_id <- fit@Data@case.idx[[1]]
# pred <- data.frame(predict(fit), id = fit_id)
# # https://groups.google.com/forum/#!msg/lavaan/UPrU8qG5nOs/70OyCU-1u4EJ
# FHS_lv <- tibble::rownames_to_column(trans.df, "id") %>% mutate(id = as.numeric(id)) %>% left_join(., pred, by = "id") %>% dplyr::select(-1)
## Format and export
# FHS_encore_prelim <- left_join(FHS_lv[, c("sample.id", "comp.pheno", "SEX", "ANCESTRY")], pcs, by = c("sample.id"="V1"))
# names(FHS_encore_prelim)[5:15] <- c("PC1", "PC2", "PC3", "PC4", "PC5", "PC6", "PC7", "PC8", "PC9", "PC10", "PC11")
# write.table(FHS_encore_prelim, "../Inflammation_SEM/FHS_encore_prelim.txt", sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
|
#' Checks synonyms against GenBank
#'
#' Searches which of the synonyms of an input species list are present on
#' GenBank (NCBI taxonomy)
#'
#' @param synonyms a vector of class \code{character} containing fungal species names
#'
#'
#' @details The function searches which of the synonyms are present at GenBank. If you have a species list e.g. from a field study and want to derive sequences from GenBank but only get a subset of the species; then you might ignore the synonyms.
#'
#'
#' @return a data.frame with the synonyms found on GenBank and their NCBI
#'   taxonomy UIDs, or \code{NA} if the input is invalid or no synonym was
#'   found.
#'
#' @author Franz-Sebastian Krah
#'
#' @examples
#' # Use the synonyms derived from mycobank (syno_mycobank)
#' syns <- syno_mycobank(taxon = "Heterobasidion annosum")
#' gis <- lapply(syns, syns_on_ncbi)
#' gis
#'
#' # or any vector of synonyms
#' synonyms <- c("Heterobasidion annosum", "Polyporus annosus","Polyporus subpileatus",
#' "Polyporus scoticus","Polyporus makraulos","Polyporus macraulos","Trametes radiciperda",
#' "Poria macraula","Poria macraula","Polyporus irregularis","Polystictoides fuscus",
#' "Polyporus atramosus","Polyporus marginatoides","Polyporus atrannosus",
#' "Heterobasidion annosum f. macraulos")
#' gi <- syns_on_ncbi(synonyms)
syns_on_ncbi <- function(synonyms) {
  ## Validate the input up front. The previous version only tested for NULL
  ## although the warning text promised a class check, so non-character,
  ## non-NULL input fell through to the NCBI query; check both and return
  ## NA early, as the original did for NULL.
  if (is.null(synonyms) || !is.character(synonyms)) {
    warning("argument not of class character", call. = FALSE)
    return(NA)
  }
  ## requireNamespace() instead of require(): fail loudly when the lookup
  ## backend is missing rather than continuing with an undefined function.
  if (!requireNamespace("taxize", quietly = TRUE)) {
    stop("package 'taxize' is required for syns_on_ncbi()", call. = FALSE)
  }
  ## Query the NCBI taxonomy; get_uid() yields NA for names not found.
  uids <- taxize::get_uid(synonyms)
  res <- cbind.data.frame(synonym = synonyms, GI = as.numeric(uids))
  ## Keep only synonyms that resolved to a numeric UID (NA rows carry no
  ## digits, so grep drops them); NA signals that nothing matched.
  hits <- grep("\\d", res$GI)
  if (length(hits) == 0) {
    res <- NA
  } else {
    res <- res[hits, ]
    res$synonym <- as.character(res$synonym)
  }
  return(res)
}
| /R/syns_on_ncbi.R | no_license | FranzKrah/rmycobank | R | false | false | 1,663 | r | #' Checks synonyms against GenBank
#'
#' Searches which of the synonyms of an input species list are present on GenBank
#'
#' @param synonyms a vector of class \code{character} containing fungal species names
#'
#'
#' @details The function searches which of the synonmys are present at GenBank. If you have a species list e.g. from a field study and want to derive sequences from GenBank but only get a subset of the species; then you might ignore the synonyms.
#'
#'
#' @return an data.frame with the synomyms and the GIs, if present.
#'
#' @author Franz-Sebastian Krah
#'
#' @examples
#' # Use the synonyms derived from mycobank (syno_mycobank)
#' syns <- syno_mycobank(taxon = "Heterobasidion annosum")
#' gis <- lapply(syns, syns_on_ncbi)
#' gis
#'
#' # or any vector of synonyms
#' synonyms <- c("Heterobasidion annosum", "Polyporus annosus","Polyporus subpileatus",
#' "Polyporus scoticus","Polyporus makraulos","Polyporus macraulos","Trametes radiciperda",
#' "Poria macraula","Poria macraula","Polyporus irregularis","Polystictoides fuscus",
#' "Polyporus atramosus","Polyporus marginatoides","Polyporus atrannosus",
#' "Heterobasidion annosum f. macraulos")
#' gi <- syns_on_ncbi(synonyms)
## Look up which synonyms have an entry in the NCBI taxonomy via
## taxize::get_uid(). Returns a data.frame (synonym, GI) of the matches,
## or NA when the input is NULL or nothing matched.
syns_on_ncbi <- function(synonyms) {
## NOTE(review): only NULL is checked although the message claims a class
## check; a non-character, non-NULL input falls through to get_uid().
if (is.null(synonyms)) warning(" argument not of class character ")
if (is.null(synonyms)) {res <- NA}
if (!is.null(synonyms)) {
## NOTE(review): require() inside a function silently returns FALSE when
## taxize is missing; requireNamespace() + an explicit stop would be safer.
require("taxize")
res <- get_uid(synonyms)
res <- cbind.data.frame(synonym = synonyms, GI = as.numeric(res))
## Rows whose UID is NA carry no digit, so grep("\\d", ...) drops them.
if(length(grep("\\d", res$GI))==0){res <- NA}
else {res <- res[grep("\\d", res$GI),]
res$synonym <- as.character(res$synonym)}
}
return(res)
}
|
## S5_additive: Simplified Shotgun Stochastic Search with Screening (S5)
## for Bayesian variable selection in nonparametric additive models.
## Each predictor in X is expanded into a K-dimensional B-spline basis and
## a temperature-annealed stochastic search explores the model space,
## scoring models with BayesS5::ind_fun_NLfP.
## Arguments:
##   X       n x p matrix of predictors
##   y       response vector of length n
##   K       number of B-spline basis functions per predictor
##   model   model prior function (default BayesS5::Bernoulli_Uniform)
##   tuning  tuning parameter tau of the nonlocal prior (default 0.5*n)
##   tem     inverse-temperature schedule (default seq(0.4,1,length.out=30)^2)
##   ITER    iterations per temperature
##   S       number of variables kept by the screening step
##   C0      number of independent restarts of the search
##   verbose print progress after each temperature if TRUE
## Returns a list with the visited models (GAM, OBJ), the spline basis
## (phi, Knots, K), posterior model probabilities (post), marginal
## inclusion probabilities (marg.prob), the MAP model (ind.hppm,
## hppm.prob), the median-probability model (ind.marg), and tuning.
S5_additive <- function(X, y, K = 5, model, tuning = 0.5*nrow(X), tem, ITER=20,S=30, C0=5, verbose=TRUE){
requireNamespace("splines2")
requireNamespace("Matrix")
n = nrow(X)
p = ncol(X)
#y = y -mean(y)
tau = tuning
g = 1
Matrix = Matrix::Matrix
## Default inverse-temperature ladder for the annealed search.
if(missing(tem)){tem = seq(0.4,1,length.out=30)^2}
##################################################
## Marginal-likelihood evaluator (nonlocal functional prior) from BayesS5.
ind_fun = BayesS5::ind_fun_NLfP
#ind_fun = ind_fun_NLfP
## index(j): columns of phi holding the K spline coefficients of variable j.
index = function(j){
a = (K*(j-1)+2):(K*j+1)
return(a)
}
#assign("index", index, .GlobalEnv)
## index.tot(ind2): spline columns for a whole set of variables.
index.tot = function(ind2){
ind = sapply(ind2,index)#;a[ind] = 1
return(as.vector(ind))
}
## screening(j, phi, res): size of the fitted effect of variable j on the
## current residuals (ridge-stabilized least squares); used to rank
## candidate variables for the screened search set.
screening = function(j,phi,res){
ind3 = index.tot(j)
fit = solve(crossprod(phi[,c(1,ind3)])+0.0001*diag(K+1))%*%crossprod(phi[,c(1,ind3)],res)
fit.f = phi[,c(1,ind3)]%*%fit
a = crossprod(fit.f - mean(fit.f))
return(a)
}
###########################################################
if(missing(model)){
print("The model prior is unspecified. The default is Bernoulli_Uniform")
model = BayesS5::Bernoulli_Uniform
}
A3 = S; r0=1
verb = verbose
P0 = tcrossprod(rep(1,n))/n
## Spline basis: expand each column of X into K B-spline functions with
## boundary knots 1 unit beyond the observed range; phi gets an intercept.
phi0 = matrix(0,n,K*p)
Knots = matrix(0,p,2)
colnames(Knots) = c("Lower","Upper")
for(j in 1:p){
Knots[j, ] = c(min(X[,j])-1.0,max(X[,j])+1.0)
phi0[,(K*(j-1)+1):(K*j)] = splines2::bSpline(X[,j], df = K, Boundary.knots = Knots[j, ])
}
phi = cbind(rep(1,n),phi0)
## Column-centering projector; IP.phi is the centered spline basis.
IP = diag(n) - tcrossprod(rep(1,n))/n
IP.phi = IP%*%phi
#assign("IP", IP, .GlobalEnv)
#assign("IP.phi", IP.phi, .GlobalEnv)
## Random two-variable starting model; gam is the 0/1 inclusion vector and
## ind2 the indices of the currently included variables.
ind2 = sample(1:p,2)
#ind2 = true
gam = rep(0,p);
gam[ind2]=1
ind2 = which(gam==1)
## Caches of screened candidate sets, keyed by a hash of the model.
## NOTE(review): sum(5^(log(ind2))) serves as the model hash; collisions
## appear possible in principle -- verify.
GAM.screen = Matrix(0,p,50000,sparse=TRUE)
ID.screen = rep(-100000000,50000)
save = rep(0,p)
p.g = length(ind2)
ind3 = index.tot(ind2)
## Residuals of the starting model, then initial screening: keep the top-S
## variables by fitted-effect size plus the top-5 by residual correlation.
if(p.g>0){
fit = solve(crossprod(phi[,c(1,ind3)])+0.001*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y)
res = y-phi[,c(1,ind3)]%*%fit}else{res=y}
save = sapply(1:p,screening,phi,res)
ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix
corr = as.vector(cor(res,X))
ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix
IND = c(ind2,union(ind.ix[1:S],ind.l[1:5]))
IND = unique(IND)
p.ind = length(IND)
ID.screen[1] = sum(5^(log(ind2)))
GAM.screen[IND,1] = 1
#####
## Monte Carlo approximations of the prior normalizing constants
## (C.prior1-3) used by the nonlocal prior evaluation.
j = 1; NNN = 10000
kk = stats::rchisq(NNN,K-1)
aa = log(mean(exp(-1/kk)))
C.prior3 = rep(0,p)
C.g2 = rep(0,p)
for(j in 1:p){
C.g2[j] = -0.5*log(det(crossprod(phi[,(K*(j-1)+2):(K*j+1)])))
C.prior3[j] = C.g2[j] + aa
}
# assign("C.prior3", C.prior3, .GlobalEnv)
# assign("C.g2", C.prior3, .GlobalEnv)
#assign("tau", tau, .GlobalEnv)
#assign("g", g, .GlobalEnv)
aa = 0; j = 1; NNN = 10000
for(h in 1:NNN){
kk = stats::rnorm(K)*sqrt(g)
aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk))
}
C.prior1 = log(aa/NNN)
C.prior1 = as.numeric(C.prior1)
# assign("C.prior1", C.prior1, .GlobalEnv)
aa = 0; j = 1; NNN = 10000
for(h in 1:NNN){
kk = stats::rcauchy(K)
aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk))
}
C.prior2 = log(aa/NNN)
C.prior2 = as.numeric(C.prior2)
#assign("C.prior2", C.prior2, .GlobalEnv)
pmt = proc.time()
print("#################################")
print("S5 starts running")
IT = length(tem)
IT.seq = rep(ITER,IT)
## Score of the starting model; evaluation errors are swallowed, leaving
## curr at the -1e9 sentinel.
curr = -1000000000
tryCatch({
curr = ind_fun(ind2, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind2, p )
},error=function(e){})
p.g=sum(gam)
GAM.fin0 = NULL
OBJ.fin0 = NULL
## C0 independent restarts of the stochastic search.
for(uu in 1:C0){
C.p = rep(-1000000000,p)
C.m = rep(-1000000000,p)
GAM = gam
OBJ = curr
obj = OBJ
p.g=sum(gam)
## Score every one-variable addition (C.p) and deletion (C.m) neighbor of
## the current model; failures map to the -1e8 sentinel.
C.p = rep(-100000000,p)
for(i in (p.g+1):p.ind){
j=IND[i]
gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1)
int = -10000000
int = ind_fun(ind.p, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.p, p )
obj.p = c(int)
if(is.na(obj.p)==TRUE){obj.p = -100000000}
C.p[j] = obj.p
}
C.m = rep(-100000000,p)
IND.m = ind2
p.ind.m = length(IND.m)
for(i in 1:p.g){
j=ind2[i]
gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1)
int = -10000000
int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m, p)
obj.m = c(int)
if(is.na(obj.m)==TRUE){obj.m = -100000000}
C.m[j] = obj.m
}
p.g = sum(gam)
## Neighbor-score caches (one column per previously visited model).
OBJ.m0 = matrix(C.m,p,1)
OBJ.p0 = matrix(C.p,p,1)
ID = sum(5^(log(ind2)))
ID.obj = ID
it=1
#GAM.total = matrix(0,p,50000)
GAM.total = Matrix(0,p,50000,sparse=TRUE)
OBJ.total = rep(-100000000,50000)
GAM.total[,1] = gam
OBJ.total[1] = obj
time.total = rep(0,50000)
it=1
INT = NULL
pmt0 = proc.time()
## Annealed stochastic search over the inverse-temperature ladder.
for(it in 1:IT){
IT0 = IT.seq[it]
pq=0
for(iter in 1:IT0){
id = sum(5^(log(ind2)))
id.ind = which(id==ID)
leng = length(id.ind)
## New model: compute and record all neighbor scores; otherwise reuse the
## cached columns of OBJ.p0 / OBJ.m0.
if(leng==0){
ID = c(ID,id)
C.p = rep(-100000000,p)
for(i in (p.g+1):p.ind){
j=IND[i]
gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1)
int = -10000000
#tryCatch({
int = ind_fun(ind.p,y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.p, p)
#},error=function(e){})
obj.p = c(int)
if(is.na(obj.p)==TRUE){obj.p = -100000000}
C.p[j] = obj.p
ind.total = which(OBJ.total< -90000000)[1]
OBJ.total[ind.total] = obj.p
GAM.total[,ind.total] = gam.p
time.total[ind.total] = (proc.time()-pmt0)[3]
}
p.g = sum(gam)
C.m = rep(-100000000,p)
IND.m = ind2
p.ind.m = length(IND.m)
for(i in 1:p.g){
j=ind2[i]
gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1)
int = -10000000
#tryCatch({
int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m,p)
#},error=function(e){})
obj.m = c(int)
if(is.na(obj.m)==TRUE){obj.m = -100000000}
C.m[j] = obj.m
ind.total = which(OBJ.total< -90000000)[1]
OBJ.total[ind.total] = obj.m
GAM.total[,ind.total] = gam.m
time.total[ind.total] = (proc.time()-pmt0)[3]
}
OBJ.p0 = cbind(OBJ.p0,C.p)
OBJ.m0 = cbind(OBJ.m0,C.m)
}else{
pq= pq+1
C.p = OBJ.p0[,(id.ind[1])];C.m = OBJ.m0[,(id.ind[1])]
}
## Sample one addition and one deletion proportionally to their tempered
## scores, then accept one of the two moves with probability l.
prop = exp(tem[it]*(C.p-max(C.p)))
sample.p = sample(1:length(prop),1,prob=prop)
obj.p = C.p[sample.p]
#obj.p
prop = exp(tem[it]*(C.m-max(C.m)))
sample.m = sample(1:length(prop),1,prob=prop)
obj.m = C.m[sample.m]
#obj.m
l = 1/(1+exp(tem[it]*obj.m-tem[it]*obj.p))
if(l>runif(1)){ gam[sample.p]=1;obj = obj.p;curr=obj.p
}else{
gam[sample.m]=0;obj = obj.m;curr=obj.m
}
ind2 = which(gam==1)
p.g = sum(gam)
#int = -100000000
#tryCatch({
#	int = ind_fun(ind2) + model(ind2)
#},error=function(e){})
#curr = int
#jjj = sample(1:3,1)
#if(jjj==1){
#pmt0 = proc.time()
## Refresh (with probability 1/2) or reuse a cached screened candidate set
## for the new current model.
id = sum(5^(log(ind2)))
id.ind = which(id==ID.screen)
leng = length(id.ind)
if(leng==0){
jjj = sample(1:2,1)
if(jjj==1){
save = rep(0,p)
ind3 = index.tot(ind2)
if(p.g>0){
fit = solve(crossprod(phi[,c(1,ind3)])+0.01*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y)
res = y-phi[,c(1,ind3)]%*%fit}else{res=y}
save = sapply(1:p,screening,phi,res)
ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix
corr = as.vector(cor(res,X))
ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix
IND = c(ind2,union(ind.ix[1:S],ind.l[1:5]))
p.ind = length(IND)
ind.id = which(ID.screen< 0)[1]
ID.screen[ind.id] = id
GAM.screen[IND,ind.id] = 1}
}else{
IND = which(GAM.screen[,id.ind[1]]==1)
p.ind = length(IND)
}
#print(proc.time()-pmt0)
#}
## Record the visited model if it has not been seen before.
id = sum(5^(log(ind2)))
id.ind = which(id==ID.obj)
leng = length(id.ind)
if(leng==0){
ID.obj = c(ID.obj,id)
OBJ = c(OBJ,curr)
GAM= cbind(GAM,gam)
}
}
if(verbose==TRUE){
print("#################################")
gam.pr = GAM.total[,which.max(OBJ.total)]
obj.pr = max(OBJ.total)
ind2.pr = which(gam.pr==1)
print("Inverse Temperature");print(tem[it]);print("The Selected Variables in the Searched MAP Model");
print(ind2.pr);print("The Evaluated Object Value at the Searched MAP Model");print(obj.pr);
print("Current Model");print(ind2);
print("The Evaluated Object Value at the Current Model");print(curr);
print("Total Searched Variables");
print(IND)
print("The Number of Total Searched Models");
print(length(unique(OBJ.total)))
#print(length(which(OBJ.total> -10000)))
print(paste("tuning parameter = ", tau));
}
}
## End of one restart: collect the unique visited models and their scores.
time0 = proc.time()-pmt0
print(time0)
rm(OBJ.p0);rm(C.p)
rm(OBJ.m0);rm(C.m)
gam = GAM.total[,which.max(OBJ.total)]
ind2 = which(gam==1)
ind.total = which(OBJ.total> -100000000)
OBJ.fin = unique(OBJ.total[ind.total])
w = length(OBJ.fin)
time.fin = rep(0,w)
GAM.fin = matrix(0,p,w);GAM.fin[,1] = GAM.total[,which(OBJ.total==OBJ.fin[1])[1]]
if(w>1){
for(i in 2:length(OBJ.fin)){
GAM.fin[,i] = GAM.total[,which(OBJ.total==OBJ.fin[i])[1]]
}
}
rm(GAM.total);rm(OBJ.total)
const = sum(exp(OBJ.fin-max(OBJ.fin)))
posterior = exp(OBJ.fin-max(OBJ.fin))/const
total.size = length(OBJ.fin)
m = max(OBJ.fin)
ind.m0 = which.max(OBJ.fin)
## The next restart continues from this restart's best model.
gam = GAM.fin[,ind.m0]
ind2 = which(gam==1);p.g = sum(gam)
GAM.fin0 = cbind(GAM.fin0,GAM.fin)
OBJ.fin0 = c(OBJ.fin0,OBJ.fin)
#ind2 = true
#gam = rep(0,p);
#gam[ind2]=1
}
## Post-processing: deduplicate models pooled across all restarts.
print("#################################")
print("Post-process starts")
print("#################################")
OBJ.fin1 = unique(OBJ.fin0)
w = length(OBJ.fin1)
time.fin = rep(0,w)
GAM.fin1 = Matrix(0,p,w,sparse=TRUE);GAM.fin1[,1] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[1])[1]]
if(w>1){
for(i in 2:w){
GAM.fin1[,i] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[i])[1]]
}
}
rm(GAM.fin0)
GAM = GAM.fin1
OBJ = OBJ.fin1
print("Done!")
## MAP model, normalized posterior model probabilities, and marginal
## inclusion probabilities; ind.marg is the median-probability model
## (variables with inclusion probability > 0.5).
gam.map = GAM[, which.max(OBJ)]
ind.map = which(gam.map==1);p.map = length(ind.map)
POST_model = exp(OBJ - max(OBJ))/sum(exp(OBJ - max(OBJ)))
POST_incl_prob = GAM%*%POST_model
hppm = 1/sum(exp(OBJ - max(OBJ)))
ind.MAP = which(gam.map == 1)
print(ind.MAP)
print("# of Searched Models by S5")
print(length(OBJ))
ind.marg = which(as.vector(POST_incl_prob) > 0.5)
return(list(GAM = GAM, OBJ = OBJ, phi = phi, Knots= Knots, K = K, post = POST_model, marg.prob = as.vector(POST_incl_prob),
ind.hppm = ind.MAP, ind.marg = ind.marg, hppm.prob = hppm, tuning=tau ))
}
| /R/S5_additive.R | no_license | cran/BayesS5 | R | false | false | 12,155 | r | S5_additive <- function(X, y, K = 5, model, tuning = 0.5*nrow(X), tem, ITER=20,S=30, C0=5, verbose=TRUE){
requireNamespace("splines2")
requireNamespace("Matrix")
n = nrow(X)
p = ncol(X)
#y = y -mean(y)
tau = tuning
g = 1
Matrix = Matrix::Matrix
if(missing(tem)){tem = seq(0.4,1,length.out=30)^2}
##################################################
ind_fun = BayesS5::ind_fun_NLfP
#ind_fun = ind_fun_NLfP
index = function(j) {
  # Columns of the design matrix phi holding predictor j's K spline basis
  # functions. Column 1 of phi is the intercept, hence the "+2"/"+1" offsets.
  # Uses K from the enclosing scope.
  (K * (j - 1) + 2):(K * j + 1)
}
#assign("index", index, .GlobalEnv)
index.tot = function(ind2) {
  # Concatenated phi column indices for every predictor listed in ind2,
  # flattened into a single vector (each predictor contributes K columns).
  as.vector(sapply(ind2, index))
}
# Marginal screening statistic for predictor j: regress the residual vector
# `res` on predictor j's spline basis (plus an intercept column) via
# ridge-regularized least squares, and return the centered sum of squares of
# the fitted values. Larger values flag predictors that explain more of the
# remaining variation. Relies on `index.tot` and `K` from the enclosing scope.
screening = function(j,phi,res){
ind3 = index.tot(j)
# the 0.0001 ridge term guards against a singular cross-product matrix
fit = solve(crossprod(phi[,c(1,ind3)])+0.0001*diag(K+1))%*%crossprod(phi[,c(1,ind3)],res)
fit.f = phi[,c(1,ind3)]%*%fit
a = crossprod(fit.f - mean(fit.f))
return(a)
}
###########################################################
if(missing(model)){
print("The model prior is unspecified. The default is Bernoulli_Uniform")
model = BayesS5::Bernoulli_Uniform
}
A3 = S; r0=1
verb = verbose
P0 = tcrossprod(rep(1,n))/n
phi0 = matrix(0,n,K*p)
Knots = matrix(0,p,2)
colnames(Knots) = c("Lower","Upper")
for(j in 1:p){
Knots[j, ] = c(min(X[,j])-1.0,max(X[,j])+1.0)
phi0[,(K*(j-1)+1):(K*j)] = splines2::bSpline(X[,j], df = K, Boundary.knots = Knots[j, ])
}
phi = cbind(rep(1,n),phi0)
IP = diag(n) - tcrossprod(rep(1,n))/n
IP.phi = IP%*%phi
#assign("IP", IP, .GlobalEnv)
#assign("IP.phi", IP.phi, .GlobalEnv)
ind2 = sample(1:p,2)
#ind2 = true
gam = rep(0,p);
gam[ind2]=1
ind2 = which(gam==1)
GAM.screen = Matrix(0,p,50000,sparse=TRUE)
ID.screen = rep(-100000000,50000)
save = rep(0,p)
p.g = length(ind2)
ind3 = index.tot(ind2)
if(p.g>0){
fit = solve(crossprod(phi[,c(1,ind3)])+0.001*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y)
res = y-phi[,c(1,ind3)]%*%fit}else{res=y}
save = sapply(1:p,screening,phi,res)
ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix
corr = as.vector(cor(res,X))
ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix
IND = c(ind2,union(ind.ix[1:S],ind.l[1:5]))
IND = unique(IND)
p.ind = length(IND)
ID.screen[1] = sum(5^(log(ind2)))
GAM.screen[IND,1] = 1
#####
j = 1; NNN = 10000
kk = stats::rchisq(NNN,K-1)
aa = log(mean(exp(-1/kk)))
C.prior3 = rep(0,p)
C.g2 = rep(0,p)
for(j in 1:p){
C.g2[j] = -0.5*log(det(crossprod(phi[,(K*(j-1)+2):(K*j+1)])))
C.prior3[j] = C.g2[j] + aa
}
# assign("C.prior3", C.prior3, .GlobalEnv)
# assign("C.g2", C.prior3, .GlobalEnv)
#assign("tau", tau, .GlobalEnv)
#assign("g", g, .GlobalEnv)
aa = 0; j = 1; NNN = 10000
for(h in 1:NNN){
kk = stats::rnorm(K)*sqrt(g)
aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk))
}
C.prior1 = log(aa/NNN)
C.prior1 = as.numeric(C.prior1)
# assign("C.prior1", C.prior1, .GlobalEnv)
aa = 0; j = 1; NNN = 10000
for(h in 1:NNN){
kk = stats::rcauchy(K)
aa = aa + exp(-tau*n/crossprod(IP.phi[,(K*(j-1)+2):(K*j+1)]%*%kk))
}
C.prior2 = log(aa/NNN)
C.prior2 = as.numeric(C.prior2)
#assign("C.prior2", C.prior2, .GlobalEnv)
pmt = proc.time()
print("#################################")
print("S5 starts running")
IT = length(tem)
IT.seq = rep(ITER,IT)
curr = -1000000000
tryCatch({
curr = ind_fun(ind2, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind2, p )
},error=function(e){})
p.g=sum(gam)
GAM.fin0 = NULL
OBJ.fin0 = NULL
for(uu in 1:C0){
C.p = rep(-1000000000,p)
C.m = rep(-1000000000,p)
GAM = gam
OBJ = curr
obj = OBJ
p.g=sum(gam)
C.p = rep(-100000000,p)
for(i in (p.g+1):p.ind){
j=IND[i]
gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1)
int = -10000000
int = ind_fun(ind.p, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.p, p )
obj.p = c(int)
if(is.na(obj.p)==TRUE){obj.p = -100000000}
C.p[j] = obj.p
}
C.m = rep(-100000000,p)
IND.m = ind2
p.ind.m = length(IND.m)
for(i in 1:p.g){
j=ind2[i]
gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1)
int = -10000000
int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m, p)
obj.m = c(int)
if(is.na(obj.m)==TRUE){obj.m = -100000000}
C.m[j] = obj.m
}
p.g = sum(gam)
OBJ.m0 = matrix(C.m,p,1)
OBJ.p0 = matrix(C.p,p,1)
ID = sum(5^(log(ind2)))
ID.obj = ID
it=1
#GAM.total = matrix(0,p,50000)
GAM.total = Matrix(0,p,50000,sparse=TRUE)
OBJ.total = rep(-100000000,50000)
GAM.total[,1] = gam
OBJ.total[1] = obj
time.total = rep(0,50000)
it=1
INT = NULL
pmt0 = proc.time()
for(it in 1:IT){
IT0 = IT.seq[it]
pq=0
for(iter in 1:IT0){
id = sum(5^(log(ind2)))
id.ind = which(id==ID)
leng = length(id.ind)
if(leng==0){
ID = c(ID,id)
C.p = rep(-100000000,p)
for(i in (p.g+1):p.ind){
j=IND[i]
gam.p = gam;gam.p[j]=1;ind.p=which(gam.p==1)
int = -10000000
#tryCatch({
int = ind_fun(ind.p,y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.p, p)
#},error=function(e){})
obj.p = c(int)
if(is.na(obj.p)==TRUE){obj.p = -100000000}
C.p[j] = obj.p
ind.total = which(OBJ.total< -90000000)[1]
OBJ.total[ind.total] = obj.p
GAM.total[,ind.total] = gam.p
time.total[ind.total] = (proc.time()-pmt0)[3]
}
p.g = sum(gam)
C.m = rep(-100000000,p)
IND.m = ind2
p.ind.m = length(IND.m)
for(i in 1:p.g){
j=ind2[i]
gam.m = gam;gam.m[j]=0;ind.m=which(gam.m==1)
int = -10000000
#tryCatch({
int = ind_fun(ind.m, y, phi, n, p, K, IP.phi, C.prior1, tuning) + model(ind.m,p)
#},error=function(e){})
obj.m = c(int)
if(is.na(obj.m)==TRUE){obj.m = -100000000}
C.m[j] = obj.m
ind.total = which(OBJ.total< -90000000)[1]
OBJ.total[ind.total] = obj.m
GAM.total[,ind.total] = gam.m
time.total[ind.total] = (proc.time()-pmt0)[3]
}
OBJ.p0 = cbind(OBJ.p0,C.p)
OBJ.m0 = cbind(OBJ.m0,C.m)
}else{
pq= pq+1
C.p = OBJ.p0[,(id.ind[1])];C.m = OBJ.m0[,(id.ind[1])]
}
prop = exp(tem[it]*(C.p-max(C.p)))
sample.p = sample(1:length(prop),1,prob=prop)
obj.p = C.p[sample.p]
#obj.p
prop = exp(tem[it]*(C.m-max(C.m)))
sample.m = sample(1:length(prop),1,prob=prop)
obj.m = C.m[sample.m]
#obj.m
l = 1/(1+exp(tem[it]*obj.m-tem[it]*obj.p))
if(l>runif(1)){ gam[sample.p]=1;obj = obj.p;curr=obj.p
}else{
gam[sample.m]=0;obj = obj.m;curr=obj.m
}
ind2 = which(gam==1)
p.g = sum(gam)
#int = -100000000
#tryCatch({
# int = ind_fun(ind2) + model(ind2)
#},error=function(e){})
#curr = int
#jjj = sample(1:3,1)
#if(jjj==1){
#pmt0 = proc.time()
id = sum(5^(log(ind2)))
id.ind = which(id==ID.screen)
leng = length(id.ind)
if(leng==0){
jjj = sample(1:2,1)
if(jjj==1){
save = rep(0,p)
ind3 = index.tot(ind2)
if(p.g>0){
fit = solve(crossprod(phi[,c(1,ind3)])+0.01*diag(p.g*K+1))%*%crossprod(phi[,c(1,ind3)],y)
res = y-phi[,c(1,ind3)]%*%fit}else{res=y}
save = sapply(1:p,screening,phi,res)
ind.ix = sort.int(save,decreasing=TRUE,index.return=TRUE)$ix
corr = as.vector(cor(res,X))
ind.l = sort.int(abs(corr),decreasing=TRUE,index.return=TRUE)$ix
IND = c(ind2,union(ind.ix[1:S],ind.l[1:5]))
p.ind = length(IND)
ind.id = which(ID.screen< 0)[1]
ID.screen[ind.id] = id
GAM.screen[IND,ind.id] = 1}
}else{
IND = which(GAM.screen[,id.ind[1]]==1)
p.ind = length(IND)
}
#print(proc.time()-pmt0)
#}
id = sum(5^(log(ind2)))
id.ind = which(id==ID.obj)
leng = length(id.ind)
if(leng==0){
ID.obj = c(ID.obj,id)
OBJ = c(OBJ,curr)
GAM= cbind(GAM,gam)
}
}
if(verbose==TRUE){
print("#################################")
gam.pr = GAM.total[,which.max(OBJ.total)]
obj.pr = max(OBJ.total)
ind2.pr = which(gam.pr==1)
print("Inverse Temperature");print(tem[it]);print("The Selected Variables in the Searched MAP Model");
print(ind2.pr);print("The Evaluated Object Value at the Searched MAP Model");print(obj.pr);
print("Current Model");print(ind2);
print("The Evaluated Object Value at the Current Model");print(curr);
print("Total Searched Variables");
print(IND)
print("The Number of Total Searched Models");
print(length(unique(OBJ.total)))
#print(length(which(OBJ.total> -10000)))
print(paste("tuning parameter = ", tau));
}
}
time0 = proc.time()-pmt0
print(time0)
rm(OBJ.p0);rm(C.p)
rm(OBJ.m0);rm(C.m)
gam = GAM.total[,which.max(OBJ.total)]
ind2 = which(gam==1)
ind.total = which(OBJ.total> -100000000)
OBJ.fin = unique(OBJ.total[ind.total])
w = length(OBJ.fin)
time.fin = rep(0,w)
GAM.fin = matrix(0,p,w);GAM.fin[,1] = GAM.total[,which(OBJ.total==OBJ.fin[1])[1]]
if(w>1){
for(i in 2:length(OBJ.fin)){
GAM.fin[,i] = GAM.total[,which(OBJ.total==OBJ.fin[i])[1]]
}
}
rm(GAM.total);rm(OBJ.total)
const = sum(exp(OBJ.fin-max(OBJ.fin)))
posterior = exp(OBJ.fin-max(OBJ.fin))/const
total.size = length(OBJ.fin)
m = max(OBJ.fin)
ind.m0 = which.max(OBJ.fin)
gam = GAM.fin[,ind.m0]
ind2 = which(gam==1);p.g = sum(gam)
GAM.fin0 = cbind(GAM.fin0,GAM.fin)
OBJ.fin0 = c(OBJ.fin0,OBJ.fin)
#ind2 = true
#gam = rep(0,p);
#gam[ind2]=1
}
print("#################################")
print("Post-process starts")
print("#################################")
OBJ.fin1 = unique(OBJ.fin0)
w = length(OBJ.fin1)
time.fin = rep(0,w)
GAM.fin1 = Matrix(0,p,w,sparse=TRUE);GAM.fin1[,1] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[1])[1]]
if(w>1){
for(i in 2:w){
GAM.fin1[,i] = GAM.fin0[,which(OBJ.fin0==OBJ.fin1[i])[1]]
}
}
rm(GAM.fin0)
GAM = GAM.fin1
OBJ = OBJ.fin1
print("Done!")
gam.map = GAM[, which.max(OBJ)]
ind.map = which(gam.map==1);p.map = length(ind.map)
POST_model = exp(OBJ - max(OBJ))/sum(exp(OBJ - max(OBJ)))
POST_incl_prob = GAM%*%POST_model
hppm = 1/sum(exp(OBJ - max(OBJ)))
ind.MAP = which(gam.map == 1)
print(ind.MAP)
print("# of Searched Models by S5")
print(length(OBJ))
ind.marg = which(as.vector(POST_incl_prob) > 0.5)
return(list(GAM = GAM, OBJ = OBJ, phi = phi, Knots= Knots, K = K, post = POST_model, marg.prob = as.vector(POST_incl_prob),
ind.hppm = ind.MAP, ind.marg = ind.marg, hppm.prob = hppm, tuning=tau ))
}
|
##### Test for Scatterplot Data Merge #####
# run data processing file first
# Load dplyr (needed for select()/inner_join() below); install it first if
# missing. The original installed "magrittr" here, which left dplyr absent.
if (!require(dplyr)) {
  install.packages("dplyr", repos = "http://cran.us.r-project.org")  # for joining
  library(dplyr)
}
scatterDataMerge <- function(x, y, year) {
  # Join two country-level tables on ISO2 country code for a single year.
  #
  # x, y : data frames with an "ISO2" column plus one column per year.
  # year : the year (numeric or character) whose columns should be joined.
  #
  # Returns a data frame with columns ISO2, x (values from `x`) and
  # y (values from `y`), restricted to countries present in both tables.
  # The bare head() calls in the original were no-ops inside a function
  # (their results were discarded) and have been removed.
  year_col <- toString(year)
  x_data <- x %>% select(c("ISO2", year_col))  # country code + requested year
  y_data <- y %>% select(c("ISO2", year_col))
  # inner join keeps only countries present in both tables
  joinedTable <- inner_join(x_data, y_data, by = c("ISO2" = "ISO2"))
  colnames(joinedTable)[2] <- "x"
  colnames(joinedTable)[3] <- "y"
  joinedTable
}
# Example merge: GDP vs. female mortality for 2015 (tables come from the
# data-processing script, which must be run first).
gdpmortfemtest <- scatterDataMerge(gdpFinal, mortalityFemaleFinal, 2015)
# for ggvis plot
# Load ggvis (needed for the plotting helpers below); install it first if
# missing. The original installed "magrittr" here, leaving ggvis absent.
if (!require(ggvis)) {
  install.packages("ggvis", repos = "http://cran.us.r-project.org")
  library(ggvis)
}
createScatterplot <- function(data, x_title = "x title here", y_title = "y title here") {
  # Build a ggvis scatterplot (points plus a smoothed trend) from a table
  # with columns x and y, as produced by scatterDataMerge().
  #
  # data             : data frame with columns x and y.
  # x_title, y_title : axis labels; the defaults keep the original
  #                    placeholder text, so existing one-argument calls
  #                    behave exactly as before.
  plot <- data %>%
    ggvis(~x, ~y) %>%
    layer_points() %>%
    add_axis("x", title = x_title) %>%
    add_axis("y", title = y_title) %>%
    set_options(width = 500, height = 500) %>%
    layer_smooths()
  plot
}
# x value of scatter: the currently selected x-axis factor, as a string
scatter_x_reactive = reactive({
format(input$x_factor)
})
# y value of scatter: the currently selected y-axis factor, as a string
scatter_y_reactive = reactive({
format(input$y_factor)
})
# year of scatter: the currently selected year, as a string
scatter_year_reactive = reactive({
format(input$scatter_year)
})
# reactive data of scatter
# NOTE(review): scatterDataMerge() expects data frames for x and y, but the
# reactives above yield character strings from the inputs -- verify that a
# lookup from input value to the corresponding data table happens somewhere,
# otherwise this call cannot work as written.
scatterDataReactive = reactive({
scatterDataMerge(x = scatter_x_reactive(), y = scatter_y_reactive(), year = scatter_year_reactive())
})
# create scatter plot output
# NOTE(review): createScatterplot() returns a ggvis object, while renderPlot()
# expects base/ggplot graphics -- confirm this renders (ggvis normally uses
# bind_shiny()/ggvisOutput()).
output$scatter_plot <- renderPlot({
createScatterplot(scatterDataReactive())
})
| /testFile.R | no_license | brycehuffman/sys_2202_finalproject | R | false | false | 1,700 | r | ##### Test for Scatterplot Data Merge #####
# run data processing file first
# Load dplyr (needed for select()/inner_join() below); install it first if
# missing. The original installed "magrittr" here, which left dplyr absent.
if (!require(dplyr)) {
  install.packages("dplyr", repos = "http://cran.us.r-project.org")  # for joining
  library(dplyr)
}
scatterDataMerge <- function(x, y, year) {
  # Join two country-level tables on ISO2 country code for a single year.
  #
  # x, y : data frames with an "ISO2" column plus one column per year.
  # year : the year (numeric or character) whose columns should be joined.
  #
  # Returns a data frame with columns ISO2, x (values from `x`) and
  # y (values from `y`), restricted to countries present in both tables.
  # The bare head() calls in the original were no-ops inside a function
  # (their results were discarded) and have been removed.
  year_col <- toString(year)
  x_data <- x %>% select(c("ISO2", year_col))  # country code + requested year
  y_data <- y %>% select(c("ISO2", year_col))
  # inner join keeps only countries present in both tables
  joinedTable <- inner_join(x_data, y_data, by = c("ISO2" = "ISO2"))
  colnames(joinedTable)[2] <- "x"
  colnames(joinedTable)[3] <- "y"
  joinedTable
}
# Example merge: GDP vs. female mortality for 2015 (tables come from the
# data-processing script, which must be run first).
gdpmortfemtest <- scatterDataMerge(gdpFinal, mortalityFemaleFinal, 2015)
# for ggvis plot
# Load ggvis (needed for the plotting helpers below); install it first if
# missing. The original installed "magrittr" here, leaving ggvis absent.
if (!require(ggvis)) {
  install.packages("ggvis", repos = "http://cran.us.r-project.org")
  library(ggvis)
}
createScatterplot <- function(data, x_title = "x title here", y_title = "y title here") {
  # Build a ggvis scatterplot (points plus a smoothed trend) from a table
  # with columns x and y, as produced by scatterDataMerge().
  #
  # data             : data frame with columns x and y.
  # x_title, y_title : axis labels; the defaults keep the original
  #                    placeholder text, so existing one-argument calls
  #                    behave exactly as before.
  plot <- data %>%
    ggvis(~x, ~y) %>%
    layer_points() %>%
    add_axis("x", title = x_title) %>%
    add_axis("y", title = y_title) %>%
    set_options(width = 500, height = 500) %>%
    layer_smooths()
  plot
}
# x value of scatter: the currently selected x-axis factor, as a string
scatter_x_reactive = reactive({
format(input$x_factor)
})
# y value of scatter: the currently selected y-axis factor, as a string
scatter_y_reactive = reactive({
format(input$y_factor)
})
# year of scatter: the currently selected year, as a string
scatter_year_reactive = reactive({
format(input$scatter_year)
})
# reactive data of scatter
# NOTE(review): scatterDataMerge() expects data frames for x and y, but the
# reactives above yield character strings from the inputs -- verify that a
# lookup from input value to the corresponding data table happens somewhere,
# otherwise this call cannot work as written.
scatterDataReactive = reactive({
scatterDataMerge(x = scatter_x_reactive(), y = scatter_y_reactive(), year = scatter_year_reactive())
})
# create scatter plot output
# NOTE(review): createScatterplot() returns a ggvis object, while renderPlot()
# expects base/ggplot graphics -- confirm this renders (ggvis normally uses
# bind_shiny()/ggvisOutput()).
output$scatter_plot <- renderPlot({
createScatterplot(scatterDataReactive())
})
|
# Copyright (C) 2014 Hans W. Borchers. All Rights Reserved.
# This code is published under the L-GPL.
#
# File: lbfgs.R
# Author: Hans W. Borchers
# Date: 27 January 2014
#
# Wrapper to solve optimization problem using Low-storage BFGS.
lbfgs <- function(x0, fn, gr = NULL, lower = NULL, upper = NULL,
                  nl.info = FALSE, control = list(), ...) {
  # Low-storage BFGS via NLopt (algorithm NLOPT_LD_LBFGS).
  #
  # x0          : numeric vector of starting values.
  # fn          : objective function; extra arguments are forwarded via `...`.
  # gr          : optional gradient; approximated numerically with nl.grad()
  #               when NULL.
  # lower/upper : optional box constraints passed to nloptr as lb/ub.
  # nl.info     : if TRUE, print the full nloptr result object.
  # control     : list of NLopt options, converted by nl.opts().
  #
  # Returns a list with par, value, iter, convergence and message.
  opts <- nl.opts(control)
  opts["algorithm"] <- "NLOPT_LD_LBFGS"

  # curry `...` into the objective so nloptr sees a one-argument function
  objective <- match.fun(fn)
  fn <- function(x) objective(x, ...)

  if (is.null(gr)) {
    # no analytic gradient supplied: fall back to a numerical one
    gr <- function(x) nl.grad(x, fn)
  } else {
    gradient <- match.fun(gr)
    gr <- function(x) gradient(x, ...)
  }

  raw <- nloptr(x0,
                eval_f = fn,
                eval_grad_f = gr,
                lb = lower,
                ub = upper,
                opts = opts)
  if (nl.info) {
    print(raw)
  }

  # repackage the nloptr result in an optim()-like list
  list(par = raw$solution, value = raw$objective, iter = raw$iterations,
       convergence = raw$status, message = raw$message)
}
| /nloptr/R/lbfgs.R | no_license | ingted/R-Examples | R | false | false | 1,038 | r | # Copyright (C) 2014 Hans W. Borchers. All Rights Reserved.
# This code is published under the L-GPL.
#
# File: lbfgs.R
# Author: Hans W. Borchers
# Date: 27 January 2014
#
# Wrapper to solve optimization problem using Low-storage BFGS.
# Wrapper around nloptr() that runs NLopt's low-storage BFGS (NLOPT_LD_LBFGS).
#
# x0          : numeric vector of starting values.
# fn          : objective function; extra arguments are passed via `...`.
# gr          : optional gradient; when NULL a numerical gradient (nl.grad)
#               is used.
# lower/upper : optional box constraints, passed to nloptr as lb/ub.
# nl.info     : if TRUE, print the full nloptr result object.
# control     : list of NLopt options, converted by nl.opts().
#
# Returns a list with par, value, iter, convergence and message.
lbfgs <-
function(x0, fn, gr = NULL, lower = NULL, upper = NULL,
nl.info = FALSE, control = list(), ...)
{
opts <- nl.opts(control)
opts["algorithm"] <- "NLOPT_LD_LBFGS"
# curry `...` into the objective so nloptr sees a one-argument function
fun <- match.fun(fn)
fn <- function(x) fun(x, ...)
if (is.null(gr)) {
# no analytic gradient supplied: approximate it numerically
gr <- function(x) nl.grad(x, fn)
} else {
.gr <- match.fun(gr)
gr <- function(x) .gr(x, ...)
}
S0 <- nloptr(x0,
eval_f = fn,
eval_grad_f = gr,
lb = lower,
ub = upper,
opts = opts)
if (nl.info) print(S0)
# repackage the nloptr result in an optim()-like list
S1 <- list(par = S0$solution, value = S0$objective, iter = S0$iterations,
convergence = S0$status, message = S0$message)
return(S1)
}
|
# Practical Machine Learning course project: predict exercise quality
# ("classe") from accelerometer data using a caret decision tree.
# Read the raw training/testing CSVs, treating "NA" and "" as missing.
pmlTrainRaw<-read.csv("pml-training.csv",na.strings=c("NA",""))
pmlTest<-read.csv("pml-testing.csv",na.strings=c("NA",""))
# Remove tagging data (drop the first seven columns)
pmlTrainRaw<-pmlTrainRaw[,8:160]
pmlTest<-pmlTest[,8:160]
# Rows have either 19216 NAs or none
#sapply(pmlTrainRaw,function(x)sum(is.na(x)))
# keep only columns with no missing values (same mask applied to both sets)
nona<-apply(is.na(pmlTrainRaw),2,sum)==0
pmlTrainRaw<-pmlTrainRaw[,nona]
pmlTest<-pmlTest[,nona]
# Make separate training and validation sets (10% train, 90% validation)
inTrain<-createDataPartition(pmlTrainRaw$classe,p=0.1,list=FALSE)
pmlTrain<-pmlTrainRaw[inTrain,]
pmlValidate<-pmlTrainRaw[-inTrain,]
# NOTE(review): the seed is set *after* createDataPartition(), so the
# train/validation split itself is not reproducible -- confirm intent.
set.seed(42)
#fitControl <- trainControl(method="repeatedcv",number=5,repeats=1)
#tgrid<-expand.grid(mtry=c(6))
#modFit<-train(classe ~ .,data=pmlTrain,method="rf",prox=TRUE,trControl=fitControl,tuneGrid=tgrid)
# fit a CART decision tree; the commented lines above are an alternative
# random-forest configuration
modFit<-train(classe ~ .,data=pmlTrain,method="rpart")
# in-sample accuracy
predTrain<-predict(modFit,newdata=pmlTrain)
confusionMatrix(predTrain,pmlTrain$classe)$overall[1]
# out-of-sample (validation) accuracy
predValidate<-predict(modFit,newdata=pmlValidate)
confusionMatrix(predValidate,pmlValidate$classe)$overall[1]
confusionMatrix(predValidate,pmlValidate$classe)$overall[1] | /John Hopkins Data Science/Practical Machine Learning/Project 1/project1.R | no_license | kj6aqr/Coursera | R | false | false | 1,042 | r | pmlTrainRaw<-read.csv("pml-training.csv",na.strings=c("NA",""))
pmlTest<-read.csv("pml-testing.csv",na.strings=c("NA",""))
# Remove tagging data
pmlTrainRaw<-pmlTrainRaw[,8:160]
pmlTest<-pmlTest[,8:160]
# Rows have either 19216 NAs or none
#sapply(pmlTrainRaw,function(x)sum(is.na(x)))
nona<-apply(is.na(pmlTrainRaw),2,sum)==0
pmlTrainRaw<-pmlTrainRaw[,nona]
pmlTest<-pmlTest[,nona]
# Make separate training and validation sets
inTrain<-createDataPartition(pmlTrainRaw$classe,p=0.1,list=FALSE)
pmlTrain<-pmlTrainRaw[inTrain,]
pmlValidate<-pmlTrainRaw[-inTrain,]
set.seed(42)
#fitControl <- trainControl(method="repeatedcv",number=5,repeats=1)
#tgrid<-expand.grid(mtry=c(6))
#modFit<-train(classe ~ .,data=pmlTrain,method="rf",prox=TRUE,trControl=fitControl,tuneGrid=tgrid)
modFit<-train(classe ~ .,data=pmlTrain,method="rpart")
predTrain<-predict(modFit,newdata=pmlTrain)
confusionMatrix(predTrain,pmlTrain$classe)$overall[1]
predValidate<-predict(modFit,newdata=pmlValidate)
confusionMatrix(predValidate,pmlValidate$classe)$overall[1] |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataStores.R
\name{CreateDataStore}
\alias{CreateDataStore}
\title{Create a data store.}
\usage{
CreateDataStore(type, canonicalName, driverId, jdbcUrl)
}
\arguments{
\item{type}{character. The type of data store.}
\item{canonicalName}{character. The user-friendly name of the data store.}
\item{driverId}{character. The ID of the driver to use.}
\item{jdbcUrl}{character. The full JDBC url.}
}
\description{
Create a data store.
}
\examples{
\dontrun{
CreateDataStore(type = "jdbc",
canonicalName = "Demo DB",
driverId = "57a7c978c808916f4a630f89",
jdbcUrl = "jdbc:postgresql://my.db.address.org:5432/my_db")
}
}
| /man/CreateDataStore.Rd | no_license | cran/datarobot | R | false | true | 747 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DataStores.R
\name{CreateDataStore}
\alias{CreateDataStore}
\title{Create a data store.}
\usage{
CreateDataStore(type, canonicalName, driverId, jdbcUrl)
}
\arguments{
\item{type}{character. The type of data store.}
\item{canonicalName}{character. The user-friendly name of the data store.}
\item{driverId}{character. The ID of the driver to use.}
\item{jdbcUrl}{character. The full JDBC url.}
}
\description{
Create a data store.
}
\examples{
\dontrun{
CreateDataStore(type = "jdbc",
canonicalName = "Demo DB",
driverId = "57a7c978c808916f4a630f89",
jdbcUrl = "jdbc:postgresql://my.db.address.org:5432/my_db")
}
}
|
###############################################################################
# A script for fitting a GAMM to observed WorldClim data and the outputs of #
# CCSM4 climate models over Europe. Includes code to test model fit. #
# The GAMM models are saved at the end for use in other code. #
# #
# Author: Nicolas Gauthier #
###############################################################################
###############################################################################
# Load necessary R packages
library(raster) # functions for managing and processing raster data
library(mgcv) # functions to fit and analyze GAMs
library(dismo) # functions to sample points weighted by latitude
library(magrittr) # piping functions for code readability
###############################################################################
# Import the observed present-day climatologies which we'll use to calibrate the
# model. We use WorldClim data at 5min resolution here, which can be easily
# downloaded using the *getData()* function in the *raster* package.
importWC <- function(var) {
  # Download a monthly WorldClim climatology at 5 arc-min resolution, crop
  # it to the European study window, and label the 12 layers by month.
  #
  # var: WorldClim variable code, e.g. "tmean" or "prec".
  #
  # Fix: use TRUE rather than the reassignable shorthand T.
  raster::getData('worldclim', var = var, res = 5, download = TRUE) %>%
    crop(extent(-15, 40, 30, 58)) %>%
    set_names(month.name)
}
obs.tmp <- importWC('tmean') %>% divide_by(10) # convert from degrees Celsius * 10
obs.prc <- importWC('prec')
###############################################################################
# Import climate-model outputs from the CCSM4 20th century historical run. Subset
# to the last 50 years of the simulation, average each month to create a
# climatology, rotate the map so that western hemisphere longitudes are negative,
# reproject to 5 arc minute resolution, and mask out ocean pixels
# Read a CCSM4 monthly NetCDF history file and build a 12-layer monthly
# climatology over the study region.
#
# dir   : path to the NetCDF file.
# level : vertical level to read (default 1).
#
# Steps: keep simulation months 1201-1800 (50 years, labelled 1950-2000 in
# the original comment), average each calendar month, reset the extent so
# rotate() can shift longitudes from 0..360 to -180..180, then resample
# onto the WorldClim grid and mask to land cells.
# NOTE(review): relies on the global `obs.prc` being defined before this
# function is called.
importGCM <- function(dir, level = 1){
gcm.in <- brick(dir, level = level) %>%
extract2(1201:1800) %>% # years 1950-2000
stackApply(indices = 1:12, fun = mean) # monthly averages
extent(gcm.in) <- extent(0, 360, -90, 90) # adjustment needed for rotate command
gcm.in %>% rotate %>% projectRaster(obs.prc) %>% mask(obs.prc)
}
hist.prcl <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PRECL.185001-200512.nc') %>% multiply_by(2629743830)
hist.prcc <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PRECC.185001-200512.nc') %>% multiply_by(2629743830)
hist.tmp <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.TREFHT.185001-200512.nc') %>% subtract(273.15)
hist.psl <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PSL.185001-200512.nc') %>% divide_by(1000)
hist.q <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.QREFHT.185001-200512.nc')
hist.u <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.U.185001-200512.nc', level = 26) # ~60m
hist.v <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.V.185001-200512.nc', level = 26) # ~60m
hist.z <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.Z3.185001-200512.nc', level = 23) # ~850 geopotential
###############################################################################
# Import a DEM and use it to generate topographic predictors: elevation,
# distance to the ocean, and the intensity of orographic lifting.
elev <- raster('alt_5m_bil/alt.bil') %>% crop(extent(-15, 40, 30, 58)) # import WorldClim 5m dem
# Calculate diffusive continentality (DCO) or distance to ocean in km.
dco <- raster('alt_5m_bil/alt.bil') %>% # reimport WorldClim 5m dem
crop(extent(-20, 45, 30, 65)) %>% # start with a wider region to get accurate distances
reclassify(c(-Inf, Inf, NA, NA, NA, 1)) %>% # reverse NA and non-NA cells
distance(doEdge = T) %>% # calculate the distances
crop(extent(-15, 40, 30, 58)) %>% # crop to study region
mask(elev) %>% # mask out ocean cells
divide_by(1000) # convert to km
# Calculate the velocity of orographic lifting as a function of wind direction,
# velocity, and terrain slope and aspect
dir <- (270 - overlay(hist.v, hist.u, fun = atan2) * (180 / pi)) %% 360
vel <- sqrt(hist.u ^ 2 + hist.v ^ 2)
aspect <- elev %>% terrain(opt='aspect', unit = 'degrees')
slope <- elev %>% terrain(opt = 'slope', unit = 'degrees')
delta <- abs(dir - aspect) # distance between wind angle and slope orientation
values(delta) <- ifelse(values(delta) > 180, 360 - values(delta), values(delta))
oro <- sin(slope * 2 * pi / 180) * cos(delta * pi / 180) * vel * .5
###############################################################################
# We can expect some random effects based on location and month, calculate these
# values.
month <- obs.prc %>% setValues(c(as.factor(rep(1:12, each = ncell(oro)))))
lat <- elev %>%
coordinates %>%
extract( , 2) %>%
setValues(elev, .)
lon <- elev %>%
coordinates %>%
extract( , 1) %>%
setValues(elev, .)
###############################################################################
# Put all the predictor and response variables together, month by month, and
# remove the original files from the workspace.
cal.vars <- sapply(1:12, function(x){
brick(obs.tmp[[x]], obs.prc[[x]], hist.tmp[[x]], hist.q[[x]], hist.prcc[[x]],
hist.prcl[[x]], hist.psl[[x]], hist.z[[x]], elev, dco, oro[[x]],
month[[x]], lat, lon) %>%
setNames(c('obs.tmp', 'obs.prc','TREFHT', 'Q', 'PRCC', 'PRCL', 'PSL', 'Z3',
'elev','dco', 'oro', 'month', 'lat','lon'))
})
rm(hist.tmp, hist.q, hist.prcc, hist.prcl, hist.psl, hist.z, hist.v, hist.u,
slope, aspect, oro, dir, vel, delta)
# Sample the variables at random points, weighting for latitude
cal.data <- lapply(cal.vars, function(x) (raster::extract(x, randomPoints(elev, 20000)) %>% data.frame)) %>%
do.call(rbind, .)
write.csv(cal.data, 'cal_data.csv')
###############################################################################
# Fit the GAM for temperature
fit.tmp <- gam(obs.tmp ~ s(TREFHT, bs = 'cr') +
s(Z3, bs = 'cr') +
s(elev, bs = 'cr') +
s(dco, bs = 'cr'),
method = 'REML', data = cal.data)
fit.tmp
summary(fit.tmp)
gam.check(fit.tmp)
plot(fit.tmp, shade=T, seWithMean = T, pages = 1)
###############################################################################
# Fit the GAMs for precipitation occurrence and amount
fit.prc.occur <- bam(factor(obs.prc >= 1) ~ s(PRCC) +
s(PRCL) +
s(Z3) +
s(TREFHT),
family = binomial, method = 'REML', data = cal.data)
summary(fit.prc.occur)
gam.check(fit.prc.occur)
plot(fit.prc.occur, seWithMean = T, shade = T, pages = 1)
levelplot(predict(cal.vars[[7]], fit.prc.occur, type = 'response'))
levelplot(obs.prc[[7]] < 1)
fit.prc <- bam(obs.prc ~ s(PRCC, bs = 'cr') +
s(PRCL, bs = 'cr') +
s(PSL, bs = 'cr') +
s(Q, bs = 'cr') +
s(oro, bs = 'cr') +
s(Z3, bs = 'cr') +
s(elev, bs = 'cr') +
s(dco, bs = 'cr') +
s(month, bs = 're'),
family = Gamma(link = 'log'), method = 'REML', data = cal.data[cal.data$obs.prc >= 1, ])
## set up an automatic system to validate outside of calibration domain, but same lat
# just calculate the RMSE and try to minimize | /Fitting GAM to CCSM.R | no_license | nick-gauthier/Risk-Resilience | R | false | false | 7,374 | r | ###############################################################################
# A script for fitting a GAMM to observed WorldClim data and the outputs of #
# CCSM4 climate models over Europe. Includes code to test model fit. #
# The GAMM models are saved at the end for use in other code. #
# #
# Author: Nicolas Gauthier #
###############################################################################
###############################################################################
# Load necessary R packages
library(raster) # functions for managing and processing raster data
library(mgcv) # functions to fit and analyze GAMs
library(dismo) # functions to sample points weighted by latitude
library(magrittr) # piping functions for code readability
###############################################################################
# Import the observed present-day climatologies which we'll use to calibrate the
# model. We use WorldClim data at 5min resolution here, which can be easily
# downloaded using the *getData()* function in the *raster* package.
importWC <- function(var) {
  # Download a monthly WorldClim climatology at 5 arc-min resolution, crop
  # it to the European study window, and label the 12 layers by month.
  #
  # var: WorldClim variable code, e.g. "tmean" or "prec".
  #
  # Fix: use TRUE rather than the reassignable shorthand T.
  raster::getData('worldclim', var = var, res = 5, download = TRUE) %>%
    crop(extent(-15, 40, 30, 58)) %>%
    set_names(month.name)
}
obs.tmp <- importWC('tmean') %>% divide_by(10) # convert from degrees Celsius * 10
obs.prc <- importWC('prec')
###############################################################################
# Import climate-model outputs from the CCSM4 20th century historical run. Subset
# to the last 50 years of the simulation, average each month to create a
# climatology, rotate the map so that western hemisphere longitudes are negative,
# reproject to 5 arc minute resolution, and mask out ocean pixels
importGCM <- function(dir, level = 1) {
  # Build a 12-month climatology from a CCSM4 monthly NetCDF file: subset
  # to simulation months 1201-1800 (years 1950-2000 per the original
  # comment), average by calendar month, rotate longitudes to -180..180,
  # resample to the WorldClim grid and mask ocean cells (uses the global
  # `obs.prc` as the template raster).
  monthly <- brick(dir, level = level) %>%
    extract2(1201:1800) %>%
    stackApply(indices = 1:12, fun = mean)
  # rotate() expects a 0..360 extent
  extent(monthly) <- extent(0, 360, -90, 90)
  monthly %>%
    rotate() %>%
    projectRaster(obs.prc) %>%
    mask(obs.prc)
}
hist.prcl <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PRECL.185001-200512.nc') %>% multiply_by(2629743830)
hist.prcc <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PRECC.185001-200512.nc') %>% multiply_by(2629743830)
hist.tmp <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.TREFHT.185001-200512.nc') %>% subtract(273.15)
hist.psl <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.PSL.185001-200512.nc') %>% divide_by(1000)
hist.q <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.QREFHT.185001-200512.nc')
hist.u <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.U.185001-200512.nc', level = 26) # ~60m
hist.v <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.V.185001-200512.nc', level = 26) # ~60m
hist.z <- importGCM('GCM/b40.20th.track1.1deg.005.cam2.h0.Z3.185001-200512.nc', level = 23) # ~850 geopotential
###############################################################################
# Import a DEM and use it to generate topographic predictors: elevation,
# distance to the ocean, and the intensity of orographic lifting.
elev <- raster('alt_5m_bil/alt.bil') %>% crop(extent(-15, 40, 30, 58)) # import WorldClim 5m dem
# Calculate diffusive continentality (DCO) or distance to ocean in km.
dco <- raster('alt_5m_bil/alt.bil') %>% # reimport WorldClim 5m dem
crop(extent(-20, 45, 30, 65)) %>% # start with a wider region to get accurate distances
reclassify(c(-Inf, Inf, NA, NA, NA, 1)) %>% # reverse NA and non-NA cells
distance(doEdge = T) %>% # calculate the distances
crop(extent(-15, 40, 30, 58)) %>% # crop to study region
mask(elev) %>% # mask out ocean cells
divide_by(1000) # convert to km
# Calculate the velocity of orographic lifting as a function of wind direction,
# velocity, and terrain slope and aspect
dir <- (270 - overlay(hist.v, hist.u, fun = atan2) * (180 / pi)) %% 360
vel <- sqrt(hist.u ^ 2 + hist.v ^ 2)
aspect <- elev %>% terrain(opt='aspect', unit = 'degrees')
slope <- elev %>% terrain(opt = 'slope', unit = 'degrees')
delta <- abs(dir - aspect) # distance between wind angle and slope orientation
values(delta) <- ifelse(values(delta) > 180, 360 - values(delta), values(delta))
oro <- sin(slope * 2 * pi / 180) * cos(delta * pi / 180) * vel * .5
###############################################################################
# We can expect some random effects based on location and month, calculate these
# values.
month <- obs.prc %>% setValues(c(as.factor(rep(1:12, each = ncell(oro)))))
lat <- elev %>%
coordinates %>%
extract( , 2) %>%
setValues(elev, .)
lon <- elev %>%
coordinates %>%
extract( , 1) %>%
setValues(elev, .)
###############################################################################
# Put all the predictor and response variables together, month by month, and
# remove the original files from the workspace.
cal.vars <- sapply(1:12, function(x){
brick(obs.tmp[[x]], obs.prc[[x]], hist.tmp[[x]], hist.q[[x]], hist.prcc[[x]],
hist.prcl[[x]], hist.psl[[x]], hist.z[[x]], elev, dco, oro[[x]],
month[[x]], lat, lon) %>%
setNames(c('obs.tmp', 'obs.prc','TREFHT', 'Q', 'PRCC', 'PRCL', 'PSL', 'Z3',
'elev','dco', 'oro', 'month', 'lat','lon'))
})
rm(hist.tmp, hist.q, hist.prcc, hist.prcl, hist.psl, hist.z, hist.v, hist.u,
slope, aspect, oro, dir, vel, delta)
# Sample the variables at random points, weighting for latitude
cal.data <- lapply(cal.vars, function(x) (raster::extract(x, randomPoints(elev, 20000)) %>% data.frame)) %>%
do.call(rbind, .)
write.csv(cal.data, 'cal_data.csv')
###############################################################################
# Fit the GAM for temperature
fit.tmp <- gam(obs.tmp ~ s(TREFHT, bs = 'cr') +
s(Z3, bs = 'cr') +
s(elev, bs = 'cr') +
s(dco, bs = 'cr'),
method = 'REML', data = cal.data)
fit.tmp
summary(fit.tmp)
gam.check(fit.tmp)
plot(fit.tmp, shade=T, seWithMean = T, pages = 1)
###############################################################################
# Fit the GAMs for precipitation occurrence and amount
fit.prc.occur <- bam(factor(obs.prc >= 1) ~ s(PRCC) +
s(PRCL) +
s(Z3) +
s(TREFHT),
family = binomial, method = 'REML', data = cal.data)
summary(fit.prc.occur)
gam.check(fit.prc.occur)
plot(fit.prc.occur, seWithMean = T, shade = T, pages = 1)
levelplot(predict(cal.vars[[7]], fit.prc.occur, type = 'response'))
levelplot(obs.prc[[7]] < 1)
fit.prc <- bam(obs.prc ~ s(PRCC, bs = 'cr') +
s(PRCL, bs = 'cr') +
s(PSL, bs = 'cr') +
s(Q, bs = 'cr') +
s(oro, bs = 'cr') +
s(Z3, bs = 'cr') +
s(elev, bs = 'cr') +
s(dco, bs = 'cr') +
s(month, bs = 're'),
family = Gamma(link = 'log'), method = 'REML', data = cal.data[cal.data$obs.prc >= 1, ])
## set up an automatic system to validate outside of calibration domain, but same lat
# just calculate the RMSE and try to minimize |
tabPanelAbout <- source("about.R",local=T)$value
headerPanel_2 <- function(title, h, windowTitle=title) {
tagList(
tags$head(tags$title(windowTitle)),
h(title)
)
}
shinyUI(fluidPage(
source("header.R",local=T)$value,
fluidRow(
source("sidebar.R",local=T)$value,
source("main.R",local=T)$value
)
))
| /plot3D/ui.R | no_license | matlhatsir/shiny-apps | R | false | false | 326 | r | tabPanelAbout <- source("about.R",local=T)$value
headerPanel_2 <- function(title, h, windowTitle=title) {
tagList(
tags$head(tags$title(windowTitle)),
h(title)
)
}
shinyUI(fluidPage(
source("header.R",local=T)$value,
fluidRow(
source("sidebar.R",local=T)$value,
source("main.R",local=T)$value
)
))
|
library(funData)
### Name: tensorProduct
### Title: Tensor product for univariate functions on one-dimensional
### domains
### Aliases: tensorProduct
### ** Examples
### Tensor product of two functional data objects
x <- seq(0, 2*pi, 0.1)
f1 <- funData(x, outer(seq(0.75, 1.25, 0.1), sin(x)))
y <- seq(-pi, pi, 0.1)
f2 <- funData(y, outer(seq(0.25, 0.75, 0.1), sin(y)))
plot(f1, main = "f1")
plot(f2, main = "f2")
tP <- tensorProduct(f1, f2)
dimSupp(tP)
plot(tP, obs = 1)
### Tensor product of three functional data objects
z <- seq(-1, 1, 0.05)
f3 <- funData(z, outer(seq(0.75, 1.25, 0.1), z^2))
plot(f1, main = "f1")
plot(f2, main = "f2")
plot(f3, main = "f3")
tP2 <- tensorProduct(f1, f2, f3)
dimSupp(tP2)
| /data/genthat_extracted_code/funData/examples/tensorProduct.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 724 | r | library(funData)
### Name: tensorProduct
### Title: Tensor product for univariate functions on one-dimensional
### domains
### Aliases: tensorProduct
### ** Examples
### Tensor product of two functional data objects
x <- seq(0, 2*pi, 0.1)
f1 <- funData(x, outer(seq(0.75, 1.25, 0.1), sin(x)))
y <- seq(-pi, pi, 0.1)
f2 <- funData(y, outer(seq(0.25, 0.75, 0.1), sin(y)))
plot(f1, main = "f1")
plot(f2, main = "f2")
tP <- tensorProduct(f1, f2)
dimSupp(tP)
plot(tP, obs = 1)
### Tensor product of three functional data objects
z <- seq(-1, 1, 0.05)
f3 <- funData(z, outer(seq(0.75, 1.25, 0.1), z^2))
plot(f1, main = "f1")
plot(f2, main = "f2")
plot(f3, main = "f3")
tP2 <- tensorProduct(f1, f2, f3)
dimSupp(tP2)
|
\name{QCAGUI-package}
\alias{QCAGUI-package}
\alias{QCAGUI}
\docType{package}
\title{
Qualitative Comparative Analysis GUI
}
\description{
Starting with version 2.5, the package QCAGUI was merged back in the main package \pkg{QCA}.
The entire namespace and code from \pkg{QCAGUI} has been moved to package \pkg{QCA}.
For details, see:
?QCA
}
\details{
\tabular{ll}{
Package: \tab QCAGUI\cr
Type: \tab Package\cr
Version: \tab 2.5\cr
Date: \tab 2016-11-13\cr
License: \tab GPL (>= 2)\cr
}
}
\author{
\bold{Authors}:\cr
Adrian Dusa\cr
Department of Sociology\cr
University of Bucharest\cr
\email{dusa.adrian@unibuc.ro}
\bold{Maintainer}:\cr
Adrian Dusa
}
\keyword{package}
| /man/QCAGUI.package.Rd | no_license | cran/QCAGUI | R | false | false | 719 | rd | \name{QCAGUI-package}
\alias{QCAGUI-package}
\alias{QCAGUI}
\docType{package}
\title{
Qualitative Comparative Analysis GUI
}
\description{
Starting with version 2.5, the package QCAGUI was merged back in the main package \pkg{QCA}.
The entire namespace and code from \pkg{QCAGUI} has been moved to package \pkg{QCA}.
For details, see:
?QCA
}
\details{
\tabular{ll}{
Package: \tab QCAGUI\cr
Type: \tab Package\cr
Version: \tab 2.5\cr
Date: \tab 2016-11-13\cr
License: \tab GPL (>= 2)\cr
}
}
\author{
\bold{Authors}:\cr
Adrian Dusa\cr
Department of Sociology\cr
University of Bucharest\cr
\email{dusa.adrian@unibuc.ro}
\bold{Maintainer}:\cr
Adrian Dusa
}
\keyword{package}
|
# This R script gets and performs some cleaning on Human Activity Recognition database
# built from the recordings of 30 subjects performing activities of daily living (ADL)
# while carrying a waist-mounted smartphone with embedded inertial sensors.
# The full description of the data set is available at:
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
library(plyr)
##Download the data from source. Unzip it in data folder
downloadData = function() {
"Checks for data directory and creates one if it doesn't exist"
if (!file.exists("data")) {
message("Creating data directory")
dir.create("data")
}
if (!file.exists("data/UCI HAR Dataset")) {
# download the data
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile="data/UCI_HAR_data.zip"
message("Downloading data")
download.file(fileURL, destfile=zipfile, method="curl")
unzip(zipfile, exdir="data")
}
}
## Merge training and test datasets
mergeDatasets = function() {
"Merge training and test datasets"
# Read data
message("reading X_train.txt")
trainingX <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
trainingY <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
trainingSubject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
testX <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
testY <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
testSubject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Merge
mergedX <- rbind(trainingX, testX)
mergedY <- rbind(trainingY, testY)
mergedSubject <- rbind(trainingSubject, testSubject)
# merge train and test datasets and return
list(x=mergedX, y=mergedY, subject=mergedSubject)
}
## Given the dataset (x values), extract only the measurements on the mean
## and standard deviation for each measurement.
extractMeanAndStd = function(df) {
# Read the feature list file
features <- read.table("data/UCI HAR Dataset/features.txt")
# Find the mean and std columns
meanCol <- sapply(features[,2], function(x) grepl("mean()", x, fixed=T))
stdCol <- sapply(features[,2], function(x) grepl("std()", x, fixed=T))
# Extract them from the data
edf <- df[, (meanCol | stdCol)]
colnames(edf) <- features[(meanCol | stdCol), 2]
edf
}
## Use descriptive activity names to name the activities in the dataset
nameActivities = function(df) {
colnames(df) <- "activity"
df$activity[df$activity == 1] = "WALKING"
df$activity[df$activity == 2] = "WALKING_UPSTAIRS"
df$activity[df$activity == 3] = "WALKING_DOWNSTAIRS"
df$activity[df$activity == 4] = "SITTING"
df$activity[df$activity == 5] = "STANDING"
df$activity[df$activity == 6] = "LAYING"
df
}
##Function which needs to be called to create the tidy data
cleanData = function() {
# Download data
downloadData()
# merge training and test datasets. merge.datasets function returns a list
# of three dataframes: X, y, and subject
merged <- mergeDatasets()
# Extract only the measurements of the mean and standard deviation for each
# measurement
cx <- extractMeanAndStd(merged$x)
# Name activities
cy <- nameActivities(merged$y)
# Use descriptive column name for subjects
colnames(merged$subject) <- c("subject")
#Combine mean-std values (x), activities (y) and subjects into one data frame
combined <- cbind(cx, cy, merged$subject)
# Given X values, y values and subjects, create an independent tidy dataset
# with the average of each variable for each activity and each subject.
tidy <- ddply(combined, .(subject, activity), function(x) colMeans(x[,1:60]))
# Write tidy dataset as csv
write.csv(tidy, "UCI_HAR_tidy.csv", row.names=FALSE)
}
| /GettingAndCleaningDataCourseProject/run_analysis.R | no_license | ssprasad/datasciencecoursera | R | false | false | 4,095 | r | # This R script gets and performs some cleaning on Human Activity Recognition database
# built from the recordings of 30 subjects performing activities of daily living (ADL)
# while carrying a waist-mounted smartphone with embedded inertial sensors.
# The full description of the data set is available at:
# http://archive.ics.uci.edu/ml/datasets/Human+Activity+Recognition+Using+Smartphones
library(plyr)
##Download the data from source. Unzip it in data folder
downloadData = function() {
"Checks for data directory and creates one if it doesn't exist"
if (!file.exists("data")) {
message("Creating data directory")
dir.create("data")
}
if (!file.exists("data/UCI HAR Dataset")) {
# download the data
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
zipfile="data/UCI_HAR_data.zip"
message("Downloading data")
download.file(fileURL, destfile=zipfile, method="curl")
unzip(zipfile, exdir="data")
}
}
## Merge training and test datasets
mergeDatasets = function() {
"Merge training and test datasets"
# Read data
message("reading X_train.txt")
trainingX <- read.table("data/UCI HAR Dataset/train/X_train.txt")
message("reading y_train.txt")
trainingY <- read.table("data/UCI HAR Dataset/train/y_train.txt")
message("reading subject_train.txt")
trainingSubject <- read.table("data/UCI HAR Dataset/train/subject_train.txt")
message("reading X_test.txt")
testX <- read.table("data/UCI HAR Dataset/test/X_test.txt")
message("reading y_test.txt")
testY <- read.table("data/UCI HAR Dataset/test/y_test.txt")
message("reading subject_test.txt")
testSubject <- read.table("data/UCI HAR Dataset/test/subject_test.txt")
# Merge
mergedX <- rbind(trainingX, testX)
mergedY <- rbind(trainingY, testY)
mergedSubject <- rbind(trainingSubject, testSubject)
# merge train and test datasets and return
list(x=mergedX, y=mergedY, subject=mergedSubject)
}
## Given the dataset (x values), extract only the measurements on the mean
## and standard deviation for each measurement.
extractMeanAndStd = function(df) {
# Read the feature list file
features <- read.table("data/UCI HAR Dataset/features.txt")
# Find the mean and std columns
meanCol <- sapply(features[,2], function(x) grepl("mean()", x, fixed=T))
stdCol <- sapply(features[,2], function(x) grepl("std()", x, fixed=T))
# Extract them from the data
edf <- df[, (meanCol | stdCol)]
colnames(edf) <- features[(meanCol | stdCol), 2]
edf
}
## Use descriptive activity names to name the activities in the dataset
nameActivities = function(df) {
colnames(df) <- "activity"
df$activity[df$activity == 1] = "WALKING"
df$activity[df$activity == 2] = "WALKING_UPSTAIRS"
df$activity[df$activity == 3] = "WALKING_DOWNSTAIRS"
df$activity[df$activity == 4] = "SITTING"
df$activity[df$activity == 5] = "STANDING"
df$activity[df$activity == 6] = "LAYING"
df
}
##Function which needs to be called to create the tidy data
cleanData = function() {
# Download data
downloadData()
# merge training and test datasets. merge.datasets function returns a list
# of three dataframes: X, y, and subject
merged <- mergeDatasets()
# Extract only the measurements of the mean and standard deviation for each
# measurement
cx <- extractMeanAndStd(merged$x)
# Name activities
cy <- nameActivities(merged$y)
# Use descriptive column name for subjects
colnames(merged$subject) <- c("subject")
#Combine mean-std values (x), activities (y) and subjects into one data frame
combined <- cbind(cx, cy, merged$subject)
# Given X values, y values and subjects, create an independent tidy dataset
# with the average of each variable for each activity and each subject.
tidy <- ddply(combined, .(subject, activity), function(x) colMeans(x[,1:60]))
# Write tidy dataset as csv
write.csv(tidy, "UCI_HAR_tidy.csv", row.names=FALSE)
}
|
# using Rscript
# if doesn't work, add Rscript to PATH
args <- commandArgs(trailingOnly = TRUE);
source('setup.R');
source('loadAll.R');
print(args);
errFile <- args[1];
fnFile <- args[2];
trFile <- args[3];
# error plot first...
png(filename="../classification_err.png");
err <- readcsv(errFile);
error_plot(err$it, err$y1, err$it, err$y2);
dev.off();
# ...and then function plot
png(filename="../classification_fun.png");
fn <- readcsv(fnFile);
tr <- readcsv(trFile);
fnCl <- prepare_classes(fn);
trCl <- prepare_classes(tr);
class_plot(trCl$clX, trCl$clY, fnCl$clX, fnCl$clY);
dev.off(); | /Rplots/classification_script.R | permissive | sealionkat/sn-mlp | R | false | false | 601 | r | # using Rscript
# if doesn't work, add Rscript to PATH
args <- commandArgs(trailingOnly = TRUE);
source('setup.R');
source('loadAll.R');
print(args);
errFile <- args[1];
fnFile <- args[2];
trFile <- args[3];
# error plot first...
png(filename="../classification_err.png");
err <- readcsv(errFile);
error_plot(err$it, err$y1, err$it, err$y2);
dev.off();
# ...and then function plot
png(filename="../classification_fun.png");
fn <- readcsv(fnFile);
tr <- readcsv(trFile);
fnCl <- prepare_classes(fn);
trCl <- prepare_classes(tr);
class_plot(trCl$clX, trCl$clY, fnCl$clX, fnCl$clY);
dev.off(); |
library(detrendeR)
### Name: detrender
### Title: R detrendeR
### Aliases: detrender
### ** Examples
detrender()
| /data/genthat_extracted_code/detrendeR/examples/detrender.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 120 | r | library(detrendeR)
### Name: detrender
### Title: R detrendeR
### Aliases: detrender
### ** Examples
detrender()
|
plotdir <- '~/Documents/gistempWebsite/websiteErrataMaterials'
# all of the time indicies used in plotting
tYear <- 1854:2014
tDec <- seq(1850, 2010, by=10)
tYearFull <- 1850:2017
tYearFinal <- 1880:2016
tYear2018 <- 1880:2018
# decades to be used when plotting a limited range
subDec <- 4:17
# Old SST
annMatSWrong <- extratTSRdat(ensemblesS,'~/Documents/gistempRaw/ERSSTMeanWrong',tYear)
sigma2SWrong <- apply(annMatSWrong,1,var)
sigma2SFinalWrong <- c(sigma2SWrong[tYear %in% tYearFinal], rep(sigma2SWrong[length(sigma2SWrong)],2))
# New SST
annMatS <- extratTSRdat(ensemblesS,'Data/Shared/ERSST',tYear)
sigma2S <- apply(annMatS,1,var)
sigma2SFinal <- c(sigma2S[tYear %in% tYearFinal], rep(sigma2S[length(sigma2S)],2))
# Show changes to the SST uncertainty
pdf(sprintf('%s/sstCorrection.pdf',plotdir),10,7)
par(mar=c(5, 5, 4, 3) + 0.1)
plot(tYearFinal,sqrt(sigma2SFinalWrong)*1.96,type='l',lwd=1.5,col='red',ylim=c(0,0.2),xlab='Year',
ylab='Annual Mean SST Uncertainty 95% Confidence Interval', main='Corrections to the GISTEMP Global SST Uncertainty')
points(tYearFinal,sqrt(sigma2SFinal)*1.96,type='l',lwd=1.5)
grid(lwd=1.5)
legend('bottomleft',c('Original','Correction'),
col=rep(c('red','black'),1),lwd=2,bg='white',cex=1.25)
dev.off()
# Show changes to the resulting total uncertainty
# load things
reanalysis <- 'ERA'
load(sprintf('Data/%s/meanTimeSeries_%s.Rda',reanalysis,reanalysis))
load(sprintf('Data/%s/area_%s.Rda',reanalysis,reanalysis))
totalHomog <- read.csv('Data/Shared/GHCN/total-homog-uncertainty.csv',header=FALSE)
paramHomog <- read.csv('Data/Shared/GHCN/parametric-uncertainty.csv',header=FALSE)
# calculate the LSAT
globalLandCI <- totalLandUncertainty(resultsList,tDec)
# Global Uncertainties:
# land calcs (empirical estimates) [take only through 2014]
sigma2L <- decadeToYear(globalLandCI$diffVar,tYear,tDec)[1:length(tYear)]
# land variance calcs running over all 1880-2016
sigma2LFinal <- decadeToYear(globalLandCI$diffVar,tYearFull,tDec)[tYearFull %in% tYearFinal]
# total land CI calculation (adding in homogonization uncertainty)
totalCIWrong <- sqrt(AL^2 * totalLandVar + AS^2 * sigma2SFinalWrong)*1.96
totalCI <- sqrt(AL^2 * totalLandVar + AS^2 * sigma2SFinal)*1.96
# make the plot!
pdf(sprintf('%s/totalCorrection.pdf',plotdir),10,7)
par(mar=c(5, 5, 4, 3) + 0.1)
plot(tYearFinal,totalCIWrong,type='l',lwd=1.5,col='red',ylim=c(0,0.2),xlab='Year',
ylab='Annual Mean SST Uncertainty 95% Confidence Interval', main='Corrections to the GISTEMP Global SST Uncertainty')
points(tYearFinal,totalCI,type='l',lwd=1.5)
grid(lwd=1.5)
legend('bottomleft',c('Original','Correction'),
col=rep(c('red','black'),1),lwd=2,bg='white',cex=1.25)
dev.off()
| /Code/Scratch/documentSstChanges.R | no_license | nlenssen/GistempUncertainty | R | false | false | 2,715 | r | plotdir <- '~/Documents/gistempWebsite/websiteErrataMaterials'
# all of the time indicies used in plotting
tYear <- 1854:2014
tDec <- seq(1850, 2010, by=10)
tYearFull <- 1850:2017
tYearFinal <- 1880:2016
tYear2018 <- 1880:2018
# decades to be used when plotting a limited range
subDec <- 4:17
# Old SST
annMatSWrong <- extratTSRdat(ensemblesS,'~/Documents/gistempRaw/ERSSTMeanWrong',tYear)
sigma2SWrong <- apply(annMatSWrong,1,var)
sigma2SFinalWrong <- c(sigma2SWrong[tYear %in% tYearFinal], rep(sigma2SWrong[length(sigma2SWrong)],2))
# New SST
annMatS <- extratTSRdat(ensemblesS,'Data/Shared/ERSST',tYear)
sigma2S <- apply(annMatS,1,var)
sigma2SFinal <- c(sigma2S[tYear %in% tYearFinal], rep(sigma2S[length(sigma2S)],2))
# Show changes to the SST uncertainty
pdf(sprintf('%s/sstCorrection.pdf',plotdir),10,7)
par(mar=c(5, 5, 4, 3) + 0.1)
plot(tYearFinal,sqrt(sigma2SFinalWrong)*1.96,type='l',lwd=1.5,col='red',ylim=c(0,0.2),xlab='Year',
ylab='Annual Mean SST Uncertainty 95% Confidence Interval', main='Corrections to the GISTEMP Global SST Uncertainty')
points(tYearFinal,sqrt(sigma2SFinal)*1.96,type='l',lwd=1.5)
grid(lwd=1.5)
legend('bottomleft',c('Original','Correction'),
col=rep(c('red','black'),1),lwd=2,bg='white',cex=1.25)
dev.off()
# Show changes to the resulting total uncertainty
# load things
reanalysis <- 'ERA'
load(sprintf('Data/%s/meanTimeSeries_%s.Rda',reanalysis,reanalysis))
load(sprintf('Data/%s/area_%s.Rda',reanalysis,reanalysis))
totalHomog <- read.csv('Data/Shared/GHCN/total-homog-uncertainty.csv',header=FALSE)
paramHomog <- read.csv('Data/Shared/GHCN/parametric-uncertainty.csv',header=FALSE)
# calculate the LSAT
globalLandCI <- totalLandUncertainty(resultsList,tDec)
# Global Uncertainties:
# land calcs (empirical estimates) [take only through 2014]
sigma2L <- decadeToYear(globalLandCI$diffVar,tYear,tDec)[1:length(tYear)]
# land variance calcs running over all 1880-2016
sigma2LFinal <- decadeToYear(globalLandCI$diffVar,tYearFull,tDec)[tYearFull %in% tYearFinal]
# total land CI calculation (adding in homogonization uncertainty)
totalCIWrong <- sqrt(AL^2 * totalLandVar + AS^2 * sigma2SFinalWrong)*1.96
totalCI <- sqrt(AL^2 * totalLandVar + AS^2 * sigma2SFinal)*1.96
# make the plot!
pdf(sprintf('%s/totalCorrection.pdf',plotdir),10,7)
par(mar=c(5, 5, 4, 3) + 0.1)
plot(tYearFinal,totalCIWrong,type='l',lwd=1.5,col='red',ylim=c(0,0.2),xlab='Year',
ylab='Annual Mean SST Uncertainty 95% Confidence Interval', main='Corrections to the GISTEMP Global SST Uncertainty')
points(tYearFinal,totalCI,type='l',lwd=1.5)
grid(lwd=1.5)
legend('bottomleft',c('Original','Correction'),
col=rep(c('red','black'),1),lwd=2,bg='white',cex=1.25)
dev.off()
|
topic_funs <- function(rd) {
funs <- parse_usage(rd)
# Remove all methods for functions documented in this file
name <- purrr::map_chr(funs, "name")
type <- purrr::map_chr(funs, "type")
gens <- name[type == "fun"]
self_meth <- (name %in% gens) & (type %in% c("s3", "s4"))
purrr::map_chr(funs[!self_meth], ~ short_name(.$name, .$type, .$signature))
}
parse_usage <- function(x) {
if (!inherits(x, "tag")) {
usage <- paste0("\\usage{", x, "}")
x <- rd_text(usage, fragment = FALSE)
}
r <- usage_code(x)
if (length(r) == 0) {
return(list())
}
exprs <- tryCatch(
{
parse_exprs(r)
},
error = function(e) {
warning("Failed to parse usage:\n", r, call. = FALSE, immediate. = TRUE)
list()
}
)
purrr::map(exprs, usage_type)
}
short_name <- function(name, type, signature) {
if (!is_syntactic(name)) {
qname <- paste0("`", name, "`")
} else {
qname <- name
}
if (type == "data") {
qname
} else if (type == "fun") {
if (is_infix(name)) {
qname
} else {
paste0(qname, "()")
}
} else {
paste0(qname, "(", paste0("<i><", signature, "></i>", collapse = ","), ")")
}
}
# Given single expression generated from usage_code, extract
usage_type <- function(x) {
if (is_symbol(x)) {
list(type = "data", name = as.character(x))
} else if (is_call(x, "data")) {
list(type = "data", name = as.character(x[[2]]))
} else if (is.call(x)) {
if (identical(x[[1]], quote(`<-`))) {
replacement <- TRUE
x <- x[[2]]
} else {
replacement <- FALSE
}
out <- fun_info(x)
out$replacement <- replacement
out$infix <- is_infix(out$name)
if (replacement) {
out$name <- paste0(out$name, "<-")
}
out
} else {
stop("Unknown type: ", typeof(x), call. = FALSE)
}
}
is_infix <- function(x) {
x <- as.character(x)
ops <- c(
"+", "-", "*", "^", "/",
"==", ">", "<", "!=", "<=", ">=",
"&", "|",
"[[", "[", "$"
)
grepl("^%.*%$", x) || x %in% ops
}
fun_info <- function(x) {
stopifnot(is.call(x))
if (is.call(x[[1]])) {
x <- x[[1]]
if (identical(x[[1]], quote(S3method))) {
list(
type = "s3",
name = as.character(x[[2]]),
signature = as.character(x[[3]])
)
} else if (identical(x[[1]], quote(S4method))) {
list(
type = "s4",
name = as.character(x[[2]]),
signature = purrr::map_chr(as.list(x[[3]][-1]), as.character)
)
} else {
stop("Unknown call: ", as.character(x[[1]]))
}
} else {
list(
type = "fun",
name = as.character(x[[1]]),
signature = NULL
)
}
}
# usage_code --------------------------------------------------------------
# Transform Rd embedded inside usage into parseable R code
usage_code <- function(x) {
UseMethod("usage_code")
}
#' @export
usage_code.Rd <- function(x) {
usage <- purrr::detect(x, inherits, "tag_usage")
usage_code(usage)
}
#' @export
usage_code.NULL <- function(x) character()
# Tag without additional class use
#' @export
usage_code.tag <- function(x) {
if (!identical(class(x), "tag")) {
stop("Undefined tag ", class(x)[[1]], call. = FALSE)
}
paste0(purrr::flatten_chr(purrr::map(x, usage_code)), collapse = "")
}
#' @export
usage_code.tag_dots <- function(x) "..."
#' @export
usage_code.TEXT <- function(x) as.character(x)
#' @export
usage_code.RCODE <- function(x) as.character(x)
#' @export
usage_code.VERB <- function(x) as.character(x)
#' @export
usage_code.COMMENT <- function(x) character()
#' @export
usage_code.tag_S3method <- function(x) {
generic <- paste0(usage_code(x[[1]]), collapse = "")
class <- paste0(usage_code(x[[2]]), collapse = "")
paste0("S3method(`", generic, "`, ", class, ")")
}
#' @export
usage_code.tag_method <- usage_code.tag_S3method
#' @export
usage_code.tag_S4method <- function(x) {
generic <- paste0(usage_code(x[[1]]), collapse = "")
class <- paste0(usage_code(x[[2]]), collapse = "")
paste0("S4method(`", generic, "`, list(", class, "))")
}
#' @export
usage_code.tag_usage <- function(x) {
paste0(purrr::flatten_chr(purrr::map(x, usage_code)), collapse = "")
}
| /R/usage.R | permissive | magosil86/pkgdown | R | false | false | 4,221 | r | topic_funs <- function(rd) {
funs <- parse_usage(rd)
# Remove all methods for functions documented in this file
name <- purrr::map_chr(funs, "name")
type <- purrr::map_chr(funs, "type")
gens <- name[type == "fun"]
self_meth <- (name %in% gens) & (type %in% c("s3", "s4"))
purrr::map_chr(funs[!self_meth], ~ short_name(.$name, .$type, .$signature))
}
parse_usage <- function(x) {
if (!inherits(x, "tag")) {
usage <- paste0("\\usage{", x, "}")
x <- rd_text(usage, fragment = FALSE)
}
r <- usage_code(x)
if (length(r) == 0) {
return(list())
}
exprs <- tryCatch(
{
parse_exprs(r)
},
error = function(e) {
warning("Failed to parse usage:\n", r, call. = FALSE, immediate. = TRUE)
list()
}
)
purrr::map(exprs, usage_type)
}
short_name <- function(name, type, signature) {
if (!is_syntactic(name)) {
qname <- paste0("`", name, "`")
} else {
qname <- name
}
if (type == "data") {
qname
} else if (type == "fun") {
if (is_infix(name)) {
qname
} else {
paste0(qname, "()")
}
} else {
paste0(qname, "(", paste0("<i><", signature, "></i>", collapse = ","), ")")
}
}
# Given single expression generated from usage_code, extract
usage_type <- function(x) {
if (is_symbol(x)) {
list(type = "data", name = as.character(x))
} else if (is_call(x, "data")) {
list(type = "data", name = as.character(x[[2]]))
} else if (is.call(x)) {
if (identical(x[[1]], quote(`<-`))) {
replacement <- TRUE
x <- x[[2]]
} else {
replacement <- FALSE
}
out <- fun_info(x)
out$replacement <- replacement
out$infix <- is_infix(out$name)
if (replacement) {
out$name <- paste0(out$name, "<-")
}
out
} else {
stop("Unknown type: ", typeof(x), call. = FALSE)
}
}
is_infix <- function(x) {
x <- as.character(x)
ops <- c(
"+", "-", "*", "^", "/",
"==", ">", "<", "!=", "<=", ">=",
"&", "|",
"[[", "[", "$"
)
grepl("^%.*%$", x) || x %in% ops
}
fun_info <- function(x) {
stopifnot(is.call(x))
if (is.call(x[[1]])) {
x <- x[[1]]
if (identical(x[[1]], quote(S3method))) {
list(
type = "s3",
name = as.character(x[[2]]),
signature = as.character(x[[3]])
)
} else if (identical(x[[1]], quote(S4method))) {
list(
type = "s4",
name = as.character(x[[2]]),
signature = purrr::map_chr(as.list(x[[3]][-1]), as.character)
)
} else {
stop("Unknown call: ", as.character(x[[1]]))
}
} else {
list(
type = "fun",
name = as.character(x[[1]]),
signature = NULL
)
}
}
# usage_code --------------------------------------------------------------
# Transform Rd embedded inside usage into parseable R code
usage_code <- function(x) {
UseMethod("usage_code")
}
#' @export
usage_code.Rd <- function(x) {
usage <- purrr::detect(x, inherits, "tag_usage")
usage_code(usage)
}
#' @export
usage_code.NULL <- function(x) character()
# Tag without additional class use
#' @export
usage_code.tag <- function(x) {
if (!identical(class(x), "tag")) {
stop("Undefined tag ", class(x)[[1]], call. = FALSE)
}
paste0(purrr::flatten_chr(purrr::map(x, usage_code)), collapse = "")
}
#' @export
usage_code.tag_dots <- function(x) "..."
#' @export
usage_code.TEXT <- function(x) as.character(x)
#' @export
usage_code.RCODE <- function(x) as.character(x)
#' @export
usage_code.VERB <- function(x) as.character(x)
#' @export
usage_code.COMMENT <- function(x) character()
#' @export
usage_code.tag_S3method <- function(x) {
generic <- paste0(usage_code(x[[1]]), collapse = "")
class <- paste0(usage_code(x[[2]]), collapse = "")
paste0("S3method(`", generic, "`, ", class, ")")
}
#' @export
usage_code.tag_method <- usage_code.tag_S3method
#' @export
usage_code.tag_S4method <- function(x) {
generic <- paste0(usage_code(x[[1]]), collapse = "")
class <- paste0(usage_code(x[[2]]), collapse = "")
paste0("S4method(`", generic, "`, list(", class, "))")
}
#' @export
usage_code.tag_usage <- function(x) {
paste0(purrr::flatten_chr(purrr::map(x, usage_code)), collapse = "")
}
|
library(VSURF)
### Name: VSURF_interp
### Title: Interpretation step of VSURF
### Aliases: VSURF_interp VSURF_interp.default VSURF_interp.formula
### ** Examples
data(iris)
iris.thres <- VSURF_thres(iris[,1:4], iris[,5], ntree = 100, nfor.thres = 20)
iris.interp <- VSURF_interp(iris[,1:4], iris[,5], vars = iris.thres$varselect.thres,
nfor.interp = 10)
iris.interp
## Not run:
##D # A more interesting example with toys data (see \code{toys})
##D # (a few minutes to execute)
##D data(toys)
##D toys.thres <- VSURF_thres(toys$x, toys$y)
##D toys.interp <- VSURF_interp(toys$x, toys$y, vars = toys.thres$varselect.thres)
##D toys.interp
## End(Not run)
| /data/genthat_extracted_code/VSURF/examples/VSURF_interp.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 692 | r | library(VSURF)
### Name: VSURF_interp
### Title: Interpretation step of VSURF
### Aliases: VSURF_interp VSURF_interp.default VSURF_interp.formula
### ** Examples
data(iris)
iris.thres <- VSURF_thres(iris[,1:4], iris[,5], ntree = 100, nfor.thres = 20)
iris.interp <- VSURF_interp(iris[,1:4], iris[,5], vars = iris.thres$varselect.thres,
nfor.interp = 10)
iris.interp
## Not run:
##D # A more interesting example with toys data (see \code{toys})
##D # (a few minutes to execute)
##D data(toys)
##D toys.thres <- VSURF_thres(toys$x, toys$y)
##D toys.interp <- VSURF_interp(toys$x, toys$y, vars = toys.thres$varselect.thres)
##D toys.interp
## End(Not run)
|
euclidian_dist <- function(u, v)
{
return (sqrt(sum((u - v)^2)))
}
sort_by_dist <- function(data, u)
{
row <- dim(data)[1]
col <- dim(data)[2]
dst <- matrix(NA, row, 2)
for(i in 1:row)
{
curr_dist <- euclidian_dist(data[i, 1:col-1], u)
dst[i, ] <- c(i, curr_dist)
}
ordered_data <- data[order(dst[, 2]), ]
return (ordered_data)
}
knn <- function(data, u, k=1)
{
# This function gets the data and a point and returns
# the class of the nearest one.
row <- dim(data)[1]
col <- dim(data)[2]
data <- sort_by_dist(data, u)
classes <- data[1:k, col]
counts <- table(classes)
answer <- names(which.max(counts))
return (answer)
}
main <- function()
{
colors <- c("setosa" = "red", "versicolor" = "green3", "virginica" = "blue")
train <- iris[,3:5]
# drawing a plot of petal parameters
plot(train[1:2], pch=21, bg=colors[iris$Species],
col=colors[iris$Species], asp=1, xlab="Length", ylab="Width",
main="1nn on 10 tests")
# generate 10 tests
test <- cbind(runif(10, min=0.1, max=6.9),
runif(10, min=0.1, max=2.4))
# testing knn for k=1
k <- 1
for(i in 1:10)
{
answer <- knn(train, c(test[i, 1], test[i, 2]), k)
if(answer == -1)
{
print('There are no neighbours')
break
}
points(test[i, 1], test[i, 2], pch = 22, bg = colors[answer], asp = 1)
}
legend("bottomright", c("Training objects", "Classified objects"),
pch=c(21, 22), inset=0.01)
}
main() | /main.R | no_license | ametkay12345/ml1 | R | false | false | 1,490 | r | euclidian_dist <- function(u, v)
{
return (sqrt(sum((u - v)^2)))
}
sort_by_dist <- function(data, u)
{
row <- dim(data)[1]
col <- dim(data)[2]
dst <- matrix(NA, row, 2)
for(i in 1:row)
{
curr_dist <- euclidian_dist(data[i, 1:col-1], u)
dst[i, ] <- c(i, curr_dist)
}
ordered_data <- data[order(dst[, 2]), ]
return (ordered_data)
}
knn <- function(data, u, k=1)
{
# This function gets the data and a point and returns
# the class of the nearest one.
row <- dim(data)[1]
col <- dim(data)[2]
data <- sort_by_dist(data, u)
classes <- data[1:k, col]
counts <- table(classes)
answer <- names(which.max(counts))
return (answer)
}
main <- function()
{
colors <- c("setosa" = "red", "versicolor" = "green3", "virginica" = "blue")
train <- iris[,3:5]
# drawing a plot of petal parameters
plot(train[1:2], pch=21, bg=colors[iris$Species],
col=colors[iris$Species], asp=1, xlab="Length", ylab="Width",
main="1nn on 10 tests")
# generate 10 tests
test <- cbind(runif(10, min=0.1, max=6.9),
runif(10, min=0.1, max=2.4))
# testing knn for k=1
k <- 1
for(i in 1:10)
{
answer <- knn(train, c(test[i, 1], test[i, 2]), k)
if(answer == -1)
{
print('There are no neighbours')
break
}
points(test[i, 1], test[i, 2], pch = 22, bg = colors[answer], asp = 1)
}
legend("bottomright", c("Training objects", "Classified objects"),
pch=c(21, 22), inset=0.01)
}
main() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LMEffectMatrices.R
\name{LMEffectMatrices}
\alias{LMEffectMatrices}
\title{Computing the Effect Matrices}
\usage{
LMEffectMatrices(ResLMModelMatrix, outcomes, SS = TRUE,
newSSmethod = TRUE, contrastList = NA)
}
\arguments{
\item{ResLMModelMatrix}{A list of 3 from \code{\link{LMModelMatrix}}}
\item{outcomes}{A \emph{nxm} matrix with n observations and m response variables}
\item{SS}{a logical whether to compute the effect percentage variations}
\item{newSSmethod}{a logical whether to use the new optimized method to compute SS}
\item{contrastList}{a list of contrast for each parameter. The function creates automatically the list by default}
}
\value{
A list with the following elements:
\describe{
\item{\code{formula}}{A formula object with the expression of the GLM used to predict the outcomes}
\item{\code{design}}{A \emph{nxk} data frame with the "free encoded" experimental design.}
\item{\code{ModelMatrix}}{A \emph{nxK} model matrix specifically encoded for the ASCA-GLM method.}
\item{\code{outcomes}}{A \emph{nxm} matrix with n observations and m response variables}
\item{\code{effectMatrices}}{A list of \emph{p} effect matrices for each model terms}
\item{\code{modelMatrixByEffect}}{A list of \emph{p} model matrices by models terms }
\item{\code{predictedvalues}}{A \emph{nxm} matrix with the predicted values}
\item{\code{residuals}}{A \emph{nxm} matrix with the residuals}
\item{\code{parameters}}{A \emph{pxm} matrix with the coefficients of every parameters by response variables}
\item{\code{covariateEffectsNamesUnique}}{A character vector with the \emph{p} unique name of the model terms}
\item{\code{covariateEffectsNames}}{A character vector with \emph{K} names of the coefficients}
}
}
\description{
Runs a GLM model and decomposes the outcomes into effect matrices for each model terms
}
\examples{
data('UCH')
ResLMModelMatrix <- LMModelMatrix(formula=as.formula(UCH$formula),design=UCH$design)
LMEffectMatrices(ResLMModelMatrix,outcomes=UCH$outcomes)
}
| /man/LMEffectMatrices.Rd | no_license | Daadia/LMWiRe-1 | R | false | true | 2,075 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/LMEffectMatrices.R
\name{LMEffectMatrices}
\alias{LMEffectMatrices}
\title{Computing the Effect Matrices}
\usage{
LMEffectMatrices(ResLMModelMatrix, outcomes, SS = TRUE,
newSSmethod = TRUE, contrastList = NA)
}
\arguments{
\item{ResLMModelMatrix}{A list of 3 from \code{\link{LMModelMatrix}}}
\item{outcomes}{A \emph{nxm} matrix with n observations and m response variables}
\item{SS}{a logical whether to compute the effect percentage variations}
\item{newSSmethod}{a logical whether to use the new optimized method to compute SS}
\item{contrastList}{a list of contrast for each parameter. The function creates automatically the list by default}
}
\value{
A list with the following elements:
\describe{
\item{\code{formula}}{A formula object with the expression of the GLM used to predict the outcomes}
\item{\code{design}}{A \emph{nxk} data frame with the "free encoded" experimental design.}
\item{\code{ModelMatrix}}{A \emph{nxK} model matrix specifically encoded for the ASCA-GLM method.}
\item{\code{outcomes}}{A \emph{nxm} matrix with n observations and m response variables}
\item{\code{effectMatrices}}{A list of \emph{p} effect matrices for each model terms}
\item{\code{modelMatrixByEffect}}{A list of \emph{p} model matrices by models terms }
\item{\code{predictedvalues}}{A \emph{nxm} matrix with the predicted values}
\item{\code{residuals}}{A \emph{nxm} matrix with the residuals}
\item{\code{parameters}}{A \emph{pxm} matrix with the coefficients of every parameters by response variables}
\item{\code{covariateEffectsNamesUnique}}{A character vector with the \emph{p} unique name of the model terms}
\item{\code{covariateEffectsNames}}{A character vector with \emph{K} names of the coefficients}
}
}
\description{
Runs a GLM model and decomposes the outcomes into effect matrices for each model terms
}
\examples{
data('UCH')
ResLMModelMatrix <- LMModelMatrix(formula=as.formula(UCH$formula),design=UCH$design)
LMEffectMatrices(ResLMModelMatrix,outcomes=UCH$outcomes)
}
|
#### name:main ####
# NSW air quality data (received from EPA) -----
# Section 1: read the four hourly OEH monitor files (1994-2013), clean them,
# and write a single hourly CSV plus a data dictionary.
library (dplyr)
library(openair)
library(reshape2)
library(stringr)
library(tidyr)
library(readr)
library(lubridate)
# NOTE(review): hard-coded network-drive path + setwd() makes this script
# non-portable; confirm the drive mapping before running.
projdir <- "Q:/Research/Environment_General/Air_Pollution_Monitoring_Stations_NSW/AP_monitor_NSW_1994_2013_hrly/"
setwd(projdir)
dir()
indir <- "data_provided"
dir(indir)
#importing the data (using the openair import function in combination with tbl_df)
nsw9498 <- tbl_df (import (file.path("data_provided", "/OEH (1994-1998)_AllSites_1hrlyData.csv")))
nsw9903 <- tbl_df (import (file.path("data_provided", "/OEH (1999-2003)__AllSites_1hrlyData.csv")))
nsw0408 <- tbl_df (import (file.path("data_provided", "/OEH (2004-2008)__AllSites_1hrlyData.csv")))
nsw0913 <- tbl_df (import (file.path("data_provided", "/OEH (2009-2013)__AllSites_1hrlyData.csv")))
# remove the units: column names appear to end in a one-word unit, so
# word(start = 1, end = -2) drops the final word -- TODO confirm against headers
names(nsw9498)[2:ncol(nsw9498)] <- word(colnames(nsw9498)[2:ncol(nsw9498)], start = 1, end = -2)
names(nsw9903)[2:ncol(nsw9903)] <- word(colnames(nsw9903)[2:ncol(nsw9903)], start = 1, end = -2)
names(nsw0408)[2:ncol(nsw0408)] <- word(colnames(nsw0408)[2:ncol(nsw0408)], start = 1, end = -2)
names(nsw0913)[2:ncol(nsw0913)] <- word(colnames(nsw0913)[2:ncol(nsw0913)], start = 1, end = -2)
# importing sites locations data
nsw.locations <- read_csv(file.path("data_provided", "/locations.csv"))
# binding them together
nswaq9413 <- bind_rows (nsw9498,nsw9903,nsw0408, nsw0913)
# removing the unnecessary data
remove (nsw9498,nsw9903,nsw0408, nsw0913)
#lowercase the column names
names(nswaq9413) <- tolower(colnames(nswaq9413))
#convert all columns but date to "double"
cols = seq(from=2, to=ncol(nswaq9413));
# this next line could be made more efficient.
nswaq9413[,cols] = apply(nswaq9413[,cols], 2, function(x) as.double(as.character(x)))
# remove the columns which all values are NA's
nswaq9413 <- nswaq9413[,colSums(is.na(nswaq9413))< nrow(nswaq9413)]
# keep the variables of interest (pm2.5, pm10, o3, no, co, humidity, and temperature)
# contains("no") also matches no2/nox columns; contains("co") also matches co columns
nswaq9413 <- nswaq9413 %>%
select (date, contains ("pm2.5"), contains("pm10"), contains ("ozone"), contains ("hum"), contains ("tem"), contains ("no"), contains ("co"), contains ("so2"))
#### TODO check if we might alternatively set these to zero, or some
#### code to allow sens test?
# replace all negative values with NA
nswaq9413 [nswaq9413 < 0] <- NA
# check the class of each column
lapply (nswaq9413, class)
write.table(nswaq9413, 'data_derived/nswaq9413.hrly.csv', row.names = F, sep = ",")
# build and save a data dictionary / variable-label table for the hourly data
library(disentangle)
str(nswaq9413)
dd <- data_dictionary(as.data.frame(nswaq9413))
write.csv(dd, "data_derived/nswaq9413.hrly.data_dictionary.csv", row.names = F)
vl <- variable_names_and_labels(datadict=dd)
write.csv(vl, "data_derived/nswaq9413.hrly.variable_names_and_labels.csv", row.names = F)
remove(dd, vl)
# calculate the daily average (at least 75% of data needed for each day otherwise NA)
# WS and WD data should not be averaged using this approach (the columns heading needs to change to ws and wd firstly)
# Section 2: hourly -> daily (mean and daily 1-hr max for no2/o3), then reshape
# the wide per-site columns into one tidy row per site/date.
nswaq9413.daily.avg <- timeAverage (nswaq9413, avg.time = "day", data.thresh = 75, interval = "hour")
nswaq9413.daily.1hrmax <- timeAverage (nswaq9413, avg.time = "day", statistic = "max", data.thresh = 75, interval = "hour") %>%
select ( contains("date"), contains ("no2"), contains ("ozone"))
# rename the max columns so they do not clash with the daily means after the join
colnames <- colnames (nswaq9413.daily.1hrmax)
colnames <- gsub ("no2","no2max",colnames)
colnames <- gsub ("ozone","o3max",colnames)
colnames (nswaq9413.daily.1hrmax) <- colnames
nswaq9413.daily <- left_join (nswaq9413.daily.avg, nswaq9413.daily.1hrmax)
# make a long formatted data
nswaq9413.daily.long <- melt (nswaq9413.daily, id = "date", value.name = "concentration")
# splitting the variable to site and the type of observation
site <- word (nswaq9413.daily.long$variable,start = 1, end = 3) # get the first 3 words (some sites' names include more than one word)
# remove unnecessary words to get down to the site name only
# NOTE(review): the gsub order matters -- " nox"/" no2" must be stripped before
# " no", otherwise the shorter pattern would clobber them.
site <- gsub ("1h","",site)
site <- gsub (" o3max", "", site)
site <- gsub (" no2max", "", site)
site <- gsub (" pm10","",site)
site <- gsub (" pm2.5","",site)
site <- gsub (" temp","",site)
site <- gsub (" humid", "", site)
site <- gsub (" ozone", "", site)
site <- gsub (" nox", "", site)
site <- gsub (" no2", "", site)
site <- gsub (" no", "", site)
site <- gsub (" co", "", site)
site <- gsub (" so2", "", site)
site <- str_trim(site, side = "both") #remove space from both sides of the name
site <- gsub (" ", ".", site) #replace space with dot
#remove space from both side
variable <- str_trim(nswaq9413.daily.long$variable, side = "both")
variable <- word(variable, -3) # get the 3rd word from the end (it is what we want (the name of the variable))
# lookup table mapping the raw token to the tidy observation name
variables <- c ("humid" = "humidity", "ozone" = "o3", "pm10" ="pm10", "pm2.5" = "pm2.5", "temp" = "temp", "co" = "co", "no" = "no", "no2" = "no2", "nox" = "nox", "so2" = "so2", "no2max" = "no2max", "o3max" = "o3max")
# add the "observation" and "site" column to our data
nswaq9413.daily.long$observation <- factor (variables[variable], levels = variables)
nswaq9413.daily.long$site <- site
# remove unnecessary data
remove (site,variable,variables)
# build the tidy data (each column a variable)
nswaq9413.daily <-
tbl_df(nswaq9413.daily.long) %>%
select (-variable) %>%
filter (!is.na(observation)) %>%
spread (observation, concentration) %>%
arrange (site)
# removing unnecessary data
remove (nswaq9413.daily.long)
# attach the sites locations (lon and lat)
nswaq9413.daily <- left_join (nswaq9413.daily, nsw.locations)
# remove unnecessary data
remove (nsw.locations,nswaq9413.daily.avg,nswaq9413.daily.1hrmax)
# change date format to Date
nswaq9413.daily <- nswaq9413.daily %>% mutate (date = as.Date(date))
#subsetting to sydney stations
nswaq9413.daily.sydney <- nswaq9413.daily %>%
filter ( site %in% c ("bringelly", "camden", "chullora", "campbelltown.west","earlwood","lindfield","liverpool","oakdale","prospect","randwick","richmond","rozelle","st.marys","vineyard"))
# selecting the sites which have at least 75% of data available for each variable
# (count NA's per site for every variable, then convert counts to proportions)
sites <- nswaq9413.daily.sydney %>%
group_by (site) %>%
summarise (total.count = n(), na.pm2.5 = sum(is.na(pm2.5)), na.pm10 = sum(is.na(pm10)), na.humidity = sum(is.na(humidity)), na.o3 = sum(is.na(o3)), na.o3max = sum(is.na(o3max)), na.temp = sum(is.na(temp)), na.co = sum(is.na(co)), na.no = sum(is.na(no)), na.no2 = sum(is.na(no2)), na.nox = sum(is.na(nox)), na.so2 = sum(is.na(so2))) %>%
# BUG FIX: no.na.percent previously used na.co (copy-paste error); it now uses na.no.
mutate (pm2.5.na.percent = na.pm2.5/total.count, pm10.na.percent = na.pm10/total.count, humidity.na.percent = na.humidity/total.count, o3.na.percent = na.o3/total.count,o3max.na.percent = na.o3max/total.count, temp.na.percent = na.temp/total.count, co.na.percent = na.co/total.count, no.na.percent = na.no/total.count, no2.na.percent = na.no2/total.count, nox.na.percent = na.nox/total.count, so2.na.percent = na.so2/total.count)
# pm2.5 sites are hard-coded rather than threshold-selected -- presumably the
# only Sydney sites with a usable pm2.5 record; verify against the NA table above.
pm2.5.sites <- data.frame(site=c("earlwood","liverpool","richmond"))
pm10.sites <- sites %>% filter (pm10.na.percent <= 0.25) %>% select (site)
o3.sites <- sites %>% filter (o3.na.percent <= 0.25) %>% select (site)
o3max.sites <- sites %>% filter (o3max.na.percent <= 0.25) %>% select (site)
humidity.sites <- sites %>% filter (humidity.na.percent <= 0.25) %>% select (site)
temp.sites <- sites %>% filter (temp.na.percent <= 0.25) %>% select (site)
co.sites <- sites %>% filter (co.na.percent <= 0.25) %>% select (site)
no.sites <- sites %>% filter (no.na.percent <= 0.25) %>% select (site)
no2.sites <- sites %>% filter (no2.na.percent <= 0.25) %>% select (site)
nox.sites <- sites %>% filter (nox.na.percent <= 0.25) %>% select (site)
so2.sites <- sites %>% filter (so2.na.percent <= 0.25) %>% select (site)
# keep only the qualifying sites for each variable (one subset per pollutant)
nswaq9413.pm2.5.daily.sydney <- left_join (pm2.5.sites, nswaq9413.daily.sydney)
nswaq9413.pm10.daily.sydney <- left_join (pm10.sites, nswaq9413.daily.sydney)
nswaq9413.o3.daily.sydney <- left_join (o3.sites, nswaq9413.daily.sydney)
nswaq9413.o3max.daily.sydney <- left_join (o3max.sites, nswaq9413.daily.sydney)
nswaq9413.humidity.daily.sydney <- left_join (humidity.sites, nswaq9413.daily.sydney)
nswaq9413.temp.daily.sydney <- left_join (temp.sites, nswaq9413.daily.sydney)
nswaq9413.co.daily.sydney <- left_join (co.sites, nswaq9413.daily.sydney)
nswaq9413.no.daily.sydney <- left_join (no.sites, nswaq9413.daily.sydney)
nswaq9413.no2.daily.sydney <- left_join (no2.sites, nswaq9413.daily.sydney)
nswaq9413.nox.daily.sydney <- left_join (nox.sites, nswaq9413.daily.sydney)
nswaq9413.so2.daily.sydney <- left_join (so2.sites, nswaq9413.daily.sydney)
#pm2.5 imputation
# Section 4: impute missing pm2.5 per site using a seasonal site/other-sites ratio,
# then collapse every variable to one Sydney-wide daily mean and save the result.
data <- nswaq9413.pm2.5.daily.sydney %>% select (date, site, pm2.5)
# NOTE(review): the "." placeholder inside these nested ifelse() pipes is
# magrittr-specific and fragile (in the first ifelse it refers to the piped data
# frame, not the season column) -- verify the resulting season labels.
data <- data %>% mutate (month = month(date), year = year(date)) %>%
mutate(season = ifelse (month == 12 | month ==1 | month == 2, "summer",.) %>%
ifelse (month == 3 | month == 4 | month == 5,"autumn",.) %>%
ifelse(month == 6 | month ==7 | month == 8, "winter",.) %>%
ifelse(month == 9 | month ==10 | month == 11, "spring",.)) %>%
mutate (season = as.character(season))
# mean pm2.5 per site/year/season, and the matching mean over the OTHER two sites
data.siteaverage <- data %>% group_by(site,year,season) %>% summarise(site.mean.pm2.5 = mean(pm2.5, na.rm =TRUE))
data.othersitesaverage2 <- data %>% filter (site != "earlwood") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="earlwood")
data.othersitesaverage3 <- data %>% filter (site != "liverpool") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="liverpool")
data.othersitesaverage4 <- data %>% filter (site != "richmond") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="richmond")
# NOTE(review): rbind_list() is deprecated in current dplyr; bind_rows() is the replacement.
data.othersitesaverage <- rbind_list(data.othersitesaverage2,data.othersitesaverage3,data.othersitesaverage4)
data.siteandotheraverage <- full_join(data.siteaverage,data.othersitesaverage)
# "factor" = how this site runs relative to the other sites in the same year/season
data.siteandotheraverage <- data.siteandotheraverage %>% mutate (factor = site.mean.pm2.5/othersites.mean.pm2.5)
data.dailyaverage <- data %>% group_by (date) %>% summarise (mean.pm2.5 = mean(pm2.5, na.rm = TRUE)) %>%
mutate (month = month(date), year = year(date)) %>%
mutate(season = ifelse (month == 12 | month ==1 | month == 2, "summer",.) %>%
ifelse (month == 3 | month == 4 | month == 5,"autumn",.) %>%
ifelse(month == 6 | month ==7 | month == 8, "winter",.) %>%
ifelse(month == 9 | month ==10 | month == 11, "spring",.)) %>%
mutate (season = as.character(season))
# imputed value = all-site daily mean scaled by the site's seasonal factor
data.impute <- left_join (data.dailyaverage,data.siteandotheraverage)
data.impute <- data.impute %>% mutate (pm2.5.impute = mean.pm2.5 * factor) %>% select (date,site,pm2.5.impute)
data.new <- left_join (data, data.impute)
# fill only the missing observations with the imputed value
data.new1 <- data.new %>% filter (is.na(pm2.5)) %>% mutate (pm2.5 = pm2.5.impute)
data.new <- data.new %>% filter (!is.na(pm2.5))
data.new <- rbind_list(data.new, data.new1)
data.new <- data.new %>% select(-c(pm2.5.impute, season, year, month))
nswaq9413.pm2.5.daily.sydney <- nswaq9413.pm2.5.daily.sydney %>% select(-pm2.5)
nswaq9413.pm2.5.daily.sydney <- left_join(nswaq9413.pm2.5.daily.sydney, data.new)
# NOTE(review): data.othersitesaverage2 is not in this remove() call and stays in the workspace.
remove (data.othersitesaverage3,data.othersitesaverage4,data.siteaverage,data.othersitesaverage,data.siteandotheraverage,data.dailyaverage,data.impute,data.new,data.new1)
# collapse each variable to a single Sydney-wide daily mean across its sites
nswaq9413.pm2.5.daily.sydney <- nswaq9413.pm2.5.daily.sydney %>% group_by (date) %>% summarise (pm2.5 = mean (pm2.5, na.rm = TRUE))
nswaq9413.pm10.daily.sydney <- nswaq9413.pm10.daily.sydney %>% group_by (date) %>% summarise (pm10 = mean (pm10, na.rm = TRUE))
nswaq9413.o3.daily.sydney <- nswaq9413.o3.daily.sydney %>% group_by (date) %>% summarise (o3 = mean (o3, na.rm = TRUE))
nswaq9413.o3max.daily.sydney <- nswaq9413.o3max.daily.sydney %>% group_by (date) %>% summarise (o3max = mean (o3max, na.rm = TRUE))
nswaq9413.humidity.daily.sydney <- nswaq9413.humidity.daily.sydney %>% group_by (date) %>% summarise (humidity = mean (humidity, na.rm = TRUE))
nswaq9413.temp.daily.sydney <- nswaq9413.temp.daily.sydney %>% group_by (date) %>% summarise (temp = mean (temp, na.rm = TRUE))
nswaq9413.co.daily.sydney <- nswaq9413.co.daily.sydney %>% group_by (date) %>% summarise (co = mean (co, na.rm = TRUE))
nswaq9413.no.daily.sydney <- nswaq9413.no.daily.sydney %>% group_by (date) %>% summarise (no = mean (no, na.rm = TRUE))
nswaq9413.no2.daily.sydney <- nswaq9413.no2.daily.sydney %>% group_by (date) %>% summarise (no2 = mean (no2, na.rm = TRUE))
nswaq9413.nox.daily.sydney <- nswaq9413.nox.daily.sydney %>% group_by (date) %>% summarise (nox = mean (nox, na.rm = TRUE))
nswaq9413.so2.daily.sydney <- nswaq9413.so2.daily.sydney %>% group_by (date) %>% summarise (so2 = mean (so2, na.rm = TRUE))
# stitch the per-variable daily series back into one data frame keyed on date
nswaq9413.daily.sydney <- left_join (nswaq9413.pm2.5.daily.sydney, nswaq9413.pm10.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.o3.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.o3max.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.humidity.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.temp.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.co.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.no.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.no2.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.nox.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.so2.daily.sydney)
# remove unnecessary data
remove (temp.sites, sites, so2.sites, pm10.sites, pm2.5.sites, o3.sites,humidity.sites, nswaq9413.humidity.daily.sydney, nswaq9413.o3.daily.sydney,nswaq9413.o3max.daily.sydney, nswaq9413.pm10.daily.sydney,nswaq9413.so2.daily.sydney, nswaq9413.pm2.5.daily.sydney, nswaq9413.temp.daily.sydney,nswaq9413.co.daily.sydney, nswaq9413.no.daily.sydney, nswaq9413.no2.daily.sydney, nswaq9413.nox.daily.sydney , co.sites, no.sites, no2.sites, nox.sites)
# if NA's are less than 5%, replace NA with the mean of the values from the previous and next days
first_nonna_pm25_row <- min(which(!is.na(nswaq9413.daily.sydney$pm2.5)))
napercent_pm25 <- nswaq9413.daily.sydney %>%
slice (first_nonna_pm25_row:nrow(nswaq9413.daily.sydney)) %>%
summarise (total.count = n(), na.pm2.5 = sum(is.na(pm2.5)), na.pm2.5.percent =100*na.pm2.5/total.count)
if (napercent_pm25$na.pm2.5.percent<5) {
nswaq9413.daily.sydney <- nswaq9413.daily.sydney %>% arrange(date) %>%
mutate(pm2.5_lag= lag(pm2.5), pm2.5_lead=lead(pm2.5)) %>%
mutate(pm2.5= ifelse(is.na(pm2.5), 0.5*(pm2.5_lag+pm2.5_lead), pm2.5))
}
# rounding to one decimal point
nswaq9413.daily.sydney <- nswaq9413.daily.sydney %>% mutate (pm2.5=round (pm2.5, digits=1),pm10=round (pm10, digits=1),o3=round (o3, digits=1),o3max=round (o3max, digits=1),humidity=round (humidity, digits=1),temp=round (temp, digits=1),co=round (co, digits=1),no=round (no, digits=1), no2=round (no2, digits=1), nox =round (nox, digits=1), so2 =round (so2, digits=1))
# saving the file
write_csv (nswaq9413.daily.sydney, path ="data_derived/nswaq9413.daily.sydney.csv")
dd <- data_dictionary(as.data.frame(nswaq9413.daily.sydney))
write.csv(dd, "data_derived/nswaq9413.daily.sydney.data_dictionary.csv", row.names = F)
vl <- variable_names_and_labels(datadict=dd)
write.csv(vl, "data_derived/nswaq9413.daily.sydney.variable_names_and_labels.csv", row.names = F)
| /main.R | no_license | farhadsalimi/AP_monitor_NSW_1994_2013_hrly | R | false | false | 15,595 | r |
#### name:main ####
# NSW air quality data (received from EPA) -----
# Section 1: read the four hourly OEH monitor files (1994-2013), clean them,
# and write a single hourly CSV plus a data dictionary.
library (dplyr)
library(openair)
library(reshape2)
library(stringr)
library(tidyr)
library(readr)
library(lubridate)
# NOTE(review): hard-coded network-drive path + setwd() makes this script
# non-portable; confirm the drive mapping before running.
projdir <- "Q:/Research/Environment_General/Air_Pollution_Monitoring_Stations_NSW/AP_monitor_NSW_1994_2013_hrly/"
setwd(projdir)
dir()
indir <- "data_provided"
dir(indir)
#importing the data (using the openair import function in combination with tbl_df)
nsw9498 <- tbl_df (import (file.path("data_provided", "/OEH (1994-1998)_AllSites_1hrlyData.csv")))
nsw9903 <- tbl_df (import (file.path("data_provided", "/OEH (1999-2003)__AllSites_1hrlyData.csv")))
nsw0408 <- tbl_df (import (file.path("data_provided", "/OEH (2004-2008)__AllSites_1hrlyData.csv")))
nsw0913 <- tbl_df (import (file.path("data_provided", "/OEH (2009-2013)__AllSites_1hrlyData.csv")))
# remove the units: column names appear to end in a one-word unit, so
# word(start = 1, end = -2) drops the final word -- TODO confirm against headers
names(nsw9498)[2:ncol(nsw9498)] <- word(colnames(nsw9498)[2:ncol(nsw9498)], start = 1, end = -2)
names(nsw9903)[2:ncol(nsw9903)] <- word(colnames(nsw9903)[2:ncol(nsw9903)], start = 1, end = -2)
names(nsw0408)[2:ncol(nsw0408)] <- word(colnames(nsw0408)[2:ncol(nsw0408)], start = 1, end = -2)
names(nsw0913)[2:ncol(nsw0913)] <- word(colnames(nsw0913)[2:ncol(nsw0913)], start = 1, end = -2)
# importing sites locations data
nsw.locations <- read_csv(file.path("data_provided", "/locations.csv"))
# binding them together
nswaq9413 <- bind_rows (nsw9498,nsw9903,nsw0408, nsw0913)
# removing the unnecessary data
remove (nsw9498,nsw9903,nsw0408, nsw0913)
#lowercase the column names
names(nswaq9413) <- tolower(colnames(nswaq9413))
#convert all columns but date to "double"
cols = seq(from=2, to=ncol(nswaq9413));
# this next line could be made more efficient.
nswaq9413[,cols] = apply(nswaq9413[,cols], 2, function(x) as.double(as.character(x)))
# remove the columns which all values are NA's
nswaq9413 <- nswaq9413[,colSums(is.na(nswaq9413))< nrow(nswaq9413)]
# keep the variables of interest (pm2.5, pm10, o3, no, co, humidity, and temperature)
# contains("no") also matches no2/nox columns; contains("co") also matches co columns
nswaq9413 <- nswaq9413 %>%
select (date, contains ("pm2.5"), contains("pm10"), contains ("ozone"), contains ("hum"), contains ("tem"), contains ("no"), contains ("co"), contains ("so2"))
#### TODO check if we might alternatively set these to zero, or some
#### code to allow sens test?
# replace all negative values with NA
nswaq9413 [nswaq9413 < 0] <- NA
# check the class of each column
lapply (nswaq9413, class)
write.table(nswaq9413, 'data_derived/nswaq9413.hrly.csv', row.names = F, sep = ",")
# build and save a data dictionary / variable-label table for the hourly data
library(disentangle)
str(nswaq9413)
dd <- data_dictionary(as.data.frame(nswaq9413))
write.csv(dd, "data_derived/nswaq9413.hrly.data_dictionary.csv", row.names = F)
vl <- variable_names_and_labels(datadict=dd)
write.csv(vl, "data_derived/nswaq9413.hrly.variable_names_and_labels.csv", row.names = F)
remove(dd, vl)
# calculate the daily average (at least 75% of data needed for each day otherwise NA)
# WS and WD data should not be averaged using this approach (the columns heading needs to change to ws and wd firstly)
# Section 2: hourly -> daily (mean and daily 1-hr max for no2/o3), then reshape
# the wide per-site columns into one tidy row per site/date.
nswaq9413.daily.avg <- timeAverage (nswaq9413, avg.time = "day", data.thresh = 75, interval = "hour")
nswaq9413.daily.1hrmax <- timeAverage (nswaq9413, avg.time = "day", statistic = "max", data.thresh = 75, interval = "hour") %>%
select ( contains("date"), contains ("no2"), contains ("ozone"))
# rename the max columns so they do not clash with the daily means after the join
colnames <- colnames (nswaq9413.daily.1hrmax)
colnames <- gsub ("no2","no2max",colnames)
colnames <- gsub ("ozone","o3max",colnames)
colnames (nswaq9413.daily.1hrmax) <- colnames
nswaq9413.daily <- left_join (nswaq9413.daily.avg, nswaq9413.daily.1hrmax)
# make a long formatted data
nswaq9413.daily.long <- melt (nswaq9413.daily, id = "date", value.name = "concentration")
# splitting the variable to site and the type of observation
site <- word (nswaq9413.daily.long$variable,start = 1, end = 3) # get the first 3 words (some sites' names include more than one word)
# remove unnecessary words to get down to the site name only
# NOTE(review): the gsub order matters -- " nox"/" no2" must be stripped before
# " no", otherwise the shorter pattern would clobber them.
site <- gsub ("1h","",site)
site <- gsub (" o3max", "", site)
site <- gsub (" no2max", "", site)
site <- gsub (" pm10","",site)
site <- gsub (" pm2.5","",site)
site <- gsub (" temp","",site)
site <- gsub (" humid", "", site)
site <- gsub (" ozone", "", site)
site <- gsub (" nox", "", site)
site <- gsub (" no2", "", site)
site <- gsub (" no", "", site)
site <- gsub (" co", "", site)
site <- gsub (" so2", "", site)
site <- str_trim(site, side = "both") #remove space from both sides of the name
site <- gsub (" ", ".", site) #replace space with dot
#remove space from both side
variable <- str_trim(nswaq9413.daily.long$variable, side = "both")
variable <- word(variable, -3) # get the 3rd word from the end (it is what we want (the name of the variable))
# lookup table mapping the raw token to the tidy observation name
variables <- c ("humid" = "humidity", "ozone" = "o3", "pm10" ="pm10", "pm2.5" = "pm2.5", "temp" = "temp", "co" = "co", "no" = "no", "no2" = "no2", "nox" = "nox", "so2" = "so2", "no2max" = "no2max", "o3max" = "o3max")
# add the "observation" and "site" column to our data
nswaq9413.daily.long$observation <- factor (variables[variable], levels = variables)
nswaq9413.daily.long$site <- site
# remove unnecessary data
remove (site,variable,variables)
# build the tidy data (each column a variable)
nswaq9413.daily <-
tbl_df(nswaq9413.daily.long) %>%
select (-variable) %>%
filter (!is.na(observation)) %>%
spread (observation, concentration) %>%
arrange (site)
# removing unnecessary data
remove (nswaq9413.daily.long)
# attach the sites locations (lon and lat)
nswaq9413.daily <- left_join (nswaq9413.daily, nsw.locations)
# remove unnecessary data
remove (nsw.locations,nswaq9413.daily.avg,nswaq9413.daily.1hrmax)
# change date format to Date
nswaq9413.daily <- nswaq9413.daily %>% mutate (date = as.Date(date))
#subsetting to sydney stations
nswaq9413.daily.sydney <- nswaq9413.daily %>%
filter ( site %in% c ("bringelly", "camden", "chullora", "campbelltown.west","earlwood","lindfield","liverpool","oakdale","prospect","randwick","richmond","rozelle","st.marys","vineyard"))
# selecting the sites which have at least 75% of data available for each variable
# (count NA's per site for every variable, then convert counts to proportions)
sites <- nswaq9413.daily.sydney %>%
group_by (site) %>%
summarise (total.count = n(), na.pm2.5 = sum(is.na(pm2.5)), na.pm10 = sum(is.na(pm10)), na.humidity = sum(is.na(humidity)), na.o3 = sum(is.na(o3)), na.o3max = sum(is.na(o3max)), na.temp = sum(is.na(temp)), na.co = sum(is.na(co)), na.no = sum(is.na(no)), na.no2 = sum(is.na(no2)), na.nox = sum(is.na(nox)), na.so2 = sum(is.na(so2))) %>%
# BUG FIX: no.na.percent previously used na.co (copy-paste error); it now uses na.no.
mutate (pm2.5.na.percent = na.pm2.5/total.count, pm10.na.percent = na.pm10/total.count, humidity.na.percent = na.humidity/total.count, o3.na.percent = na.o3/total.count,o3max.na.percent = na.o3max/total.count, temp.na.percent = na.temp/total.count, co.na.percent = na.co/total.count, no.na.percent = na.no/total.count, no2.na.percent = na.no2/total.count, nox.na.percent = na.nox/total.count, so2.na.percent = na.so2/total.count)
# pm2.5 sites are hard-coded rather than threshold-selected -- presumably the
# only Sydney sites with a usable pm2.5 record; verify against the NA table above.
pm2.5.sites <- data.frame(site=c("earlwood","liverpool","richmond"))
pm10.sites <- sites %>% filter (pm10.na.percent <= 0.25) %>% select (site)
o3.sites <- sites %>% filter (o3.na.percent <= 0.25) %>% select (site)
o3max.sites <- sites %>% filter (o3max.na.percent <= 0.25) %>% select (site)
humidity.sites <- sites %>% filter (humidity.na.percent <= 0.25) %>% select (site)
temp.sites <- sites %>% filter (temp.na.percent <= 0.25) %>% select (site)
co.sites <- sites %>% filter (co.na.percent <= 0.25) %>% select (site)
no.sites <- sites %>% filter (no.na.percent <= 0.25) %>% select (site)
no2.sites <- sites %>% filter (no2.na.percent <= 0.25) %>% select (site)
nox.sites <- sites %>% filter (nox.na.percent <= 0.25) %>% select (site)
so2.sites <- sites %>% filter (so2.na.percent <= 0.25) %>% select (site)
# keep only the qualifying sites for each variable (one subset per pollutant)
nswaq9413.pm2.5.daily.sydney <- left_join (pm2.5.sites, nswaq9413.daily.sydney)
nswaq9413.pm10.daily.sydney <- left_join (pm10.sites, nswaq9413.daily.sydney)
nswaq9413.o3.daily.sydney <- left_join (o3.sites, nswaq9413.daily.sydney)
nswaq9413.o3max.daily.sydney <- left_join (o3max.sites, nswaq9413.daily.sydney)
nswaq9413.humidity.daily.sydney <- left_join (humidity.sites, nswaq9413.daily.sydney)
nswaq9413.temp.daily.sydney <- left_join (temp.sites, nswaq9413.daily.sydney)
nswaq9413.co.daily.sydney <- left_join (co.sites, nswaq9413.daily.sydney)
nswaq9413.no.daily.sydney <- left_join (no.sites, nswaq9413.daily.sydney)
nswaq9413.no2.daily.sydney <- left_join (no2.sites, nswaq9413.daily.sydney)
nswaq9413.nox.daily.sydney <- left_join (nox.sites, nswaq9413.daily.sydney)
nswaq9413.so2.daily.sydney <- left_join (so2.sites, nswaq9413.daily.sydney)
#pm2.5 imputation
# Section 4: impute missing pm2.5 per site using a seasonal site/other-sites ratio,
# then collapse every variable to one Sydney-wide daily mean and save the result.
data <- nswaq9413.pm2.5.daily.sydney %>% select (date, site, pm2.5)
# NOTE(review): the "." placeholder inside these nested ifelse() pipes is
# magrittr-specific and fragile (in the first ifelse it refers to the piped data
# frame, not the season column) -- verify the resulting season labels.
data <- data %>% mutate (month = month(date), year = year(date)) %>%
mutate(season = ifelse (month == 12 | month ==1 | month == 2, "summer",.) %>%
ifelse (month == 3 | month == 4 | month == 5,"autumn",.) %>%
ifelse(month == 6 | month ==7 | month == 8, "winter",.) %>%
ifelse(month == 9 | month ==10 | month == 11, "spring",.)) %>%
mutate (season = as.character(season))
# mean pm2.5 per site/year/season, and the matching mean over the OTHER two sites
data.siteaverage <- data %>% group_by(site,year,season) %>% summarise(site.mean.pm2.5 = mean(pm2.5, na.rm =TRUE))
data.othersitesaverage2 <- data %>% filter (site != "earlwood") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="earlwood")
data.othersitesaverage3 <- data %>% filter (site != "liverpool") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="liverpool")
data.othersitesaverage4 <- data %>% filter (site != "richmond") %>% group_by(year,season) %>% summarise(othersites.mean.pm2.5 = mean(pm2.5, na.rm =TRUE)) %>% mutate(site="richmond")
# NOTE(review): rbind_list() is deprecated in current dplyr; bind_rows() is the replacement.
data.othersitesaverage <- rbind_list(data.othersitesaverage2,data.othersitesaverage3,data.othersitesaverage4)
data.siteandotheraverage <- full_join(data.siteaverage,data.othersitesaverage)
# "factor" = how this site runs relative to the other sites in the same year/season
data.siteandotheraverage <- data.siteandotheraverage %>% mutate (factor = site.mean.pm2.5/othersites.mean.pm2.5)
data.dailyaverage <- data %>% group_by (date) %>% summarise (mean.pm2.5 = mean(pm2.5, na.rm = TRUE)) %>%
mutate (month = month(date), year = year(date)) %>%
mutate(season = ifelse (month == 12 | month ==1 | month == 2, "summer",.) %>%
ifelse (month == 3 | month == 4 | month == 5,"autumn",.) %>%
ifelse(month == 6 | month ==7 | month == 8, "winter",.) %>%
ifelse(month == 9 | month ==10 | month == 11, "spring",.)) %>%
mutate (season = as.character(season))
# imputed value = all-site daily mean scaled by the site's seasonal factor
data.impute <- left_join (data.dailyaverage,data.siteandotheraverage)
data.impute <- data.impute %>% mutate (pm2.5.impute = mean.pm2.5 * factor) %>% select (date,site,pm2.5.impute)
data.new <- left_join (data, data.impute)
# fill only the missing observations with the imputed value
data.new1 <- data.new %>% filter (is.na(pm2.5)) %>% mutate (pm2.5 = pm2.5.impute)
data.new <- data.new %>% filter (!is.na(pm2.5))
data.new <- rbind_list(data.new, data.new1)
data.new <- data.new %>% select(-c(pm2.5.impute, season, year, month))
nswaq9413.pm2.5.daily.sydney <- nswaq9413.pm2.5.daily.sydney %>% select(-pm2.5)
nswaq9413.pm2.5.daily.sydney <- left_join(nswaq9413.pm2.5.daily.sydney, data.new)
# NOTE(review): data.othersitesaverage2 is not in this remove() call and stays in the workspace.
remove (data.othersitesaverage3,data.othersitesaverage4,data.siteaverage,data.othersitesaverage,data.siteandotheraverage,data.dailyaverage,data.impute,data.new,data.new1)
# collapse each variable to a single Sydney-wide daily mean across its sites
nswaq9413.pm2.5.daily.sydney <- nswaq9413.pm2.5.daily.sydney %>% group_by (date) %>% summarise (pm2.5 = mean (pm2.5, na.rm = TRUE))
nswaq9413.pm10.daily.sydney <- nswaq9413.pm10.daily.sydney %>% group_by (date) %>% summarise (pm10 = mean (pm10, na.rm = TRUE))
nswaq9413.o3.daily.sydney <- nswaq9413.o3.daily.sydney %>% group_by (date) %>% summarise (o3 = mean (o3, na.rm = TRUE))
nswaq9413.o3max.daily.sydney <- nswaq9413.o3max.daily.sydney %>% group_by (date) %>% summarise (o3max = mean (o3max, na.rm = TRUE))
nswaq9413.humidity.daily.sydney <- nswaq9413.humidity.daily.sydney %>% group_by (date) %>% summarise (humidity = mean (humidity, na.rm = TRUE))
nswaq9413.temp.daily.sydney <- nswaq9413.temp.daily.sydney %>% group_by (date) %>% summarise (temp = mean (temp, na.rm = TRUE))
nswaq9413.co.daily.sydney <- nswaq9413.co.daily.sydney %>% group_by (date) %>% summarise (co = mean (co, na.rm = TRUE))
nswaq9413.no.daily.sydney <- nswaq9413.no.daily.sydney %>% group_by (date) %>% summarise (no = mean (no, na.rm = TRUE))
nswaq9413.no2.daily.sydney <- nswaq9413.no2.daily.sydney %>% group_by (date) %>% summarise (no2 = mean (no2, na.rm = TRUE))
nswaq9413.nox.daily.sydney <- nswaq9413.nox.daily.sydney %>% group_by (date) %>% summarise (nox = mean (nox, na.rm = TRUE))
nswaq9413.so2.daily.sydney <- nswaq9413.so2.daily.sydney %>% group_by (date) %>% summarise (so2 = mean (so2, na.rm = TRUE))
# stitch the per-variable daily series back into one data frame keyed on date
nswaq9413.daily.sydney <- left_join (nswaq9413.pm2.5.daily.sydney, nswaq9413.pm10.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.o3.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.o3max.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.humidity.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.temp.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.co.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.no.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.no2.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.nox.daily.sydney)
nswaq9413.daily.sydney <- left_join (nswaq9413.daily.sydney, nswaq9413.so2.daily.sydney)
# remove unnecessary data
remove (temp.sites, sites, so2.sites, pm10.sites, pm2.5.sites, o3.sites,humidity.sites, nswaq9413.humidity.daily.sydney, nswaq9413.o3.daily.sydney,nswaq9413.o3max.daily.sydney, nswaq9413.pm10.daily.sydney,nswaq9413.so2.daily.sydney, nswaq9413.pm2.5.daily.sydney, nswaq9413.temp.daily.sydney,nswaq9413.co.daily.sydney, nswaq9413.no.daily.sydney, nswaq9413.no2.daily.sydney, nswaq9413.nox.daily.sydney , co.sites, no.sites, no2.sites, nox.sites)
# if NA's are less than 5%, replace NA with the mean of the values from the previous and next days
first_nonna_pm25_row <- min(which(!is.na(nswaq9413.daily.sydney$pm2.5)))
napercent_pm25 <- nswaq9413.daily.sydney %>%
slice (first_nonna_pm25_row:nrow(nswaq9413.daily.sydney)) %>%
summarise (total.count = n(), na.pm2.5 = sum(is.na(pm2.5)), na.pm2.5.percent =100*na.pm2.5/total.count)
if (napercent_pm25$na.pm2.5.percent<5) {
nswaq9413.daily.sydney <- nswaq9413.daily.sydney %>% arrange(date) %>%
mutate(pm2.5_lag= lag(pm2.5), pm2.5_lead=lead(pm2.5)) %>%
mutate(pm2.5= ifelse(is.na(pm2.5), 0.5*(pm2.5_lag+pm2.5_lead), pm2.5))
}
# rounding to one decimal point
nswaq9413.daily.sydney <- nswaq9413.daily.sydney %>% mutate (pm2.5=round (pm2.5, digits=1),pm10=round (pm10, digits=1),o3=round (o3, digits=1),o3max=round (o3max, digits=1),humidity=round (humidity, digits=1),temp=round (temp, digits=1),co=round (co, digits=1),no=round (no, digits=1), no2=round (no2, digits=1), nox =round (nox, digits=1), so2 =round (so2, digits=1))
# saving the file
write_csv (nswaq9413.daily.sydney, path ="data_derived/nswaq9413.daily.sydney.csv")
dd <- data_dictionary(as.data.frame(nswaq9413.daily.sydney))
write.csv(dd, "data_derived/nswaq9413.daily.sydney.data_dictionary.csv", row.names = F)
vl <- variable_names_and_labels(datadict=dd)
write.csv(vl, "data_derived/nswaq9413.daily.sydney.variable_names_and_labels.csv", row.names = F)
|
#Libraries
###########################
library("scater", quietly = TRUE)
library("Seurat")
library("gridExtra")
library("ggpubr")
library("biomaRt")
library("scran")
library('KEGG.db')
library('ReactomePA')
library('pathview')
library('GSEABase')
library('org.Hs.eg.db')
library('annotate')
library('GOstats')
library('AnnotationDbi')
library("xlsx")
library("gtools")
library("minet")
library('fmsb') | /seurat_melanoma_timeseries_libraries.R | no_license | SingleCellKIKAUSTNB/Seurat_analysis_pipeline | R | false | false | 405 | r | #Libraries
###########################
library("scater", quietly = TRUE)
library("Seurat")
library("gridExtra")
library("ggpubr")
library("biomaRt")
library("scran")
library('KEGG.db')
library('ReactomePA')
library('pathview')
library('GSEABase')
library('org.Hs.eg.db')
library('annotate')
library('GOstats')
library('AnnotationDbi')
library("xlsx")
library("gtools")
library("minet")
library('fmsb') |
### Classical multiplicative time-series decomposition.
### DecomposeC(data, frequency):
###   data      - numeric vector (the observed series)
###   frequency - seasonal period; only 1, 2, 3, 4 and 12 are handled by the
###               moving-average branches below. NOTE(review): for any other
###               frequency > 1, kmo stays NA and Seasonality becomes NaN.
### Returns a data.frame with columns Data, Seasonality, Randomness, Trend and
### Cyrcle (sic -- the misspelling is part of the returned interface, kept as-is).
### NOTE(review): the "frequency" argument shadows stats::frequency (harmless here).
DecomposeC = function(data,frequency){
# frame holds the series plus all intermediate columns; "kmo" is the centered
# moving average (presumably Greek "kinoumenos mesos oros" = moving average).
frame<-matrix(data = NA, nrow = length(data), ncol = 1, byrow = FALSE,dimnames = NULL)
frame<-data.frame(frame); colnames(frame)<-c("ID"); frame$Data<-data
frame$kmo<-NA # moving average based on frequency
seasonality<-frequency
ID<-c(); IDref<-c(1:seasonality) # which period within the season is this observation?
for (i in 1:(length(data)%/%seasonality)){ ID<-c(ID,IDref) }
ID<-c(ID,head(IDref,(length(data)%%seasonality))) ;frame$ID<-ID
# Centered moving average per supported frequency.
# NOTE(review): the window widths for frequency 2 (6-term) and 3 (4-term) look
# unusual for a classical 2xm centered MA -- confirm they are intentional.
if (frequency==1){
frame$Seasonality<-1
}else if (frequency==2){
for (i in 4:(length(data)-3)){
n1<-(frame$Data[i+3]+frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2])/6
n2<-(frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+frame$Data[i-3])/6
frame$kmo[i]=(n1+n2)/2
}
}else if (frequency==3){
for (i in 3:(length(data)-2)){
n1<-(frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1])/4
n2<-(frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2])/4
frame$kmo[i]=(n1+n2)/2
}
}else if (frequency==4){
for (i in 2:(length(data)-1)){
frame$kmo[i]=(frame$Data[i+1]+frame$Data[i]+frame$Data[i-1])/3
}
}else if (frequency==12){
# 2x12 centered moving average: two offset 12-term means, averaged.
for (i in 7:(length(data)-6)){
n1<-(frame$Data[i+5]+frame$Data[i+4]+frame$Data[i+3]+frame$Data[i+2]+
frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+
frame$Data[i-3]+frame$Data[i-4]+frame$Data[i-5]+frame$Data[i+6])/12
n2<-(frame$Data[i+5]+frame$Data[i+4]+frame$Data[i+3]+frame$Data[i+2]+
frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+
frame$Data[i-3]+frame$Data[i-4]+frame$Data[i-5]+frame$Data[i-6])/12
frame$kmo[i]=(n1+n2)/2
}
}
#Calculate SR and SI (seasonal ratios -> normalized seasonal indices)
if (frequency>1){
# LE = raw seasonal ratio Data / centered MA
frame$LE<-frame$Data/frame$kmo
LE<-matrix(data = NA, nrow = seasonality, ncol = 2, byrow = FALSE,dimnames = NULL)
LE<-data.frame(LE); colnames(LE)<-c("ID","LE"); LE$ID<-c(1:seasonality)
# per period: mean of the ratios excluding the max and min (a trimmed mean)
for (i in 1:seasonality){
LE$LE[i]<-mean(frame$LE[ (frame$ID==i) & (is.na(frame$LE)==FALSE) & (frame$LE<max(frame$LE[(frame$ID==i)&(is.na(frame$LE)==FALSE)])) & (frame$LE>min(frame$LE[(frame$ID==i)&(is.na(frame$LE)==FALSE)])) ])
}
# normalize the indices so they average to 1 ("sndarize" = standardize constant)
sndarize=mean(LE$LE)
LE$LE<-LE$LE/sndarize
frame$kmo<-NA
# tile the seasonal indices over the whole series
DE<-c(); DEref<-LE$LE
for (i in 1:(length(data)%/%seasonality)){ DE<-c(DE,DEref) }
DE<-c(DE,head(DEref,(length(data)%%seasonality)))
frame$Seasonality<-DE
}
frame$Deseasonalized<-frame$Data/frame$Seasonality
#Calculate Randomness: Deseasonalized / double 3-term moving average, with
# asymmetric 2:1 weighting at both endpoints.
for (i in 2:(length(data)-1)){
frame$kmo[i]=(frame$Deseasonalized[i+1]+frame$Deseasonalized[i]+frame$Deseasonalized[i-1])/3
}
frame$kmo[1]<-(2*frame$Deseasonalized[1]+frame$Deseasonalized[2])/3
frame$kmo[length(data)]<-(2*frame$Deseasonalized[length(data)]+frame$Deseasonalized[length(data)-1])/3
frame$kmo3<-NA
frame$kmo3<-NA
for (i in 2:(length(data)-1)){
frame$kmo3[i]=(frame$kmo[i+1]+frame$kmo[i]+frame$kmo[i-1])/3
}
frame$kmo3[1]<-(2*frame$kmo[1]+frame$kmo[2])/3
frame$kmo3[length(data)]<-(2*frame$kmo[length(data)]+frame$kmo[length(data)-1])/3
frame$Randomness<-frame$Deseasonalized/frame$kmo3
# drop the helper columns (chained NULL assignment removes all three)
frame$kmo3=frame$kmo=frame$ID<-NULL
#Calculate Linear Trend and Cyrcle
# TC = trend+cycle component; Trend = OLS line fit over the index, Cyrcle = TC/Trend
TC<-frame$Deseasonalized/frame$Randomness ; frame$Deseasonalized<-NULL
xs<-c(1:length(data))
frame$Trend<-as.numeric(predict(lm(TC~xs)))
frame$Cyrcle<-TC/frame$Trend
# frame$LE<-LE$LE
return(frame)
}
| /SeasonalityTest.R | no_license | electraa/GoalKeeper | R | false | false | 3,600 | r | ### Cross-Validation
DecomposeC = function(data,frequency){
frame<-matrix(data = NA, nrow = length(data), ncol = 1, byrow = FALSE,dimnames = NULL)
frame<-data.frame(frame); colnames(frame)<-c("ID"); frame$Data<-data
frame$kmo<-NA # moving average based on frequency
seasonality<-frequency
ID<-c(); IDref<-c(1:seasonality) # which month is this observation?
for (i in 1:(length(data)%/%seasonality)){ ID<-c(ID,IDref) }
ID<-c(ID,head(IDref,(length(data)%%seasonality))) ;frame$ID<-ID
if (frequency==1){
frame$Seasonality<-1
}else if (frequency==2){
for (i in 4:(length(data)-3)){
n1<-(frame$Data[i+3]+frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2])/6
n2<-(frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+frame$Data[i-3])/6
frame$kmo[i]=(n1+n2)/2
}
}else if (frequency==3){
for (i in 3:(length(data)-2)){
n1<-(frame$Data[i+2]+frame$Data[i+1]+frame$Data[i]+frame$Data[i-1])/4
n2<-(frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2])/4
frame$kmo[i]=(n1+n2)/2
}
}else if (frequency==4){
for (i in 2:(length(data)-1)){
frame$kmo[i]=(frame$Data[i+1]+frame$Data[i]+frame$Data[i-1])/3
}
}else if (frequency==12){
for (i in 7:(length(data)-6)){
n1<-(frame$Data[i+5]+frame$Data[i+4]+frame$Data[i+3]+frame$Data[i+2]+
frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+
frame$Data[i-3]+frame$Data[i-4]+frame$Data[i-5]+frame$Data[i+6])/12
n2<-(frame$Data[i+5]+frame$Data[i+4]+frame$Data[i+3]+frame$Data[i+2]+
frame$Data[i+1]+frame$Data[i]+frame$Data[i-1]+frame$Data[i-2]+
frame$Data[i-3]+frame$Data[i-4]+frame$Data[i-5]+frame$Data[i-6])/12
frame$kmo[i]=(n1+n2)/2
}
}
#Calculate SR and SI
if (frequency>1){
frame$LE<-frame$Data/frame$kmo
LE<-matrix(data = NA, nrow = seasonality, ncol = 2, byrow = FALSE,dimnames = NULL)
LE<-data.frame(LE); colnames(LE)<-c("ID","LE"); LE$ID<-c(1:seasonality)
for (i in 1:seasonality){
LE$LE[i]<-mean(frame$LE[ (frame$ID==i) & (is.na(frame$LE)==FALSE) & (frame$LE<max(frame$LE[(frame$ID==i)&(is.na(frame$LE)==FALSE)])) & (frame$LE>min(frame$LE[(frame$ID==i)&(is.na(frame$LE)==FALSE)])) ])
}
sndarize=mean(LE$LE)
LE$LE<-LE$LE/sndarize
frame$kmo<-NA
DE<-c(); DEref<-LE$LE
for (i in 1:(length(data)%/%seasonality)){ DE<-c(DE,DEref) }
DE<-c(DE,head(DEref,(length(data)%%seasonality)))
frame$Seasonality<-DE
}
frame$Deseasonalized<-frame$Data/frame$Seasonality
#Calculate Randomness
for (i in 2:(length(data)-1)){
frame$kmo[i]=(frame$Deseasonalized[i+1]+frame$Deseasonalized[i]+frame$Deseasonalized[i-1])/3
}
frame$kmo[1]<-(2*frame$Deseasonalized[1]+frame$Deseasonalized[2])/3
frame$kmo[length(data)]<-(2*frame$Deseasonalized[length(data)]+frame$Deseasonalized[length(data)-1])/3
frame$kmo3<-NA
for (i in 2:(length(data)-1)){
frame$kmo3[i]=(frame$kmo[i+1]+frame$kmo[i]+frame$kmo[i-1])/3
}
frame$kmo3[1]<-(2*frame$kmo[1]+frame$kmo[2])/3
frame$kmo3[length(data)]<-(2*frame$kmo[length(data)]+frame$kmo[length(data)-1])/3
frame$Randomness<-frame$Deseasonalized/frame$kmo3
frame$kmo3=frame$kmo=frame$ID<-NULL
#Calculate Linear Trend and Cyrcle
TC<-frame$Deseasonalized/frame$Randomness ; frame$Deseasonalized<-NULL
xs<-c(1:length(data))
frame$Trend<-as.numeric(predict(lm(TC~xs)))
frame$Cyrcle<-TC/frame$Trend
# frame$LE<-LE$LE
return(frame)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateData.R
\name{generateRead}
\alias{generateRead}
\title{Generate a noisy read from a true sequence.}
\usage{
generateRead(seq = "ATGCGGATCG", qv = NULL, seed = NULL,
paramgphmm = initializeGphmm())
}
\arguments{
\item{seq}{Character vector giving the true sequence.}
\item{qv}{Integer, desired phred quality score for the read.}
\item{seed}{Integer, random seed for reproducibility.}
\item{paramgphmm}{List of model parameters.}
}
\description{
\code{generateRead} returns a list with the noisy read, the true sequence, the path, and the phred quality score.
}
| /man/generateRead.Rd | no_license | wholebiome/gphmm | R | false | true | 637 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateData.R
\name{generateRead}
\alias{generateRead}
\title{Generate a noisy read from a true sequence.}
\usage{
generateRead(seq = "ATGCGGATCG", qv = NULL, seed = NULL,
paramgphmm = initializeGphmm())
}
\arguments{
\item{seq}{Character vector giving the true sequence.}
\item{qv}{Integer, desired phred quality score for the read.}
\item{seed}{Integer, random seed for reproducibility.}
\item{paramgphmm}{List of model parameters.}
}
\description{
\code{generateRead} returns a list with the noisy read, the true sequence, the path, and the phred quality score.
}
|
source('player_stats_scr_bat.R')
source('player_stats_scr_pit.R')
source('wl_scrapping.R')
source('bs_scraping.R')
library(dplyr)
#master
bx <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_03/day_31/master_scoreboard.xml")
#games
j1 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_camaaa_leoaaa_1/rawboxscore.xml")
j2 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mtyaaa_vaqaaa_1/rawboxscore.xml")
j3 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mvaaaa_duraaa_1/rawboxscore.xml")
j4 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mxoaaa_sltaaa_1/rawboxscore.xml")
j5 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_oaxaaa_yucaaa_1/rawboxscore.xml")
j6 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_pueaaa_tabaaa_1/rawboxscore.xml")
j7 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_tijaaa_aguaaa_1/rawboxscore.xml")
j8 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_vraaaa_quiaaa_1/rawboxscore.xml")
#COLLECT BATTING STATS
g1.b <- py_st_bt(j1)
g2.b <- py_st_bt(j2)
g3.b <- py_st_bt(j3)
g4.b <- py_st_bt(j4)
g5.b <- py_st_bt(j5)
g6.b <- py_st_bt(j6)
g7.b <- py_st_bt(j7)
g8.b <- py_st_bt(j8)
#COLLECT PITCHING STATS
g1.p <- py_st_pt(j1)
g2.p <- py_st_pt(j2)
g3.p <- py_st_pt(j3)
g4.p <- py_st_pt(j4)
g5.p <- py_st_pt(j5)
g6.p <- py_st_pt(j6)
g7.p <- py_st_pt(j7)
g8.p <- py_st_pt(j8)
#COLLECT SCORE
g1.s <- bs(j1)
g2.s <- bs(j2)
g3.s <- bs(j3)
g4.s <- bs(j4)
g5.s <- bs(j5)
g6.s <- bs(j6)
g7.s <- bs(j7)
g8.s <- bs(j8)
#WL series
wls <- wl(bx)
#FIRST
LMB_2018_bat <- rbind(g1.b,g2.b,g3,b,g4.b,g5,b,g6,b,g7.b,g8.b)
LMB_2018_pit <- rbind(g1.p,g2.p,g3.p,g4.p,g5.p,g6.p,g7.p,g8.p)
#UPDATE
LMB_2018_bat_UP <- rbind(g1.b,g2.b,g3,b,g4.b,g5,b,g6,b,g7.b,g8.b)
LMB_2018_pit_UP <- rbind(g1.p,g2.p,g3.p,g4.p,g5.p,g6.p,g7.p,g8.p)
LMB_2018_bat <- LMB_2018_bat %>%
rbind(., LMB_2018_bat_UP) %>%
group_by(Player_Name) %>%
summarise(AB = sum(AB),
R = sum(R),
H = sum(H),
D = sum(D),
Tr = sum(Tr),
HR = sum(HR),
RBI = sum(RBI),
BB = sum(BB),
SO = sum(SO),
HBP = sum(HBP),
SB = sum(SB),
SF = sum(SF),
SH = sum(SH),
PA = sum(PA))
LMB_2018_pit <- LMB_2018_pit %>%
rbind(.,LMB_2018_pit_UP) %>%
summarise(OUT = sum(OUT),
H = sum(H),
HR = sum(HR),
ER = sum(HR),
SO = sum(SO),
BB = sum(BB),
BK = sum(BK),
W = sum(W),
L = sum(L),
HLD = sum(HLD),
SV = sum(SV))
#BATTING STATS
bat_bas <- mutate(LMB_2017_bat,
PA = AB+BB+SF+SH)
bat_bas <- bat_bas %>%
filter(PA > 0)
bat_bas <- mutate(bat_bas,
AVG = round((H/AB),3),
OBP = round(((H+BB+HBP)/(AB+BB+HBP+SF)),3),
SLG = round((((1*H)+(2*D)+(3*Tr)+(4*HR))/AB),3),
OPS = round((OBP+SLG),3))
#bat_sab <-
#PITCHING STATS
pit_bas <- select(pit1, Pitcher_Name, H:SV)
pit_bas <- mutate(pit_bas,
IP = round(as.numeric(paste0(trunc(pit1$OUT/3),".",pit1$OUT%%3)),2),
ERA = round(9*(ER/IP),2),
WHIP = round((BB+H)/IP,2))
pit_sab <- select(pit_bas, Pitcher_Name, SO:BB,IP:WHIP)
pit_sab <- mutate(pit_sab,
'BB/9' = round(9*(BB/IP),2),
'K/9' = round(9*(SO/IP),2))
#MASTER BOXSCORE
wls1 <- wl(bx)
wls <- rbind(wls,wls1)
#SCORES
LMB_2018_SC <- rbind(g1.s,g2.s,g3,s,g4.s,g5.s,g6.s,g7.s,g8.s)
LMB_2018_SC_UP <- rbind(g1.s,g2.s,g3,s,g4.s,g5.s,g6.s,g7.s,g8.s)
LMB_2018_SC <- rbind(LMB_2018_SC,LMB_2018_SC_UP)
sc <- LMB_2018_SC
###
sc #Game scores, attendance, times
wls #W-L series
pit_bas #pitching stats basic
pit_sab #pitching stats sabr
bat_bas #batting stats basic
bat_sab #batting stats sabr
| /master_daily_collect.R | no_license | axelmora/boxscore_scraping | R | false | false | 4,261 | r | source('player_stats_scr_bat.R')
source('player_stats_scr_pit.R')
source('wl_scrapping.R')
source('bs_scraping.R')
library(dplyr)
#master
bx <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_03/day_31/master_scoreboard.xml")
#games
j1 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_camaaa_leoaaa_1/rawboxscore.xml")
j2 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mtyaaa_vaqaaa_1/rawboxscore.xml")
j3 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mvaaaa_duraaa_1/rawboxscore.xml")
j4 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_mxoaaa_sltaaa_1/rawboxscore.xml")
j5 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_oaxaaa_yucaaa_1/rawboxscore.xml")
j6 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_pueaaa_tabaaa_1/rawboxscore.xml")
j7 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_tijaaa_aguaaa_1/rawboxscore.xml")
j8 <- read_xml("http://www.milb.com/gdcross/components/game/aaa/year_2017/month_05/day_04/gid_2017_05_04_vraaaa_quiaaa_1/rawboxscore.xml")
#COLLECT BATTING STATS
g1.b <- py_st_bt(j1)
g2.b <- py_st_bt(j2)
g3.b <- py_st_bt(j3)
g4.b <- py_st_bt(j4)
g5.b <- py_st_bt(j5)
g6.b <- py_st_bt(j6)
g7.b <- py_st_bt(j7)
g8.b <- py_st_bt(j8)
#COLLECT PITCHING STATS
g1.p <- py_st_pt(j1)
g2.p <- py_st_pt(j2)
g3.p <- py_st_pt(j3)
g4.p <- py_st_pt(j4)
g5.p <- py_st_pt(j5)
g6.p <- py_st_pt(j6)
g7.p <- py_st_pt(j7)
g8.p <- py_st_pt(j8)
#COLLECT SCORE
g1.s <- bs(j1)
g2.s <- bs(j2)
g3.s <- bs(j3)
g4.s <- bs(j4)
g5.s <- bs(j5)
g6.s <- bs(j6)
g7.s <- bs(j7)
g8.s <- bs(j8)
#WL series
wls <- wl(bx)
#FIRST
LMB_2018_bat <- rbind(g1.b,g2.b,g3,b,g4.b,g5,b,g6,b,g7.b,g8.b)
LMB_2018_pit <- rbind(g1.p,g2.p,g3.p,g4.p,g5.p,g6.p,g7.p,g8.p)
#UPDATE
LMB_2018_bat_UP <- rbind(g1.b,g2.b,g3,b,g4.b,g5,b,g6,b,g7.b,g8.b)
LMB_2018_pit_UP <- rbind(g1.p,g2.p,g3.p,g4.p,g5.p,g6.p,g7.p,g8.p)
LMB_2018_bat <- LMB_2018_bat %>%
rbind(., LMB_2018_bat_UP) %>%
group_by(Player_Name) %>%
summarise(AB = sum(AB),
R = sum(R),
H = sum(H),
D = sum(D),
Tr = sum(Tr),
HR = sum(HR),
RBI = sum(RBI),
BB = sum(BB),
SO = sum(SO),
HBP = sum(HBP),
SB = sum(SB),
SF = sum(SF),
SH = sum(SH),
PA = sum(PA))
LMB_2018_pit <- LMB_2018_pit %>%
rbind(.,LMB_2018_pit_UP) %>%
summarise(OUT = sum(OUT),
H = sum(H),
HR = sum(HR),
ER = sum(HR),
SO = sum(SO),
BB = sum(BB),
BK = sum(BK),
W = sum(W),
L = sum(L),
HLD = sum(HLD),
SV = sum(SV))
#BATTING STATS
bat_bas <- mutate(LMB_2017_bat,
PA = AB+BB+SF+SH)
bat_bas <- bat_bas %>%
filter(PA > 0)
bat_bas <- mutate(bat_bas,
AVG = round((H/AB),3),
OBP = round(((H+BB+HBP)/(AB+BB+HBP+SF)),3),
SLG = round((((1*H)+(2*D)+(3*Tr)+(4*HR))/AB),3),
OPS = round((OBP+SLG),3))
#bat_sab <-
#PITCHING STATS
pit_bas <- select(pit1, Pitcher_Name, H:SV)
pit_bas <- mutate(pit_bas,
IP = round(as.numeric(paste0(trunc(pit1$OUT/3),".",pit1$OUT%%3)),2),
ERA = round(9*(ER/IP),2),
WHIP = round((BB+H)/IP,2))
pit_sab <- select(pit_bas, Pitcher_Name, SO:BB,IP:WHIP)
pit_sab <- mutate(pit_sab,
'BB/9' = round(9*(BB/IP),2),
'K/9' = round(9*(SO/IP),2))
#MASTER BOXSCORE
wls1 <- wl(bx)
wls <- rbind(wls,wls1)
#SCORES
LMB_2018_SC <- rbind(g1.s,g2.s,g3,s,g4.s,g5.s,g6.s,g7.s,g8.s)
LMB_2018_SC_UP <- rbind(g1.s,g2.s,g3,s,g4.s,g5.s,g6.s,g7.s,g8.s)
LMB_2018_SC <- rbind(LMB_2018_SC,LMB_2018_SC_UP)
sc <- LMB_2018_SC
###
sc #Game scores, attendance, times
wls #W-L series
pit_bas #pitching stats basic
pit_sab #pitching stats sabr
bat_bas #batting stats basic
bat_sab #batting stats sabr
|
#------------------------------------------------------------------------------#
# map GLPK(...) function names to glpkAPI function names #
#------------------------------------------------------------------------------#
# GLPK(...) # glpkAPI
#library(glpkAPI,warn.conflicts=FALSE,verbose=FALSE)
glp_add_cols <- function(...) addColsGLPK(...)
glp_add_rows <- function(...) addRowsGLPK(...)
glp_adv_basis <- function(...) advBasisGLPK(...)
glp_bf_exists <- function(...) bfExistsGLPK(...)
glp_bf_updated <- function(...) bfUpdatedGLPK(...)
glp_check_dup <- function(...) checkDupGLPK(...)
glp_copy_prob <- function(...) copyProbGLPK(...)
glp_cpx_basis <- function(...) cpxBasisGLPK(...)
glp_create_index <- function(...) createIndexGLPK(...)
glp_create_prob <- function(...) initProbGLPK(...)
glp_del_cols <- function(...) delColsGLPK(...)
glp_del_rows <- function(...) delRowsGLPK(...)
glp_delete_index <- function(...) deleteIndexGLPK(...)
glp_delete_prob <- function(...) delProbGLPK(...)
glp_erase_prob <- function(...) eraseProbGLPK(...)
glp_exact <- function(...) solveSimplexExactGLPK(...)
glp_factorize <- function(...) factorizeGLPK(...)
glp_find_col <- function(...) findColGLPK(...)
glp_find_row <- function(...) findRowGLPK(...)
glp_get_bfcp <- function(...) getBfcpGLPK(...)
glp_get_bhead <- function(...) getBheadGLPK(...)
glp_get_col_bind <- function(...) getCbindGLPK(...)
glp_get_col_dual <- function(...) getColDualGLPK(...)
glp_get_col_kind <- function(...) getColKindGLPK(...)
glp_get_col_lb <- function(...) getColLowBndGLPK(...)
glp_get_col_name <- function(...) getColNameGLPK(...)
glp_get_col_prim <- function(...) getColPrimGLPK(...)
glp_get_col_stat <- function(...) getColStatGLPK(...)
glp_get_col_type <- function(...) getColTypeGLPK(...)
glp_get_col_ub <- function(...) getColUppBndGLPK(...)
glp_get_dual_stat <- function(...) getDualStatGLPK(...)
glp_get_mat_col <- function(...) getMatColGLPK(...)
glp_get_mat_row <- function(...) getMatRowGLPK(...)
glp_get_num_bin <- function(...) getNumBinGLPK(...)
glp_get_num_cols <- function(...) getNumColsGLPK(...)
glp_get_num_int <- function(...) getNumIntGLPK(...)
glp_get_num_nz <- function(...) getNumNnzGLPK(...)
glp_get_num_rows <- function(...) getNumRowsGLPK(...)
glp_get_obj_coef <- function(...) getObjCoefGLPK(...)
glp_get_obj_dir <- function(...) getObjDirGLPK(...)
glp_get_obj_name <- function(...) getObjNameGLPK(...)
glp_get_obj_val <- function(...) getObjValGLPK(...)
glp_get_prim_stat <- function(...) getPrimStatGLPK(...)
glp_get_prob_name <- function(...) getProbNameGLPK(...)
glp_get_rii <- function(...) getRiiGLPK(...)
glp_get_row_bind <- function(...) getRbindGLPK(...)
glp_get_row_dual <- function(...) getRowDualGLPK(...)
glp_get_row_lb <- function(...) getRowLowBndGLPK(...)
glp_get_row_name <- function(...) getRowNameGLPK(...)
glp_get_row_prim <- function(...) getRowPrimGLPK(...)
glp_get_row_stat <- function(...) getRowStatGLPK(...)
glp_get_row_type <- function(...) getRowTypeGLPK(...)
glp_get_row_ub <- function(...) getRowUppBndGLPK(...)
glp_get_sjj <- function(...) getSjjGLPK(...)
glp_get_status <- function(...) getSolStatGLPK(...)
glp_get_unbnd_ray <- function(...) getUnbndRayGLPK(...)
glp_init_iocp <- function(...) setDefaultMIPParmGLPK(...)
glp_init_iptcp <- function(...) setDefaultIptParmGLPK(...)
glp_interior <- function(...) solveInteriorGLPK(...)
glp_intopt <- function(...) solveMIPGLPK(...)
glp_ipt_col_dual <- function(...) getColDualIptGLPK(...)
glp_ipt_col_prim <- function(...) getColPrimIptGLPK(...)
glp_ipt_obj_val <- function(...) getObjValIptGLPK(...)
glp_ipt_row_dual <- function(...) getRowDualIptGLPK(...)
glp_ipt_row_prim <- function(...) getRowPrimIptGLPK(...)
glp_ipt_status <- function(...) getSolStatIptGLPK(...)
glp_load_matrix <- function(...) loadMatrixGLPK(...)
glp_mip_col_val <- function(...) mipColValGLPK(...)
glp_mip_obj_val <- function(...) mipObjValGLPK(...)
glp_mip_row_val <- function(...) mipRowValGLPK(...)
glp_mip_status <- function(...) mipStatusGLPK(...)
glp_mpl_alloc_wksp <- function(...) mplAllocWkspGLPK(...)
glp_mpl_build_prob <- function(...) mplBuildProbGLPK(...)
glp_mpl_free_wksp <- function(...) mplFreeWkspGLPK(...)
glp_mpl_generate <- function(...) mplGenerateGLPK(...)
glp_mpl_postsolve <- function(...) mplPostsolveGLPK(...)
glp_mpl_read_data <- function(...) mplReadDataGLPK(...)
glp_mpl_read_model <- function(...) mplReadModelGLPK(...)
glp_print_ipt <- function(...) printIptGLPK(...)
glp_print_mip <- function(...) printMIPGLPK(...)
glp_print_ranges <- function(...) printRangesGLPK(...)
glp_print_sol <- function(...) printSolGLPK(...)
glp_read_ipt <- function(...) readIptGLPK(...)
glp_read_lp <- function(...) readLPGLPK(...)
glp_read_mip <- function(...) readMIPGLPK(...)
glp_read_mps <- function(...) readMPSGLPK(...)
glp_read_prob <- function(...) readProbGLPK(...)
glp_read_sol <- function(...) readSolGLPK(...)
glp_scale_prob <- function(...) scaleProbGLPK(...)
glp_set_bfcp <- function(...) setBfcpGLPK(...)
glp_set_col_bnds <- function(...) setColBndGLPK(...)
glp_set_col_kind <- function(...) setColKindGLPK(...)
glp_set_col_name <- function(...) setColNameGLPK(...)
glp_set_col_stat <- function(...) setColStatGLPK(...)
glp_set_mat_col <- function(...) setMatColGLPK(...)
glp_set_mat_row <- function(...) setMatRowGLPK(...)
glp_set_obj_coef <- function(...) setObjCoefGLPK(...)
glp_set_obj_dir <- function(...) setObjDirGLPK(...)
glp_set_obj_name <- function(...) setObjNameGLPK(...)
glp_set_prob_name <- function(...) setProbNameGLPK(...)
glp_set_rii <- function(...) setRiiGLPK(...)
glp_set_row_bnds <- function(...) setRowBndGLPK(...)
glp_set_row_name <- function(...) setRowNameGLPK(...)
glp_set_row_stat <- function(...) setRowStatGLPK(...)
glp_set_sjj <- function(...) setSjjGLPK(...)
glp_simplex <- function(...) solveSimplexGLPK(...)
glp_sort_matrix <- function(...) sortMatrixGLPK(...)
glp_std_basis <- function(...) stdBasisGLPK(...)
glp_term_out <- function(...) termOutGLPK(...)
glp_unscale_prob <- function(...) unscaleProbGLPK(...)
glp_version <- function(...) versionGLPK(...)
glp_warm_up <- function(...) warmUpGLPK(...)
glp_write_ipt <- function(...) writeIptGLPK(...)
glp_write_lp <- function(...) writeLPGLPK(...)
glp_write_mip <- function(...) writeMIPGLPK(...)
glp_write_mps <- function(...) writeMPSGLPK(...)
glp_write_prob <- function(...) writeProbGLPK(...)
glp_write_sol <- function(...) writeSolGLPK(...)
| /R/glpkAPI_mapping.r | no_license | skranz/repgame | R | false | false | 7,006 | r | #------------------------------------------------------------------------------#
# map GLPK(...) function names to glpkAPI function names #
#------------------------------------------------------------------------------#
# GLPK(...) # glpkAPI
#library(glpkAPI,warn.conflicts=FALSE,verbose=FALSE)
glp_add_cols <- function(...) addColsGLPK(...)
glp_add_rows <- function(...) addRowsGLPK(...)
glp_adv_basis <- function(...) advBasisGLPK(...)
glp_bf_exists <- function(...) bfExistsGLPK(...)
glp_bf_updated <- function(...) bfUpdatedGLPK(...)
glp_check_dup <- function(...) checkDupGLPK(...)
glp_copy_prob <- function(...) copyProbGLPK(...)
glp_cpx_basis <- function(...) cpxBasisGLPK(...)
glp_create_index <- function(...) createIndexGLPK(...)
glp_create_prob <- function(...) initProbGLPK(...)
glp_del_cols <- function(...) delColsGLPK(...)
glp_del_rows <- function(...) delRowsGLPK(...)
glp_delete_index <- function(...) deleteIndexGLPK(...)
glp_delete_prob <- function(...) delProbGLPK(...)
glp_erase_prob <- function(...) eraseProbGLPK(...)
glp_exact <- function(...) solveSimplexExactGLPK(...)
glp_factorize <- function(...) factorizeGLPK(...)
glp_find_col <- function(...) findColGLPK(...)
glp_find_row <- function(...) findRowGLPK(...)
glp_get_bfcp <- function(...) getBfcpGLPK(...)
glp_get_bhead <- function(...) getBheadGLPK(...)
glp_get_col_bind <- function(...) getCbindGLPK(...)
glp_get_col_dual <- function(...) getColDualGLPK(...)
glp_get_col_kind <- function(...) getColKindGLPK(...)
glp_get_col_lb <- function(...) getColLowBndGLPK(...)
glp_get_col_name <- function(...) getColNameGLPK(...)
glp_get_col_prim <- function(...) getColPrimGLPK(...)
glp_get_col_stat <- function(...) getColStatGLPK(...)
glp_get_col_type <- function(...) getColTypeGLPK(...)
glp_get_col_ub <- function(...) getColUppBndGLPK(...)
glp_get_dual_stat <- function(...) getDualStatGLPK(...)
glp_get_mat_col <- function(...) getMatColGLPK(...)
glp_get_mat_row <- function(...) getMatRowGLPK(...)
glp_get_num_bin <- function(...) getNumBinGLPK(...)
glp_get_num_cols <- function(...) getNumColsGLPK(...)
glp_get_num_int <- function(...) getNumIntGLPK(...)
glp_get_num_nz <- function(...) getNumNnzGLPK(...)
glp_get_num_rows <- function(...) getNumRowsGLPK(...)
glp_get_obj_coef <- function(...) getObjCoefGLPK(...)
glp_get_obj_dir <- function(...) getObjDirGLPK(...)
glp_get_obj_name <- function(...) getObjNameGLPK(...)
glp_get_obj_val <- function(...) getObjValGLPK(...)
glp_get_prim_stat <- function(...) getPrimStatGLPK(...)
glp_get_prob_name <- function(...) getProbNameGLPK(...)
glp_get_rii <- function(...) getRiiGLPK(...)
glp_get_row_bind <- function(...) getRbindGLPK(...)
glp_get_row_dual <- function(...) getRowDualGLPK(...)
glp_get_row_lb <- function(...) getRowLowBndGLPK(...)
glp_get_row_name <- function(...) getRowNameGLPK(...)
glp_get_row_prim <- function(...) getRowPrimGLPK(...)
glp_get_row_stat <- function(...) getRowStatGLPK(...)
glp_get_row_type <- function(...) getRowTypeGLPK(...)
glp_get_row_ub <- function(...) getRowUppBndGLPK(...)
glp_get_sjj <- function(...) getSjjGLPK(...)
glp_get_status <- function(...) getSolStatGLPK(...)
glp_get_unbnd_ray <- function(...) getUnbndRayGLPK(...)
glp_init_iocp <- function(...) setDefaultMIPParmGLPK(...)
glp_init_iptcp <- function(...) setDefaultIptParmGLPK(...)
glp_interior <- function(...) solveInteriorGLPK(...)
glp_intopt <- function(...) solveMIPGLPK(...)
glp_ipt_col_dual <- function(...) getColDualIptGLPK(...)
glp_ipt_col_prim <- function(...) getColPrimIptGLPK(...)
glp_ipt_obj_val <- function(...) getObjValIptGLPK(...)
glp_ipt_row_dual <- function(...) getRowDualIptGLPK(...)
glp_ipt_row_prim <- function(...) getRowPrimIptGLPK(...)
glp_ipt_status <- function(...) getSolStatIptGLPK(...)
glp_load_matrix <- function(...) loadMatrixGLPK(...)
glp_mip_col_val <- function(...) mipColValGLPK(...)
glp_mip_obj_val <- function(...) mipObjValGLPK(...)
glp_mip_row_val <- function(...) mipRowValGLPK(...)
glp_mip_status <- function(...) mipStatusGLPK(...)
glp_mpl_alloc_wksp <- function(...) mplAllocWkspGLPK(...)
glp_mpl_build_prob <- function(...) mplBuildProbGLPK(...)
glp_mpl_free_wksp <- function(...) mplFreeWkspGLPK(...)
glp_mpl_generate <- function(...) mplGenerateGLPK(...)
glp_mpl_postsolve <- function(...) mplPostsolveGLPK(...)
glp_mpl_read_data <- function(...) mplReadDataGLPK(...)
glp_mpl_read_model <- function(...) mplReadModelGLPK(...)
glp_print_ipt <- function(...) printIptGLPK(...)
glp_print_mip <- function(...) printMIPGLPK(...)
glp_print_ranges <- function(...) printRangesGLPK(...)
glp_print_sol <- function(...) printSolGLPK(...)
glp_read_ipt <- function(...) readIptGLPK(...)
glp_read_lp <- function(...) readLPGLPK(...)
glp_read_mip <- function(...) readMIPGLPK(...)
glp_read_mps <- function(...) readMPSGLPK(...)
glp_read_prob <- function(...) readProbGLPK(...)
glp_read_sol <- function(...) readSolGLPK(...)
glp_scale_prob <- function(...) scaleProbGLPK(...)
glp_set_bfcp <- function(...) setBfcpGLPK(...)
glp_set_col_bnds <- function(...) setColBndGLPK(...)
glp_set_col_kind <- function(...) setColKindGLPK(...)
glp_set_col_name <- function(...) setColNameGLPK(...)
glp_set_col_stat <- function(...) setColStatGLPK(...)
glp_set_mat_col <- function(...) setMatColGLPK(...)
glp_set_mat_row <- function(...) setMatRowGLPK(...)
glp_set_obj_coef <- function(...) setObjCoefGLPK(...)
glp_set_obj_dir <- function(...) setObjDirGLPK(...)
glp_set_obj_name <- function(...) setObjNameGLPK(...)
glp_set_prob_name <- function(...) setProbNameGLPK(...)
glp_set_rii <- function(...) setRiiGLPK(...)
glp_set_row_bnds <- function(...) setRowBndGLPK(...)
glp_set_row_name <- function(...) setRowNameGLPK(...)
glp_set_row_stat <- function(...) setRowStatGLPK(...)
glp_set_sjj <- function(...) setSjjGLPK(...)
glp_simplex <- function(...) solveSimplexGLPK(...)
glp_sort_matrix <- function(...) sortMatrixGLPK(...)
glp_std_basis <- function(...) stdBasisGLPK(...)
glp_term_out <- function(...) termOutGLPK(...)
glp_unscale_prob <- function(...) unscaleProbGLPK(...)
glp_version <- function(...) versionGLPK(...)
glp_warm_up <- function(...) warmUpGLPK(...)
glp_write_ipt <- function(...) writeIptGLPK(...)
glp_write_lp <- function(...) writeLPGLPK(...)
glp_write_mip <- function(...) writeMIPGLPK(...)
glp_write_mps <- function(...) writeMPSGLPK(...)
glp_write_prob <- function(...) writeProbGLPK(...)
glp_write_sol <- function(...) writeSolGLPK(...)
|
#' Fully-connected RNN where the output is to be fed back to input.
#'
#' @inheritParams layer_dense
#'
#' @param units Positive integer, dimensionality of the output space.
#' @param activation Activation function to use. If you pass `NULL`, no
#' activation is applied (ie. "linear" activation: `a(x) = x`).
#' @param use_bias Boolean, whether the layer uses a bias vector.
#' @param return_sequences Boolean. Whether to return the last output in the
#' output sequence, or the full sequence.
#' @param return_state Boolean (default FALSE). Whether to return the last state
#' in addition to the output.
#' @param go_backwards Boolean (default FALSE). If TRUE, process the input
#' sequence backwards and return the reversed sequence.
#' @param stateful Boolean (default FALSE). If TRUE, the last state for each
#' sample at index i in a batch will be used as initial state for the sample
#' of index i in the following batch.
#' @param unroll Boolean (default FALSE). If TRUE, the network will be unrolled,
#' else a symbolic loop will be used. Unrolling can speed-up a RNN, although
#' it tends to be more memory-intensive. Unrolling is only suitable for short
#' sequences.
#' @param kernel_initializer Initializer for the `kernel` weights matrix, used
#'   for the linear transformation of the inputs.
#' @param recurrent_initializer Initializer for the `recurrent_kernel` weights
#'   matrix, used for the linear transformation of the recurrent state.
#' @param bias_initializer Initializer for the bias vector.
#' @param kernel_regularizer Regularizer function applied to the `kernel`
#'   weights matrix.
#' @param recurrent_regularizer Regularizer function applied to the
#'   `recurrent_kernel` weights matrix.
#' @param bias_regularizer Regularizer function applied to the bias vector.
#' @param activity_regularizer Regularizer function applied to the output of the
#'   layer (its "activation").
#' @param kernel_constraint Constraint function applied to the `kernel` weights
#' matrix.
#' @param recurrent_constraint Constraint function applied to the
#' `recurrent_kernel` weights matrix.
#' @param bias_constraint Constraint function applied to the bias vector.
#' @param dropout Float between 0 and 1. Fraction of the units to drop for the
#' linear transformation of the inputs.
#' @param recurrent_dropout Float between 0 and 1. Fraction of the units to drop
#' for the linear transformation of the recurrent state.
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#'
#' @export
layer_simple_rnn <- function(object, units, activation = "tanh", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
args <- list(
units = as.integer(units),
activation = activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
if (keras_version() >= "2.0.5")
args$return_state <- return_state
create_layer(keras$layers$SimpleRNN, object, args)
}
#' Gated Recurrent Unit - Cho et al.
#'
#' @inheritParams layer_simple_rnn
#'
#' @param recurrent_activation Activation function to use for the recurrent
#'   step.
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [On the Properties of Neural Machine Translation:
#' Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
#' - [Empirical
#' Evaluation of Gated Recurrent Neural Networks on Sequence
#' Modeling](http://arxiv.org/abs/1412.3555v1)
#' - [A Theoretically Grounded
#' Application of Dropout in Recurrent Neural
#' Networks](http://arxiv.org/abs/1512.05287)
#'
#' @export
layer_gru <- function(object, units, activation = "tanh", recurrent_activation = "hard_sigmoid", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Collect the named arguments forwarded to the Python Keras GRU constructor.
# `units` is coerced to integer for Python; shape/batch values are converted
# by package helpers (normalize_shape, as_nullable_integer) defined elsewhere.
args <- list(
units = as.integer(units),
activation = activation,
recurrent_activation = recurrent_activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# return_state is only forwarded when the installed Keras supports it
# (the argument was introduced in Keras 2.0.5).
if (keras_version() >= "2.0.5")
args$return_state <- return_state
# create_layer() (package helper) instantiates the layer and attaches it
# to `object` (a model or tensor).
create_layer(keras$layers$GRU, object, args)
}
#' Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
#'
#' Can only be run on GPU, with the TensorFlow backend.
#'
#' @inheritParams layer_simple_rnn
#'
#' @family recurrent layers
#'
#' @section References:
#' - [On the Properties of Neural Machine Translation:
#' Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
#' - [Empirical
#' Evaluation of Gated Recurrent Neural Networks on Sequence
#' Modeling](http://arxiv.org/abs/1412.3555v1)
#' - [A Theoretically Grounded
#' Application of Dropout in Recurrent Neural
#' Networks](http://arxiv.org/abs/1512.05287)
#'
#' @export
layer_cudnn_gru <- function(object, units,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, return_state = FALSE, stateful = FALSE,
input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Arguments for the Python CuDNNGRU constructor. Note this GPU-only variant
# exposes no activation/dropout/unroll arguments (see the signature above).
args <- list(
units = as.integer(units),
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
return_sequences = return_sequences,
return_state = return_state,
stateful = stateful,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# Unlike layer_gru(), return_state is passed unconditionally here.
create_layer(keras$layers$CuDNNGRU, object, args)
}
#' Long-Short Term Memory unit - Hochreiter 1997.
#'
#' For a step-by-step description of the algorithm, see [this
#' tutorial](http://deeplearning.net/tutorial/lstm.html).
#'
#' @inheritParams layer_gru
#'
#' @param unit_forget_bias Boolean. If TRUE, add 1 to the bias of the forget
#'   gate at initialization. Setting it to true will also force
#'   `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
#'   al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
#' - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#' @family recurrent layers
#'
#' @export
layer_lstm <- function(object, units, activation = "tanh", recurrent_activation = "hard_sigmoid", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
unit_forget_bias = TRUE, kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Named arguments forwarded to the Python Keras LSTM constructor; `units`
# is coerced to integer, shapes/batch size normalized by package helpers.
args <- list(
units = as.integer(units),
activation = activation,
recurrent_activation = recurrent_activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
unit_forget_bias = unit_forget_bias,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# return_state was introduced in Keras 2.0.5; only forward it when supported.
if (keras_version() >= "2.0.5")
args$return_state <- return_state
create_layer(keras$layers$LSTM, object, args)
}
#' Fast LSTM implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
#'
#' Can only be run on GPU, with the TensorFlow backend.
#'
#' @inheritParams layer_lstm
#'
#' @section References:
#' - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
#' - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#' @family recurrent layers
#'
#' @export
layer_cudnn_lstm <- function(object, units,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal",
bias_initializer = "zeros", unit_forget_bias = TRUE,
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, return_state = FALSE, stateful = FALSE,
input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Arguments for the Python CuDNNLSTM constructor. This GPU-only variant
# exposes no activation/dropout/unroll arguments (see the signature above).
args <- list(
units = as.integer(units),
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
unit_forget_bias = unit_forget_bias,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
return_sequences = return_sequences,
return_state = return_state,
stateful = stateful,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# Unlike layer_lstm(), return_state is passed unconditionally here.
create_layer(keras$layers$CuDNNLSTM, object, args)
}
| /R/layers-recurrent.R | no_license | rdrr1990/keras | R | false | false | 14,867 | r |
#' Fully-connected RNN where the output is to be fed back to input.
#'
#' @inheritParams layer_dense
#'
#' @param units Positive integer, dimensionality of the output space.
#' @param activation Activation function to use. If you pass `NULL`, no
#'   activation is applied (ie. "linear" activation: `a(x) = x`).
#' @param use_bias Boolean, whether the layer uses a bias vector.
#' @param return_sequences Boolean. Whether to return the last output in the
#'   output sequence, or the full sequence.
#' @param return_state Boolean (default FALSE). Whether to return the last state
#'   in addition to the output.
#' @param go_backwards Boolean (default FALSE). If TRUE, process the input
#'   sequence backwards and return the reversed sequence.
#' @param stateful Boolean (default FALSE). If TRUE, the last state for each
#'   sample at index i in a batch will be used as initial state for the sample
#'   of index i in the following batch.
#' @param unroll Boolean (default FALSE). If TRUE, the network will be unrolled,
#'   else a symbolic loop will be used. Unrolling can speed-up a RNN, although
#'   it tends to be more memory-intensive. Unrolling is only suitable for short
#'   sequences.
#' @param kernel_initializer Initializer for the `kernel` weights matrix, used
#'   for the linear transformation of the inputs..
#' @param recurrent_initializer Initializer for the `recurrent_kernel` weights
#'   matrix, used for the linear transformation of the recurrent state..
#' @param bias_initializer Initializer for the bias vector.
#' @param kernel_regularizer Regularizer function applied to the `kernel`
#'   weights matrix.
#' @param recurrent_regularizer Regularizer function applied to the
#'   `recurrent_kernel` weights matrix.
#' @param bias_regularizer Regularizer function applied to the bias vector.
#' @param activity_regularizer Regularizer function applied to the output of the
#'   layer (its "activation")..
#' @param kernel_constraint Constraint function applied to the `kernel` weights
#'   matrix.
#' @param recurrent_constraint Constraint function applied to the
#'   `recurrent_kernel` weights matrix.
#' @param bias_constraint Constraint function applied to the bias vector.
#' @param dropout Float between 0 and 1. Fraction of the units to drop for the
#'   linear transformation of the inputs.
#' @param recurrent_dropout Float between 0 and 1. Fraction of the units to drop
#'   for the linear transformation of the recurrent state.
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#'
#' @export
layer_simple_rnn <- function(object, units, activation = "tanh", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Named arguments forwarded to the Python Keras SimpleRNN constructor;
# `units` is coerced to integer, shapes/batch size normalized by package
# helpers (normalize_shape, as_nullable_integer) defined elsewhere.
args <- list(
units = as.integer(units),
activation = activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# return_state was introduced in Keras 2.0.5; only forward it when supported.
if (keras_version() >= "2.0.5")
args$return_state <- return_state
create_layer(keras$layers$SimpleRNN, object, args)
}
#' Gated Recurrent Unit - Cho et al.
#'
#' @inheritParams layer_simple_rnn
#'
#' @param recurrent_activation Activation function to use for the recurrent
#'   step.
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [On the Properties of Neural Machine Translation:
#' Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
#' - [Empirical
#' Evaluation of Gated Recurrent Neural Networks on Sequence
#' Modeling](http://arxiv.org/abs/1412.3555v1)
#' - [A Theoretically Grounded
#' Application of Dropout in Recurrent Neural
#' Networks](http://arxiv.org/abs/1512.05287)
#'
#' @export
layer_gru <- function(object, units, activation = "tanh", recurrent_activation = "hard_sigmoid", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Collect the named arguments forwarded to the Python Keras GRU constructor.
# `units` is coerced to integer for Python; shape/batch values are converted
# by package helpers (normalize_shape, as_nullable_integer) defined elsewhere.
args <- list(
units = as.integer(units),
activation = activation,
recurrent_activation = recurrent_activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# return_state is only forwarded when the installed Keras supports it
# (the argument was introduced in Keras 2.0.5).
if (keras_version() >= "2.0.5")
args$return_state <- return_state
# create_layer() (package helper) instantiates the layer and attaches it
# to `object` (a model or tensor).
create_layer(keras$layers$GRU, object, args)
}
#' Fast GRU implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
#'
#' Can only be run on GPU, with the TensorFlow backend.
#'
#' @inheritParams layer_simple_rnn
#'
#' @family recurrent layers
#'
#' @section References:
#' - [On the Properties of Neural Machine Translation:
#' Encoder-Decoder Approaches](https://arxiv.org/abs/1409.1259)
#' - [Empirical
#' Evaluation of Gated Recurrent Neural Networks on Sequence
#' Modeling](http://arxiv.org/abs/1412.3555v1)
#' - [A Theoretically Grounded
#' Application of Dropout in Recurrent Neural
#' Networks](http://arxiv.org/abs/1512.05287)
#'
#' @export
layer_cudnn_gru <- function(object, units,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, return_state = FALSE, stateful = FALSE,
input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Arguments for the Python CuDNNGRU constructor. Note this GPU-only variant
# exposes no activation/dropout/unroll arguments (see the signature above).
args <- list(
units = as.integer(units),
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
return_sequences = return_sequences,
return_state = return_state,
stateful = stateful,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# Unlike layer_gru(), return_state is passed unconditionally here.
create_layer(keras$layers$CuDNNGRU, object, args)
}
#' Long-Short Term Memory unit - Hochreiter 1997.
#'
#' For a step-by-step description of the algorithm, see [this
#' tutorial](http://deeplearning.net/tutorial/lstm.html).
#'
#' @inheritParams layer_gru
#'
#' @param unit_forget_bias Boolean. If TRUE, add 1 to the bias of the forget
#'   gate at initialization. Setting it to true will also force
#'   `bias_initializer="zeros"`. This is recommended in [Jozefowicz et
#'   al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
#'
#' @template roxlate-recurrent-layer
#'
#' @section References:
#' - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
#' - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#' @family recurrent layers
#'
#' @export
layer_lstm <- function(object, units, activation = "tanh", recurrent_activation = "hard_sigmoid", use_bias = TRUE,
return_sequences = FALSE, return_state = FALSE, go_backwards = FALSE, stateful = FALSE, unroll = FALSE,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal", bias_initializer = "zeros",
unit_forget_bias = TRUE, kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL,
activity_regularizer = NULL, kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
dropout = 0.0, recurrent_dropout = 0.0, input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Named arguments forwarded to the Python Keras LSTM constructor; `units`
# is coerced to integer, shapes/batch size normalized by package helpers.
args <- list(
units = as.integer(units),
activation = activation,
recurrent_activation = recurrent_activation,
use_bias = use_bias,
return_sequences = return_sequences,
go_backwards = go_backwards,
stateful = stateful,
unroll = unroll,
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
unit_forget_bias = unit_forget_bias,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
dropout = dropout,
recurrent_dropout = recurrent_dropout,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# return_state was introduced in Keras 2.0.5; only forward it when supported.
if (keras_version() >= "2.0.5")
args$return_state <- return_state
create_layer(keras$layers$LSTM, object, args)
}
#' Fast LSTM implementation backed by [CuDNN](https://developer.nvidia.com/cudnn).
#'
#' Can only be run on GPU, with the TensorFlow backend.
#'
#' @inheritParams layer_lstm
#'
#' @section References:
#' - [Long short-term memory](http://www.bioinf.jku.at/publications/older/2604.pdf) (original 1997 paper)
#' - [Supervised sequence labeling with recurrent neural networks](http://www.cs.toronto.edu/~graves/preprint.pdf)
#' - [A Theoretically Grounded Application of Dropout in Recurrent Neural Networks](http://arxiv.org/abs/1512.05287)
#'
#' @family recurrent layers
#'
#' @export
layer_cudnn_lstm <- function(object, units,
kernel_initializer = "glorot_uniform", recurrent_initializer = "orthogonal",
bias_initializer = "zeros", unit_forget_bias = TRUE,
kernel_regularizer = NULL, recurrent_regularizer = NULL, bias_regularizer = NULL, activity_regularizer = NULL,
kernel_constraint = NULL, recurrent_constraint = NULL, bias_constraint = NULL,
return_sequences = FALSE, return_state = FALSE, stateful = FALSE,
input_shape = NULL, batch_input_shape = NULL, batch_size = NULL,
dtype = NULL, name = NULL, trainable = NULL, weights = NULL) {
# Arguments for the Python CuDNNLSTM constructor. This GPU-only variant
# exposes no activation/dropout/unroll arguments (see the signature above).
args <- list(
units = as.integer(units),
kernel_initializer = kernel_initializer,
recurrent_initializer = recurrent_initializer,
bias_initializer = bias_initializer,
unit_forget_bias = unit_forget_bias,
kernel_regularizer = kernel_regularizer,
recurrent_regularizer = recurrent_regularizer,
bias_regularizer = bias_regularizer,
activity_regularizer = activity_regularizer,
kernel_constraint = kernel_constraint,
recurrent_constraint = recurrent_constraint,
bias_constraint = bias_constraint,
return_sequences = return_sequences,
return_state = return_state,
stateful = stateful,
input_shape = normalize_shape(input_shape),
batch_input_shape = normalize_shape(batch_input_shape),
batch_size = as_nullable_integer(batch_size),
dtype = dtype,
name = name,
trainable = trainable,
weights = weights
)
# Unlike layer_lstm(), return_state is passed unconditionally here.
create_layer(keras$layers$CuDNNLSTM, object, args)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/determine.subgroups.R
\name{determine.subgroups}
\alias{determine.subgroups}
\title{Determines subgroups.}
\usage{
determine.subgroups(data_list, base_syntax, n_subj, chisq_cutoff,
file_order, elig_paths, confirm_subgroup, out_path = NULL, sub_feature)
}
\arguments{
\item{data_list}{A list of all datasets.}
\item{base_syntax}{A character vector containing syntax that never changes.}
\item{n_subj}{The number of subjects in the sample.}
\item{chisq_cutoff}{Cutoff used in order for MI to be considered significant.}
\item{file_order}{A data frame containing the order of the files and
the names of the files. Used to merge in subgroup assignment and preserve
order.}
\item{elig_paths}{A character vector containing eligible paths that
gimme is allowed to add to the model. Ensures only EPCs from allowable paths
are considered in the creation of the similarity matrix.}
\item{confirm_subgroup}{A dataframe with the first column a string vector of data file names
without extensions and the second column an integer vector of subgroup labels.}
}
\value{
Returns a sub object containing the similarity matrix, the number of
subgroups, the modularity associated with the subgroup memberships,
and a data frame containing the file names and subgroup memberships.
}
\description{
Determines subgroups.
}
\keyword{internal}
| /gimme/man/determine.subgroups.Rd | no_license | kaduffy/gimme-OLD | R | false | true | 1,401 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/determine.subgroups.R
\name{determine.subgroups}
\alias{determine.subgroups}
\title{Determines subgroups.}
\usage{
determine.subgroups(data_list, base_syntax, n_subj, chisq_cutoff,
file_order, elig_paths, confirm_subgroup, out_path = NULL, sub_feature)
}
\arguments{
\item{data_list}{A list of all datasets.}
\item{base_syntax}{A character vector containing syntax that never changes.}
\item{n_subj}{The number of subjects in the sample.}
\item{chisq_cutoff}{Cutoff used in order for MI to be considered significant.}
\item{file_order}{A data frame containing the order of the files and
the names of the files. Used to merge in subgroup assignment and preserve
order.}
\item{elig_paths}{A character vector containing eligible paths that
gimme is allowed to add to the model. Ensures only EPCs from allowable paths
are considered in the creation of the similarity matrix.}
\item{confirm_subgroup}{A dataframe with the first column a string vector of data file names
without extensions and the second column an integer vector of subgroup labels.}
}
\value{
Returns a sub object containing the similarity matrix, the number of
subgroups, the modularity associated with the subgroup memberships,
and a data frame containing the file names and subgroup memberships.
}
\description{
Determines subgroups.
}
\keyword{internal}
|
# ------------------------------------------------------------------------
#
# Title : London
# By : Victor
# Date : 2018-05-11
#
# ------------------------------------------------------------------------
# Packages ----------------------------------------------------------------
library( r2d3maps )
library( sf )
library( rmapshaper )
# Data --------------------------------------------------------------------
# from: https://data.london.gov.uk/dataset/statistical-gis-boundary-files-london
london <- read_sf("dev/London-wards-2014/London-wards-2014_ESRI/London_Ward.shp")
plot(st_geometry(london))
london
# Reproject to EPSG:4326 (WGS84 lon/lat) for web mapping; the source CRS is
# read from the shapefile — presumably British National Grid, TODO confirm.
london <- st_transform(london, crs = 4326)
# Simplify shapes
# (ms_simplify reduces vertex count; before/after object sizes noted below)
london2 <- ms_simplify(london)
# pryr::object_size(london)
# ##> 2.96 MB
# pryr::object_size(london2)
# ##> 532 kB
# D3 map ------------------------------------------------------------------
# Interactive D3 map of the simplified wards; tooltip shows the NAME column.
d3_map(shape = london2) %>%
add_tooltip("{NAME}") %>%
add_labs("London city")
| /dev/london.R | no_license | dreamRs/r2d3maps | R | false | false | 974 | r |
# ------------------------------------------------------------------------
#
# Title : London
# By : Victor
# Date : 2018-05-11
#
# ------------------------------------------------------------------------
# Packages ----------------------------------------------------------------
library( r2d3maps )
library( sf )
library( rmapshaper )
# Data --------------------------------------------------------------------
# from: https://data.london.gov.uk/dataset/statistical-gis-boundary-files-london
london <- read_sf("dev/London-wards-2014/London-wards-2014_ESRI/London_Ward.shp")
plot(st_geometry(london))
london
# Reproject to EPSG:4326 (WGS84 lon/lat) for web mapping; the source CRS is
# read from the shapefile — presumably British National Grid, TODO confirm.
london <- st_transform(london, crs = 4326)
# Simplify shapes
# (ms_simplify reduces vertex count; before/after object sizes noted below)
london2 <- ms_simplify(london)
# pryr::object_size(london)
# ##> 2.96 MB
# pryr::object_size(london2)
# ##> 532 kB
# D3 map ------------------------------------------------------------------
# Interactive D3 map of the simplified wards; tooltip shows the NAME column.
d3_map(shape = london2) %>%
add_tooltip("{NAME}") %>%
add_labs("London city")
|
library(DescTools)
### Name: Canvas
### Title: Canvas for Geometric Plotting
### Aliases: Canvas
### Keywords: hplot
### ** Examples
# Open an empty square plotting canvas (argument 7 presumably sets the
# half-width of the region — see ?DescTools::Canvas) and draw centered text.
Canvas(7)
text(0, 0, "Hello world!", cex=5)
| /data/genthat_extracted_code/DescTools/examples/Canvas.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 184 | r | library(DescTools)
### Name: Canvas
### Title: Canvas for Geometric Plotting
### Aliases: Canvas
### Keywords: hplot
### ** Examples
# Open an empty square plotting canvas (argument 7 presumably sets the
# half-width of the region — see ?DescTools::Canvas) and draw centered text.
Canvas(7)
text(0, 0, "Hello world!", cex=5)
|
library(keras)
library(lime)
library(readr)
library(dplyr)
library(tidyr)
library(reticulate)
library(tensorflow)
library(tidyquant)
library(rsample)
library(recipes)
library(yardstick)
library(corrr)
library(ggplot2)
library(forcats)
#Functions
NcalcMeasures <- function(TP, FN, FP, TN) {
  # Derive standard binary-classification quality measures from the four
  # confusion-matrix cells. Percentages are on a 0-100 scale; ratios with a
  # zero denominator propagate NaN, matching plain arithmetic.
  #
  # Args:
  #   TP, FN, FP, TN: counts of true positives, false negatives,
  #     false positives and true negatives.
  # Returns:
  #   A named list echoing the four counts plus accuracy, precision on the
  #   positive class (pgood), precision on the negative class (pbad), the
  #   false/true positive rates (FPR/TPR) and Matthews correlation (MCC).
  total   <- TP + FP + FN + TN
  mcc_den <- sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
  list(
    TP       = TP,
    FN       = FN,
    TN       = TN,
    FP       = FP,
    accuracy = 100.0 * (TP + TN) / total,
    pgood    = 100.0 * TP / (TP + FP),
    pbad     = 100.0 * TN / (FN + TN),
    FPR      = 100.0 * FP / (FP + TN),
    TPR      = 100.0 * TP / (TP + FN),
    MCC      = ((TP * TN) - (FP * FN)) / mcc_den
  )
}
tab_int2double <- function(intTab) {
  # Convert an integer table/matrix to double storage, preserving dim,
  # dimnames and class attributes.
  #
  # BUG FIX: the original looped `for (i in nrow(intTab))` /
  # `for (j in ncol(intTab))`, which iterates over the single value nrow/ncol
  # rather than 1..nrow — only the last cell was touched, and the conversion
  # worked solely because assigning one double coerces the whole matrix.
  # It also left empty (0-row) tables as integer. `storage.mode<-` performs
  # the coercion directly for every cell, including the empty case.
  doubleTab <- intTab
  storage.mode(doubleTab) <- "double"
  return(doubleTab)
}
# DATASET -----------------------------------------------------------------
# NOTE(review): "D:WA_Fn-..." is a drive-relative Windows path (no separator
# after "D:"); it only resolves if the current directory on D: is the data
# folder — confirm the intended path.
churn_data_raw <- read_csv("D:WA_Fn-UseC_-Telco-Customer-Churn.csv")
glimpse(churn_data_raw)
# Drop rows with missing values and move the outcome column first.
churn_data_tbl <- churn_data_raw %>%
drop_na() %>%
dplyr::select(Churn, everything())
# Split test/training sets (80/20), seeded for reproducibility.
set.seed(100)
train_test_split <- initial_split(churn_data_tbl, prop = 0.8)
train_test_split
# Retrieve train and test sets.
train_tbl_with_ids <- training(train_test_split)
# BUG FIX: the hold-out set must come from testing(); the original called
# training() twice, making the "test" set identical to the training set and
# invalidating every downstream evaluation metric.
test_tbl_with_ids <- testing(train_test_split)
# Drop the identifier column; it is not a predictor.
train_tbl <- select(train_tbl_with_ids, -customerID)
test_tbl <- select(test_tbl_with_ids, -customerID)
# Exploratory histograms: tenure with default bins, then 6 bins, and
# TotalCharges raw vs log — motivating the discretize/log recipe steps below.
churn_data_tbl %>%
ggplot(aes(x = tenure)) +
geom_histogram()
churn_data_tbl %>%
ggplot(aes(x = tenure)) +
geom_histogram(bins = 6)
churn_data_tbl %>%
ggplot(aes(x = TotalCharges)) +
geom_histogram(bins = 100)
churn_data_tbl %>%
ggplot(aes(x = log(TotalCharges))) +
geom_histogram(bins = 100)
# Preprocessing recipe, estimated on the training set only: bin tenure into
# 6 groups, log-transform TotalCharges, one-hot encode nominal predictors,
# then center and scale all predictors.
rec_obj <- recipe(Churn ~ ., data = train_tbl) %>%
step_discretize(tenure, options = list(cuts = 6)) %>%
step_log(TotalCharges) %>%
step_dummy(all_nominal(), -all_outcomes()) %>%
step_center(all_predictors(), -all_outcomes()) %>%
step_scale(all_predictors(), -all_outcomes()) %>%
prep(data = train_tbl)
rec_obj
# Apply the estimated recipe to both splits; drop the outcome from X.
x_train_tbl <- bake(rec_obj, train_tbl) %>% dplyr::select(-Churn)
x_test_tbl <- bake(rec_obj, test_tbl) %>% dplyr::select(-Churn)
glimpse(x_train_tbl)
# Encode the outcome as 0/1 (Yes -> 1) for binary crossentropy.
y_train_vec <- ifelse(pull(train_tbl, Churn) == "Yes", 1, 0)
y_test_vec <- ifelse(pull(test_tbl, Churn) == "Yes", 1, 0)
# Binary classifier: two 64-unit ReLU hidden layers with 30% dropout each
# and a sigmoid output; input width matches the baked predictor matrix.
model_keras <- keras_model_sequential() %>%
layer_dense(
units = 64,
kernel_initializer = "uniform",
activation = "relu",
input_shape = ncol(x_train_tbl)
) %>%
layer_dropout(rate = 0.3) %>%
layer_dense(
units = 64,
kernel_initializer = "uniform",
activation = "relu"
) %>%
layer_dropout(rate = 0.3) %>%
layer_dense(
units = 1,
kernel_initializer = "uniform",
activation = "sigmoid")
# Binary crossentropy loss with Adam; track accuracy during training.
model_keras %>% compile(
loss = "binary_crossentropy",
optimizer = "adam",
metrics = "accuracy"
)
# Train for 35 epochs, mini-batches of 50, holding out 30% of the training
# data for validation.
history <- model_keras %>% keras::fit(
as.matrix(x_train_tbl),
y_train_vec,
batch_size = 50,
epochs = 35,
validation_split = 0.30
)
# Persist the trained model (HDF5, optimizer state included).
save_model_hdf5(model_keras, 'D:/R/Churn Prediction/customer_churn.hdf5', overwrite = TRUE, include_optimizer = TRUE)
# Predicted classes and probabilities on the hold-out set.
# NOTE(review): predict_classes()/predict_proba() were removed in newer keras
# R releases — confirm the installed version still provides them.
yhat_keras_class_vec <- model_keras %>% predict_classes(as.matrix(x_test_tbl)) %>% as.vector()
yhat_keras_prob_vec <- model_keras %>% predict_proba(as.matrix(x_test_tbl)) %>% as.vector()
# Collect truth / prediction / probability, recoded to yes/no factors.
estimates_keras_tbl <- tibble(
truth = as.factor(y_test_vec) %>% forcats::fct_recode(yes = "1", no = "0"),
estimate = as.factor(yhat_keras_class_vec) %>% forcats::fct_recode(yes = "1", no = "0"),
class_prob = yhat_keras_prob_vec
)
estimates_keras_tbl
# Confusion-matrix heatmap (counts as tile alpha + labels).
estimates_keras_tbl %>%
conf_mat(truth, estimate) %>%
pluck(1) %>%
as_tibble() %>%
# Visualize with ggplot
ggplot(aes(Prediction, Truth, alpha = n)) +
geom_tile(show.legend = FALSE) +
geom_text(aes(label = n), colour = "white", alpha = 1, size = 8)
confmat4 <- estimates_keras_tbl %>% conf_mat(truth, estimate)
cm1 <- confmat4$table
confmat4$table
# Derived quality measures; cells are indexed as [2,2]=TP, [1,2]=FN,
# [2,1]=FP, [1,1]=TN — presumably rows are predictions and columns truth
# with "yes" as the second level; verify against conf_mat()'s layout.
measures1 <- NcalcMeasures(as.double(cm1[2,2]),cm1[1,2],
cm1[2,1],cm1[1,1])
measures1
# NOTE(review): confusionMatrix() is caret's function (caret is not loaded
# anywhere in this script) and `truth`/`estimate` only exist as columns of
# estimates_keras_tbl, not as standalone objects — this line errored as
# written and its result `x` was never used, so it is disabled pending
# confirmation of the intent:
# x <- confusionMatrix(truth, estimate, positive = "Yes")
# Normalize the confusion-matrix cells to per-customer proportions.
# NOTE(review): 1760 (and 12 / 13 below) are hard-coded denominators —
# confirm they match the actual test-set size; the TN/FP/FN/TP labels assume
# a column-wise cell order, verify against conf_mat()'s table layout.
TN <- confmat4$table[1]/1760
FP <- confmat4$table[2]/1760
FN <- confmat4$table[3]/1760
TP <- confmat4$table[4]/1760
# Expected cost per customer: a missed churner (FN) costs 300, any retention
# action (TP or FP) costs 60, true negatives cost nothing.
cost_simple <- FN*300 + TP*60 + FP*60 + TN*0
cost_simple
confmat4 <- estimates_keras_tbl %>% conf_mat(truth, estimate)
# Churn Rate
TN <- confmat4$table[1]/12
FP <- confmat4$table[2]/12
FN <- confmat4$table[3]/12
TP <- confmat4$table[4]/12
# BUG FIX: `TP / FP + TP` parses as (TP/FP) + TP; the intended ratio is
# TP / (FP + TP).
churn_rate <- TP / (FP + TP)
churn_rate
# Hit Rate
TN <- confmat4$table[1]/13
FP <- confmat4$table[2]/13
FN <- confmat4$table[3]/13
TP <- confmat4$table[4]/13
# BUG FIX: hit rate (recall) is TP / (FN + TP); the original `TP / FN + TP`
# parsed as (TP/FN) + TP.
hit_rate <- TP / (FN + TP)
hit_rate
# Threshold grid for a cost-vs-threshold comparison. NOTE(review): `cost`
# is initialised to zeros and never filled in, so min(cost) is 0 and the
# "optimized" model's cost curve below is a placeholder.
thresh <- seq(0.1,1.0, length = 10)
cost = rep(0,length(thresh))
dat <- data.frame(
model = c(rep("optimized",10),"simple"),
cost_thresh = c(cost,cost_simple),
thresh_plot = c(thresh,0.5)
)
# With `cost` all zero this reduces to cost_simple per customer.
savings_per_customer = cost_simple - min(cost)
total_savings = 500000*savings_per_customer
total_savings
# NOTE(review): the first options() call is immediately overridden by the
# second, so only yardstick.event_first = FALSE is in effect below.
options(yardstick.event_first = TRUE)
options(yardstick.event_first = FALSE)
# Standard yardstick performance metrics on the hold-out set.
estimates_keras_tbl %>% roc_auc(truth, class_prob)
estimates_keras_tbl %>% conf_mat(truth, estimate)
estimates_keras_tbl %>% metrics(truth, estimate)
estimates_keras_tbl %>% precision(truth, estimate)
estimates_keras_tbl %>% recall(truth, estimate)
estimates_keras_tbl %>% f_meas(truth, estimate, beta = 1)
class(model_keras)
# lime needs two S3 methods: the model type and a probability predictor.
# NOTE(review): the two methods below target different class names
# ("keras.models.Sequential" vs "keras.engine.sequential.Sequential");
# check class(model_keras) above and align them if dispatch fails.
model_type.keras.models.Sequential <- function(x, ...) {
"classification"}
# Return per-class probabilities in the data.frame layout lime expects.
predict_model.keras.engine.sequential.Sequential <- function(x, newdata, type, ...) {
pred <- predict_proba(object = x, x = as.matrix(newdata))
data.frame(Yes = pred, No = 1 - pred)
}
# Sanity-check the prediction wrapper on the test features.
predict_model(x = model_keras, newdata = x_test_tbl, type = 'raw') %>%
tibble::as_tibble()
# Build the explainer on the training features, then explain the first 10
# hold-out cases (top 4 features for the single predicted label).
explainer <- lime::lime (
x = x_train_tbl,
model = model_keras,
bin_continuous = TRUE)
explanation <- lime::explain(
x_test_tbl[1:10,],
explainer = explainer,
n_labels = 1,
n_features = 4,
kernel_width = 0.5
)
plot_features(explanation) +
labs(title = "LIME Feature Importance Visualization",
subtitle = "Hold Out (Test) Set, First 10 Cases Shown")
plot_explanations(explanation) +
labs(title = "LIME Feature Importance Heatmap",
subtitle = "Hold Out (Test) Set, First 10 Cases Shown")
# A second, unrelated network (4-4-1, linear output, MSE loss): a regression
# skeleton that is compiled here but never fitted in this script.
rna <- keras_model_sequential()
rna %>%
layer_dense(units = 4, input_shape = 6, kernel_initializer = 'normal', activation = 'relu') %>%
layer_dense(units = 4, kernel_initializer = 'normal', activation = 'relu') %>%
layer_dense(units = 1, kernel_initializer = 'normal', activation = 'linear')
summary(rna)
rna %>%
compile(loss = 'mse', optimizer = optimizer_adam())
# Snapshot the whole workspace for later sessions.
save(list = ls(), file = 'D:/R/Churn Prediction/customer_churn.RData')
| /Churn Prediction/ANN.R | no_license | Rangow4562/Customer-Churn-Analysis | R | false | false | 7,540 | r | library(keras)
library(lime)
library(readr)
library(dplyr)
library(tidyr)
library(reticulate)
library(tensorflow)
library(tidyquant)
library(rsample)
library(recipes)
library(yardstick)
library(corrr)
library(ggplot2)
library(forcats)
#Functions
# Compute standard binary-classification measures from confusion-matrix
# counts. Note the argument order: TP, FN, FP, TN.
#
# Returns a list with the raw counts plus (all percentages except MCC):
#   accuracy, pgood (TP/(TP+FP)), pbad (TN/(FN+TN)), FPR, TPR, and the
#   Matthews correlation coefficient.
#
# The original defined six nested helper closures on every call and invoked
# each with the same arguments; computing the values directly preserves the
# exact results without that per-call overhead.
NcalcMeasures <- function(TP, FN, FP, TN) {
  total <- TP + FP + FN + TN
  # MCC denominator is 0 when any margin is empty, yielding NaN exactly as
  # the original formula did.
  mcc <- ((TP * TN) - (FP * FN)) /
    sqrt((TP + FP) * (TP + FN) * (TN + FP) * (TN + FN))
  list(
    TP = TP,
    FN = FN,
    TN = TN,
    FP = FP,
    accuracy = 100.0 * ((TP + TN) / total),
    pgood = 100.0 * (TP / (TP + FP)),
    pbad = 100.0 * (TN / (FN + TN)),
    FPR = 100.0 * (FP / (FP + TN)),
    TPR = 100.0 * (TP / (TP + FN)),
    MCC = mcc
  )
}
# Convert every cell of an integer matrix/table to double, preserving
# dimensions and dimnames.
#
# Bug fix: the original looped `for (i in nrow(intTab))`, which visits only
# the single value nrow(intTab) instead of 1..nrow (and likewise for the
# columns), so only the bottom-right cell was ever assigned. seq_len()
# iterates over every row and column, and is a no-op for 0-extent input.
tab_int2double <- function(intTab) {
  doubleTab <- intTab
  for (i in seq_len(nrow(intTab))) {
    for (j in seq_len(ncol(intTab))) {
      doubleTab[i, j] <- as.double(intTab[i, j])
    }
  }
  doubleTab
}
# DATASET: IBM Telco customer churn CSV.
# NOTE(review): drive-relative Windows path ("D:WA_..."); confirm a "/" is
# not missing after "D:".
churn_data_raw <- read_csv("D:WA_Fn-UseC_-Telco-Customer-Churn.csv")
glimpse(churn_data_raw)
# Drop rows with missing values and put the target column (Churn) first.
churn_data_tbl <- churn_data_raw %>%
drop_na() %>%
dplyr::select(Churn, everything())
# Split test/training sets (80/20, reproducible via the seed).
set.seed(100)
train_test_split <- initial_split(churn_data_tbl, prop = 0.8)
train_test_split
# Retrieve train and test sets. Bug fix: the original called training()
# twice, so the "test" set was a copy of the training data; testing()
# returns the held-out 20%.
train_tbl_with_ids <- training(train_test_split)
test_tbl_with_ids <- testing(train_test_split)
# customerID is an identifier, not a predictor.
train_tbl <- select(train_tbl_with_ids, -customerID)
test_tbl <- select(test_tbl_with_ids, -customerID)
# Exploratory histograms of tenure (default and 6 bins) and TotalCharges
# (raw and log scale), used to choose the preprocessing steps below.
churn_data_tbl %>%
ggplot(aes(x = tenure)) +
geom_histogram()
churn_data_tbl %>%
ggplot(aes(x = tenure)) +
geom_histogram(bins = 6)
churn_data_tbl %>%
ggplot(aes(x = TotalCharges)) +
geom_histogram(bins = 100)
churn_data_tbl %>%
ggplot(aes(x = log(TotalCharges))) +
geom_histogram(bins = 100)
# Preprocessing recipe fitted on the training data only: discretise tenure
# into 6 bins, log-transform TotalCharges, one-hot encode the remaining
# nominal predictors, then centre and scale all predictors.
rec_obj <- recipe(Churn ~ ., data = train_tbl) %>%
step_discretize(tenure, options = list(cuts = 6)) %>%
step_log(TotalCharges) %>%
step_dummy(all_nominal(), -all_outcomes()) %>%
step_center(all_predictors(), -all_outcomes()) %>%
step_scale(all_predictors(), -all_outcomes()) %>%
prep(data = train_tbl)
rec_obj
# Apply the fitted recipe; the response column is removed from the features.
x_train_tbl <- bake(rec_obj, train_tbl) %>% dplyr::select(-Churn)
x_test_tbl <- bake(rec_obj, test_tbl) %>% dplyr::select(-Churn)
glimpse(x_train_tbl)
# Numeric 0/1 response vectors for keras (1 = churned).
y_train_vec <- ifelse(pull(train_tbl, Churn) == "Yes", 1, 0)
y_test_vec <- ifelse(pull(test_tbl, Churn) == "Yes", 1, 0)
# Two hidden layers of 64 ReLU units with 30% dropout after each, and a
# single sigmoid output unit for the churn probability.
model_keras <- keras_model_sequential() %>%
layer_dense(
units = 64,
kernel_initializer = "uniform",
activation = "relu",
input_shape = ncol(x_train_tbl)
) %>%
layer_dropout(rate = 0.3) %>%
layer_dense(
units = 64,
kernel_initializer = "uniform",
activation = "relu"
) %>%
layer_dropout(rate = 0.3) %>%
layer_dense(
units = 1,
kernel_initializer = "uniform",
activation = "sigmoid")
# Binary cross-entropy with Adam; accuracy tracked during training.
model_keras %>% compile(
loss = "binary_crossentropy",
optimizer = "adam",
metrics = "accuracy"
)
# 35 epochs, mini-batches of 50, 30% of the training rows for validation.
history <- model_keras %>% keras::fit(
as.matrix(x_train_tbl),
y_train_vec,
batch_size = 50,
epochs = 35,
validation_split = 0.30
)
save_model_hdf5(model_keras, 'D:/R/Churn Prediction/customer_churn.hdf5', overwrite = TRUE, include_optimizer = TRUE)
yhat_keras_class_vec <- model_keras %>% predict_classes(as.matrix(x_test_tbl)) %>% as.vector()
yhat_keras_prob_vec <- model_keras %>% predict_proba(as.matrix(x_test_tbl)) %>% as.vector()
estimates_keras_tbl <- tibble(
truth = as.factor(y_test_vec) %>% forcats::fct_recode(yes = "1", no = "0"),
estimate = as.factor(yhat_keras_class_vec) %>% forcats::fct_recode(yes = "1", no = "0"),
class_prob = yhat_keras_prob_vec
)
estimates_keras_tbl
estimates_keras_tbl %>%
conf_mat(truth, estimate) %>%
pluck(1) %>%
as_tibble() %>%
# Visualize with ggplot
ggplot(aes(Prediction, Truth, alpha = n)) +
geom_tile(show.legend = FALSE) +
geom_text(aes(label = n), colour = "white", alpha = 1, size = 8)
confmat4 <- estimates_keras_tbl %>% conf_mat(truth, estimate)
cm1 <- confmat4$table
confmat4$table
measures1 <- NcalcMeasures(as.double(cm1[2,2]),cm1[1,2],
cm1[2,1],cm1[1,1])
measures1
x <- confusionMatrix(truth, estimate, positive = "Yes")
TN <- confmat4$table[1]/1760
FP <- confmat4$table[2]/1760
FN <- confmat4$table[3]/1760
TP <- confmat4$table[4]/1760
cost_simple = FN*300 + TP*60 + FP*60 + TN*0
cost_simple
confmat4 <- estimates_keras_tbl %>% conf_mat(truth, estimate)
#Churn Rate
TN <- confmat4$table[1]/12
FP <- confmat4$table[2]/12
FN <- confmat4$table[3]/12
TP <- confmat4$table[4]/12
churn_rate = TP / FP + TP
churn_rate
#Hit Rate
TN <- confmat4$table[1]/13
FP <- confmat4$table[2]/13
FN <- confmat4$table[3]/13
TP <- confmat4$table[4]/13
hit_rate = TP / FN + TP
hit_rate
thresh <- seq(0.1,1.0, length = 10)
cost = rep(0,length(thresh))
dat <- data.frame(
model = c(rep("optimized",10),"simple"),
cost_thresh = c(cost,cost_simple),
thresh_plot = c(thresh,0.5)
)
savings_per_customer = cost_simple - min(cost)
total_savings = 500000*savings_per_customer
total_savings
options(yardstick.event_first = TRUE)
options(yardstick.event_first = FALSE)
estimates_keras_tbl %>% roc_auc(truth, class_prob)
estimates_keras_tbl %>% conf_mat(truth, estimate)
estimates_keras_tbl %>% metrics(truth, estimate)
estimates_keras_tbl %>% precision(truth, estimate)
estimates_keras_tbl %>% recall(truth, estimate)
estimates_keras_tbl %>% f_meas(truth, estimate, beta = 1)
class(model_keras)
model_type.keras.models.Sequential <- function(x, ...) {
"classification"}
predict_model.keras.engine.sequential.Sequential <- function(x, newdata, type, ...) {
pred <- predict_proba(object = x, x = as.matrix(newdata))
data.frame(Yes = pred, No = 1 - pred)
}
predict_model(x = model_keras, newdata = x_test_tbl, type = 'raw') %>%
tibble::as_tibble()
explainer <- lime::lime (
x = x_train_tbl,
model = model_keras,
bin_continuous = TRUE)
explanation <- lime::explain(
x_test_tbl[1:10,],
explainer = explainer,
n_labels = 1,
n_features = 4,
kernel_width = 0.5
)
plot_features(explanation) +
labs(title = "LIME Feature Importance Visualization",
subtitle = "Hold Out (Test) Set, First 10 Cases Shown")
plot_explanations(explanation) +
labs(title = "LIME Feature Importance Heatmap",
subtitle = "Hold Out (Test) Set, First 10 Cases Shown")
rna <- keras_model_sequential()
rna %>%
layer_dense(units = 4, input_shape = 6, kernel_initializer = 'normal', activation = 'relu') %>%
layer_dense(units = 4, kernel_initializer = 'normal', activation = 'relu') %>%
layer_dense(units = 1, kernel_initializer = 'normal', activation = 'linear')
summary(rna)
rna %>%
compile(loss = 'mse', optimizer = optimizer_adam())
save(list = ls(), file = 'D:/R/Churn Prediction/customer_churn.RData')
|
## Rank-invariant normalisation: for each array, normalize.invariantset()
## finds the probes whose rank does not change between that array and the
## target distribution T, fits a normalisation curve through them, and the
## array is adjusted by predicting from that curve.
##
## exprs: expression matrix (probes x arrays)
## T:     target distribution; defaults to the row-wise mean across arrays
##
## Returns the normalised matrix.
rankInvariantNormalise <- function(exprs, T = NULL) {
  require("affy")
  if (is.null(T)) {
    T <- apply(exprs, 1, mean, na.rm = TRUE)
  }
  # seq_len() rather than 1:ncol() so a zero-column input is a no-op
  # instead of an out-of-bounds subscript (1:0 yields c(1, 0)).
  for (j in seq_len(ncol(exprs))) {
    curve <- normalize.invariantset(exprs[, j], T)
    exprs[, j] <- predict(curve$n.curve, exprs[, j])$y
  }
  exprs
}
# Median-normalise an expression matrix: each array (column) is shifted so
# that its median equals the overall median of the (optionally
# log2-transformed) data.
#
# exprs: matrix-like object (probes x arrays)
# log:   if TRUE, log2-transform before normalising
#
# Returns the normalised matrix.
medianNormalise <- function(exprs, log = TRUE) {
  mat <- as.matrix(exprs)
  if (log) {
    mat <- log2(mat)
  }
  overall_med <- median(mat, na.rm = TRUE)
  col_meds <- apply(mat, 2, median, na.rm = TRUE)
  # Subtract each column's own median, then restore the global level.
  sweep(mat, 2, col_meds, FUN = "-") + overall_med
}
## Normalise an ExpressionSetIllumina object.
##
## BSData:    ExpressionSetIllumina holding the raw intensities
## method:    "quantile", "qspline", "vsn", "rankInvariant", "median",
##            "none", "neqc" or "rsn"
## transform: "none", "vst" or "log2", applied before normalisation
##            (reset/skipped for some method combinations below)
## T:         target distribution for method = "rankInvariant"
## status:    probe status vector, used by method = "neqc"
## negctrl, regular: status labels for negative-control and regular probes
##            (neqc only)
## ...:       passed through to the underlying normalisation routines
##
## Returns BSData with its "exprs" assay replaced by normalised values; for
## method = "neqc" the returned object is subset to the regular probes only.
normaliseIllumina = function(BSData, method="quantile", transform="none", T=NULL, status=fData(BSData)$Status,negctrl="negative",regular="regular",...) {
## Remember dimnames so they can be restored after methods that drop them.
rownms = rownames(exprs(BSData))
colnms = colnames(exprs(BSData))
transform = match.arg(transform, c("none", "vst", "log2"))
method = match.arg(method, c("quantile", "qspline", "vsn", "rankInvariant", "median", "none", "neqc", "rsn"))
## vsn is incompatible with a prior transform: warn and reset it.
if(method=="vsn" && transform!="none"){
cat(paste("\nmethod =", method, "not compatible with transform =", transform, "\nResetting transform = \"none\"\n\n"))
transform="none"
}
## neqc handles the log transform itself, so log2 is skipped in that case.
if(transform=="log2" && method != "neqc") {
BSData = assayDataElementReplace(BSData, "exprs", as.matrix(log2(exprs(BSData))))
}
else if(transform=="vst") {
## Variance-stabilising transform via lumi: build a temporary LumiBatch
## carrying expressions, standard errors, bead numbers and (if available
## or computable) detection scores, then take the vst-transformed exprs.
require("lumi")
x = new("LumiBatch")
x = assayDataElementReplace(x, "exprs", exprs(BSData))
x = assayDataElementReplace(x, "se.exprs", se.exprs(BSData))
x = assayDataElementReplace(x, "beadNum", nObservations(BSData))
if(!all(is.na(Detection(BSData))))
x = assayDataElementReplace(x, "detection", Detection(BSData))
else try(x <- assayDataElementReplace(x, "detection", calculateDetection(BSData)))
BSData = assayDataElementReplace(BSData, "exprs", exprs(lumiT(x, method="vst", ...)))
rm(x)
}
switch(method,
quantile={
## Restore dimnames, which may be dropped by the normalisation call.
BSData = assayDataElementReplace(BSData, "exprs", normalizeQuantiles(as.matrix(exprs(BSData))))
rownames(exprs(BSData)) = rownms
colnames(exprs(BSData)) = colnms
},
qspline={
require("affy")
BSData = assayDataElementReplace(BSData, "exprs", normalize.qspline(as.matrix(exprs(BSData))))
rownames(exprs(BSData)) = rownms
colnames(exprs(BSData)) = colnms
},
vsn={
require("vsn")
BSData = assayDataElementReplace(BSData, "exprs", exprs(vsn2(exprs(BSData))))
},
rankInvariant={
BSData = assayDataElementReplace(BSData, "exprs", rankInvariantNormalise(exprs(BSData), T=T))
},
median={
## log=FALSE: any log transform was already applied above.
BSData = assayDataElementReplace(BSData, "exprs", medianNormalise(exprs(BSData), log=FALSE))
},
rsn={
require("lumi")
##Need to check there are no NAs in the data
## (rows containing any NA are left untouched by rsn below)
noNA <- apply(exprs(BSData), 1, function(x) !any(is.na(x)))
newObj <- BSData
exprs(newObj)[noNA,] = rsn(exprs(BSData)[noNA,],...)
BSData = newObj
},
neqc={
#noNA <- apply(exprs(BSData), 1, function(x) !any(is.na(x)))
##note that neqc removes any control probes when it returns it output
## so the returned object is pre-subset to the regular probes only.
newObj <- BSData[which(status == regular),]
# tmp <- neqc(exprs(BSData)[noNA,],status=status[noNA], negctrl=negctrl, regular = regular,...)
tmp <- neqc(exprs(BSData),status=status, negctrl=negctrl, regular = regular,...)
# exprs(newObj) <- tmp[match(featureNames(newObj), rownames(tmp)),]
exprs(newObj) <- tmp
BSData = newObj
})
BSData
}
| /R/ExpressionSetIlluminaNormalisation.R | permissive | markdunning/beadarray | R | false | false | 3,850 | r | ##Use normalize.invariant set to find a list of genes which do not change rank across arrays and then normalise to the
##previously defined target distrbution
rankInvariantNormalise = function(exprs, T=NULL){
require("affy")
if(is.null(T)){
T = apply(exprs, 1, mean, na.rm=TRUE)
}
for (i in 1:ncol(exprs)){
curve = normalize.invariantset(exprs[,i], T)
exprs[,i] = predict(curve$n.curve, exprs[,i])$y
}
exprs
}
medianNormalise = function(exprs, log=TRUE){
exprs = as.matrix(exprs)
narrays = ncol(exprs)
if(log){
exprs = log2(exprs)
}
med = median(exprs,na.rm=TRUE)
for(i in 1:narrays){
exprs[,i] = exprs[,i] - median(exprs[,i], na.rm=TRUE)
}
exprs = exprs + med
exprs
}
normaliseIllumina = function(BSData, method="quantile", transform="none", T=NULL, status=fData(BSData)$Status,negctrl="negative",regular="regular",...) {
rownms = rownames(exprs(BSData))
colnms = colnames(exprs(BSData))
transform = match.arg(transform, c("none", "vst", "log2"))
method = match.arg(method, c("quantile", "qspline", "vsn", "rankInvariant", "median", "none", "neqc", "rsn"))
if(method=="vsn" && transform!="none"){
cat(paste("\nmethod =", method, "not compatible with transform =", transform, "\nResetting transform = \"none\"\n\n"))
transform="none"
}
if(transform=="log2" && method != "neqc") {
BSData = assayDataElementReplace(BSData, "exprs", as.matrix(log2(exprs(BSData))))
}
else if(transform=="vst") {
require("lumi")
x = new("LumiBatch")
x = assayDataElementReplace(x, "exprs", exprs(BSData))
x = assayDataElementReplace(x, "se.exprs", se.exprs(BSData))
x = assayDataElementReplace(x, "beadNum", nObservations(BSData))
if(!all(is.na(Detection(BSData))))
x = assayDataElementReplace(x, "detection", Detection(BSData))
else try(x <- assayDataElementReplace(x, "detection", calculateDetection(BSData)))
BSData = assayDataElementReplace(BSData, "exprs", exprs(lumiT(x, method="vst", ...)))
rm(x)
}
switch(method,
quantile={
BSData = assayDataElementReplace(BSData, "exprs", normalizeQuantiles(as.matrix(exprs(BSData))))
rownames(exprs(BSData)) = rownms
colnames(exprs(BSData)) = colnms
},
qspline={
require("affy")
BSData = assayDataElementReplace(BSData, "exprs", normalize.qspline(as.matrix(exprs(BSData))))
rownames(exprs(BSData)) = rownms
colnames(exprs(BSData)) = colnms
},
vsn={
require("vsn")
BSData = assayDataElementReplace(BSData, "exprs", exprs(vsn2(exprs(BSData))))
},
rankInvariant={
BSData = assayDataElementReplace(BSData, "exprs", rankInvariantNormalise(exprs(BSData), T=T))
},
median={
BSData = assayDataElementReplace(BSData, "exprs", medianNormalise(exprs(BSData), log=FALSE))
},
rsn={
require("lumi")
##Need to check there are no NAs in the data
noNA <- apply(exprs(BSData), 1, function(x) !any(is.na(x)))
newObj <- BSData
exprs(newObj)[noNA,] = rsn(exprs(BSData)[noNA,],...)
BSData = newObj
},
neqc={
#noNA <- apply(exprs(BSData), 1, function(x) !any(is.na(x)))
##note that neqc removes any control probes when it returns it output
newObj <- BSData[which(status == regular),]
# tmp <- neqc(exprs(BSData)[noNA,],status=status[noNA], negctrl=negctrl, regular = regular,...)
tmp <- neqc(exprs(BSData),status=status, negctrl=negctrl, regular = regular,...)
# exprs(newObj) <- tmp[match(featureNames(newObj), rownames(tmp)),]
exprs(newObj) <- tmp
BSData = newObj
})
BSData
}
|
library(Rlibstemmer)
# Validate wordStem() against the official Snowball vocabulary/output pairs
# for every supported language except romanian and turkish.
langs = getStemLanguages()
langs = setdiff(langs, c("romanian", "turkish"))
# Use a local snowball checkout when present, otherwise fetch from the web.
snowballDir = "~/Downloads/snowball_all"
if(file.exists(snowballDir)) {
prefix = sprintf("%s/algorithms", snowballDir)
} else
prefix = "http://snowball.tartarus.org"
urls = sprintf("%s/%s/voc.txt", prefix, langs)
outputs = sprintf("%s/%s/output.txt", prefix, langs)
names(urls) = langs
names(outputs) = langs
# For each language: stem the vocabulary and compare with the reference
# output; ans[lang] is TRUE when every stem matches.
ans = mapply(function(u, lang, output) {
input = iconv(readLines(u), "", "latin1")
out = wordStem(input, lang)
correct = iconv(readLines(output), "", "latin1")
all(out == correct)
}, urls, langs, outputs)
if(FALSE) {
snowballDir = "~/Downloads/snowball_all/"
if(file.exists(snowballDir)) {
french = iconv(readLines(sprintf("%s/algorithms/french/voc.txt", snowballDir)), "", "latin1")
out = wordStem(french, "french")
correct = iconv(readLines("~/Downloads/snowball_all/algorithms/french/output.txt"), "", "latin1")
all(correct == out)
i = c(2, 7, 240, 1298) # These have accents.
out[i]
italian = iconv(readLines(sprintf("%s/algorithms/italian/voc.txt", snowballDir)), "", "latin1")
out = wordStem(italian, "italian")
all(correct == out)
sum(italian != out)/length(italian)
}
}
| /tests/languages.R | no_license | omegahat/Rlibstemmer | R | false | false | 1,306 | r | library(Rlibstemmer)
langs = getStemLanguages()
langs = setdiff(langs, c("romanian", "turkish"))
snowballDir = "~/Downloads/snowball_all"
if(file.exists(snowballDir)) {
prefix = sprintf("%s/algorithms", snowballDir)
} else
prefix = "http://snowball.tartarus.org"
urls = sprintf("%s/%s/voc.txt", prefix, langs)
outputs = sprintf("%s/%s/output.txt", prefix, langs)
names(urls) = langs
names(outputs) = langs
ans = mapply(function(u, lang, output) {
input = iconv(readLines(u), "", "latin1")
out = wordStem(input, lang)
correct = iconv(readLines(output), "", "latin1")
all(out == correct)
}, urls, langs, outputs)
if(FALSE) {
snowballDir = "~/Downloads/snowball_all/"
if(file.exists(snowballDir)) {
french = iconv(readLines(sprintf("%s/algorithms/french/voc.txt", snowballDir)), "", "latin1")
out = wordStem(french, "french")
correct = iconv(readLines("~/Downloads/snowball_all/algorithms/french/output.txt"), "", "latin1")
all(correct == out)
i = c(2, 7, 240, 1298) # These have accents.
out[i]
italian = iconv(readLines(sprintf("%s/algorithms/italian/voc.txt", snowballDir)), "", "latin1")
out = wordStem(italian, "italian")
all(correct == out)
sum(italian != out)/length(italian)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calDistance.R
\name{calDistance}
\alias{calDistance}
\title{calDistance}
\usage{
calDistance(typedist = 1, tabVal = NULL, listZonePoint = NULL,
zoneN = NULL, surfVoronoi = NULL, meanZone = NULL, pErr = 0.9)
}
\arguments{
\item{typedist}{default value is 1, other values not implemented yet.}
\item{tabVal}{SpatialPointsDataFrame, contains data points to be used for zoning (spatial coordinates plus attribute values)
result of call to \code{\link{genMap}}}
\item{listZonePoint}{list of indices of data points within zones, result of call to \code{\link{calNei}}}
\item{zoneN}{zone neighborhood matrix (TRUE values on diagonal), result of call to \code{\link{calNei}}}
\item{surfVoronoi}{vector of Voronoi polygon surfaces corresponding to all data points,result of call to \code{\link{genMap}}}
\item{meanZone}{vector of average attribute values for all zones}
\item{pErr}{error percentage for correcting distances}
}
\value{
a list with components
\describe{
\item{matDistance}{matrix of real values, corresponding to heterogeneities between neighbour zones. All other values are set to 0.}
\item{matDistanceCorr}{corrected distance matrix using pErr}
\item{cost}{sum of errors obtained by replacing all data values within a zone by the zone mean value}
}
}
\description{
calDistance
}
\details{
calculates matrix of heterogeneities between neighbour zones.
max(sigmai2[i],(fxmean*pErr/100)^2) + max(sigmai2[j],(fymean*pErr/100)^2) + (fxmean-fymean)^2
}
\examples{
# load test map with simulated data
data(mapTest)
# load zoning results from test file
data(resZTest)
K=resZTest
resD = calDistance(typedist=1,mapTest$krigData,K$listZonePoint,K$zoneN,
mapTest$krigSurfVoronoi,K$meanZone,pErr=0.9)
}
| /man/calDistance.Rd | no_license | hazaeljones/geozoning | R | false | true | 1,791 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calDistance.R
\name{calDistance}
\alias{calDistance}
\title{calDistance}
\usage{
calDistance(typedist = 1, tabVal = NULL, listZonePoint = NULL,
zoneN = NULL, surfVoronoi = NULL, meanZone = NULL, pErr = 0.9)
}
\arguments{
\item{typedist}{default value is 1, other values not implemented yet.}
\item{tabVal}{SpatialPointsDataFrame, contains data points to be used for zoning (spatial coordinates plus attribute values)
result of call to \code{\link{genMap}}}
\item{listZonePoint}{list of indices of data points within zones, result of call to \code{\link{calNei}}}
\item{zoneN}{zone neighborhood matrix (TRUE values on diagonal), result of call to \code{\link{calNei}}}
\item{surfVoronoi}{vector of Voronoi polygon surfaces corresponding to all data points,result of call to \code{\link{genMap}}}
\item{meanZone}{vector of average attribute values for all zones}
\item{pErr}{error percentage for correcting distances}
}
\value{
a list with components
\describe{
\item{matDistance}{matrix of real values, corresponding to heterogeneities between neighbour zones. All other values are set to 0.}
\item{matDistanceCorr}{corrected distance matrix using pErr}
\item{cost}{sum of errors obtained by replacing all data values within a zone by the zone mean value}
}
}
\description{
calDistance
}
\details{
calculates matrix of heterogeneities between neighbour zones.
max(sigmai2[i],(fxmean*pErr/100)^2) + max(sigmai2[j],(fymean*pErr/100)^2) + (fxmean-fymean)^2
}
\examples{
# load test map with simulated data
data(mapTest)
# load zoning results from test file
data(resZTest)
K=resZTest
resD = calDistance(typedist=1,mapTest$krigData,K$listZonePoint,K$zoneN,
mapTest$krigSurfVoronoi,K$meanZone,pErr=0.9)
}
|
# Shared test suite for dbxUpdate(), run by each adapter's test file with an
# open connection `db`. Assumes an empty `events` table with columns
# id, city, counter (and active) -- confirm against the test setup.
runUpdateTests <- function(db) {
test_that("update works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(2), city=c("LA"))
dbxUpdate(db, "events", update_events, where_cols=c("id"))
res <- dbxSelect(db, "SELECT city FROM events WHERE id = 2")
expect_equal(res$city, c("LA"))
})
test_that("update multiple columns works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), counter=c(10, 11), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
# Row 1's city key does not match ("LA" vs "San Francisco"), so only row 2
# is updated (counter stays 10 for row 1).
update_events <- data.frame(id=c(1, 2), city=c("LA", "Boston"), counter=c(20, 21))
dbxUpdate(db, "events", update_events, where_cols=c("id", "city"))
res <- dbxSelect(db, "SELECT counter FROM events")
expect_equal(res$counter, c(10, 21))
})
test_that("update multiple columns where_cols order not important", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), counter=c(10, 11), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(1, 2), city=c("LA", "Boston"), counter=c(20, 21))
dbxUpdate(db, "events", update_events, where_cols=c("city", "id"))
res <- dbxSelect(db, "SELECT counter FROM events")
expect_equal(res$counter, c(10, 21))
})
test_that("update missing column raises error", {
update_events <- data.frame(id=c(2), city=c("LA"))
expect_error(dbxUpdate(db, "events", update_events, where_cols=c("missing")), "where_cols not in records")
})
test_that("empty update works", {
# A zero-row input must be a no-op rather than an error.
dbxUpdate(db, "events", data.frame(id = as.numeric(), active = as.logical()), where_cols=c("id"))
expect_true(TRUE)
})
test_that("update with transaction works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(2), city=c("LA"))
# transaction=FALSE lets the update participate in the outer transaction.
DBI::dbWithTransaction(db, {
dbxUpdate(db, "events", update_events, where_cols=c("id"), transaction=FALSE)
})
res <- dbxSelect(db, "SELECT city FROM events WHERE id = 2")
expect_equal(res$city, c("LA"))
})
}
| /tests/testthat/helper-run-update-tests.R | no_license | stjordanis/dbx | R | false | false | 2,225 | r | runUpdateTests <- function(db) {
test_that("update works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(2), city=c("LA"))
dbxUpdate(db, "events", update_events, where_cols=c("id"))
res <- dbxSelect(db, "SELECT city FROM events WHERE id = 2")
expect_equal(res$city, c("LA"))
})
test_that("update multiple columns works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), counter=c(10, 11), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(1, 2), city=c("LA", "Boston"), counter=c(20, 21))
dbxUpdate(db, "events", update_events, where_cols=c("id", "city"))
res <- dbxSelect(db, "SELECT counter FROM events")
expect_equal(res$counter, c(10, 21))
})
test_that("update multiple columns where_cols order not important", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), counter=c(10, 11), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(1, 2), city=c("LA", "Boston"), counter=c(20, 21))
dbxUpdate(db, "events", update_events, where_cols=c("city", "id"))
res <- dbxSelect(db, "SELECT counter FROM events")
expect_equal(res$counter, c(10, 21))
})
test_that("update missing column raises error", {
update_events <- data.frame(id=c(2), city=c("LA"))
expect_error(dbxUpdate(db, "events", update_events, where_cols=c("missing")), "where_cols not in records")
})
test_that("empty update works", {
dbxUpdate(db, "events", data.frame(id = as.numeric(), active = as.logical()), where_cols=c("id"))
expect_true(TRUE)
})
test_that("update with transaction works", {
events <- data.frame(id=c(1, 2), city=c("San Francisco", "Boston"), stringsAsFactors=FALSE)
dbxInsert(db, "events", events)
update_events <- data.frame(id=c(2), city=c("LA"))
DBI::dbWithTransaction(db, {
dbxUpdate(db, "events", update_events, where_cols=c("id"), transaction=FALSE)
})
res <- dbxSelect(db, "SELECT city FROM events WHERE id = 2")
expect_equal(res$city, c("LA"))
})
}
|
est_lin1<-function(y,maxit=25,candy=FALSE,acc=0.1,plots=FALSE,eps=0.000001,unique=TRUE,notion="dS")
{
LT<-lin1_theta_eps(y,eps)$t1
sv<-median(LT)
dists<-sapply(LT,Ele_Norm,center=sv)
cands<-LT[dists<quantile(dists,acc)]
if(notion=="dS")
{
sapply(cands,dS_lin1,y=y)->ev
}
if(notion=="dS1")
{
sapply(cands,dS1_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS2")
{
sapply(cands,dS2_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS3")
{
sapply(cands,dS3_lin2,model="linAR1woI",y=y)->ev
}
se<-cands[ev==max(ev)]
itc<-1
for(i in 1:maxit)
{
itc<-itc+1
sv<-se
dists<-sapply(LT,Ele_Norm,center=sv)
cands<-LT[dists<quantile(dists,acc)]
if(notion=="dS")
{
sapply(cands,dS_lin1,y=y)->ev
}
if(notion=="dS1")
{
sapply(cands,dS1_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS2")
{
sapply(cands,dS2_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS3")
{
sapply(cands,dS3_lin2,model="linAR1woI",y=y)->ev
}
se<-cands[ev==max(ev)]
if(sv[1]==se[1])
{break}
}
max(ev)
if(unique==TRUE)
{
se<-mean(se)
}
list(estimate=se,value=max(ev),numit=itc)
} | /R/est_lin1.R | no_license | SimoneHermann/rexpar | R | false | false | 1,206 | r | est_lin1<-function(y,maxit=25,candy=FALSE,acc=0.1,plots=FALSE,eps=0.000001,unique=TRUE,notion="dS")
{
LT<-lin1_theta_eps(y,eps)$t1
sv<-median(LT)
dists<-sapply(LT,Ele_Norm,center=sv)
cands<-LT[dists<quantile(dists,acc)]
if(notion=="dS")
{
sapply(cands,dS_lin1,y=y)->ev
}
if(notion=="dS1")
{
sapply(cands,dS1_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS2")
{
sapply(cands,dS2_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS3")
{
sapply(cands,dS3_lin2,model="linAR1woI",y=y)->ev
}
se<-cands[ev==max(ev)]
itc<-1
for(i in 1:maxit)
{
itc<-itc+1
sv<-se
dists<-sapply(LT,Ele_Norm,center=sv)
cands<-LT[dists<quantile(dists,acc)]
if(notion=="dS")
{
sapply(cands,dS_lin1,y=y)->ev
}
if(notion=="dS1")
{
sapply(cands,dS1_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS2")
{
sapply(cands,dS2_lin2,model="linAR1woI",y=y)->ev
}
if(notion=="dS3")
{
sapply(cands,dS3_lin2,model="linAR1woI",y=y)->ev
}
se<-cands[ev==max(ev)]
if(sv[1]==se[1])
{break}
}
max(ev)
if(unique==TRUE)
{
se<-mean(se)
}
list(estimate=se,value=max(ev),numit=itc)
} |
# Standard testthat entry point: runs every test under tests/testthat/ for
# the safetyguide package (invoked by R CMD check).
library(testthat)
library(safetyguide)
test_check("safetyguide")
| /tests/testthat.R | no_license | adolgert/safetyguide | R | false | false | 66 | r | library(testthat)
library(safetyguide)
test_check("safetyguide")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/person_data.R
\name{person_data}
\alias{person_data}
\title{Person data}
\usage{
person_data()
}
\description{
This function extracts data from the person table.
}
\keyword{gemini}
| /man/person_data.Rd | no_license | zkzofn/GEMINI | R | false | true | 253 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/person_data.R
\name{person_data}
\alias{person_data}
\title{Person data}
\usage{
person_data()
}
\description{
This function extracts data from the person table.
}
\keyword{gemini}
|
# Plot 4: 2x2 panel of Global Active Power, Voltage, energy sub-metering,
# and Global reactive power against date-time.
# Assumes `data` holds the household power-consumption subset with a
# Datetime column -- TODO confirm against the loading script.
par(mfrow = c(2,2), mar = c(4,4,2,1), oma = c(0,0,2,0))
with(data, {
        plot(Global_active_power ~ Datetime, type = "l",
             ylab = "Global Active Power", xlab = "")
        plot(Voltage ~ Datetime, type = "l", ylab = "Voltage", xlab = "datetime")
        plot(Sub_metering_1 ~ Datetime, type = "l", ylab = "Energy sub metering",
             xlab = "")
        lines(Sub_metering_2 ~ Datetime, col = 'Red')
        lines(Sub_metering_3 ~ Datetime, col = 'Blue')
        # Borderless legend so it does not obscure the traces.
        legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
               bty = "n",
               legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
        # Fixed y-axis label typo ("Global_rective_power") to match the
        # plotted column name.
        plot(Global_reactive_power ~ Datetime, type = "l",
             ylab = "Global_reactive_power", xlab = "datetime")
})
with(data, {
plot(Global_active_power ~ Datetime, type = "l",
ylab = "Global Active Power", xlab = "")
plot(Voltage ~ Datetime, type = "l", ylab = "Voltage", xlab = "datetime")
plot(Sub_metering_1 ~ Datetime, type = "l", ylab = "Energy sub metering",
xlab = "")
lines(Sub_metering_2 ~ Datetime, col = 'Red')
lines(Sub_metering_3 ~ Datetime, col = 'Blue')
legend("topright", col = c("black", "red", "blue"), lty = 1, lwd = 2,
bty = "n",
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
plot(Global_reactive_power ~ Datetime, type = "l",
ylab = "Global_rective_power", xlab = "datetime")
}) |
E6TminF40clim2000.R GISS ModelE Lat-Lon Atmosphere Model, climatological ocn/atm, dust minerals
E6TminF40clim2000: based on E6TomaF40clim2000. OMA with computed dust minerals
only, all other aerosol forcings prescribed
Lat-lon: 2x2.5 degree horizontal resolution
F40: 40 vertical layers with standard hybrid coordinate, top at .1 mb
Atmospheric composition for year 2000
Ocean climatology prescribed from years 1996-2005, CMIP6
Uses turbulence scheme (no dry conv), grav.wave drag
Time steps: dynamics 3.75 min leap frog; physics 30 min.; radiation 2.5 hrs
Filters: U,V in E-W and N-S direction (after every physics time step)
U,V in E-W direction near poles (after every dynamics time step)
sea level pressure (after every physics time step)
Preprocessor Options
#define STDHYB ! standard hybrid vertical coordinate
#define ATM_LAYERING L40 ! 40 layers, top at .1 mb
#define NEW_IO ! new I/O (netcdf) on
#define IRRIGATION_ON
#define MODIS_LAI
#define NEW_BCdalbsn
!---> generic tracers code start
#define TRAC_ADV_CPU ! timing index for tracer advection on
#define TRACERS_ON ! include tracers code
#define TRACERS_WATER ! wet deposition and water tracer
#define TRACERS_DRYDEP ! default dry deposition
#define TRDIAG_WETDEPO ! additional wet deposition diags for tracers
! OFF #define CALCULATE_LIGHTNING ! Calculate lightning flash rates when NOx is not needed
! OFF #define AUTOTUNE_LIGHTNING ! Automatically generate lightning tuning parameters (present-day only)
!<--- generic tracers code end
!---> OMA start
#define TRACERS_MINERALS ! include dust mineral tracers
#define TRACERS_DUST_Silt4 ! include 4th silt size class of dust
!<--- OMA end
! OFF #define NUDGE_ON ! nudge the meteorology
#define CACHED_SUBDD
End Preprocessor Options
Object modules:
! resolution-specific source codes
Atm144x90 ! horizontal resolution is 144x90 -> 2x2.5deg
AtmLayering ! vertical resolution
DIAG_RES_F ! diagnostics
FFT144 ! Fast Fourier Transform
IO_DRV ! new i/o
! GISS dynamics with gravity wave drag
ATMDYN MOMEN2ND ! atmospheric dynamics
QUS_DRV QUS3D ! advection of Q/tracers
STRATDYN STRAT_DIAG ! stratospheric dynamics (incl. gw drag)
#include "latlon_source_files"
#include "modelE4_source_files"
#include "static_ocn_source_files"
#include "tracer_shared_source_files"
#include "tracer_minerals_source_files"
TRDIAG ! new i/o
SUBDD
! flammability_drv flammability ! Olga's fire model
Components:
#include "E4_components_nc" /* without "Ent" */
tracers
Ent
Component Options:
OPTS_Ent = ONLINE=YES PS_MODEL=FBB PFT_MODEL=ENT /* needed for "Ent" only */
OPTS_dd2d = NC_IO=PNETCDF
Data input files:
#include "IC_144x90_input_files"
#include "static_ocn_2000_144x90_input_files"
RVR=RD_Fd.nc ! river direction file
NAMERVR=RD_Fd.names.txt ! named river outlets
#include "land144x90_input_files"
#include "rad_input_files"
#include "rad_144x90_input_files_CMIP6clim"
#include "dust_tracer_144x90_input_files"
#include "dry_depos_144x90_input_files"
MSU_wts=MSU_SSU_RSS_weights.txt ! MSU-diag
REG=REG2X2.5 ! special regions-diag
Label and Namelist: (next 2 lines)
E6TminF40clim2000 (climatological prescribed ocean atmospheric tracer model with OMA dust minerals only)
&&PARAMETERS
#include "static_ocn_params"
#include "sdragF40_params"
#include "gwdragF40_params"
! cond_scheme=2 ! newer conductance scheme (N. Kiang) ! not used with Ent
! The following two lines are only used when aerosol/radiation interactions are off
FS8OPX=1.,1.,1.,1.,1.5,1.5,1.,1.
FT8OPX=1.,1.,1.,1.,1.,1.,1.3,1.
! Increasing U00a decreases the high cloud cover; increasing U00b decreases net rad at TOA
U00a=0.625 ! above 850mb w/o MC region; tune this first to get 30-35% high clouds
U00b=1.00 ! below 850mb and MC regions; tune this last to get rad.balance
WMUI_multiplier = 2.
use_vmp=1
radius_multiplier=1.1
PTLISO=0. ! pressure(mb) above which radiation assumes isothermal layers
H2ObyCH4=1. ! if =1. activates stratospheric H2O generated by CH4 without interactive chemistry
KSOLAR=2 ! 2: use long annual mean file ; 1: use short monthly file
#include "atmCompos_2000_params"
!!!!!!!!!!!!!!!!!!!!!!!
! Please note that making o3_yr non-zero tells the model
! to override the transient chemistry tracer emissions'
! use of model year and use abs(o3_yr) instead!
!!!!!!!!!!!!!!!!!!!!!!!
#include "aerosol_OMA_params_CMIP6"
#include "mineral_params_model"
#include "common_tracer_params_CMIP6"
#include "chemistry_params_CMIP6"
#include "ch4_params_CMIP6_2000"
DTsrc=1800. ! cannot be changed after a run has been started
DT=225.
! parameters that control the Shapiro filter
DT_XUfilter=225. ! Shapiro filter on U in E-W direction; usually same as DT
DT_XVfilter=225. ! Shapiro filter on V in E-W direction; usually same as DT
DT_YVfilter=0. ! Shapiro filter on V in N-S direction
DT_YUfilter=0. ! Shapiro filter on U in N-S direction
NIsurf=2 ! surface interaction computed NIsurf times per source time step
NRAD=5 ! radiation computed NRAD times per source time step
#include "diag_params"
! save3dAOD=1 ! needed if 3D AOD (itAOD or ictAOD) SUBDDs are on and adiurn_dust=0
Nssw=2 ! until diurnal diags are fixed, Nssw has to be even
Ndisk=960 ! write fort.1.nc or fort.2.nc every NDISK source time step
&&END_PARAMETERS
&INPUTZ
YEARI=1999,MONTHI=12,DATEI=1,HOURI=0, ! pick IYEAR1=YEARI (default) or < YEARI
YEARE=1999,MONTHE=12,DATEE=2,HOURE=0, KDIAG=12*0,9,
ISTART=2,IRANDI=0, YEARE=1999,MONTHE=12,DATEE=1,HOURE=1,
/
| /templates/E6TminF40clim2000.R | no_license | Racha711/GCAP2 | R | false | false | 5,953 | r | E6TminF40clim2000.R GISS ModelE Lat-Lon Atmosphere Model, climatological ocn/atm, dust minerals
E6TminF40clim2000: based on E6TomaF40clim2000. OMA with computed dust minerals
only, all other aerosol forcings prescribed
Lat-lon: 2x2.5 degree horizontal resolution
F40: 40 vertical layers with standard hybrid coordinate, top at .1 mb
Atmospheric composition for year 2000
Ocean climatology prescribed from years 1996-2005, CMIP6
Uses turbulence scheme (no dry conv), grav.wave drag
Time steps: dynamics 3.75 min leap frog; physics 30 min.; radiation 2.5 hrs
Filters: U,V in E-W and N-S direction (after every physics time step)
U,V in E-W direction near poles (after every dynamics time step)
sea level pressure (after every physics time step)
Preprocessor Options
#define STDHYB ! standard hybrid vertical coordinate
#define ATM_LAYERING L40 ! 40 layers, top at .1 mb
#define NEW_IO ! new I/O (netcdf) on
#define IRRIGATION_ON
#define MODIS_LAI
#define NEW_BCdalbsn
!---> generic tracers code start
#define TRAC_ADV_CPU ! timing index for tracer advection on
#define TRACERS_ON ! include tracers code
#define TRACERS_WATER ! wet deposition and water tracer
#define TRACERS_DRYDEP ! default dry deposition
#define TRDIAG_WETDEPO ! additional wet deposition diags for tracers
! OFF #define CALCULATE_LIGHTNING ! Calculate lightning flash rates when NOx is not needed
! OFF #define AUTOTUNE_LIGHTNING ! Automatically generate lightning tuning parameters (present-day only)
!<--- generic tracers code end
!---> OMA start
#define TRACERS_MINERALS ! include dust mineral tracers
#define TRACERS_DUST_Silt4 ! include 4th silt size class of dust
!<--- OMA end
! OFF #define NUDGE_ON ! nudge the meteorology
#define CACHED_SUBDD
End Preprocessor Options
Object modules:
! resolution-specific source codes
Atm144x90 ! horizontal resolution is 144x90 -> 2x2.5deg
AtmLayering ! vertical resolution
DIAG_RES_F ! diagnostics
FFT144 ! Fast Fourier Transform
IO_DRV ! new i/o
! GISS dynamics with gravity wave drag
ATMDYN MOMEN2ND ! atmospheric dynamics
QUS_DRV QUS3D ! advection of Q/tracers
STRATDYN STRAT_DIAG ! stratospheric dynamics (incl. gw drag)
#include "latlon_source_files"
#include "modelE4_source_files"
#include "static_ocn_source_files"
#include "tracer_shared_source_files"
#include "tracer_minerals_source_files"
TRDIAG ! new i/o
SUBDD
! flammability_drv flammability ! Olga's fire model
Components:
#include "E4_components_nc" /* without "Ent" */
tracers
Ent
Component Options:
OPTS_Ent = ONLINE=YES PS_MODEL=FBB PFT_MODEL=ENT /* needed for "Ent" only */
OPTS_dd2d = NC_IO=PNETCDF
Data input files:
#include "IC_144x90_input_files"
#include "static_ocn_2000_144x90_input_files"
RVR=RD_Fd.nc ! river direction file
NAMERVR=RD_Fd.names.txt ! named river outlets
#include "land144x90_input_files"
#include "rad_input_files"
#include "rad_144x90_input_files_CMIP6clim"
#include "dust_tracer_144x90_input_files"
#include "dry_depos_144x90_input_files"
MSU_wts=MSU_SSU_RSS_weights.txt ! MSU-diag
REG=REG2X2.5 ! special regions-diag
Label and Namelist: (next 2 lines)
E6TminF40clim2000 (climatological prescribed ocean atmospheric tracer model with OMA dust minerals only)
&&PARAMETERS
#include "static_ocn_params"
#include "sdragF40_params"
#include "gwdragF40_params"
! cond_scheme=2 ! newer conductance scheme (N. Kiang) ! not used with Ent
! The following two lines are only used when aerosol/radiation interactions are off
FS8OPX=1.,1.,1.,1.,1.5,1.5,1.,1.
FT8OPX=1.,1.,1.,1.,1.,1.,1.3,1.
! Increasing U00a decreases the high cloud cover; increasing U00b decreases net rad at TOA
U00a=0.625 ! above 850mb w/o MC region; tune this first to get 30-35% high clouds
U00b=1.00 ! below 850mb and MC regions; tune this last to get rad.balance
WMUI_multiplier = 2.
use_vmp=1
radius_multiplier=1.1
PTLISO=0. ! pressure(mb) above which radiation assumes isothermal layers
H2ObyCH4=1. ! if =1. activates stratospheric H2O generated by CH4 without interactive chemistry
KSOLAR=2 ! 2: use long annual mean file ; 1: use short monthly file
#include "atmCompos_2000_params"
!!!!!!!!!!!!!!!!!!!!!!!
! Please note that making o3_yr non-zero tells the model
! to override the transient chemistry tracer emissions'
! use of model year and use abs(o3_yr) instead!
!!!!!!!!!!!!!!!!!!!!!!!
#include "aerosol_OMA_params_CMIP6"
#include "mineral_params_model"
#include "common_tracer_params_CMIP6"
#include "chemistry_params_CMIP6"
#include "ch4_params_CMIP6_2000"
DTsrc=1800. ! cannot be changed after a run has been started
DT=225.
! parameters that control the Shapiro filter
DT_XUfilter=225. ! Shapiro filter on U in E-W direction; usually same as DT
DT_XVfilter=225. ! Shapiro filter on V in E-W direction; usually same as DT
DT_YVfilter=0. ! Shapiro filter on V in N-S direction
DT_YUfilter=0. ! Shapiro filter on U in N-S direction
NIsurf=2 ! surface interaction computed NIsurf times per source time step
NRAD=5 ! radiation computed NRAD times per source time step
#include "diag_params"
! save3dAOD=1 ! needed if 3D AOD (itAOD or ictAOD) SUBDDs are on and adiurn_dust=0
Nssw=2 ! until diurnal diags are fixed, Nssw has to be even
Ndisk=960 ! write fort.1.nc or fort.2.nc every NDISK source time step
&&END_PARAMETERS
&INPUTZ
YEARI=1999,MONTHI=12,DATEI=1,HOURI=0, ! pick IYEAR1=YEARI (default) or < YEARI
YEARE=1999,MONTHE=12,DATEE=2,HOURE=0, KDIAG=12*0,9,
ISTART=2,IRANDI=0, YEARE=1999,MONTHE=12,DATEE=1,HOURE=1,
/
|
# Violin-plus-jitter view of diamond price by cut quality, colored by cut.
# Fixes: `geom_jitter(width = )` had a dangling empty argument (leftover,
# evaluated as a missing arg), and the plot title was passed to `labs()`
# unnamed, so it was never applied -- it must be the named `title` argument.
diamonds %>%
  ggplot(aes(x = cut, y = price, color = cut)) +
  geom_jitter() +
  geom_violin(alpha = .8, position = position_jitter(0.2)) +
  labs(title = "Distribution of Price by Cut Type")
| /art.r | no_license | delabj/tidyTues | R | false | false | 186 | r | diamonds %>%
ggplot(aes(x=cut, y= price, color = cut))+
geom_jitter(width = )+
geom_violin( alpha = .8, position=position_jitter(0.2))+
labs("Distribution of Price by Cut Type")
|
# Computes mean expression correlations for alternative TAD gene samplings.
# Usage: Rscript coexpr_alternative_samplings.R <nSampled>
options(scipen=100)  # avoid scientific notation in printed numbers / folder names
# Rscript coexpr_alternative_samplings.R 10
# Rscript coexpr_alternative_samplings.R 1000
script_name <- "coexpr_alternative_samplings.R"
startTime <- Sys.time()
cat("> START ",script_name, "\n")
SSHFS <- FALSE  # TRUE when running locally over an sshfs mount (debugging)
buildData <- TRUE  # NOTE(review): set but never read in this script -- confirm
setDir <- ifelse(SSHFS, "/media/electron", "") # needed to load the setting file...
require(foreach)
require(reshape2)
require(doMC)
registerDoMC(ifelse(SSHFS, 2, 90))  # 90 parallel workers on the cluster, 2 locally
source("utils_fct.R")  # provides get_meanCorr_value_alternative() used below
nSampled =1000  # interactive default; overwritten from the command line just below
nSampled <- commandArgs(trailingOnly = TRUE)
nSampled <- as.numeric(nSampled)
stopifnot(!is.na(nSampled))
# All outputs go to COEXPR_ALTERNATIVE_SAMPLINGS/<nSampled>/
outFolder <- file.path("COEXPR_ALTERNATIVE_SAMPLINGS", nSampled)
dir.create(outFolder, recursive=TRUE)
# Input: sampled gene sets per dataset/chromosome/TAD size, produced upstream
# by the CREATE_SAMPLE_ALTERNATIVE step with the same nSampled.
inFold <- file.path("CREATE_SAMPLE_ALTERNATIVE", nSampled)
stopifnot(dir.exists(inFold))
inFile <- file.path(inFold, "all_ds_sample_byTAD.Rdata")
stopifnot(file.exists(inFile))
# load() returns the loaded object's name; eval(parse()) fetches it regardless
# of what it was called when saved.
all_ds_sample_data <- eval(parse(text = load(inFile)))
corrMeth <- "pearson"
script0_name <- "0_prepGeneData"
# Pipeline folders holding per-dataset gene lists, settings and expression data.
pipFolder <- file.path("..", "Yuanlong_Cancer_HiC_data_TAD_DA")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
# Datasets are keyed "<hicds>/<exprds>" in the sampling data.
all_ds <- names(all_ds_sample_data)
nDS <- length(all_ds)
cat("... found nDS = ", nDS, "\n")
ds = all_ds[2]  # interactive-debugging leftover; overwritten by the foreach below
# NOTE(review): the next line restricts the run to the FIRST dataset only,
# despite nDS being reported above -- looks like a debugging leftover; remove
# it to process all datasets.
all_ds=all_ds[1]
# For each dataset, compute the mean expression correlation of every sampled
# gene set (all samples, cond1 only, cond2 only), nested as:
#   dataset -> chromosome -> TAD size -> sampling.
# Datasets run sequentially (%do%); chromosomes run in parallel (%dopar%,
# doMC workers registered at the top of the script).
all_ds_sampling_corr <- foreach(ds = all_ds ) %do% {
cat(paste0("... start DS = ", ds, "\n"))
# ds is "<hicds>/<exprds>": split into Hi-C dataset and expression dataset.
hicds <- dirname(ds)
exprds <- basename(ds)
stopifnot(dir.exists(file.path(pipFolder, hicds)))
# Pipeline gene list: names = row IDs of the expression table, values = the
# gene identifiers used in the sampling data.
geneList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_geneList.Rdata")
stopifnot(file.exists(geneList_file))
geneList <- eval(parse(text = load(geneList_file)))
regionList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_regionList.Rdata")
stopifnot(file.exists(regionList_file))
# NOTE(review): regionList is loaded but never used below -- confirm.
regionList <- eval(parse(text = load(regionList_file)))
# The per-dataset settings file defines sample1_file/sample2_file (among
# others) directly in this environment via source().
settingFile <- file.path(pipFolder, "PIPELINE", "INPUT_FILES", hicds, paste0("run_settings_", exprds, ".R"))
stopifnot(file.exists(settingFile))
source(settingFile)
sample1_file <- file.path(setDir, sample1_file)
sample2_file <- file.path(setDir, sample2_file)
stopifnot(file.exists(sample1_file))
stopifnot(file.exists(sample2_file))
# Sample IDs (expression-matrix column names) for the two conditions.
cond1_ID <- eval(parse(text = load(sample1_file)))
cond2_ID <- eval(parse(text = load(sample2_file)))
# Quantile-normalized expression table for this dataset.
qqnormDTfile <- file.path(pipOutFolder, hicds, exprds,script0_name, "rna_qqnorm_rnaseqDT.Rdata")
stopifnot(file.exists(qqnormDTfile))
qqnormDT <- eval(parse(text = load(qqnormDTfile)))
stopifnot(names(geneList) %in% rownames(qqnormDT))
stopifnot(setequal(colnames(qqnormDT), c(cond1_ID, cond2_ID)))
norm_rnaseqDT <- qqnormDT[names(geneList),] # !!! ENSURE THAT THE QQNORM IN THE SAME ORDER AS THE GENELIST !!!
stopifnot(rownames(norm_rnaseqDT) == names(geneList))
stopifnot(!duplicated(names(geneList)))
# Sampled gene sets for this dataset: list by chromosome, each holding one
# entry per TAD size (named "...TADsOfSize_<n>"), each a list of samplings.
ds_sample_data <- all_ds_sample_data[[paste0(ds)]]
all_chr <- names(ds_sample_data)
chr = all_chr[7]  # interactive-debugging leftover; overwritten by the foreach below
ds_all_corr_data <- foreach(chr = all_chr) %dopar% {
cat(paste0("... start DS = ", ds, " - ", chr, "\n"))
chr_sample_data <- ds_sample_data[[paste0(chr)]]
stopifnot(length(chr_sample_data) > 0)
i=1  # interactive-debugging leftover; shadowed by the lapply argument below
chr_corr_data <- lapply(c(1:length(chr_sample_data)), function(i) {
sample_data <- chr_sample_data[[i]]
# The TAD size is encoded in the element name, e.g. "...TADsOfSize_5" -> 5.
TADsize <- gsub(".+TADsOfSize_(.+)", "\\1", names(chr_sample_data)[i])
TADsize <- as.numeric(TADsize)
stopifnot(!is.na(TADsize))
# Drop empty samplings; bail out with NA if nothing remains for this size.
sample_data <- Filter(function(x) !is.null(x), sample_data)
if(length(sample_data) == 0) return(NA)
sample_genes = sample_data[[1]]  # debug leftover; shadowed by the lapply argument below
sample_corr_data <- lapply(sample_data, function(sample_genes) {
stopifnot(length(sample_genes) == TADsize)
# Map the sampled gene IDs back to the rownames used in norm_rnaseqDT.
corr_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the nrom_rnaseqDT
sub_normDT <- norm_rnaseqDT[c(corr_genes),]
stopifnot(nrow(sub_normDT) == length(corr_genes))
stopifnot(cond1_ID %in% colnames(sub_normDT))
stopifnot(cond2_ID %in% colnames(sub_normDT))
# Split the expression sub-matrix by condition.
sub_normDT_cond1 <- sub_normDT[,cond1_ID]
sub_normDT_cond2 <- sub_normDT[,cond2_ID]
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
# Mean pairwise correlation (corrMeth) over all samples and per condition;
# get_meanCorr_value_alternative() comes from utils_fct.R.
meanCorr_all <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT,
sample_genes = corr_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
cormet = corrMeth
)
meanCorr_cond1 <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT_cond1,
sample_genes = corr_genes,
cormet = corrMeth
)
meanCorr_cond2 <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT_cond2,
sample_genes = corr_genes,
cormet = corrMeth
)
list(meanCorr_all=meanCorr_all,
meanCorr_cond1 = meanCorr_cond1,
meanCorr_cond2 = meanCorr_cond2)
}) # end-lapply over sample_data (=list of genes for the current sampling)
# NOTE(review): this checkpoint file is rewritten for every TAD size of every
# chromosome, by concurrent %dopar% workers -- only the last write survives.
outFile <- file.path(outFolder, "sample_corr_data.Rdata")
save(sample_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
return(sample_corr_data)
}) # end-lapply over the different TAD size of the current chromo
# NOTE(review): same overwrite caveat -- a single file shared by all chromosomes.
outFile <- file.path(outFolder, "chr_corr_data.Rdata")
save(chr_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
# Names are assigned AFTER the save above, so the saved object is unnamed.
names(chr_corr_data) <- names(chr_sample_data)
return(chr_corr_data)
} # end-foreach iterating over the chromo
names(ds_all_corr_data) <- all_chr
# Per-dataset checkpoint (also overwritten by each dataset iteration).
outFile <- file.path(outFolder, "ds_all_corr_data.Rdata")
save(ds_all_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
ds_all_corr_data
} #end-foreach iterating over the datasets
# Final result: one entry per dataset (list by chromosome, then by TAD size).
names(all_ds_sampling_corr) <- all_ds
outFile <- file.path(outFolder, "all_ds_sampling_corr.Rdata")
save(all_ds_sampling_corr, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
####################################################################################
####################################################################################
####################################################################################
# Report start/end timestamps and signal normal completion.
run_times <- paste0(startTime, "\n", Sys.time(), "\n")
cat(run_times)
cat("*** DONE: ", script_name, "\n", sep = "")
#
# if(tad_data$nGenes > 0) {
#
# ########## => TAKING THE TADs AND SAMPLING ON BOTH SIDES OF THE TADs
# sample_genes <- tad_data$genes
# tad_genes <- tad_data$tad_genes
#
# stopifnot(! sample_genes %in% tad_genes)
# stopifnot(sample_genes %in% geneList)
#
# inTAD_genes <- names(geneList)[geneList %in% tad_genes] # the names used in the nrom_rnaseqDT
# outTAD_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the nrom_rnaseqDT
#
# nTotGenes <- length(inTAD_genes) + length(outTAD_genes)
#
# stopifnot(! inTAD_genes %in% outTAD_genes)
# stopifnot(! outTAD_genes %in% inTAD_genes)
# stopifnot(inTAD_genes %in% rownames(norm_rnaseqDT))
# stopifnot(outTAD_genes %in% rownames(norm_rnaseqDT))
#
# sub_normDT <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes),]
#
# stopifnot(nrow(sub_normDT) == nTotGenes)
# stopifnot(rownames(sub_normDT) == c(inTAD_genes, outTAD_genes))
# stopifnot(cond1_ID %in% colnames(sub_normDT))
# stopifnot(cond2_ID %in% colnames(sub_normDT))
#
# sub_normDT_cond1 <- sub_normDT[,cond1_ID]
# sub_normDT_cond2 <- sub_normDT[,cond2_ID]
#
# stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
# stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
# stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
# stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
#
#
#
# meanCorr_all <- get_meanCorr_value(
# exprMatrix = sub_normDT,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
# meanCorr_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1,
# inside_genes = inTAD_genes,
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
#
# meanCorr_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2,
# inside_genes = inTAD_genes,
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
#
#
# ########## => TAKING THE TADs AND SAMPLING ON THE RIGHT ONLY
# if(tad_data$nGenes_right > 0) {
#
# sample_genes_right <- tad_data$genes_right
# outTAD_genes_right <- names(geneList)[geneList %in% sample_genes_right] # the names used in the nrom_rnaseqDT
#
# stopifnot(! sample_genes_right %in% tad_genes)
#
#
# nTotGenes_right <- length(inTAD_genes) + length(outTAD_genes_right)
#
# stopifnot(! inTAD_genes %in% outTAD_genes_right)
# stopifnot(! outTAD_genes_right %in% inTAD_genes)
#
# stopifnot(outTAD_genes_right %in% rownames(norm_rnaseqDT))
#
# sub_normDT_right <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_right),]
#
# stopifnot(nrow(sub_normDT_right) == nTotGenes_right)
# stopifnot(rownames(sub_normDT_right) == c(inTAD_genes, outTAD_genes_right))
# stopifnot(cond1_ID %in% colnames(sub_normDT_right))
# stopifnot(cond2_ID %in% colnames(sub_normDT_right))
#
# sub_normDT_cond1_right <- sub_normDT_right[,cond1_ID]
# sub_normDT_cond2_right <- sub_normDT_right[,cond2_ID]
#
# stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond1_right))
# stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond2_right))
# stopifnot( ncol(sub_normDT_cond1_right) + ncol(sub_normDT_cond2_right) == ncol(sub_normDT_right))
# stopifnot( ncol(sub_normDT_cond1_right) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2_right) == length(cond2_ID))
#
#
# meanCorrRight_all <- get_meanCorr_value(
# exprMatrix = sub_normDT_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
# meanCorrRight_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
# meanCorrRight_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
#
# } else {
#
#
# meanCorrRight_all <- NA
# meanCorrRight_cond1 <- NA
# meanCorrRight_cond2 <- NA
#
# }
# if(tad_data$nGenes_left > 0) {
#
# sample_genes_left <- tad_data$genes_left
#
# stopifnot(! sample_genes_left %in% tad_genes)
#
# outTAD_genes_left <- names(geneList)[geneList %in% sample_genes_left] # the names used in the nrom_rnaseqDT
#
#
#
# stopifnot(! sample_genes_left %in% tad_genes)
#
#
# nTotGenes_left <- length(inTAD_genes) + length(outTAD_genes_left)
#
# stopifnot(! inTAD_genes %in% outTAD_genes_left)
# stopifnot(! outTAD_genes_left %in% inTAD_genes)
#
# stopifnot(outTAD_genes_left %in% rownames(norm_rnaseqDT))
#
# sub_normDT_left <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_left),]
#
# stopifnot(nrow(sub_normDT_left) == nTotGenes_left)
# stopifnot(rownames(sub_normDT_left) == c(inTAD_genes, outTAD_genes_left))
# stopifnot(cond1_ID %in% colnames(sub_normDT_left))
# stopifnot(cond2_ID %in% colnames(sub_normDT_left))
#
# sub_normDT_cond1_left <- sub_normDT_left[,cond1_ID]
# sub_normDT_cond2_left <- sub_normDT_left[,cond2_ID]
#
# stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond1_left))
# stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond2_left))
# stopifnot( ncol(sub_normDT_cond1_left) + ncol(sub_normDT_cond2_left) == ncol(sub_normDT_left))
# stopifnot( ncol(sub_normDT_cond1_left) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2_left) == length(cond2_ID))
#
#
# meanCorrLeft_all <- get_meanCorr_value(
# exprMatrix = sub_normDT_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
# meanCorrLeft_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
# meanCorrLeft_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
#
# } else {
#
#
# meanCorrLeft_all <- NA
# meanCorrLeft_cond1 <- NA
# meanCorrLeft_cond2 <- NA
#
# }
#
#
#
# } else {
# meanCorr_all <- NA
# meanCorr_cond1 <- NA
# meanCorr_cond2 <- NA
#
# meanCorrRight_all <- NA
# meanCorrRight_cond1 <- NA
# meanCorrRight_cond2 <- NA
#
#
# meanCorrLeft_all <- NA
# meanCorrLeft_cond1 <- NA
# meanCorrLeft_cond2 <- NA
#
# }
#
# list(
# nGenes = tad_data$nGenes,
# meanCorr = meanCorr_all,
# meanCorr_cond1 = meanCorr_cond1,
# meanCorr_cond2 = meanCorr_cond2,
# nGenes_right = tad_data$nGenes_right,
# meanCorr_right = meanCorrRight_all,
# meanCorr_cond1_right = meanCorrRight_cond1,
# meanCorr_cond2_right = meanCorrRight_cond2,
# nGenes_left = tad_data$nGenes_left,
# meanCorr_left = meanCorrLeft_all,
# meanCorr_cond1_left = meanCorrLeft_cond1,
# meanCorr_cond2_left = meanCorrLeft_cond2
# )
#
# } # end iterating over all TADs for the current dataset
# names(ds_all_corr_data) <- all_regs
# ds_all_corr_data
# } # end iterating over all DS
# names(all_ds_around_TADs_corr) <- all_ds
#
#
| /2_Yuanlong_Cancer_HiC_data_TAD_DA/coexpr_alternative_samplings.R | no_license | marzuf/2Yuanlong_Cancer_HiC_data_TAD_DA | R | false | false | 16,034 | r |
# Computes mean expression correlations for alternative TAD gene samplings.
# Usage: Rscript coexpr_alternative_samplings.R <nSampled>
options(scipen=100)  # avoid scientific notation in printed numbers / folder names
# Rscript coexpr_alternative_samplings.R 10
# Rscript coexpr_alternative_samplings.R 1000
script_name <- "coexpr_alternative_samplings.R"
startTime <- Sys.time()
cat("> START ",script_name, "\n")
SSHFS <- FALSE  # TRUE when running locally over an sshfs mount (debugging)
buildData <- TRUE  # NOTE(review): set but never read in this script -- confirm
setDir <- ifelse(SSHFS, "/media/electron", "") # needed to load the setting file...
require(foreach)
require(reshape2)
require(doMC)
registerDoMC(ifelse(SSHFS, 2, 90))  # 90 parallel workers on the cluster, 2 locally
source("utils_fct.R")  # provides get_meanCorr_value_alternative() used below
nSampled =1000  # interactive default; overwritten from the command line just below
nSampled <- commandArgs(trailingOnly = TRUE)
nSampled <- as.numeric(nSampled)
stopifnot(!is.na(nSampled))
# All outputs go to COEXPR_ALTERNATIVE_SAMPLINGS/<nSampled>/
outFolder <- file.path("COEXPR_ALTERNATIVE_SAMPLINGS", nSampled)
dir.create(outFolder, recursive=TRUE)
# Input: sampled gene sets per dataset/chromosome/TAD size, produced upstream
# by the CREATE_SAMPLE_ALTERNATIVE step with the same nSampled.
inFold <- file.path("CREATE_SAMPLE_ALTERNATIVE", nSampled)
stopifnot(dir.exists(inFold))
inFile <- file.path(inFold, "all_ds_sample_byTAD.Rdata")
stopifnot(file.exists(inFile))
# load() returns the loaded object's name; eval(parse()) fetches it regardless
# of what it was called when saved.
all_ds_sample_data <- eval(parse(text = load(inFile)))
corrMeth <- "pearson"
script0_name <- "0_prepGeneData"
# Pipeline folders holding per-dataset gene lists, settings and expression data.
pipFolder <- file.path("..", "Yuanlong_Cancer_HiC_data_TAD_DA")
stopifnot(dir.exists(pipFolder))
pipOutFolder <- file.path(pipFolder, "PIPELINE", "OUTPUT_FOLDER")
stopifnot(dir.exists(pipOutFolder))
# Datasets are keyed "<hicds>/<exprds>" in the sampling data.
all_ds <- names(all_ds_sample_data)
nDS <- length(all_ds)
cat("... found nDS = ", nDS, "\n")
ds = all_ds[2]  # interactive-debugging leftover; overwritten by the foreach below
# NOTE(review): the next line restricts the run to the FIRST dataset only,
# despite nDS being reported above -- looks like a debugging leftover; remove
# it to process all datasets.
all_ds=all_ds[1]
# For each dataset, compute the mean expression correlation of every sampled
# gene set (all samples, cond1 only, cond2 only), nested as:
#   dataset -> chromosome -> TAD size -> sampling.
# Datasets run sequentially (%do%); chromosomes run in parallel (%dopar%,
# doMC workers registered at the top of the script).
all_ds_sampling_corr <- foreach(ds = all_ds ) %do% {
cat(paste0("... start DS = ", ds, "\n"))
# ds is "<hicds>/<exprds>": split into Hi-C dataset and expression dataset.
hicds <- dirname(ds)
exprds <- basename(ds)
stopifnot(dir.exists(file.path(pipFolder, hicds)))
# Pipeline gene list: names = row IDs of the expression table, values = the
# gene identifiers used in the sampling data.
geneList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_geneList.Rdata")
stopifnot(file.exists(geneList_file))
geneList <- eval(parse(text = load(geneList_file)))
regionList_file <- file.path(pipOutFolder, hicds, exprds, script0_name, "pipeline_regionList.Rdata")
stopifnot(file.exists(regionList_file))
# NOTE(review): regionList is loaded but never used below -- confirm.
regionList <- eval(parse(text = load(regionList_file)))
# The per-dataset settings file defines sample1_file/sample2_file (among
# others) directly in this environment via source().
settingFile <- file.path(pipFolder, "PIPELINE", "INPUT_FILES", hicds, paste0("run_settings_", exprds, ".R"))
stopifnot(file.exists(settingFile))
source(settingFile)
sample1_file <- file.path(setDir, sample1_file)
sample2_file <- file.path(setDir, sample2_file)
stopifnot(file.exists(sample1_file))
stopifnot(file.exists(sample2_file))
# Sample IDs (expression-matrix column names) for the two conditions.
cond1_ID <- eval(parse(text = load(sample1_file)))
cond2_ID <- eval(parse(text = load(sample2_file)))
# Quantile-normalized expression table for this dataset.
qqnormDTfile <- file.path(pipOutFolder, hicds, exprds,script0_name, "rna_qqnorm_rnaseqDT.Rdata")
stopifnot(file.exists(qqnormDTfile))
qqnormDT <- eval(parse(text = load(qqnormDTfile)))
stopifnot(names(geneList) %in% rownames(qqnormDT))
stopifnot(setequal(colnames(qqnormDT), c(cond1_ID, cond2_ID)))
norm_rnaseqDT <- qqnormDT[names(geneList),] # !!! ENSURE THAT THE QQNORM IN THE SAME ORDER AS THE GENELIST !!!
stopifnot(rownames(norm_rnaseqDT) == names(geneList))
stopifnot(!duplicated(names(geneList)))
# Sampled gene sets for this dataset: list by chromosome, each holding one
# entry per TAD size (named "...TADsOfSize_<n>"), each a list of samplings.
ds_sample_data <- all_ds_sample_data[[paste0(ds)]]
all_chr <- names(ds_sample_data)
chr = all_chr[7]  # interactive-debugging leftover; overwritten by the foreach below
ds_all_corr_data <- foreach(chr = all_chr) %dopar% {
cat(paste0("... start DS = ", ds, " - ", chr, "\n"))
chr_sample_data <- ds_sample_data[[paste0(chr)]]
stopifnot(length(chr_sample_data) > 0)
i=1  # interactive-debugging leftover; shadowed by the lapply argument below
chr_corr_data <- lapply(c(1:length(chr_sample_data)), function(i) {
sample_data <- chr_sample_data[[i]]
# The TAD size is encoded in the element name, e.g. "...TADsOfSize_5" -> 5.
TADsize <- gsub(".+TADsOfSize_(.+)", "\\1", names(chr_sample_data)[i])
TADsize <- as.numeric(TADsize)
stopifnot(!is.na(TADsize))
# Drop empty samplings; bail out with NA if nothing remains for this size.
sample_data <- Filter(function(x) !is.null(x), sample_data)
if(length(sample_data) == 0) return(NA)
sample_genes = sample_data[[1]]  # debug leftover; shadowed by the lapply argument below
sample_corr_data <- lapply(sample_data, function(sample_genes) {
stopifnot(length(sample_genes) == TADsize)
# Map the sampled gene IDs back to the rownames used in norm_rnaseqDT.
corr_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the nrom_rnaseqDT
sub_normDT <- norm_rnaseqDT[c(corr_genes),]
stopifnot(nrow(sub_normDT) == length(corr_genes))
stopifnot(cond1_ID %in% colnames(sub_normDT))
stopifnot(cond2_ID %in% colnames(sub_normDT))
# Split the expression sub-matrix by condition.
sub_normDT_cond1 <- sub_normDT[,cond1_ID]
sub_normDT_cond2 <- sub_normDT[,cond2_ID]
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
# Mean pairwise correlation (corrMeth) over all samples and per condition;
# get_meanCorr_value_alternative() comes from utils_fct.R.
meanCorr_all <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT,
sample_genes = corr_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
cormet = corrMeth
)
meanCorr_cond1 <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT_cond1,
sample_genes = corr_genes,
cormet = corrMeth
)
meanCorr_cond2 <- get_meanCorr_value_alternative(
exprMatrix = sub_normDT_cond2,
sample_genes = corr_genes,
cormet = corrMeth
)
list(meanCorr_all=meanCorr_all,
meanCorr_cond1 = meanCorr_cond1,
meanCorr_cond2 = meanCorr_cond2)
}) # end-lapply over sample_data (=list of genes for the current sampling)
# NOTE(review): this checkpoint file is rewritten for every TAD size of every
# chromosome, by concurrent %dopar% workers -- only the last write survives.
outFile <- file.path(outFolder, "sample_corr_data.Rdata")
save(sample_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
return(sample_corr_data)
}) # end-lapply over the different TAD size of the current chromo
# NOTE(review): same overwrite caveat -- a single file shared by all chromosomes.
outFile <- file.path(outFolder, "chr_corr_data.Rdata")
save(chr_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
# Names are assigned AFTER the save above, so the saved object is unnamed.
names(chr_corr_data) <- names(chr_sample_data)
return(chr_corr_data)
} # end-foreach iterating over the chromo
names(ds_all_corr_data) <- all_chr
# Per-dataset checkpoint (also overwritten by each dataset iteration).
outFile <- file.path(outFolder, "ds_all_corr_data.Rdata")
save(ds_all_corr_data, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
ds_all_corr_data
} #end-foreach iterating over the datasets
# Final result: one entry per dataset (list by chromosome, then by TAD size).
names(all_ds_sampling_corr) <- all_ds
outFile <- file.path(outFolder, "all_ds_sampling_corr.Rdata")
save(all_ds_sampling_corr, file = outFile)
cat(paste0("... written: ", outFile, "\n"))
####################################################################################
####################################################################################
####################################################################################
# Report start/end timestamps and signal normal completion.
run_times <- paste0(startTime, "\n", Sys.time(), "\n")
cat(run_times)
cat("*** DONE: ", script_name, "\n", sep = "")
#
# if(tad_data$nGenes > 0) {
#
# ########## => TAKING THE TADs AND SAMPLING ON BOTH SIDES OF THE TADs
# sample_genes <- tad_data$genes
# tad_genes <- tad_data$tad_genes
#
# stopifnot(! sample_genes %in% tad_genes)
# stopifnot(sample_genes %in% geneList)
#
# inTAD_genes <- names(geneList)[geneList %in% tad_genes] # the names used in the nrom_rnaseqDT
# outTAD_genes <- names(geneList)[geneList %in% sample_genes] # the names used in the nrom_rnaseqDT
#
# nTotGenes <- length(inTAD_genes) + length(outTAD_genes)
#
# stopifnot(! inTAD_genes %in% outTAD_genes)
# stopifnot(! outTAD_genes %in% inTAD_genes)
# stopifnot(inTAD_genes %in% rownames(norm_rnaseqDT))
# stopifnot(outTAD_genes %in% rownames(norm_rnaseqDT))
#
# sub_normDT <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes),]
#
# stopifnot(nrow(sub_normDT) == nTotGenes)
# stopifnot(rownames(sub_normDT) == c(inTAD_genes, outTAD_genes))
# stopifnot(cond1_ID %in% colnames(sub_normDT))
# stopifnot(cond2_ID %in% colnames(sub_normDT))
#
# sub_normDT_cond1 <- sub_normDT[,cond1_ID]
# sub_normDT_cond2 <- sub_normDT[,cond2_ID]
#
# stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond1))
# stopifnot(nrow(sub_normDT) == nrow(sub_normDT_cond2))
# stopifnot( ncol(sub_normDT_cond1) + ncol(sub_normDT_cond2) == ncol(sub_normDT))
# stopifnot( ncol(sub_normDT_cond1) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2) == length(cond2_ID))
#
#
#
# meanCorr_all <- get_meanCorr_value(
# exprMatrix = sub_normDT,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
# meanCorr_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1,
# inside_genes = inTAD_genes,
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
#
# meanCorr_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2,
# inside_genes = inTAD_genes,
# outside_genes = outTAD_genes,
# cormet = corrMeth
# )
#
#
#
# ########## => TAKING THE TADs AND SAMPLING ON THE RIGHT ONLY
# if(tad_data$nGenes_right > 0) {
#
# sample_genes_right <- tad_data$genes_right
# outTAD_genes_right <- names(geneList)[geneList %in% sample_genes_right] # the names used in the nrom_rnaseqDT
#
# stopifnot(! sample_genes_right %in% tad_genes)
#
#
# nTotGenes_right <- length(inTAD_genes) + length(outTAD_genes_right)
#
# stopifnot(! inTAD_genes %in% outTAD_genes_right)
# stopifnot(! outTAD_genes_right %in% inTAD_genes)
#
# stopifnot(outTAD_genes_right %in% rownames(norm_rnaseqDT))
#
# sub_normDT_right <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_right),]
#
# stopifnot(nrow(sub_normDT_right) == nTotGenes_right)
# stopifnot(rownames(sub_normDT_right) == c(inTAD_genes, outTAD_genes_right))
# stopifnot(cond1_ID %in% colnames(sub_normDT_right))
# stopifnot(cond2_ID %in% colnames(sub_normDT_right))
#
# sub_normDT_cond1_right <- sub_normDT_right[,cond1_ID]
# sub_normDT_cond2_right <- sub_normDT_right[,cond2_ID]
#
# stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond1_right))
# stopifnot(nrow(sub_normDT_right) == nrow(sub_normDT_cond2_right))
# stopifnot( ncol(sub_normDT_cond1_right) + ncol(sub_normDT_cond2_right) == ncol(sub_normDT_right))
# stopifnot( ncol(sub_normDT_cond1_right) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2_right) == length(cond2_ID))
#
#
# meanCorrRight_all <- get_meanCorr_value(
# exprMatrix = sub_normDT_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
# meanCorrRight_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
# meanCorrRight_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2_right,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_right,
# cormet = corrMeth
# )
#
#
# } else {
#
#
# meanCorrRight_all <- NA
# meanCorrRight_cond1 <- NA
# meanCorrRight_cond2 <- NA
#
# }
# if(tad_data$nGenes_left > 0) {
#
# sample_genes_left <- tad_data$genes_left
#
# stopifnot(! sample_genes_left %in% tad_genes)
#
# outTAD_genes_left <- names(geneList)[geneList %in% sample_genes_left] # the names used in the nrom_rnaseqDT
#
#
#
# stopifnot(! sample_genes_left %in% tad_genes)
#
#
# nTotGenes_left <- length(inTAD_genes) + length(outTAD_genes_left)
#
# stopifnot(! inTAD_genes %in% outTAD_genes_left)
# stopifnot(! outTAD_genes_left %in% inTAD_genes)
#
# stopifnot(outTAD_genes_left %in% rownames(norm_rnaseqDT))
#
# sub_normDT_left <- norm_rnaseqDT[c(inTAD_genes, outTAD_genes_left),]
#
# stopifnot(nrow(sub_normDT_left) == nTotGenes_left)
# stopifnot(rownames(sub_normDT_left) == c(inTAD_genes, outTAD_genes_left))
# stopifnot(cond1_ID %in% colnames(sub_normDT_left))
# stopifnot(cond2_ID %in% colnames(sub_normDT_left))
#
# sub_normDT_cond1_left <- sub_normDT_left[,cond1_ID]
# sub_normDT_cond2_left <- sub_normDT_left[,cond2_ID]
#
# stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond1_left))
# stopifnot(nrow(sub_normDT_left) == nrow(sub_normDT_cond2_left))
# stopifnot( ncol(sub_normDT_cond1_left) + ncol(sub_normDT_cond2_left) == ncol(sub_normDT_left))
# stopifnot( ncol(sub_normDT_cond1_left) == length(cond1_ID))
# stopifnot(ncol(sub_normDT_cond2_left) == length(cond2_ID))
#
#
# meanCorrLeft_all <- get_meanCorr_value(
# exprMatrix = sub_normDT_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
# meanCorrLeft_cond1 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond1_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
# meanCorrLeft_cond2 <- get_meanCorr_value(
# exprMatrix = sub_normDT_cond2_left,
# inside_genes = inTAD_genes, # inside_genes and outside_genes should be in rownames of exprMatrix
# outside_genes = outTAD_genes_left,
# cormet = corrMeth
# )
#
#
# } else {
#
#
# meanCorrLeft_all <- NA
# meanCorrLeft_cond1 <- NA
# meanCorrLeft_cond2 <- NA
#
# }
#
#
#
# } else {
# meanCorr_all <- NA
# meanCorr_cond1 <- NA
# meanCorr_cond2 <- NA
#
# meanCorrRight_all <- NA
# meanCorrRight_cond1 <- NA
# meanCorrRight_cond2 <- NA
#
#
# meanCorrLeft_all <- NA
# meanCorrLeft_cond1 <- NA
# meanCorrLeft_cond2 <- NA
#
# }
#
# list(
# nGenes = tad_data$nGenes,
# meanCorr = meanCorr_all,
# meanCorr_cond1 = meanCorr_cond1,
# meanCorr_cond2 = meanCorr_cond2,
# nGenes_right = tad_data$nGenes_right,
# meanCorr_right = meanCorrRight_all,
# meanCorr_cond1_right = meanCorrRight_cond1,
# meanCorr_cond2_right = meanCorrRight_cond2,
# nGenes_left = tad_data$nGenes_left,
# meanCorr_left = meanCorrLeft_all,
# meanCorr_cond1_left = meanCorrLeft_cond1,
# meanCorr_cond2_left = meanCorrLeft_cond2
# )
#
# } # end iterating over all TADs for the current dataset
# names(ds_all_corr_data) <- all_regs
# ds_all_corr_data
# } # end iterating over all DS
# names(all_ds_around_TADs_corr) <- all_ds
#
#
|
# Exploratory analysis of household power consumption for 2007-02-01/02:
# builds a 2x2 panel of time-series plots; dev.off() follows on the next line.
proj1 <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Keep only the two target days (dates are stored as d/m/Y strings).
proj1 <- subset(proj1, Date == "1/2/2007" | Date == "2/2/2007")
# Measurement columns arrive as character (missing values coded "?") -> numeric.
proj1$Global_active_power <- as.numeric(proj1$Global_active_power)
proj1$Voltage <- as.numeric(proj1$Voltage)
# Force English weekday labels on the time axis regardless of system locale.
Sys.setlocale(category = "LC_TIME", locale = "C")
# Hoist the date-time parsing that was previously repeated in every plot call.
datetime <- strptime(paste(proj1$Date, proj1$Time), format = "%d/%m/%Y %H:%M:%S")
png(file = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Panel 1: global active power.
plot(datetime, proj1$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = " ")
# Panel 2: voltage (y-axis label fixed; it previously said "Global Active Power (kilowatts)").
plot(datetime, proj1$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# Panel 3: the three sub-metering series overlaid with a legend.
plot(datetime, proj1$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = " ")
lines(datetime, proj1$Sub_metering_2, type = "l", col = "red")
lines(datetime, proj1$Sub_metering_3, type = "l", col = "blue")
legend("topright", bty = "n", lwd = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power.
plot(datetime, proj1$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off() | /figure/plot4.R | no_license | MariaLuisaSantaMaria/ExData_Plotting1 | R | false | false | 1,383 | r | proj1 <- read.table("household_power_consumption.txt",header=TRUE,sep=";",stringsAsFactors=FALSE)
# Continuation of the plot4.R script: `proj1` was read from
# household_power_consumption.txt on the preceding line.
# Keep only the two target days (dates are stored as d/m/Y strings).
proj1 <- subset(proj1, Date == "1/2/2007" | Date == "2/2/2007")
# Measurement columns arrive as character (missing values coded "?") -> numeric.
proj1$Global_active_power <- as.numeric(proj1$Global_active_power)
proj1$Voltage <- as.numeric(proj1$Voltage)
# Force English weekday labels on the time axis regardless of system locale.
Sys.setlocale(category = "LC_TIME", locale = "C")
# Hoist the date-time parsing that was previously repeated in every plot call.
datetime <- strptime(paste(proj1$Date, proj1$Time), format = "%d/%m/%Y %H:%M:%S")
png(file = "plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2, 2))
# Panel 1: global active power.
plot(datetime, proj1$Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = " ")
# Panel 2: voltage (y-axis label fixed; it previously said "Global Active Power (kilowatts)").
plot(datetime, proj1$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# Panel 3: the three sub-metering series overlaid with a legend.
plot(datetime, proj1$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = " ")
lines(datetime, proj1$Sub_metering_2, type = "l", col = "red")
lines(datetime, proj1$Sub_metering_3, type = "l", col = "blue")
legend("topright", bty = "n", lwd = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Panel 4: global reactive power.
plot(datetime, proj1$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
#' @name summary.mWaveD
#' @title Summary Output for the mWaveD object
#'
#' @param object A mWaveD object which is a list containing all the information for a multichannel
#' deconvolution analysis produced by the \code{\link{multiWaveD}} function.
#' @param ... Arguments to be passed to methods.
#' @description Gives some numerical summaries of a \code{mWaveD} object.
#' @return Text output giving summary information of the input and output analysis including, \itemize{
#' \item Degree of Meyer wavelet used in the analysis.
#' \item Number of observations, within each channel and number of channels present.
#' \item Resolution levels used (j0 to j1)
#' \item Blur type assumed in the analysis (direct, smooth or box.car)
#' \item Matrix summarising the noise levels in each channel (and Fourier decay information for the smooth case)
#' \item Summaries of the severity of the thresholding applied amongst the resolutions.
#' }
#' @seealso \code{\link{multiWaveD}}
#'
#' @examples
#' library(mwaved)
#' # Simulate the multichannel doppler signal.
#' m <- 3
#' n <- 2^10
#' t <- (1:n)/n
#' signal <- makeDoppler(n)
#' # Create multichannel version with smooth blur
#' shape <- seq(from = 0.5, to = 1, length = m)
#' scale <- rep(0.25, m)
#' G <- gammaBlur(n, shape, scale)
#' X <- blurSignal(signal, G)
#' # Add noise with custom signal to noise ratio
#' SNR <- c(10,15,20)
#' E <- multiNoise(n, sigma = sigmaSNR(X, SNR), alpha = c(0.5, 0.75, 1))
#' # Create noisy & blurred multichannel signal
#' Y <- X + E
#' mWaveDObject <- multiWaveD(Y, G)
#' summary(mWaveDObject)
#' @export
summary.mWaveD <- function(object, ...){
  # Print a numerical summary of a multichannel wavelet deconvolution
  # analysis: wavelet degree, sample size, channel-level noise/decay
  # estimates, the selected finest resolution j1 and thresholding severity.
  n <- length(object$estimate)   # observations per channel
  m <- dim(object$signal)[2]     # number of channels
  channel_labels <- paste0("Channel ", seq_len(m), ':')
  cat("Degree of Meyer wavelet =", object$degree, " , Coarse resolution level j0 =", object$j0)
  cat("\n")
  cat("Sample size per channel = ", n, ", Maximum possible resolution level = ", log2(n) - 1, ".", sep = '')
  cat("\n\n")
  cat("Number of channels: m =", m, "\n")
  cat('Detected Blur Type:', detectBlur(object$G), '\n\n')
  cat('Resolution selection method: ', object$resolution, '\n\n')
  cat("Estimated Channel information:\n\n")
  if (object$resolution == "smooth") {
    # Smooth (and direct) case: report Fourier cutoffs and highest levels.
    # With direct blur every channel shares the selected resolution j1.
    direct <- (object$blurDetected == "direct")
    level_col <- if (direct) rep(object$j1, m) else object$blurInfo$maxLevels
    info <- cbind(round(object$sigma, 3), round(object$alpha, 3), object$blurInfo$freq, level_col)
    colnames(info) <- c("Sigma.hat", "Alpha", "Fourier number cutoff", "Highest resolution")
    rownames(info) <- channel_labels
    print(info, ...)
    if (!direct) {
      cat("\n")
      cat("Estimated best channel = Channel", object$blurInfo$bestChannel)
    }
  } else if (object$resolution == "block") {
    # Block case: only noise level and dependence estimates per channel.
    info <- cbind(round(object$sigma, 3), round(object$alpha, 3))
    colnames(info) <- c("Sigma.hat", "Alpha")
    rownames(info) <- channel_labels
    print(info, ...)
  } else {
    warning('Unrecognised resolution selection method.')
  }
  cat("\n\n")
  cat("mWaveD optimal finest resolution level j1 =", object$j1)
  cat("\n\n")
  cat("Thresholding method:", object$shrinkType, " Tuning parameter: eta =", object$eta, '\n\n')
  # Per-level shrinkage summary: largest coefficient, threshold and % shrunk.
  shrinkage <- cbind(round(object$levelMax, 4), round(object$thresh, 4), object$percent)
  rownames(shrinkage) <- paste("Level", object$j0:object$j1, ":")
  colnames(shrinkage) <- c("Max|w|", "Threshold", "% Shrinkage")
  print(shrinkage, ...)
}
#' @name plot.waveletCoef
#' @title Multi-Resolution Analysis plot of wavelet coefficients
#'
#' @description Plots the wavelet coefficient object in the multiresolution analysis
#'
#' @param x A list of class waveletCoef.
#' @param y An optional numeric vector of trimmed wavelet coefficients to be overlayed on top of the plot for comparison with the \code{x} wavelet coefficients.
#' @param labels Optional character vector with two elements to give name labels to \code{x} and \code{y} respectively.
#' @param ... Arguments to be passed to methods.
#' @param lowest Specifies the coarsest resolution to display in the Multi-resolution plot.
#' @param highest Specifies the finest resolution to display in the Multi-resolution plot.
#' @param scaling A numeric value that acts as a graphical scaling parameter to rescale the wavelet coefficients in the plot. A larger scaling value will reduce the size of the coefficients in the plot.
#' @param ggplot A logical value to specify if the user wants to use base graphics (FALSE) or ggplot2 graphics (TRUE).
#'
#' @seealso \code{\link{multiCoef}} for generating a list of class `waveletCoef`
#'
#' @export
plot.waveletCoef <- function(x, y = NULL, labels = NULL, ..., lowest = NULL, highest = NULL, scaling = 1, ggplot = TRUE){
  # Multi-resolution analysis (MRA) plot: each wavelet coefficient is drawn
  # as a vertical segment at its dyadic location k/2^j, offset vertically by
  # its resolution level j. If `y` (e.g. thresholded coefficients) is given,
  # its surviving coefficients are overlaid for comparison.
  # Use inherits() rather than class(x) == "...": class() may return a
  # vector, which makes the == comparison unreliable in an if()/stopifnot().
  stopifnot(inherits(x, "waveletCoef"))
  if (!is.null(y) && !inherits(y, "waveletCoef")) {
    stop('y must be a waveletCoef object')
  }
  n <- length(x$coef)
  J <- floor(log2(n))
  fine <- J - 1  # finest representable level (ceiling() of the floored J was a no-op)
  # Check resolution ranges
  if (is.null(lowest)) {
    lowest <- x$j0
  } else if (lowest < x$j0 ) {
    warning("lowest level shouldn't be smaller than j0 specified in wavelet coefficient object.")
  }
  # Find the last non-zero coefficient so empty fine levels are not plotted.
  ind <- which.max(rev(x$coef) != 0)
  lastres <- floor(log2(n - ind + 1)) - 1
  if (is.null(highest)) {
    highest <- lastres
  } else if (highest > fine) {
    warning(paste('highest level too high. Resetting highest level to the maximum at j1 = ', fine))
    # Actually perform the reset the warning promises; previously `highest`
    # was left out of range, indexing past the end of x$coef below.
    highest <- fine
  } else if (highest < lowest) {
    warning('highest level must be higher than the lowest level.')
    highest <- lowest
  }
  # Level index j and dyadic location k/2^j for every coefficient shown.
  js <- rep(lowest:highest, 2^(lowest:highest))
  ks <- unlist(lapply(lowest:highest, function(i) 0:(2^i-1)/2^i))
  wi <- (2^lowest + 1):2^(highest + 1)
  nw <- length(wi)
  # Rescale so the largest coefficient stays within half a level's height.
  w <- x$coef[wi]*scaling
  wf <- 2.05 * max(abs(w))/wf_denominator(scaling)
  w <- w/wf
  ws <- w + js
  # Prepare the overlay coefficients (if supplied).
  if (!is.null(y)) {
    if (length(x$coef) != length(y$coef)) {
      stop('length of y coefficients is different to the length of x coefficients')
    }
    j0Trim <- y$j0
    if (j0Trim != x$j0) {
      warning('y object has a different coarse resolution j0 than x object, interpret lower resolution levels with caution')
    }
    wShrink <- y$coef[wi]/wf
    # Only coefficients that survived thresholding are overlaid.
    survived <- which(wShrink != 0)
    kss <- ks[survived]
    jss <- js[survived]
    wss <- wShrink[survived] + jss
    ns <- 2
    nss <- length(survived)
  } else {
    ns <- 1
  }
  mraTitle <- 'MRA'
  mraLabels <- c("Location", "Resolution Level")
  # Validate/normalise the legend labels.
  if (!is.null(labels)) {
    if (length(labels) != ns) {
      warning('length of labels might not be long enough.')
      labels = c('x', 'y')
    }
  } else {
    labels = c('x', 'y')
  }
  # Fall back to base graphics when ggplot2 is not installed.
  if (ggplot) {
    ggAvailable <- requireNamespace("ggplot2", quietly = TRUE)
    if (!ggAvailable) {
      ggplot = FALSE
    }
  }
  if (ggplot) {
    if (ns == 2) {
      nData <- data.frame(w = c(ws,wss), js = c(js, jss), ks = c(ks, kss), col = rep(labels, c(nw, nss)))
      mraPlot <- ggplot2::ggplot(nData) + ggplot2::geom_segment(ggplot2::aes(x = ks, xend = ks, y = js, yend = w, colour = col, size = col)) + ggplot2::labs(x = mraLabels[1], y = mraLabels[2]) + ggplot2::scale_size_discrete(range = c(1, 2))
      # Fix legend
      mraPlot <- mraPlot + ggplot2::theme(legend.position = "top", axis.text.y = ggplot2::element_text(angle = 90)) + ggplot2::guides(colour = ggplot2::guide_legend(title = mraTitle), size = ggplot2::guide_legend(title = mraTitle))
    } else {
      nData <- data.frame(w = ws, js = js, ks = ks)
      mraPlot <- ggplot2::ggplot(nData) + ggplot2::geom_segment(ggplot2::aes(x = ks, xend = ks, y = js, yend = w), colour = 'red') + ggplot2::ggtitle(mraTitle)
    }
    mraPlot + ggplot2::labs(x = mraLabels[1], y = mraLabels[2]) + ggplot2::scale_y_continuous(breaks = lowest:highest)
  } else {
    buf <- 0.5
    plot(0, type = "n", xlim = c(0,1), ylim = c(lowest - buf, highest + buf), yaxt = 'n', xlab = mraLabels[1], ylab = mraLabels[2], main = mraTitle)
    axis(2, at = lowest:highest)
    if (!is.null(y)) {
      col <- 'red'
    } else {
      col <- 1
    }
    segments(ks, js, ks, ws, col = col)
    abline(h = lowest:highest, v = axTicks(1), col="gray", lty=3)
    if (!is.null(y)) {
      segments(kss, jss, kss, wss, lwd = 2, col = 'blue')
    }
  }
}
#' @name plot.mWaveD
#' @title Plot Output for the mWaveD object
#'
#' @description Creates plot output that summarises the \code{mWaveD} object produced by the \code{\link{multiWaveD}} function.
#'
#' @param x A mWaveD object to be plotted (list created by \code{\link{multiWaveD}})
#' @param ... Arguments to be passed to methods.
#' @param which A numeric vector that specifies which plots to output. Default value is \code{1:4} which specifies that all four plots are to be displayed.
#' @param ask A logical value that specifies whether the user is \emph{ask}ed before each plot output.
#' @param singlePlot A logical value that controls whether all plots should appear on a single window. The plot window is resized depending on the value of \code{which}.
#' @param ggplot A logical value to specify if the user wants to use base graphics (FALSE) or ggplot2 graphics (TRUE).
#'
#' @details Four plots are output that summarise the multichannel input, a visualisation of the characteristics of the channels and the output estimate and a multi-resolution analysis plot.\itemize{
#' \item Plot 1: Multichannel input signal overlayed.
#' \item Plot 2: Estimated output signal using the mWaveD approach.
#' \item Plot 3: Plot of the log decay of Fourier coefficients against the log bounds (direct and smooth case) or the blockwise resolution levels against their limit (box car case)
#' \item Plot 4: Multi-resolution plot of the raw wavelet coefficients and the trimmed wavelet coefficients}
#' @references
#' Kulik, R., Sapatinas, T. and Wishart, J.R. (2014) \emph{Multichannel wavelet deconvolution with long range dependence. Upper bounds on the L_p risk} Appl. Comput. Harmon. Anal. (to appear in).
#' \url{http://dx.doi.org/10.1016/j.acha.2014.04.004}
#'
#' Wishart, J.R. (2014) \emph{Data-driven wavelet resolution choice in multichannel box-car deconvolution with long memory}, Proceedings of COMPSTAT 2014, Geneva Switzerland, Physica Verlag, Heidelberg (to appear)
#' @seealso \code{\link{multiWaveD}}
#' @importFrom grid grid.newpage pushViewport viewport grid.layout
#' @export
plot.mWaveD <- function(x, ..., which = 1L:4L, singlePlot = TRUE, ask = !singlePlot, ggplot = TRUE){
  # Draws up to four diagnostic panels for a mWaveD object:
  #   1 = multichannel input signal, 2 = mWaveD estimate,
  #   3 = resolution diagnostics (Fourier decay or blockwise), 4 = MRA plot.
  # Check if ggplot is available if requested
  if (ggplot) {
    ggAvailable <- requireNamespace("ggplot2", quietly = TRUE)
    # Check optional dependency
    if (ggAvailable) {
      # Graphical constants used by the ggplot layers below.
      hsize <- 1
      lsize <- 0.5
      asize <- 0.5
      # Initialise list collecting one ggplot object per requested panel.
      ggList <- list(NULL)
      i <- 1
    }
    # NOTE(review): unlike plot.waveletCoef, `ggplot` is not reset to FALSE
    # when ggplot2 is unavailable, so the ggplot branch below would still be
    # taken and fail on ggplot2:: calls — confirm intended behavior.
  } else {
    ggAvailable <- FALSE
  }
  show <- rep(FALSE, 4)
  # Make sure which argument is numeric
  if (!is.numeric(which)) {
    stop('`which` argument must be a vector containing numerical elements')
  }
  # Only consider the integer bits between 1:4
  which <- which[which %in% 1L:4L]
  # Complain if there are no values in 1:4
  if (length(which) == 0) {
    stop('`which` argument must be a vector containing elements: 1, 2, 3 or 4')
  }
  show[which] <- TRUE
  # Dimensions and unit time grid shared by all panels.
  n <- length(x$estimate)
  n2 <- n/2
  m <- dim(x$signal)[2]
  t <- (1:n)/n
  blurInfo <- x$blurInfo
  resolution <- x$resolution
  j0 <- x$j0
  j1 <- x$j1
  # Fixed titles/labels for the four panels.
  estimateTitle <- 'mWaveD estimate'
  signalTitle <- 'Input Signal'
  fourierLabel <- 'Fourier number'
  fourierTitle <- 'Kernel decay'
  blockTitle <- 'Block wise resolution selection'
  mraLabels <- c('raw',paste(x$shrinkType, ' thresholded', sep = ''))
  mraTitle <- 'Multiresolution Analysis'
  # Precompute the data for panel 3 (shared by ggplot and base branches).
  if (show[3L]) {
    if (resolution != "block") {
      # Fourier-decay diagnostics mirrored over negative/positive frequencies.
      xw = fourierWindow(n)
      blur <- mirrorSpec(blurInfo$decay)
      cut <- mirrorSpec(blurInfo$cutoff)
      ylim <- c(min(blurInfo$cutoff[2, ]), 0)
      if (x$blurDetected == 'direct') {
        xlim = c(-n2, n2)
      } else {
        # Best-channel cutoff point highlighted in the plot.
        xbest <- max(blurInfo$freqCutoffs) - 1
        ybest <- cut[n/2 + xbest, blurInfo$bestChannel]
        xlim <- min(2*max(blurInfo$freqCutoffs), n/2)
        xlim <- c(-xlim, xlim)
      }
    } else {
      # Blockwise diagnostics: resolution variance vs its cutoff, by level j.
      J <- floor(log2(n)) - 1
      j <- j0:min(c(J, 2 * j1))
      blkV <- blurInfo$blockVar[1:length(j)]
      blkc <- blurInfo$blockCutoff[1:length(j)]
      ylim <- range(c(blurInfo$blockVar, blurInfo$blockCutoff))
    }
  }
  # Prompt before each new page if requested; restore the setting on exit.
  if (ask) {
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))
  }
  if (ggplot) {
    # ---- ggplot2 branch: build each requested panel as a ggplot object ----
    if (show[1L]) {
      # Panel 1: all channels overlaid, coloured by channel letter.
      signalData <- data.frame(Y = as.vector(x$signal), x = rep(t, m), Channel = rep(LETTERS[1:m], each = n))
      signalPlot <- ggplot2::ggplot(signalData, ggplot2::aes_string(x = 'x', y = 'Y', colour = 'Channel')) + ggplot2::geom_line(size = lsize, alpha = asize) + ggplot2::ggtitle(signalTitle) + ggplot2::labs(x = '', y = '')
      ggList[[i]] <- signalPlot
      i <- i + 1
    }
    if (show[2L]) {
      # Panel 2: the mWaveD estimate.
      estimateData <- data.frame(Y = as.vector(x$estimate), x = t)
      estimatePlot <- ggplot2::ggplot(estimateData, ggplot2::aes_string(x = 'x', y = 'Y')) + ggplot2::geom_line(size = lsize, alpha = asize) + ggplot2::ggtitle(estimateTitle) + ggplot2::labs(x = '', y = '')
      ggList[[i]] <- estimatePlot
      i <- i + 1
    }
    if (show[3L]) {
      # Panel 3: resolution diagnostics.
      if (resolution != 'block') {
        # Solid lines: kernel decay per channel; dashed: the cutoff bounds.
        fourierData <- data.frame(Y = as.vector(blur), x = rep(xw,m), Ycut = as.vector(cut), Channel=rep(LETTERS[1:m],each=n), m = m)
        resolutionPlot <- ggplot2::ggplot(fourierData) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Y', colour = 'Channel', group = 'Channel'),size = 1) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Ycut', colour = 'Channel'), linetype='dashed', size = 1) + ggplot2::ggtitle(fourierTitle) + ggplot2::labs(x = fourierLabel, y = '') + ggplot2::coord_cartesian(xlim = xlim)
        if (resolution == 'smooth' && x$blurDetected != 'direct') {
          # Mark the best channel's cutoff with dotted drop-lines and circles.
          rightLine <- ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = data.frame(x = rep(xbest,2), y = c(ybest, -Inf)))
          leftLine <- ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = data.frame(x = rep(-xbest,2), y = c(ybest, -Inf)))
          pointDots <- ggplot2::geom_point(ggplot2::aes_string(x = 'xbest', y = 'ybest'), shape = 1, size = 4, data = data.frame(xbest = c(-xbest, xbest), ybest = rep(ybest, 2)))
          resolutionPlot <- resolutionPlot + leftLine + rightLine + pointDots
        }
      } else {
        # Blockwise variant: variance vs bounds with the chosen j1 circled.
        resolutionData <- data.frame(Y = c(blkV, blkc), x = rep(j,2), colour = rep(c("Resolution var.",'Resolution bounds'), each = length(j)) , Ycut = blkc)
        bestV <- blkV[j == j1]
        highlightData <- data.frame(x = c(j1, j1), y = c(ylim[1], bestV))
        pointData <- data.frame(j1 = j1, bestV = bestV)
        resolutionPlot <- ggplot2::ggplot(resolutionData) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Y', colour = 'colour', linetype = 'colour'), size = hsize) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = highlightData) + ggplot2::labs(x = 'j', y = '') + ggplot2::geom_point( ggplot2::aes_string(x = 'j1', y = 'bestV'), size = 4, shape = 1, data = pointData) + ggplot2::scale_color_discrete(labels= c('Resolution bounds', 'Resolution var.'), guide=ggplot2::guide_legend(title.position='left',title.theme = ggplot2::element_text(size=15,angle=0))) + ggplot2::scale_size(guide='none') + ggplot2::guides(colour = ggplot2::guide_legend( title='Blockwise resolution decay')) + ggplot2::theme(legend.position="top", legend.key = ggplot2::element_rect(fill = NA), axis.text.y = ggplot2::element_text(angle = 90)) + ggplot2::scale_linetype_manual(values=c(1,2), name="Blockwise resolution decay", labels=c('Resolution bounds', 'Resolution var.')) + ggplot2::scale_x_continuous(breaks = j)
      }
      ggList[[i]] <- resolutionPlot
      i <- i + 1
    }
    if (show[4L]) {
      # Panel 4: MRA plot via plot.waveletCoef (S3 dispatch on x$coef).
      mraPlot <- plot(x$coef, x$shrinkCoef, highest = j1, labels = c('Raw', paste('Thresholded (', x$shrinkType, ')', sep = '')), ggplot = TRUE)
      ggList[[i]] <- mraPlot
    }
    # Plot them
    if (singlePlot) {
      # Arrange the collected panels on one grid page (2 columns for 4 plots).
      nPlots <- sum(show)
      if (nPlots < 4) {
        cols <- 1
      } else {
        cols <- 2
      }
      layout <- matrix(seq(1, cols * ceiling(nPlots/cols)),
                       ncol = cols, nrow = ceiling(nPlots/cols),
                       byrow = TRUE)
      if (nPlots == 1) {
        print(ggList[[1]])
      } else {
        grid.newpage()
        pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
        for (i in 1:nPlots) {
          # Viewport cell for the i-th plot in the layout matrix.
          matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
          print(ggList[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                           layout.pos.col = matchidx$col))
        }
      }
    } else {
      # One page per panel.
      if (show[1L]) {
        print(signalPlot)
      }
      if (show[2L]) {
        print(estimatePlot)
      }
      if (show[3L]) {
        print(resolutionPlot)
      }
      if (show[4L]) {
        print(mraPlot)
      }
    }
  } else {
    # ---- base graphics branch ----
    if (singlePlot) {
      # mfrow layout sized to the number of requested panels.
      plotDims <- switch(sum(show), c(1, 1), c(2, 1), c(3, 1), c(2, 2))
      par(mfrow = plotDims)
    } else {
      par(mfrow = c(1,1))
    }
    if (show[1L]) {
      # Panel 1: all channels overlaid.
      matplot(t, x$signal, type = 'l', main = signalTitle, ylab = '', xlab = '', lty = 1, cex = 0.8)
      grid()
    }
    if (show[2L]) {
      # Plot mWaveD estimate
      plot(t, x$estimate, type = 'l', main = estimateTitle, ylab = '', xlab = '', ...)
      grid()
    }
    if (show[3L]) {
      # Plot resolution analysis
      if (resolution != 'block') {
        iw = fourierWindow(n)
        matplot(iw, blur, type = 'l', lty = 1, xlim = xlim, ylim = ylim, main = fourierTitle, xlab = fourierLabel, ylab = "")
        matlines(iw, cut, lty = 2)
        grid()
        if (resolution == 'smooth' && x$blurDetected != "direct") {
          # Mark the best channel's cutoff point on both sides.
          points(xbest, ybest, col='blue')
          points(-xbest, ybest, col = 'blue')
          xbest <- rep(xbest, 2)
          ybest <- c(ylim[1], ybest)
          lines(xbest, ybest, lty = 'dotted')
          lines(-xbest, ybest, lty = 'dotted')
        }
      } else {
        # Blockwise variant with a small vertical margin around the curves.
        rang = range(as.vector(c(blkV, blkc)))
        buf = 0.1 * diff(rang)
        ylims = c(rang[1] - buf, rang[2] + buf)
        plot(j, blkV, type = 'b', xlab = 'j', ylab = '', main = blockTitle, ylim = ylims)
        lines(j, blkc, col = 2)
        points(j1, blurInfo$blockVar[j == j1], col='blue')
        lines(c(j1, j1), c(ylims[1], blurInfo$blockVar[j == j1]), lty = 'dashed')
        grid()
      }
    }
    if (show[4L]) {
      # Panel 4: MRA plot in base graphics via plot.waveletCoef.
      plot(x$coef, x$shrinkCoef, highest = j1, ..., ggplot = FALSE)
    }
  }
}
# Mirror a one-sided spectrum about the origin so it can be drawn over both
# negative and positive Fourier frequencies; operates column-wise on matrices.
mirrorSpec <- function(x) {
  if (is.matrix(x)) {
    len <- nrow(x)
    rbind(as.matrix(x[(len - 1):2, ]), x)
  } else {
    len <- length(x)
    c(x[(len - 1):2], x)
  }
}
# Symmetric Fourier frequency index window for a length-n signal:
# -(floor(n/2) - 1), ..., 0, ..., floor(n/2).
fourierWindow <- function(n) {
  half <- floor(n/2)
  -(half - 1):half
}
#' @name mWaveDDemo
#' @title Interactive Demonstration
#' @description Interactive Demonstration
#' @importFrom shiny runApp
#' @export
mWaveDDemo <- function (){
runApp(system.file('mWaveDDemo', package = 'mwaved'))
} | /mwaved/R/utility-mwaved.R | no_license | ingted/R-Examples | R | false | false | 20,294 | r | #' @name summary.mWaveD
#' @title Summary Output for the mWaveD object
#'
#' @param object A mWaveD object which is a list containing all the information for a multichannel
#' deconvolution analysis produced by the \code{\link{multiWaveD}} function.
#' @param ... Arguments to be passed to methods.
#' @description Gives some numerical summaries of a \code{mWaveD} object.
#' @return Text output giving summary information of the input and output analysis including, \itemize{
#' \item Degree of Meyer wavelet used in the analysis.
#' \item Number of observations, within each channel and number of channels present.
#' \item Resolution levels used (j0 to j1)
#' \item Blur type assumed in the analysis (direct, smooth or box.car)
#' \item Matrix summarising the noise levels in each channel (and Fourier decay information for the smooth case)
#' \item Summaries of the severity of the thresholding applied amongst the resolutions.
#' }
#' @seealso \code{\link{multiWaveD}}
#'
#' @examples
#' library(mwaved)
#' # Simulate the multichannel doppler signal.
#' m <- 3
#' n <- 2^10
#' t <- (1:n)/n
#' signal <- makeDoppler(n)
#' # Create multichannel version with smooth blur
#' shape <- seq(from = 0.5, to = 1, length = m)
#' scale <- rep(0.25, m)
#' G <- gammaBlur(n, shape, scale)
#' X <- blurSignal(signal, G)
#' # Add noise with custom signal to noise ratio
#' SNR <- c(10,15,20)
#' E <- multiNoise(n, sigma = sigmaSNR(X, SNR), alpha = c(0.5, 0.75, 1))
#' # Create noisy & blurred multichannel signal
#' Y <- X + E
#' mWaveDObject <- multiWaveD(Y, G)
#' summary(mWaveDObject)
#' @export
summary.mWaveD <- function(object, ...){
  # Print a numerical summary of a multichannel wavelet deconvolution
  # analysis: wavelet degree, sample size, channel-level noise/decay
  # estimates, the selected finest resolution j1 and thresholding severity.
  n <- length(object$estimate)   # observations per channel
  m <- dim(object$signal)[2]     # number of channels
  channel_labels <- paste0("Channel ", seq_len(m), ':')
  cat("Degree of Meyer wavelet =", object$degree, " , Coarse resolution level j0 =", object$j0)
  cat("\n")
  cat("Sample size per channel = ", n, ", Maximum possible resolution level = ", log2(n) - 1, ".", sep = '')
  cat("\n\n")
  cat("Number of channels: m =", m, "\n")
  cat('Detected Blur Type:', detectBlur(object$G), '\n\n')
  cat('Resolution selection method: ', object$resolution, '\n\n')
  cat("Estimated Channel information:\n\n")
  if (object$resolution == "smooth") {
    # Smooth (and direct) case: report Fourier cutoffs and highest levels.
    # With direct blur every channel shares the selected resolution j1.
    direct <- (object$blurDetected == "direct")
    level_col <- if (direct) rep(object$j1, m) else object$blurInfo$maxLevels
    info <- cbind(round(object$sigma, 3), round(object$alpha, 3), object$blurInfo$freq, level_col)
    colnames(info) <- c("Sigma.hat", "Alpha", "Fourier number cutoff", "Highest resolution")
    rownames(info) <- channel_labels
    print(info, ...)
    if (!direct) {
      cat("\n")
      cat("Estimated best channel = Channel", object$blurInfo$bestChannel)
    }
  } else if (object$resolution == "block") {
    # Block case: only noise level and dependence estimates per channel.
    info <- cbind(round(object$sigma, 3), round(object$alpha, 3))
    colnames(info) <- c("Sigma.hat", "Alpha")
    rownames(info) <- channel_labels
    print(info, ...)
  } else {
    warning('Unrecognised resolution selection method.')
  }
  cat("\n\n")
  cat("mWaveD optimal finest resolution level j1 =", object$j1)
  cat("\n\n")
  cat("Thresholding method:", object$shrinkType, " Tuning parameter: eta =", object$eta, '\n\n')
  # Per-level shrinkage summary: largest coefficient, threshold and % shrunk.
  shrinkage <- cbind(round(object$levelMax, 4), round(object$thresh, 4), object$percent)
  rownames(shrinkage) <- paste("Level", object$j0:object$j1, ":")
  colnames(shrinkage) <- c("Max|w|", "Threshold", "% Shrinkage")
  print(shrinkage, ...)
}
#' @name plot.waveletCoef
#' @title Multi-Resolution Analysis plot of wavelet coefficients
#'
#' @description Plots the wavelet coefficient object in the multiresolution analysis
#'
#' @param x A list of class waveletCoef.
#' @param y An optional numeric vector of trimmed wavelet coefficients to be overlayed on top of the plot for comparison with the \code{x} wavelet coefficients.
#' @param labels Optional character vector with two elements to give name labels to \code{x} and \code{y} respectively.
#' @param ... Arguments to be passed to methods.
#' @param lowest Specifies the coarsest resolution to display in the Multi-resolution plot.
#' @param highest Specifies the finest resolution to display in the Multi-resolution plot.
#' @param scaling A numeric value that acts as a graphical scaling parameter to rescale the wavelet coefficients in the plot. A larger scaling value will reduce the size of the coefficients in the plot.
#' @param ggplot A logical value to specify if the user wants to use base graphics (FALSE) or ggplot2 graphics (TRUE).
#'
#' @seealso \code{\link{multiCoef}} for generating a list of class `waveletCoef`
#'
#' @export
plot.waveletCoef <- function(x, y = NULL, labels = NULL, ..., lowest = NULL, highest = NULL, scaling = 1, ggplot = TRUE){
  # Multi-resolution analysis (MRA) plot: each wavelet coefficient is drawn
  # as a vertical segment at its dyadic location k/2^j, offset vertically by
  # its resolution level j. If `y` (e.g. thresholded coefficients) is given,
  # its surviving coefficients are overlaid for comparison.
  # Use inherits() rather than class(x) == "...": class() may return a
  # vector, which makes the == comparison unreliable in an if()/stopifnot().
  stopifnot(inherits(x, "waveletCoef"))
  if (!is.null(y) && !inherits(y, "waveletCoef")) {
    stop('y must be a waveletCoef object')
  }
  n <- length(x$coef)
  J <- floor(log2(n))
  fine <- J - 1  # finest representable level (ceiling() of the floored J was a no-op)
  # Check resolution ranges
  if (is.null(lowest)) {
    lowest <- x$j0
  } else if (lowest < x$j0 ) {
    warning("lowest level shouldn't be smaller than j0 specified in wavelet coefficient object.")
  }
  # Find the last non-zero coefficient so empty fine levels are not plotted.
  ind <- which.max(rev(x$coef) != 0)
  lastres <- floor(log2(n - ind + 1)) - 1
  if (is.null(highest)) {
    highest <- lastres
  } else if (highest > fine) {
    warning(paste('highest level too high. Resetting highest level to the maximum at j1 = ', fine))
    # Actually perform the reset the warning promises; previously `highest`
    # was left out of range, indexing past the end of x$coef below.
    highest <- fine
  } else if (highest < lowest) {
    warning('highest level must be higher than the lowest level.')
    highest <- lowest
  }
  # Level index j and dyadic location k/2^j for every coefficient shown.
  js <- rep(lowest:highest, 2^(lowest:highest))
  ks <- unlist(lapply(lowest:highest, function(i) 0:(2^i-1)/2^i))
  wi <- (2^lowest + 1):2^(highest + 1)
  nw <- length(wi)
  # Rescale so the largest coefficient stays within half a level's height.
  w <- x$coef[wi]*scaling
  wf <- 2.05 * max(abs(w))/scaling
  w <- w/wf
  ws <- w + js
  # Prepare the overlay coefficients (if supplied).
  if (!is.null(y)) {
    if (length(x$coef) != length(y$coef)) {
      stop('length of y coefficients is different to the length of x coefficients')
    }
    j0Trim <- y$j0
    if (j0Trim != x$j0) {
      warning('y object has a different coarse resolution j0 than x object, interpret lower resolution levels with caution')
    }
    wShrink <- y$coef[wi]/wf
    # Only coefficients that survived thresholding are overlaid.
    survived <- which(wShrink != 0)
    kss <- ks[survived]
    jss <- js[survived]
    wss <- wShrink[survived] + jss
    ns <- 2
    nss <- length(survived)
  } else {
    ns <- 1
  }
  mraTitle <- 'MRA'
  mraLabels <- c("Location", "Resolution Level")
  # Validate/normalise the legend labels.
  if (!is.null(labels)) {
    if (length(labels) != ns) {
      warning('length of labels might not be long enough.')
      labels = c('x', 'y')
    }
  } else {
    labels = c('x', 'y')
  }
  # Fall back to base graphics when ggplot2 is not installed.
  if (ggplot) {
    ggAvailable <- requireNamespace("ggplot2", quietly = TRUE)
    if (!ggAvailable) {
      ggplot = FALSE
    }
  }
  if (ggplot) {
    if (ns == 2) {
      nData <- data.frame(w = c(ws,wss), js = c(js, jss), ks = c(ks, kss), col = rep(labels, c(nw, nss)))
      mraPlot <- ggplot2::ggplot(nData) + ggplot2::geom_segment(ggplot2::aes(x = ks, xend = ks, y = js, yend = w, colour = col, size = col)) + ggplot2::labs(x = mraLabels[1], y = mraLabels[2]) + ggplot2::scale_size_discrete(range = c(1, 2))
      # Fix legend
      mraPlot <- mraPlot + ggplot2::theme(legend.position = "top", axis.text.y = ggplot2::element_text(angle = 90)) + ggplot2::guides(colour = ggplot2::guide_legend(title = mraTitle), size = ggplot2::guide_legend(title = mraTitle))
    } else {
      nData <- data.frame(w = ws, js = js, ks = ks)
      mraPlot <- ggplot2::ggplot(nData) + ggplot2::geom_segment(ggplot2::aes(x = ks, xend = ks, y = js, yend = w), colour = 'red') + ggplot2::ggtitle(mraTitle)
    }
    mraPlot + ggplot2::labs(x = mraLabels[1], y = mraLabels[2]) + ggplot2::scale_y_continuous(breaks = lowest:highest)
  } else {
    buf <- 0.5
    plot(0, type = "n", xlim = c(0,1), ylim = c(lowest - buf, highest + buf), yaxt = 'n', xlab = mraLabels[1], ylab = mraLabels[2], main = mraTitle)
    axis(2, at = lowest:highest)
    if (!is.null(y)) {
      col <- 'red'
    } else {
      col <- 1
    }
    segments(ks, js, ks, ws, col = col)
    abline(h = lowest:highest, v = axTicks(1), col="gray", lty=3)
    if (!is.null(y)) {
      segments(kss, jss, kss, wss, lwd = 2, col = 'blue')
    }
  }
}
#' @name plot.mWaveD
#' @title Plot Output for the mWaveD object
#'
#' @description Creates plot output that summarises the \code{mWaveD} object produced by the \code{\link{multiWaveD}} function.
#'
#' @param x A mWaveD object to be plotted (list created by \code{\link{multiWaveD}})
#' @param ... Arguments to be passed to methods.
#' @param which A numeric vector that specifies which plots to output. Default value is \code{1:4} which specifies that all four plots are to be displayed.
#' @param ask A logical value that specifies whether the user is \emph{ask}ed before each plot output.
#' @param singlePlot A logical value that controls whether all plots should appear on a single window. The plot window is resized depending on the value of \code{which}.
#' @param ggplot A logical value to specify if the user wants to use base graphics (FALSE) or ggplot2 graphics (TRUE).
#'
#' @details Four plots are output that summarise the multichannel input, a visualisation of the characteristics of the channels and the output estimate and a multi-resolution analysis plot.\itemize{
#' \item Plot 1: Multichannel input signal overlayed.
#' \item Plot 2: Estimated output signal using the mWaveD approach.
#' \item Plot 3: Plot of the log decay of Fourier coefficients against the log bounds (direct and smooth case) or the blockwise resolution levels against their limit (box car case)
#' \item Plot 4: Multi-resolution plot of the raw wavelet coefficients and the trimmed wavelet coefficients}
#' @references
#' Kulik, R., Sapatinas, T. and Wishart, J.R. (2014) \emph{Multichannel wavelet deconvolution with long range dependence. Upper bounds on the L_p risk} Appl. Comput. Harmon. Anal. (to appear)
#' \url{http://dx.doi.org/10.1016/j.acha.2014.04.004}
#'
#' Wishart, J.R. (2014) \emph{Data-driven wavelet resolution choice in multichannel box-car deconvolution with long memory}, Proceedings of COMPSTAT 2014, Geneva Switzerland, Physica Verlag, Heidelberg (to appear)
#' @seealso \code{\link{multiWaveD}}
#' @importFrom grid grid.newpage pushViewport viewport grid.layout
#' @export
plot.mWaveD <- function(x, ..., which = 1L:4L, singlePlot = TRUE, ask = !singlePlot, ggplot = TRUE){
  # Summarise an mWaveD fit (see multiWaveD) with up to four plots:
  #   1: multichannel input signal, 2: mWaveD estimate,
  #   3: resolution-selection diagnostics, 4: multiresolution analysis.
  # Check if ggplot is available if requested
  if (ggplot) {
    ggAvailable <- requireNamespace("ggplot2", quietly = TRUE)
    # Check optional dependency: fall back to base graphics when ggplot2 is
    # not installed (previously `ggplot` remained TRUE and the ggplot2
    # branch below errored; plot.waveletCoef already degrades gracefully)
    if (!ggAvailable) {
      ggplot <- FALSE
    } else {
      # ggplot line sizes/alpha and a list collecting the plot objects
      hsize <- 1
      lsize <- 0.5
      asize <- 0.5
      # Initialise list
      ggList <- list(NULL)
      i <- 1
    }
  } else {
    ggAvailable <- FALSE
  }
  show <- rep(FALSE, 4)
  # Make sure which argument is numeric
  if (!is.numeric(which)) {
    stop('`which` argument must be a vector containing numerical elements')
  }
  # Only consider the integer bits between 1:4
  which <- which[which %in% 1L:4L]
  # Complain if there are no values in 1:4
  if (length(which) == 0) {
    stop('`which` argument must be a vector containing elements: 1, 2, 3 or 4')
  }
  show[which] <- TRUE
  n <- length(x$estimate)
  n2 <- n/2
  m <- dim(x$signal)[2]
  t <- (1:n)/n
  blurInfo <- x$blurInfo
  resolution <- x$resolution
  j0 <- x$j0
  j1 <- x$j1
  estimateTitle <- 'mWaveD estimate'
  signalTitle <- 'Input Signal'
  fourierLabel <- 'Fourier number'
  fourierTitle <- 'Kernel decay'
  blockTitle <- 'Block wise resolution selection'
  mraLabels <- c('raw',paste(x$shrinkType, ' thresholded', sep = ''))
  mraTitle <- 'Multiresolution Analysis'
  if (show[3L]) {
    # Pre-compute the data for the resolution diagnostic plot
    if (resolution != "block") {
      xw = fourierWindow(n)
      blur <- mirrorSpec(blurInfo$decay)
      cut <- mirrorSpec(blurInfo$cutoff)
      ylim <- c(min(blurInfo$cutoff[2, ]), 0)
      if (x$blurDetected == 'direct') {
        xlim = c(-n2, n2)
      } else {
        # location of the best channel's frequency cutoff, used to highlight it
        xbest <- max(blurInfo$freqCutoffs) - 1
        ybest <- cut[n/2 + xbest, blurInfo$bestChannel]
        xlim <- min(2*max(blurInfo$freqCutoffs), n/2)
        xlim <- c(-xlim, xlim)
      }
    } else {
      J <- floor(log2(n)) - 1
      j <- j0:min(c(J, 2 * j1))
      blkV <- blurInfo$blockVar[1:length(j)]
      blkc <- blurInfo$blockCutoff[1:length(j)]
      ylim <- range(c(blurInfo$blockVar, blurInfo$blockCutoff))
    }
  }
  if (ask) {
    # prompt before each new page; restore the device setting on exit
    oask <- devAskNewPage(TRUE)
    on.exit(devAskNewPage(oask))
  }
  if (ggplot) {
    if (show[1L]) {
      signalData <- data.frame(Y = as.vector(x$signal), x = rep(t, m), Channel = rep(LETTERS[1:m], each = n))
      signalPlot <- ggplot2::ggplot(signalData, ggplot2::aes_string(x = 'x', y = 'Y', colour = 'Channel')) + ggplot2::geom_line(size = lsize, alpha = asize) + ggplot2::ggtitle(signalTitle) + ggplot2::labs(x = '', y = '')
      ggList[[i]] <- signalPlot
      i <- i + 1
    }
    if (show[2L]) {
      estimateData <- data.frame(Y = as.vector(x$estimate), x = t)
      estimatePlot <- ggplot2::ggplot(estimateData, ggplot2::aes_string(x = 'x', y = 'Y')) + ggplot2::geom_line(size = lsize, alpha = asize) + ggplot2::ggtitle(estimateTitle) + ggplot2::labs(x = '', y = '')
      ggList[[i]] <- estimatePlot
      i <- i + 1
    }
    if (show[3L]) {
      if (resolution != 'block') {
        fourierData <- data.frame(Y = as.vector(blur), x = rep(xw,m), Ycut = as.vector(cut), Channel=rep(LETTERS[1:m],each=n), m = m)
        resolutionPlot <- ggplot2::ggplot(fourierData) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Y', colour = 'Channel', group = 'Channel'),size = 1) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Ycut', colour = 'Channel'), linetype='dashed', size = 1) + ggplot2::ggtitle(fourierTitle) + ggplot2::labs(x = fourierLabel, y = '') + ggplot2::coord_cartesian(xlim = xlim)
        if (resolution == 'smooth' && x$blurDetected != 'direct') {
          # mark the selected frequency cutoff on both sides of the spectrum
          rightLine <- ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = data.frame(x = rep(xbest,2), y = c(ybest, -Inf)))
          leftLine <- ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = data.frame(x = rep(-xbest,2), y = c(ybest, -Inf)))
          pointDots <- ggplot2::geom_point(ggplot2::aes_string(x = 'xbest', y = 'ybest'), shape = 1, size = 4, data = data.frame(xbest = c(-xbest, xbest), ybest = rep(ybest, 2)))
          resolutionPlot <- resolutionPlot + leftLine + rightLine + pointDots
        }
      } else {
        resolutionData <- data.frame(Y = c(blkV, blkc), x = rep(j,2), colour = rep(c("Resolution var.",'Resolution bounds'), each = length(j)) , Ycut = blkc)
        bestV <- blkV[j == j1]
        highlightData <- data.frame(x = c(j1, j1), y = c(ylim[1], bestV))
        pointData <- data.frame(j1 = j1, bestV = bestV)
        resolutionPlot <- ggplot2::ggplot(resolutionData) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'Y', colour = 'colour', linetype = 'colour'), size = hsize) + ggplot2::geom_line(ggplot2::aes_string(x = 'x', y = 'y'), linetype = 'dotted', data = highlightData) + ggplot2::labs(x = 'j', y = '') + ggplot2::geom_point( ggplot2::aes_string(x = 'j1', y = 'bestV'), size = 4, shape = 1, data = pointData) + ggplot2::scale_color_discrete(labels= c('Resolution bounds', 'Resolution var.'), guide=ggplot2::guide_legend(title.position='left',title.theme = ggplot2::element_text(size=15,angle=0))) + ggplot2::scale_size(guide='none') + ggplot2::guides(colour = ggplot2::guide_legend( title='Blockwise resolution decay')) + ggplot2::theme(legend.position="top", legend.key = ggplot2::element_rect(fill = NA), axis.text.y = ggplot2::element_text(angle = 90)) + ggplot2::scale_linetype_manual(values=c(1,2), name="Blockwise resolution decay", labels=c('Resolution bounds', 'Resolution var.')) + ggplot2::scale_x_continuous(breaks = j)
      }
      ggList[[i]] <- resolutionPlot
      i <- i + 1
    }
    if (show[4L]) {
      # delegate to plot.waveletCoef for the MRA panel
      mraPlot <- plot(x$coef, x$shrinkCoef, highest = j1, labels = c('Raw', paste('Thresholded (', x$shrinkType, ')', sep = '')), ggplot = TRUE)
      ggList[[i]] <- mraPlot
    }
    # Plot them
    if (singlePlot) {
      nPlots <- sum(show)
      if (nPlots < 4) {
        cols <- 1
      } else {
        cols <- 2
      }
      layout <- matrix(seq(1, cols * ceiling(nPlots/cols)),
                       ncol = cols, nrow = ceiling(nPlots/cols),
                       byrow = TRUE)
      if (nPlots == 1) {
        print(ggList[[1]])
      } else {
        # arrange the ggplot objects on a grid layout in one device page
        grid.newpage()
        pushViewport(viewport(layout = grid.layout(nrow(layout), ncol(layout))))
        for (i in 1:nPlots) {
          matchidx <- as.data.frame(which(layout == i, arr.ind = TRUE))
          print(ggList[[i]], vp = viewport(layout.pos.row = matchidx$row,
                                           layout.pos.col = matchidx$col))
        }
      }
    } else {
      if (show[1L]) {
        print(signalPlot)
      }
      if (show[2L]) {
        print(estimatePlot)
      }
      if (show[3L]) {
        print(resolutionPlot)
      }
      if (show[4L]) {
        print(mraPlot)
      }
    }
  } else {
    if (singlePlot) {
      plotDims <- switch(sum(show), c(1, 1), c(2, 1), c(3, 1), c(2, 2))
      par(mfrow = plotDims)
    } else {
      par(mfrow = c(1,1))
    }
    if (show[1L]) {
      matplot(t, x$signal, type = 'l', main = signalTitle, ylab = '', xlab = '', lty = 1, cex = 0.8)
      grid()
    }
    if (show[2L]) {
      # Plot mWaveD estimate
      plot(t, x$estimate, type = 'l', main = estimateTitle, ylab = '', xlab = '', ...)
      grid()
    }
    if (show[3L]) {
      # Plot resolution analysis
      if (resolution != 'block') {
        iw = fourierWindow(n)
        matplot(iw, blur, type = 'l', lty = 1, xlim = xlim, ylim = ylim, main = fourierTitle, xlab = fourierLabel, ylab = "")
        matlines(iw, cut, lty = 2)
        grid()
        if (resolution == 'smooth' && x$blurDetected != "direct") {
          points(xbest, ybest, col='blue')
          points(-xbest, ybest, col = 'blue')
          xbest <- rep(xbest, 2)
          ybest <- c(ylim[1], ybest)
          lines(xbest, ybest, lty = 'dotted')
          lines(-xbest, ybest, lty = 'dotted')
        }
      } else {
        rang = range(as.vector(c(blkV, blkc)))
        buf = 0.1 * diff(rang)
        ylims = c(rang[1] - buf, rang[2] + buf)
        plot(j, blkV, type = 'b', xlab = 'j', ylab = '', main = blockTitle, ylim = ylims)
        lines(j, blkc, col = 2)
        points(j1, blurInfo$blockVar[j == j1], col='blue')
        lines(c(j1, j1), c(ylims[1], blurInfo$blockVar[j == j1]), lty = 'dashed')
        grid()
      }
    }
    if (show[4L]) {
      plot(x$coef, x$shrinkCoef, highest = j1, ..., ggplot = FALSE)
    }
  }
}
# Auxiliary function to obtain domain for plot as a func of Fourier freq
mirrorSpec <- function(x) {
  # Reflect a one-sided spectrum about its first element so that it spans
  # the symmetric negative/positive Fourier frequencies used for plotting.
  # For a matrix input, each column (channel) is reflected row-wise.
  if (is.matrix(x)) {
    n <- dim(x)[1]
    # drop = FALSE keeps the reflected part a matrix even when it is a
    # single row: previously x[(n - 1):2, ] collapsed to a vector and
    # as.matrix() turned it into a column, breaking the rbind below for
    # multi-column input.
    x <- rbind(x[(n - 1):2, , drop = FALSE], x)
  } else {
    n <- length(x)
    x <- c(x[(n - 1):2], x)
  }
  x
}
# Auxiliary function to obtain domain for plot as a func of Fourier freq
fourierWindow <- function(n) {
  # Symmetric Fourier index window for a length-n signal:
  # -(floor(n/2) - 1), ..., floor(n/2).
  half <- floor(n/2)
  -(half - 1):half
}
#' @name mWaveDDemo
#' @title Interactive Demonstration
#' @description Interactive Demonstration
#' @importFrom shiny runApp
#' @export
mWaveDDemo <- function (){
  # Launch the interactive shiny demonstration bundled with the package
  # (app sources live under inst/mWaveDDemo).
  appPath <- system.file('mWaveDDemo', package = 'mwaved')
  runApp(appPath)
}
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{n.ap}
\alias{n.ap}
\title{Sample size for apparent prevalence}
\usage{
n.ap(p, precision, conf = 0.95)
}
\arguments{
\item{p}{expected proportion, scalar or vector of values}
\item{precision}{absolute precision, +/- proportion equivalent to
half the width of the desired confidence interval, scalar or vector of values,
note: at least one of p and precision must be a scalar}
\item{conf}{level of confidence required, default = 0.95 (scalar)}
}
\value{
a vector of sample sizes
}
\description{
Calculates sample size for estimating apparent
prevalence (simple proportion)
}
\examples{
# examples of n.ap
n.ap(0.5, 0.1)
n.ap(0.5, 0.1, conf=0.99)
n.ap(seq(0.1, 0.5, by = 0.1), 0.05)
n.ap(0.2, c(0.01, 0.02, 0.05, 0.1))
}
\keyword{methods}
| /man/n.ap.Rd | no_license | gustavossvet/RSurveillance-1 | R | false | false | 799 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{n.ap}
\alias{n.ap}
\title{Sample size for apparent prevalence}
\usage{
n.ap(p, precision, conf = 0.95)
}
\arguments{
\item{p}{expected proportion, scalar or vector of values}
\item{precision}{absolute precision, +/- proportion equivalent to
half the width of the desired confidence interval, scalar or vector of values,
note: at least one of p and precision must be a scalar}
\item{conf}{level of confidence required, default = 0.95 (scalar)}
}
\value{
a vector of sample sizes
}
\description{
Calculates sample size for estimating apparent
prevalence (simple proportion)
}
\examples{
# examples of n.ap
n.ap(0.5, 0.1)
n.ap(0.5, 0.1, conf=0.99)
n.ap(seq(0.1, 0.5, by = 0.1), 0.05)
n.ap(0.2, c(0.01, 0.02, 0.05, 0.1))
}
\keyword{methods}
|
#' calculate single factor model (CAPM) beta
#'
#' The single factor model or CAPM Beta is the beta of an asset to the variance
#' and covariance of an initial portfolio. Used to determine diversification potential.
#'
#' This function uses a linear intercept model to achieve the same results as
#' the symbolic model used by \code{\link{BetaCoVariance}}
#'
#' \deqn{\beta_{a,b}=\frac{CoV_{a,b}}{\sigma^{2}_{b}}=\frac{\sum((R_{a}-\bar{R_{a}})(R_{b}-\bar{R_{b}}))}{\sum(R_{b}-\bar{R_{b}})^{2}}}{beta
#' = cov(Ra,Rb)/var(Rb)}
#'
#' Ruppert(2004) reports that this equation will give the estimated slope of
#' the linear regression of \eqn{R_{a}}{Ra} on \eqn{R_{b}}{Rb} and that this
#' slope can be used to determine the risk premium or excess expected return
#' (see Eq. 7.9 and 7.10, p. 230-231).
#'
#' Two other functions apply the same notion of best fit to positive and
#' negative market returns, separately. The \code{CAPM.beta.bull} is a
#' regression for only positive market returns, which can be used to understand
#' the behavior of the asset or portfolio in positive or 'bull' markets.
#' Alternatively, \code{CAPM.beta.bear} provides the calculation on negative
#' market returns.
#'
#' The \code{TimingRatio} may help assess whether the manager is a good timer
#' of asset allocation decisions. The ratio, which is calculated as
#' \deqn{TimingRatio =\frac{\beta^{+}}{\beta^{-}}}{Timing Ratio = beta+/beta-}
#' is best when greater than one in a rising market and less than one in a
#' falling market.
#'
#' While the classical CAPM has been almost completely discredited by the
#' literature, it is an example of a simple single factor model,
#' comparing an asset to any arbitrary benchmark.
#'
#' @aliases CAPM.beta CAPM.beta.bull CAPM.beta.bear TimingRatio SFM.beta
#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of
#' asset returns
#' @param Rb return vector of the benchmark asset
#' @param Rf risk free rate, in same period as your returns
#' @author Peter Carl
#' @seealso \code{\link{BetaCoVariance}} \code{\link{CAPM.alpha}}
#' \code{\link{CAPM.utils}}
#' @references Sharpe, W.F. Capital Asset Prices: A theory of market
#' equilibrium under conditions of risk. \emph{Journal of finance}, vol 19,
#' 1964, 425-442. \cr Ruppert, David. \emph{Statistics and Finance, an
#' Introduction}. Springer. 2004. \cr Bacon, Carl. \emph{Practical portfolio
#' performance measurement and attribution}. Wiley. 2004. \cr
###keywords ts multivariate distribution models
#' @examples
#'
#' data(managers)
#' CAPM.alpha(managers[,1,drop=FALSE],
#' managers[,8,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1,drop=FALSE],
#' managers[,8,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.alpha(managers[,1:6],
#' managers[,8,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1:6],
#' managers[,8,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.alpha(managers[,1:6],
#' managers[,8:7,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1:6],
#' managers[,8:7,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.beta(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' CAPM.beta.bull(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' CAPM.beta.bear(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' TimingRatio(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' chart.Regression(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE],
#' fit="conditional",
#' main="Conditional Beta")
#'
#' @rdname CAPM.beta
#' @export CAPM.beta SFM.beta
CAPM.beta <- SFM.beta <- function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
    # Single factor model (CAPM) beta.
    #
    # Regresses the excess returns of each asset column in Ra on the excess
    # returns of each benchmark column in Rb, returning the fitted slope(s).
    #
    # Inputs:
    #   Ra: returns of the asset(s) being tested
    #   Rb: returns of the benchmark(s)
    #   Rf: risk-free rate in the same periodicity as the returns; may be a
    #       vector of the same length as Ra and Rb
    # Output:
    #   a scalar beta for a single asset/benchmark pair, otherwise a
    #   benchmark-by-asset matrix of betas
    Ra <- checkData(Ra)
    Rb <- checkData(Rb)
    if (!is.null(dim(Rf))) {
        Rf <- checkData(Rf)
    }
    n.assets <- NCOL(Ra)
    n.benchmarks <- NCOL(Rb)
    xRa <- Return.excess(Ra, Rf)
    xRb <- Return.excess(Rb, Rf)
    # every asset/benchmark column combination
    combos <- expand.grid(1:n.assets, 1:n.benchmarks)
    betas <- apply(combos, 1, function(idx) .beta(xRa[, idx[1]], xRb[, idx[2]]))
    if (length(betas) == 1) {
        return(betas)
    }
    dim(betas) <- c(n.assets, n.benchmarks)
    colnames(betas) <- paste("Beta:", colnames(Rb))
    rownames(betas) <- colnames(Ra)
    t(betas)
}
#' @rdname CAPM.beta
#' @export
CAPM.beta.bull <-
function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
    # DESCRIPTION:
    # This is a wrapper for calculating a conditional CAPM beta for up markets:
    # the regression slope is computed using only the periods in which the
    # benchmark's excess return is positive.
    # Inputs:
    # Ra: time series of returns for the asset being tested
    # Rb: time series of returns for the benchmark the asset is being gauged against
    # Rf: risk free rate in the same periodicity as the returns. May be a time series
    # of the same length as x and y.
    # Output:
    # Bull market beta (scalar for a single asset/benchmark pair, otherwise a
    # benchmark-by-asset matrix of betas)
    # FUNCTION:
    Ra = checkData(Ra)
    Rb = checkData(Rb)
    if(!is.null(dim(Rf)))
    Rf = checkData(Rf)
    Ra.ncols = NCOL(Ra)
    Rb.ncols = NCOL(Rb)
    # excess returns over the risk-free rate
    xRa = Return.excess(Ra, Rf)
    xRb = Return.excess(Rb, Rf)
    # every asset/benchmark column combination
    pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols)
    # patch: .beta fails if subset contains no positive values, !sum(Rb > 0) is true
    if (!sum(xRb > 0)) {
    message("Function CAPM.beta.bull: Cannot perform lm. All Rb values are negative.")
    return(NA)
    }
    # subset argument restricts the regression to positive-benchmark periods
    result = apply(pairs, 1, FUN = function(n, xRa, xRb)
    .beta(xRa[,n[1]], xRb[,n[2]], xRb[,n[2]] > 0), xRa = xRa, xRb = xRb)
    if(length(result) ==1)
    return(result)
    else {
    dim(result) = c(Ra.ncols, Rb.ncols)
    colnames(result) = paste("Bull Beta:", colnames(Rb))
    rownames(result) = colnames(Ra)
    return(t(result))
    }
}
#' @rdname CAPM.beta
#' @export
CAPM.beta.bear <-
function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
    # Conditional CAPM beta for down markets: the regression slope is
    # computed using only the periods in which the benchmark's excess
    # return is negative.
    #
    # Inputs:
    #   Ra: time series of returns for the asset(s) being tested
    #   Rb: time series of returns for the benchmark(s)
    #   Rf: risk-free rate in the same periodicity as the returns; may be a
    #       time series of the same length as Ra and Rb
    # Output:
    #   Bear market beta (scalar for a single pair, otherwise a
    #   benchmark-by-asset matrix)
    Ra <- checkData(Ra)
    Rb <- checkData(Rb)
    if (!is.null(dim(Rf))) {
        Rf <- checkData(Rf)
    }
    n.assets <- NCOL(Ra)
    n.benchmarks <- NCOL(Rb)
    xRa <- Return.excess(Ra, Rf)
    xRb <- Return.excess(Rb, Rf)
    combos <- expand.grid(1:n.assets, 1:n.benchmarks)
    # .beta cannot fit a regression when no benchmark observations are negative
    if (!sum(xRb < 0)) {
        message("Function CAPM.beta.bear: Cannot perform lm. All Rb values are positive.")
        return(NA)
    }
    # restrict each regression to the negative-benchmark periods
    betas <- apply(combos, 1, function(idx)
        .beta(xRa[, idx[1]], xRb[, idx[2]], xRb[, idx[2]] < 0))
    if (length(betas) == 1) {
        return(betas)
    }
    dim(betas) <- c(n.assets, n.benchmarks)
    colnames(betas) <- paste("Bear Beta:", colnames(Rb))
    rownames(betas) <- colnames(Ra)
    t(betas)
}
#' @rdname CAPM.beta
#' @export
TimingRatio <-
function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
    # Ratio of the bull-market beta to the bear-market beta. A good market
    # timer shows a ratio above one in rising markets and below one in
    # falling markets.
    up <- CAPM.beta.bull(Ra, Rb, Rf = Rf)
    down <- CAPM.beta.bear(Ra, Rb, Rf = Rf)
    ratio <- up/down
    if (length(ratio) == 1) {
        return(ratio)
    }
    # label rows by benchmark, matching the beta matrices' orientation
    rownames(ratio) <- paste("Timing Ratio:", colnames(Rb))
    ratio
}
.beta <- function (xRa, xRb, subset) {
    # Slope of the OLS regression of excess asset returns (xRa) on excess
    # benchmark returns (xRb), optionally restricted to the observations
    # where `subset` is TRUE. Returns NA when no complete observations exist.
    if (missing(subset)) {
        # default: use every observation (scalar TRUE recycles in cbind)
        subset <- TRUE
    }
    if (NCOL(xRa) != 1L || NCOL(xRb) != 1L || NCOL(subset) != 1L) {
        stop("all arguments must have only one column")
    }
    # align the three series and drop rows with any missing value
    joined <- as.data.frame(na.omit(cbind(xRa, xRb, subset)))
    if (NROW(joined) == 0) {
        return(NA)
    }
    colnames(joined) <- c("xRa", "xRb", "subset")
    # cbind coerced the logical mask to numeric; restore it
    joined$subset <- as.logical(joined$subset)
    fit <- lm(xRa ~ xRb, data = joined, subset = joined$subset)
    coef(fit)[[2]]
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2015 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
| /R/CAPM.beta.R | no_license | nemochina2008/PerformanceAnalytics | R | false | false | 9,551 | r | #' calculate single factor model (CAPM) beta
#'
#' The single factor model or CAPM Beta is the beta of an asset to the variance
#' and covariance of an initial portfolio. Used to determine diversification potential.
#'
#' This function uses a linear intercept model to achieve the same results as
#' the symbolic model used by \code{\link{BetaCoVariance}}
#'
#' \deqn{\beta_{a,b}=\frac{CoV_{a,b}}{\sigma_{b}}=\frac{\sum((R_{a}-\bar{R_{a}})(R_{b}-\bar{R_{b}}))}{\sum(R_{b}-\bar{R_{b}})^{2}}}{beta
#' = cov(Ra,Rb)/var(R)}
#'
#' Ruppert(2004) reports that this equation will give the estimated slope of
#' the linear regression of \eqn{R_{a}}{Ra} on \eqn{R_{b}}{Rb} and that this
#' slope can be used to determine the risk premium or excess expected return
#' (see Eq. 7.9 and 7.10, p. 230-231).
#'
#' Two other functions apply the same notion of best fit to positive and
#' negative market returns, separately. The \code{CAPM.beta.bull} is a
#' regression for only positive market returns, which can be used to understand
#' the behavior of the asset or portfolio in positive or 'bull' markets.
#' Alternatively, \code{CAPM.beta.bear} provides the calculation on negative
#' market returns.
#'
#' The \code{TimingRatio} may help assess whether the manager is a good timer
#' of asset allocation decisions. The ratio, which is calculated as
#' \deqn{TimingRatio =\frac{\beta^{+}}{\beta^{-}}}{Timing Ratio = beta+/beta-}
#' is best when greater than one in a rising market and less than one in a
#' falling market.
#'
#' While the classical CAPM has been almost completely discredited by the
#' literature, it is an example of a simple single factor model,
#' comparing an asset to any arbitrary benchmark.
#'
#' @aliases CAPM.beta CAPM.beta.bull CAPM.beta.bear TimingRatio SFM.beta
#' @param Ra an xts, vector, matrix, data frame, timeSeries or zoo object of
#' asset returns
#' @param Rb return vector of the benchmark asset
#' @param Rf risk free rate, in same period as your returns
#' @author Peter Carl
#' @seealso \code{\link{BetaCoVariance}} \code{\link{CAPM.alpha}}
#' \code{\link{CAPM.utils}}
#' @references Sharpe, W.F. Capital Asset Prices: A theory of market
#' equilibrium under conditions of risk. \emph{Journal of finance}, vol 19,
#' 1964, 425-442. \cr Ruppert, David. \emph{Statistics and Finance, an
#' Introduction}. Springer. 2004. \cr Bacon, Carl. \emph{Practical portfolio
#' performance measurement and attribution}. Wiley. 2004. \cr
###keywords ts multivariate distribution models
#' @examples
#'
#' data(managers)
#' CAPM.alpha(managers[,1,drop=FALSE],
#' managers[,8,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1,drop=FALSE],
#' managers[,8,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.alpha(managers[,1:6],
#' managers[,8,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1:6],
#' managers[,8,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.alpha(managers[,1:6],
#' managers[,8:7,drop=FALSE],
#' Rf=.035/12)
#' CAPM.alpha(managers[,1:6],
#' managers[,8:7,drop=FALSE],
#' Rf = managers[,10,drop=FALSE])
#' CAPM.beta(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' CAPM.beta.bull(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' CAPM.beta.bear(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' TimingRatio(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE])
#' chart.Regression(managers[, "HAM2", drop=FALSE],
#' managers[, "SP500 TR", drop=FALSE],
#' Rf = managers[, "US 3m TR", drop=FALSE],
#' fit="conditional",
#' main="Conditional Beta")
#'
#' @rdname CAPM.beta
#' @export CAPM.beta SFM.beta
CAPM.beta <- SFM.beta <- function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
# DESCRIPTION:
# This is a wrapper for calculating a CAPM beta.
# Inputs:
# Ra: vector of returns for the asset being tested
# Rb: vector of returns for the benchmark the asset is being gauged against
# Rf: risk free rate in the same periodicity as the returns. May be a vector
# of the same length as x and y.
# Output:
#
# FUNCTION:
Ra = checkData(Ra)
Rb = checkData(Rb)
if(!is.null(dim(Rf)))
Rf = checkData(Rf)
Ra.ncols = NCOL(Ra)
Rb.ncols = NCOL(Rb)
xRa = Return.excess(Ra, Rf)
xRb = Return.excess(Rb, Rf)
pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols)
result = apply(pairs, 1, FUN = function(n, xRa, xRb)
.beta(xRa[,n[1]], xRb[,n[2]]), xRa = xRa, xRb = xRb)
if(length(result) ==1)
return(result)
else {
dim(result) = c(Ra.ncols, Rb.ncols)
colnames(result) = paste("Beta:", colnames(Rb))
rownames(result) = colnames(Ra)
return(t(result))
}
}
#' @rdname CAPM.beta
#' @export
CAPM.beta.bull <-
function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
# DESCRIPTION:
# This is a wrapper for calculating a conditional CAPM beta for up markets.
# Inputs:
# Ra: time series of returns for the asset being tested
# Rb: time series of returns for the benchmark the asset is being gauged against
# Rf: risk free rate in the same periodicity as the returns. May be a time series
# of the same length as x and y.
# Output:
# Bear market beta
# FUNCTION:
Ra = checkData(Ra)
Rb = checkData(Rb)
if(!is.null(dim(Rf)))
Rf = checkData(Rf)
Ra.ncols = NCOL(Ra)
Rb.ncols = NCOL(Rb)
xRa = Return.excess(Ra, Rf)
xRb = Return.excess(Rb, Rf)
pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols)
# patch: .beta fails if subset contains no positive values, !sum(Rb > 0) is true
if (!sum(xRb > 0)) {
message("Function CAPM.beta.bull: Cannot perform lm. All Rb values are negative.")
return(NA)
}
result = apply(pairs, 1, FUN = function(n, xRa, xRb)
.beta(xRa[,n[1]], xRb[,n[2]], xRb[,n[2]] > 0), xRa = xRa, xRb = xRb)
if(length(result) ==1)
return(result)
else {
dim(result) = c(Ra.ncols, Rb.ncols)
colnames(result) = paste("Bull Beta:", colnames(Rb))
rownames(result) = colnames(Ra)
return(t(result))
}
}
#' @rdname CAPM.beta
#' @export
CAPM.beta.bear <-
function (Ra, Rb, Rf = 0)
{ # @author Peter Carl
# DESCRIPTION:
# This is a wrapper for calculating a conditional CAPM beta for down markets
# Inputs:
# Ra: time series of returns for the asset being tested
# Rb: time series of returns for the benchmark the asset is being gauged against
# Rf: risk free rate in the same periodicity as the returns. May be a time series
# of the same length as Ra and Rb.
# Output:
# Bear market beta
# FUNCTION:
Ra = checkData(Ra)
Rb = checkData(Rb)
if(!is.null(dim(Rf)))
Rf = checkData(Rf)
Ra.ncols = NCOL(Ra)
Rb.ncols = NCOL(Rb)
xRa = Return.excess(Ra, Rf)
xRb = Return.excess(Rb, Rf)
pairs = expand.grid(1:Ra.ncols, 1:Rb.ncols)
# patch: .beta fails if subset contains no negative values, !sum(Rb < 0) is true
if (!sum(xRb < 0)) {
message("Function CAPM.beta.bear: Cannot perform lm. All Rb values are positive.")
return(NA)
}
result = apply(pairs, 1, FUN = function(n, xRa, xRb)
.beta(xRa[,n[1]], xRb[,n[2]], xRb[,n[2]] < 0), xRa = xRa, xRb = xRb)
if(length(result) ==1)
return(result)
else {
dim(result) = c(Ra.ncols, Rb.ncols)
colnames(result) = paste("Bear Beta:", colnames(Rb))
rownames(result) = colnames(Ra)
return(t(result))
}
}
#' @rdname CAPM.beta
#' @export
TimingRatio <-
function (Ra, Rb, Rf = 0)
{
    # Market-timing ratio: the conditional up-market CAPM beta divided by the
    # conditional down-market beta, for asset returns Ra against benchmark Rb
    # (both in excess of the risk-free rate Rf).
    up_beta   <- CAPM.beta.bull(Ra, Rb, Rf = Rf)
    down_beta <- CAPM.beta.bear(Ra, Rb, Rf = Rf)
    ratio     <- up_beta / down_beta

    # Single asset/benchmark pair: hand back the bare number.
    if (length(ratio) == 1)
        return(ratio)

    # Multi-column case: label each row after its benchmark column.
    rownames(ratio) <- paste("Timing Ratio:", colnames(Rb))
    ratio
}
.beta <- function (xRa, xRb, subset) {
  # Slope of the OLS regression xRa ~ xRb, optionally restricted to the
  # observations flagged TRUE in `subset`. With `subset` missing, every
  # complete observation is used.
  if (missing(subset))
    subset <- TRUE

  # Only univariate series are supported.
  if (NCOL(xRa) != 1L || NCOL(xRb) != 1L || NCOL(subset) != 1L)
    stop("all arguments must have only one column")

  # Align the three inputs row-wise and drop incomplete observations.
  clean <- as.data.frame(na.omit(cbind(xRa, xRb, subset)))

  # Nothing left after NA removal: no beta can be estimated.
  if (NROW(clean) == 0)
    return(NA)

  colnames(clean) <- c("xRa", "xRb", "subset")
  # cbind() coerced the logical mask to 0/1; restore it for lm()'s subset arg.
  clean$subset <- as.logical(clean$subset)

  fit <- lm(xRa ~ xRb, data = clean, subset = clean$subset)
  # The second coefficient is the slope on xRb, i.e. the beta.
  coef(fit)[[2]]
}
###############################################################################
# R (http://r-project.org/) Econometrics for Performance and Risk Analysis
#
# Copyright (c) 2004-2015 Peter Carl and Brian G. Peterson
#
# This R package is distributed under the terms of the GNU Public License (GPL)
# for full details see the file COPYING
#
# $Id$
#
###############################################################################
|
#' @importFrom ggplot2 ggplot aes_string theme_bw xlab ylab theme element_text
#' guides guide_colorbar scale_colour_gradient geom_point geom_hline
#' @importFrom stats quantile na.omit
dm_plotPrecision <- function(genewise_precision, mean_expression,
  nr_features = NULL, common_precision = NULL, low_color = "royalblue2",
  high_color = "red2", na_value_color = "red2"){
  # Scatter plot of gene-wise precision versus mean expression, both on a
  # log10 scale.
  #
  # genewise_precision: numeric vector of per-gene precision estimates.
  # mean_expression:    numeric vector of per-gene mean expression; assumed
  #                     parallel to genewise_precision -- TODO confirm.
  # nr_features:        optional numeric vector; when supplied, points are
  #                     coloured by the number of features per gene.
  # common_precision:   optional scalar; when supplied, drawn as a dashed
  #                     horizontal reference line at log10(common_precision).
  # low_color / high_color / na_value_color: colour-scale endpoints and the
  #                     colour used for values outside the scale limits.
  #
  # Returns a ggplot object.
  #
  # NOTE(review): aes_string() is deprecated in current ggplot2; it still
  # works but could be migrated to aes(.data[[...]]) when the package's
  # ggplot2 floor allows.
  if(!is.null(nr_features)){
    # Coloured variant: points carry the per-gene feature count.
    df <- data.frame(mean_expression = log10(mean_expression + 1),
      precision = log10(genewise_precision), nr_features = nr_features)
    # Colour scale tops out at the 95th percentile of feature counts,
    # capped at 30 so a few extreme genes do not flatten the gradient.
    df_quant <- min(quantile(na.omit(df$nr_features), probs = 0.95), 30)
    breaks <- seq(2, df_quant, ceiling(df_quant/10))
    ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision",
      colour = "nr_features" )) +
      theme_bw() +
      xlab("Log10 of mean expression") +
      ylab("Log10 of precision") +
      geom_point(alpha = 0.7, na.rm = TRUE) +
      theme(axis.text = element_text(size=16),
        axis.title = element_text(size=18, face="bold"),
        legend.title = element_text(size=16, face="bold"),
        legend.text = element_text(size = 14),
        legend.position = "top") +
      guides(colour = guide_colorbar(barwidth = 20, barheight = 0.5)) +
      # Counts above max(breaks) fall outside the limits and are drawn in
      # na_value_color.
      scale_colour_gradient(limits = c(2, max(breaks)),
        breaks = breaks, low = low_color, high = high_color,
        name = "Number of features", na.value = na_value_color)
  }else{
    # Plain variant: uncoloured points only.
    df <- data.frame(mean_expression = log10(mean_expression + 1),
      precision = log10(genewise_precision))
    ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision")) +
      theme_bw() +
      xlab("Log10 of mean expression") +
      ylab("Log10 of precision") +
      geom_point(size = 1, alpha = 0.4, na.rm = TRUE) +
      theme(axis.text = element_text(size=16),
        axis.title = element_text(size=18, face="bold"),
        legend.title = element_text(size=16, face="bold"),
        legend.text = element_text(size = 14),
        legend.position = "top")
  }
  # Optional dashed reference line for the common (shared) precision.
  if(!is.null(common_precision)){
    ggp <- ggp + geom_hline(yintercept = log10(common_precision),
      colour = "black", linetype = "dashed")
  }
  return(ggp)
}
| /R/dm_plotPrecision.R | no_license | gosianow/DRIMSeq_before_BioC3.6 | R | false | false | 2,258 | r | #' @importFrom ggplot2 ggplot aes_string theme_bw xlab ylab theme element_text
#' guides guide_colorbar scale_colour_gradient geom_point geom_hline
#' @importFrom stats quantile na.omit
dm_plotPrecision <- function(genewise_precision, mean_expression,
  nr_features = NULL, common_precision = NULL, low_color = "royalblue2",
  high_color = "red2", na_value_color = "red2"){
  # Scatter plot of gene-wise precision versus mean expression, both on a
  # log10 scale.
  #
  # genewise_precision: numeric vector of per-gene precision estimates.
  # mean_expression:    numeric vector of per-gene mean expression; assumed
  #                     parallel to genewise_precision -- TODO confirm.
  # nr_features:        optional numeric vector; when supplied, points are
  #                     coloured by the number of features per gene.
  # common_precision:   optional scalar; when supplied, drawn as a dashed
  #                     horizontal reference line at log10(common_precision).
  # low_color / high_color / na_value_color: colour-scale endpoints and the
  #                     colour used for values outside the scale limits.
  #
  # Returns a ggplot object.
  #
  # NOTE(review): aes_string() is deprecated in current ggplot2; it still
  # works but could be migrated to aes(.data[[...]]) when the package's
  # ggplot2 floor allows.
  if(!is.null(nr_features)){
    # Coloured variant: points carry the per-gene feature count.
    df <- data.frame(mean_expression = log10(mean_expression + 1),
      precision = log10(genewise_precision), nr_features = nr_features)
    # Colour scale tops out at the 95th percentile of feature counts,
    # capped at 30 so a few extreme genes do not flatten the gradient.
    df_quant <- min(quantile(na.omit(df$nr_features), probs = 0.95), 30)
    breaks <- seq(2, df_quant, ceiling(df_quant/10))
    ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision",
      colour = "nr_features" )) +
      theme_bw() +
      xlab("Log10 of mean expression") +
      ylab("Log10 of precision") +
      geom_point(alpha = 0.7, na.rm = TRUE) +
      theme(axis.text = element_text(size=16),
        axis.title = element_text(size=18, face="bold"),
        legend.title = element_text(size=16, face="bold"),
        legend.text = element_text(size = 14),
        legend.position = "top") +
      guides(colour = guide_colorbar(barwidth = 20, barheight = 0.5)) +
      # Counts above max(breaks) fall outside the limits and are drawn in
      # na_value_color.
      scale_colour_gradient(limits = c(2, max(breaks)),
        breaks = breaks, low = low_color, high = high_color,
        name = "Number of features", na.value = na_value_color)
  }else{
    # Plain variant: uncoloured points only.
    df <- data.frame(mean_expression = log10(mean_expression + 1),
      precision = log10(genewise_precision))
    ggp <- ggplot(df, aes_string(x = "mean_expression", y = "precision")) +
      theme_bw() +
      xlab("Log10 of mean expression") +
      ylab("Log10 of precision") +
      geom_point(size = 1, alpha = 0.4, na.rm = TRUE) +
      theme(axis.text = element_text(size=16),
        axis.title = element_text(size=18, face="bold"),
        legend.title = element_text(size=16, face="bold"),
        legend.text = element_text(size = 14),
        legend.position = "top")
  }
  # Optional dashed reference line for the common (shared) precision.
  if(!is.null(common_precision)){
    ggp <- ggp + geom_hline(yintercept = log10(common_precision),
      colour = "black", linetype = "dashed")
  }
  return(ggp)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webpower.R
\name{nuniroot}
\alias{nuniroot}
\title{Solve A Single Equation}
\usage{
nuniroot(f, interval, maxlength = 100)
}
\arguments{
\item{f}{Function for which the root is sought.}
\item{interval}{A vector containing the end-points of the interval to be searched for the root.}
\item{maxlength}{The number of value points in the interval to be searched. It is 100 by default.}
}
\value{
A list with at least four components: root and f.root give the location of the root and the value of the function evaluated at that point.
iter and estim.prec give the number of iterations used and an approximate estimated precision for root.
(If the root occurs at one of the endpoints, the estimated precision is NA.)
}
\description{
The function searches in an interval for a root (i.e., zero) of the function f with respect to its first argument.
The argument interval is for the input of x; the corresponding outcome interval will be used as the interval to be searched in.
}
\examples{
f <- function(x) 1+x-0.5*x^2
interval <- c(-3,6)
nuniroot(f,interval)
}
| /man/nuniroot.Rd | no_license | johnnyzhz/WebPower | R | false | true | 1,137 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webpower.R
\name{nuniroot}
\alias{nuniroot}
\title{Solve A Single Equation}
\usage{
nuniroot(f, interval, maxlength = 100)
}
\arguments{
\item{f}{Function for which the root is sought.}
\item{interval}{A vector containing the end-points of the interval to be searched for the root.}
\item{maxlength}{The number of value points in the interval to be searched. It is 100 by default.}
}
\value{
A list with at least four components: root and f.root give the location of the root and the value of the function evaluated at that point.
iter and estim.prec give the number of iterations used and an approximate estimated precision for root.
(If the root occurs at one of the endpoints, the estimated precision is NA.)
}
\description{
The function searches in an interval for a root (i.e., zero) of the function f with respect to its first argument.
The argument interval is for the input of x; the corresponding outcome interval will be used as the interval to be searched in.
}
\examples{
f <- function(x) 1+x-0.5*x^2
interval <- c(-3,6)
nuniroot(f,interval)
}
|
# Demo data for the visOptions proxy example: 15 nodes (with single- and
# double-valued group labels) and 15 random edges. Wrapped in reactive() so
# every consumer below reuses the same graph.
dataos <- reactive({
  set.seed(2)
  nodes <- data.frame(id = 1:15, label = paste("Label", 1:15),
                      group = sample(LETTERS[1:3], 15, replace = TRUE),
                      group2 = paste(sample(LETTERS[1:3], 15, replace = TRUE), sample(LETTERS[1:3], 15, replace = TRUE), sep= ","))
  edges <- data.frame(id = 1:15, from = trunc(runif(15)*(15-1))+1,
                      to = trunc(runif(15)*(15-1))+1)
  list(nodes = nodes, edges = edges)
})
# Render the base network once; all subsequent option changes go through
# visNetworkProxy so the widget is updated in place, not redrawn.
output$network_proxy_options <- renderVisNetwork({
  visNetwork(dataos()$nodes, dataos()$edges) %>% visEdges(arrows = "to") %>%
    visLegend()
})
# Push highlightNearest settings to the live widget; the hide colour's alpha
# comes from the input$opahigh slider.
observe({
  col <- paste0('rgba(200,200,200,', input$opahigh, ')')
  visNetworkProxy("network_proxy_options") %>%
    visOptions(highlightNearest = list(enabled = input$highlightNearest, hover = input$hover,
                                       algorithm = input$algorithm, degree = input$deg, hideColor = col))
})
# Toggle the node-id dropdown; node 5 is pre-selected when enabled.
observe({
  visNetworkProxy("network_proxy_options") %>%
    visOptions(nodesIdSelection = list(enabled = input$nodesIdSelection, selected = 5))
})
# Enable/disable selection by the "group" column; passing selectedBy = NULL
# removes the selector from the widget.
observe({
  if(input$selectedby){
    col <- paste0('rgba(200,200,200,', input$opasel, ')')
    visNetworkProxy("network_proxy_options") %>%
      visOptions(selectedBy = list(variable = "group", hideColor = col, highlight = input$selectedbyHighlight))
  }else{
    visNetworkProxy("network_proxy_options") %>%
      visOptions(selectedBy = NULL)
  }
})
# Bind/unbind the double-click handler that reopens a collapsed cluster.
observe({
  if(input$open_collapse){
    visNetworkProxy("network_proxy_options") %>% visEvents(type = "on", doubleClick = "networkOpenCluster")
  } else {
    visNetworkProxy("network_proxy_options") %>% visEvents(type = "off", doubleClick = "networkOpenCluster")
  }
})
# Forward the collapse options (fit, highlight reset, coordinates, label
# suffix) to the live widget.
observe({
  visNetworkProxy("network_proxy_options") %>%
    visOptions(collapse = list(enabled = input$collapse, fit = input$fit_collapse,
                               resetHighlight = input$reset_collapse,
                               keepCoord = input$keep_coord,
                               labelSuffix = input$labelSuffix))
})
output$code_proxy_options <- renderText({
'
# highlight
observe({
col <- paste0("rgba(200,200,200,", input$opahigh, ")")
visNetworkProxy("network_proxy_options") %>%
visOptions(highlightNearest = list(enabled = input$highlightNearest, hover = input$hover,
algorithm = input$algorithm, degree = input$deg, hideColor = col))
})
# nodesIdSelection
observe({
visNetworkProxy("network_proxy_options") %>%
visOptions(nodesIdSelection = list(enabled = input$nodesIdSelection, selected = 5))
})
# selectedBy
observe({
if(input$selectedby){
col <- paste0("rgba(200,200,200,", input$opasel, ")")
visNetworkProxy("network_proxy_options") %>%
visOptions(selectedBy = list(variable = "group", hideColor = col))
}else{
visNetworkProxy("network_proxy_options") %>%
visOptions(selectedBy = NULL)
}
})
# collapse
observe({
visNetworkProxy("network_proxy_options") %>%
visOptions(collapse = list(enabled = input$collapse, fit = input$fit_collapse,
resetHighlight = input$reset_collapse,
keepCoord = input$keep_coord,
labelSuffix = input$labelSuffix))
})
observe({
if(input$open_collapse){
visNetworkProxy("network_id") %>% visEvents(type = "on", doubleClick = "networkOpenCluster")
} else {
visNetworkProxy("network_id") %>% visEvents(type = "off", doubleClick = "networkOpenCluster")
}
})
'
}) | /inst/shiny/src/server/proxy_options_server.R | permissive | cran/visNetwork | R | false | false | 3,640 | r | dataos <- reactive({
set.seed(2)
nodes <- data.frame(id = 1:15, label = paste("Label", 1:15),
group = sample(LETTERS[1:3], 15, replace = TRUE),
group2 = paste(sample(LETTERS[1:3], 15, replace = TRUE), sample(LETTERS[1:3], 15, replace = TRUE), sep= ","))
edges <- data.frame(id = 1:15, from = trunc(runif(15)*(15-1))+1,
to = trunc(runif(15)*(15-1))+1)
list(nodes = nodes, edges = edges)
})
# Render the base network once; all subsequent option changes go through
# visNetworkProxy so the widget is updated in place, not redrawn.
output$network_proxy_options <- renderVisNetwork({
  visNetwork(dataos()$nodes, dataos()$edges) %>% visEdges(arrows = "to") %>%
    visLegend()
})
# Push highlightNearest settings to the live widget; the hide colour's alpha
# comes from the input$opahigh slider.
observe({
  col <- paste0('rgba(200,200,200,', input$opahigh, ')')
  visNetworkProxy("network_proxy_options") %>%
    visOptions(highlightNearest = list(enabled = input$highlightNearest, hover = input$hover,
                                       algorithm = input$algorithm, degree = input$deg, hideColor = col))
})
# Toggle the node-id dropdown; node 5 is pre-selected when enabled.
observe({
  visNetworkProxy("network_proxy_options") %>%
    visOptions(nodesIdSelection = list(enabled = input$nodesIdSelection, selected = 5))
})
# Enable/disable selection by the "group" column; passing selectedBy = NULL
# removes the selector from the widget.
observe({
  if(input$selectedby){
    col <- paste0('rgba(200,200,200,', input$opasel, ')')
    visNetworkProxy("network_proxy_options") %>%
      visOptions(selectedBy = list(variable = "group", hideColor = col, highlight = input$selectedbyHighlight))
  }else{
    visNetworkProxy("network_proxy_options") %>%
      visOptions(selectedBy = NULL)
  }
})
# Bind/unbind the double-click handler that reopens a collapsed cluster.
observe({
  if(input$open_collapse){
    visNetworkProxy("network_proxy_options") %>% visEvents(type = "on", doubleClick = "networkOpenCluster")
  } else {
    visNetworkProxy("network_proxy_options") %>% visEvents(type = "off", doubleClick = "networkOpenCluster")
  }
})
# Forward the collapse options (fit, highlight reset, coordinates, label
# suffix) to the live widget.
observe({
  visNetworkProxy("network_proxy_options") %>%
    visOptions(collapse = list(enabled = input$collapse, fit = input$fit_collapse,
                               resetHighlight = input$reset_collapse,
                               keepCoord = input$keep_coord,
                               labelSuffix = input$labelSuffix))
})
output$code_proxy_options <- renderText({
'
# highlight
observe({
col <- paste0("rgba(200,200,200,", input$opahigh, ")")
visNetworkProxy("network_proxy_options") %>%
visOptions(highlightNearest = list(enabled = input$highlightNearest, hover = input$hover,
algorithm = input$algorithm, degree = input$deg, hideColor = col))
})
# nodesIdSelection
observe({
visNetworkProxy("network_proxy_options") %>%
visOptions(nodesIdSelection = list(enabled = input$nodesIdSelection, selected = 5))
})
# selectedBy
observe({
if(input$selectedby){
col <- paste0("rgba(200,200,200,", input$opasel, ")")
visNetworkProxy("network_proxy_options") %>%
visOptions(selectedBy = list(variable = "group", hideColor = col))
}else{
visNetworkProxy("network_proxy_options") %>%
visOptions(selectedBy = NULL)
}
})
# collapse
observe({
visNetworkProxy("network_proxy_options") %>%
visOptions(collapse = list(enabled = input$collapse, fit = input$fit_collapse,
resetHighlight = input$reset_collapse,
keepCoord = input$keep_coord,
labelSuffix = input$labelSuffix))
})
observe({
if(input$open_collapse){
visNetworkProxy("network_id") %>% visEvents(type = "on", doubleClick = "networkOpenCluster")
} else {
visNetworkProxy("network_id") %>% visEvents(type = "off", doubleClick = "networkOpenCluster")
}
})
'
}) |
###########################
###########################
# #
# # MYRIAD RUN SCRIPT
# #
# # This is an R script that runs a single attack to failure accroding to a set of input parameters. It is designed to be run on
# # an HPC cluster, allowing each attack to run independtly when scheduled
# #
# # A question is how much time is spent loading the necessary packages and data as this may mean that several attacks should be combined
# # to reduce the overhead.
# #
# #
###########################
###########################
# Wall-clock timer for the whole job; elapsed time is printed at the end.
start_time <- Sys.time()

# Install any missing CRAN dependencies, then attach them all.
# NOTE(review): sapply() over library() prints its result at top level;
# harmless, but lapply()/invisible() would be quieter.
packages <- c("rlang", "dplyr", "tidyr", "purrr", "tibble", "forcats", "igraph", "devtools", "minpack.lm")#, "devtools", "minpack.lm" )#this may be can be removeed
new.packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
sapply(packages, library, character.only = TRUE)
#install_github("JonnoB/PowerGridNetworking")
library(PowerGridNetworking)

#Set up file system to read the correct folders this switches between aws and windows mode
#creates the correct root depending on whether this is on the cloud or not
if(dir.exists("/home/jonno")){
  #This folder is for use on my machine
  project_folder <- "/home/jonno/Dropbox/IEEE_Networks"
  basewd <- "/home/jonno"

  analysis_parameter_file_path <- file.path(project_folder, "analysis_parameter_files")
  HPC_script_path <- file.path(project_folder, "HPC_parameter_files")
  load_data_files_path <- file.path(project_folder) #load the files
  save_data_files_path <- file.path(project_folder) #save the files
  # NOTE(review): task_id and HPC_start_up_file are only assigned in the
  # cluster branch below, yet both are used unconditionally later in the
  # script -- running this branch locally would fail at read.delim().
  # Confirm whether they are expected to pre-exist in the local session.
}else{
  #This is for the folder that is on the cloud
  project_folder <- getwd()
  basewd <- "/home/ucabbou"
  analysis_parameter_file_path <- file.path(basewd, "analysis_parameter_files") #In myriad the parameter files are in a folder on the home dir
  #not in the project folder like when it is done on my own comp
  HPC_script_path <- file.path(basewd, "HPC_parameter_files")
  load_data_files_path <- file.path(basewd) #load the files
  save_data_files_path <- file.path(project_folder) #save the files

  #If it is not on my computer then the variables need to be loaded from the system environment
  #Get the task ID: the SGE array index selects which compute group this job runs.
  task_id <- Sys.getenv("SGE_TASK_ID")
  HPC_start_up_file <- Sys.getenv("HPC_start_up_file")
}

#Load some other useful functions sourced from sibling project folders.
list.files(file.path(basewd, "Useful_PhD__R_Functions"), pattern = ".R", full.names = T) %>%
  walk(~source(.x))
list.files(file.path(basewd, "Flow_Spring_System"), pattern = ".R", full.names = T) %>%
  walk(~source(.x))
# Select this array-task's row from the HPC start-up table. Each row pins one
# (compute group, v, ec, fract) combination plus the parameter file to load.
temp <- read.delim(file.path(HPC_script_path, HPC_start_up_file), sep =" ", header = TRUE) %>%
  filter(compute_group_strain == task_id)

compute_group_value <- temp$compute_group_strain
#compute_group <- temp$compute_group #as the df variable is called compute group, this cannot be otherise the variable just has to equal itself and the grouping is not used
param_v <- temp$v
param_ec <- temp$ec
param_fract <- temp$fract
load_file <- temp$load_file

# Load the full parameter grid and keep only the rows for this task.
# simulation_id == 1: all 100 simulations of a setting share an identical
# graph, so a single representative row is enough for the embedding run.
parameter_df_temp <- readRDS(file.path(analysis_parameter_file_path, load_file)) %>%
  filter(
    compute_group_strain == compute_group_value, #this variable is inserted into the file from the program that runs it
    v == param_v,
    ec == param_ec,
    fract == param_fract,
    simulation_id == 1
  )

# fix: log message previously read "pararmeters".
print(paste("parameters loaded. Task number", task_id))
# Iterate over every parameter row for this task.
# fix: seq_len() replaces 1:nrow(): with zero rows, 1:0 yields c(1, 0) and
# would run the body on non-existent rows, whereas seq_len(0) iterates zero
# times.
seq_len(nrow(parameter_df_temp)) %>%
  walk(~{

    ## ------------------------------------------------------------------
    ## Extract the variables needed for this iteration's calculation.
    ## ------------------------------------------------------------------
    Iter <- parameter_df_temp %>%
      slice(.x)

    scramble_network <- Iter$scramble_network #Needs to be added into the target orders file
    graph_path <- file.path(load_data_files_path, Iter$graph_path)
    Iter_embedding_path <- file.path(save_data_files_path, Iter$embeddings_path)
    scramble_seed <- Iter$seed
    ec <- Iter$ec
    v <- Iter$v
    fract <- Iter$fract
    permutation <- Iter$permutation
    deletion_seed <- Iter$deletion_seed
    simulation_id <- Iter$simulation_id

    ## Skip work whose output already exists on disk; this makes job
    ## restarts cheap.
    if(!file.exists(Iter_embedding_path)){

      g <- readRDS(file = graph_path) #read the target graph

      #Proportionally load the network
      g <- Proportional_Load(g, alpha = ec, PowerFlow = "power_flow",
                             Link.Limit = "edge_limit")

      #For most of the simulations we scramble the proportionally loaded
      #networks; scrambling is skipped for the limit cases (1 and Inf) and
      #the proportional reference line of the test cases.
      if(scramble_network){
        #scramble the excess capacity
        edge_order_df <- create_scrambled_edges(g, scramble_seed, fract = fract)
        g <- g %>%
          set.edge.attribute(., "edge_limit", value = edge_order_df$edge_limit)
      }

      # Shared convergence settings for every balanced block below.
      common_time <- 0.01
      common_Iter <- 20000
      common_tol <- 1e-10
      common_mass <- 1

      #Sets up the graph so that all the embedding stuff can be calculated without problem
      current_graph <- g %>%
        set.edge.attribute(. , "distance", value = 1) %>%
        set.edge.attribute(., "Area", value = 1) %>%
        calc_spring_youngs_modulus(., "power_flow", "edge_limit", minimum_value = 100, stretch_range = 1000) %>%
        calc_spring_constant(., E ="E", A = "Area", distance = "distance") %>%
        normalise_dc_load(.,
                          generation = "generation",
                          demand = "demand",
                          net_generation = "net_generation",
                          capacity = "edge_limit",
                          edge_name = "edge_name",
                          node_name = "name",
                          power_flow = "power_flow")
      print("Full graph complete")

      # Decompose into balanced biconnected components that can be solved
      # independently.
      List_of_BiConComps <- create_balanced_blocks(current_graph,
                                                   force = "net_generation",
                                                   flow = "power_flow")

      #find the largest component and use that as the origin block
      giant_componant <-List_of_BiConComps %>% map_dbl(~vcount(.x)) %>% which.max()
      print("Giant component found")

      #use the largest block to set the simulation parameters k and m.
      #k needs to be sufficiently stretched to allow enough topology variation. otherwise all that happens is a
      #surface angled in the direct of net power flow. Which is interesting but not that interesting
      OriginBlock_complete <- Find_network_balance(g = List_of_BiConComps[[giant_componant]],
                                                   force ="net_generation",
                                                   flow = "power_flow",
                                                   distance = "distance",
                                                   capacity = "edge_limit",
                                                   edge_name = "edge_name",
                                                   tstep = common_time,
                                                   tol = common_tol,
                                                   maxIter = common_Iter,
                                                   mass = common_mass,
                                                   verbose = FALSE)
      print("Origin block complete")

      #Calculate the height embeddings using the Origin block as a base
      height_embeddings_df <- Create_stabilised_blocks(g = current_graph,
                                                       OriginBlock = OriginBlock_complete,
                                                       OriginBlock_number = giant_componant,
                                                       force ="net_generation",
                                                       flow = "power_flow",
                                                       distance = "distance",
                                                       capacity = "edge_limit",
                                                       edge_name = "edge_name",
                                                       tstep = common_time,
                                                       tol = common_tol,
                                                       maxIter = common_Iter,
                                                       mass = common_mass,
                                                       verbose = FALSE)
      print("Height embeddings complete")

      #Extract edge tension and strain from the network
      tension_strain_embeddings <- calc_tension_strain(g = current_graph,
                                                       height_embeddings_df,
                                                       distance = "distance",
                                                       capacity = "edge_limit",
                                                       flow = "power_flow",
                                                       edge_name = "edge_name",
                                                       k = "k")
      print("Strain and Tension embeddings complete")

      embeddings_data <- list(node_embeddings = height_embeddings_df, edge_embeddings = tension_strain_embeddings)

      #The output folder structure is created on demand; there is very little
      #overhead in doing it this way.
      # fix: dir.exists() is the precise check for a directory, and
      # recursive = TRUE replaces the fragile T shorthand.
      if(!dir.exists(dirname(Iter_embedding_path))){ dir.create(dirname(Iter_embedding_path), recursive = TRUE) }
      print("Saving file")
      saveRDS(embeddings_data, file = Iter_embedding_path)

      #After the simulation and its summary are saved to the drive the next in the compute group is calculated
    }

    return(NULL) #dump everything when the loop finishes. This is an attempt to retain memory and speed up parallel processing...
    #I don't know if it works
  })

stop_time <- Sys.time()
print(stop_time-start_time)
#Once all the simulations in the compute group have been saved the script is complete
#######################
#######################
##
##
##END
##
##
########################
######################## | /HPC_files/HPC_strain_script.R | permissive | JonnoB/setse_and_network_robustness | R | false | false | 10,772 | r |
###########################
###########################
# #
# # MYRIAD RUN SCRIPT
# #
# # This is an R script that runs a single attack to failure accroding to a set of input parameters. It is designed to be run on
# # an HPC cluster, allowing each attack to run independtly when scheduled
# #
# # A question is how much time is spent loading the necessary packages and data as this may mean that several attacks should be combined
# # to reduce the overhead.
# #
# #
###########################
###########################
# Wall-clock timer for the whole job; elapsed time is printed at the end.
start_time <- Sys.time()

# Install any missing CRAN dependencies, then attach them all.
# NOTE(review): sapply() over library() prints its result at top level;
# harmless, but lapply()/invisible() would be quieter.
packages <- c("rlang", "dplyr", "tidyr", "purrr", "tibble", "forcats", "igraph", "devtools", "minpack.lm")#, "devtools", "minpack.lm" )#this may be can be removeed
new.packages <- packages[!(packages %in% installed.packages()[,"Package"])]
if(length(new.packages)) install.packages(new.packages)
sapply(packages, library, character.only = TRUE)
#install_github("JonnoB/PowerGridNetworking")
library(PowerGridNetworking)

#Set up file system to read the correct folders this switches between aws and windows mode
#creates the correct root depending on whether this is on the cloud or not
if(dir.exists("/home/jonno")){
  #This folder is for use on my machine
  project_folder <- "/home/jonno/Dropbox/IEEE_Networks"
  basewd <- "/home/jonno"

  analysis_parameter_file_path <- file.path(project_folder, "analysis_parameter_files")
  HPC_script_path <- file.path(project_folder, "HPC_parameter_files")
  load_data_files_path <- file.path(project_folder) #load the files
  save_data_files_path <- file.path(project_folder) #save the files
  # NOTE(review): task_id and HPC_start_up_file are only assigned in the
  # cluster branch below, yet both are used unconditionally later in the
  # script -- running this branch locally would fail at read.delim().
  # Confirm whether they are expected to pre-exist in the local session.
}else{
  #This is for the folder that is on the cloud
  project_folder <- getwd()
  basewd <- "/home/ucabbou"
  analysis_parameter_file_path <- file.path(basewd, "analysis_parameter_files") #In myriad the parameter files are in a folder on the home dir
  #not in the project folder like when it is done on my own comp
  HPC_script_path <- file.path(basewd, "HPC_parameter_files")
  load_data_files_path <- file.path(basewd) #load the files
  save_data_files_path <- file.path(project_folder) #save the files

  #If it is not on my computer then the variables need to be loaded from the system environment
  #Get the task ID: the SGE array index selects which compute group this job runs.
  task_id <- Sys.getenv("SGE_TASK_ID")
  HPC_start_up_file <- Sys.getenv("HPC_start_up_file")
}

#Load some other useful functions sourced from sibling project folders.
list.files(file.path(basewd, "Useful_PhD__R_Functions"), pattern = ".R", full.names = T) %>%
  walk(~source(.x))
list.files(file.path(basewd, "Flow_Spring_System"), pattern = ".R", full.names = T) %>%
  walk(~source(.x))
# Select this array-task's row from the HPC start-up table. Each row pins one
# (compute group, v, ec, fract) combination plus the parameter file to load.
temp <- read.delim(file.path(HPC_script_path, HPC_start_up_file), sep =" ", header = TRUE) %>%
  filter(compute_group_strain == task_id)

compute_group_value <- temp$compute_group_strain
#compute_group <- temp$compute_group #as the df variable is called compute group, this cannot be otherise the variable just has to equal itself and the grouping is not used
param_v <- temp$v
param_ec <- temp$ec
param_fract <- temp$fract
load_file <- temp$load_file

# Load the full parameter grid and keep only the rows for this task.
# simulation_id == 1: all 100 simulations of a setting share an identical
# graph, so a single representative row is enough for the embedding run.
parameter_df_temp <- readRDS(file.path(analysis_parameter_file_path, load_file)) %>%
  filter(
    compute_group_strain == compute_group_value, #this variable is inserted into the file from the program that runs it
    v == param_v,
    ec == param_ec,
    fract == param_fract,
    simulation_id == 1
  )

# fix: log message previously read "pararmeters".
print(paste("parameters loaded. Task number", task_id))
# Iterate over every parameter row for this task.
# fix: seq_len() replaces 1:nrow(): with zero rows, 1:0 yields c(1, 0) and
# would run the body on non-existent rows, whereas seq_len(0) iterates zero
# times.
seq_len(nrow(parameter_df_temp)) %>%
  walk(~{

    ## ------------------------------------------------------------------
    ## Extract the variables needed for this iteration's calculation.
    ## ------------------------------------------------------------------
    Iter <- parameter_df_temp %>%
      slice(.x)

    scramble_network <- Iter$scramble_network #Needs to be added into the target orders file
    graph_path <- file.path(load_data_files_path, Iter$graph_path)
    Iter_embedding_path <- file.path(save_data_files_path, Iter$embeddings_path)
    scramble_seed <- Iter$seed
    ec <- Iter$ec
    v <- Iter$v
    fract <- Iter$fract
    permutation <- Iter$permutation
    deletion_seed <- Iter$deletion_seed
    simulation_id <- Iter$simulation_id

    ## Skip work whose output already exists on disk; this makes job
    ## restarts cheap.
    if(!file.exists(Iter_embedding_path)){

      g <- readRDS(file = graph_path) #read the target graph

      #Proportionally load the network
      g <- Proportional_Load(g, alpha = ec, PowerFlow = "power_flow",
                             Link.Limit = "edge_limit")

      #For most of the simulations we scramble the proportionally loaded
      #networks; scrambling is skipped for the limit cases (1 and Inf) and
      #the proportional reference line of the test cases.
      if(scramble_network){
        #scramble the excess capacity
        edge_order_df <- create_scrambled_edges(g, scramble_seed, fract = fract)
        g <- g %>%
          set.edge.attribute(., "edge_limit", value = edge_order_df$edge_limit)
      }

      # Shared convergence settings for every balanced block below.
      common_time <- 0.01
      common_Iter <- 20000
      common_tol <- 1e-10
      common_mass <- 1

      #Sets up the graph so that all the embedding stuff can be calculated without problem
      current_graph <- g %>%
        set.edge.attribute(. , "distance", value = 1) %>%
        set.edge.attribute(., "Area", value = 1) %>%
        calc_spring_youngs_modulus(., "power_flow", "edge_limit", minimum_value = 100, stretch_range = 1000) %>%
        calc_spring_constant(., E ="E", A = "Area", distance = "distance") %>%
        normalise_dc_load(.,
                          generation = "generation",
                          demand = "demand",
                          net_generation = "net_generation",
                          capacity = "edge_limit",
                          edge_name = "edge_name",
                          node_name = "name",
                          power_flow = "power_flow")
      print("Full graph complete")

      # Decompose into balanced biconnected components that can be solved
      # independently.
      List_of_BiConComps <- create_balanced_blocks(current_graph,
                                                   force = "net_generation",
                                                   flow = "power_flow")

      #find the largest component and use that as the origin block
      giant_componant <-List_of_BiConComps %>% map_dbl(~vcount(.x)) %>% which.max()
      print("Giant component found")

      #use the largest block to set the simulation parameters k and m.
      #k needs to be sufficiently stretched to allow enough topology variation. otherwise all that happens is a
      #surface angled in the direct of net power flow. Which is interesting but not that interesting
      OriginBlock_complete <- Find_network_balance(g = List_of_BiConComps[[giant_componant]],
                                                   force ="net_generation",
                                                   flow = "power_flow",
                                                   distance = "distance",
                                                   capacity = "edge_limit",
                                                   edge_name = "edge_name",
                                                   tstep = common_time,
                                                   tol = common_tol,
                                                   maxIter = common_Iter,
                                                   mass = common_mass,
                                                   verbose = FALSE)
      print("Origin block complete")

      #Calculate the height embeddings using the Origin block as a base
      height_embeddings_df <- Create_stabilised_blocks(g = current_graph,
                                                       OriginBlock = OriginBlock_complete,
                                                       OriginBlock_number = giant_componant,
                                                       force ="net_generation",
                                                       flow = "power_flow",
                                                       distance = "distance",
                                                       capacity = "edge_limit",
                                                       edge_name = "edge_name",
                                                       tstep = common_time,
                                                       tol = common_tol,
                                                       maxIter = common_Iter,
                                                       mass = common_mass,
                                                       verbose = FALSE)
      print("Height embeddings complete")

      #Extract edge tension and strain from the network
      tension_strain_embeddings <- calc_tension_strain(g = current_graph,
                                                       height_embeddings_df,
                                                       distance = "distance",
                                                       capacity = "edge_limit",
                                                       flow = "power_flow",
                                                       edge_name = "edge_name",
                                                       k = "k")
      print("Strain and Tension embeddings complete")

      embeddings_data <- list(node_embeddings = height_embeddings_df, edge_embeddings = tension_strain_embeddings)

      #The output folder structure is created on demand; there is very little
      #overhead in doing it this way.
      # fix: dir.exists() is the precise check for a directory, and
      # recursive = TRUE replaces the fragile T shorthand.
      if(!dir.exists(dirname(Iter_embedding_path))){ dir.create(dirname(Iter_embedding_path), recursive = TRUE) }
      print("Saving file")
      saveRDS(embeddings_data, file = Iter_embedding_path)

      #After the simulation and its summary are saved to the drive the next in the compute group is calculated
    }

    return(NULL) #dump everything when the loop finishes. This is an attempt to retain memory and speed up parallel processing...
    #I don't know if it works
  })

stop_time <- Sys.time()
print(stop_time-start_time)
#Once all the simulations in the compute group have been saved the script is complete
#######################
#######################
##
##
##END
##
##
########################
######################## |
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
# Fixed seed so the resampling folds (and hence the audit results) are reproducible.
set.seed(1)
#:# data
# Download the "churn" dataset from OpenML (requires network access).
dataset <- getOMLDataSet(data.name = "churn")
head(dataset$data)
#:# preprocessing
# No preprocessing is applied; the raw data is used as-is.
head(dataset$data)
#:# model
# Use <- for assignment, consistent with the rest of the script.
task <- makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn <- makeLearner("classif.gbm", par.vals = list(n.trees = 150, n.minobsinnode = 10), predict.type = "prob")
#:# hash
#:# 2e1ec1291a353b46a337f35dedd453cf
# Hash of the task/learner pair identifies this model configuration; it must
# stay equal to the value recorded on the line above.
hash <- digest(list(task, lrn))
hash
#:# audit
# 5-fold cross-validation over a panel of classification measures.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
# r$aggr holds the aggregated value of every measure listed above, not only accuracy.
ACC <- r$aggr
ACC
#:# session info
# paste0() with a single literal argument was a no-op; pass the file name directly.
sink("sessionInfo.txt")
sessionInfo()
sink()
| /models/openml_churn/classification_class/2e1ec1291a353b46a337f35dedd453cf/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 745 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
#Fixed seed so the CV folds below are reproducible.
set.seed(1)
#:# data
#Downloads the "churn" dataset from OpenML (requires network access).
dataset <- getOMLDataSet(data.name = "churn")
head(dataset$data)
#:# preprocessing
#No preprocessing is applied; the data is used as-is.
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "class")
lrn = makeLearner("classif.gbm", par.vals = list(n.trees = 150, n.minobsinnode = 10), predict.type = "prob")
#:# hash
#:# 2e1ec1291a353b46a337f35dedd453cf
#Hash of the task/learner pair identifies this configuration; it should
#match the value recorded on the line above.
hash <- digest(list(task, lrn))
hash
#:# audit
#5-fold cross-validation over several classification measures;
#r$aggr holds all aggregated measures, not just accuracy.
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
# Scratch file trying several US choropleth approaches side by side:
# highcharter, leaflet, fiftystater + ggplot2, and geojsonio. Several
# objects used below (hi, df_state_summary, df_st, fifty_states, and the
# packages providing mutate/select/glimpse/leaflet/...) are assumed to come
# from the surrounding session -- TODO confirm before running top to bottom.
data(unemployment)
# Pull the per-area data frame out of the Highcharts US map collection.
df_mapdata_dist <- get_data_from_map(download_map_data("countries/us/us-all-all"))
# NOTE(review): `=` assignment here while the rest of the file uses `<-`.
yo = download_map_data("countries/us/us-all-all")
# NOTE(review): `hi` is not defined anywhere in this file, and the chain ends
# by piping into `hc`, which is an object name rather than a function, so this
# block errors as written -- presumably leftover scratch code.
highchart(type = "map") %>%
  hc_chart(backgroundColor = "#161C20") %>%
  hc_add_series(mapData = hi, showInLegend = FALSE, nullColor = "#424242",
                borderWidth = 0) %>%
  # NOTE(review): type = "" is an empty series type -- likely meant "map"; confirm.
  hc_add_series(data = unemployment, type = "", name = "Unemployment", value = "value", joinBy = c("hc-key", "code"),
                borderColor = "transparent") %>%
  hc
# Choropleth of murder arrest rates joined onto the usgeojson state shapes.
data("USArrests", package = "datasets")
data("usgeojson")
USArrests <- mutate(USArrests, state = rownames(USArrests))
highchart() %>%
  hc_title(text = "Violent Crime Rates by US State") %>%
  hc_subtitle(text = "Source: USArrests data") %>%
  hc_add_series_map(usgeojson, USArrests, name = "Murder arrests (per 100,000)",
                    value = "Murder", joinBy = c("woename", "state"),
                    dataLabels = list(enabled = TRUE,
                                      format = '{point.properties.postalcode}')) %>%
  hc_colorAxis(stops = color_stops()) %>%
  hc_legend(valueDecimals = 0, valueSuffix = "%") %>%
  hc_mapNavigation(enabled = TRUE)
# Same technique for the donor summary; df_state_summary is built elsewhere
# -- TODO confirm it has "name" and "prop_rep_projects" columns.
highchart() %>%
  hc_add_series_map(usgeojson, df_state_summary, value = "prop_rep_projects",
                    joinBy = c("woename", "name"), name = "Proportion Donating Multiple Projects",
                    borderColor = "transparent") %>%
  hc_tooltip(valueDecimals = 1, valueSuffix = "%") %>%
  hc_colorAxis(stops = color_stops(6)) %>%
  hc_legend(layout = "vertical", align = "right",
            floating = TRUE, valueDecimals = 0, valueSuffix = "%") %>%
  hc_title(text = "Proportion of Donors Donating to Multiple Projects by State")
# NOTE(review): stray expression; df_st is not defined in this file.
df_st
# Unemployment choropleth with explicit colour classes.
hcmap(yo, data = unemployment,
      name = "Unemployment", value = "value", joinBy = c("hc-key", "code"),
      borderColor = "transparent") %>%
  hc_colorAxis(dataClasses = color_classes(c(seq(0, 10, by = 2), 50))) %>%
  hc_legend(layout = "vertical", align = "right",
            floating = TRUE, valueDecimals = 0, valueSuffix = "%")
# Build a fake state-level variable (random |t|-distributed values) to test
# the state-only (us-all) map.
df_mapdata_hc <- get_data_from_map(download_map_data("countries/us/us-all"))
data_fake <- df_mapdata_hc %>%
  select(code = `hc-a2`) %>%
  mutate(value = 1e5 * abs(rt(nrow(.), df = 10)))
glimpse(data_fake)
hcmap("countries/us/us-all", data = data_fake, value = "value",
      joinBy = c("hc-a2", "code"), name = "Fake data",
      dataLabels = list(enabled = TRUE, format = '{point.name}'),
      borderColor = "#FAFAFA", borderWidth = 0.1,
      tooltip = list(valueDecimals = 2, valuePrefix = "$", valueSuffix = " USD"))
# Convert fifty_states point data into polygons for leaflet; fifty_states is
# presumably the fiftystater dataset -- confirm it is loaded.
sf_fifty <- sf::st_as_sf(fifty_states, coords = c("long", "lat")) %>%
  group_by(id) %>%
  summarize(do_union=FALSE) %>%
  st_cast("POLYGON") %>%
  ungroup()
leaflet(sf_fifty) %>% addPolygons()
crimes <- data.frame(state = tolower(rownames(USArrests)), USArrests)
# map_id creates the aesthetic mapping to the state name column in your data
p <- ggplot(crimes, aes(map_id = state)) +
  # map points to the fifty_states shape data
  geom_map(aes(fill = Assault), map = fifty_states) +
  expand_limits(x = fifty_states$long, y = fifty_states$lat) +
  coord_map() +
  scale_x_continuous(breaks = NULL) +
  scale_y_continuous(breaks = NULL) +
  labs(x = "", y = "") +
  theme(legend.position = "bottom",
        panel.background = element_blank())
p
# Plain polygon outline map from map_data('state').
states_gg <- map_data('state')
states_gg %>%
  ggplot() +
  geom_polygon(aes(x = long, y = lat, group = group), color = "white")
# Leaflet choropleth: read the US states GeoJSON (which carries a density
# attribute) and bin it into a colour scale.
states <-
  geojson_read(
    x = "https://raw.githubusercontent.com/PublicaMundi/MappingAPI/master/data/geojson/us-states.json"
    , what = "sp"
  )
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
pal <- colorBin("YlOrRd", domain = states$density, bins = bins)
labels <- sprintf(
  "<strong>%s</strong><br/>%g people / mi<sup>2</sup>",
  states$name, states$density
) %>% lapply(htmltools::HTML)
# MAPBOX_ACCESS_TOKEN must be set in the environment for the base tiles.
leaflet(states) %>%
  setView(-96, 37.8, 4) %>%
  addProviderTiles("MapBox", options = providerTileOptions(
    id = "mapbox.light",
    accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN'))) %>%
  addPolygons(
    fillColor = ~pal(density),
    weight = 2,
    opacity = 1,
    color = "white",
    dashArray = "3",
    fillOpacity = 0.7,
    highlight = highlightOptions(
      weight = 5,
      color = "#666",
      dashArray = "",
      fillOpacity = 0.7,
      bringToFront = TRUE),
    label = labels,
    labelOptions = labelOptions(
      style = list("font-weight" = "normal", padding = "3px 8px"),
      textsize = "15px",
      direction = "auto")) %>%
  addLegend(pal = pal, values = ~density, opacity = 0.7, title = NULL,
position = "bottomright") | /scripts/map_junk.R | no_license | sam-m-caldwell/donor-choose-kaggle | R | false | false | 4,687 | r | data(unemployment)
# Scratch file trying several US choropleth approaches side by side:
# highcharter, leaflet, fiftystater + ggplot2, and geojsonio. Several
# objects used below (hi, df_state_summary, df_st, fifty_states, and the
# packages providing mutate/select/glimpse/leaflet/...) are assumed to come
# from the surrounding session -- TODO confirm before running top to bottom.
# Pull the per-area data frame out of the Highcharts US map collection.
df_mapdata_dist <- get_data_from_map(download_map_data("countries/us/us-all-all"))
# NOTE(review): `=` assignment here while the rest of the file uses `<-`.
yo = download_map_data("countries/us/us-all-all")
# NOTE(review): `hi` is not defined anywhere in this file, and the chain ends
# by piping into `hc`, which is an object name rather than a function, so this
# block errors as written -- presumably leftover scratch code.
highchart(type = "map") %>%
  hc_chart(backgroundColor = "#161C20") %>%
  hc_add_series(mapData = hi, showInLegend = FALSE, nullColor = "#424242",
                borderWidth = 0) %>%
  # NOTE(review): type = "" is an empty series type -- likely meant "map"; confirm.
  hc_add_series(data = unemployment, type = "", name = "Unemployment", value = "value", joinBy = c("hc-key", "code"),
                borderColor = "transparent") %>%
  hc
# Choropleth of murder arrest rates joined onto the usgeojson state shapes.
data("USArrests", package = "datasets")
data("usgeojson")
USArrests <- mutate(USArrests, state = rownames(USArrests))
highchart() %>%
  hc_title(text = "Violent Crime Rates by US State") %>%
  hc_subtitle(text = "Source: USArrests data") %>%
  hc_add_series_map(usgeojson, USArrests, name = "Murder arrests (per 100,000)",
                    value = "Murder", joinBy = c("woename", "state"),
                    dataLabels = list(enabled = TRUE,
                                      format = '{point.properties.postalcode}')) %>%
  hc_colorAxis(stops = color_stops()) %>%
  hc_legend(valueDecimals = 0, valueSuffix = "%") %>%
  hc_mapNavigation(enabled = TRUE)
# Same technique for the donor summary; df_state_summary is built elsewhere
# -- TODO confirm it has "name" and "prop_rep_projects" columns.
highchart() %>%
  hc_add_series_map(usgeojson, df_state_summary, value = "prop_rep_projects",
                    joinBy = c("woename", "name"), name = "Proportion Donating Multiple Projects",
                    borderColor = "transparent") %>%
  hc_tooltip(valueDecimals = 1, valueSuffix = "%") %>%
  hc_colorAxis(stops = color_stops(6)) %>%
  hc_legend(layout = "vertical", align = "right",
            floating = TRUE, valueDecimals = 0, valueSuffix = "%") %>%
  hc_title(text = "Proportion of Donors Donating to Multiple Projects by State")
# NOTE(review): stray expression; df_st is not defined in this file.
df_st
# Unemployment choropleth with explicit colour classes.
hcmap(yo, data = unemployment,
      name = "Unemployment", value = "value", joinBy = c("hc-key", "code"),
      borderColor = "transparent") %>%
  hc_colorAxis(dataClasses = color_classes(c(seq(0, 10, by = 2), 50))) %>%
  hc_legend(layout = "vertical", align = "right",
            floating = TRUE, valueDecimals = 0, valueSuffix = "%")
# Build a fake state-level variable (random |t|-distributed values) to test
# the state-only (us-all) map.
df_mapdata_hc <- get_data_from_map(download_map_data("countries/us/us-all"))
data_fake <- df_mapdata_hc %>%
  select(code = `hc-a2`) %>%
  mutate(value = 1e5 * abs(rt(nrow(.), df = 10)))
glimpse(data_fake)
hcmap("countries/us/us-all", data = data_fake, value = "value",
      joinBy = c("hc-a2", "code"), name = "Fake data",
      dataLabels = list(enabled = TRUE, format = '{point.name}'),
      borderColor = "#FAFAFA", borderWidth = 0.1,
      tooltip = list(valueDecimals = 2, valuePrefix = "$", valueSuffix = " USD"))
# Convert fifty_states point data into polygons for leaflet; fifty_states is
# presumably the fiftystater dataset -- confirm it is loaded.
sf_fifty <- sf::st_as_sf(fifty_states, coords = c("long", "lat")) %>%
  group_by(id) %>%
  summarize(do_union=FALSE) %>%
  st_cast("POLYGON") %>%
  ungroup()
leaflet(sf_fifty) %>% addPolygons()
crimes <- data.frame(state = tolower(rownames(USArrests)), USArrests)
# map_id creates the aesthetic mapping to the state name column in your data
p <- ggplot(crimes, aes(map_id = state)) +
  # map points to the fifty_states shape data
  geom_map(aes(fill = Assault), map = fifty_states) +
  expand_limits(x = fifty_states$long, y = fifty_states$lat) +
  coord_map() +
  scale_x_continuous(breaks = NULL) +
  scale_y_continuous(breaks = NULL) +
  labs(x = "", y = "") +
  theme(legend.position = "bottom",
        panel.background = element_blank())
p
# Plain polygon outline map from map_data('state').
states_gg <- map_data('state')
states_gg %>%
  ggplot() +
  geom_polygon(aes(x = long, y = lat, group = group), color = "white")
# Leaflet choropleth: read the US states GeoJSON (which carries a density
# attribute) and bin it into a colour scale.
states <-
  geojson_read(
    x = "https://raw.githubusercontent.com/PublicaMundi/MappingAPI/master/data/geojson/us-states.json"
    , what = "sp"
  )
bins <- c(0, 10, 20, 50, 100, 200, 500, 1000, Inf)
pal <- colorBin("YlOrRd", domain = states$density, bins = bins)
labels <- sprintf(
  "<strong>%s</strong><br/>%g people / mi<sup>2</sup>",
  states$name, states$density
) %>% lapply(htmltools::HTML)
# MAPBOX_ACCESS_TOKEN must be set in the environment for the base tiles.
leaflet(states) %>%
  setView(-96, 37.8, 4) %>%
  addProviderTiles("MapBox", options = providerTileOptions(
    id = "mapbox.light",
    accessToken = Sys.getenv('MAPBOX_ACCESS_TOKEN'))) %>%
  addPolygons(
    fillColor = ~pal(density),
    weight = 2,
    opacity = 1,
    color = "white",
    dashArray = "3",
    fillOpacity = 0.7,
    highlight = highlightOptions(
      weight = 5,
      color = "#666",
      dashArray = "",
      fillOpacity = 0.7,
      bringToFront = TRUE),
    label = labels,
    labelOptions = labelOptions(
      style = list("font-weight" = "normal", padding = "3px 8px"),
      textsize = "15px",
      direction = "auto")) %>%
  addLegend(pal = pal, values = ~density, opacity = 0.7, title = NULL,
position = "bottomright") |
library("matlib")
# 2x2 example from the matlib powerMethod() docs.
A <- cbind(c(4, 2), c(-5, -3))
powerMethod(A)
# Bug fix: "-5.-7" parsed as the single number -5 - 7 = -12, so the third
# column had only two values and cbind() recycled them; the intended 3x3
# matrix uses the column c(-5, -7, -10).
B <- cbind(c(0, -2, -4), c(11, 17, 26), c(-5, -7, -10))
powerMethod(B) | /tugaspowerMethod-17523184-17523153.R | no_license | tasyaulfha/R-Language | R | false | false | 128 | r | library("matlib")
# 2x2 example for matlib::powerMethod().
A <- cbind(c(4, 2), c(-5, -3))
powerMethod(A)
# Bug fix: "-5.-7" parsed as the single number -5 - 7 = -12, so the third
# column had only two values and cbind() recycled them; the intended 3x3
# matrix uses the column c(-5, -7, -10).
B <- cbind(c(0, -2, -4), c(11, 17, 26), c(-5, -7, -10))
powerMethod(B)
\name{cimpl-package}
\alias{cimpl-package}
\alias{cimpl}
\docType{package}
\title{
Common Insertion site Mapping PLatform
}
\description{
An analysis package for multi sample insertional mutagenesis data (including viral- and transposon-based systems) using Gaussian kernel convolution to identify common insertion sites.
}
\author{
Jelle ten Hoeve and Jeroen de Ridder
}
\references{
De Ridder J, Uren A, Kool J, Reinders M, and Wessels L. Detecting statistically significant common insertion sites in retroviral insertional mutagenesis screens. PLoS Comput Biol, 2:e166, 2006.
}
\keyword{ package }
\seealso{
\code{\link{doCimplAnalysis}}, \code{\link{plot}}
}
\examples{
\dontrun{
library(BSgenome.Mmusculus.UCSC.mm9)
data(colorectal)
# do 'cimpl' analysis
sampleCa <- doCimplAnalysis(colorectal, scales = c(100e3), chromosomes = c('chr19'), n_iterations = 100, BSgenome = Mmusculus, system = 'SB', lhc.method='none')
}
data(sampleCa) # load sample data to reduce package build time
# make some plots
plot(sampleCa, type='kse', interactive=FALSE)
plot(sampleCa, type='scale.space', interactive=FALSE)
plot(sampleCa, type='null.cdf', interactive=FALSE)
genes <- getEnsemblGenes(sampleCa)
# retrieve CISs
ciss <- getCISs(sampleCa, genes=genes, alpha=0.05, mul.test=TRUE) # NB: set significance level (alpha) and multiple testing correction
write.csv(ciss, file='ciss.csv')
# export result to html
export.html(sampleCa, genes=genes, alpha=0.05, mul.test=TRUE)
# make a matrix linking insertions to CISs
cisMatrix <- getCISMatrix(sampleCa, ciss)
write.csv(cisMatrix, file='cisMatrix.csv', row.names=FALSE)
# export kse and bg_density to .wig file
export.wig(sampleCa, file='kse.wig')
# export CISs to .bed file
export.bed(ciss, file='ciss.bed')
}
| /man/cimpl-package.Rd | no_license | NKI-CCB/cimpl | R | false | false | 1,758 | rd | \name{cimpl-package}
\alias{cimpl-package}
\alias{cimpl}
\docType{package}
\title{
Common Insertion site Mapping PLatform
}
\description{
An analysis package for multi sample insertional mutagenesis data (including viral- and transposon-based systems) using Gaussian kernel convolution to identify common insertion sites.
}
\author{
Jelle ten Hoeve and Jeroen de Ridder
}
\references{
De Ridder J, Uren A, Kool J, Reinders M, and Wessels L. Detecting statistically significant common insertion sites in retroviral insertional mutagenesis screens. PLoS Comput Biol, 2:e166, 2006.
}
\keyword{ package }
\seealso{
\code{\link{doCimplAnalysis}}, \code{\link{plot}}
}
\examples{
\dontrun{
library(BSgenome.Mmusculus.UCSC.mm9)
data(colorectal)
# do 'cimpl' analysis
sampleCa <- doCimplAnalysis(colorectal, scales = c(100e3), chromosomes = c('chr19'), n_iterations = 100, BSgenome = Mmusculus, system = 'SB', lhc.method='none')
}
data(sampleCa) # load sample data to reduce package build time
# make some plots
plot(sampleCa, type='kse', interactive=FALSE)
plot(sampleCa, type='scale.space', interactive=FALSE)
plot(sampleCa, type='null.cdf', interactive=FALSE)
genes <- getEnsemblGenes(sampleCa)
# retrieve CISs
ciss <- getCISs(sampleCa, genes=genes, alpha=0.05, mul.test=TRUE) # NB: set significance level (alpha) and multiple testing correction
write.csv(ciss, file='ciss.csv')
# export result to html
export.html(sampleCa, genes=genes, alpha=0.05, mul.test=TRUE)
# make a matrix linking insertions to CISs
cisMatrix <- getCISMatrix(sampleCa, ciss)
write.csv(cisMatrix, file='cisMatrix.csv', row.names=FALSE)
# export kse and bg_density to .wig file
export.wig(sampleCa, file='kse.wig')
# export CISs to .bed file
export.bed(ciss, file='ciss.bed')
}
|
\name{trXX_dXX}
\alias{trXX_dXX}
\alias{trXX_d10}
\alias{tr01_d10}
\alias{tr10_d10}
\alias{tr11_d10}
\alias{tr12_d10}
\alias{tr17_d10}
\alias{tr18_d10}
\alias{tr23_d10}
\alias{tr24_d10}
\alias{tr28_d10}
\alias{tr33_d10}
\alias{tr37_d10}
\alias{tr39_d10}
\alias{tr42_d10}
\alias{tr44_d10}
\alias{tr45_d10}
\alias{tr47_d10}
\alias{tr50_d10}
\alias{tr51_d10}
\alias{tr54_d10}
\alias{tr72_d10}
\docType{data}
\title{
Provides the U.S. 2010 Census Tract Boundary datasets for 20
States, District, and Territory East of the Mississippi River for use with
\var{SeerMapper} Package
}
\description{
There are six supplemental census tract boundary packages for
use with \var{SeerMapper}. The are \var{SeerMapperRegs},
\var{SeerMapperEast}, and \var{SeerMapperWest} for the 2000 census and
\var{Seer2010MapperRegs}, \var{Seer2010MapperEast} and
\var{SeerMapper2010West} for the 2010 census. The combination of the
either three provide a set of census tract boundaries for one U. S.
census year (either 2000 or 2010). The \var{SeerMapperRegs} and
\var{Seer2010MapperRegs} packages contain the census tract
boundaries for the 19 states that have U. S. Seer Registries. The
remaining census tract boundaries are split into two packages for
each census year based on whether the state is east or west of the
Mississippi river. The census tracts for 20 states, district and territory without
registries east of the Mississippi river are contained in the
\var{SeerMapperEast} and \var{Seer2010MapperEast} packages. The
tract boundaries for the 13 states west of the Mississippi river are
in the \var{SeerMapperWest} and \var{Seer2010MapperWest} packages.
The states include the District of Columbia and Puerto Rico.
This package provides the 2010 census tract boundaries for the 20
states and district/territory without registries east of the Mississippi river.
}
\usage{data(tr01_d10)}
\format{
Each file contains a set of SpatialPolygons structures for the 20 states, district,
and territory
east of the Mississippi that do not have Seer Registries.
Each state structure is a list of the census tract boundaries that
make up the state. Refer to the documentation on the \var{sp} package for
details on the structures. Each list element is a "polygons" class
structure containing attributes of the census tract (label point,
area, ID, etc.) and a list of "Polygons" class elements that define
the boundary polygons.
}
\details{
The \var{SeerMapperEast} and \var{SeerMapper2010East} packages contains
20 eastern states including:
\preformatted{
FIPS Name
01 Alabama
10 Delaware
11 District of Columbia
12 Florida
17 Illinois
18 Indiana
23 Maine
24 Maryland
28 Mississippi
33 New Hampshire
37 North Carolina
39 Ohio
42 Pennsylvania
44 Rhode Island
45 South Carolina
47 Tennessee
50 Vermont
51 Virginia
54 West Virginia
72 Puerto Rico
}
The \var{SeerMapper} package contains the state and county boundary
data for all 51 states and DC and the Seer Registry boundary data for
the 20 established registries. Due to space limitations, the base
packages cannot contain the census tract boundary data.
Three supplemental data packages are used for each census year to
provide the census tract boundary data to \var{SeerMapper} package.
Each state ".rda" file (dataset) contains state's census tract
boundary data. The file name structure is trXX_dYY, where tr
identifies the file as census tract boundary data, "XX" is the state
(2 digits) fips code and "YY" represents the census year (e.g., 00 =
2000 or 10 for 2010)
To conserve disk space and downloading/installation time, the rda
files are compressed using the "xy" method.
The U. S. Census Bureau census tract boundary shape file data was pre-
processed by "www.MapShaper.org" website version 01.15 using modified
Visvalingam method with the intersection repair and prevent shape
removal options enabled, the coordinate precision value set to 0.0 and
the simplify parameter set to 10%. This reduces the space requirements
by 85% while maintaining a reasonable boundary image and edge
alignments.
When the \var{SeerMapper} package is installed, the six (6) census tract
boundary supplement packages are also installed automatically by R.
The \var{SeerMapper} package then manages the loading of the packages
to ensure the needed boundary files are available.
}
\source{
The census tract boundary shapefiles were downloaded from the
CENSUS.GOV web site for the 2010 Census census tract boundary shapefiles,
https://www2.census.gov/geo/tiger/GENZ2010/gz_2010_XX_140_00_500k.zip,
where XX is the state FIPS code.
}
\examples{
#
# These examples are a test to ensure each census tract file
# can be read and a plot of the state generated.
#
require("sp")
#
# If you want to save and see the example output PDF files, change the
# the following "tempDir()" to the path you want to save the output files.
# For Example: outDir <- "c:/RTestPDFs"
outDir <- tempdir()
cat("Output Directory:",outDir,"\n")
stList <- c("01","10","11","12","17",
"18","23","24","28","33",
"37","39","42","44","45",
"47","50","51","54","72")
stName <- c("Alabama","Delaware","Dist.of Columbia","Florida","Illinois",
"Indiana","Maine","Maryland","Mississippi","New Hampshire",
"North Carolina","Ohio","Pennsylvania","Rhode Island","South Carolina",
"Tennessee","Vermont","Virginia","West Virginia","Puerto Rico")
cY <- "10"
outFile <- paste0("SeerMapperEast",cY,"-CT.pdf")
outFN <- file.path(outDir,outFile)
cat("Output example PDF file:",outFN,"\n")
pdf(outFN,width=7,height=10)
for (stN in seq(from=1, to=length(stList), by=8)) { # Test print 3 of 20 states.
stID <- stList[stN]
stNa <- stName[stN]
trFN <- paste0("tr",stID,"_d",cY)
TT_tr <- paste0("U. S. Census Tracts - ",stNa," Fips=",stID," file=",trFN)
data(list=trFN)
wrSP <- get(trFN)
#str(wrSP)
plot(wrSP,main=TT_tr)
rm(list=trFN)
}
dev.off()
}
\keyword{Census2000}
\keyword{Census2010}
\keyword{datasets}
| /man/trXX_dXX.Rd | no_license | cran/SeerMapper2010East | R | false | false | 6,469 | rd | \name{trXX_dXX}
\alias{trXX_dXX}
\alias{trXX_d10}
\alias{tr01_d10}
\alias{tr10_d10}
\alias{tr11_d10}
\alias{tr12_d10}
\alias{tr17_d10}
\alias{tr18_d10}
\alias{tr23_d10}
\alias{tr24_d10}
\alias{tr28_d10}
\alias{tr33_d10}
\alias{tr37_d10}
\alias{tr39_d10}
\alias{tr42_d10}
\alias{tr44_d10}
\alias{tr45_d10}
\alias{tr47_d10}
\alias{tr50_d10}
\alias{tr51_d10}
\alias{tr54_d10}
\alias{tr72_d10}
\docType{data}
\title{
Provides the U.S. 2010 Census Tract Boundary datasets for 20
States, District, and Territory East of the Mississippi River for use with
\var{SeerMapper} Package
}
\description{
There are six supplemental census tract boundary packages for
use with \var{SeerMapper}. The are \var{SeerMapperRegs},
\var{SeerMapperEast}, and \var{SeerMapperWest} for the 2000 census and
\var{Seer2010MapperRegs}, \var{Seer2010MapperEast} and
\var{SeerMapper2010West} for the 2010 census. The combination of the
either three provide a set of census tract boundaries for one U. S.
census year (either 2000 or 2010). The \var{SeerMapperRegs} and
\var{Seer2010MapperRegs} packages contain the census tract
boundaries for the 19 states that have U. S. Seer Registries. The
remaining census tract boundaries are split into two packages for
each census year based on whether the state is east or west of the
Mississippi river. The census tracts for 20 states, district and territory without
registries east of the Mississippi river are contained in the
\var{SeerMapperEast} and \var{Seer2010MapperEast} packages. The
tract boundaries for the 13 states west of the Mississippi river are
in the \var{SeerMapperWest} and \var{Seer2010MapperWest} packages.
The states include the District of Columbia and Puerto Rico.
This package provides the 2010 census tract boundaries for the 20
states and district/territory without registries east of the Mississippi river.
}
\usage{data(tr01_d10)}
\format{
Each file contains a set of SpatialPolygons structures for the 20 states, district,
and territory
east of the Mississippi that do not have Seer Registries.
Each state structure is a list of the census tract boundaries that
make up the state. Refer to the documentation on the \var{sp} package for
details on the structures. Each list element is a "polygons" class
structure containing attributes of the census tract (label point,
area, ID, etc.) and a list of "Polygons" class elements that define
the boundary polygons.
}
\details{
The \var{SeerMapperEast} and \var{SeerMapper2010East} packages contains
20 eastern states including:
\preformatted{
FIPS Name
01 Alabama
10 Delaware
11 District of Columbia
12 Florida
17 Illinois
18 Indiana
23 Maine
24 Maryland
28 Mississippi
33 New Hampshire
37 North Carolina
39 Ohio
42 Pennsylvania
44 Rhode Island
45 South Carolina
47 Tennessee
50 Vermont
51 Virginia
54 West Virginia
72 Puerto Rico
}
The \var{SeerMapper} package contains the state and county boundary
data for all 51 states and DC and the Seer Registry boundary data for
the 20 established registries. Due to space limitations, the base
packages cannot contain the census tract boundary data.
Three supplemental data packages are used for each census year to
provide the census tract boundary data to \var{SeerMapper} package.
Each state ".rda" file (dataset) contains state's census tract
boundary data. The file name structure is trXX_dYY, where tr
identifies the file as census tract boundary data, "XX" is the state
(2 digits) fips code and "YY" represents the census year (e.g., 00 =
2000 or 10 for 2010)
To conserve disk space and downloading/installation time, the rda
files are compressed using the "xy" method.
The U. S. Census Bureau census tract boundary shape file data was pre-
processed by "www.MapShaper.org" website version 01.15 using modified
Visvalingam method with the intersection repair and prevent shape
removal options enabled, the coordinate precision value set to 0.0 and
the simplify parameter set to 10%. This reduces the space requirements
by 85% while maintaining a reasonable boundary image and edge
alignments.
When the \var{SeerMapper} package is installed, the six (6) census tract
boundary supplement packages are also installed automatically by R.
The \var{SeerMapper} package then manages the loading of the packages
to ensure the needed boundary files are available.
}
\source{
The census tract boundary shapefiles were downloaded from the
CENSUS.GOV web site for the 2010 Census census tract boundary shapefiles,
https://www2.census.gov/geo/tiger/GENZ2010/gz_2010_XX_140_00_500k.zip,
where XX is the state FIPS code.
}
\examples{
#
# These examples are a test to ensure each census tract file
# can be read and a plot of the state generated.
#
require("sp")
#
# If you want to save and see the example output PDF files, change the
# the following "tempDir()" to the path you want to save the output files.
# For Example: outDir <- "c:/RTestPDFs"
outDir <- tempdir()
cat("Output Directory:",outDir,"\n")
stList <- c("01","10","11","12","17",
"18","23","24","28","33",
"37","39","42","44","45",
"47","50","51","54","72")
stName <- c("Alabama","Delaware","Dist.of Columbia","Florida","Illinois",
"Indiana","Maine","Maryland","Mississippi","New Hampshire",
"North Carolina","Ohio","Pennsylvania","Rhode Island","South Carolina",
"Tennessee","Vermont","Virginia","West Virginia","Puerto Rico")
cY <- "10"
outFile <- paste0("SeerMapperEast",cY,"-CT.pdf")
outFN <- file.path(outDir,outFile)
cat("Output example PDF file:",outFN,"\n")
pdf(outFN,width=7,height=10)
for (stN in seq(from=1, to=length(stList), by=8)) { # Test print 3 of 20 states.
stID <- stList[stN]
stNa <- stName[stN]
trFN <- paste0("tr",stID,"_d",cY)
TT_tr <- paste0("U. S. Census Tracts - ",stNa," Fips=",stID," file=",trFN)
data(list=trFN)
wrSP <- get(trFN)
#str(wrSP)
plot(wrSP,main=TT_tr)
rm(list=trFN)
}
dev.off()
}
\keyword{Census2000}
\keyword{Census2010}
\keyword{datasets}
|
#Takes a character vector with one entry per document (or, if length 1, a
#directory path whose files are each read as one document) plus optional
#per-document metadata, cleans the text with tm, and returns it in the
#documents/vocab/meta list format used by stm.
#
#Arguments:
#  documents    character vector of texts; if length 1 it is treated as a
#               directory path and every file underneath is read in
#  metadata     optional data.frame with one row per document
#  lowercase, removestopwords, removenumbers, removepunctuation, stem
#               toggles for the individual tm cleaning steps
#  sparselevel  passed to removeSparseTerms(); 1 disables sparsity pruning
#  language     language code used for stopwords and stemming
#  extrastops   extra stopwords removed in addition to the built-in list
#  verbose      print progress messages?
textProcessor <- function(documents, metadata=NULL, 
                          lowercase=TRUE, removestopwords=TRUE, removenumbers=TRUE, removepunctuation=TRUE, stem=TRUE, 
                          sparselevel=1, language="en", extrastops=NULL,
                          verbose=TRUE) {
  #require() (not requireNamespace()) is deliberate here: the body calls tm
  #functions unqualified, so the package must actually be attached.
  if(!require(tm,quietly=TRUE)) stop("Please install tm package to use this function. You will also need SnowballC if stemming.")
  if(stem) {
    if(!require(SnowballC, quietly=TRUE)) stop("Please install SnowballC to use stemming.")
  }
  #If there is only one item, assume it is a directory path and load every file in it.
  if(length(documents)==1) {
    filelist <- list.files(path=documents, full.names=TRUE, recursive=TRUE)
    documents <- vector(length=length(filelist))
    if(verbose) cat(sprintf("Loading %i files from directory...\n", length(documents)))
    #seq_along() is safe for an empty directory, where 1:length(filelist)
    #would iterate over c(1, 0) and fail on filelist[1].
    for(i in seq_along(filelist)) {
      documents[i] <- paste(readLines(filelist[i]), collapse=" ")
    }
  } else {
    documents <- as.character(documents)
  }
  if(verbose) cat("Building corpus... \n")
  txt <- VCorpus(VectorSource(documents), readerControl=list(language= language))
  #Apply filters
  txt <- tm_map(txt, stripWhitespace)
  if(lowercase){
    if(verbose) cat("Converting to Lower Case... \n")
    #Convert to Lower case
    #(Note that this is slightly more complicated due to API change in tm)
    if(packageVersion("tm") >= "0.6") {
      txt <- tm_map(txt, content_transformer(tolower))
    } else {
      txt <- tm_map(txt, tolower)
    }
  }
  if(removestopwords){
    if(verbose) cat("Removing stopwords... \n")
    if(!is.null(extrastops)){
      txt <- tm_map(txt, removeWords, c(stopwords(language), extrastops)) #Remove extra stopwords
    }
    if(is.null(extrastops)){
      txt <- tm_map(txt, removeWords, stopwords(language)) #Remove just built-in stopwords
    }
  }
  if(removenumbers){
    if(verbose) cat("Removing numbers... \n")
    txt <- tm_map(txt, removeNumbers) #Remove numbers
  }
  if(removepunctuation){
    if(verbose) cat("Removing punctuation... \n")
    txt <- tm_map(txt, removePunctuation) #Remove punctuation
  }
  if(stem){
    if(verbose) cat("Stemming... \n")
    txt <- tm_map(txt, stemDocument, language=language)
  }
  #Attach each metadata column to the corpus by name.
  if(!is.null(metadata)) {
    for(i in seq_len(ncol(metadata))) {
      meta(txt, colnames(metadata)[i]) <- metadata[,i]
    }
  }
  #Make a matrix
  if(verbose) cat("Creating Output... \n")
  dtm <- DocumentTermMatrix(txt)
  if(sparselevel!=1) {
    V <- ncol(dtm)
    dtm <- removeSparseTerms(dtm, sparselevel) #remove terms that are sparse
    #&& (scalar short-circuit) rather than elementwise & for an if() condition;
    #msg avoids shadowing base::message().
    if(ncol(dtm) < V && verbose) {
      msg <- sprintf("Removed %i of %i words due to sparselevel of %s \n",
                     V-ncol(dtm), V, sparselevel)
      cat(msg)
    }
  }
  #If there is metadata we need to remove some documents:
  #dtm$i holds the row indices still present in the sparse matrix.
  #drop=FALSE keeps meta a data.frame even when metadata has a single column.
  if(!is.null(metadata)) {
    docindex <- unique(dtm$i)
    metadata <- meta(txt)[docindex, , drop=FALSE]
  }
  out <- read.slam(dtm) #using the read.slam() function in stm to convert
  vocab <- as.character(out$vocab)
  return(list(documents=out$documents, vocab=vocab, meta=metadata))
}
| /R/textProcessor.R | no_license | ahalterman/stm | R | false | false | 3,162 | r |
#Takes a character vector with one entry per document (or, if length 1, a
#directory path whose files are each read as one document) plus optional
#metadata, cleans the text with tm, and returns the documents/vocab/meta
#list format used by stm.
textProcessor <- function(documents, metadata=NULL, 
                          lowercase=TRUE, removestopwords=TRUE, removenumbers=TRUE, removepunctuation=TRUE, stem=TRUE, 
                          sparselevel=1, language="en", extrastops=NULL,
                          verbose=TRUE) {
  #require() keeps tm attached so the unqualified tm calls below resolve.
  if(!require(tm,quietly=TRUE)) stop("Please install tm package to use this function. You will also need SnowballC if stemming.")
  if(stem) {
    if(!require(SnowballC, quietly=TRUE)) stop("Please install SnowballC to use stemming.")
  }
  #If there is only one item assume its a url and load it.
  #NOTE(review): despite the comment above, a length-1 input is treated as a
  #directory path (list.files), not a URL.
  if(length(documents)==1) {
    filelist <- list.files(path=documents, full.names=TRUE, recursive=TRUE)
    documents <- vector(length=length(filelist))
    if(verbose) cat(sprintf("Loading %i files from directory...\n", length(documents)))
    #NOTE(review): 1:length(filelist) iterates over c(1, 0) when the
    #directory is empty; seq_along(filelist) would be safer.
    for(i in 1:length(filelist)) {
      documents[i] <- paste(readLines(filelist[i]), collapse=" ")
    }
  } else {
    documents <- as.character(documents)
  }
  if(verbose) cat("Building corpus... \n")
  txt <- VCorpus(VectorSource(documents), readerControl=list(language= language))
  #Apply filters
  txt <- tm_map(txt, stripWhitespace)
  if(lowercase){
    if(verbose) cat("Converting to Lower Case... \n")
    #Convert to Lower case
    #(Note that this is slightly more complicated due to API change in tm)
    if(packageVersion("tm") >= "0.6") {
      txt <- tm_map(txt, content_transformer(tolower))
    } else {
      txt <- tm_map(txt, tolower)
    }
  }
  if(removestopwords){
    if(verbose) cat("Removing stopwords... \n")
    if(!is.null(extrastops)){
      txt <- tm_map(txt, removeWords, c(stopwords(language), extrastops)) #Remove extra stopwords
    }
    if(is.null(extrastops)){
      txt <- tm_map(txt, removeWords, stopwords(language)) #Remove just built-in stopwords
    }
  }
  if(removenumbers){
    if(verbose) cat("Removing numbers... \n")
    txt <- tm_map(txt, removeNumbers) #Remove numbers
  }
  if(removepunctuation){
    if(verbose) cat("Removing punctuation... \n")
    txt <- tm_map(txt, removePunctuation) #Remove punctuation
  }
  if(stem){
    if(verbose) cat("Stemming... \n")
    txt <- tm_map(txt, stemDocument, language=language)
  }
  #Attach each metadata column to the corpus by name.
  if(!is.null(metadata)) {
    for(i in 1:ncol(metadata)) {
      meta(txt, colnames(metadata)[i]) <- metadata[,i]
    }
  }
  #Make a matrix
  if(verbose) cat("Creating Output... \n")
  dtm <- DocumentTermMatrix(txt)
  if(sparselevel!=1) {
    V <- ncol(dtm)
    dtm <- removeSparseTerms(dtm, sparselevel) #remove terms that are sparse
    if(ncol(dtm) < V & verbose) {
      message <- sprintf("Removed %i of %i words due to sparselevel of %s \n",
                         V-ncol(dtm), V, sparselevel)
      cat(message)
    }
  }
  #If there is metadata we need to remove some documents
  #NOTE(review): dtm$i are the row indices still present in the sparse
  #matrix, so meta is subset to the surviving documents; with a single
  #metadata column this indexing drops to a vector -- confirm downstream use.
  if(!is.null(metadata)) {
    docindex <- unique(dtm$i)
    metadata <- meta(txt)[docindex,]
  }
  out <- read.slam(dtm) #using the read.slam() function in stm to convert
  vocab <- as.character(out$vocab)
  return(list(documents=out$documents, vocab=vocab, meta=metadata))
}
|
##STATEMENT:
# Loading the libraries
library(dplyr)
data(murders)
# Create new data frame called my_states (with specifications in the instructions)
##ANSWER
my_states <- murders %>% mutate(rate = total / population * 100000, rank = rank(-rate)) %>% filter((region=="Northeast" | region=="West") & (rate < 1)) %>% select(state, rate, rank) | /01.DS-R Basics/S3-Indexing-Data Wrangling-Plots/3.2 Data Wrangling/09. Exercise.R | no_license | dgpaniagua/data-science-assessments | R | false | false | 344 | r | ##STATEMENT:
# Loading the libraries
library(dplyr)
# NOTE(review): data(murders) assumes the murders dataset is available on the
# search path (in this course it ships with the dslabs package) -- confirm
# that package is attached before running.
data(murders)
# Create new data frame called my_states (with specifications in the instructions)
##ANSWER
my_states <- murders %>% mutate(rate = total / population * 100000, rank = rank(-rate)) %>% filter((region=="Northeast" | region=="West") & (rate < 1)) %>% select(state, rate, rank) |
plot2 <- function() {
  # Draw global active power over time and save it to "plot2.png".
  #
  # Relies on read_power_data() (defined elsewhere in this project) to supply
  # a data frame with Date, Time and Global_active_power columns.
  power_data <- read_power_data()

  # Combine the date and time columns into POSIXlt timestamps for the x-axis.
  timestamps <- strptime(paste(power_data$Date, power_data$Time, sep = " "),
                         "%d/%m/%Y %H:%M:%S")
  active_power <- power_data$Global_active_power

  # Render the line chart to a 480x480 PNG, then close the device.
  png("plot2.png", width = 480, height = 480)
  plot(timestamps, active_power, type = "l",
       xlab = "Measuring Time", ylab = "Global Active Power [kW]")
  dev.off()
}
| /plot2.R | no_license | mansera/ExData_Plotting1 | R | false | false | 526 | r | plot2<-function(){
# Read the data
DPC<-read_power_data()
# Create de x-axis variable and naming the y-axis variable
measuringtime <- strptime(paste(DPC$Date, DPC$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
GAP <- DPC$Global_active_power
# Initialize the graphic device, build the plot and close the graphic device
png("plot2.png", width=480, height=480)
plot(measuringtime, GAP, type="l", xlab="Measuring Time", ylab="Global Active Power [kW]")
dev.off()
}
|
# Page 333
h <- 6.62608 * 10^-34
b <- 2.17989 * 10^-18
c <- 2.99792 * 10^10
R <- b / (h * c)
print(R) | /Modern_Physical_Chemistry_A_Molecular_Approach_by_George_H_Duffey/CH12/EX12.13/Ex12_13.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 104 | r | # Page 333
# Planck constant h in J*s.
h <- 6.62608 * 10^-34
# NOTE(review): the magnitude of b matches the Rydberg energy (~2.18e-18 J)
# -- confirm against the textbook derivation referenced as "Page 333".
b <- 2.17989 * 10^-18
# Speed of light in cm/s (CGS units), so R below comes out in cm^-1.
# Also note this assignment shadows base::c within the script.
c <- 2.99792 * 10^10
# Convert the energy b to a wavenumber: R = b / (h*c), roughly 1.0974e5 cm^-1.
R <- b / (h * c)
print(R) |
## Plot4.R: four views of household global power in a 2 x 2 frame
## BUG FIX: the original first line began with a stray "GET" token before the
## comment marker; R would evaluate that symbol and fail at run time with
## "object 'GET' not found". Removed here.
hpc <- read.table("./data/household_power_consumption.txt", sep = ";",
                  header = TRUE, na.strings = "?", stringsAsFactors = FALSE)
# Build POSIXlt timestamps from the separate Date and Time character columns.
hpc$DateTime <- paste(hpc$Date, hpc$Time)
hpc$DateTime <- strptime(hpc$DateTime, format = "%d/%m/%Y %H:%M:%S")
# Keep observations from 2007-02-01 up to and including 2007-02-03 00:00:00.
hpc1 <- hpc[hpc$DateTime >= as.POSIXct("2007-02-01 00:00:00") &
              hpc$DateTime <= as.POSIXct("2007-02-03 00:00:00"), ]
png(file = "plot4.png", width = 480, height = 480)
par(mfrow = c(2, 2))
# Top-left: global active power over time.
plot(hpc1$DateTime, hpc1$Global_active_power, type = "l", lwd = 1,
     ylab = "Global Active Power", xlab = "")
# Top-right: voltage over time.
plot(hpc1$DateTime, hpc1$Voltage, type = "l", lwd = 1,
     ylab = "Voltage", xlab = "datetime")
# Bottom-left: the three energy sub-metering series plus a legend.
plot(hpc1$DateTime, hpc1$Sub_metering_1, col = "black", type = "l",
     xlab = "", ylab = "Energy Sub Metering")
lines(hpc1$DateTime, hpc1$Sub_metering_2, type = "l", col = "red")
lines(hpc1$DateTime, hpc1$Sub_metering_3, type = "l", col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, bty = "n", cex = 1)
# Bottom-right: global reactive power over time. (dev.off() follows on the
# next line of the file.)
plot(hpc1$DateTime, hpc1$Global_reactive_power, type = "l", lwd = 1,
     ylab = "Global_reactive_power", xlab = "datetime")
dev.off() | /Old Plot4.R | no_license | AlexRoopesh/Coursera-docs | R | false | false | 1,143 | r | GET## Plot4.R to multiple views of global power in a 2 X 2 frame
hpc <- read.table("./data/household_power_consumption.txt", sep=";", header=TRUE, na.strings="?", stringsAsFactors=FALSE)
hpc$DateTime <- paste(hpc$Date, hpc$Time)
hpc$DateTime <- strptime(hpc$DateTime, format = "%d/%m/%Y %H:%M:%S")
hpc1 <- hpc[hpc$DateTime >= as.POSIXct("2007-02-01 00:00:00") & hpc$DateTime <= as.POSIXct("2007-02-03 00:00:00"), ]
png(file="plot4.png",width=480,height=480)
par(mfrow = c(2,2))
plot(hpc1$DateTime, hpc1$Global_active_power, type="l", lwd=1, ylab="Global Active Power", xlab="")
plot(hpc1$DateTime, hpc1$Voltage, type="l", lwd=1, ylab="Voltage", xlab="datetime")
plot(hpc1$DateTime, hpc1$Sub_metering_1,col="black", type="l", xlab="", ylab="Energy Sub Metering")
lines(hpc1$DateTime, hpc1$Sub_metering_2, type="l",col="red")
lines(hpc1$DateTime, hpc1$Sub_metering_3, type="l",col="blue")
legend("topright",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"), lty=1, bty="n", cex=1)
plot(hpc1$DateTime, hpc1$Global_reactive_power, type="l", lwd=1, ylab="Global_reactive_power", xlab="datetime")
dev.off() |
# Available from NorESM2-MM:
# od550aer    AOD from the ambient aerosols (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550aerh2o atmosphere_optical_thickness_due_to_water_in_ambient_aerosol_particles
# od550bc     atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol
# od550csaer  AOD from the ambient aerosols in clear skies if od550aer is for all-sky (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550dust   atmosphere_optical_thickness_due_to_dust_ambient_aerosol_particles
# od550lt1aer od550 due to particles with wet diameter less than 1 um (ambient here means wetted). When models do not include explicit size information, it can be assumed that all anthropogenic aerosols and natural secondary aerosols have diameter less than 1 um.
# od550oa     atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles
# od550so4    atmosphere_optical_thickness_due_to_sulfate_ambient_aerosol_particles
# od550ss     atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles

# First attempt: use
# od550ss   for the first class  "SEA"
# od550oa   for the second class "LAND"
# od550bc   for the third class  "SOOT"
# od550dust for the fourth class "DESERT"
# The following are then not used:
# od550aer, od550aerh2o, od550csaer, od550lt1aer (should we add this to the
# SOOT class?), od550so4

# Download data from Nird:
# cd ~/HCLIM/aerosols/aero_tegen/NorESM2-MM
# scp "nird:/projects/NS9034K/CMIP6/CMIP/NCC/NorESM2-MM/historical/r1i1p1f1/AERmon/od550*/gn/latest/od550*198001-198912.nc" .

library(ncdf4)
library(fields)

# Build the path of the AERmon file holding a given AOD variable.
aod_file <- function(var) {
  sprintf("NorESM2-MM/%s_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc", var)
}

# --- Read the horizontal grid from the sea-salt file -------------------------
set_revlat <- FALSE
nc <- nc_open(aod_file("od550ss"))
lat <- nc$dim$lat$vals
lon <- nc$dim$lon$vals
nc_close(nc)

if (length(dim(lat)) > 1) { stop(" The lat variable has more than one dimension. Only regular grids supported so far.") }
dlat <- diff(lat)
if (any(dlat > 0) & any(dlat < 0)) { stop("Error while reading lat. Values are both increasing and decreasing.") }
if (all(dlat > 0)) {
  # Input is south-to-north; the Tegen file format goes from north to south,
  # so remember to flip the data arrays as well.
  lat <- rev(lat)
  set_revlat <- TRUE
}
nlat <- length(lat)

if (length(dim(lon)) > 1) { stop(" The lon variable has more than one dimension. Only regular grids supported so far.") }
dlon <- diff(lon)
if (any(dlon > 0) & any(dlon < 0)) { stop("Error while reading lon. Values are both increasing and decreasing.") }
nlon <- length(lon)

# --- Generate the header rows of the Tegen file (lat then lon, 5 per row) ----
if ((nlat + nlon) %% 5 != 0) { stop("nlon + nlat not divisible by 5 (the number of columns). Implement a workaround.") }
header <- matrix(c(lat, lon), ncol = 5, byrow = TRUE)
write.table(header, file = "header.txt", sep = " ", row.names = FALSE, col.names = FALSE)

# Read one AOD variable for one month and flatten it to a vector on the
# (possibly lat-flipped) grid. Replaces the four copy-pasted read blocks the
# original TODO asked to generalise.
read_aod_month <- function(var, month = 1) {
  nc <- nc_open(aod_file(var))
  on.exit(nc_close(nc), add = TRUE)  # close handle even if ncvar_get fails
  data <- ncvar_get(nc, var)
  if (set_revlat) { data <- data[, nlat:1, ] }
  c(data[, , month])
}

data1 <- read_aod_month("od550ss")    # SEA
data2 <- read_aod_month("od550oa")    # LAND
data3 <- read_aod_month("od550bc")    # SOOT
data4 <- read_aod_month("od550dust")  # DESERT

# Write the data body (12 columns, row-major) and glue header + body together.
write.table(file = "tmp.NorESM2-MM.m01.txt",
            x = matrix(c(data1, data2, data3, data4), ncol = 12, byrow = TRUE),
            row.names = FALSE, col.names = FALSE)
system(command = "cat header.txt tmp.NorESM2-MM.m01.txt > NorESM2-MM.m01.txt; rm tmp.NorESM2-MM.m01.txt")
| /preproc-AERmon/aero_tegen/tegen_gen_from_NorESM2-MM.R | no_license | doblerone/CMIPtoHCLIM | R | false | false | 4,121 | r | # Available from NorESM2-MM:
# od550aer AOD from the ambient aerosols (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550aerh2o atmosphere_optical_thickness_due_to_water_in_ambient_aerosol_particles
# od550bc atmosphere_optical_thickness_due_to_black_carbon_ambient_aerosol
# od550csaer AOD from the ambient aerosols in clear skies if od550aer is for all-sky (i.e., includes aerosol water). Does not include AOD from stratospheric aerosols if these are prescribed but includes other possible background aerosol types.
# od550dust atmosphere_optical_thickness_due_to_dust_ambient_aerosol_particles
# od550lt1aer od550 due to particles with wet diameter less than 1 um (ambient here means wetted). When models do not include explicit size information, it can be assumed that all anthropogenic aerosols and natural secondary aerosols have diameter less than 1 um.
# od550oa atmosphere_optical_thickness_due_to_particulate_organic_matter_ambient_aerosol_particles
# od550so4 atmosphere_optical_thickness_due_to_sulfate_ambient_aerosol_particles
# od550ss atmosphere_optical_thickness_due_to_sea_salt_ambient_aerosol_particles
# First attempt: use
# od550ss for the first class "SEA"
# od550oa for the second class "LAND"
# od550bc for the third class "SOOT"
# od550dust for the fourth class "DESERT"
# The following are then not used:
# od550aer
# od550aerh20
# od550csaer
# od550lt1aer (Should we consider adding this to the SOOT class?)
# od550so4
# Download data from Nird:
# cd ~/HCLIM/aerosols/aero_tegen/NorESM2-MM
# scp "nird:/projects/NS9034K/CMIP6/CMIP/NCC/NorESM2-MM/historical/r1i1p1f1/AERmon/od550*/gn/latest/od550*198001-198912.nc" .
library(ncdf4)
library(fields)
# Read od550ss
set_revlat=F
nc <- nc_open("NorESM2-MM/od550ss_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
lat <- nc$dim$lat$vals
lon <- nc$dim$lon$vals
nc_close(nc)
if (length(dim(lat)) > 1) { stop(" The lat variable has more than one dimension. Only regular grids supported so far.") }
dlat <- diff(lat)
if (any(dlat>0) & any(dlat<0) ) { stop("Error while reading lat. Values are both increasing and decreasing.")}
if (all(dlat > 0)) { lat <- rev(lat); set_revlat=T } # go from north to south
nlat <- length(lat)
if (length(dim(lon)) > 1) { stop(" The lon variable has more than one dimension. Only regular grids supported so far.") }
dlon <- diff(lon)
if (any(dlon>0) & any(dlon<0) ) { stop("Error while reading lon. Values are both increasing and decreasing.")}
nlon <- length(lon)
# generate the first rows in Tegen file (lat and lon values)
if ((nlat + nlon) %% 5 != 0) { stop("nlon + nlat not divisible by 5 (the number of columns). Implement a workaround.") }
header <- matrix(c(lat,lon),ncol=5,byrow = T)
write.table(matrix(header,ncol=5),file="header.txt", sep = " ",row.names = F, col.names = F)
# TODO: generalise by using names(nc$var) to identify the variable name
# make function that takes 4 files as argument
nc <- nc_open("NorESM2-MM/od550ss_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
data <- ncvar_get(nc,"od550ss"); nc_close(nc)
if (set_revlat) { data <- data[,nlat:1,] }
data1 <- c(data[,,1])
nc <- nc_open("NorESM2-MM/od550oa_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
data <- ncvar_get(nc,"od550oa"); nc_close(nc)
if (set_revlat) { data <- data[,nlat:1,] }
data2 <- c(data[,,1])
nc <- nc_open("NorESM2-MM/od550bc_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
data <- ncvar_get(nc,"od550bc"); nc_close(nc)
if (set_revlat) { data <- data[,nlat:1,] }
data3 <- c(data[,,1])
nc <- nc_open("NorESM2-MM/od550dust_AERmon_NorESM2-MM_historical_r1i1p1f1_gn_198001-198912.nc")
data <- ncvar_get(nc,"od550dust"); nc_close(nc)
if (set_revlat) { data <- data[,nlat:1,] }
data4 <- c(data[,,1])
write.table(file="tmp.NorESM2-MM.m01.txt",x = matrix(c(data1,data2,data3,data4),ncol=12,byrow = T), row.names=F, col.names=F)
system(command = "cat header.txt tmp.NorESM2-MM.m01.txt > NorESM2-MM.m01.txt; rm tmp.NorESM2-MM.m01.txt")
|
#!/usr/bin/Rscript --quiet
# Plot latency statistics from a one-column perftest CSV.
# NOTE(review): the division by 1e6 implies column V1 is in nanoseconds,
# producing milliseconds -- confirm against the producer of the CSV.
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  stop("Usage: perftest-plot CSV")
}
stats <- read.table(args[1], sep = ",", header = FALSE)
x <- stats$V1 / 1000000
png(filename = "stats.png", width = 1024, height = 768)
par(mfrow = c(2, 2))
plot(x, type = "l", main = "Latency (ms)", xlab = "MsgSeqNum", ylab = "Latency (ms)")
hist(x[x < 1], breaks = 100, main = "0 ms < Latency (ms) < 1 ms", xlab = "Latency (ms)")
# BUG FIX: this panel's title previously read "1 ms < Latency (ms) < 1 ms"
# although the data filter selects the 1-5 ms bucket.
hist(x[x > 1 & x < 5], breaks = 100, main = "1 ms < Latency (ms) < 5 ms", xlab = "Latency (ms)")
hist(x[x > 5], breaks = 100, main = "Latency (ms) > 5 ms", xlab = "Latency (ms)")
# BUG FIX: the device was never closed; close it so stats.png is fully flushed.
dev.off()
| /scripts/perftest-plot | no_license | kro/fixengine | R | false | false | 598 | #!/usr/bin/Rscript --quiet
args = commandArgs(trailingOnly = T)
if (length(args) == 0) {
stop("Usage: perftest-plot CSV")
}
stats <- read.table(args[1], sep=",", header=F)
x <- stats$V1 / 1000000
png(filename="stats.png", width=1024, height=768)
par(mfrow=c(2,2))
plot(x, type="l", main="Latency (ms)", xlab="MsgSeqNum", ylab="Latency (ms)")
hist(x[x < 1], breaks=100, main="0 ms < Latency (ms) < 1 ms", xlab="Latency (ms)")
hist(x[x > 1 & x < 5], breaks=100, main="1 ms < Latency (ms) < 1 ms", xlab="Latency (ms)")
hist(x[x > 5], breaks=100, main="Latency (ms) > 5 ms", xlab="Latency (ms)")
| |
##############################################################
# Class Project for Coursera "Getting and Cleaning Data" class
##############################################################
#
# read metadata shared by both data splits
#
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt",
                              col.names = c('activity_id', 'activity'))
features <- read.table("UCI HAR Dataset/features.txt",
                       col.names = c('colno', 'feature'))

# Read one split ("train" or "test") of the UCI HAR data set, keeping only
# the mean() and std() feature columns, with the subject and activity_id
# columns attached. Replaces the byte-for-byte duplicated Step 1 / Step 2
# blocks of the original script.
read_har_split <- function(split) {
  base <- file.path("UCI HAR Dataset", split)
  subj <- read.table(file.path(base, paste0("subject_", split, ".txt")),
                     col.names = 'subject')
  x_full <- read.table(file.path(base, paste0("X_", split, ".txt")),
                       col.names = features$feature)
  x_mean <- subset(x_full, select = grepl(".*mean\\(\\).*", features$feature))
  x_std <- subset(x_full, select = grepl(".*std\\(\\).*", features$feature))
  y <- read.table(file.path(base, paste0("y_", split, ".txt")),
                  col.names = 'activity_id')
  cbind(subj, x_mean, x_std, y)
}

##############################################################
#
# Steps 1 and 2: read training and test data
#
dataset_training <- read_har_split("train")
dataset_testing <- read_har_split("test")

##############################################################
#
# Step 3: concatenate datasets.
#
dataset <- rbind(dataset_training, dataset_testing)

##############################################################
#
# Step 4: compute average values, grouped by subject and activity id
#
dataset <- aggregate(dataset, by = list(dataset$subject, dataset$activity_id),
                     FUN = mean, simplify = TRUE)

##############################################################
#
# Step 5: now create graphs
#
# NOTE(review): png() is provided by grDevices, not the png package;
# library(png) is kept only to preserve the original set of loaded packages.
library(png)
png("BodyAcc.png")
with(dataset, {
  # Activity ids map to the labels spelled out in the legend below.
  boxplot(tBodyAccMag.mean.. ~ activity_id, xlab = "Activity", ylab = "Acceleration")
  legend("topright", legend = c('1 - Walking', '2 - Upstairs', '3 - Downstairs',
                                '4 - Sitting', '5 - Standing', '6 - Resting'))
})
dev.off()
| /smartphones/data-cleaning/run_graph.R | permissive | beargiles/datasciencecoursera | R | false | false | 2,417 | r | ##############################################################
# Class Project for Coursera "Getting and Cleaning Data" class
##############################################################
#
# read metadata
#
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt", col.names=c('activity_id', 'activity'))
features <- read.table("UCI HAR Dataset/features.txt", col.names = c('colno', 'feature'))
##############################################################
#
# Step 1: read training data, keeping only columns containing
# mean() and std() in the column name.
#
subj <- read.table("UCI HAR Dataset/train/subject_train.txt",
col.names='subject')
x_full <- read.table("UCI HAR Dataset/train/X_train.txt",
col.names=features$feature)
x_mean <- subset(x_full, select = grepl(".*mean\\(\\).*", features$feature))
x_std <- subset(x_full, select = grepl(".*std\\(\\).*", features$feature))
y <- read.table("UCI HAR Dataset/train/y_train.txt",
col.names='activity_id')
dataset_training <- cbind(subj, x_mean, x_std, y)
##############################################################
#
# Step 2: read test data, keeping only columns containing
# mean() and std() in the column name.
#
subj <- read.table("UCI HAR Dataset/test/subject_test.txt",
col.names='subject')
x_full <- read.table("UCI HAR Dataset/test/X_test.txt",
col.names=features$feature)
x_mean <- subset(x_full, select = grepl(".*mean\\(\\).*", features$feature))
x_std <- subset(x_full, select = grepl(".*std\\(\\).*", features$feature))
y <- read.table("UCI HAR Dataset/test/y_test.txt",
col.names='activity_id')
dataset_testing <- cbind(subj, x_mean, x_std, y)
##############################################################
#
# Step 3: concatenate datasets.
#
dataset <- rbind(dataset_training, dataset_testing)
##############################################################
#
# Step 4: compute average values, grouped by subject and activity id
#
dataset <- aggregate(dataset, by=list(dataset$subject, dataset$activity_id), FUN=mean, simplify=TRUE)
##############################################################
#
# Step 5: now create graphs
#
library(png)
png("BodyAcc.png")
with(dataset, {
boxplot(tBodyAccMag.mean.. ~ activity_id, xlab="Activity", ylab="Acceleration")
legend("topright", legend=c('1 - Walking', '2 - Upstairs', '3 - Downstairs', '4 - Sitting', '5 - Standing', '6 - Resting'))
})
dev.off()
|
# Auto-extracted example script for the gunsales package's analysis() help
# page. The "###" lines are Rd example-section markers, and the "## Not run:"
# block means the ##D lines are commented out rather than executed when this
# script is run as-is.
library(gunsales)
### Name: analysis
### Title: Run Statistical Analysis of Monthly Background Checks of Gun
### Purchase
### Aliases: analysis
### ** Examples
## Not run:
##D gs <- analysis()
##D plot_gunsales(gs)
##D ggplot_gunsales(gs)
## End(Not run)
| /data/genthat_extracted_code/gunsales/examples/analysis.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 271 | r | library(gunsales)
### Name: analysis
### Title: Run Statistical Analysis of Monthly Background Checks of Gun
### Purchase
### Aliases: analysis
### ** Examples
## Not run:
##D gs <- analysis()
##D plot_gunsales(gs)
##D ggplot_gunsales(gs)
## End(Not run)
|
## QUANTIFY INCLUDED AND EXCLUDED POPULATION (FIGURE 1)
## Nitrate in drinking water and spontaneous preterm birth
## Author: A. Sherris

## Define inclusion criteria and find number of births included at each step

# raw birth data starts with 6,226,183 births
# see "Data flow chart_for_Allie_20Jun2019.doc" for exclusion criteria
# applied prior to data extract
births_raw <- read_csv("E:/Projects/WaterExposure/Data/For_allie_CA_yr0011_05JUL2019.csv")

# determine counties in which maternal residences are located;
# births outside CA will be excluded
births_counties <- births_raw %>%
  select(brthid, lat, long) %>%
  # remove invalid lat/long (required to convert to spatial)
  filter(!is.na(lat)) %>%
  # convert to spatial object (crs_geo / crs_projected are defined upstream)
  st_as_sf(coords = c("long", "lat"), crs = crs_geo) %>%
  st_transform(crs_projected) %>%
  # join with county polygons
  st_join(ca_counties,
          join = st_intersects) %>%
  st_drop_geometry() %>%
  # a few births sit on the border of two counties, producing duplicates;
  # keep one row per birth
  distinct(brthid, .keep_all = TRUE)

# join county assignments to raw data
births_flowchart <- births_raw %>%
  left_join(births_counties)

write_output(births_counties, "1_data_clean")

# define starting population: all singleton births joined with county data,
# excluding one birth with plural indicated by birth certificate.
# starting population value for figure 1 includes births excluded by Wei.
pop_start <- nrow(births_flowchart) +
  13965 +  # invalid sfn
  27758 -  # excluded by Wei because invalid lat/long
  1        # plural indicated by birth certificate PLURA == 1

# BUG FIX: this previously read `pop1_start - 13965`, but no object named
# `pop1_start` exists (the starting count is `pop_start`), so the script
# errored at run time.
pop1_valid_sfn <- pop_start - 13965

# Each population below adds one inclusion criterion to the previous one.
# with() replaces the original attach(births_flowchart), which was never
# detach()ed and so leaked the data-frame columns onto the search path.

# births with valid lat/long
# 27258 excluded by Wei; 3990 additionally excluded
pop2_valid_lat <- with(births_flowchart,
                       sum(PLURA == 0 &
                             !is.na(long)))  # 6,222,192

# births with valid geocode to Street or ZIP level,
# sufficient for exposure assessment to public water system
pop3_valid_geocode <- with(births_flowchart,
                           sum(PLURA == 0 &
                                 !is.na(long) &
                                 MATCHED != "City"))  # 6,221,558

# births in California
pop4_in_CA <- with(births_flowchart,
                   sum(PLURA == 0 &
                         !is.na(long) &
                         MATCHED != "City" &
                         !is.na(county)))  # 6,181,160

# births with valid gestational age and birth weight
pop5_valid_ga_bw <- with(births_flowchart,
                         sum(PLURA == 0 &
                               !is.na(long) &
                               MATCHED != "City" &
                               !is.na(county) &
                               Final_gestwk_valid == 1 &
                               bwt_range == 1))  # 5,678,842

# better link between OSHPD and birth certificate, and better match with VSB,
# PDDI, and PDDM for maternal ICD9 codes important for Diabetes, HTN, and
# PTB subtyping
pop6_linked <- with(births_flowchart,
                    sum(PLURA == 0 &
                          !is.na(long) &
                          MATCHED != "City" &
                          !is.na(county) &
                          Final_gestwk_valid == 1 &
                          bwt_range == 1 &
                          link == 1 &
                          linkedB %in% c("Y", "M")))  # 5,551,051

# spontaneous PTB, no complications
pop7_spont_PTB <- with(births_flowchart,
                       sum(PLURA == 0 &
                             !is.na(long) &
                             MATCHED != "City" &
                             !is.na(county) &
                             Final_gestwk_valid == 1 &
                             bwt_range == 1 &
                             link == 1 &
                             linkedB %in% c("Y", "M") &
                             !is.na(prem_5cat_spon_nocomp)))  # 4,698,830

# 4,698,830 births in study population

# find number excluded due to no exposure assessment
load("data/processed/maternal_pwsid/births_study_pop_pws_final.RData")
load("data/processed/births_exposure/births_case_control.RData")
n_with_pwsid <- sum(!is.na(births_study_pop_pws_final$pwsid))
n_with_exposure <- nrow(births_case_control)

## find number of siblings
load("data/processed/births_exposure/sibs_ipi.RData")
load("data/processed/births_exposure/sibs_consecutive.RData")

# mothers appearing more than once: all of their births are siblings
sibs_all <- births_case_control %>%
  filter(mat_id %in% mat_id[which(duplicated(mat_id))])
n_sibs <- nrow(sibs_all)
# NOTE(review): assumes sibs_ipi.RData contains an object named `siblings_ipi`
# with an `ipi_days` column -- confirm.
n_good_ipi <- n_sibs - sum(siblings_ipi$ipi_days < 36, na.rm = TRUE)
n_consecutive_sibs <- nrow(sibs_consecutive)

# combine and save
# NOTE(review): `inclusion_table` is not defined in this script; it is assumed
# to exist in the session (created or sourced upstream) -- confirm.
# The label "conscutive sibs" is a typo in the original but is kept verbatim
# to avoid changing labels that downstream code may match on.
figure1_data <- inclusion_table %>%
  bind_rows(tibble(criteria = c("start", "valid_sfn",
                                "valid_lat", "valid_geocode",
                                "pop_in_CA", "valid_ga_bw",
                                "linked", "spont_PTB",
                                "with pwsid", "with exposure",
                                "all sibs", "good ipi",
                                "conscutive sibs"),
                   included = c(pop_start, pop1_valid_sfn,
                                pop2_valid_lat, pop3_valid_geocode,
                                pop4_in_CA, pop5_valid_ga_bw,
                                pop6_linked, pop7_spont_PTB,
                                n_with_pwsid, n_with_exposure,
                                n_sibs, n_good_ipi,
                                n_consecutive_sibs))) %>%
  mutate(excluded = included - lag(included))

# save figure data
write_output(figure1_data, "output/visualizations/1_manuscript/")
| /code/5_visualization/1_manuscript/4_Fig1_inclusion_flowchart.R | permissive | chrisleboa/nitrate-and-preterm-birth | R | false | false | 5,652 | r | ## QUANTIFY INCLUDED AND EXCLUDED POPULATION (FIGURE 1)
## Nitrate in drinking water and spontaneous preterm birth
## Author: A. Sherris
## Define inclusion criteria and find number of births included at each step
# raw birth data starts with 6,226,183 births
# see "Data flow chart_for_Allie_20Jun2019.doc" for exclusion criteria applied prior to data extract
births_raw <- read_csv("E:/Projects/WaterExposure/Data/For_allie_CA_yr0011_05JUL2019.csv")
# determine counties in which maternal residences are located
# births outside CA will be excluded
births_counties <- births_raw %>%
select(brthid, lat, long) %>%
# remove invalid lat/long (required to convert to spatial)
filter(!is.na(lat)) %>%
# convert to spatial object
st_as_sf(coords = c("long", "lat"), crs = crs_geo) %>%
st_transform(crs_projected) %>%
# join with county polygon
st_join(ca_counties,
join = st_intersects) %>%
st_drop_geometry() %>%
# a few births are on border of two counties, resulting in duplicates
# remove duplicates
distinct(brthid, .keep_all = T)
# join county assignments to raw data
births_flowchart <- births_raw %>%
left_join(births_counties)
write_output(births_counties, "1_data_clean")
# define starting population
# all singleton births
# joined with county data
# exclude one birth with plural indicated by birth certificate
# starting population value for figure 1 includes births excluded by Wei
pop_start <- nrow(births_flowchart) +
13965 + # invalid sfn
27758 - # excluded by wei because invalid lat/long
1 # plural indicated by birth certificate PLURA == 1
pop1_valid_sfn <- pop1_start - 13965
# births with valid lat/long
# 27258 excluded by Wei
# 3990 additionally excluded
attach(births_flowchart)
pop2_valid_lat <- sum(PLURA == 0 &
!is.na(long)) # 6,222,192
# births with valid geocode to Street or ZIP level
# sufficient for exposure assessment to public water system
pop3_valid_geocode <- sum(PLURA == 0 &
!is.na(long) &
MATCHED != "City") # 6,221,558
# births in California
pop4_in_CA <- sum(PLURA == 0 &
!is.na(long) &
MATCHED != "City" &
!is.na(county)) # 6,181,160
# births with valid gestational age and birth weight
pop5_valid_ga_bw <- sum(PLURA == 0 &
!is.na(long) &
MATCHED != "City" &
!is.na(county) &
Final_gestwk_valid == 1 &
bwt_range == 1) # 5,678,842
# better link between OSHPD and birth certificate
# and bettter match with VSB, PDDI, and PDDM for maternal ICD9
# codes important for Diabetes, HTN, and PTB subtyping
pop6_linked <- sum(PLURA == 0 &
!is.na(long) &
MATCHED != "City" &
!is.na(county) &
Final_gestwk_valid == 1 &
bwt_range == 1 &
link == 1 &
linkedB %in% c("Y", "M")) # 5,551,051
# spontaneous PTB no complications
pop7_spont_PTB <- sum(PLURA == 0 &
!is.na(long) &
MATCHED != "City" &
!is.na(county) &
Final_gestwk_valid == 1 &
bwt_range == 1 &
link == 1 &
linkedB %in% c("Y", "M") &
!is.na(prem_5cat_spon_nocomp)) # 4,698,830
# 4,698,830 births in study population
# find number excluded due to no exposure assessment
load("data/processed/maternal_pwsid/births_study_pop_pws_final.RData")
load("data/processed/births_exposure/births_case_control.RData")
n_with_pwsid <- sum(!is.na(births_study_pop_pws_final$pwsid))
n_with_exposure <- nrow(births_case_control)
## find number of siblings
load("data/processed/births_exposure/sibs_ipi.RData")
load("data/processed/births_exposure/sibs_consecutive.RData")
sibs_all <- births_case_control %>%
filter(mat_id %in% mat_id[which(duplicated(mat_id))])
n_sibs <- nrow(sibs_all)
n_good_ipi <- n_sibs - sum(siblings_ipi$ipi_days<36, na.rm=T)
n_consecutive_sibs <- nrow(sibs_consecutive)
# combine and save
figure1_data <- inclusion_table %>%
bind_rows(tibble(criteria = c("start", "valid_sfn",
"valid_lat", "valid_geocode",
"pop_in_CA", "valid_ga_bw",
"linked", "spont_PTB",
"with pwsid", "with exposure",
"all sibs", "good ipi",
"conscutive sibs"),
included = c(pop_start, pop1_valid_sfn,
pop2_valid_lat, pop3_valid_geocode,
pop4_in_CA, pop5_valid_ga_bw,
pop6_linked, pop7_spont_PTB,
n_with_pwsid, n_with_exposure,
n_sibs, n_good_ipi,
n_consecutive_sibs
))) %>%
mutate(excluded = included - lag(included))
# save figure data
write_output(figure1_data, "output/visualizations/1_manuscript/")
|
\name{HistData-package}
\Rdversion{1.1}
\alias{HistData-package}
\alias{HistData}
\docType{package}
\title{
Data sets from the History of Statistics and Data Visualization}
\description{
The HistData package provides a collection of data sets
that are interesting and important in the history of statistics and data visualization.
The goal of the package is to make these available, both for instructional use
and for historical research.
}
\details{
\tabular{ll}{
Package: \tab HistData\cr
Type: \tab Package\cr
Version: \tab 0.8-9\cr
Date: \tab 2021-10-23\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
LazyData: \tab yes\cr
}
Some of the data sets have examples which reproduce an historical graph or analysis.
These are meant mainly as starters for more extensive re-analysis or graphical
elaboration. Some of these present graphical challenges to reproduce in R.
They are part of a program of research called \emph{statistical historiography},
meaning the use of statistical methods to study problems and questions in the
history of statistics and graphics.
Descriptions of each DataSet can be found using \code{help(DataSet)};
\code{example(DataSet)} will likely show applications similar to the
historical use.
Data sets included in the HistData package are:
\describe{
\item{\code{\link{Arbuthnot}}}{Arbuthnot's data on male and female birth ratios in London from 1629-1710}
\item{\code{\link{Armada}}}{The Spanish Armada}
\item{\code{\link{Bowley}}}{Bowley's data on values of British and Irish trade, 1855-1899}
\item{\code{\link{Breslau}}}{Halley's Breslau Life Table}
\item{\code{\link{Cavendish}}}{Cavendish's 1798 determinations of the density of the earth}
\item{\code{\link{ChestSizes}}}{Quetelet's data on chest measurements of Scottish militiamen}
\item{\code{\link{Cholera}}}{William Farr's Data on Cholera in London, 1849}
\item{\code{\link{CushnyPeebles}}}{Cushny-Peebles data: Soporific effects of scopolamine derivatives}
\item{\code{\link{Dactyl}}}{Edgeworth's counts of dactyls in Virgil's Aeneid}
\item{\code{\link{DrinksWages}}}{Elderton and Pearson's (1910) data on drinking and wages}
\item{\code{\link{Fingerprints}}}{Waite's data on Patterns in Fingerprints}
\item{\code{\link{Galton}}}{Galton's data on the heights of parents and their children}
\item{\code{\link{GaltonFamilies}}}{Galton's data on the heights of parents and their children, by family}
\item{\code{\link{Guerry}}}{Data from A.-M. Guerry, "Essay on the Moral Statistics of France"}
\item{\code{\link{HalleyLifeTable}}}{Halley's Life Table}
\item{\code{\link{Jevons}}}{W. Stanley Jevons' data on numerical discrimination}
\item{\code{\link{Langren}}}{van Langren's data on longitude distance between Toledo and Rome}
\item{\code{\link{Macdonell}}}{Macdonell's data on height and finger length of criminals, used by Gosset (1908)}
\item{\code{\link{Mayer}}}{Mayer's data on the libration of the moon}
\item{\code{\link{Michelson}}}{Michelson's 1879 determinations of the velocity of light}
\item{\code{\link{Minard}}}{Data from Minard's famous graphic map of Napoleon's march on Moscow}
\item{\code{\link{Nightingale}}}{Florence Nightingale's data on deaths from various causes in the Crimean War}
\item{\code{\link{OldMaps}}}{Latitudes and Longitudes of 39 Points in 11 Old Maps}
\item{\code{\link{PearsonLee}}}{Pearson and Lee's 1896 data on the heights of parents and children classified by gender}
\item{\code{\link{PolioTrials}}}{Polio Field Trials Data on the Salk vaccine}
\item{\code{\link{Prostitutes}}}{Parent-Duchatelet's time-series data on the number of prostitutes in Paris}
\item{\code{\link{Pyx}}}{Trial of the Pyx}
\item{\code{\link{Quarrels}}}{Statistics of Deadly Quarrels}
\item{\code{\link{Saturn}}}{Laplace's Saturn data}
\item{\code{\link{Snow}}}{John Snow's map and data on the 1854 London Cholera outbreak}
\item{\code{\link{Virginis}}}{J. F. W. Herschel's data on the orbit of the twin star gamma Virginis}
\item{\code{\link{Wheat}}}{Playfair's data on wages and the price of wheat}
\item{\code{\link{Yeast}}}{Student's (1906) Yeast Cell Counts}
\item{\code{\link{ZeaMays}}}{Darwin's Heights of Cross- and Self-fertilized Zea May Pairs}
}
}
\author{
Michael Friendly
Maintainer: Michael Friendly <friendly@yorku.ca>
}
\references{
Friendly, M. (2007). A Brief History of Data Visualization.
In Chen, C., Hardle, W. & Unwin, A. (eds.)
\emph{Handbook of Computational Statistics: Data Visualization}, Springer-Verlag, III, Ch. 1, 1-34.
Friendly, M. & Denis, D. (2001).
Milestones in the history of thematic cartography, statistical graphics, and data visualization.
\url{http://datavis.ca/milestones/}
Friendly, M. & Denis, D. (2005). The early origins and development of the scatterplot.
\emph{Journal of the History of the Behavioral Sciences},
41, 103-130.
Friendly, M. & Sigal, M. & Harnanansingh, D. (2016).
"The Milestones Project: A Database for the History of Data Visualization,"
In Kostelnick, C. & Kimball, M. (ed.),
\emph{Visible Numbers: The History of Data Visualization}, Ashgate Press, Chapter 10.
Friendly, M. & Wainer, H. (in progress). \emph{The Origin of Graphical Species}.
Harvard University Press.
}
\seealso{
\code{\link{Arbuthnot}}, \code{\link{Armada}}, \code{\link{Bowley}},
\code{\link{Cavendish}}, \code{\link{ChestSizes}}, \code{\link{Cholera}}, \code{\link{CushnyPeebles}},
\code{\link{Dactyl}}, \code{\link{DrinksWages}}, \code{\link{Fingerprints}},
\code{\link{Galton}}, \code{\link{GaltonFamilies}}, \code{\link{Guerry}},
\code{\link{HalleyLifeTable}},
\code{\link{Jevons}}, \code{\link{Langren}},
\code{\link{Macdonell}}, \code{\link{Michelson}}, \code{\link{Minard}},
\code{\link{Nightingale}},
\code{\link{OldMaps}}, \code{\link{PearsonLee}},
\code{\link{PolioTrials}}, \code{\link{Prostitutes}}, \code{\link{Pyx}},
\code{\link{Quarrels}},
\code{\link{Snow}},
\code{\link{Wheat}},
\code{\link{Yeast}},
\code{\link{ZeaMays}}
Other packages containing data sets of historical interest include:
The \code{\link[Guerry]{Guerry-package}}, containing maps and
other data sets related to Guerry's (1833) \emph{Moral Statistics of France}.
\code{morsecodes} from the (defunct) \pkg{xgobi} package
for data from Rothkopf (1957) on errors in learning morse code, a classical
example for MDS.
The \pkg{psych} package, containing Galton's \code{peas} data.
% \code{\link[psych]{peas}} data.
The same data set is contained in \pkg{alr4} as \code{\link[alr4]{galtonpeas}}.
The \pkg{agridat} package contains a large number of agricultural data sets,
including some extra data sets related to
the classical barley data
(\code{\link[MASS]{immer}} and \code{\link[lattice]{barley}})
from Immer (1934):
\code{\link[agridat]{minnesota.barley.yield}},
\code{\link[agridat]{minnesota.barley.weather}}.
}
\keyword{ package }
\examples{
# see examples for the separate data sets
}
| /man/HistData-package.Rd | no_license | spiritu-santi/HistData | R | false | false | 7,074 | rd | \name{HistData-package}
\Rdversion{1.1}
\alias{HistData-package}
\alias{HistData}
\docType{package}
\title{
Data sets from the History of Statistics and Data Visualization}
\description{
The HistData package provides a collection of data sets
that are interesting and important in the history of statistics and data visualization.
The goal of the package is to make these available, both for instructional use
and for historical research.
}
\details{
\tabular{ll}{
Package: \tab HistData\cr
Type: \tab Package\cr
Version: \tab 0.8-9\cr
Date: \tab 2021-10-23\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
LazyData: \tab yes\cr
}
Some of the data sets have examples which reproduce an historical graph or analysis.
These are meant mainly as starters for more extensive re-analysis or graphical
elaboration. Some of these present graphical challenges to reproduce in R.
They are part of a program of research called \emph{statistical historiography},
meaning the use of statistical methods to study problems and questions in the
history of statistics and graphics.
Descriptions of each DataSet can be found using \code{help(DataSet)};
\code{example(DataSet)} will likely show applications similar to the
historical use.
Data sets included in the HistData package are:
\describe{
\item{\code{\link{Arbuthnot}}}{Arbuthnot's data on male and female birth ratios in London from 1629-1710}
\item{\code{\link{Armada}}}{The Spanish Armada}
\item{\code{\link{Bowley}}}{Bowley's data on values of British and Irish trade, 1855-1899}
\item{\code{\link{Breslau}}}{Halley's Breslau Life Table}
\item{\code{\link{Cavendish}}}{Cavendish's 1798 determinations of the density of the earth}
\item{\code{\link{ChestSizes}}}{Quetelet's data on chest measurements of Scottish militiamen}
\item{\code{\link{Cholera}}}{William Farr's Data on Cholera in London, 1849}
\item{\code{\link{CushnyPeebles}}}{Cushny-Peebles data: Soporific effects of scopolamine derivatives}
\item{\code{\link{Dactyl}}}{Edgeworth's counts of dactyls in Virgil's Aeneid}
\item{\code{\link{DrinksWages}}}{Elderton and Pearson's (1910) data on drinking and wages}
\item{\code{\link{Fingerprints}}}{Waite's data on Patterns in Fingerprints}
\item{\code{\link{Galton}}}{Galton's data on the heights of parents and their children}
\item{\code{\link{GaltonFamilies}}}{Galton's data on the heights of parents and their children, by family}
\item{\code{\link{Guerry}}}{Data from A.-M. Guerry, "Essay on the Moral Statistics of France"}
\item{\code{\link{HalleyLifeTable}}}{Halley's Life Table}
\item{\code{\link{Jevons}}}{W. Stanley Jevons' data on numerical discrimination}
\item{\code{\link{Langren}}}{van Langren's data on longitude distance between Toledo and Rome}
\item{\code{\link{Macdonell}}}{Macdonell's data on height and finger length of criminals, used by Gosset (1908)}
\item{\code{\link{Mayer}}}{Mayer's data on the libration of the moon}
\item{\code{\link{Michelson}}}{Michelson's 1879 determinations of the velocity of light}
\item{\code{\link{Minard}}}{Data from Minard's famous graphic map of Napoleon's march on Moscow}
\item{\code{\link{Nightingale}}}{Florence Nightingale's data on deaths from various causes in the Crimean War}
\item{\code{\link{OldMaps}}}{Latitudes and Longitudes of 39 Points in 11 Old Maps}
\item{\code{\link{PearsonLee}}}{Pearson and Lee's 1896 data on the heights of parents and children classified by gender}
\item{\code{\link{PolioTrials}}}{Polio Field Trials Data on the Salk vaccine}
\item{\code{\link{Prostitutes}}}{Parent-Duchatelet's time-series data on the number of prostitutes in Paris}
\item{\code{\link{Pyx}}}{Trial of the Pyx}
\item{\code{\link{Quarrels}}}{Statistics of Deadly Quarrels}
\item{\code{\link{Saturn}}}{Laplace's Saturn data}
\item{\code{\link{Snow}}}{John Snow's map and data on the 1854 London Cholera outbreak}
\item{\code{\link{Virginis}}}{J. F. W. Herschel's data on the orbit of the twin star gamma Virginis}
\item{\code{\link{Wheat}}}{Playfair's data on wages and the price of wheat}
\item{\code{\link{Yeast}}}{Student's (1906) Yeast Cell Counts}
\item{\code{\link{ZeaMays}}}{Darwin's Heights of Cross- and Self-fertilized Zea May Pairs}
}
}
\author{
Michael Friendly
Maintainer: Michael Friendly <friendly@yorku.ca>
}
\references{
Friendly, M. (2007). A Brief History of Data Visualization.
In Chen, C., Hardle, W. & Unwin, A. (eds.)
\emph{Handbook of Computational Statistics: Data Visualization}, Springer-Verlag, III, Ch. 1, 1-34.
Friendly, M. & Denis, D. (2001).
Milestones in the history of thematic cartography, statistical graphics, and data visualization.
\url{http://datavis.ca/milestones/}
Friendly, M. & Denis, D. (2005). The early origins and development of the scatterplot.
\emph{Journal of the History of the Behavioral Sciences},
41, 103-130.
Friendly, M. & Sigal, M. & Harnanansingh, D. (2016).
"The Milestones Project: A Database for the History of Data Visualization,"
In Kostelnick, C. & Kimball, M. (ed.),
\emph{Visible Numbers: The History of Data Visualization}, Ashgate Press, Chapter 10.
Friendly, M. & Wainer, H. (in progress). \emph{The Origin of Graphical Species}.
Harvard University Press.
}
\seealso{
\code{\link{Arbuthnot}}, \code{\link{Armada}}, \code{\link{Bowley}},
\code{\link{Cavendish}}, \code{\link{ChestSizes}}, \code{\link{Cholera}}, \code{\link{CushnyPeebles}},
\code{\link{Dactyl}}, \code{\link{DrinksWages}}, \code{\link{Fingerprints}},
\code{\link{Galton}}, \code{\link{GaltonFamilies}}, \code{\link{Guerry}},
\code{\link{HalleyLifeTable}},
\code{\link{Jevons}}, \code{\link{Langren}},
\code{\link{Macdonell}}, \code{\link{Michelson}}, \code{\link{Minard}},
\code{\link{Nightingale}},
\code{\link{OldMaps}}, \code{\link{PearsonLee}},
\code{\link{PolioTrials}}, \code{\link{Prostitutes}}, \code{\link{Pyx}},
\code{\link{Quarrels}},
\code{\link{Snow}},
\code{\link{Wheat}},
\code{\link{Yeast}},
\code{\link{ZeaMays}}
Other packages containing data sets of historical interest include:
The \code{\link[Guerry]{Guerry-package}}, containing maps and
other data sets related to Guerry's (1833) \emph{Moral Statistics of France}.
\code{morsecodes} from the (defunct) \pkg{xgobi} package
for data from Rothkopf (1957) on errors in learning morse code, a classical
example for MDS.
The \pkg{psych} package, containing Galton's \code{peas} data.
% \code{\link[psych]{peas}} data.
The same data set is contained in \pkg{alr4} as \code{\link[alr4]{galtonpeas}}.
The \pkg{agridat} package contains a large number of data sets of agricultural data,
including some extra data sets related to
the classical barley data
(\code{\link[MASS]{immer}} and \code{\link[lattice]{barley}})
from Immer (1934):
\code{\link[agridat]{minnesota.barley.yield}},
\code{\link[agridat]{minnesota.barley.weather}}.
}
\keyword{ package }
\examples{
# see examples for the separate data sets
}
|
###############
#
# Programming assignment 3
# Part 2
# Marquin Smith
#
##############
rankhospital <- function(state, outcome, num = "best") {
  ## Rank hospitals within a state by 30-day death rate for a given outcome.
  ##
  ## Args:
  ##   state:   two-letter state abbreviation; must appear in the data
  ##   outcome: one of "heart attack", "heart failure", "pneumonia"
  ##   num:     rank to return - "best", "worst", or an integer rank
  ##
  ## Returns the hospital name at that rank (NA when num exceeds the number
  ## of hospitals with data); ties are broken alphabetically by name.

  ## Read outcome data (all columns as character; rates coerced below)
  data <- read.csv("outcome-of-care-measures.csv", colClasses = 'character')

  ## Validate inputs; %in% replaces the original element-by-element loops
  if (!(state %in% unique(data$State))) {
    stop("invalid state")
  }
  valid_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!(outcome %in% valid_outcomes)) {
    stop("invalid outcome")
  }

  ## Map the outcome to its 30-day mortality-rate column
  my_column <- switch(outcome,
                      "heart attack"  = 11,
                      "heart failure" = 17,
                      "pneumonia"     = 23)

  ## Coerce the rate column to numeric; "Not Available" becomes NA (with the
  ## same coercion warning the original produced)
  data[, my_column] <- as.numeric(data[, my_column])

  ## Keep hospital name (column 2) + rate for the state, drop missing rates
  subset1 <- data[data$State == state, c(2, my_column)]
  subset1 <- subset1[complete.cases(subset1), ]
  ## Order by rate, then alphabetically by hospital name to break ties
  subset1 <- subset1[order(subset1[, 2], subset1[, 1]), ]

  ## Resolve "best"/"worst"/numeric rank
  if (num == 'best') {
    my_rank <- 1
  } else if (num == 'worst') {
    my_rank <- nrow(subset1)
  } else {
    my_rank <- num
  }
  subset1$Hospital.Name[my_rank]
}
} | /R programming/Week 4/Week 4 Programming assignment/rprog_data_ProgAssignment3-data/rankhospital.R | no_license | quincysmiith/datasciencecoursera | R | false | false | 1,552 | r | ###############
#
# Programming assignment 3
# Part 2
# Marquin Smith
#
##############
rankhospital <- function(state, outcome, num = "best") {
  ## Return the name of the hospital holding rank `num` ("best", "worst",
  ## or an integer) within `state`, ranked by 30-day death rate for `outcome`.
  ## Ties are broken alphabetically by hospital name; an out-of-range rank
  ## yields NA.
  hosp <- read.csv("outcome-of-care-measures.csv", colClasses = 'character')

  ## Validate state against the data and outcome against the known set
  if (!any(unique(hosp$State) == state)) stop("invalid state")
  known_outcomes <- c("heart attack", "heart failure", "pneumonia")
  if (!any(known_outcomes == outcome)) stop("invalid outcome")

  ## Column index of the 30-day death rate for the chosen outcome
  rate_col <- c("heart attack" = 11,
                "heart failure" = 17,
                "pneumonia" = 23)[[outcome]]

  ## Coerce rates to numeric ("Not Available" -> NA), restrict to the state,
  ## drop incomplete rows, then sort by rate and hospital name
  hosp[, rate_col] <- as.numeric(hosp[, rate_col])
  ranked <- hosp[hosp$State == state, c(2, rate_col)]
  ranked <- ranked[complete.cases(ranked), ]
  ranked <- ranked[order(ranked[, 2], ranked[, 1]), ]

  rank_idx <- if (num == 'best') {
    1
  } else if (num == 'worst') {
    nrow(ranked)
  } else {
    num
  }
  ranked$Hospital.Name[rank_idx]
}
} |
setwd("~/Documents/OF_scaling")
#import and order data
OF = read.csv("OF.csv", header=T)
OF = OF[order(-OF$POPULATION),]
####### Generate the python commands to run Leitao et al. (2016) maximum likelihood estimations
t = dim(OF)[1]
listN = c(10, seq(30, t, 25)) #subset cities by batch of 25
l = 0
cm = data.frame()
for (n in listN){
o = OF[1:n,c("IDAIRE","POPULATION", "JOBS", "STOCKFDI", "FLUXFDI")]
rownames(o) = NULL
assign(paste0("OF", n), o)
for (v in c("JOBS", "STOCKFDI", "FLUXFDI")){
ov = o[,c("IDAIRE","POPULATION",v)]
write.table(x=ov, file=paste0("leitao/general/OF", n, v, ".txt"), sep="\t",row.names = F, quote = F)
for (m in c("ConstrainedDAnalysis", "ConstrainedDFixedBetaAnalysis")){
l = l + 1
cm[l, 1]= paste0("python analysis_run2.py 512 general ", m, " ", v, " POPULATION OF", n, v, ".txt")
}
}
write.csv(x=cm, file=paste0("leitao/commands.csv"), sep="\t",row.names = F, quote = F)
}
### Hurdle Models
# from http://data.library.virginia.edu/getting-started-with-hurdle-models/
# Ref on zero-inflated models:
# Cameron AC, Trivedi PK (2013). Regression Analysis of Count Data. Cambridge University Press, Cambridge.
# Kleiber C, Zeileis A (2008). Applied Econometrics with R. Springer-Verlag, New York. ISBN 978-0-387-77316-2.
# Zeileis A, Kleiber C, Jackman S (2008). “Regression Models for Count Data in R”. Journal of Statistical Software, 27(8). URL https://www.jstatsoft.org/article/view/v027i08.
require("AER")
require("pscl")
l = 0
tab = data.frame()
for (n in listN){
o = OF[1:n,c("IDAIRE","POPULATION", "JOBS", "STOCKFDI", "FLUXFDI")]
rownames(o) = NULL
assign(paste0("OF", n), o)
for (v in c("JOBS", "STOCKFDI", "FLUXFDI")){
df = o[,c("POPULATION",v)]
colnames(df) = c("Pop", "Y")
# simple fit with poisson
l = l+1
mod1 <- glm(Y ~ log(Pop), data = df, family = "poisson")
tab[l,"model"] = "Poisson"
tab[l,"y"] = v
tab[l,"x"] = "POPULATION"
tab[l,"alpha"] = summary(mod1)$coefficients[1,1]
tab[l,"beta"] = summary(mod1)$coefficients[2,1]
tab[l,"beta_error"] = summary(mod1)$coefficients[2,2]
tab[l,"pval"] = summary(mod1)$coefficients[2,4]
tab[l,"BIC"] = BIC(mod1)
tab[l,"file"] = paste0("OF", n)
if(min(df$Y) == 0){
l = l+1
mod.hurdle <- hurdle(Y ~ log(Pop), data = df, dist = "poisson", zero.dist = "binomial")
tab[l,"model"] = "zeroInflatedHurdle"
tab[l,"y"] = v
tab[l,"x"] = "POPULATION"
tab[l,"alpha"] = summary(mod.hurdle)$coefficients[[1]][1,1]
tab[l,"beta"] = summary(mod.hurdle)$coefficients[[1]][2,1]
tab[l,"beta_error"] = summary(mod.hurdle)$coefficients[[1]][2,2]
tab[l,"pval"] = summary(mod.hurdle)$coefficients[[1]][2,4]
tab[l,"alpha_zero"] = summary(mod.hurdle)$coefficients[[2]][1,1]
tab[l,"beta_zero"] = summary(mod.hurdle)$coefficients[[2]][2,1]
tab[l,"beta_error_zero"] = summary(mod.hurdle)$coefficients[[2]][2,2]
tab[l,"pval_zero"] = summary(mod.hurdle)$coefficients[[2]][2,4]
tab[l,"BIC"] = (-2* logLik(mod.hurdle)) + (4*log(n))
tab[l,"file"] = paste0("OF", n)
}
}
}
write.csv(x=tab, file=paste0("results_hurdle_poisson.csv"))
| /Scaling_MaxLikelihood_Hurdle_Poisson_Models.R | no_license | ClementineCttn/Scaling_MaxLikelihood_Hurdle_Poisson_Models | R | false | false | 3,246 | r | setwd("~/Documents/OF_scaling")
#import and order data
OF = read.csv("OF.csv", header=T)
OF = OF[order(-OF$POPULATION),]
####### Generate the python commands to run Leitao et al. (2016) maximum likelihood estimations
t = dim(OF)[1]
listN = c(10, seq(30, t, 25)) #subset cities by batch of 25
l = 0
cm = data.frame()
for (n in listN){
o = OF[1:n,c("IDAIRE","POPULATION", "JOBS", "STOCKFDI", "FLUXFDI")]
rownames(o) = NULL
assign(paste0("OF", n), o)
for (v in c("JOBS", "STOCKFDI", "FLUXFDI")){
ov = o[,c("IDAIRE","POPULATION",v)]
write.table(x=ov, file=paste0("leitao/general/OF", n, v, ".txt"), sep="\t",row.names = F, quote = F)
for (m in c("ConstrainedDAnalysis", "ConstrainedDFixedBetaAnalysis")){
l = l + 1
cm[l, 1]= paste0("python analysis_run2.py 512 general ", m, " ", v, " POPULATION OF", n, v, ".txt")
}
}
write.csv(x=cm, file=paste0("leitao/commands.csv"), sep="\t",row.names = F, quote = F)
}
### Hurdle Models
# from http://data.library.virginia.edu/getting-started-with-hurdle-models/
# Ref on zero-inflated models:
# Cameron AC, Trivedi PK (2013). Regression Analysis of Count Data. Cambridge University Press, Cambridge.
# Kleiber C, Zeileis A (2008). Applied Econometrics with R. Springer-Verlag, New York. ISBN 978-0-387-77316-2.
# Zeileis A, Kleiber C, Jackman S (2008). “Regression Models for Count Data in R”. Journal of Statistical Software, 27(8). URL https://www.jstatsoft.org/article/view/v027i08.
require("AER")
require("pscl")
l = 0
tab = data.frame()
for (n in listN){
o = OF[1:n,c("IDAIRE","POPULATION", "JOBS", "STOCKFDI", "FLUXFDI")]
rownames(o) = NULL
assign(paste0("OF", n), o)
for (v in c("JOBS", "STOCKFDI", "FLUXFDI")){
df = o[,c("POPULATION",v)]
colnames(df) = c("Pop", "Y")
# simple fit with poisson
l = l+1
mod1 <- glm(Y ~ log(Pop), data = df, family = "poisson")
tab[l,"model"] = "Poisson"
tab[l,"y"] = v
tab[l,"x"] = "POPULATION"
tab[l,"alpha"] = summary(mod1)$coefficients[1,1]
tab[l,"beta"] = summary(mod1)$coefficients[2,1]
tab[l,"beta_error"] = summary(mod1)$coefficients[2,2]
tab[l,"pval"] = summary(mod1)$coefficients[2,4]
tab[l,"BIC"] = BIC(mod1)
tab[l,"file"] = paste0("OF", n)
if(min(df$Y) == 0){
l = l+1
mod.hurdle <- hurdle(Y ~ log(Pop), data = df, dist = "poisson", zero.dist = "binomial")
tab[l,"model"] = "zeroInflatedHurdle"
tab[l,"y"] = v
tab[l,"x"] = "POPULATION"
tab[l,"alpha"] = summary(mod.hurdle)$coefficients[[1]][1,1]
tab[l,"beta"] = summary(mod.hurdle)$coefficients[[1]][2,1]
tab[l,"beta_error"] = summary(mod.hurdle)$coefficients[[1]][2,2]
tab[l,"pval"] = summary(mod.hurdle)$coefficients[[1]][2,4]
tab[l,"alpha_zero"] = summary(mod.hurdle)$coefficients[[2]][1,1]
tab[l,"beta_zero"] = summary(mod.hurdle)$coefficients[[2]][2,1]
tab[l,"beta_error_zero"] = summary(mod.hurdle)$coefficients[[2]][2,2]
tab[l,"pval_zero"] = summary(mod.hurdle)$coefficients[[2]][2,4]
tab[l,"BIC"] = (-2* logLik(mod.hurdle)) + (4*log(n))
tab[l,"file"] = paste0("OF", n)
}
}
}
write.csv(x=tab, file=paste0("results_hurdle_poisson.csv"))
|
#### Install the necessary packages if not available yet
# requireNamespace() is the documented way to test for an installed package;
# scanning rownames(installed.packages()) is slow and fragile (see ?installed.packages).
for (pkg in c("prophet", "forecast", "lubridate")) {
  if (!requireNamespace(pkg, quietly = TRUE)) install.packages(pkg)
}
suppressMessages(library(prophet))
suppressMessages(library(forecast))

############################################## INPUT #################################################
#### We expect the following variables passed in from the master script
# brand_name - the name of the brand
# brand_data - historical data frame with two columns (ds, y) for date and the forecasted metric
# metric     - the name of the forecasted metric
# actual_end - the last date of the historical data
# fc_periods - the numbers of days we want to forecast ahead
# predicts   - the data frame that collects the forecast results from the forecast
#              scripts for each brand

######################################## GET TRAINING DATA ############################################
train.raw <- brand_data[brand_data$ds <= actual_end, ]

#--- Remove outliers in the training data using the tsclean function in the package "forecast".
#    Flagged points are replaced by estimates combining the linear trend of the
#    whole training series with its seasonal components.
train.ts <- ts(train.raw$y, freq = 365)   # daily series, yearly frequency
train.ts.cleaned <- tsclean(train.ts, replace.missing = TRUE, lambda = NULL)

# create training data after removing outliers
train <- data.frame(ds = train.raw$ds,
                    y  = as.numeric(train.ts.cleaned))

############################### BUILD THE HOLIDAY DATA FRAME - IF ANY #################################
holidays <- NULL   # no holiday effects modelled for this brand

########################################### MODELLING #################################################
fit.prophet <- prophet(train,
                       holidays = holidays,
                       growth = 'linear',
                       yearly.seasonality = FALSE)

########################################## PREDICTING #################################################
future <- make_future_dataframe(fit.prophet, fc_periods)
forecast <- predict(fit.prophet, future)

############################ PASSING RESULTS BACK TO MASTER SCRIPT ####################################
#### Put the predicts (both historical and forecast periods) into the data frame "predicts"
brand_predicts <- data.frame(brand = brand_name,
                             activity_date = as.Date(forecast$ds),
                             metric = metric,
                             pred = forecast$yhat,
                             stringsAsFactors = FALSE)
predicts <- rbind(predicts, brand_predicts)
# The double arrow on the following line pushes the locally updated data frame
# back to the global environment so the master script sees the accumulated results
predicts <<- predicts | /All Brand Daily Forecast_R Server/R Scripts/Brand Forecasts/InStyle_Visits_Daily_Prophet.R | no_license | saravanannagappan/All-Brand-Daily-Forecast_R-Server | R | false | false | 3,004 | r | #### Install the neccessary packages if not available yet
#### Install the required packages when missing, then attach the ones we call
if (!("prophet" %in% rownames(installed.packages()))) install.packages("prophet")
if (!("forecast" %in% rownames(installed.packages()))) install.packages("forecast")
if (!("lubridate" %in% rownames(installed.packages()))) install.packages("lubridate")
suppressMessages(library(prophet))
suppressMessages(library(forecast))

## INPUT (supplied by the master script) ----------------------------------------------
# brand_name  - name of the brand being forecast
# brand_data  - history with columns ds (date) and y (metric value)
# metric      - name of the forecasted metric
# actual_end  - last date of the historical data
# fc_periods  - number of days to forecast ahead
# predicts    - accumulator data frame shared across the per-brand scripts

## GET TRAINING DATA ------------------------------------------------------------------
train.raw <- brand_data[brand_data$ds <= actual_end, ]

# Remove outliers with forecast::tsclean(): flagged points are replaced by the
# linear trend of the full series combined with its seasonal components.
train.ts <- ts(train.raw$y, freq = 365)
train.ts.cleaned <- tsclean(train.ts, replace.missing = TRUE, lambda = NULL)

# training set after outlier replacement
train <- data.frame(ds = train.raw$ds, y = as.numeric(train.ts.cleaned))

## HOLIDAYS ---------------------------------------------------------------------------
holidays <- NULL   # no holiday data frame for this brand

## MODELLING --------------------------------------------------------------------------
fit.prophet <- prophet(train, holidays = holidays, growth = 'linear',
                       yearly.seasonality = FALSE)

## PREDICTING -------------------------------------------------------------------------
future <- make_future_dataframe(fit.prophet, fc_periods)
forecast <- predict(fit.prophet, future)

## PASS RESULTS BACK TO THE MASTER SCRIPT ---------------------------------------------
brand_predicts <- data.frame(brand = brand_name,
                             activity_date = as.Date(forecast$ds),
                             metric = metric,
                             pred = forecast$yhat,
                             stringsAsFactors = FALSE)
predicts <- rbind(predicts, brand_predicts)
# The double-arrow assignment that follows exposes the updated accumulator to
# the global environment for the master script
predicts <<- predicts |
### START
# recall iris data
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
par(mfrow=c(1,1))
prc<-prcomp(iris[,1:4], scale=T)
# can distinguish 2 groups without class knowledge
pairs(prc$x)
pairs(prc$x,col=my.pch)
par(mfrow=c(1,1))
biplot(prc,scale=F)
prc$rotation
plot(prc$x[,1],prc$x[,2])
lines(c(0,prc$rotation[2,1]),c(0,prc$rotation[2,2]),col='green',lty=1,type='l',lwd=3)
### gene expression data
GSE10245.data = read.table(file='GSE10245.csv',header=T,sep=',')
gem1 = GSE10245.data[,2:51]
gr = GSE10245.data[,1]
prc = prcomp(gem1)
plot(prc)
### loadings can give structure to the clustering
par(mfrow=c(2,2))
biplot(prc,choices=c(1,2))
biplot(prc,choices=c(1,3))
biplot(prc,choices=c(1,4))
biplot(prc,choices=c(2,3))
pairs(prc$x[,1:4])
pairs(prc$x[,1:4],col=my.pch)
###
############ Hierarchical Clustering
hfit = hclust(dist(iris[,1:4]))
plot(hfit)
plot(hfit,labels=my.pch)
### methods can give very different results
par(mfrow=c(3,1))
hfit = hclust(dist(iris[,1:4]), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(iris[,1:4]), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(iris[,1:4]), method="average")
plot(hfit,labels=my.pch,main='average')
### simulate data
set.seed(123)
my.pch = rep(c(1,2,3),each=50)
# case 1 - not very good at clustering
xm = matrix(NA, 150, 2)
xm[,2] = rnorm(150,sd=10)
xm[1:50,1] = rnorm(50,sd=0.1) + 1
xm[51:100,1] = rnorm(50,sd=0.1) + 2
xm[101:150,1] = rnorm(50,sd=0.1) + 3
plot(xm[,1],xm[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm), method="single")
plot(hfit,labels=my.pch,main='single',col=my.pch)
hfit = hclust(dist(xm), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm), method="average")
plot(hfit,labels=my.pch,main='average')
# case 2 - better, for single link clustering
xm = matrix(NA, 150, 2)
xm[,2] = rnorm(150,sd=1)
xm[1:50,1] = rnorm(50,sd=0.1) + 1
xm[51:100,1] = rnorm(50,sd=0.1) + 2
xm[101:150,1] = rnorm(50,sd=0.1) + 3
plot(xm[,1],xm[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm), method="single")
plot(hfit,labels=my.pch,main='single',col=my.pch)
hfit = hclust(dist(xm), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm), method="average")
plot(hfit,labels=my.pch,main='average')
# case 3 - standardized data from case 2
xm2 = xm
xm2[,1] = (xm[,1]-mean(xm[,1]))/sd(xm[,1])
xm2[,2] = (xm[,2]-mean(xm[,2]))/sd(xm[,2])
plot(xm2[,1],xm2[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm2), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(xm2), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm2), method="average")
plot(hfit,labels=my.pch,main='average')
####################
xm3 = matrix(NA,150,2)
xm3[,1] = rnorm(150)+rep(c(5,10,15),each=50)
xm3[,2] = (xm3[,1]+rnorm(150,sd=0.25))/sqrt(1+0.25^2)
plot(xm3[,1],xm3[,2],col=my.pch)
par(mfrow=c(3,1),las=0)
hfit = hclust(dist(xm3), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(xm3), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm3), method="average")
plot(hfit,labels=my.pch,main='average')
abline(h=9,col='red')
abline(h=5,col='green')
### can extract clusters
pr = cutree(hfit,k=3)
table(my.pch,pr)
pr = cutree(hfit,k=6)
table(my.pch,pr)
pr = cutree(hfit,h=5)
table(my.pch,pr)
pr = cutree(hfit,h=9)
table(my.pch,pr)
### example
xy = cbind(c(2,3,2,4,3),c(3,2,4,5,5))
dist(xy)
hfit = hclust(dist(xy))
plot(hfit)
cphz = cophenetic(hfit)
cphz
hfit$height
########## K-means
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=3,nstart=100)
table(fit$cluster,my.pch)
# exchange 1,2
my.pch = rep(c(2,3,1),each=50)
table(fit$cluster,my.pch)
pairs(iris[,1:4],col=1+(fit$cluster!=my.pch))
# can examine centers
fit$centers
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,3))
pchv=c(rep(3,150),rep(19,3))
pairs(x,col=colv,pch=pchv)
### try with 2-5 means
nc = 2
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 3
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 4
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 5
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
sse = rep(0,10)
for (i in 1:10) {
fit = kmeans(iris[,1:4],centers=i)
sse[i] = fit$totss-fit$betweenss
}
plot(sse,type='b')
r2 = rep(0,10)
for (i in 1:10) {
fit = kmeans(iris[,1:4],centers=i)
r2[i] = fit$betweenss/fit$totss
}
plot(r2,type='b')
### STOP
| /DSC465/Lecture-Code/UNSUPERVISED-LEARNING.R | no_license | TianxuLuo/University-of-Rochester | R | false | false | 5,667 | r |
### START
# recall iris data
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
par(mfrow=c(1,1))
prc<-prcomp(iris[,1:4], scale=T)
# can distinguish 2 groups without class knowledge
pairs(prc$x)
pairs(prc$x,col=my.pch)
par(mfrow=c(1,1))
biplot(prc,scale=F)
prc$rotation
plot(prc$x[,1],prc$x[,2])
lines(c(0,prc$rotation[2,1]),c(0,prc$rotation[2,2]),col='green',lty=1,type='l',lwd=3)
### gene expression data
GSE10245.data = read.table(file='GSE10245.csv',header=T,sep=',')
gem1 = GSE10245.data[,2:51]
gr = GSE10245.data[,1]
prc = prcomp(gem1)
plot(prc)
### loadings can give structure to the clustering
par(mfrow=c(2,2))
biplot(prc,choices=c(1,2))
biplot(prc,choices=c(1,3))
biplot(prc,choices=c(1,4))
biplot(prc,choices=c(2,3))
pairs(prc$x[,1:4])
pairs(prc$x[,1:4],col=my.pch)
###
############ Hierarchical Clustering
hfit = hclust(dist(iris[,1:4]))
plot(hfit)
plot(hfit,labels=my.pch)
### methods can give very different results
par(mfrow=c(3,1))
hfit = hclust(dist(iris[,1:4]), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(iris[,1:4]), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(iris[,1:4]), method="average")
plot(hfit,labels=my.pch,main='average')
### simulate data
set.seed(123)
my.pch = rep(c(1,2,3),each=50)
# case 1 - not very good at clustering
xm = matrix(NA, 150, 2)
xm[,2] = rnorm(150,sd=10)
xm[1:50,1] = rnorm(50,sd=0.1) + 1
xm[51:100,1] = rnorm(50,sd=0.1) + 2
xm[101:150,1] = rnorm(50,sd=0.1) + 3
plot(xm[,1],xm[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm), method="single")
plot(hfit,labels=my.pch,main='single',col=my.pch)
hfit = hclust(dist(xm), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm), method="average")
plot(hfit,labels=my.pch,main='average')
# case 2 - better, for single link clustering
xm = matrix(NA, 150, 2)
xm[,2] = rnorm(150,sd=1)
xm[1:50,1] = rnorm(50,sd=0.1) + 1
xm[51:100,1] = rnorm(50,sd=0.1) + 2
xm[101:150,1] = rnorm(50,sd=0.1) + 3
plot(xm[,1],xm[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm), method="single")
plot(hfit,labels=my.pch,main='single',col=my.pch)
hfit = hclust(dist(xm), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm), method="average")
plot(hfit,labels=my.pch,main='average')
# case 3 - standardized data from case 2
xm2 = xm
xm2[,1] = (xm[,1]-mean(xm[,1]))/sd(xm[,1])
xm2[,2] = (xm[,2]-mean(xm[,2]))/sd(xm[,2])
plot(xm2[,1],xm2[,2],col=my.pch)
par(mfrow=c(3,1))
hfit = hclust(dist(xm2), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(xm2), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm2), method="average")
plot(hfit,labels=my.pch,main='average')
####################
xm3 = matrix(NA,150,2)
xm3[,1] = rnorm(150)+rep(c(5,10,15),each=50)
xm3[,2] = (xm3[,1]+rnorm(150,sd=0.25))/sqrt(1+0.25^2)
plot(xm3[,1],xm3[,2],col=my.pch)
par(mfrow=c(3,1),las=0)
hfit = hclust(dist(xm3), method="single")
plot(hfit,labels=my.pch,main='single')
hfit = hclust(dist(xm3), method="complete")
plot(hfit,labels=my.pch,main='complete')
hfit = hclust(dist(xm3), method="average")
plot(hfit,labels=my.pch,main='average')
abline(h=9,col='red')
abline(h=5,col='green')
### can extract clusters
pr = cutree(hfit,k=3)
table(my.pch,pr)
pr = cutree(hfit,k=6)
table(my.pch,pr)
pr = cutree(hfit,h=5)
table(my.pch,pr)
pr = cutree(hfit,h=9)
table(my.pch,pr)
### example
xy = cbind(c(2,3,2,4,3),c(3,2,4,5,5))
dist(xy)
hfit = hclust(dist(xy))
plot(hfit)
cphz = cophenetic(hfit)
cphz
hfit$height
########## K-means
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=3,nstart=100)
table(fit$cluster,my.pch)
# exchange 1,2
my.pch = rep(c(2,3,1),each=50)
table(fit$cluster,my.pch)
pairs(iris[,1:4],col=1+(fit$cluster!=my.pch))
# can examine centers
fit$centers
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,3))
pchv=c(rep(3,150),rep(19,3))
pairs(x,col=colv,pch=pchv)
### try with 2-5 means
nc = 2
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 3
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 4
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
nc = 5
my.pch = rep(c(1,2,3),each=50)
pairs(iris[,1:4],col=my.pch)
fit = kmeans(iris[,1:4],centers=nc,nstart=100)
x = rbind(iris[,1:4],fit$centers)
colv=c(rep(1,150),rep(2,nc))
pchv=c(rep(3,150),rep(19,nc))
pairs(x,col=colv,pch=pchv)
fit$totss
fit$betweenss
fit$tot.withinss
fit$withinss
sum(fit$withinss)+fit$betweenss
fit$totss
sse = rep(0,10)
for (i in 1:10) {
fit = kmeans(iris[,1:4],centers=i)
sse[i] = fit$totss-fit$betweenss
}
plot(sse,type='b')
r2 = rep(0,10)
for (i in 1:10) {
fit = kmeans(iris[,1:4],centers=i)
r2[i] = fit$betweenss/fit$totss
}
plot(r2,type='b')
### STOP
|
#' Ensemble predict function for convex combination of predictions
#'
#' Computes the super learner ensemble prediction by combining the
#' logit-transformed candidate predictions with a convex weight vector
#' and mapping the result back to the probability scale.
#'
#' @param p An n x K matrix of predictions from K different online learners
#' @param alpha A K row vector of weights
#' @param trimLogit Predictions are clipped to [trimLogit, 1 - trimLogit]
#'   before the logit transform so qlogis never returns +/-Inf
#'
#' @return prediction An n-length vector of super learner predictions
convexLogitCom <- function(p, alpha, trimLogit = 1e-5){
  # clip away from 0 and 1, then take the weighted sum on the logit scale
  trimmed <- pmin(pmax(p, trimLogit), 1 - trimLogit)
  plogis(qlogis(trimmed) %*% alpha)
}
| /R/convexLogitCom.R | no_license | benkeser/onlinesl | R | false | false | 562 | r | #' Ensemble predict function for convex combination of predictions
#' Ensemble predict function for convex combination of predictions
#'
#' This function computes the ensemble prediction for the super learner
#' based on a convex combination of weights
#'
#' @param p An n x K matrix of predictions from K different online learners
#' @param alpha A K row vector of weights
#' @param trimLogit Truncation bound: predictions are clipped to
#'   [trimLogit, 1 - trimLogit] so that qlogis() never returns +/-Inf
#'
#' @return prediction An n-length vector of super learner predictions
convexLogitCom <- function(p, alpha, trimLogit = 1e-5){
  # clip predictions away from 0 and 1 before the logit transform
  p[p < trimLogit] <- trimLogit;
  p[p > 1-trimLogit] <- 1-trimLogit
  # weighted sum on the logit scale, mapped back to probabilities
  plogis(qlogis(p) %*% alpha)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fullInformation.R
\name{fullInformation}
\alias{fullInformation}
\title{Information Function For Zero-Inflated Poisson (ZIP) Distribution}
\usage{
fullInformation(x, currentLambda, p, q)
}
\arguments{
\item{x}{A vector containing values belonging to a ZIP Distribution}
\item{currentLambda}{The value of our Lambda Parameter}
\item{p}{Probability of observing a Structural 0}
\item{q}{Probability of not observing a structural 0}
}
\description{
This is the second derivative of the log-likelihood function for the ZIP Distribution.
Use this for the Newton-Raphson Algorithm.
}
\examples{
fullInformation()
}
| /ZeroInflPoisson/man/fullInformation.Rd | no_license | JGMoore44/MooreFunctions | R | false | true | 675 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fullInformation.R
\name{fullInformation}
\alias{fullInformation}
\title{Information Function For Zero-Inflated Poisson (ZIP) Distribution}
\usage{
fullInformation(x, currentLambda, p, q)
}
\arguments{
\item{x}{A vector containing values belonging to a ZIP Distribution}
\item{currentLambda}{The value of our Lambda Parameter}
\item{p}{Probability of observing a Structural 0}
\item{q}{Probability of not observing a structural 0}
}
\description{
This is the second derivative of the log-likelihood function for the ZIP Distribution.
Use this for the Newton-Raphson Algorithm.
}
\examples{
fullInformation()
}
|
context("Atorus Validation")
#' @title Test Cases Code
#' @section Last Updated By:
#' Nathan Kosiba
#' @section Last Update Date:
#' 02/09/2021
#setup ----
#insert any necessary libraries
library(Tplyr)
library(tidyverse)
library(rlang)
library(testthat)
#insert code applicable to all tests i.e. functions or data
# CDISC ADaM test data sets read from the UAT input folder
adsl <- haven::read_xpt("~/Tplyr/uat/input/adsl.xpt")
# explicit factor levels so level ordering is stable across runs
adsl$RACE_FACTOR <- factor(adsl$RACE, c("WHITE", "BLACK OR AFRICAN AMERICAN",
                                        "AMERICAN INDIAN OR ALASKA NATIVE", "ASIAN"))
adae <- haven::read_xpt("~/Tplyr/uat/input/adae.xpt")
advs <- haven::read_xpt("~/Tplyr/uat/input/advs.xpt")
adlb <- haven::read_xpt("~/Tplyr/uat/input/adlbc.xpt")
adlb$ANRIND_FACTOR <- factor(adlb$ANRIND, c("L","N","H"))
adlb$BNRIND_FACTOR <- factor(adlb$BNRIND, c("L","N","H"))
opts = options()   # snapshot of current options (presumably for later restoration - not used in this chunk)
#no updates needed - initializes vur which is used to determine which parts of code to execute during testing
# vur stays NULL on the generation run; once vur_auto.Rds exists the tests run
# their validation checks against the previously generated outputs
vur <- NULL
if(file.exists("~/Tplyr/uat/references/output/vur_auto.Rds")) vur <- readRDS("~/Tplyr/uat/references/output/vur_auto.Rds")
#test 1 ----
# T1: verifies that tplyr_table() stores the target dataset (pop_data) and the
# treatment variable expression exactly as supplied.
test_that('T1',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
test_1 <- tplyr_table(adsl, TRT01P)
# output table to check attributes
save(test_1, file = "~/Tplyr/uat/output/test_1.RData")
#clean up working directory
rm(test_1)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_1.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(adsl, Tplyr::pop_data(test_1), label = "T1.1")
testthat::expect_equal(expr(TRT01P), quo_get_expr(test_1$treat_var), label = "T1.2")
#manual check(s)
#clean up working directory
rm(test_1)
})
#test 2 ----
# T2: verifies that a table-level `where` expression is captured and, when
# applied, filters the population data to the expected subset.
test_that('T2',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
test_2 <- tplyr_table(adsl, TRT01P, where = (EFFFL == 'Y'))
# output table to check attributes
save(test_2, file = "~/Tplyr/uat/output/test_2.RData")
#clean up working directory
rm(test_2)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_2.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(filter(adsl, EFFFL == 'Y'), filter(Tplyr::pop_data(test_2),!!Tplyr::get_where(test_2)), label = "T2.1")
#manual check(s)
#clean up working directory
rm(test_2)
})
#test 3 ----
# T3: verifies that add_total_group() and add_treat_grps() produce the expected
# header N counts (group labels and subject counts per treatment group).
test_that('T3',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_total_group() %>%
add_treat_grps('Total Xanomeline' = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
add_layer(
group_count(AGEGR1)
)
build(t)
test_3 <- header_n(t)
# output table to check attributes
save(test_3, file = "~/Tplyr/uat/output/test_3.RData")
#clean up working directory
rm(t)
rm(test_3)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_3.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(c("Placebo", "Total", "Total Xanomeline", "Xanomeline High Dose", "Xanomeline Low Dose"),
as.vector(test_3$TRT01P), label = "T3.1")
# Expected Ns computed independently from adsl for each (combined) group.
t3_2 <- c(nrow(filter(adsl, TRT01P == "Placebo")), nrow(adsl),
nrow(filter(adsl, TRT01P == "Xanomeline High Dose" | TRT01P == "Xanomeline Low Dose")),
nrow(filter(adsl, TRT01P == "Xanomeline High Dose")), nrow(filter(adsl, TRT01P == "Xanomeline Low Dose")))
testthat::expect_equal(t3_2, test_3[[2]], label = "T3.2")
#manual check(s)
#clean up working directory
rm(t3_2)
rm(test_3)
})
#test 4 ----
# T4: verifies that adding a group_desc layer (with `by` and `where`) leaves
# the table-level target data and treatment variable unchanged.
test_that('T4',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
test_4 <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE, by="Age (Groups)", where = SAFFL == "Y")
)
# output table to check attributes
save(test_4, file = "~/Tplyr/uat/output/test_4.RData")
#clean up working directory
rm(test_4)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_4.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(adsl, Tplyr::pop_data(test_4), label = "T4.1")
testthat::expect_equal(expr(TRT01P), quo_get_expr(test_4$treat_var), label = "T4.2")
#manual check(s)
#clean up working directory
rm(test_4)
})
#test 5 ----
# T5: verifies that set_pop_data()/set_pop_treat_var() store a separate
# population dataset and treatment variable alongside the target dataset.
test_that('T5',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
test_5 <- tplyr_table(adae, TRTA) %>%
set_pop_data(adsl) %>%
set_pop_treat_var(TRT01P) %>%
add_layer(
group_count(AEDECOD)
)
# output table to check attributes
save(test_5, file = "~/Tplyr/uat/output/test_5.RData")
#clean up working directory
rm(test_5)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_5.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(adsl, Tplyr::pop_data(test_5), label = "T5.1")
testthat::expect_equal(adae, test_5$target, label = "T5.2")
testthat::expect_equal(expr(TRT01P), quo_get_expr(test_5$pop_treat_var), label = "T5.3")
testthat::expect_equal(expr(TRTA), quo_get_expr(test_5$treat_var), label = "T5.4")
#manual check(s)
#clean up working directory
rm(test_5)
})
#test 6 ----
# T6: verifies `where` filtering for a count layer -- applying the table's
# stored where expression to the source data reproduces the expected subset.
# NOTE(review): the layer-level where is checked via Tplyr::get_where() on the
# table object -- confirm this retrieves the intended expression.
test_that('T6',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
test_6 <- tplyr_table(adae, TRTA) %>%
add_layer(
group_count(AEDECOD, by="Preferred Term", where = SAFFL == "Y")
)
# output table to check attributes
save(test_6, file = "~/Tplyr/uat/output/test_6.RData")
#clean up working directory
rm(test_6)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_6.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(filter(adae, SAFFL == 'Y'),
filter(Tplyr::pop_data(test_6),!!Tplyr::get_where(test_6)),
label = "T6.1")
#manual check(s)
#clean up working directory
rm(test_6)
})
#test 7 ----
# T7: verifies the numeric data behind count layers -- event counts (n),
# subject-distinct counts via set_distinct_by (distinct_n), and counts for a
# literal text target ("Any AE").
test_that('T7',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
add_layer(
group_count(AEDECOD)
) %>%
add_layer(
group_count(AEDECOD) %>%
set_distinct_by(USUBJID) %>%
set_format_strings(f_str("xxx", distinct_n))
) %>%
add_layer(
group_count("Any AE")
)
build(t)
test_7 <- get_numeric_data(t)
# output table to check attributes
save(test_7, file = "~/Tplyr/uat/output/test_7.RData")
#clean up working directory
rm(t)
rm(test_7)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_7.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Independent expected values computed from adae for the Placebo arm only.
t7_1 <- filter(adae, TRTA == "Placebo") %>%
group_by(AEDECOD) %>%
summarise(n=n())
t7_2 <- filter(adae, TRTA == "Placebo") %>%
group_by(AEDECOD) %>%
distinct(USUBJID, AEDECOD) %>%
summarise(n=n())
t7_3 <- filter(adae, TRTA == "Placebo") %>%
group_by("Any AE") %>%
summarise(n = n())
testthat::expect_equal(t7_1[[2]],
subset(test_7[[1]], TRTA == 'Placebo' & n != 0)[['n']],
label = "T7.1")
testthat::expect_equal(t7_2[[2]],
subset(test_7[[2]], TRTA == 'Placebo' & n != 0)[['distinct_n']],
label = "T7.2")
testthat::expect_equal(t7_3[[2]],
subset(test_7[[3]], TRTA == 'Placebo' & n != 0)[['n']],
label = "T7.3")
#manual check(s)
#clean up working directory
rm(t7_1)
rm(t7_2)
rm(t7_3)
rm(test_7)
})
#test 8 ----
# T8: verifies keep_levels() restricts the reported AEDECOD values while event
# counts (n) and subject-distinct counts (distinct_n) stay correct within SEX.
test_that('T8',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
add_layer(
group_count(AEDECOD, by=SEX) %>%
keep_levels("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")
) %>%
add_layer(
group_count(AEDECOD, by=SEX) %>%
set_distinct_by(USUBJID) %>%
set_format_strings(f_str("xxx", distinct_n)) %>%
keep_levels("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")
)
build(t)
test_8 <- get_numeric_data(t)
# output table to check attributes
save(test_8, file = "~/Tplyr/uat/output/test_8.RData")
#clean up working directory
rm(t)
rm(test_8)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_8.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Expected counts recomputed directly from adae for the kept levels only.
t8_1 <- filter(adae, TRTA == "Placebo" & AEDECOD %in% c("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")) %>%
group_by(SEX, AEDECOD) %>%
summarise(n=n())
t8_2 <- filter(adae, TRTA == "Placebo" & AEDECOD %in% c("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")) %>%
group_by(SEX, AEDECOD) %>%
distinct(USUBJID, SEX, AEDECOD) %>%
summarise(n=n())
testthat::expect_equal(t8_1[[3]],
subset(test_8[[1]], TRTA == 'Placebo' & n != 0)[['n']],
label = "T8.1")
testthat::expect_equal(t8_2[[3]],
subset(test_8[[2]], TRTA == 'Placebo' & n != 0)[['distinct_n']],
label = "T8.2")
#manual check(s)
#clean up working directory
rm(t8_1)
rm(t8_2)
rm(test_8)
})
#test 9 ----
# T9: verifies nested count layers -- vars(outer, inner) with both a variable
# (EOSSTT) and a literal text ("Discontinuation") outer level -- produce the
# expected formatted counts per treatment column.
test_that('T9',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(vars(EOSSTT, DCDECOD)) %>%
set_format_strings(f_str("xxx", n)) %>%
keep_levels("COMPLETED", "DEATH")
) %>%
add_layer(
group_count(vars("Discontinuation", DCDECOD)) %>%
set_format_strings(f_str("xxx", n)) %>%
keep_levels("COMPLETED", "DEATH")
)
test_9 <- build(t)
# output table to check attributes
save(test_9, file = "~/Tplyr/uat/output/test_9.RData")
#clean up working directory
rm(t)
rm(test_9)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_9.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Outer-level rows get a " A" DCDECOD sentinel so they sort before the nested
# inner rows after arrange().
t9_1_outer <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
group_by(TRT01P, EOSSTT) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRT01P, EOSSTT, fill = list(n = 0)) %>%
mutate(DCDECOD = " A")
t9_1 <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
group_by(TRT01P, EOSSTT, DCDECOD) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRT01P, nesting(EOSSTT, DCDECOD), fill = list(n = 0)) %>%
rbind(t9_1_outer) %>%
arrange(TRT01P, EOSSTT, DCDECOD) %>%
mutate(fmtd = sprintf("%3s", n)) %>%
pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = c(EOSSTT, DCDECOD))
t9_2_outer <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
group_by(TRT01P, "Discontinued") %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRT01P, fill = list(n = 0)) %>%
mutate(DCDECOD = " A")
t9_2 <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
group_by(TRT01P, "Discontinued", DCDECOD) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRT01P, nesting("Discontinued", DCDECOD), fill = list(n = 0)) %>%
rbind(t9_2_outer) %>%
arrange(TRT01P, DCDECOD) %>%
mutate(fmtd = sprintf("%3s", n)) %>%
pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = DCDECOD)
test_9_1 <-subset(test_9, ord_layer_index == 1)
test_9_2 <-subset(test_9, ord_layer_index == 2)
testthat::expect_equal(c(t9_1$Placebo, t9_1$`Xanomeline Low Dose`, t9_1$`Xanomeline High Dose`),
c(test_9_1$var1_Placebo, test_9_1$`var1_Xanomeline Low Dose`, test_9_1$`var1_Xanomeline High Dose`),
label = "T9.1")
testthat::expect_equal(c(t9_2$Placebo, t9_2$`Xanomeline Low Dose`, t9_2$`Xanomeline High Dose`),
c(test_9_2$var1_Placebo, test_9_2$`var1_Xanomeline Low Dose`, test_9_2$`var1_Xanomeline High Dose`),
label = "T9.2")
#manual check(s)
#clean up working directory
rm(t9_1)
rm(t9_1_outer)
rm(t9_2)
rm(t9_2_outer)
rm(test_9)
rm(test_9_1)
rm(test_9_2)
})
#test 10 ----
# T10: verifies add_total_row() (sorted first via sort_value = -Inf, excluding
# missings) and set_missing_count() (sorted last via sort_value = Inf).
test_that('T10',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(DCSREAS) %>%
set_format_strings(f_str("xxx", n)) %>%
add_total_row(sort_value = -Inf, count_missings = FALSE) %>%
set_total_row_label("TOTAL") %>%
set_missing_count(fmt = f_str("xxx", n), sort_value = Inf, Missing = "")
)
test_10 <- build(t) %>%
arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_10, file = "~/Tplyr/uat/output/test_10.RData")
#clean up working directory
rm(t)
rm(test_10)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_10.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t10_tots <- group_by(adsl, TRT01P) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, fill = list(n = 0))
# Total row counts exclude missing (blank) DCSREAS to mirror count_missings = FALSE.
t10_totalrow <- filter(adsl, DCSREAS != "") %>%
group_by(TRT01P) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, fill = list(n = 0)) %>%
mutate(DCSREAS = ' TOTAL')
# Blank DCSREAS is relabelled 'ZZZ' so alphabetical sort places it last,
# matching the Inf sort_value of the missing row.
t10_1 <- group_by(adsl, TRT01P, DCSREAS) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, DCSREAS, fill = list(n = 0)) %>%
as_tibble() %>%
rbind(t10_totalrow) %>%
mutate(fmtd = sprintf("%3s", n)) %>%
pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = DCSREAS) %>%
mutate(DCSREAS = if_else(DCSREAS == "", 'ZZZ', DCSREAS)) %>%
arrange(DCSREAS)
testthat::expect_equal(t10_1$Placebo, test_10$var1_Placebo, label = "T10.1")
#manual check(s)
#clean up working directory
rm(t10_tots)
rm(t10_1)
rm(test_10)
})
#test 11 ----
# T11: verifies set_denom_where(TRUE) overrides the layer's where expression
# for denominator calculation, and denom_ignore excludes the missing row.
test_that('T11',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(DCSREAS, where = EFFFL == 'Y') %>%
set_missing_count(fmt = f_str("xx", n), sort_value = Inf, Missing = "", denom_ignore = TRUE) %>%
set_denom_where(TRUE)
)
test_11 <- build(t) %>%
arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_11, file = "~/Tplyr/uat/output/test_11.RData")
#clean up working directory
rm(t)
rm(test_11)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_11.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Denominators come from all non-missing DCSREAS (where overridden to TRUE),
# while the counted rows still honour EFFFL == 'Y'.
t11_tots <- filter(adsl, DCSREAS != "") %>%
group_by(TRT01P) %>%
summarise(total = n())
t11_1 <- filter(adsl, EFFFL == 'Y') %>%
group_by(TRT01P, DCSREAS) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, DCSREAS, fill = list(n = 0)) %>%
left_join(t11_tots, by="TRT01P") %>%
mutate(pct = n / total *100) %>%
mutate(col = ifelse(DCSREAS == "", sprintf("%2s",n), paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),"%)"))) %>%
mutate(DCSREAS = ifelse(DCSREAS == "", 'ZZZ', DCSREAS)) %>%
filter(TRT01P == "Placebo") %>%
arrange(DCSREAS)
testthat::expect_equal(t11_1$col,test_11$var1_Placebo,label = "T11.1")
#manual check(s)
#clean up working directory
rm(t11_tots)
rm(t11_1)
rm(test_11)
})
#test 12 ----
# T12: verifies a factor-valued count target with an added total row, a missing
# count row, and keep_levels() restricting the reported categories.
test_that('T12',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(RACE_FACTOR, where = EFFFL == 'Y') %>%
set_format_strings(f_str('xxx',n)) %>%
add_total_row(f_str('xxx',n), sort_value = -Inf) %>%
set_missing_count(f_str('xxx',n), Missing = NA, sort_value = Inf) %>%
keep_levels("WHITE","ASIAN")
)
test_12 <- build(t) %>%
arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_12, file = "~/Tplyr/uat/output/test_12.RData")
#clean up working directory
rm(t)
rm(test_12)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_12.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t12_totalrow <- filter(adsl, EFFFL == 'Y' & RACE_FACTOR %in% c("WHITE","ASIAN")) %>%
group_by(TRT01P) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, fill = list(n=0)) %>%
mutate(RACE_FACTOR = 'TOTAL') %>%
as_tibble()
# Fix: the original condition `!length(filter(...))` was always FALSE because
# length() of a data frame is its column count (never 0), leaving the
# then-branch dead -- and that branch also dropped the data argument from
# group_by(), which would error if it ever ran. Branch on whether missing
# RACE_FACTOR values actually exist in the analysed subset.
if (nrow(filter(adsl, is.na(RACE_FACTOR) & EFFFL == 'Y')) > 0) {
t12_missingrow <- filter(adsl, EFFFL == 'Y') %>%
filter(is.na(RACE_FACTOR)) %>%
group_by(TRT01P) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, fill = list(n=0)) %>%
mutate(RACE_FACTOR = 'MISSING')
} else {
# No missing values: build an all-zero MISSING row for each treatment group.
t12_missingrow <- unique(adsl$TRT01P) %>%
as_tibble() %>%
mutate(n = 0) %>%
mutate(RACE_FACTOR = 'MISSING') %>%
rename(TRT01P = value)
}
t12_categoryrows <- filter(adsl, EFFFL == 'Y') %>%
group_by(TRT01P, RACE_FACTOR) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, RACE_FACTOR, fill = list(n=0)) %>%
arrange(RACE_FACTOR) %>%
as_tibble()
# Row order relies on rbind order (TOTAL, categories in factor order, MISSING)
# matching the -Inf / Inf sort values used in the layer.
t12_1 <- rbind(t12_totalrow, t12_categoryrows, t12_missingrow) %>%
filter(TRT01P == 'Placebo' & RACE_FACTOR %in% c("TOTAL","MISSING","WHITE","ASIAN")) %>%
mutate(fmtd = sprintf("%3s", n))
testthat::expect_equal(t12_1$fmtd, test_12$var1_Placebo, label = "T12.1")
#manual check(s)
#clean up working directory
rm(t12_totalrow)
rm(t12_missingrow)
rm(t12_categoryrows)
rm(t12_1)
rm(test_12)
})
#test 13 ----
# T13: verifies f_str() formatting combining event counts/percentages (n, pct)
# and subject-distinct counts/percentages (distinct_n, distinct_pct), alone and
# combined in one format string, each with its own total row.
test_that('T13',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA, where=TRTA == 'Placebo') %>%
set_distinct_by(USUBJID) %>%
add_layer(
group_count(AEDECOD) %>%
set_format_strings(f_str("xxx (xxx.x%)", n, pct)) %>%
add_total_row(f_str("xxx", n), sort_value = -Inf)
)%>%
add_layer(
group_count(AEDECOD) %>%
set_format_strings(f_str("xxx (xxx.x%)", distinct_n, distinct_pct)) %>%
add_total_row(f_str("xxx", distinct_n), sort_value = -Inf)
)%>%
add_layer(
group_count(AEDECOD) %>%
set_format_strings(f_str("xxx (xxx.x%) [xxx (xxx.x%)]", n, pct, distinct_n, distinct_pct)) %>%
add_total_row(f_str("xxx [xxx]", n, distinct_n), sort_value = -Inf)
)
test_13 <- build(t) %>%
arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_13, file = "~/Tplyr/uat/output/test_13.RData")
#clean up working directory
rm(t)
rm(test_13)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_13.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Denominators: all Placebo events (t13_totals) and all distinct Placebo
# subjects (t13_totals_distinct); total rows use the ' TOTAL' sort sentinel.
t13_totals <- filter(adae) %>%
group_by(TRTA) %>%
summarize(total=n())
t13_totals_distinct <- filter(adae) %>%
distinct(USUBJID, TRTA) %>%
group_by(TRTA) %>%
summarize(distinct_total=n())
t13_total_row <- filter(adae, TRTA == 'Placebo') %>%
group_by(TRTA) %>%
summarize(cnt=n()) %>%
mutate(AEDECOD = ' TOTAL')
t13_total_row_distinct <- filter(adae, TRTA == 'Placebo') %>%
distinct(USUBJID, TRTA, AEDECOD) %>%
group_by(TRTA) %>%
summarize(cnt=n()) %>%
mutate(AEDECOD = ' TOTAL')
t13_1 <- filter(adae, TRTA == 'Placebo') %>%
group_by(AEDECOD, TRTA) %>%
summarize(cnt=n()) %>%
as_tibble() %>%
rbind(t13_total_row) %>%
left_join(t13_totals,by="TRTA") %>%
mutate(pct = sprintf("%5.1f", round(cnt/total*100,digits = 1))) %>%
mutate(col = ifelse(AEDECOD == ' TOTAL', sprintf("%3s", cnt),paste0(as.character(cnt),' (',pct,'%)'))) %>%
arrange(AEDECOD)
t13_2 <- filter(adae, TRTA == 'Placebo') %>%
distinct(USUBJID, TRTA, AEDECOD) %>%
group_by(AEDECOD, TRTA) %>%
summarize(cnt=n()) %>%
as_tibble() %>%
rbind(t13_total_row_distinct) %>%
left_join(t13_totals_distinct,by="TRTA") %>%
mutate(pct = sprintf("%5.1f", round(cnt/distinct_total*100,digits = 1))) %>%
mutate(distinct_col = ifelse(AEDECOD == ' TOTAL', sprintf("%3s", cnt),paste0(as.character(cnt),' (',pct,'%)'))) %>%
arrange(AEDECOD)
t13_3 <- select(t13_1,c("TRTA","AEDECOD","col")) %>%
left_join(t13_2, by=c("TRTA","AEDECOD")) %>%
mutate(col_combo = ifelse(AEDECOD == ' TOTAL', paste0(col, " [",distinct_col,"]"),paste0(col, " [",sprintf("%12s",distinct_col),"]"))) %>%
arrange(AEDECOD)
testthat::expect_equal(t13_1$col,
trimws(filter(test_13, ord_layer_index == 1)[["var1_Placebo"]]),
label = "T13.1")
testthat::expect_equal(t13_2$distinct_col,
trimws(filter(test_13, ord_layer_index == 2)[["var1_Placebo"]]),
label = "T13.2")
testthat::expect_equal(t13_3$col_combo,
trimws(filter(test_13, ord_layer_index == 3)[["var1_Placebo"]]),
label = "T13.3")
#manual check(s)
#clean up working directory
rm(t13_totals)
rm(t13_totals_distinct)
rm(t13_total_row)
rm(t13_total_row_distinct)
rm(t13_1)
rm(t13_2)
rm(t13_3)
rm(test_13)
})
#test 14 ----
# T14: verifies that percentage denominators in a count layer's numeric data
# equal the per-treatment subject totals, and that pct = n / total * 100.
test_that('T14',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(RACE) %>%
set_format_strings(f_str("xxx (xx.x%)", n, pct))
)
build(t)
test_14 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_14, file = "~/Tplyr/uat/output/test_14.RData")
#clean up working directory
rm(t)
rm(test_14)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_14.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t14_1 <- group_by(adsl, TRT01P) %>%
summarise(total=n()) %>%
mutate(total = as.integer(total))
t14_2 <- group_by(adsl, TRT01P, RACE) %>%
summarise(n = n()) %>%
left_join(t14_1, by='TRT01P') %>%
mutate(pct = round((n / total) * 100, digits = 1))
testthat::expect_equal(t14_1$total,unique(test_14[c("TRT01P", "total")])$total,label = "T14.1")
testthat::expect_equal(t14_2$pct,
mutate(filter(test_14, n != 0),pct = round((n / total) * 100, digits = 1))[['pct']],
label = "T14.2")
#manual check(s)
#clean up working directory
rm(t14_1)
rm(t14_2)
rm(test_14)
})
#test 15 ----
# T15: verifies that percentage denominators respect a table-level `where`
# (SEX == "F"), i.e. totals and pcts are computed on the filtered population.
test_that('T15',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P, where=SEX == "F") %>%
add_layer(
group_count(RACE) %>%
set_format_strings(f_str("xxx (xx.x%)", n, pct))
)
build(t)
test_15 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_15, file = "~/Tplyr/uat/output/test_15.RData")
#clean up working directory
rm(t)
rm(test_15)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_15.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t15_1 <- filter(adsl, SEX == "F") %>%
group_by(TRT01P) %>%
summarise(total=n())
t15_2 <- filter(adsl, SEX == "F") %>%
group_by(TRT01P, RACE) %>%
summarise(n=n()) %>%
left_join(t15_1, by='TRT01P') %>%
mutate(pct = round((n / total) * 100, digits = 1))
testthat::expect_equal(t15_1$total,unique(test_15[c("TRT01P", "total")])$total,label = "T15.1")
testthat::expect_equal(t15_2$pct,
mutate(filter(test_15, n != 0),pct = round((n / total) * 100, digits = 1))[['pct']],
label = "T15.2")
#manual check(s)
#clean up working directory
rm(t15_1)
rm(t15_2)
rm(test_15)
})
#test 16 ----
# T16: verifies that a separate population dataset (adsl) supplies both the
# header Ns and the percentage denominators for a count layer built on adae.
test_that('T16',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
set_pop_data(adsl) %>%
set_pop_treat_var(TRT01P) %>%
add_layer(
group_count(AEDECOD) %>%
set_distinct_by(USUBJID)
)
test_16 <- list(build(t), header_n(t))
# output table to check attributes
save(test_16, file = "~/Tplyr/uat/output/test_16.RData")
#clean up working directory
rm(t)
rm(test_16)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_16.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Expected: distinct-subject AE counts with adsl-based denominators.
t16_1 <- group_by(adsl, TRT01P) %>%
summarise(total=n())
t16_2 <- distinct(adae, TRTA, AEDECOD, USUBJID) %>%
group_by(TRTA, AEDECOD) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, AEDECOD, fill = list(n = 0)) %>%
merge(t16_1, by.y='TRT01P', by.x = "TRTA") %>%
mutate(pct = round((n / total) * 100, digits = 1)) %>%
mutate(col = paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
select(TRTA, AEDECOD, col) %>%
pivot_wider(names_from = "TRTA", values_from = col)
testthat::expect_equal(t16_2$Placebo,test_16[[1]]$var1_Placebo, label = "T16.1")
testthat::expect_equal(t16_1$total,test_16[[2]]$n, label = "T16.2")
#manual check(s)
#clean up working directory
rm(t16_1)
rm(t16_2)
rm(test_16)
})
#test 17 ----
# T17: verifies set_denoms_by(TRT01P, SEX) computes percentages within each
# treatment-by-sex cell, including the added total row.
test_that('T17',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(ETHNIC, by=SEX) %>%
set_denoms_by(TRT01P, SEX) %>%
add_total_row()
)
test_17 <- build(t)
# output table to check attributes
save(test_17, file = "~/Tplyr/uat/output/test_17.RData")
#clean up working directory
rm(t)
rm(test_17)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_17.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# The per-cell totals double as the total-row counts (n = total, pct = 100).
t17_tots <- group_by(adsl, TRT01P, SEX) %>%
summarise(total=n()) %>%
mutate(total = as.numeric(total)) %>%
mutate(n = total)
t17_1 <- group_by(adsl, TRT01P, SEX, ETHNIC) %>%
summarise(n=n()) %>%
rbind(select(t17_tots, -total)) %>%
left_join(select(t17_tots, -n), by = c('TRT01P', "SEX")) %>%
mutate(pct = round((n / total) * 100, digits = 1)) %>%
mutate(col = paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
filter(TRT01P == "Placebo") %>%
mutate(ETHNIC = replace_na(ETHNIC, 'Total')) %>%
arrange(SEX, ETHNIC)
testthat::expect_equal(t17_1$col, test_17$var1_Placebo,label = "T17.1")
#manual check(s)
#clean up working directory
rm(t17_tots)
rm(t17_1)
rm(test_17)
})
#test 18 ----
# T18: verifies add_risk_diff() against stats::prop.test() for both the
# default arguments and custom args (conf.level, correct, alternative).
test_that('T18',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01A) %>%
add_layer(
group_count(RACE) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'))
) %>%
add_layer(
group_count(RACE) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'),
args = list(conf.level = 0.9, correct=FALSE, alternative='less'))
)
suppressWarnings(build(t))
test_18 <- get_stats_data(t)
# output table to check attributes
save(test_18, file = "~/Tplyr/uat/output/test_18.RData")
#clean up working directory
rm(t)
rm(test_18)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_18.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Reference values from prop.test on the WHITE counts for the two arms.
tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose"), n=n())[[1]]
cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE'), n=n())[[1]]
tot_p <- summarise(filter(adsl, TRT01P == "Placebo"), n=n())[[1]]
cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE'), n=n())[[1]]
t18_noarg <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p))
t18_args <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p), conf.level = 0.9, correct=FALSE, alternative='less')
testthat::expect_equal(t18_noarg$estimate[[1]] - t18_noarg$estimate[[2]],
filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'dif')[[3]],
label = "T18.1")
testthat::expect_equal(c(t18_noarg$conf.int[1], t18_noarg$conf.int[2]),
c(filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'low')[[3]],
filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'high')[[3]]),
label = "T18.2")
testthat::expect_equal(c(t18_args$estimate[[1]] - t18_args$estimate[[2]], t18_args$conf.int[1], t18_args$conf.int[2]),
c(filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'dif')[[3]],
filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'low')[[3]],
filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'high')[[3]]),
label = "T18.3")
#manual check(s)
#clean up working directory
rm(tot_p)
rm(cnt_p)
rm(tot_t)
rm(cnt_t)
rm(t18_noarg)
rm(t18_args)
rm(test_18)
})
#test 19 ----
# T19: verifies that risk differences are computed within `cols` subgroups
# (here SEX == "F"), matching prop.test on the subgroup counts.
test_that('T19',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01A, cols=SEX) %>%
add_layer(
group_count(RACE) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'))
)
suppressWarnings(build(t))
test_19 <- get_stats_data(t)
# output table to check attributes
save(test_19, file = "~/Tplyr/uat/output/test_19.RData")
#clean up working directory
rm(t)
rm(test_19)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_19.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & SEX == "F"), n=n())[[1]]
cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE' & SEX == "F"), n=n())[[1]]
tot_p <- summarise(filter(adsl, TRT01P == "Placebo" & SEX == "F"), n=n())[[1]]
cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE' & SEX == "F"), n=n())[[1]]
suppressWarnings(t19 <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p)))
testthat::expect_equal(c(t19$estimate[[1]] - t19$estimate[[2]], t19$conf.int[1], t19$conf.int[2]),
c(filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'dif')[[4]],
filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'low')[[4]],
filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'high')[[4]]),
label = "T19.1")
#manual check(s)
#clean up working directory
rm(tot_p)
rm(cnt_p)
rm(tot_t)
rm(cnt_t)
rm(t19)
rm(test_19)
})
#test 20 ----
# T20: verifies risk differences on (1) nested count layers, (2) a by-variable
# layer, and (3) a nested layer with a by-variable, each against prop.test.
# Denominators here are event counts from adae per treatment arm.
test_that('T20',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
add_layer(
group_count(vars(AEBODSYS, AEDECOD)) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'))
) %>%
add_layer(
group_count(AEBODSYS, by = SEX) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'))
) %>%
add_layer(
group_count(vars(AEBODSYS, AEDECOD), by = SEX) %>%
add_risk_diff(c('Xanomeline High Dose','Placebo'))
)
suppressWarnings(build(t))
test_20 <- get_stats_data(t)
# output table to check attributes
save(test_20, file = "~/Tplyr/uat/output/test_20.RData")
#clean up working directory
rm(t)
rm(test_20)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_20.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
tot_p <- summarise(filter(adae, TRTA == "Placebo"), n=n())[[1]]
tot_t <- summarise(filter(adae, TRTA == 'Xanomeline High Dose'), n=n())[[1]]
cnt_p1 <- summarise(filter(adae, TRTA == "Placebo" &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
AEDECOD == "PRURITUS"),
n=n())[[1]]
cnt_t1 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
AEDECOD == "PRURITUS"),
n=n())[[1]]
cnt_p2 <- summarise(filter(adae, TRTA == "Placebo" &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
SEX == "F"),
n=n())[[1]]
cnt_t2 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
SEX == "F"),
n=n())[[1]]
cnt_p3 <- summarise(filter(adae, TRTA == "Placebo" &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
AEDECOD == "PRURITUS" &
SEX == "F"),
n=n())[[1]]
cnt_t3 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
AEDECOD == "PRURITUS" &
SEX == "F"),
n=n())[[1]]
suppressWarnings(t20_1 <- prop.test(c(cnt_t1, cnt_p1), c(tot_t, tot_p)))
suppressWarnings(t20_2 <- prop.test(c(cnt_t2, cnt_p2), c(tot_t, tot_p)))
suppressWarnings(t20_3 <- prop.test(c(cnt_t3, cnt_p3), c(tot_t, tot_p)))
# Nested inner levels carry a leading space in summary_var (' PRURITUS').
testthat::expect_equal(c(t20_1$estimate[[1]] - t20_1$estimate[[2]], t20_1$conf.int[1], t20_1$conf.int[2]),
c(filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & measure == 'dif')[[4]],
filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & measure == 'low')[[4]],
filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & measure == 'high')[[4]]),
label = "T20.1")
testthat::expect_equal(c(t20_2$estimate[[1]] - t20_2$estimate[[2]], t20_2$conf.int[1], t20_2$conf.int[2]),
c(filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
SEX == 'F' & measure == 'dif')[[4]],
filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
SEX == 'F' & measure == 'low')[[4]],
filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
SEX == 'F' & measure == 'high')[[4]]),
label = "T20.2")
testthat::expect_equal(c(t20_3$estimate[[1]] - t20_3$estimate[[2]], t20_3$conf.int[1], t20_3$conf.int[2]),
c(filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & SEX == 'F' & measure == 'dif')[[5]],
filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & SEX == 'F' & measure == 'low')[[5]],
filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
summary_var == ' PRURITUS' & SEX == 'F' & measure == 'high')[[5]]),
label = "T20.3")
#manual check(s)
#clean up working directory
rm(tot_p)
rm(tot_t)
rm(cnt_p1)
rm(cnt_t1)
rm(cnt_p2)
rm(cnt_t2)
rm(cnt_p3)
rm(cnt_t3)
rm(t20_1)
rm(t20_2)
rm(t20_3)
rm(test_20)
})
#test 21 ----
# T21: every built-in descriptive statistic of group_desc() (n, mean, median,
# sd, var, min, max, iqr, q1, q3, missing) must match the same statistic
# recomputed independently with dplyr on the Placebo arm.
test_that('T21',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
# Request one formatted row per built-in summary so each appears in the
# layer's numeric data under its own `stat` value.
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE) %>%
set_format_strings(
'n' = f_str('xx', n),
'mean' = f_str('xx.x', mean),
'median' = f_str('xx.x', median),
'sd' = f_str('xx.xx', sd),
'var' = f_str('xx.xx', var),
'min' = f_str('xx', min),
'max' = f_str('xx', max),
'iqr' = f_str('xx.x', iqr),
'q1' = f_str('xx.x', q1),
'q3' = f_str('xx.x', q3),
'missing' = f_str('xx', missing)
)
)
build(t)
# Checks use the unformatted numeric results, not the built display strings.
test_21 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_21, file = "~/Tplyr/uat/output/test_21.RData")
#clean up working directory
rm(t)
rm(test_21)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_21.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# One assertion per statistic, each recomputed on the Placebo subset.
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], n=n())[[1]],
subset(test_21, stat == 'n' & TRT01P == 'Placebo')[['value']],
label = "T21.1")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], mean=mean(AGE))[[1]],
subset(test_21, stat == 'mean' & TRT01P == 'Placebo')[['value']],
label = "T21.2")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], median=median(AGE))[[1]],
subset(test_21, stat == 'median' & TRT01P == 'Placebo')[['value']],
label = "T21.3")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], sd=sd(AGE))[[1]],
subset(test_21, stat == 'sd' & TRT01P == 'Placebo')[['value']],
label = "T21.4")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], var=var(AGE))[[1]],
subset(test_21, stat == 'var' & TRT01P == 'Placebo')[['value']],
label = "T21.5")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], min=min(AGE))[[1]],
subset(test_21, stat == 'min' & TRT01P == 'Placebo')[['value']],
label = "T21.6")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], max=max(AGE))[[1]],
subset(test_21, stat == 'max' & TRT01P == 'Placebo')[['value']],
label = "T21.7")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], iqr=IQR(AGE))[[1]],
subset(test_21, stat == 'iqr' & TRT01P == 'Placebo')[['value']],
label = "T21.8")
# quantile()[[2]] / [[4]] are the 25th / 75th percentiles (default type 7).
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], q1=quantile(AGE)[[2]])[[1]],
subset(test_21, stat == 'q1' & TRT01P == 'Placebo')[['value']],
label = "T21.9")
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], q3=quantile(AGE)[[4]])[[1]],
subset(test_21, stat == 'q3' & TRT01P == 'Placebo')[['value']],
label = "T21.10")
# 'missing' is the count of Placebo rows with NA AGE.
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo' & is.na(adsl$AGE),], n=n())[[1]],
subset(test_21, stat == 'missing' & TRT01P == 'Placebo')[['value']],
label = "T21.11")
#manual check(s)
#clean up working directory
rm(test_21)
})
#test 22 ----
test_that('T22',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE) %>%
set_custom_summaries(
geometric_mean = exp(sum(log(.var[.var > 0]),
na.rm=TRUE) / length(.var))
) %>%
set_format_strings(
'Geometric Mean (SD)' = f_str('xx.xx (xx.xxx)', geometric_mean, sd)
)
)
build(t)
test_22 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_22, file = "~/Tplyr/uat/output/test_22.RData")
#clean up working directory
rm(t)
rm(test_22)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_22.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',],
geometric_mean = exp(sum(log(AGE[AGE > 0]),na.rm=TRUE) / length(AGE)))[[1]],
subset(test_22, stat == 'geometric_mean' & TRT01P == 'Placebo')[['value']],
label = "T22.1")
#manual check(s)
#clean up working directory
rm(test_22)
})
#test 23 ----
test_that('T23',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE, by=ETHNIC) %>%
set_format_strings(
'n' = f_str('xx', n),
'mean' = f_str('xx.x', mean),
'median' = f_str('xx.x', median),
'sd' = f_str('xx.xx', sd),
'var' = f_str('xx.xx', var),
'min' = f_str('xx', min),
'max' = f_str('xx', max),
'iqr' = f_str('xx.x', iqr),
'q1' = f_str('xx.x', q1),
'q3' = f_str('xx.x', q3)
)
)
build(t)
test_23 <- filter(get_numeric_data(t)[[1]], TRT01P == 'Placebo')
# output table to check attributes
save(test_23, file = "~/Tplyr/uat/output/test_23.RData")
#clean up working directory
rm(t)
rm(test_23)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_23.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t23_1 <- pivot_longer(data.frame(summarise(group_by(adsl[adsl$TRT01P == 'Placebo',],ETHNIC),
n=n(),
mean=mean(AGE),
median=median(AGE),
sd=sd(AGE),
var=var(AGE),
min=min(AGE),
max=max(AGE),
iqr=IQR(AGE),
q1=quantile(AGE)[[2]],
q3=quantile(AGE)[[4]]
)
),
cols=c(n,mean,median,sd,var,min,max,iqr,q1,q3),names_to="STAT")
testthat::expect_equal(t23_1$value,
test_23$value,
label = "T23.1")
#manual check(s)
#clean up working directory
rm(t23_1)
rm(test_23)
})
#test 24 ----
test_that('T24',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE) %>%
set_format_strings(
'combo' = f_str('xx, xx.x, xx, x.xx, xx.xx, xx, xx, xx.x, xx.x, xx.x',
n, mean, median, sd, var, min, max, iqr, q1, q3)
)
)
test_24 <- build(t)$var1_Placebo
# output table to check attributes
save(test_24, file = "~/Tplyr/uat/output/test_24.RData")
#clean up working directory
rm(t)
rm(test_24)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_24.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t24_1 <- paste(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],mean=round(mean(AGE),1))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],median=median(AGE))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],sd=round(sd(AGE),2))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],var=round(var(AGE),2))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=round(IQR(AGE),1))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],q1=round(quantile(AGE)[[2]],1))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],q3=round(quantile(AGE)[[4]],1))[[1]],
sep=", ")
testthat::expect_equal(t24_1,
test_24,
label = "T24.1")
#manual check(s)
#clean up working directory
rm(t24_1)
rm(test_24)
})
#test 25 ----
test_that('T25',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE) %>%
set_format_strings(
'combo' = f_str('xx, xx.xx, xx.xx, xx.xxx, xx.xxx, xx, xx, xx.xx, xx.xx, xx.xx',
n, mean, median, sd, var, min, max, iqr, q1, q3)
)
)
test_25 <- build(t)$var1_Placebo
# output table to check attributes
save(test_25, file = "~/Tplyr/uat/output/test_25.RData")
#clean up working directory
rm(t)
rm(test_25)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_25.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t25_1 <- paste(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],mean=sprintf("%5.2f",round(mean(AGE),2)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],median=sprintf("%5.2f",round(median(AGE),2)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],sd=sprintf("%6.3f",round(sd(AGE),3)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],var=sprintf("%6.3f",round(var(AGE),3)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=sprintf("%5.2f",round(IQR(AGE),1)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],q1=sprintf("%5.2f",round(quantile(AGE)[[2]],2)))[[1]],
summarise(adsl[adsl$TRT01P == 'Placebo',],q3=sprintf("%5.2f",round(quantile(AGE)[[4]],2)))[[1]],
sep=", ")
testthat::expect_equal(t25_1,
test_25,
label = "T25.1")
#manual check(s)
#clean up working directory
rm(t25_1)
rm(test_25)
})
#test 26 ----
test_that('T26',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(advs, TRTA) %>%
add_layer(
group_desc(AVAL, by=PARAMCD) %>%
set_format_strings(
'combo' = f_str('xxxx, a.a+1, xx.a+1, a.a+2, xx.a+2, xxx, a, a.xx, xxx.xx, a.a+1',
n, mean, median, sd, var, min, max, iqr, q1, q3)
)
)
test_26 <- build(t)
# output table to check attributes
save(test_26, file = "~/Tplyr/uat/output/test_26.RData")
#clean up working directory
rm(t)
rm(test_26)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_26.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t26_dat <- mutate(advs, avalc = as.character(AVAL)) %>%
rowwise() %>%
mutate(intlen = nchar(unlist(strsplit(avalc,'\\.'))[[1]])) %>%
mutate(hasdec = as.numeric(grepl('\\.', avalc))) %>%
mutate(declen = ifelse(hasdec > 0, nchar(unlist(strsplit(avalc,'\\.'))[[2]]), 0)) %>%
ungroup() %>%
group_by(PARAMCD) %>%
mutate(intlen = max(intlen, na.rm=TRUE)) %>%
mutate(hasdec = max(hasdec)) %>%
mutate(declen = max(declen))
t26_1 <- unique(t26_dat[,c("PARAMCD","intlen","declen","hasdec")]) %>%
left_join(summarise(t26_dat[t26_dat$TRTA == 'Placebo',], n=n(), mean=mean(AVAL), median=median(AVAL), sd=sd(AVAL),
var=var(AVAL), min=min(AVAL), max=max(AVAL), iqr=IQR(AVAL),
q1=quantile(AVAL)[[2]], q3=quantile(AVAL)[[4]]), by="PARAMCD") %>%
mutate(combo = paste(sprintf("%4s",n),
sprintf("%*s", (intlen + declen + 2),
sprintf("%.*f",declen+1,
round(mean,declen+1)
)[[1]]),
sprintf("%*s", 2 + declen + 2,
sprintf("%.*f",declen+1,
round(median,declen+1)
)[[1]]),
sprintf("%*s", intlen + declen + 3,
sprintf("%.*f",declen+2,
round(sd,declen+2)
)[[1]]),
sprintf("%*s", 2 + declen + 3,
sprintf("%.*f",declen+2,
round(var,declen+2)
)[[1]]),
sprintf("%*s", 3,
sprintf("%.*f",0,
round(min,0)
)[[1]]),
sprintf("%*s", intlen,
sprintf("%.*f",0,
round(max,0)
)[[1]]),
sprintf("%*s", intlen + 3,
sprintf("%.*f",2,
round(iqr,2)
)[[1]]),
sprintf("%*s", 6,
sprintf("%.*f",2,
round(q1,2)
)[[1]]),
sprintf("%*s", intlen + declen + 2,
sprintf("%.*f",declen+1,
round(q3,declen+1)
)[[1]]),
sep = ", "))
testthat::expect_equal(t26_1$combo,
test_26$var1_Placebo,
label = "T26.1")
#manual check(s)
#clean up working directory
rm(t26_dat)
rm(t26_1)
rm(test_26)
})
#test 27 ----
test_that('T27',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE) %>%
set_format_strings(
'combo' = f_str('xx, (xx.x), )xx(), x.xx%%, [xx.xx[], xx, xx, xx.x, {Q1 - xx.x}, Q3 - xx.x',
n, mean, median, sd, var, min, max, iqr, q1, q3)
)
)
test_27 <- build(t)$var1_Placebo
# output table to check attributes
save(test_27, file = "~/Tplyr/uat/output/test_27.RData")
#clean up working directory
rm(t)
rm(test_27)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_27.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t27_1 <- paste0(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
", (",
summarise(adsl[adsl$TRT01P == 'Placebo',],mean=round(mean(AGE),1))[[1]],
"), )",
summarise(adsl[adsl$TRT01P == 'Placebo',],median=median(AGE))[[1]],
"(), ",
summarise(adsl[adsl$TRT01P == 'Placebo',],sd=round(sd(AGE),2))[[1]],
"%%, [",
summarise(adsl[adsl$TRT01P == 'Placebo',],var=round(var(AGE),2))[[1]],
"[], ",
summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
", ",
summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
", ",
summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=round(IQR(AGE),1))[[1]],
", {Q1 - ",
summarise(adsl[adsl$TRT01P == 'Placebo',],q1=round(quantile(AGE)[[2]],1))[[1]],
"}, Q3 - ",
summarise(adsl[adsl$TRT01P == 'Placebo',],q3=round(quantile(AGE)[[4]],1))[[1]]
)
testthat::expect_equal(t27_1,
test_27,
label = "T27.1")
#manual check(s)
#clean up working directory
rm(t27_1)
rm(test_27)
})
#test 28 ----
test_that('T28',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_desc(AGE, by = RACE_FACTOR) %>%
set_format_strings(
'n' = f_str('xx', n, empty = "NA"),
'mean' = f_str('xx.x', mean, empty = "N/A")
)
)
build(t)
test_28 <- build(t)
# output table to check attributes
save(test_28, file = "~/Tplyr/uat/output/test_28.RData")
#clean up working directory
rm(t)
rm(test_28)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_28.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t28_1 <- group_by(adsl, TRT01P, RACE_FACTOR) %>%
summarise(n=n(), mean = round(mean(AGE),1)) %>%
ungroup() %>%
complete(TRT01P, RACE_FACTOR, fill=list(n="NA",mean="N/A")) %>%
filter(TRT01P == "Placebo") %>%
pivot_longer(cols=c(n,mean))
testthat::expect_equal(t28_1$value, trimws(test_28$var1_Placebo),label = "T28.1")
#manual check(s)
#clean up working directory
rm(t28_1)
rm(test_28)
})
#test 29 ----
test_that('T29',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND), where=(ANRIND != "" & BNRIND != "")) %>%
set_format_strings(f_str("xxx (xxx.x%)", n, pct))
)
test_29 <- build(t)
# output table to check attributes
save(test_29, file = "~/Tplyr/uat/output/test_29.RData")
#clean up working directory
rm(t)
rm(test_29)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_29.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t29_totals <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA) %>%
summarise(total=n()) %>%
ungroup() %>%
complete(TRTA, fill=list(total = 0))
t29_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
left_join(t29_totals,by="TRTA") %>%
mutate(pct = ifelse(total == 0, 0, (n / total) * 100)) %>%
mutate(fmtd = paste0(sprintf("%3s",n), ' (', sprintf("%5.1f", pct), '%)')) %>%
select(TRTA, ANRIND, BNRIND, fmtd) %>%
pivot_wider(names_from = c(TRTA, BNRIND), id_cols = ANRIND, values_from = fmtd, names_prefix = 'var1_') %>%
as_tibble()
testthat::expect_equal(t29_1[1:2,2:7],
test_29[1:2,2:7],
label = "T29.1")
#manual check(s)
#clean up working directory
rm(t29_totals)
rm(t29_1)
rm(test_29)
})
#test 30 ----
test_that('T30',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND), by=SEX)
)
build(t)
test_30 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_30, file = "~/Tplyr/uat/output/test_30.RData")
#clean up working directory
rm(t)
rm(test_30)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_30.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t30_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, SEX, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, SEX, ANRIND, BNRIND, fill=list(n = 0))
testthat::expect_equal(t30_1$n,test_30$n,label = "T30.1")
#manual check(s)
#clean up working directory
rm(t30_1)
rm(test_30)
})
#test 31 ----
test_that('T31',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND), by=vars(RACE, SEX))
)
build(t)
test_31 <- get_numeric_data(t)[[1]]
# output table to check attributes
save(test_31, file = "~/Tplyr/uat/output/test_31.RData")
#clean up working directory
rm(t)
rm(test_31)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_31.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t31_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, RACE, SEX, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, RACE, SEX, ANRIND, BNRIND, fill=list(n = 0))
testthat::expect_equal(t31_1$n,test_31$n,label = "T31.1")
#manual check(s)
#clean up working directory
rm(t31_1)
rm(test_31)
})
#test 32 ----
test_that('T32',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
add_layer(
group_shift(vars(row=ANRIND_FACTOR, column=BNRIND_FACTOR), where=(ANRIND != "" & BNRIND != "")) %>%
set_format_strings(f_str("xxx (xxx.x%)", n, pct)) %>%
set_denom_where(TRUE)
)
test_32 <- build(t) %>%
arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_32, file = "~/Tplyr/uat/output/test_32.RData")
#clean up working directory
rm(t)
rm(test_32)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_32.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t32_totals <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2") %>%
group_by(TRTA) %>%
summarise(total=n()) %>%
ungroup() %>%
complete(TRTA, fill=list(total = 0))
t32_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, ANRIND_FACTOR, BNRIND_FACTOR) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND_FACTOR, BNRIND_FACTOR, fill=list(n = 0)) %>%
left_join(t32_totals,by="TRTA") %>%
mutate(pct = ifelse(total == 0, 0, (n / total) * 100)) %>%
mutate(fmtd = paste0(sprintf("%3s",n), ' (', sprintf("%5.1f", pct), '%)')) %>%
select(TRTA, ANRIND_FACTOR, BNRIND_FACTOR, fmtd) %>%
pivot_wider(names_from = c(TRTA, BNRIND_FACTOR), id_cols = ANRIND_FACTOR, values_from = fmtd, names_prefix = 'var1_') %>%
as_tibble()
testthat::expect_equal(t32_1[1:3,2:10],
test_32[1:3,2:10],
label = "T32.1")
#manual check(s)
#clean up working directory
rm(t32_totals)
rm(t32_1)
rm(test_32)
})
#test 33 ----
test_that('T33',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND)) %>%
set_format_strings(f_str("xxx (xxx.x%)",n,pct))
)
test_33 <- build(t)
# output table to check attributes
save(test_33, file = "~/Tplyr/uat/output/test_33.RData")
#clean up working directory
rm(t)
rm(test_33)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_33.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t33_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, BNRIND) %>%
summarise(total=n()) %>%
ungroup() %>%
complete(TRTA, BNRIND, fill=list(total = 0))
t33_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
left_join(t33_tots, by=c("TRTA","BNRIND")) %>%
mutate(pct = ifelse(total > 0, n / total * 100,0)) %>%
mutate(col =paste0(sprintf("%3s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
filter(TRTA == "Placebo" & BNRIND == "N")
testthat::expect_equal(t33_1$col,test_33$var1_Placebo_N,label = "T33.1")
#manual check(s)
#clean up working directory
rm(t33_tots)
rm(t33_1)
rm(test_33)
})
#test 34 ----
test_that('T34',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND)) %>%
set_format_strings(f_str("xxx (xxx.x%)",n,pct))
)
test_34 <- build(t)
# output table to check attributes
save(test_34, file = "~/Tplyr/uat/output/test_34.RData")
#clean up working directory
rm(t)
rm(test_34)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_34.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t34_tots <- filter(adlb) %>%
group_by(TRTA) %>%
summarise(total=n())
t34_1 <- filter(adlb) %>%
group_by(TRTA, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
left_join(t34_tots, by="TRTA") %>%
mutate(pct = n / total * 100) %>%
mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
filter(TRTA == "Placebo" & BNRIND == "N")
testthat::expect_equal(t34_1$col,test_34$var1_Placebo_N,label = "T34.1")
#manual check(s)
#clean up working directory
rm(t34_tots)
rm(t34_1)
rm(test_34)
})
#test 35 ----
test_that('T35',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND)) %>%
set_format_strings(f_str("xxx (xxx.x%)",n,pct))
)
test_35 <- build(t)
# output table to check attributes
save(test_35, file = "~/Tplyr/uat/output/test_35.RData")
#clean up working directory
rm(t)
rm(test_35)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_35.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t35_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA) %>%
summarise(total=n())
t35_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
left_join(t35_tots, by="TRTA") %>%
mutate(pct = n / total * 100) %>%
mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
filter(TRTA == "Placebo" & BNRIND == "N")
testthat::expect_equal(t35_1$col,test_35$var1_Placebo_N,label = "T35.1")
#manual check(s)
#clean up working directory
rm(t35_tots)
rm(t35_1)
rm(test_35)
})
#test 36 ----
test_that('T36',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA) %>%
set_pop_data(adsl) %>%
set_pop_treat_var(TRT01P) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND),
where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
set_format_strings(f_str("xxx (xxx.x%)",n,pct))
)
test_36 <- build(t)
# output table to check attributes
save(test_36, file = "~/Tplyr/uat/output/test_36.RData")
#clean up working directory
rm(t)
rm(test_36)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_36.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t36_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2") %>%
select(TRTA, USUBJID) %>%
distinct(TRTA, USUBJID) %>%
merge(adsl, by.x=c("USUBJID", "TRTA"), by.y=c("USUBJID", "TRT01P"), all.y = FALSE) %>%
group_by(TRTA) %>%
summarise(total=n())
t36_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
group_by(TRTA, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
merge(t36_tots, by.x="TRTA", by.y="TRTA") %>%
mutate(pct = n / total * 100) %>%
mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
filter(TRTA == "Placebo" & BNRIND == "N")
testthat::expect_equal(t36_1$col,test_36$var1_Placebo_N,label = "T36.1")
#manual check(s)
#clean up working directory
rm(t36_tots)
rm(t36_1)
rm(test_36)
})
#test 37 ----
test_that('T37',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA) %>%
add_layer(
group_shift(vars(row=ANRIND, column=BNRIND), by=vars(PARAMCD, AVISIT)) %>%
set_format_strings(f_str("xxx (xxx.x%)",n,pct)) %>%
set_denoms_by(TRTA, PARAMCD, AVISIT)
)
test_37 <- build(t)
# output table to check attributes
save(test_37, file = "~/Tplyr/uat/output/test_37.RData")
#clean up working directory
rm(t)
rm(test_37)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_37.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t37_tots <- filter(adlb) %>%
group_by(TRTA, PARAMCD, AVISIT) %>%
summarise(total=n())
t37_1 <- filter(adlb) %>%
group_by(TRTA, PARAMCD, AVISIT, ANRIND, BNRIND) %>%
summarise(n=n()) %>%
ungroup() %>%
complete(TRTA, PARAMCD, AVISIT, ANRIND, BNRIND, fill=list(n = 0)) %>%
left_join(t37_tots, by=c("TRTA", "PARAMCD", "AVISIT")) %>%
mutate(pct = n / total * 100) %>%
mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
filter(TRTA == "Placebo" & BNRIND == "N")
testthat::expect_equal(t37_1$col,test_37$var1_Placebo_N,label = "T37.1")
#manual check(s)
#clean up working directory
rm(t37_tots)
rm(t37_1)
rm(test_37)
})
#test 38 ----
test_that('T38',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(RACE, by = "Race")
)
test_38 <- build(t)
# output table to check attributes
save(test_38, file = "~/Tplyr/uat/output/test_38.RData")
#clean up working directory
rm(t)
rm(test_38)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_38.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
testthat::expect_equal(replicate(n = length(unique(adsl$RACE)), "Race", simplify = TRUE ),
test_38$row_label1,
label = "T38.1")
#manual check(s)
#clean up working directory
rm(test_38)
})
#test 39 ----
test_that('T39',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(RACE, by = vars("Ethnicity", ETHNIC, "Race"))
)
test_39 <- build(t)
# output table to check attributes
save(test_39, file = "~/Tplyr/uat/output/test_39.RData")
#clean up working directory
rm(t)
rm(test_39)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_39.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t39_1 <- distinct(adsl, ETHNIC, RACE) %>%
complete(ETHNIC, RACE) %>%
mutate(ethnic_text = "Ethnicity") %>%
mutate(race_text = "Race")
testthat::expect_equal(c(t39_1$ethnic_text, t39_1$ETHNIC, t39_1$race_text, t39_1$RACE),
c(test_39$row_label1, test_39$row_label2, test_39$row_label3, test_39$row_label4),
label = "T39.1")
#manual check(s)
#clean up working directory
rm(t39_1)
rm(test_39)
})
#test 40 ----
test_that('T40',{
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
add_layer(
group_count(RACE)
) %>%
add_layer(
group_desc(AGE)
) %>%
add_layer(
group_desc(CUMDOSE)
) %>%
add_layer(
group_count(ETHNIC)
)
test_40 <- build(t)
# output table to check attributes
save(test_40, file = "~/Tplyr/uat/output/test_40.RData")
#clean up working directory
rm(t)
rm(test_40)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_40.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
t40_denoms <- filter(adsl, TRT01P == "Placebo") %>%
group_by(TRT01P) %>%
summarise(total = n())
t40_race <- group_by(adsl, TRT01P, RACE) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, RACE, fill=list(n = 0)) %>%
filter(TRT01P == "Placebo") %>%
left_join(t40_denoms, by="TRT01P") %>%
mutate(pct = n / total *100) %>%
mutate(col = paste0(sprintf("%2s", n)," (",sprintf("%5.1f",pct),"%)")) %>%
mutate(label = RACE) %>%
select(label, col)
t40_ethnic <- group_by(adsl, TRT01P, ETHNIC) %>%
summarise(n = n()) %>%
ungroup() %>%
complete(TRT01P, ETHNIC, fill=list(n = 0)) %>%
filter(TRT01P == "Placebo") %>%
left_join(t40_denoms, by="TRT01P") %>%
mutate(pct = n / total *100) %>%
mutate(col = paste0(sprintf("%2s", n)," (",sprintf("%5.1f",pct),"%)")) %>%
mutate(label = ETHNIC) %>%
select(label, col)
t40_age <- filter(adsl, TRT01P == "Placebo") %>%
summarise(n=n(),
mean=mean(AGE),
median=median(AGE),
sd=sd(AGE),
min=min(AGE),
max=max(AGE),
q1=quantile(AGE)[[2]],
q3=quantile(AGE)[[4]]) %>%
mutate(col_n = sprintf("%3s", n)) %>%
mutate(col_meansd = paste0(sprintf("%4.1f", mean)," (",sprintf("%5.2f", sd),")")) %>%
mutate(col_median = sprintf("%4.1f", median)) %>%
mutate(col_q1q3 = paste0(sprintf("%4.1f", q1),", ",sprintf("%4.1f", q3))) %>%
mutate(col_minmax = paste0(sprintf("%2.0f", min),", ",sprintf("%2.0f", max))) %>%
pivot_longer(cols = c(col_n,col_meansd,col_median,col_q1q3,col_minmax),
names_to = "label", values_to = "col") %>%
select(label, col)
t40_cumdose <- filter(adsl, TRT01P == "Placebo") %>%
summarise(n=n(),
mean=mean(CUMDOSE),
median=median(CUMDOSE),
sd=sd(CUMDOSE),
min=min(CUMDOSE),
max=max(CUMDOSE),
q1=quantile(CUMDOSE)[[2]],
q3=quantile(CUMDOSE)[[4]]) %>%
mutate(col_n = sprintf("%3s", n)) %>%
mutate(col_meansd = paste0(sprintf("%7.1f", mean)," (",sprintf("%8.2f", sd),")")) %>%
mutate(col_median = sprintf("%7.1f", median)) %>%
mutate(col_q1q3 = paste0(sprintf("%7.1f", q1),", ",sprintf("%7.1f", q3))) %>%
mutate(col_minmax = paste0(sprintf("%5.0f", min),", ",sprintf("%5.0f", max))) %>%
pivot_longer(cols = c(col_n,col_meansd,col_median,col_q1q3,col_minmax),
names_to = "label", values_to = "col") %>%
select(label, col)
t40_1 <- rbind(t40_race, t40_age, t40_cumdose, t40_ethnic)
testthat::expect_equal(t40_1$col, filter(test_40, row_label1 != 'Missing')$var1_Placebo,label = "T40.1")
#manual check(s)
#clean up working directory
rm(t40_denoms)
rm(t40_race)
rm(t40_ethnic)
rm(t40_age)
rm(t40_cumdose)
rm(t40_1)
rm(test_40)
})
#test 41 ----
test_that('T41',{
# T41: count-layer ordering "byfactor" -- the built table's row order and
# ord_layer_1 values should follow RACE_FACTOR's factor levels, not
# alphabetical order.
# `vur` appears to be a validation-responses object defined upstream (confirm);
# when NULL the test runs in generation mode: build and save outputs, skip checks.
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(RACE_FACTOR) %>%
      set_order_count_method("byfactor")
  )
test_41 <- build(t) %>%
  arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_41, file = "~/Tplyr/uat/output/test_41.RData")
#clean up working directory
rm(t)
rm(test_41)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_41.RData")
}
#perform checks
# checks are skipped on the generation pass (vur is NULL)
skip_if(is.null(vur))
#programmatic check(s)
# expected labels in factor-level order, with ord_layer_1 sorters 1..4
testthat::expect_equal(c(c("WHITE", "BLACK OR AFRICAN AMERICAN","AMERICAN INDIAN OR ALASKA NATIVE", "ASIAN"),
                         c(1, 2, 3, 4)),
                       c(test_41$row_label1, test_41$ord_layer_1),
                       label = "T41.1")
#manual check(s)
#clean up working directory
rm(test_41)
})
#test 42 ----
test_that('T42',{
# T42: count-layer ordering "bycount" with set_ordering_cols -- rows should be
# ordered by descending count within the "Xanomeline High Dose" column.
# vur == NULL => generation mode (build/save, checks skipped); otherwise reload
# the saved output and validate it against an independent dplyr reference.
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(RACE) %>%
      set_order_count_method("bycount") %>%
      set_ordering_cols("Xanomeline High Dose")
  )
test_42 <- build(t) %>%
  arrange(ord_layer_index, desc(ord_layer_1))
# output table to check attributes
save(test_42, file = "~/Tplyr/uat/output/test_42.RData")
#clean up working directory
rm(t)
rm(test_42)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_42.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# reference: RACE counts in the high-dose arm (complete() fills absent
# combinations with 0), sorted descending by count
t42_1 <- group_by(adsl, TRT01P, RACE) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE, fill = list(n=0)) %>%
  filter(TRT01P == "Xanomeline High Dose") %>%
  arrange(desc(n))
testthat::expect_equal(c(t42_1$RACE, t42_1$n),
                       c(test_42$row_label1, test_42$ord_layer_1),
                       label = "T42.1")
#manual check(s)
#clean up working directory
rm(t42_1)
rm(test_42)
})
#test 43 ----
test_that('T43',{
# T43: default ordering of a plain count layer -- row labels should be the
# distinct RACE values in alphabetical order.
if (is.null(vur)) {
  # Generation mode: build the Tplyr output and persist it for validation.
  # Inputs, when needed, come from "~/uat/input"; outputs go to "~/uat/output".
  race_table <- tplyr_table(adsl, TRT01P) %>%
    add_layer(group_count(RACE))
  test_43 <- race_table %>%
    build() %>%
    arrange(ord_layer_index, row_label1)
  # persist the built table so a validated run can re-check it
  save(test_43, file = "~/Tplyr/uat/output/test_43.RData")
  # tidy up the working environment
  rm(race_table)
  rm(test_43)
} else {
  # Validated-run mode: reload the previously generated output.
  load("~/Tplyr/uat/output/test_43.RData")
}
# Checks only run on a validated pass; the generation pass stops here.
skip_if(is.null(vur))
# The built row labels should equal the sorted distinct RACE values.
expected_races <- sort(unique(adsl$RACE))
testthat::expect_equal(expected_races,
                       test_43$row_label1,
                       label = "T43.1")
# clean up the working environment
rm(expected_races)
rm(test_43)
})
#test 44 ----
test_that('T44',{
# T44: count-layer ordering "byvarn" -- rows should be ordered by the numeric
# companion variable RACEN, and ord_layer_1 should carry those RACEN values.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(RACE) %>%
      set_order_count_method("byvarn")
  )
test_44 <- build(t) %>%
  arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_44, file = "~/Tplyr/uat/output/test_44.RData")
#clean up working directory
rm(t)
rm(test_44)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_44.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# reference: the RACE/RACEN lookup sorted by RACEN
t44_1 <- distinct(adsl, RACE, RACEN) %>%
  arrange(RACEN)
testthat::expect_equal(c(t44_1$RACE, t44_1$RACEN),
                       c(test_44$row_label1, test_44$ord_layer_1),
                       label = "T44.1")
#manual check(s)
#clean up working directory
rm(t44_1)
rm(test_44)
})
#test 45 ----
test_that('T45',{
# T45: `by` grouping on count layers -- two ETHNIC layers grouped by RACE and
# by SEX respectively; row labels and ord_layer_1 must reflect each layer's
# by-variable sort (RACE via RACEN, SEX with F before M).
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(ETHNIC, by=RACE)
  ) %>%
  add_layer(
    group_count(ETHNIC, by=SEX)
  )
test_45 <- build(t) %>%
  arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_45, file = "~/Tplyr/uat/output/test_45.RData")
#clean up working directory
rm(t)
rm(test_45)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_45.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# lookup tables assigning a numeric sorter per by-value
t45_racesort <- distinct(adsl, RACE, RACEN) %>%
  mutate(sorter = as.numeric(RACEN)) %>%
  select(RACE,sorter)
t45_sexsort <- distinct(adsl, SEX) %>%
  mutate(sorter = ifelse(SEX == 'F',1,2)) %>%
  select(SEX,sorter)
# layer 1 reference: ETHNIC counts by RACE, restricted to Placebo;
# complete() fills missing combinations with 0 so every row is present
t45_byrace <- group_by(adsl, TRT01P, RACE, ETHNIC) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE, ETHNIC ,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = RACE) %>%
  left_join(t45_racesort, by="RACE") %>%
  select(label, ETHNIC, sorter) %>%
  mutate(ord_layer = 1)
# layer 2 reference: ETHNIC counts by SEX, same treatment restriction
t45_bysex <- group_by(adsl, TRT01P, SEX, ETHNIC) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, SEX, ETHNIC ,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = SEX) %>%
  left_join(t45_sexsort, by="SEX") %>%
  select(label, ETHNIC, sorter) %>%
  mutate(ord_layer = 2)
# stack layers in the same (layer, sorter) order used when building test_45
t45_1 <- rbind(t45_byrace, t45_bysex)%>%
  arrange(ord_layer, sorter)
testthat::expect_equal(c(t45_1$label, t45_1$sorter),
                       c(test_45$row_label1, test_45$ord_layer_1),
                       label = "T45.1")
#manual check(s)
#clean up working directory
rm(t45_racesort)
rm(t45_sexsort)
rm(t45_byrace)
rm(t45_bysex)
rm(t45_1)
rm(test_45)
})
#test 46 ----
test_that('T46',{
# T46: nested count layer (AEBODSYS > AEDECOD) ordered "bycount" on the
# "Xanomeline High Dose" column -- body systems sort by their total count,
# preferred terms sort by count within each body system.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
  add_layer(
    group_count(vars(AEBODSYS, AEDECOD), where = (AOCC01FL == 'Y')) %>%
      set_order_count_method("bycount") %>%
      set_ordering_cols("Xanomeline High Dose")
  )
test_46 <- build(t) %>%
  arrange(ord_layer_index, desc(ord_layer_1), row_label1, desc(ord_layer_2), row_label2)
# output table to check attributes
save(test_46, file = "~/Tplyr/uat/output/test_46.RData")
#clean up working directory
rm(t)
rm(test_46)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_46.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# body-system totals; AEDECOD is set to AEBODSYS so these rows play the role
# of the summary row for each body system
t46_aebodsys <- filter(adae, AOCC01FL == 'Y') %>%
  group_by(TRTA, AEBODSYS) %>%
  summarise(n=n()) %>%
  ungroup() %>%
  complete(TRTA, AEBODSYS, fill = list(n=0)) %>%
  mutate(total = n) %>%
  mutate(AEDECOD = AEBODSYS)
t46_1 <- filter(adae, AOCC01FL == 'Y') %>%
  group_by(TRTA, AEBODSYS, AEDECOD) %>%
  summarise(n=n()) %>%
  ungroup() %>%
  complete(TRTA, AEBODSYS, AEDECOD, fill = list(n=0)) %>%
  left_join(select(t46_aebodsys, TRTA, AEBODSYS, total), by=c("TRTA","AEBODSYS")) %>%
  # n=Inf forces each body-system summary row to sort above its preferred
  # terms when arranging by desc(n_...)
  rbind(mutate(t46_aebodsys, n=Inf)) %>%
  pivot_wider(values_from=c(n,total), names_from = TRTA) %>%
  arrange(desc(`total_Xanomeline High Dose`), AEBODSYS, desc(`n_Xanomeline High Dose`), AEDECOD) %>%
  # drop term rows with zero events in every arm (they are not built by Tplyr)
  filter(n_Placebo > 0 | `n_Xanomeline Low Dose` > 0 | `n_Xanomeline High Dose` > 0) %>%
  # preferred terms are indented one space under their body system
  mutate(AEDECOD = ifelse(AEBODSYS == AEDECOD, AEDECOD, paste0(' ',AEDECOD)))
testthat::expect_equal(c(t46_1$AEBODSYS, t46_1$AEDECOD, t46_1$`total_Xanomeline High Dose`, t46_1$`n_Xanomeline High Dose`),
                       c(test_46$row_label1, test_46$row_label2, test_46$ord_layer_1, test_46$ord_layer_2),
                       label = "T46.1")
#manual check(s)
#clean up working directory
rm(t46_aebodsys)
rm(t46_1)
rm(test_46)
})
#test 47 ----
test_that('T47',{
# T47: all four count ordering methods in one table -- byvarn (RACE/RACEN),
# bycount (ETHNIC on the high-dose column), byfactor (SEX, default factor
# order F then M) and byfactor on an explicit factor (RACE_FACTOR).
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(RACE) %>%
      set_order_count_method("byvarn")
  ) %>%
  add_layer(
    group_count(ETHNIC) %>%
      set_order_count_method("bycount") %>%
      set_ordering_cols("Xanomeline High Dose")
  ) %>%
  add_layer(
    group_count(SEX) %>%
      set_order_count_method("byfactor")
  ) %>%
  add_layer(
    group_count(RACE_FACTOR) %>%
      set_order_count_method("byfactor")
  )
test_47 <- build(t) %>%
  arrange(ord_layer_index, ord_layer_1)
# output table to check attributes
save(test_47, file = "~/Tplyr/uat/output/test_47.RData")
#clean up working directory
rm(t)
rm(test_47)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_47.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# one sorter lookup per layer, mirroring each ordering method
t47_racesort <- distinct(adsl, RACE, RACEN) %>%
  mutate(sorter = as.numeric(RACEN)) %>%
  select(RACE,sorter)
# bycount: the sorter for ETHNIC is its count in the high-dose arm
t47_ethnicsort <- filter(adsl, TRT01P == "Xanomeline High Dose")%>%
  group_by(ETHNIC) %>%
  summarise(sorter = n()) %>%
  select(ETHNIC,sorter)
t47_sexsort <- distinct(adsl, SEX) %>%
  mutate(sorter = ifelse(SEX == 'F',1,2)) %>%
  select(SEX,sorter)
# byfactor: sorters 1..4 follow RACE_FACTOR's level order
t47_racefactorsort <- distinct(adsl, RACE_FACTOR) %>%
  complete(RACE_FACTOR) %>%
  cbind(sorter = c(1,2,3,4))
# per-layer references, each restricted to Placebo and tagged with ord_layer
t47_race <- group_by(adsl, TRT01P, RACE) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE ,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = RACE) %>%
  left_join(t47_racesort, by="RACE") %>%
  select(label, sorter) %>%
  mutate(ord_layer = 1)
t47_ethnic <- group_by(adsl, TRT01P, ETHNIC) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, ETHNIC,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = ETHNIC) %>%
  left_join(t47_ethnicsort, by="ETHNIC") %>%
  select(label, sorter) %>%
  mutate(ord_layer = 2)
t47_sex <- group_by(adsl, TRT01P, SEX) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, SEX,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = SEX) %>%
  left_join(t47_sexsort, by="SEX") %>%
  select(label, sorter) %>%
  mutate(ord_layer = 3)
t47_racefactor <- group_by(adsl, TRT01P, RACE_FACTOR) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE_FACTOR ,fill = list(n=0)) %>%
  filter(TRT01P == "Placebo") %>%
  mutate(label = RACE_FACTOR) %>%
  left_join(t47_racefactorsort, by="RACE_FACTOR") %>%
  select(label, sorter) %>%
  mutate(ord_layer = 4)
# stack in the same (layer, sorter) order used when building test_47
t47_1 <- rbind(t47_race, t47_ethnic, t47_sex, t47_racefactor)%>%
  arrange(ord_layer, sorter)
testthat::expect_equal(c(t47_1$label, t47_1$sorter),
                       c(test_47$row_label1, test_47$ord_layer_1),
                       label = "T47.1")
#manual check(s)
#clean up working directory
rm(t47_racesort)
rm(t47_ethnicsort)
rm(t47_sexsort)
rm(t47_racefactorsort)
rm(t47_race)
rm(t47_ethnic)
rm(t47_sex)
rm(t47_racefactor)
rm(t47_1)
rm(test_47)
})
#test 48 ----
test_that('T48',{
# T48: count-layer format precedence -- a table-level default set through
# set_count_layer_formats() applies to the RACE layer, while a layer-level
# set_format_strings() overrides it for the SEX layer.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  set_count_layer_formats(n_counts = f_str('xxxx (xxx.x%)',n,pct)) %>%
  add_layer(
    group_count(RACE)
  ) %>%
  add_layer(
    group_count(SEX) %>%
      set_format_strings(n_counts = f_str('[xxx]',n))
  )
test_48 <- build(t)
# output table to check attributes
save(test_48, file = "~/Tplyr/uat/output/test_48.RData")
#clean up working directory
rm(t)
rm(test_48)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_48.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# per-arm denominators for the percentage
t48_tots <- group_by(adsl, TRT01P) %>%
  summarise(total = n())
# layer 1 reference: "xxxx (xxx.x%)" => "%4s" n and "%5.1f" pct
t48_1 <- group_by(adsl, TRT01P, RACE) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE, fill = list(n=0)) %>%
  left_join(t48_tots, by="TRT01P") %>%
  mutate(pct = n / total * 100) %>%
  mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%5.1f", pct),'%)')) %>%
  select(col, TRT01P, RACE) %>%
  pivot_wider(values_from = col, names_from = TRT01P)
# layer 2 reference: overridden format "[xxx]" => "%3s" n only
t48_2 <- group_by(adsl, TRT01P, SEX) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, SEX, fill = list(n=0)) %>%
  mutate(col = paste0('[',sprintf("%3s",n),']')) %>%
  select(col, TRT01P, SEX) %>%
  pivot_wider(values_from = col, names_from = TRT01P)
testthat::expect_equal(t48_1$Placebo,
                       filter(test_48, ord_layer_index == 1)$var1_Placebo,
                       label = "T48.1")
testthat::expect_equal(t48_2$Placebo,
                       filter(test_48, ord_layer_index == 2)$var1_Placebo,
                       label = "T48.2")
#manual check(s)
#clean up working directory
rm(t48_tots)
rm(t48_1)
rm(t48_2)
rm(test_48)
})
#test 49 ----
test_that('T49',{
# T49: descriptive-layer format precedence -- table-level defaults from
# set_desc_layer_formats() apply to the CUMDOSE layer; the AGE layer overrides
# them with its own set_format_strings().
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adsl, TRT01P) %>%
  set_desc_layer_formats(meansd = f_str('xxx.x (xxx.xx)',mean, sd),
                         quartiles = f_str('xxx.x (xxx.x, xxx.x)',iqr, q1, q3)
                         )%>%
  add_layer(
    group_desc(CUMDOSE)
  ) %>%
  add_layer(
    group_desc(AGE) %>%
      set_format_strings(
        n = f_str('xxx',n),
        meansdvar = f_str('xxx.x (xxx.xx) [xxx.xx]',mean, sd, var),
        medianquarts = f_str('xxx.x (xxx.x, xxx.x)',median, q1, q3)
      )
  )
test_49 <- build(t)
# output table to check attributes
save(test_49, file = "~/Tplyr/uat/output/test_49.RData")
#clean up working directory
rm(t)
rm(test_49)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_49.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# layer 1 reference (table defaults): note the default "quartiles" row is
# "iqr (q1, q3)" per the f_str above
t49_1 <- group_by(adsl, TRT01P) %>%
  summarise(mean = mean(CUMDOSE),
            sd = sd(CUMDOSE),
            iqr = IQR(CUMDOSE),
            q1 = quantile(CUMDOSE)[[2]],
            q3 = quantile(CUMDOSE)[[4]]
            ) %>%
  mutate(meansd = paste0(sprintf("%5.1f", mean), ' (',sprintf("%6.2f", sd), ')'))%>%
  mutate(quartiles = paste0(sprintf("%5.1f", iqr), ' (',sprintf("%6.1f", q1),', ',sprintf("%6.1f", q3), ')')) %>%
  pivot_longer(cols = c(meansd, quartiles), values_to = "stat") %>%
  select(TRT01P, name, stat) %>%
  pivot_wider(values_from = stat, names_from = TRT01P)
# layer 2 reference (layer overrides): n / mean-sd-var / median-quartiles rows
t49_2 <- group_by(adsl, TRT01P) %>%
  summarise(n = n(),
            mean = mean(AGE),
            sd = sd(AGE),
            var = var(AGE),
            med = median(AGE),
            q1 = quantile(AGE)[[2]],
            q3 = quantile(AGE)[[4]]
            ) %>%
  mutate(n = sprintf("%3s", n)) %>%
  mutate(meansdvar = paste0(sprintf("%5.1f", mean), ' (',sprintf("%6.2f", sd), ') [',sprintf("%6.2f",var),']')) %>%
  mutate(medianquarts = paste0(sprintf("%5.1f", med), ' (',sprintf("%5.1f", q1),', ',sprintf("%5.1f", q3), ')')) %>%
  pivot_longer(cols = c(n, meansdvar, medianquarts), values_to = "stat") %>%
  select(TRT01P, name, stat) %>%
  pivot_wider(values_from = stat, names_from = TRT01P)
testthat::expect_equal(t49_1$`Xanomeline High Dose`,
                       filter(test_49, ord_layer_index == 1)$`var1_Xanomeline High Dose`,
                       label = "T49.1")
testthat::expect_equal(t49_2$Placebo,
                       filter(test_49, ord_layer_index == 2)$var1_Placebo,
                       label = "T49.2")
#manual check(s)
#clean up working directory
rm(t49_1)
rm(t49_2)
rm(test_49)
})
#test 50 ----
test_that('T50',{
# T50: shift-layer format precedence -- a table-level default from
# set_shift_layer_formats() applies to the first shift layer; the second layer
# overrides it with set_format_strings(). Rows are ANRIND, columns BNRIND.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
  set_shift_layer_formats(f_str('xxxx (xxx.x%)',n,pct)) %>%
  add_layer(
    group_shift(vars(row=ANRIND, column=BNRIND))
  ) %>%
  add_layer(
    group_shift(vars(row=ANRIND, column=BNRIND)) %>%
      set_format_strings(f_str("xxx",n))
  )
test_50 <- build(t)
# output table to check attributes
save(test_50, file = "~/Tplyr/uat/output/test_50.RData")
#clean up working directory
rm(t)
rm(test_50)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_50.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# shift denominators: counts per treatment x baseline-reference-indicator cell
t50_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
  group_by(TRTA, BNRIND) %>%
  summarise(total = n()) %>%
  ungroup() %>%
  complete(TRTA, BNRIND, fill = list(total = 0))
# layer 1 reference: default "xxxx (xxx.x%)" format; the ifelse guards against
# division by zero when a completed cell has no denominator
t50_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
  group_by(TRTA, BNRIND, ANRIND) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
  left_join(t50_tots, by=c("TRTA", "BNRIND")) %>%
  mutate(pct = ifelse(total > 0, n / total * 100, 0)) %>%
  mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%5.1f", pct),'%)')) %>%
  select(col, TRTA, BNRIND, ANRIND) %>%
  pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
# layer 2 reference: overridden count-only "xxx" format
t50_2 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
  group_by(TRTA, BNRIND, ANRIND) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
  mutate(col = sprintf("%3s",n)) %>%
  select(col, TRTA, BNRIND, ANRIND) %>%
  pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
testthat::expect_equal(c(t50_1$Placebo_H,t50_1$Placebo_N),
                       c(filter(test_50, ord_layer_index == 1)$var1_Placebo_H,
                         filter(test_50, ord_layer_index == 1)$var1_Placebo_N),
                       label = "T50.1")
testthat::expect_equal(c(t50_2$Placebo_H,t50_2$Placebo_N),
                       c(filter(test_50, ord_layer_index == 2)$var1_Placebo_H,
                         filter(test_50, ord_layer_index == 2)$var1_Placebo_N),
                       label = "T50.2")
#manual check(s)
#clean up working directory
rm(t50_tots)
rm(t50_1)
rm(t50_2)
rm(test_50)
})
#test 51 ----
test_that('T51',{
# T51: the 'tplyr.count_layer_default_formats' option should drive the default
# n/pct format of a count layer when no explicit format is set.
if (is.null(vur)) {
  # Generation mode: set the global default count format, build, and persist.
  options('tplyr.count_layer_default_formats' = list(
    'n_counts' = f_str('xxxx [xxx.xx%]', n, pct)
  ))
  tab <- tplyr_table(adsl, TRT01P) %>%
    add_layer(group_count(RACE))
  test_51 <- build(tab)
  save(test_51, file = "~/Tplyr/uat/output/test_51.RData")
  # restore the option state captured upstream in `opts`
  options(opts)
  rm(tab)
  rm(test_51)
} else {
  # Validated-run mode: reload the stored output.
  load("~/Tplyr/uat/output/test_51.RData")
}
# Checks only run on a validated pass.
skip_if(is.null(vur))
# Independent reference: counts with percentages formatted as "xxxx [xxx.xx%]".
t51_tots <- adsl %>%
  group_by(TRT01P) %>%
  summarise(total = n())
t51_1 <- adsl %>%
  group_by(TRT01P, RACE) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRT01P, RACE, fill = list(n=0)) %>%
  left_join(t51_tots, by="TRT01P") %>%
  mutate(pct = n / total * 100) %>%
  mutate(col = paste0(sprintf("%4s",n),' [',sprintf("%6.2f", pct),'%]')) %>%
  select(col, TRT01P, RACE) %>%
  pivot_wider(values_from = col, names_from = TRT01P)
testthat::expect_equal(t51_1$Placebo,
                       test_51$var1_Placebo,
                       label = "T51.1")
# clean up the working environment
rm(t51_tots)
rm(t51_1)
rm(test_51)
})
#test 52 ----
test_that('T52',{
# T52: the 'tplyr.desc_layer_default_formats' option should drive the default
# summary rows (mean [sd] and q1, median, q3) of a desc layer.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.desc_layer_default_formats' = list(
  'meansd' = f_str('xxx.x [xxx.xx]', mean, sd),
  'medquarts' = f_str('xxx.x, xxx.x, xxx.x', q1, median, q3)
))
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_desc(AGE)
  )
test_52 <- build(t)
# output table to check attributes
save(test_52, file = "~/Tplyr/uat/output/test_52.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(t)
rm(test_52)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_52.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# reference rows formatted exactly like the option's f_str templates
t52_1 <- group_by(adsl, TRT01P) %>%
  summarise(mean = mean(AGE),
            sd = sd(AGE),
            med = median(AGE),
            q1 = quantile(AGE)[[2]],
            q3 = quantile(AGE)[[4]]
            ) %>%
  mutate(meansd = paste0(sprintf("%5.1f", mean), ' [',sprintf("%6.2f", sd), ']'))%>%
  mutate(quartiles = paste0(sprintf("%5.1f", q1), ', ',sprintf("%5.1f", med),', ',sprintf("%5.1f", q3))) %>%
  pivot_longer(cols = c(meansd, quartiles), values_to = "stat") %>%
  select(TRT01P, name, stat) %>%
  pivot_wider(values_from = stat, names_from = TRT01P)
testthat::expect_equal(t52_1$Placebo,
                       test_52$var1_Placebo,
                       label = "T52.1")
#manual check(s)
#clean up working directory
rm(t52_1)
rm(test_52)
})
#test 53 ----
test_that('T53',{
# T53: the 'tplyr.shift_layer_default_formats' option should drive the default
# n/pct format of a shift layer.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.shift_layer_default_formats' = list(
  f_str('xxxx (xxx.xx%)', n, pct)
))
t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
  add_layer(
    group_shift(vars(row=ANRIND, column=BNRIND))
  )
test_53 <- build(t)
# output table to check attributes
save(test_53, file = "~/Tplyr/uat/output/test_53.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(t)
rm(test_53)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_53.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# shift denominators per treatment x baseline indicator
t53_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
  group_by(TRTA, BNRIND) %>%
  summarise(total = n()) %>%
  ungroup() %>%
  complete(TRTA, BNRIND, fill = list(total = 0))
# reference cells in "xxxx (xxx.xx%)"; ifelse guards zero denominators
t53_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
  group_by(TRTA, BNRIND, ANRIND) %>%
  summarise(n = n()) %>%
  ungroup() %>%
  complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
  left_join(t53_tots, by=c("TRTA", "BNRIND")) %>%
  mutate(pct = ifelse(total > 0, n / total * 100, 0)) %>%
  mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%6.2f", pct),'%)')) %>%
  select(col, TRTA, BNRIND, ANRIND) %>%
  pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
testthat::expect_equal(c(t53_1$Placebo_H,t53_1$Placebo_N),
                       c(test_53$var1_Placebo_H, test_53$var1_Placebo_N),
                       label = "T53.1")
#manual check(s)
#clean up working directory
rm(t53_tots)
rm(t53_1)
rm(test_53)
})
#test 54 ----
test_that('T54',{
# T54: the 'tplyr.precision_cap' option should cap the auto-detected precision
# of a desc layer at 5 integer and 2 decimal places for AVAL (BUN records).
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.precision_cap' = c('int'=5, 'dec'=2))
t <- tplyr_table(adlb, TRTA, where=PARAMCD == "BUN") %>%
  add_layer(
    group_desc(AVAL)
  )
test_54 <- filter(build(t), row_label1 != 'Missing')
# output table to check attributes
save(test_54, file = "~/Tplyr/uat/output/test_54.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(t)
rm(test_54)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_54.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# Derive the capped auto-precision from the data itself.
t54_aval <- as.character(filter(adlb, PARAMCD == "BUN")$AVAL)
# integer width: longest digit run before the decimal point, capped at 5
t54_int <- min(5, max(nchar(sub("\\..*", "", t54_aval))))
# decimal width: longest digit run after the decimal point, capped at 2.
# Fix: the original pattern "*.\\." never matched anything (a leading '*' is a
# literal asterisk in POSIX/TRE regex), so nchar() ran over the full value;
# values without a decimal point must also contribute 0, not their full width.
t54_dec <- min(2, max(ifelse(grepl("\\.", t54_aval),
                             nchar(sub(".*\\.", "", t54_aval)),
                             0)))
# reference summary rows, padded/rounded with the capped widths above
# ("%*s" takes the width as an argument; "%.*f" takes the decimal count)
t54_1 <- filter(adlb, PARAMCD == "BUN") %>%
  group_by(TRTA) %>%
  summarise(n = n(),
            mean = mean(AVAL),
            sd = sd(AVAL),
            median = median(AVAL),
            q1 = quantile(AVAL)[[2]],
            q3 = quantile(AVAL)[[4]],
            min = min(AVAL),
            max = max(AVAL)
            ) %>%
  mutate(n = sprintf("%*s",t54_int,n)) %>%
  mutate(meansd = paste0(sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, mean)), ' (',
                         sprintf("%*s",t54_int + t54_dec + 3, sprintf("%.*f", t54_dec + 2, sd)), ')')) %>%
  mutate(median = sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, median))) %>%
  mutate(quartiles = paste0(sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, q1)),', ',
                            sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, q3)))) %>%
  mutate(minmax = paste0(sprintf("%*s",t54_int + t54_dec + 1, sprintf("%.*f", t54_dec, min)),', ',
                         sprintf("%*s",t54_int + t54_dec + 1, sprintf("%.*f", t54_dec, max)))) %>%
  pivot_longer(cols = c(n, meansd, median, quartiles, minmax), values_to = "stat") %>%
  select(TRTA, name, stat) %>%
  pivot_wider(values_from = stat, names_from = TRTA)
testthat::expect_equal(t54_1$Placebo,
                       test_54$var1_Placebo,
                       label = "T54.1")
#manual check(s)
#clean up working directory
rm(t54_aval)
rm(t54_int)
rm(t54_dec)
rm(t54_1)
rm(test_54)
})
#test 55 ----
test_that('T55',{
# T55: a custom summary ('geometric_mean') registered through the
# 'tplyr.custom_summaries' option should be usable from a desc-layer f_str.
if (is.null(vur)) {
  # Generation mode: register the custom summary, build, and persist.
  options('tplyr.custom_summaries' = quos(geometric_mean = exp(sum(log(.var[.var > 0]), na.rm=TRUE) / length(.var))))
  tab <- tplyr_table(adsl, TRT01P) %>%
    add_layer(
      group_desc(AGE) %>%
        set_format_strings(
          'Geometric Mean' = f_str('xxx.xx', geometric_mean)
        )
    )
  test_55 <- filter(build(tab), row_label1 != 'Missing')
  save(test_55, file = "~/Tplyr/uat/output/test_55.RData")
  # restore the option state captured upstream in `opts`
  options(opts)
  rm(tab)
  rm(test_55)
} else {
  # Validated-run mode: reload the stored output.
  load("~/Tplyr/uat/output/test_55.RData")
}
# Checks only run on a validated pass.
skip_if(is.null(vur))
# Reference: the same geometric-mean expression evaluated per treatment arm,
# formatted "%6.2f" to match the 'xxx.xx' template.
t55_1 <- adsl %>%
  group_by(TRT01P) %>%
  summarise(geometric_mean = exp(sum(log(AGE[AGE > 0]), na.rm=TRUE) / length(AGE))) %>%
  mutate(geometric_mean = sprintf("%6.2f",geometric_mean)) %>%
  pivot_wider(values_from = "geometric_mean",names_from = "TRT01P")
testthat::expect_equal(c(t55_1$Placebo, t55_1$`Xanomeline Low Dose`, t55_1$`Xanomeline High Dose`),
                       c(test_55$var1_Placebo, test_55$`var1_Xanomeline Low Dose`, test_55$`var1_Xanomeline High Dose`),
                       label = "T55.1")
# clean up the working environment
rm(t55_1)
rm(test_55)
})
#test 56 ----
test_that('T56',{
# T56: the 'tplyr.scipen' option should control numeric formatting of the
# risk-difference column added via add_risk_diff().
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.scipen' = -3)
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_count(RACE) %>%
      add_risk_diff(c("Xanomeline High Dose", "Placebo"))
  )
# suppressWarnings: prop.test can warn on small expected counts
test_56 <- suppressWarnings(build(t))
# output table to check attributes
save(test_56, file = "~/Tplyr/uat/output/test_56.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(t)
rm(test_56)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_56.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# mirror the option's effect on base formatting for the reference values
options("scipen" = -3)
tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose"), n=n())[[1]]
cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE'), n=n())[[1]]
tot_p <- summarise(filter(adsl, TRT01P == "Placebo"), n=n())[[1]]
cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE'), n=n())[[1]]
# two-sample proportion test: estimate difference and its CI, 3 decimals
testvals <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p))
t56_1 = paste0(format(round(testvals$estimate[[1]] - testvals$estimate[[2]],3),nsmall = 3), ' (',
               format(round(testvals$conf.int[[1]],3),nsmall = 3), ', ',
               format(round(testvals$conf.int[[2]],3),nsmall = 3), ')'
               )
testthat::expect_equal(t56_1,
                       filter(test_56,row_label1 == 'WHITE')$`rdiff_Xanomeline High Dose_Placebo`,
                       label = "T56.1")
#manual check(s)
#clean up working directory
# NOTE(review): this restores scipen to 0 (R's default) rather than the value
# in effect before the test -- confirm no caller relies on a non-default scipen
options("scipen" = 0)
rm(tot_t)
rm(cnt_t)
rm(tot_p)
rm(cnt_p)
rm(testvals)
rm(t56_1)
rm(test_56)
})
#test 57 ----
test_that('T57',{
# T57: the 'tplyr.quantile_type' option should select the quantile algorithm
# (type 3 here) used for q1/q3 in a desc layer.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.quantile_type' = 3)
t <- tplyr_table(adsl, TRT01P) %>%
  add_layer(
    group_desc(CUMDOSE) %>%
      set_format_strings(
        'Quartiles' = f_str('xxx.x, xxx.x', q1, q3)
      )
  )
test_57 <- filter(build(t), row_label1 != 'Missing')
# output table to check attributes
save(test_57, file = "~/Tplyr/uat/output/test_57.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(t)
rm(test_57)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_57.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# reference quartiles computed with the same quantile type (3)
t57_1 <- group_by(adsl, TRT01P) %>%
  summarise(q1 = quantile(CUMDOSE, type = 3)[[2]],
            q3 = quantile(CUMDOSE, type = 3)[[4]]) %>%
  mutate(col = paste0(sprintf("%5.1f", q1), ', ', sprintf("%5.1f", q3))) %>%
  select(TRT01P, col) %>%
  pivot_wider(values_from = "col",names_from = "TRT01P")
testthat::expect_equal(c(t57_1$Placebo, t57_1$`Xanomeline Low Dose`, t57_1$`Xanomeline High Dose`),
                       c(test_57$var1_Placebo, test_57$`var1_Xanomeline Low Dose`, test_57$`var1_Xanomeline High Dose`),
                       label = "T57.1")
#manual check(s)
#clean up working directory
rm(t57_1)
rm(test_57)
})
#test 58 ----
test_that('T58',{
# T58: the 'tplyr.IBMRounding' option should switch percentage rounding to
# commercial / half-away-from-zero rounding, checked on a synthetic dataset
# constructed so banker's vs IBM rounding would disagree.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
options('tplyr.IBMRounding' = TRUE)
# synthetic data: 1000 subjects per arm with fixed F/M splits (485/515, 525/475)
row_num <- seq(1:2000)
trta = ifelse(row_num <= 1000, "Placebo", "ThisDrg")
gender = ifelse(between(row_num, 1, 485), "F",
                ifelse(between(row_num, 1001, 1525), "F", "M"))
tdat_58 <- tibble(trta, gender)
t <- tplyr_table(tdat_58, trta) %>%
  add_total_group(group_name = "Total") %>%
  add_layer(
    group_count(gender, by = "Gender") %>%
      set_format_strings(f_str("xxx (xxx%)", n, pct))
  )
test_58 <- suppressWarnings(build(t))
# output table to check attributes
save(test_58, file = "~/Tplyr/uat/output/test_58.RData")
#clean up working directory
# restore the option state captured upstream in `opts`
options(opts)
rm(row_num)
rm(trta)
rm(gender)
rm(tdat_58)
rm(t)
rm(test_58)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_58.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# rebuild the identical synthetic data for the reference computation
row_num <- seq(1:2000)
trta = ifelse(row_num <= 1000, "Placebo", "ThisDrg")
gender = ifelse(between(row_num, 1, 485), "F",
                ifelse(between(row_num, 1001, 1525), "F", "M"))
tdat_58 <- tibble(trta, gender)
# denominators include the synthetic "Total" arm (all rows duplicated)
t58_tots <- rbind(tdat_58, mutate(tdat_58, trta = "Total")) %>%
  group_by(trta) %>%
  summarise(tot = n())
t58_1 <- rbind(tdat_58, mutate(tdat_58, trta = "Total")) %>%
  group_by(trta, gender) %>%
  summarise(n = n()) %>%
  merge(t58_tots, by = "trta") %>%
  mutate(pct = n / tot * 100) %>%
  # IBM/commercial rounding to 0 decimals: truncate after adding a signed 0.5
  # (the "* 1" / "/ 1" are the scale factor for zero decimal places)
  mutate(rnd = trunc(pct * 1 + sign(pct) * 0.5) / 1) %>%
  mutate(fmtd = paste0(sprintf("%3.0f", n)," (",sprintf("%3.0f", rnd), "%)"))
testthat::expect_equal(t58_1$fmtd,
                       c(test_58$var1_Placebo,test_58$var1_ThisDrg,test_58$var1_Total),
                       label = "T58.1")
#manual check(s)
#clean up working directory
rm(t58_1)
rm(test_58)
rm(tdat_58)
})
#test 59 ----
test_that('T59',{
# T59: add_column_headers() should insert a header row whose cells come from
# the pipe-delimited header string, in column order.
if (is.null(vur)) {
  # Generation mode: build a simple count table, coerce every column to
  # character (header cells are text), attach the header row, and persist.
  built <- tplyr_table(adsl, TRT01P) %>%
    add_layer(
      group_count(RACE)
    ) %>%
    build() %>%
    mutate_all(as.character)
  test_59 <- add_column_headers(built, "Race|Placebo|Xanomeline High Dose|Xanomeline Low Dose|LayerIndex|Sorter")
  save(test_59, file = "~/Tplyr/uat/output/test_59.RData")
  # tidy up the working environment
  rm(built)
  rm(test_59)
} else {
  # Validated-run mode: reload the stored output.
  load("~/Tplyr/uat/output/test_59.RData")
}
# Checks only run on a validated pass.
skip_if(is.null(vur))
# The first row of the returned table should be exactly the header labels.
testthat::expect_equal(c("Race", "Placebo","Xanomeline High Dose","Xanomeline Low Dose", "LayerIndex", "Sorter"),
                       as.character(test_59[1,]),
                       label = "T59.1")
# clean up the working environment
rm(test_59)
})
#test 60 ----
test_that('T60',{
# T60: apply_row_masks() with row_breaks = TRUE -- repeated row_label1 values
# are blanked and a blank break row is inserted between ord_layer_1 groups.
# vur == NULL => generation mode (build/save, checks skipped).
if(is.null(vur)) {
#perform test and create outputs to use for checks
#if input files are needed they should be read in from "~/uat/input" folder
#outputs should be sent to "~/uat/output" folder
t <- tplyr_table(adae, TRTA) %>%
  add_layer(
    group_count(vars(AEBODSYS, AEDECOD)) %>%
      set_format_strings(f_str('xxx', n))
  ) %>%
  build() %>%
  arrange(desc(ord_layer_1), desc(ord_layer_2))
test_60 <- apply_row_masks(t, row_breaks = TRUE, ord_layer_1)
# output table to check attributes
save(test_60, file = "~/Tplyr/uat/output/test_60.RData")
#clean up working directory
rm(t)
rm(test_60)
#load output for checks
} else {
load("~/Tplyr/uat/output/test_60.RData")
}
#perform checks
skip_if(is.null(vur))
#programmatic check(s)
# body-system summary rows (AEDECOD mirrors AEBODSYS)
t60_aebodsys <- group_by(adae, TRTA, AEBODSYS) %>%
  summarise(n=n()) %>%
  ungroup() %>%
  complete(TRTA, AEBODSYS, fill = list(n=0)) %>%
  mutate(AEDECOD = AEBODSYS)
# synthetic break rows: n = -1 is a sentinel later turned into blank cells
t60_breaks <- select(t60_aebodsys, TRTA, AEBODSYS) %>%
  mutate(n = -1) %>%
  mutate(AEDECOD = "")
t60_1 <- group_by(adae, TRTA, AEBODSYS, AEDECOD) %>%
  summarise(n=n()) %>%
  ungroup() %>%
  complete(TRTA, AEBODSYS, AEDECOD, fill = list(n=0)) %>%
  rbind(t60_aebodsys) %>%
  rbind(t60_breaks) %>%
  pivot_wider(values_from=c(n), names_from = TRTA) %>%
  filter(Placebo != 0 | `Xanomeline Low Dose` != 0 | `Xanomeline High Dose` != 0) %>%
  # temporary sort markers: 'z' prefix pushes each body-system summary row to
  # the top of its group under desc(AEDECOD); terms get a leading space;
  # substring(.,2) strips the marker afterwards
  mutate(AEDECOD = ifelse(AEDECOD == "", "", ifelse(AEBODSYS == AEDECOD, paste0('z', AEDECOD), paste0(' ',AEDECOD)))) %>%
  arrange(AEBODSYS, desc(AEDECOD)) %>%
  mutate(AEDECOD = substring(AEDECOD, 2)) %>%
  # row masking: AEBODSYS only shown on its summary row; sentinel -1 => blank
  mutate(AEBODSYS = ifelse(AEBODSYS == AEDECOD, AEBODSYS, "")) %>%
  mutate(Placebo = ifelse(Placebo == -1, "", sprintf("%3s",Placebo))) %>%
  mutate(`Xanomeline Low Dose` = ifelse(`Xanomeline Low Dose` == -1, "", sprintf("%3s",`Xanomeline Low Dose`))) %>%
  mutate(`Xanomeline High Dose` = ifelse(`Xanomeline High Dose` == -1, "", sprintf("%3s",`Xanomeline High Dose`)))
testthat::expect_equal(c(t60_1$AEBODSYS, t60_1$AEDECOD, t60_1$Placebo,
                         t60_1$`Xanomeline High Dose`, t60_1$`Xanomeline Low Dose`),
                       c(test_60$row_label1, test_60$row_label2, test_60$var1_Placebo,
                         test_60$`var1_Xanomeline High Dose`, test_60$`var1_Xanomeline Low Dose`),
                       label = "T60.1")
#manual check(s)
#clean up working directory
rm(t60_aebodsys)
rm(t60_breaks)
rm(t60_1)
rm(test_60)
})
#test 61 ----
# T61: full end-to-end pipeline check -- build a demographics summary table,
# convert it with huxtable, and write an RTF file via pharmaRTF. There is no
# programmatic value check; the output is reviewed by a human and the recorded
# answer is read from the validation response sheet (vur) as check T61.1.
test_that('T61',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    # Count layers (SEX, RACE_FACTOR, ETHNIC) plus descriptive layers
    # (AGE, WEIGHTBL) across the treatment arms, an overall Total group, and
    # a pooled "Total Xanomeline" group.
    t <- tplyr_table(adsl, TRT01P) %>%
      add_total_group() %>%
      add_treat_grps('Total Xanomeline' = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      add_layer(
        group_count(SEX, by = "Sex")
      ) %>%
      add_layer(
        group_desc(AGE, by = "Age")
      ) %>%
      add_layer(
        group_count(RACE_FACTOR, by = "Race")
      ) %>%
      add_layer(
        group_count(ETHNIC, by = "Ethnicity")
      ) %>%
      add_layer(
        group_desc(WEIGHTBL, by = "Baseline Weight")
      )
    built <- build(t) %>%
      apply_row_masks() %>%
      select(starts_with("row"),"var1_Placebo",starts_with("var1_X"),"var1_Total Xanomeline","var1_Total") %>%
      add_column_headers("Parameter | | Placebo | Xanomeline Low Dose | Xanomeline High Dose |
                        Total | Total Xanomeline")
    hux <- huxtable::as_hux(built) %>%
      huxtable::set_width(1.5) %>%
      huxtable::map_align(huxtable::by_cols("left","left","center","center","center","center","center"))
    test_61 <- pharmaRTF::rtf_doc(hux) %>%
      pharmaRTF::add_titles(pharmaRTF::hf_line("Demographics Summary", bold=TRUE))
    # output table to check attributes
    pharmaRTF::write_rtf(test_61, file = "~/Tplyr/uat/output/test_61.rtf")
    #clean up working directory
    rm(t)
    rm(built)
    rm(hux)
    rm(test_61)
    #load output for checks
  } else {
    # Nothing to load: the only artifact is the RTF file, which is checked
    # manually rather than reloaded here.
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  #manual check(s)
  # Manual review outcome recorded in the response sheet.
  expect_true(vur[vur$ID == "T61.1", "Response"])
  #clean up working directory
})
#test 62 ----
# T62: distinct AE counts split by SEX (cols), with a pooled "Treated" arm,
# population data taken from ADSL, and a Treated-vs-Placebo risk difference on
# a nested AEBODSYS/AEDECOD count layer. Reference values are recomputed with
# plain dplyr and compared to the built table (T62.1 counts, T62.2 risk diff,
# T62.3 header Ns).
test_that('T62',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, cols=SEX) %>%
      add_treat_grps("Treated" = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_distinct_by(USUBJID) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          add_risk_diff(c('Treated','Placebo'))
      )
    test_62 <- list(suppressWarnings(build(t)), header_n(t))
    # output table to check attributes
    save(test_62, file = "~/Tplyr/uat/output/test_62.RData")
    #clean up working directory
    rm(t)
    rm(test_62)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_62.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators: distinct subjects per (treatment, sex), with the Xanomeline
  # rows duplicated into the pooled 'Treated' arm.
  t62_tots <- rbind(adsl, mutate(filter(adsl, TRT01P %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                                 TRT01P = 'Treated')) %>%
    distinct(TRT01P, SEX, USUBJID) %>%
    group_by(TRT01P, SEX) %>%
    summarise(total=n()) %>%
    mutate(total = as.integer(total)) %>%
    complete(TRT01P, SEX, fill=list(n=0))
  t62_ae <- rbind(adae, mutate(filter(adae, TRTA %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                               TRTA = 'Treated'))
  # Distinct-subject counts per body system / preferred term; the AEDECOD = ' '
  # copies reproduce the outer body-system summary rows of the nested layer.
  t62_calc <- rbind(t62_ae, mutate(t62_ae,AEDECOD = ' ')) %>%
    distinct(TRTA, SEX, AEBODSYS, AEDECOD, USUBJID) %>%
    group_by(TRTA, SEX, AEBODSYS, AEDECOD) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, SEX, AEBODSYS, AEDECOD, fill=list(n=0)) %>%
    merge(t62_tots, by.x=c("TRTA","SEX"), by.y=c("TRT01P","SEX")) %>%
    mutate(pct = (n / total) * 100) %>%
    mutate(col = paste0(sprintf('%3s',n),' (',sprintf("%5.1f", pct),'%)')) %>%
    pivot_wider(names_from = c(TRTA, SEX), values_from = c(col,n,total,pct)) %>%
    filter(n_Placebo_F != 0 | n_Placebo_M != 0 |
             n_Treated_F != 0 | n_Treated_M != 0 |
             `n_Xanomeline High Dose_F` != 0 | `n_Xanomeline High Dose_M` != 0 |
             `n_Xanomeline Low Dose_F` != 0 | `n_Xanomeline Low Dose_M` != 0)
  # Risk difference (Treated - Placebo, females) from a two-sample prop.test;
  # estimate/CI pulled from repeated calls on the same inputs.
  t62_2 <- rowwise(t62_calc) %>%
    mutate(est1 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[1]]) %>%
    mutate(est2 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[2]]) %>%
    mutate(lci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[1]]) %>%
    mutate(uci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[2]]) %>%
    mutate(rdiff = est1 - est2) %>%
    mutate(col = paste0(sprintf("%6.3f",rdiff),' (',sprintf("%6.3f",lci), ', ',sprintf("%6.3f",uci),')'))
  testthat::expect_equal(c(t62_calc$col_Placebo_F, t62_calc$col_Placebo_M, t62_calc$col_Treated_F, t62_calc$col_Treated_M),
                         c(test_62[[1]]$var1_Placebo_F, test_62[[1]]$var1_Placebo_M, test_62[[1]]$var1_Treated_F, test_62[[1]]$var1_Treated_M),
                         label = "T62.1")
  testthat::expect_equal(t62_2$col,
                         test_62[[1]]$rdiff_Treated_Placebo_F,
                         label = "T62.2")
  testthat::expect_equal(t62_tots$total,
                         test_62[[2]]$n,
                         label = "T62.3")
  #manual check(s)
  #clean up working directory
  rm(t62_tots)
  rm(t62_ae)
  rm(t62_calc)
  rm(t62_2)
  rm(test_62)
})
#test 63 ----
# T63: same design as T62 (distinct counts, pooled Treated arm, risk diff) but
# with a table-level where clause restricting to RACE == 'WHITE'. The manual
# reference computation applies the same RACE filter to both ADSL (denominators)
# and ADAE (numerators) before counting.
test_that('T63',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, where=RACE == 'WHITE', cols=SEX) %>%
      add_treat_grps("Treated" = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_distinct_by(USUBJID) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          add_risk_diff(c('Treated','Placebo'))
      )
    test_63 <- list(suppressWarnings(build(t)), header_n(t))
    # output table to check attributes
    save(test_63, file = "~/Tplyr/uat/output/test_63.RData")
    #clean up working directory
    rm(t)
    rm(test_63)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_63.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators: WHITE subjects only, distinct per (treatment, sex), with the
  # Xanomeline rows duplicated into the pooled 'Treated' arm.
  t63_tots <- rbind(adsl, mutate(filter(adsl, TRT01P %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                                 TRT01P = 'Treated')) %>%
    filter(RACE == 'WHITE') %>%
    distinct(TRT01P, SEX, USUBJID) %>%
    group_by(TRT01P, SEX) %>%
    summarise(total=n()) %>%
    mutate(total = as.integer(total)) %>%
    complete(TRT01P, SEX, fill=list(n=0))
  t63_ae <- rbind(adae, mutate(filter(adae, TRTA %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                               TRTA = 'Treated')) %>%
    filter(RACE == 'WHITE')
  # AEDECOD = ' ' copies reproduce the outer body-system summary rows.
  t63_calc <- rbind(t63_ae, mutate(t63_ae,AEDECOD = ' ')) %>%
    distinct(TRTA, SEX, AEBODSYS, AEDECOD, USUBJID) %>%
    group_by(TRTA, SEX, AEBODSYS, AEDECOD) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, SEX, AEBODSYS, AEDECOD, fill=list(n=0)) %>%
    merge(t63_tots, by.x=c("TRTA","SEX"), by.y=c("TRT01P","SEX")) %>%
    mutate(pct = (n / total) * 100) %>%
    mutate(col = paste0(sprintf('%3s',n),' (',sprintf("%5.1f", pct),'%)')) %>%
    pivot_wider(names_from = c(TRTA, SEX), values_from = c(col,n,total,pct)) %>%
    filter(n_Placebo_F != 0 | n_Placebo_M != 0 |
             n_Treated_F != 0 | n_Treated_M != 0 |
             `n_Xanomeline High Dose_F` != 0 | `n_Xanomeline High Dose_M` != 0 |
             `n_Xanomeline Low Dose_F` != 0 | `n_Xanomeline Low Dose_M` != 0)
  # Risk difference (Treated - Placebo, females) from a two-sample prop.test.
  t63_2 <- rowwise(t63_calc) %>%
    mutate(est1 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[1]]) %>%
    mutate(est2 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[2]]) %>%
    mutate(lci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[1]]) %>%
    mutate(uci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[2]]) %>%
    mutate(rdiff = est1 - est2) %>%
    mutate(col = paste0(sprintf("%6.3f",rdiff),' (',sprintf("%6.3f",lci), ', ',sprintf("%6.3f",uci),')'))
  testthat::expect_equal(c(t63_calc$col_Placebo_F, t63_calc$col_Placebo_M, t63_calc$col_Treated_F, t63_calc$col_Treated_M),
                         c(test_63[[1]]$var1_Placebo_F, test_63[[1]]$var1_Placebo_M, test_63[[1]]$var1_Treated_F, test_63[[1]]$var1_Treated_M),
                         label = "T63.1")
  testthat::expect_equal(t63_2$col,
                         test_63[[1]]$rdiff_Treated_Placebo_F,
                         label = "T63.2")
  testthat::expect_equal(t63_tots$total,
                         test_63[[2]]$n,
                         label = "T63.3")
  #manual check(s)
  #clean up working directory
  rm(t63_tots)
  rm(t63_ae)
  rm(t63_calc)
  rm(t63_2)
  rm(test_63)
})
#test 64 ----
# T64: a table-level where clause that references both the treatment variable
# and the cols variable (drops all High Dose rows plus Low Dose females).
# Verified against dplyr-computed counts (AGEGR1) and means (AGE) where the
# excluded treatment/sex cells are restored as zero/blank via right_join
# against the unfiltered shells.
test_that('T64',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P, cols = SEX, where = !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      add_layer(
        group_count(AGEGR1) %>%
          set_format_strings(f_str("xxx", n))
      ) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            "mean" = f_str("xx.xx", mean)
          )
      )
    test_64 <- build(t)
    # output table to check attributes
    save(test_64, file = "~/Tplyr/uat/output/test_64.RData")
    #clean up working directory
    rm(t)
    rm(test_64)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_64.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Shells built from the unfiltered data so excluded cells still appear.
  t64_cnt_shell <- unique(adsl[,c("TRT01P", "SEX", "AGEGR1")])
  t_64_cnts <- filter(adsl, !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRT01P, SEX, AGEGR1) %>%
    summarise(n = n()) %>%
    right_join(t64_cnt_shell, by = c("TRT01P", "SEX", "AGEGR1")) %>%
    mutate(fmtd = if_else(is.na(n), '  0', sprintf("%3s", n))) %>%
    mutate(row_label = AGEGR1) %>%
    select(TRT01P, SEX, row_label, fmtd)
  t64_stat_shell <- unique(adsl[,c("TRT01P", "SEX")])
  # Excluded cells have no data, so the mean is rendered as an empty string.
  t_64_stats <- filter(adsl, !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRT01P, SEX) %>%
    summarise(mean = mean(AGE)) %>%
    right_join(t64_stat_shell, by = c("TRT01P", "SEX")) %>%
    mutate(fmtd = if_else(is.na(mean), '', sprintf("%5.2f", mean))) %>%
    mutate(row_label = "mean") %>%
    select(TRT01P, SEX, row_label, fmtd)
  t_64 <- rbind(t_64_cnts, t_64_stats) %>%
    pivot_wider(names_from = c("TRT01P", "SEX"), values_from = fmtd, id_cols = row_label)
  testthat::expect_equal(c(t_64$Placebo_F, t_64$Placebo_M,
                           t_64$`Xanomeline Low Dose_F`, t_64$`Xanomeline Low Dose_M`,
                           t_64$`Xanomeline High Dose_F`, t_64$`Xanomeline High Dose_M`),
                         c(test_64$var1_Placebo_F, test_64$var1_Placebo_M,
                           test_64$`var1_Xanomeline Low Dose_F`, test_64$`var1_Xanomeline Low Dose_M`,
                           test_64$`var1_Xanomeline High Dose_F`, test_64$`var1_Xanomeline High Dose_M`),
                         label = "T64.1")
  #manual check(s)
  #clean up working directory
  rm(t64_cnt_shell)
  rm(t_64_cnts)
  rm(t64_stat_shell)
  rm(t_64)
  rm(t_64_stats)
  rm(test_64)
})
#test 65 ----
# T65: the same combined treatment/sex where clause applied to both the target
# (ADAE, via `where`) and the population data (ADSL, via set_pop_where).
# Verified by recomputing AEBODSYS counts with dplyr on the filtered ADAE.
test_that('T65',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, cols = SEX, where = !(TRTA == 'Xanomeline High Dose' | (TRTA == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_pop_where(!(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      add_layer(
        group_count(AEBODSYS) %>%
          set_format_strings(f_str("xxx", n))
      )
    test_65 <- build(t)
    # output table to check attributes
    save(test_65, file = "~/Tplyr/uat/output/test_65.RData")
    #clean up working directory
    rm(t)
    rm(test_65)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_65.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Shell from unfiltered ADAE keeps every treatment/sex column present.
  t65_shell <- unique(adae[,c("TRTA", "SEX")])
  t_65 <- filter(adae, !(TRTA == 'Xanomeline High Dose' | (TRTA == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRTA, SEX, AEBODSYS) %>%
    summarise(n = n()) %>%
    right_join(t65_shell, by = c("TRTA", "SEX")) %>%
    mutate(fmtd = if_else(is.na(n), '  0', sprintf("%3s", n))) %>%
    mutate(row_label = AEBODSYS) %>%
    select(TRTA, SEX, row_label, fmtd) %>%
    pivot_wider(names_from = c("TRTA", "SEX"), values_from = fmtd, id_cols = row_label) %>%
    filter(!is.na(row_label)) %>%
    arrange(row_label)
  # Cells never created by pivot_wider (no rows at all) are still zero counts.
  t_65[is.na(t_65)] <- '  0'
  testthat::expect_equal(c(t_65$Placebo_F, t_65$Placebo_M,
                           t_65$`Xanomeline Low Dose_F`, t_65$`Xanomeline Low Dose_M`,
                           t_65$`Xanomeline High Dose_F`, t_65$`Xanomeline High Dose_M`),
                         c(test_65$var1_Placebo_F, test_65$var1_Placebo_M,
                           test_65$`var1_Xanomeline Low Dose_F`, test_65$`var1_Xanomeline Low Dose_M`,
                           test_65$`var1_Xanomeline High Dose_F`, test_65$`var1_Xanomeline High Dose_M`),
                         label = "T65.1")
  #manual check(s)
  #clean up working directory
  rm(t65_shell)
  rm(t_65)
  rm(test_65)
})
#clean up ----
# Drop the validation user response sheet now that all checks have run.
rm(vur)
| /uat/test_cases.R | permissive | jimsforks/Tplyr | R | false | false | 130,811 | r | context("Atorus Validation")
#' @title Test Cases Code
#' @section Last Updated By:
#' Nathan Kosiba
#' @section Last Update Date:
#' 02/09/2021
#setup ----
#insert any necessary libraries
library(Tplyr)
library(tidyverse)
library(rlang)
library(testthat)
#insert code applicable to all tests i.e. functions or data
# CDISC pilot input data sets used throughout the test cases. The *_FACTOR
# columns pin an explicit level order for the categorical count layers so
# result rows sort deterministically.
adsl <- haven::read_xpt("~/Tplyr/uat/input/adsl.xpt")
adsl$RACE_FACTOR <- factor(adsl$RACE, c("WHITE", "BLACK OR AFRICAN AMERICAN",
                                        "AMERICAN INDIAN OR ALASKA NATIVE", "ASIAN"))
adae <- haven::read_xpt("~/Tplyr/uat/input/adae.xpt")
advs <- haven::read_xpt("~/Tplyr/uat/input/advs.xpt")
adlb <- haven::read_xpt("~/Tplyr/uat/input/adlbc.xpt")
adlb$ANRIND_FACTOR <- factor(adlb$ANRIND, c("L","N","H"))
adlb$BNRIND_FACTOR <- factor(adlb$BNRIND, c("L","N","H"))
# Snapshot of the session options (was `opts = options()`; use `<-` for
# top-level assignment per R style conventions).
opts <- options()
#no updates needed - initializes vur which is used to determine which parts of code to execute during testing
vur <- NULL
if(file.exists("~/Tplyr/uat/references/output/vur_auto.Rds")) vur <- readRDS("~/Tplyr/uat/references/output/vur_auto.Rds")
#test 1 ----
# T1: tplyr_table() stores the supplied data as the population data (T1.1) and
# captures the treatment variable as an unevaluated expression (T1.2).
test_that('T1',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    test_1 <- tplyr_table(adsl, TRT01P)
    # output table to check attributes
    save(test_1, file = "~/Tplyr/uat/output/test_1.RData")
    #clean up working directory
    rm(test_1)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_1.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(adsl, Tplyr::pop_data(test_1), label = "T1.1")
  testthat::expect_equal(expr(TRT01P), quo_get_expr(test_1$treat_var), label = "T1.2")
  #manual check(s)
  #clean up working directory
  rm(test_1)
})
#test 2 ----
# T2: the table-level where clause retrieved via get_where() subsets the
# population data identically to a direct dplyr filter (T2.1).
test_that('T2',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    test_2 <- tplyr_table(adsl, TRT01P, where = (EFFFL == 'Y'))
    # output table to check attributes
    save(test_2, file = "~/Tplyr/uat/output/test_2.RData")
    #clean up working directory
    rm(test_2)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_2.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(filter(adsl, EFFFL == 'Y'), filter(Tplyr::pop_data(test_2),!!Tplyr::get_where(test_2)), label = "T2.1")
  #manual check(s)
  #clean up working directory
  rm(test_2)
})
#test 3 ----
# T3: header_n() reports one row per treatment group -- including the added
# Total group and the pooled 'Total Xanomeline' group -- with the expected
# group labels (T3.1) and subject counts recomputed via nrow() (T3.2).
test_that('T3',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_total_group() %>%
      add_treat_grps('Total Xanomeline' = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      add_layer(
        group_count(AGEGR1)
      )
    build(t)
    test_3 <- header_n(t)
    # output table to check attributes
    save(test_3, file = "~/Tplyr/uat/output/test_3.RData")
    #clean up working directory
    rm(t)
    rm(test_3)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_3.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(c("Placebo", "Total", "Total Xanomeline", "Xanomeline High Dose", "Xanomeline Low Dose"),
                         as.vector(test_3$TRT01P), label = "T3.1")
  # Expected header Ns in the same (alphabetical) group order as T3.1.
  t3_2 <- c(nrow(filter(adsl, TRT01P == "Placebo")), nrow(adsl),
            nrow(filter(adsl, TRT01P == "Xanomeline High Dose" | TRT01P == "Xanomeline Low Dose")),
            nrow(filter(adsl, TRT01P == "Xanomeline High Dose")), nrow(filter(adsl, TRT01P == "Xanomeline Low Dose")))
  testthat::expect_equal(t3_2, test_3[[2]], label = "T3.2")
  #manual check(s)
  #clean up working directory
  rm(t3_2)
  rm(test_3)
})
#test 4 ----
# T4: adding a descriptive-statistics layer (with by and where arguments) does
# not alter the table's stored population data (T4.1) or treatment variable
# expression (T4.2).
test_that('T4',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    test_4 <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE, by="Age (Groups)", where = SAFFL == "Y")
      )
    # output table to check attributes
    save(test_4, file = "~/Tplyr/uat/output/test_4.RData")
    #clean up working directory
    rm(test_4)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_4.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(adsl, Tplyr::pop_data(test_4), label = "T4.1")
  testthat::expect_equal(expr(TRT01P), quo_get_expr(test_4$treat_var), label = "T4.2")
  #manual check(s)
  #clean up working directory
  rm(test_4)
})
#test 5 ----
# T5: set_pop_data()/set_pop_treat_var() store the population data and its
# treatment variable separately from the target data and its treatment
# variable (checks T5.1 - T5.4).
test_that('T5',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    test_5 <- tplyr_table(adae, TRTA) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      add_layer(
        group_count(AEDECOD)
      )
    # output table to check attributes
    save(test_5, file = "~/Tplyr/uat/output/test_5.RData")
    #clean up working directory
    rm(test_5)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_5.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(adsl, Tplyr::pop_data(test_5), label = "T5.1")
  testthat::expect_equal(adae, test_5$target, label = "T5.2")
  testthat::expect_equal(expr(TRT01P), quo_get_expr(test_5$pop_treat_var), label = "T5.3")
  testthat::expect_equal(expr(TRTA), quo_get_expr(test_5$treat_var), label = "T5.4")
  #manual check(s)
  #clean up working directory
  rm(test_5)
})
#test 6 ----
# T6: with no table-level where clause, get_where() applied to the population
# data is equivalent to filtering on the layer's condition (SAFFL == 'Y')
# directly (T6.1).
test_that('T6',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    test_6 <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(AEDECOD, by="Preferred Term", where = SAFFL == "Y")
      )
    # output table to check attributes
    save(test_6, file = "~/Tplyr/uat/output/test_6.RData")
    #clean up working directory
    rm(test_6)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_6.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(filter(adae, SAFFL == 'Y'),
                         filter(Tplyr::pop_data(test_6),!!Tplyr::get_where(test_6)),
                         label = "T6.1")
  #manual check(s)
  #clean up working directory
  rm(test_6)
})
#test 7 ----
# T7: get_numeric_data() exposes the underlying layer numbers -- raw event
# counts (T7.1), distinct-by-subject counts (T7.2), and a literal-string
# "Any AE" count layer (T7.3) -- compared against dplyr recomputations for the
# Placebo arm.
test_that('T7',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(AEDECOD)
      ) %>%
      add_layer(
        group_count(AEDECOD) %>%
          set_distinct_by(USUBJID) %>%
          set_format_strings(f_str("xxx", distinct_n))
      ) %>%
      add_layer(
        group_count("Any AE")
      )
    build(t)
    test_7 <- get_numeric_data(t)
    # output table to check attributes
    save(test_7, file = "~/Tplyr/uat/output/test_7.RData")
    #clean up working directory
    rm(t)
    rm(test_7)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_7.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t7_1 <- filter(adae, TRTA == "Placebo") %>%
    group_by(AEDECOD) %>%
    summarise(n=n())
  # Distinct counts: one record per subject per preferred term.
  t7_2 <- filter(adae, TRTA == "Placebo") %>%
    group_by(AEDECOD) %>%
    distinct(USUBJID, AEDECOD) %>%
    summarise(n=n())
  t7_3 <- filter(adae, TRTA == "Placebo") %>%
    group_by("Any AE") %>%
    summarise(n = n())
  testthat::expect_equal(t7_1[[2]],
                         subset(test_7[[1]], TRTA == 'Placebo' & n != 0)[['n']],
                         label = "T7.1")
  testthat::expect_equal(t7_2[[2]],
                         subset(test_7[[2]], TRTA == 'Placebo' & n != 0)[['distinct_n']],
                         label = "T7.2")
  testthat::expect_equal(t7_3[[2]],
                         subset(test_7[[3]], TRTA == 'Placebo' & n != 0)[['n']],
                         label = "T7.3")
  #manual check(s)
  #clean up working directory
  rm(t7_1)
  rm(t7_2)
  rm(t7_3)
  rm(test_7)
})
#test 8 ----
# T8: keep_levels() restricts a count layer (with a by variable) to the listed
# preferred terms; raw (T8.1) and distinct-by-subject (T8.2) counts are
# compared against dplyr recomputations on the same term subset.
test_that('T8',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(AEDECOD, by=SEX) %>%
          keep_levels("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")
      ) %>%
      add_layer(
        group_count(AEDECOD, by=SEX) %>%
          set_distinct_by(USUBJID) %>%
          set_format_strings(f_str("xxx", distinct_n)) %>%
          keep_levels("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")
      )
    build(t)
    test_8 <- get_numeric_data(t)
    # output table to check attributes
    save(test_8, file = "~/Tplyr/uat/output/test_8.RData")
    #clean up working directory
    rm(t)
    rm(test_8)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_8.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t8_1 <- filter(adae, TRTA == "Placebo" & AEDECOD %in% c("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")) %>%
    group_by(SEX, AEDECOD) %>%
    summarise(n=n())
  # Distinct counts: one record per subject per sex/term combination.
  t8_2 <- filter(adae, TRTA == "Placebo" & AEDECOD %in% c("APPLICATION SITE ERYTHEMA","APPLICATION SITE PRURITUS","DIARRHOEA","PRURITUS","LOCALISED INFECTION")) %>%
    group_by(SEX, AEDECOD) %>%
    distinct(USUBJID, SEX, AEDECOD) %>%
    summarise(n=n())
  testthat::expect_equal(t8_1[[3]],
                         subset(test_8[[1]], TRTA == 'Placebo' & n != 0)[['n']],
                         label = "T8.1")
  testthat::expect_equal(t8_2[[3]],
                         subset(test_8[[2]], TRTA == 'Placebo' & n != 0)[['distinct_n']],
                         label = "T8.2")
  #manual check(s)
  #clean up working directory
  rm(t8_1)
  rm(t8_2)
  rm(test_8)
})
#test 9 ----
# T9: nested count layers with keep_levels() on the inner variable -- one with
# a data-driven outer variable (EOSSTT, T9.1) and one with a literal-string
# outer level ("Discontinuation", T9.2). Reference tables rebuild the outer
# summary rows and nested rows by hand, then compare the formatted columns.
test_that('T9',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(vars(EOSSTT, DCDECOD)) %>%
          set_format_strings(f_str("xxx", n)) %>%
          keep_levels("COMPLETED", "DEATH")
      ) %>%
      add_layer(
        group_count(vars("Discontinuation", DCDECOD)) %>%
          set_format_strings(f_str("xxx", n)) %>%
          keep_levels("COMPLETED", "DEATH")
      )
    test_9 <- build(t)
    # output table to check attributes
    save(test_9, file = "~/Tplyr/uat/output/test_9.RData")
    #clean up working directory
    rm(t)
    rm(test_9)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_9.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Outer-level summary rows; DCDECOD = " A" is a sentinel that sorts these
  # rows ahead of the nested rows when arranged alphabetically by DCDECOD.
  t9_1_outer <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
    group_by(TRT01P, EOSSTT) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRT01P, EOSSTT, fill = list(n = 0)) %>%
    mutate(DCDECOD = " A")
  t9_1 <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
    group_by(TRT01P, EOSSTT, DCDECOD) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRT01P, nesting(EOSSTT, DCDECOD), fill = list(n = 0)) %>%
    rbind(t9_1_outer) %>%
    arrange(TRT01P, EOSSTT, DCDECOD) %>%
    mutate(fmtd = sprintf("%3s", n)) %>%
    pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = c(EOSSTT, DCDECOD))
  # Same construction for the literal "Discontinuation" outer level.
  t9_2_outer <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
    group_by(TRT01P, "Discontinued") %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRT01P, fill = list(n = 0)) %>%
    mutate(DCDECOD = " A")
  t9_2 <- filter(adsl, DCDECOD %in% c("COMPLETED", "DEATH")) %>%
    group_by(TRT01P, "Discontinued", DCDECOD) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRT01P, nesting("Discontinued", DCDECOD), fill = list(n = 0)) %>%
    rbind(t9_2_outer) %>%
    arrange(TRT01P, DCDECOD) %>%
    mutate(fmtd = sprintf("%3s", n)) %>%
    pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = DCDECOD)
  test_9_1 <-subset(test_9, ord_layer_index == 1)
  test_9_2 <-subset(test_9, ord_layer_index == 2)
  testthat::expect_equal(c(t9_1$Placebo, t9_1$`Xanomeline Low Dose`, t9_1$`Xanomeline High Dose`),
                         c(test_9_1$var1_Placebo, test_9_1$`var1_Xanomeline Low Dose`, test_9_1$`var1_Xanomeline High Dose`),
                         label = "T9.1")
  testthat::expect_equal(c(t9_2$Placebo, t9_2$`Xanomeline Low Dose`, t9_2$`Xanomeline High Dose`),
                         c(test_9_2$var1_Placebo, test_9_2$`var1_Xanomeline Low Dose`, test_9_2$`var1_Xanomeline High Dose`),
                         label = "T9.2")
  #manual check(s)
  #clean up working directory
  rm(t9_1)
  rm(t9_1_outer)
  rm(t9_2)
  rm(t9_2_outer)
  rm(test_9)
  rm(test_9_1)
  rm(test_9_2)
})
#test 10 ----
# T10: add_total_row(count_missings = FALSE) plus set_missing_count() -- the
# TOTAL row excludes the missing ("") category, and the missing row is
# relabeled and sorted last. Reference values recomputed with dplyr for the
# Placebo column (T10.1).
test_that('T10',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(DCSREAS) %>%
          set_format_strings(f_str("xxx", n)) %>%
          add_total_row(sort_value = -Inf, count_missings = FALSE) %>%
          set_total_row_label("TOTAL") %>%
          set_missing_count(fmt = f_str("xxx", n), sort_value = Inf, Missing = "")
      )
    test_10 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_10, file = "~/Tplyr/uat/output/test_10.RData")
    #clean up working directory
    rm(t)
    rm(test_10)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_10.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t10_tots <- group_by(adsl, TRT01P) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, fill = list(n = 0))
  # Total row counts only non-missing DCSREAS (count_missings = FALSE);
  # the leading space in ' TOTAL' sorts it before the category rows.
  t10_totalrow <- filter(adsl, DCSREAS != "") %>%
    group_by(TRT01P) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, fill = list(n = 0)) %>%
    mutate(DCSREAS = ' TOTAL')
  # 'ZZZ' is a sort sentinel so the missing ("") row lands last.
  t10_1 <- group_by(adsl, TRT01P, DCSREAS) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, DCSREAS, fill = list(n = 0)) %>%
    as_tibble() %>%
    rbind(t10_totalrow) %>%
    mutate(fmtd = sprintf("%3s", n)) %>%
    pivot_wider(names_from = TRT01P, values_from = fmtd, id_cols = DCSREAS) %>%
    mutate(DCSREAS = if_else(DCSREAS == "", 'ZZZ', DCSREAS)) %>%
    arrange(DCSREAS)
  testthat::expect_equal(t10_1$Placebo, test_10$var1_Placebo, label = "T10.1")
  #manual check(s)
  #clean up working directory
  rm(t10_tots)
  rm(t10_1)
  rm(test_10)
})
#test 11 ----
# T11: set_denom_where(TRUE) makes the percentage denominators ignore the
# layer's EFFFL filter, while set_missing_count(denom_ignore = TRUE) excludes
# the missing ("") rows from those denominators; the missing row itself is
# shown as a bare count. Verified for the Placebo column (T11.1).
test_that('T11',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(DCSREAS, where = EFFFL == 'Y') %>%
          set_missing_count(fmt = f_str("xx", n), sort_value = Inf, Missing = "", denom_ignore = TRUE) %>%
          set_denom_where(TRUE)
      )
    test_11 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_11, file = "~/Tplyr/uat/output/test_11.RData")
    #clean up working directory
    rm(t)
    rm(test_11)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_11.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators: all of adsl (no EFFFL filter) minus the missing ("") rows.
  t11_tots <- filter(adsl, DCSREAS != "") %>%
    group_by(TRT01P) %>%
    summarise(total = n())
  # 'ZZZ' is a sort sentinel so the missing row lands last; the missing row is
  # formatted as a count only, other rows as "nn (ppp.p%)".
  t11_1 <- filter(adsl, EFFFL == 'Y') %>%
    group_by(TRT01P, DCSREAS) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, DCSREAS, fill = list(n = 0)) %>%
    left_join(t11_tots, by="TRT01P") %>%
    mutate(pct = n / total *100) %>%
    mutate(col = ifelse(DCSREAS == "", sprintf("%2s",n), paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),"%)"))) %>%
    mutate(DCSREAS = ifelse(DCSREAS == "", 'ZZZ', DCSREAS)) %>%
    filter(TRT01P == "Placebo") %>%
    arrange(DCSREAS)
  testthat::expect_equal(t11_1$col,test_11$var1_Placebo,label = "T11.1")
  #manual check(s)
  #clean up working directory
  rm(t11_tots)
  rm(t11_1)
  rm(test_11)
})
#test 12 ----
# T12: a count layer on a factor variable with keep_levels(), a total row, and
# a relabeled missing row (Missing = NA). The reference computation rebuilds
# TOTAL, MISSING, and kept category rows, then compares the Placebo column
# (T12.1).
test_that('T12',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE_FACTOR, where = EFFFL == 'Y') %>%
          set_format_strings(f_str('xxx',n)) %>%
          add_total_row(f_str('xxx',n), sort_value = -Inf) %>%
          set_missing_count(f_str('xxx',n), Missing = NA, sort_value = Inf) %>%
          keep_levels("WHITE","ASIAN")
      )
    test_12 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_12, file = "~/Tplyr/uat/output/test_12.RData")
    #clean up working directory
    rm(t)
    rm(test_12)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_12.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Total row: kept levels only, within the EFFFL == 'Y' subset.
  t12_totalrow <- filter(adsl, EFFFL == 'Y' & RACE_FACTOR %in% c("WHITE","ASIAN")) %>%
    group_by(TRT01P) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, fill = list(n=0)) %>%
    mutate(RACE_FACTOR = 'TOTAL') %>%
    as_tibble()
  # BUG FIX: the original condition was `!length(filter(...))`, which tests the
  # COLUMN count of a data frame (never zero, so the first branch was dead),
  # and that branch piped `group_by(TRT01P)` with no data argument, which would
  # have errored had it ever run. Test the row count instead and start the
  # pipeline from adsl, mirroring the layer's EFFFL == 'Y' filter.
  if (nrow(filter(adsl, is.na(RACE_FACTOR) & EFFFL == 'Y')) > 0) {
    t12_missingrow <- filter(adsl, EFFFL == 'Y') %>%
      filter(is.na(RACE_FACTOR)) %>%
      group_by(TRT01P) %>%
      summarise(n = n()) %>%
      ungroup() %>%
      complete(TRT01P, fill = list(n=0)) %>%
      mutate(RACE_FACTOR = 'MISSING')
  } else {
    # No missing race values: build an all-zero MISSING row per treatment.
    t12_missingrow <- unique(adsl$TRT01P) %>%
      as_tibble() %>%
      mutate(n = 0) %>%
      mutate(RACE_FACTOR = 'MISSING') %>%
      rename(TRT01P = value)
  }
  t12_categoryrows <- filter(adsl, EFFFL == 'Y') %>%
    group_by(TRT01P, RACE_FACTOR) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE_FACTOR, fill = list(n=0)) %>%
    arrange(RACE_FACTOR) %>%
    as_tibble()
  # Stack TOTAL / kept categories / MISSING and compare the Placebo column.
  t12_1 <- rbind(t12_totalrow, t12_categoryrows, t12_missingrow) %>%
    filter(TRT01P == 'Placebo' & RACE_FACTOR %in% c("TOTAL","MISSING","WHITE","ASIAN")) %>%
    mutate(fmtd = sprintf("%3s", n))
  testthat::expect_equal(t12_1$fmtd, test_12$var1_Placebo, label = "T12.1")
  #manual check(s)
  #clean up working directory
  rm(t12_totalrow)
  rm(t12_missingrow)
  rm(t12_categoryrows)
  rm(t12_1)
  rm(test_12)
})
#test 13 ----
# T13: three formatting variants of the same AE count layer with a total row --
# raw n/pct (T13.1), distinct n/pct via set_distinct_by (T13.2), and a combined
# "n (pct%) [distinct_n (distinct_pct%)]" format (T13.3). Reference strings are
# rebuilt with dplyr + sprintf for the Placebo arm.
test_that('T13',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, where=TRTA == 'Placebo') %>%
      set_distinct_by(USUBJID) %>%
      add_layer(
        group_count(AEDECOD) %>%
          set_format_strings(f_str("xxx (xxx.x%)", n, pct)) %>%
          add_total_row(f_str("xxx", n), sort_value = -Inf)
      )%>%
      add_layer(
        group_count(AEDECOD) %>%
          set_format_strings(f_str("xxx (xxx.x%)", distinct_n, distinct_pct)) %>%
          add_total_row(f_str("xxx", distinct_n), sort_value = -Inf)
      )%>%
      add_layer(
        group_count(AEDECOD) %>%
          set_format_strings(f_str("xxx (xxx.x%) [xxx (xxx.x%)]", n, pct, distinct_n, distinct_pct)) %>%
          add_total_row(f_str("xxx [xxx]", n, distinct_n), sort_value = -Inf)
      )
    test_13 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_13, file = "~/Tplyr/uat/output/test_13.RData")
    #clean up working directory
    rm(t)
    rm(test_13)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_13.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators: all events / all distinct subjects per treatment (no where).
  t13_totals <- filter(adae) %>%
    group_by(TRTA) %>%
    summarize(total=n())
  t13_totals_distinct <- filter(adae) %>%
    distinct(USUBJID, TRTA) %>%
    group_by(TRTA) %>%
    summarize(distinct_total=n())
  # Total rows; the leading space in ' TOTAL' sorts them first.
  t13_total_row <- filter(adae, TRTA == 'Placebo') %>%
    group_by(TRTA) %>%
    summarize(cnt=n()) %>%
    mutate(AEDECOD = ' TOTAL')
  t13_total_row_distinct <- filter(adae, TRTA == 'Placebo') %>%
    distinct(USUBJID, TRTA, AEDECOD) %>%
    group_by(TRTA) %>%
    summarize(cnt=n()) %>%
    mutate(AEDECOD = ' TOTAL')
  t13_1 <- filter(adae, TRTA == 'Placebo') %>%
    group_by(AEDECOD, TRTA) %>%
    summarize(cnt=n()) %>%
    as_tibble() %>%
    rbind(t13_total_row) %>%
    left_join(t13_totals,by="TRTA") %>%
    mutate(pct = sprintf("%5.1f", round(cnt/total*100,digits = 1))) %>%
    mutate(col = ifelse(AEDECOD == ' TOTAL', sprintf("%3s", cnt),paste0(as.character(cnt),' (',pct,'%)'))) %>%
    arrange(AEDECOD)
  t13_2 <- filter(adae, TRTA == 'Placebo') %>%
    distinct(USUBJID, TRTA, AEDECOD) %>%
    group_by(AEDECOD, TRTA) %>%
    summarize(cnt=n()) %>%
    as_tibble() %>%
    rbind(t13_total_row_distinct) %>%
    left_join(t13_totals_distinct,by="TRTA") %>%
    mutate(pct = sprintf("%5.1f", round(cnt/distinct_total*100,digits = 1))) %>%
    mutate(distinct_col = ifelse(AEDECOD == ' TOTAL', sprintf("%3s", cnt),paste0(as.character(cnt),' (',pct,'%)'))) %>%
    arrange(AEDECOD)
  # Combined format: join raw and distinct strings per term.
  t13_3 <- select(t13_1,c("TRTA","AEDECOD","col")) %>%
    left_join(t13_2, by=c("TRTA","AEDECOD")) %>%
    mutate(col_combo = ifelse(AEDECOD == ' TOTAL', paste0(col, " [",distinct_col,"]"),paste0(col, " [",sprintf("%12s",distinct_col),"]"))) %>%
    arrange(AEDECOD)
  testthat::expect_equal(t13_1$col,
                         trimws(filter(test_13, ord_layer_index == 1)[["var1_Placebo"]]),
                         label = "T13.1")
  testthat::expect_equal(t13_2$distinct_col,
                         trimws(filter(test_13, ord_layer_index == 2)[["var1_Placebo"]]),
                         label = "T13.2")
  testthat::expect_equal(t13_3$col_combo,
                         trimws(filter(test_13, ord_layer_index == 3)[["var1_Placebo"]]),
                         label = "T13.3")
  #manual check(s)
  #clean up working directory
  rm(t13_totals)
  rm(t13_totals_distinct)
  rm(t13_total_row)
  rm(t13_total_row_distinct)
  rm(t13_1)
  rm(t13_2)
  rm(t13_3)
  rm(test_13)
})
#test 14 ----
# T14: numeric data behind a count layer of RACE by planned treatment,
# formatted as "xxx (xx.x%)".  The programmatic check recomputes the
# per-treatment denominators and cell percentages directly with dplyr.
test_that('T14',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    tab <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE) %>%
          set_format_strings(f_str("xxx (xx.x%)", n, pct))
      )
    build(tab)
    test_14 <- get_numeric_data(tab)[[1]]
    # output table to check attributes
    save(test_14, file = "~/Tplyr/uat/output/test_14.RData")
    #clean up working directory
    rm(tab)
    rm(test_14)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_14.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # subjects per treatment arm (denominators)
  trt_totals <- count(adsl, TRT01P, name = "total") %>%
    mutate(total = as.integer(total))
  # counts and percentages per treatment/race cell
  race_pcts <- count(adsl, TRT01P, RACE) %>%
    left_join(trt_totals, by = 'TRT01P') %>%
    mutate(pct = round((n / total) * 100, digits = 1))
  testthat::expect_equal(trt_totals$total,
                         unique(test_14[c("TRT01P", "total")])$total,
                         label = "T14.1")
  testthat::expect_equal(race_pcts$pct,
                         mutate(filter(test_14, n != 0),
                                pct = round((n / total) * 100, digits = 1))[['pct']],
                         label = "T14.2")
  #manual check(s)
  #clean up working directory
  rm(trt_totals)
  rm(race_pcts)
  rm(test_14)
})
#test 15 ----
# T15: same count layer as T14 but with a table-level where clause
# (SEX == "F"); totals and percentages are recomputed on the filtered data.
test_that('T15',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    tab <- tplyr_table(adsl, TRT01P, where=SEX == "F") %>%
      add_layer(
        group_count(RACE) %>%
          set_format_strings(f_str("xxx (xx.x%)", n, pct))
      )
    build(tab)
    test_15 <- get_numeric_data(tab)[[1]]
    # output table to check attributes
    save(test_15, file = "~/Tplyr/uat/output/test_15.RData")
    #clean up working directory
    rm(tab)
    rm(test_15)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_15.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # apply the same subsetting the table-level where clause applied
  females <- filter(adsl, SEX == "F")
  # subjects per treatment arm among females (denominators)
  trt_totals <- count(females, TRT01P, name = "total")
  # counts and percentages per treatment/race cell
  race_pcts <- count(females, TRT01P, RACE) %>%
    left_join(trt_totals, by = 'TRT01P') %>%
    mutate(pct = round((n / total) * 100, digits = 1))
  testthat::expect_equal(trt_totals$total,
                         unique(test_15[c("TRT01P", "total")])$total,
                         label = "T15.1")
  testthat::expect_equal(race_pcts$pct,
                         mutate(filter(test_15, n != 0),
                                pct = round((n / total) * 100, digits = 1))[['pct']],
                         label = "T15.2")
  #manual check(s)
  #clean up working directory
  rm(females)
  rm(trt_totals)
  rm(race_pcts)
  rm(test_15)
})
#test 16 ----
# T16: distinct subject counts with a separate population dataset.
# AEDECOD occurrences in adae are counted distinct by USUBJID, while the
# denominators come from adsl via set_pop_data()/set_pop_treat_var().
test_that('T16',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      add_layer(
        group_count(AEDECOD) %>%
          set_distinct_by(USUBJID)
      )
    # keep both the built table and the population header Ns for checking
    test_16 <- list(build(t), header_n(t))
    # output table to check attributes
    save(test_16, file = "~/Tplyr/uat/output/test_16.RData")
    #clean up working directory
    rm(t)
    rm(test_16)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_16.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators: subject counts per planned treatment from the population data
  t16_1 <- group_by(adsl, TRT01P) %>%
    summarise(total=n())
  # distinct-subject AE counts per treatment/term, zero-filled for missing cells
  t16_2 <- distinct(adae, TRTA, AEDECOD, USUBJID) %>%
    group_by(TRTA, AEDECOD) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, AEDECOD, fill = list(n = 0)) %>%
    merge(t16_1, by.y='TRT01P', by.x = "TRTA") %>%
    mutate(pct = round((n / total) * 100, digits = 1)) %>%
    mutate(col = paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
    select(TRTA, AEDECOD, col) %>%
    pivot_wider(names_from = "TRTA", values_from = col)
  testthat::expect_equal(t16_2$Placebo,test_16[[1]]$var1_Placebo, label = "T16.1")
  testthat::expect_equal(t16_1$total,test_16[[2]]$n, label = "T16.2")
  #manual check(s)
  #clean up working directory
  rm(t16_1)
  rm(t16_2)
  rm(test_16)
})
#test 17 ----
# T17: count layer with add_total_row() and denominators controlled by
# set_denoms_by() - percentages are computed within TRT01P/SEX cells.
test_that('T17',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(ETHNIC, by=SEX) %>%
          set_denoms_by(TRT01P, SEX) %>%
          add_total_row()
      )
    test_17 <- build(t)
    # output table to check attributes
    save(test_17, file = "~/Tplyr/uat/output/test_17.RData")
    #clean up working directory
    rm(t)
    rm(test_17)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_17.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # per-cell denominators; n is kept equal to total so these rows can be
  # bound onto the per-ETHNIC counts below to act as the "Total" rows
  t17_tots <- group_by(adsl, TRT01P, SEX) %>%
    summarise(total=n()) %>%
    mutate(total = as.numeric(total)) %>%
    mutate(n = total)
  t17_1 <- group_by(adsl, TRT01P, SEX, ETHNIC) %>%
    summarise(n=n()) %>%
    rbind(select(t17_tots, -total)) %>%
    left_join(select(t17_tots, -n), by = c('TRT01P', "SEX")) %>%
    mutate(pct = round((n / total) * 100, digits = 1)) %>%
    mutate(col = paste0(sprintf("%2s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(ETHNIC = replace_na(ETHNIC, 'Total')) %>%
    arrange(SEX, ETHNIC)
  testthat::expect_equal(t17_1$col, test_17$var1_Placebo,label = "T17.1")
  #manual check(s)
  #clean up working directory
  rm(t17_tots)
  rm(t17_1)
  rm(test_17)
})
#test 18 ----
# T18: add_risk_diff() on a count layer - default prop.test() settings in
# the first layer, custom args (conf.level = 0.9, no continuity correction,
# one-sided 'less') in the second.
test_that('T18',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01A) %>%
      add_layer(
        group_count(RACE) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'))
      ) %>%
      add_layer(
        group_count(RACE) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'),
                        args = list(conf.level = 0.9, correct=FALSE, alternative='less'))
      )
    suppressWarnings(build(t))
    test_18 <- get_stats_data(t)
    # output table to check attributes
    save(test_18, file = "~/Tplyr/uat/output/test_18.RData")
    #clean up working directory
    rm(t)
    rm(test_18)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_18.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # NOTE(review): the table above is built on TRT01A but these checks filter
  # on TRT01P - presumably actual and planned treatments agree in this adsl;
  # confirm if the two variables can ever differ.
  tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose"), n=n())[[1]]
  cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE'), n=n())[[1]]
  tot_p <- summarise(filter(adsl, TRT01P == "Placebo"), n=n())[[1]]
  cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE'), n=n())[[1]]
  # reference results taken straight from stats::prop.test()
  t18_noarg <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p))
  t18_args <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p), conf.level = 0.9, correct=FALSE, alternative='less')
  testthat::expect_equal(t18_noarg$estimate[[1]] - t18_noarg$estimate[[2]],
                         filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'dif')[[3]],
                         label = "T18.1")
  testthat::expect_equal(c(t18_noarg$conf.int[1], t18_noarg$conf.int[2]),
                         c(filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'low')[[3]],
                           filter(test_18[[1]]$riskdiff, summary_var == 'WHITE' & measure == 'high')[[3]]),
                         label = "T18.2")
  testthat::expect_equal(c(t18_args$estimate[[1]] - t18_args$estimate[[2]], t18_args$conf.int[1], t18_args$conf.int[2]),
                         c(filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'dif')[[3]],
                           filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'low')[[3]],
                           filter(test_18[[2]]$riskdiff, summary_var == 'WHITE' & measure == 'high')[[3]]),
                         label = "T18.3")
  #manual check(s)
  #clean up working directory
  rm(tot_p)
  rm(cnt_p)
  rm(tot_t)
  rm(cnt_t)
  rm(t18_noarg)
  rm(t18_args)
  rm(test_18)
})
#test 19 ----
# T19: add_risk_diff() combined with a table-level cols argument (SEX);
# the risk difference should be computed within each SEX column.
test_that('T19',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01A, cols=SEX) %>%
      add_layer(
        group_count(RACE) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'))
      )
    suppressWarnings(build(t))
    test_19 <- get_stats_data(t)
    # output table to check attributes
    save(test_19, file = "~/Tplyr/uat/output/test_19.RData")
    #clean up working directory
    rm(t)
    rm(test_19)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_19.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # NOTE(review): table is built on TRT01A while the checks filter TRT01P -
  # presumably the two agree in this adsl; confirm.
  tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & SEX == "F"), n=n())[[1]]
  cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE' & SEX == "F"), n=n())[[1]]
  tot_p <- summarise(filter(adsl, TRT01P == "Placebo" & SEX == "F"), n=n())[[1]]
  cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE' & SEX == "F"), n=n())[[1]]
  # reference result for the F column only
  suppressWarnings(t19 <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p)))
  testthat::expect_equal(c(t19$estimate[[1]] - t19$estimate[[2]], t19$conf.int[1], t19$conf.int[2]),
                         c(filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'dif')[[4]],
                           filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'low')[[4]],
                           filter(test_19[[1]]$riskdiff, summary_var == 'WHITE' & SEX == "F" & measure == 'high')[[4]]),
                         label = "T19.1")
  #manual check(s)
  #clean up working directory
  rm(tot_p)
  rm(cnt_p)
  rm(tot_t)
  rm(cnt_t)
  rm(t19)
  rm(test_19)
})
#test 20 ----
# T20: add_risk_diff() across three count-layer shapes: nested
# vars(AEBODSYS, AEDECOD), a by-variable (SEX), and both combined.
# One representative cell (SKIN... / PRURITUS / F) is spot-checked per layer.
test_that('T20',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'))
      ) %>%
      add_layer(
        group_count(AEBODSYS, by = SEX) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'))
      ) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD), by = SEX) %>%
          add_risk_diff(c('Xanomeline High Dose','Placebo'))
      )
    suppressWarnings(build(t))
    test_20 <- get_stats_data(t)
    # output table to check attributes
    save(test_20, file = "~/Tplyr/uat/output/test_20.RData")
    #clean up working directory
    rm(t)
    rm(test_20)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_20.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators: total AE record counts per treatment (shared by all layers)
  tot_p <- summarise(filter(adae, TRTA == "Placebo"), n=n())[[1]]
  tot_t <- summarise(filter(adae, TRTA == 'Xanomeline High Dose'), n=n())[[1]]
  # numerators for layer 1 (body system + preferred term)
  cnt_p1 <- summarise(filter(adae, TRTA == "Placebo" &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               AEDECOD == "PRURITUS"),
                      n=n())[[1]]
  cnt_t1 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               AEDECOD == "PRURITUS"),
                      n=n())[[1]]
  # numerators for layer 2 (body system by SEX)
  cnt_p2 <- summarise(filter(adae, TRTA == "Placebo" &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               SEX == "F"),
                      n=n())[[1]]
  cnt_t2 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               SEX == "F"),
                      n=n())[[1]]
  # numerators for layer 3 (body system + preferred term by SEX)
  cnt_p3 <- summarise(filter(adae, TRTA == "Placebo" &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               AEDECOD == "PRURITUS" &
                               SEX == "F"),
                      n=n())[[1]]
  cnt_t3 <- summarise(filter(adae, TRTA == 'Xanomeline High Dose' &
                               AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                               AEDECOD == "PRURITUS" &
                               SEX == "F"),
                      n=n())[[1]]
  # reference prop.test() results per layer
  suppressWarnings(t20_1 <- prop.test(c(cnt_t1, cnt_p1), c(tot_t, tot_p)))
  suppressWarnings(t20_2 <- prop.test(c(cnt_t2, cnt_p2), c(tot_t, tot_p)))
  suppressWarnings(t20_3 <- prop.test(c(cnt_t3, cnt_p3), c(tot_t, tot_p)))
  # nested layers prefix the inner term with spaces, hence ' PRURITUS'
  testthat::expect_equal(c(t20_1$estimate[[1]] - t20_1$estimate[[2]], t20_1$conf.int[1], t20_1$conf.int[2]),
                         c(filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & measure == 'dif')[[4]],
                           filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & measure == 'low')[[4]],
                           filter(test_20[[1]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & measure == 'high')[[4]]),
                         label = "T20.1")
  testthat::expect_equal(c(t20_2$estimate[[1]] - t20_2$estimate[[2]], t20_2$conf.int[1], t20_2$conf.int[2]),
                         c(filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    SEX == 'F' & measure == 'dif')[[4]],
                           filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    SEX == 'F' & measure == 'low')[[4]],
                           filter(test_20[[2]]$riskdiff, summary_var == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    SEX == 'F' & measure == 'high')[[4]]),
                         label = "T20.2")
  testthat::expect_equal(c(t20_3$estimate[[1]] - t20_3$estimate[[2]], t20_3$conf.int[1], t20_3$conf.int[2]),
                         c(filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & SEX == 'F' & measure == 'dif')[[5]],
                           filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & SEX == 'F' & measure == 'low')[[5]],
                           filter(test_20[[3]]$riskdiff, AEBODSYS == "SKIN AND SUBCUTANEOUS TISSUE DISORDERS" &
                                    summary_var == '   PRURITUS' & SEX == 'F' & measure == 'high')[[5]]),
                         label = "T20.3")
  #manual check(s)
  #clean up working directory
  rm(tot_p)
  rm(tot_t)
  rm(cnt_p1)
  rm(cnt_t1)
  rm(cnt_p2)
  rm(cnt_t2)
  rm(cnt_p3)
  rm(cnt_t3)
  rm(t20_1)
  rm(t20_2)
  rm(t20_3)
  rm(test_20)
})
#test 21 ----
# T21: every built-in descriptive statistic of a desc layer (n, mean,
# median, sd, var, min, max, iqr, q1, q3, missing) is compared one-by-one
# against base R on the numeric (pre-format) data.
test_that('T21',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            'n' = f_str('xx', n),
            'mean' = f_str('xx.x', mean),
            'median' = f_str('xx.x', median),
            'sd' = f_str('xx.xx', sd),
            'var' = f_str('xx.xx', var),
            'min' = f_str('xx', min),
            'max' = f_str('xx', max),
            'iqr' = f_str('xx.x', iqr),
            'q1' = f_str('xx.x', q1),
            'q3' = f_str('xx.x', q3),
            'missing' = f_str('xx', missing)
          )
      )
    build(t)
    test_21 <- get_numeric_data(t)[[1]]
    # output table to check attributes
    save(test_21, file = "~/Tplyr/uat/output/test_21.RData")
    #clean up working directory
    rm(t)
    rm(test_21)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_21.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # each expectation recomputes one statistic for the Placebo arm
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], n=n())[[1]],
                         subset(test_21, stat == 'n' & TRT01P == 'Placebo')[['value']],
                         label = "T21.1")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], mean=mean(AGE))[[1]],
                         subset(test_21, stat == 'mean' & TRT01P == 'Placebo')[['value']],
                         label = "T21.2")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], median=median(AGE))[[1]],
                         subset(test_21, stat == 'median' & TRT01P == 'Placebo')[['value']],
                         label = "T21.3")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], sd=sd(AGE))[[1]],
                         subset(test_21, stat == 'sd' & TRT01P == 'Placebo')[['value']],
                         label = "T21.4")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], var=var(AGE))[[1]],
                         subset(test_21, stat == 'var' & TRT01P == 'Placebo')[['value']],
                         label = "T21.5")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], min=min(AGE))[[1]],
                         subset(test_21, stat == 'min' & TRT01P == 'Placebo')[['value']],
                         label = "T21.6")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], max=max(AGE))[[1]],
                         subset(test_21, stat == 'max' & TRT01P == 'Placebo')[['value']],
                         label = "T21.7")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], iqr=IQR(AGE))[[1]],
                         subset(test_21, stat == 'iqr' & TRT01P == 'Placebo')[['value']],
                         label = "T21.8")
  # q1/q3 use the default quantile() type, matching Tplyr's defaults
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], q1=quantile(AGE)[[2]])[[1]],
                         subset(test_21, stat == 'q1' & TRT01P == 'Placebo')[['value']],
                         label = "T21.9")
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',], q3=quantile(AGE)[[4]])[[1]],
                         subset(test_21, stat == 'q3' & TRT01P == 'Placebo')[['value']],
                         label = "T21.10")
  # 'missing' counts rows with NA AGE
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo' & is.na(adsl$AGE),], n=n())[[1]],
                         subset(test_21, stat == 'missing' & TRT01P == 'Placebo')[['value']],
                         label = "T21.11")
  #manual check(s)
  #clean up working directory
  rm(test_21)
})
#test 22 ----
# T22: set_custom_summaries() - a user-defined geometric mean is computed
# inside the desc layer and compared against the same expression evaluated
# on AGE directly.  `.var` is Tplyr's placeholder for the layer's target
# variable (AGE here).
test_that('T22',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_custom_summaries(
            geometric_mean = exp(sum(log(.var[.var > 0]),
                                     na.rm=TRUE) / length(.var))
          ) %>%
          set_format_strings(
            'Geometric Mean (SD)' = f_str('xx.xx (xx.xxx)', geometric_mean, sd)
          )
      )
    build(t)
    test_22 <- get_numeric_data(t)[[1]]
    # output table to check attributes
    save(test_22, file = "~/Tplyr/uat/output/test_22.RData")
    #clean up working directory
    rm(t)
    rm(test_22)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_22.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # same geometric-mean expression with AGE substituted for .var
  testthat::expect_equal(summarise(adsl[adsl$TRT01P == 'Placebo',],
                                   geometric_mean = exp(sum(log(AGE[AGE > 0]),na.rm=TRUE) / length(AGE)))[[1]],
                         subset(test_22, stat == 'geometric_mean' & TRT01P == 'Placebo')[['value']],
                         label = "T22.1")
  #manual check(s)
  #clean up working directory
  rm(test_22)
})
#test 23 ----
# T23: desc layer with a by-variable (ETHNIC); the full set of statistics
# is recomputed per ETHNIC level and compared against the Placebo slice of
# the layer's numeric data in long format.
test_that('T23',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE, by=ETHNIC) %>%
          set_format_strings(
            'n' = f_str('xx', n),
            'mean' = f_str('xx.x', mean),
            'median' = f_str('xx.x', median),
            'sd' = f_str('xx.xx', sd),
            'var' = f_str('xx.xx', var),
            'min' = f_str('xx', min),
            'max' = f_str('xx', max),
            'iqr' = f_str('xx.x', iqr),
            'q1' = f_str('xx.x', q1),
            'q3' = f_str('xx.x', q3)
          )
      )
    build(t)
    test_23 <- filter(get_numeric_data(t)[[1]], TRT01P == 'Placebo')
    # output table to check attributes
    save(test_23, file = "~/Tplyr/uat/output/test_23.RData")
    #clean up working directory
    rm(t)
    rm(test_23)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_23.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # one summarise per ETHNIC level, pivoted long so the value vector lines
  # up with test_23$value (column order here must mirror the layer's stats)
  t23_1 <- pivot_longer(data.frame(summarise(group_by(adsl[adsl$TRT01P == 'Placebo',],ETHNIC),
                                             n=n(),
                                             mean=mean(AGE),
                                             median=median(AGE),
                                             sd=sd(AGE),
                                             var=var(AGE),
                                             min=min(AGE),
                                             max=max(AGE),
                                             iqr=IQR(AGE),
                                             q1=quantile(AGE)[[2]],
                                             q3=quantile(AGE)[[4]]
                                             )
                                   ),
                        cols=c(n,mean,median,sd,var,min,max,iqr,q1,q3),names_to="STAT")
  testthat::expect_equal(t23_1$value,
                         test_23$value,
                         label = "T23.1")
  #manual check(s)
  #clean up working directory
  rm(t23_1)
  rm(test_23)
})
#test 24 ----
# T24: several statistics rendered into a single formatted string by one
# f_str(); the expected string is rebuilt by rounding each statistic to the
# precision implied by its 'x' pattern and joining with ", ".
test_that('T24',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            'combo' = f_str('xx, xx.x, xx, x.xx, xx.xx, xx, xx, xx.x, xx.x, xx.x',
                            n, mean, median, sd, var, min, max, iqr, q1, q3)
          )
      )
    test_24 <- build(t)$var1_Placebo
    # output table to check attributes
    save(test_24, file = "~/Tplyr/uat/output/test_24.RData")
    #clean up working directory
    rm(t)
    rm(test_24)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_24.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # rounding digits per piece mirror the format string above
  t24_1 <- paste(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],mean=round(mean(AGE),1))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],median=median(AGE))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],sd=round(sd(AGE),2))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],var=round(var(AGE),2))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=round(IQR(AGE),1))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],q1=round(quantile(AGE)[[2]],1))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],q3=round(quantile(AGE)[[4]],1))[[1]],
                 sep=", ")
  testthat::expect_equal(t24_1,
                         test_24,
                         label = "T24.1")
  #manual check(s)
  #clean up working directory
  rm(t24_1)
  rm(test_24)
})
#test 25 ----
# T25: like T24 but with extra decimal places so that zero-padding /
# width behaviour of f_str() is exercised; expected pieces are built with
# sprintf() at the matching widths.
test_that('T25',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            'combo' = f_str('xx, xx.xx, xx.xx, xx.xxx, xx.xxx, xx, xx, xx.xx, xx.xx, xx.xx',
                            n, mean, median, sd, var, min, max, iqr, q1, q3)
          )
      )
    test_25 <- build(t)$var1_Placebo
    # output table to check attributes
    save(test_25, file = "~/Tplyr/uat/output/test_25.RData")
    #clean up working directory
    rm(t)
    rm(test_25)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_25.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # NOTE(review): iqr is rounded to 1 decimal but formatted "%5.2f" while
  # the format string says 'xx.xx' (2 decimals) - this only matches when the
  # IQR has at most one decimal place; confirm intent.
  t25_1 <- paste(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],mean=sprintf("%5.2f",round(mean(AGE),2)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],median=sprintf("%5.2f",round(median(AGE),2)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],sd=sprintf("%6.3f",round(sd(AGE),3)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],var=sprintf("%6.3f",round(var(AGE),3)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=sprintf("%5.2f",round(IQR(AGE),1)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],q1=sprintf("%5.2f",round(quantile(AGE)[[2]],2)))[[1]],
                 summarise(adsl[adsl$TRT01P == 'Placebo',],q3=sprintf("%5.2f",round(quantile(AGE)[[4]],2)))[[1]],
                 sep=", ")
  testthat::expect_equal(t25_1,
                         test_25,
                         label = "T25.1")
  #manual check(s)
  #clean up working directory
  rm(t25_1)
  rm(test_25)
})
#test 26 ----
# T26: f_str() auto-precision ('a' = derive integer width from the data,
# 'a+1'/'a+2' = data-derived decimals plus 1 or 2).  The check first derives
# per-PARAMCD integer/decimal lengths from AVAL, then rebuilds each piece
# with sprintf() at the derived widths.
test_that('T26',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(advs, TRTA) %>%
      add_layer(
        group_desc(AVAL, by=PARAMCD) %>%
          set_format_strings(
            'combo' = f_str('xxxx, a.a+1, xx.a+1, a.a+2, xx.a+2, xxx, a, a.xx, xxx.xx, a.a+1',
                            n, mean, median, sd, var, min, max, iqr, q1, q3)
          )
      )
    test_26 <- build(t)
    # output table to check attributes
    save(test_26, file = "~/Tplyr/uat/output/test_26.RData")
    #clean up working directory
    rm(t)
    rm(test_26)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_26.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # derive, per PARAMCD, the max integer-part length (intlen), whether any
  # value has decimals (hasdec), and the max decimal length (declen)
  t26_dat <- mutate(advs, avalc = as.character(AVAL)) %>%
    rowwise() %>%
    mutate(intlen = nchar(unlist(strsplit(avalc,'\\.'))[[1]])) %>%
    mutate(hasdec = as.numeric(grepl('\\.', avalc))) %>%
    mutate(declen = ifelse(hasdec > 0, nchar(unlist(strsplit(avalc,'\\.'))[[2]]), 0)) %>%
    ungroup() %>%
    group_by(PARAMCD) %>%
    mutate(intlen = max(intlen, na.rm=TRUE)) %>%
    mutate(hasdec = max(hasdec)) %>%
    mutate(declen = max(declen))
  # rebuild each formatted piece; sprintf widths follow the format string:
  # e.g. 'a.a+1' -> total width intlen + (declen+1) + 1 for the dot
  t26_1 <- unique(t26_dat[,c("PARAMCD","intlen","declen","hasdec")]) %>%
    left_join(summarise(t26_dat[t26_dat$TRTA == 'Placebo',], n=n(), mean=mean(AVAL), median=median(AVAL), sd=sd(AVAL),
                        var=var(AVAL), min=min(AVAL), max=max(AVAL), iqr=IQR(AVAL),
                        q1=quantile(AVAL)[[2]], q3=quantile(AVAL)[[4]]), by="PARAMCD") %>%
    mutate(combo = paste(sprintf("%4s",n),
                         sprintf("%*s", (intlen + declen + 2),
                                 sprintf("%.*f",declen+1,
                                         round(mean,declen+1)
                                 )[[1]]),
                         sprintf("%*s", 2 + declen + 2,
                                 sprintf("%.*f",declen+1,
                                         round(median,declen+1)
                                 )[[1]]),
                         sprintf("%*s", intlen + declen + 3,
                                 sprintf("%.*f",declen+2,
                                         round(sd,declen+2)
                                 )[[1]]),
                         sprintf("%*s", 2 + declen + 3,
                                 sprintf("%.*f",declen+2,
                                         round(var,declen+2)
                                 )[[1]]),
                         sprintf("%*s", 3,
                                 sprintf("%.*f",0,
                                         round(min,0)
                                 )[[1]]),
                         sprintf("%*s", intlen,
                                 sprintf("%.*f",0,
                                         round(max,0)
                                 )[[1]]),
                         sprintf("%*s", intlen + 3,
                                 sprintf("%.*f",2,
                                         round(iqr,2)
                                 )[[1]]),
                         sprintf("%*s", 6,
                                 sprintf("%.*f",2,
                                         round(q1,2)
                                 )[[1]]),
                         sprintf("%*s", intlen + declen + 2,
                                 sprintf("%.*f",declen+1,
                                         round(q3,declen+1)
                                 )[[1]]),
                         sep = ", "))
  testthat::expect_equal(t26_1$combo,
                         test_26$var1_Placebo,
                         label = "T26.1")
  #manual check(s)
  #clean up working directory
  rm(t26_dat)
  rm(t26_1)
  rm(test_26)
})
#test 27 ----
# T27: literal characters inside an f_str() - parentheses, brackets,
# braces, prose, and '%%' (literal percent) must pass through unchanged
# around the formatted numbers.
test_that('T27',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            'combo' = f_str('xx, (xx.x), )xx(), x.xx%%, [xx.xx[], xx, xx, xx.x, {Q1 - xx.x}, Q3 - xx.x',
                            n, mean, median, sd, var, min, max, iqr, q1, q3)
          )
      )
    test_27 <- build(t)$var1_Placebo
    # output table to check attributes
    save(test_27, file = "~/Tplyr/uat/output/test_27.RData")
    #clean up working directory
    rm(t)
    rm(test_27)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_27.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # interleave the rounded statistics with the literal delimiters from the
  # format string above
  t27_1 <- paste0(summarise(adsl[adsl$TRT01P == 'Placebo',],n=n())[[1]],
                  ", (",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],mean=round(mean(AGE),1))[[1]],
                  "), )",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],median=median(AGE))[[1]],
                  "(), ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],sd=round(sd(AGE),2))[[1]],
                  "%%, [",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],var=round(var(AGE),2))[[1]],
                  "[], ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],min=min(AGE))[[1]],
                  ", ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],max=max(AGE))[[1]],
                  ", ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],iqr=round(IQR(AGE),1))[[1]],
                  ", {Q1 - ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],q1=round(quantile(AGE)[[2]],1))[[1]],
                  "}, Q3 - ",
                  summarise(adsl[adsl$TRT01P == 'Placebo',],q3=round(quantile(AGE)[[4]],1))[[1]]
                  )
  testthat::expect_equal(t27_1,
                         test_27,
                         label = "T27.1")
  #manual check(s)
  #clean up working directory
  rm(t27_1)
  rm(test_27)
})
#test 28 ----
# T28: the 'empty' argument of f_str() - cells with no data render as
# "NA" (for n) and "N/A" (for mean) rather than blank.  RACE_FACTOR is a
# factor, so absent levels produce empty cells in the built table.
test_that('T28',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE, by = RACE_FACTOR) %>%
          set_format_strings(
            'n' = f_str('xx', n, empty = "NA"),
            'mean' = f_str('xx.x', mean, empty = "N/A")
          )
      )
    # build once and keep the result (the original called build(t) twice,
    # discarding the first result)
    test_28 <- build(t)
    # output table to check attributes
    save(test_28, file = "~/Tplyr/uat/output/test_28.RData")
    #clean up working directory
    rm(t)
    rm(test_28)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_28.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # recompute n/mean per treatment and RACE_FACTOR level; complete() fills
  # the absent factor levels with the same 'empty' strings the layer emits
  t28_1 <- group_by(adsl, TRT01P, RACE_FACTOR) %>%
    summarise(n=n(), mean = round(mean(AGE),1)) %>%
    ungroup() %>%
    complete(TRT01P, RACE_FACTOR, fill=list(n="NA",mean="N/A")) %>%
    filter(TRT01P == "Placebo") %>%
    pivot_longer(cols=c(n,mean))
  testthat::expect_equal(t28_1$value, trimws(test_28$var1_Placebo),label = "T28.1")
  #manual check(s)
  #clean up working directory
  rm(t28_1)
  rm(test_28)
})
#test 29 ----
# T29: shift layer (baseline BNRIND -> analysis ANRIND) with an "xxx (xxx.x%)"
# format and a layer-level where clause excluding blank reference ranges.
test_that('T29',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND), where=(ANRIND != "" & BNRIND != "")) %>%
          set_format_strings(f_str("xxx (xxx.x%)", n, pct))
      )
    test_29 <- build(t)
    # output table to check attributes
    save(test_29, file = "~/Tplyr/uat/output/test_29.RData")
    #clean up working directory
    rm(t)
    rm(test_29)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_29.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators per treatment after both the table and layer filters
  t29_totals <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA) %>%
    summarise(total=n()) %>%
    ungroup() %>%
    complete(TRTA, fill=list(total = 0))
  # shift cell counts, zero-filled, formatted, and widened to one column per
  # treatment/baseline-category combination (matching Tplyr's var1_* names)
  t29_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
    left_join(t29_totals,by="TRTA") %>%
    mutate(pct = ifelse(total == 0, 0, (n / total) * 100)) %>%
    mutate(fmtd = paste0(sprintf("%3s",n), ' (', sprintf("%5.1f", pct), '%)')) %>%
    select(TRTA, ANRIND, BNRIND, fmtd) %>%
    pivot_wider(names_from = c(TRTA, BNRIND), id_cols = ANRIND, values_from = fmtd, names_prefix = 'var1_') %>%
    as_tibble()
  testthat::expect_equal(t29_1[1:2,2:7],
                         test_29[1:2,2:7],
                         label = "T29.1")
  #manual check(s)
  #clean up working directory
  rm(t29_totals)
  rm(t29_1)
  rm(test_29)
})
#test 30 ----
# T30: numeric data behind a shift layer (ANRIND vs BNRIND) with a single
# 'by' variable (SEX); every treatment/sex/row/column cell must be counted,
# including zero-filled combinations.
test_that('T30',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    tab <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND), by=SEX)
      )
    build(tab)
    test_30 <- get_numeric_data(tab)[[1]]
    # output table to check attributes
    save(test_30, file = "~/Tplyr/uat/output/test_30.RData")
    #clean up working directory
    rm(tab)
    rm(test_30)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_30.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # count each treatment/sex/shift cell, then expand to the full grid with
  # zeroes so the row ordering matches Tplyr's numeric data
  shift_counts <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    count(TRTA, SEX, ANRIND, BNRIND) %>%
    complete(TRTA, SEX, ANRIND, BNRIND, fill=list(n = 0))
  testthat::expect_equal(shift_counts$n, test_30$n, label = "T30.1")
  #manual check(s)
  #clean up working directory
  rm(shift_counts)
  rm(test_30)
})
#test 31 ----
# T31: shift layer with two 'by' variables (RACE and SEX); same zero-filled
# grid check as T30 but over the larger RACE x SEX x shift grid.
test_that('T31',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    tab <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND), by=vars(RACE, SEX))
      )
    build(tab)
    test_31 <- get_numeric_data(tab)[[1]]
    # output table to check attributes
    save(test_31, file = "~/Tplyr/uat/output/test_31.RData")
    #clean up working directory
    rm(tab)
    rm(test_31)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_31.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # count each treatment/race/sex/shift cell, then expand to the full grid
  # with zeroes so the row ordering matches Tplyr's numeric data
  shift_counts <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    count(TRTA, RACE, SEX, ANRIND, BNRIND) %>%
    complete(TRTA, RACE, SEX, ANRIND, BNRIND, fill=list(n = 0))
  testthat::expect_equal(shift_counts$n, test_31$n, label = "T31.1")
  #manual check(s)
  #clean up working directory
  rm(shift_counts)
  rm(test_31)
})
#test 32 ----
# T32: set_denom_where(TRUE) - the layer's where clause restricts the
# numerators only; denominators use all rows passing the table-level filter
# (hence t32_totals below is computed WITHOUT the ANRIND/BNRIND filter).
# Factor row/column variables (ANRIND_FACTOR/BNRIND_FACTOR) keep empty
# levels in the grid.
test_that('T32',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
      add_layer(
        group_shift(vars(row=ANRIND_FACTOR, column=BNRIND_FACTOR), where=(ANRIND != "" & BNRIND != "")) %>%
          set_format_strings(f_str("xxx (xxx.x%)", n, pct)) %>%
          set_denom_where(TRUE)
      )
    test_32 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_32, file = "~/Tplyr/uat/output/test_32.RData")
    #clean up working directory
    rm(t)
    rm(test_32)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_32.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators: table-level filter only (no ANRIND/BNRIND restriction)
  t32_totals <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2") %>%
    group_by(TRTA) %>%
    summarise(total=n()) %>%
    ungroup() %>%
    complete(TRTA, fill=list(total = 0))
  # numerators: both table- and layer-level filters applied
  t32_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, ANRIND_FACTOR, BNRIND_FACTOR) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND_FACTOR, BNRIND_FACTOR, fill=list(n = 0)) %>%
    left_join(t32_totals,by="TRTA") %>%
    mutate(pct = ifelse(total == 0, 0, (n / total) * 100)) %>%
    mutate(fmtd = paste0(sprintf("%3s",n), ' (', sprintf("%5.1f", pct), '%)')) %>%
    select(TRTA, ANRIND_FACTOR, BNRIND_FACTOR, fmtd) %>%
    pivot_wider(names_from = c(TRTA, BNRIND_FACTOR), id_cols = ANRIND_FACTOR, values_from = fmtd, names_prefix = 'var1_') %>%
    as_tibble()
  testthat::expect_equal(t32_1[1:3,2:10],
                         test_32[1:3,2:10],
                         label = "T32.1")
  #manual check(s)
  #clean up working directory
  rm(t32_totals)
  rm(t32_1)
  rm(test_32)
})
#test 33: shift layer default percentage denominators ----
# Verifies that with no denominator customization, shift percentages are
# computed against totals grouped by treatment arm AND the column (baseline)
# variable — the replication below builds totals by TRTA x BNRIND.
test_that('T33',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND)) %>%
          set_format_strings(f_str("xxx (xxx.x%)",n,pct))
      )
    test_33 <- build(t)
    # output table to check attributes
    save(test_33, file = "~/Tplyr/uat/output/test_33.RData")
    #clean up working directory
    rm(t)
    rm(test_33)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_33.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t33_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND) %>%
    summarise(total=n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, fill=list(total = 0))
  # Only the Placebo / baseline-Normal column is compared against the build.
  t33_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
    left_join(t33_tots, by=c("TRTA","BNRIND")) %>%
    mutate(pct = ifelse(total > 0, n / total * 100,0)) %>%
    mutate(col =paste0(sprintf("%3s",n),' (',sprintf("%5.1f",pct),'%)')) %>%
    filter(TRTA == "Placebo" & BNRIND == "N")
  testthat::expect_equal(t33_1$col,test_33$var1_Placebo_N,label = "T33.1")
  #manual check(s)
  #clean up working directory
  rm(t33_tots)
  rm(t33_1)
  rm(test_33)
})
#test 34: shift layer percentages with no table-level where clause ----
# Verifies that when tplyr_table() has no subsetting, shift percentage
# denominators default to the per-treatment-arm record counts across the
# whole of adlb.
test_that('T34',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND)) %>%
          set_format_strings(f_str("xxx (xxx.x%)",n,pct))
      )
    test_34 <- build(t)
    # output table to check attributes
    save(test_34, file = "~/Tplyr/uat/output/test_34.RData")
    #clean up working directory
    rm(t)
    rm(test_34)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_34.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominator: total record count per treatment arm. The original piped the
  # data through a condition-free filter(adlb), which is a no-op; grouping the
  # data directly is identical in behavior.
  t34_tots <- group_by(adlb, TRTA) %>%
    summarise(total=n())
  t34_1 <- group_by(adlb, TRTA, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
    left_join(t34_tots, by="TRTA") %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
    filter(TRTA == "Placebo" & BNRIND == "N")
  testthat::expect_equal(t34_1$col,test_34$var1_Placebo_N,label = "T34.1")
  #manual check(s)
  #clean up working directory
  rm(t34_tots)
  rm(t34_1)
  rm(test_34)
})
#test 35: shift layer denominators under a table-level where clause ----
# Verifies that with a table-level where clause (and no denominator options),
# percentage denominators are the per-treatment-arm totals of the subsetted
# records only.
test_that('T35',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND)) %>%
          set_format_strings(f_str("xxx (xxx.x%)",n,pct))
      )
    test_35 <- build(t)
    # output table to check attributes
    save(test_35, file = "~/Tplyr/uat/output/test_35.RData")
    #clean up working directory
    rm(t)
    rm(test_35)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_35.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominator: one total per treatment arm within the table-level subset.
  t35_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA) %>%
    summarise(total=n())
  t35_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
    left_join(t35_tots, by="TRTA") %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
    filter(TRTA == "Placebo" & BNRIND == "N")
  testthat::expect_equal(t35_1$col,test_35$var1_Placebo_N,label = "T35.1")
  #manual check(s)
  #clean up working directory
  rm(t35_tots)
  rm(t35_1)
  rm(test_35)
})
#test 36: shift layer denominators from a population dataset ----
# Verifies that with set_pop_data(adsl)/set_pop_treat_var(TRT01P), shift
# percentage denominators are derived from the population data rather than
# the analysis data — the replication counts distinct subjects present in
# both adlb (within the layer subset) and adsl.
test_that('T36',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND),
                    where=(PARAMCD == "BILI" & AVISIT == "Week 2")) %>%
          set_format_strings(f_str("xxx (xxx.x%)",n,pct))
      )
    test_36 <- build(t)
    # output table to check attributes
    save(test_36, file = "~/Tplyr/uat/output/test_36.RData")
    #clean up working directory
    rm(t)
    rm(test_36)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_36.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Inner-join distinct subjects against adsl (TRTA matched to TRT01P) to get
  # the population-based denominator per treatment arm.
  t36_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2") %>%
    select(TRTA, USUBJID) %>%
    distinct(TRTA, USUBJID) %>%
    merge(adsl, by.x=c("USUBJID", "TRTA"), by.y=c("USUBJID", "TRT01P"), all.y = FALSE) %>%
    group_by(TRTA) %>%
    summarise(total=n())
  # NOTE(review): base merge() sorts rows by the by-columns, and the final
  # vector comparison is order-sensitive — keep merge() here rather than a
  # dplyr join unless the ordering is re-verified.
  t36_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, ANRIND, BNRIND, fill=list(n = 0)) %>%
    merge(t36_tots, by.x="TRTA", by.y="TRTA") %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
    filter(TRTA == "Placebo" & BNRIND == "N")
  testthat::expect_equal(t36_1$col,test_36$var1_Placebo_N,label = "T36.1")
  #manual check(s)
  #clean up working directory
  rm(t36_tots)
  rm(t36_1)
  rm(test_36)
})
#test 37: shift layer with explicit set_denoms_by() grouping ----
# Verifies that set_denoms_by(TRTA, PARAMCD, AVISIT) makes percentage
# denominators the record totals within each treatment/parameter/visit cell.
test_that('T37',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND), by=vars(PARAMCD, AVISIT)) %>%
          set_format_strings(f_str("xxx (xxx.x%)",n,pct)) %>%
          set_denoms_by(TRTA, PARAMCD, AVISIT)
      )
    test_37 <- build(t)
    # output table to check attributes
    save(test_37, file = "~/Tplyr/uat/output/test_37.RData")
    #clean up working directory
    rm(t)
    rm(test_37)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_37.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators mirror the set_denoms_by() grouping exactly.
  t37_tots <- filter(adlb) %>%
    group_by(TRTA, PARAMCD, AVISIT) %>%
    summarise(total=n())
  t37_1 <- filter(adlb) %>%
    group_by(TRTA, PARAMCD, AVISIT, ANRIND, BNRIND) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, PARAMCD, AVISIT, ANRIND, BNRIND, fill=list(n = 0)) %>%
    left_join(t37_tots, by=c("TRTA", "PARAMCD", "AVISIT")) %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%3s",n)," (",sprintf("%5.1f",pct),"%)")) %>%
    filter(TRTA == "Placebo" & BNRIND == "N")
  testthat::expect_equal(t37_1$col,test_37$var1_Placebo_N,label = "T37.1")
  #manual check(s)
  #clean up working directory
  rm(t37_tots)
  rm(t37_1)
  rm(test_37)
})
#test 38: count layer with a constant-text `by` label ----
# Verifies that group_count(RACE, by = "Race") repeats the literal label
# "Race" in row_label1 once per distinct RACE level in the built table.
test_that('T38',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE, by = "Race")
      )
    test_38 <- build(t)
    # output table to check attributes
    save(test_38, file = "~/Tplyr/uat/output/test_38.RData")
    #clean up working directory
    rm(t)
    rm(test_38)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_38.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # rep() builds the expected constant label vector directly; the original
  # used replicate(), which is meant for repeated re-evaluation of an
  # expression and produces the same character vector here.
  testthat::expect_equal(rep("Race", times = length(unique(adsl$RACE))),
                         test_38$row_label1,
                         label = "T38.1")
  #manual check(s)
  #clean up working directory
  rm(test_38)
})
#test 39: count layer with multiple by values mixing text and variables ----
# Verifies that by=vars("Ethnicity", ETHNIC, "Race") yields four row-label
# columns: constant "Ethnicity", the ETHNIC value, constant "Race", and the
# RACE value, ordered by the full ETHNIC x RACE grid.
test_that('T39',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE, by = vars("Ethnicity", ETHNIC, "Race"))
      )
    test_39 <- build(t)
    # output table to check attributes
    save(test_39, file = "~/Tplyr/uat/output/test_39.RData")
    #clean up working directory
    rm(t)
    rm(test_39)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_39.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # complete() expands to every ETHNIC x RACE combination, matching the rows
  # the built table is expected to contain.
  t39_1 <- distinct(adsl, ETHNIC, RACE) %>%
    complete(ETHNIC, RACE) %>%
    mutate(ethnic_text = "Ethnicity") %>%
    mutate(race_text = "Race")
  testthat::expect_equal(c(t39_1$ethnic_text, t39_1$ETHNIC, t39_1$race_text, t39_1$RACE),
                         c(test_39$row_label1, test_39$row_label2, test_39$row_label3, test_39$row_label4),
                         label = "T39.1")
  #manual check(s)
  #clean up working directory
  rm(t39_1)
  rm(test_39)
})
#test 40: multiple stacked layers build in declaration order ----
# Verifies a four-layer table (count RACE, desc AGE, desc CUMDOSE, count
# ETHNIC) produces the layers' formatted Placebo results stacked in the
# order the layers were added.
test_that('T40',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE)
      ) %>%
      add_layer(
        group_desc(AGE)
      ) %>%
      add_layer(
        group_desc(CUMDOSE)
      ) %>%
      add_layer(
        group_count(ETHNIC)
      )
    test_40 <- build(t)
    # output table to check attributes
    save(test_40, file = "~/Tplyr/uat/output/test_40.RData")
    #clean up working directory
    rm(t)
    rm(test_40)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_40.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t40_denoms <- filter(adsl, TRT01P == "Placebo") %>%
    group_by(TRT01P) %>%
    summarise(total = n())
  # Layer 1 replication: RACE counts with default "n (pct%)" formatting.
  t40_race <- group_by(adsl, TRT01P, RACE) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE, fill=list(n = 0)) %>%
    filter(TRT01P == "Placebo") %>%
    left_join(t40_denoms, by="TRT01P") %>%
    mutate(pct = n / total *100) %>%
    mutate(col = paste0(sprintf("%2s", n)," (",sprintf("%5.1f",pct),"%)")) %>%
    mutate(label = RACE) %>%
    select(label, col)
  # Layer 4 replication: ETHNIC counts, same default formatting.
  t40_ethnic <- group_by(adsl, TRT01P, ETHNIC) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, ETHNIC, fill=list(n = 0)) %>%
    filter(TRT01P == "Placebo") %>%
    left_join(t40_denoms, by="TRT01P") %>%
    mutate(pct = n / total *100) %>%
    mutate(col = paste0(sprintf("%2s", n)," (",sprintf("%5.1f",pct),"%)")) %>%
    mutate(label = ETHNIC) %>%
    select(label, col)
  # Layer 2 replication: default descriptive statistics of AGE, formatted with
  # the default widths for an AGE-scale variable.
  t40_age <- filter(adsl, TRT01P == "Placebo") %>%
    summarise(n=n(),
              mean=mean(AGE),
              median=median(AGE),
              sd=sd(AGE),
              min=min(AGE),
              max=max(AGE),
              q1=quantile(AGE)[[2]],
              q3=quantile(AGE)[[4]]) %>%
    mutate(col_n = sprintf("%3s", n)) %>%
    mutate(col_meansd = paste0(sprintf("%4.1f", mean)," (",sprintf("%5.2f", sd),")")) %>%
    mutate(col_median = sprintf("%4.1f", median)) %>%
    mutate(col_q1q3 = paste0(sprintf("%4.1f", q1),", ",sprintf("%4.1f", q3))) %>%
    mutate(col_minmax = paste0(sprintf("%2.0f", min),", ",sprintf("%2.0f", max))) %>%
    pivot_longer(cols = c(col_n,col_meansd,col_median,col_q1q3,col_minmax),
                 names_to = "label", values_to = "col") %>%
    select(label, col)
  # Layer 3 replication: same statistics for CUMDOSE, with wider field widths
  # to accommodate the larger magnitude.
  t40_cumdose <- filter(adsl, TRT01P == "Placebo") %>%
    summarise(n=n(),
              mean=mean(CUMDOSE),
              median=median(CUMDOSE),
              sd=sd(CUMDOSE),
              min=min(CUMDOSE),
              max=max(CUMDOSE),
              q1=quantile(CUMDOSE)[[2]],
              q3=quantile(CUMDOSE)[[4]]) %>%
    mutate(col_n = sprintf("%3s", n)) %>%
    mutate(col_meansd = paste0(sprintf("%7.1f", mean)," (",sprintf("%8.2f", sd),")")) %>%
    mutate(col_median = sprintf("%7.1f", median)) %>%
    mutate(col_q1q3 = paste0(sprintf("%7.1f", q1),", ",sprintf("%7.1f", q3))) %>%
    mutate(col_minmax = paste0(sprintf("%5.0f", min),", ",sprintf("%5.0f", max))) %>%
    pivot_longer(cols = c(col_n,col_meansd,col_median,col_q1q3,col_minmax),
                 names_to = "label", values_to = "col") %>%
    select(label, col)
  # Stack in layer order; 'Missing' rows emitted by the built table are
  # excluded from the comparison (presumably the desc layers' Missing rows —
  # confirm against the built output).
  t40_1 <- rbind(t40_race, t40_age, t40_cumdose, t40_ethnic)
  testthat::expect_equal(t40_1$col, filter(test_40, row_label1 != 'Missing')$var1_Placebo,label = "T40.1")
  #manual check(s)
  #clean up working directory
  rm(t40_denoms)
  rm(t40_race)
  rm(t40_ethnic)
  rm(t40_age)
  rm(t40_cumdose)
  rm(t40_1)
  rm(test_40)
})
#test 41: count layer ordered by factor levels ----
# Verifies that set_order_count_method("byfactor") on RACE_FACTOR sorts the
# rows by the factor's level order (ord_layer_1 values 1..4) rather than
# alphabetically.
test_that('T41',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE_FACTOR) %>%
          set_order_count_method("byfactor")
      )
    test_41 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_41, file = "~/Tplyr/uat/output/test_41.RData")
    #clean up working directory
    rm(t)
    rm(test_41)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_41.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Expected labels/order are hard-coded to RACE_FACTOR's level order
  # (defined elsewhere in the UAT setup — confirm if levels change).
  testthat::expect_equal(c(c("WHITE", "BLACK OR AFRICAN AMERICAN","AMERICAN INDIAN OR ALASKA NATIVE", "ASIAN"),
                           c(1, 2, 3, 4)),
                         c(test_41$row_label1, test_41$ord_layer_1),
                         label = "T41.1")
  #manual check(s)
  #clean up working directory
  rm(test_41)
})
#test 42: count layer ordered by count within a chosen column ----
# Verifies set_order_count_method("bycount") with
# set_ordering_cols("Xanomeline High Dose"): rows sort by descending count
# within the Xanomeline High Dose arm.
test_that('T42',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE) %>%
          set_order_count_method("bycount") %>%
          set_ordering_cols("Xanomeline High Dose")
      )
    test_42 <- build(t) %>%
      arrange(ord_layer_index, desc(ord_layer_1))
    # output table to check attributes
    save(test_42, file = "~/Tplyr/uat/output/test_42.RData")
    #clean up working directory
    rm(t)
    rm(test_42)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_42.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Replication: RACE counts in the ordering arm, sorted descending, must
  # match both row labels and the ord_layer_1 sort keys.
  t42_1 <- group_by(adsl, TRT01P, RACE) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE, fill = list(n=0)) %>%
    filter(TRT01P == "Xanomeline High Dose") %>%
    arrange(desc(n))
  testthat::expect_equal(c(t42_1$RACE, t42_1$n),
                         c(test_42$row_label1, test_42$ord_layer_1),
                         label = "T42.1")
  #manual check(s)
  #clean up working directory
  rm(t42_1)
  rm(test_42)
})
#test 43: count layer default (alphanumeric) row ordering ----
# Verifies that with no ordering method set, sorting the built table by
# row_label1 yields the distinct RACE values in alphabetical order.
test_that('T43',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE)
      )
    test_43 <- build(t) %>%
      arrange(ord_layer_index, row_label1)
    # output table to check attributes
    save(test_43, file = "~/Tplyr/uat/output/test_43.RData")
    #clean up working directory
    rm(t)
    rm(test_43)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_43.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  testthat::expect_equal(sort(unique(adsl$RACE)),
                         test_43$row_label1,
                         label = "T43.1")
  #manual check(s)
  #clean up working directory
  rm(test_43)
})
#test 44: count layer ordered by a VARN companion variable ----
# Verifies set_order_count_method("byvarn"): RACE rows sort by the paired
# RACEN numeric variable, and ord_layer_1 carries the RACEN values.
test_that('T44',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE) %>%
          set_order_count_method("byvarn")
      )
    test_44 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_44, file = "~/Tplyr/uat/output/test_44.RData")
    #clean up working directory
    rm(t)
    rm(test_44)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_44.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Expected order: the distinct RACE/RACEN pairs sorted by RACEN.
  t44_1 <- distinct(adsl, RACE, RACEN) %>%
    arrange(RACEN)
  testthat::expect_equal(c(t44_1$RACE, t44_1$RACEN),
                         c(test_44$row_label1, test_44$ord_layer_1),
                         label = "T44.1")
  #manual check(s)
  #clean up working directory
  rm(t44_1)
  rm(test_44)
})
#test 45: by-variable ordering across multiple count layers ----
# Verifies that each layer's by-variable rows are ordered correctly:
# RACE by its RACEN companion, SEX by factor-style order (F before M), with
# the layers stacked in declaration order.
test_that('T45',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(ETHNIC, by=RACE)
      ) %>%
      add_layer(
        group_count(ETHNIC, by=SEX)
      )
    test_45 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_45, file = "~/Tplyr/uat/output/test_45.RData")
    #clean up working directory
    rm(t)
    rm(test_45)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_45.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Sort-key lookup tables for each by-variable.
  t45_racesort <- distinct(adsl, RACE, RACEN) %>%
    mutate(sorter = as.numeric(RACEN)) %>%
    select(RACE,sorter)
  t45_sexsort <- distinct(adsl, SEX) %>%
    mutate(sorter = ifelse(SEX == 'F',1,2)) %>%
    select(SEX,sorter)
  # Layer 1 replication: ETHNIC counts by RACE (Placebo rows only).
  t45_byrace <- group_by(adsl, TRT01P, RACE, ETHNIC) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE, ETHNIC ,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = RACE) %>%
    left_join(t45_racesort, by="RACE") %>%
    select(label, ETHNIC, sorter) %>%
    mutate(ord_layer = 1)
  # Layer 2 replication: ETHNIC counts by SEX (Placebo rows only).
  t45_bysex <- group_by(adsl, TRT01P, SEX, ETHNIC) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, SEX, ETHNIC ,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = SEX) %>%
    left_join(t45_sexsort, by="SEX") %>%
    select(label, ETHNIC, sorter) %>%
    mutate(ord_layer = 2)
  t45_1 <- rbind(t45_byrace, t45_bysex)%>%
    arrange(ord_layer, sorter)
  testthat::expect_equal(c(t45_1$label, t45_1$sorter),
                         c(test_45$row_label1, test_45$ord_layer_1),
                         label = "T45.1")
  #manual check(s)
  #clean up working directory
  rm(t45_racesort)
  rm(t45_sexsort)
  rm(t45_byrace)
  rm(t45_bysex)
  rm(t45_1)
  rm(test_45)
})
#test 46: nested count layer (AEBODSYS/AEDECOD) ordered by count ----
# Verifies a nested AE count layer sorted by descending counts in the
# Xanomeline High Dose arm: body systems sort by their own totals, preferred
# terms sort within each body system.
test_that('T46',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD), where = (AOCC01FL == 'Y')) %>%
          set_order_count_method("bycount") %>%
          set_ordering_cols("Xanomeline High Dose")
      )
    test_46 <- build(t) %>%
      arrange(ord_layer_index, desc(ord_layer_1), row_label1, desc(ord_layer_2), row_label2)
    # output table to check attributes
    save(test_46, file = "~/Tplyr/uat/output/test_46.RData")
    #clean up working directory
    rm(t)
    rm(test_46)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_46.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Outer (body-system) rows: totals per AEBODSYS; AEDECOD set to AEBODSYS so
  # these rows align with the built table's summary rows.
  t46_aebodsys <- filter(adae, AOCC01FL == 'Y') %>%
    group_by(TRTA, AEBODSYS) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, AEBODSYS, fill = list(n=0)) %>%
    mutate(total = n) %>%
    mutate(AEDECOD = AEBODSYS)
  # Inner (preferred-term) rows joined with their body-system totals; the
  # outer rows are re-bound with n=Inf so they sort ahead of their nested
  # terms under desc(n). Nested labels get a leading space to match the
  # built table's indentation.
  t46_1 <- filter(adae, AOCC01FL == 'Y') %>%
    group_by(TRTA, AEBODSYS, AEDECOD) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, AEBODSYS, AEDECOD, fill = list(n=0)) %>%
    left_join(select(t46_aebodsys, TRTA, AEBODSYS, total), by=c("TRTA","AEBODSYS")) %>%
    rbind(mutate(t46_aebodsys, n=Inf)) %>%
    pivot_wider(values_from=c(n,total), names_from = TRTA) %>%
    arrange(desc(`total_Xanomeline High Dose`), AEBODSYS, desc(`n_Xanomeline High Dose`), AEDECOD) %>%
    filter(n_Placebo > 0 | `n_Xanomeline Low Dose` > 0 | `n_Xanomeline High Dose` > 0) %>%
    mutate(AEDECOD = ifelse(AEBODSYS == AEDECOD, AEDECOD, paste0(' ',AEDECOD)))
  testthat::expect_equal(c(t46_1$AEBODSYS, t46_1$AEDECOD, t46_1$`total_Xanomeline High Dose`, t46_1$`n_Xanomeline High Dose`),
                         c(test_46$row_label1, test_46$row_label2, test_46$ord_layer_1, test_46$ord_layer_2),
                         label = "T46.1")
  #manual check(s)
  #clean up working directory
  rm(t46_aebodsys)
  rm(t46_1)
  rm(test_46)
})
#test 47: mixed ordering methods across four count layers ----
# Verifies that each layer keeps its own ordering method: RACE by VARN
# (RACEN), ETHNIC by count in the Xanomeline High Dose arm, SEX and
# RACE_FACTOR by factor order — all within one table.
test_that('T47',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE) %>%
          set_order_count_method("byvarn")
      ) %>%
      add_layer(
        group_count(ETHNIC) %>%
          set_order_count_method("bycount") %>%
          set_ordering_cols("Xanomeline High Dose")
      ) %>%
      add_layer(
        group_count(SEX) %>%
          set_order_count_method("byfactor")
      ) %>%
      add_layer(
        group_count(RACE_FACTOR) %>%
          set_order_count_method("byfactor")
      )
    test_47 <- build(t) %>%
      arrange(ord_layer_index, ord_layer_1)
    # output table to check attributes
    save(test_47, file = "~/Tplyr/uat/output/test_47.RData")
    #clean up working directory
    rm(t)
    rm(test_47)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_47.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Per-layer sort-key lookups replicating each ordering method.
  t47_racesort <- distinct(adsl, RACE, RACEN) %>%
    mutate(sorter = as.numeric(RACEN)) %>%
    select(RACE,sorter)
  t47_ethnicsort <- filter(adsl, TRT01P == "Xanomeline High Dose")%>%
    group_by(ETHNIC) %>%
    summarise(sorter = n()) %>%
    select(ETHNIC,sorter)
  t47_sexsort <- distinct(adsl, SEX) %>%
    mutate(sorter = ifelse(SEX == 'F',1,2)) %>%
    select(SEX,sorter)
  # Hard-coded 1:4 mirrors RACE_FACTOR's level order (defined elsewhere in
  # the UAT setup — confirm if levels change).
  t47_racefactorsort <- distinct(adsl, RACE_FACTOR) %>%
    complete(RACE_FACTOR) %>%
    cbind(sorter = c(1,2,3,4))
  t47_race <- group_by(adsl, TRT01P, RACE) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE ,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = RACE) %>%
    left_join(t47_racesort, by="RACE") %>%
    select(label, sorter) %>%
    mutate(ord_layer = 1)
  t47_ethnic <- group_by(adsl, TRT01P, ETHNIC) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, ETHNIC,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = ETHNIC) %>%
    left_join(t47_ethnicsort, by="ETHNIC") %>%
    select(label, sorter) %>%
    mutate(ord_layer = 2)
  t47_sex <- group_by(adsl, TRT01P, SEX) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, SEX,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = SEX) %>%
    left_join(t47_sexsort, by="SEX") %>%
    select(label, sorter) %>%
    mutate(ord_layer = 3)
  t47_racefactor <- group_by(adsl, TRT01P, RACE_FACTOR) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE_FACTOR ,fill = list(n=0)) %>%
    filter(TRT01P == "Placebo") %>%
    mutate(label = RACE_FACTOR) %>%
    left_join(t47_racefactorsort, by="RACE_FACTOR") %>%
    select(label, sorter) %>%
    mutate(ord_layer = 4)
  # Stack all layers and sort the same way the built table was arranged.
  t47_1 <- rbind(t47_race, t47_ethnic, t47_sex, t47_racefactor)%>%
    arrange(ord_layer, sorter)
  testthat::expect_equal(c(t47_1$label, t47_1$sorter),
                         c(test_47$row_label1, test_47$ord_layer_1),
                         label = "T47.1")
  #manual check(s)
  #clean up working directory
  rm(t47_racesort)
  rm(t47_ethnicsort)
  rm(t47_sexsort)
  rm(t47_racefactorsort)
  rm(t47_race)
  rm(t47_ethnic)
  rm(t47_sex)
  rm(t47_racefactor)
  rm(t47_1)
  rm(test_47)
})
#test 48: table-level count format with a layer-level override ----
# Verifies that set_count_layer_formats() at the table level applies to the
# RACE layer, while the SEX layer's set_format_strings() override takes
# precedence for that layer only.
test_that('T48',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      set_count_layer_formats(n_counts = f_str('xxxx (xxx.x%)',n,pct)) %>%
      add_layer(
        group_count(RACE)
      ) %>%
      add_layer(
        group_count(SEX) %>%
          set_format_strings(n_counts = f_str('[xxx]',n))
      )
    test_48 <- build(t)
    # output table to check attributes
    save(test_48, file = "~/Tplyr/uat/output/test_48.RData")
    #clean up working directory
    rm(t)
    rm(test_48)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_48.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t48_tots <- group_by(adsl, TRT01P) %>%
    summarise(total = n())
  # Layer 1: table-level 'xxxx (xxx.x%)' format.
  t48_1 <- group_by(adsl, TRT01P, RACE) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE, fill = list(n=0)) %>%
    left_join(t48_tots, by="TRT01P") %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%5.1f", pct),'%)')) %>%
    select(col, TRT01P, RACE) %>%
    pivot_wider(values_from = col, names_from = TRT01P)
  # Layer 2: layer-level '[xxx]' override (no percentage).
  t48_2 <- group_by(adsl, TRT01P, SEX) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, SEX, fill = list(n=0)) %>%
    mutate(col = paste0('[',sprintf("%3s",n),']')) %>%
    select(col, TRT01P, SEX) %>%
    pivot_wider(values_from = col, names_from = TRT01P)
  testthat::expect_equal(t48_1$Placebo,
                         filter(test_48, ord_layer_index == 1)$var1_Placebo,
                         label = "T48.1")
  testthat::expect_equal(t48_2$Placebo,
                         filter(test_48, ord_layer_index == 2)$var1_Placebo,
                         label = "T48.2")
  #manual check(s)
  #clean up working directory
  rm(t48_tots)
  rm(t48_1)
  rm(t48_2)
  rm(test_48)
})
#test 49: table-level descriptive formats with a layer-level override ----
# Verifies that set_desc_layer_formats() applies to the CUMDOSE layer, while
# the AGE layer's set_format_strings() fully replaces the table-level
# formats for that layer.
test_that('T49',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      set_desc_layer_formats(meansd = f_str('xxx.x (xxx.xx)',mean, sd),
                             quartiles = f_str('xxx.x (xxx.x, xxx.x)',iqr, q1, q3)
      )%>%
      add_layer(
        group_desc(CUMDOSE)
      ) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            n = f_str('xxx',n),
            meansdvar = f_str('xxx.x (xxx.xx) [xxx.xx]',mean, sd, var),
            medianquarts = f_str('xxx.x (xxx.x, xxx.x)',median, q1, q3)
          )
      )
    test_49 <- build(t)
    # output table to check attributes
    save(test_49, file = "~/Tplyr/uat/output/test_49.RData")
    #clean up working directory
    rm(t)
    rm(test_49)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_49.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Layer 1 replication: table-level meansd + quartiles (IQR, Q1, Q3)
  # formats applied to CUMDOSE.
  t49_1 <- group_by(adsl, TRT01P) %>%
    summarise(mean = mean(CUMDOSE),
              sd = sd(CUMDOSE),
              iqr = IQR(CUMDOSE),
              q1 = quantile(CUMDOSE)[[2]],
              q3 = quantile(CUMDOSE)[[4]]
    ) %>%
    mutate(meansd = paste0(sprintf("%5.1f", mean), ' (',sprintf("%6.2f", sd), ')'))%>%
    mutate(quartiles = paste0(sprintf("%5.1f", iqr), ' (',sprintf("%6.1f", q1),', ',sprintf("%6.1f", q3), ')')) %>%
    pivot_longer(cols = c(meansd, quartiles), values_to = "stat") %>%
    select(TRT01P, name, stat) %>%
    pivot_wider(values_from = stat, names_from = TRT01P)
  # Layer 2 replication: layer-level override formats (n, mean/sd/var,
  # median/quartiles) applied to AGE.
  t49_2 <- group_by(adsl, TRT01P) %>%
    summarise(n = n(),
              mean = mean(AGE),
              sd = sd(AGE),
              var = var(AGE),
              med = median(AGE),
              q1 = quantile(AGE)[[2]],
              q3 = quantile(AGE)[[4]]
    ) %>%
    mutate(n = sprintf("%3s", n)) %>%
    mutate(meansdvar = paste0(sprintf("%5.1f", mean), ' (',sprintf("%6.2f", sd), ') [',sprintf("%6.2f",var),']')) %>%
    mutate(medianquarts = paste0(sprintf("%5.1f", med), ' (',sprintf("%5.1f", q1),', ',sprintf("%5.1f", q3), ')')) %>%
    pivot_longer(cols = c(n, meansdvar, medianquarts), values_to = "stat") %>%
    select(TRT01P, name, stat) %>%
    pivot_wider(values_from = stat, names_from = TRT01P)
  testthat::expect_equal(t49_1$`Xanomeline High Dose`,
                         filter(test_49, ord_layer_index == 1)$`var1_Xanomeline High Dose`,
                         label = "T49.1")
  testthat::expect_equal(t49_2$Placebo,
                         filter(test_49, ord_layer_index == 2)$var1_Placebo,
                         label = "T49.2")
  #manual check(s)
  #clean up working directory
  rm(t49_1)
  rm(t49_2)
  rm(test_49)
})
#test 50: table-level shift format with a layer-level override ----
# Verifies that set_shift_layer_formats() applies to the first shift layer,
# while the second layer's set_format_strings() override ('xxx', count only)
# takes precedence for that layer.
test_that('T50',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      set_shift_layer_formats(f_str('xxxx (xxx.x%)',n,pct)) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND))
      ) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND)) %>%
          set_format_strings(f_str("xxx",n))
      )
    test_50 <- build(t)
    # output table to check attributes
    save(test_50, file = "~/Tplyr/uat/output/test_50.RData")
    #clean up working directory
    rm(t)
    rm(test_50)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_50.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Denominators for the percentage layer: totals per treatment x baseline
  # grade (the default shift denominator grouping).
  t50_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND) %>%
    summarise(total = n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, fill = list(total = 0))
  # Layer 1 replication: table-level 'xxxx (xxx.x%)' format.
  t50_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND, ANRIND) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
    left_join(t50_tots, by=c("TRTA", "BNRIND")) %>%
    mutate(pct = ifelse(total > 0, n / total * 100, 0)) %>%
    mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%5.1f", pct),'%)')) %>%
    select(col, TRTA, BNRIND, ANRIND) %>%
    pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
  # Layer 2 replication: layer-level 'xxx' count-only override.
  t50_2 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND, ANRIND) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
    mutate(col = sprintf("%3s",n)) %>%
    select(col, TRTA, BNRIND, ANRIND) %>%
    pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
  testthat::expect_equal(c(t50_1$Placebo_H,t50_1$Placebo_N),
                         c(filter(test_50, ord_layer_index == 1)$var1_Placebo_H,
                           filter(test_50, ord_layer_index == 1)$var1_Placebo_N),
                         label = "T50.1")
  testthat::expect_equal(c(t50_2$Placebo_H,t50_2$Placebo_N),
                         c(filter(test_50, ord_layer_index == 2)$var1_Placebo_H,
                           filter(test_50, ord_layer_index == 2)$var1_Placebo_N),
                         label = "T50.2")
  #manual check(s)
  #clean up working directory
  rm(t50_tots)
  rm(t50_1)
  rm(t50_2)
  rm(test_50)
})
#test 51: count format driven by the tplyr.count_layer_default_formats option ----
# Verifies that setting options('tplyr.count_layer_default_formats') changes
# the default count format with no table- or layer-level format calls.
# options(opts) afterward restores the global options — opts is presumably
# captured earlier in the UAT script; confirm with the driver.
test_that('T51',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.count_layer_default_formats' = list(
      'n_counts' = f_str('xxxx [xxx.xx%]', n, pct)
    ))
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE)
      )
    test_51 <- build(t)
    # output table to check attributes
    save(test_51, file = "~/Tplyr/uat/output/test_51.RData")
    #clean up working directory
    options(opts)
    rm(t)
    rm(test_51)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_51.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  t51_tots <- group_by(adsl, TRT01P) %>%
    summarise(total = n())
  # Replication formatted to match the option's 'xxxx [xxx.xx%]' template.
  t51_1 <- group_by(adsl, TRT01P, RACE) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRT01P, RACE, fill = list(n=0)) %>%
    left_join(t51_tots, by="TRT01P") %>%
    mutate(pct = n / total * 100) %>%
    mutate(col = paste0(sprintf("%4s",n),' [',sprintf("%6.2f", pct),'%]')) %>%
    select(col, TRT01P, RACE) %>%
    pivot_wider(values_from = col, names_from = TRT01P)
  testthat::expect_equal(t51_1$Placebo,
                         test_51$var1_Placebo,
                         label = "T51.1")
  #manual check(s)
  #clean up working directory
  rm(t51_tots)
  rm(t51_1)
  rm(test_51)
})
#test 52: desc formats driven by the tplyr.desc_layer_default_formats option ----
# Verifies that setting options('tplyr.desc_layer_default_formats') replaces
# the default descriptive-statistic formats with no format calls on the
# table or layer. options(opts) restores the global options afterward —
# opts is presumably captured earlier in the UAT script; confirm with the
# driver.
test_that('T52',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.desc_layer_default_formats' = list(
      'meansd' = f_str('xxx.x [xxx.xx]', mean, sd),
      'medquarts' = f_str('xxx.x, xxx.x, xxx.x', q1, median, q3)
    ))
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE)
      )
    test_52 <- build(t)
    # output table to check attributes
    save(test_52, file = "~/Tplyr/uat/output/test_52.RData")
    #clean up working directory
    options(opts)
    rm(t)
    rm(test_52)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_52.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # Replication of both option-defined formats: 'xxx.x [xxx.xx]' for
  # mean/sd and 'xxx.x, xxx.x, xxx.x' for Q1/median/Q3.
  t52_1 <- group_by(adsl, TRT01P) %>%
    summarise(mean = mean(AGE),
              sd = sd(AGE),
              med = median(AGE),
              q1 = quantile(AGE)[[2]],
              q3 = quantile(AGE)[[4]]
    ) %>%
    mutate(meansd = paste0(sprintf("%5.1f", mean), ' [',sprintf("%6.2f", sd), ']'))%>%
    mutate(quartiles = paste0(sprintf("%5.1f", q1), ', ',sprintf("%5.1f", med),', ',sprintf("%5.1f", q3))) %>%
    pivot_longer(cols = c(meansd, quartiles), values_to = "stat") %>%
    select(TRT01P, name, stat) %>%
    pivot_wider(values_from = stat, names_from = TRT01P)
  testthat::expect_equal(t52_1$Placebo,
                         test_52$var1_Placebo,
                         label = "T52.1")
  #manual check(s)
  #clean up working directory
  rm(t52_1)
  rm(test_52)
})
#test 53 ----
# T53: the 'tplyr.shift_layer_default_formats' option should supply the
# default n/pct format for shift layers (ANRIND by BNRIND shift table on
# bilirubin at Week 2, excluding blank reference-range flags).
test_that('T53',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.shift_layer_default_formats' = list(
      f_str('xxxx (xxx.xx%)', n, pct)
    ))
    t <- tplyr_table(adlb, TRTA, where=(PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "")) %>%
      add_layer(
        group_shift(vars(row=ANRIND, column=BNRIND))
      )
    test_53 <- build(t)
    # output table to check attributes
    save(test_53, file = "~/Tplyr/uat/output/test_53.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(t)
    rm(test_53)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_53.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators are per treatment-by-baseline-category cell
  t53_tots <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND) %>%
    summarise(total = n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, fill = list(total = 0))
  t53_1 <- filter(adlb, PARAMCD == "BILI" & AVISIT == "Week 2" & ANRIND != "" & BNRIND != "") %>%
    group_by(TRTA, BNRIND, ANRIND) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, BNRIND, ANRIND, fill = list(n=0)) %>%
    left_join(t53_tots, by=c("TRTA", "BNRIND")) %>%
    # guard against 0/0 for combinations with an empty denominator
    mutate(pct = ifelse(total > 0, n / total * 100, 0)) %>%
    mutate(col = paste0(sprintf("%4s",n),' (',sprintf("%6.2f", pct),'%)')) %>%
    select(col, TRTA, BNRIND, ANRIND) %>%
    pivot_wider(values_from = col, names_from = c(TRTA, BNRIND))
  testthat::expect_equal(c(t53_1$Placebo_H,t53_1$Placebo_N),
                         c(test_53$var1_Placebo_H, test_53$var1_Placebo_N),
                         label = "T53.1")
  #manual check(s)
  #clean up working directory
  rm(t53_tots)
  rm(t53_1)
  rm(test_53)
})
#test 54 ----
# T54: the 'tplyr.precision_cap' option should cap auto-detected precision
# of descriptive statistics at 5 integer digits and 2 decimal digits.
test_that('T54',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.precision_cap' = c('int'=5, 'dec'=2))
    t <- tplyr_table(adlb, TRTA, where=PARAMCD == "BUN") %>%
      add_layer(
        group_desc(AVAL)
      )
    test_54 <- filter(build(t), row_label1 != 'Missing')
    # output table to check attributes
    save(test_54, file = "~/Tplyr/uat/output/test_54.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(t)
    rm(test_54)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_54.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # integer width: max digits before the decimal point, capped at 5
  t54_int = min(5, max(nchar(sub("\\..*", "", filter(adlb, PARAMCD == "BUN")$AVAL))))
  # decimal width: max digits after the decimal point, capped at 2. The
  # pattern strips everything up to and including the decimal point and
  # yields "" (0 digits) for whole numbers. The previous pattern "*.\\."
  # started with a bare repetition operator, which is not a valid regex,
  # so the decimal count was taken from the whole string instead.
  t54_dec = min(2, max(nchar(sub("^[^.]*\\.?", "", filter(adlb, PARAMCD == "BUN")$AVAL))))
  # field widths follow Tplyr's auto-precision rules: one extra decimal for
  # mean/median/quartiles, two extra for sd, none for min/max
  t54_1 <- filter(adlb, PARAMCD == "BUN") %>%
    group_by(TRTA) %>%
    summarise(n = n(),
              mean = mean(AVAL),
              sd = sd(AVAL),
              median = median(AVAL),
              q1 = quantile(AVAL)[[2]],
              q3 = quantile(AVAL)[[4]],
              min = min(AVAL),
              max = max(AVAL)
    ) %>%
    mutate(n = sprintf("%*s",t54_int,n)) %>%
    mutate(meansd = paste0(sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, mean)), ' (',
                           sprintf("%*s",t54_int + t54_dec + 3, sprintf("%.*f", t54_dec + 2, sd)), ')')) %>%
    mutate(median = sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, median))) %>%
    mutate(quartiles = paste0(sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, q1)),', ',
                              sprintf("%*s",t54_int + t54_dec + 2, sprintf("%.*f", t54_dec + 1, q3)))) %>%
    mutate(minmax = paste0(sprintf("%*s",t54_int + t54_dec + 1, sprintf("%.*f", t54_dec, min)),', ',
                           sprintf("%*s",t54_int + t54_dec + 1, sprintf("%.*f", t54_dec, max)))) %>%
    pivot_longer(cols = c(n, meansd, median, quartiles, minmax), values_to = "stat") %>%
    select(TRTA, name, stat) %>%
    pivot_wider(values_from = stat, names_from = TRTA)
  testthat::expect_equal(t54_1$Placebo,
                         test_54$var1_Placebo,
                         label = "T54.1")
  #manual check(s)
  #clean up working directory
  rm(t54_int)
  rm(t54_dec)
  rm(t54_1)
  rm(test_54)
})
#test 55 ----
# T55: the 'tplyr.custom_summaries' option should register a user-defined
# summary (geometric_mean) usable by name inside set_format_strings().
# The summary refers to the target variable via the .var pronoun.
test_that('T55',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.custom_summaries' = quos(geometric_mean = exp(sum(log(.var[.var > 0]), na.rm=TRUE) / length(.var))))
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            'Geometric Mean' = f_str('xxx.xx', geometric_mean)
          )
      )
    test_55 <- filter(build(t), row_label1 != 'Missing')
    # output table to check attributes
    save(test_55, file = "~/Tplyr/uat/output/test_55.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(t)
    rm(test_55)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_55.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # recompute the geometric mean with the same formula, substituting AGE
  # for the .var pronoun, and format to the f_str width ("%6.2f")
  t55_1 <- group_by(adsl, TRT01P) %>%
    summarise(geometric_mean = exp(sum(log(AGE[AGE > 0]), na.rm=TRUE) / length(AGE))) %>%
    mutate(geometric_mean = sprintf("%6.2f",geometric_mean)) %>%
    pivot_wider(values_from = "geometric_mean",names_from = "TRT01P")
  testthat::expect_equal(c(t55_1$Placebo, t55_1$`Xanomeline Low Dose`, t55_1$`Xanomeline High Dose`),
                         c(test_55$var1_Placebo, test_55$`var1_Xanomeline Low Dose`, test_55$`var1_Xanomeline High Dose`),
                         label = "T55.1")
  #manual check(s)
  #clean up working directory
  rm(t55_1)
  rm(test_55)
})
#test 56 ----
# T56: the 'tplyr.scipen' option should control scientific notation in
# risk-difference output; the check mirrors it by setting base R's
# "scipen" option to the same value while building the expected string.
test_that('T56',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.scipen' = -3)
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE) %>%
          add_risk_diff(c("Xanomeline High Dose", "Placebo"))
      )
    # suppressWarnings: prop.test warns on small cell counts
    test_56 <- suppressWarnings(build(t))
    # output table to check attributes
    save(test_56, file = "~/Tplyr/uat/output/test_56.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(t)
    rm(test_56)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_56.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # match the scipen used when the table was built, then restore it below
  options("scipen" = -3)
  tot_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose"), n=n())[[1]]
  cnt_t <- summarise(filter(adsl, TRT01P == "Xanomeline High Dose" & RACE == 'WHITE'), n=n())[[1]]
  tot_p <- summarise(filter(adsl, TRT01P == "Placebo"), n=n())[[1]]
  cnt_p <- summarise(filter(adsl, TRT01P == "Placebo" & RACE == 'WHITE'), n=n())[[1]]
  # two-sample proportion test; difference (CI low, CI high) to 3 decimals
  testvals <- prop.test(c(cnt_t, cnt_p), c(tot_t,tot_p))
  t56_1 = paste0(format(round(testvals$estimate[[1]] - testvals$estimate[[2]],3),nsmall = 3), ' (',
                 format(round(testvals$conf.int[[1]],3),nsmall = 3), ', ',
                 format(round(testvals$conf.int[[2]],3),nsmall = 3), ')'
  )
  testthat::expect_equal(t56_1,
                         filter(test_56,row_label1 == 'WHITE')$`rdiff_Xanomeline High Dose_Placebo`,
                         label = "T56.1")
  #manual check(s)
  #clean up working directory (restore R's default scipen)
  options("scipen" = 0)
  rm(tot_t)
  rm(cnt_t)
  rm(tot_p)
  rm(cnt_p)
  rm(testvals)
  rm(t56_1)
  rm(test_56)
})
#test 57 ----
# T57: the 'tplyr.quantile_type' option should select the quantile
# algorithm (here type 3, SAS-compatible) used for q1/q3 in desc layers.
test_that('T57',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.quantile_type' = 3)
    t <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_desc(CUMDOSE) %>%
          set_format_strings(
            'Quartiles' = f_str('xxx.x, xxx.x', q1, q3)
          )
      )
    test_57 <- filter(build(t), row_label1 != 'Missing')
    # output table to check attributes
    save(test_57, file = "~/Tplyr/uat/output/test_57.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(t)
    rm(test_57)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_57.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # recompute quartiles explicitly with quantile type 3 and the f_str width
  t57_1 <- group_by(adsl, TRT01P) %>%
    summarise(q1 = quantile(CUMDOSE, type = 3)[[2]],
              q3 = quantile(CUMDOSE, type = 3)[[4]]) %>%
    mutate(col = paste0(sprintf("%5.1f", q1), ', ', sprintf("%5.1f", q3))) %>%
    select(TRT01P, col) %>%
    pivot_wider(values_from = "col",names_from = "TRT01P")
  testthat::expect_equal(c(t57_1$Placebo, t57_1$`Xanomeline Low Dose`, t57_1$`Xanomeline High Dose`),
                         c(test_57$var1_Placebo, test_57$`var1_Xanomeline Low Dose`, test_57$`var1_Xanomeline High Dose`),
                         label = "T57.1")
  #manual check(s)
  #clean up working directory
  rm(t57_1)
  rm(test_57)
})
#test 58 ----
# T58: the 'tplyr.IBMRounding' option should make percentages round half
# away from zero (IBM/SAS-style) instead of R's default round-half-even.
test_that('T58',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    options('tplyr.IBMRounding' = TRUE)
    # 485/1000 and 525/1000 females put the percentages on a .5 boundary,
    # which distinguishes the two rounding rules
    row_num <- seq_len(2000)
    trta = ifelse(row_num <= 1000, "Placebo", "ThisDrg")
    gender = ifelse(between(row_num, 1, 485), "F",
                    ifelse(between(row_num, 1001, 1525), "F", "M"))
    tdat_58 <- tibble(trta, gender)
    t <- tplyr_table(tdat_58, trta) %>%
      add_total_group(group_name = "Total") %>%
      add_layer(
        group_count(gender, by = "Gender") %>%
          set_format_strings(f_str("xxx (xxx%)", n, pct))
      )
    test_58 <- suppressWarnings(build(t))
    # output table to check attributes
    save(test_58, file = "~/Tplyr/uat/output/test_58.RData")
    #clean up working directory ('opts' restores the saved global options)
    options(opts)
    rm(row_num)
    rm(trta)
    rm(gender)
    rm(tdat_58)
    rm(t)
    rm(test_58)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_58.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # rebuild the same synthetic data to derive the expected values
  row_num <- seq_len(2000)
  trta = ifelse(row_num <= 1000, "Placebo", "ThisDrg")
  gender = ifelse(between(row_num, 1, 485), "F",
                  ifelse(between(row_num, 1001, 1525), "F", "M"))
  tdat_58 <- tibble(trta, gender)
  t58_tots <- rbind(tdat_58, mutate(tdat_58, trta = "Total")) %>%
    group_by(trta) %>%
    summarise(tot = n())
  t58_1 <- rbind(tdat_58, mutate(tdat_58, trta = "Total")) %>%
    group_by(trta, gender) %>%
    summarise(n = n()) %>%
    merge(t58_tots, by = "trta") %>%
    mutate(pct = n / tot * 100) %>%
    # IBM rounding to 0 decimals: add 0.5 away from zero, then truncate
    mutate(rnd = trunc(pct * 1 + sign(pct) * 0.5) / 1) %>%
    mutate(fmtd = paste0(sprintf("%3.0f", n)," (",sprintf("%3.0f", rnd), "%)"))
  testthat::expect_equal(t58_1$fmtd,
                         c(test_58$var1_Placebo,test_58$var1_ThisDrg,test_58$var1_Total),
                         label = "T58.1")
  #manual check(s)
  #clean up working directory; remove every temporary created above so no
  #stray globals (previously row_num, trta, gender and t58_tots were left
  #behind) leak into later tests
  rm(row_num)
  rm(trta)
  rm(gender)
  rm(t58_tots)
  rm(t58_1)
  rm(test_58)
  rm(tdat_58)
})
#test 59 ----
# T59: add_column_headers() should insert a header row, split from a
# pipe-delimited specification, as the first row of the built table.
test_that('T59',{
  if(is.null(vur)) {
    # Build the count table, coerce every column to character (headers are
    # strings), then prepend the column header row. Outputs are written to
    # the "~/Tplyr/uat/output" folder.
    header_spec <- "Race|Placebo|Xanomeline High Dose|Xanomeline Low Dose|LayerIndex|Sorter"
    built_tbl <- tplyr_table(adsl, TRT01P) %>%
      add_layer(
        group_count(RACE)
      ) %>%
      build() %>%
      mutate_all(as.character)
    test_59 <- add_column_headers(built_tbl, header_spec)
    # persist the table so the validated-run branch can reload it
    save(test_59, file = "~/Tplyr/uat/output/test_59.RData")
    # tidy the workspace
    rm(header_spec)
    rm(built_tbl)
    rm(test_59)
  } else {
    load("~/Tplyr/uat/output/test_59.RData")
  }
  skip_if(is.null(vur))
  # programmatic check: the first row must hold the header labels in order
  expected_header <- c("Race", "Placebo","Xanomeline High Dose","Xanomeline Low Dose", "LayerIndex", "Sorter")
  testthat::expect_equal(expected_header,
                         as.character(test_59[1,]),
                         label = "T59.1")
  # tidy the workspace
  rm(expected_header)
  rm(test_59)
})
#test 60 ----
# T60: apply_row_masks(row_breaks = TRUE, ord_layer_1) should blank
# repeated row labels and insert an empty break row between groups of
# the first ordering variable.
test_that('T60',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          set_format_strings(f_str('xxx', n))
      ) %>%
      build() %>%
      arrange(desc(ord_layer_1), desc(ord_layer_2))
    test_60 <- apply_row_masks(t, row_breaks = TRUE, ord_layer_1)
    # output table to check attributes
    save(test_60, file = "~/Tplyr/uat/output/test_60.RData")
    #clean up working directory
    rm(t)
    rm(test_60)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_60.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # body-system summary rows carry AEDECOD == AEBODSYS so they can be told
  # apart from preferred-term rows after binding
  t60_aebodsys <- group_by(adae, TRTA, AEBODSYS) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, AEBODSYS, fill = list(n=0)) %>%
    mutate(AEDECOD = AEBODSYS)
  # break rows use n = -1 as a sentinel, replaced by "" when formatting
  t60_breaks <- select(t60_aebodsys, TRTA, AEBODSYS) %>%
    mutate(n = -1) %>%
    mutate(AEDECOD = "")
  t60_1 <- group_by(adae, TRTA, AEBODSYS, AEDECOD) %>%
    summarise(n=n()) %>%
    ungroup() %>%
    complete(TRTA, AEBODSYS, AEDECOD, fill = list(n=0)) %>%
    rbind(t60_aebodsys) %>%
    rbind(t60_breaks) %>%
    pivot_wider(values_from=c(n), names_from = TRTA) %>%
    filter(Placebo != 0 | `Xanomeline Low Dose` != 0 | `Xanomeline High Dose` != 0) %>%
    # prefix trick so sorting puts summary rows ('z...') after term rows
    # (' ...') and break rows ('') last within each body system; the
    # temporary prefix is stripped again two steps below
    mutate(AEDECOD = ifelse(AEDECOD == "", "", ifelse(AEBODSYS == AEDECOD, paste0('z', AEDECOD), paste0(' ',AEDECOD)))) %>%
    arrange(AEBODSYS, desc(AEDECOD)) %>%
    mutate(AEDECOD = substring(AEDECOD, 2)) %>%
    mutate(AEBODSYS = ifelse(AEBODSYS == AEDECOD, AEBODSYS, "")) %>%
    mutate(Placebo = ifelse(Placebo == -1, "", sprintf("%3s",Placebo))) %>%
    mutate(`Xanomeline Low Dose` = ifelse(`Xanomeline Low Dose` == -1, "", sprintf("%3s",`Xanomeline Low Dose`))) %>%
    mutate(`Xanomeline High Dose` = ifelse(`Xanomeline High Dose` == -1, "", sprintf("%3s",`Xanomeline High Dose`)))
  testthat::expect_equal(c(t60_1$AEBODSYS, t60_1$AEDECOD, t60_1$Placebo,
                           t60_1$`Xanomeline High Dose`, t60_1$`Xanomeline Low Dose`),
                         c(test_60$row_label1, test_60$row_label2, test_60$var1_Placebo,
                           test_60$`var1_Xanomeline High Dose`, test_60$`var1_Xanomeline Low Dose`),
                         label = "T60.1")
  #manual check(s)
  #clean up working directory
  rm(t60_aebodsys)
  rm(t60_breaks)
  rm(t60_1)
  rm(test_60)
})
#test 61 ----
# T61: end-to-end integration — a multi-layer demographics table is built,
# masked, given headers, converted via huxtable, and written out as RTF
# with pharmaRTF. Verified manually (vur response T61.1), not by code.
test_that('T61',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P) %>%
      add_total_group() %>%
      add_treat_grps('Total Xanomeline' = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      add_layer(
        group_count(SEX, by = "Sex")
      ) %>%
      add_layer(
        group_desc(AGE, by = "Age")
      ) %>%
      add_layer(
        group_count(RACE_FACTOR, by = "Race")
      ) %>%
      add_layer(
        group_count(ETHNIC, by = "Ethnicity")
      ) %>%
      add_layer(
        group_desc(WEIGHTBL, by = "Baseline Weight")
      )
    built <- build(t) %>%
      apply_row_masks() %>%
      select(starts_with("row"),"var1_Placebo",starts_with("var1_X"),"var1_Total Xanomeline","var1_Total") %>%
      add_column_headers("Parameter | | Placebo | Xanomeline Low Dose | Xanomeline High Dose |
                         Total | Total Xanomeline")
    hux <- huxtable::as_hux(built) %>%
      huxtable::set_width(1.5) %>%
      huxtable::map_align(huxtable::by_cols("left","left","center","center","center","center","center"))
    test_61 <- pharmaRTF::rtf_doc(hux) %>%
      pharmaRTF::add_titles(pharmaRTF::hf_line("Demographics Summary", bold=TRUE))
    # output table to check attributes (RTF document, reviewed by hand)
    pharmaRTF::write_rtf(test_61, file = "~/Tplyr/uat/output/test_61.rtf")
    #clean up working directory
    rm(t)
    rm(built)
    rm(hux)
    rm(test_61)
    #load output for checks
  } else {
    # nothing to reload: the RTF file is inspected manually
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  #manual check(s): reviewer confirms the rendered RTF in the vur record
  expect_true(vur[vur$ID == "T61.1", "Response"])
  #clean up working directory
})
#test 62 ----
# T62: distinct subject counts with a separate population dataset,
# a combined "Treated" group, a SEX column split, and a risk difference
# (Treated vs Placebo). Also checks header_n() denominators.
test_that('T62',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, cols=SEX) %>%
      add_treat_grps("Treated" = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_distinct_by(USUBJID) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          add_risk_diff(c('Treated','Placebo'))
      )
    # list of: [1] built table, [2] header_n denominators
    test_62 <- list(suppressWarnings(build(t)), header_n(t))
    # output table to check attributes
    save(test_62, file = "~/Tplyr/uat/output/test_62.RData")
    #clean up working directory
    rm(t)
    rm(test_62)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_62.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # denominators: distinct subjects per arm/sex, with the Treated arm built
  # by duplicating the two Xanomeline arms
  # NOTE(review): complete(..., fill=list(n=0)) names a column 'n' that does
  # not exist here (the count column is 'total') — presumably harmless for
  # this data, but confirm no arm/sex combination is absent from adsl
  t62_tots <- rbind(adsl, mutate(filter(adsl, TRT01P %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                                 TRT01P = 'Treated')) %>%
    distinct(TRT01P, SEX, USUBJID) %>%
    group_by(TRT01P, SEX) %>%
    summarise(total=n()) %>%
    mutate(total = as.integer(total)) %>%
    complete(TRT01P, SEX, fill=list(n=0))
  t62_ae <- rbind(adae, mutate(filter(adae, TRTA %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                               TRTA = 'Treated'))
  # AEDECOD = ' ' rows replicate the body-system summary level
  t62_calc <- rbind(t62_ae, mutate(t62_ae,AEDECOD = ' ')) %>%
    distinct(TRTA, SEX, AEBODSYS, AEDECOD, USUBJID) %>%
    group_by(TRTA, SEX, AEBODSYS, AEDECOD) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, SEX, AEBODSYS, AEDECOD, fill=list(n=0)) %>%
    merge(t62_tots, by.x=c("TRTA","SEX"), by.y=c("TRT01P","SEX")) %>%
    mutate(pct = (n / total) * 100) %>%
    mutate(col = paste0(sprintf('%3s',n),' (',sprintf("%5.1f", pct),'%)')) %>%
    pivot_wider(names_from = c(TRTA, SEX), values_from = c(col,n,total,pct)) %>%
    filter(n_Placebo_F != 0 | n_Placebo_M != 0 |
             n_Treated_F != 0 | n_Treated_M != 0 |
             `n_Xanomeline High Dose_F` != 0 | `n_Xanomeline High Dose_M` != 0 |
             `n_Xanomeline Low Dose_F` != 0 | `n_Xanomeline Low Dose_M` != 0)
  # risk difference per row via prop.test (Treated vs Placebo, females)
  t62_2 <- rowwise(t62_calc) %>%
    mutate(est1 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[1]]) %>%
    mutate(est2 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[2]]) %>%
    mutate(lci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[1]]) %>%
    mutate(uci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[2]]) %>%
    mutate(rdiff = est1 - est2) %>%
    mutate(col = paste0(sprintf("%6.3f",rdiff),' (',sprintf("%6.3f",lci), ', ',sprintf("%6.3f",uci),')'))
  testthat::expect_equal(c(t62_calc$col_Placebo_F, t62_calc$col_Placebo_M, t62_calc$col_Treated_F, t62_calc$col_Treated_M),
                         c(test_62[[1]]$var1_Placebo_F, test_62[[1]]$var1_Placebo_M, test_62[[1]]$var1_Treated_F, test_62[[1]]$var1_Treated_M),
                         label = "T62.1")
  testthat::expect_equal(t62_2$col,
                         test_62[[1]]$rdiff_Treated_Placebo_F,
                         label = "T62.2")
  testthat::expect_equal(t62_tots$total,
                         test_62[[2]]$n,
                         label = "T62.3")
  #manual check(s)
  #clean up working directory
  rm(t62_tots)
  rm(t62_ae)
  rm(t62_calc)
  rm(t62_2)
  rm(test_62)
})
#test 63 ----
# T63: same layout as T62 but with a table-level where filter
# (RACE == 'WHITE'); the check applies the same filter to both adae and
# the adsl population data.
test_that('T63',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, where=RACE == 'WHITE', cols=SEX) %>%
      add_treat_grps("Treated" = c("Xanomeline High Dose", "Xanomeline Low Dose")) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_distinct_by(USUBJID) %>%
      add_layer(
        group_count(vars(AEBODSYS, AEDECOD)) %>%
          add_risk_diff(c('Treated','Placebo'))
      )
    # list of: [1] built table, [2] header_n denominators
    test_63 <- list(suppressWarnings(build(t)), header_n(t))
    # output table to check attributes
    save(test_63, file = "~/Tplyr/uat/output/test_63.RData")
    #clean up working directory
    rm(t)
    rm(test_63)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_63.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # NOTE(review): complete(..., fill=list(n=0)) names a column 'n' that does
  # not exist here (the count column is 'total') — same pattern as T62
  t63_tots <- rbind(adsl, mutate(filter(adsl, TRT01P %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                                 TRT01P = 'Treated')) %>%
    filter(RACE == 'WHITE') %>%
    distinct(TRT01P, SEX, USUBJID) %>%
    group_by(TRT01P, SEX) %>%
    summarise(total=n()) %>%
    mutate(total = as.integer(total)) %>%
    complete(TRT01P, SEX, fill=list(n=0))
  t63_ae <- rbind(adae, mutate(filter(adae, TRTA %in% c("Xanomeline High Dose", "Xanomeline Low Dose")),
                               TRTA = 'Treated')) %>%
    filter(RACE == 'WHITE')
  # AEDECOD = ' ' rows replicate the body-system summary level
  t63_calc <- rbind(t63_ae, mutate(t63_ae,AEDECOD = ' ')) %>%
    distinct(TRTA, SEX, AEBODSYS, AEDECOD, USUBJID) %>%
    group_by(TRTA, SEX, AEBODSYS, AEDECOD) %>%
    summarise(n = n()) %>%
    ungroup() %>%
    complete(TRTA, SEX, AEBODSYS, AEDECOD, fill=list(n=0)) %>%
    merge(t63_tots, by.x=c("TRTA","SEX"), by.y=c("TRT01P","SEX")) %>%
    mutate(pct = (n / total) * 100) %>%
    mutate(col = paste0(sprintf('%3s',n),' (',sprintf("%5.1f", pct),'%)')) %>%
    pivot_wider(names_from = c(TRTA, SEX), values_from = c(col,n,total,pct)) %>%
    filter(n_Placebo_F != 0 | n_Placebo_M != 0 |
             n_Treated_F != 0 | n_Treated_M != 0 |
             `n_Xanomeline High Dose_F` != 0 | `n_Xanomeline High Dose_M` != 0 |
             `n_Xanomeline Low Dose_F` != 0 | `n_Xanomeline Low Dose_M` != 0)
  # risk difference per row via prop.test (Treated vs Placebo, females)
  t63_2 <- rowwise(t63_calc) %>%
    mutate(est1 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[1]]) %>%
    mutate(est2 = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$estimate[[2]]) %>%
    mutate(lci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[1]]) %>%
    mutate(uci = suppressWarnings(prop.test(c(n_Treated_F, n_Placebo_F), c(total_Treated_F, total_Placebo_F)))$conf.int[[2]]) %>%
    mutate(rdiff = est1 - est2) %>%
    mutate(col = paste0(sprintf("%6.3f",rdiff),' (',sprintf("%6.3f",lci), ', ',sprintf("%6.3f",uci),')'))
  testthat::expect_equal(c(t63_calc$col_Placebo_F, t63_calc$col_Placebo_M, t63_calc$col_Treated_F, t63_calc$col_Treated_M),
                         c(test_63[[1]]$var1_Placebo_F, test_63[[1]]$var1_Placebo_M, test_63[[1]]$var1_Treated_F, test_63[[1]]$var1_Treated_M),
                         label = "T63.1")
  testthat::expect_equal(t63_2$col,
                         test_63[[1]]$rdiff_Treated_Placebo_F,
                         label = "T63.2")
  testthat::expect_equal(t63_tots$total,
                         test_63[[2]]$n,
                         label = "T63.3")
  #manual check(s)
  #clean up working directory
  rm(t63_tots)
  rm(t63_ae)
  rm(t63_calc)
  rm(t63_2)
  rm(test_63)
})
#test 64 ----
# T64: a table-level where clause that removes whole treatment/sex
# combinations should still produce those columns, zero-filled for counts
# and blank for descriptive statistics.
test_that('T64',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adsl, TRT01P, cols = SEX, where = !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      add_layer(
        group_count(AGEGR1) %>%
          set_format_strings(f_str("xxx", n))
      ) %>%
      add_layer(
        group_desc(AGE) %>%
          set_format_strings(
            "mean" = f_str("xx.xx", mean)
          )
      )
    test_64 <- build(t)
    # output table to check attributes
    save(test_64, file = "~/Tplyr/uat/output/test_64.RData")
    #clean up working directory
    rm(t)
    rm(test_64)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_64.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # the shells (all observed combinations BEFORE filtering) force filtered-out
  # columns back into the expected output with NA counts/stats
  t64_cnt_shell <- unique(adsl[,c("TRT01P", "SEX", "AGEGR1")])
  t_64_cnts <- filter(adsl, !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRT01P, SEX, AGEGR1) %>%
    summarise(n = n()) %>%
    right_join(t64_cnt_shell, by = c("TRT01P", "SEX", "AGEGR1")) %>%
    mutate(fmtd = if_else(is.na(n), '  0', sprintf("%3s", n))) %>%
    mutate(row_label = AGEGR1) %>%
    select(TRT01P, SEX, row_label, fmtd)
  t64_stat_shell <- unique(adsl[,c("TRT01P", "SEX")])
  # filtered-out desc cells format as empty strings, not zeroes
  t_64_stats <- filter(adsl, !(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRT01P, SEX) %>%
    summarise(mean = mean(AGE)) %>%
    right_join(t64_stat_shell, by = c("TRT01P", "SEX")) %>%
    mutate(fmtd = if_else(is.na(mean), '', sprintf("%5.2f", mean))) %>%
    mutate(row_label = "mean") %>%
    select(TRT01P, SEX, row_label, fmtd)
  t_64 <- rbind(t_64_cnts, t_64_stats) %>%
    pivot_wider(names_from = c("TRT01P", "SEX"), values_from = fmtd, id_cols = row_label)
  testthat::expect_equal(c(t_64$Placebo_F, t_64$Placebo_M,
                           t_64$`Xanomeline Low Dose_F`, t_64$`Xanomeline Low Dose_M`,
                           t_64$`Xanomeline High Dose_F`, t_64$`Xanomeline High Dose_M`),
                         c(test_64$var1_Placebo_F, test_64$var1_Placebo_M,
                           test_64$`var1_Xanomeline Low Dose_F`, test_64$`var1_Xanomeline Low Dose_M`,
                           test_64$`var1_Xanomeline High Dose_F`, test_64$`var1_Xanomeline High Dose_M`),
                         label = "T64.1")
  #manual check(s)
  #clean up working directory
  rm(t64_cnt_shell)
  rm(t_64_cnts)
  rm(t64_stat_shell)
  rm(t_64)
  rm(t_64_stats)
  rm(test_64)
})
#test 65 ----
# T65: set_pop_where() should filter the population data with its own
# condition, mirroring the table-level where on the event data.
test_that('T65',{
  if(is.null(vur)) {
    #perform test and create outputs to use for checks
    #if input files are needed they should be read in from "~/uat/input" folder
    #outputs should be sent to "~/uat/output" folder
    t <- tplyr_table(adae, TRTA, cols = SEX, where = !(TRTA == 'Xanomeline High Dose' | (TRTA == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      set_pop_data(adsl) %>%
      set_pop_treat_var(TRT01P) %>%
      set_pop_where(!(TRT01P == 'Xanomeline High Dose' | (TRT01P == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
      add_layer(
        group_count(AEBODSYS) %>%
          set_format_strings(f_str("xxx", n))
      )
    test_65 <- build(t)
    # output table to check attributes
    save(test_65, file = "~/Tplyr/uat/output/test_65.RData")
    #clean up working directory
    rm(t)
    rm(test_65)
    #load output for checks
  } else {
    load("~/Tplyr/uat/output/test_65.RData")
  }
  #perform checks
  skip_if(is.null(vur))
  #programmatic check(s)
  # the shell keeps filtered-out arm/sex columns in the expected frame
  t65_shell <- unique(adae[,c("TRTA", "SEX")])
  t_65 <- filter(adae, !(TRTA == 'Xanomeline High Dose' | (TRTA == 'Xanomeline Low Dose' & SEX == 'F'))) %>%
    group_by(TRTA, SEX, AEBODSYS) %>%
    summarise(n = n()) %>%
    right_join(t65_shell, by = c("TRTA", "SEX")) %>%
    mutate(fmtd = if_else(is.na(n), '  0', sprintf("%3s", n))) %>%
    mutate(row_label = AEBODSYS) %>%
    select(TRTA, SEX, row_label, fmtd) %>%
    pivot_wider(names_from = c("TRTA", "SEX"), values_from = fmtd, id_cols = row_label) %>%
    filter(!is.na(row_label)) %>%
    arrange(row_label)
  # any remaining NA cells represent zero counts in the built table
  t_65[is.na(t_65)] <- '  0'
  testthat::expect_equal(c(t_65$Placebo_F, t_65$Placebo_M,
                           t_65$`Xanomeline Low Dose_F`, t_65$`Xanomeline Low Dose_M`,
                           t_65$`Xanomeline High Dose_F`, t_65$`Xanomeline High Dose_M`),
                         c(test_65$var1_Placebo_F, test_65$var1_Placebo_M,
                           test_65$`var1_Xanomeline Low Dose_F`, test_65$`var1_Xanomeline Low Dose_M`,
                           test_65$`var1_Xanomeline High Dose_F`, test_65$`var1_Xanomeline High Dose_M`),
                         label = "T65.1")
  #manual check(s)
  #clean up working directory
  rm(t65_shell)
  rm(t_65)
  rm(test_65)
})
#clean up ----
# remove the validated-user-response record now that all tests have run
rm(vur)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca-fit.R
\name{apd_pca}
\alias{apd_pca}
\alias{apd_pca.default}
\alias{apd_pca.data.frame}
\alias{apd_pca.matrix}
\alias{apd_pca.formula}
\alias{apd_pca.recipe}
\title{Fit a \code{apd_pca}}
\usage{
apd_pca(x, ...)
\method{apd_pca}{default}(x, ...)
\method{apd_pca}{data.frame}(x, threshold = 0.95, ...)
\method{apd_pca}{matrix}(x, threshold = 0.95, ...)
\method{apd_pca}{formula}(formula, data, threshold = 0.95, ...)
\method{apd_pca}{recipe}(x, data, threshold = 0.95, ...)
}
\arguments{
\item{x}{Depending on the context:
\itemize{
\item A \strong{data frame} of predictors.
\item A \strong{matrix} of predictors.
\item A \strong{recipe} specifying a set of preprocessing steps
created from \code{\link[recipes:recipe]{recipes::recipe()}}.
}}
\item{...}{Not currently used, but required for extensibility.}
\item{threshold}{A number indicating the percentage of variance desired from
the principal components. It must be a number greater than 0 and less than
or equal to 1.}
\item{formula}{A formula specifying the predictor terms on the right-hand
side. No outcome should be specified.}
\item{data}{When a \strong{recipe} or \strong{formula} is used, \code{data} is specified as:
\itemize{
\item A \strong{data frame} containing the predictors.
}}
}
\value{
A \code{apd_pca} object.
}
\description{
\code{apd_pca()} fits a model.
}
\details{
The function computes the principal components that account for
up to either 95\% or the provided \code{threshold} of variability. It also
computes the percentiles of the absolute value of the principal components.
Additionally, it calculates the mean of each principal component.
}
\examples{
predictors <- mtcars[, -1]
# Data frame interface
mod <- apd_pca(predictors)
# Formula interface
mod2 <- apd_pca(mpg ~ ., mtcars)
# Recipes interface
library(recipes)
rec <- recipe(mpg ~ ., mtcars)
rec <- step_log(rec, disp)
mod3 <- apd_pca(rec, mtcars)
}
| /man/apd_pca.Rd | permissive | thecodemasterk/applicable | R | false | true | 1,984 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca-fit.R
\name{apd_pca}
\alias{apd_pca}
\alias{apd_pca.default}
\alias{apd_pca.data.frame}
\alias{apd_pca.matrix}
\alias{apd_pca.formula}
\alias{apd_pca.recipe}
\title{Fit a \code{apd_pca}}
\usage{
apd_pca(x, ...)
\method{apd_pca}{default}(x, ...)
\method{apd_pca}{data.frame}(x, threshold = 0.95, ...)
\method{apd_pca}{matrix}(x, threshold = 0.95, ...)
\method{apd_pca}{formula}(formula, data, threshold = 0.95, ...)
\method{apd_pca}{recipe}(x, data, threshold = 0.95, ...)
}
\arguments{
\item{x}{Depending on the context:
\itemize{
\item A \strong{data frame} of predictors.
\item A \strong{matrix} of predictors.
\item A \strong{recipe} specifying a set of preprocessing steps
created from \code{\link[recipes:recipe]{recipes::recipe()}}.
}}
\item{...}{Not currently used, but required for extensibility.}
\item{threshold}{A number indicating the percentage of variance desired from
the principal components. It must be a number greater than 0 and less than
or equal to 1.}
\item{formula}{A formula specifying the predictor terms on the right-hand
side. No outcome should be specified.}
\item{data}{When a \strong{recipe} or \strong{formula} is used, \code{data} is specified as:
\itemize{
\item A \strong{data frame} containing the predictors.
}}
}
\value{
A \code{apd_pca} object.
}
\description{
\code{apd_pca()} fits a model.
}
\details{
The function computes the principal components that account for
up to either 95\% or the provided \code{threshold} of variability. It also
computes the percentiles of the absolute value of the principal components.
Additionally, it calculates the mean of each principal component.
}
\examples{
predictors <- mtcars[, -1]
# Data frame interface
mod <- apd_pca(predictors)
# Formula interface
mod2 <- apd_pca(mpg ~ ., mtcars)
# Recipes interface
library(recipes)
rec <- recipe(mpg ~ ., mtcars)
rec <- step_log(rec, disp)
mod3 <- apd_pca(rec, mtcars)
}
|
\name{genome.plot}
\alias{genome.plot}
\alias{genome.plot.default}
\alias{genome.plot.arrayCGH}
\title{Pan-genomic representation of a normalized arrayCGH}
\description{Displays a pan-genomic representation of a normalized arrayCGH.}
\usage{
\method{genome.plot}{arrayCGH}(arrayCGH, x="PosOrder", y="LogRatio",
chrLim=NULL, col.var=NULL, clim=NULL, cex=NULL, pch=NULL, \ldots)
\method{genome.plot}{default}(data, pch=NULL, cex=NULL, xlab="", ylab="", \ldots)}
\arguments{
\item{arrayCGH}{an object of type \code{arrayCGH}}
\item{data}{a data frame with two columns: 'x' and 'y', and optionally
a column data\$chrLim giving the limits of each chromosome}
\item{x}{a variable name from \code{arrayCGH\$cloneValues} giving the order position
of the clones along the genome (defaults to 'PosOrder')}
\item{y}{a variable name from \code{arrayCGH\$cloneValues} to be plotted along the
genome (defaults to 'LogRatio')}
\item{chrLim}{an optional variable name from \code{arrayCGH\$cloneValues}
giving the limits of each chromosome}
\item{col.var}{a variable name from \code{arrayCGH\$cloneValues}
defining the color legend}
\item{clim}{a numeric vector of length 2: color range limits (used if \code{col.var} is numeric)}
\item{cex}{a numerical value giving the amount by which plotting text
and symbols should be scaled relative to the default: see \code{\link{par}}}
\item{xlab}{a title for the x axis: see \code{\link{title}}}
\item{ylab}{a title for the y axis: see \code{\link{title}}}
\item{pch}{either an integer specifying a symbol or a single character
to be used as the default in plotting points: see \code{\link{par}} }
\item{...}{further arguments to be passed to \code{plot}}
}
\details{If \code{col.var} is a numeric variable, \code{y} colors are
proportional to \code{col.var} values; if it is a character variable
or a factor, one color is assigned to each different value of
\code{col.var}. If \code{col.var} is NULL, colors are proportional to
\code{y} values.}
\author{Pierre Neuvial, \email{manor@curie.fr}.}
\note{People interested in tools for array-CGH analysis can
visit our web-page: \url{http://bioinfo.curie.fr}.}
\examples{
data(spatial)
## default color code: log-ratios
\dontrun{
genome.plot(edge.norm, chrLim="LimitChr")
}
## color code determined by a qualitative variable: ZoneGNL (DNA copy number code)
edge.norm$cloneValues$ZoneGNL <- as.factor(edge.norm$cloneValues$ZoneGNL)
\dontrun{
genome.plot(edge.norm, col.var="ZoneGNL")
}
## comparing profiles with and without normalization
## aggregate data without normalization (flags)
gradient.nonorm <- norm(gradient, flag.list=NULL, var="LogRatio",
FUN=median, na.rm=TRUE)
gradient.nonorm <- sort(gradient.nonorm)
\dontrun{
genome.plot(gradient.nonorm, pch=20, main="Genomic profile without
normalization", chrLim="LimitChr")
x11()
genome.plot(gradient.norm, pch=20, main="Genomic profile with
normalization", chrLim="LimitChr")
}
}
\seealso{\code{\link{flag}}, \code{\link{report.plot}}}
\keyword{hplot}
| /man/genome.plot.Rd | no_license | pneuvial/MANOR | R | false | false | 3,066 | rd | \name{genome.plot}
\alias{genome.plot}
\alias{genome.plot.default}
\alias{genome.plot.arrayCGH}
\title{Pan-genomic representation of a normalized arrayCGH}
\description{Displays a pan-genomic representation of a normalized arrayCGH.}
\usage{
\method{genome.plot}{arrayCGH}(arrayCGH, x="PosOrder", y="LogRatio",
chrLim=NULL, col.var=NULL, clim=NULL, cex=NULL, pch=NULL, \ldots)
\method{genome.plot}{default}(data, pch=NULL, cex=NULL, xlab="", ylab="", \ldots)}
\arguments{
\item{arrayCGH}{an object of type \code{arrayCGH}}
\item{data}{a data frame with two columns: 'x' and 'y', and optionally
a column data\$chrLim giving the limits of each chromosome}
\item{x}{a variable name from \code{arrayCGH\$cloneValues} giving the order position
of the clones along the genome (defaults to 'PosOrder')}
\item{y}{a variable name from \code{arrayCGH\$cloneValues} to be plotted along the
genome (defaults to 'LogRatio')}
\item{chrLim}{an optional variable name from \code{arrayCGH\$cloneValues}
giving the limits of each chromosome}
\item{col.var}{a variable name from \code{arrayCGH\$cloneValues}
defining the color legend}
\item{clim}{a numeric vector of length 2: color range limits (used if \code{col.var} is numeric)}
\item{cex}{a numerical value giving the amount by which plotting text
and symbols should be scaled relative to the default: see \code{\link{par}}}
\item{xlab}{a title for the x axis: see \code{\link{title}}}
\item{ylab}{a title for the y axis: see \code{\link{title}}}
\item{pch}{either an integer specifying a symbol or a single character
to be used as the default in plotting points: see \code{\link{par}} }
\item{...}{further arguments to be passed to \code{plot}}
}
\details{if \code{col.var} is a numeric variable, \code{y} colors are
proportional to \code{col.var} values; if it is a character variable
or a factor, one color is assigned to each different value of
\code{col.var}. If \code{col.var} is NULL, colors are proportional to
\code{y} values.}
\author{Pierre Neuvial, \email{manor@curie.fr}.}
\note{People interested in tools for array-CGH analysis can
visit our web-page: \url{http://bioinfo.curie.fr}.}
\examples{
data(spatial)
## default color code: log-ratios
\dontrun{
genome.plot(edge.norm, chrLim="LimitChr")
}
## color code determined by a qualitative variable: ZoneGNL (DNA copy number code)
edge.norm$cloneValues$ZoneGNL <- as.factor(edge.norm$cloneValues$ZoneGNL)
\dontrun{
genome.plot(edge.norm, col.var="ZoneGNL")
}
## comparing profiles with and without normalization
## aggregate data without normalization (flags)
gradient.nonorm <- norm(gradient, flag.list=NULL, var="LogRatio",
FUN=median, na.rm=TRUE)
gradient.nonorm <- sort(gradient.nonorm)
\dontrun{
genome.plot(gradient.nonorm, pch=20, main="Genomic profile without
normalization", chrLim="LimitChr")
x11()
genome.plot(gradient.norm, pch=20, main="Genomic profile with
normalization", chrLim="LimitChr")
}
}
\seealso{\code{\link{flag}}, \code{\link{report.plot}}}
\keyword{hplot}
|
#' The 'Kernel' class object
#'
#' This is an abstract class providing the kernel function and the 1st order derivative of the RBF kernel function.
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords data
#' @return an \code{\link{R6Class}} object which can be used for the rkhs interpolation.
#' @format \code{\link{R6Class}} object.
#' @field k_par vector(of length n_hy) containing the hyper-parameter of kernel. n_hy is the length of kernel hyper parameters.
#' @section Methods:
#' \describe{
#' \item{\code{kern(t1,t2)}}{This method is used to calculate the kernel function given two one dimensional real inputs.}
#' \item{\code{dkd_kpar(t1,t2)}}{This method is used to calculate the gradient of kernel function against the kernel hyper parameters given two one dimensional real inputs.}
#' \item{\code{dkdt(t1,t2)}}{This method is used to calculate the 1st order derivative of kernel function given two one dimensional real inputs.} }
#' @export
#'
#' @author Mu Niu, \email{mu.niu@glasgow.ac.uk}
# Abstract base class for RKHS kernels: concrete subclasses override
# kern(), dkd_kpar(), and dkdt().
Kernel <- R6Class(
  "Kernel",
  public = list(
    # Hyper-parameter vector of the kernel (length n_hy).
    k_par = NULL,

    # Store the hyper-parameters and run the (optional) greeting hook.
    initialize = function(k_par = NULL) {
      self$k_par <- k_par
      self$greet()
    },

    # No-op hook; subclasses may print a short self-description.
    greet = function() {
    },

    # Kernel value k(t1, t2) for two scalar inputs; abstract.
    kern = function(t1, t2) {
    },

    # Gradient of k(t1, t2) w.r.t. the hyper-parameters; abstract.
    dkd_kpar = function(t1, t2) {
    },

    # First-order derivative of k(t1, t2); abstract.
    dkdt = function(t1, t2) {
    }
  )
)
# Sigmoid kernel: k(t1, t2) = 1 / (1 + exp(-(t1 - t2) * k_par)).
Sigmoid <- R6Class(
  "sigmoid",
  inherit = Kernel,
  public = list(
    # Replace the kernel hyper-parameter.
    set_k_par = function(val) {
      self$k_par <- val
    },

    # Kernel value: logistic function of the scaled lag (t1 - t2).
    kern = function(t1, t2) {
      lag <- t1 - t2
      1 / (1 + exp(-lag * self$k_par))
    },

    # Gradient of the kernel w.r.t. the hyper-parameter k_par.
    # NOTE(review): both branches carry a factor l = k_par rather than the
    # lag (t1 - t2) that the analytic derivative d/dl would give --
    # confirm this is intentional before relying on the gradient.
    dkd_kpar = function(t1, t2) {
      lag <- t1 - t2
      l <- self$k_par
      if ((lag * l) > -20) {
        # Direct formula, safe away from the exp() overflow region.
        ds <- -(1 + exp(-lag * l))^(-2) * exp(-lag * l) * (-l)
      } else {
        # Asymptotic form l * exp(lag * l) avoids overflow of exp(-lag * l).
        ds <- exp(lag * l + log(l))
      }
      ds
    },

    # First-order derivative of the kernel w.r.t. t1.
    dkdt = function(t1, t2) {
      lag <- t1 - t2
      l <- self$k_par
      bas <- exp(-lag * l)
      if (is.infinite(bas)) {
        # exp() overflowed: the sigmoid is flat here, derivative underflows to 0.
        dsdt <- 0
      } else {
        dsdt <- (1 + bas)^(-2) * l * bas
      }
      return(dsdt)
    }
  )
)
| /R/kernel.r | no_license | mu2013/KGode | R | false | false | 2,137 | r | #' The 'Kernel' class object
#'
#' This is an abstract class providing the kernel function and the 1st order derivative of the RBF kernel function.
#' @docType class
#' @importFrom R6 R6Class
#' @export
#' @keywords data
#' @return an \code{\link{R6Class}} object which can be used for the rkhs interpolation.
#' @format \code{\link{R6Class}} object.
#' @field k_par vector(of length n_hy) containing the hyper-parameter of kernel. n_hy is the length of kernel hyper parameters.
#' @section Methods:
#' \describe{
#' \item{\code{kern(t1,t2)}}{This method is used to calculate the kernel function given two one dimensional real inputs.}
#' \item{\code{dkd_kpar(t1,t2)}}{This method is used to calculate the gradient of kernel function against the kernel hyper parameters given two one dimensional real inputs.}
#' \item{\code{dkdt(t1,t2)}}{This method is used to calculate the 1st order derivative of kernel function given two one dimensional real inputs.} }
#' @export
#'
#' @author Mu Niu, \email{mu.niu@glasgow.ac.uk}
# Abstract base class for RKHS kernels: concrete subclasses override
# kern(), dkd_kpar(), and dkdt().
Kernel <- R6Class(
  "Kernel",
  public = list(
    # Hyper-parameter vector of the kernel (length n_hy).
    k_par = NULL,

    # Store the hyper-parameters and run the (optional) greeting hook.
    initialize = function(k_par = NULL) {
      self$k_par <- k_par
      self$greet()
    },

    # No-op hook; subclasses may print a short self-description.
    greet = function() {
    },

    # Kernel value k(t1, t2) for two scalar inputs; abstract.
    kern = function(t1, t2) {
    },

    # Gradient of k(t1, t2) w.r.t. the hyper-parameters; abstract.
    dkd_kpar = function(t1, t2) {
    },

    # First-order derivative of k(t1, t2); abstract.
    dkdt = function(t1, t2) {
    }
  )
)
# Sigmoid kernel: k(t1, t2) = 1 / (1 + exp(-(t1 - t2) * k_par)).
Sigmoid <- R6Class(
  "sigmoid",
  inherit = Kernel,
  public = list(
    # Replace the kernel hyper-parameter.
    set_k_par = function(val) {
      self$k_par <- val
    },

    # Kernel value: logistic function of the scaled lag (t1 - t2).
    kern = function(t1, t2) {
      lag <- t1 - t2
      1 / (1 + exp(-lag * self$k_par))
    },

    # Gradient of the kernel w.r.t. the hyper-parameter k_par.
    # NOTE(review): both branches carry a factor l = k_par rather than the
    # lag (t1 - t2) that the analytic derivative d/dl would give --
    # confirm this is intentional before relying on the gradient.
    dkd_kpar = function(t1, t2) {
      lag <- t1 - t2
      l <- self$k_par
      if ((lag * l) > -20) {
        # Direct formula, safe away from the exp() overflow region.
        ds <- -(1 + exp(-lag * l))^(-2) * exp(-lag * l) * (-l)
      } else {
        # Asymptotic form l * exp(lag * l) avoids overflow of exp(-lag * l).
        ds <- exp(lag * l + log(l))
      }
      ds
    },

    # First-order derivative of the kernel w.r.t. t1.
    dkdt = function(t1, t2) {
      lag <- t1 - t2
      l <- self$k_par
      bas <- exp(-lag * l)
      if (is.infinite(bas)) {
        # exp() overflowed: the sigmoid is flat here, derivative underflows to 0.
        dsdt <- 0
      } else {
        dsdt <- (1 + bas)^(-2) * l * bas
      }
      return(dsdt)
    }
  )
)
|
##run through masks of each video event and infer sex using model
Sys.setenv("CUDA_VISIBLE_DEVICES" = -1)
library(reticulate)
use_condaenv("RKeras", required = TRUE)
library(abind)
library(keras)
library(doParallel)
library(magick)
model <- load_model_hdf5("../Models/BestModelSex-158")
#Testing
# Video <- "VM0028-S1-7P-08052013"
# InferMask(Video)
## Run sex inference on the extracted mask images of every event of a video
## and write the per-event predictions back to disk.
##
## Video: basename of the video (also the mask / output directory name).
## Side effects: reads FramesShort.csv and the per-event mask PNGs, uses the
##   global Keras classifier `model`, and writes FramesShortSexPredict.csv
##   into the video's MeerkatOutput directory.
## Returns: NULL; called for its side effects.
InferMask <- function(Video) {
  Short <- read.csv(paste0("../MeerkatOutput/", Video, "/FramesShort.csv"))
  if (nrow(Short) == 0) {
    # No events: still emit the (empty) prediction file so downstream
    # steps find a consistent set of outputs.
    write.csv(Short, paste0("../MeerkatOutput/", Video, "/FramesShortSexPredict.csv"))
    return(NULL)
  }
  Short[, "PredictSex"] <- NA
  Short[, "ConfidenceScore"] <- NA
  for (i in seq_len(nrow(Short))) {
    # All mask frames extracted for this event.
    images <- list.files(paste0("../OutputMasks/", Video, "/Event", i))
    if (length(images) == 0) {
      # No instances detected for this event: leave NA predictions.
      next
    }
    # Pre-allocate (frame, channel, height, width); masks are assumed to be
    # 256x144 RGB images -- TODO confirm against the mask export step.
    OutArray <- array(data = NA, dim = c(length(images), 3, 256, 144))
    for (j in seq_along(images)) {
      image <- image_read(paste0("../OutputMasks/", Video, "/Event", i, "/", images[j]))
      # Scale 8-bit pixel values to [0, 1].
      OutArray[j, , , ] <- as.integer(image[[1]]) / 255
    }
    # Reorder to (frame, height, width, channel) as expected by the model.
    OutArray <- aperm(OutArray, c(1, 3, 4, 2))
    Prediction <- predict(model, OutArray)
    # Average the per-frame scores; > 0.5 means male (1), otherwise female (0).
    MeanScore <- mean(Prediction)
    if (MeanScore > 0.5) {
      Sex <- 1
      Confidence <- MeanScore
    } else {
      Sex <- 0
      Confidence <- 1 - MeanScore
    }
    Short[i, "PredictSex"] <- Sex
    Short[i, "ConfidenceScore"] <- Confidence
  }
  write.csv(Short, paste0("../MeerkatOutput/", Video, "/FramesShortSexPredict.csv"))
}
AllVids <- list.files("../OutputMasks/")
registerDoParallel(cores=32)
###Run inference for all videos
# mclapply(AllVids, InferMask, mc.cores = 32)
lapply(AllVids, InferMask)
stopImplicitCluster()
| /C) MaskRCNN Franework/InferMasks.R | no_license | alexhang212/SparrowVis_Code | R | false | false | 1,823 | r | ##run through masks of each video event and infer sex using model
Sys.setenv("CUDA_VISIBLE_DEVICES" = -1)
library(reticulate)
use_condaenv("RKeras", required = TRUE)
library(abind)
library(keras)
library(doParallel)
library(magick)
model <- load_model_hdf5("../Models/BestModelSex-158")
#Testing
# Video <- "VM0028-S1-7P-08052013"
# InferMask(Video)
## Run sex inference on the extracted mask images of every event of a video
## and write the per-event predictions back to disk.
##
## Video: basename of the video (also the mask / output directory name).
## Side effects: reads FramesShort.csv and the per-event mask PNGs, uses the
##   global Keras classifier `model`, and writes FramesShortSexPredict.csv
##   into the video's MeerkatOutput directory.
## Returns: NULL; called for its side effects.
InferMask <- function(Video) {
  Short <- read.csv(paste0("../MeerkatOutput/", Video, "/FramesShort.csv"))
  if (nrow(Short) == 0) {
    # No events: still emit the (empty) prediction file so downstream
    # steps find a consistent set of outputs.
    write.csv(Short, paste0("../MeerkatOutput/", Video, "/FramesShortSexPredict.csv"))
    return(NULL)
  }
  Short[, "PredictSex"] <- NA
  Short[, "ConfidenceScore"] <- NA
  for (i in seq_len(nrow(Short))) {
    # All mask frames extracted for this event.
    images <- list.files(paste0("../OutputMasks/", Video, "/Event", i))
    if (length(images) == 0) {
      # No instances detected for this event: leave NA predictions.
      next
    }
    # Pre-allocate (frame, channel, height, width); masks are assumed to be
    # 256x144 RGB images -- TODO confirm against the mask export step.
    OutArray <- array(data = NA, dim = c(length(images), 3, 256, 144))
    for (j in seq_along(images)) {
      image <- image_read(paste0("../OutputMasks/", Video, "/Event", i, "/", images[j]))
      # Scale 8-bit pixel values to [0, 1].
      OutArray[j, , , ] <- as.integer(image[[1]]) / 255
    }
    # Reorder to (frame, height, width, channel) as expected by the model.
    OutArray <- aperm(OutArray, c(1, 3, 4, 2))
    Prediction <- predict(model, OutArray)
    # Average the per-frame scores; > 0.5 means male (1), otherwise female (0).
    MeanScore <- mean(Prediction)
    if (MeanScore > 0.5) {
      Sex <- 1
      Confidence <- MeanScore
    } else {
      Sex <- 0
      Confidence <- 1 - MeanScore
    }
    Short[i, "PredictSex"] <- Sex
    Short[i, "ConfidenceScore"] <- Confidence
  }
  write.csv(Short, paste0("../MeerkatOutput/", Video, "/FramesShortSexPredict.csv"))
}
AllVids <- list.files("../OutputMasks/")
registerDoParallel(cores=32)
###Run inference for all videos
# mclapply(AllVids, InferMask, mc.cores = 32)
lapply(AllVids, InferMask)
stopImplicitCluster()
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% MultiArrayUnitModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{MultiArrayUnitModel$getFitUnitGroupFunction}
\alias{MultiArrayUnitModel$getFitUnitGroupFunction}
\alias{getFitUnitGroupFunction.MultiArrayUnitModel}
\alias{MultiArrayUnitModel.getFitUnitGroupFunction}
\alias{getFitUnitGroupFunction,MultiArrayUnitModel-method}
\title{Static method to get the low-level function that fits the PLM}
\description{
Static method to get the low-level function that fits the PLM.
Any subclass model must provide this method, which should return
a \code{\link[base]{function}} that accepts an IxK \code{\link[base]{matrix}}.
}
\usage{
## Static method (use this):
## MultiArrayUnitModel$getFitUnitGroupFunction()
## Don't use the below:
\method{getFitUnitGroupFunction}{MultiArrayUnitModel}(...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{function}}.
}
\seealso{
For more information see \code{\link{MultiArrayUnitModel}}.
}
\keyword{internal}
\keyword{methods}
| /man/getFitUnitGroupFunction.MultiArrayUnitModel.Rd | no_license | HenrikBengtsson/aroma.affymetrix | R | false | false | 1,240 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% MultiArrayUnitModel.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{MultiArrayUnitModel$getFitUnitGroupFunction}
\alias{MultiArrayUnitModel$getFitUnitGroupFunction}
\alias{getFitUnitGroupFunction.MultiArrayUnitModel}
\alias{MultiArrayUnitModel.getFitUnitGroupFunction}
\alias{getFitUnitGroupFunction,MultiArrayUnitModel-method}
\title{Static method to get the low-level function that fits the PLM}
\description{
Static method to get the low-level function that fits the PLM.
Any subclass model must provide this method, which should return
a \code{\link[base]{function}} that accepts an IxK \code{\link[base]{matrix}}.
}
\usage{
## Static method (use this):
## MultiArrayUnitModel$getFitUnitGroupFunction()
## Don't use the below:
\method{getFitUnitGroupFunction}{MultiArrayUnitModel}(...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{function}}.
}
\seealso{
For more information see \code{\link{MultiArrayUnitModel}}.
}
\keyword{internal}
\keyword{methods}
|
setwd("~/camila")
source("~/R-inicia/inicia.r")
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
#-- Reading city boundaries shape file
library(maptools)
CITIES<-readShapeLines("/Users/rachel/shapefiles/cidades_SP/sao_paulo.shp")
STATES<-readShapeLines("/Users/rachel/shapefiles/estadosl_2007.shp")
#yy<-readShapeLines("/Users/rachel/shapefiles/municip07.shp")
#getinfo.shape("/Users/rachel/shapefiles/municip07.shp")
#STATES@data for info
#coordinates(STATE@lines[[26]])[[1]] -> to access coordinates of slot 26, in this case STATE OF SAO PAULO
#coordinates(CITIES@lines[[578]])[[1]] -> to access coordinates of slot 578, in this case CITY OF SAO PAULO
bla<-coordinates(CITIES@lines[[578]])[[1]]
SAO <- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[240]])[[1]]
GUA<- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[147]])[[1]]
CMP<- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[251]])[[1]]
IND<- data.frame(x=bla[,1],y=bla[,2])
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
library(maps)
library(mapdata)#For the worldHires database
library(mapproj)#For the mapproject function
# Trace a circle of radius r (in plot units) centred on (x, y) using 360
# points; on lon/lat axes this renders as an ellipse rather than a circle.
plotElipse <- function(x, y, r) {
  theta <- seq(0, 2 * pi, length.out = 360)
  lines(x + r * cos(theta), y + r * sin(theta))
}
# Draw a geodesic circle of radius Km kilometres around the point
# (LonDec, LatDec), given in decimal degrees, onto the current plot.
plotCircle <- function(LonDec, LatDec, Km, lty = 2, col = NA, border = 1, lwd = 1) {
  ER <- 6371            # mean Earth radius in km (use 3959 for miles)
  deg2rad <- pi / 180
  lat0 <- LatDec * deg2rad   # centre latitude in radians
  lon0 <- LonDec * deg2rad   # centre longitude in radians
  ang <- (1:360) * deg2rad   # bearings spanning the full circle, in radians
  # Forward geodesic problem: latitude of each rim point at each bearing...
  lat <- asin(sin(lat0) * cos(Km / ER) + cos(lat0) * sin(Km / ER) * cos(ang))
  # ...and the corresponding longitude.
  lon <- lon0 + atan2(sin(ang) * sin(Km / ER) * cos(lat0),
                      cos(Km / ER) - sin(lat0) * sin(lat))
  # Convert back to decimal degrees and draw the rim as a polygon.
  polygon(lon / deg2rad, lat / deg2rad, lty = lty, col = col, border = border, lwd = lwd)
}
#map("worldHires", region="belgium")#draw a map of Belgium (yes i am Belgian ;-)
#bruxelles <- mapproject(4.330,50.830)#coordinates of Bruxelles
#points(bruxelles,pch=20,col='blue',cex=2)#draw a blue dot for Bruxelles
#plotCircle(4.330,50.830,50)#Plot a dashed circle of 50 km arround Bruxelles
#plotElipse(4.330,50.830,0.5)#Tries to plot a plain circle of 50 km arround Bruxelles, but drawn an ellipse
require(geosphere)
#-- radars coordinates
sr<-data.frame(x=-(47+(5+52/60)/60), y=-(23+(35+56/60)/60))
ct<-data.frame(x=-(45+(58+20/60)/60), y=-(23+(36+0/60)/60))
xp<-data.frame(x=-47.05641, y=-22.81405)
# Compute the dual-Doppler lobe geometry for a pair of Doppler radars.
#
# radar1, radar2: data frames with columns x (lon) and y (lat) in decimal degrees.
# deg:            beam-crossing angle (degrees) defining the lobes.
# bearing1/2:     bearings (degrees) from the baseline midpoint to the two
#                 lobe centres.
# Returns a list with the radar coordinates, half-baseline d (metres), lobe
# radius r, the baseline midpoint, and the two lobe centres p1 / p2.
DualDopplerLobes <- function(radar1, radar2, deg, bearing1, bearing2) {
  mid <- midPoint(radar1, radar2)
  theta <- deg * pi / 180
  # Half the great-circle distance between the radars (metres).
  half_base <- distm(radar1, radar2, fun = distHaversine) / 2
  # Inscribed-angle theorem: lobe radius giving the requested crossing angle.
  lobe_r <- half_base / sin(theta)
  # Distance from the midpoint to each lobe centre.
  offset <- sqrt(lobe_r^2 - half_base^2)
  c1 <- destPoint(mid, bearing1, offset)
  c2 <- destPoint(mid, bearing2, offset)
  out <- NULL
  out$x1 <- radar1[1]; out$y1 <- radar1[2]
  out$x2 <- radar2[1]; out$y2 <- radar2[2]
  out$d <- half_base; out$r <- lobe_r
  out$mid.x <- mid[1]; out$mid.y <- mid[2]
  out$p1.x <- c1[1]; out$p1.y <- c1[2]
  out$p2.x <- c2[1]; out$p2.y <- c2[2]
  return(out)
}
XLIM<-c(-49,-45); YLIM<-c(-25,-22)
#-- 30 degree view
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
graphics.off()
png(filename = "duaDoppler-30deg.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
main="Dual-Doppler lobes for pairs of Doppler radars\nwith 30 degree view angle difference", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("São Roque and XPOL", "São Roque and FCTH"),lty = c(2,1), lwd=2, bg="white", col=4)
dev.off()
#-- 45 degree view
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
graphics.off()
png(filename = "duaDoppler-45deg.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
main="Dual-Doppler lobes for pairs of Doppler radars\nwith 45 degree view angle difference", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("São Roque and XPOL", "São Roque and FCTH"),lty = c(2,1), lwd=2, bg="white", col=2)
dev.off()
#-- 30 and 45 degree views together
graphics.off()
png(filename = "duaDoppler-all.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
main="Dual-Doppler lobes for pairs of Doppler radars", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.15,y=-24.5,legend=c("SR and XP (30deg)", "SR and CT (30deg)", "SR and XP (45deg)", "SR and CT (45deg)"),lty = c(2,1,2,1), lwd=2, bg="white", col=c(4,4,2,2))
dev.off()
#-- 30 e 45 degree views together by pair of radars
dist.sr.ct<-distm(sr, ct, fun = distHaversine)*1e-3
dist.sr.xp<-distm(sr, xp, fun = distHaversine)*1e-3
dist.ct.xp<-distm(ct, xp, fun = distHaversine)*1e-3
graphics.off()
png(filename = "duaDoppler-SR_CT.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
main="Dual-Doppler lobes for SR and CT pair of radars", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
lines(x=c(sr$x,ct$x), y=c(sr$y,ct$y), lty=3, lwd=2)
d<-midPoint(sr,ct)
text(x=d[1],y=d[2],labels = sprintf("%3.0fkm",dist.sr.ct), font=1, pos=1)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("30 degrees", "45 degrees"),lty = 1, lwd=2, bg="white", col=c(4,2))
dev.off()
graphics.off()
png(filename = "duaDoppler-SR_XP.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
main="Dual-Doppler lobes for SR and XP pair of radars", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=1,lwd=2)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=1,lwd=2)
lines(x=c(sr$x,xp$x), y=c(sr$y,xp$y), lty=3, lwd=2)
d<-midPoint(sr,xp)
text(x=d[1],y=d[2],labels = sprintf("%3.0fkm",dist.sr.xp), font=1, pos=1, srt=90)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=3)
legend(x=-49.2,y=-24.8,legend=c("30 degrees", "45 degrees"),lty = 1, lwd=2, bg="white", col=c(4,2))
dev.off()
| /General_Processing/dual-doppler-v2.R | no_license | cclopes/soschuva_hail | R | false | false | 10,567 | r | setwd("~/camila")
source("~/R-inicia/inicia.r")
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
#-- Reading city boundaries shape file
library(maptools)
CITIES<-readShapeLines("/Users/rachel/shapefiles/cidades_SP/sao_paulo.shp")
STATES<-readShapeLines("/Users/rachel/shapefiles/estadosl_2007.shp")
#yy<-readShapeLines("/Users/rachel/shapefiles/municip07.shp")
#getinfo.shape("/Users/rachel/shapefiles/municip07.shp")
#STATES@data for info
#coordinates(STATE@lines[[26]])[[1]] -> to access coordinates of slot 26, in this case STATE OF SAO PAULO
#coordinates(CITIES@lines[[578]])[[1]] -> to access coordinates of slot 578, in this case CITY OF SAO PAULO
bla<-coordinates(CITIES@lines[[578]])[[1]]
SAO <- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[240]])[[1]]
GUA<- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[147]])[[1]]
CMP<- data.frame(x=bla[,1],y=bla[,2])
bla<-coordinates(CITIES@lines[[251]])[[1]]
IND<- data.frame(x=bla[,1],y=bla[,2])
#---------------------------------------------------------------------------------
#---------------------------------------------------------------------------------
library(maps)
library(mapdata)#For the worldHires database
library(mapproj)#For the mapproject function
# Trace a circle of radius r (in plot units) centred on (x, y) using 360
# points; on lon/lat axes this renders as an ellipse rather than a circle.
plotElipse <- function(x, y, r) {
  theta <- seq(0, 2 * pi, length.out = 360)
  lines(x + r * cos(theta), y + r * sin(theta))
}
# Draw a geodesic circle of radius Km kilometres around the point
# (LonDec, LatDec), given in decimal degrees, onto the current plot.
plotCircle <- function(LonDec, LatDec, Km, lty = 2, col = NA, border = 1, lwd = 1) {
  ER <- 6371            # mean Earth radius in km (use 3959 for miles)
  deg2rad <- pi / 180
  lat0 <- LatDec * deg2rad   # centre latitude in radians
  lon0 <- LonDec * deg2rad   # centre longitude in radians
  ang <- (1:360) * deg2rad   # bearings spanning the full circle, in radians
  # Forward geodesic problem: latitude of each rim point at each bearing...
  lat <- asin(sin(lat0) * cos(Km / ER) + cos(lat0) * sin(Km / ER) * cos(ang))
  # ...and the corresponding longitude.
  lon <- lon0 + atan2(sin(ang) * sin(Km / ER) * cos(lat0),
                      cos(Km / ER) - sin(lat0) * sin(lat))
  # Convert back to decimal degrees and draw the rim as a polygon.
  polygon(lon / deg2rad, lat / deg2rad, lty = lty, col = col, border = border, lwd = lwd)
}
#map("worldHires", region="belgium")#draw a map of Belgium (yes i am Belgian ;-)
#bruxelles <- mapproject(4.330,50.830)#coordinates of Bruxelles
#points(bruxelles,pch=20,col='blue',cex=2)#draw a blue dot for Bruxelles
#plotCircle(4.330,50.830,50)#Plot a dashed circle of 50 km arround Bruxelles
#plotElipse(4.330,50.830,0.5)#Tries to plot a plain circle of 50 km arround Bruxelles, but drawn an ellipse
# Great-circle helpers (midPoint, destPoint, distm/distHaversine) come from
# geosphere. Use library() rather than require() so a missing package stops
# the script here instead of failing later with "could not find function".
library(geosphere)
#-- radar coordinates (decimal degrees, west/south negative), converted from
#   degrees/minutes/seconds; labels match the SR/CT/XP plot annotations below.
sr<-data.frame(x=-(47+(5+52/60)/60), y=-(23+(35+56/60)/60))   # SR: Sao Roque radar
ct<-data.frame(x=-(45+(58+20/60)/60), y=-(23+(36+0/60)/60))   # CT: FCTH radar (per legend)
xp<-data.frame(x=-47.05641, y=-22.81405)                      # XP: XPOL radar (per legend)
# Compute the dual-Doppler lobe geometry for a pair of Doppler radars.
#
# The dual-Doppler lobes are the two circles on which the radar beams cross
# at exactly `deg` degrees; inside them the crossing angle exceeds `deg`,
# the usual criterion for dual-Doppler wind retrieval.
#
# Args:
#   radar1, radar2: data.frames with columns x (lon) and y (lat), decimal deg.
#   deg:            minimum beam-crossing angle, in degrees.
#   bearing1, bearing2: bearings (degrees from north) from the baseline
#                   midpoint towards the two lobe centres; they should be
#                   180 degrees apart and perpendicular to the baseline
#                   (e.g. 0/180 for an E-W baseline -- TODO confirm usage).
#
# Returns a named list with the radar coordinates (x1/y1, x2/y2), the half
# baseline `d` and lobe radius `r` (metres, via distHaversine), the baseline
# midpoint (mid.x/mid.y) and the two lobe centres (p1.x/p1.y, p2.x/p2.y).
DualDopplerLobes <- function(radar1, radar2, deg, bearing1, bearing2)
{
  meio <- midPoint(radar1, radar2)                      # baseline midpoint
  deg <- deg * pi / 180                                 # crossing angle in radians
  d <- distm(radar1, radar2, fun = distHaversine) / 2   # half baseline (m)
  r <- d / sin(deg)                                     # lobe radius (m)
  x <- sqrt(r^2 - d^2)                                  # midpoint-to-lobe-centre offset (m)
  p1 <- destPoint(meio, bearing1, x)                    # lobe centre, one side
  p2 <- destPoint(meio, bearing2, x)                    # lobe centre, other side
  # Build the result list directly instead of growing a NULL via `$<-`.
  list(x1 = radar1[1], y1 = radar1[2],
       x2 = radar2[1], y2 = radar2[2],
       d = d, r = r,
       mid.x = meio[1], mid.y = meio[2],
       p1.x = p1[1], p1.y = p1[2],
       p2.x = p2[1], p2.y = p2[2])
}
# Geographic plotting window (decimal degrees) shared by all figures below.
XLIM<-c(-49,-45); YLIM<-c(-25,-22)
#-- 30 degree view: lobes for both radar pairs at a 30-degree minimum
#   beam-crossing angle. Lobe radii come back in metres; *1e-3 converts to
#   the kilometres that plotCircle() expects.
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
graphics.off()
png(filename = "duaDoppler-30deg.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
     xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
     main="Dual-Doppler lobes for pairs of Doppler radars\nwith 30 degree view angle difference", font.main=1, cex.main=1.5)
# mapaas(): map/background overlay helper -- presumably defined elsewhere in
# this file (not visible in this chunk).
mapaas()
points(ct,pch=19)
# SR-CT lobes: solid blue (border=4); SR-XP lobes: dashed blue.
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
# City/region outlines (CMP, SAO, IND built earlier in the file).
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("São Roque and XPOL", "São Roque and FCTH"),lty = c(2,1), lwd=2, bg="white", col=4)
dev.off()
#-- 45 degree view: same figure as above but for a 45-degree minimum
#   beam-crossing angle (drawn in red, border=2).
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
graphics.off()
png(filename = "duaDoppler-45deg.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
     xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
     main="Dual-Doppler lobes for pairs of Doppler radars\nwith 45 degree view angle difference", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
# SR-CT lobes solid, SR-XP lobes dashed; radii converted m -> km via *1e-3.
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("São Roque and XPOL", "São Roque and FCTH"),lty = c(2,1), lwd=2, bg="white", col=2)
dev.off()
#-- 30 and 45 degree views together: blue (border=4) lobes for 30 degrees,
#   red (border=2) lobes for 45 degrees, both radar pairs on one figure.
graphics.off()
png(filename = "duaDoppler-all.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
     xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
     main="Dual-Doppler lobes for pairs of Doppler radars", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
# 30-degree lobes in blue.
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
points(xp,pch=19)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=2,lwd=2)
# Recompute the lobe geometry for 45 degrees and overlay in red.
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=2,lwd=2)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=1)
legend(x=-49.15,y=-24.5,legend=c("SR and XP (30deg)", "SR and CT (30deg)", "SR and XP (45deg)", "SR and CT (45deg)"),lty = c(2,1,2,1), lwd=2, bg="white", col=c(4,4,2,2))
dev.off()
#-- 30 and 45 degree views together, one figure per pair of radars, each
#   annotated with the great-circle baseline length in km (distm returns
#   metres; *1e-3 converts to km).
dist.sr.ct<-distm(sr, ct, fun = distHaversine)*1e-3
dist.sr.xp<-distm(sr, xp, fun = distHaversine)*1e-3
dist.ct.xp<-distm(ct, xp, fun = distHaversine)*1e-3
graphics.off()
png(filename = "duaDoppler-SR_CT.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
     xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
     main="Dual-Doppler lobes for SR and CT pair of radars", font.main=1, cex.main=1.5)
mapaas()
points(ct,pch=19)
# Blue = 30-degree lobes, red = 45-degree lobes.
dd.sr.ct<-DualDopplerLobes(sr,ct,30,0,180)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=4,lty=1,lwd=2)
dd.sr.ct<-DualDopplerLobes(sr,ct,45,0,180)
plotCircle(dd.sr.ct$p1.x,dd.sr.ct$p1.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.ct$p2.x,dd.sr.ct$p2.y,dd.sr.ct$r*1e-3,border=2,lty=1,lwd=2)
# Dotted baseline between the two radars, labelled with its length.
lines(x=c(sr$x,ct$x), y=c(sr$y,ct$y), lty=3, lwd=2)
d<-midPoint(sr,ct)
text(x=d[1],y=d[2],labels = sprintf("%3.0fkm",dist.sr.ct), font=1, pos=1)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(ct,labels = "CT", font=2, pos=1)
legend(x=-49.2,y=-24.8,legend=c("30 degrees", "45 degrees"),lty = 1, lwd=2, bg="white", col=c(4,2))
dev.off()
graphics.off()
png(filename = "duaDoppler-SR_XP.png",width = 550, height = 500)
par(mfrow=c(1,1),mar=c(4.5,4.8,3,1),oma=c(0,0,0,0))
plot(sr,pch=19,xlim=XLIM,ylim=YLIM, cex.axis=1.5,
     xlab=expression("longitude ("*degree*")"), ylab=expression("latitude ("*degree*")"), cex.lab=1.5,
     main="Dual-Doppler lobes for SR and XP pair of radars", font.main=1, cex.main=1.5)
mapaas()
# NOTE(review): this draws CT's marker in the SR-XP figure; it likely should
# be points(xp,pch=19) -- confirm against the intended figure.
points(ct,pch=19)
dd.sr.xp<-DualDopplerLobes(sr,xp,30,90,270)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=4,lty=1,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=4,lty=1,lwd=2)
dd.sr.xp<-DualDopplerLobes(sr,xp,45,90,270)
plotCircle(dd.sr.xp$p1.x,dd.sr.xp$p1.y,dd.sr.xp$r*1e-3,border=2,lty=1,lwd=2)
plotCircle(dd.sr.xp$p2.x,dd.sr.xp$p2.y,dd.sr.xp$r*1e-3,border=2,lty=1,lwd=2)
# Baseline label rotated 90 degrees because the SR-XP baseline runs N-S.
lines(x=c(sr$x,xp$x), y=c(sr$y,xp$y), lty=3, lwd=2)
d<-midPoint(sr,xp)
text(x=d[1],y=d[2],labels = sprintf("%3.0fkm",dist.sr.xp), font=1, pos=1, srt=90)
lines(CMP,col="gray66"); lines(SAO,col="gray66"); lines(IND,col="gray66")
text(sr,labels = "SR", font=2, pos=1)
text(xp,labels = "XP", font=2, pos=3)
legend(x=-49.2,y=-24.8,legend=c("30 degrees", "45 degrees"),lty = 1, lwd=2, bg="white", col=c(4,2))
dev.off()
|
\name{logit}
\alias{logit}
\alias{logit.nests}
\alias{logit.nests.alm}
\alias{logit.cap}
\alias{logit.alm}
\alias{logit.cap.alm}
\title{(Nested) Logit Demand Calibration and Merger Simulation}
\description{Calibrates consumer demand using (Nested) Logit and then simulates the price effect of a merger between two firms
under the assumption that all firms in the market are playing a
differentiated products Bertrand pricing game.}
\usage{
logit(prices,shares,margins,
ownerPre,ownerPost,
normIndex=ifelse(isTRUE(all.equal(sum(shares), 1,
check.names=FALSE)), 1 , NA ),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
insideSize = NA_real_,
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.alm(prices,shares,margins,
ownerPre,ownerPost,
mktElast = NA_real_,
insideSize = NA_real_,
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.nests(prices,shares,margins,
ownerPre,ownerPost,
nests=rep(1,length(shares)),
normIndex=ifelse(sum(shares) < 1,NA,1),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
constraint = TRUE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.nests.alm(prices,shares,margins,
ownerPre,ownerPost,
nests=rep(1,length(shares)),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
constraint = TRUE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.cap(prices,shares,margins,
ownerPre,ownerPost,
capacitiesPre=rep(Inf,length(prices)),
capacitiesPost=capacitiesPre,
insideSize,
normIndex=ifelse(sum(shares)<1,NA,1),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.cap.alm(prices,shares,margins,
ownerPre,ownerPost,
capacitiesPre=rep(Inf,length(prices)),
capacitiesPost=capacitiesPre,
mktElast = NA_real_,
insideSize,
mcDelta=rep(0,length(prices)),
subset=rep(TRUE,length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
}
\arguments{
\item{}{Let k denote the number of products produced by all firms playing the
Bertrand pricing game.}
\item{prices}{A length k vector of product prices.}
\item{shares}{A length k vector of product (quantity) shares. Values must be
between 0 and 1.}
\item{margins}{A length k vector of product margins, some of which may
equal NA.}
\item{nests}{A length k vector identifying the nest that each
product belongs to.}
\item{capacitiesPre}{A length k vector of pre-merger product capacities. Capacities
must be at least as great as shares * insideSize.}
\item{capacitiesPost}{A length k vector of post-merger product capacities.}
\item{insideSize}{An integer equal to total pre-merger units sold.
If shares sum to one, this also equals the size of the market.}
\item{normIndex}{An integer equalling the index (position) of the
    inside product whose mean valuation will be normalized to 0. Default
is 1, unless \sQuote{shares} sum to less than 1, in which case the default is
NA and an outside good is assumed to exist.}
\item{ownerPre}{EITHER a vector of length k whose values
indicate which firm produced a product pre-merger OR
a k x k matrix of pre-merger ownership shares.}
\item{ownerPost}{EITHER a vector of length k whose values
indicate which firm produced a product after the merger OR
a k x k matrix of post-merger ownership shares.}
\item{mktElast}{a negative value indicating market elasticity. Default is NA.}
\item{mcDelta}{A vector of length k where each element equals the
proportional change in a product's marginal costs due to
the merger. Default is 0, which assumes that the merger does not
affect any products' marginal cost.}
\item{subset}{A vector of length k where each element equals TRUE if
the product indexed by that element should be included in the
post-merger simulation and FALSE if it should be excluded.Default is a
length k vector of TRUE.}
\item{constraint}{if TRUE, then the nesting parameters for all
non-singleton nests are assumed equal. If FALSE, then each
non-singleton nest is permitted to have its own value. Default is
TRUE.}
\item{priceOutside}{A length 1 vector indicating the price of the
outside good. Default is 0.}
\item{priceStart}{A length k vector of starting values used to solve for
equilibrium price. Default is the \sQuote{prices} vector.}
\item{isMax}{If TRUE, checks to see whether computed price equilibrium
locally maximizes firm profits and returns a warning if not. Default is FALSE.}
\item{parmsStart}{For \code{logit.cap.alm}, a length-2 vector of starting values
used to solve for the price coefficient and outside share (in that order). For
    \code{logit.nests}, the first element should
always be the price coefficient and the remaining elements should be
the nesting parameters. Theory requires the nesting parameters to be
greater than the price coefficient. If missing then the random
draws with the appropriate restrictions are employed.}
\item{control.slopes}{A list of \code{\link{optim}} control parameters passed to the calibration routine optimizer (typically the \code{calcSlopes} method).}
\item{control.equ}{A list of \code{\link[BB]{BBsolve}} control parameters passed to the non-linear equation solver (typically the \code{calcPrices} method).}
\item{labels}{A k-length vector of labels. Default is "Prod#", where
\sQuote{#} is a number between 1 and the length of \sQuote{prices}.}
\item{...}{Additional options to feed to the \code{\link[BB]{BBsolve}}
optimizer used to solve for equilibrium prices.}
}
\details{Using product prices, quantity shares and all of the
product margins from at least one firm, \code{logit} is able to
recover the price coefficient and product mean valuations in a
Logit demand model. \code{logit} then uses these
calibrated parameters to simulate a merger between two firms.
\code{logit.alm} is identical to \code{logit} except that it assumes
that an outside product exists and uses additional margin
information to estimate the share of the outside good.
If market elasticity is known, it may be supplied using the
\sQuote{mktElast} argument.
\code{logit.nests} is identical to \code{logit} except that it includes the \sQuote{nests}
argument which may be used to assign products to different
nests. Nests are useful because they allow for richer substitution
patterns between products. Products within the same nest are assumed
to be closer substitutes than products in different nests. The degree
of substitutability between products located in different nests is
controlled by the value of the nesting parameter sigma.
The nesting parameters for singleton nests (nests containing
only one product) are not identified and normalized to 1. The vector of
sigmas is calibrated from the prices, revenue shares, and margins supplied
by the user.
By default, all non-singleton nests are assumed to have a common value for sigma.
This constraint may be relaxed by setting \sQuote{constraint} to
FALSE. In this case, at least one product margin must be supplied from
a product within each nest.
\code{logit.nests.alm} is identical to \code{logit.nests} except that it assumes
that an outside product exists and uses additional margin
information to estimate the share of the outside good.
\code{logit.cap} is identical to \code{logit} except that firms are
playing the Bertrand pricing game under exogenously supplied capacity
constraints. Unlike \code{logit}, \code{logit.cap} requires users to
  specify capacity constraints via \sQuote{capacitiesPre} and the number of
  potential customers in a market via \sQuote{insideSize}. \sQuote{insideSize} is needed to
  transform \sQuote{shares} into quantities that must be directly compared to \sQuote{capacitiesPre}.
In \code{logit}, \code{logit.nests} and \code{logit.cap}, if quantity shares sum to 1,
then one product's mean value is not identified and must be normalized
to 0. \sQuote{normIndex} may be used to specify the index (position) of the
product whose mean value is to be normalized. If the sum of revenue shares
  is less than 1, both of these functions assume that there exists a k+1st
product in the market whose price and mean value are both normalized
to 0.
}
\value{\code{logit} returns an instance of class
\code{\linkS4class{Logit}}.
\code{logit.alm} returns an instance of \code{\linkS4class{LogitALM}}, a
child class of \code{\linkS4class{Logit}}.
\code{logit.nests} returns an instance of \code{\linkS4class{LogitNests}}, a
child class of \code{\linkS4class{Logit}}.
\code{logit.cap} returns an instance of \code{\linkS4class{LogitCap}}, a
child class of \code{\linkS4class{Logit}}.}
\seealso{\code{\link{ces}}}
\author{Charles Taragin \email{charles.taragin@usdoj.gov}}
\references{
Anderson, Simon, Palma, Andre, and Francois Thisse (1992).
\emph{Discrete Choice Theory of Product Differentiation}.
The MIT Press, Cambridge, Mass.
Epstein, Roy and Rubinfeld, Daniel (2004).
\dQuote{Effects of Mergers Involving Differentiated Products.}
Werden, Gregory and Froeb, Luke (1994).
\dQuote{The Effects of Mergers in
Differentiated Products Industries: Structural Merger Policy and the
Logit Model},
\emph{Journal of Law, Economics, \& Organization}, \bold{10}, pp. 407-426.
Froeb, Luke, Tschantz, Steven and Phillip Crooke (2003).
\dQuote{Bertrand Competition and Capacity Constraints: Mergers Among Parking Lots},
\emph{Journal of Econometrics}, \bold{113}, pp. 49-67.
Froeb, Luke and Werden, Greg (1996).
\dQuote{Computational Economics and Finance: Modeling and Analysis with Mathematica, Volume 2.}
In Varian H (ed.), chapter Simulating Mergers among Noncooperative Oligopolists, pp. 177-95.
Springer-Verlag, New York.
}
\examples{
## Calibration and simulation results from a merger between Budweiser and
## Old Style.
## Source: Epstein/Rubinfeld 2004, pg 80
prodNames <- c("BUD","OLD STYLE","MILLER","MILLER-LITE","OTHER-LITE","OTHER-REG")
ownerPre <-c("BUD","OLD STYLE","MILLER","MILLER","OTHER-LITE","OTHER-REG")
ownerPost <-c("BUD","BUD","MILLER","MILLER","OTHER-LITE","OTHER-REG")
nests <- c("Reg","Reg","Reg","Light","Light","Reg")
price <- c(.0441,.0328,.0409,.0396,.0387,.0497)
shares <- c(.066,.172,.253,.187,.099,.223)
margins <- c(.3830,.5515,.5421,.5557,.4453,.3769)
insideSize <- 1000
names(price) <-
names(shares) <-
names(margins) <-
prodNames
result.logit <- logit(price,shares,margins,
ownerPre=ownerPre,ownerPost=ownerPost,
insideSize = insideSize,
labels=prodNames)
print(result.logit) # return predicted price change
summary(result.logit) # summarize merger simulation
elast(result.logit,TRUE) # returns premerger elasticities
elast(result.logit,FALSE) # returns postmerger elasticities
diversion(result.logit,TRUE) # return premerger diversion ratios
diversion(result.logit,FALSE) # return postmerger diversion ratios
cmcr(result.logit) #calculate compensating marginal cost reduction
upp(result.logit) #calculate Upwards Pricing Pressure Index
CV(result.logit) #calculate representative agent compensating variation
## Implement the Hypothetical Monopolist Test
## for BUD and OLD STYLE using a 5\% SSNIP
HypoMonTest(result.logit,prodIndex=1:2)
## Get a detailed description of the 'Logit' class slots
showClass("Logit")
## Show all methods attached to the 'Logit' Class
showMethods(classes="Logit")
## Show which classes have their own 'elast' method
showMethods("elast")
## Show the method definition for 'elast' and Class 'Logit'
getMethod("elast","Logit")
#
# Logit With capacity Constraints
#
cap <- c(66,200,300,200,99,300) # BUD and OTHER-LITE are capacity constrained
result.cap <- logit.cap(price,shares,margins,capacitiesPre=cap,
insideSize=insideSize,ownerPre=ownerPre,
ownerPost=ownerPost,labels=prodNames)
print(result.cap)
}
| /man/logit.Rd | no_license | josempazymino/antitrust | R | false | false | 14,711 | rd | \name{logit}
\alias{logit}
\alias{logit.nests}
\alias{logit.nests.alm}
\alias{logit.cap}
\alias{logit.alm}
\alias{logit.cap.alm}
\title{(Nested) Logit Demand Calibration and Merger Simulation}
\description{Calibrates consumer demand using (Nested) Logit and then simulates the price effect of a merger between two firms
under the assumption that all firms in the market are playing a
differentiated products Bertrand pricing game.}
\usage{
logit(prices,shares,margins,
ownerPre,ownerPost,
normIndex=ifelse(isTRUE(all.equal(sum(shares), 1,
check.names=FALSE)), 1 , NA ),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
insideSize = NA_real_,
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.alm(prices,shares,margins,
ownerPre,ownerPost,
mktElast = NA_real_,
insideSize = NA_real_,
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.nests(prices,shares,margins,
ownerPre,ownerPost,
nests=rep(1,length(shares)),
normIndex=ifelse(sum(shares) < 1,NA,1),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
constraint = TRUE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.nests.alm(prices,shares,margins,
ownerPre,ownerPost,
nests=rep(1,length(shares)),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
constraint = TRUE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.cap(prices,shares,margins,
ownerPre,ownerPost,
capacitiesPre=rep(Inf,length(prices)),
capacitiesPost=capacitiesPre,
insideSize,
normIndex=ifelse(sum(shares)<1,NA,1),
mcDelta=rep(0,length(prices)),
subset=rep(TRUE, length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
logit.cap.alm(prices,shares,margins,
ownerPre,ownerPost,
capacitiesPre=rep(Inf,length(prices)),
capacitiesPost=capacitiesPre,
mktElast = NA_real_,
insideSize,
mcDelta=rep(0,length(prices)),
subset=rep(TRUE,length(prices)),
priceOutside = 0,
priceStart = prices,
isMax=FALSE,
parmsStart,
control.slopes,
control.equ,
labels=paste("Prod",1:length(prices),sep=""),
...
)
}
\arguments{
\item{}{Let k denote the number of products produced by all firms playing the
Bertrand pricing game.}
\item{prices}{A length k vector of product prices.}
\item{shares}{A length k vector of product (quantity) shares. Values must be
between 0 and 1.}
\item{margins}{A length k vector of product margins, some of which may
equal NA.}
\item{nests}{A length k vector identifying the nest that each
product belongs to.}
\item{capacitiesPre}{A length k vector of pre-merger product capacities. Capacities
must be at least as great as shares * insideSize.}
\item{capacitiesPost}{A length k vector of post-merger product capacities.}
\item{insideSize}{An integer equal to total pre-merger units sold.
If shares sum to one, this also equals the size of the market.}
\item{normIndex}{An integer equalling the index (position) of the
    inside product whose mean valuation will be normalized to 0. Default
is 1, unless \sQuote{shares} sum to less than 1, in which case the default is
NA and an outside good is assumed to exist.}
\item{ownerPre}{EITHER a vector of length k whose values
indicate which firm produced a product pre-merger OR
a k x k matrix of pre-merger ownership shares.}
\item{ownerPost}{EITHER a vector of length k whose values
indicate which firm produced a product after the merger OR
a k x k matrix of post-merger ownership shares.}
\item{mktElast}{a negative value indicating market elasticity. Default is NA.}
\item{mcDelta}{A vector of length k where each element equals the
proportional change in a product's marginal costs due to
the merger. Default is 0, which assumes that the merger does not
affect any products' marginal cost.}
\item{subset}{A vector of length k where each element equals TRUE if
the product indexed by that element should be included in the
post-merger simulation and FALSE if it should be excluded.Default is a
length k vector of TRUE.}
\item{constraint}{if TRUE, then the nesting parameters for all
non-singleton nests are assumed equal. If FALSE, then each
non-singleton nest is permitted to have its own value. Default is
TRUE.}
\item{priceOutside}{A length 1 vector indicating the price of the
outside good. Default is 0.}
\item{priceStart}{A length k vector of starting values used to solve for
equilibrium price. Default is the \sQuote{prices} vector.}
\item{isMax}{If TRUE, checks to see whether computed price equilibrium
locally maximizes firm profits and returns a warning if not. Default is FALSE.}
\item{parmsStart}{For \code{logit.cap.alm}, a length-2 vector of starting values
used to solve for the price coefficient and outside share (in that order). For
    \code{logit.nests}, the first element should
always be the price coefficient and the remaining elements should be
the nesting parameters. Theory requires the nesting parameters to be
greater than the price coefficient. If missing then the random
draws with the appropriate restrictions are employed.}
\item{control.slopes}{A list of \code{\link{optim}} control parameters passed to the calibration routine optimizer (typically the \code{calcSlopes} method).}
\item{control.equ}{A list of \code{\link[BB]{BBsolve}} control parameters passed to the non-linear equation solver (typically the \code{calcPrices} method).}
\item{labels}{A k-length vector of labels. Default is "Prod#", where
\sQuote{#} is a number between 1 and the length of \sQuote{prices}.}
\item{...}{Additional options to feed to the \code{\link[BB]{BBsolve}}
optimizer used to solve for equilibrium prices.}
}
\details{Using product prices, quantity shares and all of the
product margins from at least one firm, \code{logit} is able to
recover the price coefficient and product mean valuations in a
Logit demand model. \code{logit} then uses these
calibrated parameters to simulate a merger between two firms.
\code{logit.alm} is identical to \code{logit} except that it assumes
that an outside product exists and uses additional margin
information to estimate the share of the outside good.
If market elasticity is known, it may be supplied using the
\sQuote{mktElast} argument.
\code{logit.nests} is identical to \code{logit} except that it includes the \sQuote{nests}
argument which may be used to assign products to different
nests. Nests are useful because they allow for richer substitution
patterns between products. Products within the same nest are assumed
to be closer substitutes than products in different nests. The degree
of substitutability between products located in different nests is
controlled by the value of the nesting parameter sigma.
The nesting parameters for singleton nests (nests containing
only one product) are not identified and normalized to 1. The vector of
sigmas is calibrated from the prices, revenue shares, and margins supplied
by the user.
By default, all non-singleton nests are assumed to have a common value for sigma.
This constraint may be relaxed by setting \sQuote{constraint} to
FALSE. In this case, at least one product margin must be supplied from
a product within each nest.
\code{logit.nests.alm} is identical to \code{logit.nests} except that it assumes
that an outside product exists and uses additional margin
information to estimate the share of the outside good.
\code{logit.cap} is identical to \code{logit} except that firms are
playing the Bertrand pricing game under exogenously supplied capacity
constraints. Unlike \code{logit}, \code{logit.cap} requires users to
  specify capacity constraints via \sQuote{capacitiesPre} and the number of
  potential customers in a market via \sQuote{insideSize}. \sQuote{insideSize} is needed to
  transform \sQuote{shares} into quantities that must be directly compared to \sQuote{capacitiesPre}.
In \code{logit}, \code{logit.nests} and \code{logit.cap}, if quantity shares sum to 1,
then one product's mean value is not identified and must be normalized
to 0. \sQuote{normIndex} may be used to specify the index (position) of the
product whose mean value is to be normalized. If the sum of revenue shares
  is less than 1, both of these functions assume that there exists a k+1st
product in the market whose price and mean value are both normalized
to 0.
}
\value{\code{logit} returns an instance of class
\code{\linkS4class{Logit}}.
\code{logit.alm} returns an instance of \code{\linkS4class{LogitALM}}, a
child class of \code{\linkS4class{Logit}.}.
\code{logit.nests} returns an instance of \code{\linkS4class{LogitNests}}, a
child class of \code{\linkS4class{Logit}}.
\code{logit.cap} returns an instance of \code{\linkS4class{LogitCap}}, a
child class of \code{\linkS4class{Logit}.}}
\seealso{\code{\link{ces}}}
\author{Charles Taragin \email{charles.taragin@usdoj.gov}}
\references{
Anderson, Simon, Palma, Andre, and Francois Thisse (1992).
\emph{Discrete Choice Theory of Product Differentiation}.
The MIT Press, Cambridge, Mass.
Epstein, Roy and Rubinfeld, Daniel (2004).
\dQuote{Effects of Mergers Involving Differentiated Products.}
Werden, Gregory and Froeb, Luke (1994).
\dQuote{The Effects of Mergers in
Differentiated Products Industries: Structural Merger Policy and the
Logit Model},
\emph{Journal of Law, Economics, \& Organization}, \bold{10}, pp. 407-426.
Froeb, Luke, Tschantz, Steven and Phillip Crooke (2003).
\dQuote{Bertrand Competition and Capacity Constraints: Mergers Among Parking Lots},
\emph{Journal of Econometrics}, \bold{113}, pp. 49-67.
Froeb, Luke and Werden, Greg (1996).
\dQuote{Computational Economics and Finance: Modeling and Analysis with Mathematica, Volume 2.}
In Varian H (ed.), chapter Simulating Mergers among Noncooperative Oligopolists, pp. 177-95.
Springer-Verlag, New York.
}
\examples{
## Calibration and simulation results from a merger between Budweiser and
## Old Style.
## Source: Epstein/Rubinfeld 2004, pg 80
prodNames <- c("BUD","OLD STYLE","MILLER","MILLER-LITE","OTHER-LITE","OTHER-REG")
ownerPre <-c("BUD","OLD STYLE","MILLER","MILLER","OTHER-LITE","OTHER-REG")
ownerPost <-c("BUD","BUD","MILLER","MILLER","OTHER-LITE","OTHER-REG")
nests <- c("Reg","Reg","Reg","Light","Light","Reg")
price <- c(.0441,.0328,.0409,.0396,.0387,.0497)
shares <- c(.066,.172,.253,.187,.099,.223)
margins <- c(.3830,.5515,.5421,.5557,.4453,.3769)
insideSize <- 1000
names(price) <-
names(shares) <-
names(margins) <-
prodNames
result.logit <- logit(price,shares,margins,
ownerPre=ownerPre,ownerPost=ownerPost,
insideSize = insideSize,
labels=prodNames)
print(result.logit) # return predicted price change
summary(result.logit) # summarize merger simulation
elast(result.logit,TRUE) # returns premerger elasticities
elast(result.logit,FALSE) # returns postmerger elasticities
diversion(result.logit,TRUE) # return premerger diversion ratios
diversion(result.logit,FALSE) # return postmerger diversion ratios
cmcr(result.logit) #calculate compensating marginal cost reduction
upp(result.logit) #calculate Upwards Pricing Pressure Index
CV(result.logit) #calculate representative agent compensating variation
## Implement the Hypothetical Monopolist Test
## for BUD and OLD STYLE using a 5\% SSNIP
HypoMonTest(result.logit,prodIndex=1:2)
## Get a detailed description of the 'Logit' class slots
showClass("Logit")
## Show all methods attached to the 'Logit' Class
showMethods(classes="Logit")
## Show which classes have their own 'elast' method
showMethods("elast")
## Show the method definition for 'elast' and Class 'Logit'
getMethod("elast","Logit")
#
# Logit With capacity Constraints
#
cap <- c(66,200,300,200,99,300) # BUD and OTHER-LITE are capacity constrained
result.cap <- logit.cap(price,shares,margins,capacitiesPre=cap,
insideSize=insideSize,ownerPre=ownerPre,
ownerPost=ownerPost,labels=prodNames)
print(result.cap)
}
|
# Generate an artificial regression data set: four integer attributes drawn
# uniformly from {1, 2, 3, 4} (floor of a uniform draw on [1, 5)).
set.seed(12)
prdat <- data.frame(
  a1 = floor(runif(400, min = 1, max = 5)),
  a2 = floor(runif(400, min = 1, max = 5)),
  a3 = floor(runif(400, min = 1, max = 5)),
  a4 = floor(runif(400, min = 1, max = 5))
)
# Noise-free target: difference of two tanh ridge functions plus an offset.
prdat$f <- with(prdat,
  2 * tanh(a1 - 2 * a2 + 3 * a3 - a4 + 1) -
    3 * tanh(-2 * a1 + 3 * a2 - 2 * a3 + a4 - 1) + 2)
# Deterministic split: rows 1-200 for training, rows 201-400 for testing.
prdat.train <- prdat[seq_len(200), ]
prdat.test <- prdat[seq(201, 400), ]
| /R/parametric-regression-data.R | no_license | 42n4/dmr.linreg | R | false | false | 485 | r | set.seed(12)
# Generate artificial data: four discrete attributes, each sampled uniformly
# from {1, 2, 3, 4}, plus a deterministic nonlinear target f.
prdat <- data.frame(
  a1 = floor(runif(400, min = 1, max = 5)),
  a2 = floor(runif(400, min = 1, max = 5)),
  a3 = floor(runif(400, min = 1, max = 5)),
  a4 = floor(runif(400, min = 1, max = 5))
)
# f combines two opposing tanh ridges with a constant offset (no noise).
prdat$f <- with(prdat,
  2 * tanh(a1 - 2 * a2 + 3 * a3 - a4 + 1) -
    3 * tanh(-2 * a1 + 3 * a2 - 2 * a3 + a4 - 1) + 2)
# Training and test subsets: first half trains, second half tests.
prdat.train <- prdat[seq_len(200), ]
prdat.test <- prdat[seq(201, 400), ]
|
# Simulate SI-style (independent-cascade) infections on the Jazz network with
# five randomly chosen source nodes. A realisation is accepted only if it
# infects at least 10% of the vertices AND the infected vertices induce a
# connected subgraph; accepted infection sets are collected in
# Jazz_Hetero_10_5S.
#
# Assumes ./Graphs/Jazz.RData provides an igraph object named `graph` whose
# edges carry a `weight` attribute interpreted as a per-edge infection
# probability in [0, 1] -- TODO confirm against the data file.
#library(tnet) # install if not there
library(igraph)
load(file="./Graphs/Jazz.RData")

Jazz_Hetero_10_5S <- list()  # accepted infection sets (one numeric vector each)
k <- 23  # NOTE(review): starts at 23, presumably resuming a partial run; runs until 100 sets are stored
while (k <= 100) {
  print(k)
  # Randomly pick five source nodes. (Renamed from `sample`, which shadowed
  # base::sample.)
  shuffled <- sample(V(graph))
  random <- as.numeric(shuffled[1:5])
  print(random)
  perm_active <- random  # currently infected vertex ids
  while (TRUE) {
    # Collect every neighbour of the currently infected set.
    neighbors <- list()
    for (node in unlist(perm_active)) {
      for (v in unlist(adjacent_vertices(graph, node))) {
        neighbors[[length(neighbors) + 1]] <- v
      }
    }
    neighbors <- unique(unlist(neighbors))
    # Susceptible vertices adjacent to at least one infected vertex.
    rel_neighbors <- setdiff(neighbors, unlist(perm_active))
    temp <- perm_active  # freeze the infected set for this time step
    for (inactive in rel_neighbors) {
      # Weights of all edges linking this susceptible vertex to infected ones.
      edge_list <- list()
      for (active in temp) {
        if (are.connected(graph, inactive, active)) {
          e <- get.edge.ids(graph, c(inactive, active))
          edge_list[[length(edge_list) + 1]] <- E(graph)[e]$weight
        }
      }
      w <- unlist(edge_list)
      if (length(w) > 1) {
        # Independent exposures: P(infected) = 1 - prod_i (1 - w_i).
        prob <- 1 - prod(1 - w)
      } else {
        prob <- w
      }
      if (prob > runif(n = 1, min = 0, max = 1)) {
        perm_active[length(unlist(perm_active)) + 1] <- inactive
      }
    }
    # Stop once the infection reaches the target size (10% of the graph).
    if (length(unlist(perm_active)) >= length(V(graph)) * 0.10) {
      sg <- induced_subgraph(graph, perm_active, impl = c("copy_and_delete"))
      # Was `is.connected(sg)==F`: F is a reassignable binding, so compare
      # with `!` instead of the literal.
      if (!is.connected(sg)) {
        print("NOT CONNECTED")
        break
      } else {
        Jazz_Hetero_10_5S[[length(Jazz_Hetero_10_5S) + 1]] <- unlist(perm_active)
        print(length(unlist(perm_active)))
        #save(Jazz_Hetero_10_5S, file="./Infection Graphs (Multiple Source)/5/Jazz_Hetero_10_5S.RData")
        k <- k + 1
        break
      }
    }
  }
}
| /Code/SI Model Code (Single and Multi-Source)/SI (Five Sources).R | no_license | ZhangGaoxing/EPA-Data-Code | R | false | false | 2,368 | r | #library(tnet) # install if not there
library(igraph)
load(file="./Graphs/Jazz.RData")
Jazz_Hetero_10_5S=list()
k=23
while(k<=100){ #Generate 100 infection graphs
random=list()
print(k)
sample=sample(V(graph))
random=as.numeric(c(sample[1],sample[2],sample[3],sample[4],sample[5])) #Randomly pick five sources
print(random)
t=0
neighbors=list()
temp_active=list()
perm_active=list()
temp=list()
adj=list()
perm_active=random
while(1)
{
t=t+1
neighbors=list()
for(node in unlist(perm_active))
{
adj=adjacent_vertices(graph,(node))
i=1
while(i<=length(unlist(adj)))
{
neighbors[length(unlist(neighbors))+1]=unlist(adj)[i]
i=i+1
}
}
neighbors=unique(unlist(neighbors))
i=1
rel_neighbors=setdiff(unlist(neighbors),unlist(perm_active))
#print(t)
#print(neighbors)
#print(rel_neighbors)
temp=perm_active
#temp_active=list()
for(inactive in rel_neighbors)
{
count=0
edge_list=list()
for(active in temp)
{
if(are.connected(graph,(inactive),(active)))
{
count=count+1
# print("c")
#print(count)
e=get.edge.ids(graph, c((inactive),(active)))
edge_list[length(unlist(edge_list))+1]=E(graph)[e]$weight
#print(unlist(edge_list))
#print(active)
#print(inactive)
}
#print(edge_list)
}
if(count>1)
{
i=1
prob=1
while(i<=count)
{
prob=prob*(1-unlist(edge_list)[i])
# print(prob)
i=i+1
}
prob=1-prob
#print(prob)
}
else
{
prob=unlist(edge_list)
}
if(prob>runif(n=1, min=0, max=1))
{
perm_active[length(unlist(perm_active))+1]=(inactive)
#print(unlist(perm_active))
#print(prob)
}
}
#print(length(unlist(perm_active)))
if(length(unlist(perm_active))>=length(V(graph))*0.10) #Set infection size
{
#if(length(unlist(perm_active))<=length(V(graph))*0.05){
sg=induced_subgraph(graph, perm_active, impl = c("copy_and_delete"))
if(is.connected(sg)==F) {
print("NOT CONNECTED")
break
}
else{
Jazz_Hetero_10_5S[[length(Jazz_Hetero_10_5S)+1]]=unlist(perm_active)
print(unlist(length(perm_active)))
#}
#save(Jazz_Hetero_10_5S, file="./Infection Graphs (Multiple Source)/5/Jazz_Hetero_10_5S.RData")
k=k+1
break
}
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_trim.R
\name{group_trim}
\alias{group_trim}
\title{Trim grouping structure}
\usage{
group_trim(.tbl, .drop = group_by_drop_default(.tbl))
}
\arguments{
\item{.tbl}{A \link[=grouped_df]{grouped data frame}}
\item{.drop}{See \code{\link[=group_by]{group_by()}}}
}
\value{
A \link[=grouped_df]{grouped data frame}
}
\description{
Drop unused levels of all factors that are used as grouping variables,
then recalculate the grouping structure.
\code{group_trim()} is particularly useful after a \code{\link[=filter]{filter()}} that is intended
to select a subset of groups.
}
\details{
\Sexpr[results=rd, stage=render]{dplyr:::lifecycle("experimental")}
}
\examples{
iris \%>\%
group_by(Species) \%>\%
filter(Species == "setosa", .preserve = TRUE) \%>\%
group_trim()
}
\seealso{
Other grouping functions:
\code{\link{group_by_all}()},
\code{\link{group_by}()},
\code{\link{group_indices}()},
\code{\link{group_keys}()},
\code{\link{group_map}()},
\code{\link{group_nest}()},
\code{\link{group_rows}()},
\code{\link{group_size}()},
\code{\link{groups}()}
}
\concept{grouping functions}
| /man/group_trim.Rd | permissive | krlmlr/dplyr | R | false | true | 1,175 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/group_trim.R
\name{group_trim}
\alias{group_trim}
\title{Trim grouping structure}
\usage{
group_trim(.tbl, .drop = group_by_drop_default(.tbl))
}
\arguments{
\item{.tbl}{A \link[=grouped_df]{grouped data frame}}
\item{.drop}{See \code{\link[=group_by]{group_by()}}}
}
\value{
A \link[=grouped_df]{grouped data frame}
}
\description{
Drop unused levels of all factors that are used as grouping variables,
then recalculates the grouping structure.
\code{group_trim()} is particularly useful after a \code{\link[=filter]{filter()}} that is intended
to select a subset of groups.
}
\details{
\Sexpr[results=rd, stage=render]{dplyr:::lifecycle("experimental")}
}
\examples{
iris \%>\%
group_by(Species) \%>\%
filter(Species == "setosa", .preserve = TRUE) \%>\%
group_trim()
}
\seealso{
Other grouping functions:
\code{\link{group_by_all}()},
\code{\link{group_by}()},
\code{\link{group_indices}()},
\code{\link{group_keys}()},
\code{\link{group_map}()},
\code{\link{group_nest}()},
\code{\link{group_rows}()},
\code{\link{group_size}()},
\code{\link{groups}()}
}
\concept{grouping functions}
|
# Fetch the daily LCPS (Dutch national patient coordination centre) COVID-19
# bed-occupancy feed and write per-day, archival and derived CSV files.
# Assumes dplyr/magrittr (`%>%`, `mutate`) and data.table (`frollmean`) are
# attached by the calling workflow -- TODO confirm.
lcps.data.original <- utils::read.csv('https://lcps.nu/wp-content/uploads/covid-19.csv', sep =',')
# Order numbers: IC_Bedden_COVID, IC_Bedden_Non_COVID, Kliniek_Bedden, IC_Nieuwe_Opnames_COVID, Kliniek_Nieuwe_Opnames_COVID
# Parse the day-month-year date column into a proper Date, placed first, and
# drop the original character column.
lcps.data <- lcps.data.original %>%
mutate(
date = as.Date(Datum, tryFormats = c('%d-%m-%Y')),
.before = Datum
) %>%
mutate(
Datum = NULL
)
# Sanity check on row 1 (presumably the newest observation, since the feed is
# re-sorted newest-first below -- TODO confirm): clinical admissions are
# expected to be >= IC admissions; if not, the columns look swapped.
lcps.condition <- head(lcps.data$Kliniek_Nieuwe_Opnames_COVID,1) < head(lcps.data$IC_Nieuwe_Opnames_COVID,1)
# Verify clinical beds and IC beds are correctly reported (not swapped around)
if (lcps.condition) {stop("The value is TRUE, so the script must end here")
} else {
# Snapshot of the newest observation, written to its own per-day file.
lcps.dailydata <- lcps.data %>%
head(1)
lcps.date <- lcps.dailydata[['date']]
filename <- paste0('data-lcps/total/covid-19_', lcps.date, '.csv')
filename.daily <- paste0('data-lcps/data-per-day/covid-19_', lcps.date, '.csv')
filename.common <- 'data/lcps_by_day.csv'
# Sort ascending so the 7-day rolling means run forward in time.
lcps.data <- lcps.data[order(lcps.data$date),]
# Derived columns: total occupancy, 7-day rolling mean admissions (rounded),
# combined admissions and total IC capacity in use.
lcps.data <- lcps.data %>%
mutate(Totaal_bezetting = Kliniek_Bedden + IC_Bedden_COVID) %>%
mutate(IC_Opnames_7d = round(frollmean(IC_Nieuwe_Opnames_COVID,7),0)) %>%
mutate(Kliniek_Opnames_7d = round(frollmean(Kliniek_Nieuwe_Opnames_COVID,7),0)) %>%
mutate(Totaal_opnames = IC_Nieuwe_Opnames_COVID + Kliniek_Nieuwe_Opnames_COVID) %>%
mutate(Totaal_opnames_7d = IC_Opnames_7d + Kliniek_Opnames_7d) %>%
mutate(Totaal_IC = IC_Bedden_COVID + IC_Bedden_Non_COVID)
# Restore newest-first ordering for the published CSVs.
lcps.data <- lcps.data[order(lcps.data$date, decreasing = T),]
write.csv(lcps.data.original, file=filename, row.names = F)
write.csv(lcps.dailydata, file = filename.daily, row.names = F)
write.csv(lcps.data, file = filename.common, row.names = F)
}
# Drop intermediates; only the processed lcps.data survives in the session.
rm(filename, filename.common, filename.daily, lcps.condition, lcps.date, lcps.dailydata, lcps.data.original)
| /workflow/parse_lcps-data.R | permissive | peterboncz/covid-19 | R | false | false | 1,815 | r | lcps.data.original <- utils::read.csv('https://lcps.nu/wp-content/uploads/covid-19.csv', sep =',')
# Order numbers: IC_Bedden_COVID, IC_Bedden_Non_COVID, Kliniek_Bedden, IC_Nieuwe_Opnames_COVID, Kliniek_Nieuwe_Opnames_COVID
lcps.data <- lcps.data.original %>%
mutate(
date = as.Date(Datum, tryFormats = c('%d-%m-%Y')),
.before = Datum
) %>%
mutate(
Datum = NULL
)
lcps.condition <- head(lcps.data$Kliniek_Nieuwe_Opnames_COVID,1) < head(lcps.data$IC_Nieuwe_Opnames_COVID,1)
# Verify clinical beds and IC beds are correctly reported (not swapped around)
if (lcps.condition) {stop("The value is TRUE, so the script must end here")
} else {
lcps.dailydata <- lcps.data %>%
head(1)
lcps.date <- lcps.dailydata[['date']]
filename <- paste0('data-lcps/total/covid-19_', lcps.date, '.csv')
filename.daily <- paste0('data-lcps/data-per-day/covid-19_', lcps.date, '.csv')
filename.common <- 'data/lcps_by_day.csv'
lcps.data <- lcps.data[order(lcps.data$date),]
lcps.data <- lcps.data %>%
mutate(Totaal_bezetting = Kliniek_Bedden + IC_Bedden_COVID) %>%
mutate(IC_Opnames_7d = round(frollmean(IC_Nieuwe_Opnames_COVID,7),0)) %>%
mutate(Kliniek_Opnames_7d = round(frollmean(Kliniek_Nieuwe_Opnames_COVID,7),0)) %>%
mutate(Totaal_opnames = IC_Nieuwe_Opnames_COVID + Kliniek_Nieuwe_Opnames_COVID) %>%
mutate(Totaal_opnames_7d = IC_Opnames_7d + Kliniek_Opnames_7d) %>%
mutate(Totaal_IC = IC_Bedden_COVID + IC_Bedden_Non_COVID)
lcps.data <- lcps.data[order(lcps.data$date, decreasing = T),]
write.csv(lcps.data.original, file=filename, row.names = F)
write.csv(lcps.dailydata, file = filename.daily, row.names = F)
write.csv(lcps.data, file = filename.common, row.names = F)
}
rm(filename, filename.common, filename.daily, lcps.condition, lcps.date, lcps.dailydata, lcps.data.original)
|
\name{SBCp}
\alias{SBCp}
\title{
Calculate SBC
}
\description{Schwarz' Bayesian criterion
\deqn{SBC = n*ln(SSEp)-n*ln(n)+ln(n)*p}
}
\usage{
SBCp(model)
}
\arguments{
\item{model}{
model of regression
}
}
\references{
Michael H. Kutner; Christopher J. Nachtsheim; John Neter; William Li. Applied Linear Statistical Models Fifth Edition .page 360
}
\examples{
##################use data Surgical Unit, page 360
SBCp(lm(lny~x4,SurgicalUnit))
}
| /man/9_SBCp.Rd | no_license | AliGhanbari26/ALSM | R | false | false | 452 | rd | \name{SBCp}
\alias{SBCp}
\title{
Calculate SBC
}
\description{Schwarz' Bayesian criterion
\deqn{SBC = n*ln(SSEp)-n*ln(n)+ln(n)*p}
}
\usage{
SBCp(model)
}
\arguments{
\item{model}{
model of regression
}
}
\references{
Michael H. Kutner; Christopher J. Nachtsheim; John Neter; William Li. Applied Linear Statistical Models Fifth Edition .page 360
}
\examples{
##################use data Surgical Unit, page 360
SBCp(lm(lny~x4,SurgicalUnit))
}
|
# Simulation-based diagnostic plots for "lm" (and "glm") objects.
#
# For each of prod(layout) simulations, a response vector is simulated from
# the fitted model (via simulate()), the model is refitted to that response
# against the original design matrix, and the requested plot.lm-style
# diagnostic panels are drawn with lattice -- one panel per simulation. This
# shows what the diagnostics look like when the fitted model is correct.
#
# obj        : fitted "lm" object (stops otherwise).
# simvalues  : optional pre-simulated responses, one column per panel; if
#              NULL, simulate(obj, nsim = prod(layout), seed = seed) is used.
# types      : list of lattice "type" settings for plots 1..6.
# which      : which of the six plot types to produce (as in plot.lm).
# layout     : lattice layout; prod(layout) = number of simulations.
# qqline     : NOTE(review): accepted but never used in the body below.
# cook.levels, caption, ... : as in plot.lm / passed to lattice calls.
#
# Returns a named list of trellis objects (one per requested plot), or the
# single trellis object when only one plot is requested.
plotSimDiags <-
function (obj, simvalues = NULL, seed = NULL, types = NULL, which = c(1:3, 5),
layout = c(4, 1), qqline = TRUE, cook.levels = c(0.5, 1),
caption = list("Residuals vs Fitted", "Normal Q-Q", "Scale-Location",
"Cook's distance", "Residuals vs Leverage",
expression("Cook's dist vs Leverage " * h[ii]/(1 - h[ii]))), ...)
{
# Replace values at leverage-one points (hat >= 1) with NaN, since their
# standardized residuals are undefined -- same device as stats:::plot.lm.
dropInf <- function(x, h) {
if (any(isInf <- h >= 1)) {
x[isInf] <- NaN
}
x
}
if (!inherits(obj, "lm"))
stop("use only with \"lm\" objects")
if (!is.numeric(which) || any(which < 1) || any(which > 6))
stop("'which' must be in 1:6")
# One slot per possible plot; unused slots stay NULL and are dropped at the end.
gphlist <- vector("list", 6)
names(gphlist) <- c("residVSfitted", "normalQQ", "scaleVSloc",
"CookDist", "residVSlev", "CookVSlev")
isGlm <- inherits(obj, "glm")
if (is.null(types))
types <- list(c("p", "smooth"), NULL, c("p", "smooth"),
"h", c("p", "smooth"), NULL)
show <- rep(FALSE, 6)
show[which] <- TRUE
# Number of simulated refits = number of lattice panels.
numsim <- prod(layout)
if (is.null(simvalues))
simvalues <- simulate(obj, nsim = numsim, seed = seed)
if (ncol(simvalues) != numsim)
stop(paste("Number of columns of simvalues must", "equal number of panels of layout"))
okrows <- complete.cases(residuals(obj))
hat <- fitted(obj)
nobs <- length(hat)
df <- as.data.frame(simvalues)
simnam <- paste("Sim", 1:numsim, sep = "_")
names(df)[1:numsim] <- simnam
# Refit the model to each simulated response, reusing the design matrix.
mmat <- model.matrix(obj)
regs <- lapply(df, function(y) lm(y ~ mmat))
hii <- lm.influence(obj, do.coef = FALSE)$hat
# lmdiags (helper defined elsewhere in this package) returns the
# plot.lm-style quantities (r, yh, rs, yhn0, cook, rsp) for each refit.
diaglist <- lapply(regs, lmdiags, which = which, hii=hii)
rslist <- sapply(diaglist, function(x) x[["rs"]])
# Stack the per-simulation diagnostics into one long data frame; `gp` labels
# the simulation each row belongs to (the lattice conditioning variable).
objdf <- data.frame(gp = paste("Simulation", rep(1:numsim,
rep(nobs, numsim)), sep = "_"))
if (show[1]) {
objdf[["r"]] <- as.vector(sapply(diaglist, function(x) x[["r"]]))
objdf[["yh"]] <- as.vector(sapply(diaglist, function(x) x[["yh"]]))
}
if (any(show[2:3])) {
objdf[["rs"]] <- as.vector(sapply(diaglist, function(x) x[["rs"]]))
ylab23 <- if (isGlm)
"Std. deviance resid."
else "Standardized residuals"
}
if (show[3])
objdf[["yhn0"]] <- as.vector(sapply(diaglist, function(x) x$yhn0))
if (any(show[c(4, 6)]))
objdf[["cook"]] <- as.vector(sapply(diaglist, function(x) x$cook))
if (show[5]){
objdf[["rsp"]] <- as.vector(sapply(diaglist, function(x) x[["rsp"]]))
}
# Caption lookup, tolerating a caption list shorter than 6.
getCaption <- function(k) if (length(caption) < k)
NA_character_
else as.graphicsAnnot(caption[[k]])
# Plot 1: residuals vs fitted values, with a zero reference line.
if (show[1]) {
formyx <- r ~ yh | gp
gph <- lattice::xyplot(formyx, type = types[[1]], panel=lattice::panel.xyplot,
par.settings = simpleTheme(pch = c(16,
16),
lty = 2, col = c("black", "gray")), layout = layout,
data = objdf, xlab = "Fitted values", ylab = "Residuals",
main = getCaption(1), ...)
gph <- gph + latticeExtra::layer(lattice::panel.abline(h = 0, lty = 3, col = "gray"))
gphlist[[1]] <- gph
}
# Plot 2: normal Q-Q plot of the standardized residuals with a Q-Q line.
if (show[2]) {
gph <- lattice::qqmath(~rs | gp, data = objdf, prepanel = lattice::prepanel.qqmathline,
panel = function(x, ...) {
lattice::panel.qqmathline(x, lty = 2, ...)
lattice::panel.qqmath(x, ...)
}, layout = layout, xlab = "Theoretical Quantiles",
ylab = ylab23, ...)
gphlist[[2]] <- gph
}
# Plot 3: scale-location, i.e. sqrt(|standardized residual|) vs fitted.
if (show[3]) {
yl <- as.expression(substitute(sqrt(abs(YL)), list(YL = as.name(ylab23))))
sqrtabsr <- sqrt(abs(objdf[["rs"]]))
formyx <- sqrtabsr ~ yhn0 | gp
gph <- lattice::xyplot(formyx, type = types[[3]], par.settings = simpleTheme(pch = c(16,
16), lty = 2, col = c("black", "gray")), layout = layout,
data = objdf, xlab = "Fitted values", ylab = yl,
...)
gphlist[[3]] <- gph
}
# Plot 4: Cook's distance against observation number.
if (show[4]) {
objdf$x <- rep(1:nobs, numsim)
gph <- lattice::xyplot(cook ~ x | gp, data = objdf, type = types[[4]],
layout = layout, xlab = "Obs. number", ylab = "Cook's distance",
...)
gphlist[[4]] <- gph
}
# Plot 5: standardized residuals vs leverage, with Cook's-distance contours.
if (show[5]) {
ylab5 <- if (isGlm)
"Std. Pearson resid."
else "Standardized residuals"
panel5.diag <- function(x, y, ...) {
lattice::panel.xyplot(x, y, ...)
lattice::panel.abline(h = 0, lty = 3, col = "gray")
}
r.hat <- range(hii, na.rm = TRUE)
# Constant leverage arises for balanced factor-only designs; in that case
# plot residuals against factor-level combinations instead of hat values
# (mirrors the plot.lm treatment).
isConst.hat <- all(r.hat == 0) || diff(r.hat) < 1e-10 *
mean(hii, na.rm = TRUE)
if (isConst.hat) {
aterms <- attributes(terms(obj))
dcl <- aterms$dataClasses[-aterms$response]
facvars <- names(dcl)[dcl %in% c("factor", "ordered")]
mf <- model.frame(obj)[facvars]
if (ncol(mf) > 0) {
# Encode each factor-level combination as a single numeric position
# (mixed-radix over the level counts).
dm <- data.matrix(mf)
nf <- length(nlev <- unlist(unname(lapply(obj$xlevels,
length))))
ff <- if (nf == 1)
1
else rev(cumprod(c(1, nlev[nf:2])))
facval <- (dm - 1) %*% ff
levels(facval) <- obj$xlevels[[1L]]
xlim <- c(-1/2, sum((nlev-1)*ff)+1/2)
vval <- ff[1L] * (0:(nlev[1L]-1))-1/2
# NOTE(review): `0:0:(nlev[1L]-1)` evaluates as 0:(nlev[1L]-1) but
# looks like a typo for that simpler form -- confirm intent.
gph <- lattice::xyplot(rsp ~ facval|gp, xlim=xlim, ylab=ylab5, data = objdf,
## panel.groups=function(x,y,...){
## panel.points(x,y,...)
## xu <- unique(x)
## ym <- sapply(split(y,x),mean)
## browser()
## panel.points(xu,ym, pch=2, col="red")
## },
scales=list(x=list(at=0:0:(nlev[1L]-1), labels=obj$xlevels[[1L]])))
gph <- gph + latticeExtra::layer(lattice::panel.abline(v=vval, col = "gray", lty = "F4"),
lattice::panel.abline(h = 0, lty = 3, col = "gray"),
data=list(vval=vval))
}
else {
# NOTE(review): after this branch, `gph` is stale or undefined when
# gphlist[[5]] is assigned below -- likely latent bug; confirm.
message("hat values (leverages) are all = ",
format(mean(r.hat)), "\n and there are no factor predictors; no plot no. 5")
do.plot <- FALSE
}
} else {
xx <- rep(hii,numsim)
xx[xx >= 1] <- NA
p <- length(coef(obj))
# Add right-hand axis ticks labelled with the Cook's-distance levels,
# positioned where the contour lines meet the axis.
if (length(cook.levels))
yscale.cpts <- function(lim, ...) {
ans <- lattice::yscale.components.default(lim = lim, ...)
ans$right <- ans$left
ans$right$ticks$at <- c(-rev(sqrt(cook.levels)) *
ymult, sqrt(cook.levels) * ymult)
ans$right$ticks$tck <- rep(0, 2 * length(cook.levels))
ans$right$labels$at <- c(-rev(sqrt(cook.levels)) *
ymult, sqrt(cook.levels) * ymult)
ans$right$labels$labels <- paste(c(rev(cook.levels),
cook.levels))
ans
}
formyx <- rsp ~ xx | gp
gph <- lattice::xyplot(formyx, type = types[[5]], data = objdf,
par.settings = simpleTheme(pch = c(16, 16), lty = 2,
col = c("black", "gray")), scales = list(y = list(alternating = 3)),
layout = layout, xlab = "Leverage", ylab = ylab5,
main = getCaption(5), panel = panel5.diag, yscale.components = yscale.cpts)
usr <- gph[["x.limits"]]
xmax <- min(0.99, usr[2L])
ymult <- sqrt(p * (1 - xmax)/xmax)
# Cook's-distance contour curves: rs = +/- sqrt(cl * p * (1-h)/h).
hh <- seq.int(min(r.hat[1L], r.hat[2L]/100), usr[2L],
length.out = 101)
xy <- expand.grid(hh = c(hh, NA), cl.h = cook.levels)
xy <- within(xy, cl.h <- sqrt(cl.h * p * (1 - hh)/hh))
xy <- with(xy, data.frame(hh = c(hh, hh), cl.h = c(cl.h,
-cl.h)))
aty <- c(-rev(sqrt(cook.levels)) * ymult, sqrt(cook.levels) *
ymult)
laby <- paste(c(rev(cook.levels), cook.levels))
gph2 <- lattice::xyplot(cl.h ~ hh, data = xy, type = "l", lty = 3,
col = "red")
gph <- gph + latticeExtra::as.layer(gph2)
}
gphlist[[5]] <- gph
}
# Plot 6: Cook's distance vs leverage h/(1-h), with slope reference lines
# labelled by the corresponding |standardized residual| values.
if (show[6]) {
g <- with(objdf, dropInf(hii/(1 - hii), hii))
ymx <- with(objdf, max(cook, na.rm = TRUE) * 1.025)
athat <- pretty(hii)
# First xyplot call only establishes the axis limits used below.
gph <- lattice::xyplot(cook ~ g | gp, xlim = c(0, max(g, na.rm = TRUE)),
data = objdf, ylim = c(0, ymx))
p <- length(coef(obj))
bval <- with(objdf, pretty(sqrt(p * cook/g), 5))
xmax <- gph[["x.limits"]][2]
ymax <- gph[["y.limits"]][2]
panel6 <- function(x, y, ...) {
lattice::panel.xyplot(x, y, ...)
# Draw each reference line to whichever plot edge it meets first,
# labelling it there.
for (i in seq_along(bval)) {
bi2 <- bval[i]^2
if (ymax > bi2 * xmax) {
xi <- xmax
yi <- bi2 * xi
lattice::panel.abline(0, bi2, lty = 2)
lattice::panel.text(xi, yi, paste(bval[i]), adj = c(1.25,
0.5), cex = 0.75)
}
else {
yi <- ymax
xi <- yi/bi2
lattice::panel.lines(c(0, xi), c(0, yi), lty = 2)
lattice::panel.text(xi, ymax, paste(bval[i]), adj = c(0.5,
1.25), cex = 0.75)
}
}
}
gph <- lattice::xyplot(cook ~ g | gp, xlim = c(0, max(g, na.rm = TRUE)),
data = objdf, ylim = c(0, ymx), main = getCaption(6),
ylab = "Cook's distance", xlab = expression("Leverage " *
h[ii]), layout = layout, scales = list(x = list(at = athat/(1 -
athat), labels = paste(athat))), panel = panel6)
gphlist[[6]] <- gph
}
# Drop the unrequested slots; unwrap when only a single plot was made.
gphlist <- gphlist[!sapply(gphlist, is.null)]
if (length(gphlist) == 1)
gphlist <- gphlist[[1]]
gphlist
}
| /R/plotSimDiags.R | no_license | cran/DAAG | R | false | false | 10,937 | r | plotSimDiags <-
function (obj, simvalues = NULL, seed = NULL, types = NULL, which = c(1:3, 5),
layout = c(4, 1), qqline = TRUE, cook.levels = c(0.5, 1),
caption = list("Residuals vs Fitted", "Normal Q-Q", "Scale-Location",
"Cook's distance", "Residuals vs Leverage",
expression("Cook's dist vs Leverage " * h[ii]/(1 - h[ii]))), ...)
{
dropInf <- function(x, h) {
if (any(isInf <- h >= 1)) {
x[isInf] <- NaN
}
x
}
if (!inherits(obj, "lm"))
stop("use only with \"lm\" objects")
if (!is.numeric(which) || any(which < 1) || any(which > 6))
stop("'which' must be in 1:6")
gphlist <- vector("list", 6)
names(gphlist) <- c("residVSfitted", "normalQQ", "scaleVSloc",
"CookDist", "residVSlev", "CookVSlev")
isGlm <- inherits(obj, "glm")
if (is.null(types))
types <- list(c("p", "smooth"), NULL, c("p", "smooth"),
"h", c("p", "smooth"), NULL)
show <- rep(FALSE, 6)
show[which] <- TRUE
numsim <- prod(layout)
if (is.null(simvalues))
simvalues <- simulate(obj, nsim = numsim, seed = seed)
if (ncol(simvalues) != numsim)
stop(paste("Number of columns of simvalues must", "equal number of panels of layout"))
okrows <- complete.cases(residuals(obj))
hat <- fitted(obj)
nobs <- length(hat)
df <- as.data.frame(simvalues)
simnam <- paste("Sim", 1:numsim, sep = "_")
names(df)[1:numsim] <- simnam
mmat <- model.matrix(obj)
regs <- lapply(df, function(y) lm(y ~ mmat))
hii <- lm.influence(obj, do.coef = FALSE)$hat
diaglist <- lapply(regs, lmdiags, which = which, hii=hii)
rslist <- sapply(diaglist, function(x) x[["rs"]])
objdf <- data.frame(gp = paste("Simulation", rep(1:numsim,
rep(nobs, numsim)), sep = "_"))
if (show[1]) {
objdf[["r"]] <- as.vector(sapply(diaglist, function(x) x[["r"]]))
objdf[["yh"]] <- as.vector(sapply(diaglist, function(x) x[["yh"]]))
}
if (any(show[2:3])) {
objdf[["rs"]] <- as.vector(sapply(diaglist, function(x) x[["rs"]]))
ylab23 <- if (isGlm)
"Std. deviance resid."
else "Standardized residuals"
}
if (show[3])
objdf[["yhn0"]] <- as.vector(sapply(diaglist, function(x) x$yhn0))
if (any(show[c(4, 6)]))
objdf[["cook"]] <- as.vector(sapply(diaglist, function(x) x$cook))
if (show[5]){
objdf[["rsp"]] <- as.vector(sapply(diaglist, function(x) x[["rsp"]]))
}
getCaption <- function(k) if (length(caption) < k)
NA_character_
else as.graphicsAnnot(caption[[k]])
if (show[1]) {
formyx <- r ~ yh | gp
gph <- lattice::xyplot(formyx, type = types[[1]], panel=lattice::panel.xyplot,
par.settings = simpleTheme(pch = c(16,
16),
lty = 2, col = c("black", "gray")), layout = layout,
data = objdf, xlab = "Fitted values", ylab = "Residuals",
main = getCaption(1), ...)
gph <- gph + latticeExtra::layer(lattice::panel.abline(h = 0, lty = 3, col = "gray"))
gphlist[[1]] <- gph
}
if (show[2]) {
gph <- lattice::qqmath(~rs | gp, data = objdf, prepanel = lattice::prepanel.qqmathline,
panel = function(x, ...) {
lattice::panel.qqmathline(x, lty = 2, ...)
lattice::panel.qqmath(x, ...)
}, layout = layout, xlab = "Theoretical Quantiles",
ylab = ylab23, ...)
gphlist[[2]] <- gph
}
if (show[3]) {
yl <- as.expression(substitute(sqrt(abs(YL)), list(YL = as.name(ylab23))))
sqrtabsr <- sqrt(abs(objdf[["rs"]]))
formyx <- sqrtabsr ~ yhn0 | gp
gph <- lattice::xyplot(formyx, type = types[[3]], par.settings = simpleTheme(pch = c(16,
16), lty = 2, col = c("black", "gray")), layout = layout,
data = objdf, xlab = "Fitted values", ylab = yl,
...)
gphlist[[3]] <- gph
}
if (show[4]) {
objdf$x <- rep(1:nobs, numsim)
gph <- lattice::xyplot(cook ~ x | gp, data = objdf, type = types[[4]],
layout = layout, xlab = "Obs. number", ylab = "Cook's distance",
...)
gphlist[[4]] <- gph
}
if (show[5]) {
ylab5 <- if (isGlm)
"Std. Pearson resid."
else "Standardized residuals"
panel5.diag <- function(x, y, ...) {
lattice::panel.xyplot(x, y, ...)
lattice::panel.abline(h = 0, lty = 3, col = "gray")
}
r.hat <- range(hii, na.rm = TRUE)
isConst.hat <- all(r.hat == 0) || diff(r.hat) < 1e-10 *
mean(hii, na.rm = TRUE)
if (isConst.hat) {
aterms <- attributes(terms(obj))
dcl <- aterms$dataClasses[-aterms$response]
facvars <- names(dcl)[dcl %in% c("factor", "ordered")]
mf <- model.frame(obj)[facvars]
if (ncol(mf) > 0) {
dm <- data.matrix(mf)
nf <- length(nlev <- unlist(unname(lapply(obj$xlevels,
length))))
ff <- if (nf == 1)
1
else rev(cumprod(c(1, nlev[nf:2])))
facval <- (dm - 1) %*% ff
levels(facval) <- obj$xlevels[[1L]]
xlim <- c(-1/2, sum((nlev-1)*ff)+1/2)
vval <- ff[1L] * (0:(nlev[1L]-1))-1/2
gph <- lattice::xyplot(rsp ~ facval|gp, xlim=xlim, ylab=ylab5, data = objdf,
## panel.groups=function(x,y,...){
## panel.points(x,y,...)
## xu <- unique(x)
## ym <- sapply(split(y,x),mean)
## browser()
## panel.points(xu,ym, pch=2, col="red")
## },
scales=list(x=list(at=0:0:(nlev[1L]-1), labels=obj$xlevels[[1L]])))
gph <- gph + latticeExtra::layer(lattice::panel.abline(v=vval, col = "gray", lty = "F4"),
lattice::panel.abline(h = 0, lty = 3, col = "gray"),
data=list(vval=vval))
}
else {
message("hat values (leverages) are all = ",
format(mean(r.hat)), "\n and there are no factor predictors; no plot no. 5")
do.plot <- FALSE
}
} else {
xx <- rep(hii,numsim)
xx[xx >= 1] <- NA
p <- length(coef(obj))
if (length(cook.levels))
yscale.cpts <- function(lim, ...) {
ans <- lattice::yscale.components.default(lim = lim, ...)
ans$right <- ans$left
ans$right$ticks$at <- c(-rev(sqrt(cook.levels)) *
ymult, sqrt(cook.levels) * ymult)
ans$right$ticks$tck <- rep(0, 2 * length(cook.levels))
ans$right$labels$at <- c(-rev(sqrt(cook.levels)) *
ymult, sqrt(cook.levels) * ymult)
ans$right$labels$labels <- paste(c(rev(cook.levels),
cook.levels))
ans
}
formyx <- rsp ~ xx | gp
gph <- lattice::xyplot(formyx, type = types[[5]], data = objdf,
par.settings = simpleTheme(pch = c(16, 16), lty = 2,
col = c("black", "gray")), scales = list(y = list(alternating = 3)),
layout = layout, xlab = "Leverage", ylab = ylab5,
main = getCaption(5), panel = panel5.diag, yscale.components = yscale.cpts)
usr <- gph[["x.limits"]]
xmax <- min(0.99, usr[2L])
ymult <- sqrt(p * (1 - xmax)/xmax)
hh <- seq.int(min(r.hat[1L], r.hat[2L]/100), usr[2L],
length.out = 101)
xy <- expand.grid(hh = c(hh, NA), cl.h = cook.levels)
xy <- within(xy, cl.h <- sqrt(cl.h * p * (1 - hh)/hh))
xy <- with(xy, data.frame(hh = c(hh, hh), cl.h = c(cl.h,
-cl.h)))
aty <- c(-rev(sqrt(cook.levels)) * ymult, sqrt(cook.levels) *
ymult)
laby <- paste(c(rev(cook.levels), cook.levels))
gph2 <- lattice::xyplot(cl.h ~ hh, data = xy, type = "l", lty = 3,
col = "red")
gph <- gph + latticeExtra::as.layer(gph2)
}
gphlist[[5]] <- gph
}
if (show[6]) {
g <- with(objdf, dropInf(hii/(1 - hii), hii))
ymx <- with(objdf, max(cook, na.rm = TRUE) * 1.025)
athat <- pretty(hii)
gph <- lattice::xyplot(cook ~ g | gp, xlim = c(0, max(g, na.rm = TRUE)),
data = objdf, ylim = c(0, ymx))
p <- length(coef(obj))
bval <- with(objdf, pretty(sqrt(p * cook/g), 5))
xmax <- gph[["x.limits"]][2]
ymax <- gph[["y.limits"]][2]
panel6 <- function(x, y, ...) {
lattice::panel.xyplot(x, y, ...)
for (i in seq_along(bval)) {
bi2 <- bval[i]^2
if (ymax > bi2 * xmax) {
xi <- xmax
yi <- bi2 * xi
lattice::panel.abline(0, bi2, lty = 2)
lattice::panel.text(xi, yi, paste(bval[i]), adj = c(1.25,
0.5), cex = 0.75)
}
else {
yi <- ymax
xi <- yi/bi2
lattice::panel.lines(c(0, xi), c(0, yi), lty = 2)
lattice::panel.text(xi, ymax, paste(bval[i]), adj = c(0.5,
1.25), cex = 0.75)
}
}
}
gph <- lattice::xyplot(cook ~ g | gp, xlim = c(0, max(g, na.rm = TRUE)),
data = objdf, ylim = c(0, ymx), main = getCaption(6),
ylab = "Cook's distance", xlab = expression("Leverage " *
h[ii]), layout = layout, scales = list(x = list(at = athat/(1 -
athat), labels = paste(athat))), panel = panel6)
gphlist[[6]] <- gph
}
gphlist <- gphlist[!sapply(gphlist, is.null)]
if (length(gphlist) == 1)
gphlist <- gphlist[[1]]
gphlist
}
|
library(VIM)
context("kNN ordered")
d <- data.frame(x=LETTERS[1:6],y=as.double(1:6),z=as.double(1:6),
w=ordered(LETTERS[1:6]), stringsAsFactors = FALSE)
d <- rbind(d,d)
# Return a copy of data frame `d` with the entries at rows `i` of column
# `col` (default: the second column) replaced by NA.
setna <- function(d, i, col = 2) {
  out <- d
  out[i, col] <- NA
  out
}
d$w[1] <- "F"
## Test for medianSamp
test_that("medianSamp as expected",{
expect_true(medianSamp(d$w)%in%c("C","D"))
expect_true(medianSamp(d$w, weights = d$y)=="E")
})
## Test for maxCat
test_that("maxCat as expected",{
expect_true(maxCat(d$w)=="F")
expect_true(maxCat(d$w, weights = d$y)=="F")
})
## Test for sampleCat
test_that("sampleCat as expected",{
expect_true(sampleCat(d$w)%in%LETTERS[1:6])
expect_true(sampleCat(d$w, weights = d$y)%in%LETTERS[1:6])
})
| /tests/testthat/test_aggFunctions.R | no_license | mcemerden/VIM | R | false | false | 716 | r | library(VIM)
context("kNN ordered")
d <- data.frame(x=LETTERS[1:6],y=as.double(1:6),z=as.double(1:6),
w=ordered(LETTERS[1:6]), stringsAsFactors = FALSE)
d <- rbind(d,d)
setna <- function(d,i,col=2){
d[i,col] <- NA
d
}
d$w[1] <- "F"
## Test for medianSamp
test_that("medianSamp as expected",{
expect_true(medianSamp(d$w)%in%c("C","D"))
expect_true(medianSamp(d$w, weights = d$y)=="E")
})
## Test for maxCat
test_that("maxCat as expected",{
expect_true(maxCat(d$w)=="F")
expect_true(maxCat(d$w, weights = d$y)=="F")
})
## Test for sampleCat
test_that("sampleCat as expected",{
expect_true(sampleCat(d$w)%in%LETTERS[1:6])
expect_true(sampleCat(d$w, weights = d$y)%in%LETTERS[1:6])
})
|
\name{glactc}
\alias{glactc}
\title{ Convert between celestial and Galactic (or Supergalactic) coordinates
}
\description{
Convert between celestial and Galactic (or Supergalactic) coordinates
}
\usage{
glactc(ra, dec, year, gl, gb, j, degree=FALSE, fk4 = FALSE, supergalactic = FALSE)
}
\arguments{
\item{ra}{Right Ascension (j=1) or Galactic longitude (j=2), in decimal hours or degrees, scalar or vector}
\item{dec}{Declination (j=1) or Galactic latitude (j=2), in degrees, scalar or vector}
\item{year}{equinox of ra and dec, scalar}
\item{gl}{Galactic longitude or Right Ascension, in degrees, scalar or vector}
\item{gb}{Galactic latitude or Declination, in degrees, scalar or vector}
\item{j}{integer indicator, direction of conversion \cr
1: ra,dec --> gl,gb \cr
2: gl,gb --> ra,dec}
\item{degree}{if set, then the RA parameter (both input and output) is given in degrees rather than hours (default=FALSE)}
\item{fk4}{if set, then the celestial (RA, Dec) coordinates are assumed to be input/output in the FK4 system. By default, coordinates are assumed to be in the FK5 system. (default=FALSE)}
\item{supergalactic}{if set, the function returns SuperGalactic coordinates (see details). (default=FALSE)}
}
\details{
If \emph{j=1}, this function converts equatorial coordinates (ra,dec) to Galactic coordinates (gl, gb) or Supergalactic coordinates (sgl,sgb). If \emph{j=2}, the conversion is reversed from Galactic/Supergalactic coordinates to equatorial coordinates. The calculation includes precession of the coordinates.
For B1950 coordinates, set \emph{fk4=TRUE} and \emph{year=1950}.
If \emph{supergalactic=TRUE} is set, Supergalactic coordinates are defined by de Vaucouleurs et al. (1976) to account for the local supercluster. The North pole in Supergalactic coordinates has Galactic coordinates l = 47.47, b = 6.32, and the origin is at Galactic coordinates l = 137.37, b = 0.00.
}
\value{
\item{ra}{Galactic longitude (j=1) or Right Ascension (j=2), in decimal hours or degrees, scalar or vector}
\item{dec}{Galactic latitude (j=1) or Declination (j=2), in degrees, scalar or vector}
}
\author{
FORTRAN subroutine by T. A. Nagy, 1978. Conversion to IDL, R. S. Hill, STX, 1987.
R adaptation by Arnab Chakraborty June 2013
}
\seealso{
\code{\link{precess}}
\code{\link{jprecess}}
\code{\link{bprecess}}
}
\examples{
# Find the Galactic coordinates of Altair (RA (J2000): 19 50 47 Dec (J2000): 08 52 06)
# Result: gl = 47.74, gb = -8.91
glactc(ten(19,50,47), ten(8,52,6), 2000, gl, gb, 1)
}
\keyword{ misc }
| /man/glactc.Rd | no_license | f-silva-archaeo/astrolibR | R | false | false | 2,594 | rd | \name{glactc}
\alias{glactc}
\title{ Convert between celestial and Galactic (or Supergalactic) coordinates
}
\description{
Convert between celestial and Galactic (or Supergalactic) coordinates
}
\usage{
glactc(ra, dec, year, gl, gb, j, degree=FALSE, fk4 = FALSE, supergalactic = FALSE)
}
\arguments{
\item{ra}{Right Ascension (j=1) or Galactic longitude (j=2), in decimal hours or degrees, scalar or vector}
\item{dec}{Declination (j=1) or Galactic latitude (j=2), in degrees, scalar or vector}
\item{year}{equinox of ra and dec, scalar}
\item{gl}{Galactic longitude or Right Ascension, in degrees, scalar or vector}
\item{gb}{Galactic latitude or Declination, in degrees, scalar or vector}
\item{j}{integer indicator, direction of conversion \cr
1: ra,dec --> gl,gb \cr
2: gl,gb --> ra,dec}
\item{degree}{if set, then the RA parameter (both input and output) is given in degrees rather than hours (default=FALSE)}
\item{fk4}{if set, then the celestial (RA, Dec) coordinates are assumed to be input/output in the FK4 system. By default, coordinates are assumed to be in the FK5 system. (default=FALSE)}
\item{supergalactic}{if set, the function returns SuperGalactic coordinates (see details). (default=FALSE)}
}
\details{
If \emph{j=1}, this function converts proper motion in equatorial coordinates (ra,dec) to proper motion in Galactic coordinates (gl, gb) or Supergalactic Coordinates (sgl,sgb). If \emph{j=2}, the conversion is reversed from Galactic/Supergalactic coordinates to equatorial coordinates. The calculation includes precession on the coordinates.
For B1950 coordinates, set \emph{fk4=TRUE} and \emph{year=1950}.
If \emph{supergalactic=TRUE} is set, Supergalactic coordinates are defined by de Vaucouleurs et al. (1976) to account for the local supercluster. The North pole in Supergalactic coordinates has Galactic coordinates l = 47.47, b = 6.32, and the origin is at Galactic coordinates l = 137.37, b = 0.00.
}
\value{
\item{ra}{Galactic longitude (j=1) or Right Ascension (j=2), in decimal hours or degrees, scalar or vector}
\item{dec}{Galactic latitude (j=1) or Declination (j=2), in degrees, scalar or vector}
}
\author{
FORTRAN subroutine by T. A. Nagy, 1978. Conversion to IDL, R. S. Hill, STX, 1987.
R adaptation by Arnab Chakraborty June 2013
}
\seealso{
\code{\link{precess}}
\code{\link{jprecess}}
\code{\link{bprecess}}
}
\examples{
# Find the Galactic coordinates of Altair (RA (J2000): 19 50 47 Dec (J2000): 08 52 06)
# Result: gl = 47.74, gb = -8.91
glactc(ten(19,50,47), ten(8,52,6), 2000, gl, gb, 1)
}
\keyword{ misc }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prior_distribution.R
\name{runiform_ball}
\alias{runiform_ball}
\title{Multivariate Uniform Distribution on a ball}
\usage{
runiform_ball(n, d, R)
}
\arguments{
\item{n}{number of desired samples.}
\item{d}{positive integer, representing the dimension of the observations.}
\item{R}{positive real value, the radius of the ball in \strong{R}^{d}.}
}
\value{
a matrix of \code{n} samples of length d.
}
\description{
This function generates random samples from multivariate uniform distribution on a ball in \strong{R}^{d}, equipped with \eqn{L^{2}} norm (\emph{i.e.,} Euclidean distance), centered in \strong{0}, with radius R.
}
\details{
This function generates samples from the multivariate uniform distribution whose density is \deqn{\pi(c, R) = \Gamma(d/2 + 1)/ \pi^{d/2} * 1/(R)^{d} 1_{B_{d}(R)}(c),} where \eqn{1_{B_{d}(R)}} is a centered \eqn{L^{2}} ball with radius R.
}
\examples{
##generating 10000 samples from uniform distribution on a unit ball in \\strong{R}^{2}
result <- runiform_ball(10000, 2, 1)
plot(result)
}
| /man/runiform_ball.Rd | no_license | cran/PACBO | R | false | true | 1,110 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prior_distribution.R
\name{runiform_ball}
\alias{runiform_ball}
\title{Multivariate Uniform Distribution on a ball}
\usage{
runiform_ball(n, d, R)
}
\arguments{
\item{n}{number of desired samples.}
\item{d}{positive integer, representing the dimension of the observations.}
\item{R}{positive real value, the radius of the ball in \strong{R}^{d}.}
}
\value{
a matrix of \code{n} samples of length d.
}
\description{
This function generates random samples from multivariate uniform distribution on a ball in \strong{R}^{d}, equipped with \eqn{L^{2}} norm (\emph{i.e.,} Euclidean distance), centered in \strong{0}, with radius R.
}
\details{
This function generates samples from the multivariate uniform distribution whose density is \deqn{\pi(c, R) = \Gamma(d/2 + 1)/ \pi^{d/2} * 1/(R)^{d} 1_{B_{d}(R)}(c),} where \eqn{1_{B_{d}(R)}} is a centered \eqn{L^{2}} ball with radius R.
}
\examples{
##generating 10000 samples from uniform distribution on a unit ball in \\strong{R}^{2}
result <- runiform_ball(10000, 2, 1)
plot(result)
}
|
testlist <- list(a = 0L, b = 0L, x = c(-1L, -12582913L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) | /grattan/inst/testfiles/anyOutside/libFuzzer_anyOutside/anyOutside_valgrind_files/1610131282-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 296 | r | testlist <- list(a = 0L, b = 0L, x = c(-1L, -12582913L, -16777216L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L))
result <- do.call(grattan:::anyOutside,testlist)
str(result) |
library(tidyverse)
library(xts)
library(stringr)
rm(list=ls())
Sys.setenv(TZ = "UTC")
source('functions.R')
tides <- tibble(
shore = c("Easky", "Roonagh", "Spiddal", "Kilkee", "Smerwick_Harbour", "Murphys_Cove", "Viana_do_Castelo"),
country = c(rep("Ireland", 6), "Portugal"),
lat = c(54.292652, 53.762524, 53.242921, 52.684082, 52.186683, 51.487872, 41.69943),
lon = c(-8.956733, -9.905105, -9.301441, -9.651712, -10.512499, -9.264645, -8.854797))
# date range
d <- list(
year0 = 2000,
year1 = 2030)
d$dates = as.POSIXct(c(str_c(d$year0, '-01-01 00:00'), str_c(d$year1, '-12-31 23:59')))
# collect tide data
cat("--> COLLECTING TIDE DATA FOR", nrow(tides), "SITES\n")
cat("----> collecting low tide times and heights\n")
x <- list()
for(i in 1:nrow(tides)) {
cat(" ", tides$shore[i], "\n")
x[[i]] <- get.fes.low.tides(tides$lat[i], tides$lon[i], d$dates, 20)
}
tides <- add_column(tides, low_tides = x)
save(tides, file = str_c("extracted_lowtides_", d$year0, "_", d$year1, ".RData"))
| /extract_low_tides/extract_low_tides2.R | no_license | ruiseabra/fieldwork_planning | R | false | false | 1,025 | r | library(tidyverse)
library(xts)
library(stringr)
rm(list=ls())
Sys.setenv(TZ = "UTC")
source('functions.R')
tides <- tibble(
shore = c("Easky", "Roonagh", "Spiddal", "Kilkee", "Smerwick_Harbour", "Murphys_Cove", "Viana_do_Castelo"),
country = c(rep("Ireland", 6), "Portugal"),
lat = c(54.292652, 53.762524, 53.242921, 52.684082, 52.186683, 51.487872, 41.69943),
lon = c(-8.956733, -9.905105, -9.301441, -9.651712, -10.512499, -9.264645, -8.854797))
# date range
d <- list(
year0 = 2000,
year1 = 2030)
d$dates = as.POSIXct(c(str_c(d$year0, '-01-01 00:00'), str_c(d$year1, '-12-31 23:59')))
# collect tide data
cat("--> COLLECTING TIDE DATA FOR", nrow(tides), "SITES\n")
cat("----> collecting low tide times and heights\n")
x <- list()
for(i in 1:nrow(tides)) {
cat(" ", tides$shore[i], "\n")
x[[i]] <- get.fes.low.tides(tides$lat[i], tides$lon[i], d$dates, 20)
}
tides <- add_column(tides, low_tides = x)
save(tides, file = str_c("extracted_lowtides_", d$year0, "_", d$year1, ".RData"))
|
## Copyright 2013 Elliot Chow
## Licensed under the Apache License, Version 2.0 (the "License")
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import('doMC',
'gdata',
'stringr',
'plyr',
'hash',
'R.oo',
'Hmisc',
'digest',
'sfsmisc',
'rjson',
'testthat',
'Matrix',
as.library='utils')
## options(width=110,scipen=6)
options(scipen=6, menu.graphics=FALSE)
####################
#### Logging
####################
if (!"SimpleLog" %in% ls()) {
setConstructorS3('SimpleLog',
## logging object
## id: id of the logger
## level: recognized logging levels
## colors: colors to use if colorizing logs
function(id='log',
level=c('info','warning','error','debug'),
colors = c('info'='light gray','warning'='yellow','error'='red','debug'='dark gray')
## outputs=stderr(),
## overwrite=TRUE
){
## if(overwrite){
## sapply(outputs[is.character(outputs) & outputs != ""],
## function(x){
## if(is.character(x)){
## file.remove(x)
## }
## })
## }
extend(Object(), 'SimpleLog',
id=id,
level=level,
colors=colors,
## outputs=outputs
outputs=stderr()
)
})
## logging levels
SimpleLog.INFO <- 'info'
SimpleLog.WARNING <- c(SimpleLog.INFO, 'warning')
SimpleLog.ERROR <- c(SimpleLog.WARNING, 'error')
SimpleLog.DEBUG <- c(SimpleLog.ERROR, 'debug')
if(!exists('SimpleLog.CONFIG', envir = globalenv())){
## global var to hold SimpleLog configuration
assign('SimpleLog.CONFIG', new.env(), envir=globalenv())
}
setMethodS3('write.msg','SimpleLog',
## write a message to standard error
## log: SimpleLog object
## ...: message (and string formatting parameters)
## level: level of the message
## sep: separating chars in log
## return.success: set to true if you should return whether or not the message was successfully written
function(log, ..., level=SimpleLog.INFO, sep=' - ', return.success=FALSE){
check <- TRUE
lvl <- intersect(tail(level,1), log$level)
if(length(lvl) > 0){
msg <- paste(list(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), lvl, log$id, sprintf(...)), collapse = sep)
success <- all(sapply(log$outputs,
function(o) {
if((!is.null(globalenv()$SimpleLog.CONFIG$colorize) && globalenv()$SimpleLog.CONFIG$colorize) && (o %in% c(stderr(), stdout()))){
color <- log$colors[lvl]
if(!is.na(color))
msg <- colourise(msg, color)
}
tryCatch(is.null(cat(msg, '\n', file=o, append=TRUE)),
error = function(e) FALSE)
}))
if(return.success) success else invisible(success)
}
})
setConstructorS3('Timer',
## timer object
function(log=NULL){
if(is.null(log)){
log <- SimpleLog('timerLog')
}
extend(Object(), 'Timer',
log=log)
})
setMethodS3('start.timer', 'Timer',
## start timing
## msg: message to print on start
## ...: args for write.msg
function(self, msg=NULL, ...){
if(!is.null(msg)){
write.msg(self$log,msg, ...)
}
self$startTime <- proc.time()[3]
})
setMethodS3('stop.timer', 'Timer',
## stop timing
## ...: args for write.msg
function(self, ...){
self$stopTime <- proc.time()[3]
dt <- self$stopTime - self$startTime
m <- as.integer(dt / 60)
s <- round(dt - 60 * m,1)
write.msg(self$log,
sprintf('elapsed time: %s', paste(m, 'm', s, 's')),
...)
})
}
timer <- function(expr){
## simple timer for an expression
s.expr <- substitute(expr)
t <- Timer()
start.timer(t, 'timing %s', paste(deparse(s.expr), collapse=''))
res <- eval(s.expr, parent.frame())
stop.timer(t)
res
}
stop.if <- function(x, msg, ..., tags=list(), cleanup = function(){}, failed.cond = substitute(x)){
## throw an exception if condition is met
## x: boolean value (true to throw exception)
## msg: error message
## tags: list of tags and values to be attached to the exception
## cleanup: a function with no arguments to be executed (like finally)
## failed.cond: expression representing the failed condition
call <- sys.call(1)
if(x){
err <- csplat(tag, c(list(simpleError(paste(c(sprintf(msg, ...),
'\n Failed condition: ', failed.cond),
collapse=''),
call)),
tags))
cleanup()
stop(err)
}
}
stop.if.not <- function(x, ...){
## throw an exception if the condition isnot met
## ...: args for stop if
failed.cond <- substitute(!(x))
stop.if(!x, ..., failed.cond = failed.cond)
}
dump.frames.on.failure <- function(on = TRUE){
## dumps frames for debugging upon unexpected exit
## on: turn on/off
if(on)
options(error = quote({ dump.frames('dump-frames', to.file = TRUE)
system('echo "frames dumped to dump-frames.rda"')
q()
}
))
else
options(error = NULL)
}
####################
#### URL encoding
####################
## http://svn.python.org/view/*checkout*/python/tags/r265/Lib/urllib.py?revision=79064&content-type=text%2Fplain
url.always.safe.chars <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","0","1","2","3","4","5","6","7","8","9","_",".","-")
url.reserved.chars <- c(";", "/", "?", ":", "@", "&", "=", "+", "$", ",")
url.quote <- function(s, reserved = url.reserved.chars, plus.spaces = T){
## url encode a string
## s: string to encode
## reserved: vector of reserved characters
## plus.spaces: convert spaces to +
chars <- int.to.char(1:255)
safe <- named(ifelse(chars %in% c(url.always.safe.chars, reserved), chars, sprintf('%%%.2X', 1:255)),
chars)
if(plus.spaces)
safe[[' ']] <- '+'
unlist(lapply(strcodes(s), function(chars) paste(safe[chars], collapse = '')))
}
url.unquote <- function(s, reserved = NULL, plus.spaces = T){
## url decode a string
## s: string to decode
## reserved: vector of reserved characters
## plus.spaces: convert + to spaces
chars <- int.to.char(1:255)
safe <- named(chars,
ifelse(chars %in% c(url.always.safe.chars, reserved), chars, sprintf('%.2X', 1:255)))
z <- lapply(strsplit(s, '%'),
function(xs){
y <- paste(safe[str_sub(xs[-1], end = 2)],
str_sub(xs[-1], start = 3),
sep = '')
z <- paste(c(xs[1], y), collapse = '')
if(plus.spaces)
gsub('\\+',' ',z)
else
z
})
unlist(z)
}
url.encode.params <- function(params, ...){
## encode a named list as url parameters
## params: named list of params
## ...: args for url.quote
params <- unlist(params)
paste(paste(url.quote(names(params), ...),
url.quote(as.character(params), ...),
sep = '='),
collapse = '&')
}
url.parse.params <- function(params.str, ...){
## parse parameters to named list
## params.str: string of url parameters
lapply(strsplit(params.str, '&'),
function(kv) url.unquote(unlist(zip.to.named(strsplit(kv, '='))), ...))
}
url.qry.string <- function(url)
## exctract the query string from url
## url: url from which to extract qs
unlist(lapply(strsplit(url, '\\?'), function(x) x[[2]]))
####################
#### Functions
####################
## evaluate an expression within an environment
"%within%" <- function(expr, envir) eval(substitute(expr), envir=envir)
tag <- function(x,...) {
## tag a value with attributes
tagged <- x
dots <- list(...)
if(length(dots) > 0){
for(i in indices(dots))
attr(tagged, names(dots)[i]) <- dots[[i]]
}
tagged
}
## read var name as string
var.name <- function(x) deparse(substitute(x))
## test if an environment is the global environment
is.global.env <- function(env) environmentName(env) == 'R_GlobalEnv'
## curry a function with 1 argument
curry1 <- function(f, ...){
function(x)
f(x, ...)
}
####################
#### Files
####################
rrmdir <- function(path,rm.contents.only=FALSE){
## recursivedly remove directory
## path: path to remove
## rm.contents.only: leave path untouched
path <- gsub('/( )*$','',path)
isDir <- file.info(path)$isdir
if(!is.na(isDir) && isDir){
for(i in dir(path)){
rrmdir(file.path(path,i),FALSE)
}
}
if(!rm.contents.only){
file.remove(path)
}
}
curl.cmd <- function(url, output.path, params = NULL, method = 'get', show.progress = NULL, custom.opts = ''){
## construct a curl command
stop.if.not(method %in% c('get','post'), 'method must be get or post')
stop.if.not(is.null(show.progress) || show.progress %in% c('bar','text'), 'progress must be bar or text')
if(!is.null(params))
ps <- url.encode.params(params, reserved=NULL)
else
ps <- ''
if(is.null(show.progress))
progress.opt <- '-s'
else if (show.progress == 'bar')
progress.opt <- '-#'
else if(show.progress == 'text')
progress.opt <- ''
method.opt <- if(method == 'get') '-X GET' else '-X POST'
sprintf('curl %s %s --data "%s" -o %s %s "%s"',
method.opt,
custom.opts,
ps,
output.path,
progress.opt,
url)
}
cache.data <- function(path, ..., cache.path='.cache', force=FALSE, log.level = SimpleLog.INFO){
## cache data by downloading (if necessary) using curl; return connection
logger <- SimpleLog('cache.data', log.level)
if(str_detect(path,'http[s]?://')){
path.hash <- digest(csplat(paste, list(...), path), 'md5')
cached.file <- file.path(cache.path, path.hash)
cmd <- curl.cmd(path, cached.file, ...)
write.msg(logger, 'curl command: %s', cmd, level = SimpleLog.DEBUG)
write.msg(logger, 'caching to %s', cached.file)
dir.create(cache.path, showWarnings = FALSE)
exit.code <- 0
if(!file.exists(cached.file) || force)
exit.code <- system(cmd)
else
write.msg(logger, 'reading from cache', cmd)
stop.if.not(exit.code == 0, 'failed to download file', cleanup = function() file.remove(cached.file) )
conn <- cached.file
}else{
conn <- path
}
conn
}
load.data <- function(path, load.fun, ..., cache.path = '.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data
logger <- SimpleLog('load.data', log.level)
if(missing(load.fun)){
write.msg(logger, 'missing "load.fun" function - calling load.table', level = SimpleLog.DEBUG)
return(load.table(path, ..., cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level))
}
if(is.list(path)){
path <- c(path, cache.path = cache.path, show.progress = show.progress, force = force, list(log.level = log.level))
conn <- cache.data %wargs% path
}else{
conn <- cache.data(path, cache.path = cache.path, force = force, log.level=log.level)
}
tryCatch({
options(warn=-1)
z <- get(load(conn))
options(warn=0)
z
},
error = function(e){
load.fun(conn, ...)
})
}
load.lines <- function(path, parser = NULL, cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data line by line
load <- function(conn)
readLines(conn, warn = F)
z <- load.data(path, load, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
if(!is.null(parser))
lapply(z, parser)
else
z
}
load.string <- function(path, parser = NULL, cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data as string
load <- function(conn)
file.to.string(conn)
z <- load.data(path, load, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
if(!is.null(parser))
parser(z)
else
z
}
load.table <- function(path, ..., sep='\t', header=T, comment.char='', quote='', cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data as table
load.fun <- function(conn)
read.table(conn, sep=sep, header=header, comment.char=comment.char, quote=quote, ...)
load.data(path, load.fun, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
}
serialize.to.text <- function(object, encoder=I){
## text serialization
encoder(rawToChar(serialize(m, connection = NULL, ascii=T)))
}
unserialize.from.text <- function(s, decoder=I){
## text deserialization
unserialize(decoder(textConnection(s)))
}
streaming.group.by.key <- function(f, get.key=function(x) x[[1]]){
## group streaming data by key
## assumes sorted by key!
## s <- textConnection('1\t2\n1\ta\n3\tb\n5\t3\t10'); streaming.group.by.key(function(lines) print(read.table(textConnection(unlist(lines)),sep='\t',header=F)), function(x) strsplit(x,'\t')[[1]][1])(s, 1100)
function(con, chunk.size = 1000){
if(!isOpen(con))
con <- open(con, open='r')
done.reading <- FALSE
buf <- list()
buf.keys <- list()
current.key <- NULL
repeat {
incoming <- as.list(readLines(con, chunk.size))
if (length(incoming) == 0)
done.reading <- TRUE
if(done.reading && length(buf) == 0)
break
buf <- c(buf, incoming)
buf.keys <- c(buf.keys, lapply(incoming, get.key))
if(is.null(current.key))
current.key <- buf.keys[[1]]
last.buf.key <- tail(buf.keys, 1)
if((current.key != last.buf.key) || done.reading){
to.process <- buf.keys == current.key
f(buf[to.process])
buf <- buf[!to.process]
buf.keys <- buf.keys[!to.process]
current.key <- if(length(buf.keys) > 0) buf.keys[[1]] else NA
}
}
}
}
run.once <- function(expr, store = 'run.once.store__', algo='md5', lazy = TRUE, log.level=SimpleLog.INFO){
## execute and expression and cache it
logger <- SimpleLog('run.once', log.level)
g <- globalenv()
expr.q <- substitute(expr)
expr.s <- paste0(deparse(expr.q), collapse='\n ')
var.name <- digest(expr.s, algo=algo)
write.msg(logger, 'caching \n %s\n into %s (envir = %s)', expr.s, var.name, store, level=SimpleLog.DEBUG)
if(!exists(store, g)){
write.msg(logger, 'initializing store %s', store, level=SimpleLog.DEBUG)
assign(store, new.env(), envir=g)
}
if(!(var.name %in% ls(g[[store]]))){
write.msg(logger, 'computing %s', var.name, level=SimpleLog.DEBUG)
assign(var.name, eval(expr.q), g[[store]])
}
g[[store]][[var.name]]
}
file.to.string <- function(file){
## read file contents to string
readChar(file, file.info(file)$size)
}
brew.string <- function(s,...){
dots <- list(...)
e <- if(length(dots) == 0) new.env() else list2env(dots)
brewedSql <- tempfile()
brew(text=s,output=brewedSql,envir=e)
sql <- file.to.string(brewedSql)
sql
}
####################
#### Lists/Vectors
####################
get.or.else <- function(x, field, default){
z <- x[[field]]
if(is.null(z))
z <- default
z
}
with.defaults <- function(xs, defaults){
ys <- xs
c(ys, defaults[setdiff(names(defaults), names(xs))])
}
csplat <- function(f,a,...)
do.call(f, c(as.list(a),...))
"%wargs%" <- function(f, a)
do.call(f, as.list(a))
indices <- function(xs, type){
if(missing(type))
f <- length
else if(type == 'col')
f <- ncol
else if(type == 'row')
f <- nrow
len <- f(xs)
if(len > 0) 1:len else NULL
}
na.rm <- function(x, required, discard = is.na) {
if(is.data.frame(x)){
if(missing(required))
required <- names(x)
keep <- Reduce(function(a,b) a & b, lapply(subset(x,select=required), function(y) !discard(y)), init=TRUE)
x[keep,]
}else
x[!discard(x)]
}
inf.rm <- function(...) na.rm(..., discard = is.infinite)
nan.rm <- function(...) na.rm(..., discard = is.nan)
invalid.rm <- function(...) na.rm(..., discard = function(z) is.na(z) | is.nan(z) | is.infinite(z))
is.invalid <- function(z) is.na(z) | is.nan(z) | is.infinite(z)
replicate <- function(n, expr, .parallel=FALSE)
## same as base replicate with parallel option
llply(integer(n), eval.parent(substitute(function(...) expr)),
.parallel=.parallel)
grep <- function(pat, x, ..., value=FALSE){
## same as base grep with option to return boolean match/no match
stop.if.not(is.logical(value) || (value == 'logical'), 'unknown value type "%s"', value)
if(value == 'logical'){
i <- base::grep(pat, x, ..., value = FALSE)
(1:length(x)) %in% i
}else{
base::grep(pat, x, ..., value=value)
}
}
tapply <- function (X.expr, INDEX.expr, FUN = NULL, simplify = TRUE, ret.type='list', envir = NULL) {
## modified tapply: 1) lookup inputs in envir if supplied, 2) list of inputs for 1st argument, 3) various output formats
if(is.null(envir)){
X <- X.expr
INDEX <- INDEX.expr
}else{
X <- eval(substitute(X.expr), envir = envir)
INDEX <- eval(substitute(INDEX.expr), envir = envir)
}
FUN <- if(!is.null(FUN))
match.fun(FUN)
if(!is.list(INDEX))
INDEX <- list(INDEX)
if(!is.list(X))
X <- list(X)
names(X) <- NULL
nI <- length(INDEX)
namelist <- vector("list", nI)
names(namelist) <- names(INDEX)
extent <- integer(nI)
nx <- length(X[[1]])
one <- 1L
group <- rep.int(one, nx)
ngroup <- one
for(i in seq_along(INDEX)){
index <- as.factor(INDEX[[i]])
if(length(index) != nx)
stop("arguments must have same length")
namelist[[i]] <- levels(index)
extent[i] <- nlevels(index)
group <- group + ngroup * (as.integer(index) - one)
ngroup <- ngroup * nlevels(index)
}
if(is.null(FUN))
return(group)
## use mapply/split to allow for multiple inputs
ans <- do.call(mapply, c(FUN, lapply(X, function(x) split(x, group)), SIMPLIFY=FALSE))
index <- as.integer(names(ans))
if(simplify && all(unlist(lapply(ans, length)) == 1L)){
ansmat <- array(dim = extent, dimnames = namelist)
ans <- unlist(ans, recursive = FALSE)
}
else{
ansmat <- array(vector("list", prod(extent)), dim = extent,
dimnames = namelist)
}
if(length(index)){
names(ans) <- NULL
ansmat[index] <- ans
}
## return types
if(ret.type == 'df'){
## create data.frame by expanding grid
ansmat <- data.frame(expand.grid(dimnames(ansmat)),y=do.call(rbind,as.list(ansmat)))
}else if(ret.type == 'par'){
## put output values into original spots
z <- vector('list',length(X[[1]]))
split(z, group) <- ansmat
ansmat <- unlist(z)
}else{
stop.if(ret.type != 'list', 'unknown ret.type "%s"', ret.type, tag = 'unknown.type')
}
ansmat
}
lzip <- function(...){
## zip multiple lists together
delayedAssign('args', lapply(list(...), as.list))
n <- min(sapply(args,length))
if(n <= 0)
return(NULL)
lapply(1:n,
function(i){
zip.to.named(lapply(indices(args),
function(j){
y <- args[j]
list(names(y)[1], y[[1]][[i]])
}))
})
}
"%zip%" <- function(a,b) lzip(a, b)
zip.to.named <- function(x,nameCol=1,valCol=2){
## convert zipped list to named list
flatten(lapply(x,
function(y){
z <- list(y[[valCol]])
names(z) <- y[[nameCol]]
z
}))
}
named <- function(x, n, type=''){
if(type == 'row')
row.names(x) <- n
else if(type == 'col')
colnames(x) <- n
else
names(x) <- n
x
}
"%named%" <- function(x, n) named(x, n)
"%rnamed%" <- function(x, n) named(x, n, 'row')
"%cnamed%" <- function(x, n) named(x, n, 'col')
remove.names <- function(x, type='')
named(x, NULL, type)
keep.if <- function(x,f){
mask <- sapply(x,f)
x[mask]
}
flatten <- function(x)
do.call(c,x)
merge.lists <- function(all,FUN=function(n,x){x}){
allNames <- unique(do.call(c,lapply(all,names)))
z <- lapply(allNames,
function(n){
z <- FUN(n,lapply(all,
function(x){
tryCatch(x[[n]],
error=function(e){NULL})
}))
z
})
names(z) <- allNames
z
}
setdiff2 <- function(x,y){
list(setdiff(x,y), setdiff(y, x))
}
make.combinations <- function(...){
## generate combinations taking an element from each input vector
dots <- list(...)
apply(expand.grid %wargs% dots, 1,
function(z) as.list(z))
}
parameter.scan <- function(f, params.list, as.df=FALSE, .parallel=FALSE){
## apply f over a list of parameters
z <- llply(params.list,
function(params){
#cbind(as.data.frame(params), f = f %wargs% params)
c(list(params=params), list(f = f %wargs% params))
},
.parallel=.parallel)
if(as.df)
rbind.fill(lapply(z, as.data.frame))
else
z
}
sample.by <- function(x,...,as.filter=TRUE){
if(is.factor(x))
g <- levels(x)
else
g <- unique(x)
g.s <- sample(g, ...)
if(as.filter)
x %in% g.s
else
x[x %in% g.s]
}
####################
#### Matrices
####################
sparse.matrix.from.file <- function(path, ..., what=list(integer(),integer(),numeric())){
entries <- scan(path, what=what, ...)
sparseMatrix(i=entries[[1]], j=entries[[2]], x=entries[[3]])
}
####################
#### Data.frame
####################
sort.by <- function(x, ..., decreasing=FALSE){
dots <- list(...)
x[csplat(order, lapply(dots, function(i) x[[i]]), decreasing = decreasing), ]
}
rbind.fill.par <- function(lst, m = 1000) {
## chunk and parallelize rbind.fill
n <- length(lst)
ldply(1:ceiling(n / m),
function(offset){
select <- 1 + ((m * (offset - 1)) : (m * offset - 1))
select <- select[select <= n]
ds <- lst[select]
rbind.fill(ds)
},
.parallel = TRUE)
}
max.element.str.length <- function(data,.parallel=FALSE){
maxLengths <- llply(names(data),
function(i){
max(str_length(i),
max(str_length(data[[i]])))
},
.parallel=.parallel)
names(maxLengths) <- names(data)
maxLengths
}
str.align <- function(data, maxLengths, .parallel=FALSE){
result <- llply(names(maxLengths),
function(i){
sapply(data[[i]],
function(j){
x <- as.character(j)
if(is.na(x)){x <- 'NA'}
n <- maxLengths[[i]] - str_length(x)
if(is.na(n)){
n <- 2
x <- 'NA'
}
fmtd <- x
if(n > 0){
fmtd <- sprintf('%s%s', x, paste(rep(' ',n), collapse = ''))
}
fmtd
})},
.parallel=.parallel)
names(result) <- names(maxLengths)
result
}
dataframe.to.tsv <- function(x, file, ..., sep='\t', row.names=F, quote=FALSE)
write.table(x, file=file, ..., sep=sep, row.names=row.names, quote=quote)
pprint.dataframe <- function(data, sep=' | ', prepend.row.names = ' ', .parallel=FALSE){
## pretty print data frame in text format
if(is.matrix(data))
data <- as.data.frame(data)
stop.if.not(is.data.frame(data), 'input must be data.frame')
if(!is.null(prepend.row.names) && !is.null(row.names(data)))
data <- named(cbind(row.names(data), data), c(prepend.row.names, names(data)))
maxLengths <- max.element.str.length(data,.parallel=.parallel)
header <- as.list(names(maxLengths))
names(header) <- header
result <- str.align(data,maxLengths,.parallel=.parallel)
result <- as.data.frame(result)
header <- paste(str.align(header, maxLengths), collapse = sep)
paste(header,
paste(rep('-',str_length(header)), collapse = ''),
paste(apply(result, 1,
function(x) paste(x, collapse = sep)
),
collapse = '\n'),
'',
sep = '\n')
}
dataframe.to.textile <- function(x, attr.for = function(e, i, j) NA, header = T, prepend.row.names = ' ', .parallel=FALSE){
## print dataframe to textile
row.to.tt <- function(row) paste('|', paste(as.character(row), collapse = ' |'), ' |', sep='')
add.attr <- function(e, i, j) {
attr <- attr.for(e, i, j)
if(is.na(attr)) paste(' ', e, sep='') else paste(attr, e, sep='. ')
}
if(is.matrix(x))
x <- as.data.frame(x)
if(!is.null(prepend.row.names) && !is.null(row.names(x)))
x <- named(cbind(row.names(x), x), c(prepend.row.names, names(x)))
zz <- if(header) paste('_', names(x), sep='. ') else NULL
paste(
list(if(header) row.to.tt(paste('_', names(x), sep='. ')) else NULL),
llply(indices(x, 'row'),
function(i){
row.to.tt(sapply(indices(x, 'col'),
function(j) add.attr(x[i,j], i, j)))
},
.parallel=.parallel),
collapse = '\n')
}
dataframe.to.html.table <- function(x,
table.attrs='class="sortable" border="1"',
th.attrs='style=font-size:24px',
add.tr.attr=function(i){''},
add.td.attr=function(i,j){''},
prepend.row.names = ' ',
.parallel=FALSE){
## print dataframe to html
if(is.matrix(x))
x <- as.data.frame(x)
if(!is.null(prepend.row.names) && !is.null(row.names(x)))
x <- named(cbind(row.names(x), x), c(prepend.row.names, names(x)))
if(nrow(x) == 0){
rows <- ''
}else{
rows <- paste(llply(1:nrow(x),
function(i){
z <- sprintf('<tr %s>%s</tr>',
add.tr.attr(i),
paste(lapply(1:ncol(x),
function(j){
sprintf('<td %s>%s</td>',
add.td.attr(i,j),
x[i,j])
}),
collapse = ''))
z
},.parallel=.parallel),
collapse = '\n')
}
headers <- sprintf('<tr>%s</tr>',
paste(lapply(colnames(x), function(c){sprintf('<th %s>%s</th>', th.attrs, c)}),
collapse = ''))
sprintf('<table %s>\n%s\n%s\n</table>',
table.attrs,
headers,
rows)
}
sorttable.import <- function(loc='http://www.kryogenix.org/code/browser/sorttable/sorttable.js'){
## add import for simple sortable tables
sprintf('<script src="%s"></script>
<style media="screen" type="text/css">
table.sortable thead {
background-color:#eee;
color:#666666;
font-weight: bold;
cursor: default;
}
</style>',loc)
}
###################
#### Date
###################
STANDARD.TIMESTAMP.FORMAT <- "%Y-%m-%d %H:%M:%S"
EPOCH <- strptime("1970-01-01 00:00:00", STANDARD.TIMESTAMP.FORMAT, tz="UTC")
MIN.SEC <- 60
HOUR.SEC <- 60 * MIN.SEC
DAY.SEC <- 24 * HOUR.SEC
unix.timestamp.to.fmt <- function(ts, fmt=STANDARD.TIMESTAMP.FORMAT, tz='UTC')
strptime(as.POSIXct(ts,origin=EPOCH), fmt, tz=tz)
unix.timestamp.now <- function() unclass(Sys.time())
today <- function(sep='-') unix.timestamp.to.fmt(unix.timestamp.now(), paste('%Y','%m','%d', sep='-'))
####################
#### Misc
####################
str.fmt <- function(s,...){
## named string formatting
dots <- list(...)
named.pat <- '(^|[^%])(%)\\(([A-Za-z0-9_.]+?)\\)(([0-9.]+)?[sdf])' ## name params can contain alphanumeric chars, underscores, and periods
unnamed.pat <- '(^|[^%])(%[sdf])'
named <- str_detect(s,named.pat)
ss <- s
params <- dots
if(named){
n <- as.vector(sapply(str_extract_all(s,named.pat),
function(x) gsub(named.pat,'\\3', x)))
stop.if(is.null(names(dots)) || any(str_length(names(dots))==0),
'requires named parameters')
stop.if(any(!n %in% names(dots)), 'missing params %s', paste(setdiff(n,names(dots)), collapse = ','))
## first escape things that percent symbols; then replace named params with unnamed params
ss <- gsub(named.pat,'\\1\\2\\4',
gsub(unnamed.pat,'\\1%\\2',s))
## get params in order of appearance
params <- dots[n]
}
csplat(sprintf, ss, params)
}
int.to.char <- chars8bit
char.to.int <- AsciiToInt
int.to.char.seq <- function(x, offset=0){
n <- as.integer((x-1) / 26) + 1
m <- as.integer((x-1) %% 26)
paste(rep(int.to.char(m + 97), n), collapse = '')
}
file.size.gb <- function(path)
file.info(path)$size / (1024^3)
object.size.gb <- function(x)
object.size(x) / (8 * 1024^3)
## estimate how many parallel workers the machine can support, based on
## free memory and the size of the inputs each worker will hold
## sizes.gb: (list of) input sizes in Gb; the largest drives the estimate
## default.memory.gb: fallback free-memory figure (see get.free.mem.gb)
## num.nested.par: levels of nested parallelism to budget for (the
##                 memory-bound estimate is split via the n-th root)
## max.procs.per.core, par.limit: intended upper bounds on the result
## NOTE(review): num.cores / max.num.procs are computed but never used in
## the final expression -- the returned estimate is memory-bound and capped
## only by par.limit; confirm whether max.num.procs should also cap it
## NOTE(review): multicore:::detectCores() relies on the superseded
## 'multicore' package; parallel::detectCores() is the modern equivalent
estimate.parallelism <- function(sizes.gb, default.memory.gb = 4, num.nested.par = 1, max.procs.per.core = 2, par.limit = Inf, log.level = SimpleLog.INFO){
  ## like floor(), but round up when the fractional part exceeds 0.8
  floor.8 <- function(x) {
    f.x <- floor(x)
    f.x + ((x -f.x) > 0.8)
  }
  log <- SimpleLog('estimate.parallelism', log.level)
  num.cores <- multicore:::detectCores()
  max.num.procs <- num.cores * max.procs.per.core
  ## heuristic multiplier: per-worker memory assumed ~100x the input size
  mul <- 100
  max.size.gb <- max(unlist(sizes.gb))
  est.proc.mem.usage <- max.size.gb * mul
  write.msg(log, sprintf('estimated memory usage: %.2f Gb', est.proc.mem.usage))
  mem.free.gb <- get.free.mem.gb(default.memory.gb)
  write.msg(log, sprintf('free memory: %.2f Gb', mem.free.gb))
  max.par <- floor(mem.free.gb / est.proc.mem.usage)
  ## take the num.nested.par-th root so the nested levels multiply out to
  ## at most max.par concurrent processes
  max.par.with.nested <- pmin(max.par, floor.8(max.par ^ (1 / num.nested.par)))
  pmax(1,pmin(par.limit, max.par.with.nested))
}
## probe the OS for free physical memory, in gigabytes
## default.memory.gb: value returned on platforms other than Linux/Darwin
## Linux: parses the 'buffers/cache' row of `free -k` (kilobytes -> Gb)
## Mac (Darwin): parses the PhysMem line of `top`, honoring a trailing
## G/M/K unit suffix; any other trailing character is treated as bytes
get.free.mem.gb <- function(default.memory.gb = 1, log.level=SimpleLog.INFO){
  log <- SimpleLog('get.free.mem.gb', log.level)
  uname <- system("uname", intern=TRUE)
  if(uname == 'Linux'){
    cmd <- "free -k | grep 'buffers/cache' | sed -r 's/\\s+/\\t/g' |cut -f4"
    write.msg(log,str.fmt('OS detected: Linux - command to get free memory\n    #> %(cmd)s', cmd=cmd))
    ## free -k reports kilobytes; two divisions by 1024 give gigabytes
    as.double(system(cmd, intern=TRUE)) / 1024^2
  }else if(uname == 'Darwin'){
    cmd <- "top -l 1 | grep PhysMem| awk -F', ' '{ print $NF }' | sed 's/\\s*free.*//g'"
    write.msg(log,str.fmt('OS detected: Mac - command to get free memory\n    #> %(cmd)s', cmd=cmd))
    mem.str <- str_trim(system(cmd, intern=TRUE))
    ## split the trailing unit character off the number, e.g. "1234M"
    mem.unit <- str_sub(mem.str,start=str_length(mem.str))
    mem.n <- as.double(str_sub(mem.str,end=str_length(mem.str)-1))
    d <- if(mem.unit == 'G') 1 else if(mem.unit == 'M') 1024 else if(mem.unit == 'K') 1024^2 else 1024^3
    mem.n / d
  }else
    default.memory.gb
}
| /utils.R | no_license | ellchow/moRe | R | false | false | 33,408 | r | ## Copyright 2013 Elliot Chow
## Licensed under the Apache License, Version 2.0 (the "License")
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
## http://www.apache.org/licenses/LICENSE-2.0
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
import('doMC',
'gdata',
'stringr',
'plyr',
'hash',
'R.oo',
'Hmisc',
'digest',
'sfsmisc',
'rjson',
'testthat',
'Matrix',
as.library='utils')
## options(width=110,scipen=6)
options(scipen=6, menu.graphics=FALSE)
####################
#### Logging
####################
if (!"SimpleLog" %in% ls()) {
setConstructorS3('SimpleLog',
## logging object
## id: id of the logger
## level: recognized logging levels
## colors: colors to use if colorizing logs
function(id='log',
level=c('info','warning','error','debug'),
colors = c('info'='light gray','warning'='yellow','error'='red','debug'='dark gray')
## outputs=stderr(),
## overwrite=TRUE
){
## if(overwrite){
## sapply(outputs[is.character(outputs) & outputs != ""],
## function(x){
## if(is.character(x)){
## file.remove(x)
## }
## })
## }
extend(Object(), 'SimpleLog',
id=id,
level=level,
colors=colors,
## outputs=outputs
outputs=stderr()
)
})
## logging levels
SimpleLog.INFO <- 'info'
SimpleLog.WARNING <- c(SimpleLog.INFO, 'warning')
SimpleLog.ERROR <- c(SimpleLog.WARNING, 'error')
SimpleLog.DEBUG <- c(SimpleLog.ERROR, 'debug')
if(!exists('SimpleLog.CONFIG', envir = globalenv())){
## global var to hold SimpleLog configuration
assign('SimpleLog.CONFIG', new.env(), envir=globalenv())
}
setMethodS3('write.msg','SimpleLog',
## write a message to standard error
## log: SimpleLog object
## ...: message (and string formatting parameters)
## level: level of the message
## sep: separating chars in log
## return.success: set to true if you should return whether or not the message was successfully written
function(log, ..., level=SimpleLog.INFO, sep=' - ', return.success=FALSE){
check <- TRUE
lvl <- intersect(tail(level,1), log$level)
if(length(lvl) > 0){
msg <- paste(list(format(Sys.time(), "%Y/%m/%d %H:%M:%S"), lvl, log$id, sprintf(...)), collapse = sep)
success <- all(sapply(log$outputs,
function(o) {
if((!is.null(globalenv()$SimpleLog.CONFIG$colorize) && globalenv()$SimpleLog.CONFIG$colorize) && (o %in% c(stderr(), stdout()))){
color <- log$colors[lvl]
if(!is.na(color))
msg <- colourise(msg, color)
}
tryCatch(is.null(cat(msg, '\n', file=o, append=TRUE)),
error = function(e) FALSE)
}))
if(return.success) success else invisible(success)
}
})
setConstructorS3('Timer',
## timer object
function(log=NULL){
if(is.null(log)){
log <- SimpleLog('timerLog')
}
extend(Object(), 'Timer',
log=log)
})
setMethodS3('start.timer', 'Timer',
## start timing
## msg: message to print on start
## ...: args for write.msg
function(self, msg=NULL, ...){
if(!is.null(msg)){
write.msg(self$log,msg, ...)
}
self$startTime <- proc.time()[3]
})
setMethodS3('stop.timer', 'Timer',
## stop timing
## ...: args for write.msg
function(self, ...){
self$stopTime <- proc.time()[3]
dt <- self$stopTime - self$startTime
m <- as.integer(dt / 60)
s <- round(dt - 60 * m,1)
write.msg(self$log,
sprintf('elapsed time: %s', paste(m, 'm', s, 's')),
...)
})
}
timer <- function(expr){
## simple timer for an expression
s.expr <- substitute(expr)
t <- Timer()
start.timer(t, 'timing %s', paste(deparse(s.expr), collapse=''))
res <- eval(s.expr, parent.frame())
stop.timer(t)
res
}
stop.if <- function(x, msg, ..., tags=list(), cleanup = function(){}, failed.cond = substitute(x)){
## throw an exception if condition is met
## x: boolean value (true to throw exception)
## msg: error message
## tags: list of tags and values to be attached to the exception
## cleanup: a function with no arguments to be executed (like finally)
## failed.cond: expression representing the failed condition
call <- sys.call(1)
if(x){
err <- csplat(tag, c(list(simpleError(paste(c(sprintf(msg, ...),
'\n Failed condition: ', failed.cond),
collapse=''),
call)),
tags))
cleanup()
stop(err)
}
}
stop.if.not <- function(x, ...){
## throw an exception if the condition isnot met
## ...: args for stop if
failed.cond <- substitute(!(x))
stop.if(!x, ..., failed.cond = failed.cond)
}
dump.frames.on.failure <- function(on = TRUE){
## dumps frames for debugging upon unexpected exit
## on: turn on/off
if(on)
options(error = quote({ dump.frames('dump-frames', to.file = TRUE)
system('echo "frames dumped to dump-frames.rda"')
q()
}
))
else
options(error = NULL)
}
####################
#### URL encoding
####################
## http://svn.python.org/view/*checkout*/python/tags/r265/Lib/urllib.py?revision=79064&content-type=text%2Fplain
url.always.safe.chars <- c("A","B","C","D","E","F","G","H","I","J","K","L","M","N","O","P","Q","R","S","T","U","V","W","X","Y","Z","a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z","0","1","2","3","4","5","6","7","8","9","_",".","-")
url.reserved.chars <- c(";", "/", "?", ":", "@", "&", "=", "+", "$", ",")
url.quote <- function(s, reserved = url.reserved.chars, plus.spaces = T){
## url encode a string
## s: string to encode
## reserved: vector of reserved characters
## plus.spaces: convert spaces to +
chars <- int.to.char(1:255)
safe <- named(ifelse(chars %in% c(url.always.safe.chars, reserved), chars, sprintf('%%%.2X', 1:255)),
chars)
if(plus.spaces)
safe[[' ']] <- '+'
unlist(lapply(strcodes(s), function(chars) paste(safe[chars], collapse = '')))
}
url.unquote <- function(s, reserved = NULL, plus.spaces = T){
## url decode a string
## s: string to decode
## reserved: vector of reserved characters
## plus.spaces: convert + to spaces
chars <- int.to.char(1:255)
safe <- named(chars,
ifelse(chars %in% c(url.always.safe.chars, reserved), chars, sprintf('%.2X', 1:255)))
z <- lapply(strsplit(s, '%'),
function(xs){
y <- paste(safe[str_sub(xs[-1], end = 2)],
str_sub(xs[-1], start = 3),
sep = '')
z <- paste(c(xs[1], y), collapse = '')
if(plus.spaces)
gsub('\\+',' ',z)
else
z
})
unlist(z)
}
url.encode.params <- function(params, ...){
## encode a named list as url parameters
## params: named list of params
## ...: args for url.quote
params <- unlist(params)
paste(paste(url.quote(names(params), ...),
url.quote(as.character(params), ...),
sep = '='),
collapse = '&')
}
url.parse.params <- function(params.str, ...){
## parse parameters to named list
## params.str: string of url parameters
lapply(strsplit(params.str, '&'),
function(kv) url.unquote(unlist(zip.to.named(strsplit(kv, '='))), ...))
}
url.qry.string <- function(url)
  ## extract the query string (the part after the first '?') from each url
  ## url: character vector of urls; errors (subscript out of bounds) when
  ## a url contains no '?'
  unlist(lapply(strsplit(url, '\\?'), function(x) x[[2]]))
####################
#### Functions
####################
## evaluate an expression within an environment
"%within%" <- function(expr, envir) eval(substitute(expr), envir=envir)
## attach the named arguments in ... to x as attributes; returns the
## tagged copy of x
tag <- function(x, ...) {
  attrs <- list(...)
  for (i in seq_along(attrs)) {
    attr(x, names(attrs)[i]) <- attrs[[i]]
  }
  x
}
## read var name as string
var.name <- function(x) deparse(substitute(x))
## test if an environment is the global environment
is.global.env <- function(env) environmentName(env) == 'R_GlobalEnv'
## curry a function with 1 argument
## partially apply f, leaving only its first argument free:
## curry1(f, a, b)(x) is f(x, a, b)
curry1 <- function(f, ...) function(x) f(x, ...)
####################
#### Files
####################
rrmdir <- function(path,rm.contents.only=FALSE){
## recursivedly remove directory
## path: path to remove
## rm.contents.only: leave path untouched
path <- gsub('/( )*$','',path)
isDir <- file.info(path)$isdir
if(!is.na(isDir) && isDir){
for(i in dir(path)){
rrmdir(file.path(path,i),FALSE)
}
}
if(!rm.contents.only){
file.remove(path)
}
}
curl.cmd <- function(url, output.path, params = NULL, method = 'get', show.progress = NULL, custom.opts = ''){
## construct a curl command
stop.if.not(method %in% c('get','post'), 'method must be get or post')
stop.if.not(is.null(show.progress) || show.progress %in% c('bar','text'), 'progress must be bar or text')
if(!is.null(params))
ps <- url.encode.params(params, reserved=NULL)
else
ps <- ''
if(is.null(show.progress))
progress.opt <- '-s'
else if (show.progress == 'bar')
progress.opt <- '-#'
else if(show.progress == 'text')
progress.opt <- ''
method.opt <- if(method == 'get') '-X GET' else '-X POST'
sprintf('curl %s %s --data "%s" -o %s %s "%s"',
method.opt,
custom.opts,
ps,
output.path,
progress.opt,
url)
}
cache.data <- function(path, ..., cache.path='.cache', force=FALSE, log.level = SimpleLog.INFO){
## cache data by downloading (if necessary) using curl; return connection
logger <- SimpleLog('cache.data', log.level)
if(str_detect(path,'http[s]?://')){
path.hash <- digest(csplat(paste, list(...), path), 'md5')
cached.file <- file.path(cache.path, path.hash)
cmd <- curl.cmd(path, cached.file, ...)
write.msg(logger, 'curl command: %s', cmd, level = SimpleLog.DEBUG)
write.msg(logger, 'caching to %s', cached.file)
dir.create(cache.path, showWarnings = FALSE)
exit.code <- 0
if(!file.exists(cached.file) || force)
exit.code <- system(cmd)
else
write.msg(logger, 'reading from cache', cmd)
stop.if.not(exit.code == 0, 'failed to download file', cleanup = function() file.remove(cached.file) )
conn <- cached.file
}else{
conn <- path
}
conn
}
load.data <- function(path, load.fun, ..., cache.path = '.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data
logger <- SimpleLog('load.data', log.level)
if(missing(load.fun)){
write.msg(logger, 'missing "load.fun" function - calling load.table', level = SimpleLog.DEBUG)
return(load.table(path, ..., cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level))
}
if(is.list(path)){
path <- c(path, cache.path = cache.path, show.progress = show.progress, force = force, list(log.level = log.level))
conn <- cache.data %wargs% path
}else{
conn <- cache.data(path, cache.path = cache.path, force = force, log.level=log.level)
}
tryCatch({
options(warn=-1)
z <- get(load(conn))
options(warn=0)
z
},
error = function(e){
load.fun(conn, ...)
})
}
load.lines <- function(path, parser = NULL, cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data line by line
load <- function(conn)
readLines(conn, warn = F)
z <- load.data(path, load, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
if(!is.null(parser))
lapply(z, parser)
else
z
}
load.string <- function(path, parser = NULL, cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data as string
load <- function(conn)
file.to.string(conn)
z <- load.data(path, load, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
if(!is.null(parser))
parser(z)
else
z
}
load.table <- function(path, ..., sep='\t', header=T, comment.char='', quote='', cache.path='.cache', show.progress = NULL, force=FALSE, log.level = SimpleLog.INFO){
## cache/load data as table
load.fun <- function(conn)
read.table(conn, sep=sep, header=header, comment.char=comment.char, quote=quote, ...)
load.data(path, load.fun, cache.path = cache.path, show.progress = show.progress, force = force, log.level = log.level)
}
serialize.to.text <- function(object, encoder=I){
  ## text serialization: ascii-serialize `object` to a character string,
  ## then pass it through `encoder` (identity by default)
  ## fix: the body previously serialized the free variable `m` instead of
  ## the `object` argument, so it only worked by accident when some `m`
  ## happened to exist in the calling environment
  encoder(rawToChar(serialize(object, connection = NULL, ascii = TRUE)))
}
unserialize.from.text <- function(s, decoder=I){
  ## text deserialization: inverse of serialize.to.text
  ## s: string holding an ascii serialization (possibly encoded)
  ## decoder: inverse of the encoder used at serialization time; must
  ##          return the ascii serialization as a character string
  ## fixes: `decoder` was applied to a textConnection instead of the
  ## string (asymmetric with serialize.to.text's encoder), and the
  ## textConnection was never closed (connection leak); unserializing
  ## from a raw vector avoids the connection entirely
  unserialize(charToRaw(decoder(s)))
}
streaming.group.by.key <- function(f, get.key=function(x) x[[1]]){
## group streaming data by key
## assumes sorted by key!
## s <- textConnection('1\t2\n1\ta\n3\tb\n5\t3\t10'); streaming.group.by.key(function(lines) print(read.table(textConnection(unlist(lines)),sep='\t',header=F)), function(x) strsplit(x,'\t')[[1]][1])(s, 1100)
function(con, chunk.size = 1000){
if(!isOpen(con))
con <- open(con, open='r')
done.reading <- FALSE
buf <- list()
buf.keys <- list()
current.key <- NULL
repeat {
incoming <- as.list(readLines(con, chunk.size))
if (length(incoming) == 0)
done.reading <- TRUE
if(done.reading && length(buf) == 0)
break
buf <- c(buf, incoming)
buf.keys <- c(buf.keys, lapply(incoming, get.key))
if(is.null(current.key))
current.key <- buf.keys[[1]]
last.buf.key <- tail(buf.keys, 1)
if((current.key != last.buf.key) || done.reading){
to.process <- buf.keys == current.key
f(buf[to.process])
buf <- buf[!to.process]
buf.keys <- buf.keys[!to.process]
current.key <- if(length(buf.keys) > 0) buf.keys[[1]] else NA
}
}
}
}
run.once <- function(expr, store = 'run.once.store__', algo='md5', lazy = TRUE, log.level=SimpleLog.INFO){
## execute and expression and cache it
logger <- SimpleLog('run.once', log.level)
g <- globalenv()
expr.q <- substitute(expr)
expr.s <- paste0(deparse(expr.q), collapse='\n ')
var.name <- digest(expr.s, algo=algo)
write.msg(logger, 'caching \n %s\n into %s (envir = %s)', expr.s, var.name, store, level=SimpleLog.DEBUG)
if(!exists(store, g)){
write.msg(logger, 'initializing store %s', store, level=SimpleLog.DEBUG)
assign(store, new.env(), envir=g)
}
if(!(var.name %in% ls(g[[store]]))){
write.msg(logger, 'computing %s', var.name, level=SimpleLog.DEBUG)
assign(var.name, eval(expr.q), g[[store]])
}
g[[store]][[var.name]]
}
file.to.string <- function(file){
## read file contents to string
readChar(file, file.info(file)$size)
}
brew.string <- function(s,...){
dots <- list(...)
e <- if(length(dots) == 0) new.env() else list2env(dots)
brewedSql <- tempfile()
brew(text=s,output=brewedSql,envir=e)
sql <- file.to.string(brewedSql)
sql
}
####################
#### Lists/Vectors
####################
## look up `field` in x, falling back to `default` when the lookup
## yields NULL (i.e. the field is absent)
get.or.else <- function(x, field, default) {
  value <- x[[field]]
  if (is.null(value)) default else value
}
## fill in entries of `defaults` whose names are absent from `xs`;
## entries already in `xs` win
with.defaults <- function(xs, defaults) {
  missing.names <- setdiff(names(defaults), names(xs))
  c(xs, defaults[missing.names])
}
## call f with the elements of `a` as its arguments, followed by any
## extra arguments given directly in `...`
csplat <- function(f, a, ...) {
  args <- c(as.list(a), list(...))
  do.call(f, args)
}
## infix splat: call f with the elements of `a` as its argument list,
## e.g. sum %wargs% list(1, 2, 3)
"%wargs%" <- function(f, a)
  do.call(f, as.list(a))
## index vector 1..n for xs, or NULL when xs is empty
## type: missing => n = length(xs); 'col' => ncol(xs); 'row' => nrow(xs)
indices <- function(xs, type){
  if (missing(type)) {
    measure <- length
  } else if (type == 'col') {
    measure <- ncol
  } else if (type == 'row') {
    measure <- nrow
  }
  count <- measure(xs)
  if (count > 0) seq_len(count) else NULL
}
na.rm <- function(x, required, discard = is.na) {
if(is.data.frame(x)){
if(missing(required))
required <- names(x)
keep <- Reduce(function(a,b) a & b, lapply(subset(x,select=required), function(y) !discard(y)), init=TRUE)
x[keep,]
}else
x[!discard(x)]
}
inf.rm <- function(...) na.rm(..., discard = is.infinite)
nan.rm <- function(...) na.rm(..., discard = is.nan)
invalid.rm <- function(...) na.rm(..., discard = function(z) is.na(z) | is.nan(z) | is.infinite(z))
is.invalid <- function(z) is.na(z) | is.nan(z) | is.infinite(z)
replicate <- function(n, expr, .parallel=FALSE)
## same as base replicate with parallel option
llply(integer(n), eval.parent(substitute(function(...) expr)),
.parallel=.parallel)
grep <- function(pat, x, ..., value=FALSE){
## same as base grep with option to return boolean match/no match
stop.if.not(is.logical(value) || (value == 'logical'), 'unknown value type "%s"', value)
if(value == 'logical'){
i <- base::grep(pat, x, ..., value = FALSE)
(1:length(x)) %in% i
}else{
base::grep(pat, x, ..., value=value)
}
}
tapply <- function (X.expr, INDEX.expr, FUN = NULL, simplify = TRUE, ret.type='list', envir = NULL) {
## modified tapply: 1) lookup inputs in envir if supplied, 2) list of inputs for 1st argument, 3) various output formats
if(is.null(envir)){
X <- X.expr
INDEX <- INDEX.expr
}else{
X <- eval(substitute(X.expr), envir = envir)
INDEX <- eval(substitute(INDEX.expr), envir = envir)
}
FUN <- if(!is.null(FUN))
match.fun(FUN)
if(!is.list(INDEX))
INDEX <- list(INDEX)
if(!is.list(X))
X <- list(X)
names(X) <- NULL
nI <- length(INDEX)
namelist <- vector("list", nI)
names(namelist) <- names(INDEX)
extent <- integer(nI)
nx <- length(X[[1]])
one <- 1L
group <- rep.int(one, nx)
ngroup <- one
for(i in seq_along(INDEX)){
index <- as.factor(INDEX[[i]])
if(length(index) != nx)
stop("arguments must have same length")
namelist[[i]] <- levels(index)
extent[i] <- nlevels(index)
group <- group + ngroup * (as.integer(index) - one)
ngroup <- ngroup * nlevels(index)
}
if(is.null(FUN))
return(group)
## use mapply/split to allow for multiple inputs
ans <- do.call(mapply, c(FUN, lapply(X, function(x) split(x, group)), SIMPLIFY=FALSE))
index <- as.integer(names(ans))
if(simplify && all(unlist(lapply(ans, length)) == 1L)){
ansmat <- array(dim = extent, dimnames = namelist)
ans <- unlist(ans, recursive = FALSE)
}
else{
ansmat <- array(vector("list", prod(extent)), dim = extent,
dimnames = namelist)
}
if(length(index)){
names(ans) <- NULL
ansmat[index] <- ans
}
## return types
if(ret.type == 'df'){
## create data.frame by expanding grid
ansmat <- data.frame(expand.grid(dimnames(ansmat)),y=do.call(rbind,as.list(ansmat)))
}else if(ret.type == 'par'){
## put output values into original spots
z <- vector('list',length(X[[1]]))
split(z, group) <- ansmat
ansmat <- unlist(z)
}else{
stop.if(ret.type != 'list', 'unknown ret.type "%s"', ret.type, tag = 'unknown.type')
}
ansmat
}
lzip <- function(...){
## zip multiple lists together
delayedAssign('args', lapply(list(...), as.list))
n <- min(sapply(args,length))
if(n <= 0)
return(NULL)
lapply(1:n,
function(i){
zip.to.named(lapply(indices(args),
function(j){
y <- args[j]
list(names(y)[1], y[[1]][[i]])
}))
})
}
"%zip%" <- function(a,b) lzip(a, b)
zip.to.named <- function(x,nameCol=1,valCol=2){
## convert zipped list to named list
flatten(lapply(x,
function(y){
z <- list(y[[valCol]])
names(z) <- y[[nameCol]]
z
}))
}
## return x with names attached: type '' (default) sets names(),
## 'row' sets row names, 'col' sets column names
named <- function(x, n, type=''){
  setter <- if (type == 'row') `row.names<-`
            else if (type == 'col') `colnames<-`
            else `names<-`
  setter(x, n)
}
"%named%" <- function(x, n) named(x, n)
"%rnamed%" <- function(x, n) named(x, n, 'row')
"%cnamed%" <- function(x, n) named(x, n, 'col')
remove.names <- function(x, type='')
named(x, NULL, type)
## keep the elements of x for which the predicate f returns TRUE
keep.if <- function(x, f) {
  x[sapply(x, f)]
}
## concatenate a list of vectors/lists into a single one (via c())
flatten <- function(xs)
  do.call(c, xs)
merge.lists <- function(all,FUN=function(n,x){x}){
allNames <- unique(do.call(c,lapply(all,names)))
z <- lapply(allNames,
function(n){
z <- FUN(n,lapply(all,
function(x){
tryCatch(x[[n]],
error=function(e){NULL})
}))
z
})
names(z) <- allNames
z
}
## two-way set difference:
## list(elements only in x, elements only in y)
setdiff2 <- function(x, y) list(setdiff(x, y), setdiff(y, x))
make.combinations <- function(...){
## generate combinations taking an element from each input vector
dots <- list(...)
apply(expand.grid %wargs% dots, 1,
function(z) as.list(z))
}
parameter.scan <- function(f, params.list, as.df=FALSE, .parallel=FALSE){
## apply f over a list of parameters
z <- llply(params.list,
function(params){
#cbind(as.data.frame(params), f = f %wargs% params)
c(list(params=params), list(f = f %wargs% params))
},
.parallel=.parallel)
if(as.df)
rbind.fill(lapply(z, as.data.frame))
else
z
}
sample.by <- function(x,...,as.filter=TRUE){
if(is.factor(x))
g <- levels(x)
else
g <- unique(x)
g.s <- sample(g, ...)
if(as.filter)
x %in% g.s
else
x[x %in% g.s]
}
####################
#### Matrices
####################
sparse.matrix.from.file <- function(path, ..., what=list(integer(),integer(),numeric())){
entries <- scan(path, what=what, ...)
sparseMatrix(i=entries[[1]], j=entries[[2]], x=entries[[3]])
}
####################
#### Data.frame
####################
sort.by <- function(x, ..., decreasing=FALSE){
dots <- list(...)
x[csplat(order, lapply(dots, function(i) x[[i]]), decreasing = decreasing), ]
}
rbind.fill.par <- function(lst, m = 1000) {
## chunk and parallelize rbind.fill
n <- length(lst)
ldply(1:ceiling(n / m),
function(offset){
select <- 1 + ((m * (offset - 1)) : (m * offset - 1))
select <- select[select <= n]
ds <- lst[select]
rbind.fill(ds)
},
.parallel = TRUE)
}
max.element.str.length <- function(data,.parallel=FALSE){
maxLengths <- llply(names(data),
function(i){
max(str_length(i),
max(str_length(data[[i]])))
},
.parallel=.parallel)
names(maxLengths) <- names(data)
maxLengths
}
str.align <- function(data, maxLengths, .parallel=FALSE){
result <- llply(names(maxLengths),
function(i){
sapply(data[[i]],
function(j){
x <- as.character(j)
if(is.na(x)){x <- 'NA'}
n <- maxLengths[[i]] - str_length(x)
if(is.na(n)){
n <- 2
x <- 'NA'
}
fmtd <- x
if(n > 0){
fmtd <- sprintf('%s%s', x, paste(rep(' ',n), collapse = ''))
}
fmtd
})},
.parallel=.parallel)
names(result) <- names(maxLengths)
result
}
dataframe.to.tsv <- function(x, file, ..., sep='\t', row.names=F, quote=FALSE)
write.table(x, file=file, ..., sep=sep, row.names=row.names, quote=quote)
pprint.dataframe <- function(data, sep=' | ', prepend.row.names = ' ', .parallel=FALSE){
## pretty print data frame in text format
if(is.matrix(data))
data <- as.data.frame(data)
stop.if.not(is.data.frame(data), 'input must be data.frame')
if(!is.null(prepend.row.names) && !is.null(row.names(data)))
data <- named(cbind(row.names(data), data), c(prepend.row.names, names(data)))
maxLengths <- max.element.str.length(data,.parallel=.parallel)
header <- as.list(names(maxLengths))
names(header) <- header
result <- str.align(data,maxLengths,.parallel=.parallel)
result <- as.data.frame(result)
header <- paste(str.align(header, maxLengths), collapse = sep)
paste(header,
paste(rep('-',str_length(header)), collapse = ''),
paste(apply(result, 1,
function(x) paste(x, collapse = sep)
),
collapse = '\n'),
'',
sep = '\n')
}
dataframe.to.textile <- function(x, attr.for = function(e, i, j) NA, header = T, prepend.row.names = ' ', .parallel=FALSE){
## print dataframe to textile
row.to.tt <- function(row) paste('|', paste(as.character(row), collapse = ' |'), ' |', sep='')
add.attr <- function(e, i, j) {
attr <- attr.for(e, i, j)
if(is.na(attr)) paste(' ', e, sep='') else paste(attr, e, sep='. ')
}
if(is.matrix(x))
x <- as.data.frame(x)
if(!is.null(prepend.row.names) && !is.null(row.names(x)))
x <- named(cbind(row.names(x), x), c(prepend.row.names, names(x)))
zz <- if(header) paste('_', names(x), sep='. ') else NULL
paste(
list(if(header) row.to.tt(paste('_', names(x), sep='. ')) else NULL),
llply(indices(x, 'row'),
function(i){
row.to.tt(sapply(indices(x, 'col'),
function(j) add.attr(x[i,j], i, j)))
},
.parallel=.parallel),
collapse = '\n')
}
dataframe.to.html.table <- function(x,
table.attrs='class="sortable" border="1"',
th.attrs='style=font-size:24px',
add.tr.attr=function(i){''},
add.td.attr=function(i,j){''},
prepend.row.names = ' ',
.parallel=FALSE){
## print dataframe to html
if(is.matrix(x))
x <- as.data.frame(x)
if(!is.null(prepend.row.names) && !is.null(row.names(x)))
x <- named(cbind(row.names(x), x), c(prepend.row.names, names(x)))
if(nrow(x) == 0){
rows <- ''
}else{
rows <- paste(llply(1:nrow(x),
function(i){
z <- sprintf('<tr %s>%s</tr>',
add.tr.attr(i),
paste(lapply(1:ncol(x),
function(j){
sprintf('<td %s>%s</td>',
add.td.attr(i,j),
x[i,j])
}),
collapse = ''))
z
},.parallel=.parallel),
collapse = '\n')
}
headers <- sprintf('<tr>%s</tr>',
paste(lapply(colnames(x), function(c){sprintf('<th %s>%s</th>', th.attrs, c)}),
collapse = ''))
sprintf('<table %s>\n%s\n%s\n</table>',
table.attrs,
headers,
rows)
}
sorttable.import <- function(loc='http://www.kryogenix.org/code/browser/sorttable/sorttable.js'){
## add import for simple sortable tables
sprintf('<script src="%s"></script>
<style media="screen" type="text/css">
table.sortable thead {
background-color:#eee;
color:#666666;
font-weight: bold;
cursor: default;
}
</style>',loc)
}
###################
#### Date
###################
STANDARD.TIMESTAMP.FORMAT <- "%Y-%m-%d %H:%M:%S"
EPOCH <- strptime("1970-01-01 00:00:00", STANDARD.TIMESTAMP.FORMAT, tz="UTC")
MIN.SEC <- 60
HOUR.SEC <- 60 * MIN.SEC
DAY.SEC <- 24 * HOUR.SEC
unix.timestamp.to.fmt <- function(ts, fmt=STANDARD.TIMESTAMP.FORMAT, tz='UTC')
strptime(as.POSIXct(ts,origin=EPOCH), fmt, tz=tz)
unix.timestamp.now <- function() unclass(Sys.time())
today <- function(sep='-') unix.timestamp.to.fmt(unix.timestamp.now(), paste('%Y','%m','%d', sep='-'))
####################
#### Misc
####################
str.fmt <- function(s,...){
## named string formatting
dots <- list(...)
named.pat <- '(^|[^%])(%)\\(([A-Za-z0-9_.]+?)\\)(([0-9.]+)?[sdf])' ## name params can contain alphanumeric chars, underscores, and periods
unnamed.pat <- '(^|[^%])(%[sdf])'
named <- str_detect(s,named.pat)
ss <- s
params <- dots
if(named){
n <- as.vector(sapply(str_extract_all(s,named.pat),
function(x) gsub(named.pat,'\\3', x)))
stop.if(is.null(names(dots)) || any(str_length(names(dots))==0),
'requires named parameters')
stop.if(any(!n %in% names(dots)), 'missing params %s', paste(setdiff(n,names(dots)), collapse = ','))
## first escape things that percent symbols; then replace named params with unnamed params
ss <- gsub(named.pat,'\\1\\2\\4',
gsub(unnamed.pat,'\\1%\\2',s))
## get params in order of appearance
params <- dots[n]
}
csplat(sprintf, ss, params)
}
int.to.char <- chars8bit
char.to.int <- AsciiToInt
int.to.char.seq <- function(x, offset=0){
n <- as.integer((x-1) / 26) + 1
m <- as.integer((x-1) %% 26)
paste(rep(int.to.char(m + 97), n), collapse = '')
}
## size of the file at `path`, in gigabytes
file.size.gb <- function(path)
  file.info(path)$size / (1024^3)
## approximate in-memory size of object `x`, in gigabytes
## fix: object.size() already reports bytes, so the previous division by
## 8 * 1024^3 (as if converting bits to bytes) understated the size by 8x
object.size.gb <- function(x)
  object.size(x) / (1024^3)
estimate.parallelism <- function(sizes.gb, default.memory.gb = 4, num.nested.par = 1, max.procs.per.core = 2, par.limit = Inf, log.level = SimpleLog.INFO){
floor.8 <- function(x) {
f.x <- floor(x)
f.x + ((x -f.x) > 0.8)
}
log <- SimpleLog('estimate.parallelism', log.level)
num.cores <- multicore:::detectCores()
max.num.procs <- num.cores * max.procs.per.core
mul <- 100
max.size.gb <- max(unlist(sizes.gb))
est.proc.mem.usage <- max.size.gb * mul
write.msg(log, sprintf('estimated memory usage: %.2f Gb', est.proc.mem.usage))
mem.free.gb <- get.free.mem.gb(default.memory.gb)
write.msg(log, sprintf('free memory: %.2f Gb', mem.free.gb))
max.par <- floor(mem.free.gb / est.proc.mem.usage)
max.par.with.nested <- pmin(max.par, floor.8(max.par ^ (1 / num.nested.par)))
pmax(1,pmin(par.limit, max.par.with.nested))
}
get.free.mem.gb <- function(default.memory.gb = 1, log.level=SimpleLog.INFO){
log <- SimpleLog('get.free.mem.gb', log.level)
uname <- system("uname", intern=TRUE)
if(uname == 'Linux'){
cmd <- "free -k | grep 'buffers/cache' | sed -r 's/\\s+/\\t/g' |cut -f4"
write.msg(log,str.fmt('OS detected: Linux - command to get free memory\n #> %(cmd)s', cmd=cmd))
as.double(system(cmd, intern=TRUE)) / 1024^2
}else if(uname == 'Darwin'){
cmd <- "top -l 1 | grep PhysMem| awk -F', ' '{ print $NF }' | sed 's/\\s*free.*//g'"
write.msg(log,str.fmt('OS detected: Mac - command to get free memory\n #> %(cmd)s', cmd=cmd))
mem.str <- str_trim(system(cmd, intern=TRUE))
mem.unit <- str_sub(mem.str,start=str_length(mem.str))
mem.n <- as.double(str_sub(mem.str,end=str_length(mem.str)-1))
d <- if(mem.unit == 'G') 1 else if(mem.unit == 'M') 1024 else if(mem.unit == 'K') 1024^2 else 1024^3
mem.n / d
}else
default.memory.gb
}
|
#' data2 - prostate \{ElemStatLearn\}
#'
#' A dataset containing y and 10 predictors of 97 cancer patients.
#'
#' @format A data frame with 97 rows and 9 variables:
#'
#' \describe{
#' \item{y}{}
#' \item{lcavol}{}
#' \item{lweight}{}
#' \item{age}{}
#' \item{lbph}{}
#' \item{svi}{}
#' \item{gleason}{}
#' \item{pgg45}{}
#' }
#' @source \{ElemStatLearn\}
"data2"
| /R/data2.R | no_license | guhjy/BLasso | R | false | false | 381 | r | #' data2 - prostate \{ElemStatLearn\}
#'
#' A dataset containing y and 10 predictors of 97 cancer patients.
#'
#' @format A data frame with 97 rows and 9 variables:
#'
#' \describe{
#' \item{y}{}
#' \item{lcavol}{}
#' \item{lweight}{}
#' \item{age}{}
#' \item{lbph}{}
#' \item{svi}{}
#' \item{gleason}{}
#' \item{pgg45}{}
#' }
#' @source \{ElemStatLearn\}
"data2"
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.