content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
## Simulation parameters: means of exponential samples (rate = lambda),
## n draws per sample, repeated sim times -- a CLT demonstration.
set.seed(13053312)
lambda <- 0.2
n <- 40
sim <- 1000
## Simulate: one row per simulation, n draws per row.
sim_test <- matrix(data = rexp(n * sim, lambda), sim)
sim_test_Means <- apply(sim_test, 1, mean)
## Range & histogram of the sample means.
range(sim_test_Means)
hist(sim_test_Means, breaks = 40, col = "lightblue", border = FALSE,
     main = "Rexp mean distribution", xlab = "mean", xlim = c(2.5, 8.5))
## Sample mean vs theoretical mean (1/lambda for the exponential).
rexp_sim_mean <- mean(sim_test_Means)
theoretical_mean <- 1 / lambda  # use the parameter, not the magic constant 1/0.2
## Visualize both means on the histogram (lwd is numeric, not a string).
abline(v = rexp_sim_mean, lwd = 2, col = "blue")
abline(v = theoretical_mean, lwd = 2, col = "red")
legend("topright", c("simulation", "theoretical"), lty = c(1, 1),
       col = c("blue", "red"))
## Sample variance vs theoretical variance of the mean: (1/lambda)^2 / n.
rexp_sim_var <- var(sim_test_Means)
theoretical_var <- (1 / lambda)^2 / n
## Density histogram with the theoretical normal overlay (CLT comparison).
hist(sim_test_Means, prob = TRUE, breaks = 40, col = "lightblue", border = FALSE,
     main = "rexp mean distribution", xlab = "mean", xlim = c(2.5, 8.5))
curve(dnorm(x, mean = theoretical_mean, sd = sqrt(theoretical_var)),
      add = TRUE, lwd = 2, col = "red")
lines(density(sim_test_Means), lwd = 2, col = "blue")
legend("topright", c("simulation distribution", "theoretical distribution"),
       lty = c(1, 1), col = c("blue", "red"))
|
/R06 - Statistical inference/SI_CP.R
|
no_license
|
dasmey/Portfolio
|
R
| false
| false
| 1,191
|
r
|
## Simulation parameters: means of exponential samples (rate = lambda),
## n draws per sample, repeated sim times -- a CLT demonstration.
set.seed(13053312)
lambda <- 0.2
n <- 40
sim <- 1000
## simulating: one row per simulation, n draws per row
sim_test <- matrix(data=rexp(n * sim, lambda), sim)
sim_test_Means <- apply(sim_test, 1, mean)
## range & histogram of the sample means
range(sim_test_Means)
hist(sim_test_Means , breaks=40 , col="lightblue", border=F , main="Rexp mean distribution", xlab = "mean", xlim = c(2.5,8.5))
## sample mean vs theoretical mean (1/lambda for the exponential)
rexp_sim_mean <- mean(sim_test_Means)
# NOTE(review): 1/0.2 hard-codes lambda; 1/lambda would stay in sync if it changes
theoretical_mean <- 1/0.2
## visualize both means on the first histogram
# NOTE(review): lwd="2" passes a string where a numeric is expected; lwd=2 is correct
abline(v = rexp_sim_mean, lwd="2", col="blue")
abline(v = theoretical_mean, lwd="2", col = "red")
legend('topright', c("simulation", "theoretical"), lty=c(1,1), col=c("blue", "red"))
## sample variance vs theoretical variance of the mean: (1/lambda)^2 / n
rexp_sim_var <- var(sim_test_Means)
theoretical_var <- (1/lambda)^2/n
## density histogram with theoretical normal overlay (CLT comparison)
hist(sim_test_Means , prob = TRUE, breaks=40 , col="lightblue", border=F , main="rexp mean distribution", xlab = "mean", xlim = c(2.5,8.5))
curve(dnorm(x, mean=theoretical_mean, sd=sqrt(theoretical_var)), add = TRUE, lwd = 2, col = "red")
lines(density(sim_test_Means), lwd = 2, col = "blue")
legend('topright', c("simulation distribution", "theoretical distribution"), lty=c(1,1), col=c("blue", "red"))
|
#' Generate theoretical GC content distributions
#'
#' This function generates random simulated reads from
#' either provided \code{seqs} (best for RNA-seq)
#' or from a genome (best for DNA-seq). The GC content
#' of these reads is then tabulated to produce a distribution
#' file which can be read by MultiQC to be displayed
#' on top of the FASTQC GC content module. Exactly one of
#' \code{seqs} or \code{genome} must be specified.
#' Specifying \code{genome} requires also specifying \code{nchrom}.
#'
#' @param seqs a DNAStringSet of the sequences to simulate
#' read from. E.g. for RNA-seq, the transcripts, which
#' can be generated with \code{extractTranscriptSeqs}
#' from the GenomicFeatures package.
#' See the example script located in \code{inst/script/human_mouse.R}
#' @param genome a BSgenome object.
#' See the example script located in \code{inst/script/human_mouse.R}
#' @param nchrom the number of chromosomes from the genome to simulate
#' reads from
#' @param file the path of the file to write out
#' @param n the number of reads to simulate
#' @param bp the basepair (read length) of the reads
#' @param wts optional weights to go along with the \code{seqs} or
#' the chromosomes in \code{genome}, e.g. to represent
#' more realistic expression of transcripts
#' @param name the name to be printed at the top of the file
#'
#' @return the name of the file which was written
#'
#' @references
#'
#' MultiQC:
#' http://multiqc.info/
#'
#' FASTQC:
#' http://www.bioinformatics.babraham.ac.uk/projects/fastqc/
#'
#' @export
generateDistn <- function(seqs, genome, nchrom,
                          file="fastqc_theoretical_gc.txt",
                          n=1e6, bp=100, wts=1, name="") {
  # Require EXACTLY one of seqs/genome. The original
  # `stopifnot(missing(seqs) | missing(genome))` also passed when both were
  # missing, failing later in seqlengths() with an unhelpful error.
  if (missing(seqs) == missing(genome)) {
    stop("specify exactly one of 'seqs' or 'genome'", call. = FALSE)
  }
  if (!missing(genome)) stopifnot(!missing(nchrom))
  # first the routine generating GC from given sequences (txome)
  if (!missing(seqs)) {
    # remove seqs that are too short to yield a full read of length bp
    keep <- width(seqs) >= bp
    if (length(wts) > 1) {
      stopifnot(length(wts) == length(seqs))
      stopifnot(sum(keep) > 0)
      wts <- wts[keep]
    }
    message(paste(sum(keep), "sequences of sufficient length"))
    seqs <- seqs[keep]
    # weights (optional) can specify e.g. transcript expression; sampling
    # probability is proportional to (weight x length)
    prob <- wts * width(seqs)
    prob <- prob / sum(prob)
    idx <- sample(length(seqs), n, replace=TRUE, prob=prob)
    message(paste("generating", n, "reads"))
    molecules <- seqs[idx]
    # sample start positions uniformly along each molecule
    starts <- round(runif(length(molecules), 1, width(molecules) - bp + 1))
    reads <- subseq(molecules, start=starts, width=bp)
  } else {
    # now the routine generating from the first nchrom chromosomes of a genome
    chrom.lens <- head(seqlengths(genome), nchrom)
    message(paste0("will generate reads from ", names(chrom.lens)[1],
                   "-", names(chrom.lens)[nchrom]))
    stopifnot(all(chrom.lens > bp))
    if (length(wts) > 1) stopifnot(length(wts) == nchrom)
    prob <- chrom.lens * wts
    prob <- prob / sum(prob)
    chroms <- sample(names(chrom.lens), n, replace=TRUE, prob=prob)
    starts <- round(runif(n, 1, chrom.lens[chroms] - bp + 1))
    gr <- GRanges(chroms, IRanges(starts, width=bp))
    message(paste("generating", n, "reads"))
    # sorting speeds up accessing the genome
    gr <- sort(gr)
    reads <- as(Views(genome, gr), "DNAStringSet")
  }
  # tabulate frequencies per percentile of GC content; reads with no
  # unambiguous bases (total == 0) are excluded to avoid division by zero
  gc <- as.vector(letterFrequency(reads, "GC"))
  total <- as.vector(letterFrequency(reads, "ACGT"))
  gc.content <- (gc/total)[total > 0]
  # Fix: round AFTER scaling to percent -- the original `100*round(x, 2)`
  # could only ever report whole percents.
  message(paste0("mean (sd) GC content: ", round(100 * mean(gc.content), 2),
                 " (", round(100 * sd(gc.content), 2), ")"))
  # density over the 101 GC percentiles (0..100), bins centered on each percent
  dens <- hist(round(gc.content, 2), breaks=0:101/100 - 1/200, plot=FALSE)$density
  dens <- round(dens, 3)
  out <- cbind(GC=0:100, dens)
  message(paste("writing out density to", file))
  if (name != "") {
    name <- paste(":", name)
  }
  # MultiQC expects a '# ...' header line followed by tab-separated values
  write(paste0("# FastQC theoretical GC content curve", name), file=file)
  write.table(out, file=file, append=TRUE, sep="\t",
              quote=FALSE, row.names=FALSE, col.names=FALSE)
  file
}
|
/R/core.R
|
permissive
|
jnpaulson/fastqcTheoreticalGC
|
R
| false
| false
| 4,071
|
r
|
#' Generate theoretical GC content distributions
#'
#' This function generates random simulated reads from
#' either provided \code{seqs} (best for RNA-seq)
#' or from a genome (best for DNA-seq). The GC content
#' of these reads is then tabulated to produce a distribution
#' file which can be read by MultiQC to be displayed
#' on top of the FASTQC GC content module. Either \code{seqs}
#' or \code{genome} is required, and only one can be specified.
#' Specifying \code{genome} requires also specifying \code{nchrom}.
#'
#' @param seqs a DNAStringSet of the sequences to simulate
#' read from. E.g. for RNA-seq, the transcripts, which
#' can be generated with \code{extractTranscriptSeqs}
#' from the GenomicFeatures package.
#' See the example script located in \code{inst/script/human_mouse.R}
#' @param genome a BSgenome object.
#' See the example script located in \code{inst/script/human_mouse.R}
#' @param nchrom the number of chromosomes from the genome to simulate
#' reads from
#' @param file the path of the file to write out
#' @param n the number of reads to simulate
#' @param bp the basepair (read length) of the reads
#' @param wts optional weights to go along with the \code{seqs} or
#' the chromosomes in \code{genome}, e.g. to represent
#' more realistic expression of transcripts
#' @param name the name to be printed at the top of the file
#'
#' @return the name of the file which was written
#'
#' @references
#'
#' MultiQC:
#' http://multiqc.info/
#'
#' FASTQC:
#' http://www.bioinformatics.babraham.ac.uk/projects/fastqc/
#'
#' @export
generateDistn <- function(seqs, genome, nchrom,
file="fastqc_theoretical_gc.txt",
n=1e6, bp=100, wts=1, name="") {
# NOTE(review): this check also passes when BOTH arguments are missing;
# the function then fails later in seqlengths() with an obscure error.
stopifnot(missing(seqs) | missing(genome))
if (!missing(genome)) stopifnot(!missing(nchrom))
# first the routine generating GC from given sequences (txome)
if (!missing(seqs)) {
# remove seqs that are too short to yield a full read of length bp
keep <- width(seqs) >= bp
if (length(wts) > 1) {
stopifnot(length(wts) == length(seqs))
stopifnot(sum(keep) > 0)
wts <- wts[keep]
}
message(paste(sum(keep),"sequences of sufficient length"))
seqs <- seqs[keep]
# weights (optional) can specify e.g. transcript expression;
# sampling probability is proportional to (weight x length)
prob <- wts * width(seqs)
prob <- prob/sum(prob)
idx <- sample(length(seqs), n, replace=TRUE, prob=prob)
message(paste("generating",n,"reads"))
molecules <- seqs[idx]
# sample start positions uniformly along each molecule
starts <- round(runif(length(molecules),1,width(molecules)-bp+1))
reads <- subseq(molecules,start=starts,width=bp)
} else {
# now the routine generating from the first nchrom chromosomes of a genome
chrom.lens <- head(seqlengths(genome),nchrom)
message(paste0("will generate reads from ",names(chrom.lens)[1],
"-",names(chrom.lens)[nchrom]))
stopifnot(all(chrom.lens > bp))
if (length(wts) > 1) stopifnot(length(wts) == nchrom)
prob <- chrom.lens * wts
prob <- prob/sum(prob)
chroms <- sample(names(chrom.lens),n,replace=TRUE,prob=prob)
starts <- round(runif(n,1,chrom.lens[chroms]-bp+1))
gr <- GRanges(chroms,IRanges(starts,width=bp))
message(paste("generating",n,"reads"))
# sorting speeds up accessing the genome
gr <- sort(gr)
reads <- as(Views(genome, gr),"DNAStringSet")
}
# tabulate frequencies per percentile of GC content; reads with no
# unambiguous bases (total == 0) are excluded to avoid division by zero
gc <- as.vector(letterFrequency(reads,"GC"))
total <- as.vector(letterFrequency(reads,"ACGT"))
# NOTE(review): 100*round(x, 2) reports only whole percents;
# round(100*x, 2) would keep two decimals of precision
gc.content <- (gc/total)[total > 0]
message(paste0("mean (sd) GC content: ",100*round(mean(gc.content),2),
" (",100*round(sd(gc.content),2),")"))
# density over the 101 GC percentiles (0..100), bins centered on each percent
dens <- hist(round(gc.content,2), breaks=0:101/100 - 1/200, plot=FALSE)$density
dens <- round(dens, 3)
out <- cbind(GC=0:100, dens)
message(paste("writing out density to",file))
if (name != "") {
name <- paste(":",name)
}
# MultiQC expects a '# ...' header line followed by tab-separated values
write(paste0("# FastQC theoretical GC content curve",name), file=file)
write.table(out, file=file, append=TRUE, sep="\t",
quote=FALSE, row.names=FALSE, col.names=FALSE)
file
}
|
# RevoScaleR demo: pull wine data from SQL Server, transform to XDF,
# train a decision forest and a neural net, and serialize the model.
# Depends on Microsoft ML Server / RevoScaleR (Rx* functions) + SQL Server.
workDir <- "d:/Repos/Cloud4YourData/Demos/4DevKatowice2018/MLRevoscale/Data/Revoscale/"
outPath <- paste0(workDir,"wines.xdf")
sqlConnString <- "Driver=SQL Server; server=.; database=RevoScaleDb; Trusted_Connection = True;"
sqlCC <- RxInSqlServer(connectionString = sqlConnString, numTasks = 1)
sqlQuery <-"SELECT Facidity, Vacidity, Citric, Sugar, Chlorides,
Fsulfur, Tsulfur, Density, pH,Sulphates, Alcohol,
Color,
Quality
FROM dbo.WineTest;"
# Change compute context: run the next operations inside SQL Server
rxSetComputeContext(sqlCC)
rxGetComputeContext()
wines_ds <- RxSqlServerData(sqlQuery = sqlQuery,
connectionString = sqlConnString)
#rxSummary(formula = ~.,
# data = wines_ds)
rxGetVarInfo(data = wines_ds)
# Switch back to a local (parallel) compute context for the import
rxSetComputeContext(RxLocalParallel())
rxGetComputeContext()
# Pull the SQL result set into a local XDF file
wines <- rxImport(inData = wines_ds,
outFile = outPath,
overwrite = TRUE)
rxGetVarInfo(wines)
# Chunk-wise transform: encode Color as a 0/1 numeric (red = 1)
transformColor <- function(dataList) {
dataList$ColNum <- ifelse(dataList$Color == "red", 1, 0)
return(dataList)
}
wines_data <- rxDataStep(inData = wines,
#transforms = list(ColNum = ifelse(Color == "red", 1, 0)),
transformFunc = transformColor,
rowsPerRead = 250,
overwrite = TRUE)
# Drop the original character column now that ColNum exists
wines_data <- rxDataStep(inData = wines_data,
varsToDrop = c("Color"),
overwrite = TRUE)
rxGetVarInfo(wines_data)
rxHistogram(~Quality, data = wines_data)
## Split data 70/30 into Train/Test via a per-chunk random factor
# (.rxNumRows is supplied by rxSplit per chunk)
# NOTE(review): workDir already ends with "/", so paste0(workDir, "/modelData")
# yields a double slash -- harmless on Windows but inconsistent with the
# paste0(workDir, "modelData...") reads below; verify both resolve.
outFiles <- rxSplit(inData = wines_data,
outFilesBase = paste0(workDir, "/modelData"),
outFileSuffixes = c("Train", "Test"),
overwrite = TRUE,
splitByFactor = "splitVar",
transforms = list(
splitVar = factor(sample(c("Train", "Test"),
size = .rxNumRows,
replace = TRUE,
prob = c(.70, .30)),
levels = c("Train", "Test"))),
consoleOutput = TRUE
)
wines_train <- rxReadXdf(file = paste0(workDir, "modelData.splitVar.Train.xdf"))
wines_test <- rxReadXdf(file = paste0(workDir, "modelData.splitVar.Test.xdf"))
rxGetInfo(wines_train)
rxGetInfo(wines_test)
# Build "Quality ~ <all other columns>" from the column names
colNames <- colnames(wines_train)
modelFormula <- as.formula(paste("Quality ~", paste(colNames[!colNames %in% c("Quality", "splitVar")], collapse = " + ")))
modelFormula
# Train a regression (anova) decision forest
model = rxDForest(modelFormula, data = wines_train, method = "anova")
summary(model)
# Drop the split factor before scoring
wines_test <- rxDataStep(inData = wines_test,
varsToDrop = c("splitVar"),
overwrite = TRUE)
nrow(wines_test)
pred = rxPredict(modelObject = model,
data = wines_test,
type = "response",
predVarNames = "QualityPred",
extraVarsToWrite = c("Quality"))
head(pred$QualityPred)
library(MLmetrics)
# R-squared of predictions against the held-out Quality values
R2_Score(pred$QualityPred, pred$Quality)
# Serialize the model for real-time scoring (requires a sequential context)
rxGetComputeContext()
rxSetComputeContext(RxLocalSeq())
rxGetComputeContext()
model_ser <- rxSerializeModel(model, realtimeScoringOnly = TRUE)
writeBin(model_ser, paste0(workDir, "model.rsm"))
##
# Estimate a regression neural net on the same formula for comparison
res2 <- rxNeuralNet(modelFormula, data = wines_train,
type = "regression")
scoreOut2 <- rxPredict(res2, data = wines_test,
extraVarsToWrite = "Quality")
scoreOut2
# Plot the rating versus the score with a regression line
rxLinePlot(Quality ~ Score, type = c("r", "smooth"), data = scoreOut2)
|
/4DevKatowice2018/MLRevoscale/RevoscaleR/Script.R
|
no_license
|
beatajaworska/CommunityEvents
|
R
| false
| false
| 3,606
|
r
|
# RevoScaleR demo: pull wine data from SQL Server, transform to XDF,
# train a decision forest and a neural net, and serialize the model.
# Depends on Microsoft ML Server / RevoScaleR (Rx* functions) + SQL Server.
workDir <- "d:/Repos/Cloud4YourData/Demos/4DevKatowice2018/MLRevoscale/Data/Revoscale/"
outPath <- paste0(workDir,"wines.xdf")
sqlConnString <- "Driver=SQL Server; server=.; database=RevoScaleDb; Trusted_Connection = True;"
sqlCC <- RxInSqlServer(connectionString = sqlConnString, numTasks = 1)
sqlQuery <-"SELECT Facidity, Vacidity, Citric, Sugar, Chlorides,
Fsulfur, Tsulfur, Density, pH,Sulphates, Alcohol,
Color,
Quality
FROM dbo.WineTest;"
# Change compute context: run the next operations inside SQL Server
rxSetComputeContext(sqlCC)
rxGetComputeContext()
wines_ds <- RxSqlServerData(sqlQuery = sqlQuery,
connectionString = sqlConnString)
#rxSummary(formula = ~.,
# data = wines_ds)
rxGetVarInfo(data = wines_ds)
# Switch back to a local (parallel) compute context for the import
rxSetComputeContext(RxLocalParallel())
rxGetComputeContext()
# Pull the SQL result set into a local XDF file
wines <- rxImport(inData = wines_ds,
outFile = outPath,
overwrite = TRUE)
rxGetVarInfo(wines)
# Chunk-wise transform: encode Color as a 0/1 numeric (red = 1)
transformColor <- function(dataList) {
dataList$ColNum <- ifelse(dataList$Color == "red", 1, 0)
return(dataList)
}
wines_data <- rxDataStep(inData = wines,
#transforms = list(ColNum = ifelse(Color == "red", 1, 0)),
transformFunc = transformColor,
rowsPerRead = 250,
overwrite = TRUE)
# Drop the original character column now that ColNum exists
wines_data <- rxDataStep(inData = wines_data,
varsToDrop = c("Color"),
overwrite = TRUE)
rxGetVarInfo(wines_data)
rxHistogram(~Quality, data = wines_data)
## Split data 70/30 into Train/Test via a per-chunk random factor
# (.rxNumRows is supplied by rxSplit per chunk)
# NOTE(review): workDir already ends with "/", so paste0(workDir, "/modelData")
# yields a double slash -- harmless on Windows but inconsistent with the
# paste0(workDir, "modelData...") reads below; verify both resolve.
outFiles <- rxSplit(inData = wines_data,
outFilesBase = paste0(workDir, "/modelData"),
outFileSuffixes = c("Train", "Test"),
overwrite = TRUE,
splitByFactor = "splitVar",
transforms = list(
splitVar = factor(sample(c("Train", "Test"),
size = .rxNumRows,
replace = TRUE,
prob = c(.70, .30)),
levels = c("Train", "Test"))),
consoleOutput = TRUE
)
wines_train <- rxReadXdf(file = paste0(workDir, "modelData.splitVar.Train.xdf"))
wines_test <- rxReadXdf(file = paste0(workDir, "modelData.splitVar.Test.xdf"))
rxGetInfo(wines_train)
rxGetInfo(wines_test)
# Build "Quality ~ <all other columns>" from the column names
colNames <- colnames(wines_train)
modelFormula <- as.formula(paste("Quality ~", paste(colNames[!colNames %in% c("Quality", "splitVar")], collapse = " + ")))
modelFormula
# Train a regression (anova) decision forest
model = rxDForest(modelFormula, data = wines_train, method = "anova")
summary(model)
# Drop the split factor before scoring
wines_test <- rxDataStep(inData = wines_test,
varsToDrop = c("splitVar"),
overwrite = TRUE)
nrow(wines_test)
pred = rxPredict(modelObject = model,
data = wines_test,
type = "response",
predVarNames = "QualityPred",
extraVarsToWrite = c("Quality"))
head(pred$QualityPred)
library(MLmetrics)
# R-squared of predictions against the held-out Quality values
R2_Score(pred$QualityPred, pred$Quality)
# Serialize the model for real-time scoring (requires a sequential context)
rxGetComputeContext()
rxSetComputeContext(RxLocalSeq())
rxGetComputeContext()
model_ser <- rxSerializeModel(model, realtimeScoringOnly = TRUE)
writeBin(model_ser, paste0(workDir, "model.rsm"))
##
# Estimate a regression neural net on the same formula for comparison
res2 <- rxNeuralNet(modelFormula, data = wines_train,
type = "regression")
scoreOut2 <- rxPredict(res2, data = wines_test,
extraVarsToWrite = "Quality")
scoreOut2
# Plot the rating versus the score with a regression line
rxLinePlot(Quality ~ Score, type = c("r", "smooth"), data = scoreOut2)
|
\name{MCEM}
\alias{MCEM}
\title{Monte Carlo EM algorithm
}
\description{
Use: Computes the MLEs of the Gaussian copula model using the Monte Carlo EM algorithm
}
\usage{
MCEM(obs.data, Monte_Carlo_samples, p, q, initial, num_iterations,
prec=0.01, marginal_dist="negbin",
optim_method="nmkb", compute_stderrors=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obs.data}{ Observed Data }
\item{Monte_Carlo_samples}{The number of Monte Carlo samples, m, to approximate the conditional expectation; if m is a vector, the algorithm repeats num_iterations with each number in the vector}
\item{p}{order of the ar parameters in ARMA(p,q)}
\item{q}{order of the ma parameters in ARMA(p,q)}
\item{initial}{ initial parameter values for the EM algorithm }
\item{prec}{ precision for stopping criteria }
\item{num_iterations}{ Number of iterations}
\item{marginal_dist}{ Marginal distributions}
\item{optim_method}{ Default is "nmkb" from the R package dfoptim; however "CG", "BFGS" or "nlm" can be used. optimx() from the optimx R package suggests all 4 can be used, but in practice only "L-BFGS-B" has provided a good optimisation routine}
\item{compute_stderrors}{TRUE or FALSE}
}
\details{
Default is "L-BFGS-B"; however "CG", "BFGS" or "nlm" can be used. optimx() from the optimx R package suggests all 4 can be used, but in practice only "L-BFGS-B" has provided a good optimisation routine.
There are two types of optimisation: function minimisation and mathematical programming. Most optimisers in R are function minimisers. method="L-BFGS-B" is the method of Byrd et al. (1995), which is a modification of the BFGS quasi-Newton method.
"Dela_lx" for change in log-likelihood of observed data, as following Chen and Ledolter (1995)
}
\value{
Iterations for the MCEM algorithm which converge to the MLE estimates of the ARMA(1,1) parameters
}
\author{
Hannah Lennon
<drhannahlennon@gmail.com>
}
\references{
Lennon H., & Yuan J., Estimation of a digitised Gaussian ARMA model by Monte Carlo Expectation Maximisation, Computational Statistics & Data Analysis 2018;8:e020683
Lennon, H., 2016. Gaussian copula modelling for integer-valued time series (Doctoral Thesis, University of Manchester).
}
\note{
Produces pdf plots
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
First and Second Year Transfer Reports}
\examples{
library(numDeriv)
library(MASS)
library(VGAM)
library(dfoptim)
library(polynom)
library(dfoptim)
ls()
set.seed(1)
obs.data <- generate_data(10, 0.2, 0.1, "nbinom", c(3, 0.4))$x
}
|
/man/MCEM.Rd
|
no_license
|
hlennon/copulaIVTS
|
R
| false
| false
| 2,612
|
rd
|
\name{MCEM}
\alias{MCEM}
\title{Monte Carlo EM algorithm
}
\description{
Use: Computes the MLEs of the Gaussian copula model using the Monte Carlo EM algorithm
}
\usage{
MCEM(obs.data, Monte_Carlo_samples, p, q, initial, num_iterations,
prec=0.01, marginal_dist="negbin",
optim_method="nmkb", compute_stderrors=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{obs.data}{ Observed Data }
\item{Monte_Carlo_samples}{The number of Monte Carlo samples, m, to approximate the conditional expectation; if m is a vector, the algorithm repeats num_iterations with each number in the vector}
\item{p}{order of the ar parameters in ARMA(p,q)}
\item{q}{order of the ma parameters in ARMA(p,q)}
\item{initial}{ initial parameter values for the EM algorithm }
\item{prec}{ precision for stopping criteria }
\item{num_iterations}{ Number of iterations}
\item{marginal_dist}{ Marginal distributions}
\item{optim_method}{ Default is "nmkb" from the R package dfoptim; however "CG", "BFGS" or "nlm" can be used. optimx() from the optimx R package suggests all 4 can be used, but in practice only "L-BFGS-B" has provided a good optimisation routine}
\item{compute_stderrors}{TRUE or FALSE}
}
\details{
Default is "L-BFGS-B"; however "CG", "BFGS" or "nlm" can be used. optimx() from the optimx R package suggests all 4 can be used, but in practice only "L-BFGS-B" has provided a good optimisation routine.
There are two types of optimisation: function minimisation and mathematical programming. Most optimisers in R are function minimisers. method="L-BFGS-B" is the method of Byrd et al. (1995), which is a modification of the BFGS quasi-Newton method.
"Dela_lx" for change in log-likelihood of observed data, as following Chen and Ledolter (1995)
}
\value{
Iterations for the MCEM algorithm which converge to the MLE estimates of the ARMA(1,1) parameters
}
\author{
Hannah Lennon
<drhannahlennon@gmail.com>
}
\references{
Lennon H., & Yuan J., Estimation of a digitised Gaussian ARMA model by Monte Carlo Expectation Maximisation, Computational Statistics & Data Analysis 2018;8:e020683
Lennon, H., 2016. Gaussian copula modelling for integer-valued time series (Doctoral Thesis, University of Manchester).
}
\note{
Produces pdf plots
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
First and Second Year Transfer Reports}
\examples{
library(numDeriv)
library(MASS)
library(VGAM)
library(dfoptim)
library(polynom)
library(dfoptim)
ls()
set.seed(1)
obs.data <- generate_data(10, 0.2, 0.1, "nbinom", c(3, 0.4))$x
}
|
# Exploratory Data Analysis project, plot 3: the three energy sub-metering
# series over 2007-02-01 and 2007-02-02, drawn to screen and then to PNG.
setwd("~/Documents/Coursera/ExplorData2/ExData_Plotting1")
# Download and unzip the data only on the first run.
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "household.zip"
if (!file.exists("household")) {
  download.file(url, filename, method = "curl")
  file <- unzip("household.zip", exdir = "household")
}
expname <- list.files("household")[1]
setwd("~/Documents/Coursera/ExplorData2/ExData_Plotting1/household")
# "?" encodes missing values in this dataset.
rawdata <- read.table(file = expname, header = TRUE, sep = ";", na.strings = "?")
# Build a POSIX datetime column and a Date column, then keep only the two
# target dates. Comparing real Date values with %in% replaces the original
# regex grep() over implicitly coerced date strings.
rawdata$DateTime <- paste(rawdata$Date, rawdata$Time, sep = " ")
rawdata$DateTime <- strptime(rawdata$DateTime, format = "%d/%m/%Y %H:%M:%S", tz = "GMT")
rawdata$Date <- as.Date(rawdata$Date, format = "%d/%m/%Y")
data <- rawdata[rawdata$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Draw the three sub-metering series plus legend; shared by the on-screen
# test plot and the PNG export so the two cannot drift apart.
draw_plot3 <- function(d) {
  plot(d$DateTime, d$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", main = "")
  lines(d$DateTime, d$Sub_metering_2, col = "red")
  lines(d$DateTime, d$Sub_metering_3, col = "blue")
  legend(x = "topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = c(1, 1, 1), lwd = c(2.5, 2.5, 2.5), col = c("black", "red", "blue"))
}
# Test plotting the third graph on the screen device.
draw_plot3(data)
# Construct as png (480x480, the assignment's required size).
png(filename = "plot3.png", width = 480, height = 480)
draw_plot3(data)
dev.off()
|
/plot3.R
|
no_license
|
tinaowenmark/ExData_Plotting1
|
R
| false
| false
| 1,786
|
r
|
# Exploratory Data Analysis project, plot 3: energy sub-metering over time.
setwd("~/Documents/Coursera/ExplorData2/ExData_Plotting1")
# Download the file and read it in (only on the first run)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
filename <- "household.zip"
if(!file.exists("household")) {
download.file(url, filename, method="curl")
file <- unzip("household.zip", exdir = "household")
}
expname <- list.files("household")[1]
setwd("~/Documents/Coursera/ExplorData2/ExData_Plotting1/household")
# "?" encodes missing values in this dataset
rawdata <- read.table(file = expname, header = T, sep = ";", na.strings = "?")
# Fix the date and time columns, subset to get only the dates we want (2007-02-01 and 2007-02-02)
rawdata$DateTime <- paste(rawdata$Date, rawdata$Time, sep=" ")
rawdata$DateTime <- strptime(rawdata$DateTime, format = "%d/%m/%Y %H:%M:%S", tz = "GMT")
rawdata$Date <- as.Date(rawdata$Date, format = "%d/%m/%Y")
# regex match relies on Date values printing as yyyy-mm-dd when coerced
v <- grep(pattern = "2007-02-0[12]", x = rawdata$Date)
data <- rawdata[v,]
# Test plotting the third graph on the screen device first
plot(data$DateTime, data$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", main = "")
lines(data$DateTime, data$Sub_metering_2, col = "red")
lines(data$DateTime, data$Sub_metering_3, col= "blue")
legend(x="topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd = c(2.5, 2.5, 2.5), col=c("black", "red", "blue"))
# Construct as png (480x480, the assignment's required size)
png(filename = "plot3.png", width = 480, height = 480)
plot(data$DateTime, data$Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "", main = "")
lines(data$DateTime, data$Sub_metering_2, col = "red")
lines(data$DateTime, data$Sub_metering_3, col= "blue")
legend(x="topright", legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd = c(2.5, 2.5, 2.5), col=c("black", "red", "blue"))
dev.off()
|
# lineup analysis to complement cluster model
library(tidyverse)
library(magrittr)
library(reshape2)
library(lme4)
library(lmerTest)
# get libraries required for bbplot
pacman::p_load('dplyr', 'tidyr', 'gapminder',
'ggplot2', 'ggalt',
'forcats', 'R.utils', 'png',
'grid', 'ggpubr', 'scales',
'bbplot')
# avoid scientific notation in printed output
options(scipen=999)
# read in lineup combination data from 2015-2020 and filter out low-use lineups
# reduces from 10,000 lineup combinations over the five years to 7,612 combinations
lineups = read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/lineups_2015-20.csv',
stringsAsFactors = F) %>%
filter(MIN >= 10 & GP >= 5) %>%
# trim out those who didn't play much
select(TEAM_ID:PIE, SEASON:PLAYER_NAME_5) %>%
# add id column to match with data later
tibble::rowid_to_column(., "LINEUP_ID")
# get predicted classes from cluster model
preds <- read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/fulldf.csv',
stringsAsFactors = F) %>%
select(PLAYER_NAME, classOrder, classNames, POSITION, TEAM_ABBREVIATION, SEASON)
# get player id's from original data (2015-16 season onward only)
nba = read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/season_totals_1996-20.csv',
stringsAsFactors = F) %>%
filter(SEASON >= '2015-16') %>%
select(PLAYER_ID, PLAYER_NAME, SEASON, TEAM_ABBREVIATION, TEAM_ID)
# join the three datasets (just need lineups now), keyed on player name + season
df = merge(preds, nba, by=c('PLAYER_NAME', 'SEASON'))
# Move lineups from wide to long so class values can be matched to each
# player. Each of the five player-id columns takes a turn as the id column
# while the other four are dropped, then the five long frames are stacked.
# Bug fix: the original select() calls listed the fifth column positively
# (`PLAYER_ID_5` instead of `-PLAYER_ID_5`), so a stray player-id column
# survived into each melt and was stacked into the variable/value rows.
melt_lineup_slot <- function(slot) {
  # One long frame keyed by PLAYER_ID_<slot>; drops the other four id columns.
  id_col <- paste0("PLAYER_ID_", slot)
  drop_cols <- setdiff(paste0("PLAYER_ID_", 1:5), id_col)
  out <- lineups %>%
    select(-all_of(drop_cols)) %>%
    reshape2::melt(id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                               'SEASON', id_col))
  # Rename the slot-specific id column to the common name used downstream.
  names(out)[names(out) == id_col] <- "PLAYER_ID"
  out
}
# join all of these long lineup dataframes, one per roster slot
lineupsLong <- do.call("rbind", lapply(1:5, melt_lineup_slot))
# join all of the data together into single dataframe; drop the duplicated
# team columns created by the merge
lineupsFull <- merge(lineupsLong, df, by = c('PLAYER_ID', 'SEASON')) %>%
  select(-TEAM_ABBREVIATION.x, -TEAM_ABBREVIATION.y, -TEAM_ID.y)
#### ANALYSIS #######
# get long format, ordered list of position numbers, and counts of each, within each lineup
# NOTE(review): n is divided by 31 below -- presumably the number of melted
# stat columns per (lineup, player) pair; confirm against the lineup data.
lineupsFull %>%
select(LINEUP_ID, classNames) %>%
group_by(LINEUP_ID, classNames) %>%
tally() %>%
mutate(n = n/31) %>%
arrange(classNames, n, .by_group = TRUE)
# get long format, ordered list of the positions within each lineup
positionCombos = lineupsFull %>%
select(LINEUP_ID, classNames, variable) %>%
filter(variable == 'NET_RATING') %>%
group_by(LINEUP_ID, classNames) %>%
arrange(classNames, .by_group = TRUE) %>%
group_by(LINEUP_ID, variable) %>%
mutate(all_positions = paste(classNames, collapse = " | ")) %>% # composition string, e.g. "Wing | Wing | Big | ..."
ungroup() %>% # drop the grouping
select(LINEUP_ID, all_positions) %>% # keep only id + composition
group_by(LINEUP_ID) %>%
slice(1) # grab just the first row per lineup (otherwise it's repeated once per player)
# what are the most common combinations?
totals = positionCombos %>%
group_by(all_positions) %>%
tally() %>%
arrange(desc(n))
# get output variable + other predictors from long dataset
modelVars = lineupsFull %>%
select(LINEUP_ID, SEASON, TEAM_ABBREVIATION, variable, value) %>%
filter(variable == 'NET_RATING') %>%
rename(., NET_RATING = value) %>%
select(-variable) %>%
group_by(LINEUP_ID) %>%
slice(1) # grab just the first row per lineup (otherwise it's repeated)
# join the data (positions and predictors/outcome data)
modelData = merge(modelVars, positionCombos, by='LINEUP_ID')
# melt() produced a character value column; convert the outcome back to numeric
modelData$NET_RATING = as.numeric(modelData$NET_RATING)
#### ESTIMATE EFFECT OF EACH POSITION #############
# Long-format data: one row per (lineup, player) with the lineup's net rating.
PositionVars <- lineupsFull %>%
  select(LINEUP_ID, SEASON, TEAM_ABBREVIATION, classNames, POSITION, variable, value) %>%
  filter(variable == 'NET_RATING') %>%
  rename(NET_RATING = value) %>%
  select(-variable)
# melt() produced character values; restore numeric outcome and factor id.
PositionVars$NET_RATING <- as.numeric(PositionVars$NET_RATING)
PositionVars$LINEUP_ID <- as.factor(PositionVars$LINEUP_ID)
str(PositionVars)
# Raw per-position means, as a sanity check against the model estimates.
grpMeans <- PositionVars %>%
  group_by(classNames) %>%
  summarise(mean = mean(NET_RATING))
# Gaussian mixed model (no intercept, so one coefficient per position) with
# a random intercept for team-within-season.
# Fix: call lmer() directly -- glmer() without a family is Gaussian anyway,
# and lme4 deprecates that usage with a message to use lmer() instead.
modPosition <- lmer(NET_RATING ~ classNames - 1 +
                      (1 | TEAM_ABBREVIATION:SEASON),
                    data = PositionVars)
summary(modPosition)
# Fixed effects into a data frame with an explicit column name so the plot
# below cannot get out of sync with row order after merging.
posEst <- data.frame(Estimate = fixef(modPosition))
posEst$Position <- gsub('classNames', '', rownames(posEst))
# Confidence intervals around the estimates (profile CIs; can be slow).
confInt <- as.data.frame(confint(modPosition))
confInt$Position <- gsub('classNames', '', rownames(confInt))
# Join estimates, intervals, and raw group means. merge() keys on Position,
# so confInt's non-fixed-effect rows (".sig01", ".sigma") drop out here.
posEst <- merge(posEst, confInt)
posEst <- merge(posEst, grpMeans, by.x = 'Position', by.y = 'classNames')
# Plot model estimate (point + CI) next to the raw mean (blue star).
# Fix: map the Estimate column instead of calling fixef() inside aes(),
# which silently assumed the model's coefficient order matched the merged
# (alphabetically sorted) rows of posEst.
p <- posEst %>%
  ggplot() +
  aes(x = reorder(Position, Estimate), y = Estimate) +
  geom_errorbar(aes(ymin = `2.5 %`, ymax = `97.5 %`),
                width = .2,  # width of the error bars
                position = position_dodge(0.9)) +
  geom_point() +
  geom_point(aes(x = Position, y = mean, shape = 8), colour = 'blue') +
  scale_shape_identity() +
  geom_hline(yintercept = 0,
             size = 0.5,
             color = '#00416d',
             alpha = 0.7) +
  scale_y_continuous(breaks = seq(0, 7, by = 1)) +
  coord_flip() +
  bbc_style() +
  labs(title = 'Net Rating Estimate by Position')
p
# alternative visual (unordered)
library(sjPlot)
sjPlot::plot_model(modPosition, type = 'eff')
#### ESTIMATE BEST LINEUPS #######
#library(extraoperators)
#library(JWileymisc)
#library(multilevelTools)
# unadjusted means by lineup type (like group mean): random intercept only
mod = lmer(NET_RATING ~ (1 | all_positions),
data = modelData)
modFit = coef(mod)$all_positions
# lineup nested within teams nested within seasons
# gives specific net rating for exact lineup in exact team in exact season
# too exact
#mod2 = lmer(NET_RATING ~ (1 | all_positions:TEAM_ABBREVIATION:SEASON),
# data = modelData)
#summary(mod2)
#mod2Fit = coef(mod2)$all_positions
# get specific estimates for the lineups, while controlling for season and team
# but not nesting them, because don't want that specific of an estimate
# want to go outside of teams and generalize to the lineups
mod3 = lmer(NET_RATING ~ (1 | TEAM_ABBREVIATION:SEASON) + (1 | all_positions),
data = modelData)
summary(mod3)
mod3Fit = coef(mod3)$all_positions
# NOTE(review): coef() is indexed by grouping-factor name; the team factor in
# mod3 is named "TEAM_ABBREVIATION:SEASON", so the line below likely returns
# NULL -- verify the intended name.
mod3FitNested = coef(mod3)$TEAM_ABBREVIATION
ranef(mod3)$all_positions # just the random effects
coef(mod3)$all_positions # the coefficients (adding in the other variables to get point estimate)
# get confidence intervals of random effects
# probably a better idea to instead just run a Bayesian model
library(merTools)
# simulate random-effect distributions (5000 draws)
randomSims <- REsim(mod3, n.sims = 5000)
# and to plot it
plotREsim(REsim(mod3, n.sims = 5000))
#### plot coefficients with highest values
mod3Fit$lineup = row.names(mod3Fit) # make lineup into a column
mod3Fit %>%
filter(`(Intercept)` > 2.4) %>%
ggplot() +
aes(x = reorder(lineup, `(Intercept)`), y = `(Intercept)`) +
geom_point() +
geom_hline(yintercept = 0,
size = 0.5,
color = '#00416d',
alpha = 0.7) +
coord_flip() +
bbc_style() +
theme(axis.text.y = element_text(size = 10)) +
labs(title = 'Top Lineups by Estimated Net Rating')
#### plot coefficients with lowest values
mod3Fit %>%
filter(`(Intercept)` < 1) %>%
ggplot() +
aes(x = reorder(lineup, `(Intercept)`), y = `(Intercept)`) +
geom_point() +
geom_hline(yintercept = 0,
size = 0.5,
color = '#00416d',
alpha = 0.7) +
coord_flip() +
bbc_style() +
theme(axis.text.y = element_text(size = 10)) +
labs(title = 'Worst Lineups by Estimated Net Rating')
# visualize all random effects with estimates
# NOTE(review): installing a package at runtime inside an analysis script is
# fragile (needs network + write access); consider moving to setup docs.
devtools::install_github("m-clark/visibly")
library(visibly)
plot_coefficients(mod3, ranef=TRUE, which_ranef = 'all_positions') + coord_flip()
|
/Data Modeling/NBA_lineup_model.R
|
no_license
|
dhutexas/nba_stats
|
R
| false
| false
| 9,300
|
r
|
# lineup analysis to complement cluster model
# Joins five-man lineup performance data with per-player cluster assignments
# so each lineup can be described by the positional "classes" of its players.
library(tidyverse)
library(magrittr)
library(reshape2)
library(lme4)
library(lmerTest)
# get libraries required for bbplot
pacman::p_load('dplyr', 'tidyr', 'gapminder',
'ggplot2', 'ggalt',
'forcats', 'R.utils', 'png',
'grid', 'ggpubr', 'scales',
'bbplot')
# avoid scientific notation in printed estimates
options(scipen=999)
# read in lineup combination data from 2015-2020 and filter out low-use lineups
# (under 10 minutes or 5 games together)
# reduces from 10,000 lineup combinations over the five years to 7,612 combinations
lineups = read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/lineups_2015-20.csv',
stringsAsFactors = F) %>%
filter(MIN >= 10 & GP >= 5) %>%
# trim out those who didn't play much
select(TEAM_ID:PIE, SEASON:PLAYER_NAME_5) %>%
# add id column to match with data later
tibble::rowid_to_column(., "LINEUP_ID")
# get predicted classes (cluster assignments) from cluster model, one row per player-season
preds <- read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/fulldf.csv',
stringsAsFactors = F) %>%
select(PLAYER_NAME, classOrder, classNames, POSITION, TEAM_ABBREVIATION, SEASON)
# get player id's from original data, restricted to the same seasons as the lineups
nba = read.csv('https://raw.githubusercontent.com/dhutexas/nba_stats/main/Data/season_totals_1996-20.csv',
stringsAsFactors = F) %>%
filter(SEASON >= '2015-16') %>%
select(PLAYER_ID, PLAYER_NAME, SEASON, TEAM_ABBREVIATION, TEAM_ID)
# join the three datasets (just need lineups now)
# NOTE(review): joining on PLAYER_NAME + SEASON may duplicate rows for players
# with multiple team rows in a season (mid-season trades) -- verify against the data.
df = merge(preds, nba, by=c('PLAYER_NAME', 'SEASON'))
# move lineups from wide to long so class assignments can be matched to data.
# One pass per PLAYER_ID_{1..5} slot: keep that slot's ID column as an id.var,
# drop the other four, melt the remaining stat columns, and rename the kept
# slot to a common PLAYER_ID so the five frames can be stacked.
# BUG FIX: the last PLAYER_ID in each select() was missing its leading '-'
# (e.g. `select(-PLAYER_ID_2, -PLAYER_ID_3, -PLAYER_ID_4, PLAYER_ID_5)`), so
# that column was retained and melted as a bogus measure variable instead of
# being dropped.
p1 = lineups %>%
  select(-PLAYER_ID_2, -PLAYER_ID_3, -PLAYER_ID_4, -PLAYER_ID_5) %>%
  reshape2::melt(., id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                                'SEASON', 'PLAYER_ID_1')) %>%
  rename(., PLAYER_ID = PLAYER_ID_1)
p2 = lineups %>%
  select(-PLAYER_ID_1, -PLAYER_ID_3, -PLAYER_ID_4, -PLAYER_ID_5) %>%
  reshape2::melt(., id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                                'SEASON', 'PLAYER_ID_2')) %>%
  rename(., PLAYER_ID = PLAYER_ID_2)
p3 = lineups %>%
  select(-PLAYER_ID_2, -PLAYER_ID_1, -PLAYER_ID_4, -PLAYER_ID_5) %>%
  reshape2::melt(., id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                                'SEASON', 'PLAYER_ID_3')) %>%
  rename(., PLAYER_ID = PLAYER_ID_3)
p4 = lineups %>%
  select(-PLAYER_ID_2, -PLAYER_ID_3, -PLAYER_ID_1, -PLAYER_ID_5) %>%
  reshape2::melt(., id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                                'SEASON', 'PLAYER_ID_4')) %>%
  rename(., PLAYER_ID = PLAYER_ID_4)
p5 = lineups %>%
  select(-PLAYER_ID_2, -PLAYER_ID_3, -PLAYER_ID_4, -PLAYER_ID_1) %>%
  reshape2::melt(., id.vars = c('LINEUP_ID', 'TEAM_ABBREVIATION', 'TEAM_ID',
                                'SEASON', 'PLAYER_ID_5')) %>%
  rename(., PLAYER_ID = PLAYER_ID_5)
# stack all five long lineup dataframes: one row per lineup x player x stat
lineupsLong = do.call("rbind", list(p1,p2,p3,p4,p5))
# join player cluster data onto the long lineups; drop the suffixed duplicate
# team columns introduced by the merge (the unsuffixed ones from lineupsLong remain)
lineupsFull = merge(lineupsLong, df, by=c('PLAYER_ID', 'SEASON')) %>%
  select(-TEAM_ABBREVIATION.x, -TEAM_ABBREVIATION.y, -TEAM_ID.y)
#### ANALYSIS #######
# get long format, ordered list of position numbers, and counts of each, within each lineup
# NOTE(review): n is divided by 31 -- presumably the number of melted stat
# columns per player row, so n becomes a player count; confirm against the data.
lineupsFull %>%
select(LINEUP_ID, classNames) %>%
group_by(LINEUP_ID, classNames) %>%
tally() %>%
mutate(n = n/31) %>%
arrange(classNames, n, .by_group = TRUE)
# get long format, ordered list of the positions within each lineup
# (one row per lineup with its five class labels pasted together, e.g. "A | B | ...")
positionCombos = lineupsFull %>%
select(LINEUP_ID, classNames, variable) %>%
filter(variable == 'NET_RATING') %>%
group_by(LINEUP_ID, classNames) %>%
arrange(classNames, .by_group = TRUE) %>%
group_by(LINEUP_ID, variable) %>%
mutate(all_positions = paste(classNames, collapse = " | ")) %>% # create variable of composition
ungroup() %>% # get rid of grouping
select(LINEUP_ID, all_positions) %>% # drop helper columns
group_by(LINEUP_ID) %>%
slice(1) # keep one row per lineup (the composition is repeated across its five player rows)
# what are the most common combinations?
totals = positionCombos %>%
group_by(all_positions) %>%
tally() %>%
arrange(desc(n))
# get output variable + other predictors from long dataset
# (one NET_RATING row per lineup, with team and season identifiers)
modelVars = lineupsFull %>%
select(LINEUP_ID, SEASON, TEAM_ABBREVIATION, variable, value) %>%
filter(variable == 'NET_RATING') %>%
rename(., NET_RATING = value) %>%
select(-variable) %>%
group_by(LINEUP_ID) %>%
slice(1) # keep one row per lineup (the rating is repeated across its five player rows)
# join the data (positions and predictors/outcome data)
modelData = merge(modelVars, positionCombos, by='LINEUP_ID')
modelData$NET_RATING = as.numeric(modelData$NET_RATING)
#### ESTIMATE EFFECT OF EACH POSITION #############
# One row per player-slot per lineup: the lineup's net rating attached to each
# player's cluster label, so position effects can be estimated across lineups.
PositionVars = lineupsFull %>%
  select(LINEUP_ID, SEASON, TEAM_ABBREVIATION, classNames, POSITION, variable, value) %>%
  filter(variable == 'NET_RATING') %>%
  rename(., NET_RATING = value) %>%
  select(-variable)
PositionVars$NET_RATING = as.numeric(PositionVars$NET_RATING)
PositionVars$LINEUP_ID = as.factor(PositionVars$LINEUP_ID)
str(PositionVars)
# check against averages by position, generally (raw group means, no model)
grpMeans = PositionVars %>%
  group_by(classNames) %>%
  summarise(mean = mean(NET_RATING))
# run model: one fixed effect per position class (the -1 removes the intercept
# so every class gets its own estimate) with a random intercept per team-season.
# BUG FIX: the outcome is continuous, so this is a linear mixed model; calling
# glmer() with the default gaussian family is deprecated in lme4 and simply
# redirects to lmer() with a warning. Call lmer() directly (lmerTest is loaded,
# so summary() also reports p-values).
modPosition = lmer(NET_RATING ~ classNames -1 +
  (1 | TEAM_ABBREVIATION:SEASON),
  data = PositionVars)
summary(modPosition)
# grab fixed effects for positions from the model into df
posEst = as.data.frame(fixef(modPosition))
posEst$Position = rownames(posEst)
posEst$Position = gsub('classNames','',posEst$Position)
# get confidence intervals around the estimates (profile CIs; the .sig01/.sigma
# rows for variance components drop out in the inner merge below)
confInt = confint(modPosition)
confInt = as.data.frame(confInt)
confInt$Position = rownames(confInt)
confInt$Position = gsub('classNames','', confInt$Position)
# join estimates, confidence intervals, and raw group means by position
posEst = merge(posEst, confInt)
posEst = merge(posEst, grpMeans, by.x = 'Position', by.y = 'classNames')
# plot fixed effects by position, ordered by estimate, with 95% CIs and the
# raw group mean (blue star, shape 8) for comparison
# BUG FIX: use the estimate column stored in posEst (named `fixef(modPosition)`
# by as.data.frame()) instead of re-calling fixef(modPosition) inside aes();
# merge() re-sorts posEst by the join key, so a freshly computed fixef() vector
# is only aligned with the plotted rows by coincidence.
p = posEst %>%
  ggplot() +
  aes(x = reorder(Position, `fixef(modPosition)`), y = `fixef(modPosition)`) +
  geom_errorbar(aes(ymin=`2.5 %`, ymax=`97.5 %`),
                width = .2, # width of the error bars
                position = position_dodge(0.9)) +
  geom_point() +
  # raw (unmodeled) position means for reference, drawn as blue stars
  geom_point(aes(x = Position, y = mean, shape = 8), colour='blue') +
  scale_shape_identity() +
  geom_hline(yintercept = 0,
             size = 0.5,
             color = '#00416d',
             alpha = 0.7) +
  scale_y_continuous(breaks = seq(0, 7, by =1)) +
  coord_flip() +
  bbc_style() +
  labs(title = 'Net Rating Estimate by Position')
p
# alternative visual (unordered)
library(sjPlot)
sjPlot::plot_model(modPosition, type = 'eff')
#### ESTIMATE BEST LINEUPS #######
#library(extraoperators)
#library(JWileymisc)
#library(multilevelTools)
# unadjusted means by lineup type (like group mean): random intercept per composition only
mod = lmer(NET_RATING ~ (1 | all_positions),
data = modelData)
modFit = coef(mod)$all_positions
# lineup nested within teams nested within seasons
# gives specific net rating for exact lineup in exact team in exact season
# too exact
#mod2 = lmer(NET_RATING ~ (1 | all_positions:TEAM_ABBREVIATION:SEASON),
# data = modelData)
#summary(mod2)
#mod2Fit = coef(mod2)$all_positions
# get specific estimates for the lineups, while controlling for season and team
# but not nesting them, because don't want that specific of an estimate
# want to go outside of teams and generalize to the lineups
# (crossed random effects: team-season and lineup composition)
mod3 = lmer(NET_RATING ~ (1 | TEAM_ABBREVIATION:SEASON) + (1 | all_positions),
data = modelData)
summary(mod3)
mod3Fit = coef(mod3)$all_positions
# NOTE(review): the grouping factor above is 'TEAM_ABBREVIATION:SEASON', so
# coef(mod3)$TEAM_ABBREVIATION is likely NULL -- verify the component name.
mod3FitNested = coef(mod3)$TEAM_ABBREVIATION
ranef(mod3)$all_positions # just the random effects
coef(mod3)$all_positions # the coefficients (adding in the other variables to get point estimate)
# get confidence intervals of random effects via simulation
# probably a better idea to instead just run a Bayesian model
library(merTools)
randomSims <- REsim(mod3, n.sims = 5000)
# and to plot it
# NOTE(review): this re-runs the 5000-draw simulation; randomSims could be reused here.
plotREsim(REsim(mod3, n.sims = 5000))
#### plot coefficients with highest values
mod3Fit$lineup = row.names(mod3Fit) # make lineup composition into a column
# show only lineup compositions with estimated net rating above 2.4
mod3Fit %>%
filter(`(Intercept)` > 2.4) %>%
ggplot() +
aes(x = reorder(lineup, `(Intercept)`), y = `(Intercept)`) +
geom_point() +
geom_hline(yintercept = 0,
size = 0.5,
color = '#00416d',
alpha = 0.7) +
coord_flip() +
bbc_style() +
theme(axis.text.y = element_text(size = 10)) +
labs(title = 'Top Lineups by Estimated Net Rating')
#### plot coefficients with lowest values
# show only lineup compositions with estimated net rating below 1
mod3Fit %>%
filter(`(Intercept)` < 1) %>%
ggplot() +
aes(x = reorder(lineup, `(Intercept)`), y = `(Intercept)`) +
geom_point() +
geom_hline(yintercept = 0,
size = 0.5,
color = '#00416d',
alpha = 0.7) +
coord_flip() +
bbc_style() +
theme(axis.text.y = element_text(size = 10)) +
labs(title = 'Worst Lineups by Estimated Net Rating')
# visualize all random effects with estimates
# NOTE(review): installing a GitHub package at script runtime is fragile
# (network access, changing upstream); consider moving this to setup docs.
devtools::install_github("m-clark/visibly")
library(visibly)
plot_coefficients(mod3, ranef=TRUE, which_ranef = 'all_positions') + coord_flip()
|
\name{floyd}
\alias{floyd}
\title{Find Shortest Paths Between All Nodes in a Graph}
\description{
The \code{floyd} function finds all shortest paths in a graph using Floyd's algorithm.
}
\usage{
floyd(data)
}
\arguments{
\item{data}{matrix or distance object}
}
\value{
\code{floyd} returns a matrix with the total lengths of the shortest path between each pair of points.
}
\references{
Floyd, Robert W \cr
Algorithm 97: Shortest Path.\cr
\emph{Communications of the ACM} 1962; 5 (6): 345. doi:10.1145/367766.368168. \href{https://dl.acm.org/doi/10.1145/367766.368168}{Link}
}
\examples{
# build a graph with 5 nodes
x=matrix(c(0,NA,NA,NA,NA,30,0,NA,NA,NA,10,NA,0,NA,NA,NA,70,50,0,10,NA,40,20,60,0),ncol=5)
print(x)
# compute all path lengths
z=floyd(x)
print(z)
}
|
/man/floyd.Rd
|
no_license
|
Dev-RDV/KODAMA
|
R
| false
| false
| 818
|
rd
|
\name{floyd}
\alias{floyd}
\title{Find Shortest Paths Between All Nodes in a Graph}
\description{
The \code{floyd} function finds all shortest paths in a graph using Floyd's algorithm.
}
\usage{
floyd(data)
}
\arguments{
\item{data}{matrix or distance object}
}
\value{
\code{floyd} returns a matrix with the total lengths of the shortest path between each pair of points.
}
\references{
Floyd, Robert W \cr
Algorithm 97: Shortest Path.\cr
\emph{Communications of the ACM} 1962; 5 (6): 345. doi:10.1145/367766.368168. \href{https://dl.acm.org/doi/10.1145/367766.368168}{Link}
}
\examples{
# build a graph with 5 nodes
x=matrix(c(0,NA,NA,NA,NA,30,0,NA,NA,NA,10,NA,0,NA,NA,NA,70,50,0,10,NA,40,20,60,0),ncol=5)
print(x)
# compute all path lengths
z=floyd(x)
print(z)
}
|
context("Testing azureSMR Blob commands")
# Integration test: round-trips a small JSON blob through Azure blob storage
# (put -> list -> get -> delete). Requires live credentials in the environment
# variables azureTenantID / azureClientID / azureAuthKey.
test_that("No category", {
# payload to round-trip and fixed names of the Azure resources used
blobObject <- list(a = "1", b = "2")
blobName <- "testBlob"
resourceGroup = "asrQuantProduction"
containerName = "testdata"
storageAccount = "asrquantstorage"
verbose <- FALSE
# authenticate against Azure using service-principal credentials from env vars
sc <- createAzureContext(tenantID = Sys.getenv("azureTenantID"),
clientID = Sys.getenv("azureClientID"),
authKey= Sys.getenv("azureAuthKey"),
authType = "ClientCredential")
expect_false(is.null(sc))
# fetch the storage-account access key needed for the blob operations below
sk <- azureSAGetKey(azureActiveContext = sc,
resourceGroup = resourceGroup,
storageAccount = storageAccount,
verbose = verbose)
expect_false(is.null(sk))
con <- list(sc = sc,
sk = sk)
# NOTE(review): stashing the connection in .GlobalEnv is a test anti-pattern
# (shared mutable state); presumably other code expects ".azureConnection"
# there -- confirm before refactoring.
assign(x = ".azureConnection",
value = con,
envir = .GlobalEnv)
# upload the JSON-encoded payload
ret <- azurePutBlob(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
contents = jsonlite::toJSON(blobObject),
blob = blobName,
verbose = verbose)
expect_true(ret)
# the uploaded blob should be listed under its name prefix
listBlobs <- azureListStorageBlobs(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
prefix = blobName,
verbose = verbose)
expect_is(listBlobs, "data.frame")
# download and decode the blob; it must equal the original payload
expBlobObject <- jsonlite::fromJSON(azureGetBlob(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
blob = blobName,
type = "text",
verbose = verbose) )
expect_equal(blobObject, expBlobObject)
# clean up the remote blob
ret <- azureDeleteBlob(azureActiveContext = con$sc,
storageAccount = storageAccount,
container = containerName,
blob = blobName,
verbose = verbose)
expect_true(ret)
# clean up the global connection stashed above
if(exists(x = ".azureConnection", envir = .GlobalEnv)){
rm(".azureConnection", envir = .GlobalEnv)
}
})
|
/tests/testthat/test-AzureSMRBlob.R
|
no_license
|
CharlesCara/AzureSMRLite
|
R
| false
| false
| 2,479
|
r
|
context("Testing azureSMR Blob commands")
test_that("No category", {
blobObject <- list(a = "1", b = "2")
blobName <- "testBlob"
resourceGroup = "asrQuantProduction"
containerName = "testdata"
storageAccount = "asrquantstorage"
verbose <- FALSE
sc <- createAzureContext(tenantID = Sys.getenv("azureTenantID"),
clientID = Sys.getenv("azureClientID"),
authKey= Sys.getenv("azureAuthKey"),
authType = "ClientCredential")
expect_false(is.null(sc))
sk <- azureSAGetKey(azureActiveContext = sc,
resourceGroup = resourceGroup,
storageAccount = storageAccount,
verbose = verbose)
expect_false(is.null(sk))
con <- list(sc = sc,
sk = sk)
assign(x = ".azureConnection",
value = con,
envir = .GlobalEnv)
ret <- azurePutBlob(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
contents = jsonlite::toJSON(blobObject),
blob = blobName,
verbose = verbose)
expect_true(ret)
listBlobs <- azureListStorageBlobs(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
prefix = blobName,
verbose = verbose)
expect_is(listBlobs, "data.frame")
expBlobObject <- jsonlite::fromJSON(azureGetBlob(azureActiveContext = con$sc,
storageKey = con$sk,
storageAccount = storageAccount,
container = containerName,
blob = blobName,
type = "text",
verbose = verbose) )
expect_equal(blobObject, expBlobObject)
ret <- azureDeleteBlob(azureActiveContext = con$sc,
storageAccount = storageAccount,
container = containerName,
blob = blobName,
verbose = verbose)
expect_true(ret)
if(exists(x = ".azureConnection", envir = .GlobalEnv)){
rm(".azureConnection", envir = .GlobalEnv)
}
})
|
# Test runner: executes the testthat suite for the 'Reg' package.
# NOTE(review): for R CMD check, testthat::test_check("Reg") is the
# conventional entry point -- confirm against current testthat docs.
library(testthat)
library(devtools)
library(Reg)
test_package("Reg")
|
/tests/testthat.R
|
permissive
|
parismita/Reg
|
R
| false
| false
| 71
|
r
|
library(testthat)
library(devtools)
library(Reg)
test_package("Reg")
|
# Shiny UI: a navbar app that walks through a six-step data science workflow
# (Import -> Tidy -> Transform -> Visualization -> Model -> Communicate).
# Each step is a tab in a navlistPanel; an ace editor at the bottom of the
# page echoes the R code generated by the server for each action.
library(shiny)
library(shinyAce)
library(tidyverse)
library(ggplot2)
library(DT)
library(dplyr)
shinyUI(fluidPage(
  #titlePanel("RStudio shiny app for communicating a data science process."),
  navbarPage("RStudio Shiny App",
    tabPanel("App",
      navlistPanel(
        "Six Steps",
        widths = c(2, 8),
        # Step 1 -- Import: pick a bundled dataset or upload a CSV
        tabPanel("Import",
          sidebarLayout(
            sidebarPanel(
              conditionalPanel(
                condition = "input.own == false",
                selectInput("dataset", label = "Choose Dataset", choices = c("Iris", "Abalone", "Wine"))
              ),
              conditionalPanel(
                condition = "input.own == true",
                fileInput('file', 'Choose CSV File',
                          accept=c('text/csv',
                                   'text/comma-separated-values,text/plain',
                                   '.csv'))
              ),
              checkboxInput("own", "Upload Dataset"),
              actionButton("save_button", "Save")
            ),
            mainPanel(DT::dataTableOutput("mytable1")
            )
          )
        ),
        # Step 2 -- Tidy: each operation reveals its own controls via
        # conditionalPanel keyed on the tidy_type selector
        # NOTE(review): "Chose the operation" is a typo in a user-facing label;
        # fixing it requires a matching server-side change if the string is referenced.
        tabPanel("Tidy",
          sidebarLayout(
            sidebarPanel(
              selectInput("tidy_type", label = "Chose the operation",
                          choices = c("General Formatting","Separate", "Arrange", "Handle Missing Data", "Delete a Column", "Filter", "Standardize" )),
              conditionalPanel(
                condition = "input.tidy_type == 'Standardize'",
                uiOutput("select_name_to_normalize"),
                actionButton("normalize_button", "Apply")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'Separate'",
                uiOutput("select_name_to_separate"),
                textInput('name_separated1', 'Enter new name 1'),
                textInput('name_separated2', 'Enter new name 2'),
                actionButton("sep_button", "Apply")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'Filter'",
                uiOutput("select_name_to_filter"),
                radioButtons(inputId = "comparison", label = "Select the operation", choices = list("=", ">", "<")),
                textInput('value', 'Enter value to compare with'),
                actionButton("filter_button", "Apply")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'Delete a Column'",
                uiOutput("select_name_to_delete"),
                actionButton("del_button", "Apply")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'Handle Missing Data'",
                radioButtons("fill_delete", "Operation",
                             choices = c(Fill = "fill",
                                         Delete = "delete"),
                             selected = "fill"),
                actionButton("NA_button", "Apply")
              ),
              # CSV parsing options only make sense for user-uploaded files
              conditionalPanel(
                condition = "input.tidy_type == 'General Formatting'&&input.own == true",
                checkboxInput("header", "Header", TRUE),
                radioButtons("sep", "Separator",
                             choices = c(Comma = ",",
                                         Semicolon = ";",
                                         Tab = "\t"),
                             selected = ","),
                radioButtons("quote", "Quote",
                             choices = c(None = "",
                                         "Double Quote" = '"',
                                         "Single Quote" = "'"),
                             selected = '"'),
                hr(),
                radioButtons("disp", "Display",
                             choices = c(Head = "head",
                                         All = "all"),
                             selected = "head"),
                hr(),
                actionButton("tidy_button", "Tidy")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'Arrange'",
                uiOutput("select_name_to_arrange"),
                radioButtons("order", "Order",
                             choices = c(Descending = "descending",
                                         Ascending = "ascending"),
                             selected = "ascending"),
                actionButton("arr_button", "Apply")
              )
            ),
            mainPanel(
              conditionalPanel(
                condition = "input.tidy_type == 'Separate'||input.tidy_type == 'Standardize'||input.tidy_type == 'Arrange'||input.tidy_type == 'Filter'||input.tidy_type == 'Handle Missing Data'||input.tidy_type == 'Delete a Column'",
                mainPanel(DT::dataTableOutput("table_tidy"))
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'General Formatting' && input.own == true",
                tableOutput("contents")
              ),
              conditionalPanel(
                condition = "input.tidy_type == 'General Formatting' && input.own == false",
                h4("Example datasets are already formatted according to the app!")
              )
            )
          )
        ),
        # Step 3 -- Transform: derive a new column from one or two existing
        # columns and/or a constant
        tabPanel("Transform",
          sidebarLayout(
            sidebarPanel(
              radioButtons(inputId = "op", label = "Select the operation", choices = list("X+Y", "X-Y", "a*X", "X^a")),
              hr(),
              uiOutput("ColumnSelector_xx"),
              conditionalPanel(
                condition = "input.op == 'a*X' || input.op == 'X^a'",
                numericInput("const", "Choose constant a", value = 1, step = 0.1)
              ),
              conditionalPanel(
                condition = "input.op == 'X+Y' || input.op == 'X-Y'",
                uiOutput("ColumnSelector_yy")
              ),
              hr(),
              textInput('NewCol', 'Enter new column name'),
              actionButton("btn", "Add column")
            ),
            mainPanel(
              DT::dataTableOutput("mytable2_make")
            )
          )
        ),
        # Step 4 -- Visualization: histogram, numeric summary, or t-test
        # (one- or two-sample); outputs are rendered by the matching server code
        tabPanel("Visualization",
          sidebarLayout(
            sidebarPanel(
              selectInput("plot_type", "Choose visualization:", choices = c("Histogram", "Numerical", "T-Test")),
              hr(),
              conditionalPanel(
                condition = "input.plot_type == 'T-Test'",
                radioButtons("sample",
                             "Please choose one sample t test or two sample t test:",
                             choices = c(Two = "twoSamp",
                                         One = "oneSamp"))
              ),
              uiOutput("ColumnSelector"),
              conditionalPanel(
                condition = "input.plot_type == 'Histogram'",
                sliderInput("bins","Number of bins:",min=1,max=50,value=30)
              ),
              conditionalPanel(
                condition = "input.plot_type == 'Numerical'"
              ),
              conditionalPanel(
                condition = "input.plot_type == 'T-Test' && input.sample == 'twoSamp'",
                uiOutput("sample_two")
              ),
              conditionalPanel(
                condition = "input.plot_type == 'T-Test' && input.sample == 'oneSamp'",
                numericInput("mu", label = "Input null hypotesis mean:", value = 1, step = 0.1)
              ),
              actionButton("visual_button", "Code")
            ),
            mainPanel(
              conditionalPanel(
                condition = "input.plot_type == 'Histogram'",
                plotOutput("distPlot")
              ),
              conditionalPanel(
                condition = "input.plot_type == 'Numerical'",
                verbatimTextOutput("summary")
              ),
              conditionalPanel(
                condition = "input.plot_type == 'T-Test'",
                h2("Key summary statistics"),
                p("The observed sample statistics were:"),
                tableOutput('parametric'),
                h2("Hypothesis of the t-test"),
                p("The observed t test statistic:"),
                textOutput('tvalue'),
                p("A low P value suggests that your sample provides enough evidence that you can reject the null hypothesis for the entire population."),
                textOutput('pvalue')
              )
            )
          )
        ),
        # Step 5 -- Model: k-means style clustering on two chosen columns
        tabPanel("Model",
          sidebarLayout(
            sidebarPanel(
              uiOutput("ColumnSelector_x"),
              uiOutput("ColumnSelector_y"),
              hr(),
              numericInput('clusters', 'Cluster count', 3,
                           min = 1, max = 9),
              actionButton("model_button", "Code")
            ),
            mainPanel(
              plotOutput('plot_model')
            )
          )
        ),
        # Step 6 -- Communicate: download the plot, model image, or dataset
        tabPanel("Communicate",
          sidebarLayout(
            sidebarPanel(
              selectInput("action", "Choose action:", choices = c("Save plot", "Save dataset", "Save model")),
              hr(),
              conditionalPanel(
                condition = "input.action == 'Save plot'",
                radioButtons(inputId = "var3", label = "Select the file type", choices = list("png", "pdf")),
                downloadButton("down_plot", label = "Download plot")
              ),
              conditionalPanel(
                condition = "input.action == 'Save model'",
                radioButtons(inputId = "var5", label = "Select the file type", choices = list("png", "pdf")),
                downloadButton("down_model", label = "Download model")
              ),
              conditionalPanel(
                condition = "input.action == 'Save dataset'",
                radioButtons(inputId = "var4", label = "Select the file type", choices = list("Excel (CSV)", "Text (TSV)", "Text (space separated)", "Doc")),
                downloadButton("down_data", label = "Download data"),
                actionButton("save_button_code", "Code")
              )
            ),
            mainPanel(
              conditionalPanel(
                condition = "input.action == 'Save plot'",
                plotOutput("savePlot")
              ),
              conditionalPanel(
                condition = "input.action == 'Save model'",
                plotOutput("saveModel")
              ),
              conditionalPanel(
                condition = "input.action == 'Save dataset'",
                h4("Head of the new dataset"),
                tableOutput("contents2")
              )
            )
          )
        )
      )
    ),
    tabPanel("Underlying Code"
    ),
    navbarMenu("More",
      tabPanel("About",
        h4("This is a project of Srdjan Milojevic as a part of Business Analysis Seminar.")
      )
    )
  ),
  # live echo of the generated R code for the current step
  h4("Here is the code..."),
  aceEditor("myEditor", "", mode = "r", readOnly = TRUE, theme = "chrome")
)
)
|
/ui.R
|
permissive
|
srdjaner/Shiny-app
|
R
| false
| false
| 16,891
|
r
|
library(shiny)
library(shinyAce)
library(tidyverse)
library(ggplot2)
library(DT)
library(dplyr)
shinyUI(fluidPage(
#titlePanel("RStudio shiny app for communicating a data science process."),
navbarPage("RStudio Shiny App",
tabPanel("App",
navlistPanel(
"Six Steps",
widths = c(2, 8),
tabPanel("Import",
sidebarLayout(
sidebarPanel(
conditionalPanel(
condition = "input.own == false",
selectInput("dataset", label = "Choose Dataset", choices = c("Iris", "Abalone", "Wine"))
),
conditionalPanel(
condition = "input.own == true",
fileInput('file', 'Choose CSV File',
accept=c('text/csv',
'text/comma-separated-values,text/plain',
'.csv'))
),
checkboxInput("own", "Upload Dataset"),
actionButton("save_button", "Save")
),
mainPanel(DT::dataTableOutput("mytable1")
)
)
),
tabPanel("Tidy",
sidebarLayout(
sidebarPanel(
selectInput("tidy_type", label = "Chose the operation",
choices = c("General Formatting","Separate", "Arrange", "Handle Missing Data", "Delete a Column", "Filter", "Standardize" )),
conditionalPanel(
condition = "input.tidy_type == 'Standardize'",
uiOutput("select_name_to_normalize"),
actionButton("normalize_button", "Apply")
),
conditionalPanel(
condition = "input.tidy_type == 'Separate'",
uiOutput("select_name_to_separate"),
textInput('name_separated1', 'Enter new name 1'),
textInput('name_separated2', 'Enter new name 2'),
actionButton("sep_button", "Apply")
),
conditionalPanel(
condition = "input.tidy_type == 'Filter'",
uiOutput("select_name_to_filter"),
radioButtons(inputId = "comparison", label = "Select the operation", choices = list("=", ">", "<")),
textInput('value', 'Enter value to compare with'),
actionButton("filter_button", "Apply")
),
conditionalPanel(
condition = "input.tidy_type == 'Delete a Column'",
uiOutput("select_name_to_delete"),
actionButton("del_button", "Apply")
),
conditionalPanel(
condition = "input.tidy_type == 'Handle Missing Data'",
radioButtons("fill_delete", "Operation",
choices = c(Fill = "fill",
Delete = "delete"),
selected = "fill"),
actionButton("NA_button", "Apply")
),
conditionalPanel(
condition = "input.tidy_type == 'General Formatting'&&input.own == true",
checkboxInput("header", "Header", TRUE),
radioButtons("sep", "Separator",
choices = c(Comma = ",",
Semicolon = ";",
Tab = "\t"),
selected = ","),
radioButtons("quote", "Quote",
choices = c(None = "",
"Double Quote" = '"',
"Single Quote" = "'"),
selected = '"'),
hr(),
radioButtons("disp", "Display",
choices = c(Head = "head",
All = "all"),
selected = "head"),
hr(),
actionButton("tidy_button", "Tidy")
),
conditionalPanel(
condition = "input.tidy_type == 'Arrange'",
uiOutput("select_name_to_arrange"),
radioButtons("order", "Order",
choices = c(Descending = "descending",
Ascending = "ascending"),
selected = "ascending"),
actionButton("arr_button", "Apply")
)
),
mainPanel(
conditionalPanel(
condition = "input.tidy_type == 'Separate'||input.tidy_type == 'Standardize'||input.tidy_type == 'Arrange'||input.tidy_type == 'Filter'||input.tidy_type == 'Handle Missing Data'||input.tidy_type == 'Delete a Column'",
mainPanel(DT::dataTableOutput("table_tidy"))
),
conditionalPanel(
condition = "input.tidy_type == 'General Formatting' && input.own == true",
tableOutput("contents")
),
conditionalPanel(
condition = "input.tidy_type == 'General Formatting' && input.own == false",
h4("Example datasets are already formatted according to the app!")
)
)
)
),
tabPanel("Transform",
sidebarLayout(
sidebarPanel(
radioButtons(inputId = "op", label = "Select the operation", choices = list("X+Y", "X-Y", "a*X", "X^a")),
hr(),
uiOutput("ColumnSelector_xx"),
conditionalPanel(
condition = "input.op == 'a*X' || input.op == 'X^a'",
numericInput("const", "Choose constant a", value = 1, step = 0.1)
),
conditionalPanel(
condition = "input.op == 'X+Y' || input.op == 'X-Y'",
uiOutput("ColumnSelector_yy")
),
hr(),
textInput('NewCol', 'Enter new column name'),
actionButton("btn", "Add column")
),
mainPanel(
DT::dataTableOutput("mytable2_make")
)
)
),
tabPanel("Visualization",
sidebarLayout(
sidebarPanel(
selectInput("plot_type", "Choose visualization:", choices = c("Histogram", "Numerical", "T-Test")),
hr(),
conditionalPanel(
condition = "input.plot_type == 'T-Test'",
radioButtons("sample",
"Please choose one sample t test or two sample t test:",
choices = c(Two = "twoSamp",
One = "oneSamp"))
),
uiOutput("ColumnSelector"),
conditionalPanel(
condition = "input.plot_type == 'Histogram'",
sliderInput("bins","Number of bins:",min=1,max=50,value=30)
),
conditionalPanel(
condition = "input.plot_type == 'Numerical'"
),
conditionalPanel(
condition = "input.plot_type == 'T-Test' && input.sample == 'twoSamp'",
uiOutput("sample_two")
),
conditionalPanel(
condition = "input.plot_type == 'T-Test' && input.sample == 'oneSamp'",
numericInput("mu", label = "Input null hypotesis mean:", value = 1, step = 0.1)
),
actionButton("visual_button", "Code")
),
mainPanel(
conditionalPanel(
condition = "input.plot_type == 'Histogram'",
plotOutput("distPlot")
),
conditionalPanel(
condition = "input.plot_type == 'Numerical'",
verbatimTextOutput("summary")
),
conditionalPanel(
condition = "input.plot_type == 'T-Test'",
h2("Key summary statistics"),
p("The observed sample statistics were:"),
tableOutput('parametric'),
h2("Hypothesis of the t-test"),
p("The observed t test statistic:"),
textOutput('tvalue'),
p("A low P value suggests that your sample provides enough evidence that you can reject the null hypothesis for the entire population."),
textOutput('pvalue')
)
)
)
),
tabPanel("Model",
sidebarLayout(
sidebarPanel(
uiOutput("ColumnSelector_x"),
uiOutput("ColumnSelector_y"),
hr(),
numericInput('clusters', 'Cluster count', 3,
min = 1, max = 9),
actionButton("model_button", "Code")
),
mainPanel(
plotOutput('plot_model')
)
)
),
tabPanel("Communicate",
sidebarLayout(
sidebarPanel(
selectInput("action", "Choose action:", choices = c("Save plot", "Save dataset", "Save model")),
hr(),
conditionalPanel(
condition = "input.action == 'Save plot'",
radioButtons(inputId = "var3", label = "Select the file type", choices = list("png", "pdf")),
downloadButton("down_plot", label = "Download plot")
),
conditionalPanel(
condition = "input.action == 'Save model'",
radioButtons(inputId = "var5", label = "Select the file type", choices = list("png", "pdf")),
downloadButton("down_model", label = "Download model")
),
conditionalPanel(
condition = "input.action == 'Save dataset'",
radioButtons(inputId = "var4", label = "Select the file type", choices = list("Excel (CSV)", "Text (TSV)", "Text (space separated)", "Doc")),
downloadButton("down_data", label = "Download data"),
actionButton("save_button_code", "Code")
)
),
mainPanel(
conditionalPanel(
condition = "input.action == 'Save plot'",
plotOutput("savePlot")
),
conditionalPanel(
condition = "input.action == 'Save model'",
plotOutput("saveModel")
),
conditionalPanel(
condition = "input.action == 'Save dataset'",
h4("Head of the new dataset"),
tableOutput("contents2")
)
)
)
)
)
),
tabPanel("Underlying Code"
),
navbarMenu("More",
tabPanel("About",
h4("This is a project of Srdjan Milojevic as a part of Business Analysis Seminar.")
)
)
),
h4("Here is the code..."),
aceEditor("myEditor", "", mode = "r", readOnly = TRUE, theme = "chrome")
)
)
|
\name{O.mykiss}
\alias{O.mykiss}
\docType{data}
\title{Test data from a 21 day fish test}
\description{
Test data from a 21 day fish test following the guidelines OECD GL204,
using the test organism Rainbow trout \emph{Oncorhynchus mykiss}.
}
\usage{data(O.mykiss)}
\format{
A data frame with 70 observations on the following 2 variables.
\describe{
\item{\code{conc}}{a numeric vector of concentrations (mg/l)}
\item{\code{weight}}{a numeric vector of wet weights (g)}
}
}
\details{
Weights are measured after 28 days.
}
\source{
Organisation for Economic Co-operation and Development (OECD) (2006)
\emph{CURRENT APPROACHES IN THE STATISTICAL ANALYSIS OF ECOTOXICITY DATA: A GUIDANCE TO APPLICATION - ANNEXES},
Paris (p. 65).
}
\references{}
\examples{
O.mykiss
}
\keyword{datasets}
|
/man/O.mykiss.Rd
|
no_license
|
dedream/nlrwr
|
R
| false
| false
| 864
|
rd
|
\name{O.mykiss}
\alias{O.mykiss}
\docType{data}
\title{Test data from a 21 day fish test}
\description{
Test data from a 21 day fish test following the guidelines OECD GL204,
using the test organism Rainbow trout \emph{Oncorhynchus mykiss}.
}
\usage{data(O.mykiss)}
\format{
A data frame with 70 observations on the following 2 variables.
\describe{
\item{\code{conc}}{a numeric vector of concentrations (mg/l)}
\item{\code{weight}}{a numeric vector of wet weights (g)}
}
}
\details{
Weights are measured after 28 days.
}
\source{
Organisation for Economic Co-operation and Development (OECD) (2006)
\emph{CURRENT APPROACHES IN THE STATISTICAL ANALYSIS OF ECOTOXICITY DATA: A GUIDANCE TO APPLICATION - ANNEXES},
Paris (p. 65).
}
\references{}
\examples{
O.mykiss
}
\keyword{datasets}
|
# Exploratory analysis of the Titanic training data: visualize missingness,
# outcome balance, and the distributions of key predictors.
# NOTE(review): install.packages() runs on every execution of the script;
# consider guarding it with requireNamespace() or moving it to setup docs.
df.train<-read.csv('D:/R/data sets/titanic.csv')
head(df.train)
#EDA
#helps in visualizing the NA values
install.packages('Amelia')
library(Amelia)
missmap(df.train, main = "Titanic Training Data - Missing Maps", col=c('yellow','black'), legend=FALSE)
#name of map #first colour (yellow) shows missing values, second (black) shows observed values
library(ggplot2)
ggplot(df.train,aes(Survived)) + geom_bar() #0 dead, 1 alive
ggplot(df.train,aes(Pclass)) + geom_bar(aes(fill=factor(Pclass)),alpha=0.8)
ggplot(df.train,aes(Sex)) + geom_bar(aes(fill=factor(Sex)),alpha=0.8)
ggplot(df.train,aes(Age)) + geom_histogram(fill='blue',bins=20,alpha=0.8) #NA ages are dropped from the histogram
#Data Cleaning
#since Age contains many missing values, impute a typical age per passenger class
pl<-ggplot(df.train,aes(Pclass,Age)) + geom_boxplot(aes(group=Pclass,fill=factor(Pclass),alpha=0.5))
pl+ scale_y_continuous(breaks = seq(min(0),max(80),by=2)) #show y axis ticks at 0,2,4,6,...
#Impute missing ages using a representative age for each passenger class
#(values read off the class/age boxplot above: class 1 -> 37, class 2 -> 29,
#any other class -> 24).
#  age   - numeric vector of ages, possibly containing NA
#  class - numeric vector of passenger classes, same length as age
#Returns a numeric vector like `age` with every NA replaced by the class value.
#Vectorized with logical subsetting instead of an element-by-element loop;
#unlike the loop version this also handles zero-length input cleanly.
impute_age <- function(age, class)
{
  out <- age
  miss <- is.na(age)
  out[miss & class == 1] <- 37
  out[miss & class == 2] <- 29
  out[miss & !(class %in% c(1, 2))] <- 24
  return(out)
}
#Impute the missing ages using the per-class values estimated from the boxplot
df.train$Age<-impute_age(df.train$Age,df.train$Pclass)
#building the model
str(df.train)
#a few columns are not useful predictors: PassengerId,Name,Ticket,Fare,Cabin,Embarked
head(df.train,2)
library(dplyr)
df.train<-select(df.train, -PassengerId, -Name, -Ticket, -Cabin) #we can remove some more if we want
head(df.train,2)
#convert categorical columns into factors so glm treats them as categories
df.train$Survived<-factor(df.train$Survived)
df.train$Pclass<-factor(df.train$Pclass)
df.train$Parch<-factor(df.train$Parch)
df.train$SibSp<-factor(df.train$SibSp)
str(df.train)
#split the data 70/30 into train and test sets, stratified on Survived
library(caTools)
set.seed(101)
split=sample.split(df.train$Survived, SplitRatio = 0.70)
final.train=subset(df.train, split==TRUE)
final.test=subset(df.train,split==FALSE)
#fit the logistic regression model on all remaining predictors
final.log.model<-glm(formula = Survived ~ .,family = binomial(link = 'logit'),data = final.train)
summary(final.log.model)
#prediction: type='response' returns predicted probabilities of survival
fitted.probabilties<-predict(final.log.model, newdata=final.test, type='response')
#classify as 1 (survived) when the predicted probability exceeds 0.5
fitted.results<-ifelse(fitted.probabilties > 0.5, 1,0)
misClasificError<-mean(fitted.results != final.test$Survived)
print(paste('Accuracy', 1-misClasificError))
#creating the confusion matrix
table(final.test$Survived, fitted.probabilties>0.5) #if we change the probability cutoff the matrix will also change
#rows: 0 = dead; columns: FALSE = predicted dead; off-diagonal cells are misclassifications
|
/titanic.R
|
no_license
|
AnushreeChakraborty/Logistic-Regression
|
R
| false
| false
| 2,678
|
r
|
df.train<-read.csv('D:/R/data sets/titanic.csv')
head(df.train)
#EDA
#helps in visualizing the NA values
install.packages('Amelia')
library(Amelia)
missmap(df.train, main = "Titanic Training Data - Missing Maps", col=c('yellow','black'), legend=FALSE)
#name of map #first colour shows if its missing, 2nd colour, if its present
library(ggplot2)
ggplot(df.train,aes(Survived)) + geom_bar() #0 dead, 1 alive
ggplot(df.train,aes(Pclass)) + geom_bar(aes(fill=factor(Pclass)),alpha=0.8)
ggplot(df.train,aes(Sex)) + geom_bar(aes(fill=factor(Sex)),alpha=0.8)
ggplot(df.train,aes(Age)) + geom_histogram(fill='blue',bins=20,alpha=0.8) #they remved the NA values for the graph
#Data Cleaning
pl<-ggplot(df.train,aes(Pclass,Age)) + geom_boxplot(aes(group=Pclass,fill=factor(Pclass),alpha=0.5)) #since age containns a lot of missing values we try to put the mean for every class
pl+ scale_y_continuous(breaks = seq(min(0),max(80),by=2)) #showing the y values in the axis, 0,2,4,6...
#Replace missing ages with a representative age for the passenger's class
#(class 1 -> 37, class 2 -> 29, otherwise 24); known ages pass through
#unchanged.
impute_age <- function(age, class)
{
  result <- age
  #Only the positions where age is missing need to be filled in
  for (idx in which(is.na(age)))
  {
    if (class[idx] == 1) {
      result[idx] <- 37
    } else if (class[idx] == 2) {
      result[idx] <- 29
    } else {
      result[idx] <- 24
    }
  }
  return(result)
}
df.train$Age<-impute_age(df.train$Age,df.train$Pclass)
#builiding the model
str(df.train)
#few parameters are not required like PassengerId,Name,Ticket,Fare,Cabin,Embarked
head(df.train,2)
library(dplyr)
df.train<-select(df.train, -PassengerId, -Name, -Ticket, -Cabin) #we can remove some more if we want
head(df.train,2)
#convert into factor to make the data better
df.train$Survived<-factor(df.train$Survived)
df.train$Pclass<-factor(df.train$Pclass)
df.train$Parch<-factor(df.train$Parch)
df.train$SibSp<-factor(df.train$SibSp)
str(df.train)
#split
library(caTools)
set.seed(101)
split=sample.split(df.train$Survived, SplitRatio = 0.70)
final.train=subset(df.train, split==TRUE)
final.test=subset(df.train,split==FALSE)
#model
final.log.model<-glm(formula = Survived ~ .,family = binomial(link = 'logit'),data = final.train)
summary(final.log.model)
#prediction
fitted.probabilties<-predict(final.log.model, newdata=final.test, type='response')
#fixing 0 or 1
fitted.results<-ifelse(fitted.probabilties > 0.5, 1,0)
misClasificError<-mean(fitted.results != final.test$Survived)
print(paste('Accuracy', 1-misClasificError))
#creating the confusion matrix
table(final.test$Survived, fitted.probabilties>0.5) #if we change that prob value the matrix will also change
#0 dead FALSE prediction that person is dead but he didnt die
|
#==================
#CreateHouseholds.R
#==================
#<doc>
## CreateHouseholds Module
#### September 6, 2018
#
#This module creates a *Household* table in the datastore and populates the table with datasets characterizing simulated households. Each entry represents a simulated household. Household datasets are created for the numbers of persons in each of 6 age categories (0-14, 15-19, 20-29, 30-54, 55-64, and 65+) and the total number of persons in the household. Two types of households are created: *regular* households (i.e. not persons in group quarters) and *group quarters* households (i.e. persons in group quarters such as college dormatories). Households are created from Azone level demographic forecasts of the number of persons in each of the 6 age groups for *regular* households and for the group quarters population. In addition, users may optionally specify an average household size and/or the proportion of households that are single person households. The module creates households that matches the age forecast and the optional household size and single person inputs (close but not exact). The module tabulates the number of households created in each Azone.
#
### Model Parameter Estimation
#
#This model has just one parameter object, a matrix of the probability that a person in each age group is in one of several hundred *regular* household types. The matrix is created by selecting from the PUMS data the records for the most frequently observed household types. The default is to select the household types which account for 99% of all households. Each household type is denoted by the number of persons in each age group in the household. For example, a household that has 2 persons of age 0-14 and 2 persons of age 20-29 would be designated as type *2-0-2-0-0-0*. The numbers represent the number of persons in each of the age groups in the order listed above with the hyphens separating the age groups. These household types comprise the rows of the probability matrix. The columns of the matrix correspond to the 6 age groups. Each column of the matrix sums to 1.
#
#This probability matrix is created from Census public use microsample (PUMS) data that is compiled into a R dataset (HhData_df) when the VESimHouseholds package is built. The data that is supplied with the VESimHouseholds package downloaded from the official VisionEval repository may be used, but it is preferrable to use data for the region being modeled. How this is done is explained in the documentation for the *CreateEstimationDatasets.R* script. The matrix is created by summing the number of persons each each age group and each of the household types using the household weights in the PUMS data. The probability that a person in each age group would be in each of the household type is the number of persons in the household type divided by the total number of persons in the age group.
#
#No model parameters are used to create *group quarters* households because those households are just composed of single persons.
#
### How the Module Works
#
#For *regular* households, the module uses the matrix of probabilities that a person in each age group is present in the most frequently observed household types along with a forecast of number of persons in each age group to synthesize a likely set of *regular* households. The module starts by assigning the forecast population by age group to household types using the probability matrix that has been estimated. It then carries out the following interative process to create a set of households that is internally consistent and that matches (approximately) the optional inputs for household size and proportion of single-person households:
#
#1) For each household type, the number of households of the type is calculated from the number of persons of each age group assigned to the type. For example if 420 persons age 0-14 and 480 persons age 20-29 are assigned to household type *2-0-2-0-0-0*, that implies either 210 or 240 households of that type. Where the number of households of the type implied by the persons assigned is not consistent as in this example, the mean of the implied number of households is used. In the example, this would be 225 households. This is the *resolved* number of households. For all household types, the resolved number of households is compared to the maximum number of implied households (in this case 225 is compared to 240) if ratio of these values differs from 1 in absolute terms by less than 0.001 for all household types, the iterative process ends.
#
#2) If a household size target has been specified, the average household size for the resolved households is computed. The ratio of the target household size and the average household size for the resolved households is computed. The number of resolved households in household types having sizes greater than the target household size is multiplied by this ratio. For example, if target household size is 2.5 and average household size for the resolved households is 3, th number of household for household types having more than 2.5 persons (i.e. 3 or more persons) would be multiplied by *2.5 / 3*.
#
#3) If a target for the proportion of households that are 1-person households is set, the difference between the number of 1-person households that there should be and the number that have been assigned is calculated. That difference is added across all 1-person household types (e.g. if the difference is 100, since there are 5 1-person household types, 20 is added to each of those types). The difference is substracted across all other household types.
#
#4) Using the resolved number of households of each type (as adjusted to match household size and 1-person household targets), the number of persons in each age group in each household type is computed. Continuing with the example, 225 households in household type *2-0-2-0-0-0* means that there must be 550 persons of age 0-14 and 550 persons of age 20-29 in that household type. This is called the *resolved* population. An updated probability matrix is computed using the resolved population by housing type.
#
#5) The difference between the total number of persons by age group in the resolved population and the forecast number of persons by age group is calculated and that difference is allocated to household types using the updated probability matrix. Then calculation returns to the first iteration step.
#
#After the iterations have been completed, the numbers of households by type are rounded to create whole number amounts. Then individual household records are created for each.
#</doc>
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#This model has just one parameter object, a matrix of the probability that a
#person in each age group is in one of several hundred household types.
#Each household type is denoted by the number of persons in each age group in
#the household. The rows of the matrix correspond to the household types.
#The columns of the matrix correspond to the 6 age groups. Each column of the
#matrix sums to 1. The process selects the most frequently observed households.
#The default is to select the most frequent households which account for 99% of
#all households.
#Define a function to estimate household size proportion parameters
#------------------------------------------------------------------
#calcHhAgeTypes: estimate the probability that a person in each of 6 age
#groups belongs to each of the most prevalent 'regular' household types.
#  HhData_df - PUMS household data frame with HhType, HhWeight, and person
#              counts by age group columns
#  Threshold - cumulative share of (weighted) households to retain (default
#              0.99, i.e. the household types accounting for 99% of households)
#Returns a matrix with one row per retained household type (named by person
#counts joined with hyphens, e.g. "2-0-2-0-0-0") and one column per age
#group; each column sums to 1.
calcHhAgeTypes <- function(HhData_df, Threshold = 0.99) {
  #Only 'regular' (non-group-quarters) households are used
  Hh_df <- HhData_df[HhData_df$HhType == "Reg",]
  Ag <-
    c("Age0to14",
      "Age15to19",
      "Age20to29",
      "Age30to54",
      "Age55to64",
      "Age65Plus")
  #Create vector of household type names (person counts joined with hyphens)
  HhType_ <-
    apply(Hh_df[, Ag], 1, function(x)
      paste(x, collapse = "-"))
  #Expand the HH types using HH weights and select most prevalent households
  ExpHhType_ <- rep(HhType_, Hh_df$HhWeight)
  #Define function to identify most prevalent households: tabulate the types,
  #sort counts in descending order, and keep the types whose cumulative
  #proportion is within the cutoff
  idMostPrevalent <- function(Types_, Cutoff) {
    TypeTab_ <- rev(sort(tapply(Types_, Types_, length)))
    TypeProp_ <- cumsum(TypeTab_ / sum(TypeTab_))
    names(TypeProp_[TypeProp_ <= Cutoff])
  }
  #Select most prevalent households
  SelHhTypes_ <- idMostPrevalent(ExpHhType_, Threshold)
  SelHh_df <- Hh_df[HhType_ %in% SelHhTypes_, ]
  SelHhType_ <-
    apply(SelHh_df[, Ag], 1, function(x)
      paste(x, collapse = "-"))
  #Apply household weights to persons by age
  WtHhPop_df <- sweep(SelHh_df[, Ag], 1, SelHh_df$HhWeight, "*")
  #Tabulate persons by age group by household type
  AgeTab_ls <- lapply(WtHhPop_df, function(x) {
    tapply(x, SelHhType_, function(x)
      sum(as.numeric(x)))
  })
  AgeTab_HtAp <- do.call(cbind, AgeTab_ls)
  #Calculate and return matrix of probabilities (each age column sums to 1)
  sweep(AgeTab_HtAp, 2, colSums(AgeTab_HtAp), "/")
}
#Create and save household type probability parameters
#-----------------------------------------------------
#Estimate the age-group-by-household-type probability matrix from the
#region's PUMS household data (built by CreateEstimationDatasets.R) and
#save it as package data.
load("data/Hh_df.rda")
HtProb_HtAp <- calcHhAgeTypes(Hh_df)
#' Household age-group probabilities by household type
#'
#' A matrix of probabilities that a person in each of 6 age groups is present
#' in each of the most prevalent household types.
#'
#' @format A matrix having one row per household type (950 rows for Oregon
#'   data) and 6 columns (one per age group):
#' @source CreateHouseholds.R script.
"HtProb_HtAp"
usethis::use_data(HtProb_HtAp, overwrite = TRUE)
rm(calcHhAgeTypes, Hh_df)
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
#Specifications list consumed by the visioneval framework: where the module
#runs (RunBy), the table it creates (NewSetTable), the scenario input files
#it reads (Inp), the datastore items it gets (Get), and the datasets it
#writes back (Set).
CreateHouseholdsSpecifications <- list(
  #Level of geography module is applied at
  RunBy = "Region",
  #Specify new tables to be created by Inp if any
  #Specify new tables to be created by Set if any
  NewSetTable = items(
    item(
      TABLE = "Household",
      GROUP = "Year"
    )
  ),
  #Specify input data
  #Household (non-group-quarters) population by age: azone_hh_pop_by_age.csv
  Inp = items(
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      FILE = "azone_hh_pop_by_age.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION =
        items(
          "Household (non-group quarters) population in 0 to 14 year old age group",
          "Household (non-group quarters) population in 15 to 19 year old age group",
          "Household (non-group quarters) population in 20 to 29 year old age group",
          "Household (non-group quarters) population in 30 to 54 year old age group",
          "Household (non-group quarters) population in 55 to 64 year old age group",
          "Household (non-group quarters) population in 65 or older age group")
    ),
    #Optional household composition targets: azone_hhsize_targets.csv
    #(NA is not prohibited, so these targets may be left blank)
    item(
      NAME = "AveHhSize",
      FILE = "azone_hhsize_targets.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "PRSN/HH",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION = "Average household size of households (non-group quarters)"
    ),
    item(
      NAME = "Prop1PerHh",
      FILE = "azone_hhsize_targets.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "proportion of households",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION = "Proportion of households (non-group quarters) having only one person"
    ),
    #Group quarters population by age: azone_gq_pop_by_age.csv
    item(
      NAME =
        items("GrpAge0to14",
              "GrpAge15to19",
              "GrpAge20to29",
              "GrpAge30to54",
              "GrpAge55to64",
              "GrpAge65Plus"),
      FILE = "azone_gq_pop_by_age.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION =
        items("Group quarters population in 0 to 14 year old age group",
              "Group quarters population in 15 to 19 year old age group",
              "Group quarters population in 20 to 29 year old age group",
              "Group quarters population in 30 to 54 year old age group",
              "Group quarters population in 55 to 64 year old age group",
              "Group quarters population in 65 or older age group")
    )
  ),
  #Specify data to be loaded from data store
  Get = items(
    item(
      NAME = "Azone",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Marea",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "AveHhSize",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "PRSN/HH",
      PROHIBIT = c("< 0"),
      ISELEMENTOF = ""
    ),
    #NOTE(review): this Get spec prohibits NA while the corresponding Inp spec
    #allows NA (optional target) -- confirm this asymmetry is intentional
    item(
      NAME = "Prop1PerHh",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "proportion of households",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME =
        items("GrpAge0to14",
              "GrpAge15to19",
              "GrpAge20to29",
              "GrpAge30to54",
              "GrpAge55to64",
              "GrpAge65Plus"),
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    )
  ),
  #Specify data to saved in the data store
  Set = items(
    item(
      NAME = "NumHh",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "households",
      UNITS = "HH",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of households (non-group quarters)"
    ),
    item(
      NAME = "NumGq",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of people in non-institutional group quarters"
    ),
    item(
      NAME =
        items("HhId",
              "Azone",
              "Marea"),
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      NAVALUE = "NA",
      PROHIBIT = "",
      ISELEMENTOF = "",
      DESCRIPTION =
        items("Unique household ID",
              "Azone ID",
              "Marea ID")
    ),
    item(
      NAME = "HhSize",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "<= 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of persons"
    ),
    #NOTE(review): DESCRIPTION uses list() here while sibling items use
    #items(); presumably equivalent in this framework -- confirm
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION =
        list("Persons in 0 to 14 year old age group",
             "Persons in 15 to 19 year old age group",
             "Persons in 20 to 29 year old age group",
             "Persons in 30 to 54 year old age group",
             "Persons in 55 to 64 year old age group",
             "Persons in 65 or older age group")
    ),
    item(
      NAME = "HhType",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "category",
      NAVALUE = "NA",
      PROHIBIT = "",
      ISELEMENTOF = "",
      DESCRIPTION = "Coded household age composition (e.g. 2-1-0-2-0-0) or Grp for group quarters"
    )
  )
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CreateHouseholds module
#'
#' A list containing specifications for the CreateHouseholds module.
#'
#' @format A list containing 5 components:
#' \describe{
#'  \item{RunBy}{the level of geography that the module is run at}
#'  \item{NewSetTable}{new table to be created for datasets specified in the
#'  'Set' specifications}
#'  \item{Inp}{scenario input data to be loaded into the datastore for this
#'  module}
#'  \item{Get}{module inputs to be read from the datastore}
#'  \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CreateHouseholds.R script.
"CreateHouseholdsSpecifications"
usethis::use_data(CreateHouseholdsSpecifications, overwrite = TRUE)
rm(CreateHouseholdsSpecifications)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#This function creates households for the entire model region. A household table
#is created and this is populated with the household size and persons by age
#characteristics of all the households.
#Function that creates set of households for an Azone
#----------------------------------------------------
#' Create simulated households for an Azone
#'
#' \code{createHhByAge} creates a set of simulated households for an Azone that
#' reasonably represents a population census or forecast of persons in each of 6
#' age categories.
#'
#' This function creates a set of simulated households for an Azone that
#' reasonably represents the population census or forecast of persons in each of
#' 6 age categories: 0 to 14, 15 to 19, 20 to 29, 30 to 54, 55 to 64, and 65
#' plus. The function uses a matrix of probabilities that a person in each age
#' group might be present in each of 524 household types. This matrix
#' (HtProb_HtAp) is estimated by the calcHhAgeTypes function which is described
#' above. Household types are distinguished by the number of persons in each age
#' category in the household. The function fits the distribution of households
#' by type by iteratively applying the probability matrix to the population,
#' reconciling households allocated by type based on the population assigned,
#' recomputing the assigned population, calculating the difference between the
#' assigned population by age and the input population by age, recalculating the
#' probabilities, and assigning the population difference. This process
#' continues until the difference between the assigned population and the input
#' population by age group is less than 0.1%. After the households are
#' synthesized, the size of each household is calculated.
#'
#' @param Prsn_Ap A named vector containing the number of persons in each age
#' category.
#' @param MaxIter An integer specifying the maximum number of iterations the
#' algorithm should use to balance and reconcile the population allocation to
#' household types.
#' @param TargetHhSize A double specifying a household size target value or NA
#' if there is no target.
#' @param TargetProp1PerHh A double specifying a target for the proportion of
#' households that are one-person households or NA if there is no target.
#' @return A list containing 7 components. Each component is a vector where each
#' element of a vector corresponds to a simulated household. The components
#' are as follows:
#' Age0to14 - number of persons age 0 to 14 in the household
#' Age15to19 - number of persons age 15 to 19 in the household
#' Age20to29 - number of persons age 20 to 29 in the household
#' Age30to54 - number of persons age 30 to 54 in the household
#' Age 55to64 - number of persons age 55 to 64 in the household
#' Age65Plus - number of persons 65 or older in the household
#' HhSize - total number of persons in the household
#' @name createHhByAge
#' @export
createHhByAge <-
  function(Prsn_Ap,
           MaxIter = 100,
           TargetHhSize = NA,
           TargetProp1PerHh = NA) {
    #Dimension names: Ap = age groups, Ht = household types
    Ap <- colnames(HtProb_HtAp)
    Ht <- rownames(HtProb_HtAp)
    #Place persons by age into household types by multiplying person vector
    #by probabilities
    Prsn_HtAp <- sweep(HtProb_HtAp, 2, Prsn_Ap, "*")
    #Make table of factors to convert persons into households and vice versa
    #by parsing the "n-n-n-n-n-n" type names into persons-per-household counts
    PrsnFactors_Ht_Ap <-
      lapply(strsplit(Ht, "-"), function(x)
        as.numeric(x))
    PrsnFactors_HtAp <- do.call(rbind, PrsnFactors_Ht_Ap)
    dimnames(PrsnFactors_HtAp) <- dimnames(Prsn_HtAp)
    rm(PrsnFactors_Ht_Ap)
    # Calculate household size for each household type
    HsldSize_Ht <- rowSums( PrsnFactors_HtAp )
    #Initial calculation of persons by age for each housing type
    #-----------------------------------------------------------
    #Convert population into households. Each row of Hsld_HtAp contains an
    #estimate of the number of household of the type given the number of persons
    #assigned to the household type
    Hsld_HtAp <- Prsn_HtAp / PrsnFactors_HtAp
    #Division by zero person factors yields NaN; treat those cells as zero
    Hsld_HtAp[is.na(Hsld_HtAp)] <- 0
    MaxHh_Ht <- apply(Hsld_HtAp, 1, max)
    #Iterate until "balanced" set of households is created
    #-----------------------------------------------------
    MaxDiff_ <- numeric(MaxIter)
    HsldSize_ <- numeric(MaxIter)
    for (i in 1:MaxIter) {
      #Resolve differences in household type estimates. For each household type
      #if there is more than one estimate of the number of households, take the
      #mean value of the estimates that are non-zero to determine the number of
      #households of the type.
      ResolveHh_HtAp <- t(apply(Hsld_HtAp, 1, function(x) {
        if (sum(x > 0) > 1) {
          x[x > 0] <- mean(x[x > 0])
        }
        x
      }))
      # Exit when, for every household type, the maximum estimate differs
      # from the resolved estimate by less than 0.1% in relative terms
      ResolveHh_Ht <- apply(ResolveHh_HtAp, 1, max)
      Diff_Ht <- abs(MaxHh_Ht - ResolveHh_Ht)
      PropDiff_Ht <- Diff_Ht / ResolveHh_Ht
      if (all(PropDiff_Ht < 0.001)) break
      MaxDiff_[i] <- max(PropDiff_Ht)
      # Adjust household proportions to match household size target if exists
      if (!is.na(TargetHhSize)) {
        # Calculate average household size and ratio with target household size
        AveHsldSize <-
          sum(ResolveHh_Ht * HsldSize_Ht) / sum(ResolveHh_Ht)
        HsldSize_[i] <- AveHsldSize
        HsldSizeAdj <- TargetHhSize / AveHsldSize
        # Calculate household adjustment factors and adjust households
        # (only household types larger than the target are scaled)
        HsldAdjFactor_Ht <-
          HsldSize_Ht * 0 + 1 # Start with a vector of ones
        HsldAdjFactor_Ht[HsldSize_Ht > TargetHhSize] <- HsldSizeAdj
        ResolveHh_HtAp <-
          sweep(ResolveHh_HtAp, 1, HsldAdjFactor_Ht, "*")
      }
      # Adjust proportion of 1-person households to match target if there is one
      if (!is.na(TargetProp1PerHh)) {
        Hsld_Ht <- round(apply(ResolveHh_HtAp, 1, max))
        NumHh_Sz <- tapply(Hsld_Ht, HsldSize_Ht, sum)
        NumHh <- sum(NumHh_Sz)
        #Number of 1-person households to add (may be negative)
        Add1PerHh <- (TargetProp1PerHh * NumHh) - NumHh_Sz[1]
        Is1PerHh_Ht <- HsldSize_Ht == 1
        #Spread the difference across the 1-person household types...
        Add1PerHh_Ht <-
          Add1PerHh * Hsld_Ht[Is1PerHh_Ht] / sum(Hsld_Ht[Is1PerHh_Ht])
        #...and remove it proportionally from all other household types
        RmOthHh_Ht <-
          -Add1PerHh * Hsld_Ht[!Is1PerHh_Ht] / sum(Hsld_Ht[!Is1PerHh_Ht])
        ResolveHh_HtAp[Is1PerHh_Ht] <-
          ResolveHh_HtAp[Is1PerHh_Ht] + Add1PerHh_Ht
        ResolveHh_HtAp[!Is1PerHh_Ht] <-
          ResolveHh_HtAp[!Is1PerHh_Ht] + RmOthHh_Ht
      }
      #Calculate the number of persons by age group consistent with the resolved
      #numbers of households of each household type
      ResolvePrsn_HtAp <- ResolveHh_HtAp * PrsnFactors_HtAp
      #Convert the resolved persons tabulation into probabilities
      PrsnProb_HtAp <-
        sweep(ResolvePrsn_HtAp, 2, colSums(ResolvePrsn_HtAp), "/")
      #Calculate the difference in the number of persons by age category
      PrsnDiff_Ap <- Prsn_Ap - colSums(ResolvePrsn_HtAp)
      #Allocate extra persons to households based on probabilities
      AddPrsn_HtAp <- sweep(PrsnProb_HtAp, 2, PrsnDiff_Ap, "*")
      #Add the reallocated persons to the resolved persons matrix
      Prsn_HtAp <- ResolvePrsn_HtAp + AddPrsn_HtAp
      # Recalculate number of households by type
      Hsld_HtAp <- Prsn_HtAp/PrsnFactors_HtAp
      Hsld_HtAp[is.na(Hsld_HtAp)] <- 0
      # Calculate the maximum households by each type for convergence check
      MaxHh_Ht <- apply(ResolveHh_HtAp, 1, max)
    }
    #Calculate number of households by household type (rounded to whole numbers)
    Hsld_Ht <- round(apply(ResolveHh_HtAp, 1, max))
    #Calculate persons by age group and household type
    Prsn_HtAp <- sweep(PrsnFactors_HtAp, 1, Hsld_Ht, "*")
    #Convert into a matrix of households (one row per individual household)
    Hsld_Hh <- rep(names(Hsld_Ht), Hsld_Ht)
    Hsld_Hh_Ap <- strsplit(Hsld_Hh, "-")
    Hsld_Hh_Ap <- lapply(Hsld_Hh_Ap, function(x) as.numeric(x))
    Hsld_df <- data.frame(do.call(rbind, Hsld_Hh_Ap))
    names(Hsld_df) <- Ap
    Hsld_df$HhSize <- rowSums(Hsld_df)
    Hsld_df$HhType <-
      apply(Hsld_df[, Ap], 1, function(x) paste(x, collapse = "-"))
    #Randomly order the rows of the matrix and convert into a list of
    #corresponding vectors by age group
    RandomSort <-
      sample(1:nrow(Hsld_df), nrow(Hsld_df), replace = FALSE)
    Hsld_ls <- as.list(Hsld_df[RandomSort, ])
    # Return a list of corresponding age group vectors
    Hsld_ls
  }
#Function that creates group quarters population for an Azone
#------------------------------------------------------------
#' Create group quarters population for an Azone
#'
#' \code{createGroupQtrHhByAge} creates the quarters 'households' for an Azone
#' where each 'household' is a single person in group quarters.
#'
#' This function creates a set of simulated 'households' living in group
#' quaters in an Azone. Each group quarters 'household' is a single person in
#' each of 6 age categories: 0 to 14, 15 to 19, 20 to 29, 30 to 54, 55 to 64,
#' and 65 plus.
#'
#' @param GrpPrsn_Ag A named vector containing the number of persons in each age
#' category.
#' @return A list containing 7 components. Each component is a vector where each
#' element of a vector corresponds to a simulated household. The components
#' are as follows:
#' Age0to14 - number of persons age 0 to 14 in the household
#' Age15to19 - number of persons age 15 to 19 in the household
#' Age20to29 - number of persons age 20 to 29 in the household
#' Age30to54 - number of persons age 30 to 54 in the household
#' Age 55to64 - number of persons age 55 to 64 in the household
#' Age65Plus - number of persons 65 or older in the household
#' HhSize - total number of persons in the household
#' @name createGrpHhByAge
#' @export
createGrpHhByAge <-
  function(GrpPrsn_Ag) {
    #Only synthesize records if there is any group quarters population.
    #(The original tested `sum(GrpPrsn_Ag > 0)`, i.e. the count of non-zero
    #age groups, as a truth value; testing the total population is equivalent
    #for non-negative inputs and makes the intent explicit.)
    if (sum(GrpPrsn_Ag) > 0) {
      #Each group quarters 'household' is one person in exactly one age group,
      #so each age column is an indicator pattern repeated by the age counts
      GrpHh_df <-
        data.frame(
          Age0to14 = as.integer(rep(c(1,0,0,0,0,0), GrpPrsn_Ag)),
          Age15to19 = as.integer(rep(c(0,1,0,0,0,0), GrpPrsn_Ag)),
          Age20to29 = as.integer(rep(c(0,0,1,0,0,0), GrpPrsn_Ag)),
          Age30to54 = as.integer(rep(c(0,0,0,1,0,0), GrpPrsn_Ag)),
          Age55to64 = as.integer(rep(c(0,0,0,0,1,0), GrpPrsn_Ag)),
          Age65Plus = as.integer(rep(c(0,0,0,0,0,1), GrpPrsn_Ag)),
          HhSize = as.integer(rep(c(1,1,1,1,1,1), GrpPrsn_Ag)),
          HhType = rep("Grp", sum(GrpPrsn_Ag)),
          stringsAsFactors = FALSE)
      #Randomize record order so group quarters persons are not blocked by age
      #(seq_len is safe for any positive row count)
      RandomSort <-
        sample(seq_len(nrow(GrpHh_df)), nrow(GrpHh_df), replace = FALSE)
      GrpHh_ls <- as.list(GrpHh_df[RandomSort, ])
    } else {
      #No group quarters population: return zero-length vectors so downstream
      #concatenation with regular households still works
      GrpHh_ls <-
        list(
          Age0to14 = integer(0),
          Age15to19 = integer(0),
          Age20to29 = integer(0),
          Age30to54 = integer(0),
          Age55to64 = integer(0),
          Age65Plus = integer(0),
          HhSize = integer(0),
          HhType = character(0))
    }
    GrpHh_ls
  }
#Main module function that creates simulated households
#------------------------------------------------------
#' Main module function to create simulated households
#'
#' \code{CreateHouseholds} creates a set of simulated households that each have
#' a unique household ID, an Azone to which it is assigned, household
#' size (number of people in the household), and numbers of persons in each of
#' 6 age categories.
#'
#' This function creates a set of simulated households for the model region
#' where each household is assigned a household size, an Azone, a unique ID, and
#' numbers of persons in each of 6 age categories. The function calls the
#' createHhByAge and createGrpHhByAge functions for each Azone to create
#' simulated households containing persons by age category from a vector of
#' persons by age category for the Azone. The list of vectors produced by the
#' Create Households function are to be stored in the "Household" table. Since
#' this table does not exist, the function calculates a LENGTH value for the
#' table and returns that as well. The framework uses this information to
#' initialize the Households table. The function also computes the maximum
#' numbers of characters in the HhId and Azone datasets and assigns these to a
#' SIZE vector. This is necessary so that the framework can initialize these
#' datasets in the datastore. All the results are returned in a list.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module along with:
#' LENGTH: A named integer vector having a single named element, "Household",
#' which identifies the length (number of rows) of the Household table to be
#' created in the datastore.
#' SIZE: A named integer vector having two elements. The first element, "Azone",
#' identifies the size of the longest Azone name. The second element, "HhId",
#' identifies the size of the longest HhId.
#' @import visioneval stats
#' @include CreateEstimationDatasets.R
#' @name CreateHouseholds
#' @export
CreateHouseholds <- function(L) {
  #Define dimension name vectors: regular household age groups (Ap), the
  #corresponding group quarters age group names (Ag), and Azone names (Az)
  Ap <-
    c("Age0to14", "Age15to19", "Age20to29", "Age30to54", "Age55to64", "Age65Plus")
  Ag <- paste0("Grp", Ap)
  Az <- L$Year$Azone$Azone
  #fix seed as synthesis involves sampling
  set.seed(L$G$Seed)
  #Initialize output list with zero-length datasets; each Azone's results are
  #appended in the loop below
  Out_ls <- initDataList()
  Out_ls$Year$Azone$NumHh <- numeric(0)
  Out_ls$Year$Household <-
    list(
      Azone = character(0),
      Marea = character(0),
      HhId = character(0),
      HhSize = integer(0),
      HhType = character(0),
      Age0to14 = integer(0),
      Age15to19 = integer(0),
      Age20to29 = integer(0),
      Age30to54 = integer(0),
      Age55to64 = integer(0),
      Age65Plus = integer(0)
    )
  #Make matrix of regular household persons by Azone and age group
  Prsn_AzAp <-
    as.matrix(data.frame(L$Year$Azone, stringsAsFactors = FALSE)[,Ap])
  rownames(Prsn_AzAp) <- Az
  #If values in Prsn_AzAp are not integers, round them and issue warning
  AllInt <- all(Prsn_AzAp == round(Prsn_AzAp))
  if (!AllInt) {
    Prsn_AzAp <- round(Prsn_AzAp)
    Msg <- paste(
      "Inputs for number of persons by age group in 'azone_hh_pop_by_age.csv'",
      "file include some non-integer values for the year", L$G$Year, ".",
      "These have been rounded to the nearest whole number."
    )
    #addWarningMsg attaches the warning to Out_ls by name (visioneval NSE)
    addWarningMsg("Out_ls", Msg)
    rm(Msg)
  }
  rm(AllInt)
  #Make vector of average household size target by Azone (NA = no target)
  TargetHhSize_Az <- L$Year$Azone$AveHhSize
  names(TargetHhSize_Az) <- Az
  #Make vector of target proportion of 1-person households (NA = no target)
  TargetProp1PerHh_Az <- L$Year$Azone$Prop1PerHh
  names(TargetProp1PerHh_Az) <- Az
  #Make matrix of group population households by Azone and age group
  Prsn_AzAg <-
    as.matrix(data.frame(L$Year$Azone, stringsAsFactors = FALSE)[,Ag])
  rownames(Prsn_AzAg) <- Az
  #If values in Prsn_AzAg are not integers, round them and issue warning
  AllInt <- all(Prsn_AzAg == round(Prsn_AzAg))
  if (!AllInt) {
    Prsn_AzAg <- round(Prsn_AzAg)
    Msg <- paste(
      "Inputs for number of persons by age group in 'azone_gq_pop_by_age.csv'",
      "file include some non-integer values for the year", L$G$Year, ".",
      "These have been rounded to the nearest whole number."
    )
    addWarningMsg("Out_ls", Msg)
    rm(Msg)
  }
  rm(AllInt)
  #Simulate households for each Azone and append results to the output list
  for (az in Az) {
    #Synthesize regular households and group quarters 'households'
    RegHh_ls <-
      createHhByAge(Prsn_AzAp[az,],
                    MaxIter = 100,
                    TargetHhSize = TargetHhSize_Az[az],
                    TargetProp1PerHh = TargetProp1PerHh_Az[az])
    GrpHh_ls <-
      createGrpHhByAge(Prsn_AzAg[az,])
    NumRegHh <- length(RegHh_ls[[1]])
    NumGrpHh <- length(GrpHh_ls[[1]])
    NumHh <- NumRegHh + NumGrpHh
    Marea <- L$Year$Azone$Marea[L$Year$Azone$Azone == az]
    Out_ls$Year$Household$Azone <-
      c(Out_ls$Year$Household$Azone, rep(az, NumHh))
    Out_ls$Year$Household$Marea <-
      c(Out_ls$Year$Household$Marea, rep(Marea, NumHh))
    #Use seq_len(NumHh) rather than 1:NumHh so that an Azone with zero
    #households contributes zero IDs (1:0 is c(1, 0) and would fabricate two
    #bogus IDs misaligned with the other Household datasets)
    Out_ls$Year$Household$HhId <-
      c(Out_ls$Year$Household$HhId,
        paste(rep(az, NumHh), seq_len(NumHh), sep = "-"))
    Out_ls$Year$Household$HhSize <-
      c(Out_ls$Year$Household$HhSize, RegHh_ls$HhSize, GrpHh_ls$HhSize)
    Out_ls$Year$Household$HhType <-
      c(Out_ls$Year$Household$HhType, RegHh_ls$HhType, GrpHh_ls$HhType)
    #Append persons by age group: regular household persons precede group
    #quarters persons within each Azone's block of records
    for (ap in Ap) {
      Out_ls$Year$Household[[ap]] <-
        c(Out_ls$Year$Household[[ap]], RegHh_ls[[ap]], GrpHh_ls[[ap]])
    }
    Out_ls$Year$Azone$NumHh <- c(Out_ls$Year$Azone$NumHh, NumRegHh)
    Out_ls$Year$Azone$NumGq <- c(Out_ls$Year$Azone$NumGq, NumGrpHh)
  }
  #Coerce the household size and persons-by-age datasets to integer type
  for (Name in c("HhSize", Ap)) {
    Out_ls$Year$Household[[Name]] <- as.integer(Out_ls$Year$Household[[Name]])
  }
  Out_ls$Year$Azone$NumHh <- as.integer(Out_ls$Year$Azone$NumHh)
  Out_ls$Year$Azone$NumGq <- as.integer(Out_ls$Year$Azone$NumGq)
  #Calculate LENGTH attribute for Household table so the framework can
  #initialize the table in the datastore
  attributes(Out_ls$Year$Household)$LENGTH <-
    length(Out_ls$Year$Household$HhId)
  #Calculate SIZE attributes for the character datasets (longest string) so
  #the framework can initialize them in the datastore
  attributes(Out_ls$Year$Household$Azone)$SIZE <-
    max(nchar(Out_ls$Year$Household$Azone))
  attributes(Out_ls$Year$Household$Marea)$SIZE <-
    max(nchar(Out_ls$Year$Household$Marea))
  attributes(Out_ls$Year$Household$HhId)$SIZE <-
    max(nchar(Out_ls$Year$Household$HhId))
  attributes(Out_ls$Year$Household$HhType)$SIZE <-
    max(nchar(Out_ls$Year$Household$HhType))
  #Return the list
  Out_ls
}
#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
#Generates the module documentation (presumably from the #<doc> comment block
#at the top of this file) using the visioneval framework — confirm in
#visioneval::documentModule
documentModule("CreateHouseholds")
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# #Load packages and test functions
# library(visioneval)
# library(filesstrings)
# source("tests/scripts/test_functions.R")
# #Set up test environment
# TestSetup_ls <- list(
# TestDataRepo = "../Test_Data/VE-RSPM",
# DatastoreName = "Datastore.tar",
# LoadDatastore = TRUE,
# TestDocsDir = "verspm",
# ClearLogs = TRUE,
# # SaveDatastore = TRUE
# SaveDatastore = FALSE
# )
# setUpTests(TestSetup_ls)
# #Run test module
# TestDat_ <- testModule(
# ModuleName = "CreateHouseholds",
# LoadDatastore = TRUE,
# SaveDatastore = FALSE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- CreateHouseholds(L)
|
/sources/modules/VESimHouseholds/R/CreateHouseholds.R
|
permissive
|
jslason-rsg/BG_OregonDOT-VisionEval
|
R
| false
| false
| 37,157
|
r
|
#==================
#CreateHouseholds.R
#==================
#<doc>
## CreateHouseholds Module
#### September 6, 2018
#
#This module creates a *Household* table in the datastore and populates the table with datasets characterizing simulated households. Each entry represents a simulated household. Household datasets are created for the numbers of persons in each of 6 age categories (0-14, 15-19, 20-29, 30-54, 55-64, and 65+) and the total number of persons in the household. Two types of households are created: *regular* households (i.e. not persons in group quarters) and *group quarters* households (i.e. persons in group quarters such as college dormitories). Households are created from Azone level demographic forecasts of the number of persons in each of the 6 age groups for *regular* households and for the group quarters population. In addition, users may optionally specify an average household size and/or the proportion of households that are single person households. The module creates households that match the age forecast and the optional household size and single person inputs (close but not exact). The module tabulates the number of households created in each Azone.
#
### Model Parameter Estimation
#
#This model has just one parameter object, a matrix of the probability that a person in each age group is in one of several hundred *regular* household types. The matrix is created by selecting from the PUMS data the records for the most frequently observed household types. The default is to select the household types which account for 99% of all households. Each household type is denoted by the number of persons in each age group in the household. For example, a household that has 2 persons of age 0-14 and 2 persons of age 20-29 would be designated as type *2-0-2-0-0-0*. The numbers represent the number of persons in each of the age groups in the order listed above with the hyphens separating the age groups. These household types comprise the rows of the probability matrix. The columns of the matrix correspond to the 6 age groups. Each column of the matrix sums to 1.
#
#This probability matrix is created from Census public use microsample (PUMS) data that is compiled into an R dataset (HhData_df) when the VESimHouseholds package is built. The data that is supplied with the VESimHouseholds package downloaded from the official VisionEval repository may be used, but it is preferable to use data for the region being modeled. How this is done is explained in the documentation for the *CreateEstimationDatasets.R* script. The matrix is created by summing the number of persons in each age group and each of the household types using the household weights in the PUMS data. The probability that a person in each age group would be in each of the household types is the number of persons in the household type divided by the total number of persons in the age group.
#
#No model parameters are used to create *group quarters* households because those households are just composed of single persons.
#
### How the Module Works
#
#For *regular* households, the module uses the matrix of probabilities that a person in each age group is present in the most frequently observed household types along with a forecast of number of persons in each age group to synthesize a likely set of *regular* households. The module starts by assigning the forecast population by age group to household types using the probability matrix that has been estimated. It then carries out the following iterative process to create a set of households that is internally consistent and that matches (approximately) the optional inputs for household size and proportion of single-person households:
#
#1) For each household type, the number of households of the type is calculated from the number of persons of each age group assigned to the type. For example if 420 persons age 0-14 and 480 persons age 20-29 are assigned to household type *2-0-2-0-0-0*, that implies either 210 or 240 households of that type. Where the number of households of the type implied by the persons assigned is not consistent as in this example, the mean of the implied number of households is used. In the example, this would be 225 households. This is the *resolved* number of households. For all household types, the resolved number of households is compared to the maximum number of implied households (in this case 225 is compared to 240) if ratio of these values differs from 1 in absolute terms by less than 0.001 for all household types, the iterative process ends.
#
#2) If a household size target has been specified, the average household size for the resolved households is computed. The ratio of the target household size and the average household size for the resolved households is computed. The number of resolved households in household types having sizes greater than the target household size is multiplied by this ratio. For example, if the target household size is 2.5 and the average household size for the resolved households is 3, the number of households for household types having more than 2.5 persons (i.e. 3 or more persons) would be multiplied by *2.5 / 3*.
#
#3) If a target for the proportion of households that are 1-person households is set, the difference between the number of 1-person households that there should be and the number that have been assigned is calculated. That difference is added across all 1-person household types (e.g. if the difference is 100, since there are 5 1-person household types, 20 is added to each of those types). The difference is subtracted across all other household types.
#
#4) Using the resolved number of households of each type (as adjusted to match household size and 1-person household targets), the number of persons in each age group in each household type is computed. Continuing with the example, 225 households in household type *2-0-2-0-0-0* means that there must be 550 persons of age 0-14 and 550 persons of age 20-29 in that household type. This is called the *resolved* population. An updated probability matrix is computed using the resolved population by housing type.
#
#5) The difference between the total number of persons by age group in the resolved population and the forecast number of persons by age group is calculated and that difference is allocated to household types using the updated probability matrix. Then calculation returns to the first iteration step.
#
#After the iterations have been completed, the numbers of households by type are rounded to create whole number amounts. Then individual household records are created for each.
#</doc>
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#This model has just one parameter object, a matrix of the probability that a
#person in each age group is in one of several hundred household types.
#Each household type is denoted by the number of persons in each age group in
#the household. The rows of the matrix correspond to the household types.
#The columns of the matrix correspond to the 6 age groups. Each column of the
#matrix sums to 1. The process selects the most frequently observed households.
#The default is to select the most frequent households which account for 99% of
#all households.
#Define a function to estimate household size proportion parameters
#------------------------------------------------------------------
calcHhAgeTypes <- function(HhData_df, Threshold = 0.99) {
  #Work only with regular (non-group-quarters) household records
  RegHh_df <- HhData_df[HhData_df$HhType == "Reg",]
  AgeGroups_ <-
    c("Age0to14",
      "Age15to19",
      "Age20to29",
      "Age30to54",
      "Age55to64",
      "Age65Plus")
  #Label every record with its household type string, e.g. "2-0-2-0-0-0"
  #(number of persons in each age group joined by hyphens)
  TypeName_ <- do.call(paste, c(RegHh_df[, AgeGroups_], sep = "-"))
  #Tabulate the weighted number of households of each type and order the
  #types from most to least prevalent
  TypeCount_ <- rev(sort(tapply(RegHh_df$HhWeight, TypeName_, sum)))
  #Keep the most prevalent types that together account for the Threshold
  #share (default 99%) of all weighted households
  TypeShare_ <- cumsum(TypeCount_ / sum(TypeCount_))
  KeepTypes_ <- names(TypeShare_[TypeShare_ <= Threshold])
  IsKept_ <- TypeName_ %in% KeepTypes_
  KeptHh_df <- RegHh_df[IsKept_, ]
  KeptType_ <- TypeName_[IsKept_]
  #Apply the household weights to the persons by age in each record
  WtPrsn_df <- KeptHh_df[, AgeGroups_] * KeptHh_df$HhWeight
  #Tabulate weighted persons by household type for each age group
  PrsnTab_ls <- lapply(WtPrsn_df, function(Prsn_) {
    tapply(as.numeric(Prsn_), KeptType_, sum)
  })
  Prsn_HtAp <- do.call(cbind, PrsnTab_ls)
  #Return the matrix of probabilities that a person in each age group
  #(column) lives in each household type (row); each column sums to 1
  sweep(Prsn_HtAp, 2, colSums(Prsn_HtAp), "/")
}
#Create and save household size proportions parameters
#-----------------------------------------------------
#Load the PUMS-derived household estimation dataset (Hh_df) that is built by
#the CreateEstimationDatasets.R script when the package is built
load("data/Hh_df.rda")
#Estimate the age group by household type probability matrix
HtProb_HtAp <- calcHhAgeTypes(Hh_df)
#' Household age type probabilities
#'
#' A dataset containing the probabilities that a person in each of 6 age
#' groups is present in each of the most frequently observed household types.
#'
#' @format A matrix having one row per selected household type (950 rows for
#' Oregon data) and 6 columns (one per age group); each column sums to 1.
#' @source CreateHouseholds.R script.
"HtProb_HtAp"
usethis::use_data(HtProb_HtAp, overwrite = TRUE)
#Remove estimation-only objects so they are not retained in the package
rm(calcHhAgeTypes, Hh_df)
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
#Module specifications: Inp items are read from user CSV files, Get items are
#read from the datastore, and Set items are written to the datastore by the
#CreateHouseholds function
CreateHouseholdsSpecifications <- list(
  #Level of geography module is applied at
  RunBy = "Region",
  #Specify new tables to be created by Inp if any
  #Specify new tables to be created by Set if any
  NewSetTable = items(
    item(
      TABLE = "Household",
      GROUP = "Year"
    )
  ),
  #Specify input data
  Inp = items(
    #Regular household population by age group by Azone
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      FILE = "azone_hh_pop_by_age.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION =
        items(
          "Household (non-group quarters) population in 0 to 14 year old age group",
          "Household (non-group quarters) population in 15 to 19 year old age group",
          "Household (non-group quarters) population in 20 to 29 year old age group",
          "Household (non-group quarters) population in 30 to 54 year old age group",
          "Household (non-group quarters) population in 55 to 64 year old age group",
          "Household (non-group quarters) population in 65 or older age group")
    ),
    #Optional household size target (NA allowed = no target)
    item(
      NAME = "AveHhSize",
      FILE = "azone_hhsize_targets.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "PRSN/HH",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION = "Average household size of households (non-group quarters)"
    ),
    #Optional 1-person household proportion target (NA allowed = no target)
    item(
      NAME = "Prop1PerHh",
      FILE = "azone_hhsize_targets.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "proportion of households",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION = "Proportion of households (non-group quarters) having only one person"
    ),
    #Group quarters population by age group by Azone
    item(
      NAME =
        items("GrpAge0to14",
              "GrpAge15to19",
              "GrpAge20to29",
              "GrpAge30to54",
              "GrpAge55to64",
              "GrpAge65Plus"),
      FILE = "azone_gq_pop_by_age.csv",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      SIZE = 0,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      UNLIKELY = "",
      TOTAL = "",
      DESCRIPTION =
        items("Group quarters population in 0 to 14 year old age group",
              "Group quarters population in 15 to 19 year old age group",
              "Group quarters population in 20 to 29 year old age group",
              "Group quarters population in 30 to 54 year old age group",
              "Group quarters population in 55 to 64 year old age group",
              "Group quarters population in 65 or older age group")
    )
  ),
  #Specify data to be loaded from data store
  Get = items(
    item(
      NAME = "Azone",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME = "Marea",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      PROHIBIT = "",
      ISELEMENTOF = ""
    ),
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME = "AveHhSize",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "compound",
      UNITS = "PRSN/HH",
      PROHIBIT = c("< 0"),
      ISELEMENTOF = ""
    ),
    #NOTE(review): "NA" is prohibited here but the Inp specification for
    #Prop1PerHh allows NA (the target is optional, and createHhByAge handles
    #an NA target) — confirm this Get prohibition is intended
    item(
      NAME = "Prop1PerHh",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "double",
      UNITS = "proportion of households",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    ),
    item(
      NAME =
        items("GrpAge0to14",
              "GrpAge15to19",
              "GrpAge20to29",
              "GrpAge30to54",
              "GrpAge55to64",
              "GrpAge65Plus"),
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = ""
    )
  ),
  #Specify data to saved in the data store
  Set = items(
    item(
      NAME = "NumHh",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "households",
      UNITS = "HH",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of households (non-group quarters)"
    ),
    item(
      NAME = "NumGq",
      TABLE = "Azone",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of people in non-institutional group quarters"
    ),
    #Character ID datasets in the new Household table; SIZE is assigned at
    #run time by the CreateHouseholds function
    item(
      NAME =
        items("HhId",
              "Azone",
              "Marea"),
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "ID",
      NAVALUE = "NA",
      PROHIBIT = "",
      ISELEMENTOF = "",
      DESCRIPTION =
        items("Unique household ID",
              "Azone ID",
              "Marea ID")
    ),
    item(
      NAME = "HhSize",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "<= 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION = "Number of persons"
    ),
    item(
      NAME =
        items("Age0to14",
              "Age15to19",
              "Age20to29",
              "Age30to54",
              "Age55to64",
              "Age65Plus"),
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "people",
      UNITS = "PRSN",
      NAVALUE = -1,
      PROHIBIT = c("NA", "< 0"),
      ISELEMENTOF = "",
      SIZE = 0,
      DESCRIPTION =
        list("Persons in 0 to 14 year old age group",
             "Persons in 15 to 19 year old age group",
             "Persons in 20 to 29 year old age group",
             "Persons in 30 to 54 year old age group",
             "Persons in 55 to 64 year old age group",
             "Persons in 65 or older age group")
    ),
    item(
      NAME = "HhType",
      TABLE = "Household",
      GROUP = "Year",
      TYPE = "character",
      UNITS = "category",
      NAVALUE = "NA",
      PROHIBIT = "",
      ISELEMENTOF = "",
      DESCRIPTION = "Coded household age composition (e.g. 2-1-0-2-0-0) or Grp for group quarters"
    )
  )
)
#Save the data specifications list
#---------------------------------
#' Specifications list for CreateHouseholds module
#'
#' A list containing specifications for the CreateHouseholds module.
#'
#' @format A list containing 5 components:
#' \describe{
#'  \item{RunBy}{the level of geography that the module is run at}
#'  \item{NewSetTable}{new table to be created for datasets specified in the
#'  'Set' specifications}
#'  \item{Inp}{scenario input data to be loaded into the datastore for this
#'  module}
#'  \item{Get}{module inputs to be read from the datastore}
#'  \item{Set}{module outputs to be written to the datastore}
#' }
#' @source CreateHouseholds.R script.
"CreateHouseholdsSpecifications"
usethis::use_data(CreateHouseholdsSpecifications, overwrite = TRUE)
rm(CreateHouseholdsSpecifications)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#This function creates households for the entire model region. A household table
#is created and this is populated with the household size and persons by age
#characteristics of all the households.
#Function that creates set of households for an Azone
#----------------------------------------------------
#' Create simulated households for an Azone
#'
#' \code{createHhByAge} creates a set of simulated households for an Azone that
#' reasonably represents a population census or forecast of persons in each of 6
#' age categories.
#'
#' This function creates a set of simulated households for an Azone that
#' reasonably represents the population census or forecast of persons in each of
#' 6 age categories: 0 to 14, 15 to 19, 20 to 29, 30 to 54, 55 to 64, and 65
#' plus. The function uses a matrix of probabilities that a person in each age
#' group might be present in each of 524 household types. This matrix
#' (HtProb_HtAp) is estimated by the calcHhAgeTypes function which is described
#' above. Household types are distinguished by the number of persons in each age
#' category in the household. The function fits the distribution of households
#' by type by iteratively applying the probability matrix to the population,
#' reconciling households allocated by type based on the population assigned,
#' recomputing the assigned population, calculating the difference between the
#' assigned population by age and the input population by age, recalculating the
#' probabilities, and assigning the population difference. This process
#' continues until the difference between the assigned population and the input
#' population by age group is less than 0.1%. After the households are
#' synthesized, the size of each household is calculated.
#'
#' @param Prsn_Ap A named vector containing the number of persons in each age
#' category.
#' @param MaxIter An integer specifying the maximum number of iterations the
#' algorithm should use to balance and reconcile the population allocation to
#' household types.
#' @param TargetHhSize A double specifying a household size target value or NA
#' if there is no target.
#' @param TargetProp1PerHh A double specifying a target for the proportion of
#' households that are one-person households or NA if there is no target.
#' @return A list containing 7 components. Each component is a vector where each
#' element of a vector corresponds to a simulated household. The components
#' are as follows:
#' Age0to14 - number of persons age 0 to 14 in the household
#' Age15to19 - number of persons age 15 to 19 in the household
#' Age20to29 - number of persons age 20 to 29 in the household
#' Age30to54 - number of persons age 30 to 54 in the household
#' Age 55to64 - number of persons age 55 to 64 in the household
#' Age65Plus - number of persons 65 or older in the household
#' HhSize - total number of persons in the household
#' @name createHhByAge
#' @export
createHhByAge <-
  function(Prsn_Ap,
           MaxIter = 100,
           TargetHhSize = NA,
           TargetProp1PerHh = NA) {
    #Dimension names taken from HtProb_HtAp, the household type probability
    #matrix estimated by calcHhAgeTypes and saved as package data above
    Ap <- colnames(HtProb_HtAp)
    Ht <- rownames(HtProb_HtAp)
    #Place persons by age into household types by multiplying person vector
    #by probabilities
    Prsn_HtAp <- sweep(HtProb_HtAp, 2, Prsn_Ap, "*")
    #Make table of factors to convert persons into households and vice versa.
    #Household type names encode persons per age group (e.g. "2-0-2-0-0-0"),
    #so splitting on "-" recovers the persons-per-household factors
    PrsnFactors_Ht_Ap <-
      lapply(strsplit(Ht, "-"), function(x)
        as.numeric(x))
    PrsnFactors_HtAp <- do.call(rbind, PrsnFactors_Ht_Ap)
    dimnames(PrsnFactors_HtAp) <- dimnames(Prsn_HtAp)
    rm(PrsnFactors_Ht_Ap)
    # Calculate household size for each household type
    HsldSize_Ht <- rowSums( PrsnFactors_HtAp )
    #Initial calculation of persons by age for each housing type
    #-----------------------------------------------------------
    #Convert population into households. Each row of Hsld_HtAp contains an
    #estimate of the number of household of the type given the number of persons
    #assigned to the household type
    Hsld_HtAp <- Prsn_HtAp / PrsnFactors_HtAp
    #Division by the zero entries of PrsnFactors_HtAp produces NaN/Inf; zero
    #them out since an age group absent from a type implies no households
    Hsld_HtAp[is.na(Hsld_HtAp)] <- 0
    MaxHh_Ht <- apply(Hsld_HtAp, 1, max)
    #Iterate until "balanced" set of households is created
    #-----------------------------------------------------
    #NOTE(review): MaxDiff_ and HsldSize_ accumulate per-iteration diagnostics
    #but are never returned or used after the loop
    MaxDiff_ <- numeric(MaxIter)
    HsldSize_ <- numeric(MaxIter)
    for (i in 1:MaxIter) {
      #Resolve differences in household type estimates. For each household type
      #if there is more than one estimate of the number of households, take the
      #mean value of the estimates that are non-zero to determine the number of
      #households of the type.
      ResolveHh_HtAp <- t(apply(Hsld_HtAp, 1, function(x) {
        if (sum(x > 0) > 1) {
          x[x > 0] <- mean(x[x > 0])
        }
        x
      }))
      # Exit if the difference between the maximum estimate for each
      # household type is not too different than the resolved estimate
      # for each household type
      ResolveHh_Ht <- apply(ResolveHh_HtAp, 1, max)
      Diff_Ht <- abs(MaxHh_Ht - ResolveHh_Ht)
      PropDiff_Ht <- Diff_Ht / ResolveHh_Ht
      #Convergence criterion: all proportional differences below 0.1%.
      #NOTE(review): if MaxIter is reached without converging, the last
      #iteration's result is used silently with no warning — confirm intended
      if (all(PropDiff_Ht < 0.001)) break
      MaxDiff_[i] <- max(PropDiff_Ht)
      # Adjust household proportions to match household size target if exists
      if (!is.na(TargetHhSize)) {
        # Calculate average household size and ratio with target household size
        AveHsldSize <-
          sum(ResolveHh_Ht * HsldSize_Ht) / sum(ResolveHh_Ht)
        HsldSize_[i] <- AveHsldSize
        HsldSizeAdj <- TargetHhSize / AveHsldSize
        # Calculate household adjustment factors and adjust households:
        # only types larger than the target size are scaled
        HsldAdjFactor_Ht <-
          HsldSize_Ht * 0 + 1 # Start with a vector of ones
        HsldAdjFactor_Ht[HsldSize_Ht > TargetHhSize] <- HsldSizeAdj
        ResolveHh_HtAp <-
          sweep(ResolveHh_HtAp, 1, HsldAdjFactor_Ht, "*")
      }
      # Adjust proportion of 1-person households to match target if there is one
      if (!is.na(TargetProp1PerHh)) {
        Hsld_Ht <- round(apply(ResolveHh_HtAp, 1, max))
        NumHh_Sz <- tapply(Hsld_Ht, HsldSize_Ht, sum)
        NumHh <- sum(NumHh_Sz)
        #Shortfall (or excess, if negative) of 1-person households
        Add1PerHh <- (TargetProp1PerHh * NumHh) - NumHh_Sz[1]
        Is1PerHh_Ht <- HsldSize_Ht == 1
        #Distribute the shortfall across 1-person types in proportion to
        #their current counts, and remove the same total from other types
        Add1PerHh_Ht <-
          Add1PerHh * Hsld_Ht[Is1PerHh_Ht] / sum(Hsld_Ht[Is1PerHh_Ht])
        RmOthHh_Ht <-
          -Add1PerHh * Hsld_Ht[!Is1PerHh_Ht] / sum(Hsld_Ht[!Is1PerHh_Ht])
        ResolveHh_HtAp[Is1PerHh_Ht] <-
          ResolveHh_HtAp[Is1PerHh_Ht] + Add1PerHh_Ht
        ResolveHh_HtAp[!Is1PerHh_Ht] <-
          ResolveHh_HtAp[!Is1PerHh_Ht] + RmOthHh_Ht
      }
      #Calculate the number of persons by age group consistent with the resolved
      #numbers of households of each household type
      ResolvePrsn_HtAp <- ResolveHh_HtAp * PrsnFactors_HtAp
      #Convert the resolved persons tabulation into probabilities
      PrsnProb_HtAp <-
        sweep(ResolvePrsn_HtAp, 2, colSums(ResolvePrsn_HtAp), "/")
      #Calculate the difference in the number of persons by age category
      PrsnDiff_Ap <- Prsn_Ap - colSums(ResolvePrsn_HtAp)
      #Allocate extra persons to households based on probabilities
      AddPrsn_HtAp <- sweep(PrsnProb_HtAp, 2, PrsnDiff_Ap, "*")
      #Add the reallocated persons to the resolved persons matrix
      Prsn_HtAp <- ResolvePrsn_HtAp + AddPrsn_HtAp
      # Recalculate number of households by type
      Hsld_HtAp <- Prsn_HtAp/PrsnFactors_HtAp
      Hsld_HtAp[is.na(Hsld_HtAp)] <- 0
      # Calculate the maximum households by each type for convergence check
      MaxHh_Ht <- apply(ResolveHh_HtAp, 1, max)
    }
    #Calculate number of households by household type. Rounding to whole
    #households means the final population may differ slightly from Prsn_Ap
    Hsld_Ht <- round(apply(ResolveHh_HtAp, 1, max))
    #Calculate persons by age group and household type
    Prsn_HtAp <- sweep(PrsnFactors_HtAp, 1, Hsld_Ht, "*")
    #Convert into a matrix of households: one row per simulated household
    Hsld_Hh <- rep(names(Hsld_Ht), Hsld_Ht)
    Hsld_Hh_Ap <- strsplit(Hsld_Hh, "-")
    Hsld_Hh_Ap <- lapply(Hsld_Hh_Ap, function(x) as.numeric(x))
    Hsld_df <- data.frame(do.call(rbind, Hsld_Hh_Ap))
    names(Hsld_df) <- Ap
    Hsld_df$HhSize <- rowSums(Hsld_df)
    Hsld_df$HhType <-
      apply(Hsld_df[, Ap], 1, function(x) paste(x, collapse = "-"))
    #Randomly order the rows of the matrix and convert into a list of
    #corresponding vectors by age group (consumes RNG; callers seed upstream)
    RandomSort <-
      sample(1:nrow(Hsld_df), nrow(Hsld_df), replace = FALSE)
    Hsld_ls <- as.list(Hsld_df[RandomSort, ])
    # Return a list of corresponding age group vectors
    Hsld_ls
  }
#Function that creates group quarters population for an Azone
#------------------------------------------------------------
#' Create group quarters population for an Azone
#'
#' \code{createGroupQtrHhByAge} creates the quarters 'households' for an Azone
#' where each 'household' is a single person in group quarters.
#'
#' This function creates a set of simulated 'households' living in group
#' quaters in an Azone. Each group quarters 'household' is a single person in
#' each of 6 age categories: 0 to 14, 15 to 19, 20 to 29, 30 to 54, 55 to 64,
#' and 65 plus.
#'
#' @param GrpPrsn_Ag A named vector containing the number of persons in each age
#' category.
#' @return A list containing 7 components. Each component is a vector where each
#' element of a vector corresponds to a simulated household. The components
#' are as follows:
#' Age0to14 - number of persons age 0 to 14 in the household
#' Age15to19 - number of persons age 15 to 19 in the household
#' Age20to29 - number of persons age 20 to 29 in the household
#' Age30to54 - number of persons age 30 to 54 in the household
#' Age 55to64 - number of persons age 55 to 64 in the household
#' Age65Plus - number of persons 65 or older in the household
#' HhSize - total number of persons in the household
#' @name createGrpHhByAge
#' @export
createGrpHhByAge <-
  function(GrpPrsn_Ag) {
    #Only build records if there is any group quarters population.
    #(any() replaces the original sum() of a logical vector used as a
    #condition; the truth value is identical but the intent is explicit.)
    if (any(GrpPrsn_Ag > 0)) {
      #Each group quarters person is a 1-person 'household'. Each rep() call
      #marks the block of rows belonging to one age group with a 1 in that
      #age group's column, so column sums reproduce GrpPrsn_Ag exactly.
      GrpHh_df <-
        data.frame(
          Age0to14 = as.integer(rep(c(1,0,0,0,0,0), GrpPrsn_Ag)),
          Age15to19 = as.integer(rep(c(0,1,0,0,0,0), GrpPrsn_Ag)),
          Age20to29 = as.integer(rep(c(0,0,1,0,0,0), GrpPrsn_Ag)),
          Age30to54 = as.integer(rep(c(0,0,0,1,0,0), GrpPrsn_Ag)),
          Age55to64 = as.integer(rep(c(0,0,0,0,1,0), GrpPrsn_Ag)),
          Age65Plus = as.integer(rep(c(0,0,0,0,0,1), GrpPrsn_Ag)),
          HhSize = as.integer(rep(c(1,1,1,1,1,1), GrpPrsn_Ag)),
          HhType = rep("Grp", sum(GrpPrsn_Ag)),
          stringsAsFactors = FALSE)
      #Randomly shuffle the records so age groups are not blocked together
      #(consumes RNG; the calling module seeds the RNG upstream)
      RandomSort <-
        sample(seq_len(nrow(GrpHh_df)), nrow(GrpHh_df), replace = FALSE)
      GrpHh_ls <- as.list(GrpHh_df[RandomSort, ])
    } else {
      #No group quarters population: return zero-length datasets so callers
      #can concatenate the components without special-casing empty Azones
      GrpHh_ls <-
        list(
          Age0to14 = integer(0),
          Age15to19 = integer(0),
          Age20to29 = integer(0),
          Age30to54 = integer(0),
          Age55to64 = integer(0),
          Age65Plus = integer(0),
          HhSize = integer(0),
          HhType = character(0))
    }
    GrpHh_ls
  }
#Main module function that creates simulated households
#------------------------------------------------------
#' Main module function to create simulated households
#'
#' \code{CreateHouseholds} creates a set of simulated households that each have
#' a unique household ID, an Azone to which it is assigned, household
#' size (number of people in the household), and numbers of persons in each of
#' 6 age categories.
#'
#' This function creates a set of simulated households for the model region
#' where each household is assigned a household size, an Azone, a unique ID, and
#' numbers of persons in each of 6 age categories. The function calls the
#' createHhByAge and createGrpHhByAge functions for each Azone to create
#' simulated households containing persons by age category from a vector of
#' persons by age category for the Azone. The list of vectors produced by the
#' Create Households function are to be stored in the "Household" table. Since
#' this table does not exist, the function calculates a LENGTH value for the
#' table and returns that as well. The framework uses this information to
#' initialize the Households table. The function also computes the maximum
#' numbers of characters in the HhId and Azone datasets and assigns these to a
#' SIZE vector. This is necessary so that the framework can initialize these
#' datasets in the datastore. All the results are returned in a list.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module along with:
#' LENGTH: A named integer vector having a single named element, "Household",
#' which identifies the length (number of rows) of the Household table to be
#' created in the datastore.
#' SIZE: A named integer vector having two elements. The first element, "Azone",
#' identifies the size of the longest Azone name. The second element, "HhId",
#' identifies the size of the longest HhId.
#' @import visioneval stats
#' @include CreateEstimationDatasets.R
#' @name CreateHouseholds
#' @export
CreateHouseholds <- function(L) {
  #Define dimension name vectors
  #Ap = age group names for regular household persons, Ag = the matching
  #group quarters input names, Az = the Azone names for this model run
  Ap <-
    c("Age0to14", "Age15to19", "Age20to29", "Age30to54", "Age55to64", "Age65Plus")
  Ag <- paste0("Grp", Ap)
  Az <- L$Year$Azone$Azone
  #fix seed as synthesis involves sampling
  set.seed(L$G$Seed)
  #Initialize output list
  #Zero-length vectors are created up front so results can be appended
  #Azone by Azone in the loop below
  Out_ls <- initDataList()
  Out_ls$Year$Azone$NumHh <- numeric(0)
  Out_ls$Year$Household <-
    list(
      Azone = character(0),
      Marea = character(0),
      HhId = character(0),
      HhSize = integer(0),
      HhType = character(0),
      Age0to14 = integer(0),
      Age15to19 = integer(0),
      Age20to29 = integer(0),
      Age30to54 = integer(0),
      Age55to64 = integer(0),
      Age65Plus = integer(0)
    )
  #Make matrix of regular household persons by Azone and age group
  Prsn_AzAp <-
    as.matrix(data.frame(L$Year$Azone, stringsAsFactors = FALSE)[,Ap])
  rownames(Prsn_AzAp) <- Az
  #If values in Prsn_AzAp are not integers, round them and issue warning
  AllInt <- all(Prsn_AzAp == round(Prsn_AzAp))
  if (!AllInt) {
    Prsn_AzAp <- round(Prsn_AzAp)
    Msg <- paste(
      "Inputs for number of persons by age group in 'azone_hh_pop_by_age.csv'",
      "file include some non-integer values for the year", L$G$Year, ".",
      "These have been rounded to the nearest whole number."
    )
    addWarningMsg("Out_ls", Msg)
    rm(Msg)
  }
  rm(AllInt)
  #Make vector of average household size target by Azone
  TargetHhSize_Az <- L$Year$Azone$AveHhSize
  names(TargetHhSize_Az) <- Az
  #Make vector of target proportion of 1-person households
  TargetProp1PerHh_Az <- L$Year$Azone$Prop1PerHh
  names(TargetProp1PerHh_Az) <- Az
  #Make matrix of group population households by Azone and age group
  Prsn_AzAg <-
    as.matrix(data.frame(L$Year$Azone, stringsAsFactors = FALSE)[,Ag])
  rownames(Prsn_AzAg) <- Az
  #If values in Prsn_AzAg are not integers, round them and issue warning
  AllInt <- all(Prsn_AzAg == round(Prsn_AzAg))
  if (!AllInt) {
    Prsn_AzAg <- round(Prsn_AzAg)
    Msg <- paste(
      "Inputs for number of persons by age group in 'azone_gq_pop_by_age.csv'",
      "file include some non-integer values for the year", L$G$Year, ".",
      "These have been rounded to the nearest whole number."
    )
    addWarningMsg("Out_ls", Msg)
    rm(Msg)
  }
  rm(AllInt)
  #Simulate households for each Azone and add to output list
  #Regular households are synthesized first and group quarters households
  #second; both are appended to the same Household table vectors, so HhIds
  #number them consecutively within each Azone
  for (az in Az) {
    RegHh_ls <-
      createHhByAge(Prsn_AzAp[az,],
                    MaxIter=100,
                    TargetHhSize = TargetHhSize_Az[az],
                    TargetProp1PerHh = TargetProp1PerHh_Az[az])
    GrpHh_ls <-
      createGrpHhByAge(Prsn_AzAg[az,])
    #Household counts: every component vector has one element per household,
    #so the length of the first component is the number of households
    NumRegHh <- length(RegHh_ls[[1]])
    NumGrpHh <- length(GrpHh_ls[[1]])
    NumHh <- NumRegHh + NumGrpHh
    Marea <- L$Year$Azone$Marea[L$Year$Azone$Azone == az]
    #HhId is "<Azone>-<sequence number>", unique within the model region
    Out_ls$Year$Household$Azone <-
      c(Out_ls$Year$Household$Azone, rep(az, NumHh))
    Out_ls$Year$Household$Marea <-
      c(Out_ls$Year$Household$Marea, rep(Marea, NumHh))
    Out_ls$Year$Household$HhId <-
      c(Out_ls$Year$Household$HhId, paste(rep(az, NumHh), 1:NumHh, sep = "-"))
    Out_ls$Year$Household$HhSize <-
      c(Out_ls$Year$Household$HhSize, RegHh_ls$HhSize, GrpHh_ls$HhSize)
    Out_ls$Year$Household$HhType <-
      c(Out_ls$Year$Household$HhType, RegHh_ls$HhType, GrpHh_ls$HhType)
    Out_ls$Year$Household$Age0to14 <-
      c(Out_ls$Year$Household$Age0to14, RegHh_ls$Age0to14, GrpHh_ls$Age0to14)
    Out_ls$Year$Household$Age15to19 <-
      c(Out_ls$Year$Household$Age15to19, RegHh_ls$Age15to19, GrpHh_ls$Age15to19)
    Out_ls$Year$Household$Age20to29 <-
      c(Out_ls$Year$Household$Age20to29, RegHh_ls$Age20to29, GrpHh_ls$Age20to29)
    Out_ls$Year$Household$Age30to54 <-
      c(Out_ls$Year$Household$Age30to54, RegHh_ls$Age30to54, GrpHh_ls$Age30to54)
    Out_ls$Year$Household$Age55to64 <-
      c(Out_ls$Year$Household$Age55to64, RegHh_ls$Age55to64, GrpHh_ls$Age55to64)
    Out_ls$Year$Household$Age65Plus <-
      c(Out_ls$Year$Household$Age65Plus, RegHh_ls$Age65Plus, GrpHh_ls$Age65Plus)
    #NumGq was not pre-initialized above; c(NULL, NumGrpHh) on the first pass
    #creates the vector
    Out_ls$Year$Azone$NumHh <- c(Out_ls$Year$Azone$NumHh, NumRegHh)
    Out_ls$Year$Azone$NumGq <- c(Out_ls$Year$Azone$NumGq, NumGrpHh)
  }
  #Coerce the accumulated person counts to integer, the type declared in the
  #module's Set specifications
  Out_ls$Year$Household$HhSize <- as.integer(Out_ls$Year$Household$HhSize)
  Out_ls$Year$Household$Age0to14 <- as.integer(Out_ls$Year$Household$Age0to14)
  Out_ls$Year$Household$Age15to19 <- as.integer(Out_ls$Year$Household$Age15to19)
  Out_ls$Year$Household$Age20to29 <- as.integer(Out_ls$Year$Household$Age20to29)
  Out_ls$Year$Household$Age30to54 <- as.integer(Out_ls$Year$Household$Age30to54)
  Out_ls$Year$Household$Age55to64 <- as.integer(Out_ls$Year$Household$Age55to64)
  Out_ls$Year$Household$Age65Plus <- as.integer(Out_ls$Year$Household$Age65Plus)
  Out_ls$Year$Azone$NumHh <- as.integer(Out_ls$Year$Azone$NumHh)
  Out_ls$Year$Azone$NumGq <- as.integer(Out_ls$Year$Azone$NumGq)
  #Calculate LENGTH attribute for Household table
  #The framework uses this to size the not-yet-existing Household table
  attributes(Out_ls$Year$Household)$LENGTH <-
    length(Out_ls$Year$Household$HhId)
  #Calculate SIZE attributes for 'Household$Azone' and 'Household$HhId'
  #(longest string in each character dataset, needed to initialize the
  #datastore columns)
  attributes(Out_ls$Year$Household$Azone)$SIZE <-
    max(nchar(Out_ls$Year$Household$Azone))
  attributes(Out_ls$Year$Household$Marea)$SIZE <-
    max(nchar(Out_ls$Year$Household$Marea))
  attributes(Out_ls$Year$Household$HhId)$SIZE <-
    max(nchar(Out_ls$Year$Household$HhId))
  attributes(Out_ls$Year$Household$HhType)$SIZE <-
    max(nchar(Out_ls$Year$Household$HhType))
  #Return the list
  Out_ls
}
#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILLIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
#Generates the module documentation from the specifications defined above
documentModule("CreateHouseholds")
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# #Load packages and test functions
# library(visioneval)
# library(filesstrings)
# source("tests/scripts/test_functions.R")
# #Set up test environment
# TestSetup_ls <- list(
# TestDataRepo = "../Test_Data/VE-RSPM",
# DatastoreName = "Datastore.tar",
# LoadDatastore = TRUE,
# TestDocsDir = "verspm",
# ClearLogs = TRUE,
# # SaveDatastore = TRUE
# SaveDatastore = FALSE
# )
# setUpTests(TestSetup_ls)
# #Run test module
# TestDat_ <- testModule(
# ModuleName = "CreateHouseholds",
# LoadDatastore = TRUE,
# SaveDatastore = FALSE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- CreateHouseholds(L)
|
############################################################################
#
############################################################################
loadAllDataSets <- function(dataSet, chipType="*", pattern=NULL, ..., rootPath="totalAndFracBData", type=c("fracB", "total", "genotypes", "confidenceScores"), verbose=FALSE) {
  # Locates every data set directory under 'rootPath' whose name matches
  # 'pattern' (default "^<dataSet>,.*") and loads each as the aroma data set
  # class selected by 'type'.  Returns a named list of the non-empty data
  # sets found; all loaded sets must contain the same number of units.

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'dataSet':
  dataSet <- Arguments$getCharacter(dataSet)

  # Argument 'chipType':
  chipType <- Arguments$getCharacter(chipType)

  # Argument 'pattern':
  if (is.null(pattern)) {
    pattern <- sprintf("^%s,.*", dataSet)
  }
  pattern <- Arguments$getRegularExpression(pattern)

  # Argument 'rootPath':
  rootPath <- Arguments$getReadablePath(rootPath, mustExist=TRUE)

  # Argument 'type':
  type <- match.arg(type, c("fracB", "total", "genotypes", "confidenceScores"))

  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  verbose && enter(verbose, "loadAllDataSets()")

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Identify all data sets
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && enter(verbose, "Scanning directory for data sets")
  verbose && cat(verbose, "Path: ", rootPath)
  verbose && cat(verbose, "Pattern: ", pattern)

  # Search for directories and links.  vapply() (rather than sapply()) keeps
  # the result types stable even when no path matched the pattern.
  paths <- list.files(path=rootPath, pattern=pattern, full.names=TRUE)
  verbose && cat(verbose, "Located paths:")
  verbose && print(verbose, paths)
  paths <- vapply(paths, FUN=Arguments$getReadablePath, FUN.VALUE=character(1L))
  paths <- paths[vapply(paths, FUN=isDirectory, FUN.VALUE=logical(1L))]
  dataSets <- basename(paths)
  verbose && cat(verbose, "Located data sets:")
  verbose && print(verbose, dataSets)
  verbose && exit(verbose)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Setup data sets
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  dsList <- list()
  verbose && enter(verbose, "Loading data sets")
  for (kk in seq_along(dataSets)) {
    # Loop-local name; the original code reused (shadowed) argument 'dataSet'.
    dataSetKK <- dataSets[kk]
    verbose && enter(verbose, sprintf("Data set #%d ('%s') of %d",
                                      kk, dataSetKK, length(dataSets)))
    verbose && cat(verbose, "Type: ", type)
    if (type == "total") {
      ds <- AromaUnitTotalCnBinarySet$byName(dataSetKK, chipType=chipType, paths=rootPath)
    } else if (type == "fracB") {
      ds <- AromaUnitFracBCnBinarySet$byName(dataSetKK, chipType=chipType, paths=rootPath)
    } else if (type == "genotypes") {
      # NOTE(review): unlike the other branches this one does not pass
      # paths=rootPath -- confirm genotype call sets live under the default
      # root path.
      ds <- AromaUnitGenotypeCallSet$byName(dataSetKK, chipType=chipType)
    } else if (type == "confidenceScores") {
      ds <- AromaUnitSignalBinarySet$byName(dataSetKK, chipType=chipType, pattern="confidenceScores", paths=rootPath)
    }
    if (length(ds) > 0) {
      dsList[[kk]] <- ds
      verbose && print(verbose, ds)
    } else {
      verbose && cat(verbose, "No such data set found.")
    }
    verbose && exit(verbose)
  } # for (kk ...)
  verbose && exit(verbose)

  # Drop empty data sets (slots skipped in the loop are NULL, i.e. length 0)
  dsList <- dsList[vapply(dsList, FUN=length, FUN.VALUE=integer(1L)) > 0]

  # Set the names
  names(dsList) <- vapply(dsList, FUN=getFullName, FUN.VALUE=character(1L))

  verbose && cat(verbose, "Loaded data sets:")
  verbose && print(verbose, dsList)

  # Sanity check: all loaded data sets must have the same number of units.
  # Guarded so an empty result does not index ns[1] out of bounds.
  if (length(dsList) > 0) {
    ns <- unlist(lapply(dsList, FUN=function(ds) nbrOfUnits(getFile(ds, 1))))
    nbrOfUnits <- ns[1]
    if (!all(ns == nbrOfUnits)) {
      verbose && print(verbose, ns)
      verbose && print(verbose, dsList)
      throw("INTERNAL ERROR: The loaded data sets does not have the same number of units.")
    }
  }

  verbose && exit(verbose)

  dsList
} # loadAllDataSets()
############################################################################
# HISTORY:
# 2011-03-18
# o Added sanity checks for the result of loadAllDataSets().
# 2009-02-23
# o Created.
############################################################################
|
/inst/rsp-ex/TCGA,CMTN/R/loadAllDataSets.R
|
no_license
|
HenrikBengtsson/aroma.cn.eval
|
R
| false
| false
| 4,123
|
r
|
############################################################################
#
############################################################################
loadAllDataSets <- function(dataSet, chipType="*", pattern=NULL, ..., rootPath="totalAndFracBData", type=c("fracB", "total", "genotypes", "confidenceScores"), verbose=FALSE) {
  # Locates every data set directory under 'rootPath' whose name matches
  # 'pattern' (default "^<dataSet>,.*") and loads each as the aroma data set
  # class selected by 'type'.  Returns a named list of the non-empty data
  # sets found; all loaded sets must contain the same number of units.

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Validate arguments
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Argument 'dataSet':
  dataSet <- Arguments$getCharacter(dataSet)

  # Argument 'chipType':
  chipType <- Arguments$getCharacter(chipType)

  # Argument 'pattern':
  if (is.null(pattern)) {
    pattern <- sprintf("^%s,.*", dataSet)
  }
  pattern <- Arguments$getRegularExpression(pattern)

  # Argument 'rootPath':
  rootPath <- Arguments$getReadablePath(rootPath, mustExist=TRUE)

  # Argument 'type':
  type <- match.arg(type, c("fracB", "total", "genotypes", "confidenceScores"))

  # Argument 'verbose':
  verbose <- Arguments$getVerbose(verbose)
  if (verbose) {
    pushState(verbose)
    on.exit(popState(verbose))
  }

  verbose && enter(verbose, "loadAllDataSets()")

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Identify all data sets
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  verbose && enter(verbose, "Scanning directory for data sets")
  verbose && cat(verbose, "Path: ", rootPath)
  verbose && cat(verbose, "Pattern: ", pattern)

  # Search for directories and links.  vapply() (rather than sapply()) keeps
  # the result types stable even when no path matched the pattern.
  paths <- list.files(path=rootPath, pattern=pattern, full.names=TRUE)
  verbose && cat(verbose, "Located paths:")
  verbose && print(verbose, paths)
  paths <- vapply(paths, FUN=Arguments$getReadablePath, FUN.VALUE=character(1L))
  paths <- paths[vapply(paths, FUN=isDirectory, FUN.VALUE=logical(1L))]
  dataSets <- basename(paths)
  verbose && cat(verbose, "Located data sets:")
  verbose && print(verbose, dataSets)
  verbose && exit(verbose)

  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  # Setup data sets
  # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
  dsList <- list()
  verbose && enter(verbose, "Loading data sets")
  for (kk in seq_along(dataSets)) {
    # Loop-local name; the original code reused (shadowed) argument 'dataSet'.
    dataSetKK <- dataSets[kk]
    verbose && enter(verbose, sprintf("Data set #%d ('%s') of %d",
                                      kk, dataSetKK, length(dataSets)))
    verbose && cat(verbose, "Type: ", type)
    if (type == "total") {
      ds <- AromaUnitTotalCnBinarySet$byName(dataSetKK, chipType=chipType, paths=rootPath)
    } else if (type == "fracB") {
      ds <- AromaUnitFracBCnBinarySet$byName(dataSetKK, chipType=chipType, paths=rootPath)
    } else if (type == "genotypes") {
      # NOTE(review): unlike the other branches this one does not pass
      # paths=rootPath -- confirm genotype call sets live under the default
      # root path.
      ds <- AromaUnitGenotypeCallSet$byName(dataSetKK, chipType=chipType)
    } else if (type == "confidenceScores") {
      ds <- AromaUnitSignalBinarySet$byName(dataSetKK, chipType=chipType, pattern="confidenceScores", paths=rootPath)
    }
    if (length(ds) > 0) {
      dsList[[kk]] <- ds
      verbose && print(verbose, ds)
    } else {
      verbose && cat(verbose, "No such data set found.")
    }
    verbose && exit(verbose)
  } # for (kk ...)
  verbose && exit(verbose)

  # Drop empty data sets (slots skipped in the loop are NULL, i.e. length 0)
  dsList <- dsList[vapply(dsList, FUN=length, FUN.VALUE=integer(1L)) > 0]

  # Set the names
  names(dsList) <- vapply(dsList, FUN=getFullName, FUN.VALUE=character(1L))

  verbose && cat(verbose, "Loaded data sets:")
  verbose && print(verbose, dsList)

  # Sanity check: all loaded data sets must have the same number of units.
  # Guarded so an empty result does not index ns[1] out of bounds.
  if (length(dsList) > 0) {
    ns <- unlist(lapply(dsList, FUN=function(ds) nbrOfUnits(getFile(ds, 1))))
    nbrOfUnits <- ns[1]
    if (!all(ns == nbrOfUnits)) {
      verbose && print(verbose, ns)
      verbose && print(verbose, dsList)
      throw("INTERNAL ERROR: The loaded data sets does not have the same number of units.")
    }
  }

  verbose && exit(verbose)

  dsList
} # loadAllDataSets()
############################################################################
# HISTORY:
# 2011-03-18
# o Added sanity checks for the result of loadAllDataSets().
# 2009-02-23
# o Created.
############################################################################
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/501.Error-Failure_LimitBased_BASE_All.R
\name{errWD}
\alias{errWD}
\title{Calculates error, long term power and pass/fail criteria for Wald method}
\usage{
errWD(n, alp, phi, f)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{phi}{- Null hypothesis value}
\item{f}{- Failure criterion}
}
\value{
A dataframe with
\item{delalp}{ Delta-alpha is the increase of the nominal error with respect to real error}
\item{theta}{ Long term power of the test}
\item{Fail_Pass}{Fail/pass based on the input f criterion}
}
\description{
Calculates error, long term power and pass/fail criteria for Wald method
}
\details{
Evaluation of Wald-type intervals using error due to the
difference of achieved and nominal level of significance for the \eqn{n + 1} intervals
}
\examples{
n=20; alp=0.05; phi=0.05; f=-2
errWD(n,alp,phi,f)
}
\references{
[1] 2014 Martin Andres, A. and Alvarez Hernandez, M.
Two-tailed asymptotic inferences for a proportion.
Journal of Applied Statistics, 41, 7, 1516-1529
}
\seealso{
Other Error for base methods: \code{\link{PloterrAS}},
\code{\link{PloterrAll}}, \code{\link{PloterrBA}},
\code{\link{PloterrEX}}, \code{\link{PloterrLR}},
\code{\link{PloterrLT}}, \code{\link{PloterrSC}},
\code{\link{PloterrTW}}, \code{\link{PloterrWD}},
\code{\link{errAS}}, \code{\link{errAll}},
\code{\link{errBA}}, \code{\link{errEX}},
\code{\link{errLR}}, \code{\link{errLT}},
\code{\link{errSC}}, \code{\link{errTW}}
}
|
/man/errWD.Rd
|
no_license
|
ElsevierSoftwareX/SOFTX-D-16-00020
|
R
| false
| true
| 1,578
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/501.Error-Failure_LimitBased_BASE_All.R
\name{errWD}
\alias{errWD}
\title{Calculates error, long term power and pass/fail criteria for Wald method}
\usage{
errWD(n, alp, phi, f)
}
\arguments{
\item{n}{- Number of trials}
\item{alp}{- Alpha value (significance level required)}
\item{phi}{- Null hypothesis value}
\item{f}{- Failure criterion}
}
\value{
A dataframe with
\item{delalp}{ Delta-alpha is the increase of the nominal error with respect to real error}
\item{theta}{ Long term power of the test}
\item{Fail_Pass}{Fail/pass based on the input f criterion}
}
\description{
Calculates error, long term power and pass/fail criteria for Wald method
}
\details{
Evaluation of Wald-type intervals using error due to the
difference of achieved and nominal level of significance for the \eqn{n + 1} intervals
}
\examples{
n=20; alp=0.05; phi=0.05; f=-2
errWD(n,alp,phi,f)
}
\references{
[1] 2014 Martin Andres, A. and Alvarez Hernandez, M.
Two-tailed asymptotic inferences for a proportion.
Journal of Applied Statistics, 41, 7, 1516-1529
}
\seealso{
Other Error for base methods: \code{\link{PloterrAS}},
\code{\link{PloterrAll}}, \code{\link{PloterrBA}},
\code{\link{PloterrEX}}, \code{\link{PloterrLR}},
\code{\link{PloterrLT}}, \code{\link{PloterrSC}},
\code{\link{PloterrTW}}, \code{\link{PloterrWD}},
\code{\link{errAS}}, \code{\link{errAll}},
\code{\link{errBA}}, \code{\link{errEX}},
\code{\link{errLR}}, \code{\link{errLT}},
\code{\link{errSC}}, \code{\link{errTW}}
}
|
shinyServer(function(input, output) {
  # Reactive: text of the uploaded file with HTML/XML tags stripped and
  # empty lines removed; NULL until a file has been uploaded.
  Dataset <- reactive({
    if (is.null(input$file1)) {return(NULL)}
    else{
      text <- readLines(input$file1$datapath )
      text = str_replace_all(text, "<.*?>", "")
      text = text[text !=""]
      return(text)
    }
  })
  # Reactive: the udpipe language model matching the selected UI language.
  # NOTE(review): the .udpipe model files are assumed to sit in the app
  # directory; if input$lang held any value other than the three handled
  # here, 'lang_model' would be undefined -- confirm the UI restricts it.
  lang_model = reactive({
    if (input$lang == "Eng") {
      lang_model = udpipe_load_model("./english-ud-2.0-170801.udpipe")
    }
    if (input$lang == "Span") {
      lang_model = udpipe_load_model("./spanish-ud-2.0-170801.udpipe")
    }
    if (input$lang == "Hin") {
      lang_model = udpipe_load_model("./hindi-ud-2.0-170801.udpipe")
    }
    return(lang_model)
  })
  # Reactive: the uploaded text annotated by udpipe, as a data frame.
  annot = reactive({
    x <- udpipe_annotate(lang_model(), x = Dataset())
    x <- as.data.frame(x)
    return(x)
  })
  # Download handler: annotated data as CSV (fourth column dropped).
  output$downloadData <- downloadHandler(
    filename = function() {
      "annotated_data.csv"
    },
    content = function(file){
      write.csv(annot()[-4], file, row.names = FALSE)
    }
  )
  # Table of the annotated data (fourth column dropped).
  output$top_100_annotated = renderDataTable({
    if (is.null(input$file1)) {return(NULL)}
    out = annot()[-4]
    return(out)
  })
  # Word cloud of lemma frequencies for the parts of speech selected in
  # input$rd.
  output$wordcloud = renderPlot({
    if (is.null(input$file1)) {return(NULL)}
    else{
      x <- annot()
      x = x %>% subset(., upos %in% input$rd)
      x = txt_freq(x$lemma) # txt_freq() calcs noun freqs in desc order
      wordcloud(x$key, x$freq, scale = c(3.5, 0.5), min.freq = 5, max.words = 100, colors = brewer.pal(8, "Dark2"))
    }
  })
  # Co-occurrence graph of the top 50 lemma pairs for the POS tags selected
  # in input$upos.
  output$COG = renderPlot({
    if (is.null(input$file1)) {return(NULL)}
    else{
      nokia_cooc <- cooccurrence( # try `?cooccurrence` for parm options
        x = subset(annot(), upos %in% input$upos),
        term = "lemma",
        group = c("doc_id", "paragraph_id", "sentence_id"))
      wordnetwork <- head(nokia_cooc, 50)
      wordnetwork <- igraph::graph_from_data_frame(wordnetwork)
      ggraph(wordnetwork, layout = "fr") +
        geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "orange") +
        geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
        theme_graph(base_family = "Arial Narrow") +
        theme(legend.position = "none") +
        # labs(title = "Cooccurrences within 3 words distance", subtitle = "Nouns & Adjective")
        labs(title = "Cooccurrences within 3 words distance", subtitle = "Cooccurrences plot")
    }
  })
})
|
/server.R
|
no_license
|
aravindmcs1/ISB-TA-Assignment2
|
R
| false
| false
| 2,480
|
r
|
shinyServer(function(input, output) {
  # Reactive: text of the uploaded file with HTML/XML tags stripped and
  # empty lines removed; NULL until a file has been uploaded.
  Dataset <- reactive({
    if (is.null(input$file1)) {return(NULL)}
    else{
      text <- readLines(input$file1$datapath )
      text = str_replace_all(text, "<.*?>", "")
      text = text[text !=""]
      return(text)
    }
  })
  # Reactive: the udpipe language model matching the selected UI language.
  # NOTE(review): the .udpipe model files are assumed to sit in the app
  # directory; if input$lang held any value other than the three handled
  # here, 'lang_model' would be undefined -- confirm the UI restricts it.
  lang_model = reactive({
    if (input$lang == "Eng") {
      lang_model = udpipe_load_model("./english-ud-2.0-170801.udpipe")
    }
    if (input$lang == "Span") {
      lang_model = udpipe_load_model("./spanish-ud-2.0-170801.udpipe")
    }
    if (input$lang == "Hin") {
      lang_model = udpipe_load_model("./hindi-ud-2.0-170801.udpipe")
    }
    return(lang_model)
  })
  # Reactive: the uploaded text annotated by udpipe, as a data frame.
  annot = reactive({
    x <- udpipe_annotate(lang_model(), x = Dataset())
    x <- as.data.frame(x)
    return(x)
  })
  # Download handler: annotated data as CSV (fourth column dropped).
  output$downloadData <- downloadHandler(
    filename = function() {
      "annotated_data.csv"
    },
    content = function(file){
      write.csv(annot()[-4], file, row.names = FALSE)
    }
  )
  # Table of the annotated data (fourth column dropped).
  output$top_100_annotated = renderDataTable({
    if (is.null(input$file1)) {return(NULL)}
    out = annot()[-4]
    return(out)
  })
  # Word cloud of lemma frequencies for the parts of speech selected in
  # input$rd.
  output$wordcloud = renderPlot({
    if (is.null(input$file1)) {return(NULL)}
    else{
      x <- annot()
      x = x %>% subset(., upos %in% input$rd)
      x = txt_freq(x$lemma) # txt_freq() calcs noun freqs in desc order
      wordcloud(x$key, x$freq, scale = c(3.5, 0.5), min.freq = 5, max.words = 100, colors = brewer.pal(8, "Dark2"))
    }
  })
  # Co-occurrence graph of the top 50 lemma pairs for the POS tags selected
  # in input$upos.
  output$COG = renderPlot({
    if (is.null(input$file1)) {return(NULL)}
    else{
      nokia_cooc <- cooccurrence( # try `?cooccurrence` for parm options
        x = subset(annot(), upos %in% input$upos),
        term = "lemma",
        group = c("doc_id", "paragraph_id", "sentence_id"))
      wordnetwork <- head(nokia_cooc, 50)
      wordnetwork <- igraph::graph_from_data_frame(wordnetwork)
      ggraph(wordnetwork, layout = "fr") +
        geom_edge_link(aes(width = cooc, edge_alpha = cooc), edge_colour = "orange") +
        geom_node_text(aes(label = name), col = "darkgreen", size = 4) +
        theme_graph(base_family = "Arial Narrow") +
        theme(legend.position = "none") +
        # labs(title = "Cooccurrences within 3 words distance", subtitle = "Nouns & Adjective")
        labs(title = "Cooccurrences within 3 words distance", subtitle = "Cooccurrences plot")
    }
  })
})
|
# NOTE(review): pareto.chart() is provided by the 'qcc' package, which this
# script never attaches -- run library(qcc) first or the call will fail.
# Read the FHV trip data and draw a Pareto chart of the Uber counts.
pro <- read.csv("FHVData.csv")
y <- as.numeric(pro$Uber)
pareto.chart(y)
|
/paretoCode.r
|
no_license
|
Soma586/Data
|
R
| false
| false
| 72
|
r
|
# NOTE(review): pareto.chart() is provided by the 'qcc' package, which this
# script never attaches -- run library(qcc) first or the call will fail.
# Read the FHV trip data and draw a Pareto chart of the Uber counts.
pro <- read.csv("FHVData.csv")
y <- as.numeric(pro$Uber)
pareto.chart(y)
|
#############################################################
## R code to reproduce statistical analysis in the textbook:
## Agresti, Franklin, Klingenberg
## Statistics: The Art & Science of Learning from Data
## 5th Edition, Pearson 2021
## Web: ArtofStat.com
## Copyright: Bernhard Klingenberg
############################################################
###################
### Chapter 2 ###
### Example 19 ###
###################
#####################
## Standardizing ##
#####################
# Performance data of the athletes in the two events
meters200 <- c(24.48, 23.49, 23.26, 24.32, 24.09, 24.67)
javelin <- c(39.39, 46.06, 36.36, 37.77, 40.93, 33.42)
# scale() centers each value at the mean and divides by the standard
# deviation, i.e. it computes the same z-scores as (x - mean(x)) / sd(x);
# as.numeric() strips the matrix/attribute wrapper it returns.
zScoresMeters200 <- as.numeric(scale(meters200))
zScoresJavelin <- as.numeric(scale(javelin))
# Report the z-scores rounded to 2 decimal places
round(zScoresMeters200, 2)
round(zScoresJavelin, 2)
|
/Chapter_2/Chp_2_Example_19.R
|
no_license
|
artofstat/RCode
|
R
| false
| false
| 953
|
r
|
#############################################################
## R code to reproduce statistical analysis in the textbook:
## Agresti, Franklin, Klingenberg
## Statistics: The Art & Science of Learning from Data
## 5th Edition, Pearson 2021
## Web: ArtofStat.com
## Copyright: Bernhard Klingenberg
############################################################
###################
### Chapter 2 ###
### Example 19 ###
###################
#####################
## Standardizing ##
#####################
# Performance data of the athletes in the two events
meters200 <- c(24.48, 23.49, 23.26, 24.32, 24.09, 24.67)
javelin <- c(39.39, 46.06, 36.36, 37.77, 40.93, 33.42)
# scale() centers each value at the mean and divides by the standard
# deviation, i.e. it computes the same z-scores as (x - mean(x)) / sd(x);
# as.numeric() strips the matrix/attribute wrapper it returns.
zScoresMeters200 <- as.numeric(scale(meters200))
zScoresJavelin <- as.numeric(scale(javelin))
# Report the z-scores rounded to 2 decimal places
round(zScoresMeters200, 2)
round(zScoresJavelin, 2)
|
context("run_calculations")

test_that("works", {
  ## make data: two problems built with identical objective, targets,
  ## decisions and boundary penalties
  data(sim_pu_raster, sim_features)
  p1 <-
    problem(sim_pu_raster, sim_features) %>%
    add_min_set_objective() %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions() %>%
    add_boundary_penalties(3, 0.5)
  p2 <-
    problem(sim_pu_raster, sim_features) %>%
    add_min_set_objective() %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions() %>%
    add_boundary_penalties(3, 0.5)
  ## run calculations on the first problem only
  run_calculations(p1)
  ## compile problems
  o1 <- compile(p1)
  o2 <- compile(p2)
  ## tests: pre-computing the calculations must not change the compiled
  ## optimization problem
  expect_equal(as.list(o1), as.list(o2))
})
|
/tests/testthat/test_run_calculations.R
|
no_license
|
bbest/prioritizr
|
R
| false
| false
| 633
|
r
|
context("run_calculations")

test_that("works", {
  ## make data: two problems built with identical objective, targets,
  ## decisions and boundary penalties
  data(sim_pu_raster, sim_features)
  p1 <-
    problem(sim_pu_raster, sim_features) %>%
    add_min_set_objective() %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions() %>%
    add_boundary_penalties(3, 0.5)
  p2 <-
    problem(sim_pu_raster, sim_features) %>%
    add_min_set_objective() %>%
    add_relative_targets(0.1) %>%
    add_binary_decisions() %>%
    add_boundary_penalties(3, 0.5)
  ## run calculations on the first problem only
  run_calculations(p1)
  ## compile problems
  o1 <- compile(p1)
  o2 <- compile(p2)
  ## tests: pre-computing the calculations must not change the compiled
  ## optimization problem
  expect_equal(as.list(o1), as.list(o2))
})
|
# Auto-generated fuzzing regression case: calls the internal
# breakfast:::setBitNumber() with a single large negative integer argument
# and prints the structure of whatever it returns.
testlist <- list(n = -1970667520L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
/breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609961662-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 99
|
r
|
# Auto-generated fuzzing regression case: calls the internal
# breakfast:::setBitNumber() with a single large negative integer argument
# and prints the structure of whatever it returns.
testlist <- list(n = -1970667520L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result)
|
#' Replace
#'
#' @description Replaces a resource with the specified input.
#'
#' @param session BasexClient instance-ID
#' @param path Path where to store the data
#' @param input Replacement
#'
#' @return A list with two items
#'   \itemize{
#'     \item {info} {Additional info}
#'     \item {success} {A boolean, indicating if the command was completed successfully}
#'   }
#'
#' @details The 'Replace' command is deprecated and has been renamed to
#'   'Put'; 'Replace' is kept as a convenience wrapper.
#'
#' @details The input can be a UTF-8 encoded XML document, a binary resource,
#'   or any other data (such as JSON or CSV) that the server can successfully
#'   convert to a resource. The call simply delegates to the session object's
#'   own Replace() method and passes its result through.
#'
#' @examples
#' \dontrun{
#' Replace(Session, "test", "<xml>Create test</xml>")
#' }
#'
#' @export
Replace <- function(session, path, input) {
  # Delegate straight to the client object's Replace() method; the value of
  # the last expression is the function's return value.
  session$Replace(path, input)
}
|
/R/Replace.R
|
no_license
|
BenEngbers/RBaseX
|
R
| false
| false
| 1,007
|
r
|
#' Replace
#'
#' @description Replaces a resource with the specified input.
#'
#' @param session BasexClient instance-ID
#' @param path Path where to store the data
#' @param input Replacement
#'
#' @return A list with two items
#'   \itemize{
#'     \item {info} {Additional info}
#'     \item {success} {A boolean, indicating if the command was completed successfully}
#'   }
#'
#' @details The 'Replace' command is deprecated and has been renamed to
#'   'Put'; 'Replace' is kept as a convenience wrapper.
#'
#' @details The input can be a UTF-8 encoded XML document, a binary resource,
#'   or any other data (such as JSON or CSV) that the server can successfully
#'   convert to a resource. The call simply delegates to the session object's
#'   own Replace() method and passes its result through.
#'
#' @examples
#' \dontrun{
#' Replace(Session, "test", "<xml>Create test</xml>")
#' }
#'
#' @export
Replace <- function(session, path, input) {
  # Delegate straight to the client object's Replace() method; the value of
  # the last expression is the function's return value.
  session$Replace(path, input)
}
|
library(waterfall)
# Create a data frame with two columns of scores
set.seed(13435)
x <- data.frame("label"=c("A","B","C","D","E"),"value"=c(5,15,-4,12,2))
x
# Try out the waterfall chart:
waterfallchart(value~label, data=x, col=c(1,2))
|
/waterfall chart.R
|
no_license
|
MHaneferd/RScripts
|
R
| false
| false
| 233
|
r
|
library(waterfall)
# Create a data frame with two columns of scores
set.seed(13435)
x <- data.frame("label"=c("A","B","C","D","E"),"value"=c(5,15,-4,12,2))
x
# Try out the waterfall chart:
waterfallchart(value~label, data=x, col=c(1,2))
|
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. The matrix and its cached inverse live in this function's
## environment; the returned accessor functions close over them.
makeCacheMatrix <- function(x = matrix()) {
  cacheMatrix <- NULL
  setMatrix <- function(y) {
    x <<- y
    cacheMatrix <<- NULL  # a new matrix invalidates any cached inverse
  }
  getMatrix <- function() x
  setCache <- function(inverse) cacheMatrix <<- inverse
  getCache <- function() cacheMatrix
  # BUG FIX: the accessor list was never returned, so callers (cacheSolve)
  # could not reach getMatrix/getCache/setCache at all.
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       setCache = setCache,
       getCache = getCache)
}
# cacheSolve: return the inverse of the special "matrix" object created by
# makeCacheMatrix, computing it only when no cached value is available.
cacheSolve <- function(x, ...) {
  cached <- x$getCache()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$getMatrix(), ...)
    x$setCache(cached)
  }
  cached
}
|
/cachematrix.R
|
no_license
|
cilidon/ProgrammingAssignment2
|
R
| false
| false
| 919
|
r
|
## makeCacheMatrix: build a special "matrix" object that can cache its
## inverse. The matrix and its cached inverse live in this function's
## environment; the returned accessor functions close over them.
makeCacheMatrix <- function(x = matrix()) {
  cacheMatrix <- NULL
  setMatrix <- function(y) {
    x <<- y
    cacheMatrix <<- NULL  # a new matrix invalidates any cached inverse
  }
  getMatrix <- function() x
  setCache <- function(inverse) cacheMatrix <<- inverse
  getCache <- function() cacheMatrix
  # BUG FIX: the accessor list was never returned, so callers (cacheSolve)
  # could not reach getMatrix/getCache/setCache at all.
  list(setMatrix = setMatrix,
       getMatrix = getMatrix,
       setCache = setCache,
       getCache = getCache)
}
# cacheSolve: return the inverse of the special "matrix" object created by
# makeCacheMatrix, computing it only when no cached value is available.
cacheSolve <- function(x, ...) {
  cached <- x$getCache()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    cached <- solve(x$getMatrix(), ...)
    x$setCache(cached)
  }
  cached
}
|
#' RNA-Seq of a TNBC patient
#'
#' A dataset containing RNA-Seq data of two transcriptomes belonging to a
#' triple negative breast cancer (TNBC) patient: a tumor sample and a
#' healthy sample.
#'
#' @format A data frame with 20501 rows and 2 columns:
#' \describe{
#'   \item{Tumor_Sample}{RNA-Seq data of a tumor sample}
#'   \item{Healthy_Sample}{RNA-Seq data of a healthy sample}
#' }
#'
#' @source \url{https://portal.gdc.cancer.gov/}
"exp_tnbc_A2C9"
|
/R/data.R
|
no_license
|
alfonsosaera/iDEG
|
R
| false
| false
| 431
|
r
|
#' RNA-Seq of a TNBC patient
#'
#' A dataset containing RNA-Seq data of two transcriptomes belonging to a
#' triple negative breast cancer (TNBC) patient: a tumor sample and a
#' healthy sample.
#'
#' @format A data frame with 20501 rows and 2 columns:
#' \describe{
#'   \item{Tumor_Sample}{RNA-Seq data of a tumor sample}
#'   \item{Healthy_Sample}{RNA-Seq data of a healthy sample}
#' }
#'
#' @source \url{https://portal.gdc.cancer.gov/}
"exp_tnbc_A2C9"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response.R
\name{print.response}
\alias{print.response}
\title{Printing Responses}
\usage{
\method{print}{response}(x, ...)
}
\arguments{
\item{x}{Object of class \code{response}.}
\item{\ldots}{Ignored.}
}
\description{
Print a response object.
}
\details{
Formats the response as an HTTP response.
}
\seealso{
\code{\link{response}}
}
\keyword{internal}
|
/man/print.response.Rd
|
no_license
|
waynenilsen/prairie
|
R
| false
| true
| 436
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/response.R
\name{print.response}
\alias{print.response}
\title{Printing Responses}
\usage{
\method{print}{response}(x, ...)
}
\arguments{
\item{x}{Object of class \code{response}.}
\item{\ldots}{Ignored.}
}
\description{
Print a response object.
}
\details{
Formats the response as an HTTP response.
}
\seealso{
\code{\link{response}}
}
\keyword{internal}
|
#' The Amari-alpha Csiszar-function in log-space
#'
#' A Csiszar-function is a member of ` F = { f:R_+ to R : f convex } `.
#'
#' When `self_normalized = TRUE`, the Amari-alpha Csiszar-function is:
#'
#' ```
#' f(u) = { -log(u) + (u - 1)}, alpha = 0
#' { u log(u) - (u - 1)}, alpha = 1
#' { ((u^alpha - 1) - alpha (u - 1) / (alpha (alpha - 1))}, otherwise
#' ```
#'
#' When `self_normalized = FALSE` the `(u - 1)` terms are omitted.
#'
#' Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
#' non-log-space calculations and may therefore be numerically unstable for
#' `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param alpha `float`-like scalar.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @section References:
#' - A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences: Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp. 1532-1568, 2010.
#'
#' @family vi-functions
#'
#' @export
vi_amari_alpha <- function(logu, alpha = 1, self_normalized = FALSE,
                           name = NULL) {
  # Thin wrapper: delegate to the TensorFlow Probability implementation,
  # passing every argument by keyword so the call is explicit.
  tfp$vi$amari_alpha(logu = logu,
                     alpha = alpha,
                     self_normalized = self_normalized,
                     name = name)
}
#' The reverse Kullback-Leibler Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = TRUE`, the KL-reverse Csiszar-function is `f(u) = -log(u) + (u - 1)`.
#' When `self_normalized = FALSE` the `(u - 1)` term is omitted.
#' Observe that as an f-Divergence, this Csiszar-function implies: `D_f[p, q] = KL[q, p]`
#'
#' The KL is "reverse" because in maximum likelihood we think of minimizing `q` as in `KL[p, q]`.
#'
#' Warning: when `self_normalized = TRUE` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @family vi-functions
#'
#' @export
vi_kl_reverse <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's reverse-KL Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$kl_reverse(logu = logu,
                    self_normalized = self_normalized,
                    name = name)
}
#' The forward Kullback-Leibler Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = TRUE`, the KL-forward Csiszar-function is `f(u) = u log(u) - (u - 1)`.
#' When `self_normalized = FALSE` the `(u - 1)` term is omitted.
#' Observe that as an f-Divergence, this Csiszar-function implies: `D_f[p, q] = KL[p, q]`
#'
#' The KL is "forward" because in maximum likelihood we think of minimizing `q` as in `KL[p, q]`.
#'
#' Warning: when `self_normalized = TRUE` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @family vi-functions
#'
#' @export
vi_kl_forward <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's forward-KL Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$kl_forward(logu = logu,
                    self_normalized = self_normalized,
                    name = name)
}
#' Monte-Carlo approximation of an f-Divergence variational loss
#'
#' Variational losses measure the divergence between an unnormalized target
#' distribution `p` (provided via `target_log_prob_fn`) and a surrogate
#' distribution `q` (provided as `surrogate_posterior`). When the
#' target distribution is an unnormalized posterior from conditioning a model on
#' data, minimizing the loss with respect to the parameters of
#' `surrogate_posterior` performs approximate posterior inference.
#'
#' This function defines divergences of the form
#' `E_q[discrepancy_fn(log p(z) - log q(z))]`, sometimes known as
#' [f-divergences](https://en.wikipedia.org/wiki/F-divergence).
#'
#' In the special case `discrepancy_fn(logu) == -logu` (the default
#' `vi_kl_reverse`), this is the reverse Kullback-Leibler divergence
#' `KL[q||p]`, whose negation applied to an unnormalized `p` is the widely-used
#' evidence lower bound (ELBO). Other cases of interest available under
#' `tfp$vi` include the forward `KL[p||q]` (given by `vi_kl_forward(logu) == exp(logu) * logu`),
#' total variation distance, Amari alpha-divergences, and more.
#'
#' Csiszar f-divergences
#'
#' A Csiszar function `f` is a convex function from `R^+` (the positive reals)
#' to `R`. The Csiszar f-Divergence is given by:
#' ```
#' D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]
#' ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),
#' where x_j ~iid q(X)
#' ```
#'
#' For example, `f = lambda u: -log(u)` recovers `KL[q||p]`, while `f = lambda u: u * log(u)`
#' recovers the forward `KL[p||q]`. These and other functions are available in `tfp$vi`.
#'
#' Tricks: Reparameterization and Score-Gradient
#'
#' When q is "reparameterized", i.e., a diffeomorphic transformation of a
#' parameterless distribution (e.g., `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`),
#' we can swap gradient and expectation, i.e.,
#' `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`
#' and `s_i = f(x_i), x_i ~iid q(X)`.
#'
#' However, if q is not reparameterized, TensorFlow's gradient will be incorrect
#' since the chain-rule stops at samples of unreparameterized distributions. In
#' this circumstance using the Score-Gradient trick results in an unbiased
#' gradient, i.e.,
#' ```
#' grad[ E_q[f(X)] ]
#' = grad[ int dx q(x) f(x) ]
#' = int dx grad[ q(x) f(x) ]
#' = int dx [ q'(x) f(x) + q(x) f'(x) ]
#' = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]
#' = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]
#' = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]
#' ```
#' Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is
#' usually preferable to set `use_reparametrization = True`.
#'
#' Example Application:
#' The Csiszar f-Divergence is a useful framework for variational inference.
#' I.e., observe that,
#' ```
#' f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )
#' <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]
#' := D_f[p(x, Z), q(Z | x)]
#' ```
#'
#' The inequality follows from the fact that the "perspective" of `f`, i.e.,
#' `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and
#' `t` is a real. Since the above framework includes the popular Evidence Lower
#' BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework
#' "Evidence Divergence Bound Optimization" (EDBO).
#'
#' @param target_log_prob_fn function that takes a set of `Tensor` arguments
#' and returns a `Tensor` log-density. Given
#' `q_sample <- surrogate_posterior$sample(sample_size)`, this
#' will be (in Python) called as `target_log_prob_fn(q_sample)` if `q_sample` is a list
#' or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
#' dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
#' It should support batched evaluation, i.e., should return a result of
#' shape `[sample_size]`.
#' @param surrogate_posterior A `tfp$distributions$Distribution`
#' instance defining a variational posterior (could be a
#' `tfp$distributions$JointDistribution`). Crucially, the distribution's `log_prob` and
#' (if reparameterized) `sample` methods must directly invoke all ops
#' that generate gradients to the underlying variables. One way to ensure
#' this is to use `tfp$util$DeferredTensor` to represent any parameters
#' defined as transformations of unconstrained variables, so that the
#' transformations execute at runtime instead of at distribution creation.
#' @param sample_size `integer` number of Monte Carlo samples to use
#' in estimating the variational divergence. Larger values may stabilize
#' the optimization, but at higher cost per step in time and memory.
#' Default value: `1`.
#' @param discrepancy_fn function representing a Csiszar `f` function in
#' in log-space. That is, `discrepancy_fn(log(u)) = f(u)`, where `f` is
#' convex in `u`. Default value: `vi_kl_reverse`.
#' @param use_reparametrization `logical`. When `NULL` (the default),
#' automatically set to: `surrogate_posterior.reparameterization_type == tfp$distributions$FULLY_REPARAMETERIZED`.
#' When `TRUE` uses the standard Monte-Carlo average. When `FALSE` uses the score-gradient trick. (See above for
#' details.) When `FALSE`, consider using `csiszar_vimco`.
#' @param seed `integer` seed for `surrogate_posterior$sample`.
#' @param name name prefixed to Ops created by this function.
#'
#' @return monte_carlo_variational_loss `float`-like `Tensor` Monte Carlo
#' approximation of the Csiszar f-Divergence.
#'
#' @section References:
#' - Ali, Syed Mumtaz, and Samuel D. Silvey. "A general class of coefficients of divergence of one distribution from another."
#' Journal of the Royal Statistical Society: Series B (Methodological) 28.1 (1966): 131-142.
#' @family vi-functions
#'
#' @export
vi_monte_carlo_variational_loss <- function(target_log_prob_fn,
                                            surrogate_posterior,
                                            sample_size = 1,
                                            discrepancy_fn = vi_kl_reverse,
                                            use_reparametrization = NULL,
                                            seed = NULL,
                                            name = NULL) {
  # The Monte Carlo draw count must reach Python as an int, not an R double.
  n_draws <- as.integer(sample_size)
  # Arguments are forwarded positionally in the order expected by
  # tfp$vi$monte_carlo_variational_loss.
  tfp$vi$monte_carlo_variational_loss(target_log_prob_fn,
                                      surrogate_posterior,
                                      n_draws,
                                      discrepancy_fn,
                                      use_reparametrization,
                                      seed,
                                      name)
}
#' The Jensen-Shannon Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
#' ```
#' f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
#' ```
#'
#' When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
#'
#' Observe that as an f-Divergence, this Csiszar-function implies:
#'
#' ```
#' D_f[p, q] = KL[p, m] + KL[q, m]
#' m(x) = 0.5 p(x) + 0.5 q(x)
#' ```
#'
#' In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
#' f-Divergence.
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @section References:
#' - Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
#' Inf. Th., 37, 145-151, 1991.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return jensen_shannon_of_u, `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_jensen_shannon <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's Jensen-Shannon Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$jensen_shannon(logu = logu,
                        self_normalized = self_normalized,
                        name = name)
}
#' The Arithmetic-Geometric Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:
#' ```
#' f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
#' ```
#'
#' When `self_normalized = False` the `(1 + u) log(2)` term is omitted.
#'
#' Observe that as an f-Divergence, this Csiszar-function implies:
#'
#' ```
#' D_f[p, q] = KL[m, p] + KL[m, q]
#' m(x) = 0.5 p(x) + 0.5 q(x)
#' ```
#'
#' In a sense, this divergence is the "reverse" of the Jensen-Shannon
#' f-Divergence.
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: when `self_normalized = TRUE` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return arithmetic_geometric_of_u: `float`-like `Tensor` of the
#' Csiszar-function evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_arithmetic_geometric <- function(logu, self_normalized = FALSE,
                                    name = NULL) {
  # Thin wrapper: delegate to TFP's Arithmetic-Geometric Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$arithmetic_geometric(logu = logu,
                              self_normalized = self_normalized,
                              name = name)
}
#' The Total Variation Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Total-Variation Csiszar-function is:
#' ```
#' f(u) = 0.5 |u - 1|
#' ```
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_total_variation <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Total-Variation Csiszar-function
  # at u = exp(logu).
  tfp$vi$total_variation(logu = logu, name = name)
}
#' The Pearson Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Pearson Csiszar-function is:
#' ```
#' f(u) = (u - 1)**2
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return pearson_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_pearson <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Pearson Csiszar-function f(u) = (u - 1)^2
  # at u = exp(logu).
  tfp$vi$pearson(logu = logu, name = name)
}
#' The Squared-Hellinger Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Squared-Hellinger Csiszar-function is:
#' ```
#' f(u) = (sqrt(u) - 1)**2
#' ```
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return Squared-Hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_squared_hellinger <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Squared-Hellinger Csiszar-function
  # at u = exp(logu).
  tfp$vi$squared_hellinger(logu = logu, name = name)
}
#' The Triangular Csiszar-function in log-space
#'
#' The Triangular Csiszar-function is:
#'
#' ```
#' f(u) = (u - 1)**2 / (1 + u)
#' ```
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return triangular_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_triangular <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Triangular Csiszar-function
  # at u = exp(logu).
  tfp$vi$triangular(logu = logu, name = name)
}
#' The T-Power Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the T-Power Csiszar-function is:
#'
#' ```
#' f(u) = s [ u**t - 1 - t(u - 1) ]
#' s = { -1 0 < t < 1 }
#' { +1 otherwise }
#' ```
#'
#' When `self_normalized = False` the `- t(u - 1)` term is omitted.
#'
#' This is similar to the `amari_alpha` Csiszar-function, with the associated
#' divergence being the same up to factors depending only on `t`.
#'
#' Warning: when `self_normalized = TRUE` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param t `Tensor` of same `dtype` as `logu` and broadcastable shape.
#' @inheritParams vi_amari_alpha
#' @return t_power_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_t_power <- function(logu, t, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's T-Power Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$t_power(logu = logu,
                 t = t,
                 self_normalized = self_normalized,
                 name = name)
}
#' The log1p-abs Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Log1p-Abs Csiszar-function is:
#'
#' ```
#' f(u) = u**(sign(u-1)) - 1
#' ```
#'
#' This function is so-named because it was invented from the following recipe.
#' Choose a convex function g such that g(0)=0 and solve for f:
#'
#' ```
#' log(1 + f(u)) = g(log(u)).
#' <=>
#' f(u) = exp(g(log(u))) - 1
#' ```
#'
#' That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
#' is `log`-domain.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_log1p_abs <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Log1p-Abs Csiszar-function
  # at u = exp(logu).
  tfp$vi$log1p_abs(logu = logu, name = name)
}
#' The Jeffreys Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Jeffreys Csiszar-function is:
#'
#' ```
#' f(u) = 0.5 ( u log(u) - log(u))
#' = 0.5 kl_forward + 0.5 kl_reverse
#' = symmetrized_csiszar_function(kl_reverse)
#' = symmetrized_csiszar_function(kl_forward)
#' ```
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_jeffreys <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's Jeffreys Csiszar-function
  # at u = exp(logu).
  tfp$vi$jeffreys(logu = logu, name = name)
}
#' The chi-square Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Chi-square Csiszar-function is:
#'
#' ```
#' f(u) = u**2 - 1
#' ```
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return chi_square_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_chi_square <- function(logu, name = NULL) {
  # Thin wrapper: evaluate TFP's chi-square Csiszar-function
  # at u = exp(logu).
  tfp$vi$chi_square(logu = logu, name = name)
}
#' The Modified-GAN Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the modified-GAN (Generative/Adversarial
#' Network) Csiszar-function is:
#'
#' ```
#' f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
#' ```
#'
#' When `self_normalized = False` the `0.5 (u - 1)` is omitted.
#'
#' The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
#' `self_normalized = False`).
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return jensen_shannon_of_u, `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_modified_gan <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's modified-GAN Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$modified_gan(logu = logu,
                      self_normalized = self_normalized,
                      name = name)
}
#' Calculates the dual Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Csiszar-dual is defined as:
#' ```
#' f^*(u) = u f(1 / u)
#' ```
#' where `f` is some other Csiszar-function.
#' For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
#' ```
#' f(u) = -log(u)
#' f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
#' ```
#' The dual of the dual is the original function:
#' ```
#' f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @param csiszar_function function representing a Csiszar-function over log-domain.
#'
#' @inheritParams vi_amari_alpha
#' @return dual_f_of_u `float`-like `Tensor` of the result of calculating the dual of
#' `f` at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_dual_csiszar_function <- function(logu, csiszar_function, name = NULL) {
  # Thin wrapper: compute the Csiszar-dual f^*(u) = u f(1/u) via TFP,
  # forwarding all arguments by keyword.
  tfp$vi$dual_csiszar_function(logu = logu,
                               csiszar_function = csiszar_function,
                               name = name)
}
#' Symmetrizes a Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The symmetrized Csiszar-function is defined as:
#' ```
#' f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
#' ```
#'
#' where `g` is some other Csiszar-function.
#' We say the function is "symmetrized" because:
#' ```
#' D_{f_g}[p, q] = D_{f_g}[q, p]
#' ```
#' for all `p << >> q` (i.e., `support(p) = support(q)`).
#'
#' There exists alternatives for symmetrizing a Csiszar-function. For example,
#' ```
#' f_g(u) = max(f(u), f^*(u)),
#' ```
#'
#' where `f^*` is the dual Csiszar-function, also implies a symmetric
#' f-Divergence.
#'
#' Example:
#' When either of the following functions are symmetrized, we obtain the
#' Jensen-Shannon Csiszar-function, i.e.,
#' ```
#' g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
#' h(u) = log(4) + 2 u log(u / (1 + u))
#' ```
#' implies,
#' ```
#' f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
#' = jensen_shannon(log(u)).
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @param csiszar_function function representing a Csiszar-function over log-domain.
#'
#' @inheritParams vi_amari_alpha
#' @return symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the
#' symmetrization of `g` evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_symmetrized_csiszar_function <- function(logu, csiszar_function,
                                            name = NULL) {
  # Thin wrapper: compute the symmetrized Csiszar-function via TFP,
  # forwarding all arguments by keyword.
  tfp$vi$symmetrized_csiszar_function(logu = logu,
                                      csiszar_function = csiszar_function,
                                      name = name)
}
#' Use VIMCO to lower the variance of the gradient of csiszar_function(Avg(logu))
#'
#' This function generalizes VIMCO (Mnih and Rezende, 2016) to Csiszar
#' f-Divergences.
#'
#' Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`,
#' consider using `monte_carlo_csiszar_f_divergence`.
#'
#' The VIMCO loss is:
#' ```
#' vimco = f(Avg{logu[i] : i=0,...,m-1})
#' where,
#' logu[i] = log( p(x, h[i]) / q(h[i] | x) )
#' h[i] iid~ q(H | x)
#' ```
#'
#' Interestingly, the VIMCO gradient is not the naive gradient of `vimco`.
#' Rather, it is characterized by:
#' ```
#' grad[vimco] - variance_reducing_term
#' where,
#' variance_reducing_term = Sum{ grad[log q(h[i] | x)] *
#' (vimco - f(log Avg{h[j;i] : j=0,...,m-1}))
#' : i=0, ..., m-1 }
#' h[j;i] = { u[j]
#' j!=i
#' { GeometricAverage{ u[k] : k!=i} j==i
#' ```
#'
#' (We omitted `stop_gradient` for brevity. See implementation for more details.)
#' The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th
#' element has been replaced by the leave-`i`-out Geometric-average.
#'
#' This implementation prefers numerical precision over efficiency, i.e.,
#' `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`.
#' (The constant may be fairly large, perhaps around 12.)
#'
#' @param f function representing a Csiszar-function in log-space.
#' @param p_log_prob function representing the natural-log of the
#' probability under distribution `p`. (In variational inference `p` is the
#' joint distribution.)
#' @param q `tfd$Distribution`-like instance; must implement: `sample(n, seed)`, and
#' `log_prob(x)`. (In variational inference `q` is the approximate posterior
#' distribution.)
#' @param num_draws Integer scalar number of draws used to approximate the
#' f-Divergence expectation.
#' @param num_batch_draws Integer scalar number of draws used to approximate the
#' f-Divergence expectation.
#' @param seed `integer` seed for `q$sample`.
#' @param name String prefixed to Ops created by this function.
#'
#' @return vimco The Csiszar f-Divergence generalized VIMCO objective
#'
#' @section References:
#' - [Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo objectives. In _International Conference on Machine Learning_, 2016.](https://arxiv.org/abs/1602.06725)
#'
#' @family vi-functions
#' @export
vi_csiszar_vimco <- function(f,
                             p_log_prob,
                             q,
                             num_draws,
                             num_batch_draws = 1,
                             seed = NULL,
                             name = NULL) {
  # Bug fix: only convert `seed` when it is supplied. as.integer(NULL)
  # yields integer(0), which reticulate does not translate to Python's
  # None and which would break the default (unseeded) behaviour.
  if (!is.null(seed)) {
    seed <- as.integer(seed)
  }
  # Draw counts must reach Python as ints, not R doubles.
  tfp$vi$csiszar_vimco(f,
                       p_log_prob,
                       q,
                       as.integer(num_draws),
                       as.integer(num_batch_draws),
                       seed,
                       name)
}
|
/R/vi-functions.R
|
permissive
|
jeffreypullin/tfprobability
|
R
| false
| false
| 24,872
|
r
|
#' The Amari-alpha Csiszar-function in log-space
#'
#' A Csiszar-function is a member of ` F = { f:R_+ to R : f convex } `.
#'
#' When `self_normalized = TRUE`, the Amari-alpha Csiszar-function is:
#'
#' ```
#' f(u) = { -log(u) + (u - 1)}, alpha = 0
#' { u log(u) - (u - 1)}, alpha = 1
#' { ((u^alpha - 1) - alpha (u - 1) / (alpha (alpha - 1))}, otherwise
#' ```
#'
#' When `self_normalized = FALSE` the `(u - 1)` terms are omitted.
#'
#' Warning: when `alpha != 0` and/or `self_normalized = True` this function makes
#' non-log-space calculations and may therefore be numerically unstable for
#' `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param alpha `float`-like scalar.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @section References:
#' - A. Cichocki and S. Amari. "Families of Alpha-Beta-and GammaDivergences: Flexible and Robust Measures of Similarities." Entropy, vol. 12, no. 6, pp. 1532-1568, 2010.
#'
#' @family vi-functions
#'
#' @export
vi_amari_alpha <- function(logu, alpha = 1, self_normalized = FALSE,
                           name = NULL) {
  # Thin wrapper: delegate to the TensorFlow Probability implementation,
  # passing every argument by keyword so the call is explicit.
  tfp$vi$amari_alpha(logu = logu,
                     alpha = alpha,
                     self_normalized = self_normalized,
                     name = name)
}
#' The reverse Kullback-Leibler Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = TRUE`, the KL-reverse Csiszar-function is `f(u) = -log(u) + (u - 1)`.
#' When `self_normalized = FALSE` the `(u - 1)` term is omitted.
#' Observe that as an f-Divergence, this Csiszar-function implies: `D_f[p, q] = KL[q, p]`
#'
#' The KL is "reverse" because in maximum likelihood we think of minimizing `q` as in `KL[p, q]`.
#'
#' Warning: when self_normalized = True` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @family vi-functions
#'
#' @export
vi_kl_reverse <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's reverse-KL Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$kl_reverse(logu = logu,
                    self_normalized = self_normalized,
                    name = name)
}
#' The forward Kullback-Leibler Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = TRUE`, the KL-forward Csiszar-function is `f(u) = u log(u) - (u - 1)`.
#' When `self_normalized = FALSE` the `(u - 1)` term is omitted.
#' Observe that as an f-Divergence, this Csiszar-function implies: `D_f[p, q] = KL[p, q]`
#'
#' The KL is "forward" because in maximum likelihood we think of minimizing `q` as in `KL[p, q]`.
#'
#' Warning: when self_normalized = True` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param logu `float`-like `Tensor` representing `log(u)` from above.
#' @param self_normalized `logical` indicating whether `f'(u=1)=0`. When
#' `f'(u=1)=0` the implied Csiszar f-Divergence remains non-negative even
#' when `p, q` are unnormalized measures.
#' @param name name prefixed to Ops created by this function.
#'
#' @family vi-functions
#'
#' @export
vi_kl_forward <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper: delegate to TFP's forward-KL Csiszar-function,
  # forwarding all arguments by keyword.
  tfp$vi$kl_forward(logu = logu,
                    self_normalized = self_normalized,
                    name = name)
}
#' Monte-Carlo approximation of an f-Divergence variational loss
#'
#' Variational losses measure the divergence between an unnormalized target
#' distribution `p` (provided via `target_log_prob_fn`) and a surrogate
#' distribution `q` (provided as `surrogate_posterior`). When the
#' target distribution is an unnormalized posterior from conditioning a model on
#' data, minimizing the loss with respect to the parameters of
#' `surrogate_posterior` performs approximate posterior inference.
#'
#' This function defines divergences of the form
#' `E_q[discrepancy_fn(log p(z) - log q(z))]`, sometimes known as
#' [f-divergences](https://en.wikipedia.org/wiki/F-divergence).
#'
#' In the special case `discrepancy_fn(logu) == -logu` (the default
#' `vi_kl_reverse`), this is the reverse Kullback-Liebler divergence
#' `KL[q||p]`, whose negation applied to an unnormalized `p` is the widely-used
#' evidence lower bound (ELBO). Other cases of interest available under
#' `tfp$vi` include the forward `KL[p||q]` (given by `vi_kl_forward(logu) == exp(logu) * logu`),
#' total variation distance, Amari alpha-divergences, and more.
#'
#' Csiszar f-divergences
#'
#' A Csiszar function `f` is a convex function from `R^+` (the positive reals)
#' to `R`. The Csiszar f-Divergence is given by:
#' ```
#' D_f[p(X), q(X)] := E_{q(X)}[ f( p(X) / q(X) ) ]
#' ~= m**-1 sum_j^m f( p(x_j) / q(x_j) ),
#' where x_j ~iid q(X)
#' ```
#'
#' For example, `f = lambda u: -log(u)` recovers `KL[q||p]`, while `f = lambda u: u * log(u)`
#' recovers the forward `KL[p||q]`. These and other functions are available in `tfp$vi`.
#'
#' Tricks: Reparameterization and Score-Gradient
#'
#' When q is "reparameterized", i.e., a diffeomorphic transformation of a
#' parameterless distribution (e.g., `Normal(Y; m, s) <=> Y = sX + m, X ~ Normal(0,1)`),
#' we can swap gradient and expectation, i.e.,
#' `grad[Avg{ s_i : i=1...n }] = Avg{ grad[s_i] : i=1...n }` where `S_n=Avg{s_i}`
#' and `s_i = f(x_i), x_i ~iid q(X)`.
#'
#' However, if q is not reparameterized, TensorFlow's gradient will be incorrect
#' since the chain-rule stops at samples of unreparameterized distributions. In
#' this circumstance using the Score-Gradient trick results in an unbiased
#' gradient, i.e.,
#' ```
#' grad[ E_q[f(X)] ]
#' = grad[ int dx q(x) f(x) ]
#' = int dx grad[ q(x) f(x) ]
#' = int dx [ q'(x) f(x) + q(x) f'(x) ]
#' = int dx q(x) [q'(x) / q(x) f(x) + f'(x) ]
#' = int dx q(x) grad[ f(x) q(x) / stop_grad[q(x)] ]
#' = E_q[ grad[ f(x) q(x) / stop_grad[q(x)] ] ]
#' ```
#' Unless `q.reparameterization_type != tfd.FULLY_REPARAMETERIZED` it is
#' usually preferable to set `use_reparametrization = True`.
#'
#' Example Application:
#' The Csiszar f-Divergence is a useful framework for variational inference.
#' I.e., observe that,
#' ```
#' f(p(x)) = f( E_{q(Z | x)}[ p(x, Z) / q(Z | x) ] )
#' <= E_{q(Z | x)}[ f( p(x, Z) / q(Z | x) ) ]
#' := D_f[p(x, Z), q(Z | x)]
#' ```
#'
#' The inequality follows from the fact that the "perspective" of `f`, i.e.,
#' `(s, t) |-> t f(s / t))`, is convex in `(s, t)` when `s/t in domain(f)` and
#' `t` is a real. Since the above framework includes the popular Evidence Lower
#' BOund (ELBO) as a special case, i.e., `f(u) = -log(u)`, we call this framework
#' "Evidence Divergence Bound Optimization" (EDBO).
#'
#' @param target_log_prob_fn function that takes a set of `Tensor` arguments
#' and returns a `Tensor` log-density. Given
#' `q_sample <- surrogate_posterior$sample(sample_size)`, this
#' will be (in Python) called as `target_log_prob_fn(q_sample)` if `q_sample` is a list
#' or a tuple, `target_log_prob_fn(**q_sample)` if `q_sample` is a
#' dictionary, or `target_log_prob_fn(q_sample)` if `q_sample` is a `Tensor`.
#' It should support batched evaluation, i.e., should return a result of
#' shape `[sample_size]`.
#' @param surrogate_posterior A `tfp$distributions$Distribution`
#' instance defining a variational posterior (could be a
#' `tfp$distributions$JointDistribution`). Crucially, the distribution's `log_prob` and
#' (if reparameterized) `sample` methods must directly invoke all ops
#' that generate gradients to the underlying variables. One way to ensure
#' this is to use `tfp$util$DeferredTensor` to represent any parameters
#' defined as transformations of unconstrained variables, so that the
#' transformations execute at runtime instead of at distribution creation.
#' @param sample_size `integer` number of Monte Carlo samples to use
#' in estimating the variational divergence. Larger values may stabilize
#' the optimization, but at higher cost per step in time and memory.
#' Default value: `1`.
#' @param discrepancy_fn function representing a Csiszar `f` function in
#' in log-space. That is, `discrepancy_fn(log(u)) = f(u)`, where `f` is
#' convex in `u`. Default value: `vi_kl_reverse`.
#' @param use_reparametrization `logical`. When `NULL` (the default),
#' automatically set to: `surrogate_posterior.reparameterization_type == tfp$distributions$FULLY_REPARAMETERIZED`.
#' When `TRUE` uses the standard Monte-Carlo average. When `FALSE` uses the score-gradient trick. (See above for
#' details.) When `FALSE`, consider using `csiszar_vimco`.
#' @param seed `integer` seed for `surrogate_posterior$sample`.
#' @param name name prefixed to Ops created by this function.
#'
#' @return monte_carlo_variational_loss `float`-like `Tensor` Monte Carlo
#' approximation of the Csiszar f-Divergence.
#'
#' @section References:
#' - Ali, Syed Mumtaz, and Samuel D. Silvey. "A general class of coefficients of divergence of one distribution from another."
#' Journal of the Royal Statistical Society: Series B (Methodological) 28.1 (1966): 131-142.
#' @family vi-functions
#'
#' @export
vi_monte_carlo_variational_loss <- function(target_log_prob_fn,
                                            surrogate_posterior,
                                            sample_size = 1,
                                            discrepancy_fn = vi_kl_reverse,
                                            use_reparametrization = NULL,
                                            seed = NULL,
                                            name = NULL) {
  # The Python side expects an integer sample count; R numerics are doubles.
  n_samples <- as.integer(sample_size)
  # Delegate to the TensorFlow Probability implementation (positional args).
  tfp$vi$monte_carlo_variational_loss(target_log_prob_fn,
                                      surrogate_posterior,
                                      n_samples,
                                      discrepancy_fn,
                                      use_reparametrization,
                                      seed,
                                      name)
}
#' The Jensen-Shannon Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True`, the Jensen-Shannon Csiszar-function is:
#' ```
#' f(u) = u log(u) - (1 + u) log(1 + u) + (u + 1) log(2)
#' ```
#'
#' When `self_normalized = False` the `(u + 1) log(2)` term is omitted.
#'
#' Observe that as an f-Divergence, this Csiszar-function implies:
#'
#' ```
#' D_f[p, q] = KL[p, m] + KL[q, m]
#' m(x) = 0.5 p(x) + 0.5 q(x)
#' ```
#'
#' In a sense, this divergence is the "reverse" of the Arithmetic-Geometric
#' f-Divergence.
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @section References:
#' - Lin, J. "Divergence measures based on the Shannon entropy." IEEE Trans.
#' Inf. Th., 37, 145-151, 1991.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return jensen_shannon_of_u, `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_jensen_shannon <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$jensen_shannon(logu, self_normalized, name)
}
#' The Arithmetic-Geometric Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the Arithmetic-Geometric Csiszar-function is:
#' ```
#' f(u) = (1 + u) log( (1 + u) / sqrt(u) ) - (1 + u) log(2)
#' ```
#'
#' When `self_normalized = False` the `(1 + u) log(2)` term is omitted.
#'
#' Observe that as an f-Divergence, this Csiszar-function implies:
#'
#' ```
#' D_f[p, q] = KL[m, p] + KL[m, q]
#' m(x) = 0.5 p(x) + 0.5 q(x)
#' ```
#'
#' In a sense, this divergence is the "reverse" of the Jensen-Shannon
#' f-Divergence.
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: when `self_normalized = True` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return arithmetic_geometric_of_u: `float`-like `Tensor` of the
#' Csiszar-function evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_arithmetic_geometric <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$arithmetic_geometric(logu, self_normalized, name)
}
#' The Total Variation Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Total-Variation Csiszar-function is:
#' ```
#' f(u) = 0.5 |u - 1|
#' ```
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return total_variation_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_total_variation <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$total_variation(logu, name)
}
#' The Pearson Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Pearson Csiszar-function is:
#' ```
#' f(u) = (u - 1)**2
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#'
#' @return pearson_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_pearson <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$pearson(logu, name)
}
#' The Squared-Hellinger Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Squared-Hellinger Csiszar-function is:
#' ```
#' f(u) = (sqrt(u) - 1)**2
#' ```
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return Squared-Hellinger_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_squared_hellinger <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$squared_hellinger(logu, name)
}
#' The Triangular Csiszar-function in log-space
#'
#' The Triangular Csiszar-function is:
#'
#' ```
#' f(u) = (u - 1)**2 / (1 + u)
#' ```
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return triangular_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_triangular <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$triangular(logu, name)
}
#' The T-Power Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the T-Power Csiszar-function is:
#'
#' ```
#' f(u) = s [ u**t - 1 - t(u - 1) ]
#' s = { -1 0 < t < 1 }
#' { +1 otherwise }
#' ```
#'
#' When `self_normalized = False` the `- t(u - 1)` term is omitted.
#'
#' This is similar to the `amari_alpha` Csiszar-function, with the associated
#' divergence being the same up to factors depending only on `t`.
#'
#' Warning: when `self_normalized = True` this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @param t `Tensor` of same `dtype` as `logu` and broadcastable shape.
#' @inheritParams vi_amari_alpha
#' @return t_power_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_t_power <- function(logu, t, self_normalized = FALSE, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$t_power(logu, t, self_normalized, name)
}
#' The log1p-abs Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Log1p-Abs Csiszar-function is:
#'
#' ```
#' f(u) = u**(sign(u-1)) - 1
#' ```
#'
#' This function is so-named because it was invented from the following recipe.
#' Choose a convex function g such that g(0)=0 and solve for f:
#'
#' ```
#' log(1 + f(u)) = g(log(u)).
#' <=>
#' f(u) = exp(g(log(u))) - 1
#' ```
#'
#' That is, the graph is identically `g` when y-axis is `log1p`-domain and x-axis
#' is `log`-domain.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return log1p_abs_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_log1p_abs <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$log1p_abs(logu, name)
}
#' The Jeffreys Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Jeffreys Csiszar-function is:
#'
#' ```
#' f(u) = 0.5 ( u log(u) - log(u))
#' = 0.5 kl_forward + 0.5 kl_reverse
#' = symmetrized_csiszar_function(kl_reverse)
#' = symmetrized_csiszar_function(kl_forward)
#' ```
#'
#' This Csiszar-function induces a symmetric f-Divergence, i.e.,
#' `D_f[p, q] = D_f[q, p]`.
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return jeffreys_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_jeffreys <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$jeffreys(logu, name)
}
#' The chi-square Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Chi-square Csiszar-function is:
#'
#' ```
#' f(u) = u**2 - 1
#' ```
#'
#' Warning: this function makes non-log-space calculations and may
#' therefore be numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return chi_square_of_u: `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#' @family vi-functions
#' @export
vi_chi_square <- function(logu, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$chi_square(logu, name)
}
#' The Modified-GAN Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' When `self_normalized = True` the modified-GAN (Generative/Adversarial
#' Network) Csiszar-function is:
#'
#' ```
#' f(u) = log(1 + u) - log(u) + 0.5 (u - 1)
#' ```
#'
#' When `self_normalized = False` the `0.5 (u - 1)` is omitted.
#'
#' The unmodified GAN Csiszar-function is identical to Jensen-Shannon (with
#' `self_normalized = False`).
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @inheritParams vi_amari_alpha
#' @return modified_gan_of_u, `float`-like `Tensor` of the Csiszar-function
#' evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_modified_gan <- function(logu, self_normalized = FALSE, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$modified_gan(logu, self_normalized, name)
}
#' Calculates the dual Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The Csiszar-dual is defined as:
#' ```
#' f^*(u) = u f(1 / u)
#' ```
#' where `f` is some other Csiszar-function.
#' For example, the dual of `kl_reverse` is `kl_forward`, i.e.,
#' ```
#' f(u) = -log(u)
#' f^*(u) = u f(1 / u) = -u log(1 / u) = u log(u)
#' ```
#' The dual of the dual is the original function:
#' ```
#' f^**(u) = {u f(1/u)}^*(u) = u (1/u) f(1/(1/u)) = f(u)
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @param csiszar_function function representing a Csiszar-function over log-domain.
#'
#' @inheritParams vi_amari_alpha
#' @return dual_f_of_u `float`-like `Tensor` of the result of calculating the dual of
#' `f` at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_dual_csiszar_function <- function(logu, csiszar_function, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$dual_csiszar_function(logu, csiszar_function, name)
}
#' Symmetrizes a Csiszar-function in log-space
#'
#' A Csiszar-function is a member of `F = { f:R_+ to R : f convex }`.
#'
#' The symmetrized Csiszar-function is defined as:
#' ```
#' f_g(u) = 0.5 g(u) + 0.5 u g (1 / u)
#' ```
#'
#' where `g` is some other Csiszar-function.
#' We say the function is "symmetrized" because:
#' ```
#' D_{f_g}[p, q] = D_{f_g}[q, p]
#' ```
#' for all `p << >> q` (i.e., `support(p) = support(q)`).
#'
#' There exists alternatives for symmetrizing a Csiszar-function. For example,
#' ```
#' f_g(u) = max(f(u), f^*(u)),
#' ```
#'
#' where `f^*` is the dual Csiszar-function, also implies a symmetric
#' f-Divergence.
#'
#' Example:
#' When either of the following functions are symmetrized, we obtain the
#' Jensen-Shannon Csiszar-function, i.e.,
#' ```
#' g(u) = -log(u) - (1 + u) log((1 + u) / 2) + u - 1
#' h(u) = log(4) + 2 u log(u / (1 + u))
#' ```
#' implies,
#' ```
#' f_g(u) = f_h(u) = u log(u) - (1 + u) log((1 + u) / 2)
#' = jensen_shannon(log(u)).
#' ```
#'
#' Warning: this function makes non-log-space calculations and may therefore be
#' numerically unstable for `|logu| >> 0`.
#'
#' @param csiszar_function function representing a Csiszar-function over log-domain.
#'
#' @inheritParams vi_amari_alpha
#' @return symmetrized_g_of_u: `float`-like `Tensor` of the result of applying the
#' symmetrization of `g` evaluated at `u = exp(logu)`.
#'
#' @family vi-functions
#' @export
vi_symmetrized_csiszar_function <- function(logu, csiszar_function, name = NULL) {
  # Thin wrapper delegating to the TensorFlow Probability implementation.
  tfp$vi$symmetrized_csiszar_function(logu, csiszar_function, name)
}
#' Use VIMCO to lower the variance of the gradient of csiszar_function(Avg(logu))
#'
#' This function generalizes VIMCO (Mnih and Rezende, 2016) to Csiszar
#' f-Divergences.
#'
#' Note: if `q.reparameterization_type = tfd.FULLY_REPARAMETERIZED`,
#' consider using `monte_carlo_csiszar_f_divergence`.
#'
#' The VIMCO loss is:
#' ```
#' vimco = f(Avg{logu[i] : i=0,...,m-1})
#' where,
#' logu[i] = log( p(x, h[i]) / q(h[i] | x) )
#' h[i] iid~ q(H | x)
#' ```
#'
#' Interestingly, the VIMCO gradient is not the naive gradient of `vimco`.
#' Rather, it is characterized by:
#' ```
#' grad[vimco] - variance_reducing_term
#' where,
#' variance_reducing_term = Sum{ grad[log q(h[i] | x)] *
#' (vimco - f(log Avg{h[j;i] : j=0,...,m-1}))
#' : i=0, ..., m-1 }
#' h[j;i] = { u[j]
#' j!=i
#' { GeometricAverage{ u[k] : k!=i} j==i
#' ```
#'
#' (We omitted `stop_gradient` for brevity. See implementation for more details.)
#' The `Avg{h[j;i] : j}` term is a kind of "swap-out average" where the `i`-th
#' element has been replaced by the leave-`i`-out Geometric-average.
#'
#' This implementation prefers numerical precision over efficiency, i.e.,
#' `O(num_draws * num_batch_draws * prod(batch_shape) * prod(event_shape))`.
#' (The constant may be fairly large, perhaps around 12.)
#'
#' @param f function representing a Csiszar-function in log-space.
#' @param p_log_prob function representing the natural-log of the
#' probability under distribution `p`. (In variational inference `p` is the
#' joint distribution.)
#' @param q `tfd$Distribution`-like instance; must implement: `sample(n, seed)`, and
#' `log_prob(x)`. (In variational inference `q` is the approximate posterior
#' distribution.)
#' @param num_draws Integer scalar number of draws used to approximate the
#' f-Divergence expectation.
#' @param num_batch_draws Integer scalar number of draws used to approximate the
#' f-Divergence expectation.
#' @param seed `integer` seed for `q$sample`.
#' @param name String prefixed to Ops created by this function.
#'
#' @return vimco The Csiszar f-Divergence generalized VIMCO objective
#'
#' @section References:
#' - [Andriy Mnih and Danilo Rezende. Variational Inference for Monte Carlo objectives. In _International Conference on Machine Learning_, 2016.](https://arxiv.org/abs/1602.06725)
#'
#' @family vi-functions
#' @export
vi_csiszar_vimco <-
  function(f,
           p_log_prob,
           q,
           num_draws,
           num_batch_draws = 1,
           seed = NULL,
           name = NULL) {
    # BUG FIX: `seed` defaults to NULL and as.integer(NULL) yields integer(0),
    # which reticulate does not convert to Python `None` -- the default call
    # would therefore fail on the Python side. Coerce only when a seed was
    # actually supplied (consistent with vi_monte_carlo_variational_loss,
    # which passes `seed` through untouched).
    tfp$vi$csiszar_vimco(f,
                         p_log_prob,
                         q,
                         as.integer(num_draws),
                         as.integer(num_batch_draws),
                         if (is.null(seed)) NULL else as.integer(seed),
                         name)
  }
|
# SDC processing - plotting observed data and smoothing curves
# querying of melt-seasons per ez and year
# script version: 2012-01-27
#
# Overview: for every elevation zone (ez1..ez2) and year (y1..y2) this script
#   1) averages daily snow/(snow+nosnow) ratios read from input.f,
#   2) derives the melt-season start/end per zone-year and appends it to
#      a_seasons,
#   3) fills missing days by linear interpolation, fits a smoothing spline,
#      and appends the smoothed values to 'output',
#   4) plots the observed and smoothed curves per zone into one PNG each.
# NOTE(review): all paths and file names below are hard-coded.
library(sqldf)
library (tseries)
library(timeSeries)
# change settings here --------------------------------------------------
options(warn=-1)
y1 <- 2005 # first year
y2 <- 2012 # last year
ez1 <- 4 # first elevation zone
ez2 <- 10 # last elevation zone
dfs <- 7 # degrees of freedom for spline smoothing
setwd('C:/AHT.PRO/Pakistan_Modelling/Data/Snowcover/MODIS/TIFs/') # set working directory
input.f <- "snowcover_0.txt"
output <- "splines_0.txt"
a_seasons <- "meltseasons_0.txt"
classes.f <- "ez_def.txt"
tf.f <- "timeframes.txt"
titlo <- " Observed/Smoothed Snow Depletion Curves after Cloud Cover Elimination, Mangla Catchment, Zone "
# end of code section with editable settings ----------------------------
cc <- c("character","integer","numeric","numeric") # type specification of columns in file 'snowcover*.txt'
se <- read.table(input.f, header=TRUE, colClasses=cc) # load data into table
names(se) <- c("date","ez","nosnow","snow")
classes <- read.table(classes.f, header=TRUE)
# write header rows, overwriting any previous output files
write(c("EZ","year","start_melt","start_r", "end_melt","end_r"), file=a_seasons, ncolumns=6, append = FALSE, sep = " ")
write(c("date","SDC","EZ"), file=output, ncolumns=3, append = FALSE, sep = " ")
# read individual chart-periods for elevation zones into dataframe
cc <- c("character", "character", "character","character","character","numeric")
tf <- read.table(tf.f, header=TRUE, sep=",", row.names="Zone", colClasses=cc)
nz <- nrow(tf) # number of rows in dataset tf
# valid for all charts
ylm <- c(0,1) # parameters for ylim in plots
pylab <- "snowcover ratio"
# set colors (one per year; must cover at least y2 - y1 + 1 entries)
pcolors <- c("blue", "darkgrey", "green", "red", "darkred", "violet", "cornflowerblue", "darkgreen", "brown", "cyan4", "chartreuse4", "darkmagenta")
tsource <- 'Datasource: MODIS "MOD10A1" daily grids and own calculations'
ds <- "%Y_%m_%d" # format for converting text to POSIXct (date)
ds0 <- "%Y-%m-%d"
par(mar=c(4.5,4,4,9)+0.1) # resizes plot box window
par(cex=0.8) # relates to fonts of all graphics
for (ez in ez1:ez2) {
i <- 1 # color index, advanced once per plotted year
for (a in y1:y2) {
# chart period bounds for this zone/year, e.g. "2005_04_01" (tf columns 1-2)
d01s <- paste(a, substr (tf[ez,1], 5, 10), sep="")
d02s <- paste(a, substr (tf[ez,2], 5, 10), sep="")
# mean snow-cover ratio per date for this zone within the chart period
tsql <- paste("SELECT date, AVG(snow/(nosnow+snow)) AS r_snow FROM se GROUP BY date, ez HAVING (ez=", ez, " AND date >'", d01s, "' AND date <'", d02s, "') ORDER BY date", sep="")
sdf <- sqldf(tsql)
if (dim(sdf)[1]==0) next # skip year if no data available
d01s <- gsub("_", "-", d01s)
d02s <- gsub("_", "-", d02s)
sdfd <- sdf
sdfd[,1] <- gsub("_", "-",sdfd[,1])
sdfd[,2] <- round (sdfd[,2]*1000)/1000 # round ratios to 3 decimals
# calculate start, end of season
# start of melt: last date on which the maximum snow-cover ratio occurs
tsql <- paste("SELECT max(date), max(r_snow) FROM sdfd GROUP BY r_snow ORDER BY r_snow DESC", sep="")
smax <- (sqldf(tsql))
# end of melt: earliest date on which the minimum snow-cover ratio occurs
tsql <- paste("SELECT min(date), min(r_snow) FROM sdfd GROUP BY r_snow ORDER BY r_snow ASC", sep="")
smin<- sqldf(tsql)
meltseason <- c(ez, a, smax[1,1], smax[1,2], smin[1,1], smin[1,2])
write(meltseason, a_seasons, ncolumns =6, append=TRUE) # output to file
# insert missing dates
sdfd[,1] <- as.Date(sdfd[,1])
# NOTE(review): this binding shadows base::seq with a Date vector; later
# calls like seq(from=...) still resolve to the base function.
seq <- seq(from=as.Date(d01s), to=as.Date(d02s), by=1)
seqdf <- as.data.frame(seq)
tsql <- "SELECT seqdf.seq, sdfd.r_snow FROM seqdf LEFT OUTER JOIN sdfd ON seqdf.seq = sdfd.date"
sdff <- sqldf(tsql)
# interpolate missing values
sdff[,1] <- as.POSIXct(sdff[,1])
sdffts <- as.timeSeries(sdff)
sinterpol <- interpNA(sdffts, method = "linear") # interpolation
# convert timeSeries back to dataframe
sinterpoldf <- as.data.frame(sinterpol)
# duplicate the data column into column 2 before column 1 is replaced by
# the dates below -- assumes sinterpoldf has exactly one column here
# (TODO confirm against the as.data.frame(timeSeries) result)
sinterpoldf[,2] <- sinterpoldf
sinterpoldf[,1] <- as.Date(rownames(sinterpoldf))
# remove outer missing values that could not be interpolated
sinterpolc <- removeNA(sinterpoldf)
sinterpolc[,1] <- as.POSIXct(sinterpolc[,1])
sinterpol_irts <- as.irts(sinterpolc) # irts makes nicer plots than timeSeries
# calc intervals for the vertical grid (tf columns 3-5: start, end, count)
d1 <- as.POSIXct(paste(a, substr (tf[ez,3], 5, 10), sep=""), format=ds, tz = "GMT")
d2 <- as.POSIXct(paste(a, substr (tf[ez,4], 5, 10), sep=""), format=ds, tz = "GMT")
ix <-seq(from=d1, to=d2, length.out=tf[ez,5]) # calc intervals for vertical grid
d01 <- as.POSIXct(d01s, format=ds0, tz = "GMT")
d02 <- as.POSIXct(d02s, format=ds0, tz = "GMT") # tz="" may give NA for certain dates
tlima <-c(d01,d02) # time frame for individual year
# calculate spline
fitss <- smooth.spline(sinterpolc, df=dfs) #works now!
sdfs <- sinterpolc
sdfs[,1] <- as.POSIXct(sdfs[,1])
# element 2 of the smooth.spline result is taken as the fitted values --
# TODO confirm against stats::smooth.spline's return structure
sdfs[2] <- as.vector(fitss [2]) # list of fitted data
sdfs[,2] <- round(sdfs[,2]*1000)/1000
sdfs[,2] <- pmin(sdfs[,2],1) # clips values exceeding 1
sdfs[,2] <- pmax(sdfs[,2],0) # clips values below 0
sdfs_irts <- as.irts(sdfs) # bring to irts for plotting
sdfs[,1] <- as.Date(sdfs[,1]) # bring to date for text output
sdfs[3] <- ez # tag every row with the elevation zone for the text output
write.table(sdfs, output, col.names=FALSE, row.names=FALSE, append=TRUE) # output to file
par(xpd=F) # restrict output to plot area
if (a == y1) {
mtitle <- paste(titlo, ez, sep="") # set individual chart title
# plot first time series
plot(sinterpol_irts, xlim=tlima, ylim=ylm, col=pcolors[i], xaxt = "n",las=1, main=mtitle, xlab="", ylab=pylab, yaxs="i", xaxs="i")
# add tick marks on x-axis
axis.POSIXct(side=1, at=seq(d01, d02, by="month"), format="%d.%b")
# add horizontal grid
grid(nx=NA, ny=NULL, col="gray", lty="dotted", lwd=1)
# add vertical grid
abline(a=NULL, b=0, h=NULL, v=ix, reg=NULL, col="gray", lty="dotted", lwd=1)
} # first year
else {
par(new=T) # this adds another curve to the existing plot!
plot(sinterpol_irts, xlim=tlima, ylim=ylm, col=pcolors[i], main="", yaxt="n", xaxt="n",
xlab="", ylab="", yaxs="i", xaxs="i")
} # further years
# plot spline
par(new=T) # continue on existing plot
plot(sdfs_irts, xlim=tlima, ylim=ylm, col=pcolors[i], xaxt="n", lwd=2, las=1, main="", xlab="", ylab="", yaxs="i", xaxs="i")
i <- i+1 # counter for color vector
} # end of loop over years a
par(xpd=T) # permit legend outside of plot area
# NOTE(review): after the loop i is one past the last color used, so
# pcolors[1:i] supplies one more color than there are legend labels --
# confirm this is intended (extra entry appears harmless).
legend (title="year",tlima[2]+240000, 1, legend=y1:y2, lty=1, lwd=2, col=pcolors[1:i])
mtext(tsource, side=1, line=3, outer=FALSE, cex=0.75)
# output of charts to file
dev.copy(png,width=800,height=500, paste("SDC_zone_",sprintf("%02d",ez),".png",sep=""))
dev.off()
} # end of loop over elevationzones ez
options(warn=0) # restore default warning behaviour
|
/SDC_calc_gapfree.R
|
no_license
|
climatepals/R-Scripts
|
R
| false
| false
| 7,039
|
r
|
# SDC processing - plotting observed data and smoothing curves
# querying of melt-seasons per ez and year
# script version: 2012-01-27
library(sqldf)
library (tseries)
library(timeSeries)
# change settings here --------------------------------------------------
options(warn=-1)
y1 <- 2005 # first year
y2 <- 2012 # last year
ez1 <- 4 # first elevation zone
ez2 <- 10 # last elevation zone
dfs <- 7 # degrees of freedom for spline smoothing
setwd('C:/AHT.PRO/Pakistan_Modelling/Data/Snowcover/MODIS/TIFs/') # set working directory
input.f <- "snowcover_0.txt"
output <- "splines_0.txt"
a_seasons <- "meltseasons_0.txt"
classes.f <- "ez_def.txt"
tf.f <- "timeframes.txt"
titlo <- " Observed/Smoothed Snow Depletion Curves after Cloud Cover Elimination, Mangla Catchment, Zone "
# end of code section with editable settings ----------------------------
cc <- c("character","integer","numeric","numeric") # type specification of columns in file 'snowcover*.txt'
se <- read.table(input.f, header=TRUE, colClasses=cc) # load data into table
names(se) <- c("date","ez","nosnow","snow")
classes <- read.table(classes.f, header=TRUE)
write(c("EZ","year","start_melt","start_r", "end_melt","end_r"), file=a_seasons, ncolumns=6, append = FALSE, sep = " ")
write(c("date","SDC","EZ"), file=output, ncolumns=3, append = FALSE, sep = " ")
# read individual chart-periods for elevation zones into dataframe
cc <- c("character", "character", "character","character","character","numeric")
tf <- read.table(tf.f, header=TRUE, sep=",", row.names="Zone", colClasses=cc)
nz <- nrow(tf) # number of rows in dataset tf
# valid for all charts
ylm <- c(0,1) # parameters for ylim in plots
pylab <- "snowcover ratio"
# set colors
pcolors <- c("blue", "darkgrey", "green", "red", "darkred", "violet", "cornflowerblue", "darkgreen", "brown", "cyan4", "chartreuse4", "darkmagenta")
tsource <- 'Datasource: MODIS "MOD10A1" daily grids and own calculations'
ds <- "%Y_%m_%d" # format for converting text to POSIXct (date)
ds0 <- "%Y-%m-%d"
par(mar=c(4.5,4,4,9)+0.1) # resizes plot box window
par(cex=0.8) # relates to fonts of all graphics
for (ez in ez1:ez2) {
i <- 1
for (a in y1:y2) {
d01s <- paste(a, substr (tf[ez,1], 5, 10), sep="")
d02s <- paste(a, substr (tf[ez,2], 5, 10), sep="")
tsql <- paste("SELECT date, AVG(snow/(nosnow+snow)) AS r_snow FROM se GROUP BY date, ez HAVING (ez=", ez, " AND date >'", d01s, "' AND date <'", d02s, "') ORDER BY date", sep="")
sdf <- sqldf(tsql)
if (dim(sdf)[1]==0) next # exit if no data available
d01s <- gsub("_", "-", d01s)
d02s <- gsub("_", "-", d02s)
sdfd <- sdf
sdfd[,1] <- gsub("_", "-",sdfd[,1])
sdfd[,2] <- round (sdfd[,2]*1000)/1000
# calculate start, end of season
tsql <- paste("SELECT max(date), max(r_snow) FROM sdfd GROUP BY r_snow ORDER BY r_snow DESC", sep="")
smax <- (sqldf(tsql))
tsql <- paste("SELECT min(date), min(r_snow) FROM sdfd GROUP BY r_snow ORDER BY r_snow ASC", sep="")
smin<- sqldf(tsql)
meltseason <- c(ez, a, smax[1,1], smax[1,2], smin[1,1], smin[1,2])
write(meltseason, a_seasons, ncolumns =6, append=TRUE) # output to file
# insert missing dates dates
sdfd[,1] <- as.Date(sdfd[,1])
seq <- seq(from=as.Date(d01s), to=as.Date(d02s), by=1)
seqdf <- as.data.frame(seq)
tsql <- "SELECT seqdf.seq, sdfd.r_snow FROM seqdf LEFT OUTER JOIN sdfd ON seqdf.seq = sdfd.date"
sdff <- sqldf(tsql)
# interpolate missing values
sdff[,1] <- as.POSIXct(sdff[,1])
sdffts <- as.timeSeries(sdff)
sinterpol <- interpNA(sdffts, method = "linear") # interpolation
# convert timeSeries back to dataframe
sinterpoldf <- as.data.frame(sinterpol)
sinterpoldf[,2] <- sinterpoldf
sinterpoldf[,1] <- as.Date(rownames(sinterpoldf))
# remove outer missing values that could not be interpolated
sinterpolc <- removeNA(sinterpoldf)
sinterpolc[,1] <- as.POSIXct(sinterpolc[,1])
sinterpol_irts <- as.irts(sinterpolc) # irts makes nicer plots than timeSeries
# calc intervals
d1 <- as.POSIXct(paste(a, substr (tf[ez,3], 5, 10), sep=""), format=ds, tz = "GMT")
d2 <- as.POSIXct(paste(a, substr (tf[ez,4], 5, 10), sep=""), format=ds, tz = "GMT")
ix <-seq(from=d1, to=d2, length.out=tf[ez,5]) # calc intervals for vertical grid
d01 <- as.POSIXct(d01s, format=ds0, tz = "GMT")
d02 <- as.POSIXct(d02s, format=ds0, tz = "GMT") # tz="" may give NA for certain dates
tlima <-c(d01,d02) # time frame for individual year
# calculate spline
fitss <- smooth.spline(sinterpolc, df=dfs) #works now!
sdfs <- sinterpolc
sdfs[,1] <- as.POSIXct(sdfs[,1])
sdfs[2] <- as.vector(fitss [2]) # list of fitted data
sdfs[,2] <- round(sdfs[,2]*1000)/1000
sdfs[,2] <- pmin(sdfs[,2],1) # clips values exceeding 1
sdfs[,2] <- pmax(sdfs[,2],0) # clips values below 0
sdfs_irts <- as.irts(sdfs) # bring to irts for plotting
sdfs[,1] <- as.Date(sdfs[,1]) # bring to date for text output
sdfs[3] <- ez
write.table(sdfs, output, col.names=FALSE, row.names=FALSE, append=TRUE) # output to file
par(xpd=F) # restrict output to plot area
if (a == y1) {
mtitle <- paste(titlo, ez, sep="") # set individual chart title
# plot first time series
plot(sinterpol_irts, xlim=tlima, ylim=ylm, col=pcolors[i], xaxt = "n",las=1, main=mtitle, xlab="", ylab=pylab, yaxs="i", xaxs="i")
# add tick marks on x-axis
axis.POSIXct(side=1, at=seq(d01, d02, by="month"), format="%d.%b")
# add horizontal grid
grid(nx=NA, ny=NULL, col="gray", lty="dotted", lwd=1)
# add vertical grid
abline(a=NULL, b=0, h=NULL, v=ix, reg=NULL, col="gray", lty="dotted", lwd=1)
} # first year
else {
par(new=T) # this adds another curve to the existing plot!
plot(sinterpol_irts, xlim=tlima, ylim=ylm, col=pcolors[i], main="", yaxt="n", xaxt="n",
xlab="", ylab="", yaxs="i", xaxs="i")
} # further years
# plot spline
par(new=T) # continue on existing plot
plot(sdfs_irts, xlim=tlima, ylim=ylm, col=pcolors[i], xaxt="n", lwd=2, las=1, main="", xlab="", ylab="", yaxs="i", xaxs="i")
i <- i+1 # counter for color vector
} # end of loop over years a
par(xpd=T) # permit legend outside of plot area
legend (title="year",tlima[2]+240000, 1, legend=y1:y2, lty=1, lwd=2, col=pcolors[1:i])
mtext(tsource, side=1, line=3, outer=FALSE, cex=0.75)
# output of charts to file
dev.copy(png,width=800,height=500, paste("SDC_zone_",sprintf("%02d",ez),".png",sep=""))
dev.off()
} # end of loop over elevationzones ez
options(warn=0)
|
## Load the required libraries
library(dplyr)
# Load the raw data only if it is not already in the workspace.
# BUG FIX: the original guard was exists("raw_data", mode = "function"),
# which tests for a *function* named raw_data; since raw_data is a data set,
# that test always failed and the file was re-sourced on every run.
if(!exists("raw_data")) source("Assignment_1_Getting_Data.R")
# Arranging the data by date
by_date <- group_by(raw_data, date)
# Calculating total steps for each day (NAs contribute zero)
t_s <- summarize(by_date, Total_Steps = sum(steps, na.rm = TRUE))
Mean_TS <- mean(t_s$Total_Steps)
Median_TS <- median(t_s$Total_Steps)
# Create histogram of total daily steps and save it as a PNG
png("Hist_Total_Steps_Each_Day.png")
with(t_s, hist(Total_Steps, breaks = 10, col = "lightgreen", main = "Total Steps Each Day", xlab = "Total Steps"))
dev.off()
# Report the summary statistics as a sentence
paste("The mean total number of steps taken per day is ", round(Mean_TS, digits = 2), "and the median of the total number of steps taken per day is ", round(Median_TS, digits = 2))
|
/Assignment_1a_CodeV2.R
|
no_license
|
Sahil-yp/RepData_PeerAssessment1
|
R
| false
| false
| 782
|
r
|
## Load the required libraries
library(dplyr)
## Load the raw data if it is not already in the workspace.
## BUG FIX: the original used exists("raw_data", mode = "function"), which
## looks for a *function* named raw_data; since raw_data is a data frame,
## the check always failed and the data was re-sourced on every run.
if(!exists("raw_data")) source("Assignment_1_Getting_Data.R")
## Group the observations by calendar date.
by_date <- group_by(raw_data, date)
## Total number of steps for each day (NAs dropped, i.e. counted as zero).
t_s <- summarize(by_date, Total_Steps = sum(steps, na.rm = TRUE))
## Central tendency of the daily totals.
Mean_TS <- mean(t_s$Total_Steps)
Median_TS <- median(t_s$Total_Steps)
## Histogram of the daily totals, written straight to a PNG device.
png("Hist_Total_Steps_Each_Day.png")
with(t_s, hist(Total_Steps, breaks = 10, col = "lightgreen", main = "Total Steps Each Day", xlab = "Total Steps"))
dev.off()
## Report the mean and median, rounded to 2 decimal places.
paste("The mean total number of steps taken per day is ", round(Mean_TS, digits = 2), "and the median of the total number of steps taken per day is ", round(Median_TS, digits = 2))
|
# Auto-extracted example code for the internal helper s3D() from the
# 'qtlc' package documentation. The example itself is a no-op (wrapped
# in a "Not run" block) because s3D() is only called via showtlc3D().
library(qtlc)
### Name: s3D
### Title: Internal function used by showtlc3D
### Aliases: s3D
### ** Examples
## Not run:
##D #Internal function.
## End(Not run)
|
/data/genthat_extracted_code/qtlc/examples/s3D.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 169
|
r
|
library(qtlc)
### Name: s3D
### Title: Internal function used by showtlc3D
### Aliases: s3D
### ** Examples
## Not run:
##D #Internal function.
## End(Not run)
|
## These functions compute the inverse of a matrix only if it
## has not already been calculated.
## This function creates a special "matrix" object that can cache its inverse
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures (set, get, setinverse,
## getinverse) that share one environment holding the matrix together
## with a lazily computed inverse. Replacing the matrix via set()
## invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL  # stored inverse is no longer valid
    },
    get = function() {
      x
    },
    setinverse = function(inverse) {
      cached_inverse <<- inverse
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On the first call the inverse is computed with solve() and stored in
## the wrapper's cache; later calls for the same matrix return the
## cached value (announced with a message) without recomputing it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
Iridella/ProgrammingAssignment2
|
R
| false
| false
| 1,147
|
r
|
## These functions compute the inverse of a matrix only if it
## has not already been calculated.
## This function creates a special "matrix" object that can cache its inverse
## Build a cache-aware wrapper around a matrix.
##
## Returns a list of four accessor closures (set, get, setinverse,
## getinverse) that share one environment holding the matrix together
## with a lazily computed inverse. Replacing the matrix via set()
## invalidates the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL  # stored inverse is no longer valid
    },
    get = function() {
      x
    },
    setinverse = function(inverse) {
      cached_inverse <<- inverse
    },
    getinverse = function() {
      cached_inverse
    }
  )
}
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then cacheSolve should retrieve the inverse from the cache.
## Return the inverse of the special "matrix" created by makeCacheMatrix.
##
## On the first call the inverse is computed with solve() and stored in
## the wrapper's cache; later calls for the same matrix return the
## cached value (announced with a message) without recomputing it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
# Fuzz/valgrind regression input for the package-internal routine
# meteor:::ET0_PenmanMonteith: most arguments are zero-length numeric
# vectors and the rest hold extreme-magnitude doubles plus a NaN,
# exercising the C++ wrapper's handling of degenerate input.
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(1.9433118586544e+185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(-4.18240144581448e-209, 3.13151605919845e-294, 9.29609968145595e+121, -5.24735735928569e-235, 2.23333206042187e-106, -5.83380844034655e+196, 8.84670778456032e-160, 1.35209597614284e-20, 6.33872710878606e+128, 4.37364694676111e-277, 1.89133251782559e+112, -1.98538144364467e+279, -3.6133240336912e+38, -5.5855135755695e+160, -5.33923829103485e+303, -8.26903340356292e-103, -3.46995023044415e-281, -3.63875683405274e+101, 3.92687496904889e+166, 1.63329743414245e+86, NaN))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
# Inspect the structure of whatever the routine returned (the test only
# checks that the call completes without crashing).
str(result)
|
/meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615842267-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 790
|
r
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), ra = numeric(0), relh = c(1.9433118586544e+185, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), rs = numeric(0), temp = c(-4.18240144581448e-209, 3.13151605919845e-294, 9.29609968145595e+121, -5.24735735928569e-235, 2.23333206042187e-106, -5.83380844034655e+196, 8.84670778456032e-160, 1.35209597614284e-20, 6.33872710878606e+128, 4.37364694676111e-277, 1.89133251782559e+112, -1.98538144364467e+279, -3.6133240336912e+38, -5.5855135755695e+160, -5.33923829103485e+303, -8.26903340356292e-103, -3.46995023044415e-281, -3.63875683405274e+101, 3.92687496904889e+166, 1.63329743414245e+86, NaN))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result)
|
`%>%` <- magrittr::`%>%`

effects_file = "../data2/gtex/lead_effects_na.tsv"
output_dir = "../data2/gtex/mfactorization/"

effects = readr::read_tsv(effects_file)
dir.create(output_dir, recursive = T)

## Write one statistic (columns ending in `suffix`, e.g. ".beta" or
## ".se") of the lead-effects table to `filename` inside output_dir, in
## the SNP/Gene wide format expected by the matrix-factorization step.
## Extracted to remove the copy-pasted duplication of the two pipelines.
## FIX: sub() now uses fixed = TRUE so the leading "." in the suffix is
## matched literally rather than as a regex wildcard.
write_stat <- function(effects, suffix, filename) {
  dplyr::select(effects, variant, molecular_trait_id, ends_with(suffix)) %>%
    dplyr::rename_all(function(x){sub(suffix, "", x, fixed = TRUE)}) %>%
    dplyr::rename(SNP = variant, Gene = molecular_trait_id) %>%
    readr::write_tsv(file.path(output_dir, filename))
}

write_stat(effects, ".beta", "slope.txt")
write_stat(effects, ".se", "se.txt")
|
/get_effects.R
|
permissive
|
peikovakate/ts_eQTLs
|
R
| false
| false
| 697
|
r
|
`%>%` <- magrittr::`%>%`

effects_file = "../data2/gtex/lead_effects_na.tsv"
output_dir = "../data2/gtex/mfactorization/"

effects = readr::read_tsv(effects_file)
dir.create(output_dir, recursive = T)

## Write one statistic (columns ending in `suffix`, e.g. ".beta" or
## ".se") of the lead-effects table to `filename` inside output_dir, in
## the SNP/Gene wide format expected by the matrix-factorization step.
## Extracted to remove the copy-pasted duplication of the two pipelines.
## FIX: sub() now uses fixed = TRUE so the leading "." in the suffix is
## matched literally rather than as a regex wildcard.
write_stat <- function(effects, suffix, filename) {
  dplyr::select(effects, variant, molecular_trait_id, ends_with(suffix)) %>%
    dplyr::rename_all(function(x){sub(suffix, "", x, fixed = TRUE)}) %>%
    dplyr::rename(SNP = variant, Gene = molecular_trait_id) %>%
    readr::write_tsv(file.path(output_dir, filename))
}

write_stat(effects, ".beta", "slope.txt")
write_stat(effects, ".se", "se.txt")
|
#assumption: input must be of the shape (x, y, z)
#if input is 2d, it can be transformed as (x, y, 1)
## Element-wise rectified linear unit (ReLU) for a 3-D feature map.
##
## Assumption (per original header): input is shaped (x, y, z); a 2-D
## input can be passed as (x, y, 1).
##
## @param stacks numeric array of dimension (x, y, z)
## @return array of the same dimensions with every negative entry
##   replaced by 0 (NA/NaN entries propagate, as in the original)
relu <- function(stacks)
{
  # pmax() is vectorised and preserves the dim attribute of its first
  # argument, so this single call replaces the original triple nested
  # loop of scalar max() calls with identical results.
  pmax(stacks, 0)
}
#test
#stacks <- array(round(runif(18,min=-10,max=10)), dim=c(3, 3, 2))
#output <- relu(stacks)
|
/lab-assignment/source-code/relu.R
|
no_license
|
tintinrevient/machine-learning-for-human-vision-and-language
|
R
| false
| false
| 457
|
r
|
#assumption: input must be of the shape (x, y, z)
#if input is 2d, it can be transformed as (x, y, 1)
## Element-wise rectified linear unit (ReLU) for a 3-D feature map.
##
## Assumption (per original header): input is shaped (x, y, z); a 2-D
## input can be passed as (x, y, 1).
##
## @param stacks numeric array of dimension (x, y, z)
## @return array of the same dimensions with every negative entry
##   replaced by 0 (NA/NaN entries propagate, as in the original)
relu <- function(stacks)
{
  # pmax() is vectorised and preserves the dim attribute of its first
  # argument, so this single call replaces the original triple nested
  # loop of scalar max() calls with identical results.
  pmax(stacks, 0)
}
#test
#stacks <- array(round(runif(18,min=-10,max=10)), dim=c(3, 3, 2))
#output <- relu(stacks)
|
# Demo UI for the shinyFiles package: exposes the three widget types
# (file chooser, directory chooser, save-as dialog) in a sidebar and
# shows the parsed server-side result of each in the main panel.
library(shiny)
library(shinyFiles)

# Classic two-column layout: widgets + explanations on the left,
# verbatim output of the parsed selections on the right.
shinyUI(pageWithSidebar(
  headerPanel(
    "Selections with shinyFiles",
    "shinyFiles example"
  ),
  sidebarPanel(
    img(src = "logo.png", style = "float: left; width: 120px; margin-right: 10px; margin-top: 5px"),
    tags$p("The following buttons will expose the users R installation\n
           directory. To showcase the restriction feature the base package\n
           has been hidden."),
    tags$p("As each button is used multiple times, the last location is\n
           remembered, as well as any other states. Each button has its own\n
           memory."),
    tags$hr(),
    # Single-file chooser (multiple = FALSE); handled server-side by
    # shinyFileChoose() and read back via input$file.
    shinyFilesButton("file", "File select", "Please select a file", FALSE),
    tags$p(),
    tags$p('The file selection button allows the user to select one or several
           files and get their absolute position communicated back to the shiny
           server. In this example the button has been set to single-file mode
           and the default path has been set to the "library" subdirectory of
           the "R Installation" path.'),
    tags$hr(),
    # Folder chooser; result read back via input$directory.
    shinyDirButton("directory", "Folder select", "Please select a folder"),
    tags$p(),
    tags$p("This button lets the user navigate the file system and select a\n
           folder. The absolute path of the selected folder is then send\n
           back to the server. While only folders can be selected, it is\n
           possible to get an overview of the content beforehand. \n
           Furthermore it is permission aware and warns if a folder with \n
           missing write permissions is selected. Lastly it is possible to\n
           create folders on the fly"),
    tags$hr(),
    # Save-as dialog restricted to two selectable file types.
    shinySaveButton("save", "Save file", "Save file as...", filetype = list(text = "txt", picture = c("jpeg", "jpg"))),
    tags$p(),
    tags$p('The last type of button is the save button which allows the user
           to navigate to a position in the filesystem and specify the name
           of a new file to be send back to the server. As above write
           permissions are communicated and folders can be created. It is
           possible to specify a range of different filetypes that the user
           can choose between. In this example it is "text" and "picture"')
  ),
  mainPanel(
    # Each verbatimTextOutput below is filled by the corresponding
    # renderPrint() in server.R after parsing the raw widget value.
    tags$h4("The output of a file selection"),
    tags$p(HTML("When one or several files are chosen the result is made \n
                available to the shinyServer instance. In order for it to get the\n
                formatting expected of a filepath it must first be fed into\n
                <code>parseFilePaths()</code> after which the output matches the formatting of\n that returned by shinys own fileInput widget.")),
    verbatimTextOutput("filepaths"),
    tags$hr(),
    tags$h4("The output of a folder selection"),
    tags$p(HTML("When a folder is selected the position of the folder is sent to \n
                the server and can be formatted with <code>parseDirPath()</code> to reflect a\n
                standard path string as returned by e.g. <code>choose.dir()</code> on windows\n
                systems.")),
    verbatimTextOutput("directorypath"),
    tags$hr(),
    tags$h4("The output of a file save"),
    tags$p(HTML('When a file is "saved" the name, path and type is sent back to
                the server, where it can be formatted with <code>parseSavePath()</code>. The
                format after parsing is very similar to a file choise, except
                size information is omitted (often the file doesn\'t exist yet)
                and type is now available (provided that filetype information has
                been send from the server).')),
    verbatimTextOutput("savefile")
  )
))
|
/inst/example/ui.R
|
no_license
|
rpodcast/shinyFiles
|
R
| false
| false
| 3,755
|
r
|
library(shiny)
library(shinyFiles)
shinyUI(pageWithSidebar(
headerPanel(
"Selections with shinyFiles",
"shinyFiles example"
),
sidebarPanel(
img(src = "logo.png", style = "float: left; width: 120px; margin-right: 10px; margin-top: 5px"),
tags$p("The following buttons will expose the users R installation\n
directory. To showcase the restriction feature the base package\n
has been hidden."),
tags$p("As each button is used multiple times, the last location is\n
remembered, as well as any other states. Each button has its own\n
memory."),
tags$hr(),
shinyFilesButton("file", "File select", "Please select a file", FALSE),
tags$p(),
tags$p('The file selection button allows the user to select one or several
files and get their absolute position communicated back to the shiny
server. In this example the button has been set to single-file mode
and the default path has been set to the "library" subdirectory of
the "R Installation" path.'),
tags$hr(),
shinyDirButton("directory", "Folder select", "Please select a folder"),
tags$p(),
tags$p("This button lets the user navigate the file system and select a\n
folder. The absolute path of the selected folder is then send\n
back to the server. While only folders can be selected, it is\n
possible to get an overview of the content beforehand. \n
Furthermore it is permission aware and warns if a folder with \n
missing write permissions is selected. Lastly it is possible to\n
create folders on the fly"),
tags$hr(),
shinySaveButton("save", "Save file", "Save file as...", filetype = list(text = "txt", picture = c("jpeg", "jpg"))),
tags$p(),
tags$p('The last type of button is the save button which allows the user
to navigate to a position in the filesystem and specify the name
of a new file to be send back to the server. As above write
permissions are communicated and folders can be created. It is
possible to specify a range of different filetypes that the user
can choose between. In this example it is "text" and "picture"')
),
mainPanel(
tags$h4("The output of a file selection"),
tags$p(HTML("When one or several files are chosen the result is made \n
available to the shinyServer instance. In order for it to get the\n
formatting expected of a filepath it must first be fed into\n
<code>parseFilePaths()</code> after which the output matches the formatting of\n that returned by shinys own fileInput widget.")),
verbatimTextOutput("filepaths"),
tags$hr(),
tags$h4("The output of a folder selection"),
tags$p(HTML("When a folder is selected the position of the folder is sent to \n
the server and can be formatted with <code>parseDirPath()</code> to reflect a\n
standard path string as returned by e.g. <code>choose.dir()</code> on windows\n
systems.")),
verbatimTextOutput("directorypath"),
tags$hr(),
tags$h4("The output of a file save"),
tags$p(HTML('When a file is "saved" the name, path and type is sent back to
the server, where it can be formatted with <code>parseSavePath()</code>. The
format after parsing is very similar to a file choise, except
size information is omitted (often the file doesn\'t exist yet)
and type is now available (provided that filetype information has
been send from the server).')),
verbatimTextOutput("savefile")
)
))
|
## Point the working directory at the downloaded blogfeedback data.
getwd()
p='C:/Users/Admin/Downloads/blogfeedback'
setwd(p)
list.files()
list.dirs()
library(data.table)
## BUG FIX: read_csv() comes from readr, which was never loaded (only
## data.table was attached); load readr so the call below works.
library(readr)
blogData_train <- read_csv('C:/Users/Admin/Downloads/blogfeedback/blogData_train.csv')
View(blogData_train)
library(ISLR)
train_sg <- read.csv("blogfeedback/train_sg.csv")
test_sg <- read.csv("blogfeedback/test_sg.csv")
View(train_sg)
View(test_sg)
## Quick structural overview and pairwise scatter plots.
str(train_sg)
str(test_sg)
pairs(train_sg)
pairs(test_sg)
hist(train_sg$y,col='blue')
## Regression trees predicting meta_knn from everything except meta_rf.
library(tree)
tree.train_sg<-tree(meta_knn~.-meta_rf,train_sg)
summary(tree.train_sg)
## BUG FIX: the tree() call and summary() were jammed onto one line,
## which is a syntax error in R; split into two statements.
tree.test_sg<-tree(meta_knn~.-meta_rf,test_sg)
summary(tree.test_sg)
## One-sample t-tests against hypothesised means.
t.test(train_sg$meta_knn,mu=0.6)
t.test(train_sg$meta_rf, mu=0.7)
t.test(train_sg$y, mu=0.5)
## Relationship between the two meta-features.
plot(train_sg$meta_knn,train_sg$meta_rf)
cor(train_sg$meta_knn,train_sg$meta_rf)
mod<-lm(train_sg$meta_knn~train_sg$meta_rf)
summary(mod)
|
/14.R
|
no_license
|
drj819/assgn14.1
|
R
| false
| false
| 885
|
r
|
## Point the working directory at the downloaded blogfeedback data.
getwd()
p='C:/Users/Admin/Downloads/blogfeedback'
setwd(p)
list.files()
list.dirs()
library(data.table)
## BUG FIX: read_csv() comes from readr, which was never loaded (only
## data.table was attached); load readr so the call below works.
library(readr)
blogData_train <- read_csv('C:/Users/Admin/Downloads/blogfeedback/blogData_train.csv')
View(blogData_train)
library(ISLR)
train_sg <- read.csv("blogfeedback/train_sg.csv")
test_sg <- read.csv("blogfeedback/test_sg.csv")
View(train_sg)
View(test_sg)
## Quick structural overview and pairwise scatter plots.
str(train_sg)
str(test_sg)
pairs(train_sg)
pairs(test_sg)
hist(train_sg$y,col='blue')
## Regression trees predicting meta_knn from everything except meta_rf.
library(tree)
tree.train_sg<-tree(meta_knn~.-meta_rf,train_sg)
summary(tree.train_sg)
## BUG FIX: the tree() call and summary() were jammed onto one line,
## which is a syntax error in R; split into two statements.
tree.test_sg<-tree(meta_knn~.-meta_rf,test_sg)
summary(tree.test_sg)
## One-sample t-tests against hypothesised means.
t.test(train_sg$meta_knn,mu=0.6)
t.test(train_sg$meta_rf, mu=0.7)
t.test(train_sg$y, mu=0.5)
## Relationship between the two meta-features.
plot(train_sg$meta_knn,train_sg$meta_rf)
cor(train_sg$meta_knn,train_sg$meta_rf)
mod<-lm(train_sg$meta_knn~train_sg$meta_rf)
summary(mod)
|
# Data mining and text mining - Politecnico di Milano
# Alessandro Baldassari
# Data Mining competition - BIP data
# Loading dataset
train <- read.csv("~/Git/data mining/dataset_polimi.csv")
# Product type String -> boolean
# NOTE(review): the "boolean" flag is actually stored as the character
# strings "0"/"1", not as logical or integer values — confirm downstream
# code expects strings.
train$Categoria_prodotto <- as.character(train$Categoria_prodotto)
train$Prodotto_1 <- "0"
train$Prodotto_1[train$Categoria_prodotto == "Prodotto_1"] <- "1"
# Extraction of day and month from date
# NOTE(review): '%y' is the 2-digit-year code; this appears to work only
# because train$Data is already a Date, so as.Date() ignores the format
# string — verify with a sample date.
train$Data <- as.Date.factor(train$Data)
train$Day <- sapply(train$Data, FUN=function(x) {weekdays(as.Date(x,'%y-%m-%d'))})
train$Month <- sapply(train$Data, FUN=function(x) {months(as.Date(x,'%y-%m-%d'))})
# Extraction of year
# Dates are converted back to "YYYY-MM-DD" strings, so substr(1,4) is
# the year and substr(9,10) is the day of month.
train$Data <- as.character(train$Data)
train$Year <- sapply(train$Data, FUN=function(x) {substr(x, 1, 4)})
train$Day_Number <- sapply(train$Data, FUN=function(x) {substr(x, 9, 10)})
train$Day_Number <- as.integer(train$Day_Number)
# Adding stable holidays
# Italian fixed-date public holidays; month names match the Italian
# locale output of months() above.
train$Holiday <- 0
train$Holiday[train$Month == 'gennaio' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'gennaio' & train$Day_Number == 6] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 25] <- 1
train$Holiday[train$Month == 'maggio' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'giugno' & train$Day_Number == 2] <- 1
train$Holiday[train$Month == 'agosto' & train$Day_Number == 15] <- 1
train$Holiday[train$Month == 'novembre' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 8] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 25] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 26] <- 1
# Adding moving holidays
# Easter Sunday/Monday dates hard-coded per year covered by the data.
train$Holiday[train$Month == 'marzo' & train$Day_Number == 27 & train$Year == '2016'] <- 1
train$Holiday[train$Month == 'marzo' & train$Day_Number == 28 & train$Year == '2016'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 5 & train$Year == '2015'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 6 & train$Year == '2015'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 20 & train$Year == '2014'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 21 & train$Year == '2014'] <- 1
|
/Data preparation/data_manipulation.r
|
no_license
|
AChiolini/Data-Mining-BiP
|
R
| false
| false
| 2,213
|
r
|
# Data mining and text mining - Politecnico di Milano
# Alessandro Baldassari
# Data Mining competition - BIP data
# Loading dataset
train <- read.csv("~/Git/data mining/dataset_polimi.csv")
# Product type String -> boolean
train$Categoria_prodotto <- as.character(train$Categoria_prodotto)
train$Prodotto_1 <- "0"
train$Prodotto_1[train$Categoria_prodotto == "Prodotto_1"] <- "1"
# Extraction of day and month from date
train$Data <- as.Date.factor(train$Data)
train$Day <- sapply(train$Data, FUN=function(x) {weekdays(as.Date(x,'%y-%m-%d'))})
train$Month <- sapply(train$Data, FUN=function(x) {months(as.Date(x,'%y-%m-%d'))})
# Extraction of year
train$Data <- as.character(train$Data)
train$Year <- sapply(train$Data, FUN=function(x) {substr(x, 1, 4)})
train$Day_Number <- sapply(train$Data, FUN=function(x) {substr(x, 9, 10)})
train$Day_Number <- as.integer(train$Day_Number)
# Adding stable holidays
train$Holiday <- 0
train$Holiday[train$Month == 'gennaio' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'gennaio' & train$Day_Number == 6] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 25] <- 1
train$Holiday[train$Month == 'maggio' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'giugno' & train$Day_Number == 2] <- 1
train$Holiday[train$Month == 'agosto' & train$Day_Number == 15] <- 1
train$Holiday[train$Month == 'novembre' & train$Day_Number == 1] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 8] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 25] <- 1
train$Holiday[train$Month == 'dicembre' & train$Day_Number == 26] <- 1
# Adding moving holidays
train$Holiday[train$Month == 'marzo' & train$Day_Number == 27 & train$Year == '2016'] <- 1
train$Holiday[train$Month == 'marzo' & train$Day_Number == 28 & train$Year == '2016'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 5 & train$Year == '2015'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 6 & train$Year == '2015'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 20 & train$Year == '2014'] <- 1
train$Holiday[train$Month == 'aprile' & train$Day_Number == 21 & train$Year == '2014'] <- 1
|
\name{dlm}
\alias{dlm}
\alias{as.dlm}
\alias{is.dlm}
\title{dlm objects}
\description{
The function \code{dlm} is used to create Dynamic Linear Model objects.
\code{as.dlm} and \code{is.dlm} coerce an object to a Dynamic Linear
Model object and test whether an object is a Dynamic Linear Model.
}
\usage{
dlm(...)
as.dlm(obj)
is.dlm(obj)
}
\arguments{
\item{...}{list with named elements \code{m0},
\code{C0}, \code{FF}, \code{V}, \code{GG}, \code{W} and,
optionally,
\code{JFF}, \code{JV}, \code{JGG}, \code{JW}, and
\code{X}. The first six are the usual vector and matrices that define
a time-invariant DLM. The remaining elements are used for time-varying
DLM. \code{X}, if present, should be a matrix. If \code{JFF} is not
\code{NULL}, then it must be a matrix of the
same dimension of \code{FF}, with the \eqn{(i,j)} element being zero if
\code{FF[i,j]} is time-invariant, and a positive integer \eqn{k}
otherwise. In this case the \eqn{(i,j)} element of \code{FF} at time
\eqn{t} will be \code{X[t,k]}. A similar interpretation holds for
\code{JV}, \code{JGG}, and \code{JW}. \code{...} may have additional
components, that are not used by \code{dlm}. The named components
may also be passed to the function as individual arguments.}
\item{obj}{an arbitrary \R object.}
}
\details{
The function \code{dlm} is used to create Dynamic Linear Model
objects. These are lists with the named elements described above and
with class of \code{"dlm"}.
Class \code{"dlm"} has a number of methods. In particular, consistent
DLM can be added together to produce another DLM.
}
\value{
For \code{dlm}, an object of class \code{"dlm"}.}
\references{Giovanni Petris (2010), An R Package for Dynamic Linear
Models. Journal of Statistical Software, 36(12), 1-16.
\url{http://www.jstatsoft.org/v36/i12/}.\cr
Petris, Petrone, and Campagnoli, Dynamic Linear Models with
R, Springer (2009).\cr
West and Harrison, Bayesian forecasting and
dynamic models (2nd ed.), Springer (1997).
}
\seealso{\code{\link{dlmModReg}}, \code{\link{dlmModPoly}},
\code{\link{dlmModARMA}}, \code{\link{dlmModSeas}}, to create
particular objects of class \code{"dlm"}.
}
\examples{
## Linear regression as a DLM
x <- matrix(rnorm(10),nc=2)
mod <- dlmModReg(x)
is.dlm(mod)
## Adding dlm's
dlmModPoly() + dlmModSeas(4) # linear trend plus quarterly seasonal component
}
\author{Giovanni Petris \email{GPetris@uark.edu}}
\keyword{misc}
|
/man/dlm.Rd
|
no_license
|
pchristi99/dlm
|
R
| false
| false
| 2,504
|
rd
|
\name{dlm}
\alias{dlm}
\alias{as.dlm}
\alias{is.dlm}
\title{dlm objects}
\description{
The function \code{dlm} is used to create Dynamic Linear Model objects.
\code{as.dlm} and \code{is.dlm} coerce an object to a Dynamic Linear
Model object and test whether an object is a Dynamic Linear Model.
}
\usage{
dlm(...)
as.dlm(obj)
is.dlm(obj)
}
\arguments{
\item{...}{list with named elements \code{m0},
\code{C0}, \code{FF}, \code{V}, \code{GG}, \code{W} and,
optionally,
\code{JFF}, \code{JV}, \code{JGG}, \code{JW}, and
\code{X}. The first six are the usual vector and matrices that define
a time-invariant DLM. The remaining elements are used for time-varying
DLM. \code{X}, if present, should be a matrix. If \code{JFF} is not
\code{NULL}, then it must be a matrix of the
same dimension of \code{FF}, with the \eqn{(i,j)} element being zero if
\code{FF[i,j]} is time-invariant, and a positive integer \eqn{k}
otherwise. In this case the \eqn{(i,j)} element of \code{FF} at time
\eqn{t} will be \code{X[t,k]}. A similar interpretation holds for
\code{JV}, \code{JGG}, and \code{JW}. \code{...} may have additional
components, that are not used by \code{dlm}. The named components
may also be passed to the function as individual arguments.}
\item{obj}{an arbitrary \R object.}
}
\details{
The function \code{dlm} is used to create Dynamic Linear Model
objects. These are lists with the named elements described above and
with class of \code{"dlm"}.
Class \code{"dlm"} has a number of methods. In particular, consistent
DLM can be added together to produce another DLM.
}
\value{
For \code{dlm}, an object of class \code{"dlm"}.}
\references{Giovanni Petris (2010), An R Package for Dynamic Linear
Models. Journal of Statistical Software, 36(12), 1-16.
\url{http://www.jstatsoft.org/v36/i12/}.\cr
Petris, Petrone, and Campagnoli, Dynamic Linear Models with
R, Springer (2009).\cr
West and Harrison, Bayesian forecasting and
dynamic models (2nd ed.), Springer (1997).
}
\seealso{\code{\link{dlmModReg}}, \code{\link{dlmModPoly}},
\code{\link{dlmModARMA}}, \code{\link{dlmModSeas}}, to create
particular objects of class \code{"dlm"}.
}
\examples{
## Linear regression as a DLM
x <- matrix(rnorm(10),nc=2)
mod <- dlmModReg(x)
is.dlm(mod)
## Adding dlm's
dlmModPoly() + dlmModSeas(4) # linear trend plus quarterly seasonal component
}
\author{Giovanni Petris \email{GPetris@uark.edu}}
\keyword{misc}
|
# Examine the univariate data ---------------------------------------------
#univariate data = samples of one variable
#univariate data analysis isn't concerned with the 'why'. It is just to describe the data as it is.
#discrete variables vs continuous variables
#discrete = an example is the level of education. It has a limited set of values.
#continuous = an example is income. It can be just about any number
#two key things to discover in EDA:
	#central tendency: what is a common value? what's the values around the data is centered?
	#spread: how varied are the data points?
#type of graphs to use:
	#box plot
	#histogram
	#density plot
	#pie graph
# Reading in data ---------------------------------------------------------
getwd()
setwd("./ExplorativeDataAnalysis/Data")
bike_buyers = read.csv("bike_buyers.csv")
#install.packages("dplyr")
library(dplyr)
#remove column ID
# NOTE(review): the odd name 'ï..ID' is a UTF-8 BOM artifact from
# read.csv; consider read.csv(..., fileEncoding = "UTF-8-BOM") so the
# column is plainly called "ID" — confirm against the raw file.
bike_buyers = select(bike_buyers, -ï..ID)
summary(bike_buyers)
#central tendency
summary(bike_buyers$Income)
boxplot(bike_buyers$Income)
#spread
hist(bike_buyers$Income)
plot(density(bike_buyers$Income), main="Income Density Spread")
#Education: categorical, so it's discrete
summary(bike_buyers$Education)
plot(bike_buyers$Education)
#Marital.Status: categorical, discrete
summary(bike_buyers$Marital.Status)
plot(bike_buyers$Marital.Status)
pie(table(bike_buyers$Marital.Status), main="Married vs single customers")
#Children
summary(bike_buyers$Children)
plot(bike_buyers$Children)
#is it the right data type? We want more of a count for each number of children
#so make it a factor
bike_buyers$Children = factor(bike_buyers$Children)
summary(bike_buyers$Children)
plot(bike_buyers$Children, xlab="Num. of children", ylab="Frequency")
#what about those who bought a bike, especially?
library(dplyr)
bought = filter(bike_buyers, Purchased.Bike== "Yes")
plot(bought$Children, xlab="Num. of children", ylab="Frequency", main="Purchased bikes yes, num. children")
#what is the most common commute distance? Visualize it!
summary(bike_buyers$Commute.Distance)
plot(bike_buyers$Commute.Distance)
#did more people buy a bike or not buy bikes? Visualize it!
summary(bike_buyers$Purchased.Bike)
plot(bike_buyers$Purchased.Bike)
pie(table(bike_buyers$Purchased.Bike), main="Bike vs no bike")
pie(table(bike_buyers$Purchased.Bike), main="Bike vs no bike")
|
/ExplorativeDataAnalysis/EDA_univariant_analysis.R
|
no_license
|
MarcusGrund/R_Foundations
|
R
| false
| false
| 2,269
|
r
|
# Examine the univariate data ---------------------------------------------
#univariate data = samples of one variable
#univariate data analysis isn't concerned with the 'why'. It is just to decribe the data as it is.
#discrete variables vs continuous variables
#discrete = an eample is the level of education. It has a limit set of values.
#continuous = an example is income. It can be just about any number
#two key things to discover in EDA:
#central tendency: what is a common value? what's the values around the data is centered?
#spread: how varied are the data points?
#type of graphs to use:
#box plot
#histogram
#density plot
#pie graph
# Reading in data ---------------------------------------------------------
getwd()
setwd("./ExplorativeDataAnalysis/Data")
bike_buyers = read.csv("bike_buyers.csv")
#install.packages("dplyr")
library(dplyr)
#remove column ID
bike_buyers = select(bike_buyers, -ï..ID)
summary(bike_buyers)
#central tendency
summary(bike_buyers$Income)
boxplot(bike_buyers$Income)
#spread
hist(bike_buyers$Income)
plot(density(bike_buyers$Income), main="Income Density Spread")
#Education: categorical, so it's discrete
summary(bike_buyers$Education)
plot(bike_buyers$Education)
#Marital.Status: categorical, discrete
summary(bike_buyers$Marital.Status)
plot(bike_buyers$Marital.Status)
pie(table(bike_buyers$Marital.Status), main="Married vs single customers")
#Children
summary(bike_buyers$Children)
plot(bike_buyers$Children)
#is it the right data type? We want more of a count for each number of children
#so make to factor
bike_buyers$Children = factor(bike_buyers$Children)
summary(bike_buyers$Children)
plot(bike_buyers$Children, xlab="Num. of children", ylab="Frequency")
#what about those who bought a bike, especially?
library(dplyr)
bought = filter(bike_buyers, Purchased.Bike== "Yes")
plot(bought$Children, xlab="Num. of children", ylab="Frequency", main="Purchased bikes yes, num. children")
#what is the most common commute distance? Visualize it!
summary(bike_buyers$Commute.Distance)
plot(bike_buyers$Commute.Distance)
#did more people buy a bike or not buy bikes? Visualize it!
summary(bike_buyers$Purchased.Bike)
plot(bike_buyers$Purchased.Bike)
pie(table(bike_buyers$Purchased.Bike), main="Bike vs no bike")
|
# testthat suite for the catboost regression learner (mlr3 extension).
context("regr.catboost")
test_that("autotest", {
  # mlr3 autotest helper: instantiates the learner and runs the generic
  # train/predict contract checks shipped with mlr3's test infrastructure.
  learner = LearnerRegrCatboost$new()
  expect_learner(learner)
  result = run_autotest(learner)
  # run_autotest() returns TRUE on success or a result object carrying
  # $error describing the first failing check.
  expect_true(result, info = result$error)
})
|
/tests/testthat/test_regr.catboost.R
|
no_license
|
mlr3learners/mlr3learners.catboost
|
R
| false
| false
| 193
|
r
|
context("regr.catboost")
test_that("autotest", {
learner = LearnerRegrCatboost$new()
expect_learner(learner)
result = run_autotest(learner)
expect_true(result, info = result$error)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomics_functions.R
\name{variantsets.get}
\alias{variantsets.get}
\title{Gets a variant set by ID.For the definitions of variant sets and other genomics resources, see[Fundamentals of GoogleGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)}
\usage{
variantsets.get(variantSetId)
}
\arguments{
\item{variantSetId}{Required}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/genomics
\item https://www.googleapis.com/auth/genomics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics, https://www.googleapis.com/auth/genomics.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/genomics}{Google Documentation}
}
|
/googlegenomicsv1.auto/man/variantsets.get.Rd
|
permissive
|
GVersteeg/autoGoogleAPI
|
R
| false
| true
| 1,126
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genomics_functions.R
\name{variantsets.get}
\alias{variantsets.get}
\title{Gets a variant set by ID.For the definitions of variant sets and other genomics resources, see[Fundamentals of GoogleGenomics](https://cloud.google.com/genomics/fundamentals-of-google-genomics)}
\usage{
variantsets.get(variantSetId)
}
\arguments{
\item{variantSetId}{Required}
}
\description{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_skeleton}}
}
\details{
Authentication scopes used by this function are:
\itemize{
\item https://www.googleapis.com/auth/cloud-platform
\item https://www.googleapis.com/auth/genomics
\item https://www.googleapis.com/auth/genomics.readonly
}
Set \code{options(googleAuthR.scopes.selected = c(https://www.googleapis.com/auth/cloud-platform, https://www.googleapis.com/auth/genomics, https://www.googleapis.com/auth/genomics.readonly)}
Then run \code{googleAuthR::gar_auth()} to authenticate.
See \code{\link[googleAuthR]{gar_auth}} for details.
}
\seealso{
\href{https://cloud.google.com/genomics}{Google Documentation}
}
|
# UserDefnFunction
# Uses Prostrate Data
# scale()
#
# Author: PatriciaHoffman
###############################################################################
rm(list=ls())
?source
##################################################
# User Defined Function - roll die
##################################################
# ?trunc (others are ceiling(), floor(), round(x,digits = 0),
# signif(x, digits = 6)
#trunc takes a single numeric argument x and
#returns a numeric vector containing the integers
#formed by truncating the values in x toward 0
# ?runif runif generates random deviates default - between 0 and 1
# Simulate rolls of a fair die.
#
# Draws `num.rolls` uniform deviates on (0, 1) and maps each onto the
# integers 1..num.sides by scaling and truncation, so the result follows
# exactly the same random stream as the original runif()-based version.
#
# Args:
#   num.sides: number of faces on the die (default 6).
#   num.rolls: how many rolls to simulate (default 1).
# Returns: an integer-valued numeric vector of length `num.rolls`.
rolladie <- function(num.sides = 6, num.rolls = 1) {
  draws <- runif(num.rolls)
  trunc(draws * num.sides + 1)
}
rolladie()
rolladie(num.sides =12)
rolladie(num.sides = 12, num.rolls = 10)
##################################################
# User Defined Function - scale
##################################################
?scale
prostate<-read.csv("ProstateCancerDataESL.csv", sep=",",header=T)
prostate[1:3,]
names(prostate)
dim(prostate)
isTRUE(all.equal(prostate$age,prostate[,3]))
#put predictors into X
X <- prostate[,1:8]
class(X)
prostateScale <- scale(X, center = TRUE, scale = TRUE)
prostateScale[1:3,]
# Center the first `numColm` columns of `x` and scale each to unit variance.
#
# Hand-rolled equivalent of scale(x, center = TRUE, scale = TRUE) for a
# data frame or matrix, with a guard for (near-)constant columns: a column
# variance below 1e-7 is clamped to 1e-7 so no column is divided by zero.
#
# Args:
#   x:       data frame or matrix with numeric columns.
#   numColm: number of leading columns to standardize; defaults to all
#            columns (backward-compatible generalization of the old
#            required argument). seq_len() makes numColm = 0 a no-op
#            instead of the 1:0 error the old loop produced.
# Returns: x with the selected columns demeaned and scaled; any remaining
#          columns are returned untouched.
userScale <- function(x, numColm = ncol(x)) {
  for (i in seq_len(numColm)) {
    m <- mean(x[, i])            # column mean
    x[, i] <- x[, i] - m         # demean
    v <- var(x[, i])
    if (v < 1e-7) v <- 1e-7      # clamp: avoid zero divide for constant columns
    x[, i] <- x[, i] / sqrt(v)
  }
  x
}
prostateUser <- userScale(X,8)
prostateUser[1:3,]
prostateUser <- as.data.frame(prostateUser)
prostateScale <- as.data.frame(prostateScale)
isTRUE(all.equal(prostateUser,prostateScale))
#
#
## scale training set
## some of data are factors
## scale test set using training parameters
## the target value is in the first colm
## Don't scale the target
#
#
##testScale is matrix
##1 1 1 3
##0 2 1 3
##0 3 1 3
##1 4 1 3
##1 5 1 3
##scaleData is matrix
##1 1 1 3 T red 5
##0 2 1 3 F blue 4
##0 3 1 3 T green 3
##1 4 1 3 T purple 2
##1 5 1 3 F yellow 1
#
#
#testunScale <- read.csv("testScale.csv", sep=",",header=F)
#testScale<- scale(testunScale, center = TRUE, scale = TRUE)
#testScale # notice that colms 3 and 4 are now full of NaN
#
#unstrain <- read.csv("scaleData.csv", sep=",",header=F)
#unstest <- unstrain;test<-unstest;train<-unstrain
##scale(unstrain) gives Error 'x' must be numeric
##demean train and bring to unit variance
## m = means, v = variance, nc = list of numeric colms, scaled data set x
#m <- rep(0.0,ncol(unstrain)); v <-rep(1.0, ncol(unstrain)); nc <- rep(FALSE, ncol(unstrain))
#
#for(i in 1:ncol(unstrain)){
# if(is.numeric(unstrain[,i])) nc[i] <- TRUE
# }
#nc
#nc[1] <- FALSE # don't want to scale the target
#
#index <- which(nc == TRUE)
#for(i in index){
# m[i]<- sum(unstrain[,i])
# m[i] <- m[i]/length(unstrain[,i])
# train[,i] <- unstrain[,i]-m[i]
# v[i] <- var(train[,i])
# if(v[i] < 0.1) v[i] <- 1
# train[,i] <- train[,i]/sqrt(v[i])
# }
#
#index <- which(nc == TRUE)
#test[,index] <- (unstest[,index]-m[index]) / sqrt(v[index])
#
#train
#test
#nc
#m
#v
##################################################
# User Defined Function - MATLAB backslash function
# compare with r function lm
##################################################
#
#As a second example, consider a function to emulate directly the Matlab backslash command,
#which returns the coefficients of the orthogonal projection of the vector y onto the column
#space of the matrix, X. (This is ordinarily called the least squares estimate of the regression
#coefficients.) This would ordinarily be done with the qr() function; however this is sometimes
#a bit tricky to use directly and it pays to have a simple function such as the following to use it
#safely.
#Thus given a n by 1 vector y and an n by p matrix X then X y is defined as (X^(T) X)^(-1) X^(T) y,
# where (XTX)^(-1) is a generalized inverse of X^(')X.
?qr
# Emulate MATLAB's backslash operator: least-squares coefficients of the
# orthogonal projection of `y` onto the column space of `X`, computed via
# a QR decomposition (numerically preferable to forming t(X) %*% X).
#
# Args:
#   X: an n x p numeric model matrix.
#   y: a length-n response vector.
# Returns: the p regression coefficients.
bslash <- function(X, y) {
  decomposition <- qr(X)
  coefs <- qr.coef(decomposition, y)
  coefs
}
#put response into Y
y <- prostate[,9]
# After the function bslash is created, it may be used in statements such as
regcoeff <- bslash(X, y)
?lm
regressionModel <- lm(y~. , X)
# classical ordinary least squares
#beta <- solve(t(x) %*% x) %*% t(x) %*% y
##################################################
# Examine a Model Object
#
##################################################
## EXAMPLE, ADDRESSING AN OBJECT
summary(regressionModel)
str(regressionModel)
regressionModel$call
regressionModel$model$y
## LITTLE MORE COMPLEX
attr(regressionModel$model, "terms")
attr(attr(regressionModel$model, "terms"), "term.labels")
## MAYBE USEFUL
quantile(regressionModel$residuals)
# Demonstrate returning several values from one function by binding them
# into a 1 x 2 matrix whose column names ("variablex", "variabley") come
# from the bound symbols, exactly as cbind() names them.
#
# Args:
#   x, y: numeric inputs; the second column holds x + y.
# Returns: a 1 x 2 matrix c(variablex = 10, variabley = x + y).
multipleReturn <- function(x = 6, y = 1) {
  variablex <- 10
  variabley <- x + y
  cbind(variablex, variabley)
}
z <- multipleReturn()
z
answerx <- z[1]
answery <- z[2]
answerx;answery
|
/week1/RCode/UserDefnFunction.R
|
no_license
|
saurabhmadaan/ML_UCSC
|
R
| false
| false
| 5,326
|
r
|
# UserDefnFunction
# Uses Prostrate Data
# scale()
#
# Author: PatriciaHoffman
###############################################################################
rm(list=ls())
?source
##################################################
# User Defined Function - roll die
##################################################
# ?trunc (others are ceiling(), floor(), round(x,digits = 0),
# signif(x, digits = 6)
#trunc takes a single numeric argument x and
#returns a numeric vector containing the integers
#formed by truncating the values in x toward 0
# ?runif runif generates random deviates default - between 0 and 1
# Simulate `num.rolls` rolls of a fair `num.sides`-sided die.
# Each runif() draw on (0, 1) is scaled onto (1, num.sides + 1) and
# truncated toward zero, yielding integers in 1..num.sides.
rolladie = function (num.sides =6, num.rolls = 1)
{
  # map uniform deviates to die faces
  simulated.rolls <- trunc(runif(num.rolls)*num.sides+1)
  return(simulated.rolls)
}
rolladie()
rolladie(num.sides =12)
rolladie(num.sides = 12, num.rolls = 10)
##################################################
# User Defined Function - scale
##################################################
?scale
prostate<-read.csv("ProstateCancerDataESL.csv", sep=",",header=T)
prostate[1:3,]
names(prostate)
dim(prostate)
isTRUE(all.equal(prostate$age,prostate[,3]))
#put predictors into X
X <- prostate[,1:8]
class(X)
prostateScale <- scale(X, center = TRUE, scale = TRUE)
prostateScale[1:3,]
# Demean the first `numColm` columns of `x` and scale each to unit
# variance (hand-rolled equivalent of scale(x, center = TRUE, scale = TRUE)).
# A column variance below 1e-7 is clamped to 1e-7 so constant columns do
# not trigger a divide-by-zero.
# NOTE(review): 1:numColm misbehaves for numColm = 0 (iterates 1, 0);
# seq_len(numColm) would be the safe form.
userScale <- function(x, numColm) {
  #demean x and bring to unit variance
  for(i in 1:numColm){
    # column mean via sum/length, then center the column
    m <- sum(x[,i])
    m <- m/length(x[,i])
    x[,i] <- x[,i]-m
    v <- var(x[,i])
    if(v < 0.0000001) v <- 0.0000001 # include for case v = 0
    x[,i] <- x[,i]/sqrt(v) # don't want a zero divide
  }
  return(x)
}
prostateUser <- userScale(X,8)
prostateUser[1:3,]
prostateUser <- as.data.frame(prostateUser)
prostateScale <- as.data.frame(prostateScale)
isTRUE(all.equal(prostateUser,prostateScale))
#
#
## scale training set
## some of data are factors
## scale test set using training parameters
## the target value is in the first colm
## Don't scale the target
#
#
##testScale is matrix
##1 1 1 3
##0 2 1 3
##0 3 1 3
##1 4 1 3
##1 5 1 3
##scaleData is matrix
##1 1 1 3 T red 5
##0 2 1 3 F blue 4
##0 3 1 3 T green 3
##1 4 1 3 T purple 2
##1 5 1 3 F yellow 1
#
#
#testunScale <- read.csv("testScale.csv", sep=",",header=F)
#testScale<- scale(testunScale, center = TRUE, scale = TRUE)
#testScale # notice that colms 3 and 4 are now full of NaN
#
#unstrain <- read.csv("scaleData.csv", sep=",",header=F)
#unstest <- unstrain;test<-unstest;train<-unstrain
##scale(unstrain) gives Error 'x' must be numeric
##demean train and bring to unit variance
## m = means, v = variance, nc = list of numeric colms, scaled data set x
#m <- rep(0.0,ncol(unstrain)); v <-rep(1.0, ncol(unstrain)); nc <- rep(FALSE, ncol(unstrain))
#
#for(i in 1:ncol(unstrain)){
# if(is.numeric(unstrain[,i])) nc[i] <- TRUE
# }
#nc
#nc[1] <- FALSE # don't want to scale the target
#
#index <- which(nc == TRUE)
#for(i in index){
# m[i]<- sum(unstrain[,i])
# m[i] <- m[i]/length(unstrain[,i])
# train[,i] <- unstrain[,i]-m[i]
# v[i] <- var(train[,i])
# if(v[i] < 0.1) v[i] <- 1
# train[,i] <- train[,i]/sqrt(v[i])
# }
#
#index <- which(nc == TRUE)
#test[,index] <- (unstest[,index]-m[index]) / sqrt(v[index])
#
#train
#test
#nc
#m
#v
##################################################
# User Defined Function - MATLAB backslash function
# compare with r function lm
##################################################
#
#As a second example, consider a function to emulate directly the Matlab backslash command,
#which returns the coefficients of the orthogonal projection of the vector y onto the column
#space of the matrix, X. (This is ordinarily called the least squares estimate of the regression
#coefficients.) This would ordinarily be done with the qr() function; however this is sometimes
#a bit tricky to use directly and it pays to have a simple function such as the following to use it
#safely.
#Thus given a n by 1 vector y and an n by p matrix X then X y is defined as (X^(T) X)^(-1) X^(T) y,
# where (XTX)^(-1) is a generalized inverse of X^(')X.
?qr
# MATLAB-style backslash: least-squares coefficients of `y` regressed on
# the columns of `X`, computed via the QR decomposition (see ?qr and
# ?qr.coef) rather than the less stable (t(X) %*% X)^(-1) t(X) y formula.
bslash <- function(X, y) {
  X <- qr(X)
  qr.coef(X, y)
}
#put response into Y
y <- prostate[,9]
# After the function bslash is created, it may be used in statements such as
regcoeff <- bslash(X, y)
?lm
regressionModel <- lm(y~. , X)
# classical ordinary least squares
#beta <- solve(t(x) %*% x) %*% t(x) %*% y
##################################################
# Examine a Model Object
#
##################################################
## EXAMPLE, ADDRESSING AN OBJECT
summary(regressionModel)
str(regressionModel)
regressionModel$call
regressionModel$model$y
## LITTLE MORE COMPLEX
attr(regressionModel$model, "terms")
attr(attr(regressionModel$model, "terms"), "term.labels")
## MAYBE USEFUL
quantile(regressionModel$residuals)
# Demonstrate returning multiple values from a function by binding them
# into a 1 x 2 matrix; cbind() names the columns "variablex"/"variabley"
# after the bound symbols, so callers can unpack z[1] and z[2].
multipleReturn = function (x =6, y = 1)
{
  variabley <- x+y
  variablex <- 10
  z <- cbind(variablex,variabley)
  return(z)
}
z <- multipleReturn()
z
answerx <- z[1]
answery <- z[2]
answerx;answery
|
## app.R ##
library(shinydashboard)
library(shinydashboardPlus)
library(shiny)
library(tidyverse)
library(readxl)
library(ggthemes)
library(ggrepel)
library(shinyWidgets)
library(plotly)
library(shinycssloaders)
library(dashboardthemes)
library(RCzechia)
library(leaflet)
library(sf)
library(RJSONIO)
library(deSolve)
my_colors <- c("#DC143C","#F9A828", "#36648B", "#8B1C62", "#00868B", "#698B69", "#CDC673",
"#8B5A00", "#EE9572", "#483D8B", "#7A378B", "#CD69C9", "#FFB6C1", "#00C78C")
df = rjson::fromJSON(file="https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
tdf2 <- data.frame(matrix(unlist(df$infectedDaily), nrow=length(df$infectedDaily), byrow=T))
tdf2$X1 <- as.numeric(as.character(tdf2$X1))
tdf2$X2 <- as.Date(as.character(tdf2$X2))
tdf2 <- tdf2[32:nrow(tdf2),]
names(tdf2) <- c("Pocet", "Den")
cz <- RCzechia::kraje("low")
tdf1 <- data.frame(matrix(unlist(df$infectedByRegion), nrow=length(df$infectedByRegion), byrow=T))
names(tdf1) <- c("NAZ_CZNUTS3", "Pocet")
tdf1$NAZ_CZNUTS3 <- as.character(tdf1$NAZ_CZNUTS3)
tdf1$Pocet <- as.numeric(as.character(tdf1$Pocet))
dc <- cz %>% inner_join(tdf1, by = "NAZ_CZNUTS3")
poc <- c(df[[2]],df[[3]])
year <- c("CZ", "CZ")
cond <- c("nakazeny","uzdraveny")
nak <- data_frame(cond,poc,year)
#Infected <- c(45, 62, 121, 198, 291, 440, 571, 830, 1287, 1975, 2744, 4515, 5974, 7711, 9692, 11791, 14380, 17205, 20440)
inf <- tdf2[tdf2$Pocet != 0, ]
Infected <- inf$Pocet
Day <- 1:(length(Infected))
N <- 10000000 # population of mainland china
old <- par(mfrow = c(1, 2))
# SIR compartmental model derivatives in the form expected by
# deSolve::ode(). `state` carries named components S, I, R and
# `parameters` carries beta and gamma; the total population N is looked
# up in the enclosing (global) environment, as in the original.
#
# Returns: list(c(dS, dI, dR)), which sums to zero (closed population).
SIR <- function(time, state, parameters) {
  S <- state[["S"]]
  I <- state[["I"]]
  beta <- parameters[["beta"]]
  gamma <- parameters[["gamma"]]
  infections <- beta / N * I * S   # new infections per unit time
  recoveries <- gamma * I          # recoveries per unit time
  list(c(-infections, infections - recoveries, recoveries))
}
init <- c(S = N-Infected[1], I = Infected[1], R = 0)
# Residual sum of squares between the observed infection counts and the
# SIR model's infected compartment; objective function for optim().
# `parameters` is an unnamed length-2 vector (beta, gamma); `init`,
# `Day`, `SIR` and `Infected` are taken from the global environment, and
# ode() comes from the deSolve package loaded above.
RSS <- function(parameters) {
  names(parameters) <- c("beta", "gamma")
  out <- ode(y = init, times = Day, func = SIR, parms = parameters)
  # column 3 of the deSolve output matrix is the I compartment
  fit <- out[ , 3]
  sum((Infected - fit)^2)
}
# Fit beta and gamma by minimizing the RSS against the observed counts,
# then integrate the fitted model over 70 days and compute R0.
Opt <- optim(c(0.5, 0.5), RSS, method = "L-BFGS-B", lower = c(0, 0), upper = c(1, 1)) # optimize with some sensible conditions
Opt_par <- setNames(Opt$par, c("beta", "gamma"))
t <- 1:70 # time in days
fit <- data.frame(ode(y = init, times = t, func = SIR, parms = Opt_par))
col <- 1:3 # colour
R0 <- setNames(Opt_par["beta"] / Opt_par["gamma"], "R0") # basic reproduction number
fit[fit$I == max(fit$I), "I", drop = FALSE] # height of pandemic
# Dashboard layout: header with logo, sidebar with four tabs
# (Total / Daily / Prediction / Map) and a body holding one tabItem per
# menu entry. Each tab shows a spinner-wrapped plot plus the data source.
ui <- dashboardPagePlus(
  collapse_sidebar = FALSE,
  header = dashboardHeaderPlus(
    title = tagList(
      span(class = "logo-lg", "COVID19 Czechia"),
      img(src = "https://image.flaticon.com/icons/svg/204/204074.svg")
    ),
    enable_rightsidebar = FALSE,
    rightSidebarIcon = "gears"
  ),
  sidebar = dashboardSidebar(
    sidebarMenu(
      menuItem("Total", tabName = "dashboard", icon = icon("dashboard")),
      menuItem("Daily", tabName = "summary", icon = icon("bar-chart-o"),
               badgeLabel = "info", badgeColor = "red"),
      menuItem("Prediction", tabName = "prediction", icon = icon("bar-chart-o"),
               badgeLabel = "new", badgeColor = "green"),
      menuItem("Map", tabName = "map", icon = icon("map-marker-alt"),
               badgeLabel = "new", badgeColor = "blue")
    ),
    collapsed = FALSE
  ),
  body = dashboardBody(
    # apply the dashboard-wide theme
    shinyDashboardThemes(
      theme = "grey_dark"
    ),
    tabItems(
      # overview tab: regional totals plus tested/infected/recovered counters
      tabItem(
        tabName = "dashboard",
        fluidRow(
          boxPlus(plotOutput("chart2", height = 680) %>% withSpinner(type = "5"),
                  width = 10, background = "black"),
          boxPlus(descriptionBlock(header = h3(df$totalTested),
                                   text = h4("Testovaných"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "blue"),
          boxPlus(descriptionBlock(header = h3(df[[2]]),
                                   text = h4("Nakažených"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "red"),
          boxPlus(descriptionBlock(header = h3(df[[3]]),
                                   text = h4("Uzdraveno"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "green")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # daily-counts tab
      tabItem(
        tabName = "summary",
        fluidRow(
          boxPlus(plotOutput("chart3", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "yellow")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # SIR prediction tab
      tabItem(
        tabName = "prediction",
        fluidRow(
          boxPlus(plotOutput("chart5", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "black")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # choropleth map tab
      tabItem(
        tabName = "map",
        fluidRow(
          boxPlus(plotOutput("chart1", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "black")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      )
    ),
    title = "Covid19-CZ"
  )
)
# Shiny server: renders the cumulative bar chart (chart2), daily-infections
# line chart (chart3), infected/recovered columns (chart4, not shown in the
# current UI), the SIR prediction curve (chart5) and the regional
# choropleth (chart1). The data objects dc, tdf2, nak, fit and my_colors
# are prepared at load time above.
server <- function(input, output, session) {

  # Regional choropleth of confirmed cases.
  # FIX: the original theme() call ended with a trailing comma
  # (`legend.position = "none",`), which raises "argument is empty"
  # when the plot is rendered; the comma is removed here.
  output$chart1 <- renderPlot(
    ggplot(data = dc) +
      geom_sf(aes(fill = Pocet)) +
      scale_fill_continuous(high = "#DC143C", low = "goldenrod") +
      geom_sf_text(aes(label = Pocet), size = 8) +
      theme_solid() +
      theme(legend.text.align = 1,
            legend.title.align = 0.5,
            plot.background = element_rect(fill = "#116979", color = "black"),
            legend.position = "none"),
    bg = "#116979"
  )

  # Cumulative infections per region, ordered horizontal bars.
  output$chart2 <- renderPlot(
    ggplot(dc, aes(x = reorder(NAZ_CZNUTS3, Pocet), y = Pocet, fill = NAZ_CZNUTS3)) +
      geom_bar(stat = "identity") +
      coord_flip() +
      ggtitle("Nakažených Celkem / Infected Total") +
      geom_text(aes(label = Pocet), hjust = 0, color = "white", size = 8) +
      theme_bw() +
      xlab("") +
      theme(
        panel.background = element_rect(fill = "#2b374b", color = "#17202A"),
        plot.background = element_rect(fill = "#17202A", color = "#17202A"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
        legend.position = "none", axis.title.x = element_blank(),
        axis.text = element_text(size = 14, colour = "white"),
        plot.title = element_text(color = "white", size = 16)
      ) +
      scale_fill_manual(values = c(my_colors))
  )

  # Daily new infections.
  # NOTE(review): size = 2 inside aes() maps a constant onto the size
  # scale instead of setting the point size literally; kept as-is to
  # preserve the current rendering.
  output$chart3 <- renderPlot(
    ggplot(tdf2, aes(x = Den, y = Pocet)) +
      geom_line(lwd = 1, color = "#f9bd2e") +
      ggtitle("Nakažených za den / Infected per day") +
      theme_wsj() +
      theme(panel.background = element_rect(fill = "#2b374b", color = "#17202A"),
            plot.background = element_rect(fill = "#DC143C", color = "#17202A"),
            panel.grid.minor = element_blank(),
            legend.position = "none", plot.title = element_text(color = "#17202A"),
            axis.title.x = element_blank()) +
      geom_point(aes(col = Pocet, size = 2)) +
      geom_text(aes(label = Pocet), hjust = 0, vjust = 0, color = "white", size = 7)
  )

  # Infected vs recovered stacked columns (not referenced by the UI).
  output$chart4 <- renderPlot(
    ggplot(nak, aes(x = year, y = poc, fill = cond)) +
      ggtitle("Nakažených / Uzdravených Infected / Recovered ") +
      geom_col() +
      scale_fill_manual(values = c("#DC143C", "#66CDAA")) +
      geom_text(aes(label = paste0(poc)), position = position_stack(vjust = 0.7), size = 14) +
      theme_minimal(base_size = 16) +
      ylab("") +
      xlab(NULL) +
      theme(panel.background = element_rect(fill = "#17202A", color = "#17202A"),
            plot.background = element_rect(fill = "#17202A", color = "#17202A"),
            panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
            legend.position = "none", plot.title = element_text(color = "white", size = 14),
            axis.title.x = element_blank())
  )

  # SIR model projection for the first 35 days of the fitted curve.
  output$chart5 <- renderPlot(
    ggplot(fit[seq_len(35), ], aes(time, I)) +
      geom_line(lwd = 3, color = "#00868B") +
      geom_point(color = "#DC143C", size = 6) +
      ggtitle("SIR model prediction COV19CZ") +
      xlab("Days") + ylab("Infected") +
      theme_minimal() +
      geom_text(aes(label = ceiling(I)), hjust = 1, vjust = 0, color = "black", size = 7) +
      theme(panel.background = element_rect(fill = "goldenrod", color = "black"),
            plot.background = element_rect(fill = "#DC143C", color = "black"),
            legend.position = "none", plot.title = element_text(color = "#17202A"),
            panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(),
            panel.grid.major = element_line(
              colour = "black", size = 1, linetype = "dotted",
              lineend = NULL, color = NULL, arrow = NULL,
              inherit.blank = FALSE
            ),
            axis.text = element_text(size = 15, colour = "black"),
            axis.title = element_text(size = 15),
            title = element_text(size = 20))
  )
}
shinyApp(ui, server)
|
/app.R
|
no_license
|
3p1463k/nCov2019CZ
|
R
| false
| false
| 13,763
|
r
|
## app.R ##
library(shinydashboard)
library(shinydashboardPlus)
library(shiny)
library(tidyverse)
library(readxl)
library(ggthemes)
library(ggrepel)
library(shinyWidgets)
library(plotly)
library(shinycssloaders)
library(dashboardthemes)
library(RCzechia)
library(leaflet)
library(sf)
library(RJSONIO)
library(deSolve)
my_colors <- c("#DC143C","#F9A828", "#36648B", "#8B1C62", "#00868B", "#698B69", "#CDC673",
"#8B5A00", "#EE9572", "#483D8B", "#7A378B", "#CD69C9", "#FFB6C1", "#00C78C")
df = rjson::fromJSON(file="https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
tdf2 <- data.frame(matrix(unlist(df$infectedDaily), nrow=length(df$infectedDaily), byrow=T))
tdf2$X1 <- as.numeric(as.character(tdf2$X1))
tdf2$X2 <- as.Date(as.character(tdf2$X2))
tdf2 <- tdf2[32:nrow(tdf2),]
names(tdf2) <- c("Pocet", "Den")
cz <- RCzechia::kraje("low")
tdf1 <- data.frame(matrix(unlist(df$infectedByRegion), nrow=length(df$infectedByRegion), byrow=T))
names(tdf1) <- c("NAZ_CZNUTS3", "Pocet")
tdf1$NAZ_CZNUTS3 <- as.character(tdf1$NAZ_CZNUTS3)
tdf1$Pocet <- as.numeric(as.character(tdf1$Pocet))
dc <- cz %>% inner_join(tdf1, by = "NAZ_CZNUTS3")
poc <- c(df[[2]],df[[3]])
year <- c("CZ", "CZ")
cond <- c("nakazeny","uzdraveny")
nak <- data_frame(cond,poc,year)
#Infected <- c(45, 62, 121, 198, 291, 440, 571, 830, 1287, 1975, 2744, 4515, 5974, 7711, 9692, 11791, 14380, 17205, 20440)
inf <- tdf2[tdf2$Pocet != 0, ]
Infected <- inf$Pocet
Day <- 1:(length(Infected))
N <- 10000000 # population of mainland china
old <- par(mfrow = c(1, 2))
# Derivatives of the SIR compartmental model for deSolve::ode().
# state = c(S, I, R), parameters = c(beta, gamma); the population size N
# is looked up in the enclosing (global) environment.
SIR <- function(time, state, parameters) {
  par <- as.list(c(state, parameters))
  with(par, {
    dS <- -beta/N * I * S            # susceptibles lost to infection
    dI <- beta/N * I * S - gamma * I # new infections minus recoveries
    dR <- gamma * I                  # recoveries
    list(c(dS, dI, dR))
  })
}
init <- c(S = N-Infected[1], I = Infected[1], R = 0)
# Residual sum of squares between the observed infection counts and the
# SIR model's infected compartment; objective function for optim().
# `parameters` is an unnamed length-2 vector (beta, gamma); `init`,
# `Day`, `SIR` and `Infected` are taken from the global environment, and
# ode() comes from the deSolve package loaded above.
RSS <- function(parameters) {
  names(parameters) <- c("beta", "gamma")
  out <- ode(y = init, times = Day, func = SIR, parms = parameters)
  # column 3 of the deSolve output matrix is the I compartment
  fit <- out[ , 3]
  sum((Infected - fit)^2)
}
Opt <- optim(c(0.5, 0.5), RSS, method = "L-BFGS-B", lower = c(0, 0), upper = c(1, 1)) # optimize with some sensible conditions
Opt_par <- setNames(Opt$par, c("beta", "gamma"))
t <- 1:70 # time in days
fit <- data.frame(ode(y = init, times = t, func = SIR, parms = Opt_par))
col <- 1:3 # colour
R0 <- setNames(Opt_par["beta"] / Opt_par["gamma"], "R0")
fit[fit$I == max(fit$I), "I", drop = FALSE] # height of pandemic
# Dashboard layout: header with logo, sidebar with four tabs
# (Total / Daily / Prediction / Map) and a body holding one tabItem per
# menu entry. Each tab shows a spinner-wrapped plot plus the data source.
ui <- dashboardPagePlus(
  collapse_sidebar = FALSE,
  header = dashboardHeaderPlus(
    title = tagList(
      span(class = "logo-lg", "COVID19 Czechia"),
      img(src = "https://image.flaticon.com/icons/svg/204/204074.svg")
    ),
    enable_rightsidebar = FALSE,
    rightSidebarIcon = "gears"
  ),
  sidebar = dashboardSidebar(
    sidebarMenu(
      menuItem("Total", tabName = "dashboard", icon = icon("dashboard")),
      menuItem("Daily", tabName = "summary", icon = icon("bar-chart-o"),
               badgeLabel = "info", badgeColor = "red"),
      menuItem("Prediction", tabName = "prediction", icon = icon("bar-chart-o"),
               badgeLabel = "new", badgeColor = "green"),
      menuItem("Map", tabName = "map", icon = icon("map-marker-alt"),
               badgeLabel = "new", badgeColor = "blue")
    ),
    collapsed = FALSE
  ),
  body = dashboardBody(
    # apply the dashboard-wide theme
    shinyDashboardThemes(
      theme = "grey_dark"
    ),
    tabItems(
      # overview tab: regional totals plus tested/infected/recovered counters
      tabItem(
        tabName = "dashboard",
        fluidRow(
          boxPlus(plotOutput("chart2", height = 680) %>% withSpinner(type = "5"),
                  width = 10, background = "black"),
          boxPlus(descriptionBlock(header = h3(df$totalTested),
                                   text = h4("Testovaných"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "blue"),
          boxPlus(descriptionBlock(header = h3(df[[2]]),
                                   text = h4("Nakažených"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "red"),
          boxPlus(descriptionBlock(header = h3(df[[3]]),
                                   text = h4("Uzdraveno"),
                                   right_border = FALSE,
                                   margin_bottom = FALSE),
                  width = 2, background = "green")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # daily-counts tab
      tabItem(
        tabName = "summary",
        fluidRow(
          boxPlus(plotOutput("chart3", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "yellow")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # SIR prediction tab
      tabItem(
        tabName = "prediction",
        fluidRow(
          boxPlus(plotOutput("chart5", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "black")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      ),
      # choropleth map tab
      tabItem(
        tabName = "map",
        fluidRow(
          boxPlus(plotOutput("chart1", height = 680) %>% withSpinner(type = "5"),
                  width = 12, background = "black")
        ),
        h5("AKTUALIZOVANO : ", df$lastUpdatedAtSource),
        p("Source", "https://api.apify.com/v2/key-value-stores/K373S4uCFR9W1K8ei/records/LATEST?disableRedirect=true")
      )
    ),
    title = "Covid19-CZ"
  )
)
server <- function(input, output, session) {
# output$chart1 <- renderPlotly(ggplot(data =dc)+
# geom_sf(aes(fill=Pocet )) + scale_fill_continuous(high = "#641E16", low = "#D98880")+
# theme_void()+theme(panel.background = element_rect(fill ="#17202A", color = "#17202A"),
# plot.background = element_rect(fill = "#17202A", color="#17202A"))
# )
#
output$chart1 <- renderPlot(ggplot(data = dc) +
geom_sf(aes(fill=Pocet)) +
scale_fill_continuous(high = "#DC143C", low = "goldenrod")+
geom_sf_text(aes(label=Pocet), size=8)+
theme_solid() +
theme(legend.text.align = 1,
legend.title.align = 0.5,
plot.background = element_rect(fill = "#116979", color="black"),
legend.position = "none",
), bg="#116979"
)
output$chart2 <- renderPlot(ggplot(dc, aes(x=reorder(NAZ_CZNUTS3, Pocet), y=Pocet, fill=NAZ_CZNUTS3))+
geom_bar(stat = "identity")+ coord_flip()+ggtitle("Nakažených Celkem / Infected Total")+
geom_text(aes(label=Pocet), hjust = 0, color="white", size=8)+
theme_bw()+xlab("")+
theme(
panel.background = element_rect(fill ="#2b374b", color = "#17202A"),
plot.background = element_rect(fill = "#17202A", color="#17202A"),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.position = "none",axis.title.x=element_blank(), axis.text=element_text(size=14, colour = "white"),
plot.title = element_text(color="white", size=16)
)+
scale_fill_manual(values = c(my_colors))
)
output$chart3 <- renderPlot(ggplot(tdf2, aes(x=Den, y=Pocet))+geom_line(lwd=1, color="#f9bd2e")+
ggtitle("Nakažených za den / Infected per day")+theme_wsj()+
theme(panel.background = element_rect(fill ="#2b374b", color = "#17202A"),
plot.background = element_rect(fill = "#DC143C", color="#17202A"),
panel.grid.minor = element_blank(),
legend.position = "none",plot.title = element_text(color="#17202A"),
axis.title.x=element_blank())+
geom_point(aes(col=Pocet,size=2))+
geom_text(aes(label=Pocet),hjust=0, vjust=0, color="white", size=7)
)
# Stacked column chart comparing infected vs recovered counts
# (`nak`: x = year, y = poc, stacked by cond; two fixed fill colours).
output$chart4 <- renderPlot(ggplot(nak, aes(x=year, y=poc, fill=cond))+
ggtitle("Nakažených / Uzdravených Infected / Recovered ")+
geom_col()+scale_fill_manual(values = c("#DC143C","#66CDAA"))+
# Counts printed inside each stacked segment.
geom_text(aes(label = paste0(poc)), position = position_stack(vjust = 0.7), size=14)+
#scale_fill_brewer(palette = "Set2") +
theme_minimal(base_size = 16) +
ylab("") +
xlab(NULL)+
# Dark dashboard styling to match the other panels.
theme(panel.background = element_rect(fill ="#17202A", color = "#17202A"),
plot.background = element_rect(fill = "#17202A", color="#17202A"),
panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
legend.position = "none",plot.title = element_text(color="white", size=14),
axis.title.x=element_blank())
)
# SIR-model projection of the infected count over time, using the first
# 35 rows of `fit` (note: R drops the 0 index, so 0:35 selects rows 1-35).
output$chart5 <- renderPlot(ggplot(fit[0:35,], aes(time, I))+
geom_line(lwd=3, color="#00868B")+
geom_point(aes(), color="#DC143C", size=6)+
ggtitle("SIR model prediction COV19CZ")+xlab("Days")+ ylab("Infected")+ theme_minimal()+
# Predicted counts rounded up and printed next to each point.
geom_text(aes(label=ceiling(I)), hjust=1, vjust=0, color="black", size=7)+
theme(panel.background = element_rect(fill ="goldenrod", color = "black"),
plot.background = element_rect(fill = "#DC143C", color="black"),
legend.position = "none",plot.title = element_text(color="#17202A"),
panel.grid.minor = element_blank(), panel.grid.major.x = element_blank(),
# Dotted black horizontal grid lines; note both `colour` and `color` are
# passed here (color = NULL is redundant alongside colour = "black").
panel.grid.major = element_line(
colour = "black",
size = 1,
linetype = "dotted",
lineend = NULL,
color = NULL,
arrow = NULL,
inherit.blank = FALSE
), axis.text = element_text(size=15, colour = "black"),
axis.title = element_text(size = 15),
title = element_text(size=20)
)
)
}
shinyApp(ui, server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a3_power.R
\name{emp_power}
\alias{emp_power}
\title{Empirical power of a t-test}
\usage{
emp_power(n, mu, mu0, sd, alpha = 0.05, R = 1000, type = c("one_sample",
"two_sample"), ...)
}
\arguments{
\item{n}{numeric. sample size}
\item{mu}{mean value used to draw random normal samples. See ?rnorm() for more information}
\item{mu0}{numeric. mean of control group if running a two-sample t-test or, if type = "one_sample", a value indicating the true value of the mean (or difference in means if you are performing a one sample test). See ?t.test() for more information}
\item{sd}{standard deviation used to draw random normal samples}
\item{alpha}{significance level}
\item{R}{number of replications}
\item{type}{string. if 'one_sample', then a one-sample t-test, else a two-sample t-test}
\item{...}{optional arguments. You may pass selected parameters from the t.test() function. You may also pass the following parameters:
\itemize{
\item{n0: numeric. sample size of control group if running a two-sample t-test. If not supplied then the function will use the value of n defined above.}
\item{sd0: numeric. standard deviation of control group if running a two-sample t-test. If not supplied then the function will use the value of sd defined above.}
\item{alternative: a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter. See ?t.test() for more information}
}
Note that you are required to either pass all parameters related to the control group (n0, mu0, sd0) or a parameter related to the reference mean (ref_mu).}
}
\value{
list containing:
\itemize{
\item{inputs: }{list. user input values}
\item{data: }{matrix. values of R samples of size n with mean mu and standard deviation sd drawn from a normal distribution using rnorm()}
\item{p_values: }{vector. resulting p-values of R tests comparing the samples against the reference mean ref_mu}
\item{power: }{vector. power calculated by taking the proportion of p-values for which the value falls below or is equal to the significance level alpha}
\item{se: }{standard error of the power estimate. Calculated using the SE formula of a proportion. For more information, see the reference to Rizzo below.}
}
}
\description{
This function takes a reference mean value, sample size, mean, standard deviation, significance level and computes the empirical power of R randomly drawn normally distributed samples with mean mu, standard deviation sd and sample size n. The function can compute the power of one-sample t-tests and two-sample t-tests (variance is always assumed to be equal). This function will always return the data it generates internally because its focus is not on efficiency but rather reproducibility
}
\seealso{
Rizzo, Maria L. 'Statistical Computing with R. Chapman and Hall/CRC, 2007'. (pp. 167-169)
}
|
/man/emp_power.Rd
|
permissive
|
JasperHG90/meerkat
|
R
| false
| true
| 2,982
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/a3_power.R
\name{emp_power}
\alias{emp_power}
\title{Empirical power of a t-test}
\usage{
emp_power(n, mu, mu0, sd, alpha = 0.05, R = 1000, type = c("one_sample",
"two_sample"), ...)
}
\arguments{
\item{n}{numeric. sample size}
\item{mu}{mean value used to draw random normal samples. See ?rnorm() for more information}
\item{mu0}{numeric. mean of control group if running a two-sample t-test or, if type = "one_sample", a value indicating the true value of the mean (or difference in means if you are performing a one sample test). See ?t.test() for more information}
\item{sd}{standard deviation used to draw random normal samples}
\item{alpha}{significance level}
\item{R}{number of replications}
\item{type}{string. if 'one_sample', then a one-sample t-test, else a two-sample t-test}
\item{...}{optional arguments. You may pass selected parameters from the t.test() function. You may also pass the following parameters:
\itemize{
\item{n0: numeric. sample size of control group if running a two-sample t-test. If not supplied then the function will use the value of n defined above.}
\item{sd0: numeric. standard deviation of control group if running a two-sample t-test. If not supplied then the function will use the value of sd defined above.}
\item{alternative: a character string specifying the alternative hypothesis, must be one of "two.sided" (default), "greater" or "less". You can specify just the initial letter. See ?t.test() for more information}
}
Note that you are required to either pass all parameters related to the control group (n0, mu0, sd0) or a parameter related to the reference mean (ref_mu).}
}
\value{
list containing:
\itemize{
\item{inputs: }{list. user input values}
\item{data: }{matrix. values of R samples of size n with mean mu and standard deviation sd drawn from a normal distribution using rnorm()}
\item{p_values: }{vector. resulting p-values of R tests comparing the samples against the reference mean ref_mu}
\item{power: }{vector. power calculated by taking the proportion of p-values for which the value falls below or is equal to the significance level alpha}
\item{se: }{standard error of the power estimate. Calculated using the SE formula of a proportion. For more information, see the reference to Rizzo below.}
}
}
\description{
This function takes a reference mean value, sample size, mean, standard deviation, significance level and computes the empirical power of R randomly drawn normally distributed samples with mean mu, standard deviation sd and sample size n. The function can compute the power of one-sample t-tests and two-sample t-tests (variance is always assumed to be equal). This function will always return the data it generates internally because its focus is not on efficiency but rather reproducibility
}
\seealso{
Rizzo, Maria L. 'Statistical Computing with R. Chapman and Hall/CRC, 2007'. (pp. 167-169)
}
|
## H2O deep-learning hyperparameter experiments on the 100K-row airline data.
library(h2o)
## Start a local H2O cluster using all available cores.
h2o.init(nthreads=-1)
dx <- h2o.importFile("wk09/lect/data/airline100K.csv")
## 60/20/20 train/validation/test split, seeded for reproducibility.
dx_split <- h2o.splitFrame(dx, ratios = c(0.6,0.2), seed = 123)
dx_train <- dx_split[[1]]
dx_valid <- dx_split[[2]]
dx_test <- dx_split[[3]]
## Predictors: every column except the binary target dep_delayed_15min.
Xnames <- names(dx_train)[which(names(dx_train)!="dep_delayed_15min")]
## --- Network architecture experiments ---------------------------------------
## Every run below trains with early stopping on validation AUC (2 rounds,
## tolerance 0), is timed with system.time(), and reports the test-set AUC.

## Baseline: library defaults (Rectifier activation, two hidden layers of 200).
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
## DEFAULT: activation = "Rectifier", hidden = c(200,200),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Four narrow layers (50 units each) with 20% input dropout.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(50,50,50,50), input_dropout_ratio = 0.2,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Same four narrow layers without input dropout.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(50,50,50,50),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Progressively smaller networks: 20x20, then single layers of 20, 5 and 1.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(20,20),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(20),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(5),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(1),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## --- Regularization experiments ---------------------------------------------
## Default 200x200 architecture with weak L1/L2 weight penalties.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200), l1 = 1e-5, l2 = 1e-5,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Deeper 4x200 network with per-layer hidden dropout (20%/10%/10%/0%).
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "RectifierWithDropout", hidden = c(200,200,200,200), hidden_dropout_ratios=c(0.2,0.1,0.1,0),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## --- Adaptive learning rate (ADADELTA) tuning: vary rho and epsilon ----------
## The inline comments record the library defaults for comparison.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.95, epsilon = 1e-06, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-08, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.9999, epsilon = 1e-08, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-06, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-09, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## --- Manual learning rate / momentum experiments (adaptive_rate = FALSE) -----
## Plain SGD with the library's manual-rate defaults.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, ## default: rate = 0.005, rate_decay = 1, momentum_stable = 0,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Momentum ramped 0.5 -> 0.99 over 1e5 samples; base rate 0.001 vs 0.01.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.001, momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Add learning-rate annealing (1e-05 vs 1e-04).
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-04,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
## Lower the stable momentum to 0.9; then also shorten the ramp to 1e4.
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.9,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e4, momentum_stable = 0.9,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
|
/wk11/lect/2-NN/h2o.R
|
no_license
|
paljenczy/teach-ML-CEU-master-bizanalytics
|
R
| false
| false
| 8,827
|
r
|
library(h2o)
h2o.init(nthreads=-1)
dx <- h2o.importFile("wk09/lect/data/airline100K.csv")
dx_split <- h2o.splitFrame(dx, ratios = c(0.6,0.2), seed = 123)
dx_train <- dx_split[[1]]
dx_valid <- dx_split[[2]]
dx_test <- dx_split[[3]]
Xnames <- names(dx_train)[which(names(dx_train)!="dep_delayed_15min")]
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
## DEFAULT: activation = "Rectifier", hidden = c(200,200),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(50,50,50,50), input_dropout_ratio = 0.2,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(50,50,50,50),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(20,20),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(20),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(5),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(1),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200), l1 = 1e-5, l2 = 1e-5,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "RectifierWithDropout", hidden = c(200,200,200,200), hidden_dropout_ratios=c(0.2,0.1,0.1,0),
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.95, epsilon = 1e-06, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-08, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.9999, epsilon = 1e-08, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-06, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
rho = 0.999, epsilon = 1e-09, ## default: rho = 0.99, epsilon = 1e-08
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, ## default: rate = 0.005, rate_decay = 1, momentum_stable = 0,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.001, momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-04,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.99,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e5, momentum_stable = 0.9,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
system.time({
md <- h2o.deeplearning(x = Xnames, y = "dep_delayed_15min", training_frame = dx_train, validation_frame = dx_valid,
activation = "Rectifier", hidden = c(200,200),
adaptive_rate = FALSE, rate = 0.01, rate_annealing = 1e-05,
momentum_start = 0.5, momentum_ramp = 1e4, momentum_stable = 0.9,
epochs = 100, stopping_rounds = 2, stopping_metric = "AUC", stopping_tolerance = 0)
})
h2o.performance(md, dx_test)@metrics$AUC
|
#!/usr/bin/env Rscript
# Copyright (c) 2017 Michael Roach (Australian Wine Research Institute)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ggplot2 is a hard dependency of everything below; library() fails fast if it
# is missing, whereas require() would only warn and let the script die later.
library(ggplot2)

# Usage text printed when too few positional arguments are supplied.
usage <- "
USAGE:
dot_plot_plus.Rscript out.png q-ctg-name q-ctg-len contig.cov r1-ctg-name r1.rdotplot r1-len [ r2-ctg-name r2.rdotplot r2-len ]
arguments are positional because lazy
out.png output png image file of the dotplot with coverage hist
q-ctg-name name of query contig
q-ctg-len length of the query contig
contig.cov input window coverage hist .tsv file generated by purge_haplotigs
r1-ctg-name name of first ref contig
r1.rdotplot lastz rdotplot output for query contig against first reference
r1-len length of the r1 contig
r2-ctg-name name of second ref contig
r2.rdotplot second reference lastz-rdotplot
r2-len length of the r2 contig
"

# At least 7 positional args are required; the optional second reference
# contig adds three more (10 in total).
args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 7) {
  cat(usage)
  stop()
}
# TRUE once a second reference contig is supplied (10 args instead of 7).
ref2 <- FALSE

# Positional arguments. args[1] (the output image path) is consumed later by
# ggsave(); everything else is parsed here.
q_ctg <- args[2]
q_len <- as.integer(args[3])
q_cov <- read.table(args[4], header = FALSE)   # two-column coverage table
r1_ctg <- args[5]
r1_dat <- read.table(args[6], header = FALSE)  # lastz rdotplot coordinates
r1_len <- as.integer(args[7])
if (length(args) == 10) {
  r2_ctg <- args[8]
  r2_dat <- read.table(args[9], header = FALSE)
  r2_len <- as.integer(args[10])
  ref2 <- TRUE
}
# Horizontal offset for the rotated axis labels: 5% of the query length,
# placed to the left of x = 0.
padding <- -0.05 * q_len

# Uniform column names so all tables plot with aes(X, Y).
names(q_cov) <- c("X", "Y")
names(r1_dat) <- c("X", "Y")
if (ref2) {
  names(r2_dat) <- c("X", "Y")
}

# Vertical layout of the stacked panels (shared x-axis = query position):
#   cov span  = -1.5 to 1.5
#   dotplot1  =  1.5 to 3.5
#   dotplot2  =  3.5 to 5.5
# Rescale each reference coordinate to a 0..2 span and shift it into its panel.
r1_dat$Y <- ((r1_dat$Y / r1_len) * 2) + 1.5
if (ref2) {
  r2_dat$Y <- ((r2_dat$Y / r2_len) * 2) + 3.5
}
# Build the stacked figure: coverage scatter at the bottom, one or two
# alignment dotplots above it, separated by grey divider lines; rotated
# labels on the left name each panel. The ggplot object is the last
# top-level expression, so ggsave() below picks it up as the last plot.
if (ref2){
#png(filename = args[1], width = 1000, height = 1500, units = "px")
ggplot() +
geom_path(aes(x=r1_dat$X,y=r1_dat$Y),size=0.5) +
geom_path(aes(x=r2_dat$X,y=r2_dat$Y),size=0.5) +
geom_point(aes(x=q_cov$X,y=q_cov$Y)) +
# Grey horizontal dividers at the panel boundaries, plus a left border.
geom_path(aes(x=c(0,q_len),y=c(-1.5,-1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(1.5,1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(3.5,3.5)),color='grey') +
geom_path(aes(x=c(0,0),y=c(-1.5,5.5)),color='grey') +
annotate("text", x=padding, y=0, label= "log2(read-depth)",angle=90) +
annotate("text", x=padding, y=2, label=r1_ctg,angle=90) +
annotate("text", x=padding, y=4, label=r2_ctg,angle=90) +
scale_x_continuous(name=q_ctg) +
theme(panel.background=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank(),axis.title.y=element_blank())
} else {
#png(filename = args[1], width = 1000, height = 1000, units = "px")
ggplot() +
geom_path(aes(x=r1_dat$X,y=r1_dat$Y),size=0.5) +
geom_point(aes(x=q_cov$X,y=q_cov$Y)) +
geom_path(aes(x=c(0,q_len),y=c(-1.5,-1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(1.5,1.5)),color='grey') +
geom_path(aes(x=c(0,0),y=c(-1.5,3.5)),color='grey') +
annotate("text", x=padding, y=0, label= "log2(read-depth)",angle=90) +
annotate("text", x=padding, y=2, label=r1_ctg,angle=90) +
scale_x_continuous(name=q_ctg) +
theme(panel.background=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank(),axis.title.y=element_blank())
}
# NOTE(review): the usage text promises a .png, but this rewrites the
# extension and saves a PDF instead — confirm which output is intended.
ggsave(gsub(".png", ".pdf", args[1]), width = 10, height = 8, dpi = 150)
|
/dx_applets/purge_haplotigs/purge_haplotigs_contigcov_and_purge/resources/home/dnanexus/dot_plot.Rscript
|
permissive
|
VGP/vgp-assembly
|
R
| false
| false
| 4,446
|
rscript
|
#!/usr/bin/env Rscript
# Copyright (c) 2017 Michael Roach (Australian Wine Research Institute)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
require(ggplot2)
usage = "
USAGE:
dot_plot_plus.Rscript out.png q-ctg-name q-ctg-len contig.cov r1-ctg-name r1.rdotplot r1-len [ r2-ctg-name r2.rdotplot r2-len ]
arguments are positional because lazy
out.png output png image file of the dotplot with coverage hist
q-ctg-name name of query contig
q-ctg-len length of the query contig
contig.cov input window coverage hist .tsv file generated by purge_haplotigs
r1-ctg-name name of first ref contig
r1.rdotplot lastz rdotplot output for query contig against first reference
r1-len length of the r1 contig
r2-ctg-name name of second ref contig
r2.rdotplot second reference lastz-rdotplot
r2-len length of the r2 contig
"
## --- Argument handling ---------------------------------------------------
## Positional arguments (documented in the `usage` string defined above):
##   args[1]     output image path (".png" name; saved as ".pdf" below)
##   args[2:3]   query contig name and length
##   args[4]     purge_haplotigs coverage histogram .tsv for the query contig
##   args[5:7]   first reference contig name, lastz rdotplot file, length
##   args[8:10]  (optional) second reference contig name, rdotplot file, length
args = commandArgs(trailingOnly = T)
## At least 7 arguments are required (output file + query + first reference).
if (length(args)<7){
cat(usage)
stop()
}
# initialize values
# ref2 tracks whether a second reference contig was supplied
ref2 = FALSE
# parse args
q_ctg = args[2]
q_len = as.integer(args[3])
q_cov = read.table(args[4], header = F)
r1_ctg = args[5]
r1_dat = read.table(args[6], header = F)
r1_len = as.integer(args[7])
# the second reference is only used when all three of its arguments are given
if (length(args) == 10){
r2_ctg = args[8]
r2_dat = read.table(args[9], header = F)
r2_len = as.integer(args[10])
ref2 = TRUE
}
# annotation padding
# negative x offset (5% of the query length) so the rotated axis labels sit
# to the left of x = 0
padding = -0.05 * q_len
# table headers
names(q_cov) = c("X", "Y")
names(r1_dat) = c("X", "Y")
if (ref2){
names(r2_dat) = c("X", "Y")
}
# cov span = -1.5 to 1.5
# dotplot1 = 1.5 to 3.5
# dotplot2 = 3.5 to 5.5
# convert the y-spans to 0-1 and apply offsets
# i.e. rescale each rdotplot's y values into its own 2-unit-tall band on the
# shared y axis, stacked above the coverage track
r1_dat$Y = ((r1_dat$Y / r1_len) * 2) + 1.5
if (ref2){
r2_dat$Y = ((r2_dat$Y / r2_len) * 2) + 3.5
}
## --- Plotting ------------------------------------------------------------
## Stacked panels sharing one y axis: coverage track (-1.5..1.5), first
## reference dotplot (1.5..3.5), and, when present, a second reference
## dotplot (3.5..5.5). Grey paths draw the panel separators and the y axis.
if (ref2){
#png(filename = args[1], width = 1000, height = 1500, units = "px")
ggplot() +
geom_path(aes(x=r1_dat$X,y=r1_dat$Y),size=0.5) +
geom_path(aes(x=r2_dat$X,y=r2_dat$Y),size=0.5) +
geom_point(aes(x=q_cov$X,y=q_cov$Y)) +
geom_path(aes(x=c(0,q_len),y=c(-1.5,-1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(1.5,1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(3.5,3.5)),color='grey') +
geom_path(aes(x=c(0,0),y=c(-1.5,5.5)),color='grey') +
annotate("text", x=padding, y=0, label= "log2(read-depth)",angle=90) +
annotate("text", x=padding, y=2, label=r1_ctg,angle=90) +
annotate("text", x=padding, y=4, label=r2_ctg,angle=90) +
scale_x_continuous(name=q_ctg) +
theme(panel.background=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank(),axis.title.y=element_blank())
} else {
#png(filename = args[1], width = 1000, height = 1000, units = "px")
ggplot() +
geom_path(aes(x=r1_dat$X,y=r1_dat$Y),size=0.5) +
geom_point(aes(x=q_cov$X,y=q_cov$Y)) +
geom_path(aes(x=c(0,q_len),y=c(-1.5,-1.5)),color='grey') +
geom_path(aes(x=c(0,q_len),y=c(1.5,1.5)),color='grey') +
geom_path(aes(x=c(0,0),y=c(-1.5,3.5)),color='grey') +
annotate("text", x=padding, y=0, label= "log2(read-depth)",angle=90) +
annotate("text", x=padding, y=2, label=r1_ctg,angle=90) +
scale_x_continuous(name=q_ctg) +
theme(panel.background=element_blank(),axis.text.y=element_blank(),axis.ticks.y=element_blank(),axis.title.y=element_blank())
}
## FIX: the original used gsub(".png", ".pdf", args[1]); an unescaped "." is a
## regex wildcard, so any 4-char run ending in "png" anywhere in the path was
## replaced. Anchor the pattern to the file extension instead.
ggsave(sub("\\.png$", ".pdf", args[1]), width = 10, height = 8, dpi = 150)
|
## Path to the semicolon-separated UCI household power consumption file.
path2data <- "C:\\Data\\My Documents\\Courses\\Data Science\\Exploratory Data Analysis\\Project\\exdata-data-household_power_consumption\\household_power_consumption.txt"
## Read the names of the variables
## (only the first row of the file is read, as character, to get the header)
datanames <- read.table(path2data, sep=";", nrows=1, colClasses="character")
## Read the data and format
# NOTE(review): no na.strings is set, so "?" missing markers remain literal
# "?" strings; downstream plotting relies on implicit coercion to NA.
data <- read.table(path2data, sep=";", skip=1, col.names=datanames, colClasses="character")
## Keep only the two target days, 1-2 February 2007 (dates stored as d/m/yyyy).
subdata <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
subdata$Date <- as.Date(subdata$Date, format = '%d/%m/%Y')
## Plot 3
## Concatenate the date and time variables, convert to date/time format and
## plot time vs Energy sub metering
library(lubridate)
time_object <- ymd_hms(paste(subdata$Date, subdata$Time))
# NOTE(review): the sub-metering columns are still character at this point
# (colClasses="character" above); plot()/lines() coerce them to numeric
# implicitly -- consider converting with as.numeric() explicitly.
plot(time_object, subdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
## Draw the legend, then overlay the remaining two sub-metering series.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("black", "red", "blue"), cex=.75, xjust=1)
lines(time_object, subdata$Sub_metering_2, col="red")
lines(time_object, subdata$Sub_metering_3, col="blue")
## Save the plot
## Copy the screen device to plot3.png (default 480x480 px) and close it.
dev.copy(png, file="plot3.png")
dev.off()
|
/plot3.R
|
no_license
|
sashalyulko/ExData_Plotting1
|
R
| false
| false
| 1,139
|
r
|
## Path to the semicolon-separated UCI household power consumption file.
path2data <- "C:\\Data\\My Documents\\Courses\\Data Science\\Exploratory Data Analysis\\Project\\exdata-data-household_power_consumption\\household_power_consumption.txt"
## Read the names of the variables
## (only the first row of the file is read, as character, to get the header)
datanames <- read.table(path2data, sep=";", nrows=1, colClasses="character")
## Read the data and format
# NOTE(review): no na.strings is set, so "?" missing markers remain literal
# "?" strings; downstream plotting relies on implicit coercion to NA.
data <- read.table(path2data, sep=";", skip=1, col.names=datanames, colClasses="character")
## Keep only the two target days, 1-2 February 2007 (dates stored as d/m/yyyy).
subdata <- subset(data, data$Date == "1/2/2007" | data$Date == "2/2/2007")
subdata$Date <- as.Date(subdata$Date, format = '%d/%m/%Y')
## Plot 3
## Concatenate the date and time variables, convert to date/time format and
## plot time vs Energy sub metering
library(lubridate)
time_object <- ymd_hms(paste(subdata$Date, subdata$Time))
# NOTE(review): the sub-metering columns are still character at this point
# (colClasses="character" above); plot()/lines() coerce them to numeric
# implicitly -- consider converting with as.numeric() explicitly.
plot(time_object, subdata$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
## Draw the legend, then overlay the remaining two sub-metering series.
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), col=c("black", "red", "blue"), cex=.75, xjust=1)
lines(time_object, subdata$Sub_metering_2, col="red")
lines(time_object, subdata$Sub_metering_3, col="blue")
## Save the plot
## Copy the screen device to plot3.png (default 480x480 px) and close it.
dev.copy(png, file="plot3.png")
dev.off()
|
# Convert an NHANES survey-cycle string (e.g. "2003-2004") to the year label
# used in the lookup table (e.g. "A 03-04").
#
# Generalized from a hard-coded switch: any cycle of the form "YYYY-YYYY" is
# converted by keeping the last two digits of each year, so cycles newer than
# 2011-2012 are supported too. All previously supported cycles map to exactly
# the same labels as before. Non-matching input returns NULL, matching the
# old switch() fallthrough behavior.
#
# @param cycle A single survey-cycle string such as "1999-2000".
# @return The matching "A YY-YY" label, or NULL if `cycle` is not a valid
#   "YYYY-YYYY" cycle string.
cycle_to_lookup_year <- function(cycle) {
  if (!is.character(cycle) || length(cycle) != 1 ||
      !grepl("^[0-9]{4}-[0-9]{4}$", cycle)) {
    return(NULL)
  }
  # Keep the trailing two digits of each year: "2003-2004" -> "A 03-04"
  sub("^[0-9]{2}([0-9]{2})-[0-9]{2}([0-9]{2})$", "A \\1-\\2", cycle)
}
# Return the rows of the global `lookup_table` matching a variable name and
# survey cycle. The cycle string is first translated to the internal
# "A YY-YY" year label used by the table.
lookup <- function(column, cycle) {
  year_label <- cycle_to_lookup_year(cycle)
  row_mask <- lookup_table$match_column == column & lookup_table$year == year_label
  lookup_table[row_mask, ]
}
# Fetch the detection-limit value(s) for a variable in a given cycle.
# Detection limits live in lookup-table columns named like "X2003.2004"
# (the cycle with "-" replaced by "."); if the table has no such column,
# NULL is returned.
lookup_dl <- function(column, cycle) {
  dl_column <- paste0("X", chartr("-", ".", cycle))
  if (dl_column %in% names(lookup_table)) {
    lookup(column, cycle)[, dl_column]
  } else {
    NULL
  }
}
|
/R/lookup_table.R
|
permissive
|
SilentSpringInstitute/RNHANES
|
R
| false
| false
| 722
|
r
|
# Convert an NHANES survey-cycle string (e.g. "2003-2004") to the year label
# used in the lookup table (e.g. "A 03-04").
#
# Generalized from a hard-coded switch: any cycle of the form "YYYY-YYYY" is
# converted by keeping the last two digits of each year, so cycles newer than
# 2011-2012 are supported too. All previously supported cycles map to exactly
# the same labels as before. Non-matching input returns NULL, matching the
# old switch() fallthrough behavior.
#
# @param cycle A single survey-cycle string such as "1999-2000".
# @return The matching "A YY-YY" label, or NULL if `cycle` is not a valid
#   "YYYY-YYYY" cycle string.
cycle_to_lookup_year <- function(cycle) {
  if (!is.character(cycle) || length(cycle) != 1 ||
      !grepl("^[0-9]{4}-[0-9]{4}$", cycle)) {
    return(NULL)
  }
  # Keep the trailing two digits of each year: "2003-2004" -> "A 03-04"
  sub("^[0-9]{2}([0-9]{2})-[0-9]{2}([0-9]{2})$", "A \\1-\\2", cycle)
}
# Return the rows of the global `lookup_table` matching a variable name and
# survey cycle. The cycle string is first translated to the internal
# "A YY-YY" year label used by the table.
lookup <- function(column, cycle) {
  year_label <- cycle_to_lookup_year(cycle)
  row_mask <- lookup_table$match_column == column & lookup_table$year == year_label
  lookup_table[row_mask, ]
}
# Fetch the detection-limit value(s) for a variable in a given cycle.
# Detection limits live in lookup-table columns named like "X2003.2004"
# (the cycle with "-" replaced by "."); if the table has no such column,
# NULL is returned.
lookup_dl <- function(column, cycle) {
  dl_column <- paste0("X", chartr("-", ".", cycle))
  if (dl_column %in% names(lookup_table)) {
    lookup(column, cycle)[, dl_column]
  } else {
    NULL
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simves.R
\name{sim.ves}
\alias{sim.ves}
\title{Simulate Vector Exponential Smoothing}
\usage{
sim.ves(model = "ANN", obs = 10, nsim = 1, nSeries = 2, frequency = 1,
persistence = NULL, phi = 1, transition = NULL, initial = NULL,
initialSeason = NULL, seasonal = c("individual, common"),
weights = rep(1/nSeries, nSeries), bounds = c("usual", "admissible",
"restricted"), randomizer = c("rnorm", "rt", "rlaplace", "rs"), ...)
}
\arguments{
\item{model}{Type of ETS model. This can consist of 3 or 4 chars:
\code{ANN}, \code{AAN}, \code{AAdN}, \code{AAA}, \code{AAdA} etc.
Only pure additive models are supported. If you want to have multiplicative
one, then just take exponent of the generated data.}
\item{obs}{Number of observations in each generated time series.}
\item{nsim}{Number of series to generate (number of simulations to do).}
\item{nSeries}{Number of series in each generated group of series.}
\item{frequency}{Frequency of generated data. In cases of seasonal models
must be greater than 1.}
\item{persistence}{Matrix of smoothing parameters for all the components
of all the generated time series.}
\item{phi}{Value of damping parameter. If trend is not chosen in the model,
the parameter is ignored. If vector is provided, then several parameters
are used for different series.}
\item{transition}{Transition matrix. This should have the size appropriate
to the selected model and \code{nSeries}. e.g. if ETS(A,A,N) is selected
and \code{nSeries=3}, then the transition matrix should be 6 x 6. In case
of damped trend, the phi parameter should be placed in the matrix manually.
if \code{NULL}, then the default transition matrix for the selected type
of model is used. If both \code{phi} and \code{transition} are provided,
then the value of \code{phi} is ignored.}
\item{initial}{Vector of initial states of level and trend. The minimum
length is one (in case of ETS(A,N,N), the initial is used for all the
series), the maximum length is 2 x nSeries. If \code{NULL}, values are
generated for each series.}
\item{initialSeason}{Vector or matrix of initial states for seasonal
coefficients. Should have number of rows equal to \code{frequency}
parameter. If \code{NULL}, values are generated for each series.}
\item{seasonal}{The type of seasonal component across the series. Can be
\code{"individual"}, so that each series has its own component or \code{"common"},
so that the component is shared across the series.}
\item{weights}{The weights for the errors between the series with the common
seasonal component. Ignored if \code{seasonal="individual"}.}
\item{bounds}{Type of bounds to use for persistence vector if values are
generated. \code{"usual"} - bounds from p.156 by Hyndman et. al., 2008.
\code{"restricted"} - similar to \code{"usual"} but with upper bound equal
to 0.3. \code{"admissible"} - bounds from tables 10.1 and 10.2 of Hyndman
et. al., 2008. Using first letter of the type of bounds also works.}
\item{randomizer}{Type of random number generator function used for error
term. Defaults are: \code{rnorm}, \code{rt}, \code{rlaplace}, \code{rs}. But
any function from \link[stats]{Distributions} will do the trick if the
appropriate parameters are passed. \code{mvrnorm} from MASS package can also
be used.}
\item{...}{Additional parameters passed to the chosen randomizer. All the
parameters should be passed in the order they are used in chosen randomizer.
For example, passing just \code{sd=0.5} to \code{rnorm} function will lead
to the call \code{rnorm(obs, mean=0.5, sd=1)}. ATTENTION! When generating
the multiplicative errors some tuning might be needed to obtain meaningful
data. \code{sd=0.1} is usually already a high value for such models.}
}
\value{
List of the following values is returned:
\itemize{
\item \code{model} - Name of ETS model.
\item \code{data} - The matrix (or an array if \code{nsim>1}) of the
generated series.
\item \code{states} - The matrix (or array if \code{nsim>1}) of states.
States are in columns, time is in rows.
\item \code{persistence} - The matrix (or array if \code{nsim>1}) of
smoothing parameters used in the simulation.
\item \code{transition} - The transition matrix (or array if \code{nsim>1}).
\item \code{initial} - Vector (or matrix) of initial values.
\item \code{initialSeason} - Vector (or matrix) of initial seasonal
coefficients.
\item \code{residuals} - Error terms used in the simulation. Either matrix
or array, depending on \code{nsim}.
}
}
\description{
Function generates data using VES model as a data generating process.
}
\details{
For the information about the function, see the vignette:
\code{vignette("simulate","smooth")}
}
\examples{
# Create 40 observations of quarterly data using AAA model with errors
# from normal distribution
\dontrun{VESAAA <- sim.ves(model="AAA",frequency=4,obs=40,nSeries=3,
randomizer="rnorm",mean=0,sd=100)}
# You can also use mvrnorm function from MASS package as randomizer,
# but you need to provide mu and Sigma explicitly
\dontrun{VESANN <- sim.ves(model="ANN",frequency=4,obs=40,nSeries=2,
randomizer="mvrnorm",mu=c(100,50),Sigma=matrix(c(40,20,20,30),2,2))}
# When generating the data with a multiplicative model a more diligent definition
# of parameters is needed. Here's an example with MMM model:
VESMMM <- sim.ves("AAA", obs=120, nSeries=2, frequency=12, initial=c(10,0),
initialSeason=runif(12,-1,1), persistence=c(0.06,0.05,0.2), mean=0, sd=0.03)
VESMMM$data <- exp(VESMMM$data)
# Note that smoothing parameters should be low and the standard deviation should
# definitely be less than 0.1. Otherwise you might face the explosions.
}
\references{
\itemize{
\item de Silva A., Hyndman R.J. and Snyder, R.D. (2010). The vector
innovations structural time series framework: a simple approach to
multivariate forecasting. Statistical Modelling, 10 (4), pp.353-374
\item Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008)
Forecasting with exponential smoothing: the state space approach,
Springer-Verlag. \url{http://www.exponentialsmoothing.net}.
\item Lütkepohl, H. (2005). New Introduction to Multiple Time Series
Analysis. New introduction to Multiple Time Series Analysis. Berlin,
Heidelberg: Springer Berlin Heidelberg.
\url{https://doi.org/10.1007/978-3-540-27752-1}
}
}
\seealso{
\code{\link[smooth]{es}, \link[forecast]{ets},
\link[forecast]{forecast}, \link[stats]{ts}, \link[stats]{Distributions}}
}
\author{
Ivan Svetunkov, \email{ivan@svetunkov.ru}
}
\keyword{models}
\keyword{multivariate}
\keyword{nonlinear}
\keyword{regression}
\keyword{ts}
|
/man/sim.ves.Rd
|
no_license
|
lixixibj/smooth
|
R
| false
| true
| 6,671
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simves.R
\name{sim.ves}
\alias{sim.ves}
\title{Simulate Vector Exponential Smoothing}
\usage{
sim.ves(model = "ANN", obs = 10, nsim = 1, nSeries = 2, frequency = 1,
persistence = NULL, phi = 1, transition = NULL, initial = NULL,
initialSeason = NULL, seasonal = c("individual, common"),
weights = rep(1/nSeries, nSeries), bounds = c("usual", "admissible",
"restricted"), randomizer = c("rnorm", "rt", "rlaplace", "rs"), ...)
}
\arguments{
\item{model}{Type of ETS model. This can consist of 3 or 4 chars:
\code{ANN}, \code{AAN}, \code{AAdN}, \code{AAA}, \code{AAdA} etc.
Only pure additive models are supported. If you want to have multiplicative
one, then just take exponent of the generated data.}
\item{obs}{Number of observations in each generated time series.}
\item{nsim}{Number of series to generate (number of simulations to do).}
\item{nSeries}{Number of series in each generated group of series.}
\item{frequency}{Frequency of generated data. In cases of seasonal models
must be greater than 1.}
\item{persistence}{Matrix of smoothing parameters for all the components
of all the generated time series.}
\item{phi}{Value of damping parameter. If trend is not chosen in the model,
the parameter is ignored. If vector is provided, then several parameters
are used for different series.}
\item{transition}{Transition matrix. This should have the size appropriate
to the selected model and \code{nSeries}. e.g. if ETS(A,A,N) is selected
and \code{nSeries=3}, then the transition matrix should be 6 x 6. In case
of damped trend, the phi parameter should be placed in the matrix manually.
if \code{NULL}, then the default transition matrix for the selected type
of model is used. If both \code{phi} and \code{transition} are provided,
then the value of \code{phi} is ignored.}
\item{initial}{Vector of initial states of level and trend. The minimum
length is one (in case of ETS(A,N,N), the initial is used for all the
series), the maximum length is 2 x nSeries. If \code{NULL}, values are
generated for each series.}
\item{initialSeason}{Vector or matrix of initial states for seasonal
coefficients. Should have number of rows equal to \code{frequency}
parameter. If \code{NULL}, values are generated for each series.}
\item{seasonal}{The type of seasonal component across the series. Can be
\code{"individual"}, so that each series has its own component or \code{"common"},
so that the component is shared across the series.}
\item{weights}{The weights for the errors between the series with the common
seasonal component. Ignored if \code{seasonal="individual"}.}
\item{bounds}{Type of bounds to use for persistence vector if values are
generated. \code{"usual"} - bounds from p.156 by Hyndman et. al., 2008.
\code{"restricted"} - similar to \code{"usual"} but with upper bound equal
to 0.3. \code{"admissible"} - bounds from tables 10.1 and 10.2 of Hyndman
et. al., 2008. Using first letter of the type of bounds also works.}
\item{randomizer}{Type of random number generator function used for error
term. Defaults are: \code{rnorm}, \code{rt}, \code{rlaplace}, \code{rs}. But
any function from \link[stats]{Distributions} will do the trick if the
appropriate parameters are passed. \code{mvrnorm} from MASS package can also
be used.}
\item{...}{Additional parameters passed to the chosen randomizer. All the
parameters should be passed in the order they are used in chosen randomizer.
For example, passing just \code{sd=0.5} to \code{rnorm} function will lead
to the call \code{rnorm(obs, mean=0.5, sd=1)}. ATTENTION! When generating
the multiplicative errors some tuning might be needed to obtain meaningful
data. \code{sd=0.1} is usually already a high value for such models.}
}
\value{
List of the following values is returned:
\itemize{
\item \code{model} - Name of ETS model.
\item \code{data} - The matrix (or an array if \code{nsim>1}) of the
generated series.
\item \code{states} - The matrix (or array if \code{nsim>1}) of states.
States are in columns, time is in rows.
\item \code{persistence} - The matrix (or array if \code{nsim>1}) of
smoothing parameters used in the simulation.
\item \code{transition} - The transition matrix (or array if \code{nsim>1}).
\item \code{initial} - Vector (or matrix) of initial values.
\item \code{initialSeason} - Vector (or matrix) of initial seasonal
coefficients.
\item \code{residuals} - Error terms used in the simulation. Either matrix
or array, depending on \code{nsim}.
}
}
\description{
Function generates data using VES model as a data generating process.
}
\details{
For the information about the function, see the vignette:
\code{vignette("simulate","smooth")}
}
\examples{
# Create 40 observations of quarterly data using AAA model with errors
# from normal distribution
\dontrun{VESAAA <- sim.ves(model="AAA",frequency=4,obs=40,nSeries=3,
randomizer="rnorm",mean=0,sd=100)}
# You can also use mvrnorm function from MASS package as randomizer,
# but you need to provide mu and Sigma explicitly
\dontrun{VESANN <- sim.ves(model="ANN",frequency=4,obs=40,nSeries=2,
randomizer="mvrnorm",mu=c(100,50),Sigma=matrix(c(40,20,20,30),2,2))}
# When generating the data with a multiplicative model a more diligent definition
# of parameters is needed. Here's an example with MMM model:
VESMMM <- sim.ves("AAA", obs=120, nSeries=2, frequency=12, initial=c(10,0),
initialSeason=runif(12,-1,1), persistence=c(0.06,0.05,0.2), mean=0, sd=0.03)
VESMMM$data <- exp(VESMMM$data)
# Note that smoothing parameters should be low and the standard deviation should
# definitely be less than 0.1. Otherwise you might face the explosions.
}
\references{
\itemize{
\item de Silva A., Hyndman R.J. and Snyder, R.D. (2010). The vector
innovations structural time series framework: a simple approach to
multivariate forecasting. Statistical Modelling, 10 (4), pp.353-374
\item Hyndman, R.J., Koehler, A.B., Ord, J.K., and Snyder, R.D. (2008)
Forecasting with exponential smoothing: the state space approach,
Springer-Verlag. \url{http://www.exponentialsmoothing.net}.
\item Lütkepohl, H. (2005). New Introduction to Multiple Time Series
Analysis. New introduction to Multiple Time Series Analysis. Berlin,
Heidelberg: Springer Berlin Heidelberg.
\url{https://doi.org/10.1007/978-3-540-27752-1}
}
}
\seealso{
\code{\link[smooth]{es}, \link[forecast]{ets},
\link[forecast]{forecast}, \link[stats]{ts}, \link[stats]{Distributions}}
}
\author{
Ivan Svetunkov, \email{ivan@svetunkov.ru}
}
\keyword{models}
\keyword{multivariate}
\keyword{nonlinear}
\keyword{regression}
\keyword{ts}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/downloaders.R
\name{wget}
\alias{wget}
\title{wget}
\usage{
wget(
input_url,
output_path,
background = T,
force_overwrite = F,
quiet = F,
show_progress = T,
continue = T,
check_certificates = F,
conda_env = "echoR"
)
}
\description{
R wrapper for wget
}
\seealso{
Other downloaders:
\code{\link{axel}()},
\code{\link{downloader}()}
}
\concept{downloaders}
\keyword{internal}
|
/man/wget.Rd
|
permissive
|
UKDRI/echolocatoR
|
R
| false
| true
| 472
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/downloaders.R
\name{wget}
\alias{wget}
\title{wget}
\usage{
wget(
input_url,
output_path,
background = T,
force_overwrite = F,
quiet = F,
show_progress = T,
continue = T,
check_certificates = F,
conda_env = "echoR"
)
}
\description{
R wrapper for wget
}
\seealso{
Other downloaders:
\code{\link{axel}()},
\code{\link{downloader}()}
}
\concept{downloaders}
\keyword{internal}
|
## EXData - Project 1 - Plot 4
## Read the full UCI household power consumption file; "?" marks missing
## values in the raw data and is converted to NA via na.strings.
powerdata <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?", stringsAsFactors = FALSE)
## Keep only the two target days (dates are stored as d/m/yyyy strings).
power <- subset(powerdata, Date == "1/2/2007" | Date == "2/2/2007")
## Build a timestamp column from the separate Date and Time columns.
power$ts <- paste(power$Date, power$Time)
power$ts <- strptime(power$ts, format = "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date,format='%d/%m/%Y')
## head(power)
## Plot 4
## 2x2 panel layout: global active power, voltage, the three sub-metering
## series, and global reactive power, each plotted against time.
par(mfrow = c(2,2), mar = c(4,5,2,1), oma = c(0,0,2,0))
with(power, {
plot(power$ts, power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(power$ts, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# NOTE(review): this inner with() is redundant -- the enclosing with(power, ...)
# already provides the columns, and power$... is used explicitly anyway.
with(power, {
plot(power$ts, power$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(power$ts, power$Sub_metering_2, col = "red")
lines(power$ts, power$Sub_metering_3, col = "blue")
legend("topright", y.intersp=2, xjust=1.5, lty=1, cex=.8, c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), bty="n", col=c('black','red','blue'))
})
plot(power$ts, power$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
## Copy the on-screen plot to plot4.png (default 480x480 px) and close the
## png device.
dev.copy(png, file = "plot4.png")
dev.off()
|
/plot4.R
|
no_license
|
equan/ExData_Plotting1
|
R
| false
| false
| 1,220
|
r
|
## EXData - Project 1 - Plot 4
## Read the full UCI household power consumption file; "?" marks missing
## values in the raw data and is converted to NA via na.strings.
powerdata <- read.table("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?", stringsAsFactors = FALSE)
## Keep only the two target days (dates are stored as d/m/yyyy strings).
power <- subset(powerdata, Date == "1/2/2007" | Date == "2/2/2007")
## Build a timestamp column from the separate Date and Time columns.
power$ts <- paste(power$Date, power$Time)
power$ts <- strptime(power$ts, format = "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date,format='%d/%m/%Y')
## head(power)
## Plot 4
## 2x2 panel layout: global active power, voltage, the three sub-metering
## series, and global reactive power, each plotted against time.
par(mfrow = c(2,2), mar = c(4,5,2,1), oma = c(0,0,2,0))
with(power, {
plot(power$ts, power$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
plot(power$ts, power$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# NOTE(review): this inner with() is redundant -- the enclosing with(power, ...)
# already provides the columns, and power$... is used explicitly anyway.
with(power, {
plot(power$ts, power$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(power$ts, power$Sub_metering_2, col = "red")
lines(power$ts, power$Sub_metering_3, col = "blue")
legend("topright", y.intersp=2, xjust=1.5, lty=1, cex=.8, c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), bty="n", col=c('black','red','blue'))
})
plot(power$ts, power$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
})
## Copy the on-screen plot to plot4.png (default 480x480 px) and close the
## png device.
dev.copy(png, file = "plot4.png")
dev.off()
|
#' @title Topic Modelling in Semantic Embedding Spaces
#' @description ETM is a generative topic model combining traditional topic models (LDA) with word embeddings (word2vec). \cr
#' \itemize{
#' \item{It models each word with a categorical distribution whose natural parameter is the inner product between
#' a word embedding and an embedding of its assigned topic.}
#' \item{The model is fitted using an amortized variational inference algorithm on top of libtorch.}
#' }
#' @param k the number of topics to extract
#' @param embeddings either a matrix with pretrained word embeddings or an integer with the dimension of the word embeddings. Defaults to 50 if not provided.
#' @param dim dimension of the variational inference hyperparameter theta (passed on to \code{\link[torch]{nn_linear}}). Defaults to 800.
#' @param activation character string with the activation function of theta. Either one of 'relu', 'tanh', 'softplus', 'rrelu', 'leakyrelu', 'elu', 'selu', 'glu'. Defaults to 'relu'.
#' @param dropout dropout percentage on the variational distribution for theta (passed on to \code{\link[torch]{nn_dropout}}). Defaults to 0.5.
#' @param vocab a character vector with the words from the vocabulary. Defaults to the rownames of the \code{embeddings} argument.
#' @references \url{https://arxiv.org/pdf/1907.04907.pdf}
#' @return an object of class ETM which is a torch \code{nn_module} containing o.a.
#' \itemize{
#' \item{num_topics: }{the number of topics}
#' \item{vocab: }{character vector with the terminology used in the model}
#' \item{vocab_size: }{the number of words in \code{vocab}}
#' \item{rho: }{The word embeddings}
#' \item{alphas: }{The topic embeddings}
#' }
#' @section Methods:
#' \describe{
#' \item{\code{fit(data, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10)}}{Fit the model on a document term matrix by splitting the data in 70/30 training/test set and updating the model weights.}
#' }
#' @section Arguments:
#' \describe{
#' \item{data}{bag of words document term matrix in \code{dgCMatrix} format}
#' \item{optimizer}{object of class \code{torch_Optimizer}}
#' \item{epoch}{integer with the number of iterations to train}
#' \item{batch_size}{integer with the size of the batch}
#' \item{normalize}{logical indicating to normalize the bag of words data}
#' \item{clip}{number between 0 and 1 indicating to do gradient clipping - passed on to \code{\link[torch]{nn_utils_clip_grad_norm_}}}
#' \item{lr_anneal_factor}{divide the learning rate by this factor when the loss on the test set is monotonic for at least \code{lr_anneal_nonmono} training iterations}
#' \item{lr_anneal_nonmono}{number of iterations after which learning rate annealing is executed if the loss does not decreases}
#' }
#' @export
#' @examples
#' library(torch)
#' library(topicmodels.etm)
#' library(word2vec)
#' library(udpipe)
#' data(brussels_reviews_anno, package = "udpipe")
#' ##
#' ## Toy example with pretrained embeddings
#' ##
#'
#' ## a. build word2vec model
#' x <- subset(brussels_reviews_anno, language %in% "nl")
#' x <- paste.data.frame(x, term = "lemma", group = "doc_id")
#' set.seed(4321)
#' w2v <- word2vec(x = x$lemma, dim = 15, iter = 20, type = "cbow", min_count = 5)
#' embeddings <- as.matrix(w2v)
#'
#' ## b. build document term matrix on nouns + adjectives, align with the embedding terms
#' dtm <- subset(brussels_reviews_anno, language %in% "nl" & upos %in% c("NOUN", "ADJ"))
#' dtm <- document_term_frequencies(dtm, document = "doc_id", term = "lemma")
#' dtm <- document_term_matrix(dtm)
#' dtm <- dtm_conform(dtm, columns = rownames(embeddings))
#' dtm <- dtm[dtm_rowsums(dtm) > 0, ]
#'
#' ## create and fit an embedding topic model - 8 topics, theta 100-dimensional
#' if (torch::torch_is_installed()) {
#'
#' set.seed(4321)
#' torch_manual_seed(4321)
#' model <- ETM(k = 8, dim = 100, embeddings = embeddings, dropout = 0.5)
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#' overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000)
#' scores <- predict(model, dtm, type = "topics")
#'
#' lastbatch <- subset(overview$loss, overview$loss$batch_is_last == TRUE)
#' plot(lastbatch$epoch, lastbatch$loss)
#' plot(overview$loss_test)
#'
#' ## show top words in each topic
#' terminology <- predict(model, type = "terms", top_n = 7)
#' terminology
#'
#' ##
#' ## Toy example without pretrained word embeddings
#' ##
#' set.seed(4321)
#' torch_manual_seed(4321)
#' model <- ETM(k = 8, dim = 100, embeddings = 15, dropout = 0.5, vocab = colnames(dtm))
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#' overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000)
#' terminology <- predict(model, type = "terms", top_n = 7)
#' terminology
#'
#'
#'
#' \dontshow{
#' ##
#' ## Another example using fit_original
#' ##
#' data(ng20, package = "topicmodels.etm")
#' vocab <- ng20$vocab
#' tokens <- ng20$bow_tr$tokens
#' counts <- ng20$bow_tr$counts
#'
#' torch_manual_seed(123456789)
#' model <- ETM(k = 4, vocab = vocab, dim = 5, embeddings = 25)
#' model
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#'
#' traindata <- list(tokens = tokens, counts = counts, vocab = vocab)
#' test1 <- list(tokens = ng20$bow_ts_h1$tokens, counts = ng20$bow_ts_h1$counts, vocab = vocab)
#' test2 <- list(tokens = ng20$bow_ts_h2$tokens, counts = ng20$bow_ts_h2$counts, vocab = vocab)
#'
#' out <- model$fit_original(data = traindata, test1 = test1, test2 = test2, epoch = 4,
#' optimizer = optimizer, batch_size = 1000,
#' lr_anneal_factor = 4, lr_anneal_nonmono = 10)
#' test <- subset(out$loss, out$loss$batch_is_last == TRUE)
#' plot(test$epoch, test$loss)
#'
#' topic.centers <- as.matrix(model, type = "embedding", which = "topics")
#' word.embeddings <- as.matrix(model, type = "embedding", which = "words")
#' topic.terminology <- as.matrix(model, type = "beta")
#'
#' terminology <- predict(model, type = "terms", top_n = 4)
#' terminology
#' }
#'
#' }
ETM <- nn_module(
classname = "ETM",
initialize = function(k = 20,
embeddings,
dim = 800,
activation = c("relu", "tanh", "softplus", "rrelu", "leakyrelu", "elu", "selu", "glu"),
dropout = 0.5,
vocab = rownames(embeddings)) {
if(missing(embeddings)){
rho <- 50
}else{
rho <- embeddings
}
num_topics <- k
t_hidden_size <- dim
activation <- match.arg(activation)
if(is.matrix(rho)){
stopifnot(length(vocab) == nrow(rho))
stopifnot(all(vocab == rownames(rho)))
train_embeddings <- FALSE
rho_size <- ncol(rho)
}else{
if(!is.character(vocab)){
stop("provide in vocab a character vector")
}
train_embeddings <- TRUE
rho_size <- rho
}
enc_drop <- dropout
vocab_size <- length(vocab)
self$loss_fit <- NULL
self$vocab <- vocab
self$num_topics <- num_topics
self$vocab_size <- vocab_size
self$t_hidden_size <- t_hidden_size
self$rho_size <- rho_size
self$enc_drop <- enc_drop
self$t_drop <- nn_dropout(p = enc_drop)
self$activation <- activation
self$theta_act <- get_activation(activation)
## define the word embedding matrix \rho
if(train_embeddings){
self$rho <- nn_linear(rho_size, vocab_size, bias = FALSE)
}else{
#rho = nn.Embedding(num_embeddings, emsize)
#self.rho = embeddings.clone().float().to(device)
self$rho <- nn_embedding(num_embeddings = vocab_size, embedding_dim = rho_size, .weight = torch_tensor(rho))
#self$rho <- torch_tensor(rho)
}
## define the matrix containing the topic embeddings
self$alphas <- nn_linear(rho_size, self$num_topics, bias = FALSE)#nn.Parameter(torch.randn(rho_size, num_topics))
    ## define variational distribution for \theta_{1:D} via amortization
self$q_theta <- nn_sequential(
nn_linear(vocab_size, t_hidden_size),
self$theta_act,
nn_linear(t_hidden_size, t_hidden_size),
self$theta_act
)
self$mu_q_theta <- nn_linear(t_hidden_size, self$num_topics, bias = TRUE)
self$logsigma_q_theta <- nn_linear(t_hidden_size, self$num_topics, bias = TRUE)
},
print = function(...){
cat("Embedding Topic Model", sep = "\n")
cat(sprintf(" - topics: %s", self$num_topics), sep = "\n")
cat(sprintf(" - vocabulary size: %s", self$vocab_size), sep = "\n")
cat(sprintf(" - embedding dimension: %s", self$rho_size), sep = "\n")
cat(sprintf(" - variational distribution dimension: %s", self$t_hidden_size), sep = "\n")
cat(sprintf(" - variational distribution activation function: %s", self$activation), sep = "\n")
},
encode = function(bows){
# """Returns paramters of the variational distribution for \theta.
#
# input: bows
# batch of bag-of-words...tensor of shape bsz x V
# output: mu_theta, log_sigma_theta
# """
q_theta <- self$q_theta(bows)
if(self$enc_drop > 0){
q_theta <- self$t_drop(q_theta)
}
mu_theta <- self$mu_q_theta(q_theta)
logsigma_theta <- self$logsigma_q_theta(q_theta)
kl_theta <- -0.5 * torch_sum(1 + logsigma_theta - mu_theta$pow(2) - logsigma_theta$exp(), dim = -1)$mean()
list(mu_theta = mu_theta, logsigma_theta = logsigma_theta, kl_theta = kl_theta)
},
decode = function(theta, beta){
res <- torch_mm(theta, beta)
preds <- torch_log(res + 1e-6)
preds
},
get_beta = function(){
logit <- try(self$alphas(self$rho$weight)) # torch.mm(self.rho, self.alphas)
if(inherits(logit, "try-error")){
logit <- self$alphas(self$rho)
}
#beta <- nnf_softmax(logit, dim=0)$transpose(1, 0) ## softmax over vocab dimension
beta <- nnf_softmax(logit, dim = 1)$transpose(2, 1) ## softmax over vocab dimension
beta
},
get_theta = function(normalized_bows){
reparameterize = function(self, mu, logvar){
if(self$training){
std <- torch_exp(0.5 * logvar)
eps <- torch_randn_like(std)
eps$mul_(std)$add_(mu)
}else{
mu
}
}
msg <- self$encode(normalized_bows)
mu_theta <- msg$mu_theta
logsigma_theta <- msg$logsigma_theta
kld_theta <- msg$kl_theta
z <- reparameterize(self, mu_theta, logsigma_theta)
theta <- nnf_softmax(z, dim=-1)
list(theta = theta, kld_theta = kld_theta)
},
forward = function(bows, normalized_bows, theta = NULL, aggregate = TRUE) {
## get \theta
if(is.null(theta)){
msg <- self$get_theta(normalized_bows)
theta <- msg$theta
kld_theta <- msg$kld_theta
}else{
kld_theta <- NULL
}
## get \beta
beta <- self$get_beta()
## get prediction loss
preds <- self$decode(theta, beta)
recon_loss <- -(preds * bows)$sum(2)
#print(dim(recon_loss))
if(aggregate){
recon_loss <- recon_loss$mean()
}
list(recon_loss = recon_loss, kld_theta = kld_theta)
},
topwords = function(top_n = 10){
self$eval()
out <- list()
with_no_grad({
gammas <- self$get_beta()
for(k in seq_len(self$num_topics)){
gamma <- gammas[k, ]
gamma <- as.numeric(gamma)
gamma <- data.frame(term = self$vocab, beta = gamma, stringsAsFactors = FALSE)
gamma <- gamma[order(gamma$beta, decreasing = TRUE), ]
gamma$rank <- seq_len(nrow(gamma))
out[[k]] <- head(gamma, n = top_n)
}
})
out
},
## Run one optimisation epoch over all training documents and return a
## data.frame with the running-average loss statistics after each batch.
##
## tokencounts : list with elements tokens, counts, vocab
## optimizer   : a torch optimizer over self$parameters
## epoch       : epoch number, recorded in the returned statistics
## batch_size  : number of documents per batch
## normalize   : divide each bag-of-words row by its token total before encoding?
## clip        : gradient-norm clipping threshold (0 disables clipping)
## permute     : shuffle the document order each epoch?
train_epoch = function(tokencounts, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, permute = TRUE){
  self$train()   # enable training mode (activates dropout)
  train_tokens <- tokencounts$tokens
  train_counts <- tokencounts$counts
  vocab_size <- length(tokencounts$vocab)
  num_docs_train <- length(train_tokens)
  acc_loss <- 0            # running sum of reconstruction losses
  acc_kl_theta_loss <- 0   # running sum of KL terms
  cnt <- 0                 # number of batches processed so far
  if(permute){
    ## +1 because torch_randperm is 0-based while R indexing is 1-based
    indices <- torch_randperm(num_docs_train) + 1
  }else{
    ## For comparing end-to-end run and unit testing
    indices <- torch_tensor(seq_len(num_docs_train))
  }
  indices <- torch_split(indices, batch_size)
  losses <- list()
  for(i in seq_along(indices)){
    ind <- indices[[i]]
    optimizer$zero_grad()
    self$zero_grad()
    data_batch <- get_batch(train_tokens, train_counts, ind, vocab_size)
    ## total token count per document, kept as a column for broadcasting
    sums <- data_batch$sum(2)$unsqueeze(2)
    if(normalize){
      normalized_data_batch <- data_batch / sums
    }else{
      normalized_data_batch <- data_batch
    }
    #as.matrix(self$q_theta(data_batch[1:10, , drop = FALSE]))
    out <- self$forward(data_batch, normalized_data_batch)
    ## NELBO = reconstruction loss + KL divergence
    total_loss <- out$recon_loss + out$kld_theta
    total_loss$backward()
    if(clip > 0){
      nn_utils_clip_grad_norm_(self$parameters, max_norm = clip)
    }
    optimizer$step()
    acc_loss <- acc_loss + torch_sum(out$recon_loss)$item()
    acc_kl_theta_loss <- acc_kl_theta_loss + torch_sum(out$kld_theta)$item()
    cnt <- cnt + 1
    ## running averages over the batches seen so far in this epoch
    cur_loss <- round(acc_loss / cnt, 2)
    cur_kl_theta <- round(acc_kl_theta_loss / cnt, 2)
    cur_real_loss <- round(cur_loss + cur_kl_theta, 2)
    losses[[i]] <- data.frame(epoch = epoch,
                              batch = i,
                              batch_is_last = i == length(indices),
                              lr = optimizer$param_groups[[1]][['lr']],
                              loss = cur_loss,
                              kl_theta = cur_kl_theta,
                              nelbo = cur_real_loss,
                              batch_loss = acc_loss,
                              batch_kl_theta = acc_kl_theta_loss,
                              batch_nelbo = acc_loss + acc_kl_theta_loss)
    #cat(
    #  sprintf('Epoch: %s .. batch: %s/%s .. LR: %s .. KL_theta: %s .. Rec_loss: %s .. NELBO: %s',
    #          epoch, i, length(indices), optimizer$param_groups[[1]][['lr']], cur_kl_theta, cur_loss, cur_real_loss), sep = "\n")
  }
  losses <- do.call(rbind, losses)
  losses
},
## Document-completion perplexity: infer theta from the first half of each
## held-out document (data1) and measure how well that theta predicts the
## second half (data2). Returns round(exp(mean length-normalised loss), 1).
##
## Fix: the prediction now goes through self$decode(), which applies
## log(theta %*% beta + 1e-6). The previous bare torch_log() was
## inconsistent with training and produced -Inf (hence NaN perplexity)
## whenever a predicted probability was exactly zero.
evaluate = function(data1, data2, batch_size, normalize = TRUE){
  self$eval()
  vocab_size <- length(data1$vocab)
  tokens1 <- data1$tokens
  counts1 <- data1$counts
  tokens2 <- data2$tokens
  counts2 <- data2$counts
  indices <- torch_split(torch_tensor(seq_along(tokens1)), batch_size)
  ppl_dc <- 0
  with_no_grad({
    beta <- self$get_beta()
    acc_loss <- 0
    cnt <- 0
    for(i in seq_along(indices)){
      ## get theta from first half of docs
      ind <- indices[[i]]
      data_batch_1 <- get_batch(tokens1, counts1, ind, vocab_size)
      sums <- data_batch_1$sum(2)$unsqueeze(2)
      if(normalize){
        normalized_data_batch <- data_batch_1 / sums
      }else{
        normalized_data_batch <- data_batch_1
      }
      msg <- self$get_theta(normalized_data_batch)
      theta <- msg$theta
      ## get prediction loss using second half
      data_batch_2 <- get_batch(tokens2, counts2, ind, vocab_size)
      sums <- data_batch_2$sum(2)$unsqueeze(2)
      ## epsilon-stabilised log-probabilities, consistent with training
      preds <- self$decode(theta, beta)
      recon_loss <- -(preds * data_batch_2)$sum(2)
      ## normalise by the document length before averaging
      loss <- recon_loss / sums$squeeze()
      loss <- loss$mean()$item()
      acc_loss <- acc_loss + loss
      cnt <- cnt + 1
    }
    cur_loss <- acc_loss / cnt
    cur_loss <- as.numeric(cur_loss)
    ppl_dc <- round(exp(cur_loss), digits = 1)
  })
  ppl_dc
},
## Fit the model on a sparse document-term matrix: drop empty documents,
## split the rows 70/30 (the 30% split further halved into two test sets
## for document-completion perplexity), train via fit_original and cache
## the loss evolution on the model. Returns the loss evolution invisibly.
fit = function(data, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10){
  stopifnot(inherits(data, "sparseMatrix"))
  data <- data[Matrix::rowSums(data) > 0, ]
  splits <- split_train_test(data, train_pct = 0.7)
  holdout1 <- as_tokencounts(data[splits$test1, ])
  holdout2 <- as_tokencounts(data[splits$test2, ])
  training <- as_tokencounts(data[splits$train, ])
  history <- self$fit_original(data = training, test1 = holdout1, test2 = holdout2,
                               optimizer = optimizer, epoch = epoch,
                               batch_size = batch_size, normalize = normalize,
                               clip = clip,
                               lr_anneal_factor = lr_anneal_factor,
                               lr_anneal_nonmono = lr_anneal_nonmono)
  self$loss_fit <- history
  invisible(history)
},
## Train for `epoch` epochs; after each epoch evaluate document-completion
## perplexity on the held-out halves (test1/test2) and, when it stops
## improving, anneal the learning rate by lr_anneal_factor.
## Returns list(loss = per-batch loss data.frame, loss_test = perplexities).
##
## Fix: the annealing condition is a scalar `if` and now uses the
## short-circuiting `&&` instead of the vectorised `&`; the length guard
## thereby protects the min() comparison, and min(tail(...)) is hoisted
## into a local so it is computed once.
fit_original = function(data, test1, test2, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10, permute = TRUE){
  epochs <- epoch
  anneal_lr <- lr_anneal_factor > 0
  best_epoch <- 0
  best_val_ppl <- 1e9
  all_val_ppls <- c()
  losses <- list()
  for(epoch in seq_len(epochs)){
    lossevolution <- self$train_epoch(tokencounts = data, optimizer = optimizer, epoch = epoch, batch_size = batch_size, normalize = normalize, clip = clip, permute = permute)
    losses[[epoch]] <- lossevolution
    val_ppl <- self$evaluate(test1, test2, batch_size = batch_size, normalize = normalize)
    if(val_ppl < best_val_ppl){
      best_epoch <- epoch
      best_val_ppl <- val_ppl
      ## TODO save model
    }else{
      ## no improvement: consider annealing the learning rate once the
      ## perplexity has been non-improving for lr_anneal_nonmono epochs
      lr <- optimizer$param_groups[[1]]$lr
      recent_best <- min(tail(all_val_ppls, n = lr_anneal_nonmono))
      cat(sprintf("%s versus %s", val_ppl, recent_best), sep = "\n")
      if(anneal_lr && lr > 1e-5 && (length(all_val_ppls) > lr_anneal_nonmono) && val_ppl > recent_best){
        optimizer$param_groups[[1]]$lr <- lr / lr_anneal_factor
      }
    }
    all_val_ppls <- append(all_val_ppls, val_ppl)
    ## report only the statistics of the epoch's final batch
    lossevolution <- subset(lossevolution, batch_is_last == TRUE)
    cat(
      sprintf('Epoch: %03d/%03d, learning rate: %5f. Training data stats - KL_theta: %2f, Rec_loss: %2f, NELBO: %s. Test data stats - Loss %2f',
              lossevolution$epoch, epochs, optimizer$param_groups[[1]][['lr']], lossevolution$kl_theta, lossevolution$loss, lossevolution$nelbo,
              val_ppl), sep = "\n")
  }
  losses <- do.call(rbind, losses)
  list(loss = losses, loss_test = all_val_ppls)
}
)
## Materialise a dense bag-of-words tensor of shape (batch size x vocab_size)
## for the documents selected by `ind`; `tokens` and `counts` are parallel
## per-document lists of term indices and their frequencies.
get_batch <- function(tokens, counts, ind, vocab_size){
  doc_ids <- as.integer(ind)
  bow <- torch_zeros(c(length(doc_ids), vocab_size))
  selected_tokens <- tokens[doc_ids]
  selected_counts <- counts[doc_ids]
  for(row in seq_along(selected_tokens)){
    ## vectorised row fill: term indices on the left, their counts on the right
    bow[row, selected_tokens[[row]]] <- as.numeric(selected_counts[[row]])
  }
  bow
}
## Map an activation-function name to the corresponding torch module.
## Fix: switch() previously had no default branch and silently returned
## NULL for an unknown name; it now fails loudly. Also uses `<-` for the
## top-level assignment, consistent with the other functions in this file.
get_activation <- function(act) {
  switch(act,
         tanh = nn_tanh(),
         relu = nn_relu(),
         softplus = nn_softplus(),
         rrelu = nn_rrelu(),
         leakyrelu = nn_leaky_relu(),
         elu = nn_elu(),
         selu = nn_selu(),
         glu = nn_glu(),
         stop("unknown activation function: ", act))
}
## Randomly partition the row indices of x into a training set and two
## (roughly equally sized) test sets; the two test halves are used for
## document-completion perplexity. Returns sorted integer index vectors
## in list(train, test1, test2).
## Fix: train_pct is now validated as a numeric in [0, 1]; previously only
## the upper bound was checked, so a negative value produced a confusing
## downstream error from sample().
split_train_test <- function(x, train_pct = 0.7){
  stopifnot(is.numeric(train_pct), train_pct >= 0, train_pct <= 1)
  test_pct <- 1 - train_pct
  idx <- seq_len(nrow(x))
  ## sample() truncates a fractional size towards zero
  tst <- sample(idx, size = nrow(x) * test_pct, replace = FALSE)
  tst1 <- sample(tst, size = round(length(tst) / 2), replace = FALSE)
  tst2 <- setdiff(tst, tst1)
  trn <- setdiff(idx, tst)
  list(train = sort(trn), test1 = sort(tst1), test2 = sort(tst2))
}
#' @title Predict to which ETM topic a text belongs
#' @description Predict functionality for an \code{ETM} object
#' @param object an object of class \code{ETM}
#' @param type either 'topics' (score the documents in \code{newdata}) or 'terms' (list the most relevant words per topic)
#' @param newdata bag of words document term matrix in \code{dgCMatrix} format; only used when \code{type} is 'topics'
#' @param batch_size integer with the size of the batch
#' @param normalize logical indicating to normalize the bag of words data
#' @param top_n integer with number of most relevant words for each topic to extract
#' @param ... not used
#' @export
predict.ETM <- function(object, newdata, type = c("topics", "terms"), batch_size = nrow(newdata), normalize = TRUE, top_n = 10, ...){
  type <- match.arg(type)
  if(type == "terms"){
    ## newdata is not needed to list the per-topic terminology
    object$topwords(top_n)
  }else{
    if(any(Matrix::rowSums(newdata) <= 0)){
      stop("All rows of newdata should have at least 1 count")
    }
    x <- as_tokencounts(newdata)
    tokens <- x$tokens
    counts <- x$counts
    num_topics <- object$num_topics
    vocab_size <- object$vocab_size
    preds <- list()
    with_no_grad({
      indices = torch_tensor(seq_along(tokens))
      indices = torch_split(indices, batch_size)
      ## NOTE(review): thetaWeightedAvg is accumulated below but never
      ## returned or stored — appears to be dead code; confirm intent.
      thetaWeightedAvg = torch_zeros(1, num_topics)
      cnt = 0
      for(i in seq_along(indices)){
        ## get theta from first half of docs
        ind <- indices[[i]]
        data_batch = get_batch(tokens, counts, ind, vocab_size)
        ## total token count per document, column-shaped for broadcasting
        sums <- data_batch$sum(2)$unsqueeze(2)
        cnt = cnt + as.numeric(sums$sum(1)$squeeze())
        if(normalize){
          normalized_data_batch <- data_batch / sums
        }else{
          normalized_data_batch <- data_batch
        }
        ## topic proportions for each document in the batch
        theta <- object$get_theta(normalized_data_batch)$theta
        preds[[i]] <- as.matrix(theta)
        weighed_theta = sums * theta
        thetaWeightedAvg = thetaWeightedAvg + weighed_theta$sum(1)$unsqueeze(1)
      }
      thetaWeightedAvg = thetaWeightedAvg$squeeze() / cnt
    })
    ## one row per document, one column per topic
    preds <- do.call(rbind, preds)
    rownames(preds) <- rownames(newdata)
    preds
  }
}
#' @title Get matrices out of an ETM object
#' @description Convenience functions to extract embeddings of the cluster centers, the word embeddings
#' and the word emittance by each topic called beta, which is technically the softmax-transformed inner product of word embedding and topic embeddings
#' @param x an object of class \code{ETM}
#' @param type character string with the type of information to extract: either 'beta' (words-by-topics emittance matrix) or 'embedding'. Defaults to 'embedding'.
#' @param which if type is set to 'embedding', which embedding, either 'words' or 'topics'. Defaults to 'topics'.
#' @param ... not used
#' @export
as.matrix.ETM <- function(x, type = c("embedding", "beta"), which = c("topics", "words"), ...){
  type <- match.arg(type)
  which <- match.arg(which)
  self <- x
  self$eval()   # no dropout when extracting weights
  if(type == "embedding"){
    if(which == "topics"){
      with_no_grad({
        ## topic embedding matrix (rows = topics)
        out <- as.matrix(self$parameters$alphas.weight)
      })
    }else if(which == "words"){
      with_no_grad({
        ## word embedding matrix (rows = vocabulary terms)
        out <- as.matrix(self$parameters$rho.weight)
        rownames(out) <- self$vocab
      })
    }
  }else if(type == "beta"){
    with_no_grad({
      gammas <- self$get_beta()
      gammas <- as.matrix(gammas)
      colnames(gammas) <- self$vocab
    })
    ## transpose so the result is terms x topics
    out <- t(gammas)
  }
  out
}
#' @title Plot functionality for an ETM object
#' @description Convenience function allowing to plot the evolution of the loss
#' on the training and the test set, as recorded by the \code{fit} method
#' @param x an object of class \code{ETM}
#' @param type character string with the type of plot, either 'loss' or 'topics' (the latter is not implemented yet)
#' @param ... not used
#' @export
plot.ETM <- function(x, type = c("loss", "topics"), ...){
  type <- match.arg(type)
  if(type == "loss"){
    loss_evolution <- x$loss_fit
    if(is.null(loss_evolution)){
      stop("You haven't trained the model yet")
    }
    ## restore the graphics parameters on exit
    oldpar <- par(no.readonly = TRUE)
    on.exit({
      par(oldpar)
    })
    ## keep only the statistics of the last batch of each epoch
    combined <- loss_evolution$loss[loss_evolution$loss$batch_is_last == TRUE, ]
    combined$loss_test <- loss_evolution$loss_test
    par(mfrow = c(1, 2))
    plot(combined$epoch, combined$loss, xlab = "Epoch", ylab = "loss", main = "Avg batch loss evolution\non 70% training set", col = "steelblue", type = "b", pch = 20, lty = 2)
    plot(combined$epoch, combined$loss_test, xlab = "Epoch", ylab = "exp(loss)", main = "Avg batch loss evolution\non 30% test set", col = "purple", type = "b", pch = 20, lty = 2)
  }else{
    .NotYetImplemented()
  }
}
#' @title Project ETM embeddings using UMAP
#' @description Uses the uwot package to map the word embeddings and the center of the topic embeddings to a 2-dimensional space
#' @param object object of class \code{ETM}
#' @param type character string with the type of summary. Defaults to 'umap'.
#' @param n_components the dimension of the space to embed into. Passed on to \code{\link[uwot]{umap}}
#' @param top_n passed on to \code{\link{predict.ETM}} to get the top_n most relevant words for each topic in the 2-dimensional space
#' @param ... further arguments passed onto \code{\link[uwot]{umap}}
#' @seealso \code{\link[uwot]{umap}}
#' @export
summary.ETM <- function(object, type = c("umap"), n_components = 2, top_n = 20, ...){
  type <- match.arg(type)
  if(type == "umap"){
    requireNamespace("uwot")
    centers <- as.matrix(object, type = "embedding", which = "topics")
    embeddings <- as.matrix(object, type = "embedding", which = "words")
    ## fit UMAP on the word embeddings and project the topic centers into
    ## the same low-dimensional space
    manifold <- uwot::umap(embeddings, n_components = n_components, ret_model = TRUE, ...)
    centers <- uwot::umap_transform(X = centers, model = manifold)
    words <- manifold$embedding
    rownames(words) <- rownames(embeddings)
    ## NOTE(review): self-assignment below is a no-op — it was probably
    ## intended to label the cluster centers; confirm the intent.
    rownames(centers) <- rownames(centers)
    ## attach the topic number to each topic's top_n terms
    terminology <- predict(object, type = "terms", top_n = top_n)
    terminology <- mapply(seq_along(terminology), terminology, FUN = function(topicnr, terminology){
      terminology$cluster <- rep(topicnr, nrow(terminology))
      terminology
    }, SIMPLIFY = FALSE)
    terminology <- do.call(rbind, terminology)
    ## join the projected coordinates onto the terminology and combine the
    ## word points with the projected cluster centers into one data.frame
    space.2d.words <- merge(x = terminology, y = data.frame(x = words[, 1], y = words[, 2], term = rownames(words), stringsAsFactors = FALSE), by = "term")
    space.2d.centers <- data.frame(x = centers[, 1], y = centers[, 2], term = paste("Cluster-", seq_len(nrow(centers)), sep = ""), cluster = seq_len(nrow(centers)), stringsAsFactors = FALSE)
    space.2d.words$type <- rep("words", nrow(space.2d.words))
    space.2d.words <- space.2d.words[order(space.2d.words$cluster, space.2d.words$rank, decreasing = FALSE), ]
    space.2d.centers$type <- rep("centers", nrow(space.2d.centers))
    space.2d.centers$rank <- rep(0L, nrow(space.2d.centers))
    space.2d.centers$beta <- rep(NA_real_, nrow(space.2d.centers))
    fields <- c("type", "term", "cluster", "rank", "beta", "x", "y")
    df <- rbind(space.2d.words[, fields], space.2d.centers[, fields])
    ## per cluster: scale the term weight by the cluster's maximum beta;
    ## centers (beta = NA) get a fixed weight of 0.8
    df <- split(df, df$cluster)
    df <- lapply(df, FUN = function(x){
      x$weight <- ifelse(is.na(x$beta), 0.8, x$beta / max(x$beta, na.rm = TRUE))
      x
    })
    df <- do.call(rbind, df)
    rownames(df) <- NULL
    list(center = centers, words = words, embed_2d = df)
  }else{
    .NotYetImplemented()
  }
}
|
/R/ETM.R
|
permissive
|
mstei4176/ETM
|
R
| false
| false
| 27,311
|
r
|
#' @title Topic Modelling in Semantic Embedding Spaces
#' @description ETM is a generative topic model combining traditional topic models (LDA) with word embeddings (word2vec). \cr
#' \itemize{
#' \item{It models each word with a categorical distribution whose natural parameter is the inner product between
#' a word embedding and an embedding of its assigned topic.}
#' \item{The model is fitted using an amortized variational inference algorithm on top of libtorch.}
#' }
#' @param k the number of topics to extract
#' @param embeddings either a matrix with pretrained word embeddings or an integer with the dimension of the word embeddings. Defaults to 50 if not provided.
#' @param dim dimension of the variational inference hyperparameter theta (passed on to \code{\link[torch]{nn_linear}}). Defaults to 800.
#' @param activation character string with the activation function of theta. Either one of 'relu', 'tanh', 'softplus', 'rrelu', 'leakyrelu', 'elu', 'selu', 'glu'. Defaults to 'relu'.
#' @param dropout dropout percentage on the variational distribution for theta (passed on to \code{\link[torch]{nn_dropout}}). Defaults to 0.5.
#' @param vocab a character vector with the words from the vocabulary. Defaults to the rownames of the \code{embeddings} argument.
#' @references \url{https://arxiv.org/pdf/1907.04907.pdf}
#' @return an object of class ETM which is a torch \code{nn_module} containing, amongst others,
#' \itemize{
#' \item{num_topics: }{the number of topics}
#' \item{vocab: }{character vector with the terminology used in the model}
#' \item{vocab_size: }{the number of words in \code{vocab}}
#' \item{rho: }{The word embeddings}
#' \item{alphas: }{The topic embeddings}
#' }
#' @section Methods:
#' \describe{
#' \item{\code{fit(data, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10)}}{Fit the model on a document term matrix by splitting the data in 70/30 training/test set and updating the model weights.}
#' }
#' @section Arguments:
#' \describe{
#' \item{data}{bag of words document term matrix in \code{dgCMatrix} format}
#' \item{optimizer}{object of class \code{torch_Optimizer}}
#' \item{epoch}{integer with the number of iterations to train}
#' \item{batch_size}{integer with the size of the batch}
#' \item{normalize}{logical indicating to normalize the bag of words data}
#' \item{clip}{number between 0 and 1 indicating to do gradient clipping - passed on to \code{\link[torch]{nn_utils_clip_grad_norm_}}}
#' \item{lr_anneal_factor}{divide the learning rate by this factor when the loss on the test set is monotonic for at least \code{lr_anneal_nonmono} training iterations}
#' \item{lr_anneal_nonmono}{number of iterations after which learning rate annealing is executed if the loss does not decrease}
#' }
#' @export
#' @examples
#' library(torch)
#' library(topicmodels.etm)
#' library(word2vec)
#' library(udpipe)
#' data(brussels_reviews_anno, package = "udpipe")
#' ##
#' ## Toy example with pretrained embeddings
#' ##
#'
#' ## a. build word2vec model
#' x <- subset(brussels_reviews_anno, language %in% "nl")
#' x <- paste.data.frame(x, term = "lemma", group = "doc_id")
#' set.seed(4321)
#' w2v <- word2vec(x = x$lemma, dim = 15, iter = 20, type = "cbow", min_count = 5)
#' embeddings <- as.matrix(w2v)
#'
#' ## b. build document term matrix on nouns + adjectives, align with the embedding terms
#' dtm <- subset(brussels_reviews_anno, language %in% "nl" & upos %in% c("NOUN", "ADJ"))
#' dtm <- document_term_frequencies(dtm, document = "doc_id", term = "lemma")
#' dtm <- document_term_matrix(dtm)
#' dtm <- dtm_conform(dtm, columns = rownames(embeddings))
#' dtm <- dtm[dtm_rowsums(dtm) > 0, ]
#'
#' ## create and fit an embedding topic model - 8 topics, theta 100-dimensional
#' if (torch::torch_is_installed()) {
#'
#' set.seed(4321)
#' torch_manual_seed(4321)
#' model <- ETM(k = 8, dim = 100, embeddings = embeddings, dropout = 0.5)
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#' overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000)
#' scores <- predict(model, dtm, type = "topics")
#'
#' lastbatch <- subset(overview$loss, overview$loss$batch_is_last == TRUE)
#' plot(lastbatch$epoch, lastbatch$loss)
#' plot(overview$loss_test)
#'
#' ## show top words in each topic
#' terminology <- predict(model, type = "terms", top_n = 7)
#' terminology
#'
#' ##
#' ## Toy example without pretrained word embeddings
#' ##
#' set.seed(4321)
#' torch_manual_seed(4321)
#' model <- ETM(k = 8, dim = 100, embeddings = 15, dropout = 0.5, vocab = colnames(dtm))
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#' overview <- model$fit(data = dtm, optimizer = optimizer, epoch = 40, batch_size = 1000)
#' terminology <- predict(model, type = "terms", top_n = 7)
#' terminology
#'
#'
#'
#' \dontshow{
#' ##
#' ## Another example using fit_original
#' ##
#' data(ng20, package = "topicmodels.etm")
#' vocab <- ng20$vocab
#' tokens <- ng20$bow_tr$tokens
#' counts <- ng20$bow_tr$counts
#'
#' torch_manual_seed(123456789)
#' model <- ETM(k = 4, vocab = vocab, dim = 5, embeddings = 25)
#' model
#' optimizer <- optim_adam(params = model$parameters, lr = 0.005, weight_decay = 0.0000012)
#'
#' traindata <- list(tokens = tokens, counts = counts, vocab = vocab)
#' test1 <- list(tokens = ng20$bow_ts_h1$tokens, counts = ng20$bow_ts_h1$counts, vocab = vocab)
#' test2 <- list(tokens = ng20$bow_ts_h2$tokens, counts = ng20$bow_ts_h2$counts, vocab = vocab)
#'
#' out <- model$fit_original(data = traindata, test1 = test1, test2 = test2, epoch = 4,
#' optimizer = optimizer, batch_size = 1000,
#' lr_anneal_factor = 4, lr_anneal_nonmono = 10)
#' test <- subset(out$loss, out$loss$batch_is_last == TRUE)
#' plot(test$epoch, test$loss)
#'
#' topic.centers <- as.matrix(model, type = "embedding", which = "topics")
#' word.embeddings <- as.matrix(model, type = "embedding", which = "words")
#' topic.terminology <- as.matrix(model, type = "beta")
#'
#' terminology <- predict(model, type = "terms", top_n = 4)
#' terminology
#' }
#'
#' }
ETM <- nn_module(
  classname = "ETM",
  ## Construct the ETM torch module: word embeddings (rho), topic embeddings
  ## (alphas) and the amortized variational encoder q_theta producing the
  ## parameters of the posterior over the topic proportions theta.
  ##
  ## k          : number of topics
  ## embeddings : pretrained word embedding matrix, or an integer giving the
  ##              dimension of embeddings trained from scratch (defaults to 50)
  ## dim        : hidden size of the variational encoder
  ## activation : activation function used inside the encoder
  ## dropout    : dropout probability applied to the encoder output
  ## vocab      : character vector of terms; defaults to rownames(embeddings)
  initialize = function(k = 20,
                        embeddings,
                        dim = 800,
                        activation = c("relu", "tanh", "softplus", "rrelu", "leakyrelu", "elu", "selu", "glu"),
                        dropout = 0.5,
                        vocab = rownames(embeddings)) {
    ## no pretrained embeddings supplied: train 50-dimensional embeddings
    if(missing(embeddings)){
      rho <- 50
    }else{
      rho <- embeddings
    }
    num_topics <- k
    t_hidden_size <- dim
    activation <- match.arg(activation)
    if(is.matrix(rho)){
      ## pretrained embeddings: the vocabulary must line up with the rows
      stopifnot(length(vocab) == nrow(rho))
      stopifnot(all(vocab == rownames(rho)))
      train_embeddings <- FALSE
      rho_size <- ncol(rho)
    }else{
      if(!is.character(vocab)){
        stop("provide in vocab a character vector")
      }
      train_embeddings <- TRUE
      rho_size <- rho
    }
    enc_drop <- dropout
    vocab_size <- length(vocab)
    self$loss_fit <- NULL   # filled in by $fit() with the loss evolution
    self$vocab <- vocab
    self$num_topics <- num_topics
    self$vocab_size <- vocab_size
    self$t_hidden_size <- t_hidden_size
    self$rho_size <- rho_size
    self$enc_drop <- enc_drop
    self$t_drop <- nn_dropout(p = enc_drop)
    self$activation <- activation
    self$theta_act <- get_activation(activation)
    ## define the word embedding matrix \rho
    if(train_embeddings){
      self$rho <- nn_linear(rho_size, vocab_size, bias = FALSE)
    }else{
      #rho = nn.Embedding(num_embeddings, emsize)
      #self.rho = embeddings.clone().float().to(device)
      self$rho <- nn_embedding(num_embeddings = vocab_size, embedding_dim = rho_size, .weight = torch_tensor(rho))
      #self$rho <- torch_tensor(rho)
    }
    ## define the matrix containing the topic embeddings
    self$alphas <- nn_linear(rho_size, self$num_topics, bias = FALSE)#nn.Parameter(torch.randn(rho_size, num_topics))
    ## define variational distribution for \theta_{1:D} via amortization
    self$q_theta <- nn_sequential(
      nn_linear(vocab_size, t_hidden_size),
      self$theta_act,
      nn_linear(t_hidden_size, t_hidden_size),
      self$theta_act
    )
    ## heads producing the mean and log-variance of q(theta)
    self$mu_q_theta <- nn_linear(t_hidden_size, self$num_topics, bias = TRUE)
    self$logsigma_q_theta <- nn_linear(t_hidden_size, self$num_topics, bias = TRUE)
  },
  ## Human-readable description of the model configuration.
  print = function(...){
    cat("Embedding Topic Model", sep = "\n")
    cat(sprintf(" - topics: %s", self$num_topics), sep = "\n")
    cat(sprintf(" - vocabulary size: %s", self$vocab_size), sep = "\n")
    cat(sprintf(" - embedding dimension: %s", self$rho_size), sep = "\n")
    cat(sprintf(" - variational distribution dimension: %s", self$t_hidden_size), sep = "\n")
    cat(sprintf(" - variational distribution activation function: %s", self$activation), sep = "\n")
  },
  encode = function(bows){
    # """Returns paramters of the variational distribution for \theta.
    #
    # input: bows
    #         batch of bag-of-words...tensor of shape bsz x V
    # output: mu_theta, log_sigma_theta
    # """
    q_theta <- self$q_theta(bows)
    if(self$enc_drop > 0){
      q_theta <- self$t_drop(q_theta)
    }
    mu_theta <- self$mu_q_theta(q_theta)
    logsigma_theta <- self$logsigma_q_theta(q_theta)
    ## closed-form KL divergence between q(theta) and a standard Gaussian
    kl_theta <- -0.5 * torch_sum(1 + logsigma_theta - mu_theta$pow(2) - logsigma_theta$exp(), dim = -1)$mean()
    list(mu_theta = mu_theta, logsigma_theta = logsigma_theta, kl_theta = kl_theta)
  },
  ## Log-probabilities of each term under the mixture theta %*% beta;
  ## the epsilon keeps the log finite for zero probabilities.
  decode = function(theta, beta){
    res <- torch_mm(theta, beta)
    preds <- torch_log(res + 1e-6)
    preds
  },
  ## Topic-term distribution: softmax over the vocabulary of the inner
  ## product between word embeddings and topic embeddings. The try() covers
  ## both trainable (nn_linear, has $weight) and fixed embeddings.
  get_beta = function(){
    logit <- try(self$alphas(self$rho$weight)) # torch.mm(self.rho, self.alphas)
    if(inherits(logit, "try-error")){
      logit <- self$alphas(self$rho)
    }
    #beta <- nnf_softmax(logit, dim=0)$transpose(1, 0) ## softmax over vocab dimension
    beta <- nnf_softmax(logit, dim = 1)$transpose(2, 1) ## softmax over vocab dimension
    beta
  },
  ## Encode the (normalised) bag-of-words into topic proportions theta via
  ## the reparameterization trick; sampling only happens in training mode.
  get_theta = function(normalized_bows){
    reparameterize = function(self, mu, logvar){
      if(self$training){
        std <- torch_exp(0.5 * logvar)
        eps <- torch_randn_like(std)
        eps$mul_(std)$add_(mu)   # in-place: eps * std + mu
      }else{
        ## evaluation mode: use the posterior mean deterministically
        mu
      }
    }
    msg <- self$encode(normalized_bows)
    mu_theta <- msg$mu_theta
    logsigma_theta <- msg$logsigma_theta
    kld_theta <- msg$kl_theta
    z <- reparameterize(self, mu_theta, logsigma_theta)
    theta <- nnf_softmax(z, dim=-1)
    list(theta = theta, kld_theta = kld_theta)
  },
  ## Forward pass: reconstruction loss plus the KL term of theta's posterior.
  ## When theta is supplied the encoder is skipped and kld_theta is NULL.
  forward = function(bows, normalized_bows, theta = NULL, aggregate = TRUE) {
    ## get \theta
    if(is.null(theta)){
      msg <- self$get_theta(normalized_bows)
      theta <- msg$theta
      kld_theta <- msg$kld_theta
    }else{
      kld_theta <- NULL
    }
    ## get \beta
    beta <- self$get_beta()
    ## get prediction loss
    preds <- self$decode(theta, beta)
    ## negative log-likelihood per document (sum over the vocabulary axis)
    recon_loss <- -(preds * bows)$sum(2)
    #print(dim(recon_loss))
    if(aggregate){
      recon_loss <- recon_loss$mean()
    }
    list(recon_loss = recon_loss, kld_theta = kld_theta)
  },
  ## Return for every topic a data.frame (term, beta, rank) with the top_n
  ## highest-weighted terms.
  topwords = function(top_n = 10){
    self$eval()
    out <- list()
    with_no_grad({
      gammas <- self$get_beta()
      for(k in seq_len(self$num_topics)){
        gamma <- gammas[k, ]
        gamma <- as.numeric(gamma)
        gamma <- data.frame(term = self$vocab, beta = gamma, stringsAsFactors = FALSE)
        gamma <- gamma[order(gamma$beta, decreasing = TRUE), ]
        gamma$rank <- seq_len(nrow(gamma))
        out[[k]] <- head(gamma, n = top_n)
      }
    })
    out
  },
  ## One optimisation epoch; returns a data.frame with running-average loss
  ## statistics per batch.
  train_epoch = function(tokencounts, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, permute = TRUE){
    self$train()   # training mode (activates dropout)
    train_tokens <- tokencounts$tokens
    train_counts <- tokencounts$counts
    vocab_size <- length(tokencounts$vocab)
    num_docs_train <- length(train_tokens)
    acc_loss <- 0            # running sum of reconstruction losses
    acc_kl_theta_loss <- 0   # running sum of KL terms
    cnt <- 0                 # batches processed so far
    if(permute){
      ## +1: torch_randperm is 0-based, R indexing is 1-based
      indices <- torch_randperm(num_docs_train) + 1
    }else{
      ## For comparing end-to-end run and unit testing
      indices <- torch_tensor(seq_len(num_docs_train))
    }
    indices <- torch_split(indices, batch_size)
    losses <- list()
    for(i in seq_along(indices)){
      ind <- indices[[i]]
      optimizer$zero_grad()
      self$zero_grad()
      data_batch <- get_batch(train_tokens, train_counts, ind, vocab_size)
      ## token total per document, column-shaped for broadcasting
      sums <- data_batch$sum(2)$unsqueeze(2)
      if(normalize){
        normalized_data_batch <- data_batch / sums
      }else{
        normalized_data_batch <- data_batch
      }
      #as.matrix(self$q_theta(data_batch[1:10, , drop = FALSE]))
      out <- self$forward(data_batch, normalized_data_batch)
      ## NELBO = reconstruction loss + KL divergence
      total_loss <- out$recon_loss + out$kld_theta
      total_loss$backward()
      if(clip > 0){
        nn_utils_clip_grad_norm_(self$parameters, max_norm = clip)
      }
      optimizer$step()
      acc_loss <- acc_loss + torch_sum(out$recon_loss)$item()
      acc_kl_theta_loss <- acc_kl_theta_loss + torch_sum(out$kld_theta)$item()
      cnt <- cnt + 1
      ## running averages over the batches seen so far in this epoch
      cur_loss <- round(acc_loss / cnt, 2)
      cur_kl_theta <- round(acc_kl_theta_loss / cnt, 2)
      cur_real_loss <- round(cur_loss + cur_kl_theta, 2)
      losses[[i]] <- data.frame(epoch = epoch,
                                batch = i,
                                batch_is_last = i == length(indices),
                                lr = optimizer$param_groups[[1]][['lr']],
                                loss = cur_loss,
                                kl_theta = cur_kl_theta,
                                nelbo = cur_real_loss,
                                batch_loss = acc_loss,
                                batch_kl_theta = acc_kl_theta_loss,
                                batch_nelbo = acc_loss + acc_kl_theta_loss)
      #cat(
      #  sprintf('Epoch: %s .. batch: %s/%s .. LR: %s .. KL_theta: %s .. Rec_loss: %s .. NELBO: %s',
      #          epoch, i, length(indices), optimizer$param_groups[[1]][['lr']], cur_kl_theta, cur_loss, cur_real_loss), sep = "\n")
    }
    losses <- do.call(rbind, losses)
    losses
  },
  ## Document-completion perplexity: infer theta from the first halves
  ## (data1) and score the second halves (data2).
  evaluate = function(data1, data2, batch_size, normalize = TRUE){
    self$eval()
    vocab_size <- length(data1$vocab)
    tokens1 <- data1$tokens
    counts1 <- data1$counts
    tokens2 <- data2$tokens
    counts2 <- data2$counts
    indices <- torch_split(torch_tensor(seq_along(tokens1)), batch_size)
    ppl_dc <- 0
    with_no_grad({
      beta <- self$get_beta()
      acc_loss <- 0
      cnt <- 0
      for(i in seq_along(indices)){
        ## get theta from first half of docs
        ind <- indices[[i]]
        data_batch_1 <- get_batch(tokens1, counts1, ind, vocab_size)
        sums <- data_batch_1$sum(2)$unsqueeze(2)
        if(normalize){
          normalized_data_batch <- data_batch_1 / sums
        }else{
          normalized_data_batch <- data_batch_1
        }
        msg <- self$get_theta(normalized_data_batch)
        theta <- msg$theta
        ## get prediction loss using second half
        data_batch_2 <- get_batch(tokens2, counts2, ind, vocab_size)
        sums <- data_batch_2$sum(2)$unsqueeze(2)
        res <- torch_mm(theta, beta)
        ## NOTE(review): unlike decode(), no epsilon is added before the
        ## log here, so a zero probability would yield -Inf — confirm.
        preds <- torch_log(res)
        recon_loss <- -(preds * data_batch_2)$sum(2)
        ## normalise by document length before averaging
        loss <- recon_loss / sums$squeeze()
        loss <- loss$mean()$item()
        acc_loss <- acc_loss + loss
        cnt <- cnt + 1
      }
      cur_loss <- acc_loss / cnt
      cur_loss <- as.numeric(cur_loss)
      ppl_dc <- round(exp(cur_loss), digits = 1)
    })
    ppl_dc
  },
  ## Fit on a sparse document-term matrix: drop empty docs, split 70/30
  ## (the 30% halved into two test sets), train, cache the loss evolution.
  fit = function(data, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10){
    stopifnot(inherits(data, "sparseMatrix"))
    data <- data[Matrix::rowSums(data) > 0, ]
    idx <- split_train_test(data, train_pct = 0.7)
    test1 <- as_tokencounts(data[idx$test1, ])
    test2 <- as_tokencounts(data[idx$test2, ])
    data <- as_tokencounts(data[idx$train, ])
    loss_evolution <- self$fit_original(data = data, test1 = test1, test2 = test2, optimizer = optimizer, epoch = epoch,
                                        batch_size = batch_size, normalize = normalize, clip = clip,
                                        lr_anneal_factor = lr_anneal_factor, lr_anneal_nonmono = lr_anneal_nonmono)
    self$loss_fit <- loss_evolution
    invisible(loss_evolution)
  },
  ## Train for `epoch` epochs, evaluating perplexity after each epoch and
  ## annealing the learning rate when the perplexity stops improving.
  fit_original = function(data, test1, test2, optimizer, epoch, batch_size, normalize = TRUE, clip = 0, lr_anneal_factor = 4, lr_anneal_nonmono = 10, permute = TRUE){
    epochs <- epoch
    anneal_lr <- lr_anneal_factor > 0
    best_epoch <- 0
    best_val_ppl <- 1e9
    all_val_ppls <- c()
    losses <- list()
    for(epoch in seq_len(epochs)){
      lossevolution <- self$train_epoch(tokencounts = data, optimizer = optimizer, epoch = epoch, batch_size = batch_size, normalize = normalize, clip = clip, permute = permute)
      losses[[epoch]] <- lossevolution
      val_ppl <- self$evaluate(test1, test2, batch_size = batch_size, normalize = normalize)
      if(val_ppl < best_val_ppl){
        best_epoch <- epoch
        best_val_ppl <- val_ppl
        ## TODO save model
      }else{
        ## check whether to anneal lr
        lr <- optimizer$param_groups[[1]]$lr
        cat(sprintf("%s versus %s", val_ppl, min(tail(all_val_ppls, n = lr_anneal_nonmono))), sep = "\n")
        ## NOTE(review): scalar condition uses the vectorised `&`; the
        ## conventional operator for `if` is the short-circuiting `&&`.
        if(anneal_lr & lr > 1e-5 & (length(all_val_ppls) > lr_anneal_nonmono) & val_ppl > min(tail(all_val_ppls, n = lr_anneal_nonmono))){
          optimizer$param_groups[[1]]$lr <- optimizer$param_groups[[1]]$lr / lr_anneal_factor
        }
      }
      all_val_ppls <- append(all_val_ppls, val_ppl)
      ## report only the statistics of the epoch's final batch
      lossevolution <- subset(lossevolution, batch_is_last == TRUE)
      cat(
        sprintf('Epoch: %03d/%03d, learning rate: %5f. Training data stats - KL_theta: %2f, Rec_loss: %2f, NELBO: %s. Test data stats - Loss %2f',
                lossevolution$epoch, epochs, optimizer$param_groups[[1]][['lr']], lossevolution$kl_theta, lossevolution$loss, lossevolution$nelbo,
                val_ppl), sep = "\n")
    }
    losses <- do.call(rbind, losses)
    list(loss = losses, loss_test = all_val_ppls)
  }
)
## Materialise a dense bag-of-words tensor of shape (batch size x vocab_size)
## for the documents selected by `ind`; `tokens` and `counts` are parallel
## per-document lists of term indices and their frequencies.
get_batch <- function(tokens, counts, ind, vocab_size){
  ind <- as.integer(ind)
  batch_size <- length(ind)
  data_batch <- torch_zeros(c(batch_size, vocab_size))
  tokens <- tokens[ind]
  counts <- counts[ind]
  for(i in seq_along(tokens)){
    tok <- tokens[[i]]
    cnt <- counts[[i]]
    ## vectorised row fill: term indices on the left, their counts on the right
    data_batch[i, tok] <- as.numeric(cnt)
    #for(j in tok){
    #  data_batch[i, j] <- cnt[j]
    #}
  }
  data_batch
}
## Map an activation-function name to the corresponding torch module.
## NOTE(review): switch() has no default branch, so an unknown name returns
## NULL invisibly — confirm callers always pass a value validated by
## match.arg() first.
get_activation = function(act) {
  switch(act,
         tanh = nn_tanh(),
         relu = nn_relu(),
         softplus = nn_softplus(),
         rrelu = nn_rrelu(),
         leakyrelu = nn_leaky_relu(),
         elu = nn_elu(),
         selu = nn_selu(),
         glu = nn_glu())
}
## Randomly partition the row indices of x into a training set and two
## (roughly equally sized) test sets, used as the two document halves when
## computing document-completion perplexity. Returns sorted integer index
## vectors in list(train, test1, test2).
split_train_test <- function(x, train_pct = 0.7){
  stopifnot(train_pct <= 1)
  holdout_pct <- 1 - train_pct
  all_rows <- seq_len(nrow(x))
  holdout <- sample(all_rows, size = nrow(x) * holdout_pct, replace = FALSE)
  first_half <- sample(holdout, size = round(length(holdout) / 2), replace = FALSE)
  second_half <- setdiff(holdout, first_half)
  kept <- setdiff(all_rows, holdout)
  list(train = sort(kept), test1 = sort(first_half), test2 = sort(second_half))
}
#' @title Predict to which ETM topic a text belongs
#' @description Predict functionality for an \code{ETM} object
#' @param object an object of class \code{ETM}
#' @param type either 'topics' (score the documents in \code{newdata}) or 'terms' (list the most relevant words per topic)
#' @param newdata bag of words document term matrix in \code{dgCMatrix} format; only used when \code{type} is 'topics'
#' @param batch_size integer with the size of the batch
#' @param normalize logical indicating to normalize the bag of words data
#' @param top_n integer with number of most relevant words for each topic to extract
#' @param ... not used
#' @export
predict.ETM <- function(object, newdata, type = c("topics", "terms"), batch_size = nrow(newdata), normalize = TRUE, top_n = 10, ...){
  ## Fix: removed the thetaWeightedAvg/cnt accumulation which was computed
  ## but never returned nor stored (dead code), and made the assignment
  ## operator `<-` consistent throughout.
  type <- match.arg(type)
  if(type == "terms"){
    ## newdata is not needed to list the per-topic terminology
    object$topwords(top_n)
  }else{
    if(any(Matrix::rowSums(newdata) <= 0)){
      stop("All rows of newdata should have at least 1 count")
    }
    x <- as_tokencounts(newdata)
    tokens <- x$tokens
    counts <- x$counts
    vocab_size <- object$vocab_size
    preds <- list()
    with_no_grad({
      indices <- torch_split(torch_tensor(seq_along(tokens)), batch_size)
      for(i in seq_along(indices)){
        ind <- indices[[i]]
        data_batch <- get_batch(tokens, counts, ind, vocab_size)
        ## token total per document, column-shaped for broadcasting
        sums <- data_batch$sum(2)$unsqueeze(2)
        if(normalize){
          normalized_data_batch <- data_batch / sums
        }else{
          normalized_data_batch <- data_batch
        }
        ## topic proportions for each document in the batch
        theta <- object$get_theta(normalized_data_batch)$theta
        preds[[i]] <- as.matrix(theta)
      }
    })
    ## one row per document, one column per topic
    preds <- do.call(rbind, preds)
    rownames(preds) <- rownames(newdata)
    preds
  }
}
#' @title Get matrices out of an ETM object
#' @description Convenience functions to extract embeddings of the cluster centers, the word embeddings
#' and the word emittance by each topic called beta, which is technically the softmax-transformed inner product of word embedding and topic embeddings
#' @param x an object of class \code{ETM}
#' @param type character string with the type of information to extract: either 'beta' (terms-by-topics emittance matrix) or 'embedding'. Defaults to 'embedding'.
#' @param which if type is set to 'embedding', which embedding, either 'words' or 'topics'. Defaults to 'topics'.
#' @param ... not used
#' @export
as.matrix.ETM <- function(x, type = c("embedding", "beta"), which = c("topics", "words"), ...){
  type <- match.arg(type)
  which <- match.arg(which)
  self <- x
  self$eval()   # no dropout when extracting weights
  if(type == "beta"){
    with_no_grad({
      emittance <- as.matrix(self$get_beta())
      colnames(emittance) <- self$vocab
    })
    ## transpose so the result is terms x topics
    return(t(emittance))
  }
  ## type == "embedding"
  if(which == "topics"){
    with_no_grad({
      result <- as.matrix(self$parameters$alphas.weight)
    })
  }else{
    with_no_grad({
      result <- as.matrix(self$parameters$rho.weight)
      rownames(result) <- self$vocab
    })
  }
  result
}
#' @title Plot functionality for an ETM object
#' @description Convenience function allowing to plot the evolution of the loss
#' @param x an object of class \code{ETM}
#' @param type character string with the type of plot, either 'loss' or 'topics'
#' @param ... not used
#' @export
plot.ETM <- function(x, type = c("loss", "topics"), ...){
  type <- match.arg(type)
  if(type == "loss"){
    loss_evolution <- x$loss_fit
    if(is.null(loss_evolution)){
      stop("You haven't trained the model yet")
    }
    ## we change mfrow below, so restore graphical parameters on exit
    oldpar <- par(no.readonly = TRUE)
    on.exit({
      par(oldpar)
    })
    ## keep only the loss recorded at the last batch of each epoch
    combined <- loss_evolution$loss[loss_evolution$loss$batch_is_last == TRUE, ]
    combined$loss_test <- loss_evolution$loss_test
    ## training loss left, test loss right
    par(mfrow = c(1, 2))
    plot(combined$epoch, combined$loss, xlab = "Epoch", ylab = "loss", main = "Avg batch loss evolution\non 70% training set", col = "steelblue", type = "b", pch = 20, lty = 2)
    ## NOTE(review): ylab claims exp(loss) but raw loss_test values are plotted - TODO confirm
    plot(combined$epoch, combined$loss_test, xlab = "Epoch", ylab = "exp(loss)", main = "Avg batch loss evolution\non 30% test set", col = "purple", type = "b", pch = 20, lty = 2)
  }else{
    .NotYetImplemented()
  }
}
#' @title Project ETM embeddings using UMAP
#' @description Uses the uwot package to map the word embeddings and the center of the topic embeddings to a 2-dimensional space
#' @param object object of class \code{ETM}
#' @param type character string with the type of summary. Defaults to 'umap'.
#' @param n_components the dimension of the space to embed into. Passed on to \code{\link[uwot]{umap}}
#' @param top_n passed on to \code{\link{predict.ETM}} to get the top_n most relevant words for each topic in the 2-dimensional space
#' @param ... further arguments passed onto \code{\link[uwot]{umap}}
#' @seealso \code{\link[uwot]{umap}}
#' @export
summary.ETM <- function(object, type = c("umap"), n_components = 2, top_n = 20, ...){
  type <- match.arg(type)
  if(type == "umap"){
    ## uwot is only needed here; fail early with a clear message when absent
    ## (the original ignored the requireNamespace() return value)
    if(!requireNamespace("uwot", quietly = TRUE)){
      stop("summary.ETM with type 'umap' requires the uwot package to be installed")
    }
    ## topic center and word embeddings in the original embedding space
    centers <- as.matrix(object, type = "embedding", which = "topics")
    embeddings <- as.matrix(object, type = "embedding", which = "words")
    ## fit UMAP on the word embeddings, then project the topic centers
    ## into the same 2D (or n_components-D) manifold
    manifold <- uwot::umap(embeddings, n_components = n_components, ret_model = TRUE, ...)
    centers <- uwot::umap_transform(X = centers, model = manifold)
    words <- manifold$embedding
    rownames(words) <- rownames(embeddings)
    ## top_n terms per topic with emission probability (beta) and rank
    terminology <- predict(object, type = "terms", top_n = top_n)
    terminology <- mapply(seq_along(terminology), terminology, FUN = function(topicnr, terminology){
      terminology$cluster <- rep(topicnr, nrow(terminology))
      terminology
    }, SIMPLIFY = FALSE)
    terminology <- do.call(rbind, terminology)
    ## attach 2D coordinates to the words; build labelled center points
    space.2d.words <- merge(x = terminology, y = data.frame(x = words[, 1], y = words[, 2], term = rownames(words), stringsAsFactors = FALSE), by = "term")
    space.2d.centers <- data.frame(x = centers[, 1], y = centers[, 2], term = paste("Cluster-", seq_len(nrow(centers)), sep = ""), cluster = seq_len(nrow(centers)), stringsAsFactors = FALSE)
    space.2d.words$type <- rep("words", nrow(space.2d.words))
    space.2d.words <- space.2d.words[order(space.2d.words$cluster, space.2d.words$rank, decreasing = FALSE), ]
    space.2d.centers$type <- rep("centers", nrow(space.2d.centers))
    space.2d.centers$rank <- rep(0L, nrow(space.2d.centers))
    space.2d.centers$beta <- rep(NA_real_, nrow(space.2d.centers))
    fields <- c("type", "term", "cluster", "rank", "beta", "x", "y")
    df <- rbind(space.2d.words[, fields], space.2d.centers[, fields])
    ## per-cluster plot weight: beta scaled to the cluster maximum,
    ## centers (beta = NA) get a fixed 0.8
    df <- split(df, df$cluster)
    df <- lapply(df, FUN = function(x){
      x$weight <- ifelse(is.na(x$beta), 0.8, x$beta / max(x$beta, na.rm = TRUE))
      x
    })
    df <- do.call(rbind, df)
    rownames(df) <- NULL
    list(center = centers, words = words, embed_2d = df)
  }else{
    .NotYetImplemented()
  }
}
|
countries <- read_csv("data/countries.csv")
indicators <- read_csv("data/indicators.csv")
# Download one World Bank indicator for one country via the WDI API.
# Sleeps 3 seconds first - presumably to throttle successive API calls.
# Returns a tibble with the indicator values in a column named "value".
get_wdi_indicator <- function(country, indicator) {
  Sys.sleep(3)
  WDI(
    country = country,
    indicator = c("value" = indicator)
  ) %>%
    as_tibble()
}
# Look up the human-readable name for a World Bank indicator code
# in the globally loaded `indicators` table.
get_indicator_name <- function(selected_indicator) {
  matched <- filter(indicators, indicator_code == selected_indicator)
  pull(matched, indicator_name)
}
# Look up the display name for an ISO2 country code
# in the globally loaded `countries` table.
get_country_name <- function(selected_country) {
  matched <- filter(countries, iso2c == selected_country)
  pull(matched, name)
}
|
/09_03/data-processing.R
|
no_license
|
dmancilla85/r-shiny-essential-training
|
R
| false
| false
| 531
|
r
|
countries <- read_csv("data/countries.csv")
indicators <- read_csv("data/indicators.csv")
# Download one World Bank indicator for one country via the WDI API.
# Sleeps 3 seconds first - presumably to throttle successive API calls.
# Returns a tibble with the indicator values in a column named "value".
get_wdi_indicator <- function(country, indicator) {
  Sys.sleep(3)
  WDI(
    country = country,
    indicator = c("value" = indicator)
  ) %>%
    as_tibble()
}
# Look up the human-readable name for a World Bank indicator code
# in the globally loaded `indicators` table.
get_indicator_name <- function(selected_indicator) {
  matched <- filter(indicators, indicator_code == selected_indicator)
  pull(matched, indicator_name)
}
# Look up the display name for an ISO2 country code
# in the globally loaded `countries` table.
get_country_name <- function(selected_country) {
  matched <- filter(countries, iso2c == selected_country)
  pull(matched, name)
}
|
#' Get Raindrop Trace
#' @description Uses a raindrop trace web service to trace the
#' nhdplus digital elevation model to the nearest downslope flowline.
#' @param point sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param direction character \code{"up"}, \code{"down"}, or \code{"none"}.
#' Controls the portion of the split flowline that is returned along with
#' the raindrop trace line.
#' @return sf data.frame containing raindrop trace and requested
#' portion of flowline.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-89.2158, 42.9561)), crs = 4326)
#'
#' (trace <- get_raindrop_trace(point))
#'
#' if(inherits(trace, "sf")) {
#' bbox <- sf::st_bbox(trace) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_geometry(trace)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(trace)[2], 3857), add = TRUE, col = "black")
#' }
#' }
#'
get_raindrop_trace <- function(point, direction = "down") {
  ## normalise the input point to an sfc in EPSG:4326
  point <- check_point(point)[[1]]
  ## reject unknown directions before hitting the service
  allowed_direction <- c("up", "down", "none")
  if (!direction %in% allowed_direction) {
    stop(paste("direction must be in",
               paste(allowed_direction, collapse = ", ")))
  }
  ## POST against the NLDI flowtrace processing endpoint
  endpoint <- paste0(get_nldi_url(), "/pygeoapi/processes/", "nldi-flowtrace/execution")
  sf_post(endpoint, make_json_input_trace(point, direction = direction))
}
#' Get split catchment
#' @description Uses catchment splitting web service to retrieve
#' the portion of a catchment upstream of the point provided.
#' @param point sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param upstream logical If TRUE, the entire drainage basin upstream
#' of the point provided is returned in addition to the local catchment.
#' @return sf data.frame containing the local catchment, the split portion
#' and optionally the total drainage basin.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-89.2158, 42.9561)), crs = 4326)
#'
#' trace <- get_raindrop_trace(point)
#'
#' if(inherits(trace, "sf")) {
#'
#' (snap_point <- sf::st_sfc(sf::st_point(trace$intersection_point[[1]]),
#' crs = 4326))
#'
#' (catchment <- get_split_catchment(snap_point))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE, col = "white")
#'
#' (catchment <- get_split_catchment(snap_point, upstream = FALSE))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE, col = "white")
#'
#' pour_point <- sf::st_sfc(sf::st_point(x = c(-89.25619, 42.98646)), crs = 4326)
#'
#' (catchment <- get_split_catchment(pour_point, upstream = FALSE))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_sfc(pour_point, crs = 4326), 3857), add = TRUE, col = "white")
#' }
#'}
#'
get_split_catchment <- function(point, upstream = TRUE) {
  ## normalise the input point to an sfc in EPSG:4326
  point <- check_point(point)[[1]]
  ## POST against the NLDI splitcatchment processing endpoint
  endpoint <- paste0(get_nldi_url(), "/pygeoapi/processes/", "nldi-splitcatchment/execution")
  sf_post(endpoint, make_json_input_split(point, upstream))
}
#' Get Cross Section From Point (experimental)
#' @description Uses a cross section retrieval web services to retrieve a
#' cross section given a point and specified width. Orientation is determined
#' based on direction of a the flowline found near point. This function uses
#' a 10m National Elevation Dataset request on the back end.
#' @param point sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param width Cross section width in meters.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @return sf data.frame containing points retrieved.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-105.97218, 36.17592)), crs = 4326)
#'
#' (xs <- get_xs_point(point, 300, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#' }
#'
get_xs_point <- function(point, width, num_pts) {
  ## normalise the input point to an sfc in EPSG:4326
  point <- check_point(point)[[1]]
  ## POST against the NLDI cross-section-at-point processing endpoint
  endpoint <- paste0(get_nldi_url(), "/pygeoapi/processes/", "nldi-xsatpoint/execution")
  get_xs(endpoint, make_json_input_xspt, point, width, num_pts)
}
#' Get Cross Section Endpoints (experimental)
#' @description Uses a cross section retrieval web services to retrieve a
#' cross section between two endpoints.
#' @param point1 sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param point2 sfc POINT including crs.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @param res integer resolution of 3D Elevation Program data to request.
#' Must be one of: 1, 3, 5, 10, 30, 60.
#' @return sf data.frame containing points retrieved.
#' @export
#' @examples
#' \donttest{
#' point1 <- sf::st_sfc(sf::st_point(x = c(-105.9667, 36.17602)), crs = 4326)
#' point2 <- sf::st_sfc(sf::st_point(x = c(-105.97768, 36.17526)), crs = 4326)
#'
#' (xs <- get_xs_points(point1, point2, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point1, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point2, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#' }
#'
get_xs_points <- function(point1, point2, num_pts, res = 1) {
  ## normalise both endpoints to sfc in EPSG:4326
  point1 <- check_point(point1)[[1]]
  point2 <- check_point(point2)[[1]]
  ## POST against the NLDI cross-section-at-endpoints processing endpoint
  endpoint <- paste0(get_nldi_url(), "/pygeoapi/processes/", "nldi-xsatendpts/execution")
  check_res(res)
  get_xs(endpoint, make_json_input_xspts, point1, point2, num_pts, res)
}
#' Validate a 3D Elevation Program resolution value
#' @param res resolution in meters; must be one of 1, 3, 5, 10, 30, 60
#' @return invisibly TRUE when valid, otherwise an error is raised
#' @noRd
check_res <- function(res) {
  ## fixes the "must be on of" typo in the error message
  if(!res %in% c(1, 3, 5, 10, 30, 60)) {
    stop("res input must be one of 1, 3, 5, 10, 30, 60")
  }
  return(invisible(TRUE))
}
#' Get Elevation Along Path (experimental)
#' @description Uses a cross section retrieval web services to retrieve elevation
#' along a path.
#' @param points sf data.frame containing a point column.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @param res integer resolution of 3D Elevation Program data to request.
#' Must be one of: 1, 3, 5, 10, 30, 60.
#' @param status logical
#' @return sf data.frame containing points retrieved. Names include
#' "id", "distance_m", "elevation_m", "spatial_ref", "geometry",
#' and ".group". .group tracks which input point each set of output
#' points belongs to.
#' @export
#' @examples
#' \donttest{
#' point1 <- sf::st_sfc(sf::st_point(x = c(-105.9667, 36.17602)), crs = 4326)
#' point2 <- sf::st_sfc(sf::st_point(x = c(-105.97768, 36.17526)), crs = 4326)
#' point3 <- sf::st_sfc(sf::st_point(x = c(-105.98869, 36.17450)), crs = 4326)
#'
#' points <- sf::st_as_sf(c(point1, point2, point3))
#'
#' (xs <- get_elev_along_path(points, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point1, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point2, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point3, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#'
#' }
#'
get_elev_along_path <- function(points, num_pts, res = 1, status = TRUE) {
  ## the path is traced segment by segment via the xsatendpts endpoint
  endpoint <- paste0(get_nldi_url(), "/pygeoapi/processes/", "nldi-xsatendpts/execution")
  check_res(res)
  get_elev(endpoint, make_json_input_xspts, points, num_pts, res, status)
}
#' Fetch elevation for consecutive point pairs and stitch the results into
#' one sf data.frame with cumulative distances and a .group column.
#' Fixes: "Requestion" message typo, crash on a single input point
#' (1:(n-1) iterated over c(1, 0)), rbind growth inside the loop.
#' @noRd
get_elev <- function(url, fun, points, num_pts, res, status) {
  ## split the input sf data.frame into individual normalised points
  points <- split(points, seq_len(nrow(points)))
  points <- lapply(points, check_point)
  points <- lapply(points, "[[", 1)
  n_seg <- length(points) - 1
  out <- vector("list", max(n_seg, 0))
  offset <- 0
  ## row whose distance becomes the cumulative offset for the next segment;
  ## presumably the service returns num_pts rows when num_pts is odd and
  ## num_pts + 1 rows otherwise - TODO confirm against the xsatendpts service
  last_idx <- if (num_pts %% 2 != 0) num_pts else num_pts + 1
  for (i in seq_len(n_seg)) {
    if (status)
      message(paste("Requesting segment", i, "of", n_seg))
    data <- get_xs(url, fun, points[[i]], points[[i + 1]], num_pts, res)
    if (is.null(data)) {
      return(NULL)
    }
    data$.group <- i
    ## make distances cumulative along the whole path
    data[["distance_m"]] <- offset + data[["distance_m"]]
    offset <- data[[last_idx, "distance_m"]]
    out[[i]] <- data
  }
  ## bind once instead of growing a data.frame inside the loop; the leading
  ## empty data.frame reproduces the original's return value when n_seg == 0
  do.call(rbind, c(list(data.frame()), out))
}
#' @importFrom dplyr rename
## POST a cross-section request and standardise the returned column names
## to distance_m / elevation_m. Returns NULL when the service call failed.
get_xs <- function(url, fun, ...) {
  sf <- sf_post(url, fun(...))
  if(is.null(sf)) {
    return(NULL)
  }
  rename(sf,
         distance_m = "distance",
         elevation_m = "elevation")
}
## POST a JSON body to a pygeoapi processing endpoint and parse the response
## as sf. Uses httr::RETRY for transient failures; any error (HTTP or parse)
## is reported with message() and NULL is returned so callers can degrade
## gracefully.
sf_post <- function(url, json) {
  tryCatch({
    ## echo request details when nhdplus debugging is switched on
    if(nhdplus_debug()) {
      message(paste(url, "\n"))
      message(json)
    }
    out <- httr::RETRY("POST", url, httr::accept_json(),
                       httr::content_type_json(),
                       body = json)
    if(out$status_code == 200) {
      sf::read_sf(rawToChar(out$content))
    } else {
      ## non-200: surface the server's error body via the tryCatch handler
      stop(rawToChar(out$content))
    }
  }, error = function(e) {
    message("Error calling processing service. \n Original error: \n", e)
    NULL
  })
}
## Coerce sf input to sfc and reproject to EPSG:4326; errors when the input
## is not sfc/sf or has no usable CRS.
check_point <- function(p) {
  mess <- "Point must be of type sfc and have a CRS declared."
  if (inherits(p, "sf")) {
    p <- sf::st_geometry(p)
  }
  if (!inherits(p, "sfc")) {
    stop(mess)
  }
  tryCatch(
    sf::st_transform(p, 4326),
    error = function(e) stop(paste(mess, "Original error was: \n", e))
  )
}
## Build the JSON body for the NLDI flowtrace process.
make_json_input_trace <- function(p, raindrop = TRUE, direction = "down") {
  plain <- function(id, value) list(id = id, type = "text/plain", value = value)
  payload <- list(inputs = list(
    plain("lat", as.character(p[2])),
    plain("lon", as.character(p[1])),
    plain("raindroptrace", if (raindrop) "true" else "false"),
    plain("direction", direction)
  ))
  jsonlite::toJSON(payload, pretty = TRUE, auto_unbox = TRUE)
}
## Build the JSON body for the NLDI splitcatchment process.
make_json_input_split <- function(p, upstream = TRUE) {
  plain <- function(id, value) list(id = id, type = "text/plain", value = value)
  payload <- list(inputs = list(
    plain("lat", as.character(p[2])),
    plain("lon", as.character(p[1])),
    plain("upstream", if (upstream) "true" else "false")
  ))
  jsonlite::toJSON(payload, pretty = TRUE, auto_unbox = TRUE)
}
## Build the JSON body for the NLDI cross-section-at-point process.
## NOTE(review): lat/lon/width/numpts are sent unconverted here while the
## trace and split payloads send characters - presumably the service accepts
## numbers; confirm before normalising.
make_json_input_xspt <- function(p, w, n) {
  plain <- function(id, value) list(id = id, type = "text/plain", value = value)
  payload <- list(inputs = list(
    plain("lat", p[2]),
    plain("lon", p[1]),
    plain("width", w),
    plain("numpts", n)
  ))
  jsonlite::toJSON(payload, pretty = TRUE, auto_unbox = TRUE)
}
## Build the JSON body for the NLDI cross-section-at-endpoints process.
make_json_input_xspts <- function(p1, p2, n, r) {
  plain <- function(id, value) list(id = id, type = "text/plain", value = value)
  payload <- list(inputs = list(
    plain("lat", c(p1[2], p2[2])),
    plain("lon", c(p1[1], p2[1])),
    plain("3dep_res", as.character(r)),
    plain("numpts", n)
  ))
  jsonlite::toJSON(payload, pretty = TRUE, auto_unbox = TRUE)
}
|
/R/get_oaproc.R
|
permissive
|
mikejohnson51/nhdplusTools
|
R
| false
| false
| 14,710
|
r
|
#' Get Raindrop Trace
#' @description Uses a raindrop trace web service to trace the
#' nhdplus digital elevation model to the nearest downslop flowline.
#' @param point sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param direction character \code{"up"}, \code{"down"}, or \code{"none"}.
#' Controls the portion of the split flowline that is returned along with
#' the raindrop trace line.
#' @return sf data.frame containing raindrop trace and requested
#' portion of flowline.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-89.2158, 42.9561)), crs = 4326)
#'
#' (trace <- get_raindrop_trace(point))
#'
#' if(inherits(trace, "sf")) {
#' bbox <- sf::st_bbox(trace) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_geometry(trace)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(trace)[2], 3857), add = TRUE, col = "black")
#' }
#' }
#'
get_raindrop_trace <- function(point, direction = "down") {
point <- check_point(point)[[1]]
url_base <- paste0(get_nldi_url(), "/pygeoapi/processes/")
url <- paste0(url_base, "nldi-flowtrace/execution")
allowed_direction <- c("up", "down", "none")
if(!direction %in% allowed_direction)
stop(paste("direction must be in",
paste(allowed_direction, collapse = ", ")))
return(sf_post(url, make_json_input_trace(point, direction = direction)))
}
#' Get split catchment
#' @description Uses catchment splitting web service to retrieve
#' the portion of a catchment upstream of the point provided.
#' @param point scf POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param upstream logical If TRUE, the entire drainage basin upstream
#' of the point provided is returned in addition to the local catchment.
#' @return sf data.frame containing the local catchment, the split portion
#' and optionally the total dranage basin.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-89.2158, 42.9561)), crs = 4326)
#'
#' trace <- get_raindrop_trace(point)
#'
#' if(inherits(trace, "sf")) {
#'
#' (snap_point <- sf::st_sfc(sf::st_point(trace$intersection_point[[1]]),
#' crs = 4326))
#'
#' (catchment <- get_split_catchment(snap_point))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE, col = "white")
#'
#' (catchment <- get_split_catchment(snap_point, upstream = FALSE))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE, col = "white")
#'
#' pour_point <- sf::st_sfc(sf::st_point(x = c(-89.25619, 42.98646)), crs = 4326)
#'
#' (catchment <- get_split_catchment(pour_point, upstream = FALSE))
#'
#' bbox <- sf::st_bbox(catchment) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(catchment)[1], 3857), add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_geometry(catchment)[2], 3857), add = TRUE, col = "black")
#' plot(sf::st_transform(sf::st_sfc(pour_point, crs = 4326), 3857), add = TRUE, col = "white")
#' }
#'}
#'
get_split_catchment <- function(point, upstream = TRUE) {
point <- check_point(point)[[1]]
url_base <- paste0(get_nldi_url(), "/pygeoapi/processes/")
url <- paste0(url_base, "nldi-splitcatchment/execution")
return(sf_post(url, make_json_input_split(point, upstream)))
}
#' Get Cross Section From Point (experimental)
#' @description Uses a cross section retrieval web services to retrieve a
#' cross section given a point and specified width. Orientation is determined
#' based on direction of a the flowline found near point. This function uses
#' a 10m National Elevation Dataset request on the back end.
#' @param point sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}crs.
#' @param width Cross section width in meters.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @return sf data.frame containing points retrieved.
#' @export
#' @examples
#' \donttest{
#' point <- sf::st_sfc(sf::st_point(x = c(-105.97218, 36.17592)), crs = 4326)
#'
#' (xs <- get_xs_point(point, 300, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#' }
#'
get_xs_point <- function(point, width, num_pts) {
point <- check_point(point)[[1]]
url_base <- paste0(get_nldi_url(), "/pygeoapi/processes/")
url <- paste0(url_base, "nldi-xsatpoint/execution")
get_xs(url, make_json_input_xspt, point, width, num_pts)
}
#' Get Cross Section Endpoints (experimental)
#' @description Uses a cross section retrieval web services to retrieve a
#' cross section between two endpoints.
#' @param point1 sfc POINT including crs as created by:
#' \code{sf::st_sfc(sf::st_point(.. ,..), crs)}
#' @param point2 sfc POINT including crs.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @param res integer resolution of 3D Elevation Program data to request.
#' Must be on of: 1, 3, 5, 10, 30, 60.
#' @return sf data.frame containing points retrieved.
#' @export
#' @examples
#' \donttest{
#' point1 <- sf::st_sfc(sf::st_point(x = c(-105.9667, 36.17602)), crs = 4326)
#' point2 <- sf::st_sfc(sf::st_point(x = c(-105.97768, 36.17526)), crs = 4326)
#'
#' (xs <- get_xs_points(point1, point2, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point1, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point2, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#' }
#'
get_xs_points <- function(point1, point2, num_pts, res = 1) {
point1 <- check_point(point1)[[1]]
point2 <- check_point(point2)[[1]]
url_base <- paste0(get_nldi_url(), "/pygeoapi/processes/")
url <- paste0(url_base, "nldi-xsatendpts/execution")
check_res(res)
get_xs(url, make_json_input_xspts, point1, point2, num_pts, res)
}
#' Validate a 3D Elevation Program resolution value
#' @param res resolution in meters; must be one of 1, 3, 5, 10, 30, 60
#' @return invisibly TRUE when valid, otherwise an error is raised
#' @noRd
check_res <- function(res) {
  ## fixes the "must be on of" typo in the error message
  if(!res %in% c(1, 3, 5, 10, 30, 60)) {
    stop("res input must be one of 1, 3, 5, 10, 30, 60")
  }
  return(invisible(TRUE))
}
#' Get Elevation Along Path (experimental)
#' @description Uses a cross section retrieval web services to retrieve elevation
#' along a path.
#' @param points sf data.frame containing a point column.
#' @param num_pts numeric number of points to retrieve along the cross section.
#' @param res integer resolution of 3D Elevation Program data to request.
#' Must be on of: 1, 3, 5, 10, 30, 60.
#' @param status logical
#' @return sf data.frame containing points retrieved. Names include
#' "id", "distance_m", "elevation_m", "spatial_ref", "geometry",
#' and ".group". .group tracks which input point each set of output
#' points belongs to.
#' @export
#' @examples
#' \donttest{
#' point1 <- sf::st_sfc(sf::st_point(x = c(-105.9667, 36.17602)), crs = 4326)
#' point2 <- sf::st_sfc(sf::st_point(x = c(-105.97768, 36.17526)), crs = 4326)
#' point3 <- sf::st_sfc(sf::st_point(x = c(-105.98869, 36.17450)), crs = 4326)
#'
#' points <- sf::st_as_sf(c(point1, point2, point3))
#'
#' (xs <- get_elev_along_path(points, 100))
#'
#' if(inherits(xs, "sf")) {
#'
#' bbox <- sf::st_bbox(xs) + c(-0.005, -0.005, 0.005, 0.005)
#'
#' nhdplusTools::plot_nhdplus(bbox = bbox, cache_data = FALSE)
#'
#' plot(sf::st_transform(sf::st_geometry(xs), 3857), pch = ".", add = TRUE, col = "red")
#' plot(sf::st_transform(sf::st_sfc(point1, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point2, crs = 4326), 3857), add = TRUE)
#' plot(sf::st_transform(sf::st_sfc(point3, crs = 4326), 3857), add = TRUE)
#'
#' plot(xs$distance_m, xs$elevation_m)
#' }
#'
#' }
#'
get_elev_along_path <- function(points, num_pts, res = 1, status = TRUE) {
url_base <- paste0(get_nldi_url(), "/pygeoapi/processes/")
url <- paste0(url_base, "nldi-xsatendpts/execution")
check_res(res)
get_elev(url, make_json_input_xspts, points, num_pts, res, status)
}
#' Fetch elevation for consecutive point pairs and stitch the results into
#' one sf data.frame with cumulative distances and a .group column.
#' Fixes: "Requestion" message typo, crash on a single input point
#' (1:(n-1) iterated over c(1, 0)), rbind growth inside the loop.
#' @noRd
get_elev <- function(url, fun, points, num_pts, res, status) {
  ## split the input sf data.frame into individual normalised points
  points <- split(points, seq_len(nrow(points)))
  points <- lapply(points, check_point)
  points <- lapply(points, "[[", 1)
  n_seg <- length(points) - 1
  out <- vector("list", max(n_seg, 0))
  offset <- 0
  ## row whose distance becomes the cumulative offset for the next segment;
  ## presumably the service returns num_pts rows when num_pts is odd and
  ## num_pts + 1 rows otherwise - TODO confirm against the xsatendpts service
  last_idx <- if (num_pts %% 2 != 0) num_pts else num_pts + 1
  for (i in seq_len(n_seg)) {
    if (status)
      message(paste("Requesting segment", i, "of", n_seg))
    data <- get_xs(url, fun, points[[i]], points[[i + 1]], num_pts, res)
    if (is.null(data)) {
      return(NULL)
    }
    data$.group <- i
    ## make distances cumulative along the whole path
    data[["distance_m"]] <- offset + data[["distance_m"]]
    offset <- data[[last_idx, "distance_m"]]
    out[[i]] <- data
  }
  ## bind once instead of growing a data.frame inside the loop; the leading
  ## empty data.frame reproduces the original's return value when n_seg == 0
  do.call(rbind, c(list(data.frame()), out))
}
#' @importFrom dplyr rename
get_xs <- function(url, fun, ...) {
sf <- sf_post(url, fun(...))
if(is.null(sf)) {
return(NULL)
}
rename(sf,
distance_m = "distance",
elevation_m = "elevation")
}
sf_post <- function(url, json) {
tryCatch({
if(nhdplus_debug()) {
message(paste(url, "\n"))
message(json)
}
out <- httr::RETRY("POST", url, httr::accept_json(),
httr::content_type_json(),
body = json)
if(out$status_code == 200) {
sf::read_sf(rawToChar(out$content))
} else {
stop(rawToChar(out$content))
}
}, error = function(e) {
message("Error calling processing service. \n Original error: \n", e)
NULL
})
}
check_point <- function(p) {
mess <- "Point must be of type sfc and have a CRS declared."
if(inherits(p, "sf")) p <- sf::st_geometry(p)
if(!inherits(p, "sfc")) stop(mess)
tryCatch({
sf::st_transform(p, 4326)
}, error = function(e) {
stop(paste(mess, "Original error was: \n", e))
})
}
make_json_input_trace <- function(p, raindrop = TRUE, direction = "down") {
jsonlite::toJSON(list(inputs = list(list(id = "lat",
type = "text/plain",
value = as.character(p[2])),
list(id = "lon",
type = "text/plain",
value = as.character(p[1])),
list(id = "raindroptrace",
type = "text/plain",
value = ifelse(raindrop,
"true", "false")),
list(id = "direction",
type = "text/plain",
value = direction))),
pretty = TRUE, auto_unbox = TRUE)
}
make_json_input_split <- function(p, upstream = TRUE) {
jsonlite::toJSON(list(inputs = list(list(id = "lat",
type = "text/plain",
value = as.character(p[2])),
list(id = "lon",
type = "text/plain",
value = as.character(p[1])),
list(id = "upstream",
type = "text/plain",
value = ifelse(upstream,
"true", "false")))),
pretty = TRUE, auto_unbox = TRUE)
}
make_json_input_xspt <- function(p, w, n) {
jsonlite::toJSON(list(inputs = list(list(id = "lat",
type = "text/plain",
value = p[2]),
list(id = "lon",
type = "text/plain",
value = p[1]),
list(id = "width",
type = "text/plain",
value = w),
list(id = "numpts",
type = "text/plain",
value = n))),
pretty = TRUE, auto_unbox = TRUE)
}
make_json_input_xspts <- function(p1, p2, n, r) {
jsonlite::toJSON(list(inputs = list(list(id = "lat",
type = "text/plain",
value = c(p1[2], p2[2])),
list(id = "lon",
type = "text/plain",
value = c(p1[1], p2[1])),
list(id = "3dep_res",
type = "text/plain",
value = as.character(r)),
list(id = "numpts",
type = "text/plain",
value = n))),
pretty = TRUE, auto_unbox = TRUE)
}
|
#######################################################################
# #
# Package: onemap #
# #
# File: draw.map.R #
# Contains: draw.map #
# #
# Written by Marcelo Mollinari #
# copyright (c) 2010, Marcelo Mollinari #
# #
# First version: 11/30/2010 #
# Last update: 02/19/2011 #
# License: GNU General Public License version 2 (June, 1991) or later #
# #
#######################################################################
#' Draw a genetic map
#'
#' Fixes: class(x) == "..." comparisons replaced by inherits() (class() can
#' return more than one element), "sequnece" typo in error messages, and
#' invalid xlab/ylab/main arguments removed from the points() call.
#'
#' @param map.list an object of class 'sequence' or a list of them, each
#'   with estimated parameters (seq.like not NULL)
#' @param horizontal if TRUE the linkage groups are drawn horizontally
#' @param names if TRUE marker names are printed next to their positions
#' @param grid if TRUE a dashed grid is drawn every 10 cM
#' @param cex.mrk character expansion for the marker ticks
#' @param cex.grp character expansion for the group labels
draw.map <- function(map.list, horizontal=FALSE, names=FALSE, grid=FALSE, cex.mrk=1, cex.grp=.75){
  ## checking for correct object
  if(!(inherits(map.list, "list") || inherits(map.list, "sequence")))
    stop(deparse(substitute(map.list)), " is not an object of class 'list' or 'sequence'")
  ## if map.list is just a single chromosome, convert it into a list
  if(inherits(map.list, "sequence")) map.list <- list(map.list)
  j <- 1
  ## converting to data.frame with cumulative distances per group
  out <- data.frame()
  pos <- NULL    # to satisfy codetools
  marker <- NULL # to satisfy codetools
  for(i in length(map.list):1){
    if(!inherits(map.list[[i]], "sequence"))
      stop("Object ", i, " in map.list is not an object of class 'sequence'")
    if(is.null(map.list[[i]]$seq.like))
      stop("Parameters are not estimated for object ", i, " in map.list")
    ## cumulative distances using the mapping function configured in .onemapEnv
    map <- cumsum(c(0, get(get(".map.fun", envir=.onemapEnv))(map.list[[i]]$seq.rf)))
    marnames <- colnames(get(map.list[[i]]$data.name, pos=1)$geno)[map.list[[i]]$seq.num]
    out <- rbind(out, data.frame(dist=map, pos=j, marker=marnames))
    j <- j+1
  }
  x <- tapply(out$dist, out$pos, max)
  y <- unlist(unique(out[2]))
  ## fake data to set up the plotting region
  out.fake <- data.frame(dist=rep(c(0, max(out$dist)), max(y)+2), pos=c(0:(max(y)+1)))
  if(horizontal==TRUE){
    plot(out.fake, axes=FALSE, col=0, xlab="Distance (cM)", ylab="", main="Genetic Map")
    points(out[1:2], pch="|", cex=cex.mrk)
    axis(1, at = seq(from=0, to=10*round(max(x)/10), by=10), labels=seq(from=0, to=10*round(max(x)/10), by=10), cex.axis=.75)
    axis(2, y, paste("Group", rev(y)), lwd=0, las=2, cex.axis=cex.grp)
    if(grid==TRUE)
      abline(v=seq(from=0, to=10*round(max(x)/10), by=10), lty=2, lwd=.5, col=2)
    for(i in y){
      if(names==TRUE) text(x=unlist(subset(out, pos==i, dist)), y=i+max(y)/80, labels=unlist(subset(out, pos==i, marker)), srt=90, cex=cex.mrk*.75, adj = c(0,0.5))
      lines(x=c(0,x[i]), y=c(y[i],y[i]))
    }
  }
  else{
    plot(-out.fake[2:1], axes=FALSE, col=0, ylab="Distance (cM)", xlab="", main="Genetic Map")
    points(-out[2:1], pch= 95, cex=cex.mrk)
    axis(2, cex.axis=.75, at=-seq(from=0, to=10*round(max(x)/10), by=10), labels = seq(from=0, to=10*round(max(x)/10), by=10), las=2)
    axis(1, -y, paste("Group", rev(y)), lwd=0, las=2, cex.axis=cex.grp)
    if(grid==TRUE)
      abline(h=-seq(from=0, to=10*round(max(x)/10), by=10), lty=2, lwd=.5, col=2)
    for(i in y){
      if(names==TRUE) text(x=-i+max(y)/100, y=-unlist(subset(out, pos==i, dist)), labels=unlist(subset(out, pos==i, marker)), cex=cex.mrk*.75, adj = c(0,0.5))
      lines(y=c(-0.2,-x[i]+0.2), x=c(-y[i],-y[i]))
    }
  }
}
#end of file
|
/onemap/R/draw.map.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,761
|
r
|
#######################################################################
# #
# Package: onemap #
# #
# File: draw.map.R #
# Contains: draw.map #
# #
# Written by Marcelo Mollinari #
# copyright (c) 2010, Marcelo Mollinari #
# #
# First version: 11/30/2010 #
# Last update: 02/19/2011 #
# License: GNU General Public License version 2 (June, 1991) or later #
# #
#######################################################################
draw.map<-function(map.list, horizontal=FALSE, names=FALSE, grid=FALSE, cex.mrk=1, cex.grp=.75){
  ## Draw a genetic linkage map for one linkage group or a list of groups.
  ##
  ## Args:
  ##   map.list:   an object of class 'sequence', or a list of such objects;
  ##               each must have estimated parameters (seq.like not NULL).
  ##   horizontal: TRUE draws the groups horizontally, FALSE vertically.
  ##   names:      TRUE prints marker names at their map positions.
  ##   grid:       TRUE overlays a dashed red grid line every 10 cM.
  ##   cex.mrk:    character expansion for marker ticks and names.
  ##   cex.grp:    character expansion for the "Group" axis labels.
  ##
  ## Side effect: draws on the current graphics device.

  ## checking for correct object (inherits() is robust to multi-class objects)
  if(!inherits(map.list, c("list", "sequence"))) stop(deparse(substitute(map.list))," is not an object of class 'list' or 'sequence'")
  ## if map.list is just a single chromosome, convert it into a list
  if(inherits(map.list, "sequence")) map.list<-list(map.list)
  j<-1
  ##converting to data.frame
  out<-data.frame()
  pos<-NULL #to satisfy codetools
  marker<-NULL #to satisfy codetools
  ## one row per marker: cumulative distance, group index and marker name;
  ## groups are visited in reverse so group 1 ends up drawn last
  for(i in length(map.list):1){
    if(!inherits(map.list[[i]], "sequence")) stop("Object ", i , " in map.list is not an object of class 'sequence'")
    if(is.null(map.list[[i]]$seq.like))  stop("Parameters are not estimated for object ", i, " in map.list")
    ## cumulative map positions via the mapping function named in .onemapEnv
    map<-cumsum(c(0,get(get(".map.fun", envir=.onemapEnv))(map.list[[i]]$seq.rf)))
    marnames<-colnames(get(map.list[[i]]$data.name, pos=1)$geno)[map.list[[i]]$seq.num]
    out<-rbind(out, data.frame(dist=map, pos=j,marker=marnames))
    j<-j+1
  }
  x<-tapply(out$dist, out$pos, max)  # total length (cM) of each group
  y<-unlist(unique(out[2]))          # group indices
  ##Plotting region: invisible points that fix the axis limits
  out.fake <- data.frame(dist=rep(c(0, max(out$dist)),max(y)+2) , pos=c(0:(max(y)+1)))
  if(horizontal==TRUE){
    plot(out.fake, axes=FALSE, col=0, xlab="Distance (cM)", ylab="", main="Genetic Map")
    points(out[1:2], pch="|", cex=cex.mrk, xlab="Distance (cM)", ylab="", main="Genetic Map")
    axis(1, at = seq(from=0, to=10*round(max(x)/10), by=10) , labels=seq(from=0, to=10*round(max(x)/10), by=10) , cex.axis=.75)
    axis(2, y, paste("Group", rev(y)), lwd=0, las=2, cex.axis=cex.grp)
    if(grid==TRUE)
      abline(v=seq(from=0, to=10*round(max(x)/10), by=10), lty=2, lwd=.5, col=2)
    for(i in y){
      if(names==TRUE) text(x=unlist(subset(out, pos==i, dist)), y=i+max(y)/80, labels=unlist(subset(out, pos==i, marker)), srt=90, cex=cex.mrk*.75, adj = c(0,0.5))
      lines(x=c(0,x[i]), y=c(y[i],y[i]))
    }
  }
  else{
    plot(-out.fake[2:1], axes=FALSE, col=0, ylab="Distance (cM)", xlab="", main="Genetic Map")
    points(-out[2:1], pch= 95, cex=cex.mrk)
    axis(2, cex.axis=.75, at=-seq(from=0, to=10*round(max(x)/10), by=10), labels = seq(from=0, to=10*round(max(x)/10), by=10), las=2)
    axis(1, -y, paste("Group", rev(y)), lwd=0, las=2, cex.axis=cex.grp)
    if(grid==TRUE)
      abline(h=-seq(from=0, to=10*round(max(x)/10), by=10), lty=2, lwd=.5, col=2)
    for(i in y){
      if(names==TRUE) text(x=-i+max(y)/100, y=-unlist(subset(out, pos==i, dist)), labels=unlist(subset(out, pos==i, marker)), cex=cex.mrk*.75, adj = c(0,0.5))
      lines(y=c(-0.2,-x[i]+0.2), x=c(-y[i],-y[i]))
    }
  }
}
#end of file
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_data.R
\name{create_anno_region}
\alias{create_anno_region}
\title{Create annotation regions}
\usage{
create_anno_region(
anno,
chrom_size = NULL,
is_centre = FALSE,
is_window = TRUE,
upstream = -5000,
downstream = 5000
)
}
\arguments{
\item{anno}{A \code{GRanges} object containing the annotation data, this
normally would be the output from \code{\link{read_anno}} function.}
\item{chrom_size}{Object containing genome chromosome sizes, normally would
be the output of \code{\link{read_chrom_size}} function.}
\item{is_centre}{Logical, whether 'start' and 'end' locations are
pre-centred. If TRUE, the mean of the locations will be chosen as centre.
If FALSE, the 'start' will be chosen as the center; e.g. for genes the
'start' denotes the TSS and we use this as centre to obtain K-bp upstream
and downstream of TSS.}
\item{is_window}{Whether to consider a predefined window region around
centre. If TRUE, then 'upstream' and 'downstream' parameters are used,
otherwise we consider the whole region from start to end location.}
\item{upstream}{Integer defining the length of bp upstream of 'centre' for
creating the genomic region. If is_window = FALSE, this parameter is
ignored.}
\item{downstream}{Integer defining the length of bp downstream of 'centre'
for creating the genomic region. If is_window = FALSE, this parameter is
ignored.}
}
\value{
A \code{GRanges} object containing the genomic regions.
The GRanges object contains two or three additional metadata columns:
\itemize{\item{\code{id}: Genomic region id.} \item{\code{centre}: Central
location of each genomic region.} \item{\code{name}: (Optional) Genomic
region name.} } These columns can be accessed as follows:
\code{granges_object$id}
}
\description{
\code{create_anno_region} creates annotation regions from
annotation data, using the central point of the annotation features as
ground truth labels we create genomic regions \code{N} bp upstream and
\code{M} bp downstream of central location.
}
\examples{
# Obtain the path to files
file <- system.file("extdata", "dummy_anno.bed", package = "BPRMeth")
anno_dt <- read_anno(file, is_anno_region = FALSE)
# Create genomic region
gen_region <- create_anno_region(anno_dt)
# Extract ID
id <- gen_region$id
}
\seealso{
\code{\link{create_region_object}}, \code{\link{read_anno}}
}
\author{
C.A.Kapourani \email{C.A.Kapourani@ed.ac.uk}
}
|
/man/create_anno_region.Rd
|
permissive
|
andreaskapou/BPRMeth
|
R
| false
| true
| 2,478
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/process_data.R
\name{create_anno_region}
\alias{create_anno_region}
\title{Create annotation regions}
\usage{
create_anno_region(
anno,
chrom_size = NULL,
is_centre = FALSE,
is_window = TRUE,
upstream = -5000,
downstream = 5000
)
}
\arguments{
\item{anno}{A \code{GRanges} object containing the annotation data, this
normally would be the output from \code{\link{read_anno}} function.}
\item{chrom_size}{Object containing genome chromosome sizes, normally would
be the output of \code{\link{read_chrom_size}} function.}
\item{is_centre}{Logical, whether 'start' and 'end' locations are
pre-centred. If TRUE, the mean of the locations will be chosen as centre.
If FALSE, the 'start' will be chosen as the center; e.g. for genes the
'start' denotes the TSS and we use this as centre to obtain K-bp upstream
and downstream of TSS.}
\item{is_window}{Whether to consider a predefined window region around
centre. If TRUE, then 'upstream' and 'downstream' parameters are used,
otherwise we consider the whole region from start to end location.}
\item{upstream}{Integer defining the length of bp upstream of 'centre' for
creating the genomic region. If is_window = FALSE, this parameter is
ignored.}
\item{downstream}{Integer defining the length of bp downstream of 'centre'
for creating the genomic region. If is_window = FALSE, this parameter is
ignored.}
}
\value{
A \code{GRanges} object containing the genomic regions.
The GRanges object contains two or three additional metadata columns:
\itemize{\item{\code{id}: Genomic region id.} \item{\code{centre}: Central
location of each genomic region.} \item{\code{name}: (Optional) Genomic
region name.} } These columns can be accessed as follows:
\code{granges_object$id}
}
\description{
\code{create_anno_region} creates annotation regions from
annotation data, using the central point of the annotation features as
ground truth labels we create genomic regions \code{N} bp upstream and
\code{M} bp downstream of central location.
}
\examples{
# Obtain the path to files
file <- system.file("extdata", "dummy_anno.bed", package = "BPRMeth")
anno_dt <- read_anno(file, is_anno_region = FALSE)
# Create genomic region
gen_region <- create_anno_region(anno_dt)
# Extract ID
id <- gen_region$id
}
\seealso{
\code{\link{create_region_object}}, \code{\link{read_anno}}
}
\author{
C.A.Kapourani \email{C.A.Kapourani@ed.ac.uk}
}
|
## Print every reading of one currency for the first month of a flattened
## JSON exchange-rate dump (data.json). The JSON becomes a one-row data
## frame; sorting the columns by name groups the readings so that a fixed
## stride of 32 (the number of currency codes listed below) steps from one
## reading of the chosen currency to the next.
library("rjson")
data <- fromJSON(file="data.json")
new_dataa <- as.data.frame(data)
##new_dataa[1]
##new_dataa[8160]
## before sorting
## first value new_dataa[1]
## last value new_dataa[8160]
pickle <- new_dataa[, order(names(new_dataa))]  # columns sorted alphabetically by name
## after sorting
## first value pickle[3]
## last value pickle[8162]
enter_index <- 17  # column offset of the chosen currency (see table below)
##enter index above_dont change values below
## 03 - AUD
## 04 - BGN
## 05 - BRL
## 06 - CAD
## 07 - CHF
## 08 - CNY
## 09 - CZY
## 10 - DKK
## 11 - GBP
## 12 - HKD
## 13 - HRK
## 14 - HUF
## 15 - IDR
## 16 - ILS
## 17 - INR
## 18 - ISK
## 19 - JPY
## 20 - KRW
## 21 - MXN
## 22 - MYR
## 23 - NOK
## 24 - NZD
## 25 - PHP
## 26 - PLN
## 27 - RON
## 28 - RUB
## 29 - SEK
## 30 - SGD
## 31 - THB
## 32 - TRY
## 33 - USD
## 34 - ZAR
#81.686
#first 706 readings belong to first month
count <- 1          # running count of readings printed
i <- enter_index    # start at the chosen currency's first column
## NOTE(review): pickle[enter_index] is printed here and again on the first
## loop iteration (i starts at enter_index) -- confirm the duplicate print
## is intentional.
print(pickle[enter_index])
while (i < 706) {
  print(pickle[i])
  print(count)
  i <- i + 32
  count <- count + 1
}
|
/rough work/single_cur_month.r
|
no_license
|
sanket9006/2020-interns
|
R
| false
| false
| 948
|
r
|
## Print every reading of one currency for the first month of a flattened
## JSON exchange-rate dump (data.json). The JSON becomes a one-row data
## frame; sorting the columns by name groups the readings so that a fixed
## stride of 32 (the number of currency codes listed below) steps from one
## reading of the chosen currency to the next.
library("rjson")
data <- fromJSON(file="data.json")
new_dataa <- as.data.frame(data)
##new_dataa[1]
##new_dataa[8160]
## before sorting
## first value new_dataa[1]
## last value new_dataa[8160]
pickle <- new_dataa[, order(names(new_dataa))]  # columns sorted alphabetically by name
## after sorting
## first value pickle[3]
## last value pickle[8162]
enter_index <- 17  # column offset of the chosen currency (see table below)
##enter index above_dont change values below
## 03 - AUD
## 04 - BGN
## 05 - BRL
## 06 - CAD
## 07 - CHF
## 08 - CNY
## 09 - CZY
## 10 - DKK
## 11 - GBP
## 12 - HKD
## 13 - HRK
## 14 - HUF
## 15 - IDR
## 16 - ILS
## 17 - INR
## 18 - ISK
## 19 - JPY
## 20 - KRW
## 21 - MXN
## 22 - MYR
## 23 - NOK
## 24 - NZD
## 25 - PHP
## 26 - PLN
## 27 - RON
## 28 - RUB
## 29 - SEK
## 30 - SGD
## 31 - THB
## 32 - TRY
## 33 - USD
## 34 - ZAR
#81.686
#first 706 readings belong to first month
count <- 1          # running count of readings printed
i <- enter_index    # start at the chosen currency's first column
## NOTE(review): pickle[enter_index] is printed here and again on the first
## loop iteration (i starts at enter_index) -- confirm the duplicate print
## is intentional.
print(pickle[enter_index])
while (i < 706) {
  print(pickle[i])
  print(count)
  i <- i + 32
  count <- count + 1
}
|
## Extract behavioral covariates for the n=693 PNC single-subject
## functional-parcellation sample: merge demographics, in-scanner motion QA
## (rest, n-back, idemo) and cognitive factor scores onto the subject list,
## write the combined table to CSV, and save age info for MATLAB.
library(R.matlab)
subjid_df <- read.csv("/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/pncSingleFuncParcel_n693_SubjectsIDs.csv")
##########################################
### 2. Extracting behavior information ###
##########################################
# Demographics
Demographics_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/demographics/n1601_demographics_go1_20161212.csv")
# Motion (one QA table per imaging task)
Rest_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/rest/n1601_RestQAData_20170714.csv")
NBack_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/nback/nbackGlmBlockDesign/n1601_NBACKQAData_20181001.csv")
Idemo_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/idemo/n1601_idemo_FinalQA_092817.csv")
# Cognition
Cognition_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/cnb/n1601_cnb_factor_scores_tymoore_20151006.csv")
# Merge all tables onto the subject list by (scanid, bblid); merge()'s
# default all = FALSE keeps only subjects present in every table.
demo <- Reduce(function(x, y) merge(x, y, by = c("scanid", "bblid")),
               list(subjid_df, Demographics_Data, Rest_Motion_Data,
                    NBack_Motion_Data, Idemo_Motion_Data, Cognition_Data))
# Output the subjects' behavior data
write.csv(demo, "/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/n693_Behavior_20181219.csv", row.names = FALSE)
# Save age in .mat file for downstream MATLAB analyses
BBLID <- demo$bblid
ScanID <- demo$scanid
Age <- demo$ageAtScan1
writeMat('/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/Age_Info.mat', BBLID = BBLID, ScanID = ScanID, Age = Age)
|
/Step_1st_PrepareData/Step_2nd_ExtractBehavior.R
|
no_license
|
guoweiwuorgin/pncSingleFuncParcel
|
R
| false
| false
| 1,709
|
r
|
## Extract behavioral covariates for the n=693 PNC single-subject
## functional-parcellation sample: merge demographics, in-scanner motion QA
## (rest, n-back, idemo) and cognitive factor scores onto the subject list,
## write the combined table to CSV, and save age info for MATLAB.
library(R.matlab)
subjid_df <- read.csv("/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/pncSingleFuncParcel_n693_SubjectsIDs.csv")
##########################################
### 2. Extracting behavior information ###
##########################################
# Demographics
Demographics_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/demographics/n1601_demographics_go1_20161212.csv")
# Motion (one QA table per imaging task)
Rest_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/rest/n1601_RestQAData_20170714.csv")
NBack_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/nback/nbackGlmBlockDesign/n1601_NBACKQAData_20181001.csv")
Idemo_Motion_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/neuroimaging/idemo/n1601_idemo_FinalQA_092817.csv")
# Cognition
Cognition_Data <- read.csv("/data/joy/BBL/studies/pnc/n1601_dataFreeze/cnb/n1601_cnb_factor_scores_tymoore_20151006.csv")
# Merge all tables onto the subject list by (scanid, bblid); merge()'s
# default all = FALSE keeps only subjects present in every table.
demo <- Reduce(function(x, y) merge(x, y, by = c("scanid", "bblid")),
               list(subjid_df, Demographics_Data, Rest_Motion_Data,
                    NBack_Motion_Data, Idemo_Motion_Data, Cognition_Data))
# Output the subjects' behavior data
write.csv(demo, "/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/n693_Behavior_20181219.csv", row.names = FALSE)
# Save age in .mat file for downstream MATLAB analyses
BBLID <- demo$bblid
ScanID <- demo$scanid
Age <- demo$ageAtScan1
writeMat('/data/jux/BBL/projects/pncSingleFuncParcel/Replication/data/Age_Info.mat', BBLID = BBLID, ScanID = ScanID, Age = Age)
|
## Pairwise Friedman tests comparing graph-evaluation results between two
## edge-size conditions, for accuracy and completion time. All console
## output is diverted to rscript-friedman.txt via sink().
setwd("C:\\Users\\Mershack\\Documents\\NetbeansProjects\\GitHub_projects\\d3graphevaluation\\build\\web\\studies\\study5\\data")
sink("rscript-friedman.txt")
## Read one measure from each condition file, run a Friedman test with
## subjects as blocks, and write "<task> , <p-value>" to the sink'ed file.
## The number of subjects is taken from the data (previously hard-coded to
## 4), so subject labels always match the number of observations.
run_friedman <- function(file1, file2, taskname) {
  cond1 <- c(read.csv(file1)[, 1])
  cond2 <- c(read.csv(file2)[, 1])
  print(paste("TaskName =", taskname))
  combineddata <- stack(data.frame(cbind(cond1, cond2)))
  numcases <- length(cond1)   # one case per subject
  numvariables <- 2           # the two edge-size conditions
  recall.df <- data.frame(recall = combineddata,
                          subj = factor(rep(paste("subj", seq_len(numcases), sep=""), numvariables)))
  friedmanresult <- friedman.test(recall.values ~ recall.ind | subj, data = recall.df)
  cat(paste(taskname, " , ", friedmanresult$p.value, "\n"))
  print("*********************")
}
run_friedman("AccuracyResults1.txt", "AccuracyResults2.txt", "Acc_neighbor_one_step")
run_friedman("TimeResults1.txt", "TimeResults2.txt", "Time_neighbor_one_step")
sink()
|
/build/web/studies/study4/data/rscript-friedman.R
|
no_license
|
mershack/graphunit
|
R
| false
| false
| 1,543
|
r
|
## Pairwise Friedman tests comparing graph-evaluation results between two
## edge-size conditions, for accuracy and completion time. All console
## output is diverted to rscript-friedman.txt via sink().
setwd("C:\\Users\\Mershack\\Documents\\NetbeansProjects\\GitHub_projects\\d3graphevaluation\\build\\web\\studies\\study5\\data")
sink("rscript-friedman.txt")
## Read one measure from each condition file, run a Friedman test with
## subjects as blocks, and write "<task> , <p-value>" to the sink'ed file.
## The number of subjects is taken from the data (previously hard-coded to
## 4), so subject labels always match the number of observations.
run_friedman <- function(file1, file2, taskname) {
  cond1 <- c(read.csv(file1)[, 1])
  cond2 <- c(read.csv(file2)[, 1])
  print(paste("TaskName =", taskname))
  combineddata <- stack(data.frame(cbind(cond1, cond2)))
  numcases <- length(cond1)   # one case per subject
  numvariables <- 2           # the two edge-size conditions
  recall.df <- data.frame(recall = combineddata,
                          subj = factor(rep(paste("subj", seq_len(numcases), sep=""), numvariables)))
  friedmanresult <- friedman.test(recall.values ~ recall.ind | subj, data = recall.df)
  cat(paste(taskname, " , ", friedmanresult$p.value, "\n"))
  print("*********************")
}
run_friedman("AccuracyResults1.txt", "AccuracyResults2.txt", "Acc_neighbor_one_step")
run_friedman("TimeResults1.txt", "TimeResults2.txt", "Time_neighbor_one_step")
sink()
|
## the script is used for creating a tidy dataset based on
## data collected from the accelerometers from the Samsung Galaxy S smartphone
## Steps: read feature/activity metadata, load the train and test splits
## (keeping only mean()/std() features), merge the splits, attach
## descriptive activity labels, and average each feature per
## (activity, subject) pair.
library(reshape2)
library(plyr)
root_dir <- "UCI HAR Dataset"
## import label and feature
features <- read.table(paste(root_dir,"features.txt",sep="/"), stringsAsFactors=FALSE)
activity_labels <- read.table(paste(root_dir,"activity_labels.txt",sep="/"),col.names=c("labelid","label"))
## important features: only mean and standard deviation measurements,
## i.e. feature names containing "-mean(" or "-std(" (parentheses escaped)
important_features <- grep("\\-mean\\(|\\-std\\(",features[,2])
##import training data set
train_dir <- paste(root_dir,"train",sep="/")
train_subject <- read.table(paste(train_dir,"subject_train.txt",sep="/"),col.names = "subject")
## check.names = FALSE keeps the original feature names (with "()" and "-")
train_data <- read.table(paste(train_dir,"X_train.txt",sep="/"),col.names = features[,2],check.names = FALSE)
train_data <- train_data[,important_features]
train_labels <- read.table(paste(train_dir, "y_train.txt", sep="/"),col.names = "labelid")
train <- cbind(train_subject,train_labels,train_data)
## import test data set
test_dir <- paste(root_dir, "test", sep="/")
test_subject <- read.table(paste(test_dir,"subject_test.txt",sep="/"),col.names = "subject")
test_data <- read.table(paste(test_dir,"X_test.txt",sep="/"),col.names=features[,2],check.names = FALSE)
test_data <- test_data[,important_features]
test_labels <- read.table(paste(test_dir,"y_test.txt",sep="/"),col.names="labelid")
test <- cbind(test_subject,test_labels,test_data)
## merge and reshape
all_data <- rbind(train,test)
df <- merge(activity_labels,all_data, by="labelid")
df <- df[,-1]   # drop the numeric labelid; the descriptive label remains
df_molten <- melt(df,id=c("label","subject"))
## create tidy data set (mean of each feature per activity/subject)
## and write it to the disk
tidy <- dcast(df_molten, label + subject ~ variable, mean)
write.csv(tidy, file = "tidy.txt",row.names = FALSE)
|
/run_analysis.R
|
no_license
|
AndyLiu0429/GCDAssignment
|
R
| false
| false
| 1,739
|
r
|
## the script is used for creating a tidy dataset based on
## data collected from the accelerometers from the Samsung Galaxy S smartphone
## Steps: read feature/activity metadata, load the train and test splits
## (keeping only mean()/std() features), merge the splits, attach
## descriptive activity labels, and average each feature per
## (activity, subject) pair.
library(reshape2)
library(plyr)
root_dir <- "UCI HAR Dataset"
## import label and feature
features <- read.table(paste(root_dir,"features.txt",sep="/"), stringsAsFactors=FALSE)
activity_labels <- read.table(paste(root_dir,"activity_labels.txt",sep="/"),col.names=c("labelid","label"))
## important features: only mean and standard deviation measurements,
## i.e. feature names containing "-mean(" or "-std(" (parentheses escaped)
important_features <- grep("\\-mean\\(|\\-std\\(",features[,2])
##import training data set
train_dir <- paste(root_dir,"train",sep="/")
train_subject <- read.table(paste(train_dir,"subject_train.txt",sep="/"),col.names = "subject")
## check.names = FALSE keeps the original feature names (with "()" and "-")
train_data <- read.table(paste(train_dir,"X_train.txt",sep="/"),col.names = features[,2],check.names = FALSE)
train_data <- train_data[,important_features]
train_labels <- read.table(paste(train_dir, "y_train.txt", sep="/"),col.names = "labelid")
train <- cbind(train_subject,train_labels,train_data)
## import test data set
test_dir <- paste(root_dir, "test", sep="/")
test_subject <- read.table(paste(test_dir,"subject_test.txt",sep="/"),col.names = "subject")
test_data <- read.table(paste(test_dir,"X_test.txt",sep="/"),col.names=features[,2],check.names = FALSE)
test_data <- test_data[,important_features]
test_labels <- read.table(paste(test_dir,"y_test.txt",sep="/"),col.names="labelid")
test <- cbind(test_subject,test_labels,test_data)
## merge and reshape
all_data <- rbind(train,test)
df <- merge(activity_labels,all_data, by="labelid")
df <- df[,-1]   # drop the numeric labelid; the descriptive label remains
df_molten <- melt(df,id=c("label","subject"))
## create tidy data set (mean of each feature per activity/subject)
## and write it to the disk
tidy <- dcast(df_molten, label + subject ~ variable, mean)
write.csv(tidy, file = "tidy.txt",row.names = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pareto.R
\name{dparteo}
\alias{dparteo}
\title{The probability density function of the pareto distribution.}
\usage{
dparteo(x, alpha, x_m, log.p = FALSE)
}
\arguments{
\item{x}{the point where the pdf to be evaluated}
\item{alpha}{the shape parameter of pareto dist.}
\item{x_m}{the scale parameter of pareto dist.}
\item{log.p}{use log.p = TRUE when you want
to get a result in a log scale}
}
\value{
the pdf value of the pareto dist.
}
\description{
dparteo function evaluates the pdf of pareto dist. at given x with parameters.
}
\examples{
dparteo(1:5, 2, 3)
dparteo(1:5, 2, -3:3)
dparteo(1:5, 2:5, 3)
}
|
/man/dparteo.Rd
|
no_license
|
petershan1119/paretopractice
|
R
| false
| true
| 690
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pareto.R
\name{dparteo}
\alias{dparteo}
\title{The probability density function of the pareto distribution.}
\usage{
dparteo(x, alpha, x_m, log.p = FALSE)
}
\arguments{
\item{x}{the point where the pdf to be evaluated}
\item{alpha}{the shape parameter of pareto dist.}
\item{x_m}{the scale parameter of pareto dist.}
\item{log.p}{use log.p = TRUE when you want
to get a result in a log scale}
}
\value{
the pdf value of the pareto dist.
}
\description{
dparteo function evaluates the pdf of pareto dist. at given x with parameters.
}
\examples{
dparteo(1:5, 2, 3)
dparteo(1:5, 2, -3:3)
dparteo(1:5, 2:5, 3)
}
|
### Build robust-design capture histories for deermice (PEMA) on Zuni web 2.
### NOTE(review): relies on the data frames UNMcaptures (raw) and UNMdata
### (cleaned) already being present in the workspace -- loaded elsewhere.

### Get dates from the uncleaned data (NAs not removed)
head(UNMcaptures)
UNMcaptures$date <- as.Date(gsub(" ", "", UNMcaptures$Date),format="%m/%d/%Y")
sort(unique(UNMcaptures$date))
Z2.dates <- sort(unique(UNMcaptures$date[which(UNMcaptures$site=="Zuni"&UNMcaptures$web==2)]))
time.int <- diff(Z2.dates)
## a gap of more than one day between trap dates marks the start of a new
## primary session
first.dates <- Z2.dates[c(1,1+which(time.int>1))]
length(first.dates)
primary.time.int.weeks <- diff(first.dates)/7
Z2.primary.time.int.weeks <- primary.time.int.weeks
### use those dates on the cleaned data:
UNMdata$date <- as.Date(gsub(" ", "", UNMdata$Date),format="%m/%d/%Y")
Z2.data <- UNMdata[which(UNMdata$site=="Zuni"&UNMdata$web==2),]
# I want to separate out deermice. Since some of the repeaters might have -9 under letter_2, I will go by tag. These are the tag numbers I want to keep (are pema).
Z2.pema.tags <- sort(unique(Z2.data$tag[which(Z2.data$letter_2=="PM")]))
# a couple have two tags and one is seen elsewhere. make same id?
Z2.pema.tags <- Z2.pema.tags[-which(Z2.pema.tags==-9)]
## collect the row indices of all captures belonging to a PEMA tag
pema.ind <- numeric()
for(i in 1:length(Z2.pema.tags)){
  pema.ind <- c(pema.ind,which(Z2.data$tag==Z2.pema.tags[i]))
}
Z2.pema.data <- Z2.data[pema.ind,]
## Session.days[[i]] holds the trap dates (secondary occasions) of primary
## session i: from its first date up to the day before the next session
## starts (or through the last trap date for the final session).
Session.days <- list()
for(i in 1:length(first.dates)){
  Session.days[[i]] <- Z2.dates[which(Z2.dates==first.dates[i]):ifelse(i==length(first.dates),length(Z2.dates),(which(Z2.dates==first.dates[i+1])-1))]
}
################################################################################
########## Basic Robust Design CJS capture histories (0 or 1)
# A matrix for each month (primary occasion), where column is day (secondary occasion) and row is individual. Each matrix has the same number of rows - all individuals have a row each month even though for most they will be zeros.
################################################################################
IDs <- sort (unique(Z2.pema.data$tag))
Ch.list <- list()
for(m in 1:length(Session.days)){
  days <- Session.days[[m]]
  ch.mat <- matrix(NA,ncol=length(days),nrow=length(IDs))
  for(d in 1:length(days)){
    for(i in 1:length(IDs)){
      # 1 if this tag was captured on this day, 0 otherwise
      ch.mat[i,d] <- ifelse(length(which(Z2.pema.data$tag==IDs[i] & Z2.pema.data$date==days[d]))>0,1,0)
    }
  }
  dimnames(ch.mat) <- list(IDs,Session.days[[m]])
  Ch.list[[m]] <- ch.mat
  cat("session = ", m, "\n")
}
Z2.pema.Ch.secondary <- Ch.list
####### Temporal Covariates
source("TemporalCovariateFunction.R")
temporal.covariates <- temporaldata.fun(data=UNMcaptures, site="Zuni",web=2)
##############
save(Z2.pema.Ch.secondary,Session.days,temporal.covariates,Z2.primary.time.int.weeks,file="Z2pemaCH.RData")
|
/Old Code/Z2CaptureHistories.R
|
no_license
|
angieluis/BayesianMarkRecapSNV
|
R
| false
| false
| 2,611
|
r
|
### Build robust-design capture histories for deermice (PEMA) on Zuni web 2.
### NOTE(review): relies on the data frames UNMcaptures (raw) and UNMdata
### (cleaned) already being present in the workspace -- loaded elsewhere.

### Get dates from the uncleaned data (NAs not removed)
head(UNMcaptures)
UNMcaptures$date <- as.Date(gsub(" ", "", UNMcaptures$Date),format="%m/%d/%Y")
sort(unique(UNMcaptures$date))
Z2.dates <- sort(unique(UNMcaptures$date[which(UNMcaptures$site=="Zuni"&UNMcaptures$web==2)]))
time.int <- diff(Z2.dates)
## a gap of more than one day between trap dates marks the start of a new
## primary session
first.dates <- Z2.dates[c(1,1+which(time.int>1))]
length(first.dates)
primary.time.int.weeks <- diff(first.dates)/7
Z2.primary.time.int.weeks <- primary.time.int.weeks
### use those dates on the cleaned data:
UNMdata$date <- as.Date(gsub(" ", "", UNMdata$Date),format="%m/%d/%Y")
Z2.data <- UNMdata[which(UNMdata$site=="Zuni"&UNMdata$web==2),]
# I want to separate out deermice. Since some of the repeaters might have -9 under letter_2, I will go by tag. These are the tag numbers I want to keep (are pema).
Z2.pema.tags <- sort(unique(Z2.data$tag[which(Z2.data$letter_2=="PM")]))
# a couple have two tags and one is seen elsewhere. make same id?
Z2.pema.tags <- Z2.pema.tags[-which(Z2.pema.tags==-9)]
## collect the row indices of all captures belonging to a PEMA tag
pema.ind <- numeric()
for(i in 1:length(Z2.pema.tags)){
  pema.ind <- c(pema.ind,which(Z2.data$tag==Z2.pema.tags[i]))
}
Z2.pema.data <- Z2.data[pema.ind,]
## Session.days[[i]] holds the trap dates (secondary occasions) of primary
## session i: from its first date up to the day before the next session
## starts (or through the last trap date for the final session).
Session.days <- list()
for(i in 1:length(first.dates)){
  Session.days[[i]] <- Z2.dates[which(Z2.dates==first.dates[i]):ifelse(i==length(first.dates),length(Z2.dates),(which(Z2.dates==first.dates[i+1])-1))]
}
################################################################################
########## Basic Robust Design CJS capture histories (0 or 1)
# A matrix for each month (primary occasion), where column is day (secondary occasion) and row is individual. Each matrix has the same number of rows - all individuals have a row each month even though for most they will be zeros.
################################################################################
IDs <- sort (unique(Z2.pema.data$tag))
Ch.list <- list()
for(m in 1:length(Session.days)){
  days <- Session.days[[m]]
  ch.mat <- matrix(NA,ncol=length(days),nrow=length(IDs))
  for(d in 1:length(days)){
    for(i in 1:length(IDs)){
      # 1 if this tag was captured on this day, 0 otherwise
      ch.mat[i,d] <- ifelse(length(which(Z2.pema.data$tag==IDs[i] & Z2.pema.data$date==days[d]))>0,1,0)
    }
  }
  dimnames(ch.mat) <- list(IDs,Session.days[[m]])
  Ch.list[[m]] <- ch.mat
  cat("session = ", m, "\n")
}
Z2.pema.Ch.secondary <- Ch.list
####### Temporal Covariates
source("TemporalCovariateFunction.R")
temporal.covariates <- temporaldata.fun(data=UNMcaptures, site="Zuni",web=2)
##############
save(Z2.pema.Ch.secondary,Session.days,temporal.covariates,Z2.primary.time.int.weeks,file="Z2pemaCH.RData")
|
#Hands_On Book ----
# Tutorial script: basic R arithmetic, vectors, sampling, and functions.
# NOTE: To comment out text use Cmd + Shift + C
# To run ALL code CMD + SHIFT + RETURN
# To run current selection or code on line CMD + RETURN
1 + 1
#2
#
10:15
#10 11 12 13 14 15
typeof(10:15)
#integer
a <- 1
a
#1 a is an 'object'
typeof(a)
a * 3
#3
typeof(a)
#"double" meaning double length storage for number
dim <- 1:5
# (caution: 'dim' is also the name of a base function -- a questionable
#  choice of variable name)
dim
#NOTE: Names are case sensitive. Some chars are not allowed see page 8
# list variables (object names) with ls() but also view Environment window
ls()
#A virtual die ----
die <- 1:6
die[3]
#Each element squared ----
#arithmetic operators work on each element the list
die * die
#Adding a shorter list to die look at output
die + 8:10
# Same as adding 8 9 10 8 9 10  result is 9 11 13 12 14 16
#Matrix operators ----
#Inner multiplication (row onto column) -- yields a 1x1 matrix
die %*% die
#Outer multiplication (column onto row) -- yields a 6x6 matrix
die %o% die
#Simple functions ----
round(3.14159, digits = 3)
mean(die)
mean(die * die)
round(mean(die*die))
#Sampling from the die vector ----
sample(x = die, size = 2)
#You must use 'x' for the vector set
#National lottery pick 6 numbers from 1 to 59 - no replacements
sample(x = 1:59, size = 6)
sort(sample(x = 1:59, size = 6))
draw <- sample(x = 1:59, size = 6)
sort(draw)
#Can leave out variable names (x), size and use only the object name or value
sample(die, size = 3)
#To find out which args to use use 'args' function
args(sample)
sample(die,2)
sample(1:6, 4)
#Sampling with replacement ----
sample(die, 3, replace = TRUE)
# sample(die, 8) this generates an error because default is replace = FALSE
sample(die, 8, replace = TRUE) # equivalent to eight throws of dice
horses <- c("Stupor", "Filly", "Jasper")
horses
sort(horses)
sample(horses, 2)
#letters[1:26]
#Introducing Functions using the dice code
## Roll `numdice` six-sided dice and return the outcomes.
##
## Args:
##   numdice: a single positive number -- how many dice to roll.
##   rep:     passed to sample() as `replace`; TRUE (default) allows
##            repeated faces, which is required for more than 6 dice.
## Returns an integer vector of die faces, or an explanatory string when
## `numdice` is not a positive scalar number.
roll <- function(numdice, rep = TRUE){
  rolled <- "number of dice must be numeric and > 0"
  # `&&` short-circuits (scalar condition); the length check guards against
  # vector input, which `if` would reject with an error since R 4.3
  if (is.numeric(numdice) && length(numdice) == 1 && numdice > 0){
    die <- 1:6
    rolled <- sample(die, numdice, replace = rep)
  }
  rolled
}
roll(3)
roll(9)
|
/Chap1_The_Basics.R
|
no_license
|
kendogprior/R_practice
|
R
| false
| false
| 2,055
|
r
|
#Hands_On Book ----
# Tutorial script: basic R arithmetic, vectors, sampling, and functions.
# NOTE: To comment out text use Cmd + Shift + C
# To run ALL code CMD + SHIFT + RETURN
# To run current selection or code on line CMD + RETURN
1 + 1
#2
#
10:15
#10 11 12 13 14 15
typeof(10:15)
#integer
a <- 1
a
#1 a is an 'object'
typeof(a)
a * 3
#3
typeof(a)
#"double" meaning double length storage for number
dim <- 1:5
# (caution: 'dim' is also the name of a base function -- a questionable
#  choice of variable name)
dim
#NOTE: Names are case sensitive. Some chars are not allowed see page 8
# list variables (object names) with ls() but also view Environment window
ls()
#A virtual die ----
die <- 1:6
die[3]
#Each element squared ----
#arithmetic operators work on each element the list
die * die
#Adding a shorter list to die look at output
die + 8:10
# Same as adding 8 9 10 8 9 10  result is 9 11 13 12 14 16
#Matrix operators ----
#Inner multiplication (row onto column) -- yields a 1x1 matrix
die %*% die
#Outer multiplication (column onto row) -- yields a 6x6 matrix
die %o% die
#Simple functions ----
round(3.14159, digits = 3)
mean(die)
mean(die * die)
round(mean(die*die))
#Sampling from the die vector ----
sample(x = die, size = 2)
#You must use 'x' for the vector set
#National lottery pick 6 numbers from 1 to 59 - no replacements
sample(x = 1:59, size = 6)
sort(sample(x = 1:59, size = 6))
draw <- sample(x = 1:59, size = 6)
sort(draw)
#Can leave out variable names (x), size and use only the object name or value
sample(die, size = 3)
#To find out which args to use use 'args' function
args(sample)
sample(die,2)
sample(1:6, 4)
#Sampling with replacement ----
sample(die, 3, replace = TRUE)
# sample(die, 8) this generates an error because default is replace = FALSE
sample(die, 8, replace = TRUE) # equivalent to eight throws of dice
horses <- c("Stupor", "Filly", "Jasper")
horses
sort(horses)
sample(horses, 2)
#letters[1:26]
#Introducing Functions using the dice code
#' Roll one or more six-sided dice.
#'
#' @param numdice Number of dice to roll; must be a single numeric value > 0.
#' @param rep Logical; sample with replacement (TRUE by default, which allows
#'   rolling more than six dice).
#' @return An integer vector of die faces, or the explanatory message string
#'   when the input is invalid (preserves the original contract of returning
#'   the message rather than raising an error).
roll <- function(numdice, rep = TRUE) {
  rolled <- "number of dice must be numeric and > 0"
  # `&&` short-circuits and stays scalar: a non-numeric, NULL, or vector
  # input no longer reaches the `numdice > 0` comparison. The original
  # elementwise `&` produced a zero-length condition for roll(NULL) and a
  # length > 1 condition for roll(c(2, 3)), both of which crash `if`.
  if (is.numeric(numdice) && length(numdice) == 1 && numdice > 0) {
    die <- 1:6
    rolled <- sample(die, numdice, replace = rep)
  }
  return(rolled)
}
roll(3)
roll(9)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{shinyapp}
\alias{shinyapp}
\title{Class to manage a shiny app and a phantom.js headless browser}
\description{
Class to manage a shiny app and a phantom.js headless browser
}
\section{Usage}{
\preformatted{app <- shinyapp$new(path = ".", load_timeout = 5000)
app$stop()
app$get_value(name, iotype = c("auto", "input", "output"))
app$set_value(name, value, iotype = c("auto", "input", "output"))
app$send_keys(name = NULL, keys)
app$get_window_size()
app$set_window_size(width, height)
app$get_url()
app$go_back()
app$refresh()
app$get_title()
app$get_source()
app$take_screenshot(file = NULL)
app$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
app$wait_for(expr, check_interval = 100, timeout = 3000)
app$find_widget(name, iotype = c("auto", "input", "output"))
app$expect_update(output, ..., timeout = 3000,
iotype = c("auto", "input", "output"))
}
}
\section{Arguments}{
\describe{
\item{app}{A \code{shinyapp} instance.}
\item{path}{Path to a directory containing a Shiny app, i.e. a
single \code{app.R} file or a \code{server.R} and \code{ui.R}
pair.}
\item{load_timeout}{How long to wait for the app to load, in ms.
This includes the time to start R.}
\item{name}{Name of a shiny widget. For \code{$send_keys} it can
be \code{NULL}, in which case the keys are sent to the active
HTML element.}
\item{iotype}{Type of the Shiny widget. Usually \code{shinytest}
finds the widgets by their name, so this need not be specified,
but Shiny allows input and output widgets with identical names.}
\item{keys}{Keys to send to the widget or the app. See the
\code{send_keys} method of the \code{webdriver} package.}
\item{width}{Scalar integer, the desired width of the browser window.}
\item{height}{Scalar integer, the desired height of the browser
window.}
\item{file}{File name to save the screenshot to. If \code{NULL}, then
it will be shown on the R graphics device.}
\item{css}{CSS selector to find an HTML element.}
\item{link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}.}
\item{partial_link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}. It uses partial matching.}
\item{xpath}{Find HTML elements using XPath expressions.}
\item{expr}{A string scalar containing JavaScript code that
evaluates to the condition to wait for.}
\item{check_interval}{How often to check for the condition, in
milliseconds.}
\item{timeout}{Timeout for the condition, in milliseconds.}
\item{output}{Character vector, the name(s) of the Shiny output
widgets that should be updated.}
\item{...}{For \code{expect_update} these can be named arguments.
The argument names correspond to Shiny input widgets: each input
widget will be set to the specified value.}
}
}
\section{Details}{
\code{shinyapp$new()} function creates a \code{shinyapp} object. It starts
the Shiny app in a new R session, and it also starts a \code{phantomjs}
headless browser that connects to the app. It waits until the app is
ready to use. It waits at most \code{load_timeout} milliseconds, and if
the app is not ready, then it throws an error. You can increase
\code{load_timeout} for slow loading apps. Currently it supports apps
that are defined in a single \code{app.R} file, or in a \code{server.R}
and \code{ui.R} pair.
\code{app$stop()} stops the app, i.e. the external R process that runs
the app, and also the phantomjs instance.
\code{app$get_value()} finds a widget and queries its value. See
the \code{get_value} method of the \code{\link{widget}} class.
\code{app$set_value()} finds a widget and sets its value. See the
\code{set_value} method of the \code{\link{widget}} class.
\code{app$send_keys} sends the specified keys to the HTML element of the
widget.
\code{app$get_window_size()} returns the current size of the browser
window, in a list of two integer scalars named \sQuote{width} and
\sQuote{height}.
\code{app$set_window_size()} sets the size of the browser window to the
specified width and height.
\code{app$get_url()} returns the current URL.
\code{app$go_back()} \dQuote{presses} the browser's \sQuote{back}
button.
\code{app$refresh()} \dQuote{presses} the browser's \sQuote{refresh}
button.
\code{app$get_title()} returns the title of the page. (More precisely
the document title.)
\code{app$get_source()} returns the complete HTML source of the current
page, in a character scalar.
\code{app$take_screenshot()} takes a screenshot of the current page
and writes it to a file, or (if \code{file} is \code{NULL}) shows it
on the R graphics device. The output file has PNG format.
\code{app$find_element()} finds an HTML element on the page, using a
CSS selector or an XPath expression. The return value is an
\code{\link[webdriver]{element}} object from the \code{webdriver}
package.
\code{app$wait_for()} waits until a JavaScript expression evaluates
to \code{true}, or a timeout happens. It returns \code{TRUE} if the
expression evaluated to \code{true}, possibly after some waiting.
\code{app$find_widget()} finds the corresponding HTML element of a Shiny
widget. It returns a \code{\link{widget}} object.
\code{expect_update()} is one of the main functions to test Shiny apps.
It performs one or more update operations via the browser, and then
waits for the specified output widgets to update. The test succeeds if
all specified output widgets are updated before the timeout. For
updates that involve a lot of computation, you increase the timeout.
}
\examples{
\dontrun{
## https://github.com/rstudio/shiny-examples/tree/master/050-kmeans-example
app <- shinyapp$new("050-kmeans-example")
expect_update(app, xcol = "Sepal.Width", output = "plot1")
expect_update(app, ycol = "Petal.Width", output = "plot1")
expect_update(app, clusters = 4, output = "plot1")
}
}
|
/man/shinyapp.Rd
|
permissive
|
carlganz/shinytest
|
R
| false
| true
| 5,971
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/app.R
\name{shinyapp}
\alias{shinyapp}
\title{Class to manage a shiny app and a phantom.js headless browser}
\description{
Class to manage a shiny app and a phantom.js headless browser
}
\section{Usage}{
\preformatted{app <- shinyapp$new(path = ".", load_timeout = 5000)
app$stop()
app$get_value(name, iotype = c("auto", "input", "output"))
app$set_value(name, value, iotype = c("auto", "input", "output"))
app$send_keys(name = NULL, keys)
app$get_window_size()
app$set_window_size(width, height)
app$get_url()
app$go_back()
app$refresh()
app$get_title()
app$get_source()
app$take_screenshot(file = NULL)
app$find_element(css = NULL, link_text = NULL,
partial_link_text = NULL, xpath = NULL)
app$wait_for(expr, check_interval = 100, timeout = 3000)
app$find_widget(name, iotype = c("auto", "input", "output"))
app$expect_update(output, ..., timeout = 3000,
iotype = c("auto", "input", "output"))
}
}
\section{Arguments}{
\describe{
\item{app}{A \code{shinyapp} instance.}
\item{path}{Path to a directory containing a Shiny app, i.e. a
single \code{app.R} file or a \code{server.R} and \code{ui.R}
pair.}
\item{load_timeout}{How long to wait for the app to load, in ms.
This includes the time to start R.}
\item{name}{Name of a shiny widget. For \code{$send_keys} it can
be \code{NULL}, in which case the keys are sent to the active
HTML element.}
\item{iotype}{Type of the Shiny widget. Usually \code{shinytest}
finds the widgets by their name, so this need not be specified,
but Shiny allows input and output widgets with identical names.}
\item{keys}{Keys to send to the widget or the app. See the
\code{send_keys} method of the \code{webdriver} package.}
\item{width}{Scalar integer, the desired width of the browser window.}
\item{height}{Scalar integer, the desired height of the browser
window.}
\item{file}{File name to save the screenshot to. If \code{NULL}, then
it will be shown on the R graphics device.}
\item{css}{CSS selector to find an HTML element.}
\item{link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}.}
\item{partial_link_text}{Find \code{<a>} HTML elements based on their
\code{innerText}. It uses partial matching.}
\item{xpath}{Find HTML elements using XPath expressions.}
\item{expr}{A string scalar containing JavaScript code that
evaluates to the condition to wait for.}
\item{check_interval}{How often to check for the condition, in
milliseconds.}
\item{timeout}{Timeout for the condition, in milliseconds.}
\item{output}{Character vector, the name(s) of the Shiny output
widgets that should be updated.}
\item{...}{For \code{expect_update} these can be named arguments.
The argument names correspond to Shiny input widgets: each input
widget will be set to the specified value.}
}
}
\section{Details}{
\code{shinyapp$new()} function creates a \code{shinyapp} object. It starts
the Shiny app in a new R session, and it also starts a \code{phantomjs}
headless browser that connects to the app. It waits until the app is
ready to use. It waits at most \code{load_timeout} milliseconds, and if
the app is not ready, then it throws an error. You can increase
\code{load_timeout} for slow loading apps. Currently it supports apps
that are defined in a single \code{app.R} file, or in a \code{server.R}
and \code{ui.R} pair.
\code{app$stop()} stops the app, i.e. the external R process that runs
the app, and also the phantomjs instance.
\code{app$get_value()} finds a widget and queries its value. See
the \code{get_value} method of the \code{\link{widget}} class.
\code{app$set_value()} finds a widget and sets its value. See the
\code{set_value} method of the \code{\link{widget}} class.
\code{app$send_keys} sends the specified keys to the HTML element of the
widget.
\code{app$get_window_size()} returns the current size of the browser
window, in a list of two integer scalars named \sQuote{width} and
\sQuote{height}.
\code{app$set_window_size()} sets the size of the browser window to the
specified width and height.
\code{app$get_url()} returns the current URL.
\code{app$go_back()} \dQuote{presses} the browser's \sQuote{back}
button.
\code{app$refresh()} \dQuote{presses} the browser's \sQuote{refresh}
button.
\code{app$get_title()} returns the title of the page. (More precisely
the document title.)
\code{app$get_source()} returns the complete HTML source of the current
page, in a character scalar.
\code{app$take_screenshot()} takes a screenshot of the current page
and writes it to a file, or (if \code{file} is \code{NULL}) shows it
on the R graphics device. The output file has PNG format.
\code{app$find_element()} find an HTML element on the page, using a
CSS selector or an XPath expression. The return value is an
\code{\link[webdriver]{element}} object from the \code{webdriver}
package.
\code{app$wait_for()} waits until a JavaScript expression evaluates
to \code{true}, or a timeout happens. It returns \code{TRUE} is the
expression evaluated to \code{true}, possible after some waiting.
\code{app$find_widget()} finds the corresponding HTML element of a Shiny
widget. It returns a \code{\link{widget}} object.
\code{expect_update()} is one of the main functions to test Shiny apps.
It performs one or more update operations via the browser, and then
waits for the specified output widgets to update. The test succeeds if
all specified output widgets are updated before the timeout. For
updates that involve a lot of computation, you increase the timeout.
}
\examples{
\dontrun{
## https://github.com/rstudio/shiny-examples/tree/master/050-kmeans-example
app <- shinyapp$new("050-kmeans-example")
expect_update(app, xcol = "Sepal.Width", output = "plot1")
expect_update(app, ycol = "Petal.Width", output = "plot1")
expect_update(app, clusters = 4, output = "plot1")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{misc_uk}
\alias{misc_uk}
\title{misc_uk}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 214 rows and 10 columns.
}
\usage{
misc_uk
}
\description{
misc_uk
}
\keyword{datasets}
|
/man/misc_uk.Rd
|
permissive
|
sjbeckett/localcovid19now
|
R
| false
| true
| 348
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{misc_uk}
\alias{misc_uk}
\title{misc_uk}
\format{
An object of class \code{spec_tbl_df} (inherits from \code{tbl_df}, \code{tbl}, \code{data.frame}) with 214 rows and 10 columns.
}
\usage{
misc_uk
}
\description{
misc_uk
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prometheusservice_operations.R
\name{prometheusservice_describe_rule_groups_namespace}
\alias{prometheusservice_describe_rule_groups_namespace}
\title{Describe a rule groups namespace}
\usage{
prometheusservice_describe_rule_groups_namespace(workspaceId, name)
}
\arguments{
\item{workspaceId}{[required] The ID of the workspace to describe.}
\item{name}{[required] The rule groups namespace.}
}
\description{
Describe a rule groups namespace.
See \url{https://www.paws-r-sdk.com/docs/prometheusservice_describe_rule_groups_namespace/} for full documentation.
}
\keyword{internal}
|
/cran/paws.management/man/prometheusservice_describe_rule_groups_namespace.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 661
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prometheusservice_operations.R
\name{prometheusservice_describe_rule_groups_namespace}
\alias{prometheusservice_describe_rule_groups_namespace}
\title{Describe a rule groups namespace}
\usage{
prometheusservice_describe_rule_groups_namespace(workspaceId, name)
}
\arguments{
\item{workspaceId}{[required] The ID of the workspace to describe.}
\item{name}{[required] The rule groups namespace.}
}
\description{
Describe a rule groups namespace.
See \url{https://www.paws-r-sdk.com/docs/prometheusservice_describe_rule_groups_namespace/} for full documentation.
}
\keyword{internal}
|
#
# Assignment for Exploratory Data Analysis
#
#
## 00. Libraries ----
library(dplyr)
library(tidyr)
library(ggplot2)
# NOTE(review): tidyr and ggplot2 are loaded but not used below -- confirm before removing.
## 00. Import data if it exists ----
# Load each RDS only when the file is present so the script degrades gracefully.
if(file.exists("data/Source_Classification_Code.rds")){
d1 <- readRDS(file="data/Source_Classification_Code.rds")
}
if(file.exists("data/summarySCC_PM25.rds")){
d2 <- readRDS(file="data/summarySCC_PM25.rds")
}
if(exists("d1") && exists("d2"))
{
##
## Q1 - Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? ----
## Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
## for each of the years 1999, 2002, 2005, and 2008.
# Total emissions summed per year across all sources (tons).
result <- d2 %>%
group_by(year) %>%
summarise(total = sum(Emissions))
plot(result$year, result$total, type="l", main = "Total PM2.5 in the US across Years",
xlab = "Years", ylab="Total Emission PM2.5 (ton)")
points(result$year, result$total, pch=19, col = "red")
# Copy the on-screen plot to a PNG file, then close that PNG device.
dev.copy(png, "data/plot1.png")
dev.off()
}
|
/Plot1.R
|
no_license
|
mnoro/EDA_Project
|
R
| false
| false
| 1,015
|
r
|
#
# Assignment for Exploratory Data Analysis
#
#
## 00. Libraries ----
library(dplyr)
library(tidyr)
library(ggplot2)
## 00. Import data if it exists ----
if(file.exists("data/Source_Classification_Code.rds")){
d1 <- readRDS(file="data/Source_Classification_Code.rds")
}
if(file.exists("data/summarySCC_PM25.rds")){
d2 <- readRDS(file="data/summarySCC_PM25.rds")
}
if(exists("d1") && exists("d2"))
{
##
## Q1 - Have total emissions from PM2.5 decreased in the United States from 1999 to 2008? ----
## Using the base plotting system, make a plot showing the total PM2.5 emission from all sources
## for each of the years 1999, 2002, 2005, and 2008.
result <- d2 %>%
group_by(year) %>%
summarise(total = sum(Emissions))
plot(result$year, result$total, type="l", main = "Total PM2.5 in the US across Years",
xlab = "Years", ylab="Total Emission PM2.5 (ton)")
points(result$year, result$total, pch=19, col = "red")
dev.copy(png, "data/plot1.png")
dev.off()
}
|
##########################################
# #
# ADDING UP STEM QUALIFICATIONS #
# ERIKA JACOBS #
# PENNSYLVANIA STATE UNIVERSITY #
# 2018 #
# #
##########################################
##########################################
# #
# DIRECTIONS #
# #
##########################################
#To Run Code: Open this file with R Studio. Put cursor on line 29.
#Press CTRL+ENTER repeatedly to run each command in the file.
#Lines beginning with hashtag denote comments (Green Text).
##########################################
# #
# INTERVENTION ANALYSIS PROCESS #
# #
##########################################
#BEFORE ANALYSIS: If you do not have the astsa package, please run the command below...
# Guard the install so re-running the script does not reinstall (and does not
# require network access) when astsa is already available.
if (!requireNamespace("astsa", quietly = TRUE)) {
  install.packages("astsa")
}
#STEP ONE: Delete everything from the global environment
rm(list=ls())
#STEP TWO: Check location of Working Directory
getwd()
#The location of the Working Directory is where files being imported have to be.
#If Working Directory location is inconvenient, click "Session"
#on menu above in R Studio, then "Set Working Directory", then "Choose Directory..." to change.
#STEP THREE: Import .dat files, Make a Time Series Object, and Plot
#Importing "NYALL.dat", "NYSTEM.dat", and "NYNONSTEM.dat"
NYALL <- scan("NYALL.dat") #Brings in data
NYALL=ts(NYALL, start=1998) #Turns data into a Time Series object
plot(NYALL, type="b", main="All Data", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=509.0, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
NYSTEM <- scan("NYSTEM.dat") #Brings in data
NYSTEM=ts(NYSTEM, start=1998) #Turns data into a Time Series object
plot(NYSTEM, type="b", main="STEM Only", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=522.3, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
NYNONSTEM <- scan("NYNONSTEM.dat") #Brings in data
NYNONSTEM=ts(NYNONSTEM, start=1998) #Turns data into a Time Series object
plot(NYNONSTEM, type="b", main="Non STEM Only", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=496.6, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
#STEP FOUR: Split all 3 data sets into "Before" and "After" windows
#All 3 data sets have 19 years of data available
#15 years before Common Core Intervention
#4 years after Common Core Intervention
beforeALL = window (NYALL, 1998, 2012) #Before Common Core - All Data
afterALL = window (NYALL, 2013, 2016) #After Common Core - All Data
beforeSTEM = window (NYSTEM, 1998, 2012) #Before Common Core - STEM
afterSTEM = window (NYSTEM, 2013, 2016) #After Common Core - STEM
beforeNONSTEM = window (NYNONSTEM, 1998, 2012) #Before Common Core - NON STEM
afterNONSTEM = window (NYNONSTEM, 2013, 2016) #After Common Core - NON STEM
#STEP FIVE: Fit Time Series Structure to all "Before" windows
#BEGIN BY OBSERVING ACF/PACF PLOTS
#Plots and Interpretations of ACF/PACF Plots in Appendix G.
library(astsa)
acf2(beforeALL) #ACF/PACF Plot - All Data
acf2(beforeSTEM) #ACF/PACF Plot - STEM
acf2(beforeNONSTEM) #ACF/PACF Plot - NON STEM
#Testing Models - Testing 1 and 2 components for AR and MA.
#Testing all combinations of numbers possible below.
#0 stays in the middle because there is no differencing needed.
#Results of testing are summarized in Appendix H.
sarima(beforeALL,1,0,0)
sarima(beforeALL,0,0,1)
sarima(beforeALL,1,0,1)
sarima(beforeALL,2,0,0)
sarima(beforeALL,0,0,2)
sarima(beforeALL,2,0,2)
sarima(beforeALL,2,0,1)
sarima(beforeALL,1,0,2)
sarima(beforeSTEM,1,0,0)
sarima(beforeSTEM,0,0,1)
sarima(beforeSTEM,1,0,1)
sarima(beforeSTEM,2,0,0)
sarima(beforeSTEM,0,0,2)
sarima(beforeSTEM,2,0,2)
sarima(beforeSTEM,2,0,1)
sarima(beforeSTEM,1,0,2)
sarima(beforeNONSTEM,1,0,0)
sarima(beforeNONSTEM,0,0,1)
sarima(beforeNONSTEM,1,0,1)
sarima(beforeNONSTEM,2,0,0)
sarima(beforeNONSTEM,0,0,2)
sarima(beforeNONSTEM,2,0,2)
sarima(beforeNONSTEM,2,0,1)
sarima(beforeNONSTEM,1,0,2)
#Added for additional testing
sarima(beforeALL,1,0,3)
sarima(beforeALL,1,0,4)
#Residual Analysis For Final Models
#Residual outputs of final models selected in Appendix I.
ALLresiduals<-sarima(beforeALL,0,0,1)
summary(ALLresiduals)
STEMresiduals<-sarima(beforeSTEM,0,0,1)
summary(STEMresiduals)
NONSTEMresiduals<-sarima(beforeNONSTEM,1,0,2)
summary(NONSTEMresiduals)
#STEP 6: Forecast Predictions - Compare Differences
ALLpred = sarima.for(beforeALL, 4,0,0,1) #Predicted Scores 2013-2016 - ALL
ALLdiffs = afterALL - ALLpred$pred #Actual - Predicted - ALL
MeanALLdiffs<-mean(ALLdiffs) #Mean Difference in Score Per Year - ALL
STEMpred = sarima.for(beforeSTEM, 4,0,0,1) #Predicted Scores 2013-2016 - STEM
STEMdiffs = afterSTEM - STEMpred$pred #Actual - Predicted - STEM
MeanSTEMdiffs<-mean(STEMdiffs) #Mean Difference in Score Per Year - STEM
NONSTEMpred = sarima.for(beforeNONSTEM, 4,1,0,2) #Predicted Scores 2013-2016 - NON STEM
NONSTEMdiffs = afterNONSTEM - NONSTEMpred$pred #Actual - Predicted - NON STEM
MeanNONSTEMdiffs<-mean(NONSTEMdiffs) #Mean Difference in Score Per Year - NON STEM
# Mean Differences Table: one labelled row holding the per-group mean
# (actual - predicted) score differences computed above.
MeanScoreDiffs <- as.table(matrix(
  c(MeanALLdiffs, MeanSTEMdiffs, MeanNONSTEMdiffs),
  nrow = 1,
  dimnames = list("Mean Difference", c("ALL", "STEM", "NONSTEM"))
))
MeanScoreDiffs
#STEP 7: Conduct Paired T Tests
#CHECK NORMAL PROBABILITY PLOTS - Check For Normality (points close or on line)
qqnorm(ALLdiffs) #Probability Plot - ALL
qqline(ALLdiffs)
qqnorm(STEMdiffs) #Probability Plot - STEM
qqline(STEMdiffs)
qqnorm(NONSTEMdiffs) #Probability Plot - NONSTEM
qqline(NONSTEMdiffs)
#Paired T Tests Below...
ALLpred$pred #Predicted Scores - ALL
afterALL #Actual Scores - ALL
ALLdiffs #Actual Minus Predicted - ALL
t.test(afterALL, ALLpred$pred, paired=TRUE) #Testing Statistical Significance - ALL
STEMpred$pred #Predicted Scores - STEM
afterSTEM #Actual Scores - STEM
STEMdiffs #Actual Minus Predicted - STEM
t.test(afterSTEM, STEMpred$pred, paired=TRUE) #Testing Statistical Significance - STEM
NONSTEMpred$pred #Predicted Scores - NON STEM
afterNONSTEM #Actual Scores - NON STEM
NONSTEMdiffs #Actual Minus Predicted - NON STEM
t.test(afterNONSTEM, NONSTEMpred$pred, paired=TRUE) #Testing Statistical Significance - NON STEM
#Output of Paired T Tests and Normality Plots in Appendix J
#STEP 8: Wilcoxon Test, Since Normality Assumption for T Test Not Met, and Sample Too Small
wilcox.test(afterALL, ALLpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - ALL
wilcox.test(afterSTEM, STEMpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - STEM
wilcox.test(afterNONSTEM, NONSTEMpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - NONSTEM
|
/Jacobs Capstone R Script.R
|
no_license
|
ErikaJacobs/Common-Core-Intervention-Analysis
|
R
| false
| false
| 7,128
|
r
|
##########################################
# #
# ADDING UP STEM QUALIFICATIONS #
# ERIKA JACOBS #
# PENNSYLVANIA STATE UNIVERSITY #
# 2018 #
# #
##########################################
##########################################
# #
# DIRECTIONS #
# #
##########################################
#To Run Code: Open this file with R Studio. Put cursor on line 29.
#Press CTRL+ENTER repeatedly to run each command in the file.
#Lines beginning with hashtag denote comments (Green Text).
##########################################
# #
# INTERVENTION ANALYSIS PROCESS #
# #
##########################################
#BEFORE ANALYSIS: If you do not have the astsa package, please run the command below...
install.packages("astsa")
#STEP ONE: Delete everything from the global environment
rm(list=ls())
#STEP TWO: Check location of Working Directory
getwd()
#The location of the Working Directory is where files being imported have to be.
#If Working Directory location is inconvenient, click "Session"
#on menu above in R Studio, then "Set Working Directory", then "Choose Directory..." to change.
#STEP THREE: Import .dat files, Make a Time Series Object, and Plot
#Importing "NYALL.dat", "NYSTEM.dat", and "NYNONSTEM.dat"
NYALL <- scan("NYALL.dat") #Brings in data
NYALL=ts(NYALL, start=1998) #Turns data into a Time Series object
plot(NYALL, type="b", main="All Data", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=509.0, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
NYSTEM <- scan("NYSTEM.dat") #Brings in data
NYSTEM=ts(NYSTEM, start=1998) #Turns data into a Time Series object
plot(NYSTEM, type="b", main="STEM Only", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=522.3, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
NYNONSTEM <- scan("NYNONSTEM.dat") #Brings in data
NYNONSTEM=ts(NYNONSTEM, start=1998) #Turns data into a Time Series object
plot(NYNONSTEM, type="b", main="Non STEM Only", xlab="Year", ylab="Mean SAT Math Score") #Makes Graph
abline(h=496.6, col="purple") #Mean over all years
abline(v=2013, col="red") #Year of Common Core Implementation
#STEP FOUR: Split all 3 data sets into "Before" and "After" windows
#All 3 data sets have 19 years of data available
#15 years before Common Core Intervention
#4 years after Common Core Intervention
beforeALL = window (NYALL, 1998, 2012) #Before Common Core - All Data
afterALL = window (NYALL, 2013, 2016) #After Common Core - All Data
beforeSTEM = window (NYSTEM, 1998, 2012) #Before Common Core - STEM
afterSTEM = window (NYSTEM, 2013, 2016) #After Common Core - STEM
beforeNONSTEM = window (NYNONSTEM, 1998, 2012) #Before Common Core - NON STEM
afterNONSTEM = window (NYNONSTEM, 2013, 2016) #After Common Core - NON STEM
#STEP FIVE: Fit Time Series Structure to all "Before" windows
#BEGIN BY OBSERVING ACF/PACF PLOTS
#Plots and Interpretations of ACF/PACF Plots in Appendix G.
library(astsa)
acf2(beforeALL) #ACF/PACF Plot - All Data
acf2(beforeSTEM) #ACF/PACF Plot - STEM
acf2(beforeNONSTEM) #ACF/PACF Plot - NON STEM
#Testing Models - Testing 1 and 2 components for AR and MA.
#Testing all combinations of numbers possible below.
#0 stays in the middle because there is no differencing needed.
#Results of testing are summarized in Appendix H.
sarima(beforeALL,1,0,0)
sarima(beforeALL,0,0,1)
sarima(beforeALL,1,0,1)
sarima(beforeALL,2,0,0)
sarima(beforeALL,0,0,2)
sarima(beforeALL,2,0,2)
sarima(beforeALL,2,0,1)
sarima(beforeALL,1,0,2)
sarima(beforeSTEM,1,0,0)
sarima(beforeSTEM,0,0,1)
sarima(beforeSTEM,1,0,1)
sarima(beforeSTEM,2,0,0)
sarima(beforeSTEM,0,0,2)
sarima(beforeSTEM,2,0,2)
sarima(beforeSTEM,2,0,1)
sarima(beforeSTEM,1,0,2)
sarima(beforeNONSTEM,1,0,0)
sarima(beforeNONSTEM,0,0,1)
sarima(beforeNONSTEM,1,0,1)
sarima(beforeNONSTEM,2,0,0)
sarima(beforeNONSTEM,0,0,2)
sarima(beforeNONSTEM,2,0,2)
sarima(beforeNONSTEM,2,0,1)
sarima(beforeNONSTEM,1,0,2)
#Added for additional testing
sarima(beforeALL,1,0,3)
sarima(beforeALL,1,0,4)
#Residual Analysis For Final Models
#Residual outputs of final models selected in Appendix I.
ALLresiduals<-sarima(beforeALL,0,0,1)
summary(ALLresiduals)
STEMresiduals<-sarima(beforeSTEM,0,0,1)
summary(STEMresiduals)
NONSTEMresiduals<-sarima(beforeNONSTEM,1,0,2)
# Residual diagnostics for the NONSTEM model fitted earlier in the script.
summary(NONSTEMresiduals)
#STEP 6: Forecast Predictions - Compare Differences
# sarima.for() is astsa::sarima.for(xdata, n.ahead, p, d, q, ...): each call
# below produces a 4-step-ahead (2013-2016) forecast from the pre-period
# series. NOTE(review): assumes library(astsa) and the before*/after* series
# objects were created earlier in the script -- confirm.
ALLpred = sarima.for(beforeALL, 4,0,0,1) #Predicted Scores 2013-2016 - ALL (ARIMA(0,0,1) fit)
ALLdiffs = afterALL - ALLpred$pred #Actual - Predicted - ALL
MeanALLdiffs<-mean(ALLdiffs) #Mean Difference in Score Per Year - ALL
STEMpred = sarima.for(beforeSTEM, 4,0,0,1) #Predicted Scores 2013-2016 - STEM (ARIMA(0,0,1) fit)
STEMdiffs = afterSTEM - STEMpred$pred #Actual - Predicted - STEM
MeanSTEMdiffs<-mean(STEMdiffs) #Mean Difference in Score Per Year - STEM
NONSTEMpred = sarima.for(beforeNONSTEM, 4,1,0,2) #Predicted Scores 2013-2016 - NON STEM (ARIMA(1,0,2) fit)
NONSTEMdiffs = afterNONSTEM - NONSTEMpred$pred #Actual - Predicted - NON STEM
MeanNONSTEMdiffs<-mean(NONSTEMdiffs) #Mean Difference in Score Per Year - NON STEM
#Mean Differences Table
MeanScoreDiffs<-matrix(c(MeanALLdiffs,MeanSTEMdiffs,MeanNONSTEMdiffs),ncol=3,byrow=TRUE)
colnames(MeanScoreDiffs)<-c("ALL","STEM","NONSTEM")
rownames(MeanScoreDiffs)<-c("Mean Difference")
MeanScoreDiffs<-as.table(MeanScoreDiffs)
MeanScoreDiffs
#STEP 7: Conduct Paired T Tests
#CHECK NORMAL PROBABILITY PLOTS - Check For Normality (points close or on line)
# With only four forecast-period differences per series, these QQ plots are
# weak evidence either way (see the STEP 8 non-parametric fallback below).
qqnorm(ALLdiffs) #Probability Plot - ALL
qqline(ALLdiffs)
qqnorm(STEMdiffs) #Probability Plot - STEM
qqline(STEMdiffs)
qqnorm(NONSTEMdiffs) #Probability Plot - NONSTEM
qqline(NONSTEMdiffs)
#Paired T Tests Below...
# The bare expressions below just print intermediate values for inspection.
ALLpred$pred #Predicted Scores - ALL
afterALL #Actual Scores - ALL
ALLdiffs #Actual Minus Predicted - ALL
t.test(afterALL, ALLpred$pred, paired=TRUE) #Testing Statistical Significance - ALL
STEMpred$pred #Predicted Scores - STEM
afterSTEM #Actual Scores - STEM
STEMdiffs #Actual Minus Predicted - STEM
t.test(afterSTEM, STEMpred$pred, paired=TRUE) #Testing Statistical Significance - STEM
NONSTEMpred$pred #Predicted Scores - NON STEM
afterNONSTEM #Actual Scores - NON STEM
NONSTEMdiffs #Actual Minus Predicted - NON STEM
t.test(afterNONSTEM, NONSTEMpred$pred, paired=TRUE) #Testing Statistical Significance - NON STEM
#Output of Paired T Tests and Normality Plots in Appendix J
#STEP 8: Wilcoxon Test, Since Normality Assumption for T Test Not Met, and Sample Too Small
wilcox.test(afterALL, ALLpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - ALL
wilcox.test(afterSTEM, STEMpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - STEM
wilcox.test(afterNONSTEM, NONSTEMpred$pred, paired = TRUE, alternative = "two.sided") #Wilcoxon - NONSTEM
|
## TODO: pass ... to all the things
## TODO: totally rewrite in terms of tidyverse functions
#' Substantive effects via simulation
#' @param m a model.
#' @param x a population within which to simulate effects. Default \code{model.frame(m)} (the original population).
#' @param Z a named list of two or more contrasting values for each variable. Default \code{bestz(m, x)}.
#' @param f a function for boiling down results. This is a function, not a string. Default \code{value}.
#' @param y values of the outcome representing success under \code{f}. Default \code{0}.
#' @param n number of simulated response vectors to generate for each level for each test. Default \code{1000}.
#' @param parallel logical. Passed on to \code{\link[plyr]{ddply}} at the outermost (term and level) levels.
#' @param w weights with respect to which to take averages. Default \code{\link{getweights}(m, x)}.
#' @param g function. the desired one-number summary of the boiled values. Default \code{stats::\link[stats]{weighted.mean}}.
#' @param ... other arguments to functions used within.
#' @return a tall \code{data.frame} with class \code{gilez} to pass to \code{\link{gdiff}} or \code{\link[wickr]{sumer}}.
#' @export
gilez <- function(m, x=stats::model.frame(m), Z=bestz(m, x), f=value, y=0, n=1000, parallel=FALSE, w=getweights(m, x),
                  g = stats::weighted.mean, ...) {
  # B is computed once by consider() and passed to every (term, level) cell
  # below, so all contrasts share the same simulation draws.
  B <- consider(m, x, n, ...)
  Y <- plyr::ddply(Z, c("term", "level"), imagine, m=m, x=x, f=f, y=y, .parallel=parallel, w=w, B=B, g=g)
  # Coerce every identifier column to character; only the last column stays numeric.
  Y[, -ncol(Y)] <- lapply(Y[, -ncol(Y)], as.character)
  colnames(Y)[ncol(Y)] <- "value" # TODO: name the result column less inelegantly
  class(Y) <- c("gilez", class(Y))
  # Carry the model summary along so downstream sumer() methods can describe the source model.
  attributes(Y)$sumer <- attr(wickr::sumer(m), "sumer")
  Y
}
#' Contrast effects for different levels with each other
#'
#' Self-joins a \code{\link{gilez}} result so that each level's value is
#' differenced against every other level of the same term.
#' @param Y a \code{\link{gilez}} object
#' @return another \code{gilez} object with a \code{baseline} column added and
#'   \code{value} replaced by the level-minus-baseline difference.
#' @export
gdiff <- function(Y) {
  W <- Y
  # Rename the copy's columns so the join below pairs each row of Y with the
  # other levels ("baseline"); "minus" temporarily holds the baseline's value.
  colnames(W) <- stringr::str_replace(colnames(W), "level", "baseline")
  colnames(W) <- stringr::str_replace(colnames(W), "value", "minus")
  W <- plyr::join(as.data.frame(Y), as.data.frame(W), intersect(colnames(Y), colnames(W)))
  W <- W[W$level != W$baseline, ]  # drop self-contrasts (level vs itself)
  W$value <- W$value - W$minus
  W <- W[, setdiff(colnames(W), "minus")]
  # Re-attach the gilez class exactly once, at the front of the class vector.
  class(W) <- c("gilez", setdiff(class(W), "gilez"))
  attributes(W)$sumer <- attr(Y, "sumer")
  W
}
#' Add simulated outputs across objects
#'
#' Sums the simulated \code{value} column across a list of
#' \code{\link{gilez}} objects, matching rows on \code{term}, \code{level}
#' and \code{.id}, and combines their "sumer" attributes. All inputs must
#' come from the same kind of model and from simulations run with identical
#' settings; both conditions are checked and violations raise an error.
#'
#' @param G a list of \code{\link{gilez}} objects
#' @return a \code{\link{gilez}} object
#' @export
add_up <- function(G) {
  # Combine the per-object model summaries. Grouping on every non-numeric
  # column must collapse them to a single row; otherwise the models differ.
  H <- lapply(G, attr, which="sumer")
  H <- purrr::transpose(H)
  H <- lapply(H, unlist)
  H <- dplyr::as_tibble(H)
  H <- dplyr::group_by_if(H, function(x) {!is.numeric(x)})
  H <- dplyr::select(H, .data$n)
  H <- dplyr::summarise_all(H, sum)
  if(nrow(H) > 1) {stop("Foo! Don't add simulated outcomes across different kinds of models.")}
  # BUG FIX: was `1:length(G)`, which yields c(1, 0) for an empty list;
  # seq_along() degrades gracefully to integer(0).
  G <- dplyr::tibble(obj = seq_along(G), gilez=G)
  G <- tidyr::unnest(G)
  G <- dplyr::group_by(G, .data$term, .data$level, .data$.id) # assume for now these are always there under these names? I think they are
  G <- dplyr::summarise(G, value = sum(.data$value), count = dplyr::n())
  # Every (term, level, .id) cell must occur the same number of times across
  # inputs; a mixed count means the simulations used different settings.
  if(length(table(G$count)) > 1) {stop("Foo! Don't add outcomes across simulations with different settings.")}
  G <- dplyr::select(G, -.data$count)
  class(G) <- c("gilez", setdiff(class(G), "gilez"))
  attributes(G)$sumer <- as.list(as.data.frame(H))
  G
}
#' @importFrom rlang .data
#' @export
rlang::.data
|
/R/gilez.R
|
no_license
|
deepfriar/gilez
|
R
| false
| false
| 3,496
|
r
|
## TODO: pass ... to all the things
## TODO: totally rewrite in temrs of tidyverse functions
#' Substantive effects via simulation
#' @param m a model.
#' @param x a population within which to simulate effects. Default \code{model.frame(m)} (the original population).
#' @param Z a named ist of two or more contrasting values for each variable. Default \code{bestz(m, x)}.
#' @param f a function for boiling down results. This is a function, not a string. Default \code{value}.
#' @param y values of the outcome representing success under \code{f}. Default \code{0}.
#' @param n number of simulated response vectors to generate for each level for each test. Default \code{1000}.
#' @param parallel logical. Passed on to \code{\link[plyr]{ddply}} at the outermost (term and level) levels.
#' @param w weights with respect to which to take averages. Default \code{\link{getweights}(m, x)}.
#' @param g function. the desired one-number summary of the boiled values. Default \code{stats::\link[stats]{weighted.mean}}.
#' @param ... other arguments to functions used within.
#' @return a tall \code{data.frame} with class \code{gilez} to pass to \code{\link{gdiff}} or \code{\link[wickr]{sumer}}.
#' @export
gilez <- function(m, x=stats::model.frame(m), Z=bestz(m, x), f=value, y=0, n=1000, parallel=FALSE, w=getweights(m, x),
g = stats::weighted.mean, ...) {
B <- consider(m, x, n, ...)
Y <- plyr::ddply(Z, c("term", "level"), imagine, m=m, x=x, f=f, y=y, .parallel=parallel, w=w, B=B, g=g)
Y[, -ncol(Y)] <- lapply(Y[, -ncol(Y)], as.character)
colnames(Y)[ncol(Y)] <- "value" # TODO: make not inelegant
class(Y) <- c("gilez", class(Y))
attributes(Y)$sumer <- attr(wickr::sumer(m), "sumer")
Y
}
#' Contrast effects for different levels with each other
#' @param Y a \code{\link{gilez}} object
#' @return another \code{gilez} object but with more colummzz!!!1
#' @export
gdiff <- function(Y) {
W <- Y
colnames(W) <- stringr::str_replace(colnames(W), "level", "baseline")
colnames(W) <- stringr::str_replace(colnames(W), "value", "minus")
W <- plyr::join(as.data.frame(Y), as.data.frame(W), intersect(colnames(Y), colnames(W)))
W <- W[W$level != W$baseline, ]
W$value <- W$value - W$minus
W <- W[, setdiff(colnames(W), "minus")]
class(W) <- c("gilez", setdiff(class(W), "gilez"))
attributes(W)$sumer <- attr(Y, "sumer")
W
}
#' Add simulated outputs across objects
#'
#' @param G a list of \code{\link{gilez}} objects
#' @return a \code{\link{gilez}} object
#' @export
add_up <- function(G) {
H <- lapply(G, attr, which="sumer")
H <- purrr::transpose(H)
H <- lapply(H, unlist)
H <- dplyr::as_tibble(H)
H <- dplyr::group_by_if(H, function(x) {!is.numeric(x)})
H <- dplyr::select(H, .data$n)
H <- dplyr::summarise_all(H, sum)
if(nrow(H) > 1) {stop("Foo! Don't add simulated outcomes across different kinds of models.")}
G <- dplyr::tibble(obj = 1:length(G), gilez=G)
G <- tidyr::unnest(G)
G <- dplyr::group_by(G, .data$term, .data$level, .data$.id) # assume for now these are always there under these names? I think they are
G <- dplyr::summarise(G, value = sum(.data$value), count = dplyr::n())
if(length(table(G$count)) > 1) {stop("Foo! Don't add outcomes across simulations with different settings.")}
G <- dplyr::select(G, -.data$count)
class(G) <- c("gilez", setdiff(class(G), "gilez"))
attributes(G)$sumer <- as.list(as.data.frame(H))
G
}
#' @importFrom rlang .data
#' @export
rlang::.data
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 23182
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 23182
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7913
c no.of clauses 23182
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 23182
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp.qdimacs 7913 23182 E1 [] 0 138 7775 23182 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 732
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 23182
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 23182
c
c Input Parameter (command line, file):
c input filename QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 7913
c no.of clauses 23182
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 23182
c
c QBFLIB/Amendola-Ricca-Truszczynski/selection-hard/ctrl.e#1.a#3.E#138.A#48.c#.w#7.s#35.asp.qdimacs 7913 23182 E1 [] 0 138 7775 23182 NONE
|
#Creates the caregiver_ed table: one row per child with the caregiver
#questionnaire columns plus num_caregivers.
#NOTE(review): the original header described this as "the average educational
#attainment of the child's caregivers", but the code computes a row-wise SUM
#of the pq7*_2 indicator columns (a caregiver count), not an average --
#confirm which is intended.
library(dplyr)
#Load in data
#NOTE(review): dir() may match more than one export; read.delim() expects a
#single file path -- confirm exactly one file matches "^questionnaire_20".
questionnaire <- read.delim(dir("data_categories/questionnaire/",
                           full.names=T, pattern="^questionnaire_20"),header=TRUE, sep="\t")
#Create dataframe
#BUG FIX: the original called rowSums(caregiver_ed[, ...]) inside the very
#pipeline that defines caregiver_ed, so that object did not yet exist at
#evaluation time (or a stale copy from a previous run would be used
#silently). across() selects the same columns from the data frame flowing
#through the pipe instead.
caregiver_ed <- questionnaire %>%
  select(record_id,
         pq7a_1, #Birth mother
         pq7a_2,
         pq7b_1, #Birth father
         pq7b_2,
         pq7c_1, #Step mother
         pq7c_2,
         pq7d_1, #Step father
         pq7d_2,
         pq7e_1, #Adoptive mother
         pq7e_2,
         pq7f_1, #Adoptive father
         pq7f_2,
         pq7g_1, #Maternal grandmother
         pq7g_2,
         pq7gh_1, #Maternal grandfather -- NOTE(review): "pq7gh" breaks the pq7a..pq7m pattern; confirm column name
         pq7gh_2,
         pgm_1, #Paternal grandmother -- NOTE(review): "pgm_1" breaks the pattern (pq7i_2 below suggests pq7i_1); confirm
         pq7i_2,
         pq7j_1, #Paternal grandfather
         pq7j_2,
         pq7k_1, #Foster mother
         pq7k_2,
         pq7l_1, #Foster father
         pq7l_2,
         pq7m_1, #Birth mother -- NOTE(review): duplicate "Birth mother" label; confirm what pq7m denotes
         pq7m_2) %>%
  mutate(num_caregivers = rowSums(across(c(pq7a_2, pq7b_2, pq7c_2, pq7d_2,
                                           pq7e_2, pq7f_2, pq7g_2, pq7gh_2,
                                           pq7i_2, pq7j_2, pq7k_2, pq7l_2,
                                           pq7m_2)),
                                  na.rm = TRUE))
|
/caregiver_ed.R
|
no_license
|
hddsilva/participant_subsets
|
R
| false
| false
| 1,325
|
r
|
#Creates a caregiver_ed score, the average educational attainment of the child's caregivers
library(dplyr)
#Load in data
questionnaire <- read.delim(dir("data_categories/questionnaire/",
full.names=T, pattern="^questionnaire_20"),header=TRUE, sep="\t")
#Create dataframe
caregiver_ed <- questionnaire %>%
select(record_id,
pq7a_1, #Birth mother
pq7a_2,
pq7b_1, #Birth father
pq7b_2,
pq7c_1, #Step mother
pq7c_2,
pq7d_1, #Step father
pq7d_2,
pq7e_1, #Adoptive mother
pq7e_2,
pq7f_1, #Adoptive father
pq7f_2,
pq7g_1, #Maternal grandmother
pq7g_2,
pq7gh_1, #Maternal grandfather
pq7gh_2,
pgm_1, #Paternal grandmother
pq7i_2,
pq7j_1, #Paternal grandfather
pq7j_2,
pq7k_1, #Foster mother
pq7k_2,
pq7l_1, #Foster father
pq7l_2,
pq7m_1, #Birth mother
pq7m_2) %>%
mutate(num_caregivers = rowSums(caregiver_ed[, c("pq7a_2","pq7b_2","pq7c_2","pq7d_2",
"pq7e_2","pq7f_2","pq7g_2","pq7gh_2",
"pq7i_2","pq7j_2","pq7k_2","pq7l_2","pq7m_2")], na.rm = TRUE))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gets.R
\name{getUserPlayslists}
\alias{getUserPlayslists}
\title{Returns a data frame of the user's playlists.}
\usage{
getUserPlayslists(userID)
}
\arguments{
\item{userID}{The wanted user id as a string}
}
\value{
Returns a data frame that contains the names of the playlists, the total
number of songs on each list and the Spotify URI of the playlist.
}
\description{
Returns a data frame of the user's playlists.
}
|
/man/getUserPlayslists.Rd
|
no_license
|
topiaskarjalainen/SpotR
|
R
| false
| true
| 490
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gets.R
\name{getUserPlayslists}
\alias{getUserPlayslists}
\title{Retuns a data frame of the users playlists.}
\usage{
getUserPlayslists(userID)
}
\arguments{
\item{userID}{The wanted user id as a string}
}
\value{
Returns a dataframe that contains the names of the palylists, the total
amount of songs on the list and the spotify URI of the playlist
}
\description{
Retuns a data frame of the users playlists.
}
|
# Unit tests for range-specification parsing (as_range_spec) and rendering
# back to A1 notation (as_A1_range).
# NOTE(review): relies on package-internal helpers (as_range_spec,
# new_range_spec) and cell_rows()/cell_cols() being in scope via the
# testthat harness -- confirm the test setup attaches them.
# as_range_spec() ----
test_that("as_range_spec() rejects hopeless input", {
  expect_error(as_range_spec(3), "Can't make a range")
})
test_that("as_range_spec() can deal with nothingness", {
  spec <- as_range_spec(NULL)
  expect_true(all(map_lgl(spec, ~ is.null(.x) || isFALSE(.x))))
})
test_that("as_range_spec() partitions 'Sheet1!A1:B2'", {
  sheets_df <- tibble::tibble(name = "Sheet1")
  spec <- as_range_spec("Sheet1!A1:B2", sheets_df = sheets_df)
  expect_identical(spec$sheet_name, "Sheet1")
  expect_identical(spec$cell_range, "A1:B2")
  expect_true(spec$shim)
  spec <- as_range_spec("'Sheet1'!A5:A", sheets_df = sheets_df)
  # make sure we store unescaped name in range_spec
  expect_identical(spec$sheet_name, "Sheet1")
  expect_identical(spec$cell_range, "A5:A")
  expect_true(spec$shim)
})
test_that("as_range_spec() seeks a named range, then a sheet name", {
  nr_df <- tibble::tibble(name = c("a", "thingy", "z"))
  spec <- as_range_spec("thingy", nr_df = nr_df)
  expect_null(spec$sheet_name)
  expect_identical(spec$named_range, "thingy")
  expect_false(spec$shim)
  # "thingy" appears in both lookups: the named range must win.
  spec <- as_range_spec("thingy", nr_df = nr_df, sheets_df = nr_df)
  expect_null(spec$sheet_name)
  expect_identical(spec$named_range, "thingy")
  expect_false(spec$shim)
  # "thingy" only matches a sheet name here, so it resolves as a sheet.
  spec <- as_range_spec(
    "thingy",
    nr_df = tibble::tibble(name = letters[1:3]),
    sheets_df = nr_df
  )
  expect_null(spec$named_range)
  expect_identical(spec$sheet_name, "thingy")
  expect_false(spec$shim)
})
test_that("A1 range is detected, w/ or w/o sheet", {
  spec <- as_range_spec("1:2")
  expect_identical(spec$cell_range, "1:2")
  expect_true(spec$shim)
  sheets_df <- tibble::tibble(name = LETTERS[1:3])
  # sheet given by position ...
  spec <- as_range_spec("1:2", sheet = 3, sheets_df = sheets_df)
  expect_identical(spec$sheet_name, "C")
  expect_identical(spec$cell_range, "1:2")
  expect_true(spec$shim)
  # ... and by name.
  spec <- as_range_spec("1:2", sheet = "B", sheets_df = sheets_df)
  expect_identical(spec$sheet_name, "B")
  expect_identical(spec$cell_range, "1:2")
  expect_true(spec$shim)
})
test_that("skip is converted to equivalent cell limits", {
  spec <- as_range_spec(x = NULL, skip = 1)
  expect_equal(spec$cell_limits, cell_rows(c(2, NA)))
})
test_that("cell_limits input works, w/ or w/o sheet", {
  spec <- as_range_spec(cell_rows(1:2))
  expect_equal(spec$cell_limits, cell_rows(1:2))
  expect_true(spec$shim)
  sheets_df <- tibble::tibble(name = LETTERS[1:3])
  spec <- as_range_spec(cell_rows(1:2), sheet = 3, sheets_df = sheets_df)
  expect_equal(spec$sheet_name, "C")
  expect_equal(spec$cell_limits, cell_rows(1:2))
  expect_true(spec$shim)
  spec <- as_range_spec(cell_rows(1:2), sheet = "B", sheets_df = sheets_df)
  expect_equal(spec$sheet_name, "B")
  expect_equal(spec$cell_limits, cell_rows(1:2))
  expect_true(spec$shim)
})
test_that("invalid range is rejected", {
  # no named ranges or sheet names for lookup --> interpret as A1
  expect_error(
    as_range_spec("thingy"),
    "doesn't appear to be"
  )
  # NOTE(review): this call uses nr_names=/sheet_names= while every other
  # call in this file uses nr_df=/sheets_df= -- presumably absorbed by ...;
  # confirm the intended argument names.
  expect_error(
    as_range_spec("thingy", nr_names = "nope", sheet_names = "nah"),
    "doesn't appear to be"
  )
})
test_that("unresolvable sheet raises error", {
  expect_error(as_range_spec("A5:A", sheet = 3), "Can't look up")
  expect_error(as_range_spec(x = NULL, sheet = 3), "Can't look up")
  sheets_df <- tibble::tibble(name = LETTERS[1:3])
  expect_error(
    as_range_spec(x = NULL, sheet = "nope", sheets_df = sheets_df),
    class = "googlesheets4_error_sheet_not_found"
  )
  expect_error(
    as_range_spec("A5:A", sheet = "nope", sheets_df = sheets_df),
    class = "googlesheets4_error_sheet_not_found"
  )
  expect_error(
    as_range_spec("nope!A5:A", sheets_df = sheets_df),
    class = "googlesheets4_error_sheet_not_found"
  )
})
# as_A1_range() ----
test_that("as_A1_range() works", {
  expect_null(as_A1_range(new_range_spec()))
  expect_equal(as_A1_range(new_range_spec(sheet_name = "Sheet1")), "'Sheet1'")
  expect_equal(as_A1_range(new_range_spec(named_range = "abc")), "abc")
  expect_equal(as_A1_range(new_range_spec(cell_range = "B3:D9")), "B3:D9")
  expect_equal(
    as_A1_range(new_range_spec(sheet_name = "Sheet1", cell_range = "A1")),
    "'Sheet1'!A1"
  )
  rs <- new_range_spec(cell_limits = cell_cols(3:5))
  expect_equal(as_A1_range(rs), "C:E")
  rs <- new_range_spec(sheet_name = "Sheet1", cell_limits = cell_rows(2:3))
  expect_equal(as_A1_range(rs), "'Sheet1'!2:3")
})
|
/tests/testthat/test-range_spec.R
|
permissive
|
selesnow/googlesheets4
|
R
| false
| false
| 4,410
|
r
|
# as_range_spec() ----
test_that("as_range_spec() rejects hopeless input", {
expect_error(as_range_spec(3), "Can't make a range")
})
test_that("as_range_spec() can deal with nothingness", {
spec <- as_range_spec(NULL)
expect_true(all(map_lgl(spec, ~ is.null(.x) || isFALSE(.x))))
})
test_that("as_range_spec() partitions 'Sheet1!A1:B2'", {
sheets_df <- tibble::tibble(name = "Sheet1")
spec <- as_range_spec("Sheet1!A1:B2", sheets_df = sheets_df)
expect_identical(spec$sheet_name, "Sheet1")
expect_identical(spec$cell_range, "A1:B2")
expect_true(spec$shim)
spec <- as_range_spec("'Sheet1'!A5:A", sheets_df = sheets_df)
# make sure we store unescaped name in range_spec
expect_identical(spec$sheet_name, "Sheet1")
expect_identical(spec$cell_range, "A5:A")
expect_true(spec$shim)
})
test_that("as_range_spec() seeks a named range, then a sheet name", {
nr_df <- tibble::tibble(name = c("a", "thingy", "z"))
spec <- as_range_spec("thingy", nr_df = nr_df)
expect_null(spec$sheet_name)
expect_identical(spec$named_range, "thingy")
expect_false(spec$shim)
spec <- as_range_spec("thingy", nr_df = nr_df, sheets_df = nr_df)
expect_null(spec$sheet_name)
expect_identical(spec$named_range, "thingy")
expect_false(spec$shim)
spec <- as_range_spec(
"thingy",
nr_df = tibble::tibble(name = letters[1:3]),
sheets_df = nr_df
)
expect_null(spec$named_range)
expect_identical(spec$sheet_name, "thingy")
expect_false(spec$shim)
})
test_that("A1 range is detected, w/ or w/o sheet", {
spec <- as_range_spec("1:2")
expect_identical(spec$cell_range, "1:2")
expect_true(spec$shim)
sheets_df <- tibble::tibble(name = LETTERS[1:3])
spec <- as_range_spec("1:2", sheet = 3, sheets_df = sheets_df)
expect_identical(spec$sheet_name, "C")
expect_identical(spec$cell_range, "1:2")
expect_true(spec$shim)
spec <- as_range_spec("1:2", sheet = "B", sheets_df = sheets_df)
expect_identical(spec$sheet_name, "B")
expect_identical(spec$cell_range, "1:2")
expect_true(spec$shim)
})
test_that("skip is converted to equivalent cell limits", {
spec <- as_range_spec(x = NULL, skip = 1)
expect_equal(spec$cell_limits, cell_rows(c(2, NA)))
})
test_that("cell_limits input works, w/ or w/o sheet", {
spec <- as_range_spec(cell_rows(1:2))
expect_equal(spec$cell_limits, cell_rows(1:2))
expect_true(spec$shim)
sheets_df <- tibble::tibble(name = LETTERS[1:3])
spec <- as_range_spec(cell_rows(1:2), sheet = 3, sheets_df = sheets_df)
expect_equal(spec$sheet_name, "C")
expect_equal(spec$cell_limits, cell_rows(1:2))
expect_true(spec$shim)
spec <- as_range_spec(cell_rows(1:2), sheet = "B", sheets_df = sheets_df)
expect_equal(spec$sheet_name, "B")
expect_equal(spec$cell_limits, cell_rows(1:2))
expect_true(spec$shim)
})
test_that("invalid range is rejected", {
# no named ranges or sheet names for lookup --> interpret as A1
expect_error(
as_range_spec("thingy"),
"doesn't appear to be"
)
expect_error(
as_range_spec("thingy", nr_names = "nope", sheet_names = "nah"),
"doesn't appear to be"
)
})
test_that("unresolvable sheet raises error", {
expect_error(as_range_spec("A5:A", sheet = 3), "Can't look up")
expect_error(as_range_spec(x = NULL, sheet = 3), "Can't look up")
sheets_df <- tibble::tibble(name = LETTERS[1:3])
expect_error(
as_range_spec(x = NULL, sheet = "nope", sheets_df = sheets_df),
class = "googlesheets4_error_sheet_not_found"
)
expect_error(
as_range_spec("A5:A", sheet = "nope", sheets_df = sheets_df),
class = "googlesheets4_error_sheet_not_found"
)
expect_error(
as_range_spec("nope!A5:A", sheets_df = sheets_df),
class = "googlesheets4_error_sheet_not_found"
)
})
# as_A1_range() ----
test_that("as_A1_range() works", {
expect_null(as_A1_range(new_range_spec()))
expect_equal(as_A1_range(new_range_spec(sheet_name = "Sheet1")), "'Sheet1'")
expect_equal(as_A1_range(new_range_spec(named_range = "abc")), "abc")
expect_equal(as_A1_range(new_range_spec(cell_range = "B3:D9")), "B3:D9")
expect_equal(
as_A1_range(new_range_spec(sheet_name = "Sheet1", cell_range = "A1")),
"'Sheet1'!A1"
)
rs <- new_range_spec(cell_limits = cell_cols(3:5))
expect_equal(as_A1_range(rs), "C:E")
rs <- new_range_spec(sheet_name = "Sheet1", cell_limits = cell_rows(2:3))
expect_equal(as_A1_range(rs), "'Sheet1'!2:3")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findStations.R
\name{cf_find_station}
\alias{cf_find_station}
\title{Search for Clifro Stations}
\usage{
cf_find_station(..., search = c("name", "region", "network", "latlong"),
datatype, combine = c("all", "any"), status = c("open", "closed", "all"))
}
\arguments{
\item{...}{arguments to pass into the search, these differ depending on
\code{search}.}
\item{search}{one of \code{name}, \code{network}, \code{region} or
\code{latlong} indicating the type of search to be conducted.}
\item{datatype}{\code{cfDatatype} object for when the search is based on
datatypes.}
\item{combine}{character string \code{"all"} or \code{"any"} indicating if the
stations contain all or any of the selected datatypes for when the search is
based on datatypes.}
\item{status}{character string indicating \code{"open"}, \code{"closed"} or
\code{"all"} stations be returned by the search.}
}
\value{
\code{cfStation} object
}
\description{
Search for \pkg{clifro} stations based on name, region, location or network
number, and return a \code{cfStation} object.
}
\details{
The \code{cf_find_station} function is a convenience function for finding
CliFlo stations in \R. It uses the CliFlo
\href{https://cliflo.niwa.co.nz/pls/niwp/wstn.get_stn_html}{Find Stations}
page to do the searching, and therefore means that the stations are not
stored within \pkg{clifro}.
If \code{datatype} is missing then the search is conducted
without any reference to datatypes. If it is supplied then the
search will only return stations that have any or all of the supplied
datatypes, depending on \code{combine}. The default behaviour is to search
for stations based on pattern matching the station name and return only the
open stations.
If the \code{latlong} search type is used the function expects named
arguments with names (partially) matching latitude,
longitude and radius. If the arguments are passed in without names they must
be in order of latitude, longitude and radius (see examples).
}
\note{
Since the searching is done by CliFlo there are obvious restrictions.
Unfortunately the pattern matching for station name does not provide
functionality for regular expressions, nor does it allow simultaneous
searches although \pkg{clifro} does provide some extra functionality, see
the 'OR query Search' example below.
}
\examples{
\dontrun{
# Station Name Search ------------------------------------------------------
# Return all open stations with 'island' in the name (pattern match search)
# Note this example uses all the defaults
island_st = cf_find_station("island")
island_st
# Region Search ------------------------------------------------------------
# Return all the closed stations from Queenstown (using partial matching)
queenstown.st = cf_find_station("queen", search = "region", status = "closed")
queenstown.st
# Long/Lat Search ----------------------------------------------------------
# Return all open stations within a 10km radius of the Beehive in Wellington
# From Wikipedia: latitude 41.2784 S, longitude 174.7767 E
beehive.st = cf_find_station(lat = -41.2784, long = 174.7767, rad = 10,
search = "latlong")
beehive.st
# Network ID Search --------------------------------------------------------
# Return all stations that share A42 in their network ID
A42.st = cf_find_station("A42", search = "network", status = "all")
A42.st
# Using Datatypes in the Search --------------------------------------------
# Is the Reefton EWS station open and does it collect daily rain and/or wind
# data?
# First, create the daily rain and wind datatypes
daily.dt = cf_datatype(c(2, 3), c(1, 1), list(4, 1), c(1, NA))
daily.dt
# Then combine into the search. This will only return stations where at least
# one datatype is available.
cf_find_station("reefton EWS", datatype = daily.dt) # Yes
# OR Query Search ----------------------------------------------------------
# Return all stations sharing A42 in their network ID *or* all the open
# stations within 10km of the Beehive in Wellington (note this is not
# currently available as a single query in CliFlo).
cf_find_station("A42", search = "network", status = "all") +
cf_find_station(lat = -41.2784, long = 174.7767, rad = 10,
search = "latlong")
# Note these are all ordered by open stations, then again by their end dates
}
}
\seealso{
\code{\link{cf_save_kml}} for saving the resulting stations as a KML
file, \code{\link{cf_station}} for creating \code{\link{cfStation}} objects
when the agent numbers are known, \code{vignette("choose-station")} for a
tutorial on finding \pkg{clifro} stations and \code{vignette("cfStation")}
for working with \code{\link{cfStation}} objects.
}
|
/man/cf_find_station.Rd
|
no_license
|
DrRoad/clifro
|
R
| false
| true
| 4,771
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/findStations.R
\name{cf_find_station}
\alias{cf_find_station}
\title{Search for Clifro Stations}
\usage{
cf_find_station(..., search = c("name", "region", "network", "latlong"),
datatype, combine = c("all", "any"), status = c("open", "closed", "all"))
}
\arguments{
\item{...}{arguments to pass into the search, these differ depending on
\code{search}.}
\item{search}{one of \code{name}, \code{network}, \code{region} or
\code{latlong} indicating the type of search to be conducted.}
\item{datatype}{\code{cfDatatype} object for when the search is based on
datatypes.}
\item{combine}{character string \code{"all"} or \code{"any"} indicating if the
stations contain all or any of the selected datatypes for when the search is
based on datatypes.}
\item{status}{character string indicating \code{"open"}, \code{"closed"} or
\code{"all"} stations be returned by the search.}
}
\value{
\code{cfStation} object
}
\description{
Search for \pkg{clifro} stations based on name, region, location or network
number, and return a \code{cfStation} object.
}
\details{
The \code{cf_find_station} function is a convenience function for finding
CliFlo stations in \R. It uses the CliFlo
\href{https://cliflo.niwa.co.nz/pls/niwp/wstn.get_stn_html}{Find Stations}
page to do the searching, and therefore means that the stations are not
stored within \pkg{clifro}.
If \code{datatype} is missing then the search is conducted
without any reference to datatypes. If it is supplied then the
search will only return stations that have any or all of the supplied
datatypes, depending on \code{combine}. The default behaviour is to search
for stations based on pattern matching the station name and return only the
open stations.
If the \code{latlong} search type is used the function expects named
arguments with names (partially) matching latitude,
longitude and radius. If the arguments are passed in without names they must
be in order of latitude, longitude and radius (see examples).
}
\note{
Since the searching is done by CliFlo there are obvious restrictions.
Unfortunately the pattern matching for station name does not provide
functionality for regular expressions, nor does it allow simultaneous
searches although \pkg{clifro} does provide some extra functionality, see
the 'OR query Search' example below.
}
\examples{
\dontrun{
# Station Name Search ------------------------------------------------------
# Return all open stations with 'island' in the name (pattern match search)
# Note this example uses all the defaults
island_st = cf_find_station("island")
island_st
# Region Search ------------------------------------------------------------
# Return all the closed stations from Queenstown (using partial matching)
queenstown.st = cf_find_station("queen", search = "region", status = "closed")
queenstown.st
# Long/Lat Search ----------------------------------------------------------
# Return all open stations within a 10km radius of the Beehive in Wellington
# From Wikipedia: latitude 41.2784 S, longitude 174.7767 E
beehive.st = cf_find_station(lat = -41.2784, long = 174.7767, rad = 10,
search = "latlong")
beehive.st
# Network ID Search --------------------------------------------------------
# Return all stations that share A42 in their network ID
A42.st = cf_find_station("A42", search = "network", status = "all")
A42.st
# Using Datatypes in the Search --------------------------------------------
# Is the Reefton EWS station open and does it collect daily rain and/or wind
# data?
# First, create the daily rain and wind datatypes
daily.dt = cf_datatype(c(2, 3), c(1, 1), list(4, 1), c(1, NA))
daily.dt
# Then combine into the search. This will only return stations where at least
# one datatype is available.
cf_find_station("reefton EWS", datatype = daily.dt) # Yes
# OR Query Search ----------------------------------------------------------
# Return all stations sharing A42 in their network ID *or* all the open
# stations within 10km of the Beehive in Wellington (note this is not
# currently available as a single query in CliFlo).
cf_find_station("A42", search = "network", status = "all") +
cf_find_station(lat = -41.2784, long = 174.7767, rad = 10,
search = "latlong")
# Note these are all ordered by open stations, then again by their end dates
}
}
\seealso{
\code{\link{cf_save_kml}} for saving the resulting stations as a KML
file, \code{\link{cf_station}} for creating \code{\link{cfStation}} objects
when the agent numbers are known, \code{vignette("choose-station")} for a
tutorial on finding \pkg{clifro} stations and \code{vignette("cfStation")}
for working with \code{\link{cfStation}} objects.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_curve.R
\name{smooth_curves_covariance}
\alias{smooth_curves_covariance}
\title{Perform a non-parametric smoothing of a set of curves for covariance estimation.}
\usage{
smooth_curves_covariance(
curves,
grid = seq(0, 1, length.out = 101),
grid_param = c(0.25, 0.5, 0.75),
grid_bandwidth = NULL,
delta_f = NULL,
n_obs_min = 2,
kernel_name = "epanechnikov"
)
}
\arguments{
\item{curves}{List, where each element represents a curve. Each curve has to
be defined as a list with two entries:
\itemize{
\item \strong{$t} Sampling points.
\item \strong{$x} Observed points.
}}
\item{grid}{Vector (default = seq(0, 1, length.out = 101)), sampling points
at which to estimate the curves.}
\item{grid_param}{Vector (default = c(0.25, 0.5, 0.75)), sampling points at
which we estimate the parameters.}
\item{grid_bandwidth}{Vector (default = NULL), grid of bandwidths.}
\item{delta_f}{Function (default = NULL), function to determine the delta.}
\item{n_obs_min}{Integer (default = 2), minimum number of observation for
the smoothing.}
\item{kernel_name}{String (default = 'epanechnikov'), the kernel used for the
estimation:
\itemize{
\item epanechnikov
\item uniform
\item biweight
}}
}
\value{
A list, which contains three elements. The first one is a list which
contains the estimated parameters:
\itemize{
\item \strong{sigma} Estimation of the standard deviation of the noise.
\item \strong{variance} Estimation of the variance of the process.
\item \strong{H0} Estimation of \eqn{H_0}.
\item \strong{L0} Estimation of \eqn{L_0}.
\item \strong{bandwidth} Estimation of the bandwidth.
}
The second one is the bandwidths matrix. And the last one is the estimation
of the covariance.
}
\description{
This function performs a non-parametric smoothing of a set of curves using
the Nadaraya-Watson estimator.
}
\references{
Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
estimation of irregular mean and covariance functions.
}
|
/man/smooth_curves_covariance.Rd
|
permissive
|
StevenGolovkine/funestim
|
R
| false
| true
| 2,036
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_curve.R
\name{smooth_curves_covariance}
\alias{smooth_curves_covariance}
\title{Perform a non-parametric smoothing of a set of curves for covariance estimation.}
\usage{
smooth_curves_covariance(
curves,
grid = seq(0, 1, length.out = 101),
grid_param = c(0.25, 0.5, 0.75),
grid_bandwidth = NULL,
delta_f = NULL,
n_obs_min = 2,
kernel_name = "epanechnikov"
)
}
\arguments{
\item{curves}{List, where each element represents a curve. Each curve has to
be defined as a list with two entries:
\itemize{
\item \strong{$t} Sampling points.
\item \strong{$x} Observed points.
}}
\item{grid}{Vector (default = seq(0, 1, length.out = 101)), sampling points
at which to estimate the curves.}
\item{grid_param}{Vector (default = c(0.25, 0.5, 0.75)), sampling points at
which we estimate the parameters.}
\item{grid_bandwidth}{Vector (default = NULL), grid of bandwidths.}
\item{delta_f}{Function (default = NULL), function to determine the delta.}
\item{n_obs_min}{Integer (default = 2), minimum number of observations for
the smoothing.}
\item{kernel_name}{String (default = 'epanechnikov'), the kernel used for the
estimation:
\itemize{
\item epanechnikov
\item uniform
\item biweight
}}
}
\value{
A list, which contains three elements. The first one is a list which
contains the estimated parameters:
\itemize{
\item \strong{sigma} Estimation of the standard deviation of the noise.
\item \strong{variance} Estimation of the variance of the process.
\item \strong{H0} Estimation of \eqn{H_0}.
\item \strong{L0} Estimation of \eqn{L_0}.
\item \strong{bandwidth} Estimation of the bandwidth.
}
The second one is the bandwidths matrix. And the last one is the estimation
of the covariance.
}
\description{
This function performs a non-parametric smoothing of a set of curves using
the Nadaraya-Watson estimator.
}
\references{
Golovkine S., Klutchnikoff N., Patilea V. (2021) - Adaptive
estimation of irregular mean and covariance functions.
}
|
# Script setup: working directory, packages, print options.
# NOTE(review): setwd() with an absolute machine-specific path makes this
# script non-portable; prefer running from the project root (or here::here()).
setwd("C:/Users/Roberto/Desktop/rstudio_default/covid/covid_vaccinazioni_ita/wd-vaccinazioni")
library(tidyverse)
library(data.table)
library(lubridate)
# Suppress scientific notation in printed output.
options(scipen = 9999999)
# Download the official per-region daily summary of COVID-19 vaccine
# administrations (Italian government open data).
vac_all <- fread(
  "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-summary-latest.csv",
  encoding = "UTF-8"
)

#### lista regioni ####
# Lookup table mapping the region code ("sigla") to a readable name;
# drop the redundant "regione" column before joining.
lista_regioni <- fread("lista_regioni.csv") %>%
  select(-regione)

# Attach the region names and shorten two long official names.
vac_all <- vac_all %>%
  left_join(lista_regioni, by = c("area" = "sigla")) %>%
  mutate(
    nome_area = str_replace_all(nome_area, "Provincia Autonoma ", ""),
    nome_area = str_replace_all(nome_area, "Valle d'Aosta / Vallée d'Aoste", "Valle d'Aosta")
  )

#### data prep ####
# Keep the vaccination-campaign period only, rename to shorter column
# names, and make sure the date column is a proper Date.
vac_all <- vac_all %>%
  filter(data_somministrazione > "2020-12-01") %>%
  rename(
    n_vaccinazioni = totale,
    data = data_somministrazione
  ) %>%
  mutate(data = as_date(data))
# Per-region cumulative counts, per-capita rates (%) and a 3-day rolling
# mean of daily administrations, ordered by date.
vac_select <- vac_all %>%
  group_by(area) %>%
  arrange(data) %>%
  mutate(
    tot_vaccinazioni = cumsum(n_vaccinazioni),
    tot_prime_dosi = cumsum(prima_dose),
    tot_seconde_dosi = cumsum(seconda_dose),
    tasso_vaccinazioni = (tot_vaccinazioni / popolazione) * 100,
    tasso_seconde_dosi = (tot_seconde_dosi / popolazione) * 100,
    tasso_vaccinazioni_giornaliero = (n_vaccinazioni / popolazione) * 100,
    media_vaccinazioni_3gg = frollmean(x = n_vaccinazioni, n = 3, fill = 0)
  ) %>%
  ungroup() %>%
  select(
    data, nome_area, tot_vaccinazioni, n_vaccinazioni,
    prima_dose, tot_prime_dosi,
    seconda_dose, tot_seconde_dosi,
    tasso_vaccinazioni, tasso_seconde_dosi,
    tasso_vaccinazioni_giornaliero, media_vaccinazioni_3gg,
    popolazione, area
  ) %>%
  arrange(data)

# Regional alias plus a latest-day snapshot ranked by cumulative rate.
vac_reg <- vac_select
vac_reg_today <- vac_select %>%
  filter(data == max(data)) %>%
  rename(vacc_oggi = n_vaccinazioni) %>%
  arrange(desc(tasso_vaccinazioni))
# National aggregate: sum the regional figures per day, then recompute the
# per-capita rates against the fixed national population figure.
vac_ita <- vac_select %>%
  group_by(data) %>%
  summarise(
    tot_vaccinazioni = sum(tot_vaccinazioni),
    n_vaccinazioni = sum(n_vaccinazioni),
    prima_dose = sum(prima_dose),
    tot_prime_dosi = sum(tot_prime_dosi),
    seconda_dose = sum(seconda_dose),
    tot_seconde_dosi = sum(tot_seconde_dosi),
    popolazione = sum(popolazione)
  ) %>%
  ungroup() %>%
  arrange(desc(data)) %>%
  mutate(
    # Override the summed regional populations with the official national
    # population before computing the rates.
    popolazione = 60242096,
    tasso_vaccinazioni = (tot_vaccinazioni / popolazione) * 100,
    tasso_seconde_dosi = (tot_seconde_dosi / popolazione) * 100,
    tasso_vaccinazioni_giornaliero = (n_vaccinazioni / popolazione) * 100
  ) %>%
  select(-popolazione)

# Latest national day, with today's administrations renamed for clarity.
vac_ita_today <- vac_ita %>%
  filter(data == max(data)) %>%
  rename(vacc_oggi = n_vaccinazioni)
# Join keys shared by the national and regional tables: every column of
# vac_select except the rolling mean and the population (which are absent
# or differ on the national side).
# FIX: the intermediate was named `colnames`, masking base::colnames();
# renamed to avoid shadowing the base function name.
vac_colnames <- colnames(vac_select)
colnames_remove <- c("media_vaccinazioni_3gg", "popolazione")
colnames2 <- setdiff(vac_colnames, colnames_remove)
# Stack the national rows (area = "ITA", name "TOTALE") on top of the
# regional table, joining on the shared columns.
vac_ita_all <- vac_ita %>%
  mutate(area = "ITA", nome_area = "TOTALE") %>%
  full_join(vac_select, by = colnames2) %>%
  relocate(nome_area, .after = data)

# Long-format cumulative doses (national), for stacked-area plots.
vac_ita_longer <- vac_ita %>%
  select(data, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!data, names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi")))
fwrite(vac_ita_longer, "vac_ita_longer.csv")

# Long-format cumulative doses, Italy + every region.
vac_reg_longer <- vac_ita_all %>%
  select(data, area, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!c(data, area), names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi")))
fwrite(vac_reg_longer, "vac_reg_longer.csv")

# Long-format daily doses (national).
vac_ita_longer_day <- vac_ita %>%
  select(data, prima_dose, seconda_dose) %>%
  pivot_longer(!data, names_to = "dose", values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconda_dose", "prima_dose")))
fwrite(vac_ita_longer_day, "vac_ita_longer_day.csv")

# Long-format daily doses, Italy + every region.
vac_reg_longer_day <- vac_ita_all %>%
  select(data, area, prima_dose, seconda_dose) %>%
  pivot_longer(!c(data, area), names_to = "dose", values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconda_dose", "prima_dose")))
fwrite(vac_reg_longer_day, "vac_reg_longer_day.csv")
#### viz ####
# Daily national administrations (bar chart).
vac_ita %>%
  ggplot() +
  geom_col(aes(data, n_vaccinazioni), fill = "steelblue") +
  theme_minimal() +
  scale_x_date(date_breaks = "5 days")

# Daily rate (bars) vs cumulative rate (line), one panel per area.
vac_ita_all %>%
  ggplot() +
  geom_col(aes(data, tasso_vaccinazioni_giornaliero,
               fill = tasso_vaccinazioni_giornaliero)) +
  geom_line(aes(data, tasso_vaccinazioni), color = "steelblue", size = 2) +
  scale_fill_viridis_b() +
  guides(fill = "none") +
  theme_minimal() +
  ylab("vaccinati/pop") +
  facet_wrap(~area) +
  scale_x_date(date_breaks = "14 days", date_minor_breaks = "2 day")

# Same view for the national series only.
vac_ita %>%
  ggplot() +
  geom_col(aes(data, tasso_vaccinazioni_giornaliero,
               fill = tasso_vaccinazioni_giornaliero)) +
  geom_line(aes(data, tasso_vaccinazioni), size = 3, color = "steelblue") +
  scale_fill_viridis_b() +
  guides(fill = "none") +
  theme_minimal() +
  ylab("vaccinati/pop")

# Cumulative first/second doses as stacked areas (national).
vac_ita_longer %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  theme_minimal()

# Example trend for a single region: Lazio.
vac_ita_all %>%
  filter(area == "LAZ") %>%
  select(data, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!data, names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi"))) %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  theme_minimal()

# Faceted stacked-area trend for Italy and every region; print, then save.
vac_trend_plot <- vac_reg_longer %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  facet_wrap(~area, scales = "free_y") +
  theme_minimal() +
  ggtitle("Dosi cumulative vaccino, Italia e regioni")
vac_trend_plot
ggsave("vac_trend.png", width = 20, height = 12)
#### summaries ####
# Latest-day overview (Italy + regions) ranked by cumulative rate,
# opened in the RStudio viewer.
vac_ita_all %>%
  filter(data == max(data)) %>%
  select(-c(data, area, media_vaccinazioni_3gg, popolazione)) %>%
  rename(
    vacc_oggi = n_vaccinazioni,
    prima_dose_oggi = prima_dose,
    seconda_dose_oggi = seconda_dose
  ) %>%
  arrange(desc(tasso_vaccinazioni)) %>%
  View
# National series, most recent first, and the latest-day national snapshot.
vac_ita %>% arrange(desc(data))
vac_ita_today
|
/code-vaccinazioni/old/vaccinazioni.R
|
no_license
|
volperob/covid_vaccinazioni_ita
|
R
| false
| false
| 6,887
|
r
|
# Script setup: working directory, packages, print options.
# NOTE(review): setwd() with an absolute machine-specific path makes this
# script non-portable; prefer running from the project root (or here::here()).
setwd("C:/Users/Roberto/Desktop/rstudio_default/covid/covid_vaccinazioni_ita/wd-vaccinazioni")
library(tidyverse)
library(data.table)
library(lubridate)
# Suppress scientific notation in printed output.
options(scipen = 9999999)
# Download the official per-region daily summary of COVID-19 vaccine
# administrations (Italian government open data).
vac_all <- fread(
  "https://raw.githubusercontent.com/italia/covid19-opendata-vaccini/master/dati/somministrazioni-vaccini-summary-latest.csv",
  encoding = "UTF-8"
)

#### lista regioni ####
# Lookup table mapping the region code ("sigla") to a readable name;
# drop the redundant "regione" column before joining.
lista_regioni <- fread("lista_regioni.csv") %>%
  select(-regione)

# Attach the region names and shorten two long official names.
vac_all <- vac_all %>%
  left_join(lista_regioni, by = c("area" = "sigla")) %>%
  mutate(
    nome_area = str_replace_all(nome_area, "Provincia Autonoma ", ""),
    nome_area = str_replace_all(nome_area, "Valle d'Aosta / Vallée d'Aoste", "Valle d'Aosta")
  )

#### data prep ####
# Keep the vaccination-campaign period only, rename to shorter column
# names, and make sure the date column is a proper Date.
vac_all <- vac_all %>%
  filter(data_somministrazione > "2020-12-01") %>%
  rename(
    n_vaccinazioni = totale,
    data = data_somministrazione
  ) %>%
  mutate(data = as_date(data))
# Per-region cumulative counts, per-capita rates (%) and a 3-day rolling
# mean of daily administrations, ordered by date.
vac_select <- vac_all %>%
  group_by(area) %>%
  arrange(data) %>%
  mutate(
    tot_vaccinazioni = cumsum(n_vaccinazioni),
    tot_prime_dosi = cumsum(prima_dose),
    tot_seconde_dosi = cumsum(seconda_dose),
    tasso_vaccinazioni = (tot_vaccinazioni / popolazione) * 100,
    tasso_seconde_dosi = (tot_seconde_dosi / popolazione) * 100,
    tasso_vaccinazioni_giornaliero = (n_vaccinazioni / popolazione) * 100,
    media_vaccinazioni_3gg = frollmean(x = n_vaccinazioni, n = 3, fill = 0)
  ) %>%
  ungroup() %>%
  select(
    data, nome_area, tot_vaccinazioni, n_vaccinazioni,
    prima_dose, tot_prime_dosi,
    seconda_dose, tot_seconde_dosi,
    tasso_vaccinazioni, tasso_seconde_dosi,
    tasso_vaccinazioni_giornaliero, media_vaccinazioni_3gg,
    popolazione, area
  ) %>%
  arrange(data)

# Regional alias plus a latest-day snapshot ranked by cumulative rate.
vac_reg <- vac_select
vac_reg_today <- vac_select %>%
  filter(data == max(data)) %>%
  rename(vacc_oggi = n_vaccinazioni) %>%
  arrange(desc(tasso_vaccinazioni))
# National aggregate: sum the regional figures per day, then recompute the
# per-capita rates against the fixed national population figure.
vac_ita <- vac_select %>%
  group_by(data) %>%
  summarise(
    tot_vaccinazioni = sum(tot_vaccinazioni),
    n_vaccinazioni = sum(n_vaccinazioni),
    prima_dose = sum(prima_dose),
    tot_prime_dosi = sum(tot_prime_dosi),
    seconda_dose = sum(seconda_dose),
    tot_seconde_dosi = sum(tot_seconde_dosi),
    popolazione = sum(popolazione)
  ) %>%
  ungroup() %>%
  arrange(desc(data)) %>%
  mutate(
    # Override the summed regional populations with the official national
    # population before computing the rates.
    popolazione = 60242096,
    tasso_vaccinazioni = (tot_vaccinazioni / popolazione) * 100,
    tasso_seconde_dosi = (tot_seconde_dosi / popolazione) * 100,
    tasso_vaccinazioni_giornaliero = (n_vaccinazioni / popolazione) * 100
  ) %>%
  select(-popolazione)

# Latest national day, with today's administrations renamed for clarity.
vac_ita_today <- vac_ita %>%
  filter(data == max(data)) %>%
  rename(vacc_oggi = n_vaccinazioni)
# Join keys shared by the national and regional tables: every column of
# vac_select except the rolling mean and the population (which are absent
# or differ on the national side).
# FIX: the intermediate was named `colnames`, masking base::colnames();
# renamed to avoid shadowing the base function name.
vac_colnames <- colnames(vac_select)
colnames_remove <- c("media_vaccinazioni_3gg", "popolazione")
colnames2 <- setdiff(vac_colnames, colnames_remove)
# Stack the national rows (area = "ITA", name "TOTALE") on top of the
# regional table, joining on the shared columns.
vac_ita_all <- vac_ita %>%
  mutate(area = "ITA", nome_area = "TOTALE") %>%
  full_join(vac_select, by = colnames2) %>%
  relocate(nome_area, .after = data)

# Long-format cumulative doses (national), for stacked-area plots.
vac_ita_longer <- vac_ita %>%
  select(data, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!data, names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi")))
fwrite(vac_ita_longer, "vac_ita_longer.csv")

# Long-format cumulative doses, Italy + every region.
vac_reg_longer <- vac_ita_all %>%
  select(data, area, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!c(data, area), names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi")))
fwrite(vac_reg_longer, "vac_reg_longer.csv")

# Long-format daily doses (national).
vac_ita_longer_day <- vac_ita %>%
  select(data, prima_dose, seconda_dose) %>%
  pivot_longer(!data, names_to = "dose", values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconda_dose", "prima_dose")))
fwrite(vac_ita_longer_day, "vac_ita_longer_day.csv")

# Long-format daily doses, Italy + every region.
vac_reg_longer_day <- vac_ita_all %>%
  select(data, area, prima_dose, seconda_dose) %>%
  pivot_longer(!c(data, area), names_to = "dose", values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconda_dose", "prima_dose")))
fwrite(vac_reg_longer_day, "vac_reg_longer_day.csv")
#### viz ####
# Daily national administrations (bar chart).
vac_ita %>%
  ggplot() +
  geom_col(aes(data, n_vaccinazioni), fill = "steelblue") +
  theme_minimal() +
  scale_x_date(date_breaks = "5 days")

# Daily rate (bars) vs cumulative rate (line), one panel per area.
vac_ita_all %>%
  ggplot() +
  geom_col(aes(data, tasso_vaccinazioni_giornaliero,
               fill = tasso_vaccinazioni_giornaliero)) +
  geom_line(aes(data, tasso_vaccinazioni), color = "steelblue", size = 2) +
  scale_fill_viridis_b() +
  guides(fill = "none") +
  theme_minimal() +
  ylab("vaccinati/pop") +
  facet_wrap(~area) +
  scale_x_date(date_breaks = "14 days", date_minor_breaks = "2 day")

# Same view for the national series only.
vac_ita %>%
  ggplot() +
  geom_col(aes(data, tasso_vaccinazioni_giornaliero,
               fill = tasso_vaccinazioni_giornaliero)) +
  geom_line(aes(data, tasso_vaccinazioni), size = 3, color = "steelblue") +
  scale_fill_viridis_b() +
  guides(fill = "none") +
  theme_minimal() +
  ylab("vaccinati/pop")

# Cumulative first/second doses as stacked areas (national).
vac_ita_longer %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  theme_minimal()

# Example trend for a single region: Lazio.
vac_ita_all %>%
  filter(area == "LAZ") %>%
  select(data, tot_prime_dosi, tot_seconde_dosi) %>%
  pivot_longer(!data, names_to = "dose", names_prefix = "tot_",
               values_to = "num_dosi") %>%
  mutate(dose = factor(dose, levels = c("seconde_dosi", "prime_dosi"))) %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  theme_minimal()

# Faceted stacked-area trend for Italy and every region; print, then save.
vac_trend_plot <- vac_reg_longer %>%
  ggplot() +
  geom_area(aes(data, num_dosi, fill = dose)) +
  scale_fill_manual(values = c("darkred", "steelblue")) +
  facet_wrap(~area, scales = "free_y") +
  theme_minimal() +
  ggtitle("Dosi cumulative vaccino, Italia e regioni")
vac_trend_plot
ggsave("vac_trend.png", width = 20, height = 12)
#### summaries ####
# Latest-day overview (Italy + regions) ranked by cumulative rate,
# opened in the RStudio viewer.
vac_ita_all %>%
  filter(data == max(data)) %>%
  select(-c(data, area, media_vaccinazioni_3gg, popolazione)) %>%
  rename(
    vacc_oggi = n_vaccinazioni,
    prima_dose_oggi = prima_dose,
    seconda_dose_oggi = seconda_dose
  ) %>%
  arrange(desc(tasso_vaccinazioni)) %>%
  View
# National series, most recent first, and the latest-day national snapshot.
vac_ita %>% arrange(desc(data))
vac_ita_today
|
# Fuzzer-generated (AFL) regression input for the internal C++ routine
# mgss:::MVP_normalfactor_rcpp: a 5x7 matrix of (near-)zero doubles, zero
# integer bounds, and an empty numeric vector x — an edge-case input used
# for valgrind memory-error checks.
testlist <- list(A = structure(c(1.24944110113233e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), left = 0L, right = 0L, x = numeric(0))
# Call the routine with the fuzzed arguments and print the result structure.
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result)
|
/mgss/inst/testfiles/MVP_normalfactor_rcpp/AFL_MVP_normalfactor_rcpp/MVP_normalfactor_rcpp_valgrind_files/1615951426-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 292
|
r
|
# Fuzzer-generated (AFL) regression input for the internal C++ routine
# mgss:::MVP_normalfactor_rcpp: a 5x7 matrix of (near-)zero doubles, zero
# integer bounds, and an empty numeric vector x — an edge-case input used
# for valgrind memory-error checks.
testlist <- list(A = structure(c(1.24944110113233e-310, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)), left = 0L, right = 0L, x = numeric(0))
# Call the routine with the fuzzed arguments and print the result structure.
result <- do.call(mgss:::MVP_normalfactor_rcpp,testlist)
str(result)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility_VennDiagram.R
\name{Utility_VennDiagram}
\alias{Utility_VennDiagram}
\alias{vennDiagram}
\title{Venn diagram.}
\usage{
vennDiagram(x, colors = (ggsci::pal_d3())(length(x)),
show_category_names = T)
}
\arguments{
\item{x}{A list of vectors (e.g., integers, chars), with each component corresponding to a separate circle in the Venn diagram. Currently, only a list of length one to five is supported.}
\item{colors}{A set of colors for the venn diagram.}
\item{show_category_names}{Logical. Whether the names of \code{x} should be printed?}
}
\description{
Venn diagram.
}
|
/man/Utility_VennDiagram.Rd
|
permissive
|
SilenWang/Repitope
|
R
| false
| true
| 681
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Utility_VennDiagram.R
\name{Utility_VennDiagram}
\alias{Utility_VennDiagram}
\alias{vennDiagram}
\title{Venn diagram.}
\usage{
vennDiagram(x, colors = (ggsci::pal_d3())(length(x)),
show_category_names = T)
}
\arguments{
\item{x}{A list of vectors (e.g., integers, chars), with each component corresponding to a separate circle in the Venn diagram. Currently, only a list of length one to five is supported.}
\item{colors}{A set of colors for the venn diagram.}
\item{show_category_names}{Logical. Whether the names of \code{x} should be printed?}
}
\description{
Venn diagram.
}
|
library(shiny)
library(leaflet)
library(htmltools)
library(DT)
library(jsonlite)
library(dplyr)
library(RColorBrewer)
library(scales)
library(lattice)
library(ggplot2)
library(rsconnect)
library(rlang)
library(ggrepel)
# Pre-computed map data; read by the leaflet map (needs Long, Lat and cfr
# columns — assumed from the usage below, confirm against pre.csv).
vis_data <- read.csv("pre.csv")
# Table queried by the UI: one row per country/date plus metric columns.
analyticsData<-read.csv("csv_for_inquire.csv")
# Metric columns offered in the variable dropdown: everything except the
# first two columns (identifiers).
va <- names(analyticsData)
vars <-va[-1:-2]
# Values offered in the date dropdown.
Date<-analyticsData$Date
# UI: a navbar with a full-page interactive map tab (leaflet plus a fixed
# control panel) and a raw data-table tab.
ui <- navbarPage(
  "Covid-19",
  id = "nav",
  tabPanel(
    "Interactive Map",
    div(
      class = "outer",
      # Custom CSS for the full-page map layout.
      tags$head(includeCSS("styles.css")),
      # Shiny versions prior to 0.11 should use class = "modal" instead.
      leafletOutput("map", width = "100%", height = "100%"),
      # Fixed side panel with the date/variable selectors and a summary table.
      absolutePanel(
        id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = FALSE, top = 55, left = "auto", right = 10, bottom = "auto",
        width = 350, height = "100%",
        h2("Covid-19 Data Search"),
        selectInput("typeofDate", "Select Dates", Date),
        selectInput("typeofvariable", "Select variables", vars),
        tableOutput("data")
      )
    )
  ),
  # tab 'DataSearch'
  tabPanel("DataTable", DTOutput(outputId = "table"))
)
server <- function(input, output, session) {
  # Date selected in the sidebar dropdown (string, matched against Date col).
  target_date = reactive({
    input$typeofDate
  })
  # Selected variable name as a symbol for tidy evaluation.
  # FIX: rlang::parse_quosure() was deprecated and later removed from rlang;
  # sym() + !! is the supported equivalent for a bare column name.
  target_quo = reactive ({
    sym(input$typeofvariable)
  })
  # Rows for the chosen date, sorted descending on the chosen variable.
  dftable<-reactive({
    analytics=filter(analyticsData,Date== target_date())
    arrange(analytics,desc(!!target_quo()))
  })
  # Leaflet map: one circle/marker per location, labelled with the cfr value.
  output$map <- renderLeaflet({
    leaflet(vis_data) %>% addTiles() %>% addCircleMarkers() %>% addMarkers(~Long, ~Lat, label = ~htmlEscape(cfr))
  })
  # Top-10 table (country + selected variable) shown in the control panel.
  output$data <- renderTable({
    head((dftable()[, c("Country", input$typeofvariable), drop = FALSE]) ,10)}, rownames = TRUE)
  # Full interactive data table on the second tab.
  output$table <- DT::renderDataTable({
    DT::datatable(analyticsData)
  })
}
# Launch the application.
shinyApp(ui, server)
|
/R modelling and shinny app/app.R
|
no_license
|
xiao11lam/Covid-19_forecasting_on_ASEAN_countries
|
R
| false
| false
| 2,667
|
r
|
library(shiny)
library(leaflet)
library(htmltools)
library(DT)
library(jsonlite)
library(dplyr)
library(RColorBrewer)
library(scales)
library(lattice)
library(ggplot2)
library(rsconnect)
library(rlang)
library(ggrepel)
# Pre-computed map data; read by the leaflet map (needs Long, Lat and cfr
# columns — assumed from the usage below, confirm against pre.csv).
vis_data <- read.csv("pre.csv")
# Table queried by the UI: one row per country/date plus metric columns.
analyticsData<-read.csv("csv_for_inquire.csv")
# Metric columns offered in the variable dropdown: everything except the
# first two columns (identifiers).
va <- names(analyticsData)
vars <-va[-1:-2]
# Values offered in the date dropdown.
Date<-analyticsData$Date
# UI: a navbar with a full-page interactive map tab (leaflet plus a fixed
# control panel) and a raw data-table tab.
ui <- navbarPage(
  "Covid-19",
  id = "nav",
  tabPanel(
    "Interactive Map",
    div(
      class = "outer",
      # Custom CSS for the full-page map layout.
      tags$head(includeCSS("styles.css")),
      # Shiny versions prior to 0.11 should use class = "modal" instead.
      leafletOutput("map", width = "100%", height = "100%"),
      # Fixed side panel with the date/variable selectors and a summary table.
      absolutePanel(
        id = "controls", class = "panel panel-default", fixed = TRUE,
        draggable = FALSE, top = 55, left = "auto", right = 10, bottom = "auto",
        width = 350, height = "100%",
        h2("Covid-19 Data Search"),
        selectInput("typeofDate", "Select Dates", Date),
        selectInput("typeofvariable", "Select variables", vars),
        tableOutput("data")
      )
    )
  ),
  # tab 'DataSearch'
  tabPanel("DataTable", DTOutput(outputId = "table"))
)
server <- function(input, output, session) {
  # Date selected in the sidebar dropdown (string, matched against Date col).
  target_date = reactive({
    input$typeofDate
  })
  # Selected variable name as a symbol for tidy evaluation.
  # FIX: rlang::parse_quosure() was deprecated and later removed from rlang;
  # sym() + !! is the supported equivalent for a bare column name.
  target_quo = reactive ({
    sym(input$typeofvariable)
  })
  # Rows for the chosen date, sorted descending on the chosen variable.
  dftable<-reactive({
    analytics=filter(analyticsData,Date== target_date())
    arrange(analytics,desc(!!target_quo()))
  })
  # Leaflet map: one circle/marker per location, labelled with the cfr value.
  output$map <- renderLeaflet({
    leaflet(vis_data) %>% addTiles() %>% addCircleMarkers() %>% addMarkers(~Long, ~Lat, label = ~htmlEscape(cfr))
  })
  # Top-10 table (country + selected variable) shown in the control panel.
  output$data <- renderTable({
    head((dftable()[, c("Country", input$typeofvariable), drop = FALSE]) ,10)}, rownames = TRUE)
  # Full interactive data table on the second tab.
  output$table <- DT::renderDataTable({
    DT::datatable(analyticsData)
  })
}
# Launch the application.
shinyApp(ui, server)
|
#' Produce an .html report
#'
#' Produce an .html report with information for outbreak/suspicion management
#' @param ppn Ppn numbers (vector of int numbers, comma separated)
#' @param ppn_obj Path to the list of objects output from svdc package
#' @param firstname Firstname of the person running the report
#' @param lastname Lastname of the person running the report
#' @param X X-coordinate of the outbreak provided by the user when ppn coordinates are missing
#' @param Y Y-coordinate of the outbreak provided by the user when ppn coordinates are missing
#' @param buffer_size Size in kilometers of the buffers drawn around the ppn (vector of numbers, comma separated)
#' @param ppn_sympt PPN with symptoms
#' @param days Set the number of days to use in EpiContactTrace (max 180)
#' @param template Base name (without the .Rmd extension) of the report
#'     template shipped with the package
#' @param format Sub-directory of the installed templates in which
#'     'template' is looked up (currently "knitr")
#' @param view Make TRUE to pop a browser
#' @return An html report
#' @import rmarkdown
#' @import leaflet
#' @import RODBC
#' @import knitr
#' @import EpiContactTrace
#' @import wordcloud
#' @import DT
#' @import sp
#' @import rgeos
#' @import maptools
#' @export
report <- function(ppn,
                   ppn_sympt = "",
                   ppn_obj = system.file("extdata/result.rda", package = "svamp"), #save inUBUNTU the result from SVDC
                   firstname = "",
                   lastname = "",
                   buffer_size = c(3, 10),
                   days = 90,
                   # X = 1491350,
                   # Y = 7160041,
                   template = "report",
                   format = c("knitr"),
                   view = FALSE) {
  ## Check to make sure the environment is empty
  ## (a previous, interrupted run may have left objects behind).
  if (length(ls(envir=.svamp_env))) {
    stop('Unable to create report. The report object already exists')
  }
  ## Clean up the environment upon exiting the function
  on.exit(rm(list=ls(envir=.svamp_env), envir=.svamp_env))
  # if(missing(template))
  #   stop("Missing 'template'")
  # connection via ODBC and query urax data, then close the connection
  # NOTE(review): the commented-out ODBC block below embeds credentials in
  # source; if ever re-enabled, read them from a config file/keyring instead.
  # connect <- odbcConnect("SJUKDOMSSTATUSV",
  #                        uid = "Svaladw",
  #                        pwd = "svaladwpw",
  #                        believeNRows=FALSE)
  #
  # urax <- sqlQuery(connect, query = " SELECT *
  #                                     FROM
  #                                     URAX.SJUKDOMSSTATUSV")
  #
  # odbcClose(connect)
  ## Load the output of svsc package (load a list called "result" output of svdc package)
  load(ppn_obj)
  ## Check arguments
  if(missing(ppn)) {
    stop("'ppn' is missing")
  }
  if(missing(days)) {
    stop("'days' is missing")
  }
  if(missing(buffer_size)) {
    stop("'buffer_size' is missing")
  }
  ## Check that the inputed ppn is numeric
  if(!is.numeric(ppn)) {
    stop("Only numeric values are admitted")
  }
  ## Check that the inputed ppns are present
  if (!all(ppn %in% result$PPN$Ppn)) {
    stop('One or more PPNs are not present in the database.
         Please, double check the imputed PPNs')
  }
  ## Check that the inputed ppn is numeric
  ## (same generic message is reused for the buffer radii)
  if(!is.numeric(buffer_size)) {
    stop("Only numeric values are admitted")
  }
  ## Check that buffer size is less than 50 km
  if (any(buffer_size > 50)) {
    stop('The maximum radius for the buffer is 50 km')
  }
  ## Check that the argument days is numeric
  if(!is.numeric(days)) {
    stop("Check the days box. Only numeric value are admitted")
  }
  ## Check that the number of days is less than 180
  if (days > 180) {
    stop('The maximum number of days is 180. To go more back in time
         use the Movements App')
  }
  ## Load spatialpolygondataframe sticked in the ../svamp/data folder
  data(NUTS_03M, package = "svamp", envir = .svamp_env)
  data(postnummer, package = "svamp", envir = .svamp_env)
  ## Add the ppn argument to the .svamp_env so it can be accessed inside the .Rmd
  assign("ppn", ppn, envir = .svamp_env)
  assign("result", result, envir = .svamp_env)
  assign("firstname", firstname, envir = .svamp_env)
  assign("lastname", lastname, envir = .svamp_env)
  assign("days", days, envir = .svamp_env)
  assign("ppn_sympt", ppn_sympt, envir = .svamp_env)
  # assign("X", X, envir = .svamp_env)
  # assign("Y", Y, envir = .svamp_env)
  assign("buffer_size", buffer_size, envir = .svamp_env)
  ## Resolve the installed .Rmd template and render it into a temp directory.
  template <- system.file(file.path(format, paste0(template, ".Rmd")), package = "svamp")
  td <- tempdir()
  outputfile_html <- rmarkdown::render(template, output_dir = td, encoding = "UTF-8")
  ## Either open the rendered report in a browser, or return its lines invisibly.
  if(view) {
    a <- normalizePath(file.path(outputfile_html), winslash = "/")
    utils::browseURL(a)
  }
  if(!(view)) {
    invisible(readLines(outputfile_html))
  }
}
# Package-private environment used to pass data into the report template
# (and to hold datasets loaded while rendering).
.svamp_env <- new.env()
##' ReportObject
##'
##' Accessor for the package-private environment that holds the data of the
##' report currently being generated.
##'
##' @return The current object when generating a report
##' @export
report_data_object <- function() {
  .svamp_env
}
|
/R/report.R
|
no_license
|
SVA-SE/svamp
|
R
| false
| false
| 4,782
|
r
|
#' Produce an .html report
#'
#' Produce an .html report with information for outbreak/suspicion management
#' @param ppn Ppn numbers (vector of int numbers, comma separated)
#' @param ppn_obj Path to the list of objects output from svdc package
#' @param firstname Firstname of the person running the report
#' @param lastname Lastname of the person running the report
#' @param X X-coordinate of the outbreak provided by the user when ppn coordinates are missing
#' @param Y Y-coordinate of the outbreak provided by the user when ppn coordinates are missing
#' @param buffer_size Size in kilometers of the buffers drawn around the ppn (vector of numbers, comma separated)
#' @param ppn_sympt PPN with symptoms
#' @param days Set the number of days to use in EpiContactTrace (max 180)
#' @param template Base name (without the .Rmd extension) of the report
#'     template shipped with the package
#' @param format Sub-directory of the installed templates in which
#'     'template' is looked up (currently "knitr")
#' @param view Make TRUE to pop a browser
#' @return An html report
#' @import rmarkdown
#' @import leaflet
#' @import RODBC
#' @import knitr
#' @import EpiContactTrace
#' @import wordcloud
#' @import DT
#' @import sp
#' @import rgeos
#' @import maptools
#' @export
report <- function(ppn,
                   ppn_sympt = "",
                   ppn_obj = system.file("extdata/result.rda", package = "svamp"), #save inUBUNTU the result from SVDC
                   firstname = "",
                   lastname = "",
                   buffer_size = c(3, 10),
                   days = 90,
                   # X = 1491350,
                   # Y = 7160041,
                   template = "report",
                   format = c("knitr"),
                   view = FALSE) {
  ## Check to make sure the environment is empty
  ## (a previous, interrupted run may have left objects behind).
  if (length(ls(envir=.svamp_env))) {
    stop('Unable to create report. The report object already exists')
  }
  ## Clean up the environment upon exiting the function
  on.exit(rm(list=ls(envir=.svamp_env), envir=.svamp_env))
  # if(missing(template))
  #   stop("Missing 'template'")
  # connection via ODBC and query urax data, then close the connection
  # NOTE(review): the commented-out ODBC block below embeds credentials in
  # source; if ever re-enabled, read them from a config file/keyring instead.
  # connect <- odbcConnect("SJUKDOMSSTATUSV",
  #                        uid = "Svaladw",
  #                        pwd = "svaladwpw",
  #                        believeNRows=FALSE)
  #
  # urax <- sqlQuery(connect, query = " SELECT *
  #                                     FROM
  #                                     URAX.SJUKDOMSSTATUSV")
  #
  # odbcClose(connect)
  ## Load the output of svsc package (load a list called "result" output of svdc package)
  load(ppn_obj)
  ## Check arguments
  if(missing(ppn)) {
    stop("'ppn' is missing")
  }
  if(missing(days)) {
    stop("'days' is missing")
  }
  if(missing(buffer_size)) {
    stop("'buffer_size' is missing")
  }
  ## Check that the inputed ppn is numeric
  if(!is.numeric(ppn)) {
    stop("Only numeric values are admitted")
  }
  ## Check that the inputed ppns are present
  if (!all(ppn %in% result$PPN$Ppn)) {
    stop('One or more PPNs are not present in the database.
         Please, double check the imputed PPNs')
  }
  ## Check that the inputed ppn is numeric
  ## (same generic message is reused for the buffer radii)
  if(!is.numeric(buffer_size)) {
    stop("Only numeric values are admitted")
  }
  ## Check that buffer size is less than 50 km
  if (any(buffer_size > 50)) {
    stop('The maximum radius for the buffer is 50 km')
  }
  ## Check that the argument days is numeric
  if(!is.numeric(days)) {
    stop("Check the days box. Only numeric value are admitted")
  }
  ## Check that the number of days is less than 180
  if (days > 180) {
    stop('The maximum number of days is 180. To go more back in time
         use the Movements App')
  }
  ## Load spatialpolygondataframe sticked in the ../svamp/data folder
  data(NUTS_03M, package = "svamp", envir = .svamp_env)
  data(postnummer, package = "svamp", envir = .svamp_env)
  ## Add the ppn argument to the .svamp_env so it can be accessed inside the .Rmd
  assign("ppn", ppn, envir = .svamp_env)
  assign("result", result, envir = .svamp_env)
  assign("firstname", firstname, envir = .svamp_env)
  assign("lastname", lastname, envir = .svamp_env)
  assign("days", days, envir = .svamp_env)
  assign("ppn_sympt", ppn_sympt, envir = .svamp_env)
  # assign("X", X, envir = .svamp_env)
  # assign("Y", Y, envir = .svamp_env)
  assign("buffer_size", buffer_size, envir = .svamp_env)
  ## Resolve the installed .Rmd template and render it into a temp directory.
  template <- system.file(file.path(format, paste0(template, ".Rmd")), package = "svamp")
  td <- tempdir()
  outputfile_html <- rmarkdown::render(template, output_dir = td, encoding = "UTF-8")
  ## Either open the rendered report in a browser, or return its lines invisibly.
  if(view) {
    a <- normalizePath(file.path(outputfile_html), winslash = "/")
    utils::browseURL(a)
  }
  if(!(view)) {
    invisible(readLines(outputfile_html))
  }
}
.svamp_env <- new.env()
##' ReportObject
##'
##' @return The current object when generating a report
##' @export
report_data_object <- function() {
.svamp_env
}
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' IAM Roles Anywhere
#'
#' @description
#' Identity and Access Management Roles Anywhere provides a secure way for
#' your workloads such as servers, containers, and applications that run
#' outside of Amazon Web Services to obtain temporary Amazon Web Services
#' credentials. Your workloads can use the same IAM policies and roles you
#' have for native Amazon Web Services applications to access Amazon Web
#' Services resources. Using IAM Roles Anywhere eliminates the need to
#' manage long-term credentials for workloads running outside of Amazon Web
#' Services.
#'
#' To use IAM Roles Anywhere, your workloads must use X.509 certificates
#' issued by their certificate authority (CA). You register the CA with IAM
#' Roles Anywhere as a trust anchor to establish trust between your public
#' key infrastructure (PKI) and IAM Roles Anywhere. If you don't manage
#' your own PKI system, you can use Private Certificate Authority to create
#' a CA and then use that to establish trust with IAM Roles Anywhere.
#'
#' This guide describes the IAM Roles Anywhere operations that you can call
#' programmatically. For more information about IAM Roles Anywhere, see the
#' [IAM Roles Anywhere User
#' Guide](https://docs.aws.amazon.com/rolesanywhere/latest/userguide/introduction.html).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- iamrolesanywhere(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- iamrolesanywhere()
#' svc$create_profile(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=iamrolesanywhere_create_profile]{create_profile} \tab Creates a profile, a list of the roles that Roles Anywhere service is trusted to assume\cr
#' \link[=iamrolesanywhere_create_trust_anchor]{create_trust_anchor} \tab Creates a trust anchor to establish trust between IAM Roles Anywhere and your certificate authority (CA)\cr
#' \link[=iamrolesanywhere_delete_crl]{delete_crl} \tab Deletes a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_delete_profile]{delete_profile} \tab Deletes a profile\cr
#' \link[=iamrolesanywhere_delete_trust_anchor]{delete_trust_anchor} \tab Deletes a trust anchor\cr
#' \link[=iamrolesanywhere_disable_crl]{disable_crl} \tab Disables a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_disable_profile]{disable_profile} \tab Disables a profile\cr
#' \link[=iamrolesanywhere_disable_trust_anchor]{disable_trust_anchor} \tab Disables a trust anchor\cr
#' \link[=iamrolesanywhere_enable_crl]{enable_crl} \tab Enables a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_enable_profile]{enable_profile} \tab Enables temporary credential requests for a profile\cr
#' \link[=iamrolesanywhere_enable_trust_anchor]{enable_trust_anchor} \tab Enables a trust anchor\cr
#' \link[=iamrolesanywhere_get_crl]{get_crl} \tab Gets a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_get_profile]{get_profile} \tab Gets a profile\cr
#' \link[=iamrolesanywhere_get_subject]{get_subject} \tab Gets a subject, which associates a certificate identity with authentication attempts\cr
#' \link[=iamrolesanywhere_get_trust_anchor]{get_trust_anchor} \tab Gets a trust anchor\cr
#' \link[=iamrolesanywhere_import_crl]{import_crl} \tab Imports the certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_list_crls]{list_crls} \tab Lists all certificate revocation lists (CRL) in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_profiles]{list_profiles} \tab Lists all profiles in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_subjects]{list_subjects} \tab Lists the subjects in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags attached to the resource\cr
#' \link[=iamrolesanywhere_list_trust_anchors]{list_trust_anchors} \tab Lists the trust anchors in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_put_notification_settings]{put_notification_settings} \tab Attaches a list of notification settings to a trust anchor\cr
#' \link[=iamrolesanywhere_reset_notification_settings]{reset_notification_settings} \tab Resets the custom notification setting to IAM Roles Anywhere default setting\cr
#' \link[=iamrolesanywhere_tag_resource]{tag_resource} \tab Attaches tags to a resource\cr
#' \link[=iamrolesanywhere_untag_resource]{untag_resource} \tab Removes tags from the resource\cr
#' \link[=iamrolesanywhere_update_crl]{update_crl} \tab Updates the certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_update_profile]{update_profile} \tab Updates a profile, a list of the roles that IAM Roles Anywhere service is trusted to assume\cr
#' \link[=iamrolesanywhere_update_trust_anchor]{update_trust_anchor} \tab Updates a trust anchor
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname iamrolesanywhere
#' @export
iamrolesanywhere <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into the main
  # `config` list via merge_config before constructing the client.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Bind the merged configuration to the operation list and return it as the
  # client object.  NOTE(review): `.iamrolesanywhere$operations` is declared
  # empty below; presumably it is populated by other generated files of this
  # package before the constructor is called — verify against the package.
  svc <- .iamrolesanywhere$operations
  svc <- set_config(svc, config)
  return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
.iamrolesanywhere <- list()
# Operation list; empty at definition time (see note in the constructor).
.iamrolesanywhere$operations <- list()
# Static service description consumed when the service object is built.
.iamrolesanywhere$metadata <- list(
  service_name = "iamrolesanywhere",
  # Endpoint templates keyed by region pattern; "{region}" appears to be a
  # placeholder substituted elsewhere (not shown in this file).
  endpoints = list("*" = list(endpoint = "rolesanywhere.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "rolesanywhere.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "rolesanywhere.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "rolesanywhere.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "RolesAnywhere",
  api_version = "2018-05-10",     # AWS API version date for this service
  signing_name = "rolesanywhere", # name used when signing requests
  json_version = "1.1",
  target_prefix = ""
)
.iamrolesanywhere$service <- function(config = list()) {
  # Create request handlers for the "restjson" protocol with "v4" signing,
  # then assemble the service object from the static metadata above.
  handlers <- new_handlers("restjson", "v4")
  new_service(.iamrolesanywhere$metadata, handlers, config)
}
|
/cran/paws.security.identity/R/iamrolesanywhere_service.R
|
permissive
|
paws-r/paws
|
R
| false
| false
| 9,253
|
r
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common new_handlers new_service set_config merge_config
NULL
#' IAM Roles Anywhere
#'
#' @description
#' Identity and Access Management Roles Anywhere provides a secure way for
#' your workloads such as servers, containers, and applications that run
#' outside of Amazon Web Services to obtain temporary Amazon Web Services
#' credentials. Your workloads can use the same IAM policies and roles you
#' have for native Amazon Web Services applications to access Amazon Web
#' Services resources. Using IAM Roles Anywhere eliminates the need to
#' manage long-term credentials for workloads running outside of Amazon Web
#' Services.
#'
#' To use IAM Roles Anywhere, your workloads must use X.509 certificates
#' issued by their certificate authority (CA). You register the CA with IAM
#' Roles Anywhere as a trust anchor to establish trust between your public
#' key infrastructure (PKI) and IAM Roles Anywhere. If you don't manage
#' your own PKI system, you can use Private Certificate Authority to create
#' a CA and then use that to establish trust with IAM Roles Anywhere.
#'
#' This guide describes the IAM Roles Anywhere operations that you can call
#' programmatically. For more information about IAM Roles Anywhere, see the
#' [IAM Roles Anywhere User
#' Guide](https://docs.aws.amazon.com/rolesanywhere/latest/userguide/introduction.html).
#'
#' @param
#' config
#' Optional configuration of credentials, endpoint, and/or region.
#' \itemize{
#' \item{\strong{credentials}:} {\itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' \item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
#' \item{\strong{region}:} {The AWS Region used in instantiating the client.}
#' }}
#' \item{\strong{close_connection}:} {Immediately close all HTTP connections.}
#' \item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
#' \item{\strong{s3_force_path_style}:} {Set this to `true` to force the request to use path-style addressing, i.e. `http://s3.amazonaws.com/BUCKET/KEY`.}
#' \item{\strong{sts_regional_endpoint}:} {Set sts regional endpoint resolver to regional or legacy \url{https://docs.aws.amazon.com/sdkref/latest/guide/feature-sts-regionalized-endpoints.html}}
#' }
#' @param
#' credentials
#' Optional credentials shorthand for the config parameter
#' \itemize{
#' \item{\strong{creds}:} {\itemize{
#' \item{\strong{access_key_id}:} {AWS access key ID}
#' \item{\strong{secret_access_key}:} {AWS secret access key}
#' \item{\strong{session_token}:} {AWS temporary session token}
#' }}
#' \item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
#' \item{\strong{anonymous}:} {Set anonymous credentials.}
#' }
#' @param
#' endpoint
#' Optional shorthand for complete URL to use for the constructed client.
#' @param
#' region
#' Optional shorthand for AWS Region used in instantiating the client.
#'
#' @section Service syntax:
#' ```
#' svc <- iamrolesanywhere(
#' config = list(
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string",
#' close_connection = "logical",
#' timeout = "numeric",
#' s3_force_path_style = "logical",
#' sts_regional_endpoint = "string"
#' ),
#' credentials = list(
#' creds = list(
#' access_key_id = "string",
#' secret_access_key = "string",
#' session_token = "string"
#' ),
#' profile = "string",
#' anonymous = "logical"
#' ),
#' endpoint = "string",
#' region = "string"
#' )
#' ```
#'
#' @examples
#' \dontrun{
#' svc <- iamrolesanywhere()
#' svc$create_profile(
#' Foo = 123
#' )
#' }
#'
#' @section Operations:
#' \tabular{ll}{
#' \link[=iamrolesanywhere_create_profile]{create_profile} \tab Creates a profile, a list of the roles that Roles Anywhere service is trusted to assume\cr
#' \link[=iamrolesanywhere_create_trust_anchor]{create_trust_anchor} \tab Creates a trust anchor to establish trust between IAM Roles Anywhere and your certificate authority (CA)\cr
#' \link[=iamrolesanywhere_delete_crl]{delete_crl} \tab Deletes a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_delete_profile]{delete_profile} \tab Deletes a profile\cr
#' \link[=iamrolesanywhere_delete_trust_anchor]{delete_trust_anchor} \tab Deletes a trust anchor\cr
#' \link[=iamrolesanywhere_disable_crl]{disable_crl} \tab Disables a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_disable_profile]{disable_profile} \tab Disables a profile\cr
#' \link[=iamrolesanywhere_disable_trust_anchor]{disable_trust_anchor} \tab Disables a trust anchor\cr
#' \link[=iamrolesanywhere_enable_crl]{enable_crl} \tab Enables a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_enable_profile]{enable_profile} \tab Enables temporary credential requests for a profile\cr
#' \link[=iamrolesanywhere_enable_trust_anchor]{enable_trust_anchor} \tab Enables a trust anchor\cr
#' \link[=iamrolesanywhere_get_crl]{get_crl} \tab Gets a certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_get_profile]{get_profile} \tab Gets a profile\cr
#' \link[=iamrolesanywhere_get_subject]{get_subject} \tab Gets a subject, which associates a certificate identity with authentication attempts\cr
#' \link[=iamrolesanywhere_get_trust_anchor]{get_trust_anchor} \tab Gets a trust anchor\cr
#' \link[=iamrolesanywhere_import_crl]{import_crl} \tab Imports the certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_list_crls]{list_crls} \tab Lists all certificate revocation lists (CRL) in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_profiles]{list_profiles} \tab Lists all profiles in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_subjects]{list_subjects} \tab Lists the subjects in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_list_tags_for_resource]{list_tags_for_resource} \tab Lists the tags attached to the resource\cr
#' \link[=iamrolesanywhere_list_trust_anchors]{list_trust_anchors} \tab Lists the trust anchors in the authenticated account and Amazon Web Services Region\cr
#' \link[=iamrolesanywhere_put_notification_settings]{put_notification_settings} \tab Attaches a list of notification settings to a trust anchor\cr
#' \link[=iamrolesanywhere_reset_notification_settings]{reset_notification_settings} \tab Resets the custom notification setting to IAM Roles Anywhere default setting\cr
#' \link[=iamrolesanywhere_tag_resource]{tag_resource} \tab Attaches tags to a resource\cr
#' \link[=iamrolesanywhere_untag_resource]{untag_resource} \tab Removes tags from the resource\cr
#' \link[=iamrolesanywhere_update_crl]{update_crl} \tab Updates the certificate revocation list (CRL)\cr
#' \link[=iamrolesanywhere_update_profile]{update_profile} \tab Updates a profile, a list of the roles that IAM Roles Anywhere service is trusted to assume\cr
#' \link[=iamrolesanywhere_update_trust_anchor]{update_trust_anchor} \tab Updates a trust anchor
#' }
#'
#' @return
#' A client for the service. You can call the service's operations using
#' syntax like `svc$operation(...)`, where `svc` is the name you've assigned
#' to the client. The available operations are listed in the
#' Operations section.
#'
#' @rdname iamrolesanywhere
#' @export
iamrolesanywhere <- function(config = list(), credentials = list(), endpoint = NULL, region = NULL) {
  # Fold the shorthand arguments (credentials/endpoint/region) into the main
  # `config` list via merge_config before constructing the client.
  config <- merge_config(
    config,
    list(
      credentials = credentials,
      endpoint = endpoint,
      region = region
    )
  )
  # Bind the merged configuration to the operation list and return it as the
  # client object.  NOTE(review): `.iamrolesanywhere$operations` is declared
  # empty below; presumably it is populated by other generated files of this
  # package before the constructor is called — verify against the package.
  svc <- .iamrolesanywhere$operations
  svc <- set_config(svc, config)
  return(svc)
}
# Private API objects: metadata, handlers, interfaces, etc.
.iamrolesanywhere <- list()
# Operation list; empty at definition time (see note in the constructor).
.iamrolesanywhere$operations <- list()
# Static service description consumed when the service object is built.
.iamrolesanywhere$metadata <- list(
  service_name = "iamrolesanywhere",
  # Endpoint templates keyed by region pattern; "{region}" appears to be a
  # placeholder substituted elsewhere (not shown in this file).
  endpoints = list("*" = list(endpoint = "rolesanywhere.{region}.amazonaws.com", global = FALSE), "cn-*" = list(endpoint = "rolesanywhere.{region}.amazonaws.com.cn", global = FALSE), "us-iso-*" = list(endpoint = "rolesanywhere.{region}.c2s.ic.gov", global = FALSE), "us-isob-*" = list(endpoint = "rolesanywhere.{region}.sc2s.sgov.gov", global = FALSE)),
  service_id = "RolesAnywhere",
  api_version = "2018-05-10",     # AWS API version date for this service
  signing_name = "rolesanywhere", # name used when signing requests
  json_version = "1.1",
  target_prefix = ""
)
.iamrolesanywhere$service <- function(config = list()) {
  # Create request handlers for the "restjson" protocol with "v4" signing,
  # then assemble the service object from the static metadata above.
  handlers <- new_handlers("restjson", "v4")
  new_service(.iamrolesanywhere$metadata, handlers, config)
}
|
#!RScript
# args[1] is the data procesed
# args[2] is the total number of configurations for the data processed
library(ggplot2)
library(plyr)
library(reshape2)
library(tools)
args=(commandArgs(TRUE))
processData <- function(data, total = as.numeric(args[2])) {
  # Compute false-positive and false-negative rates from the raw confusion
  # counts, then melt to long format for plotting (one row per metric value).
  #
  # Args:
  #   data  - data frame with at least FN, FP, TN, TP, sr and t columns.
  #   total - number of configurations the counts are taken out of.  Defaults
  #           to the script's second command-line argument so existing calls
  #           keep working, but callers can now pass it explicitly instead of
  #           relying on the global `args`.
  #
  # Returns: the melted data frame, with FN/FP/TN/TP/sr/t kept as id columns.
  data$fpr <- data$FP / total
  data$fnr <- data$FN / total
  data <- melt(data, id.vars = c("FN", "FP", "TN", "TP", "sr", "t"))
  return(data)
}
## Load the CSV named by the first command-line argument and derive metrics.
data <- read.table(args[1], header = TRUE, sep = ",")
data <- processData(data)

## One line type per melted metric column, plotted against training-set size.
plot <- ggplot(data, aes(sr, value), group = variable) +
  geom_line(aes(linetype = variable)) +
  theme_bw() +
  xlab("Number of configurations in the training set") +
  ylab("Metric Value") +
  theme(legend.position = "none")

## Save the figure next to the working directory, named after the input file.
ggsave(plot,
       file = paste0(basename(file_path_sans_ext(args[1])), ".pdf"),
       width = 11, height = 11)
|
/2.metric_view/helpers/2.calculateMetrics.R
|
no_license
|
learningconstraints/ICSE-17
|
R
| false
| false
| 949
|
r
|
#!RScript
# args[1] is the data procesed
# args[2] is the total number of configurations for the data processed
library(ggplot2)
library(plyr)
library(reshape2)
library(tools)
args=(commandArgs(TRUE))
processData <- function(data, total = as.numeric(args[2])) {
  # Compute false-positive and false-negative rates from the raw confusion
  # counts, then melt to long format for plotting (one row per metric value).
  #
  # Args:
  #   data  - data frame with at least FN, FP, TN, TP, sr and t columns.
  #   total - number of configurations the counts are taken out of.  Defaults
  #           to the script's second command-line argument so existing calls
  #           keep working, but callers can now pass it explicitly instead of
  #           relying on the global `args`.
  #
  # Returns: the melted data frame, with FN/FP/TN/TP/sr/t kept as id columns.
  data$fpr <- data$FP / total
  data$fnr <- data$FN / total
  data <- melt(data, id.vars = c("FN", "FP", "TN", "TP", "sr", "t"))
  return(data)
}
## Load the CSV named by the first command-line argument and derive metrics.
data <- read.table(args[1], header = TRUE, sep = ",")
data <- processData(data)

## One line type per melted metric column, plotted against training-set size.
plot <- ggplot(data, aes(sr, value), group = variable) +
  geom_line(aes(linetype = variable)) +
  theme_bw() +
  xlab("Number of configurations in the training set") +
  ylab("Metric Value") +
  theme(legend.position = "none")

## Save the figure next to the working directory, named after the input file.
ggsave(plot,
       file = paste0(basename(file_path_sans_ext(args[1])), ".pdf"),
       width = 11, height = 11)
|
#' Random number generation for truncated multivariate Student's t distribution subject to linear inequality constraints
#'
#' \code{rtmvt} simulates truncated multivariate (p-dimensional) Student's t distribution subject to linear inequality constraints. The constraints should be written as a matrix (\code{D}) with \code{lower} and \code{upper} as the lower and upper bounds for those constraints respectively. Note that \code{D} can be non-full rank, which generalizes many traditional methods.
#'
#' @param n number of random samples desired (sample size).
#' @param Mean location vector of the multivariate Student's t distribution.
#' @param Sigma positive definite dispersion matrix of the multivariate t distribution.
#' @param nu degrees of freedom for Student-t distribution.
#' @param D matrix or vector of coefficients of linear inequality constraints.
#' @param lower lower bound vector for truncation.
#' @param upper upper bound vector for truncation.
#' @param int initial value vector for Gibbs sampler (satisfying truncation), if \code{NULL} then determine automatically.
#' @param burn burn-in iterations discarded (default as \code{10}).
#' @param thin thinning lag (default as \code{1}).
#'
#' @return \code{rtmvt} returns a (\code{n*p}) matrix (or vector when \code{n=1}) containing random numbers which follows truncated multivariate Student-t distribution.
#'
#' @examples
#' # Example for full rank
#' d <- 3
#' rho <- 0.5
#' nu <- 10
#' Sigma <- matrix(0, nrow=d, ncol=d)
#' Sigma <- rho^abs(row(Sigma) - col(Sigma))
#'
#' D1 <- diag(1,d) # Full rank
#'
#' set.seed(1203)
#' ans.t <- rtmvt(n=1000, Mean=1:d, Sigma, nu=nu, D=D1, lower=rep(-1,d), upper=rep(1,d),
#' burn=50, thin=0)
#'
#' apply(ans.t, 2, summary)
#'
rtmvt <- function(n, Mean, Sigma, nu, D, lower, upper, int=NULL, burn=10, thin=1){
  ## ---- input validation --------------------------------------------------
  if (any(lower >= upper)) stop("lower bound must be smaller than upper bound\n")
  if (any(c(burn, thin, n) %% 1 != 0)) stop("burn, thin and n must be integer\n")
  if (any(c(burn, thin, n - 1) < 0)) stop("burn, thin must be non-negative integer, n must be positive integer\n")

  ## ---- initial value -----------------------------------------------------
  ## A user-supplied `int` is accepted only when it satisfies the truncation
  ## constraints (with a small tolerance to stay off the boundary).  If it is
  ## missing OR infeasible, fall back to the bounds' midpoint mapped through a
  ## generalized inverse of D (D may be non-full rank).
  ## FIX: previously an infeasible user-supplied `int` was silently kept,
  ## because the fallback branch sat in an `else if` reachable only when
  ## `int` was NULL.
  bound.check <- 0
  if (!is.null(int)) {
    inits_test <- D %*% int
    lower.log <- inits_test >= lower + 1e-8   # tol keeps the start off the bound
    upper.log <- inits_test <= upper - 1e-8
    bound.check <- prod(lower.log * upper.log)
  }
  if (bound.check == 0) {
    D.inv <- MASS::ginv(D)
    int <- D.inv %*% (lower + upper) / 2
  }

  ## ---- problem setup -----------------------------------------------------
  if (is.vector(D)) {
    Rtilde <- t(as.matrix(D))
    lower <- as.vector(lower)
    upper <- as.vector(upper)
  } else {
    Rtilde <- D
  }
  a <- lower - Rtilde %*% Mean               # bounds for the centered problem
  b <- upper - Rtilde %*% Mean
  Sigma.chol <- t(chol(Sigma))               # lower-triangular Cholesky factor
  R <- Rtilde %*% Sigma.chol                 # constraints on the whitened scale
  p <- ncol(R)                               # dimension of the sampled vector
  x <- solve(Sigma.chol) %*% (int - Mean)    # initial value on the whitened scale

  ## ---- Gibbs sampler -----------------------------------------------------
  ## A multivariate t draw is a normal draw divided by sqrt(chisq(nu)/nu);
  ## each iteration draws the mixing variable, then a constrained normal via
  ## rtmvn, and maps the result back to the original location/scale.
  n.iter <- (thin + 1) * n + burn
  keep.t <- matrix(NA, ncol = p, nrow = n.iter)
  for (i in seq_len(n.iter)) {
    u <- stats::rchisq(1, df = nu)
    denom <- sqrt(u / nu)
    lw <- c(a * denom)
    up <- c(b * denom)
    z0 <- c(x * denom)                       # feasible start for the inner tmvn draw
    z <- c(rtmvn(n = 1, Mean = rep(0, p), Sigma = diag(1, p), D = R,
                 lower = lw, upper = up, int = z0, burn = 0))
    x <- z / denom                           # standard tmvt draw
    keep.t[i, ] <- Sigma.chol %*% x + Mean   # back-transform to the original scale
  }

  ## ---- burn-in / thinning ------------------------------------------------
  ## Keep every (thin+1)-th iteration after the burn-in period.
  final.ind <- seq(1, n * (thin + 1), by = thin + 1) + thin + burn
  if (n == 1) {
    result <- c(keep.t[final.ind, ])
  } else {
    result <- keep.t[final.ind, ]
  }
  return(result)
}
|
/R/rtmvt.R
|
no_license
|
cran/tmvmixnorm
|
R
| false
| false
| 3,917
|
r
|
#' Random number generation for truncated multivariate Student's t distribution subject to linear inequality constraints
#'
#' \code{rtmvt} simulates truncated multivariate (p-dimensional) Student's t distribution subject to linear inequality constraints. The constraints should be written as a matrix (\code{D}) with \code{lower} and \code{upper} as the lower and upper bounds for those constraints respectively. Note that \code{D} can be non-full rank, which generalizes many traditional methods.
#'
#' @param n number of random samples desired (sample size).
#' @param Mean location vector of the multivariate Student's t distribution.
#' @param Sigma positive definite dispersion matrix of the multivariate t distribution.
#' @param nu degrees of freedom for Student-t distribution.
#' @param D matrix or vector of coefficients of linear inequality constraints.
#' @param lower lower bound vector for truncation.
#' @param upper upper bound vector for truncation.
#' @param int initial value vector for Gibbs sampler (satisfying truncation), if \code{NULL} then determine automatically.
#' @param burn burn-in iterations discarded (default as \code{10}).
#' @param thin thinning lag (default as \code{1}).
#'
#' @return \code{rtmvt} returns a (\code{n*p}) matrix (or vector when \code{n=1}) containing random numbers which follows truncated multivariate Student-t distribution.
#'
#' @examples
#' # Example for full rank
#' d <- 3
#' rho <- 0.5
#' nu <- 10
#' Sigma <- matrix(0, nrow=d, ncol=d)
#' Sigma <- rho^abs(row(Sigma) - col(Sigma))
#'
#' D1 <- diag(1,d) # Full rank
#'
#' set.seed(1203)
#' ans.t <- rtmvt(n=1000, Mean=1:d, Sigma, nu=nu, D=D1, lower=rep(-1,d), upper=rep(1,d),
#' burn=50, thin=0)
#'
#' apply(ans.t, 2, summary)
#'
rtmvt <- function(n, Mean, Sigma, nu, D, lower, upper, int=NULL, burn=10, thin=1){
  ## ---- input validation --------------------------------------------------
  if (any(lower >= upper)) stop("lower bound must be smaller than upper bound\n")
  if (any(c(burn, thin, n) %% 1 != 0)) stop("burn, thin and n must be integer\n")
  if (any(c(burn, thin, n - 1) < 0)) stop("burn, thin must be non-negative integer, n must be positive integer\n")

  ## ---- initial value -----------------------------------------------------
  ## A user-supplied `int` is accepted only when it satisfies the truncation
  ## constraints (with a small tolerance to stay off the boundary).  If it is
  ## missing OR infeasible, fall back to the bounds' midpoint mapped through a
  ## generalized inverse of D (D may be non-full rank).
  ## FIX: previously an infeasible user-supplied `int` was silently kept,
  ## because the fallback branch sat in an `else if` reachable only when
  ## `int` was NULL.
  bound.check <- 0
  if (!is.null(int)) {
    inits_test <- D %*% int
    lower.log <- inits_test >= lower + 1e-8   # tol keeps the start off the bound
    upper.log <- inits_test <= upper - 1e-8
    bound.check <- prod(lower.log * upper.log)
  }
  if (bound.check == 0) {
    D.inv <- MASS::ginv(D)
    int <- D.inv %*% (lower + upper) / 2
  }

  ## ---- problem setup -----------------------------------------------------
  if (is.vector(D)) {
    Rtilde <- t(as.matrix(D))
    lower <- as.vector(lower)
    upper <- as.vector(upper)
  } else {
    Rtilde <- D
  }
  a <- lower - Rtilde %*% Mean               # bounds for the centered problem
  b <- upper - Rtilde %*% Mean
  Sigma.chol <- t(chol(Sigma))               # lower-triangular Cholesky factor
  R <- Rtilde %*% Sigma.chol                 # constraints on the whitened scale
  p <- ncol(R)                               # dimension of the sampled vector
  x <- solve(Sigma.chol) %*% (int - Mean)    # initial value on the whitened scale

  ## ---- Gibbs sampler -----------------------------------------------------
  ## A multivariate t draw is a normal draw divided by sqrt(chisq(nu)/nu);
  ## each iteration draws the mixing variable, then a constrained normal via
  ## rtmvn, and maps the result back to the original location/scale.
  n.iter <- (thin + 1) * n + burn
  keep.t <- matrix(NA, ncol = p, nrow = n.iter)
  for (i in seq_len(n.iter)) {
    u <- stats::rchisq(1, df = nu)
    denom <- sqrt(u / nu)
    lw <- c(a * denom)
    up <- c(b * denom)
    z0 <- c(x * denom)                       # feasible start for the inner tmvn draw
    z <- c(rtmvn(n = 1, Mean = rep(0, p), Sigma = diag(1, p), D = R,
                 lower = lw, upper = up, int = z0, burn = 0))
    x <- z / denom                           # standard tmvt draw
    keep.t[i, ] <- Sigma.chol %*% x + Mean   # back-transform to the original scale
  }

  ## ---- burn-in / thinning ------------------------------------------------
  ## Keep every (thin+1)-th iteration after the burn-in period.
  final.ind <- seq(1, n * (thin + 1), by = thin + 1) + thin + burn
  if (n == 1) {
    result <- c(keep.t[final.ind, ])
  } else {
    result <- keep.t[final.ind, ]
  }
  return(result)
}
|
# Demo walkthrough for the dizzys simulation interface.  Runs a series of
# simulations and plots; pause() is called between figures.
demo.dizzysgene <- function() {
  ## Stochastic run over three cities: default plot, susceptibles, a
  ## time-vs-P projection, and a single-colour variant.
  a <- simul(N = 1e7, nbVilles = 3, type = "stoc")
  plot(a, col = c(1, 2, 5))
  pause()
  plot(a, z = "S", col = c(1, 2, 5))
  pause()
  plot(a, z = "S", col = c(1, 2, 5), proj = list(c("time", "P")), box = TRUE)
  pause()
  plot(a, z = "S", col = "red")
  pause()

  ## Default run, then several ways of continuing it in time.
  a <- simul(N = 1e7)
  plot(a)
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = 1000)
  plot(b)
  pause()
  plot(a, add = TRUE, col = "red")
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = 1000, duration = 20 * 365)
  plot(b)
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = NULL)
  plot(b)
  pause()

  ## Continuing while switching simulation type and number of cities.
  b <- simul(a, type = "sto", continue = TRUE, append = FALSE, t0 = 1000)
  plot(b)
  pause()
  b <- simul(a, type = "sto", nbVilles = 4, continue = TRUE, append = TRUE, t0 = 1000)
  plot(b, col = c(1, 2))
  pause()
  b <- simul(a, type = "sto", nbVilles = 4, continue = TRUE, append = FALSE, t0 = 800)
  plot(b)
  pause()

  ## Stochastic run continued deterministically.
  a <- simul(type = "sto", N = 1e7)
  plot(a)
  pause()
  b <- simul(a, type = "det", continue = TRUE, append = FALSE)
  plot(b)
  pause()

  ## Multi-city run with per-city population sizes, then inspect the result.
  a <- simul(type = "sto", nbVilles = 3, N = c(1e7, 1e6))
  b <- simul(a, type = "det", continue = TRUE, append = TRUE)
  plot(b)
  pause()
  summary(b)
  str(b)
  coef(b)
}
demo.dizzysgene()
|
/CODE_dizzys/refreshDIZZYS_2015_10_23/dizzyslan3/dizzys/demo/dizzysgene.r
|
no_license
|
ttcgiang/THESE_GitHub
|
R
| false
| false
| 1,014
|
r
|
# Demo walkthrough for the dizzys simulation interface.  Runs a series of
# simulations and plots; pause() is called between figures.
demo.dizzysgene <- function() {
  ## Stochastic run over three cities: default plot, susceptibles, a
  ## time-vs-P projection, and a single-colour variant.
  a <- simul(N = 1e7, nbVilles = 3, type = "stoc")
  plot(a, col = c(1, 2, 5))
  pause()
  plot(a, z = "S", col = c(1, 2, 5))
  pause()
  plot(a, z = "S", col = c(1, 2, 5), proj = list(c("time", "P")), box = TRUE)
  pause()
  plot(a, z = "S", col = "red")
  pause()

  ## Default run, then several ways of continuing it in time.
  a <- simul(N = 1e7)
  plot(a)
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = 1000)
  plot(b)
  pause()
  plot(a, add = TRUE, col = "red")
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = 1000, duration = 20 * 365)
  plot(b)
  pause()
  b <- simul(a, continue = TRUE, append = TRUE, t0 = NULL)
  plot(b)
  pause()

  ## Continuing while switching simulation type and number of cities.
  b <- simul(a, type = "sto", continue = TRUE, append = FALSE, t0 = 1000)
  plot(b)
  pause()
  b <- simul(a, type = "sto", nbVilles = 4, continue = TRUE, append = TRUE, t0 = 1000)
  plot(b, col = c(1, 2))
  pause()
  b <- simul(a, type = "sto", nbVilles = 4, continue = TRUE, append = FALSE, t0 = 800)
  plot(b)
  pause()

  ## Stochastic run continued deterministically.
  a <- simul(type = "sto", N = 1e7)
  plot(a)
  pause()
  b <- simul(a, type = "det", continue = TRUE, append = FALSE)
  plot(b)
  pause()

  ## Multi-city run with per-city population sizes, then inspect the result.
  a <- simul(type = "sto", nbVilles = 3, N = c(1e7, 1e6))
  b <- simul(a, type = "det", continue = TRUE, append = TRUE)
  plot(b)
  pause()
  summary(b)
  str(b)
  coef(b)
}
demo.dizzysgene()
|
context("Expectation Maximization Like Least Squares Classifier")
# Simple dataset used in the tests
data(testdata)
# Shared fixtures referenced by the test_that blocks below; do not rename.
modelform <- testdata$modelform  # formula interface to the data
classname<-all.vars(modelform)[1]  # name of the response variable
D <- testdata$D  # training data frame (formula interface)
D_test <- testdata$D_test  # held-out data frame
X <- testdata$X  # labeled design matrix
X_u <- testdata$X_u  # unlabeled design matrix
y <- testdata$y  # labels corresponding to the rows of X
X_test <- testdata$X_test  # held-out design matrix
y_test <- testdata$y_test  # held-out labels
test_that("Formula and matrix formulation give same results",{
  # Fit once through the matrix interface and once through the formula one.
  fit_matrix <- EMLeastSquaresClassifier(X, factor(y), X_u)
  fit_formula <- EMLeastSquaresClassifier(modelform, D)

  # Both interfaces must agree on predictions, loss and class labels.
  expect_equal(predict(fit_matrix, X_test), predict(fit_formula, D_test))
  expect_equal(loss(fit_matrix, X_test, y_test), loss(fit_formula, D_test))
  expect_equal(fit_matrix@classnames, fit_formula@classnames)
})
test_that("Different settings return the same loss",{
  # With an intercept, centering the inputs should not change the loss.
  fit_plain <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE)
  fit_centered <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE)
  expect_equal(loss(fit_plain, X_test, y_test),
               loss(fit_centered, X_test, y_test), tolerance = 10e-6)

  # Scaling the output gives a different loss than above, but the result is
  # again invariant to x centering / argument order.
  fit_yscale_a <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, x_center = TRUE, scale = TRUE, y_scale = TRUE)
  fit_yscale_b <- EMLeastSquaresClassifier(X, y, X_u, intercept = TRUE, scale = TRUE, x_center = TRUE, y_scale = TRUE)
  expect_equal(loss(fit_yscale_a, X_test, y_test),
               loss(fit_yscale_b, X_test, y_test), tolerance = 10e-6)
})
test_that("Hard label EM and self-learning give the same result", {
  # On the same partially labeled data, block-wise hard-label EM initialized
  # from the supervised solution should coincide with self-learning.
  d_train <- generate2ClassGaussian(200, d = 2, expected = FALSE)
  d_train <- add_missinglabels_mar(d_train, Class ~ ., prob = 0.9)
  d_test <- generate2ClassGaussian(200, d = 2, expected = FALSE)

  fit_em <- EMLeastSquaresClassifier(Class ~ ., d_train, method = "block",
                                     objective = "responsibility",
                                     init = "supervised", save_all = TRUE)
  fit_self <- SelfLearning(Class ~ ., d_train, method = LeastSquaresClassifier)

  expect_equal(loss(fit_em, d_test), loss(fit_self, d_test))
  expect_equal(fit_em@theta, fit_self@model@theta)
})
# FIX: this block shared the exact description "Gradient superficially
# correct" with the next one, making failure reports ambiguous; the two
# descriptions are now distinct.
test_that("Gradient of minmin_lsy loss superficially correct",{
  # Compare the analytic gradient against a numeric one at random points.
  library("numDeriv")
  data(testdata)
  X <- cbind(1, testdata$X)    # prepend intercept column
  X_u <- cbind(1, testdata$X_u)
  Xe <- rbind(X, X_u)          # extended design: labeled + unlabeled rows
  Y <- model.matrix(~y - 1, data.frame(y = testdata$y))[, 1, drop = FALSE]
  for (i in seq_len(100)) {
    # Random classifier weights plus random responsibilities for X_u.
    w <- c(rnorm(ncol(X)), runif(nrow(X_u)))
    grad_num <- as.numeric(
      numDeriv::grad(
        loss_minmin_lsy,
        w, Xe = Xe, Y = Y, X_u = X_u,
        method = "simple")
    )
    grad_exact <- as.numeric(
      gradient_minmin_lsy(
        w, Xe = Xe, Y = Y, X_u = X_u)
    )
    expect_equal(grad_num, grad_exact,
                 tolerance = 10e-4)
  }
})
test_that("Gradient superficially correct",{
library("numDeriv")
data(testdata)
X <- cbind(1,testdata$X)
X_u <- cbind(1,testdata$X_u)
Xe <- rbind(X,X_u)
Y <- model.matrix(~y-1,data.frame(y=testdata$y))[,1,drop=FALSE]
for (i in 1:100) {
w <- c(rnorm(ncol(X)),runif(nrow(X_u)))
grad_num <- as.numeric(
numDeriv::grad(
loss_minmin_lsq,
w, Xe=Xe, Y=Y, X_u=X_u,X=X,
method="simple")
)
grad_exact <- as.numeric(
gradient_minmin_lsq(
w, Xe=Xe, Y=Y, X_u=X_u,X=X)
)
expect_equal(grad_num,grad_exact,
tolerance=10e-4)
}
})
|
/tests/testthat/test-EMLeastSquaresClassifier.R
|
no_license
|
biocq/RSSL
|
R
| false
| false
| 3,162
|
r
|
context("Expectation Maximization Like Least Squares Classifier")
# Simple dataset used in the tests
data(testdata)
modelform <- testdata$modelform
classname<-all.vars(modelform)[1]
D <- testdata$D
D_test <- testdata$D_test
X <- testdata$X
X_u <- testdata$X_u
y <- testdata$y
X_test <- testdata$X_test
y_test <- testdata$y_test
test_that("Formula and matrix formulation give same results",{
g_matrix <- EMLeastSquaresClassifier(X,factor(y),X_u)
g_model <- EMLeastSquaresClassifier(modelform, D)
expect_equal(predict(g_matrix,X_test),predict(g_model,D_test))
expect_equal(loss(g_matrix, X_test, y_test),loss(g_model, D_test))
expect_equal(g_matrix@classnames,g_model@classnames)
})
test_that("Different settings return the same loss",{
g_1 <- EMLeastSquaresClassifier(X,y,X_u,intercept=TRUE,scale=TRUE)
g_2 <- EMLeastSquaresClassifier(X,y,X_u,intercept=TRUE,x_center=TRUE,scale=TRUE)
expect_equal(loss(g_1,X_test,y_test),loss(g_2,X_test,y_test),tolerance=10e-6)
# We get a different loss when we center the output
g_3<-EMLeastSquaresClassifier(X,y,X_u,intercept=TRUE,x_center=TRUE,scale=TRUE,y_scale=TRUE)
g_4<-EMLeastSquaresClassifier(X,y,X_u,intercept=TRUE,scale=TRUE,x_center=TRUE,y_scale=TRUE)
expect_equal(loss(g_3,X_test,y_test),loss(g_4,X_test,y_test),tolerance=10e-6)
})
test_that("Hard label EM and self-learning give the same result", {
data <- generate2ClassGaussian(200,d=2, expected=FALSE)
data <- add_missinglabels_mar(data, Class~., prob=0.9)
data_test <- generate2ClassGaussian(200,d=2, expected=FALSE)
g_block <- EMLeastSquaresClassifier(Class~.,data,method="block",objective="responsibility",init="supervised",save_all=TRUE)
g_self <- SelfLearning(Class~.,data,method=LeastSquaresClassifier)
expect_equal(loss(g_block,data_test), loss(g_self,data_test))
expect_equal(g_block@theta, g_self@model@theta)
})
test_that("Gradient superficially correct",{
library("numDeriv")
data(testdata)
X <- cbind(1,testdata$X)
X_u <- cbind(1,testdata$X_u)
Xe <- rbind(X,X_u)
Y <- model.matrix(~y-1,data.frame(y=testdata$y))[,1,drop=FALSE]
for (i in 1:100) {
w <- c(rnorm(ncol(X)),runif(nrow(X_u)))
grad_num <- as.numeric(
numDeriv::grad(
loss_minmin_lsy,
w, Xe=Xe, Y=Y, X_u=X_u,
method="simple")
)
grad_exact <- as.numeric(
gradient_minmin_lsy(
w, Xe=Xe, Y=Y, X_u=X_u)
)
expect_equal(grad_num,grad_exact,
tolerance=10e-4)
}
})
test_that("Gradient superficially correct",{
library("numDeriv")
data(testdata)
X <- cbind(1,testdata$X)
X_u <- cbind(1,testdata$X_u)
Xe <- rbind(X,X_u)
Y <- model.matrix(~y-1,data.frame(y=testdata$y))[,1,drop=FALSE]
for (i in 1:100) {
w <- c(rnorm(ncol(X)),runif(nrow(X_u)))
grad_num <- as.numeric(
numDeriv::grad(
loss_minmin_lsq,
w, Xe=Xe, Y=Y, X_u=X_u,X=X,
method="simple")
)
grad_exact <- as.numeric(
gradient_minmin_lsq(
w, Xe=Xe, Y=Y, X_u=X_u,X=X)
)
expect_equal(grad_num,grad_exact,
tolerance=10e-4)
}
})
|
### Read in Phase 1 - staircase procedure #######
# Reads in behavioural data from relevant phase 1 (staircase procedure) .csv files,
# adds few additional columns useful for further analysis, and saves the files
#############################################################################
# LOST FILES
# c(1761,3313,4733,4734,5223,7451,7452)
################################################################################
# Extract data from Phase 1 from all participants and save them into a single
# file: my.data. Expects `data.folder` and data.table's fread/rbindlist plus
# dplyr to be available in the session.
files<-list.files(path=data.folder,pattern='*phase1.csv',full.names=T)
my.data.p1f<-rbindlist(lapply(files, fread),use.names=TRUE,fill=TRUE)
### SAVE IT (raw, per-staircase columns)
save(my.data.p1f,file='_Data\\d_staircase_bf.RData')
### Put it all 3 staircases into a single column ############################################
# Collapse the three per-staircase column sets into one set of columns:
# - name:          trial type ("drop" if intro.* is filled, "staircase" if
#                  trials.*, otherwise "baseline")
# - contrast:      stimulus intensity, taken from whichever staircase column
#                  is non-missing
# - response.keys: pressed key, first non-empty of the three key columns
# - response.corr: correctness flag, first non-missing of the three columns
# - partSes_ID / part_ID: participant-session ID and its 3-character prefix
my.data.p1<-
my.data.p1f %>%
mutate(name=ifelse(intro.name!="","drop",ifelse(trials.name!="","staircase","baseline")),
contrast=ifelse(!is.na(intro.intensity),intro.intensity,ifelse(!is.na(trials.intensity),trials.intensity,contrast_baseline)),
response.keys=ifelse(response_drop.keys!="",response_drop.keys,ifelse(key_resp_dir.keys!="",key_resp_dir.keys,key_resp_5.keys)),
response.corr=ifelse(!is.na(intro.response),intro.response,ifelse(!is.na(trials.response),trials.response,key_resp_5.corr)),
partSes_ID = participant,
part_ID = substring(participant,1,3)) %>%
select(name,contrast,response.keys,response.corr,partSes_ID,part_ID)
# SAVE IT (collapsed behavioural data)
save(my.data.p1,file='_Data\\d_staircase_b.RData')
### READ IN EYE TRACKING FILES ######################################################
# Locate the phase-1 eye-tracking files.
files.eye<-list.files(path=data.folder,pattern='p1_gaze.txt',full.names=T)
### READ IN EYE TRACK DATA #####################################################
# Create containers for the per-participant data frames.
eye.data.trials.p1<-list()
eye.data.fixations.p1<-list()
eye.data.analysis.p1<-list()
eye.participant.p1<-NULL
# Participant IDs are the 4-5 digit numbers embedded in the file names.
eye.participant<-as.numeric(str_extract(files.eye,"[[:digit:]]{4,5}"))
# seq_along() is safe when files.eye is empty (1:length(x) would yield c(1, 0)).
for(i in seq_along(files.eye)){
print(eye.participant[i])
# Create a file.txt that is then read by the next function.
clean_eye_tracker_data(i)
# Then use this data to create a cleaned eye-tracking file with additional calculations.
eye.data.trials.p1[[i]]<-trial_eye_track_data()
eye.data.fixations.p1[[i]]<-fixation_eye_track_data(eye.data.trials.p1[[i]])
# Get the finished analysis file.
eye.data.analysis.p1[[i]]<-calc_eye_tracker_values(eye.data.fixations.p1[[i]])
}
### ADD ANALYSED EYE TRACK DATA TO MY.DATA ######################################
my.data.p1t <- my.data.p1
# Take the column names from the first analysed participant instead of relying
# on the loop variable `i` leaking out of the loop above (the original only
# worked because `i` still held its last value). Assumes all analysis frames
# share the same column names -- TODO confirm.
namevector<-names(eye.data.analysis.p1[[1]])
# Drop any pre-existing columns with these names, then re-create them with
# typed placeholders (numeric, character, character, numeric, 4x integer).
my.data.p1t[,(namevector):=NULL]
my.data.p1t[,(namevector):=list(0,'','',0,0L,0L,0L,0L)]
# Fill in each participant's analysed values on the matching session rows.
for (i in seq_along(files.eye)){
sel<-which(my.data.p1t$partSes_ID==eye.participant[i])
my.data.p1t[sel,(namevector):=eye.data.analysis.p1[[i]]]
}
### SAVE FILE ##################################################################
save(my.data.p1t,file='_Data\\d_staircase_bt.RData')
save(eye.data.analysis.p1,file='_Data\\d_staircase_tf_analysis.RData')
save(eye.data.fixations.p1,file='_Data\\d_staircase_tf_fixations.RData')
save(eye.data.trials.p1,file='_Data\\d_staircase_tf_trials.RData')
|
/r_phase1.R
|
no_license
|
UlfLab/Evidence_Confidence
|
R
| false
| false
| 3,453
|
r
|
# NOTE(review): this section duplicates, verbatim, the phase-1 script above;
# confirm whether the duplication is intended.
### Read in Phase 1 - staircase procedure #######
# Reads in behavioural data from relevant phase 1 (staircase procedure) .csv files,
# adds few additional columns useful for further analysis, and saves the files
#############################################################################
# LOST FILES
# c(1761,3313,4733,4734,5223,7451,7452)
################################################################################
# Extract data from Phase 1 from all participants and save them into a single file: my.data
files<-list.files(path=data.folder,pattern='*phase1.csv',full.names=T)
my.data.p1f<-rbindlist(lapply(files, fread),use.names=TRUE,fill=TRUE)
### SAVE IT
save(my.data.p1f,file='_Data\\d_staircase_bf.RData')
### Put it all 3 staircases into a single column ############################################
# Collapse the three per-staircase column sets into one set of columns
# (trial type, contrast, response key, correctness, participant IDs).
my.data.p1<-
my.data.p1f %>%
mutate(name=ifelse(intro.name!="","drop",ifelse(trials.name!="","staircase","baseline")),
contrast=ifelse(!is.na(intro.intensity),intro.intensity,ifelse(!is.na(trials.intensity),trials.intensity,contrast_baseline)),
response.keys=ifelse(response_drop.keys!="",response_drop.keys,ifelse(key_resp_dir.keys!="",key_resp_dir.keys,key_resp_5.keys)),
response.corr=ifelse(!is.na(intro.response),intro.response,ifelse(!is.na(trials.response),trials.response,key_resp_5.corr)),
partSes_ID = participant,
part_ID = substring(participant,1,3)) %>%
select(name,contrast,response.keys,response.corr,partSes_ID,part_ID)
# SAVE IT
save(my.data.p1,file='_Data\\d_staircase_b.RData')
### READ IN EYE TRACKING FILES ######################################################
# get names of the eye tracking files
files.eye<-list.files(path=data.folder,pattern='p1_gaze.txt',full.names=T)
### READ IN EYE TRACK DATA #####################################################
# create containers for data frames
eye.data.trials.p1<-list()
eye.data.fixations.p1<-list()
eye.data.analysis.p1<-list()
eye.participant.p1<-NULL
# Participant IDs are the 4-5 digit numbers embedded in the file names.
eye.participant<-as.numeric(str_extract(files.eye,"[[:digit:]]{4,5}"))
# NOTE(review): 1:length(files.eye) misbehaves when files.eye is empty;
# seq_along(files.eye) would be safer.
for(i in 1:length(files.eye)){
print(eye.participant[i])
#Create a file.txt that is then read by the next function
clean_eye_tracker_data(i)
#then use this data to create a cleaned eye tracking file with additional calculations
eye.data.trials.p1[[i]]<-trial_eye_track_data()
eye.data.fixations.p1[[i]]<-fixation_eye_track_data(eye.data.trials.p1[[i]])
#get finished analysis file
eye.data.analysis.p1[[i]]<-calc_eye_tracker_values(eye.data.fixations.p1[[i]])
}
### ADD ANALYSED EYE TRACK DATA TO MY.DATA ######################################
my.data.p1t <- my.data.p1
# NOTE(review): `i` here is the loop variable leaking from the for-loop above
# (it indexes the LAST participant); this only works if all analysis frames
# share the same column names -- confirm, and prefer an explicit index.
namevector<-names(eye.data.analysis.p1[[i]])
# NOTE(review): `nv` is computed but never used.
nv<-which(names(my.data.p1t)%in%namevector)
my.data.p1t[,(namevector):=NULL]
# Typed placeholders: numeric, character, character, numeric, 4x integer.
my.data.p1t[,(namevector):=list(0,'','',0,0L,0L,0L,0L)]
for (i in 1:length(files.eye)){
sel<-which(my.data.p1t$partSes_ID==eye.participant[i])
my.data.p1t[sel,(namevector):=eye.data.analysis.p1[[i]]]
}
### SAVE FILE ##################################################################
save(my.data.p1t,file='_Data\\d_staircase_bt.RData')
save(eye.data.analysis.p1,file='_Data\\d_staircase_tf_analysis.RData')
save(eye.data.fixations.p1,file='_Data\\d_staircase_tf_fixations.RData')
save(eye.data.trials.p1,file='_Data\\d_staircase_tf_trials.RData')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_transit_gateways}
\alias{ec2_describe_transit_gateways}
\title{Describes one or more transit gateways}
\usage{
ec2_describe_transit_gateways(TransitGatewayIds, Filters, MaxResults,
NextToken, DryRun)
}
\arguments{
\item{TransitGatewayIds}{The IDs of the transit gateways.}
\item{Filters}{One or more filters. The possible values are:
\itemize{
\item \code{options.propagation-default-route-table-id} - The ID of the default
propagation route table.
\item \code{options.amazon-side-asn} - The private ASN for the Amazon side of a
BGP session.
\item \code{options.association-default-route-table-id} - The ID of the default
association route table.
\item \code{options.auto-accept-shared-attachments} - Indicates whether there
is automatic acceptance of attachment requests (\code{enable} \\|
\code{disable}).
\item \code{options.default-route-table-association} - Indicates whether
resource attachments are automatically associated with the default
association route table (\code{enable} \\| \code{disable}).
\item \code{options.default-route-table-propagation} - Indicates whether
resource attachments automatically propagate routes to the default
propagation route table (\code{enable} \\| \code{disable}).
\item \code{options.dns-support} - Indicates whether DNS support is enabled
(\code{enable} \\| \code{disable}).
\item \code{options.vpn-ecmp-support} - Indicates whether Equal Cost Multipath
Protocol support is enabled (\code{enable} \\| \code{disable}).
\item \code{owner-id} - The ID of the AWS account that owns the transit
gateway.
\item \code{state} - The state of the attachment (\code{available} \\| \code{deleted} \\|
\code{deleting} \\| \code{failed} \\| \code{modifying} \\| \code{pendingAcceptance} \\|
\code{pending} \\| \code{rollingBack} \\| \code{rejected} \\| \code{rejecting}).
\item \code{transit-gateway-id} - The ID of the transit gateway.
}}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{NextToken}{The token for the next page of results.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Describes one or more transit gateways. By default, all transit gateways
are described. Alternatively, you can filter the results.
}
\section{Request syntax}{
\preformatted{svc$describe_transit_gateways(
TransitGatewayIds = list(
"string"
),
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
/paws/man/ec2_describe_transit_gateways.Rd
|
permissive
|
johnnytommy/paws
|
R
| false
| true
| 2,969
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_transit_gateways}
\alias{ec2_describe_transit_gateways}
\title{Describes one or more transit gateways}
\usage{
ec2_describe_transit_gateways(TransitGatewayIds, Filters, MaxResults,
NextToken, DryRun)
}
\arguments{
\item{TransitGatewayIds}{The IDs of the transit gateways.}
\item{Filters}{One or more filters. The possible values are:
\itemize{
\item \code{options.propagation-default-route-table-id} - The ID of the default
propagation route table.
\item \code{options.amazon-side-asn} - The private ASN for the Amazon side of a
BGP session.
\item \code{options.association-default-route-table-id} - The ID of the default
association route table.
\item \code{options.auto-accept-shared-attachments} - Indicates whether there
is automatic acceptance of attachment requests (\code{enable} \\|
\code{disable}).
\item \code{options.default-route-table-association} - Indicates whether
resource attachments are automatically associated with the default
association route table (\code{enable} \\| \code{disable}).
\item \code{options.default-route-table-propagation} - Indicates whether
resource attachments automatically propagate routes to the default
propagation route table (\code{enable} \\| \code{disable}).
\item \code{options.dns-support} - Indicates whether DNS support is enabled
(\code{enable} \\| \code{disable}).
\item \code{options.vpn-ecmp-support} - Indicates whether Equal Cost Multipath
Protocol support is enabled (\code{enable} \\| \code{disable}).
\item \code{owner-id} - The ID of the AWS account that owns the transit
gateway.
\item \code{state} - The state of the attachment (\code{available} \\| \code{deleted} \\|
\code{deleting} \\| \code{failed} \\| \code{modifying} \\| \code{pendingAcceptance} \\|
\code{pending} \\| \code{rollingBack} \\| \code{rejected} \\| \code{rejecting}).
\item \code{transit-gateway-id} - The ID of the transit gateway.
}}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{NextToken}{The token for the next page of results.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Describes one or more transit gateways. By default, all transit gateways
are described. Alternatively, you can filter the results.
}
\section{Request syntax}{
\preformatted{svc$describe_transit_gateways(
TransitGatewayIds = list(
"string"
),
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
# Read the data from the file provided from http://archive.ics.uci.edu/ml/
# (UC Irvine Machine Learning Repository). The unzipped file should be in the
# working directory; na.strings = "?" makes the measurement columns parse
# directly as numeric.
data <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                   stringsAsFactors = FALSE, na.strings = "?")
library(dplyr)
# Keep only the observations from 1 and 2 February 2007. The source file is
# chronologically ordered, so this is equivalent to rbind-ing the two
# single-day subsets.
thetwo <- data[data$Date %in% c('1/2/2007', '2/2/2007'), ]
# Combine the Date and Time columns into a single date-time column.
fulldate <- strptime(paste(thetwo$Date, thetwo$Time), format = '%d/%m/%Y %H:%M:%S')
sourcedata <- cbind(thetwo, fulldate)
# The measurement columns are already numeric (see na.strings above); the
# original wrapped them in as.numeric() inside select(), which selects by
# position rather than converting anything. Select them by name instead.
sourcedata <- select(sourcedata, fulldate, Global_active_power, Sub_metering_1,
                     Sub_metering_2, Sub_metering_3, Voltage, Global_reactive_power)
# Render straight into the PNG device so the script also works in
# non-interactive sessions where no screen device is open.
png(file = "plot4.png")
par(mfrow = c(2, 2))  # 2x2 grid of panels
with(sourcedata, {
  # Panel 1: global active power over time.
  plot(fulldate, Global_active_power, main = "", xlab = "", ylab = "Global Active Power", type = "n")
  lines(fulldate, Global_active_power, type = "l")
  # Panel 2: voltage.
  plot(fulldate, Voltage, main = "", xlab = "datetime", ylab = "Voltage", type = "n")
  lines(fulldate, Voltage, type = "l")
  # Panel 3: the three sub-metering series.
  plot(fulldate, Sub_metering_1, main = "", xlab = "", ylab = "Energy sub metering", type = "n")
  lines(fulldate, Sub_metering_1, type = "l", col = "black")
  lines(fulldate, Sub_metering_2, type = "l", col = "red")
  lines(fulldate, Sub_metering_3, type = "l", col = "blue")
  # Bug fix: the legend colours now match the line colours drawn above.
  # (The original listed black/blue/red, mislabelling Sub_metering_2 and _3.)
  legend("topright", lty = c(1, 1, 1), col = c("black", "red", "blue"),
         legend = c("Sub_metering_1 ", "Sub_metering_2 ", "Sub_metering_3 "),
         bty = "n", cex = .85)
  # Panel 4: global reactive power.
  plot(fulldate, Global_reactive_power, main = "", xlab = "datetime", ylab = "Global_reactive_power", type = "n")
  lines(fulldate, Global_reactive_power, type = "l")
})
dev.off()
|
/plot4.R
|
no_license
|
lena-stevanoska/ExData_Plotting1
|
R
| false
| false
| 1,774
|
r
|
# NOTE(review): this section duplicates, verbatim, the plotting script above;
# confirm whether the duplication is intended.
#Read the data from the file provided from the http://archive.ics.uci.edu/ml/ (UC Irvine Machine Learning Repository).
#The unzipped file should be in the working directory.
data<-read.table("household_power_consumption.txt",sep=";", header= TRUE,stringsAsFactors=FALSE, na.strings = "?")
#Read the data from the first of February 2007 and then from the second of February 2007 and join them together)
fromthefirst<-data[data$Date=='1/2/2007',]
fromthesecond<-data[data$Date=='2/2/2007',]
thetwo<-rbind(fromthefirst,fromthesecond)
library(dplyr)
# Combine Date and Time into a single date-time column for the x axis.
fulldate<-strptime(paste(thetwo$Date,thetwo$Time),format='%d/%m/%Y %H:%M:%S')
sourcedata<-cbind(thetwo,fulldate)
# NOTE(review): as.numeric() inside select() does not convert anything --
# with na.strings="?" the columns are already numeric; selecting by bare
# name would be clearer. Verify this call behaves as intended on the
# installed dplyr version.
sourcedata<-select(sourcedata,fulldate,as.numeric(Global_active_power),as.numeric(Sub_metering_1),as.numeric(Sub_metering_2),as.numeric(Sub_metering_3), as.numeric(Voltage), as.numeric(Global_reactive_power))
# 2x2 grid of panels.
par(mfrow=c(2,2))
with(sourcedata,{
plot(fulldate,Global_active_power,main="",xlab="",ylab="Global Active Power",type="n")
lines(fulldate,Global_active_power,type="l")
plot(fulldate,Voltage,main="",xlab="datetime",ylab="Voltage",type="n")
lines(fulldate,Voltage,type="l")
plot(fulldate,Sub_metering_1,main="",xlab="",ylab="Energy sub metering",type="n")
lines(fulldate,Sub_metering_1,type="l", col="black")
lines(fulldate,Sub_metering_2,type="l", col="red")
lines(fulldate,Sub_metering_3,type="l", col="blue")
# NOTE(review): the lines are drawn black/red/blue but the legend colours are
# black/blue/red -- Sub_metering_2 and _3 appear mislabelled; confirm.
legend("topright", lty = c(1,1,1), col = c("black","blue", "red"), legend = c("Sub_metering_1 ", "Sub_metering_2 ","Sub_metering_3 "), bty="n", cex=.85)
plot(fulldate,Global_reactive_power,main="",xlab="datetime",ylab="Global_reactive_power",type="n")
lines(fulldate,Global_reactive_power,type="l")
})
dev.copy(png, file = "plot4.png") ## Copy my plot to a PNG file
dev.off()
|
## Data pre-processing
library(CASdatasets)
library(keras)
library(tidyverse)
library(recipes)
library(glue)
library(zeallot)
library(tfruns)
# French motor third-party liability claim-frequency data.
data("freMTPLfreq")
## Pre-processing:
# treat Power as an integer;
# Gas, Brand, Region as factors;
# all exposures above 1 are capped at 1;
# insured drivers older than 85 are capped at 85,
# maximum car age of 20 years
dat <- freMTPLfreq %>%
as_tibble() %>%
mutate_at(vars(Gas, Brand, Region), factor) %>%
mutate_at(vars(Power),as.integer) %>%
mutate(Exposure = if_else(Exposure > 1, 1, Exposure))%>%
mutate(DriverAge= ifelse(DriverAge > 85,85,DriverAge)) %>%
mutate(CarAge = ifelse(CarAge > 20,20,CarAge))
## Stratify the split on the claim count: 20% test, 20% validation,
## 60% training.

# Draw a stratified sample of row indices. For each claim count 0..4, in that
# order (so the RNG stream is identical to the original element-by-element
# code), sample `prop` of the rows of that class without replacement, then
# shuffle the combined indices so the data are not ordered by class when the
# algorithm builds its mini-batches.
stratified_indices <- function(labels, prop) {
  picked <- unlist(lapply(0:4, function(k) {
    pool <- which(labels == k)
    sample(pool, round(prop * length(pool)), replace = FALSE)
  }))
  sample(picked, size = length(picked), replace = FALSE)
}

set.seed(100)
ll <- stratified_indices(dat$ClaimNb, 0.8)
# Create the (training + validation) and test sets.
learn <- dat[ll, ]
testNN <- dat[-ll, ]

## Indices for the validation sample (75% of `learn` = 60% overall).
set.seed(200)
ll2 <- stratified_indices(learn$ClaimNb, 0.75)
learnNN <- learn[ll2, ]
valNN <- learn[-ll2, ]
## Create a recipe to process the data.
## The recipe is learned on the training data and then "baked" onto every
## sample afterwards. This ensures the min and max used for scaling are
## learned on the training sample only. In our case it makes little practical
## difference, since every sample should contain enough data to share the
## same min and max per variable. It would matter more if we standardised
## using the mean and standard deviation.
rec_obj <-
recipe(ClaimNb ~ .,
data = learnNN) %>% step_rm(PolicyID) %>% #PolicyId is not informative
step_log(Density) %>% # take the log of Density
step_range(CarAge, DriverAge, Density,Power) %>% # (x - min(x) )/ (max(x) - min(x) )
step_dummy(Gas,
Brand,
Region,
one_hot = F,
preserve = F) %>% # create indicator variables
prep(training = learnNN) # learn on the training sample
# Apply the recipe to each sample; Exposure becomes the model offset.
learn_prepped <- bake(rec_obj, new_data = learnNN) %>% rename(Offset = Exposure)
test_prepped <- bake(rec_obj, new_data = testNN) %>% rename(Offset = Exposure)
val_prepped <- bake(rec_obj, new_data = valNN) %>% rename(Offset=Exposure)
# The data must be put in matrix form. The networks have two distinct input
# layers, X and W: the design matrix and the offset. Build one matrix per
# input plus a vector for the response variable.
# Assumes baked column 1 is the Offset and column 6 the response, as used
# below -- verify if the recipe or column order changes.
features <- c(2:5,7:22)
XlearnNN <- as.matrix(learn_prepped[,features])
YlearnNN <- as.numeric(as.matrix(learn_prepped[,6]))
# take the log of the exposure
WlearnNN <- as.matrix(log(learn_prepped[,1]))
# Same for the validation and test samples
XvalNN <- as.matrix(val_prepped[,features])
YvalNN <- as.numeric(as.matrix(val_prepped[,6]))
WvalNN <- as.matrix(log(val_prepped[,1]))
XtestNN <- as.matrix(test_prepped[,features])
YtestNN <- as.numeric(as.matrix(test_prepped[,6]))
WtestNN <- as.matrix(log(test_prepped[,1]))
## Custom loss function: mean Poisson deviance.
## Must be written with the keras backend functions (k_**) so it can be
## evaluated on tensors; k_epsilon() guards the log against zero values.
Poisson.Deviance <- function(y_true,y_pred){
2*k_mean( y_pred - y_true + k_log( ( y_true + k_epsilon() ) / ( y_pred + k_epsilon() ) ) * y_true, axis = -1)
}
|
/Pre-traitement.R
|
no_license
|
nibel113/Projet-reseau-de-neurones
|
R
| false
| false
| 4,729
|
r
|
# NOTE(review): this section duplicates, verbatim, the pre-processing script
# above; confirm whether the duplication is intended.
## Data pre-processing
library(CASdatasets)
library(keras)
library(tidyverse)
library(recipes)
library(glue)
library(zeallot)
library(tfruns)
data("freMTPLfreq")
## Pre-processing:
# treat Power as an integer;
# Gas, Brand, Region as factors;
# all exposures above 1 are capped at 1;
# insured drivers older than 85 are capped at 85,
# maximum car age of 20 years
dat <- freMTPLfreq %>%
as_tibble() %>%
mutate_at(vars(Gas, Brand, Region), factor) %>%
mutate_at(vars(Power),as.integer) %>%
mutate(Exposure = if_else(Exposure > 1, 1, Exposure))%>%
mutate(DriverAge= ifelse(DriverAge > 85,85,DriverAge)) %>%
mutate(CarAge = ifelse(CarAge > 20,20,CarAge))
## Stratify the data on the claim count:
## 20% test, 20% validation and 60% training
set.seed(100)
ll <- sample(which(dat$ClaimNb==0), round(0.8*length(which(dat$ClaimNb==0))), replace = FALSE)
ll <- c(ll,sample(which(dat$ClaimNb==1), round(0.8*length(which(dat$ClaimNb==1))), replace = FALSE))
ll <- c(ll,sample(which(dat$ClaimNb==2), round(0.8*length(which(dat$ClaimNb==2))), replace = FALSE))
ll <- c(ll,sample(which(dat$ClaimNb==3), round(0.8*length(which(dat$ClaimNb==3))), replace = FALSE))
ll <- c(ll,sample(which(dat$ClaimNb==4), round(0.8*length(which(dat$ClaimNb==4))), replace = FALSE))
## Re-shuffle, just to make sure the data are not ordered
## when the algorithm builds the mini-batches
ll <- sample(ll,size=length(ll),replace = F)
## Create the test and training sets
learn <- dat[ll,]
testNN <- dat[-ll,]
## Define the indices for the validation sample
set.seed(200)
ll2 <- sample(which(learn$ClaimNb==0), round(0.75*length(which(learn$ClaimNb==0))), replace = FALSE)
ll2 <- c(ll2,sample(which(learn$ClaimNb==1), round(0.75*length(which(learn$ClaimNb==1))), replace = FALSE))
ll2 <- c(ll2,sample(which(learn$ClaimNb==2), round(0.75*length(which(learn$ClaimNb==2))), replace = FALSE))
ll2 <- c(ll2,sample(which(learn$ClaimNb==3), round(0.75*length(which(learn$ClaimNb==3))), replace = FALSE))
ll2 <- c(ll2,sample(which(learn$ClaimNb==4), round(0.75*length(which(learn$ClaimNb==4))), replace = FALSE))
## Restore random ordering
ll2 <- sample(ll2,size=length(ll2),replace = F)
learnNN <- learn[ll2,]
valNN <- learn[-ll2,]
## Create a recipe to process the data.
## The recipe is learned on the training data
## and then "baked" onto every sample afterwards.
## This ensures the min and max used for scaling are learned on the training
## sample only. In our case it makes little practical difference, since every
## sample should contain enough data to share the same min and max per
## variable. It would matter more if we standardised using the mean and
## standard deviation.
rec_obj <-
recipe(ClaimNb ~ .,
data = learnNN) %>% step_rm(PolicyID) %>% #PolicyId is not informative
step_log(Density) %>% # take the log of Density
step_range(CarAge, DriverAge, Density,Power) %>% # (x - min(x) )/ (max(x) - min(x) )
step_dummy(Gas,
Brand,
Region,
one_hot = F,
preserve = F) %>% # create indicator variables
prep(training = learnNN) # learn on the training sample
# Apply the recipe to each sample
learn_prepped <- bake(rec_obj, new_data = learnNN) %>% rename(Offset = Exposure)
test_prepped <- bake(rec_obj, new_data = testNN) %>% rename(Offset = Exposure)
val_prepped <- bake(rec_obj, new_data = valNN) %>% rename(Offset=Exposure)
# The data must be put in matrix form.
# The networks have two distinct input layers, X and W:
# the design matrix and the offset.
# Build one matrix per input plus a vector for the response variable
features <- c(2:5,7:22)
XlearnNN <- as.matrix(learn_prepped[,features])
YlearnNN <- as.numeric(as.matrix(learn_prepped[,6]))
# take the log of the exposure
WlearnNN <- as.matrix(log(learn_prepped[,1]))
# Same for the validation and test samples
XvalNN <- as.matrix(val_prepped[,features])
YvalNN <- as.numeric(as.matrix(val_prepped[,6]))
WvalNN <- as.matrix(log(val_prepped[,1]))
XtestNN <- as.matrix(test_prepped[,features])
YtestNN <- as.numeric(as.matrix(test_prepped[,6]))
WtestNN <- as.matrix(log(test_prepped[,1]))
## Custom loss function (mean Poisson deviance);
## must use the keras backend functions k_**.
Poisson.Deviance <- function(y_true,y_pred){
2*k_mean( y_pred - y_true + k_log( ( y_true + k_epsilon() ) / ( y_pred + k_epsilon() ) ) * y_true, axis = -1)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_fs_label.R
\name{read.fs.label.native}
\alias{read.fs.label.native}
\title{Read file in FreeSurfer label format}
\usage{
read.fs.label.native(
filepath,
return_one_based_indices = TRUE,
full = FALSE,
metadata = list()
)
}
\arguments{
\item{filepath}{string. Full path to the input label file.}
\item{return_one_based_indices}{logical. Whether the indices should be 1-based. Indices are stored zero-based in the file, but R uses 1-based indices. Defaults to TRUE, which means that 1 will be added to all indices read from the file before returning them. Notice that for volume labels, the indices are negative (-1), and the coord fields contain the *positions* of the voxels in tkras space (**not** the voxel *indices* in a volume). If a file contains negative indices, they will NOT be incremented, no matter what this is set to.}
\item{full}{logical, whether to return a full object of class `fs.label` instead of only a vector containing the vertex indices. If TRUE, a named list with the following two entries is returned: 'one_based_indices': logical, whether the vertex indices are one-based. 'vertexdata': a data.frame with the following columns: 'vertex_index': integer, see parameter 'return_one_based_indices', 'coord1', 'coord2', 'coord3': float coordinates, 'value': float, scalar data for the vertex, can mean anything. This parameter defaults to FALSE.}
\item{metadata}{named list of arbitrary metadata to store in the instance, ignored unless the parameter `full` is TRUE.}
}
\value{
vector of integers or `fs.label` instance (see parameter `full`). The vertex indices from the label file. See the parameter `return_one_based_indices` for important information regarding the start index.
}
\description{
Read a mask in FreeSurfer label format. A label defines a list of vertices (of an associated surface or morphometry file) which are part of it. All others are not. You can think of it as binary mask. Label files are ASCII text files, which have 5 columns (vertex index, coord1, coord2, coord3, value), but only the vertex indices are of interest. A label can also contain voxels, in that case the indices are -1 and the coordinates are important.
}
\note{
To load volume/voxel labels, you will have to set the 'full' parameter to `TRUE`.
}
\examples{
labelfile = system.file("extdata", "lh.entorhinal_exvivo.label",
package = "freesurferformats", mustWork = TRUE);
label = read.fs.label(labelfile);
}
\seealso{
Other label functions:
\code{\link{read.fs.label.gii}()},
\code{\link{read.fs.label}()},
\code{\link{write.fs.label}()}
}
\concept{label functions}
|
/man/read.fs.label.native.Rd
|
permissive
|
dfsp-spirit/freesurferformats
|
R
| false
| true
| 2,686
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_fs_label.R
\name{read.fs.label.native}
\alias{read.fs.label.native}
\title{Read file in FreeSurfer label format}
\usage{
read.fs.label.native(
filepath,
return_one_based_indices = TRUE,
full = FALSE,
metadata = list()
)
}
\arguments{
\item{filepath}{string. Full path to the input label file.}
\item{return_one_based_indices}{logical. Whether the indices should be 1-based. Indices are stored zero-based in the file, but R uses 1-based indices. Defaults to TRUE, which means that 1 will be added to all indices read from the file before returning them. Notice that for volume labels, the indices are negative (-1), and the coord fields contain the *positions* of the voxels in tkras space (**not** the voxel *indices* in a volume). If a file contains negative indices, they will NOT be incremented, no matter what this is set to.}
\item{full}{logical, whether to return a full object of class `fs.label` instead of only a vector containing the vertex indices. If TRUE, a named list with the following two entries is returned: 'one_based_indices': logical, whether the vertex indices are one-based. 'vertexdata': a data.frame with the following columns: 'vertex_index': integer, see parameter 'return_one_based_indices', 'coord1', 'coord2', 'coord3': float coordinates, 'value': float, scalar data for the vertex, can mean anything. This parameter defaults to FALSE.}
\item{metadata}{named list of arbitrary metadata to store in the instance, ignored unless the parameter `full` is TRUE.}
}
\value{
vector of integers or `fs.label` instance (see parameter `full`). The vertex indices from the label file. See the parameter `return_one_based_indices` for important information regarding the start index.
}
\description{
Read a mask in FreeSurfer label format. A label defines a list of vertices (of an associated surface or morphometry file) which are part of it. All others are not. You can think of it as binary mask. Label files are ASCII text files, which have 5 columns (vertex index, coord1, coord2, coord3, value), but only the vertex indices are of interest. A label can also contain voxels, in that case the indices are -1 and the coordinates are important.
}
\note{
To load volume/voxel labels, you will have to set the 'full' parameter to `TRUE`.
}
\examples{
labelfile = system.file("extdata", "lh.entorhinal_exvivo.label",
package = "freesurferformats", mustWork = TRUE);
label = read.fs.label(labelfile);
}
\seealso{
Other label functions:
\code{\link{read.fs.label.gii}()},
\code{\link{read.fs.label}()},
\code{\link{write.fs.label}()}
}
\concept{label functions}
|
library(tidybayes)
### Name: data_list
### Title: Data lists for input into Bayesian models
### Aliases: data_list as_data_list as_data_list.default
### as_data_list.numeric as_data_list.logical as_data_list.factor
### as_data_list.character as_data_list.list as_data_list.data.frame
### as_data_list.data_list
### Keywords: manip
### ** Examples
# Typically these functions should not be used directly.
# See the compose_data function for examples of how to translate
# data in lists for input to Bayesian modeling functions.
|
/data/genthat_extracted_code/tidybayes/examples/data_list.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 542
|
r
|
library(tidybayes)
### Name: data_list
### Title: Data lists for input into Bayesian models
### Aliases: data_list as_data_list as_data_list.default
### as_data_list.numeric as_data_list.logical as_data_list.factor
### as_data_list.character as_data_list.list as_data_list.data.frame
### as_data_list.data_list
### Keywords: manip
### ** Examples
# Typically these functions should not be used directly.
# See the compose_data function for examples of how to translate
# data in lists for input to Bayesian modeling functions.
|
\name{CAAPhysicalSun_TimeOfStartOfRotation}
\alias{CAAPhysicalSun_TimeOfStartOfRotation}
\title{
CAAPhysicalSun_TimeOfStartOfRotation
}
\description{
CAAPhysicalSun_TimeOfStartOfRotation
}
\usage{
CAAPhysicalSun_TimeOfStartOfRotation(C)
}
\arguments{
\item{C}{
Integer, indicate the Cycles
}
}
\details{
}
\value{
}
\references{
Meeus, J. H. (1991). Astronomical algorithms. Willmann-Bell, Incorporated.
}
\author{
C++ code by PJ Naughter, imported to R by Jinlong Zhang
}
\note{
}
\seealso{
}
\examples{
CAAPhysicalSun_TimeOfStartOfRotation(C = 67)
}
\keyword{ Rotation }
|
/man/CAAPhysicalSun_TimeOfStartOfRotation.Rd
|
no_license
|
helixcn/skycalc
|
R
| false
| false
| 614
|
rd
|
\name{CAAPhysicalSun_TimeOfStartOfRotation}
\alias{CAAPhysicalSun_TimeOfStartOfRotation}
\title{
CAAPhysicalSun_TimeOfStartOfRotation
}
\description{
CAAPhysicalSun_TimeOfStartOfRotation
}
\usage{
CAAPhysicalSun_TimeOfStartOfRotation(C)
}
\arguments{
\item{C}{
Integer, indicate the Cycles
}
}
\details{
}
\value{
}
\references{
Meeus, J. H. (1991). Astronomical algorithms. Willmann-Bell, Incorporated.
}
\author{
C++ code by PJ Naughter, imported to R by Jinlong Zhang
}
\note{
}
\seealso{
}
\examples{
CAAPhysicalSun_TimeOfStartOfRotation(C = 67)
}
\keyword{ Rotation }
|
#' Do arbitrary operations on a tbl.
#'
#' This is a general purpose complement to the specialised manipulation
#' functions [filter()], [select()], [mutate()],
#' [summarise()] and [arrange()]. You can use `do()`
#' to perform arbitrary computation, returning either a data frame or
#' arbitrary objects which will be stored in a list. This is particularly
#' useful when working with models: you can fit models per group with
#' `do()` and then flexibly extract components with either another
#' `do()` or `summarise()`.
#'
#' For an empty data frame, the expressions will be evaluated once, even in the
#' presence of a grouping. This makes sure that the format of the resulting
#' data frame is the same for both empty and non-empty input.
#'
#' @section Connection to plyr:
#'
#' If you're familiar with plyr, `do()` with named arguments is basically
#' equivalent to [plyr::dlply()], and `do()` with a single unnamed argument
#' is basically equivalent to [plyr::ldply()]. However, instead of storing
#' labels in a separate attribute, the result is always a data frame. This
#' means that `summarise()` applied to the result of `do()` can
#' act like `ldply()`.
#'
#' @inheritParams filter
#' @param .data a tbl
#' @param ... Expressions to apply to each group. If named, results will be
#' stored in a new column. If unnamed, should return a data frame. You can
#' use `.` to refer to the current group. You can not mix named and
#' unnamed arguments.
#' @return
#' `do()` always returns a data frame. The first columns in the data frame
#' will be the labels, the others will be computed from `...`. Named
#' arguments become list-columns, with one element for each group; unnamed
#' elements must be data frames and labels will be duplicated accordingly.
#'
#' Groups are preserved for a single unnamed input. This is different to
#' [summarise()] because `do()` generally does not reduce the
#' complexity of the data, it just expresses it in a special way. For
#' multiple named inputs, the output is grouped by row with
#' [rowwise()]. This allows other verbs to work in an intuitive
#' way.
#' @export
#' @examples
#' by_cyl <- group_by(mtcars, cyl)
#' do(by_cyl, head(., 2))
#'
#' models <- by_cyl %>% do(mod = lm(mpg ~ disp, data = .))
#' models
#'
#' summarise(models, rsq = summary(mod)$r.squared)
#' models %>% do(data.frame(coef = coef(.$mod)))
#' models %>% do(data.frame(
#' var = names(coef(.$mod)),
#' coef(summary(.$mod)))
#' )
#'
#' models <- by_cyl %>% do(
#' mod_linear = lm(mpg ~ disp, data = .),
#' mod_quad = lm(mpg ~ poly(disp, 2), data = .)
#' )
#' models
#' compare <- models %>% do(aov = anova(.$mod_linear, .$mod_quad))
#' # compare %>% summarise(p.value = aov$`Pr(>F)`)
#'
#' if (require("nycflights13")) {
#' # You can use it to do any arbitrary computation, like fitting a linear
#' # model. Let's explore how carrier departure delays vary over the time
#' carriers <- group_by(flights, carrier)
#' group_size(carriers)
#'
#' mods <- do(carriers, mod = lm(arr_delay ~ dep_time, data = .))
#' mods %>% do(as.data.frame(coef(.$mod)))
#' mods %>% summarise(rsq = summary(mod)$r.squared)
#'
#' \dontrun{
#' # This longer example shows the progress bar in action
#' by_dest <- flights %>% group_by(dest) %>% filter(n() > 100)
#' library(mgcv)
#' by_dest %>% do(smooth = gam(arr_delay ~ s(dep_time) + month, data = .))
#' }
#' }
do <- function(.data, ...) {
UseMethod("do")
}
#' @export
do.default <- function(.data, ...) {
do_(.data, .dots = compat_as_lazy_dots(...))
}
#' @export
#' @rdname se-deprecated
do_ <- function(.data, ..., .dots = list()) {
UseMethod("do_")
}
#' @export
do.NULL <- function(.data, ...) {
NULL
}
#' @export
do_.NULL <- function(.data, ..., .dots = list()) {
NULL
}
# Helper functions -------------------------------------------------------------
label_output_dataframe <- function(labels, out, groups) {
data_frame <- vapply(out[[1]], is.data.frame, logical(1))
if (any(!data_frame)) {
stop(
"Results are not data frames at positions: ",
paste(which(!data_frame), collapse = ", "),
call. = FALSE
)
}
rows <- vapply(out[[1]], nrow, numeric(1))
out <- bind_rows(out[[1]])
if (!is.null(labels)) {
# Remove any common columns from labels
labels <- labels[setdiff(names(labels), names(out))]
# Repeat each row to match data
labels <- labels[rep(seq_len(nrow(labels)), rows), , drop = FALSE]
rownames(labels) <- NULL
grouped_df(bind_cols(labels, out), groups)
} else {
rowwise(out)
}
}
label_output_list <- function(labels, out, groups) {
if (!is.null(labels)) {
labels[names(out)] <- out
rowwise(labels)
} else {
class(out) <- "data.frame"
attr(out, "row.names") <- .set_row_names(length(out[[1]]))
rowwise(out)
}
}
named_args <- function(args) {
# Arguments must either be all named or all unnamed.
named <- sum(names2(args) != "")
if (!(named == 0 || named == length(args))) {
stop(
"Arguments to do() must either be all named or all unnamed",
call. = FALSE
)
}
if (named == 0 && length(args) > 1) {
stop("Can only supply single unnamed argument to do()", call. = FALSE)
}
# Check for old syntax
if (named == 1 && names(args) == ".f") {
stop(
"do syntax changed in dplyr 0.2. Please see documentation for details",
call. = FALSE
)
}
named != 0
}
|
/R/do.r
|
permissive
|
MhAmine/dplyr
|
R
| false
| false
| 5,400
|
r
|
#' Do arbitrary operations on a tbl.
#'
#' This is a general purpose complement to the specialised manipulation
#' functions [filter()], [select()], [mutate()],
#' [summarise()] and [arrange()]. You can use `do()`
#' to perform arbitrary computation, returning either a data frame or
#' arbitrary objects which will be stored in a list. This is particularly
#' useful when working with models: you can fit models per group with
#' `do()` and then flexibly extract components with either another
#' `do()` or `summarise()`.
#'
#' For an empty data frame, the expressions will be evaluated once, even in the
#' presence of a grouping. This makes sure that the format of the resulting
#' data frame is the same for both empty and non-empty input.
#'
#' @section Connection to plyr:
#'
#' If you're familiar with plyr, `do()` with named arguments is basically
#' equivalent to [plyr::dlply()], and `do()` with a single unnamed argument
#' is basically equivalent to [plyr::ldply()]. However, instead of storing
#' labels in a separate attribute, the result is always a data frame. This
#' means that `summarise()` applied to the result of `do()` can
#' act like `ldply()`.
#'
#' @inheritParams filter
#' @param .data a tbl
#' @param ... Expressions to apply to each group. If named, results will be
#' stored in a new column. If unnamed, should return a data frame. You can
#' use `.` to refer to the current group. You can not mix named and
#' unnamed arguments.
#' @return
#' `do()` always returns a data frame. The first columns in the data frame
#' will be the labels, the others will be computed from `...`. Named
#' arguments become list-columns, with one element for each group; unnamed
#' elements must be data frames and labels will be duplicated accordingly.
#'
#' Groups are preserved for a single unnamed input. This is different to
#' [summarise()] because `do()` generally does not reduce the
#' complexity of the data, it just expresses it in a special way. For
#' multiple named inputs, the output is grouped by row with
#' [rowwise()]. This allows other verbs to work in an intuitive
#' way.
#' @export
#' @examples
#' by_cyl <- group_by(mtcars, cyl)
#' do(by_cyl, head(., 2))
#'
#' models <- by_cyl %>% do(mod = lm(mpg ~ disp, data = .))
#' models
#'
#' summarise(models, rsq = summary(mod)$r.squared)
#' models %>% do(data.frame(coef = coef(.$mod)))
#' models %>% do(data.frame(
#' var = names(coef(.$mod)),
#' coef(summary(.$mod)))
#' )
#'
#' models <- by_cyl %>% do(
#' mod_linear = lm(mpg ~ disp, data = .),
#' mod_quad = lm(mpg ~ poly(disp, 2), data = .)
#' )
#' models
#' compare <- models %>% do(aov = anova(.$mod_linear, .$mod_quad))
#' # compare %>% summarise(p.value = aov$`Pr(>F)`)
#'
#' if (require("nycflights13")) {
#' # You can use it to do any arbitrary computation, like fitting a linear
#' # model. Let's explore how carrier departure delays vary over the time
#' carriers <- group_by(flights, carrier)
#' group_size(carriers)
#'
#' mods <- do(carriers, mod = lm(arr_delay ~ dep_time, data = .))
#' mods %>% do(as.data.frame(coef(.$mod)))
#' mods %>% summarise(rsq = summary(mod)$r.squared)
#'
#' \dontrun{
#' # This longer example shows the progress bar in action
#' by_dest <- flights %>% group_by(dest) %>% filter(n() > 100)
#' library(mgcv)
#' by_dest %>% do(smooth = gam(arr_delay ~ s(dep_time) + month, data = .))
#' }
#' }
do <- function(.data, ...) {
UseMethod("do")
}
#' @export
do.default <- function(.data, ...) {
do_(.data, .dots = compat_as_lazy_dots(...))
}
#' @export
#' @rdname se-deprecated
do_ <- function(.data, ..., .dots = list()) {
UseMethod("do_")
}
#' @export
do.NULL <- function(.data, ...) {
NULL
}
#' @export
do_.NULL <- function(.data, ..., .dots = list()) {
NULL
}
# Helper functions -------------------------------------------------------------
label_output_dataframe <- function(labels, out, groups) {
data_frame <- vapply(out[[1]], is.data.frame, logical(1))
if (any(!data_frame)) {
stop(
"Results are not data frames at positions: ",
paste(which(!data_frame), collapse = ", "),
call. = FALSE
)
}
rows <- vapply(out[[1]], nrow, numeric(1))
out <- bind_rows(out[[1]])
if (!is.null(labels)) {
# Remove any common columns from labels
labels <- labels[setdiff(names(labels), names(out))]
# Repeat each row to match data
labels <- labels[rep(seq_len(nrow(labels)), rows), , drop = FALSE]
rownames(labels) <- NULL
grouped_df(bind_cols(labels, out), groups)
} else {
rowwise(out)
}
}
label_output_list <- function(labels, out, groups) {
if (!is.null(labels)) {
labels[names(out)] <- out
rowwise(labels)
} else {
class(out) <- "data.frame"
attr(out, "row.names") <- .set_row_names(length(out[[1]]))
rowwise(out)
}
}
named_args <- function(args) {
# Arguments must either be all named or all unnamed.
named <- sum(names2(args) != "")
if (!(named == 0 || named == length(args))) {
stop(
"Arguments to do() must either be all named or all unnamed",
call. = FALSE
)
}
if (named == 0 && length(args) > 1) {
stop("Can only supply single unnamed argument to do()", call. = FALSE)
}
# Check for old syntax
if (named == 1 && names(args) == ".f") {
stop(
"do syntax changed in dplyr 0.2. Please see documentation for details",
call. = FALSE
)
}
named != 0
}
|
# page no. 578
G = ((20*60)*(4*144)/(pi*0.87^2));
mu = 0.33;
D = 0.87/12;
Re = (D*G)/mu;
print(Re);
h1 = 630;
F = 1.25;
h = h1*F;
print(h);
|
/Thermodynamics_And_Heat_Power_by_I_Granet_And_M_Bluestein/CH11/EX11.17/Ex11_17.R
|
permissive
|
FOSSEE/R_TBC_Uploads
|
R
| false
| false
| 141
|
r
|
# page no. 578
G = ((20*60)*(4*144)/(pi*0.87^2));
mu = 0.33;
D = 0.87/12;
Re = (D*G)/mu;
print(Re);
h1 = 630;
F = 1.25;
h = h1*F;
print(h);
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
/cachematrix.R
|
no_license
|
joharidepur/R-program
|
R
| false
| false
| 750
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinv <- function(inverse) inv <<- inverse
getinv <- function() inv
list(set = set, get = get, setinv = setinv, getinv = getinv)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getinv()
if(!is.null(inv)) {
message("getting cached result")
return(inv)
}
data <- x$get()
inv <- solve(data, ...)
x$setinv(inv)
inv
}
|
# The code below can be used to reproduce the results of
# An information-theoretic approach for selecting arms in clinical trials
# by P. Mozgunov and T. Jaki (2019)
# Section 5 with co-primary efficacy endpoints
# The "wdesign-co-primary-code.R" is required to be run prior to the code below.
# WE-II Select-the-Best
# Kappa 0.54 - Robust Optimal Value under the ENS Objective Function
# Under the Null Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.10),true2=c(0.45,0.45,0.45),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.095,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.54,nsims=10000,hypothesis=T,test="Fisher")
# Type I Error
design$TypeI.error
# ENS and corresponding SE
design$ENS
design$SE.ENS
design$ENS2
design$SE.ENS2
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Under the Alternative Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.25),true2=c(0.45,0.45,0.60),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.095,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.54,nsims=10000,hypothesis=T,test="Fisher")
# Power
design$Power
# ENS and corresponding SE
design$ENS
design$SE.ENS
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Kappa 0.69 - Robust Optimal Value under the Power Objective Function
# Under the Null Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.10),true2=c(0.45,0.45,0.45),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.0980,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.69,nsims=10000,hypothesis=T,test="Fisher")
# Type I Error
design$TypeI.error
# ENS and corresponding SE
design$ENS
design$SE.ENS
design$ENS2
design$SE.ENS2
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Under the Alternative Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.25),true2=c(0.45,0.45,0.60),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.0980,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.69,nsims=10000,hypothesis=T,test="Fisher")
# Power
design$Power
# ENS and corresponding SE
design$ENS
design$SE.ENS
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
|
/Phase-2-Co-Primary-Run.R
|
no_license
|
adaptive-designs/inf-theory
|
R
| false
| false
| 3,032
|
r
|
# The code below can be used to reproduce the results of
# An information-theoretic approach for selecting arms in clinical trials
# by P. Mozgunov and T. Jaki (2019)
# Section 5 with co-primary efficacy endpoints
# The "wdesign-co-primary-code.R" is required to be run prior to the code below.
# WE-II Select-the-Best
# Kappa 0.54 - Robust Optimal Value under the ENS Objective Function
# Under the Null Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.10),true2=c(0.45,0.45,0.45),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.095,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.54,nsims=10000,hypothesis=T,test="Fisher")
# Type I Error
design$TypeI.error
# ENS and corresponding SE
design$ENS
design$SE.ENS
design$ENS2
design$SE.ENS2
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Under the Alternative Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.25),true2=c(0.45,0.45,0.60),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.095,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.54,nsims=10000,hypothesis=T,test="Fisher")
# Power
design$Power
# ENS and corresponding SE
design$ENS
design$SE.ENS
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Kappa 0.69 - Robust Optimal Value under the Power Objective Function
# Under the Null Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.10),true2=c(0.45,0.45,0.45),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.0980,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.69,nsims=10000,hypothesis=T,test="Fisher")
# Type I Error
design$TypeI.error
# ENS and corresponding SE
design$ENS
design$SE.ENS
design$ENS2
design$SE.ENS2
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
# Under the Alternative Hypothesis
design<-wdesign.co.primary.ph2(true1=c(0.10,0.10,0.25),true2=c(0.45,0.45,0.60),target1=0.999,target2=0.999,n=165,
prior1=rep(0.99,3),prior2=rep(0.99,3),beta1=c(5,2,2),control=1,cut.off.typeI=0.0980,alternative="greater",
correlation=0.75,assignment="best",
beta2=c(5,2,2),kappa=0.69,nsims=10000,hypothesis=T,test="Fisher")
# Power
design$Power
# ENS and corresponding SE
design$ENS
design$SE.ENS
# Experimentation Proportion and corresponding SE
design$Experimentation
design$Experimentation.SE
|
#' Add edges and attributes to graph from a table
#' @description Add edges and their attributes to an
#' existing graph object from data in a CSV file or a
#' data frame.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @param table either a path to a CSV file, or, a data
#' frame object.
#' @param from_col the name of the table column from
#' which edges originate.
#' @param from_mapping a single character value for
#' the mapping of a column in the external table
#' (supplied as \code{from_col}) to a column in the
#' graph's internal node data frame (ndf).
#' @param to_col the name of the table column to
#' which edges terminate.
#' @param to_mapping a single character value for
#' the mapping of a column in the external table
#' (supplied as \code{to_col}) to a column in the
#' graph's internal node data frame (ndf).
#' @param set_rel an optional string to apply a
#' \code{rel} attribute to all edges created from the
#' table records.
#' @param select_cols an optional character vector for
#' specifying which columns in the table that should be
#' imported as edge attributes.
#' @param drop_cols an optional character vector for
#' dropping columns from the incoming data.
#' @param rename_attrs an optional character vector for
#' renaming edge attributes.
#' @param rel_col an option to apply a column of data
#' in the table as \code{rel} attribute values.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' library(magrittr)
#' library(dplyr)
#'
#' # Create a graph from a CSV file
#' graph <- create_graph() %>%
#' add_edges_from_table(
#' system.file("examples/projects_and_contributors.csv",
#' package = "DiagrammeR"),
#' from_col = "contributor_name",
#' to_col = "project_name",
#' rel_col = "contributor_role",
#' set_rel = "contributes_to")
#'
#' # Get a count of nodes in the graph
#' node_count(graph)
#' #> [1] 13
#'
#' # Get a count of edges in the graph
#' edge_count(graph)
#' #> [1] 13
#' }
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
table,
from_col,
from_mapping = NULL,
to_col,
to_mapping = NULL,
set_rel = NULL,
select_cols = NULL,
drop_cols = NULL,
rename_attrs = NULL,
rel_col = NULL) {
if (inherits(table, "character")) {
# Load in CSV file
csv <- read.csv(table, stringsAsFactors = FALSE)
} else if (inherits(table, "data.frame")) {
# Rename `table` object as `csv`
csv <- table
}
# Get numbers of rows and columns in the table
rows_in_csv <- nrow(csv)
cols_in_csv <- ncol(csv)
# Get rownames for existing edges in graph object
edges_existing_rownames <-
rownames(get_edge_df(graph))
# Verify that value for `from_col` is in the table
if (!(from_col %in% colnames(csv))) {
stop("The value specified in `from_col` is not in the table.")
}
# Verify that value for `to_col` is in the table
if (!(to_col %in% colnames(csv))) {
stop("The value specified in `to_col` is not in the table.")
}
# Verify that value for `from_mapping` is in the
# graph's ndf
if (!is.null(from_mapping)) {
if (!(from_mapping %in% colnames(get_node_df(graph)))) {
stop("The value specified in `from_mapping` is not in the graph.")
}
}
# Verify that value for `to_mapping` is in the
# graph's ndf
if (!is.null(to_mapping)) {
if (!(to_mapping %in% colnames(get_node_df(graph)))) {
stop("The value specified in `to_mapping` is not in the graph.")
}
}
if (is.null(from_mapping) & is.null(to_mapping)) {
if (node_count(graph) == 0) {
starting_node <- 1
} else {
if (suppressWarnings(
any(!(is.na(
as.numeric(graph$nodes_df$nodes)))))) {
starting_node <-
suppressWarnings(
max(
as.numeric(
graph$nodes_df[
which(!is.na(
as.numeric(graph$nodes_df$nodes))),
1])) + 1)
} else {
starting_node <- 1
}
}
# If values for `select_cols` are provided, filter
# the CSV columns by those named columns
if (!is.null(select_cols)) {
# If none of the specified values in `select_cols`
# are in the CSV, stop the function
if (all(select_cols %in% colnames(csv)) == FALSE) {
stop("None of the values specified for selecting columns are available.")
}
columns_retained <-
which(colnames(csv) %in% select_cols)
csv <- csv[,columns_retained]
}
# If values for `drop_cols` provided, filter the CSV
# columns by those named columns
if (is.null(select_cols) & !is.null(drop_cols)) {
columns_retained <-
which(!(colnames(csv) %in% drop_cols))
csv <- csv[,columns_retained]
}
# If values for `rename_attrs` provided, rename all
# of the CSV columns by those replacement values
# (number of new names should match number of columns
# even after selecting or dropping columns)
if (!is.null(rename_attrs)) {
if (length(rename_attrs) != length(colnames(csv))) {
stop(paste0("The number of values specified for column name changes ",
"does not match the number of columns available"))
}
colnames(csv) <- rename_attrs
}
# Optionally set the `rel` attribute from a
# specified column in the CSV (this copies into
# the `rel` column)
if (!is.null(rel_col)) {
if (any(colnames(csv) == rel_col)) {
csv$rel <- csv[,which(colnames(csv) == rel_col)]
}
}
# Get the unique set of nodes to add to the graph
nodes <-
create_nodes(
nodes = unique(
c(csv[, which(colnames(csv) %in% from_col)],
csv[, which(colnames(csv) %in% to_col)])))
# Add node data frame to the graph
graph <- add_node_df(graph, nodes)
# Create an edge data frame
edges <-
create_edges(
from = csv[, which(colnames(csv) %in% from_col)],
to = csv[, which(colnames(csv) %in% to_col)]
)
# Add edge data frame to the graph
graph <- add_edge_df(graph, edges)
return(graph)
}
# Verify that all values in `from_col` in the table are
# available in the graph
if (!(all(
csv[,which(colnames(csv) == from_col)] %in%
get_node_df(graph)[,
which(colnames(get_node_df(graph)) == from_mapping)]))) {
stop(paste0("The `from` values in the table don't all match the requested",
"node attribute value in the graph."))
}
# Verify that all values in `to_col` in the table are
# available in the graph
if (!(all(csv[,which(colnames(csv) == to_col)] %in%
get_node_df(graph)[,which(colnames(get_node_df(graph)) == to_mapping)]))) {
stop(paste0("The `to` values in the table don't all match the requested",
"node attribute values in the graph."))
}
# If values for `select_cols` provided, filter the
# table columns by those named columns
if (!is.null(select_cols)) {
# If none of the specified values in `select_cols`
# are in the table, stop the function
if (all(select_cols %in% colnames(csv)) == FALSE) {
stop("None of the values specified for selecting columns are available.")
}
columns_retained <- which(colnames(csv) %in% select_cols)
csv <- csv[,columns_retained]
}
# If values for `drop_cols` provided, filter the
# table columns by those named columns
if (is.null(select_cols) & !is.null(drop_cols)) {
columns_retained <-
which(!(colnames(csv) %in% drop_cols))
csv <- csv[,columns_retained]
}
# If values for `rename_attrs` provided, rename the
# table columns by those replacement values
if (!is.null(rename_attrs)) {
if (length(rename_attrs) !=
length(colnames(csv))) {
stop(paste0("The number of values specified for column name changes ",
"does not match the number of columns available"))
}
colnames(csv) <- rename_attrs
}
# Get relevant column numbers from the table
from_col_value <- which(colnames(csv) == from_col)
to_col_value <- which(colnames(csv) == to_col)
# Get relevant column numbers from the graph's ndf
from_mapping_value <-
which(colnames(get_node_df(graph)) == from_mapping)
to_mapping_value <-
which(colnames(get_node_df(graph)) == to_mapping)
# Create edges
for (i in 1:rows_in_csv) {
graph <-
add_edge(
graph = graph,
from = get_node_df(graph)[
which(get_node_df(graph)[
,from_mapping_value] ==
csv[i, from_col_value]), 1],
to = get_node_df(graph)[
which(get_node_df(graph)[
,to_mapping_value] ==
csv[i, to_col_value]), 1])
}
# Get rownames for edges created
edges_created_rownames <-
as.numeric(
setdiff(
rownames(
get_edge_df(graph)),
edges_existing_rownames))
# Get column numbers in table that are edge attributes
if (!is.null(rel_col)) {
edge_attr_cols_csv <-
which(colnames(csv) %in%
setdiff(colnames(csv),
c(from_col, to_col, rel_col)))
} else {
edge_attr_cols_csv <-
which(colnames(csv) %in%
setdiff(colnames(csv),
c(from_col, to_col)))
}
# Add table columns as attributes
for (i in edges_created_rownames) {
for (j in edge_attr_cols_csv) {
graph <-
set_edge_attrs(
x = graph,
from = get_edge_df(graph)[
which(
rownames(get_edge_df(graph)) == i), 1],
to = get_edge_df(graph)[
which(
rownames(get_edge_df(graph)) == i), 2],
edge_attr = colnames(csv)[j],
values = csv[i,j])
}
# Optionally set the `rel` attribute from a
# specified column in the table
if (!is.null(rel_col)) {
graph <-
set_edge_attrs(
x = graph,
from = get_edge_df(graph)[
which(
rownames(get_edge_df(graph)) == i),1],
to = get_edge_df(graph)[
which(
rownames(get_edge_df(graph)) == i),2],
edge_attr = "rel",
values = csv[i, which(colnames(csv) %in%
rel_col)])
}
}
# Optionally set the `rel` attribute with a single
# value repeated down
if (!is.null(set_rel)) {
graph <-
select_edges(
graph = graph,
from = get_edge_df(graph)[
edges_created_rownames, 1],
to = get_edge_df(graph)[
edges_created_rownames, 2])
graph <-
set_edge_attr_with_selection(
graph = graph,
edge_attr = "rel",
value = set_rel)
graph <- clear_selection(graph = graph)
}
return(graph)
}
|
/R/add_edges_from_table.R
|
no_license
|
dy-kim/DiagrammeR
|
R
| false
| false
| 11,173
|
r
|
#' Add edges and attributes to graph from a table
#' @description Add edges and their attributes to an
#' existing graph object from data in a CSV file or a
#' data frame.
#' @param graph a graph object of class
#' \code{dgr_graph} that is created using
#' \code{create_graph}.
#' @param table either a path to a CSV file, or, a data
#' frame object.
#' @param from_col the name of the table column from
#' which edges originate.
#' @param from_mapping a single character value for
#' the mapping of a column in the external table
#' (supplied as \code{from_col}) to a column in the
#' graph's internal node data frame (ndf).
#' @param to_col the name of the table column to
#' which edges terminate.
#' @param to_mapping a single character value for
#' the mapping of a column in the external table
#' (supplied as \code{to_col}) to a column in the
#' graph's internal node data frame (ndf).
#' @param set_rel an optional string to apply a
#' \code{rel} attribute to all edges created from the
#' table records.
#' @param select_cols an optional character vector for
#' specifying which columns in the table that should be
#' imported as edge attributes.
#' @param drop_cols an optional character vector for
#' dropping columns from the incoming data.
#' @param rename_attrs an optional character vector for
#' renaming edge attributes.
#' @param rel_col an option to apply a column of data
#' in the table as \code{rel} attribute values.
#' @return a graph object of class \code{dgr_graph}.
#' @examples
#' \dontrun{
#' library(magrittr)
#' library(dplyr)
#'
#' # Create a graph from a CSV file
#' graph <- create_graph() %>%
#' add_edges_from_table(
#' system.file("examples/projects_and_contributors.csv",
#' package = "DiagrammeR"),
#' from_col = "contributor_name",
#' to_col = "project_name",
#' rel_col = "contributor_role",
#' set_rel = "contributes_to")
#'
#' # Get a count of nodes in the graph
#' node_count(graph)
#' #> [1] 13
#'
#' # Get a count of edges in the graph
#' edge_count(graph)
#' #> [1] 13
#' }
#' @export add_edges_from_table
add_edges_from_table <- function(graph,
                                 table,
                                 from_col,
                                 from_mapping = NULL,
                                 to_col,
                                 to_mapping = NULL,
                                 set_rel = NULL,
                                 select_cols = NULL,
                                 drop_cols = NULL,
                                 rename_attrs = NULL,
                                 rel_col = NULL) {

  # Read the incoming table either from a CSV file path or
  # directly from a supplied data frame
  if (inherits(table, "character")) {
    csv <- read.csv(table, stringsAsFactors = FALSE)
  } else if (inherits(table, "data.frame")) {
    csv <- table
  }

  # Get the number of rows in the table
  rows_in_csv <- nrow(csv)

  # Record rownames for edges already in the graph; used later
  # to identify which edges were newly created by this call
  edges_existing_rownames <-
    rownames(get_edge_df(graph))

  # Verify that the value for `from_col` is in the table
  if (!(from_col %in% colnames(csv))) {
    stop("The value specified in `from_col` is not in the table.")
  }

  # Verify that the value for `to_col` is in the table
  if (!(to_col %in% colnames(csv))) {
    stop("The value specified in `to_col` is not in the table.")
  }

  # Verify that the value for `from_mapping` is in the
  # graph's node data frame (ndf)
  if (!is.null(from_mapping)) {
    if (!(from_mapping %in% colnames(get_node_df(graph)))) {
      stop("The value specified in `from_mapping` is not in the graph.")
    }
  }

  # Verify that the value for `to_mapping` is in the
  # graph's ndf
  if (!is.null(to_mapping)) {
    if (!(to_mapping %in% colnames(get_node_df(graph)))) {
      stop("The value specified in `to_mapping` is not in the graph.")
    }
  }

  # Case 1: no mappings provided -- the `from`/`to` table values
  # are themselves the node IDs; create nodes and edges directly
  # (node IDs come from the table, so no starting ID needs to be
  # computed here)
  if (is.null(from_mapping) & is.null(to_mapping)) {

    # If values for `select_cols` are provided, filter
    # the CSV columns by those named columns
    if (!is.null(select_cols)) {

      # If none of the specified values in `select_cols`
      # are in the CSV, stop the function
      if (all(select_cols %in% colnames(csv)) == FALSE) {
        stop("None of the values specified for selecting columns are available.")
      }

      columns_retained <-
        which(colnames(csv) %in% select_cols)
      csv <- csv[, columns_retained]
    }

    # If values for `drop_cols` are provided, filter the CSV
    # columns by dropping those named columns
    if (is.null(select_cols) & !is.null(drop_cols)) {
      columns_retained <-
        which(!(colnames(csv) %in% drop_cols))
      csv <- csv[, columns_retained]
    }

    # If values for `rename_attrs` are provided, rename all
    # of the CSV columns by those replacement values
    # (the number of new names must match the number of columns
    # remaining after any selecting or dropping of columns)
    if (!is.null(rename_attrs)) {
      if (length(rename_attrs) != length(colnames(csv))) {
        stop(paste0("The number of values specified for column name changes ",
                    "does not match the number of columns available"))
      }
      colnames(csv) <- rename_attrs
    }

    # Optionally set the `rel` attribute from a
    # specified column in the CSV (this copies into
    # the `rel` column)
    if (!is.null(rel_col)) {
      if (any(colnames(csv) == rel_col)) {
        csv$rel <- csv[, which(colnames(csv) == rel_col)]
      }
    }

    # Get the unique set of nodes to add to the graph
    nodes <-
      create_nodes(
        nodes = unique(
          c(csv[, which(colnames(csv) %in% from_col)],
            csv[, which(colnames(csv) %in% to_col)])))

    # Add the node data frame to the graph
    graph <- add_node_df(graph, nodes)

    # Create an edge data frame from the table's
    # `from`/`to` columns
    edges <-
      create_edges(
        from = csv[, which(colnames(csv) %in% from_col)],
        to = csv[, which(colnames(csv) %in% to_col)])

    # Add the edge data frame to the graph
    graph <- add_edge_df(graph, edges)

    return(graph)
  }

  # Case 2: mappings provided -- table values are matched against
  # existing node attributes in the graph

  # Verify that all values in `from_col` in the table are
  # available in the graph
  if (!(all(
    csv[, which(colnames(csv) == from_col)] %in%
    get_node_df(graph)[,
      which(colnames(get_node_df(graph)) == from_mapping)]))) {
    stop(paste0("The `from` values in the table don't all match the requested ",
                "node attribute values in the graph."))
  }

  # Verify that all values in `to_col` in the table are
  # available in the graph
  if (!(all(csv[, which(colnames(csv) == to_col)] %in%
            get_node_df(graph)[, which(colnames(get_node_df(graph)) == to_mapping)]))) {
    stop(paste0("The `to` values in the table don't all match the requested ",
                "node attribute values in the graph."))
  }

  # If values for `select_cols` are provided, filter the
  # table columns by those named columns
  if (!is.null(select_cols)) {

    # If none of the specified values in `select_cols`
    # are in the table, stop the function
    if (all(select_cols %in% colnames(csv)) == FALSE) {
      stop("None of the values specified for selecting columns are available.")
    }

    columns_retained <- which(colnames(csv) %in% select_cols)
    csv <- csv[, columns_retained]
  }

  # If values for `drop_cols` are provided, filter the
  # table columns by dropping those named columns
  if (is.null(select_cols) & !is.null(drop_cols)) {
    columns_retained <-
      which(!(colnames(csv) %in% drop_cols))
    csv <- csv[, columns_retained]
  }

  # If values for `rename_attrs` are provided, rename the
  # table columns by those replacement values
  if (!is.null(rename_attrs)) {
    if (length(rename_attrs) !=
        length(colnames(csv))) {
      stop(paste0("The number of values specified for column name changes ",
                  "does not match the number of columns available"))
    }
    colnames(csv) <- rename_attrs
  }

  # Get relevant column numbers from the table
  from_col_value <- which(colnames(csv) == from_col)
  to_col_value <- which(colnames(csv) == to_col)

  # Get relevant column numbers from the graph's ndf
  from_mapping_value <-
    which(colnames(get_node_df(graph)) == from_mapping)
  to_mapping_value <-
    which(colnames(get_node_df(graph)) == to_mapping)

  # Create one edge per table row, resolving the table's
  # `from`/`to` values to node IDs via the mapping columns
  for (i in seq_len(rows_in_csv)) {
    graph <-
      add_edge(
        graph = graph,
        from = get_node_df(graph)[
          which(get_node_df(graph)[
            , from_mapping_value] ==
              csv[i, from_col_value]), 1],
        to = get_node_df(graph)[
          which(get_node_df(graph)[
            , to_mapping_value] ==
              csv[i, to_col_value]), 1])
  }

  # Get rownames for the edges created by this call
  edges_created_rownames <-
    as.numeric(
      setdiff(
        rownames(
          get_edge_df(graph)),
        edges_existing_rownames))

  # Get column numbers in the table that are edge attributes
  # (everything other than the from/to and optional rel columns)
  if (!is.null(rel_col)) {
    edge_attr_cols_csv <-
      which(colnames(csv) %in%
              setdiff(colnames(csv),
                      c(from_col, to_col, rel_col)))
  } else {
    edge_attr_cols_csv <-
      which(colnames(csv) %in%
              setdiff(colnames(csv),
                      c(from_col, to_col)))
  }

  # Add table columns as edge attributes; pair each new edge
  # rowname with the table row that produced it (edges were
  # created in table-row order, so the k-th new edge corresponds
  # to table row k -- previously `i` was used directly as the
  # table row index, which is wrong when the graph already
  # contained edges)
  for (k in seq_along(edges_created_rownames)) {

    i <- edges_created_rownames[k]

    for (j in edge_attr_cols_csv) {
      graph <-
        set_edge_attrs(
          x = graph,
          from = get_edge_df(graph)[
            which(
              rownames(get_edge_df(graph)) == i), 1],
          to = get_edge_df(graph)[
            which(
              rownames(get_edge_df(graph)) == i), 2],
          edge_attr = colnames(csv)[j],
          values = csv[k, j])
    }

    # Optionally set the `rel` attribute from a
    # specified column in the table
    if (!is.null(rel_col)) {
      graph <-
        set_edge_attrs(
          x = graph,
          from = get_edge_df(graph)[
            which(
              rownames(get_edge_df(graph)) == i), 1],
          to = get_edge_df(graph)[
            which(
              rownames(get_edge_df(graph)) == i), 2],
          edge_attr = "rel",
          values = csv[k, which(colnames(csv) %in%
                                  rel_col)])
    }
  }

  # Optionally set the `rel` attribute with a single
  # value repeated down all newly created edges
  if (!is.null(set_rel)) {
    graph <-
      select_edges(
        graph = graph,
        from = get_edge_df(graph)[
          edges_created_rownames, 1],
        to = get_edge_df(graph)[
          edges_created_rownames, 2])

    graph <-
      set_edge_attr_with_selection(
        graph = graph,
        edge_attr = "rel",
        value = set_rel)

    graph <- clear_selection(graph = graph)
  }

  return(graph)
}
|
#'Generate survival times for two endpoints using the joint frailty-copula model for surrogacy
#'
#'Date are generated from the one-step joint frailty-copula model, under the Claton
#'copula function (see \code{\link{jointSurroCopPenal}} for more details)
#'
#'Only Gaussian random effects are considered in this generation. If the parameter \code{full.data} is set to 1,
#'this function returns a list containing several parameters, including the generated random effects.
#'The desired individual level correlation (Kendall's \eqn{\tau}) depends on the value of the copula parameter
#'\eqn{\theta}, given that \eqn{\tau = \theta /(\theta + 2)} under the Clayton copula model.
#'
#' @aliases jointSurrCopSimul
#' @param n.obs Number of considered subjects. The default is \code{600}.
#' @param n.trial Number of considered trials. The default is \code{30}.
#' @param prop.cens A value between \code{0} and \code{1}, \code{1-prop.cens} is the minimum proportion of
#' people who are randomly censored.
#' Represents the quantile to use for generating the random censorship time. In this case, the censorship
#' time follows a uniform distribution in \code{1} and \code{(prop.cens)ieme} percentile of the
#' generated death times. If this argument is set to \code{0}, the fix censorship is considered.
#' The default is \code{0}.
#' @param cens.adm Censorship time. If argument \code{prop.cens} is set to \code{0}, it represents
#' the administrative censorship time, else it represents the fix censoring time. The default is \code{549},
#' for about \code{40\%} of fix censored subjects.
#' @param alpha Fixed value for \eqn{\alpha}. The default is \code{1.5}.
#' @param gamma Fixed value for \eqn{\gamma}. The default is \code{2.5}.
#' @param sigma.s Fixed value for \if{latex}{\eqn{\sigma^2_{v_S}}}
#' \if{html}{\eqn{\sigma}\out{<sup>2</sup><sub>v<sub>S</sub></sub>}}. The default is \code{0.7}.
#' @param sigma.t Fixed value for \if{latex}{\eqn{\sigma^2_{v_T}}}
#' \if{html}{\eqn{\sigma}\out{<sup>2</sup><sub>v<sub>T</sub></sub>}}. The default is \code{0.7}.
#' @param cor Desired level of correlation between \if{latex}{\eqn{v_{S_i}} and
#' \eqn{v_{T_i}}}\if{html}{v\out{<sub>S<sub>i</sub></sub>} and v\out{<sub>T<sub>i</sub></sub>}}.
#' \if{latex}{\eqn{R^2_{trial} = cor^2}}
#' \if{html}{\code{R}\out{<sup>2</sup><sub>trial</sub>} = cor \out{<sup>2</sup>}}.
#' The default is \code{0.8}.
#' @param betas Vector of the fixed effects for \if{latex}{\eqn{\beta_S}} \if{html}{\eqn{\beta}\out{<sub>S</sub>}}.
#' The size must be equal to \code{ver}
#' The default is \code{c(-1.25,0.5)}.
#' @param betat Vector of the fixed effects for \if{latex}{\eqn{\beta_T}} \if{html}{\eqn{\beta}\out{<sub>T</sub>}}.
#' The size must be equal to \code{ver}
#' The default is \code{c(-1.25,0.5)}.
#' @param frailt.base Considered heterogeneity on the baseline risk \code{(1)} or not \code{(0)}.
#' The default is \code{1}.
#' @param lambda.S Desired scale parameter for the \code{Weibull} distribution associated with the Surrogate
#' endpoint. The default is 1.8.
#' @param nu.S Desired shape parameter for the \code{Weibull} distribution associated with the Surrogate
#' endpoint. The default is 0.0045.
#' @param lambda.T Desired scale parameter for the \code{Weibull} distribution associated with the True endpoint.
#' The default is 3.
#' @param nu.T Desired shape parameter for the \code{Weibull} distribution associated with the True endpoint.
#' The default is 0.0025.
#' @param ver Number of covariates. The mandatory covariate is the treatment arm. The default is \code{2}.
#' @param typeOf Type of joint model used for data generation: 0 = classical joint model
#' with a shared individual frailty effect (Rondeau, 2007), 1 = joint frailty-copula model with shared frailty
#' effects \if{latex}{\eqn{u_i}} \if{html}{\code{u}\out{<sub>i</sub>}} and two correlated random effects treatment-by-trial interaction
#' (\if{latex}{\eqn{v_{S_i}}, \eqn{v_{T_i}}}\if{html}{v\out{<sub>S<sub>i</sub></sub>}, v\out{<sub>T<sub>i</sub></sub>}}),
#' see \code{\link{jointSurroCopPenal}}.
#' @param equi.subj.trial A binary variable that indicates if the same proportion of subjects should be included per trial (1)
#' or not (0). If 0, the proportions of subject per trial are required with parameter \code{prop.subj.trial}.
#' @param equi.subj.trt A binary variable that indicates if the same proportion of subjects is randomized per trial (1)
#' or not (0). If 0, the proportions of subject per trial are required with parameter \code{prop.subj.trt}.
#' @param prop.subj.trial The proportions of subjects per trial. Requires if \code{equi.subj.trial = 0}.
#' @param prop.subj.trt The proportions of randomized subject per trial. Requires if \code{equi.subj.trt = 0}.
#' @param full.data Specified if you want the function to return the full dataset (1), including the random effects,
#' or the restrictive dataset (0) with at least \code{7} columns as required for the function \code{\link{jointSurroCopPenal}}.
#' @param random.generator The random number generator used by the Fortran compiler,
#' \code{1} for the intrinsic subroutine \code{Random_number} and \code{2} for the
#' subroutine \code{uniran()}. The default is \code{1}.
#' @param random A binary that says if we reset the random number generation with a different environment
#' at each call \code{(1)} or not \code{(0)}. If it is set to \code{1}, we use the computer clock
#' as seed. In the last case, it is not possible to reproduce the generated datasets.
#' The default is \code{0}. Required if \code{random.generator} is set to 1.
#' @param random.nb.sim required if \code{random.generator} is set to 1, and if \code{random} is set to 1.
#' @param seed The seed to use for data (or samples) generation. Required if the argument \code{random.generator} is set to 1.
#' Must be a positive value. If negative, the program do not account for seed. The default is \code{0}.
#' @param nb.reject.data Number of generation to reject before the considered dataset. This parameter is required
#' when data generation is for simulation. With a fixed parameter and \code{random.generator} set to 1,
#' all generated data are the same. By varying this parameter, different datasets are obtained during data generations. The default value is 0,
#' in the event of one dataset.
#' @param filter.surr Vector of size the number of covariates, with the i-th element that indicates if the hazard for
#' surrogate is adjusted on the i-th covariate (code 1) or not (code 0). By default, 2 covariates are considered.
#' @param thetacopule The desired value for the copula parameter. The default is \code{6}.
#' @param filter.true Vector defines as \code{filter.surr}, for the true endpoint. \code{filter.true} and \code{filter.surr}
#' should have the same size
#' @param covar.names Vector of the names of covariates. By default it contains "trt" for the
#' treatment arm. Should contain the names of all covariates wished in the generated dataset.
#' @param pfs Is used to specify if the time to progression should be censored by the death time (0) or not (1).
#' The default is 0. In the event with pfs set to 1, death is included in the surrogate endpoint as in the definition of PFS or DFS.
# @param param.weibull A binary for the Weibull parametrization used. The default is \code{0}, as in
# the frailtypack package. If \code{1} the function
# \eqn{f(x)=\nu^\lambda . \lambda . x^{\lambda-1} . \exp(-(\nu x)^\lambda)} is used.
#' @return
#' This function returns if the parameter \code{full.data} is set to 0, a \code{\link{data.frame}} with columns :
#' \item{patientID}{A numeric, that represents the patient's identifier, must be unique;}
#' \item{trialID}{A numeric, that represents the trial in which each patient was randomized;}
#' \item{trt}{The treatment indicator for each patient, with 1 = treated, 0 = untreated;}
#' \item{timeS}{The follow up time associated with the surrogate endpoint;}
#' \item{statusS}{The event indicator associated with the surrogate endpoint. Normally
#' 0 = no event, 1 = event;}
#' \item{timeT}{The follow up time associated with the true endpoint;}
#' \item{statusT}{The event indicator associated with the true endpoint. Normally
#' 0 = no event, 1 = event;}
#'and other covariates named \code{Var2, var3, ..., var[ver-1]} if \code{ver > 1}.
#' If the argument \code{full.data} is set to 1, additional columns corresponding to random effects
#'\if{latex}{\eqn{u_i}} \if{html}{\code{u}\out{<sub>i</sub>}}, \if{latex}{\eqn{v_{S_i}} and
#'\eqn{v_{T_i}}}\if{html}{\code{v}\out{<sub>S<sub>i</sub></sub>} and
#' \code{v}\out{<sub>T<sub>i</sub></sub>}} are returned.
#'
#'
#' @author Casimir Ledoux Sofeu \email{casimir.sofeu@u-bordeaux.fr}, \email{scl.ledoux@gmail.com} and
#' Virginie Rondeau \email{virginie.rondeau@inserm.fr}
#'
#' @references
#'
#' Rondeau V., Mathoulin-Pelissier S., Jacqmin-Gadda H., Brouste V. and Soubeyran P. (2007).
#' Joint frailty models for recurring events and death using maximum penalized likelihood
#' estimation: application on cancer events. Biostatistics 8(4), 708-721.
#'
#' Sofeu, C. L., Emura, T., and Rondeau, V. (2020). A joint frailty-copula model for meta-analytic
#' validation of failure time surrogate endpoints in clinical trials. \code{Under review}
#'
#' @seealso \code{\link{jointSurrSimul}, \link{jointSurroCopPenal}}
#' @export
#'
#'
#' @examples
#'
#' \dontrun{
#' # dataset with 2 covariates and fixed censorship
#' data.sim <- jointSurrCopSimul(n.obs=600, n.trial = 30, prop.cens = 0, cens.adm=549,
#' alpha = 1.5, gamma = 2.5, sigma.s = 0.7, sigma.t = 0.7,
#' cor = 0.8, betas = c(-1.25, 0.5), betat = c(-1.25, 0.5),
#' full.data = 0, random.generator = 1,ver = 2, covar.names = "trt",
#' nb.reject.data = 0, thetacopule = 6, filter.surr = c(1,1),
#' filter.true = c(1,1), seed = 0)
#'
#' #dataset with 2 covariates and random censorship
#'
#' data.sim2 <- jointSurrCopSimul(n.obs=600, n.trial = 30, prop.cens = 0.75,
#' cens.adm = 549, alpha = 1.5, gamma = 2.5, sigma.s = 0.7,
#' sigma.t = 0.7, cor = 0.8, betas = c(-1.25, 0.5),
#' betat = c(-1.25, 0.5), full.data = 0, random.generator = 1,
#' ver = 2, covar.names = "trt", nb.reject.data = 0, thetacopule = 6,
#' filter.surr = c(1,1), filter.true = c(1,1), seed = 0)
#'
#' }
jointSurrCopSimul <- function(n.obs = 600, n.trial = 30, prop.cens = 0, cens.adm = 549, alpha = 1.5, gamma = 2.5,
                           sigma.s = 0.7, sigma.t = 0.7,cor = 0.9, betas = c(-1.25, 0.5), betat = c(-1.25, 0.5),
                           frailt.base = 1, lambda.S = 1.3, nu.S = 0.0025,lambda.T = 1.1, nu.T = 0.0025, ver = 2, typeOf = 1,
                           equi.subj.trial = 1 ,equi.subj.trt = 1, prop.subj.trial = NULL, prop.subj.trt = NULL,
                           full.data = 0, random.generator = 1, random = 0, random.nb.sim = 0, seed = 0, nb.reject.data = 0,
                           thetacopule = 6, filter.surr = c(1,1), filter.true = c(1,1), covar.names = "trt", pfs = 0){

  # Fixed simulation settings not exposed as arguments:
  # param.weibull = 0 uses the frailtypack Weibull parametrization;
  # theta/zeta are fixed values passed to the Fortran routine
  param.weibull <- 0
  theta <- 3.5
  zeta <- 1
  rsqrt <- cor

  # ==============parameters checking======================
  if(!(equi.subj.trt %in% c(0,1)) | !(equi.subj.trial %in% c(0,1))){
    stop("Model's parameters equi.subj.trt and equi.subj.trial must be set to 0 or 1")
  }

  if(((equi.subj.trial == 0) & is.null(prop.subj.trial)) | ((equi.subj.trt == 0) & is.null(prop.subj.trt))){
    stop("The proportions of randomized subjects per trial (or the proportions of subject per trial) are required in the variables
  \bold{prop.subj.trt} (or \bold{prop.subj.trial}). If you want the same proportions, set the parameter \bold{equi.subj.trial} (or \bold{equi.subj.trt}) to 1
  model's parameters equi.subj.trt and equi.subj.trial must be set to 0 or 1")
  }

  if(is.null(filter.surr) | is.null(filter.true)){
    stop("The vectors filter.surr and filter.true must contain at least one element corresponding to the effect of the treatment")
  }

  if(!(length(betas) == ver) | !(length(betat)==ver)){
    stop("The vectors betas and betat must contain a number of elements corresponding to ver")
  }

  if(!(length(filter.surr) == ver) | !(length(filter.true)==ver)){
    stop("The vectors filter.surr and filter.true must contain a number of elements corresponding to ver")
  }

  if(!(length(filter.surr) == length(filter.true))){
    stop("The vectors filter.surr and filter.true should have the same size")
  }
  # ============end parameters checking====================

  # If there are more covariates than supplied names, generate
  # default names "var2", "var3", ... for the unnamed covariates
  if(length(filter.surr) > length(covar.names)){
    covar.names = c(covar.names,paste("var",seq(2,length(filter.surr)), sep = ""))
  }

  # Number of columns of the simulated dataset. The required number
  # is 13 when just the treatment effect is considered as covariate.
  n.col <- 13 + length(filter.surr) -1
  data.sim <- NULL

  if(typeOf == 1){
    # joint surrogate model with shared frailty u_i and omega_ij
    lognormal <- 1
  }else{
    # joint classical model with shared individual frailty effect omega_ij, to take into account heterogeneity at
    # the individual level
    lognormal <- 2
  }

  gamma1 <- 2 # gamma distribution parameter
  gamma2 <- 2 # gamma distribution parameter

  # Per-trial randomization probabilities (0.5 per arm when balanced;
  # 0 signals the Fortran routine to use the supplied proportions)
  if(equi.subj.trt==1)   p <- rep(0.5,n.trial)
  if(equi.subj.trt==0)   p <- rep(0,n.trial)

  # Proportions of subjects per trial (equal split or user-supplied)
  if(equi.subj.trial==1) {
    prop_i <- rep(1/n.trial,n.trial)
  }
  else{
    prop_i <- prop.subj.trial
  }

  # Output buffers for the Fortran routine, flattened to doubles
  don_simul <- as.double(matrix(0, nrow = n.obs , ncol = n.col))
  don_simulS1 <- as.double(matrix(0, nrow = n.obs , ncol = n.col))

  # 2 selects the joint frailty-copula simulation model in C_surrosim
  type.joint.simul = 2

  filtre <- filter.surr
  filtre2 <- filter.true
  # filtre <- matrix(filter.surr, nrow = 1, ncol = ver)
  # filtre2 <- matrix(filter.true, nrow = 1, ncol = ver)

  # Call the compiled data-generation routine. NOTE: the argument
  # order must match the C_surrosim interface exactly -- do not
  # reorder these arguments.
  ans <- .Fortran(C_surrosim,
                  don_simul = as.double(matrix(0, nrow = n.obs , ncol = n.col)),
                  don_simulS1 = as.double(matrix(0, nrow = n.obs , ncol = n.col)),
                  as.integer(n.obs),
                  as.integer(n.col),
                  as.integer(lognormal),
                  as.integer(0),
                  vrai_theta=as.double(0),
                  as.integer(n.obs) ,
                  as.integer(ver) ,
                  as.double(alpha) ,
                  as.double(prop.cens),
                  as.double(cens.adm),
                  as.double(gamma1),
                  as.double(gamma2),
                  as.double(theta),
                  as.double(lambda.S),
                  as.double(nu.S),
                  as.double(lambda.T),
                  as.double(nu.T),
                  as.double(betas),
                  as.double(betat),
                  as.integer(n.trial),
                  as.double(rsqrt),
                  as.double(sigma.s),
                  as.double(sigma.t),
                  as.double(p),
                  as.double(prop_i),
                  as.double(gamma),
                  as.double(zeta) ,
                  as.integer(frailt.base),
                  as.integer(random.generator),
                  as.integer(random),
                  as.integer(random.nb.sim) ,
                  as.integer(seed),
                  as.integer(nb.reject.data),
                  as.integer(param.weibull),
                  as.double(thetacopule),
                  as.integer(filtre),
                  as.integer(filtre2),
                  as.integer(type.joint.simul),
                  as.integer(pfs),
                  PACKAGE="frailtypack"
  )

  # Reshape the flat Fortran output buffers back into data frames
  #ans$don_simul <- data.frame(ans$don_simul)
  #ans$don_simulS1 <- data.frame(ans$don_simulS1)
  ans$don_simul <- data.frame(matrix(ans$don_simul,nrow = n.obs , ncol = n.col))
  ans$don_simulS1 <- data.frame(matrix(ans$don_simulS1,nrow = n.obs , ncol = n.col))

  names(ans$don_simul) <- c("trt1","v_s1","v_t1","trialref1","timeS1","timeT1",
                            "timeC1","statusS1","statusT1","initTime1","Patienref1","u_i1",
                            covar.names[-1])
  names(ans$don_simulS1) <- c("trt1","v_s1","v_t1","trialref1","timeS1","timeT1",
                              "timeC1","statusS1","statusT1","initTime1","Patienref1","u_i1",
                              covar.names[-1])

  data.sim <- ans$don_simulS1[,c(4, 11, 1, 5, 8)] # data without the true endpoint
  data.sim <- merge(data.sim,ans$don_simul[,c(11, 6, 9)], by="Patienref1") # append the true-endpoint data
  if(length(covar.names)>1)
    data.sim <- merge(data.sim,ans$don_simul[,c(11,12-1+seq(1,length(covar.names))[-1])], by="Patienref1") # append the other covariates
  names(data.sim) <- c("patientID", "trialID", "trt", "timeS", "statusS", "timeT", "statusT", covar.names[-1])

  if(full.data == 1){
    # Full dataset requested: also include the generated random
    # effects u_i, v_Si and v_Ti alongside the follow-up data
    data.comp <- merge(ans$don_simulS1[,c(11, 4, 1, 12, 2, 3, 5, 8)],
                       ans$don_simul[,c(11, 6, 9,12-1+seq(1,length(covar.names))[-1])],
                       by="Patienref1")
    names(data.comp) <- c("patientID", "trialID", "trt","u_i","v_Si","v_Ti", "timeS", "statusS", "timeT", "statusT", covar.names[-1])

    # Reorder the columns depending on the model used for
    # generation (typeOf): keep only the random effects that
    # apply to that model
    if(typeOf == 1) {
      if(length(covar.names) == 1)
        data.comp <- data.comp[c(1:2,7:10,3:6)]
      else # add the other covariates
        data.comp <- data.comp[c(1:2,7:10,3:6,(ncol(data.comp)-length(covar.names)+2):ncol(data.comp))]
    }

    if(typeOf == 0) {
      if(length(covar.names) == 1)
        data.comp <- data.comp[c(1:2,7:10,3)]
      else # add the other covariates
        data.comp <- data.comp[c(1:2,7:10,3, (ncol(data.comp)-length(covar.names)+2):ncol(data.comp))]
    }
    #return(ans)
    return(data.comp)
  }

  if(full.data == 0) {
    return(data.sim)
    #return(ans)
  }
}
|
/R/jointSurrCopSimul.R
|
no_license
|
cran/frailtypack
|
R
| false
| false
| 18,515
|
r
|
#'Generate survival times for two endpoints using the joint frailty-copula model for surrogacy
#'
#'Date are generated from the one-step joint frailty-copula model, under the Claton
#'copula function (see \code{\link{jointSurroCopPenal}} for more details)
#'
#'Only Gaussian random effects are considered in this generation. If the parameter \code{full.data} is set to 1,
#'this function returns a list containing several parameters, including the generated random effects.
#'The desired individual level correlation (Kendall's \eqn{\tau}) depend on the values of the copula parameter
#'\eqn{\theta}, given that \eqn{\tau = \theta /(\theta + 2)} under the clayton copula model.
#'
#' @aliases jointSurrCopSimul
#' @param n.obs Number of considered subjects. The default is \code{600}.
#' @param n.trial Number of considered trials. The default is \code{30}.
#' @param prop.cens A value between \code{0} and \code{1}, \code{1-prop.cens} is the minimum proportion of
#' people who are randomly censored.
#' Represents the quantile to use for generating the random censorship time. In this case, the censorship
#' time follows a uniform distribution in \code{1} and \code{(prop.cens)ieme} percentile of the
#' generated death times. If this argument is set to \code{0}, the fix censorship is considered.
#' The default is \code{0}.
#' @param cens.adm Censorship time. If argument \code{prop.cens} is set to \code{0}, it represents
#' the administrative censorship time, else it represents the fix censoring time. The default is \code{549},
#' for about \code{40\%} of fix censored subjects.
#' @param alpha Fixed value for \eqn{\alpha}. The default is \code{1.5}.
#' @param gamma Fixed value for \eqn{\gamma}. The default is \code{2.5}.
#' @param sigma.s Fixed value for \if{latex}{\eqn{\sigma^2_{v_S}}}
#' \if{html}{\eqn{\sigma}\out{<sup>2</sup><sub>v<sub>S</sub></sub>}}. The default is \code{0.7}.
#' @param sigma.t Fixed value for \if{latex}{\eqn{\sigma^2_{v_T}}}
#' \if{html}{\eqn{\sigma}\out{<sup>2</sup><sub>v<sub>T</sub></sub>}}. The default is \code{0.7}.
#' @param cor Desired level of correlation between \if{latex}{\eqn{v_{S_i}} and
#' \eqn{v_{T_i}}}\if{html}{v\out{<sub>S<sub>i</sub></sub>} and v\out{<sub>T<sub>i</sub></sub>}}.
#' \if{latex}{\eqn{R^2_{trial} = cor^2}}
#' \if{html}{\code{R}\out{<sup>2</sup><sub>trial</sub>} = cor \out{<sup>2</sup>}}.
#' The default is \code{0.8}.
#' @param betas Vector of the fixed effects for \if{latex}{\eqn{\beta_S}} \if{html}{\eqn{\beta}\out{<sub>S</sub>}}.
#' The size must be equal to \code{ver}
#' The default is \code{c(-1.25,0.5)}.
#' @param betat Vector of the fixed effects for \if{latex}{\eqn{\beta_T}} \if{html}{\eqn{\beta}\out{<sub>T</sub>}}.
#' The size must be equal to \code{ver}
#' The default is \code{c(-1.25,0.5)}.
#' @param frailt.base Considered heterogeneity on the baseline risk \code{(1)} or not \code{(0)}.
#' The default is \code{1}.
#' @param lambda.S Desired scale parameter for the \code{Weibull} distribution associated with the Surrogate
#' endpoint. The default is 1.3.
#' @param nu.S Desired shape parameter for the \code{Weibull} distribution associated with the Surrogate
#' endpoint. The default is 0.0025.
#' @param lambda.T Desired scale parameter for the \code{Weibull} distribution associated with the True endpoint.
#' The default is 1.1.
#' @param nu.T Desired shape parameter for the \code{Weibull} distribution associated with the True endpoint.
#' The default is 0.0025.
#' @param ver Number of covariates. The mandatory covariate is the treatment arm. The default is \code{2}.
#' @param typeOf Type of joint model used for data generation: 0 = classical joint model
#' with a shared individual frailty effect (Rondeau, 2007), 1 = joint frailty-copula model with shared frailty
#' effects \if{latex}{\eqn{u_i}} \if{html}{\code{u}\out{<sub>i</sub>}} and two correlated random effects treatment-by-trial interaction
#' (\if{latex}{\eqn{v_{S_i}}, \eqn{v_{T_i}}}\if{html}{v\out{<sub>S<sub>i</sub></sub>}, v\out{<sub>T<sub>i</sub></sub>}}),
#' see \code{\link{jointSurroCopPenal}}.
#' @param equi.subj.trial A binary variable that indicates if the same proportion of subjects should be included per trial (1)
#' or not (0). If 0, the proportions of subject per trial are required with parameter \code{prop.subj.trial}.
#' @param equi.subj.trt A binary variable that indicates if the same proportion of subjects is randomized per trial (1)
#' or not (0). If 0, the proportions of subject per trial are required with parameter \code{prop.subj.trt}.
#' @param prop.subj.trial The proportions of subjects per trial. Requires if \code{equi.subj.trial = 0}.
#' @param prop.subj.trt The proportions of randomized subject per trial. Requires if \code{equi.subj.trt = 0}.
#' @param full.data Specified if you want the function to return the full dataset (1), including the random effects,
#' or the restictive dataset (0) with at least \code{7} columns as required for the function \code{\link{jointSurroCopPenal}}.
#' @param random.generator The random number generator used by the Fortran compiler,
#' \code{1} for the intrinsec subroutine \code{Random_number} and \code{2} for the
#' subroutine \code{uniran()}. The default is \code{1}.
#' @param random A binary that says if we reset the random number generation with a different environment
#' at each call \code{(1)} or not \code{(0)}. If it is set to \code{1}, we use the computer clock
#' as seed. In the last case, it is not possible to reproduce the generated datasets.
#' The default is \code{0}. Required if \code{random.generator} is set to 1.
#' @param random.nb.sim required if \code{random.generator} is set to 1, and if \code{random} is set to 1.
#' @param seed The seed to use for data (or samples) generation. Required if the argument \code{random.generator} is set to 1.
#' Must be a positive value. If negative, the program do not account for seed. The default is \code{0}.
#' @param nb.reject.data Number of generation to reject before the considered dataset. This parameter is required
#' when data generation is for simulation. With a fixed parameter and \code{random.generator} set to 1,
#' all ganerated data are the same. By varying this parameter, different datasets are obtained during data generations. The default value is 0,
#' in the event of one dataset.
#' @param filter.surr Vector of size the number of covariates, with the i-th element that indicates if the hazard for
#' surrogate is adjusted on the i-th covariate (code 1) or not (code 0). By default, 2 covariates are considered.
#' @param thetacopule The desired value for the copula parameter. The default is \code{6}.
#' @param filter.true Vector defines as \code{filter.surr}, for the true endpoint. \code{filter.true} and \code{filter.surr}
#' should have the same size
#' @param covar.names Vector of the names of covariables. By default it contains "trt" for the
#' tratment arm. Should contains the names of all covarites wished in the generated dataset.
#' @param pfs Is used to specify if the time to progression should be censored by the death time (0) or not (1).
#' The default is 0. In the event with pfs set to 1, death is included in the surrogate endpoint as in the definition of PFS or DFS.
# @param param.weibull A binary for the Weibull parametrization used. The default is \code{0}, as in
# the frailtypack package. If \code{1} the function
# \eqn{f(x)=\nu^\lambda . \lambda . x^{\lambda-1} . \exp(-(\nu x)^\lambda)} is used.
#' @return
#' This function returns if the parameter \code{full.data} is set to 0, a \code{\link{data.frame}} with columns :
#' \item{patientID}{A numeric, that represents the patient's identifier, must be unique;}
#' \item{trialID}{A numeric, that represents the trial in which each patient was randomized;}
#' \item{trt}{The treatment indicator for each patient, with 1 = treated, 0 = untreated;}
#' \item{timeS}{The follow up time associated with the surrogate endpoint;}
#' \item{statusS}{The event indicator associated with the surrogate endpoint. Normally
#' 0 = no event, 1 = event;}
#' \item{timeT}{The follow up time associated with the true endpoint;}
#' \item{statusT}{The event indicator associated with the true endpoint. Normally
#' 0 = no event, 1 = event;}
#'and other covariates named \code{Var2, var3, ..., var[ver-1]} if \code{ver > 1}.
#' If the argument \code{full.data} is set to 1, additional columns corresponding to the random effects
#'\if{latex}{\eqn{u_i}} \if{html}{\code{u}\out{<sub>i</sub>}}, \if{latex}{\eqn{v_{S_i}} and
#'\eqn{v_{T_i}}}\if{html}{\code{v}\out{<sub>S<sub>i</sub></sub>} and
#' \code{v}\out{<sub>T<sub>i</sub></sub>}} are returned.
#'
#'
#' @author Casimir Ledoux Sofeu \email{casimir.sofeu@u-bordeaux.fr}, \email{scl.ledoux@gmail.com} and
#' Virginie Rondeau \email{virginie.rondeau@inserm.fr}
#'
#' @references
#'
#' Rondeau V., Mathoulin-Pelissier S., Jacqmin-Gadda H., Brouste V. and Soubeyran P. (2007).
#' Joint frailty models for recurring events and death using maximum penalized likelihood
#' estimation: application on cancer events. Biostatistics 8(4), 708-721.
#'
#' Sofeu, C. L., Emura, T., and Rondeau, V. (2020). A joint frailty-copula model for meta-analytic
#' validation of failure time surrogate endpoints in clinical trials. \code{Under review}
#'
#' @seealso \code{\link{jointSurrSimul}, \link{jointSurroCopPenal}}
#' @export
#'
#'
#' @examples
#'
#' \dontrun{
#' # dataset with 2 covariates and fixed censorship
#' data.sim <- jointSurrCopSimul(n.obs=600, n.trial = 30, prop.cens = 0, cens.adm=549,
#' alpha = 1.5, gamma = 2.5, sigma.s = 0.7, sigma.t = 0.7,
#' cor = 0.8, betas = c(-1.25, 0.5), betat = c(-1.25, 0.5),
#' full.data = 0, random.generator = 1,ver = 2, covar.names = "trt",
#' nb.reject.data = 0, thetacopule = 6, filter.surr = c(1,1),
#' filter.true = c(1,1), seed = 0)
#'
#' #dataset with 2 covariates and random censorship
#'
#' data.sim2 <- jointSurrCopSimul(n.obs=600, n.trial = 30, prop.cens = 0.75,
#' cens.adm = 549, alpha = 1.5, gamma = 2.5, sigma.s = 0.7,
#' sigma.t = 0.7, cor = 0.8, betas = c(-1.25, 0.5),
#' betat = c(-1.25, 0.5), full.data = 0, random.generator = 1,
#' ver = 2, covar.names = "trt", nb.reject.data = 0, thetacopule = 6,
#' filter.surr = c(1,1), filter.true = c(1,1), seed = 0)
#'
#' }
jointSurrCopSimul <- function(n.obs = 600, n.trial = 30, prop.cens = 0, cens.adm = 549, alpha = 1.5, gamma = 2.5,
                              sigma.s = 0.7, sigma.t = 0.7, cor = 0.9, betas = c(-1.25, 0.5), betat = c(-1.25, 0.5),
                              frailt.base = 1, lambda.S = 1.3, nu.S = 0.0025, lambda.T = 1.1, nu.T = 0.0025, ver = 2, typeOf = 1,
                              equi.subj.trial = 1, equi.subj.trt = 1, prop.subj.trial = NULL, prop.subj.trt = NULL,
                              full.data = 0, random.generator = 1, random = 0, random.nb.sim = 0, seed = 0, nb.reject.data = 0,
                              thetacopule = 6, filter.surr = c(1,1), filter.true = c(1,1), covar.names = "trt", pfs = 0){
  # Fixed simulation constants, not exposed through the interface.
  param.weibull <- 0  # Weibull parametrization flag passed to Fortran (0 = frailtypack convention)
  theta <- 3.5        # frailty parameter handed to the Fortran generator
  zeta <- 1
  rsqrt <- cor        # desired correlation between v_Si and v_Ti (R2_trial = cor^2)
  # ==============parameters checking======================
  if(!(equi.subj.trt %in% c(0,1)) | !(equi.subj.trial %in% c(0,1))){
    stop("Model's parameters equi.subj.trt and equi.subj.trial must be set to 0 or 1")
  }
  if(((equi.subj.trial == 0) & is.null(prop.subj.trial)) | ((equi.subj.trt == 0) & is.null(prop.subj.trt))){
    stop("The proportions of randomized subjects per trial (or the proportions of subject per trial) are required in the variables
\bold{prop.subj.trt} (or \bold{prop.subj.trial}). If you want the same proportions, set the parameter \bold{equi.subj.trial} (or \bold{equi.subj.trt}) to 1
model's parameters equi.subj.trt and equi.subj.trial must be set to 0 or 1")
  }
  if(is.null(filter.surr) | is.null(filter.true)){
    stop("The vectors filter.surr and filter.true must contain at least one element corresponding to the effect of the treatment")
  }
  if(!(length(betas) == ver) | !(length(betat)==ver)){
    stop("The vectors betas and betat must contain a number of elements corresponding to ver")
  }
  if(!(length(filter.surr) == ver) | !(length(filter.true)==ver)){
    stop("The vectors filter.surr and filter.true must contain a number of elements corresponding to ver")
  }
  if(!(length(filter.surr) == length(filter.true))){
    stop("The vectors filter.surr and filter.true should have the same size")
  }
  # ============end parameters checking====================
  if(length(filter.surr) > length(covar.names)){
    # More covariates than supplied names: auto-name the extras "var2", "var3", ...
    covar.names = c(covar.names,paste("var",seq(2,length(filter.surr)), sep = ""))
  }
  # Number of columns of the simulated dataset: 13 are required when the
  # treatment arm is the only covariate, plus one per additional covariate.
  n.col <- 13 + length(filter.surr) -1
  data.sim <- NULL
  if(typeOf == 1){
    # joint surrogate model with shared frailty u_i and omega_ij
    lognormal <- 1
  }else{
    # classical joint model with a shared individual frailty effect omega_ij,
    # to take into account heterogeneity at the individual level
    lognormal <- 2
  }
  gamma1 <- 2 # gamma distribution parameter
  gamma2 <- 2 # gamma distribution parameter
  # Per-trial randomization probability.
  # NOTE(review): when equi.subj.trt == 0 the validated prop.subj.trt argument
  # is never used and p is set to 0 for every trial -- presumably the Fortran
  # routine treats 0 as a sentinel; confirm against the surrosim source.
  if(equi.subj.trt==1) p <- rep(0.5,n.trial)
  if(equi.subj.trt==0) p <- rep(0,n.trial)
  # Proportion of subjects per trial.
  if(equi.subj.trial==1) {
    prop_i <- rep(1/n.trial,n.trial)
  } else{
    prop_i <- prop.subj.trial
  }
  # NOTE(review): these two locals are never passed to .Fortran below (fresh
  # zero matrices are built inline in the call); kept as-is for fidelity.
  don_simul <- as.double(matrix(0, nrow = n.obs , ncol = n.col))
  don_simulS1 <- as.double(matrix(0, nrow = n.obs , ncol = n.col))
  type.joint.simul = 2  # 2 = joint frailty-copula generation model
  filtre <- filter.surr
  filtre2 <- filter.true
  # filtre <- matrix(filter.surr, nrow = 1, ncol = ver)
  # filtre2 <- matrix(filter.true, nrow = 1, ncol = ver)
  # Call the compiled generator. The positional order of the arguments below
  # must match the Fortran subroutine signature exactly -- do not reorder.
  ans <- .Fortran(C_surrosim,
                  don_simul = as.double(matrix(0, nrow = n.obs , ncol = n.col)),
                  don_simulS1 = as.double(matrix(0, nrow = n.obs , ncol = n.col)),
                  as.integer(n.obs),
                  as.integer(n.col),
                  as.integer(lognormal),
                  as.integer(0),
                  vrai_theta=as.double(0),
                  as.integer(n.obs) ,
                  as.integer(ver) ,
                  as.double(alpha) ,
                  as.double(prop.cens),
                  as.double(cens.adm),
                  as.double(gamma1),
                  as.double(gamma2),
                  as.double(theta),
                  as.double(lambda.S),
                  as.double(nu.S),
                  as.double(lambda.T),
                  as.double(nu.T),
                  as.double(betas),
                  as.double(betat),
                  as.integer(n.trial),
                  as.double(rsqrt),
                  as.double(sigma.s),
                  as.double(sigma.t),
                  as.double(p),
                  as.double(prop_i),
                  as.double(gamma),
                  as.double(zeta) ,
                  as.integer(frailt.base),
                  as.integer(random.generator),
                  as.integer(random),
                  as.integer(random.nb.sim) ,
                  as.integer(seed),
                  as.integer(nb.reject.data),
                  as.integer(param.weibull),
                  as.double(thetacopule),
                  as.integer(filtre),
                  as.integer(filtre2),
                  as.integer(type.joint.simul),
                  as.integer(pfs),
                  PACKAGE="frailtypack"
  )
  #ans$don_simul <- data.frame(ans$don_simul)
  #ans$don_simulS1 <- data.frame(ans$don_simulS1)
  # Reshape the flat Fortran output vectors back into n.obs x n.col data
  # frames: don_simul holds the complete ("true") data, don_simulS1 the
  # surrogate-side data.
  ans$don_simul <- data.frame(matrix(ans$don_simul,nrow = n.obs , ncol = n.col))
  ans$don_simulS1 <- data.frame(matrix(ans$don_simulS1,nrow = n.obs , ncol = n.col))
  names(ans$don_simul) <- c("trt1","v_s1","v_t1","trialref1","timeS1","timeT1",
                            "timeC1","statusS1","statusT1","initTime1","Patienref1","u_i1",
                            covar.names[-1])
  names(ans$don_simulS1) <- c("trt1","v_s1","v_t1","trialref1","timeS1","timeT1",
                              "timeC1","statusS1","statusT1","initTime1","Patienref1","u_i1",
                              covar.names[-1])
  data.sim <- ans$don_simulS1[,c(4, 11, 1, 5, 8)] # data without the true endpoint
  data.sim <- merge(data.sim,ans$don_simul[,c(11, 6, 9)], by="Patienref1") # add the true-endpoint data
  if(length(covar.names)>1)
    data.sim <- merge(data.sim,ans$don_simul[,c(11,12-1+seq(1,length(covar.names))[-1])], by="Patienref1") # add the extra covariates
  names(data.sim) <- c("patientID", "trialID", "trt", "timeS", "statusS", "timeT", "statusT", covar.names[-1])
  if(full.data == 1){
    # Full dataset: also expose the generated random effects u_i, v_Si, v_Ti.
    data.comp <- merge(ans$don_simulS1[,c(11, 4, 1, 12, 2, 3, 5, 8)],
                       ans$don_simul[,c(11, 6, 9,12-1+seq(1,length(covar.names))[-1])],
                       by="Patienref1")
    names(data.comp) <- c("patientID", "trialID", "trt","u_i","v_Si","v_Ti", "timeS", "statusS", "timeT", "statusT", covar.names[-1])
    if(typeOf == 1) {
      if(length(covar.names) == 1)
        data.comp <- data.comp[c(1:2,7:10,3:6)]
      else # append the other covariates
        data.comp <- data.comp[c(1:2,7:10,3:6,(ncol(data.comp)-length(covar.names)+2):ncol(data.comp))]
    }
    if(typeOf == 0) {
      # Classical joint model: only the shared frailty u_i is kept.
      if(length(covar.names) == 1)
        data.comp <- data.comp[c(1:2,7:10,3)]
      else # append the other covariates
        data.comp <- data.comp[c(1:2,7:10,3, (ncol(data.comp)-length(covar.names)+2):ncol(data.comp))]
    }
    #return(ans)
    return(data.comp)
  }
  if(full.data == 0) {
    return(data.sim)
    #return(ans)
  }
}
|
# Load hook: print the package banner (name, version, build date, news hint)
# when the package is attached via library().
.onAttach <- function(...) {
  packageStartupMessage("Super Learner")
  desc <- utils::packageDescription("SuperLearner")
  packageStartupMessage("Version: ", desc$Version)
  packageStartupMessage("Package created on ", desc$Date, "\n")
  packageStartupMessage("Use SuperLearnerNews() to see changes from previous versions and latest news", "\n")
  # packageStartupMessage('Suggested packages to install for the Super Learner library:')
  # packageStartupMessage(utils::packageDescription('SuperLearner')$Suggests)
}
|
/R/zzz.R
|
no_license
|
ledell/SuperLearner
|
R
| false
| false
| 539
|
r
|
# Startup banner shown when the package is attached (library/require).
.onAttach <- function(...) {
  packageStartupMessage("Super Learner")
  info <- utils::packageDescription("SuperLearner")
  banner <- c(
    paste0("Version: ", info$Version),
    paste0("Package created on ", info$Date, "\n"),
    "Use SuperLearnerNews() to see changes from previous versions and latest news\n"
  )
  for (line in banner) {
    packageStartupMessage(line)
  }
}
|
##' update tree
##'
##'
##' @rdname update-TREE
##' @title \%<\%
##' @param pg ggplot2 object
##' @param x update by x
##' @return updated ggplot object
##' @export
##' @author Yu Guangchuang
##' @examples
##' library("ggplot2")
##' nwk <- system.file("extdata", "sample.nwk", package="treeio")
##' tree <- read.tree(nwk)
##' p <- ggtree(tree) + geom_tippoint(color="#b5e521", alpha=1/4, size=10)
##' p %<% rtree(30)
# Update a ggtree plot with a new tree object; errors on non-tree input.
`%<%` <- function(pg, x) {
  if (is.tree(x)) {
    return(pg %place% x)
  }
  stop("input should be a tree object...")
}
##' add annotation data to a tree
##'
##'
##' @rdname add-TREEDATA
##' @title \%<+\%
##' @param pg ggplot2 object
##' @param data annotation data
##' @return ggplot object with annotation data added
##' @export
##' @author Yu Guangchuang
##' @examples
##' nwk <- system.file("extdata", "sample.nwk", package="treeio")
##' tree <- read.tree(nwk)
##' p <- ggtree(tree)
##' dd <- data.frame(taxa=LETTERS[1:13],
##' place=c(rep("GZ", 5), rep("HK", 3), rep("CZ", 4), NA),
##' value=round(abs(rnorm(13, mean=70, sd=10)), digits=1))
##' row.names(dd) <- NULL
##' p %<+% dd + geom_text(aes(color=place, label=label), hjust=-0.5)
# Attach an annotation data.frame to a ggtree plot; errors on other input.
`%<+%` <- function(pg, data) {
  if (is.data.frame(data)) {
    return(pg %add% data)
  }
  stop("input should be a data.frame...")
}
# Re-fortify the plot with a new tree, reusing the layout settings that were
# captured in the environment of the original ggtree() call.
`%place%` <- function(pg, tree) {
  env <- pg$plot_env
  tree_mrsd      <- get("mrsd", envir = env)
  tree_layout    <- get("layout", envir = env)
  tree_yscale    <- get("yscale", envir = env)
  tree_ladderize <- get("ladderize", envir = env)
  tree_right     <- get("right", envir = env)
  tree_brlen     <- get("branch.length", envir = env)
  pg$data <- fortify(tree,
                     layout = tree_layout,
                     yscale = tree_yscale,
                     ladderize = tree_ladderize,
                     right = tree_right,
                     branch.length = tree_brlen,
                     mrsd = tree_mrsd)
  pg
}
# Merge annotation `data` into the plot's data and return the updated plot.
`%add%` <- function(p, data) {
  p$data <- `%add2%`(p$data, data)
  p
}
# Attach the annotation columns of d2 to the tree data d1, preserving d1's
# row (node) order. Joins on "node" when d2 has such a column, otherwise on
# the tip label held in d2's first column.
`%add2%` <- function(d1, d2) {
  if ("node" %in% colnames(d2)) {
    # Keep "node" plus only those d2 columns not already present in d1.
    nm <- colnames(d2)
    wanted <- c("node", setdiff(nm, colnames(d1)))
    d2 <- d2[, which(nm %in% wanted)]
    merged <- merge(d1, d2, by.x = "node", by.y = "node", all.x = TRUE)
  } else {
    # First column of d2 holds labels; `unlist` coerces tbl_df columns too.
    d2[, 1] <- as.character(unlist(d2[, 1]))
    merged <- merge(d1, d2, by.x = "label", by.y = 1, all.x = TRUE)
  }
  # merge() sorts by key; restore d1's original node order.
  merged[match(d1$node, merged$node), ]
}
##' update data with tree info (y coordination and panel)
##'
##'
##' @rdname add_TREEINFO
##' @title \%+>\%
##' @param p tree view
##' @param data data.frame
##' @return updated data.frame
##' @importFrom methods is
##' @export
##' @author Guangchuang Yu
`%+>%` <- function(p, data) {
    # Align external annotation data with the tree view's data, assigning each
    # record to the right-most facet panel of the plot.
    df <- p$data
    lv <- levels(df$.panel)  # existing panel levels of the tree view
    if (is(data, "GRanges") || is(data, "GRangesList")) {
        # Re-key the ranges by the matching tip's y coordinate, then sort by
        # it so the ranges line up with the tree rows.
        names(data) <- df$y[match(names(data), df$label)]
        res <- data[order(as.numeric(names(data)))]
        # Fetch mcols()/`mcols<-`() lazily so GenomicRanges stays a soft
        # dependency. NOTE(review): the backticks inside the "`mcols<-`"
        # string suggest get_fun_from_pkg() resolves names via
        # eval(parse(...)); confirm against its implementation.
        mcols <- get_fun_from_pkg("GenomicRanges", "mcols")
        `mcols<-` <- get_fun_from_pkg("GenomicRanges", "`mcols<-`")
        mcols(res)$.panel <- factor(lv[length(lv)], levels=lv)
    } else if (is(data, "data.frame") || is(data, "tbl_df")) {
        data <- as.data.frame(data)
        ## res <- merge(df[, c('label', 'y')], data, by.x='label', by.y=1) ## , all.x=TRUE)
        # Inner join on the tip label (first column of `data`), dropping tree
        # geometry columns that would clash with the annotation columns.
        res <- merge(df[, !names(df) %in% c('node', 'parent', 'x', 'branch', 'angle')], data, by.x='label', by.y=1)
        res[[".panel"]] <- factor(lv[length(lv)], levels=lv)
    } else {
        stop("input 'data' is not supported...")
    }
    # Return rows ordered by y so downstream geoms draw in tree order.
    res <- res[order(res$y),]
    return(res)
}
##' pipe
##' @importFrom magrittr %>%
##' @name %>%
##' @export
##' @rdname pipe
##' @param lhs left hand side
##' @param rhs right hand side
##' @usage lhs \%>\% rhs
##' @seealso
##' \link[magrittr]{pipe}
NULL
|
/R/operator.R
|
no_license
|
lzh93/ggtree
|
R
| false
| false
| 3,952
|
r
|
##' update tree
##'
##'
##' @rdname update-TREE
##' @title \%<\%
##' @param pg ggplot2 object
##' @param x update by x
##' @return updated ggplot object
##' @export
##' @author Yu Guangchuang
##' @examples
##' library("ggplot2")
##' nwk <- system.file("extdata", "sample.nwk", package="treeio")
##' tree <- read.tree(nwk)
##' p <- ggtree(tree) + geom_tippoint(color="#b5e521", alpha=1/4, size=10)
##' p %<% rtree(30)
# Replace the tree underlying a ggtree plot; non-tree input is an error.
`%<%` <- function(pg, x) {
  if (!is.tree(x)) stop("input should be a tree object...")
  pg %place% x
}
##' add annotation data to a tree
##'
##'
##' @rdname add-TREEDATA
##' @title \%<+\%
##' @param pg ggplot2 object
##' @param data annotation data
##' @return ggplot object with annotation data added
##' @export
##' @author Yu Guangchuang
##' @examples
##' nwk <- system.file("extdata", "sample.nwk", package="treeio")
##' tree <- read.tree(nwk)
##' p <- ggtree(tree)
##' dd <- data.frame(taxa=LETTERS[1:13],
##' place=c(rep("GZ", 5), rep("HK", 3), rep("CZ", 4), NA),
##' value=round(abs(rnorm(13, mean=70, sd=10)), digits=1))
##' row.names(dd) <- NULL
##' p %<+% dd + geom_text(aes(color=place, label=label), hjust=-0.5)
# Attach annotation data (must be a data.frame) to a ggtree plot.
`%<+%` <- function(pg, data) {
  if (!is.data.frame(data)) stop("input should be a data.frame...")
  pg %add% data
}
# Swap the tree behind an existing ggtree plot: re-run fortify() on the new
# tree with the layout options recorded in the plot's environment.
`%place%` <- function(pg, tree) {
  keys <- c("mrsd", "layout", "yscale", "ladderize", "right", "branch.length")
  opts <- lapply(keys, function(nm) get(nm, envir = pg$plot_env))
  names(opts) <- keys
  pg$data <- fortify(tree,
                     layout = opts$layout,
                     yscale = opts$yscale,
                     ladderize = opts$ladderize,
                     right = opts$right,
                     branch.length = opts$branch.length,
                     mrsd = opts$mrsd)
  pg
}
# Fold annotation `data` into the plot's data slot and hand the plot back.
`%add%` <- function(p, data) {
  p$data <- p$data %add2% data
  p
}
# Merge annotation d2 into tree data d1; rows come back in d1's node order.
# Joins on a "node" column when present, otherwise on the tip label stored in
# d2's first column.
`%add2%` <- function(d1, d2) {
  if ("node" %in% colnames(d2)) {
    cols <- colnames(d2)
    extra <- cols[!(cols %in% colnames(d1))]  # d2-only columns to bring over
    sel <- which(cols %in% c("node", extra))
    out <- merge(d1, d2[, sel], by.x = "node", by.y = "node", all.x = TRUE)
  } else {
    # Coerce the label key to character; unlist() handles tbl_df columns.
    d2[, 1] <- as.character(unlist(d2[, 1]))
    out <- merge(d1, d2, by.x = "label", by.y = 1, all.x = TRUE)
  }
  # Undo merge()'s key-sorting: restore the original node order of d1.
  out[match(d1$node, out$node), ]
}
##' update data with tree info (y coordination and panel)
##'
##'
##' @rdname add_TREEINFO
##' @title \%+>\%
##' @param p tree view
##' @param data data.frame
##' @return updated data.frame
##' @importFrom methods is
##' @export
##' @author Guangchuang Yu
`%+>%` <- function(p, data) {
    # Align external annotation data with the tree view's data, assigning each
    # record to the right-most facet panel of the plot.
    df <- p$data
    lv <- levels(df$.panel)  # existing panel levels of the tree view
    if (is(data, "GRanges") || is(data, "GRangesList")) {
        # Re-key the ranges by the matching tip's y coordinate, then sort by
        # it so the ranges line up with the tree rows.
        names(data) <- df$y[match(names(data), df$label)]
        res <- data[order(as.numeric(names(data)))]
        # Fetch mcols()/`mcols<-`() lazily so GenomicRanges stays a soft
        # dependency. NOTE(review): the backticks inside the "`mcols<-`"
        # string suggest get_fun_from_pkg() resolves names via
        # eval(parse(...)); confirm against its implementation.
        mcols <- get_fun_from_pkg("GenomicRanges", "mcols")
        `mcols<-` <- get_fun_from_pkg("GenomicRanges", "`mcols<-`")
        mcols(res)$.panel <- factor(lv[length(lv)], levels=lv)
    } else if (is(data, "data.frame") || is(data, "tbl_df")) {
        data <- as.data.frame(data)
        ## res <- merge(df[, c('label', 'y')], data, by.x='label', by.y=1) ## , all.x=TRUE)
        # Inner join on the tip label (first column of `data`), dropping tree
        # geometry columns that would clash with the annotation columns.
        res <- merge(df[, !names(df) %in% c('node', 'parent', 'x', 'branch', 'angle')], data, by.x='label', by.y=1)
        res[[".panel"]] <- factor(lv[length(lv)], levels=lv)
    } else {
        stop("input 'data' is not supported...")
    }
    # Return rows ordered by y so downstream geoms draw in tree order.
    res <- res[order(res$y),]
    return(res)
}
##' pipe
##' @importFrom magrittr %>%
##' @name %>%
##' @export
##' @rdname pipe
##' @param lhs left hand side
##' @param rhs right hand side
##' @usage lhs \%>\% rhs
##' @seealso
##' \link[magrittr]{pipe}
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_qq.R
\name{plot_qq}
\alias{plot_qq}
\title{Plot qqplot}
\usage{
plot_qq(
data = data,
comp.names = NULL,
p.value.flag = "P.Value",
ci = 0.95,
plot.save.to = NULL
)
}
\arguments{
\item{data}{Summary statistics table or a list that contains multiple summary statistics tables from limma or DEseq2, where each row is a gene.}
\item{comp.names}{A character vector that contains the comparison names which correspond to the same order as \code{data}. No default.}
\item{p.value.flag}{The column name of \code{P-VALUE} (NOT FDR, NO multiplicity adjusted p-value) in the summary statistics table. Default = "P.Value".}
\item{ci}{Confidence interval. Default = 0.95}
\item{plot.save.to}{The file name and the address where to save the qq-plot "~/address_to_folder/qqplot.png". Default = NULL.}
}
\value{
The function returns a ggplot object of the qqplot
}
\description{
This function generates a QQ-plot object with confidence interval from summary statistics table generated by differential expression analysis
like \code{limma} or \code{DESeq2}.
}
\details{
The function produces the qqplot to evaluate the result from differential expression analysis. The output is a ggplot object.
}
\examples{
plot_qq(data = Sample_summary_statistics_table)
plot_qq(data = list(Sample_summary_statistics_table, Sample_summary_statistics_table1),
comp.names = c("A","B"))
}
\references{
Xingpeng Li & Tatiana Gelaf Romer & Olya Besedina, RVA - RNAseq Visualization Automation tool.
}
|
/man/plot_qq.Rd
|
no_license
|
cran/RVA
|
R
| false
| true
| 1,610
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_qq.R
\name{plot_qq}
\alias{plot_qq}
\title{Plot qqplot}
\usage{
plot_qq(
data = data,
comp.names = NULL,
p.value.flag = "P.Value",
ci = 0.95,
plot.save.to = NULL
)
}
\arguments{
\item{data}{Summary statistics table or a list that contains multiple summary statistics tables from limma or DEseq2, where each row is a gene.}
\item{comp.names}{A character vector that contains the comparison names which correspond to the same order as \code{data}. No default.}
\item{p.value.flag}{The column name of \code{P-VALUE} (NOT FDR, NO multiplicity adjusted p-value) in the summary statistics table. Default = "P.Value".}
\item{ci}{Confidence interval. Default = 0.95}
\item{plot.save.to}{The file name and the address where to save the qq-plot "~/address_to_folder/qqplot.png". Default = NULL.}
}
\value{
The function returns a ggplot object of the qqplot
}
\description{
This function generates a QQ-plot object with confidence interval from summary statistics table generated by differential expression analysis
like \code{limma} or \code{DESeq2}.
}
\details{
The function produces the qqplot to evaluate the result from differential expression analysis. The output is a ggplot object.
}
\examples{
plot_qq(data = Sample_summary_statistics_table)
plot_qq(data = list(Sample_summary_statistics_table, Sample_summary_statistics_table1),
comp.names = c("A","B"))
}
\references{
Xingpeng Li & Tatiana Gelaf Romer & Olya Besedina, RVA - RNAseq Visualization Automation tool.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.