content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
library(lifecontingencies)
context("Object Initialization")
test_that("Unequal lengths and population at risk is non-increasing", {
  # x has 6 ages but lx has only 5 values, and lx rises at position 4:
  # either violation must make the validity check fail.
  ages <- 0:5
  survivors <- c(100, 75, 50, 51, 12)
  expect_error(new("lifetable", x = ages, lx = survivors))
})
test_that("Increasing x", {
  # Unsorted input: the constructor is expected to order ages increasingly
  # and the population at risk decreasingly.
  ages <- c(0, 2, 1, 3)
  survivors <- c(100, 50, 75, 25)
  life_tab <- new("lifetable", x = ages, lx = survivors)
  expect_equal(life_tab@x, sort(ages))
  expect_equal(life_tab@lx, sort(survivors, decreasing = TRUE))
})
test_that("Integral, non-negative x and increasing by 1", {
  survivors <- c(100, 75, 50, 25)
  # non-integer age
  expect_error(new("lifetable", x = c(0, 1.5, 2, 3), lx = survivors))
  # negative ages
  expect_error(new("lifetable", x = c(-2, -1, 0, 1), lx = survivors))
  # gap in the age sequence (step of 2)
  expect_error(new("lifetable", x = c(0, 1, 3, 4), lx = survivors))
})
test_that("Zeros and NAs in lx are removed", {
  # trailing zero is dropped
  tbl <- new("lifetable", x = 0:4, lx = c(100, 75, 50, 25, 0))
  expect_equal(tbl@x, c(0, 1, 2, 3))
  expect_equal(tbl@lx, c(100, 75, 50, 25))
  # NA entry (at a duplicated age) is dropped
  tbl <- new("lifetable", x = c(0, 1, 1, 2, 3), lx = c(100, NA, 50, 25, 12))
  expect_equal(tbl@x, c(0, 1, 2, 3))
  expect_equal(tbl@lx, c(100, 50, 25, 12))
  # an NA and a trailing zero are both dropped
  tbl <- new("lifetable", x = c(0, 1, 1, 2, 3), lx = c(100, NA, 50, 25, 0))
  expect_equal(tbl@x, c(0, 1, 2))
  expect_equal(tbl@lx, c(100, 50, 25))
  # two NAs and a zero are dropped
  tbl <- new("lifetable", x = c(0, 1, 1, 2, 3), lx = c(100, NA, 50, NA, 0))
  expect_equal(tbl@x, c(0, 1))
  expect_equal(tbl@lx, c(100, 50))
})
library(lifecontingencies)
# Unit tests for validity checks of the "lifetable" S4 class
# (duplicate text column of the same test file above).
context("Object Initialization")
# x and lx must be of equal length and lx must be non-increasing.
test_that("Unequal lengths and population at risk is non-increasing", {
x <- 0:5
lx <- c(100, 75, 50, 51, 12)
expect_error(new("lifetable", x = x, lx = lx))
})
# The constructor is expected to sort ages increasingly and lx decreasingly.
test_that("Increasing x", {
x <- c(0, 2, 1, 3)
lx <- c(100, 50, 75, 25)
tbl <- new("lifetable",x = x, lx = lx)
expect_equal(tbl@x, sort(x))
expect_equal(tbl@lx, sort(lx, decreasing = TRUE))
})
# Ages must be non-negative integers increasing in steps of 1.
test_that("Integral, non-negative x and increasing by 1", {
x <- c(0, 1.5, 2, 3)
lx <- c(100, 75, 50, 25)
expect_error(tbl<-new("lifetable", x = x, lx = lx))
x <- c(-2, -1, 0, 1)
expect_error(tbl<-new("lifetable", x = x, lx = lx))
x <- c(0, 1, 3, 4)
expect_error(tbl<-new("lifetable", x = x, lx = lx))
})
# Entries with lx == 0 or lx == NA are expected to be dropped on construction.
test_that("Zeros and NAs in lx are removed", {
x <- 0:4
lx <- c(100, 75, 50, 25, 0)
tbl <- new("lifetable",x = x, lx = lx)
expect_equal(tbl@x, c(0, 1, 2, 3))
expect_equal(tbl@lx, c(100, 75, 50, 25))
x <- c(0, 1, 1, 2, 3)
lx <- c(100, NA, 50, 25, 12)
tbl <- new("lifetable",x = x, lx = lx)
expect_equal(tbl@x, c(0, 1, 2, 3))
expect_equal(tbl@lx, c(100, 50, 25, 12))
x <- c(0, 1, 1, 2, 3)
lx <- c(100, NA, 50, 25, 0)
tbl <- new("lifetable",x = x, lx = lx)
expect_equal(tbl@x, c(0, 1, 2))
expect_equal(tbl@lx, c(100, 50, 25))
x <- c(0, 1, 1, 2, 3)
lx <- c(100, NA, 50, NA, 0)
tbl <- new("lifetable",x = x, lx = lx)
expect_equal(tbl@x, c(0, 1))
expect_equal(tbl@lx, c(100, 50))
})
\name{add.response}
\alias{add.response}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Function to add response values to an experimental design}
\description{
This function allows to add numeric response variables to an experimental plan of
class design. The responses are added both to the data frame and to its desnum
attribute; the response.names element of the design.info attribute is updated -
the function is still experimental.
}
\usage{
add.response(design, response, rdapath=NULL, replace = FALSE,
InDec=options("OutDec")[[1]], tol = .Machine$double.eps ^ 0.5, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{design}{a character string that gives the name of a class \code{\link{design}}
object, to which responses are to be added}
\item{response}{EITHER
a numeric vector, numeric matrix or data frame with at least
one numeric variable (the treatment of these is explained in the details section)
OR
a character string indicating a csv file that contains the typed-in response values;
after reading the csv file with the csv version indicated in the \code{InDec} argument,
numeric variables from response will be added to the design as responses }
\item{rdapath}{a character string indicating the path to a stored rda file that contains the
design }
\item{replace}{ logical: TRUE implies that existing variables are overwritten in \code{design};
cf. also the details section}
\item{InDec}{ decimal separator in the external csv file; defaults to the
      \code{OutDec} option (viewable under \code{options("OutDec")}),
      and also governs whether the \code{csv}-file is read with \code{\link[utils:read.table]{read.csv}} or with
      \code{\link[utils:read.table]{read.csv2}}:
      separator semicolon goes with decimal comma and triggers use of \code{\link[utils:read.table]{read.csv2}},
      separator comma goes with decimal point and triggers use of \code{\link[utils:read.table]{read.csv}}. )}
\item{tol}{tolerance for comparing numerical values;\cr
useful for designs with numeric factors and for partial replacement of response values;
the value is used in comparisons of design and response via \code{\link{all.equal}};
errors from peculiar rounding behavior of spreadsheet programs can be prevented by
allowing a larger \code{tol} }
\item{\dots}{further arguments; currently not used}
}
\details{
If \code{response} is a data frame or a matrix, responses are assumed to be
all the numeric variables that are neither factor names or block names in \code{design}
(i.e. names of the \code{factor.names} element of the \code{design.info} attribute
or the \code{block.name} element of that same attribute)
nor column names of the \code{run.order} attribute, nor \code{name} or \code{Name}.
If \code{design} already contains columns for the response(s), NA entries of these
are overwritten, if all non-NA entries coincide between \code{design}
and \code{response}.
The idea behind this function is as follows:
After using \code{\link{export.design}} for storing an R work space with the
design object and either a csv or html file externally,
Excel or some other external software is used to type in experimental information.
The thus-obtained data sheet is saved as a csv-file and imported into R again (name provided
in argument \code{response}), and the design object with all attached information is
linked to the typed in response values using function \code{add.response}.
Alternatively, it is possible to simply type in experimental results in R, both
using the R commander plugin (\pkg{RcmdrPlugin.DoE}) or simply function \code{\link[utils]{fix}}.
Copy-pasting into R from Excel is per default NOT possible, which has been the reason for programming this routine.
}
\value{
The value is a modified version of the argument object \code{design},
which remains an object of class \code{\link{design}} with the following modifications:
\itemize{
\item Response columns are added to the data frame
\item the same response columns are added to the desnum attribute
\item the \code{response.names} element of the \code{design.info} attribute is added or modified
}
}
\author{ Ulrike Groemping }
\seealso{ See also \code{\link{export.design}} }
\examples{
plan <- fac.design(nlevels=c(2,3,2,4))
result <- rnorm(2*3*2*4)
add.response(plan,response=result)
## direct use of rnorm() is also possible, but looks better with 48
add.response(plan,response=rnorm(48))
\dontrun{
export.design(path="c:/projectA/experiments",plan)
## open exported file c:/projectA/experiments/plan.html
## with Excel
## carry out the experiment, input data in Excel or elsewhere
## store as csv file with the same name (or a different one, just use
## the correct storage name later in R), after deleting
## the legend portion to the right of the data area
## (alternatively, input data by typing them in in R (function fix or R-commander)
add.response(design="plan",response="c:/projectA/experiments/plan.csv",
rdapath="c:/projectA/experiments/plan.rda")
## plan is the name of the design in the workspace stored in rdapath
## assuming only responses were typed in
## should work on your computer regardless of system,
## if you adapt the path names accordingly
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ array }
\keyword{ design }% __ONLY ONE__ keyword per line
\name{add.response}
\alias{add.response}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ Function to add response values to an experimental design}
\description{
This function allows to add numeric response variables to an experimental plan of
class design. The responses are added both to the data frame and to its desnum
attribute; the response.names element of the design.info attribute is updated -
the function is still experimental.
}
\usage{
add.response(design, response, rdapath=NULL, replace = FALSE,
InDec=options("OutDec")[[1]], tol = .Machine$double.eps ^ 0.5, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{design}{a character string that gives the name of a class \code{\link{design}}
object, to which responses are to be added}
\item{response}{EITHER
a numeric vector, numeric matrix or data frame with at least
one numeric variable (the treatment of these is explained in the details section)
OR
a character string indicating a csv file that contains the typed-in response values;
after reading the csv file with the csv version indicated in the \code{InDec} argument,
numeric variables from response will be added to the design as responses }
\item{rdapath}{a character string indicating the path to a stored rda file that contains the
design }
\item{replace}{ logical: TRUE implies that existing variables are overwritten in \code{design};
cf. also the details section}
\item{InDec}{ decimal separator in the external csv file; defaults to the
      \code{OutDec} option (viewable under \code{options("OutDec")}),
      and also governs whether the \code{csv}-file is read with \code{\link[utils:read.table]{read.csv}} or with
      \code{\link[utils:read.table]{read.csv2}}:
      separator semicolon goes with decimal comma and triggers use of \code{\link[utils:read.table]{read.csv2}},
      separator comma goes with decimal point and triggers use of \code{\link[utils:read.table]{read.csv}}. )}
\item{tol}{tolerance for comparing numerical values;\cr
useful for designs with numeric factors and for partial replacement of response values;
the value is used in comparisons of design and response via \code{\link{all.equal}};
errors from peculiar rounding behavior of spreadsheet programs can be prevented by
allowing a larger \code{tol} }
\item{\dots}{further arguments; currently not used}
}
\details{
If \code{response} is a data frame or a matrix, responses are assumed to be
all the numeric variables that are neither factor names or block names in \code{design}
(i.e. names of the \code{factor.names} element of the \code{design.info} attribute
or the \code{block.name} element of that same attribute)
nor column names of the \code{run.order} attribute, nor \code{name} or \code{Name}.
If \code{design} already contains columns for the response(s), NA entries of these
are overwritten, if all non-NA entries coincide between \code{design}
and \code{response}.
The idea behind this function is as follows:
After using \code{\link{export.design}} for storing an R work space with the
design object and either a csv or html file externally,
Excel or some other external software is used to type in experimental information.
The thus-obtained data sheet is saved as a csv-file and imported into R again (name provided
in argument \code{response}), and the design object with all attached information is
linked to the typed in response values using function \code{add.response}.
Alternatively, it is possible to simply type in experimental results in R, both
using the R commander plugin (\pkg{RcmdrPlugin.DoE}) or simply function \code{\link[utils]{fix}}.
Copy-pasting into R from Excel is per default NOT possible, which has been the reason for programming this routine.
}
\value{
The value is a modified version of the argument object \code{design},
which remains an object of class \code{\link{design}} with the following modifications:
\itemize{
\item Response columns are added to the data frame
\item the same response columns are added to the desnum attribute
\item the \code{response.names} element of the \code{design.info} attribute is added or modified
}
}
\author{ Ulrike Groemping }
\seealso{ See also \code{\link{export.design}} }
\examples{
plan <- fac.design(nlevels=c(2,3,2,4))
result <- rnorm(2*3*2*4)
add.response(plan,response=result)
## direct use of rnorm() is also possible, but looks better with 48
add.response(plan,response=rnorm(48))
\dontrun{
export.design(path="c:/projectA/experiments",plan)
## open exported file c:/projectA/experiments/plan.html
## with Excel
## carry out the experiment, input data in Excel or elsewhere
## store as csv file with the same name (or a different one, just use
## the correct storage name later in R), after deleting
## the legend portion to the right of the data area
## (alternatively, input data by typing them in in R (function fix or R-commander)
add.response(design="plan",response="c:/projectA/experiments/plan.csv",
rdapath="c:/projectA/experiments/plan.rda")
## plan is the name of the design in the workspace stored in rdapath
## assuming only responses were typed in
## should work on your computer regardless of system,
## if you adapt the path names accordingly
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ array }
\keyword{ design }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_download.R
\name{db_download}
\alias{db_download}
\alias{db_download_ncbi}
\alias{db_download_itis}
\alias{db_download_tpl}
\alias{db_download_col}
\alias{db_download_gbif}
\title{Download taxonomic databases}
\usage{
db_download_ncbi(verbose = TRUE)
db_download_itis(verbose = TRUE)
db_download_tpl(verbose = TRUE)
db_download_col(verbose = TRUE)
db_download_gbif(verbose = TRUE)
}
\arguments{
\item{verbose}{(logical) Print messages. Default: \code{TRUE}}
}
\value{
Path to the downloaded SQL database
}
\description{
Download taxonomic databases
}
\details{
Downloads sql database, cleans up unneeded files, returns path
to sql file
}
\section{Supported}{
\itemize{
\item ITIS - PostgreSQL
\item The PlantList - PostgreSQL
\item Catalogue of Life - MySQL
\item GBIF - SQLite
}
}
\section{Beware}{
COL database loading takes a long time, e.g., 30 minutes. You may
want to run it in a separate R session, or just look at the db_load_col function
and run the commands in your shell.
}
\examples{
\dontrun{
# ITIS
# x <- db_download_itis()
# db_load_itis(x)
# src_itis()
# Plantlist
# x <- db_download_tpl()
# db_load_tpl(x, "sacmac")
# src_tpl()
# COL
# x <- db_download_col()
# db_load_col(x)
# src_col()
# GBIF
# x <- db_download_gbif()
# db_load_gbif()
# src_gbif(x)
# NCBI
# x <- db_download_ncbi()
# db_load_ncbi()
# src_ncbi(x)
}
}
\seealso{
\link{tdb_cache}
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db_download.R
\name{db_download}
\alias{db_download}
\alias{db_download_ncbi}
\alias{db_download_itis}
\alias{db_download_tpl}
\alias{db_download_col}
\alias{db_download_gbif}
\title{Download taxonomic databases}
\usage{
db_download_ncbi(verbose = TRUE)
db_download_itis(verbose = TRUE)
db_download_tpl(verbose = TRUE)
db_download_col(verbose = TRUE)
db_download_gbif(verbose = TRUE)
}
\arguments{
\item{verbose}{(logical) Print messages. Default: \code{TRUE}}
}
\value{
Path to the downloaded SQL database
}
\description{
Download taxonomic databases
}
\details{
Downloads sql database, cleans up unneeded files, returns path
to sql file
}
\section{Supported}{
\itemize{
\item ITIS - PostgreSQL
\item The PlantList - PostgreSQL
\item Catalogue of Life - MySQL
\item GBIF - SQLite
}
}
\section{Beware}{
COL database loading takes a long time, e.g., 30 minutes. You may
want to run it in a separate R session, or just look at the db_load_col function
and run the commands in your shell.
}
\examples{
\dontrun{
# ITIS
# x <- db_download_itis()
# db_load_itis(x)
# src_itis()
# Plantlist
# x <- db_download_tpl()
# db_load_tpl(x, "sacmac")
# src_tpl()
# COL
# x <- db_download_col()
# db_load_col(x)
# src_col()
# GBIF
# x <- db_download_gbif()
# db_load_gbif()
# src_gbif(x)
# NCBI
# x <- db_download_ncbi()
# db_load_ncbi()
# src_ncbi(x)
}
}
\seealso{
\link{tdb_cache}
}
|
library(parallel)
# --- Simulation configuration -----------------------------------------------
simRep <- 20000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0529) # The set of variance of random covariates b as random slope
smooth <- 0 # measurement error is added to M if smooth = 0; no measurement error is added if smooth = 1
cores <- 1 # number of worker processes for the parallel cluster
r.sim <- b.var # variance of the first random-slope component
run_one_sample <- function(iter) {
  # Runs one replication of the power simulation for testing a zero variance
  # component: simulates functional covariates and responses, extracts FPC
  # scores with fpca.face, rotates each subject's block by an SVD so a single
  # pseudo random slope (z.sim.uni) carries the tested variance, fits the
  # mixed models and applies the exact restricted likelihood ratio test.
  #
  # Args:
  #   iter: integer replication index, also used as the RNG seed.
  # Uses from the calling environment (exported to workers): r.sim, smooth.
  # Returns a list: realTau (true variance), pvalues.bonf (RLRT p-value),
  #   Merror.Var, smooth, npc (estimated number of PCs), tests2 (test object).
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter)

  # ---- simulation constants ------------------------------------------------
  D <- 80            # number of grid points
  nSubj <- 20        # number of subjects (curves)
  nRep <- 20         # repetitions per subject
  totalN <- nSubj * nRep
  thetaK.true <- 2
  timeGrid <- (1:D) / D
  npc.true <- 3      # true number of principal components
  percent <- 0.95    # proportion of variance explained retained by fpca.face
  SNR <- 3           # signal-to-noise ratio
  sd.epsilon <- 1
  delta.true <- 0.5
  a.mean <- 0
  gamma.true <- 2
  gammaVar.true <- 1

  # Subject-specific effects for the two conditions ("hot" / "warm"),
  # each subject's draw repeated over its nRep rows.
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5)  # condition indicator

  # ---- true functional covariates ------------------------------------------
  lambda.sim <- function(degree) 0.5^(degree - 1)  # eigenvalues 1, 1/2, 1/4
  # Fourier eigenfunctions on [0, 1]; sinpi(x) = sin(pi * x).
  psi.fourier <- function(t, degree) {
    result <- NA
    if (degree == 1) {
      result <- sqrt(2) * sinpi(2 * t)
    } else if (degree == 2) {
      result <- sqrt(2) * cospi(4 * t)
    } else if (degree == 3) {
      result <- sqrt(2) * sinpi(4 * t)
    }
    result
  }
  lambdaVec.true <- mapply(lambda.sim, 1:npc.true)
  psi.true <- matrix(mapply(psi.fourier, rep(timeGrid, npc.true),
                            rep(1:npc.true, each = D)),
                     nrow = npc.true, ncol = D, byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific random slopes on the FPC scores with variances
  # r.sim, r.sim/2, r.sim/4 for the three components.
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true),
                          diag(c(r.sim, r.sim / 2, r.sim / 4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- rowSums(thetaIK.true * ascore.true)
  # NOTE(review): (dummyX - 1) = -(1 - dummyX), so the "warm" effect enters Y
  # with the opposite sign of the temp.2 design column built below -- confirm
  # that this sign is intended.
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true +
    betaM.true + error

  # ---- observed (possibly noisy) functional covariate ----------------------
  ID <- rep(1:nSubj, each = nRep)
  if (smooth == 0) {
    Merror.Var <- sum(lambdaVec.true) / SNR  # SNR = sum(lambda) / Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN * D, mean = 0,
                                     sd = sqrt(Merror.Var)), totalN, D)
  }
  if (smooth == 1) {
    Merror.Var <- 0
    Mt.hat <- Mt.true
  }
  M <- Mt.hat

  # ---- FPCA on the observed curves -----------------------------------------
  knots <- 5  # previous setting: 10
  p <- 5      # spline degree (previous setting: 7)
  results <- fpca.face(M, center = TRUE, argvals = timeGrid, knots = knots,
                       pve = percent, p = p, lambda = 0)
  npc <- results$npc
  ascore <- results$scores[, 1:npc] / sqrt(D)  # rescale scores to the grid

  # ---- per-subject SVD rotation --------------------------------------------
  # Rotate each subject's rows so the tested random effects collapse into the
  # single pseudo slope z.sim.uni (sqrt of the singular values of A_i A_i').
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni <- numeric(totalN)  # preallocated (was grown with c() in a loop)
  ID.uni <- c(rbind(matrix(1:(nSubj * npc), nrow = npc, ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for (k in seq_len(nSubj)) {
    rows <- ((k - 1) * nRep + 1):(k * nRep)
    sv <- svd(ascore[rows, ] %*% t(ascore[rows, ]))  # SVD of A_i A_i'
    u.tra <- t(sv$v)
    d <- (sv$d)[1:npc]
    Y[rows] <- u.tra %*% Y[rows]
    dummyX[rows, ] <- u.tra %*% dummyX[rows, ]
    ascore[rows, ] <- rbind(u.tra[1:npc, ] %*% ascore[rows, ],
                            matrix(0, nrow = nRep - npc, ncol = npc))
    z.sim.uni[rows] <- c(sqrt(d), rep(0, nRep - npc))
  }

  # ---- mixed-model fits and exact RLRT -------------------------------------
  designMatrix.lmm <- data.frame(rating = Y,
                                 temp.1 = dummyX[, 1],
                                 temp.2 = dummyX[, 2],
                                 ID = as.factor(ID),
                                 ID.uni = as.factor(ID.uni),
                                 ascore = ascore,
                                 z.sim.uni = z.sim.uni)
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  # Full model: fixed FPC effects, random condition slopes per subject, and
  # the pseudo random slope whose variance component is under test.
  model.sim <- as.formula(paste0(
    "rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim,
    " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)"))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model containing only the tested random effect (mA for exactRLRT).
  f.slope <- as.formula(paste0(
    "rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim,
    " + (0 + z.sim.uni | ID.uni)"))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: full model without the tested random effect.
  m0 <- update(fullReml, . ~ . - (0 + z.sim.uni | ID.uni))
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]

  list(realTau = r.sim,
       pvalues.bonf = pvalues.bonf,
       Merror.Var = Merror.Var,
       smooth = smooth,
       npc = npc,
       tests2 = tests2)
}
# ---- parallel driver --------------------------------------------------------
# Exports the simulation parameters to the workers, runs simRep independent
# replications of run_one_sample, estimates power as the proportion of
# replications whose RLRT p-value falls below pvalue.true, and saves the
# result to an .RData file. (Histograms of p-values / test statistics can be
# drawn from node_results if desired.)
cluster <- makeCluster(cores)  # use detectCores() for a full machine
clusterExport(cluster, c("r.sim", "smooth"))  # parameters needed on workers
fileName <- paste0("h_f_power_", smooth, "_", b.var, "_grp20-rep20.RData")
node_results <- parLapply(cluster, seq_len(simRep), run_one_sample)
# One logical per replication: was the zero-variance null rejected?
rejected <- vapply(node_results,
                   function(res) any(res$pvalues.bonf <= pvalue.true),
                   logical(1))
Power2 <- mean(rejected)
power2.sim <- list(list(Power = Power2,
                        realTau = c(r.sim, r.sim / 2, r.sim / 4),
                        smooth = smooth))
save(power2.sim, file = fileName)  # auto save
stopCluster(cluster)
library(parallel)
# --- Simulation configuration (duplicate text column of the script above) ---
simRep <- 20000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.0529) # The set of variance of random covariates b as random slope
smooth <- 0 # measurement error is added to M if smooth = 0; none if smooth = 1
cores <- 1 # number of worker processes for the parallel cluster
r.sim <- b.var # variance of the first random-slope component

run_one_sample <- function(iter) {
  # Runs one replication of the power simulation for testing a zero variance
  # component: simulates functional covariates and responses, extracts FPC
  # scores with fpca.face, rotates each subject's block by an SVD so a single
  # pseudo random slope (z.sim.uni) carries the tested variance, fits the
  # mixed models and applies the exact restricted likelihood ratio test.
  #
  # Args:
  #   iter: integer replication index, also used as the RNG seed.
  # Uses from the calling environment (exported to workers): r.sim, smooth.
  # Returns a list: realTau (true variance), pvalues.bonf (RLRT p-value),
  #   Merror.Var, smooth, npc (estimated number of PCs), tests2 (test object).
  library(refund)
  library(lme4)
  library(nlme)
  library(arm)
  library(RLRsim)
  library(MASS)
  set.seed(iter)

  # ---- simulation constants ------------------------------------------------
  D <- 80            # number of grid points
  nSubj <- 20        # number of subjects (curves)
  nRep <- 20         # repetitions per subject
  totalN <- nSubj * nRep
  thetaK.true <- 2
  timeGrid <- (1:D) / D
  npc.true <- 3      # true number of principal components
  percent <- 0.95    # proportion of variance explained retained by fpca.face
  SNR <- 3           # signal-to-noise ratio
  sd.epsilon <- 1
  delta.true <- 0.5
  a.mean <- 0
  gamma.true <- 2
  gammaVar.true <- 1

  # Subject-specific effects for the two conditions ("hot" / "warm"),
  # each subject's draw repeated over its nRep rows.
  gammaI.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI.true <- gammaI.true[rep(1:nrow(gammaI.true), each = nRep), ]
  gammaI2.true <- mapply(rnorm, nSubj, gamma.true, rep(sqrt(gammaVar.true), 1))
  gammaI2.true <- gammaI2.true[rep(1:nrow(gammaI2.true), each = nRep), ]
  dummyX <- rbinom(n = totalN, size = 1, prob = 0.5)  # condition indicator

  # ---- true functional covariates ------------------------------------------
  lambda.sim <- function(degree) 0.5^(degree - 1)  # eigenvalues 1, 1/2, 1/4
  # Fourier eigenfunctions on [0, 1]; sinpi(x) = sin(pi * x).
  psi.fourier <- function(t, degree) {
    result <- NA
    if (degree == 1) {
      result <- sqrt(2) * sinpi(2 * t)
    } else if (degree == 2) {
      result <- sqrt(2) * cospi(4 * t)
    } else if (degree == 3) {
      result <- sqrt(2) * sinpi(4 * t)
    }
    result
  }
  lambdaVec.true <- mapply(lambda.sim, 1:npc.true)
  psi.true <- matrix(mapply(psi.fourier, rep(timeGrid, npc.true),
                            rep(1:npc.true, each = D)),
                     nrow = npc.true, ncol = D, byrow = TRUE)
  ascore.true <- mvrnorm(totalN, rep(a.mean, npc.true), diag(lambdaVec.true))
  Mt.true <- ascore.true %*% psi.true
  error <- rnorm(totalN, mean = 0, sd = sd.epsilon)
  # Subject-specific random slopes on the FPC scores with variances
  # r.sim, r.sim/2, r.sim/4 for the three components.
  thetaIK.true <- mvrnorm(nSubj, rep(thetaK.true, npc.true),
                          diag(c(r.sim, r.sim / 2, r.sim / 4)))
  thetaIK.true <- thetaIK.true[rep(1:nrow(thetaIK.true), each = nRep), ]
  betaM.true <- rowSums(thetaIK.true * ascore.true)
  # NOTE(review): (dummyX - 1) = -(1 - dummyX), so the "warm" effect enters Y
  # with the opposite sign of the temp.2 design column built below -- confirm
  # that this sign is intended.
  Y <- delta.true + dummyX * gammaI.true + (dummyX - 1) * gammaI2.true +
    betaM.true + error

  # ---- observed (possibly noisy) functional covariate ----------------------
  ID <- rep(1:nSubj, each = nRep)
  if (smooth == 0) {
    Merror.Var <- sum(lambdaVec.true) / SNR  # SNR = sum(lambda) / Merror.Var
    Mt.hat <- Mt.true + matrix(rnorm(totalN * D, mean = 0,
                                     sd = sqrt(Merror.Var)), totalN, D)
  }
  if (smooth == 1) {
    Merror.Var <- 0
    Mt.hat <- Mt.true
  }
  M <- Mt.hat

  # ---- FPCA on the observed curves -----------------------------------------
  knots <- 5  # previous setting: 10
  p <- 5      # spline degree (previous setting: 7)
  results <- fpca.face(M, center = TRUE, argvals = timeGrid, knots = knots,
                       pve = percent, p = p, lambda = 0)
  npc <- results$npc
  ascore <- results$scores[, 1:npc] / sqrt(D)  # rescale scores to the grid

  # ---- per-subject SVD rotation --------------------------------------------
  # Rotate each subject's rows so the tested random effects collapse into the
  # single pseudo slope z.sim.uni (sqrt of the singular values of A_i A_i').
  dummyX <- cbind(dummyX, -dummyX + 1)
  z.sim.uni <- numeric(totalN)  # preallocated (was grown with c() in a loop)
  ID.uni <- c(rbind(matrix(1:(nSubj * npc), nrow = npc, ncol = nSubj),
                    matrix(0, nrow = nRep - npc, ncol = nSubj)))
  for (k in seq_len(nSubj)) {
    rows <- ((k - 1) * nRep + 1):(k * nRep)
    sv <- svd(ascore[rows, ] %*% t(ascore[rows, ]))  # SVD of A_i A_i'
    u.tra <- t(sv$v)
    d <- (sv$d)[1:npc]
    Y[rows] <- u.tra %*% Y[rows]
    dummyX[rows, ] <- u.tra %*% dummyX[rows, ]
    ascore[rows, ] <- rbind(u.tra[1:npc, ] %*% ascore[rows, ],
                            matrix(0, nrow = nRep - npc, ncol = npc))
    z.sim.uni[rows] <- c(sqrt(d), rep(0, nRep - npc))
  }

  # ---- mixed-model fits and exact RLRT -------------------------------------
  designMatrix.lmm <- data.frame(rating = Y,
                                 temp.1 = dummyX[, 1],
                                 temp.2 = dummyX[, 2],
                                 ID = as.factor(ID),
                                 ID.uni = as.factor(ID.uni),
                                 ascore = ascore,
                                 z.sim.uni = z.sim.uni)
  additive0.sim <- paste(1:npc, collapse = " + ascore.")
  # Full model: fixed FPC effects, random condition slopes per subject, and
  # the pseudo random slope whose variance component is under test.
  model.sim <- as.formula(paste0(
    "rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim,
    " + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)"))
  fullReml <- lmer(model.sim, data = designMatrix.lmm)
  # Model containing only the tested random effect (mA for exactRLRT).
  f.slope <- as.formula(paste0(
    "rating ~ 1 + temp.1 + temp.2 + ascore.", additive0.sim,
    " + (0 + z.sim.uni | ID.uni)"))
  m.slope <- lmer(f.slope, data = designMatrix.lmm)
  # Null model: full model without the tested random effect.
  m0 <- update(fullReml, . ~ . - (0 + z.sim.uni | ID.uni))
  tests2 <- exactRLRT(m.slope, fullReml, m0)
  pvalues.bonf <- tests2$p[1]

  list(realTau = r.sim,
       pvalues.bonf = pvalues.bonf,
       Merror.Var = Merror.Var,
       smooth = smooth,
       npc = npc,
       tests2 = tests2)
}

# ---- parallel driver --------------------------------------------------------
# Exports the simulation parameters to the workers, runs simRep independent
# replications of run_one_sample, estimates power as the proportion of
# replications whose RLRT p-value falls below pvalue.true, and saves the
# result to an .RData file.
cluster <- makeCluster(cores)  # use detectCores() for a full machine
clusterExport(cluster, c("r.sim", "smooth"))  # parameters needed on workers
fileName <- paste0("h_f_power_", smooth, "_", b.var, "_grp20-rep20.RData")
node_results <- parLapply(cluster, seq_len(simRep), run_one_sample)
# One logical per replication: was the zero-variance null rejected?
rejected <- vapply(node_results,
                   function(res) any(res$pvalues.bonf <= pvalue.true),
                   logical(1))
Power2 <- mean(rejected)
power2.sim <- list(list(Power = Power2,
                        realTau = c(r.sim, r.sim / 2, r.sim / 4),
                        smooth = smooth))
save(power2.sim, file = fileName)  # auto save
stopCluster(cluster)
library(QuantPsyc)
### Name: plotNorm
### Title: Normal Density Plot
### Aliases: plotNormX plotNormXm
### Keywords: distribution smooth
### ** Examples
# plot.normX
data(USJudgeRatings) # data packaged with R
plotNormX(USJudgeRatings$CONT)
# creates a pdf file that contains plots for all 12 variables in USJudgeRatings
# plot.normXm
data(USJudgeRatings)
pdf("Judge.pdf") #writes file to working directory
plotNormXm(USJudgeRatings, 12)
dev.off()
| /data/genthat_extracted_code/QuantPsyc/examples/plotNormX.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 463 | r | library(QuantPsyc)
### Name: plotNorm
### Title: Normal Density Plot
### Aliases: plotNormX plotNormXm
### Keywords: distribution smooth
### ** Examples
# plot.normX
data(USJudgeRatings) # data packaged with R
plotNormX(USJudgeRatings$CONT)
# creates a pdf file that contains plots for all 12 variables in USJudgeRatings
# plot.normXm
data(USJudgeRatings)
pdf("Judge.pdf") #writes file to working directory
plotNormXm(USJudgeRatings, 12)
dev.off()
|
\name{EWHP}
\alias{EWHP}
\alias{ewhp}
\docType{data}
\title{House price data set (DataFrame) in England and Wales}
\description{
A house price data set over the England and Wales with 9 hedonic variables from 1999.
}
\usage{data(EWHP)}
\format{
A data frame with 519 observations on the following 12 variables.
\describe{
\item{Easting}{a numeric vector, X coordinate}
\item{Northing}{a numeric vector, Y coordinate}
\item{PurPrice}{a numeric vector, the purchase price of the property}
\item{BldIntWr}{a numeric vector, 1 if the property was built during the world war, 0 otherwise}
\item{BldPostW}{a numeric vector, 1 if the property was built after the world war, 0 otherwise}
\item{Bld60s}{a numeric vector, 1 if the property was built between 1960 and 1969, 0 otherwise}
\item{Bld70s}{a numeric vector, 1 if the property was built between 1970 and 1979, 0 otherwise}
\item{Bld80s}{a numeric vector, 1 if the property was built between 1980 and 1989, 0 otherwise}
\item{TypDetch}{a numeric vector, 1 if the property is detached (i.e. it is a stand-alone house), 0 otherwise}
\item{TypSemiD}{a numeric vector, 1 if the property is semi detached, 0 otherwise}
\item{TypFlat}{a numeric vector, if the property is a flat (or 'apartment' in the USA), 0 otherwise}
\item{FlrArea}{a numeric vector, floor area of the property in square metres}
}
}
\references{
Fotheringham, A.S., Brunsdon, C., and Charlton, M.E. (2002), Geographically Weighted Regression:
The Analysis of Spatially Varying Relationships, Chichester: Wiley.
}
\author{Binbin Lu \email{binbinlu@whu.edu.cn}}
\examples{
###
data(EWHP)
head(ewhp)
houses.spdf <- SpatialPointsDataFrame(ewhp[, 1:2], ewhp)
####Get the border of England and Wales
data(EWOutline)
plot(ewoutline)
plot(houses.spdf, add = TRUE, pch = 16)
}
\keyword{data,house price}
| /man/EWHP.Rd | no_license | DCAL12/GWmodel2 | R | false | false | 1,866 | rd | \name{EWHP}
\alias{EWHP}
\alias{ewhp}
\docType{data}
\title{House price data set (DataFrame) in England and Wales}
\description{
A house price data set over the England and Wales with 9 hedonic variables from 1999.
}
\usage{data(EWHP)}
\format{
A data frame with 519 observations on the following 12 variables.
\describe{
\item{Easting}{a numeric vector, X coordinate}
\item{Northing}{a numeric vector, Y coordinate}
\item{PurPrice}{a numeric vector, the purchase price of the property}
\item{BldIntWr}{a numeric vector, 1 if the property was built during the world war, 0 otherwise}
\item{BldPostW}{a numeric vector, 1 if the property was built after the world war, 0 otherwise}
\item{Bld60s}{a numeric vector, 1 if the property was built between 1960 and 1969, 0 otherwise}
\item{Bld70s}{a numeric vector, 1 if the property was built between 1970 and 1979, 0 otherwise}
\item{Bld80s}{a numeric vector, 1 if the property was built between 1980 and 1989, 0 otherwise}
\item{TypDetch}{a numeric vector, 1 if the property is detached (i.e. it is a stand-alone house), 0 otherwise}
\item{TypSemiD}{a numeric vector, 1 if the property is semi detached, 0 otherwise}
\item{TypFlat}{a numeric vector, if the property is a flat (or 'apartment' in the USA), 0 otherwise}
\item{FlrArea}{a numeric vector, floor area of the property in square metres}
}
}
\references{
Fotheringham, A.S., Brunsdon, C., and Charlton, M.E. (2002), Geographically Weighted Regression:
The Analysis of Spatially Varying Relationships, Chichester: Wiley.
}
\author{Binbin Lu \email{binbinlu@whu.edu.cn}}
\examples{
###
data(EWHP)
head(ewhp)
houses.spdf <- SpatialPointsDataFrame(ewhp[, 1:2], ewhp)
####Get the border of England and Wales
data(EWOutline)
plot(ewoutline)
plot(houses.spdf, add = TRUE, pch = 16)
}
\keyword{data,house price}
|
\name{Illumina.Human.WG6.v2}
\alias{Illumina.Human.WG6.v2}
\docType{data}
\title{
Illumina Human WG6 v2 ChrY probes
}
\description{
Y chromosome probes from the Illumina Human WG6 v2 array. The probes in this dataset mapped to the Y chromosome of Ensembl reference genome GRCh37.p11.
}
\format{
A data frame with 100 observations on the following variable.
\describe{
\item{\code{V1}}{a factor with levels \code{ILMN_1651456} \code{ILMN_1652263} \code{ILMN_1653471} \code{ILMN_1654284} \code{ILMN_1655210} \code{ILMN_1655513} \code{ILMN_1656021} \code{ILMN_1658123} \code{ILMN_1658315} \code{ILMN_1661523} \code{ILMN_1662052} \code{ILMN_1662151} \code{ILMN_1663406} \code{ILMN_1663598} \code{ILMN_1665736} \code{ILMN_1666140} \code{ILMN_1666417} \code{ILMN_1669640} \code{ILMN_1670821} \code{ILMN_1671067} \code{ILMN_1672935} \code{ILMN_1673390} \code{ILMN_1673417} \code{ILMN_1674666} \code{ILMN_1674678} \code{ILMN_1675852} \code{ILMN_1676960} \code{ILMN_1677061} \code{ILMN_1682809} \code{ILMN_1683214} \code{ILMN_1683596} \code{ILMN_1683872} \code{ILMN_1685690} \code{ILMN_1685986} \code{ILMN_1686911} \code{ILMN_1688833} \code{ILMN_1688861} \code{ILMN_1689843} \code{ILMN_1690753} \code{ILMN_1691375} \code{ILMN_1691598} \code{ILMN_1693537} \code{ILMN_1693622} \code{ILMN_1697937} \code{ILMN_1699687} \code{ILMN_1701242} \code{ILMN_1702929} \code{ILMN_1703983} \code{ILMN_1712370} \code{ILMN_1712799} \code{ILMN_1716969} \code{ILMN_1719688} \code{ILMN_1720983} \code{ILMN_1724931} \code{ILMN_1727258} \code{ILMN_1731521} \code{ILMN_1731709} \code{ILMN_1735767} \code{ILMN_1736175} \code{ILMN_1737543} \code{ILMN_1738703} \code{ILMN_1739587} \code{ILMN_1742460} \code{ILMN_1745914} \code{ILMN_1746858} \code{ILMN_1748182} \code{ILMN_1754528} \code{ILMN_1755537} \code{ILMN_1756506} \code{ILMN_1764541} \code{ILMN_1765734} \code{ILMN_1767355} \code{ILMN_1767590} \code{ILMN_1768489} \code{ILMN_1772163} \code{ILMN_1774902} \code{ILMN_1775851} \code{ILMN_1776195} \code{ILMN_1776719} \code{ILMN_1779475} \code{ILMN_1780365} \code{ILMN_1783033} \code{ILMN_1783142} \code{ILMN_1786311} \code{ILMN_1788506} \code{ILMN_1790515} \code{ILMN_1794407} \code{ILMN_1794651} \code{ILMN_1797207} \code{ILMN_1797563} \code{ILMN_1798314} \code{ILMN_1800243} \code{ILMN_1804958} \code{ILMN_1805519} \code{ILMN_1806313} \code{ILMN_1808923} \code{ILMN_1810319} \code{ILMN_1812328} 
\code{ILMN_1812760} \code{ILMN_1813093}}
}
}
\examples{
data(Illumina.Human.WG6.v2)
## maybe str(Illumina.Human.WG6.v2) ; plot(Illumina.Human.WG6.v2) ...
}
\keyword{datasets}
| /man/Illumina.Human.WG6.v2.Rd | no_license | cran/MASSI | R | false | false | 2,553 | rd | \name{Illumina.Human.WG6.v2}
\alias{Illumina.Human.WG6.v2}
\docType{data}
\title{
Illumina Human WG6 v2 ChrY probes
}
\description{
Y chromosome probes from the Illumina Human WG6 v2 array. The probes in this dataset mapped to the Y chromosome of Ensembl reference genome GRCh37.p11.
}
\format{
A data frame with 100 observations on the following variable.
\describe{
\item{\code{V1}}{a factor with levels \code{ILMN_1651456} \code{ILMN_1652263} \code{ILMN_1653471} \code{ILMN_1654284} \code{ILMN_1655210} \code{ILMN_1655513} \code{ILMN_1656021} \code{ILMN_1658123} \code{ILMN_1658315} \code{ILMN_1661523} \code{ILMN_1662052} \code{ILMN_1662151} \code{ILMN_1663406} \code{ILMN_1663598} \code{ILMN_1665736} \code{ILMN_1666140} \code{ILMN_1666417} \code{ILMN_1669640} \code{ILMN_1670821} \code{ILMN_1671067} \code{ILMN_1672935} \code{ILMN_1673390} \code{ILMN_1673417} \code{ILMN_1674666} \code{ILMN_1674678} \code{ILMN_1675852} \code{ILMN_1676960} \code{ILMN_1677061} \code{ILMN_1682809} \code{ILMN_1683214} \code{ILMN_1683596} \code{ILMN_1683872} \code{ILMN_1685690} \code{ILMN_1685986} \code{ILMN_1686911} \code{ILMN_1688833} \code{ILMN_1688861} \code{ILMN_1689843} \code{ILMN_1690753} \code{ILMN_1691375} \code{ILMN_1691598} \code{ILMN_1693537} \code{ILMN_1693622} \code{ILMN_1697937} \code{ILMN_1699687} \code{ILMN_1701242} \code{ILMN_1702929} \code{ILMN_1703983} \code{ILMN_1712370} \code{ILMN_1712799} \code{ILMN_1716969} \code{ILMN_1719688} \code{ILMN_1720983} \code{ILMN_1724931} \code{ILMN_1727258} \code{ILMN_1731521} \code{ILMN_1731709} \code{ILMN_1735767} \code{ILMN_1736175} \code{ILMN_1737543} \code{ILMN_1738703} \code{ILMN_1739587} \code{ILMN_1742460} \code{ILMN_1745914} \code{ILMN_1746858} \code{ILMN_1748182} \code{ILMN_1754528} \code{ILMN_1755537} \code{ILMN_1756506} \code{ILMN_1764541} \code{ILMN_1765734} \code{ILMN_1767355} \code{ILMN_1767590} \code{ILMN_1768489} \code{ILMN_1772163} \code{ILMN_1774902} \code{ILMN_1775851} \code{ILMN_1776195} \code{ILMN_1776719} \code{ILMN_1779475} \code{ILMN_1780365} \code{ILMN_1783033} \code{ILMN_1783142} \code{ILMN_1786311} \code{ILMN_1788506} \code{ILMN_1790515} \code{ILMN_1794407} \code{ILMN_1794651} \code{ILMN_1797207} \code{ILMN_1797563} \code{ILMN_1798314} \code{ILMN_1800243} \code{ILMN_1804958} \code{ILMN_1805519} \code{ILMN_1806313} \code{ILMN_1808923} \code{ILMN_1810319} \code{ILMN_1812328} 
\code{ILMN_1812760} \code{ILMN_1813093}}
}
}
\examples{
data(Illumina.Human.WG6.v2)
## maybe str(Illumina.Human.WG6.v2) ; plot(Illumina.Human.WG6.v2) ...
}
\keyword{datasets}
|
library(samr)
library(data.table)
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/CAF20.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/EAP1.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4E.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4G1.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4G2.csv", row.names = 1) -> X
read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/Pab1.csv", row.names = 1) -> X
read.csv("~/DISK2/AD/Data/names.csv") -> genes
xcol <- ncol(X)
xrow <- nrow(X)-1
X2 <- X[1:xrow, 1:xcol]
y1 <-c(rep(1,3), rep(2,3))
data=list(x=as.matrix(X2), y=y1,logged2 =TRUE)
samr.obj <- samr(data, resp.type = "Two class unpaired", nperms = 100)
delta.table <- samr.compute.delta.table(samr.obj, min.foldchange = 0.1, nvals = 200)
siggenes.table <- samr.compute.siggenes.table(samr.obj, del = 0, data, delta.table, all.genes = TRUE)
A <- siggenes.table$genes.up
B <- siggenes.table$genes.lo
c <- rbind(A, B)
lo <- c[as.numeric(c[,8])<1,]
as.data.table(lo) -> fdr
fdr$Row %in% rownames(genes) -> D
cbind(fdr, D) -> E
E[which(E$D == TRUE),] -> F
F$D <- NULL
rownames(genes) %in% fdr$Row -> fdrow
cbind(genes, fdrow) -> comb
comb[which(comb$fdrow == TRUE),] -> yes
yes$fdrow <- NULL
cbind(fdr, yes) -> genes
genes$Row <- genes$`Gene ID` <- genes$`Gene Name` <- genes$`Score(d)` <- genes$`Numerator(r)` <- genes$`Denominator(s+s0)` <- NULL
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/CAF20_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/EAP1_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4E_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4G1_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4G2_DE.csv")
write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/Pab1_DE.csv")
| /AD.normalisation/RBP_SAM.R | no_license | Krutik6/MSci_KrutikPatel_16-17 | R | false | false | 1,794 | r | library(samr)
library(data.table)
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/CAF20.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/EAP1.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4E.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4G1.csv", row.names = 1) -> X
#read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/eIF4G2.csv", row.names = 1) -> X
read.csv("~/DISK2/AD/Data/RBPs/AD_norm_data/Pab1.csv", row.names = 1) -> X
read.csv("~/DISK2/AD/Data/names.csv") -> genes
xcol <- ncol(X)
xrow <- nrow(X)-1
X2 <- X[1:xrow, 1:xcol]
y1 <-c(rep(1,3), rep(2,3))
data=list(x=as.matrix(X2), y=y1,logged2 =TRUE)
samr.obj <- samr(data, resp.type = "Two class unpaired", nperms = 100)
delta.table <- samr.compute.delta.table(samr.obj, min.foldchange = 0.1, nvals = 200)
siggenes.table <- samr.compute.siggenes.table(samr.obj, del = 0, data, delta.table, all.genes = TRUE)
A <- siggenes.table$genes.up
B <- siggenes.table$genes.lo
c <- rbind(A, B)
lo <- c[as.numeric(c[,8])<1,]
as.data.table(lo) -> fdr
fdr$Row %in% rownames(genes) -> D
cbind(fdr, D) -> E
E[which(E$D == TRUE),] -> F
F$D <- NULL
rownames(genes) %in% fdr$Row -> fdrow
cbind(genes, fdrow) -> comb
comb[which(comb$fdrow == TRUE),] -> yes
yes$fdrow <- NULL
cbind(fdr, yes) -> genes
genes$Row <- genes$`Gene ID` <- genes$`Gene Name` <- genes$`Score(d)` <- genes$`Numerator(r)` <- genes$`Denominator(s+s0)` <- NULL
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/CAF20_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/EAP1_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4E_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4G1_DE.csv")
#write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/eIF4G2_DE.csv")
write.csv(genes, "~/DISK2/AD/Data/RBPs/1%/Pab1_DE.csv")
|
library(shiny)
shinyUI(fluidPage(
titlePanel("Wine Quality Regression Analysis"),
sidebarLayout(
sidebarPanel(
helpText(
h2("App Documentation"),
p('This Shiny app aims to demonstrate simple linear regression analysis of data from Wine Quality Data Set taken from http://archive.ics.uci.edu/ml/datasets/Wine+Quality, in which relationship between outcome wine quality and a reactive parameter is determined by ploting the regression line according to the parameter the user selects'),
p('Two datasets are included, related to red and white vinho verde wine samples, from the north of Portugal. The goal is to model wine quality based on physicochemical tests'),
p('In the end, users can select the output format to export the results as PDF, HTML or Word file.'),
strong('See http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality.names for more details of data set'),
em('Refer to http://shiny.rstudio.com/gallery/download-knitr-reports.html for download feature')
),
selectInput('parameters','Please select a regression model of wine quality against:',choices=names(redwine),selected=(names(redwine))[11]),
selectInput('WineType', 'Select Wine Type', c('Red Wine', 'White Wine'),selected='Red Wine'),
radioButtons('format', 'Document format', c('PDF', 'HTML', 'Word')),
downloadButton('downloadReport','download')
),
mainPanel(
h2("Linear Regression Line"),
plotOutput('regPlot'),
h2("Summary of the model"),
verbatimTextOutput("text1")
)
)
)) | /data_Product_APP/ui.R | permissive | macchiatoism/coursera_data_product_project | R | false | false | 1,605 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Wine Quality Regression Analysis"),
sidebarLayout(
sidebarPanel(
helpText(
h2("App Documentation"),
p('This Shiny app aims to demonstrate simple linear regression analysis of data from Wine Quality Data Set taken from http://archive.ics.uci.edu/ml/datasets/Wine+Quality, in which relationship between outcome wine quality and a reactive parameter is determined by ploting the regression line according to the parameter the user selects'),
p('Two datasets are included, related to red and white vinho verde wine samples, from the north of Portugal. The goal is to model wine quality based on physicochemical tests'),
p('In the end, users can select the output format to export the results as PDF, HTML or Word file.'),
strong('See http://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality.names for more details of data set'),
em('Refer to http://shiny.rstudio.com/gallery/download-knitr-reports.html for download feature')
),
selectInput('parameters','Please select a regression model of wine quality against:',choices=names(redwine),selected=(names(redwine))[11]),
selectInput('WineType', 'Select Wine Type', c('Red Wine', 'White Wine'),selected='Red Wine'),
radioButtons('format', 'Document format', c('PDF', 'HTML', 'Word')),
downloadButton('downloadReport','download')
),
mainPanel(
h2("Linear Regression Line"),
plotOutput('regPlot'),
h2("Summary of the model"),
verbatimTextOutput("text1")
)
)
)) |
#!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
require(ape)
require(seqinr)
alig = read.dna(file=args[1],format="fasta")
d = dist.dna(alig)
alig = read.alignment(file=args[1],format="fasta")
dnds = kaks(alig)
cat(paste(labels(d)[1],round(d[1],digits = 4)),labels(d)[2] , dnds$ka, dnds$ks, dnds$ka/dnds$ks, dnds$vka, dnds$vks, "\n")
| /calcdist.R | no_license | loire/Moubata_paper | R | false | false | 352 | r | #!/usr/bin/env Rscript
args = commandArgs(trailingOnly=TRUE)
require(ape)
require(seqinr)
alig = read.dna(file=args[1],format="fasta")
d = dist.dna(alig)
alig = read.alignment(file=args[1],format="fasta")
dnds = kaks(alig)
cat(paste(labels(d)[1],round(d[1],digits = 4)),labels(d)[2] , dnds$ka, dnds$ks, dnds$ka/dnds$ks, dnds$vka, dnds$vks, "\n")
|
library(RgoogleMaps)
#
maplocation <- read.xlsx("map-location.xlsx", sheet = 1)
#
my.lat <- maplocation$'纬度'
my.lon <- maplocation$'经度'
#找邊界
bb = qbbox(my.lat, my.lon)
print(bb)
MyMap <- GetMap.bbox(bb$lonR, bb$latR, maptype = "roadmap")
My.markers <- cbind.data.frame(lat = my.lat, lon = my.lon)
tmp <- PlotOnStaticMap(MyMap, lat = My.markers[,"lat"], lon = My.markers[,"lon"],
cex=3, pch=16, col=10, add=F)
| /top10location.R | no_license | sunnyorsunny/WaveNetProject | R | false | false | 454 | r | library(RgoogleMaps)
#
maplocation <- read.xlsx("map-location.xlsx", sheet = 1)
#
my.lat <- maplocation$'纬度'
my.lon <- maplocation$'经度'
#找邊界
bb = qbbox(my.lat, my.lon)
print(bb)
MyMap <- GetMap.bbox(bb$lonR, bb$latR, maptype = "roadmap")
My.markers <- cbind.data.frame(lat = my.lat, lon = my.lon)
tmp <- PlotOnStaticMap(MyMap, lat = My.markers[,"lat"], lon = My.markers[,"lon"],
cex=3, pch=16, col=10, add=F)
|
library(magrittr)
library(shiny.semantic)
config <- config::get()
ships_data_manager <- ShipsDataManager$new(data.table::fread(config$dataset))
ui <- semanticPage(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "css/styles.css")
),
shiny.semantic::grid(
create_grid_template(),
map = ship_route_map_ui("ship_route_map"),
header = div(
class = "dashboard-header",
h1(class = "ui header", icon("ship"), div(class = "content", "Ships Explorer")),
ship_selection_menu_ui("ship_selection"),
distance_tile_ui("distance_info")
)
)
)
server <- function(input, output, session) {
ship_route_map_server("ship_route_map", ship_route)
distance_tile_server("distance_info", ship_route)
ship_selection_data <- ship_selection_menu_server("ship_selection", ships_data_manager)
ship_route <- reactive({
req(ship_selection_data$ship_name())
ship_type <- isolate({ ship_selection_data$ship_type() })
ships_data_manager$get_longest_distance_route(
ship_type,
ship_selection_data$ship_name()
)
})
}
shinyApp(ui, server)
| /app.R | permissive | szymanskir/ShipsExplorer | R | false | false | 1,116 | r | library(magrittr)
library(shiny.semantic)
config <- config::get()
ships_data_manager <- ShipsDataManager$new(data.table::fread(config$dataset))
ui <- semanticPage(
tags$head(
tags$link(rel = "stylesheet", type = "text/css", href = "css/styles.css")
),
shiny.semantic::grid(
create_grid_template(),
map = ship_route_map_ui("ship_route_map"),
header = div(
class = "dashboard-header",
h1(class = "ui header", icon("ship"), div(class = "content", "Ships Explorer")),
ship_selection_menu_ui("ship_selection"),
distance_tile_ui("distance_info")
)
)
)
server <- function(input, output, session) {
ship_route_map_server("ship_route_map", ship_route)
distance_tile_server("distance_info", ship_route)
ship_selection_data <- ship_selection_menu_server("ship_selection", ships_data_manager)
ship_route <- reactive({
req(ship_selection_data$ship_name())
ship_type <- isolate({ ship_selection_data$ship_type() })
ships_data_manager$get_longest_distance_route(
ship_type,
ship_selection_data$ship_name()
)
})
}
shinyApp(ui, server)
|
############################################################################################
# This script is a revision of convert_csv_to_linkage.R, to convert a .csv file in chi
# square root table format into a .linkage file. The difference between this and
# convert_csv_to_linkage.R is that the input .csv is oriented with samples being represented
# in rows, and SNPs being represented by columns. I.e. each row is going to be really long,
# which is not recommended in a .csv file. This script should hardly, if ever, need to be
# used. This script is also not built into the LepMAP2pipeline, so if plan to use the
# pipeline, you should just transpose your input .csv file to list the samples as columns
# and the SNPs as rows before running the pipeline, in which case you won't need this script.
#
# Input .csv file format:
# The chi square root table consists of one row listing SNP ids, and a row for
# each sample. The data in the chi square root table is represented as either "H", "A", or
# something ("-" or "NA" for example, you can specify which you use in the
# missingDataIndicator input parameter below) to represent missing or irrelevant data. The
# most frequent genotype in an SNP is represented as H and the second most frequent type
# is represented as A.
#
#
# Information on .linkage file format (for use with Lep-MAP2 software):
#
# Data is tab-delimited.
#
# First 6 Columns contain "pedigree information":
# 1. Family ID (can be alphanumeric)
# 2. Individual ID (must be unique within family, can be alphanumeric)
# 3. Father ID (0 if father is not in family)
# 4. Mother ID (0 if mother is not in family)
# 5. Gender (0=unknown, 1=male, 2=female)
# 6. Affection status (0=unknown, 1=unaffected, 2=affected)
#
# Columns 7 and onward describe the phenotype data, separated by tabs. There
# are four different types of phenotype data supported by the LINKAGE format
# (Numbered Alleles, Binary Factors, Affection Status, Quantitative Traits),
# and the Lep-MAP2 documentation uses Numbered Alleles.
#
# With Numbered Alleles, each genotype is represented as a pair of numbers
# (eg: 1 2 2 2 1 1 1 2). Each number represents an allele, and 0
# represents an unknown allele.
#
# For our purposes (input for the Lep-MAP2 software), we are setting:
# H = "1 2", A = "1 1", B = "2 2", NA = "0 0".
#
# Lep-MAP2 documentation: https://sourceforge.net/p/lepmap2/wiki/browse_pages/
#
# Official LINKAGE file format documentation available:
# http://www.jurgott.org/linkage/LinkagePC.html#__RefHeading__137_1806185151
# http://www.jurgott.org/linkage/LinkageUser.pdf
#
#
############################################################################################
# Input Parameters:
#args <- commandArgs(trailingOnly = TRUE)
#path <- args[1]
path <- "/home/benrancourt/Downloads/r38-753+5092"
#MAF_CUTOFF <- args[2]
#MAF_CUTOFF <- as.double(MAF_CUTOFF)
options(stringsAsFactors = FALSE, warn = 1)
filenameMinusExtension <- "Data-753+5,092"
familyName <- "r38"
reportc <- read.csv(paste(path.expand(path),paste0(filenameMinusExtension,".csv"), sep="/"), check.names=FALSE) # using check.names=FALSE in case the column names have dashes (-) in them. This will prevent them from being converted to periods. However, a column name with a dash in it will not be able to be used as a variable name, so we'll have to refer to columns by their index if accessing them.
#reportc <- reportc[, -1]
if(!("COMBINED" %in% colnames(reportc)))
{
s <- 4
}else
{
s <- 5
}
# #######################################################################
message("converting the chi square report to .linkage format")
reportLinkageGenotypes <- reportc[, -1] # remove the sample id column
message(paste0("ncols: ",ncol(reportLinkageGenotypes)))
reportLinkageGenotypes <- rbind(parent1 = c("A"), parent2 = c("H"), reportLinkageGenotypes) # add two samples to use as parents
message(paste0("ncols: ",ncol(reportLinkageGenotypes)))
#reportLinkageGenotypes <- t(reportLinkageGenotypes) # transpose the report (so it's columns are now rows)
message(paste0("nrows: ",nrow(reportLinkageGenotypes)))
reportLinkageGenotypes[reportLinkageGenotypes=="H"] <- "1 2"
reportLinkageGenotypes[reportLinkageGenotypes=="A"] <- "1 1"
reportLinkageGenotypes[reportLinkageGenotypes=="B"] <- "2 2"
reportLinkageGenotypes[is.na(reportLinkageGenotypes)] <- "0 0"
reportLinkageGenotypes[reportLinkageGenotypes=="-"] <- "0 0" # in case NA "-" has already been substituted with "-"
reportLinkage <- cbind(family = c(familyName), id = c(paste0("S",(1:nrow(reportLinkageGenotypes))-2)), fatherId = c("P1"), motherId = c("P2"), gender = c(0), affectionStatus = c(0), reportLinkageGenotypes)
reportLinkage[1,2:5] <- c("P1","0","0","1") # change id from S-1 to P1, no parents, male
reportLinkage[2,2:5] <- c("P2","0","0","2") # change id from S0 to P2, no parents, female
write.table(reportLinkage, file= paste(path.expand(path),paste0(filenameMinusExtension,".linkage"), sep="/"), append=FALSE, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE)
message("report_gen part 2 complete")
| /LepMAP/convert_csv_to_linkage-alreadyHorizontal.R | no_license | benranco/docs | R | false | false | 5,096 | r | ############################################################################################
# This script is a revision of convert_csv_to_linkage.R, to convert a .csv file in chi
# square root table format into a .linkage file. The difference between this and
# convert_csv_to_linkage.R is that the input .csv is oriented with samples being represented
# in rows, and SNPs being represented by columns. I.e. each row is going to be really long,
# which is not recommended in a .csv file. This script should hardly, if ever, need to be
# used. This script is also not built into the LepMAP2pipeline, so if plan to use the
# pipeline, you should just transpose your input .csv file to list the samples as columns
# and the SNPs as rows before running the pipeline, in which case you won't need this script.
#
# Input .csv file format:
# The chi square root table consists of one row listing SNP ids, and a row for
# each sample. The data in the chi square root table is represented as either "H", "A", or
# something ("-" or "NA" for example, you can specify which you use in the
# missingDataIndicator input parameter below) to represent missing or irrelevant data. The
# most frequent genotype in an SNP is represented as H and the second most frequent type
# is represented as A.
#
#
# Information on .linkage file format (for use with Lep-MAP2 software):
#
# Data is tab-delimited.
#
# First 6 Columns contain "pedigree information":
# 1. Family ID (can be alphanumeric)
# 2. Individual ID (must be unique within family, can be alphanumeric)
# 3. Father ID (0 if father is not in family)
# 4. Mother ID (0 if mother is not in family)
# 5. Gender (0=unknown, 1=male, 2=female)
# 6. Affection status (0=unknown, 1=unaffected, 2=affected)
#
# Columns 7 and onward describe the phenotype data, separated by tabs. There
# are four different types of phenotype data supported by the LINKAGE format
# (Numbered Alleles, Binary Factors, Affection Status, Quantitative Traits),
# and the Lep-MAP2 documentation uses Numbered Alleles.
#
# With Numbered Alleles, each genotype is represented as a pair of numbers
# (eg: 1 2 2 2 1 1 1 2). Each number represents an allele, and 0
# represents an unknown allele.
#
# For our purposes (input for the Lep-MAP2 software), we are setting:
# H = "1 2", A = "1 1", B = "2 2", NA = "0 0".
#
# Lep-MAP2 documentation: https://sourceforge.net/p/lepmap2/wiki/browse_pages/
#
# Official LINKAGE file format documentation available:
# http://www.jurgott.org/linkage/LinkagePC.html#__RefHeading__137_1806185151
# http://www.jurgott.org/linkage/LinkageUser.pdf
#
#
############################################################################################
# Input Parameters:
#args <- commandArgs(trailingOnly = TRUE)
#path <- args[1]
# Working directory for this run.
# NOTE(review): hard-coded absolute path; re-enable the commandArgs()
# lines above to pass it on the command line instead.
path <- "/home/benrancourt/Downloads/r38-753+5092"
#MAF_CUTOFF <- args[2]
#MAF_CUTOFF <- as.double(MAF_CUTOFF)
# Keep strings as character (not factors) and print warnings as they occur.
options(stringsAsFactors = FALSE, warn = 1)
# Base name (no extension) of the input CSV / output .linkage file, and the
# family name written into column 1 of every LINKAGE row.
filenameMinusExtension <- "Data-753+5,092"
familyName <- "r38"
reportc <- read.csv(paste(path.expand(path),paste0(filenameMinusExtension,".csv"), sep="/"), check.names=FALSE) # using check.names=FALSE in case the column names have dashes (-) in them. This will prevent them from being converted to periods. However, a column name with a dash in it will not be able to be used as a variable name, so we'll have to refer to columns by their index if accessing them.
#reportc <- reportc[, -1]
# Column offset depending on whether the report carries a COMBINED column.
# NOTE(review): `s` is not used anywhere below in this chunk; presumably a
# later part of the script consumes it — confirm before removing.
if(!("COMBINED" %in% colnames(reportc)))
{
s <- 4
}else
{
s <- 5
}
# #######################################################################
message("converting the chi square report to .linkage format")
reportLinkageGenotypes <- reportc[, -1] # remove the sample id column
message(paste0("ncols: ",ncol(reportLinkageGenotypes)))
reportLinkageGenotypes <- rbind(parent1 = c("A"), parent2 = c("H"), reportLinkageGenotypes) # add two samples to use as parents
message(paste0("ncols: ",ncol(reportLinkageGenotypes)))
#reportLinkageGenotypes <- t(reportLinkageGenotypes) # transpose the report (so it's columns are now rows)
message(paste0("nrows: ",nrow(reportLinkageGenotypes)))
# Recode genotypes to Lep-MAP2 "Numbered Alleles" (see header comments):
# H (het) -> "1 2", A -> "1 1", B -> "2 2", missing -> "0 0".
reportLinkageGenotypes[reportLinkageGenotypes=="H"] <- "1 2"
reportLinkageGenotypes[reportLinkageGenotypes=="A"] <- "1 1"
reportLinkageGenotypes[reportLinkageGenotypes=="B"] <- "2 2"
reportLinkageGenotypes[is.na(reportLinkageGenotypes)] <- "0 0"
reportLinkageGenotypes[reportLinkageGenotypes=="-"] <- "0 0" # in case NA has already been substituted with "-"
# Prepend the six LINKAGE pedigree columns. Sample ids start at S-1 so the
# first two rows (the synthetic parents added above) can be renamed P1/P2
# below, leaving the real samples numbered S1, S2, ...
reportLinkage <- cbind(family = c(familyName), id = c(paste0("S",(1:nrow(reportLinkageGenotypes))-2)), fatherId = c("P1"), motherId = c("P2"), gender = c(0), affectionStatus = c(0), reportLinkageGenotypes)
reportLinkage[1,2:5] <- c("P1","0","0","1") # change id from S-1 to P1, no parents, male
reportLinkage[2,2:5] <- c("P2","0","0","2") # change id from S0 to P2, no parents, female
# Tab-separated, unquoted, no headers: the layout Lep-MAP2 expects.
write.table(reportLinkage, file= paste(path.expand(path),paste0(filenameMinusExtension,".linkage"), sep="/"), append=FALSE, quote=FALSE, sep="\t", row.names=FALSE, col.names=FALSE)
message("report_gen part 2 complete")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspre.R
\name{make_weights}
\alias{make_weights}
\title{Helper function to make weights for inspre.}
\usage{
make_weights(SE, max_med_ratio = NULL)
}
\arguments{
\item{SE}{DxD matrix of standard errors.}
\item{max_med_ratio}{Float > 1. Ratio of maximum weight to minimum non-zero
weight. Improves conditioning when some SEs are very small.}
}
\description{
Helper function to make weights for inspre.
}
| /man/make_weights.Rd | permissive | brielin/inspre | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/inspre.R
\name{make_weights}
\alias{make_weights}
\title{Helper function to make weights for inspre.}
\usage{
make_weights(SE, max_med_ratio = NULL)
}
\arguments{
\item{SE}{DxD matrix of standard errors.}
\item{max_med_ratio}{Float > 1. Ratio of maximum weight to minimum non-zero
weight. Improves conditioning when some SEs are very small.}
}
\description{
Helper function to make weights for inspre.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage.R
\name{get_contents}
\alias{get_contents}
\title{Get the contents of a simulator directory}
\usage{
get_contents(dir = ".", out_loc = "out")
}
\arguments{
\item{dir}{name of the directory where directory named "files" exists}
\item{out_loc}{a length-1 character vector that gives location
(relative to model's path) that method outputs are stored. This can be
useful for staying organized when multiple simulations are based on
the same Model and Draws objects. Usually this is just "out"}
}
\description{
Get the contents of a simulator directory
}
| /man/get_contents.Rd | no_license | zdk123/simulator | R | false | true | 638 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/manage.R
\name{get_contents}
\alias{get_contents}
\title{Get the contents of a simulator directory}
\usage{
get_contents(dir = ".", out_loc = "out")
}
\arguments{
\item{dir}{name of the directory where directory named "files" exists}
\item{out_loc}{a length-1 character vector that gives location
(relative to model's path) that method outputs are stored. This can be
useful for staying organized when multiple simulations are based on
the same Model and Draws objects. Usually this is just "out"}
}
\description{
Get the contents of a simulator directory
}
|
library(jsonlite)
# Store the parsed JSON message in the global environment so that
# getFlatJSON() can flatten it later.
setJSONMessageToFlatten <- function(message) {
  # Equivalent to `jsonMessage <<- message` for a top-level definition:
  # the superassignment would end up in the global environment anyway.
  assign("jsonMessage", message, envir = globalenv())
}
# Flatten the most recent JSON message (stored by setJSONMessageToFlatten)
# into a single-row data frame, cache it in the global `dataFrame`, and
# return it.
#
# NOTE(review): the active column-name vector below has 28 entries and
# contains duplicated names ("confidence", "score_tag", "agreement",
# "sentimented_entity_list_form"); it only works when the flattened
# message yields exactly 28 values — confirm against the API response.
getFlatJSON <- function(){
  # unlist() flattens the nested list; matrix(nrow=1) lays the values out
  # as a single row. NOTE(review): `T` is the reassignable alias of TRUE.
  df <- data.frame(matrix(unlist(jsonMessage), nrow=1, byrow=T),stringsAsFactors=FALSE)
  # Earlier, wider column layout kept for reference:
  #colnames(df) <- c("code","msg","credits","remaining_credits","model","score_tag","agreement","subjectivity",
  #                  "confidence","irony","sentence_list_text","inip","endp","bop","confidence","score_tag","agreement",
  #                  "segment_list_text","segment_list_segment_type","segment_list_inip","segment_list_endp",
  #                  "segment_list_confidence", "segment_list_score_tag","sentimented_entity_list_form","sentimented_entity_list_form",
  #                  "sentimented_entity_list_id","sentimented_entity_list_variant","sentimented_entity_list_inip","sentimented_entity_list_endp",
  #                  "sentimented_entity_list_type","sentimented_entity_list_type","sentimented_entity_list_score_tag")
  colnames(df) <- c("code","msg","credits","remaining_credits","model","score_tag","agreement","subjectivity",
                    "confidence","irony","sentence_list_text","inip","endp","bop","confidence","score_tag","agreement",
                    "segment_list_text","segment_list_segment_type","segment_list_inip","segment_list_endp",
                    "segment_list_confidence", "segment_list_score_tag","sentimented_entity_list_form","sentimented_entity_list_form",
                    "sentimented_entity_list_id","sentimented_entity_list_variant","sentimented_entity_list_inip")
  # Keep only the first 10 (document-level) fields: code ... irony.
  dataFrame <<- df[,1:10]
  ##dataFrame <<- df[,c("sentence_list_text","msg", "score_tag", "confidence","irony","remaining_credits")]
  return(dataFrame)
}
# Export the flattened sentiment data frame produced by getFlatJSON().
#
# csv: when TRUE (default), write the global `dataFrame` to
#      "sentiment_analysis.csv" in the working directory.
#      BUGFIX: this flag was previously ignored and the file was always
#      written; it is now honoured, so callers can fetch the data
#      without touching disk.
# Returns the global `dataFrame`, invisibly.
getJSONDS <- function(csv = TRUE) {
  if (isTRUE(csv)) {
    write.csv(file = "sentiment_analysis.csv", x = dataFrame)
  }
  invisible(dataFrame)
}
| /src/json-flattener.R | no_license | Elhios1982/mdc.sa | R | false | false | 1,807 | r | library(jsonlite)
# Store the parsed JSON message in the global environment so that
# getFlatJSON() can flatten it later.
setJSONMessageToFlatten <- function(message) {
  # Equivalent to `jsonMessage <<- message` for a top-level definition:
  # the superassignment would end up in the global environment anyway.
  assign("jsonMessage", message, envir = globalenv())
}
# Flatten the most recent JSON message (stored by setJSONMessageToFlatten)
# into a single-row data frame, cache it in the global `dataFrame`, and
# return it.
#
# NOTE(review): the active column-name vector below has 28 entries and
# contains duplicated names ("confidence", "score_tag", "agreement",
# "sentimented_entity_list_form"); it only works when the flattened
# message yields exactly 28 values — confirm against the API response.
getFlatJSON <- function(){
  # unlist() flattens the nested list; matrix(nrow=1) lays the values out
  # as a single row. NOTE(review): `T` is the reassignable alias of TRUE.
  df <- data.frame(matrix(unlist(jsonMessage), nrow=1, byrow=T),stringsAsFactors=FALSE)
  # Earlier, wider column layout kept for reference:
  #colnames(df) <- c("code","msg","credits","remaining_credits","model","score_tag","agreement","subjectivity",
  #                  "confidence","irony","sentence_list_text","inip","endp","bop","confidence","score_tag","agreement",
  #                  "segment_list_text","segment_list_segment_type","segment_list_inip","segment_list_endp",
  #                  "segment_list_confidence", "segment_list_score_tag","sentimented_entity_list_form","sentimented_entity_list_form",
  #                  "sentimented_entity_list_id","sentimented_entity_list_variant","sentimented_entity_list_inip","sentimented_entity_list_endp",
  #                  "sentimented_entity_list_type","sentimented_entity_list_type","sentimented_entity_list_score_tag")
  colnames(df) <- c("code","msg","credits","remaining_credits","model","score_tag","agreement","subjectivity",
                    "confidence","irony","sentence_list_text","inip","endp","bop","confidence","score_tag","agreement",
                    "segment_list_text","segment_list_segment_type","segment_list_inip","segment_list_endp",
                    "segment_list_confidence", "segment_list_score_tag","sentimented_entity_list_form","sentimented_entity_list_form",
                    "sentimented_entity_list_id","sentimented_entity_list_variant","sentimented_entity_list_inip")
  # Keep only the first 10 (document-level) fields: code ... irony.
  dataFrame <<- df[,1:10]
  ##dataFrame <<- df[,c("sentence_list_text","msg", "score_tag", "confidence","irony","remaining_credits")]
  return(dataFrame)
}
# Export the flattened sentiment data frame produced by getFlatJSON().
#
# csv: when TRUE (default), write the global `dataFrame` to
#      "sentiment_analysis.csv" in the working directory.
#      BUGFIX: this flag was previously ignored and the file was always
#      written; it is now honoured, so callers can fetch the data
#      without touching disk.
# Returns the global `dataFrame`, invisibly.
getJSONDS <- function(csv = TRUE) {
  if (isTRUE(csv)) {
    write.csv(file = "sentiment_analysis.csv", x = dataFrame)
  }
  invisible(dataFrame)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComputeShpilkinMethod.R,
% R/ComputeShpilkinMethod2.R
\name{ComputeShpilkinMethod}
\alias{ComputeShpilkinMethod}
\title{Shpilkin's Method}
\usage{
ComputeShpilkinMethod(data, Candidates, CandidatesText = NULL, MainCandidate, TotalReg, TotalVotes=NULL,
Level = NULL, Methodmax = "M1", WindowSize = 5, MaxtThreshold = 0.8, FigureName, Colors = NULL)
ComputeShpilkinMethod(data, Candidates, CandidatesText = NULL, MainCandidate, TotalReg, TotalVotes=NULL,
Level = NULL, Methodmax = "M1", WindowSize = 5, MaxtThreshold = 0.8, FigureName, Colors = NULL)
}
\arguments{
\item{data}{electoral data.}
\item{Candidates}{vector of variable names for all candidates/parties participated in the election}
\item{CandidatesText}{vector of candidates/parties' names participated in the election used to draw the figure}
\item{MainCandidate}{variable name for main/incumbent candidate}
\item{TotalReg}{variable name for the total number of eligible voters}
\item{TotalVotes}{variable name for the total number of ballots cast (if omitted, computed based on the votes for Candidates)}
\item{Level}{variable name depicting the level of analysis ("National", i.e. whole dataset by default)}
\item{Methodmax}{clean peak search
\itemize{
\item M0 - absolute clean peak search on the left handside from official turnout
\item M1 - absolute clean peak search on the left handside from official turnout with k-means clustering
\item M2 - relative search on the left handside from official turnout within a range defined by WindowSize
}}
\item{WindowSize}{define WindowSize for M0 and M1. Algorithm searches for max value change within prespecified WindowSize (by default WindowSize=5\%)}
\item{MaxtThreshold}{anomalous turnout threshold (by default 0.8)}
\item{FigureName}{figure's name}
\item{Colors}{vector of colors per each candidate/party (Colors=NULL, i.e. randomly generated by default)}
}
\value{
list containing results of analysis
\itemize{
\item list_graphs - list of graphs
\item stats_summary - table with results for analysis of the whole dataset (for Level!=NULL: data are also
summed across the units, aggregation error is computed)
\item Level - external parameter
\item creationdate - date/time of analysis
list_graphs = list_graphs, stats_summary = stats_table, stats_level=stats_level
}
list containing results of analysis
\itemize{
\item list_graphs - list of graphs
\item stats_summary - table with results for analysis of the whole dataset (for Level!=NULL: data are also
summed across the units, aggregation error is computed)
\item Level - external parameter
\item creationdate - date/time of analysis
list_graphs = list_graphs, stats_summary = stats_table, stats_level=stats_level
}
}
\description{
This function implements the revised version of Shpilkin's method (NB! It doesn't replicate original method fully).
This function implements the revised version of Shpilkin's method (NB! It doesn't replicate original method fully).
}
\examples{
library(EFToolkit)
dat<-read.csv(system.file("ruspres2018.csv", package="EFToolkit"))
res<-ComputeShpilkinMethod(dat, Candidates=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8" ),
CandidatesText=c("Baburin", "Grudinin", "Zhirinovsky", "Putin", "Sobchak",
"Suraikin", "Titov", "Yavlinsky"),
MainCandidate="P4",
TotalReg="NVoters",
TotalVotes=c('C9', 'C10'),
Methodmax="M1",
FigureName="Russian Presidential Elections, 2018",
Level="region",
MaxtThreshold=0.85)
library(EFToolkit)
dat<-read.csv(system.file("ruspres2018.csv", package="EFToolkit"))
res<-ComputeShpilkinMethod(dat, Candidates=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8" ),
CandidatesText=c("Baburin", "Grudinin", "Zhirinovsky", "Putin", "Sobchak",
"Suraikin", "Titov", "Yavlinsky"),
MainCandidate="P4",
TotalReg="NVoters",
TotalVotes=c('C9', 'C10'),
Methodmax="M1",
FigureName="Russian Presidential Elections, 2018",
Level="region",
MaxtThreshold=0.85)
}
| /man/ComputeShpilkinMethod.Rd | no_license | rdavis27/EFToolkit | R | false | true | 4,492 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ComputeShpilkinMethod.R,
% R/ComputeShpilkinMethod2.R
\name{ComputeShpilkinMethod}
\alias{ComputeShpilkinMethod}
\title{Shpilkin's Method}
\usage{
ComputeShpilkinMethod(data, Candidates, CandidatesText = NULL, MainCandidate, TotalReg, TotalVotes=NULL,
Level = NULL, Methodmax = "M1", WindowSize = 5, MaxtThreshold = 0.8, FigureName, Colors = NULL)
ComputeShpilkinMethod(data, Candidates, CandidatesText = NULL, MainCandidate, TotalReg, TotalVotes=NULL,
Level = NULL, Methodmax = "M1", WindowSize = 5, MaxtThreshold = 0.8, FigureName, Colors = NULL)
}
\arguments{
\item{data}{electoral data.}
\item{Candidates}{vector of variable names for all candidates/parties participated in the election}
\item{CandidatesText}{vector of candidates/parties' names participated in the election used to draw the figure}
\item{MainCandidate}{variable name for main/incumbent candidate}
\item{TotalReg}{variable name for the total number of eligible voters}
\item{TotalVotes}{variable name for the total number of ballots cast (if omitted, computed based on the votes for Candidates)}
\item{Level}{variable name depicting the level of analysis ("National", i.e. whole dataset by default)}
\item{Methodmax}{clean peak search
\itemize{
\item M0 - absolute clean peak search on the left handside from official turnout
\item M1 - absolute clean peak search on the left handside from official turnout with k-means clustering
\item M2 - relative search on the left handside from official turnout within a range defined by WindowSize
}}
\item{WindowSize}{define WindowSize for M0 and M1. Algorithm searches for max value change within prespecified WindowSize (by default WindowSize=5\%)}
\item{MaxtThreshold}{anomalous turnout threshold (by default 0.8)}
\item{FigureName}{figure's name}
\item{Colors}{vector of colors per each candidate/party (Colors=NULL, i.e. randomly generated by default)}
}
\value{
list containing results of analysis
\itemize{
\item list_graphs - list of graphs
\item stats_summary - table with results for analysis of the whole dataset (for Level!=NULL: data are also
summed across the units, aggregation error is computed)
\item Level - external parameter
\item creationdate - date/time of analysis
list_graphs = list_graphs, stats_summary = stats_table, stats_level=stats_level
}
list containing results of analysis
\itemize{
\item list_graphs - list of graphs
\item stats_summary - table with results for analysis of the whole dataset (for Level!=NULL: data are also
summed across the units, aggregation error is computed)
\item Level - external parameter
\item creationdate - date/time of analysis
list_graphs = list_graphs, stats_summary = stats_table, stats_level=stats_level
}
}
\description{
This function implements the revised version of Shpilkin's method (NB! It doesn't replicate original method fully).
This function implements the revised version of Shpilkin's method (NB! It doesn't replicate original method fully).
}
\examples{
library(EFToolkit)
dat<-read.csv(system.file("ruspres2018.csv", package="EFToolkit"))
res<-ComputeShpilkinMethod(dat, Candidates=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8" ),
CandidatesText=c("Baburin", "Grudinin", "Zhirinovsky", "Putin", "Sobchak",
"Suraikin", "Titov", "Yavlinsky"),
MainCandidate="P4",
TotalReg="NVoters",
TotalVotes=c('C9', 'C10'),
Methodmax="M1",
FigureName="Russian Presidential Elections, 2018",
Level="region",
MaxtThreshold=0.85)
library(EFToolkit)
dat<-read.csv(system.file("ruspres2018.csv", package="EFToolkit"))
res<-ComputeShpilkinMethod(dat, Candidates=c("P1", "P2", "P3", "P4", "P5", "P6", "P7", "P8" ),
CandidatesText=c("Baburin", "Grudinin", "Zhirinovsky", "Putin", "Sobchak",
"Suraikin", "Titov", "Yavlinsky"),
MainCandidate="P4",
TotalReg="NVoters",
TotalVotes=c('C9', 'C10'),
Methodmax="M1",
FigureName="Russian Presidential Elections, 2018",
Level="region",
MaxtThreshold=0.85)
}
|
library(stats)
library(float, quietly=TRUE)
library(rbenchmark)
# Benchmark configuration: fixed seed for reproducible data, 5
# replications, and the rbenchmark result columns to report.
set.seed(1234)
reps = 5
cols <- c("test", "replications", "elapsed", "relative")
# A 7500 x 500 double-precision matrix and its float32 copy (float::fl).
m = 7500
n = 500
x = matrix(rnorm(m*n), m, n)
s = fl(x)
# Covariance via the crossproduct identity cov(X) = Xc' Xc / (n - 1),
# where Xc is the column-centered input. The max(1L, ...) guard avoids
# division by zero for single-row input.
cov_spm <- function(x) {
  centered <- scale(x, center = TRUE, scale = FALSE)
  crossprod(centered) / max(1L, nrow(x) - 1)
}
# Correlation via the crossproduct of the standardized (centered and
# unit-variance) columns: cor(X) = Zs' Zs / (n - 1).
cor_spm <- function(x) {
  standardized <- scale(x, center = TRUE, scale = TRUE)
  crossprod(standardized) / max(1L, nrow(x) - 1)
}
# Compare the crossprod-based implementations against stats::cov()/cor()
# on both the double-precision (x) and float32 (s) inputs.
cat("##### covariance\n")
benchmark(cov_spm(x), cov_spm(s), cov(x), replications=reps, columns=cols)
cat("\n##### correlation\n")
benchmark(cor_spm(x), cor_spm(s), cor(x), replications=reps, columns=cols)
| /inst/benchmarks/cov.r | permissive | wrathematics/float | R | false | false | 592 | r | library(stats)
library(float, quietly=TRUE)
library(rbenchmark)
# Benchmark configuration: fixed seed for reproducible data, 5
# replications, and the rbenchmark result columns to report.
set.seed(1234)
reps = 5
cols <- c("test", "replications", "elapsed", "relative")
# A 7500 x 500 double-precision matrix and its float32 copy (float::fl).
m = 7500
n = 500
x = matrix(rnorm(m*n), m, n)
s = fl(x)
# Covariance via the crossproduct identity cov(X) = Xc' Xc / (n - 1),
# where Xc is the column-centered input. The max(1L, ...) guard avoids
# division by zero for single-row input.
cov_spm <- function(x) {
  centered <- scale(x, center = TRUE, scale = FALSE)
  crossprod(centered) / max(1L, nrow(x) - 1)
}
# Correlation via the crossproduct of the standardized (centered and
# unit-variance) columns: cor(X) = Zs' Zs / (n - 1).
cor_spm <- function(x) {
  standardized <- scale(x, center = TRUE, scale = TRUE)
  crossprod(standardized) / max(1L, nrow(x) - 1)
}
# Compare the crossprod-based implementations against stats::cov()/cor()
# on both the double-precision (x) and float32 (s) inputs.
cat("##### covariance\n")
benchmark(cov_spm(x), cov_spm(s), cov(x), replications=reps, columns=cols)
cat("\n##### correlation\n")
benchmark(cor_spm(x), cor_spm(s), cor(x), replications=reps, columns=cols)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VegXSurfaceTypeDefinition.R
\docType{class}
\name{VegXSurfaceTypeDefinition-class}
\alias{VegXSurfaceTypeDefinition-class}
\title{S4 class for Veg-X surface type definition}
\description{
S4 class for Veg-X surface type definition
}
\section{Slots}{
\describe{
\item{\code{method}}{An object of class \code{\linkS4class{VegXMethodDefinition}}.}
\item{\code{surfaceTypes}}{A list of surface types.}
}}
\examples{
showClass("VegXSurfaceTypeDefinition")
}
| /man/VegXSurfaceTypeDefinition-class.Rd | no_license | Heterocephalus/VegX | R | false | true | 535 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VegXSurfaceTypeDefinition.R
\docType{class}
\name{VegXSurfaceTypeDefinition-class}
\alias{VegXSurfaceTypeDefinition-class}
\title{S4 class for Veg-X surface type definition}
\description{
S4 class for Veg-X surface type definition
}
\section{Slots}{
\describe{
\item{\code{method}}{An object of class \code{\linkS4class{VegXMethodDefinition}}.}
\item{\code{surfaceTypes}}{A list of surface types.}
}}
\examples{
showClass("VegXSurfaceTypeDefinition")
}
|
# Directory holding the per-monitor specdata CSV files ("001.csv" ...
# "332.csv") and the monitor ids to process, in descending order.
# NOTE(review): hard-coded absolute path; consider a command-line arg.
dir <- "/home/wayne/Documents/Code/R/specdata/"
id <- 332:1
# Zero-pad a monitor id to a fixed 3-character string, e.g. 7 -> "007".
# Generalized with sprintf(): one code path for any 1-3 digit id (the
# old nchar() branches silently returned NULL for other widths).
# NOTE(review): this masks base::format(); a name like pad_id() would
# be safer, but the name is kept for compatibility with callers.
format <- function(x) {
  sprintf("%03d", as.integer(x))
}
# Top-level accumulators read by complete() on its first loop iteration;
# the c() calls inside the function then grow *local* copies, so these
# effectively just provide empty starting values.
# NOTE(review): col1 is character() but accumulates numeric ids.
col1 <- character()
col2 <- numeric()
# Count the complete (no-NA) rows in each monitor's CSV file.
#
# dir: directory containing the "NNN.csv" files (trailing slash expected,
#      since file paths are built by plain string concatenation).
# id : vector of monitor ids to process.
#
# Prints one "id nobs" line per file as it goes, then prints and
# (invisibly) returns a data frame with columns id and nobs.
#
# BUGFIX: the old code piped the two columns through reshape2::melt(),
# which stacked ids and counts into a single value column that was then
# mislabelled as id/nobs. The data frame is now built directly, and the
# function no longer leans on the global col1/col2 accumulators.
complete <- function(dir, id) {
  nobs <- integer(length(id))               # preallocate instead of growing
  for (k in seq_along(id)) {
    data <- read.csv(file = paste(dir, format(id[k]), ".csv", sep = ""),
                     header = TRUE, sep = ",")
    nobs[k] <- sum(complete.cases(data))
    print(paste(id[k], nobs[k], sep = " "))
  }
  df <- data.frame(id = id, nobs = nobs)
  print(df)                                  # print() returns df invisibly
}
# complete(dir,id)
# Sample 10 monitors reproducibly and report their complete-case counts.
set.seed(42)
cc <- complete(dir, 332:1)
use <- sample(332, 10)
# BUGFIX: cc has only two columns (id, nobs), so cc[use, 3] was an
# out-of-bounds column index; select the counts by name instead.
print(cc[use, "nobs"])
| /specdata/prob2draft.R | no_license | datafyre/RCode | R | false | false | 976 | r |
dir <- "/home/wayne/Documents/Code/R/specdata/"
id <- 332:1
format <- function(x) {
if (nchar(x) == 1){
return (paste("00",x,sep=""))
}
if (nchar(x) == 2) {
return (paste("0",x,sep=""))
}
if (nchar(x) == 3) {
return (paste("",x,sep=""))
}
}
col1 <- character()
col2 <- numeric()
complete <- function(dir, id) {
for (i in id) {
data <- read.csv(file=paste(dir,format(i),".csv",sep=""),header=TRUE,sep=",")
data2 <- data.frame(data[complete.cases(data),])
rows <- nrow(data2)
col1 <- c(col1,i)
col2 <- c(col2,rows)
print(paste(i,rows,sep=" "))
}
#df <- data.frame(col1,col2, stringsAsFactors = FALSE)
require(reshape2)
df <- melt(data.frame(col1,col2))
colnames(df) <- c("id","nobs")
print(df)
}
# complete(dir,id)
set.seed(42)
cc <- complete(dir, 332:1)
use <- sample(332, 10)
print(cc[use, 3 ])
|
## Two functions coded below are used to create a special "matrix" object
## compute the inverse of the same and cache its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of accessors closing over `x` (the matrix) and `inv`
## (the cached inverse, NULL until computed by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache when the matrix changes
    },
    get = function() x,
    setmatrix = function(solve) inv <<- solve,
    getmatrix = function() inv
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cachesolve should retrieve the inverse from the cache.
## Compute (or retrieve from cache) the inverse of the special "matrix"
## object produced by makeCacheMatrix(). Extra arguments are forwarded
## to solve(). Emits "getting cached data" only on a cache hit.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)   # store for subsequent calls
    return(inv)
  }
  message("getting cached data")
  cached
}
| /cachematrix.R | no_license | arindam671/ProgrammingAssignment2 | R | false | false | 981 | r | ## Two functions coded below are used to create a special "matrix" object
## compute the inverse of the same and cache its inverse.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that can cache its inverse.
## Returns a list of accessors closing over `x` (the matrix) and `inv`
## (the cached inverse, NULL until computed by cacheSolve()).
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL
  list(
    set = function(y) {
      x <<- y
      inv <<- NULL  # invalidate the cache when the matrix changes
    },
    get = function() x,
    setmatrix = function(solve) inv <<- solve,
    getmatrix = function() inv
  )
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above. If the inverse has already
## been calculated (and the matrix has not changed), then the
## cachesolve should retrieve the inverse from the cache.
## Compute (or retrieve from cache) the inverse of the special "matrix"
## object produced by makeCacheMatrix(). Extra arguments are forwarded
## to solve(). Emits "getting cached data" only on a cache hit.
cacheSolve <- function(x, ...) {
  cached <- x$getmatrix()
  if (is.null(cached)) {
    inv <- solve(x$get(), ...)
    x$setmatrix(inv)   # store for subsequent calls
    return(inv)
  }
  message("getting cached data")
  cached
}
|
# Packages required by the deduplication pipeline. Install any that are
# missing, then load them. This is idempotent, so the script can be run
# repeatedly without re-downloading anything (previously the bare
# install.packages() calls had to be run once and then commented out,
# and "readr" was loaded below but never installed at all).
needed <- c("dplyr", "stringr", "data.table", "pracma", "readr")
for (pkg in needed) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library("dplyr")
library("stringr")
library("data.table")
library("pracma")
library("readr")
# Cutoff: minimum similarity score a candidate merge must exceed to be
# accepted.
cutoff <- 0.9
# Load the data-preparation and person-matching routines.
source("dedup_data_lyme.R")
source("dedup_person_lyme.R")
# Time the merge run.
ptm <- proc.time()
person_merge(cutoff)
proc.time() - ptm
install.packages("stringr")
install.packages("data.table")
install.packages("pracma")
#Only run code above when running the code for the first time
#and you have not used any of the packages above before.
#After running install.packages code ONCE, run from here until the
#comment below depecifying to run all code up to that point
library("dplyr")
library("stringr")
library("data.table")
library("pracma")
library("readr")
#Cutoff: what percent you want the merges to be above
cutoff <- 0.9
source("dedup_data_lyme.R")
source("dedup_person_lyme.R")
###Run code above here before running the program###
ptm <- proc.time()
person_merge(cutoff)
proc.time() -ptm
|
\name{ADDT-package}
\alias{ADDT-package}
\alias{ADDT}
\docType{package}
\title{
Accelerated Destructive Degradation Testing
}
\description{
Accelerated destructive degradation tests (ADDT) are often used to collect necessary data for assessing the long-term properties of polymeric materials. Based on the collected data, a thermal index (TI) is estimated. The TI can be useful for material rating and comparisons. This package implements the traditional method based on the least-squares method, the parametric method based on maximum likelihood estimation, and the semiparametric method based on spline methods, and their corresponding methods for estimating TI for polymeric materials. The traditional approach is a two-step approach that is currently used in industrial standards, while the parametric method is widely used in the statistical literature. The semiparametric method is newly developed. The parametric and semiparametric approaches allow one to do statistical inference such as quantifying uncertainties in estimation, hypothesis testing, and predictions. Publicly available datasets are provided for illustrations. More details can be found in Jin et al. (2017).
}
\details{
\tabular{ll}{
Package: \tab ADDT\cr
Type: \tab Package\cr
Version: \tab 2.0\cr
Date: \tab 2016-10-08\cr
License: \tab GPL-2\cr
}
}
\author{
Yili Hong, Yimeng Xie, Zhongnan Jin, and Caleb King
Maintainer: Yili Hong <yilihong@vt.edu>
}
\references{
C. B. King, Y. Xie, Y. Hong, J. H. Van Mullekom, S. P. DeHart, and P. A. DeFeo, ``A comparison of
traditional and maximum likelihood approaches to estimating thermal indices for polymeric
materials,'' Journal of Quality Technology, in press, 2016.
L. A. Escobar, W. Q. Meeker, D. L. Kugler, and L. L. Kramer, ``Accelerated destructive degradation
tests: Data, models, and analysis,'' in Mathematical and Statistical Methods in Reliability,
B. H. Lindqvist and K. A. Doksum, Eds. River Edge, NJ: World Scientific Publishing
Company, 2003, ch. 21.
M. Li and N. Doganaksoy, ``Batch variability in accelerated-degradation testing,'' Journal of
Quality Technology, vol. 46, pp. 171-180, 2014.
Y. Xie, C. B. King, Y. Hong, and Q. Yang, ``Semi-parametric models for accelerated destructive
degradation test data analysis,'' Preprint: arXiv:1512.03036, 2015.
Y. Xie, Z. Jin, Y. Hong, and J. H. Van Mullekom, ``Statistical methods for thermal index estimation based on accelerated destructive degradation test data,'' in Statistical Modeling for Degradation Data, D. G. Chen, Y. L. Lio, H. K. T. Ng, and T. R. Tsai, Eds. NY: New York: Springer, 2017, ch. 12.
Z. Jin, Y. Xie, Y. Hong, and J. H. Van Mullekom, ``ADDT: An R package for analysis of accelerated destructive degradation test data,'' in Statistical Modeling for Degradation Data, D. G. Chen, Y. L. Lio, H. K. T. Ng, and T. R. Tsai, Eds. NY: New York: Springer, 2017, ch. 14.
}
\keyword{package}
| /man/ADDT-package.Rd | no_license | cran/ADDT | R | false | false | 2,893 | rd | \name{ADDT-package}
\alias{ADDT-package}
\alias{ADDT}
\docType{package}
\title{
Accelerated Destructive Degradation Testing
}
\description{
Accelerated destructive degradation tests (ADDT) are often used to collect necessary data for assessing the long-term properties of polymeric materials. Based on the collected data, a thermal index (TI) is estimated. The TI can be useful for material rating and comparisons. This package implements the traditional method based on the least-squares method, the parametric method based on maximum likelihood estimation, and the semiparametric method based on spline methods, and their corresponding methods for estimating TI for polymeric materials. The traditional approach is a two-step approach that is currently used in industrial standards, while the parametric method is widely used in the statistical literature. The semiparametric method is newly developed. The parametric and semiparametric approaches allow one to do statistical inference such as quantifying uncertainties in estimation, hypothesis testing, and predictions. Publicly available datasets are provided for illustrations. More details can be found in Jin et al. (2017).
}
\details{
\tabular{ll}{
Package: \tab ADDT\cr
Type: \tab Package\cr
Version: \tab 2.0\cr
Date: \tab 2016-10-08\cr
License: \tab GPL-2\cr
}
}
\author{
Yili Hong, Yimeng Xie, Zhongnan Jin, and Caleb King
Maintainer: Yili Hong <yilihong@vt.edu>
}
\references{
C. B. King, Y. Xie, Y. Hong, J. H. Van Mullekom, S. P. DeHart, and P. A. DeFeo, ``A comparison of
traditional and maximum likelihood approaches to estimating thermal indices for polymeric
materials,'' Journal of Quality Technology, in press, 2016.
L. A. Escobar, W. Q. Meeker, D. L. Kugler, and L. L. Kramer, ``Accelerated destructive degradation
tests: Data, models, and analysis,'' in Mathematical and Statistical Methods in Reliability,
B. H. Lindqvist and K. A. Doksum, Eds. River Edge, NJ: World Scientific Publishing
Company, 2003, ch. 21.
M. Li and N. Doganaksoy, ``Batch variability in accelerated-degradation testing,'' Journal of
Quality Technology, vol. 46, pp. 171-180, 2014.
Y. Xie, C. B. King, Y. Hong, and Q. Yang, ``Semi-parametric models for accelerated destructive
degradation test data analysis,'' Preprint: arXiv:1512.03036, 2015.
Y. Xie, Z. Jin, Y. Hong, and J. H. Van Mullekom, ``Statistical methods for thermal index estimation based on accelerated destructive degradation test data,'' in Statistical Modeling for Degradation Data, D. G. Chen, Y. L. Lio, H. K. T. Ng, and T. R. Tsai, Eds. NY: New York: Springer, 2017, ch. 12.
Z. Jin, Y. Xie, Y. Hong, and J. H. Van Mullekom, ``ADDT: An R package for analysis of accelerated destructive degradation test data,'' in Statistical Modeling for Degradation Data, D. G. Chen, Y. L. Lio, H. K. T. Ng, and T. R. Tsai, Eds. NY: New York: Springer, 2017, ch. 14.
}
\keyword{package}
|
% file sn/man/dp2cp.Rd
% This file is a component of the package 'sn' for R
% copyright (C) 2013 Adelchi Azzalini
%---------------------
\name{dp2cp}
\alias{dp2cp}
\alias{cp2dp}
\alias{dp2op}
\alias{op2dp}
\title{Conversion between parametrizations of a skew-elliptical distribution}
\description{
Convert direct parameters (\acronym{DP}) to centred parameters
(\acronym{CP}) of a skew-elliptical distribution and \emph{vice versa}.}
\usage{
dp2cp(dp, family, object = NULL, cp.type = "proper", upto = NULL)
cp2dp(cp, family)
dp2op(dp, family)
op2dp(op, family)
}
\arguments{
\item{dp}{a vector (in the univariate case) or a list (in the multivariate
case) as described in \code{\link{makeSECdistr}}; see \sQuote{Background
and Details} for an extented form of usage.}
\item{cp}{a vector or a list, in agreement with \code{dp} as for type and
dimension.}
\item{op}{a vector or a list, in agreement with \code{dp} as for type and
dimension.}
\item{family}{a character string with the family acronym,
as described in \code{\link{makeSECdistr}}, except that family
\code{"ESN"} is not implemented.}
\item{object}{optionally, an S4 object of class \code{SECdistrUv} or
\code{SECdistrMv}, as produced by \code{\link{makeSECdistr}}
(default value: \code{NULL}).
If this argument is not \code{NULL}, then \code{family} and \code{dp}
must not be set.}
\item{cp.type}{character string, which has effect only if \code{family="ST"}
or \code{"SC"}, otherwise a warning message is generated. Possible values
are \kbd{"proper", "pseudo", "auto"}, which correspond to the \acronym{CP}
parameter set, their `pseudo-\acronym{CP}' version and an automatic
selection based on \code{nu>4}, where \code{nu} represents the degrees of
freedom of the \acronym{ST} distribution.}
\item{upto}{numeric value (in \code{1:length(dp)}, default=\code{NULL}) to
select how many \acronym{CP} components are computed.
Default value \code{upto=NULL} is equivalent to \code{length(dp)}.}
}
\value{For \code{dp2cp}, a matching vector (in the univariate case) or a list
(in the multivariate case) of \code{cp} parameters.
For \code{cp2dp} and \code{op2dp}, a similar object of \code{dp} parameters,
provided the set of input parameters is in the admissible region.
For \code{dp2op}, a similar set of \code{op} parameters.}
\section{Background}{For a description of the \acronym{DP}
parameters, see Section \sQuote{Details} of \code{\link{makeSECdistr}}. The
\acronym{CP} form of parameterization is cumulant-based. For a univariate
distribution, the \acronym{CP} components are the mean value (first cumulant),
the standard deviation (square root of the 2nd cumulant), the coefficient of
skewness (3rd standardized cumulant) and, for the \acronym{ST},
the coefficient of excess kurtosis (4th standardized cumulant).
For a multivariate distribution, there exists an extension based on the
same logic; its components represent the
vector mean value, the variance matrix, the vector of marginal coefficients of
skewness and, only for the \acronym{ST}, the Mardia's coefficient of excess
kurtosis. The pseudo-\acronym{CP} variant provides an `approximate form' of
\acronym{CP} when not all required cumulants exist; however, this parameter set
is not uniquely invertible to \acronym{DP}. The names of pseudo-\acronym{CP}
components printed in summary output are composed by adding a \code{~}
after the usual component name; for example, the first one is denoted
\code{mean~}.
Additional information is provided by Azzalini and Capitanio (2014).
Specifically, their Section 3.1.4 presents \acronym{CP} in the univariate
\acronym{SN} case, Section 4.3.4 \acronym{CP} for the \acronym{ST} case and
the `pseudo-\acronym{CP}' version. Section 5.2.3 presents the multivariate
extension for the \acronym{SN} distribution, Section 6.2.5 for the
multivariate \acronym{ST} case.
For a more detailed discussion, see Arellano-Valle & Azzalini (2013).
The \acronym{OP} parameterization is very similar to \acronym{DP}, from which
it differs only for the components which regulate dispersion (or scatter)
and slant. Its relevance lies essentially in the multivariate case, where
the components of the slant parameter can be interpreted component-wise and
remain unaffected if marginalization with respect to some other components
is performed.
In the multivariate \acronym{SN} case, the components of \acronym{OP}, denoted
\eqn{\xi, \Psi, \lambda}, are associated to the expression of the density
function (5.30) of Azzalini & Capitanio (2014); see pp.128--131 for more
information. In the univariate case, the slant component of \acronym{DP}
and the one of \acronym{OP} coincide, that is, \eqn{\alpha=\lambda}.
Parameter \eqn{\xi} and other parameters which may exist with other families
remain the same of the \acronym{DP} set. The term \acronym{OP} stands for
`original parameterization' since this is, up to a negligible difference,
the parameterization adopted by Azzalini & Dalla Valle (1996).
}
\section{Details}{
While any choice of the components of \acronym{DP} or \acronym{OP} is
admissible, this is not true for \acronym{CP}. An implication is that a
call to \code{cp2dp} may fail with an error message \code{"non-admissible CP"}
for certain input values. The most extreme case is represented by the
\acronym{SC} family, for which \acronym{CP} never exists; hence it makes
no sense to call \code{cp2dp} with \code{family="SC"}.
It is possible to call the functions with \code{dp} or \code{cp} having more
components than those expected for a given family as described above and in
\code{\link{makeSECdistr}}. In the univariate case, this means that \code{dp}
or \code{cp} can be vectors of longer length than indicated earlier. This
occurrence is interpreted in the sense that the additional components after
the first one are regarded as regression coefficients of a \code{selm} model,
and they are transferred unchanged to the matching components of the
transformed parameter set; the motivation is given in Section 3.1.4 of
Azzalini and Capitanio (2014). In the multivariate case, \code{dp[[1]]} and
\code{cp[[1]]} can be matrices instead of vectors; the rows beyond the first
one are transferred unchanged to \code{cp[[1]]} and \code{dp[[1]]},
respectively.
}
\references{
Arellano-Valle, R. B. and Azzalini, A. (2013, available on-line 12 June 2011).
The centred parameterization and related quantities of the skew-\emph{t}
distribution. \emph{J. Multiv. Analysis} \bold{113}, 73-90.
Azzalini, A. with the collaboration of Capitanio, A. (2014).
\emph{The Skew-Normal and Related Families}.
Cambridge University Press, IMS Monographs series.
Azzalini, A. and Dalla Valle, A. (1996).
The multivariate skew-normal distribution.
\emph{Biometrika} \bold{83}, 715--726.
}
\seealso{
\code{\link{makeSECdistr}}, \code{\link{summary.SECdistr}},
\code{\link{sn.cumulants}},
the \sQuote{Note} at \code{\link{summary.selm}} for the reason why
\acronym{CP} is the default parameterization in that function and in
related ones,
the \sQuote{Examples} at \code{\link{rmsn}} for use of the \acronym{CP}
parameterization
}
\examples{
# univariate case
cp <- dp2cp(c(1, 2222, 3333, 2, 3), "SN")
dp <- cp2dp(cp, "SN")
# notice that 2nd and 3rd component remain unchanged
#
# multivariate case
dp3 <- list(xi=1:3, Omega=toeplitz(1/(1:3)), alpha=c(-3, 8, 5), nu=6)
cp3 <- dp2cp(dp3, "ST")
dp3.back <- cp2dp(cp3, "ST")
#
op3 <- dp2op(dp3, "ST")
dp3back <- op2dp(op3,"ST")
}
\keyword{distribution}
| /man/dp2cp.Rd | no_license | Yuqi12222/sn | R | false | false | 7,593 | rd | % file sn/man/dp2cp.Rd
% This file is a component of the package 'sn' for R
% copyright (C) 2013 Adelchi Azzalini
%---------------------
\name{dp2cp}
\alias{dp2cp}
\alias{cp2dp}
\alias{dp2op}
\alias{op2dp}
\title{Conversion between parametrizations of a skew-elliptical distribution}
\description{
Convert direct parameters (\acronym{DP}) to centred parameters
(\acronym{CP}) of a skew-elliptical distribution and \emph{vice versa}.}
\usage{
dp2cp(dp, family, object = NULL, cp.type = "proper", upto = NULL)
cp2dp(cp, family)
dp2op(dp, family)
op2dp(op, family)
}
\arguments{
\item{dp}{a vector (in the univariate case) or a list (in the multivariate
case) as described in \code{\link{makeSECdistr}}; see \sQuote{Background
and Details} for an extented form of usage.}
\item{cp}{a vector or a list, in agreement with \code{dp} as for type and
dimension.}
\item{op}{a vector or a list, in agreement with \code{dp} as for type and
dimension.}
\item{family}{a character string with the family acronym,
as described in \code{\link{makeSECdistr}}, except that family
\code{"ESN"} is not implemented.}
\item{object}{optionally, an S4 object of class \code{SECdistrUv} or
\code{SECdistrMv}, as produced by \code{\link{makeSECdistr}}
(default value: \code{NULL}).
If this argument is not \code{NULL}, then \code{family} and \code{dp}
must not be set.}
\item{cp.type}{character string, which has effect only if \code{family="ST"}
or \code{"SC"}, otherwise a warning message is generated. Possible values
are \kbd{"proper", "pseudo", "auto"}, which correspond to the \acronym{CP}
parameter set, their `pseudo-\acronym{CP}' version and an automatic
selection based on \code{nu>4}, where \code{nu} represents the degrees of
freedom of the \acronym{ST} distribution.}
\item{upto}{numeric value (in \code{1:length(dp)}, default=\code{NULL}) to
select how many \acronym{CP} components are computed.
Default value \code{upto=NULL} is equivalent to \code{length(dp)}.}
}
\value{For \code{dp2cp}, a matching vector (in the univariate case) or a list
(in the multivariate case) of \code{cp} parameters.
For \code{cp2dp} and \code{op2dp}, a similar object of \code{dp} parameters,
provided the set of input parameters is in the admissible region.
For \code{dp2op}, a similar set of \code{op} parameters.}
\section{Background}{For a description of the \acronym{DP}
parameters, see Section \sQuote{Details} of \code{\link{makeSECdistr}}. The
\acronym{CP} form of parameterization is cumulant-based. For a univariate
distribution, the \acronym{CP} components are the mean value (first cumulant),
the standard deviation (square root of the 2nd cumulant), the coefficient of
skewness (3rd standardized cumulant) and, for the \acronym{ST},
the coefficient of excess kurtosis (4th standardized cumulant).
For a multivariate distribution, there exists an extension based on the
same logic; its components represent the
vector mean value, the variance matrix, the vector of marginal coefficients of
skewness and, only for the \acronym{ST}, the Mardia's coefficient of excess
kurtosis. The pseudo-\acronym{CP} variant provides an `approximate form' of
\acronym{CP} when not all required cumulants exist; however, this parameter set
is not uniquely invertible to \acronym{DP}. The names of pseudo-\acronym{CP}
components printed in summary output are composed by adding a \code{~}
after the usual component name; for example, the first one is denoted
\code{mean~}.
Additional information is provided by Azzalini and Capitanio (2014).
Specifically, their Section 3.1.4 presents \acronym{CP} in the univariate
\acronym{SN} case, Section 4.3.4 \acronym{CP} for the \acronym{ST} case and
the `pseudo-\acronym{CP}' version. Section 5.2.3 presents the multivariate
extension for the \acronym{SN} distribution, Section 6.2.5 for the
multivariate \acronym{ST} case.
For a more detailed discussion, see Arellano-Valle & Azzalini (2013).
The \acronym{OP} parameterization is very similar to \acronym{DP}, from which
it differs only for the components which regulate dispersion (or scatter)
and slant. Its relevance lies essentially in the multivariate case, where
the components of the slant parameter can be interpreted component-wise and
remain unaffected if marginalization with respect to some other components
is performed.
In the multivariate \acronym{SN} case, the components of \acronym{OP}, denoted
\eqn{\xi, \Psi, \lambda}, are associated to the expression of the density
function (5.30) of Azzalini & Capitanio (2014); see pp.128--131 for more
information. In the univariate case, the slant component of \acronym{DP}
and the one of \acronym{OP} coincide, that is, \eqn{\alpha=\lambda}.
Parameter \eqn{\xi} and other parameters which may exist with other families
remain the same of the \acronym{DP} set. The term \acronym{OP} stands for
`original parameterization' since this is, up to a negligible difference,
the parameterization adopted by Azzalini & Dalla Valle (1996).
}
\section{Details}{
While any choice of the components of \acronym{DP} or \acronym{OP} is
admissible, this is not true for \acronym{CP}. An implication is that a
call to \code{cp2dp} may fail with an error message \code{"non-admissible CP"}
for certain input values. The most extreme case is represented by the
\acronym{SC} family, for which \acronym{CP} never exists; hence it makes
no sense to call \code{cp2dp} with \code{family="SC"}.
It is possible to call the functions with \code{dp} or \code{cp} having more
components than those expected for a given family as described above and in
\code{\link{makeSECdistr}}. In the univariate case, this means that \code{dp}
or \code{cp} can be vectors of longer length than indicated earlier. This
occurrence is interpreted in the sense that the additional components after
the first one are regarded as regression coefficients of a \code{selm} model,
and they are transferred unchanged to the matching components of the
transformed parameter set; the motivation is given in Section 3.1.4 of
Azzalini and Capitanio (2014). In the multivariate case, \code{dp[[1]]} and
\code{cp[[1]]} can be matrices instead of vectors; the rows beyond the first
one are transferred unchanged to \code{cp[[1]]} and \code{dp[[1]]},
respectively.
}
\references{
Arellano-Valle, R. B. and Azzalini, A. (2013, available on-line 12 June 2011).
The centred parameterization and related quantities of the skew-\emph{t}
distribution. \emph{J. Multiv. Analysis} \bold{113}, 73-90.
Azzalini, A. with the collaboration of Capitanio, A. (2014).
\emph{The Skew-Normal and Related Families}.
Cambridge University Press, IMS Monographs series.
Azzalini, A. and Dalla Valle, A. (1996).
The multivariate skew-normal distribution.
\emph{Biometrika} \bold{83}, 715--726.
}
\seealso{
\code{\link{makeSECdistr}}, \code{\link{summary.SECdistr}},
\code{\link{sn.cumulants}},
the \sQuote{Note} at \code{\link{summary.selm}} for the reason why
\acronym{CP} is the default parameterization in that function and in
related ones,
the \sQuote{Examples} at \code{\link{rmsn}} for use of the \acronym{CP}
parameterization
}
\examples{
# univariate case
cp <- dp2cp(c(1, 2222, 3333, 2, 3), "SN")
dp <- cp2dp(cp, "SN")
# notice that 2nd and 3rd component remain unchanged
#
# multivariate case
dp3 <- list(xi=1:3, Omega=toeplitz(1/(1:3)), alpha=c(-3, 8, 5), nu=6)
cp3 <- dp2cp(dp3, "ST")
dp3.back <- cp2dp(cp3, "ST")
#
op3 <- dp2op(dp3, "ST")
dp3back <- op2dp(op3,"ST")
}
\keyword{distribution}
|
#Question 11
# Column names of the built-in 'airquality' data set.
colnames(airquality)
#Q12
# First two rows.
airquality[c(1,2),]
#Q13
# Number of observations (153 rows).
nrow(airquality)
#Q14
# Last two rows -- first hard-coded, then computed from nrow().
airquality[c(152,153),] #from the answer to Q13, which was 153
airquality[c((nrow(airquality)-1),nrow(airquality)),]
#Q15
# Three equivalent ways of reading the Ozone value in row 47.
airquality[47,1]
airquality[47,"Ozone"]
airquality$Ozone[47]
#Q16
# Earlier, abandoned attempts at counting NA values, kept for reference.
#number_of_na <- function(x = (is.na(airquality[,"Ozone"]))) {
#  good <- (TRUE == x)
#length(good)
#}
#isTRUE(is.na(airquality[,"Ozone"]))
# Logical vector marking which Ozone readings are missing.
is.na(airquality[,"Ozone"])
dataFrameNA <- function(airquality, column = "Ozone") {
  # Count the NA entries in one column of a data frame.
  #
  # 'airquality': any data.frame (parameter name kept for compatibility
  #               with the original exercise).
  # 'column'    : name of the column to inspect; the default "Ozone"
  #               preserves the original hard-coded behavior, and the
  #               new parameter answers the author's question about
  #               making the function generic.
  # Returns the number of NA values in that column.
  #
  # Vectorized rewrite of the original element-by-element for-loop;
  # is.na() works on the whole column and sum() counts the TRUEs.
  sum(is.na(airquality[[column]]))
}
# what did I do up there... so dumb and long
# Much easier version:
sum(is.na(airquality$Ozone))
#Q17
# NOTE(review): this first mean() returns NA because Ozone contains
# missing values; the corrected version on the next line subsets the
# non-missing entries first (mean(..., na.rm = TRUE) would also work).
mean(airquality[,"Ozone"]) #I need to exclude NA values
mean(airquality[!is.na(airquality$Ozone), "Ozone"])
#Q18
# Mean Solar.R for days with Ozone > 31 and Temp > 90.
OzoneAbove31withNA <- airquality[airquality$Ozone > 31, "Ozone"]
TemperatureAbove90withNA <- airquality[airquality$Temp > 90, "Temp"]
# NOTE(review): complete.cases() with no argument raises an error when
# this script is run top to bottom; presumably a leftover experiment.
complete.cases()
mean(airquality$Solar.R[!is.na(airquality$Solar.R) & airquality$Ozone > 31 &
                          !is.na(airquality$Ozone) & airquality$Temp > 90])
#Q19
# Mean temperature in June (Month == 6).
mean(airquality$Temp[airquality$Month == 6])
#Q20
# Maximum May ozone reading, ignoring NAs.
max(airquality$Ozone[!is.na(airquality$Ozone) & airquality$Month == 5])
| /Week1Coursera.R | permissive | liliaevgeniou/Coursera-R-Programming | R | false | false | 1,445 | r | #Question 11
colnames(airquality)
#Q12
airquality[c(1,2),]
#Q13
nrow(airquality)
#Q14
airquality[c(152,153),] #from the answer to Q13, which was 153
airquality[c((nrow(airquality)-1),nrow(airquality)),]
#Q15
airquality[47,1]
airquality[47,"Ozone"]
airquality$Ozone[47]
#Q16
#number_of_na <- function(x = (is.na(airquality[,"Ozone"]))) {
# good <- (TRUE == x)
#length(good)
#}
#isTRUE(is.na(airquality[,"Ozone"]))
is.na(airquality[,"Ozone"])
dataFrameNA <- function(airquality) { # how do I make it generic so that it can
  # take any data.frame?
  # Counts the NA entries in the "Ozone" column of a data frame by
  # scanning it row by row; equivalent to sum(is.na(airquality$Ozone)).
  x = 0
  for (i in 1:(nrow(airquality))) {
    if (is.na(airquality[i,"Ozone"])) {
      x = x+1
    }
  }
  # The accumulated count is the return value.
  x
}
# what did I do up there... so dumb and long
# Much easier version:
sum(is.na(airquality$Ozone))
#Q17
mean(airquality[,"Ozone"]) #I need to exclude NA values
mean(airquality[!is.na(airquality$Ozone), "Ozone"])
#Q18
OzoneAbove31withNA <- airquality[airquality$Ozone > 31, "Ozone"]
TemperatureAbove90withNA <- airquality[airquality$Temp > 90, "Temp"]
complete.cases()
mean(airquality$Solar.R[!is.na(airquality$Solar.R) & airquality$Ozone > 31 &
!is.na(airquality$Ozone) & airquality$Temp > 90])
#Q19
mean(airquality$Temp[airquality$Month == 6])
#Q20
max(airquality$Ozone[!is.na(airquality$Ozone) & airquality$Month == 5])
|
corr <- function(directory, threshold = 0) {
  ## Compute the correlation between 'sulfate' and 'nitrate' for every
  ## monitor CSV file in 'directory' whose number of completely observed
  ## rows is at least 'threshold'.
  ##
  ## 'directory': character vector of length 1 indicating the location
  ##              of the CSV files (each with 'sulfate' and 'nitrate'
  ##              columns).
  ## 'threshold': numeric vector of length 1 -- the minimum number of
  ##              completely observed rows required before a file
  ##              contributes a correlation; the default is 0.
  ## Returns a numeric vector of correlations (possibly of length 0).
  ##
  ## Generalized from the original hard-coded "001.csv".."332.csv" loop:
  ## every *.csv file in the directory is considered, in sorted-name
  ## order (identical ordering for the zero-padded assignment files).
  files <- list.files(directory, pattern = "\\.csv$", full.names = TRUE)
  results <- lapply(files, function(filepath) {
    complete <- na.omit(read.csv(filepath))
    if (nrow(complete) < threshold) {
      return(NULL)  # below threshold: contributes nothing
    }
    cor(complete[["sulfate"]], complete[["nitrate"]])
  })
  # unlist() drops the NULL placeholders; when everything was skipped it
  # yields NULL, which we normalize back to numeric(0).
  out <- unlist(results)
  if (is.null(out)) numeric(0) else out
}
| /r_programming/ProgrammingAssignment1/corr.R | permissive | popotam/datasciencecoursera | R | false | false | 834 | r | corr <- function(directory, threshold = 0) {
## 'directory' is a character vector of length 1 indicating
## the location of the CSV files
## 'threshold' is a numeric vector of length 1 indicating the
## number of completely observed observations (on all
## variables) required to compute the correlation between
## nitrate and sulfate; the default is 0
## Return a numeric vector of correlations
data = numeric(0)
for (i in 1:332) {
filename <- sprintf("%03d.csv", i)
filepath <- paste(directory, filename, sep = "/")
csv <- read.csv(filepath)
complete <- na.omit(csv)
if (nrow(complete) < threshold) {
next
}
corr <- cor(complete[["sulfate"]], complete[["nitrate"]])
data <- c(data, corr)
}
data
}
|
## Runs Test
## Simon Dedman 28/8/2018
## https://en.wikipedia.org/wiki/Wald%E2%80%93Wolfowitz_runs_test
## https://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
##
## Applies the Wald-Wolfowitz runs test to a two-level anchovy series to
## check whether the ordering of the observations is random.
# NOTE(review): install.packages() inside an analysis script re-installs
# on every run; consider guarding with requireNamespace().
install.packages("tseries")
library(tseries)
# NOTE(review): machine-specific absolute path; breaks on other machines.
setwd("C:/Users/simon/Dropbox/Farallon Institute/Data & Analysis/Biological/Anchovy & Sardine Biomass/RunsTest")
# NOTE(review): the three reads below overwrite each other, so only
# RT3onlyRuns.csv is actually analyzed; presumably the first two are
# alternative inputs kept for reference -- confirm which is intended.
anchrun <- read.csv("RT1all.csv")
anchrun <- read.csv("RT2noNA.csv")
anchrun <- read.csv("RT3onlyRuns.csv")
# runs.test() requires a factor with exactly two levels.
anchrun$AnchRunsTest <- as.factor(anchrun$AnchRunsTest)
runs.test(anchrun$AnchRunsTest)
# Recorded outputs from previous runs:
# 2 noNA
# Standard Normal = 0.80405, p-value = 0.4214
# alternative hypothesis: two.sided
# 3 onlyruns
# Standard Normal = 1.2544, p-value = 0.2097
# alternative hypothesis: two.sided
# Null hypothesis: order of the data is random
# Alternative hypothesis: order of the data is not random
# High p-value means you cannot trash your null hypothesis (e.g. >0.05)
# p-value is probability you reject a null hypothesis when it is actually true.
# (Strictly, that last line describes the Type I error rate; the p-value
# is the probability of data at least this extreme under the null.)
## Simon Dedman 28/8/2018
## https://en.wikipedia.org/wiki/Wald%E2%80%93Wolfowitz_runs_test
## https://www.itl.nist.gov/div898/handbook/eda/section3/eda35d.htm
##
install.packages("tseries")
library(tseries)
setwd("C:/Users/simon/Dropbox/Farallon Institute/Data & Analysis/Biological/Anchovy & Sardine Biomass/RunsTest")
anchrun <- read.csv("RT1all.csv")
anchrun <- read.csv("RT2noNA.csv")
anchrun <- read.csv("RT3onlyRuns.csv")
anchrun$AnchRunsTest <- as.factor(anchrun$AnchRunsTest)
runs.test(anchrun$AnchRunsTest)
# 2 noNA
# Standard Normal = 0.80405, p-value = 0.4214
# alternative hypothesis: two.sided
# 3 onlyruns
# Standard Normal = 1.2544, p-value = 0.2097
# alternative hypothesis: two.sided
# Null hypothesis: order of the data is random
# Alternative hypothesis: order of the data is not random
# High p-value means you cannot trash your null hypothesis (e.g. >0.05)
# p-value is probability you reject a null hypothesis when it is actually true. |
#' Create Rstudio project library
#'
#' Creates project library directory \code{lib} inside specified \code{path}.
#'
#' If there is no .Rproj file inside specified \code{path} then error will be
#' thown.
#'
#' If there is no .Rprofile file inside \code{path} then it will be created there
#' and activation command for project specific library will be written to it.
#' Otherwise activation command for project specific library will be appended to
#' the end of existing .Rprofile file.
#'
#' If there is no \code{lib} directory inside \code{path} then directory will be created,
#' otherwise existing directory will be used as library with warning.
#'
#' @param path Character vector specifying path. May contain '.' for parent
#' directory reference.
#' @param lib Character vector specifying library directory name.
#'
#' @export
use_project_lib <- function(path = '.', lib = 'library'){
  # Create a project-specific package library 'lib' inside 'path' and
  # register it in the project's .Rprofile via a .libPaths() command.
  # (See the roxygen header above for the full contract.)
  #
  # Errors if 'path' holds no .Rproj file, or if the .Rprofile already
  # contains the activation command; returns invisible(NULL).
  #
  # Bug fix: the original pattern '.+.Rproj' left the dot unescaped and
  # the match unanchored, so any file name merely containing "Rproj"
  # (e.g. "myRprojNotes.txt") was accepted as a project marker;
  # '\\.Rproj$' matches genuine *.Rproj files only.
  if(any(grepl('\\.Rproj$', dir(path)))){
    profileName <- file.path(path, '.Rprofile')
    # Command that prepends the project library to the search path.
    profileCommand <- sprintf(".libPaths(c(normalizePath(file.path('.', '%s')), .libPaths()))", lib)
    if(file.exists(profileName)){
      content <- readLines(profileName)
      # chartr() maps double quotes to single quotes so an existing
      # activation line is detected regardless of quoting style.
      if(any(profileCommand %in% chartr("\"", "'", content))){
        stop(sprintf('There is already "%s" command in "%s" file', profileCommand,
                     normalizePath(profileName)))
      }
      # Append after the existing content; the leading "\n" inserts a
      # blank separator line before the command.
      writeLines(c(content, paste0('\n', profileCommand)), profileName)
    } else {
      file.create(profileName)
      writeLines(profileCommand, profileName)
    }
    # dir.create() returns FALSE when the directory already exists; the
    # existing directory is then reused as the library, with a warning.
    if(!dir.create(file.path(path, lib), showWarnings = FALSE)){
      warning(sprintf('Existing directory "%s" set as library directory',
                      normalizePath(file.path(path, lib))))
    }
  } else {
    stop(sprintf('There is no .Rproj Rstudio project file in "%s" directory',
                 normalizePath(path, mustWork = TRUE)))
  }
  return(invisible(NULL))
}
| /R/library.R | permissive | Bijection1to1/prolibr | R | false | false | 1,912 | r | #' Create Rstudio project library
#'
#' Creates project library directory \code{lib} inside specified \code{path}.
#'
#' If there is no .Rproj file inside specified \code{path} then error will be
#' thown.
#'
#' If there is no .Rprofile file inside \code{path} then it will be created there
#' and activation command for project specific library will be written to it.
#' Otherwise activation command for project specific library will be appended to
#' the end of existing .Rprofile file.
#'
#' If there is no \code{lib} directory inside \code{path} then directory will be created,
#' otherwise existing directory will be used as library with warning.
#'
#' @param path Character vector specifying path. May contain '.' for parent
#' directory reference.
#' @param lib Character vector specifying library directory name.
#'
#' @export
use_project_lib <- function(path = '.', lib = 'library'){
  # Creates a project-local library directory and writes the .libPaths()
  # activation command into the project's .Rprofile (full contract in
  # the roxygen header above).
  # NOTE(review): the pattern '.+.Rproj' leaves the dot unescaped and
  # the match unanchored, so any file name merely containing "Rproj"
  # passes this check -- not only real *.Rproj files; confirm whether
  # '\\.Rproj$' was intended.
  if(any(grepl('.+.Rproj', dir(path)))){
    profileName <- file.path(path, '.Rprofile')
    # Command that prepends the project library to the search path.
    profileCommand <- sprintf(".libPaths(c(normalizePath(file.path('.', '%s')), .libPaths()))", lib)
    if(file.exists(profileName)){
      content <- readLines(profileName)
      # chartr() maps double quotes to single quotes so an existing
      # activation line is recognized regardless of quoting style.
      if(any(profileCommand %in% chartr("\"", "'", content))){
        stop(sprintf('There is already "%s" command in "%s" file', profileCommand,
                     normalizePath(profileName)))
      }
      # Append the command; the leading "\n" inserts a blank separator.
      writeLines(c(content, paste0('\n', profileCommand)), profileName)
    } else {
      file.create(profileName)
      writeLines(profileCommand, profileName)
    }
    # dir.create() returns FALSE when the directory already exists; the
    # existing directory is then reused as the library, with a warning.
    if(!dir.create(file.path(path, lib), showWarnings = FALSE)){
      warning(sprintf('Existing directory "%s" set as library directory',
                      normalizePath(file.path(path, lib))))
    }
  } else {
    stop(sprintf('There is no .Rproj Rstudio project file in "%s" directory',
                 normalizePath(path, mustWork = TRUE)))
  }
  return(invisible(NULL))
}
|
mixNB.Sim <- function(nbT,weight,mean,variance,seed=2019)
{
  # Simulate 'nbT' draws from a finite mixture of negative binomial
  # distributions.
  #
  # 'nbT'     : number of observations to simulate.
  # 'weight'  : mixture weights, one per component.
  # 'mean'    : component means.
  # 'variance': component variances; each must exceed its mean for the
  #             size/prob parameterization used below.
  # 'seed'    : RNG seed (all arguments must be positive numerics).
  #
  # Returns an object of class "mixNB.Sim": the inputs plus the
  # simulated values 'x' and the component labels 'idx'.
  if(!is.numeric(c(nbT,weight,mean,variance,seed))) {
    stop("Parameters have to be numeric")
  } else if(!all(c(nbT,weight,mean,variance,seed)>0)){
    stop("Parameters have to be positive")
  } else if(length(weight)!=length(mean)|length(weight)!=length(variance)){
    # Typo fix: was "Unmached length of Weight,Mean or Varience".
    stop("Unmatched length of Weight, Mean or Variance")
  } else if(!all(variance > mean)){
    # Previously variance <= mean produced a negative 'size' and silent
    # NaN draws from rnbinom(); fail fast instead.
    stop("Each variance must exceed the corresponding mean")
  } else {
    set.seed(seed)
    nbS=length(weight)
    # Moment matching: size = m^2/(v-m), prob = m/v  (requires v > m).
    eSize_set = mean^2/(variance-mean)
    eProb_set = mean/variance
    Z = sample(1:nbS, nbT, prob=weight, replace=TRUE)
    # Bug fix: table(Z) drops empty components, so numPopu[i] could
    # misalign counts; tabulate() keeps zero counts and is identical
    # whenever every component is populated.
    numPopu = tabulate(Z, nbins = nbS)
    X_ = list()
    X = rep(0,nbT)
    for(i in 1:nbS){
      # NOTE(review): re-seeding on every iteration makes all components
      # draw from the same RNG stream; kept as-is to preserve the
      # original behavior, but confirm this is intentional.
      set.seed(seed)
      X_[[i]] = rnbinom(numPopu[i],size = eSize_set[i], prob = eProb_set[i])
      X[Z==i] = X_[[i]]
    }
    obj <-list(nbT=nbT,
               weight=weight,
               mean=mean,
               variance=variance,
               seed=seed,
               x=X,
               idx=Z)
    class(obj)<-"mixNB.Sim"
    return(obj)
  }
}
| /R/Integrate/mixNB.Sim.R | no_license | Hankrise/classNB | R | false | false | 1,300 | r | mixNB.Sim <- function(nbT,weight,mean,variance,seed=2019)
{
if(FALSE){
nbT=1000
weight=c(0.2,0.6,0.2)
mean=c(100,200,300)
variance=rep(1000,3)
seed=2019
sim=mixNB.Sim(nbT,weight,mean,variance,seed)
sim
x=sim$x
print(sim,"Y")
hist(sim)
plot(sim)
}
if(!is.numeric(c(nbT,weight,mean,variance,seed))) {
stop("Parameters have to be numeric")
} else if(!all(c(nbT,weight,mean,variance,seed)>0)){
stop("Parameters have to be positive")
} else if(length(weight)!=length(mean)|length(weight)!=length(variance)){
stop("Unmached length of Weight,Mean or Varience")
} else {
set.seed(seed)
nbS=length(weight)
eSize_set = mean^2/(variance-mean)
eProb_set = mean/variance
Z = sample(1:nbS, nbT, prob=weight, replace=TRUE)
numPopu = table(Z)
X_ = list()
X = rep(0,nbT)
for(i in 1:nbS){
set.seed(seed)
X_[[i]] = rnbinom(numPopu[i],size = eSize_set[i], prob = eProb_set[i])
X[Z==i] = X_[[i]]
}
# hist(X)
obj <-list(nbT=nbT,
weight=weight,
mean=mean,
variance=variance,
seed=seed,
x=X,
idx=Z)
class(obj)<-"mixNB.Sim"
return(obj)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Submissions.R
\name{submit_current}
\alias{submit_current}
\title{Submit the currently chosen exercise to the TMC server}
\usage{
submit_current(path, credentials)
}
\arguments{
\item{path}{Path to the currently chosen directory.}
\item{credentials}{List of user credentials.}
}
\value{
Submission result with non \code{NULL} \code{results} if
processing the tests in the TMC server was successful. List keys:
\code{results}, \code{error}. Error is not \code{NULL} if processing
the tests ended in error.
}
\description{
Submit the currently chosen exercise to the TMC server
and return the submission result \code{JSON}.
}
\details{
Reads the \code{OAuth2} token and TMC server address from
\code{credentials} and uploads the currently open exercise to
the TMC server. If the upload was successful, starts querying the TMC
server for the submission result \code{JSON} until the server has
finished processing the tests.
}
\seealso{
\code{\link{getCredentials}},
\code{\link{upload_current_exercise}},
\code{\link{getExerciseFromServer}}
}
| /tmcrstudioaddin/man/submit_current.Rd | no_license | testmycode/tmc-rstudio | R | false | true | 1,120 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Submissions.R
\name{submit_current}
\alias{submit_current}
\title{Submit the currently chosen exercise to the TMC server}
\usage{
submit_current(path, credentials)
}
\arguments{
\item{path}{Path to the currently chosen directory.}
\item{credentials}{List of user credentials.}
}
\value{
Submission result with non \code{NULL} \code{results} if
processing the tests in the TMC server was successful. List keys:
\code{results}, \code{error}. Error is not \code{NULL} if processing
the tests ended in error.
}
\description{
Submit the currently chosen exercise to the TMC server
and return the submission result \code{JSON}.
}
\details{
Reads the \code{OAuth2} token and TMC server address from
\code{credentials} and uploads the currently open exercise to
the TMC server. If the upload was successful, starts querying the TMC
server for the submission result \code{JSON} until the server has
finished processing the tests.
}
\seealso{
\code{\link{getCredentials}},
\code{\link{upload_current_exercise}},
\code{\link{getExerciseFromServer}}
}
|
# Get station meta data from RCC ACIS
# MAC 11/18/19
# to do: figure out way to gauge missing days?, station types/threadex stations
# Queries the RCC ACIS StnMeta web service for New Mexico precipitation
# stations, keeps long-record stations still reporting this year, and
# (below) maps them with leaflet.
# load libraries
library(RCurl)
library(jsonlite)
# get valid date ranges
# JSON request body: NM stations, precipitation element, returning ids,
# name, valid date range and lon/lat metadata.
jsonQuery='{"state":"NM","meta":"sids,name,valid_daterange,ll","elems":"pcpn"}'
out<-postForm("http://data.rcc-acis.org/StnMeta",
              .opts = list(postfields = jsonQuery,
                           httpheader = c('Content-Type' = 'application/json', Accept = 'application/json')))
outList<-fromJSON(out, flatten = FALSE) #
# wrangle into dataframe
# The metadata fields arrive as list columns; flatten each into a
# rectangular data frame. (NOTE(review): byrow=T uses the reassignable
# shorthand for TRUE.)
dates<-data.frame(matrix(unlist(outList$meta$valid_daterange), nrow=nrow(outList$meta), byrow=T))
# Pad the ragged per-station id lists out to a common width.
sids<-as.data.frame(t(sapply(outList$meta$sids, '[', seq(max(sapply(outList$meta$sids, length))))))
names<-outList$meta$name
ll<-data.frame(matrix(unlist(outList$meta$ll), nrow=nrow(outList$meta), byrow=T))
stations<-cbind(names,ll,dates,sids)
colnames(stations)[2:5]<-c("lon","lat","beginYr","endYr")
stations$beginYr<-as.Date(stations$beginYr, format="%Y-%m-%d")
stations$endYr<-as.Date(stations$endYr, format="%Y-%m-%d")
# Record length in days (a difftime).
stations$obsN<-stations$endYr-stations$beginYr
# find stations with data in current year
# Keep stations reporting in the current calendar year with roughly
# >= 30 years of record (obsN is in days, hence /365).
stations<-stations[which(stations$endYr>=as.Date(paste0(format(Sys.Date(), "%Y"),"-01-01")) & stations$obsN/365>=30 ),]
# find station type, find ThreadEx stations too
# test<-mapply(grepl,"US", stations)
# plot points on map
library(leaflet)
leaflet(data = stations) %>% addTiles() %>%
addMarkers(~lon, ~lat, popup = ~as.character(names), label = ~as.character(names)) | /stationPlots/getStations.R | no_license | mcrimmins/ClimPlot | R | false | false | 1,578 | r | # Get station meta data from RCC ACIS
# MAC 11/18/19
# to do: figure out way to gauge missing days?, station types/threadex stations
# load libraries
library(RCurl)
library(jsonlite)
# get valid date ranges
jsonQuery='{"state":"NM","meta":"sids,name,valid_daterange,ll","elems":"pcpn"}'
out<-postForm("http://data.rcc-acis.org/StnMeta",
.opts = list(postfields = jsonQuery,
httpheader = c('Content-Type' = 'application/json', Accept = 'application/json')))
outList<-fromJSON(out, flatten = FALSE) #
# wrangle into dataframe
dates<-data.frame(matrix(unlist(outList$meta$valid_daterange), nrow=nrow(outList$meta), byrow=T))
sids<-as.data.frame(t(sapply(outList$meta$sids, '[', seq(max(sapply(outList$meta$sids, length))))))
names<-outList$meta$name
ll<-data.frame(matrix(unlist(outList$meta$ll), nrow=nrow(outList$meta), byrow=T))
stations<-cbind(names,ll,dates,sids)
colnames(stations)[2:5]<-c("lon","lat","beginYr","endYr")
stations$beginYr<-as.Date(stations$beginYr, format="%Y-%m-%d")
stations$endYr<-as.Date(stations$endYr, format="%Y-%m-%d")
stations$obsN<-stations$endYr-stations$beginYr
# find stations with data in current year
stations<-stations[which(stations$endYr>=as.Date(paste0(format(Sys.Date(), "%Y"),"-01-01")) & stations$obsN/365>=30 ),]
# find station type, find ThreadEx stations too
# test<-mapply(grepl,"US", stations)
# plot points on map
library(leaflet)
leaflet(data = stations) %>% addTiles() %>%
addMarkers(~lon, ~lat, popup = ~as.character(names), label = ~as.character(names)) |
#IMPORT LIBRARIES
# Titanic survival model: load, clean and impute the Kaggle data.
library(tidyverse)
library(ggplot2)
library(mice)
library(caTools)
library(scales)
library(caret)
#IMPORT DATASETS
# Expects the Kaggle Titanic train.csv / test.csv in the working
# directory.
training_set = read.csv("train.csv")
test_set = read.csv("test.csv")
# Kept aside for building the submission files later.
PassengerId = test_set$PassengerId
#PRE-PROCESSING
#Data Cleaning and manipulation
# Fill blank Embarked with the mode "S", derive Family = SibSp + Parch,
# convert the categorical columns to factors and drop the identifier /
# free-text columns.
training_set = training_set %>%
  mutate(Embarked = ifelse(Embarked=="","S",as.character(Embarked)),
         Family = SibSp + Parch) %>%
  mutate_at(vars(Survived, Pclass,Embarked), factor) %>%
  select(-c(Name,Ticket,Cabin,PassengerId,SibSp,Parch))
# Same treatment for the test set; its single missing Fare is replaced
# with the median fare.
test_set = test_set %>%
  mutate_at(vars(Pclass), factor) %>%
  mutate(Fare = ifelse(is.na(Fare), median(Fare,na.rm = T), Fare),
         Family = SibSp + Parch) %>%
  select(-c(Name,Ticket,Cabin,PassengerId, SibSp, Parch))
#############################################################################
#MISSING VALUE TREATMENT
#Impute missing values using MICE library
#Identify the missing values#md.pattern(training_set)
# Predictive mean matching imputation (mainly fills missing Age).
set.seed(100)
imputeTraining = mice(data = training_set, method = "pmm", m = 5, maxit = 50, seed = 500)
set.seed(100)
imputeTest = mice(data = test_set, method = "pmm", m = 5, maxit = 50, seed = 500)
#Check the imputed values #training_imputed$imp$Age
# Replace the raw data with the first completed imputation.
training_set = complete(imputeTraining,1)
test_set = complete(imputeTest,1)
########################################################################################
#EXPLORATORY DATA ANALYSIS
# Stacked-proportion bar charts for the categorical predictors and
# boxplots for the numeric ones, each split by survival.
#Passenger Class
training_set %>% ggplot(aes(x=Survived)) +
  geom_bar(aes(fill=Pclass),position = "fill",width=0.5) +
  ggtitle("Survived ~ Passenger Class") +
  xlab("Survived") +
  ylab("% of Passengers") +
  scale_y_continuous(labels = percent_format()) +
  scale_fill_manual(values = c("steelblue3","turquoise3","royalblue3"),
                    name ="Passenger\nClass",
                    breaks = c(1,2,3),
                    labels = c("Upper","Middle","Lower")) +
  theme_classic()
#Gender of the Passengers
training_set %>% ggplot(aes(x=Survived)) +
  geom_bar(aes(fill=Sex),position = "fill",width=0.5) +
  ggtitle("Survived ~ Passenger's Gender") +
  xlab("Survived") +
  ylab("% of Passengers") +
  scale_y_continuous(labels = percent_format()) +
  scale_fill_manual(values = c("turquoise3","royalblue3"),
                    name ="Gender",
                    breaks = c("male","female"),
                    labels = c("Male","Female")) +
  theme_classic()
#Embarked Location
training_set %>% ggplot(aes(x=Survived)) +
  geom_bar(aes(fill=Embarked),position = "fill",width=0.5) +
  ggtitle("Survived ~ Embarked Location") +
  xlab("Survived") +
  ylab("% of Passengers") +
  scale_y_continuous(labels = percent_format()) +
  scale_fill_manual(values = c("steelblue3","turquoise3","royalblue3"),
                    name ="Embarked City",
                    breaks = c("C","Q","S"),
                    labels = c("Cherbourg","Queenstown","Southampton")) +
  theme_classic()
#Ticket Fare
# NOTE(review): "Ticker Fare" in the title below is presumably a typo
# for "Ticket Fare".
training_set %>%
  ggplot(aes(x=Survived,y=Fare)) +
  geom_boxplot(aes(fill = Survived), width=0.5) +
  ggtitle("Survived ~ Ticker Fare") +
  xlab("Survived") +
  ylab("Ticket Fare") +
  theme_classic()
#Passenger Age
training_set %>%
  ggplot(aes(x=Survived,y=Age)) +
  geom_boxplot(aes(fill = Survived), width=0.5) +
  ggtitle("Survived ~ Passenger Age") +
  xlab("Survived") +
  ylab("Passenger Age") +
  theme_classic()
#Family Size
training_set %>%
  ggplot(aes(x=Survived,y=Family)) +
  geom_boxplot(aes(fill = Survived), width=0.5) +
  ggtitle("Survived ~ Family Size") +
  xlab("Survived") +
  ylab("Family Size") +
  theme_classic()
#########################################################################
#SCALING DATA
# (Commented out here; the same scaling is applied further below, just
# before the distance-based models.)
# training_set = training_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
# test_set = test_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
# training_set[-1] = scale(training_set[-1])
# test_set = scale(test_set)
##########################################################################
#FEATURE SELECTION
#Backward Elimination
# Stepwise logistic regression: backward from the full model, then
# forward from the null model, both scored by AIC.
model.full = glm(Survived~., data = training_set, family = binomial)
model.null = glm(Survived~1, data = training_set, family = binomial)
model.final = step(model.full, scope = list(lower = model.null),
                   direction = "backward")
summary(model.final)
#Forward Elimination
model.full = glm(Survived~., data = training_set, family = binomial)
model.null = glm(Survived~1, data = training_set, family = binomial)
model.final = step(model.null, scope = list(upper = model.full),
                   direction = "forward")
summary(model.final)
#Recursive Feature Elimination - Feature Selection
# NOTE(review): columns are selected by position c(3,2,4,7); presumably
# Pclass, Sex, Age and Family after the preprocessing above -- confirm
# the column order before relying on this.
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
model.rfe <- rfe(training_set[,c(3,2,4,7)], training_set[,1],
                 sizes=c(1:6), rfeControl=control)
print(model.rfe)
predictors(model.rfe)
plot(model.rfe, type=c("g","o"))
#Final Features - PClass, Sex, Age, Family
###########################################################################
#BUILDING MODELS
# 10-fold cross-validation, accuracy as the selection metric.
control = trainControl(method = "cv", number = 10)
metric = "Accuracy"
#Random Forest
set.seed(100)
fit.rf = train(Survived~Pclass + Sex + Age + Family,
               data = training_set, method = "rf",
               metric = metric, trControl = control)
predict.rf = predict(fit.rf, test_set)
predictdfrf = data.frame(PassengerId = PassengerId,Survived=predict.rf)
write.csv(predictdfrf, "predictrf.csv",row.names = F)
#SCALING DATA
# Convert factors to numeric codes and standardize for the
# distance-based models below.
# NOTE(review): scale() turns test_set into a matrix, not a data frame;
# verify that predict() on the caret fits below handles it as intended.
training_set = training_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
test_set = test_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
training_set[-1] = scale(training_set[-1])
test_set = scale(test_set)
#Support Vector Machines
set.seed(100)
fit.svm = train(Survived~Pclass + Sex + Age + Family,
                data = training_set, method = "svmRadial",
                metric = metric, trControl = control)
predict.svm = predict(fit.svm, test_set)
predictdfsvm = data.frame(PassengerId = PassengerId,Survived=predict.svm)
write.csv(predictdfsvm, "predictsvm.csv",row.names = F)
#KNN
set.seed(100)
fit.knn = train(Survived~Pclass + Sex + Age + Family,
                data = training_set, method = "knn",
                metric = metric, trControl = control)
predict.knn = predict(fit.knn, test_set)
predictdfknn = data.frame(PassengerId = PassengerId,Survived=predict.knn)
write.csv(predictdfknn, "predictknn.csv",row.names = F)
#Logistic Regression
set.seed(100)
fit.glm = train(Survived~Pclass + Sex + Age + Family,
                data = training_set, method = "glm",
                metric = metric, trControl = control)
predict.glm = predict(fit.glm, test_set)
predictdfglm = data.frame(PassengerId = PassengerId,Survived=predict.glm)
write.csv(predictdfglm, "predictglm.csv",row.names = F)
#LDA
set.seed(100)
fit.lda = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "lda",
metric = metric, trControl = control)
predict.lda = predict(fit.lda, test_set)
predictdflda = data.frame(PassengerId = PassengerId,Survived=predict.lda)
write.csv(predictdflda, "predictlda.csv",row.names = F)
#All models together
predictdf = data.frame(PassengerId = PassengerID,
lda=predict.lda,
rf=predict.rf,
svm=predict.svm,
glm=predict.glm,
knn=predict.knn)
write.csv(predictdf, "predictall.csv",row.names = F)
#XGBoost
#Build an XGBoost Model
xgboost.model = xgb.cv(data = as.matrix(training_set[-1]),
label = as.numeric(training_set$Survived)-1,
objective = "binary:logistic",
eval_metric = "error",
nrounds = 1000,
eta = 0.1,
max_depth = 5,
min_child_weight = 1,
gamma = 0,
subsample = 0.9,
nthread = 4,
#colsample_bytree = 0.9,
early_stopping_rounds = 50,
nfold = 5,
seed = 100)
#Build an Artificial Neural Network
#Fitting ANN
# library(h2o)
# h2o.init()
# install.packages("h2o")
# classifier = h2o.deeplearning(y = "Survived",
# training_frame = as.h2o(training_set),
# activation = "Rectifier",
# hidden = c(6,6),
# epochs = 100,
# train_samples_per_iteration = -2)
# prob_pred = h2o.predict(classifier, as.h2o(test_set))
# pred = prob_pred > 0.5
# pred = as.vector(pred) | /titanic_final.R | no_license | santoshselvarajr/Titanic_Survival_Analysis | R | false | false | 8,674 | r | #IMPORT LIBRARIES
library(tidyverse)
library(ggplot2)
library(mice)
library(caTools)
library(scales)
library(caret)
#IMPORT DATASETS
training_set = read.csv("train.csv")
test_set = read.csv("test.csv")
PassengerId = test_set$PassengerId
#PRE-PROCESSING
#Data Cleaning and manipulation
training_set = training_set %>%
mutate(Embarked = ifelse(Embarked=="","S",as.character(Embarked)),
Family = SibSp + Parch) %>%
mutate_at(vars(Survived, Pclass,Embarked), factor) %>%
select(-c(Name,Ticket,Cabin,PassengerId,SibSp,Parch))
test_set = test_set %>%
mutate_at(vars(Pclass), factor) %>%
mutate(Fare = ifelse(is.na(Fare), median(Fare,na.rm = T), Fare),
Family = SibSp + Parch) %>%
select(-c(Name,Ticket,Cabin,PassengerId, SibSp, Parch))
#############################################################################
#MISSING VALUE TREATMENT
#Impute missing values using MICE library
#Identify the missing values#md.pattern(training_set)
set.seed(100)
imputeTraining = mice(data = training_set, method = "pmm", m = 5, maxit = 50, seed = 500)
set.seed(100)
imputeTest = mice(data = test_set, method = "pmm", m = 5, maxit = 50, seed = 500)
#Check the imputed values #training_imputed$imp$Age
training_set = complete(imputeTraining,1)
test_set = complete(imputeTest,1)
########################################################################################
#EXPLORATORY DATA ANALYSIS
#Passenger Class
training_set %>% ggplot(aes(x=Survived)) +
geom_bar(aes(fill=Pclass),position = "fill",width=0.5) +
ggtitle("Survived ~ Passenger Class") +
xlab("Survived") +
ylab("% of Passengers") +
scale_y_continuous(labels = percent_format()) +
scale_fill_manual(values = c("steelblue3","turquoise3","royalblue3"),
name ="Passenger\nClass",
breaks = c(1,2,3),
labels = c("Upper","Middle","Lower")) +
theme_classic()
#Gender of the Passengers
training_set %>% ggplot(aes(x=Survived)) +
geom_bar(aes(fill=Sex),position = "fill",width=0.5) +
ggtitle("Survived ~ Passenger's Gender") +
xlab("Survived") +
ylab("% of Passengers") +
scale_y_continuous(labels = percent_format()) +
scale_fill_manual(values = c("turquoise3","royalblue3"),
name ="Gender",
breaks = c("male","female"),
labels = c("Male","Female")) +
theme_classic()
#Embarked Location
training_set %>% ggplot(aes(x=Survived)) +
geom_bar(aes(fill=Embarked),position = "fill",width=0.5) +
ggtitle("Survived ~ Embarked Location") +
xlab("Survived") +
ylab("% of Passengers") +
scale_y_continuous(labels = percent_format()) +
scale_fill_manual(values = c("steelblue3","turquoise3","royalblue3"),
name ="Embarked City",
breaks = c("C","Q","S"),
labels = c("Cherbourg","Queenstown","Southampton")) +
theme_classic()
#Ticket Fare
training_set %>%
ggplot(aes(x=Survived,y=Fare)) +
geom_boxplot(aes(fill = Survived), width=0.5) +
ggtitle("Survived ~ Ticker Fare") +
xlab("Survived") +
ylab("Ticket Fare") +
theme_classic()
#Passenger Age
training_set %>%
ggplot(aes(x=Survived,y=Age)) +
geom_boxplot(aes(fill = Survived), width=0.5) +
ggtitle("Survived ~ Passenger Age") +
xlab("Survived") +
ylab("Passenger Age") +
theme_classic()
#Family Size
training_set %>%
ggplot(aes(x=Survived,y=Family)) +
geom_boxplot(aes(fill = Survived), width=0.5) +
ggtitle("Survived ~ Family Size") +
xlab("Survived") +
ylab("Family Size") +
theme_classic()
#########################################################################
#SCALING DATA
# training_set = training_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
# test_set = test_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
# training_set[-1] = scale(training_set[-1])
# test_set = scale(test_set)
##########################################################################
#FEATURE SELECTION
#Backward Elimination
model.full = glm(Survived~., data = training_set, family = binomial)
model.null = glm(Survived~1, data = training_set, family = binomial)
model.final = step(model.full, scope = list(lower = model.null),
direction = "backward")
summary(model.final)
#Forward Elimination
model.full = glm(Survived~., data = training_set, family = binomial)
model.null = glm(Survived~1, data = training_set, family = binomial)
model.final = step(model.null, scope = list(upper = model.full),
direction = "forward")
summary(model.final)
#Recursive Feature Elimination - Feature Selection
control <- rfeControl(functions=rfFuncs, method="cv", number=10)
model.rfe <- rfe(training_set[,c(3,2,4,7)], training_set[,1],
sizes=c(1:6), rfeControl=control)
print(model.rfe)
predictors(model.rfe)
plot(model.rfe, type=c("g","o"))
#Final Features - PClass, Sex, Age, Family
###########################################################################
#BUILDING MODELS
control = trainControl(method = "cv", number = 10)
metric = "Accuracy"
#Random Forest
set.seed(100)
fit.rf = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "rf",
metric = metric, trControl = control)
predict.rf = predict(fit.rf, test_set)
predictdfrf = data.frame(PassengerId = PassengerId,Survived=predict.rf)
write.csv(predictdfrf, "predictrf.csv",row.names = F)
#SCALING DATA
training_set = training_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
test_set = test_set %>% mutate_at(vars(Pclass,Sex,Embarked),as.numeric)
training_set[-1] = scale(training_set[-1])
test_set = scale(test_set)
#Support Vector Machines
set.seed(100)
fit.svm = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "svmRadial",
metric = metric, trControl = control)
predict.svm = predict(fit.svm, test_set)
predictdfsvm = data.frame(PassengerId = PassengerId,Survived=predict.svm)
write.csv(predictdfsvm, "predictsvm.csv",row.names = F)
#KNN
set.seed(100)
fit.knn = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "knn",
metric = metric, trControl = control)
predict.knn = predict(fit.knn, test_set)
predictdfknn = data.frame(PassengerId = PassengerId,Survived=predict.knn)
write.csv(predictdfknn, "predictknn.csv",row.names = F)
#Logistic Regression
set.seed(100)
fit.glm = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "glm",
metric = metric, trControl = control)
predict.glm = predict(fit.glm, test_set)
predictdfglm = data.frame(PassengerId = PassengerId,Survived=predict.glm)
write.csv(predictdfglm, "predictglm.csv",row.names = F)
#LDA
set.seed(100)
fit.lda = train(Survived~Pclass + Sex + Age + Family,
data = training_set, method = "lda",
metric = metric, trControl = control)
predict.lda = predict(fit.lda, test_set)
predictdflda = data.frame(PassengerId = PassengerId,Survived=predict.lda)
write.csv(predictdflda, "predictlda.csv",row.names = F)
#All models together
predictdf = data.frame(PassengerId = PassengerID,
lda=predict.lda,
rf=predict.rf,
svm=predict.svm,
glm=predict.glm,
knn=predict.knn)
write.csv(predictdf, "predictall.csv",row.names = F)
#XGBoost
#Build an XGBoost Model
xgboost.model = xgb.cv(data = as.matrix(training_set[-1]),
label = as.numeric(training_set$Survived)-1,
objective = "binary:logistic",
eval_metric = "error",
nrounds = 1000,
eta = 0.1,
max_depth = 5,
min_child_weight = 1,
gamma = 0,
subsample = 0.9,
nthread = 4,
#colsample_bytree = 0.9,
early_stopping_rounds = 50,
nfold = 5,
seed = 100)
#Build an Artificial Neural Network
#Fitting ANN
# library(h2o)
# h2o.init()
# install.packages("h2o")
# classifier = h2o.deeplearning(y = "Survived",
# training_frame = as.h2o(training_set),
# activation = "Rectifier",
# hidden = c(6,6),
# epochs = 100,
# train_samples_per_iteration = -2)
# prob_pred = h2o.predict(classifier, as.h2o(test_set))
# pred = prob_pred > 0.5
# pred = as.vector(pred) |
###################################################
# read data from Japanese Human Mortality Database
###################################################
# Download 1x1 death counts and exposures for one Japanese region from the
# Japanese Mortality Database (JMD, www.ipss.go.jp).
#
# Args:
#   region: two-digit JMD region code as a string, e.g. "00" (all Japan),
#           "01" (Hokkaido), ..., "47" (Okinawa).
#   label:  human-readable region name (kept for interface compatibility;
#           not used inside this function).
# Returns:
#   A list with components:
#     Deaths: data.frame of 1x1 death counts (Year, Age, Female, Male, Total).
#     pop:    data.frame of 1x1 exposures with the same layout.
read.jpn_death <- function(region, label = region)
{
    # Fetch one STATS file for `region` and parse it; returns a "try-error"
    # object on failure so the caller can raise a tailored message.
    read_jmd_file <- function(file) {
        path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", file, sep = "")
        txt <- RCurl::getURL(path)
        con <- textConnection(txt)
        on.exit(close(con), add = TRUE)   # close even if read.table() fails
        try(read.table(con, skip = 2, header = TRUE, na.strings = "."), TRUE)
    }
    mx <- read_jmd_file("Deaths_1x1.txt")
    if (inherits(mx, "try-error"))
        stop("Deaths file not found at www.ipss.go.jp. Please check the region code.")
    pop <- read_jmd_file("Exposures_1x1.txt")
    if (inherits(pop, "try-error"))
        stop("Exposures file not found at www.ipss.go.jp. Please check the region code.")
    return(list(Deaths = mx, pop = pop))
}
# Download 1x1 central mortality rates (Mx) and exposures for one Japanese
# region from the JMD (www.ipss.go.jp) and assemble a "demogdata" object
# compatible with the demography package.
#
# Args:
#   region: two-digit JMD region code as a string ("00" = Japan,
#           "01"-"47" = prefectures).
#   label:  human-readable region name stored in the returned object.
# Returns:
#   An object of class "demogdata" with components: year, age, rate (list of
#   age-by-year rate matrices, one per sex column) and pop (matching
#   exposure matrices).
read.jpn <- function (region, label = region)
{
    path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Mx_1x1.txt", sep = "")
    txt <- RCurl::getURL(path)
    con <- textConnection(txt)
    mx <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."),
        TRUE)
    close(con)
    if (inherits(mx, "try-error"))
        stop("Mx file not found at www.ipss.go.jp. Please check the region code.")
    path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Exposures_1x1.txt", sep = "")
    txt <- RCurl::getURL(path)
    con <- textConnection(txt)
    pop <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."),
        TRUE)
    close(con)
    if (inherits(pop, "try-error"))
        stop("Exposures file not found at www.ipss.go.jp. Please check the region code.")
    obj <- list(type = "mortality", label = label, lambda = 0)
    obj$year <- sort(unique(mx[, 1]))
    n <- length(obj$year)
    m <- length(unique(mx[, 2]))    # distinct ages, i.e. rows per year
    obj$age <- mx[1:m, 2]
    mnames <- names(mx)[-c(1, 2)]   # sex columns (Female, Male, Total)
    n.mort <- length(mnames)
    obj$rate <- obj$pop <- list()
    for (i in seq_len(n.mort)) {
        # reshape each long sex column into an age x year matrix
        obj$rate[[i]] <- matrix(mx[, i + 2], nrow = m, ncol = n)
        obj$rate[[i]][obj$rate[[i]] < 0] <- NA   # negative values mark missing data
        obj$pop[[i]] <- matrix(pop[, i + 2], nrow = m, ncol = n)
        obj$pop[[i]][obj$pop[[i]] < 0] <- NA
        dimnames(obj$rate[[i]]) <- dimnames(obj$pop[[i]]) <- list(obj$age,
            obj$year)
    }
    names(obj$pop) <- names(obj$rate) <- tolower(mnames)
    obj$age <- as.numeric(as.character(obj$age))
    # the open age-interval label (e.g. "110+") parses to NA;
    # extrapolate it linearly from the two preceding ages
    if (is.na(obj$age[m]))
        obj$age[m] <- 2 * obj$age[m - 1] - obj$age[m - 2]
    return(structure(obj, class = "demogdata"))
}
# Japan as a whole plus its 47 prefectures, ordered by JMD region code
# ("00" = Japan, then "01"-"47" north to south).
state = c(
  "Japan",
  "Hokkaido", "Aomori", "Iwate", "Miyagi", "Akita", "Yamagata", "Fukushima",
  "Ibaraki", "Tochigi", "Gunma", "Saitama", "Chiba", "Tokyo", "Kanagawa",
  "Niigata", "Toyama", "Ishikawa", "Fukui", "Yamanashi", "Nagano", "Gifu",
  "Shizuoka", "Aichi", "Mie", "Shiga", "Kyoto", "Osaka", "Hyogo", "Nara",
  "Wakayama", "Tottori", "Shimane", "Okayama", "Hiroshima", "Yamaguchi",
  "Tokushima", "Kagawa", "Ehime", "Kochi", "Fukuoka", "Saga", "Nagasaki",
  "Kumamoto", "Oita", "Miyazaki", "Kagoshima", "Okinawa")
# Labels for the smoothed versions of each series: "<name>_smooth" in the
# same order as `state`. Deriving them from `state` (instead of a second
# hand-typed vector) guarantees the two vectors always stay in sync.
state_smooth = paste0(state, "_smooth")
#################################################
# full raw data (1975 to 2013) for ages 0 to 100
#################################################
#######################################
# precise death counts (no repetition)
######################################
# Japan as a whole (region code "00"). The national file reaches further back
# than the prefectural files, so keep only years >= 1975 to match them.
# Selecting by the Year column replaces the original hard-coded row range
# 3109:7770, which silently breaks if the JMD file is ever extended.
# Assumes 111 ages (0-110) per year -- TODO confirm if the data are updated.
dum = read.jpn_death("00", "Japan")$Deaths
keep = dum$Year >= 1975
# raw count matrices: one column per year, cols 3:5 = Female, Male, Total
Japan_count_F = matrix(dum[keep, 3], nrow = 111)
Japan_count_M = matrix(dum[keep, 4], nrow = 111)
Japan_count_T = matrix(dum[keep, 5], nrow = 111)
# collapse ages 100-110 (rows 101:111) into a single open 100+ age group
Japan_count_female = rbind(Japan_count_F[1:100, ], colSums(Japan_count_F[101:111, ]))
Japan_count_male = rbind(Japan_count_M[1:100, ], colSums(Japan_count_M[101:111, ]))
Japan_count_total = rbind(Japan_count_T[1:100, ], colSums(Japan_count_T[101:111, ]))
rm(dum, keep)
# Prefectures with JMD region codes "01" through "40" (Hokkaido ... Fukuoka)
# all receive identical processing, so loop instead of repeating the same
# nine-line block forty times. For each prefecture this creates exactly the
# same global objects as the original copy-pasted code:
#   <Pref>_count_F / _M / _T             raw 111 x nyears count matrices
#                                        (ages 0-110, cols 3:5 = F, M, T)
#   <Pref>_count_female / _male / _total with ages 100-110 collapsed into
#                                        a single open 100+ age group
jmd_prefectures <- c("Hokkaido", "Aomori", "Iwate", "Miyagi", "Akita",
                     "Yamagata", "Fukushima", "Ibaraki", "Tochigi", "Gunma",
                     "Saitama", "Chiba", "Tokyo", "Kanagawa", "Niigata",
                     "Toyama", "Ishikawa", "Fukui", "Yamanashi", "Nagano",
                     "Gifu", "Shizuoka", "Aichi", "Mie", "Shiga",
                     "Kyoto", "Osaka", "Hyogo", "Nara", "Wakayama",
                     "Tottori", "Shimane", "Okayama", "Hiroshima", "Yamaguchi",
                     "Tokushima", "Kagawa", "Ehime", "Kochi", "Fukuoka")
raw_suffix <- c("F", "M", "T")
agg_suffix <- c("female", "male", "total")
for (pref_idx in seq_along(jmd_prefectures)) {
  pref <- jmd_prefectures[pref_idx]
  # region codes are the zero-padded position in the vector: "01".."40"
  dum <- read.jpn_death(sprintf("%02d", pref_idx), pref)$Deaths
  for (j in 1:3) {
    # reshape the long sex column (3 = Female, 4 = Male, 5 = Total) into a
    # 111-age x year matrix
    counts <- matrix(dum[, j + 2], nrow = 111)
    assign(paste0(pref, "_count_", raw_suffix[j]), counts)
    # collapse ages 100-110 (rows 101:111) into one open 100+ row
    assign(paste0(pref, "_count_", agg_suffix[j]),
           rbind(counts[1:100, ], colSums(counts[101:111, ])))
  }
  rm(dum)
}
# tidy up the loop scaffolding so only the count objects remain
rm(jmd_prefectures, raw_suffix, agg_suffix, pref_idx, pref, j, counts)
# Saga
dum = read.jpn_death("41", "Saga")$Deaths
Saga_count_F = matrix(dum[,3], nrow=111)
Saga_count_M = matrix(dum[,4], nrow=111)
Saga_count_T = matrix(dum[,5], nrow=111)
Saga_count_female = rbind(Saga_count_F[1:100,], colSums(Saga_count_F[101:111,]))
Saga_count_male = rbind(Saga_count_M[1:100,], colSums(Saga_count_M[101:111,]))
Saga_count_total = rbind(Saga_count_T[1:100,], colSums(Saga_count_T[101:111,]))
rm(dum)
# Nagasaki
dum = read.jpn_death("42", "Nagasaki")$Deaths
Nagasaki_count_F = matrix(dum[,3], nrow=111)
Nagasaki_count_M = matrix(dum[,4], nrow=111)
Nagasaki_count_T = matrix(dum[,5], nrow=111)
Nagasaki_count_female = rbind(Nagasaki_count_F[1:100,], colSums(Nagasaki_count_F[101:111,]))
Nagasaki_count_male = rbind(Nagasaki_count_M[1:100,], colSums(Nagasaki_count_M[101:111,]))
Nagasaki_count_total = rbind(Nagasaki_count_T[1:100,], colSums(Nagasaki_count_T[101:111,]))
rm(dum)
# Kumamoto
dum = read.jpn_death("43", "Kumamoto")$Deaths
Kumamoto_count_F = matrix(dum[,3], nrow=111)
Kumamoto_count_M = matrix(dum[,4], nrow=111)
Kumamoto_count_T = matrix(dum[,5], nrow=111)
Kumamoto_count_female = rbind(Kumamoto_count_F[1:100,], colSums(Kumamoto_count_F[101:111,]))
Kumamoto_count_male = rbind(Kumamoto_count_M[1:100,], colSums(Kumamoto_count_M[101:111,]))
Kumamoto_count_total = rbind(Kumamoto_count_T[1:100,], colSums(Kumamoto_count_T[101:111,]))
rm(dum)
# Oita
dum = read.jpn_death("44", "Oita")$Deaths
Oita_count_F = matrix(dum[,3], nrow=111)
Oita_count_M = matrix(dum[,4], nrow=111)
Oita_count_T = matrix(dum[,5], nrow=111)
Oita_count_female = rbind(Oita_count_F[1:100,], colSums(Oita_count_F[101:111,]))
Oita_count_male = rbind(Oita_count_M[1:100,], colSums(Oita_count_M[101:111,]))
Oita_count_total = rbind(Oita_count_T[1:100,], colSums(Oita_count_T[101:111,]))
rm(dum)
# Miyazaki
dum = read.jpn_death("45", "Miyazaki")$Deaths
Miyazaki_count_F = matrix(dum[,3], nrow=111)
Miyazaki_count_M = matrix(dum[,4], nrow=111)
Miyazaki_count_T = matrix(dum[,5], nrow=111)
Miyazaki_count_female = rbind(Miyazaki_count_F[1:100,], colSums(Miyazaki_count_F[101:111,]))
Miyazaki_count_male = rbind(Miyazaki_count_M[1:100,], colSums(Miyazaki_count_M[101:111,]))
Miyazaki_count_total = rbind(Miyazaki_count_T[1:100,], colSums(Miyazaki_count_T[101:111,]))
rm(dum)
# Kagoshima
dum = read.jpn_death("46", "Kagoshima")$Deaths
Kagoshima_count_F = matrix(dum[,3], nrow=111)
Kagoshima_count_M = matrix(dum[,4], nrow=111)
Kagoshima_count_T = matrix(dum[,5], nrow=111)
Kagoshima_count_female = rbind(Kagoshima_count_F[1:100,], colSums(Kagoshima_count_F[101:111,]))
Kagoshima_count_male = rbind(Kagoshima_count_M[1:100,], colSums(Kagoshima_count_M[101:111,]))
Kagoshima_count_total = rbind(Kagoshima_count_T[1:100,], colSums(Kagoshima_count_T[101:111,]))
rm(dum)
# Okinawa
dum = read.jpn_death("47", "Okinawa")$Deaths
Okinawa_count_F = matrix(dum[,3], nrow=111)
Okinawa_count_M = matrix(dum[,4], nrow=111)
Okinawa_count_T = matrix(dum[,5], nrow=111)
Okinawa_count_female = rbind(Okinawa_count_F[1:100,], colSums(Okinawa_count_F[101:111,]))
Okinawa_count_male = rbind(Okinawa_count_M[1:100,], colSums(Okinawa_count_M[101:111,]))
Okinawa_count_total = rbind(Okinawa_count_T[1:100,], colSums(Okinawa_count_T[101:111,]))
rm(dum)
# rate
# Mortality rates: the national series is restricted to years 1975-2018 and
# ages 0-100; each prefectural series (JMD codes "01"-"47") is restricted to
# ages 0-100 only, as in the original per-line extraction. The `state` vector
# (defined earlier in this file) lists "Japan" followed by the 47 prefectures
# in code order, so the region code for state[ik] is simply ik - 1,
# zero-padded to two digits.
Japan <- extract.years(extract.ages(read.jpn("00", "Japan"), 0:100), 1975:2018)
for (ik in 2:48) {
  assign(state[ik],
         extract.ages(read.jpn(sprintf("%02d", ik - 1), state[ik]), 0:100))
}
# Sanity check: confirm the last available year is 2018 for every series.
# (The original comment said 2017, but the code tested 2018; the original
# loop also ran over 1:47, which covered Japan..Kagoshima and skipped
# Okinawa (state[48]) — presumably an off-by-one; all 48 series are
# checked here. TODO confirm against the data source.)
year_store <- vapply(state, function(nm) tail(get(nm)$year, 1), numeric(1))
all(year_store == 2018)
# Smooth every demogdata object with a penalized regression spline under a
# monotonic constraint, storing each result under the matching *_smooth name.
for (idx in seq_along(state_smooth)) {
  smoothed <- smooth.demogdata(get(state[idx]))
  assign(state_smooth[idx], smoothed)
  print(idx)  # progress indicator: smoothing is slow
}
##############
# Image plots
##############
# library() rather than require(): a missing package should fail loudly here,
# not return FALSE and crash later.
library(RColorBrewer)
# Log ratio of each prefecture's mortality rates to the national (Japan)
# rates, by sex. Array dimensions: age (0-100) x year (44 columns) x
# prefecture (47, in the order state[2:48]). Observed zero rates are set to
# NA first so that log() does not produce -Inf.
prefecture_total <- prefecture_female <- prefecture_male <-
  array(NA_real_, dim = c(101, 44, 47))
for (iw in 2:48) {
  pref_rate <- get(state[iw])$rate
  gettotal <- pref_rate$total
  gettotal[gettotal == 0] <- NA
  getmale <- pref_rate$male
  getmale[getmale == 0] <- NA
  getfemale <- pref_rate$female
  getfemale[getfemale == 0] <- NA
  prefecture_total[, , iw - 1] <- log(gettotal / Japan$rate$total)
  prefecture_female[, , iw - 1] <- log(getfemale / Japan$rate$female)
  prefecture_male[, , iw - 1] <- log(getmale / Japan$rate$male)
}
| /1_Preparation of mortality data.R | no_license | hanshang/Taylor_law | R | false | false | 31,316 | r | ###################################################
# read data from Japanese Human Mortality Database
###################################################
read.jpn_death <- function(region, label = region)
{
path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Deaths_1x1.txt", sep = "")
txt <- RCurl::getURL(path)
con <- textConnection(txt)
mx <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."), TRUE)
close(con)
if(class(mx) == "try-error")
stop("Connection error at www.mortality.org. Please check username, password and country label.")
path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Exposures_1x1.txt", sep = "")
txt <- RCurl::getURL(path)
con <- textConnection(txt)
pop <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."), TRUE)
close(con)
if(class(pop) == "try-error")
stop("Exposures file not found at www.mortality.org")
return(list(Deaths = mx, pop = pop))
}
read.jpn <- function (region, label = region)
{
path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Mx_1x1.txt", sep = "")
txt <- RCurl::getURL(path)
con <- textConnection(txt)
mx <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."),
TRUE)
close(con)
if (class(mx) == "try-error")
stop("Connection error at www.mortality.org. Please check username, password and country label.")
path <- paste("http://www.ipss.go.jp/p-toukei/JMD/", region, "/STATS/", "Exposures_1x1.txt", sep = "")
txt <- RCurl::getURL(path)
con <- textConnection(txt)
pop <- try(read.table(con, skip = 2, header = TRUE, na.strings = "."),
TRUE)
close(con)
if (class(pop) == "try-error")
stop("Exposures file not found at www.mortality.org")
obj <- list(type = "mortality", label = label, lambda = 0)
obj$year <- sort(unique(mx[, 1]))
n <- length(obj$year)
m <- length(unique(mx[, 2]))
obj$age <- mx[1:m, 2]
mnames <- names(mx)[-c(1, 2)]
n.mort <- length(mnames)
obj$rate <- obj$pop <- list()
for (i in 1:n.mort) {
obj$rate[[i]] <- matrix(mx[, i + 2], nrow = m, ncol = n)
obj$rate[[i]][obj$rate[[i]] < 0] <- NA
obj$pop[[i]] <- matrix(pop[, i + 2], nrow = m, ncol = n)
obj$pop[[i]][obj$pop[[i]] < 0] <- NA
dimnames(obj$rate[[i]]) <- dimnames(obj$pop[[i]]) <- list(obj$age,
obj$year)
}
names(obj$pop) = (names(obj$rate) <- tolower(mnames))
obj$age <- as.numeric(as.character(obj$age))
if (is.na(obj$age[m]))
obj$age[m] <- 2 * obj$age[m - 1] - obj$age[m - 2]
return(structure(obj, class = "demogdata"))
}
state = c("Japan", "Hokkaido", "Aomori", "Iwate", "Miyagi", "Akita", "Yamagata", "Fukushima",
"Ibaraki", "Tochigi", "Gunma", "Saitama", "Chiba", "Tokyo", "Kanagawa", "Niigata",
"Toyama", "Ishikawa", "Fukui", "Yamanashi", "Nagano", "Gifu", "Shizuoka", "Aichi",
"Mie", "Shiga", "Kyoto", "Osaka", "Hyogo", "Nara", "Wakayama", "Tottori", "Shimane",
"Okayama", "Hiroshima", "Yamaguchi", "Tokushima", "Kagawa", "Ehime", "Kochi",
"Fukuoka", "Saga", "Nagasaki", "Kumamoto", "Oita", "Miyazaki", "Kagoshima", "Okinawa")
state_smooth = c("Japan_smooth", "Hokkaido_smooth", "Aomori_smooth", "Iwate_smooth",
"Miyagi_smooth", "Akita_smooth", "Yamagata_smooth", "Fukushima_smooth",
"Ibaraki_smooth", "Tochigi_smooth", "Gunma_smooth", "Saitama_smooth",
"Chiba_smooth", "Tokyo_smooth", "Kanagawa_smooth", "Niigata_smooth",
"Toyama_smooth", "Ishikawa_smooth", "Fukui_smooth", "Yamanashi_smooth",
"Nagano_smooth", "Gifu_smooth", "Shizuoka_smooth", "Aichi_smooth",
"Mie_smooth", "Shiga_smooth", "Kyoto_smooth", "Osaka_smooth", "Hyogo_smooth",
"Nara_smooth", "Wakayama_smooth", "Tottori_smooth", "Shimane_smooth",
"Okayama_smooth", "Hiroshima_smooth", "Yamaguchi_smooth", "Tokushima_smooth",
"Kagawa_smooth", "Ehime_smooth", "Kochi_smooth", "Fukuoka_smooth", "Saga_smooth",
"Nagasaki_smooth", "Kumamoto_smooth", "Oita_smooth", "Miyazaki_smooth",
"Kagoshima_smooth", "Okinawa_smooth")
#################################################
# full raw data (1975 to 2013) for ages 0 to 100
#################################################
#######################################
# precise death counts (no repetition)
######################################
dum = read.jpn_death("00", "Japan")$Deaths
Japan_count_F = matrix(dum[3109:7770,3], nrow=111)
Japan_count_M = matrix(dum[3109:7770,4], nrow=111)
Japan_count_T = matrix(dum[3109:7770,5], nrow=111)
Japan_count_female = rbind(Japan_count_F[1:100,], colSums(Japan_count_F[101:111,]))
Japan_count_male = rbind(Japan_count_M[1:100,], colSums(Japan_count_M[101:111,]))
Japan_count_total = rbind(Japan_count_T[1:100,], colSums(Japan_count_T[101:111,]))
rm(dum)
# Hokkaido
dum = read.jpn_death("01", "Hokkaido")$Deaths
Hokkaido_count_F = matrix(dum[,3], nrow=111)
Hokkaido_count_M = matrix(dum[,4], nrow=111)
Hokkaido_count_T = matrix(dum[,5], nrow=111)
Hokkaido_count_female = rbind(Hokkaido_count_F[1:100,], colSums(Hokkaido_count_F[101:111,]))
Hokkaido_count_male = rbind(Hokkaido_count_M[1:100,], colSums(Hokkaido_count_M[101:111,]))
Hokkaido_count_total = rbind(Hokkaido_count_T[1:100,], colSums(Hokkaido_count_T[101:111,]))
rm(dum)
# Aomori
dum = read.jpn_death("02", "Aomori")$Deaths
Aomori_count_F = matrix(dum[,3], nrow=111)
Aomori_count_M = matrix(dum[,4], nrow=111)
Aomori_count_T = matrix(dum[,5], nrow=111)
Aomori_count_female = rbind(Aomori_count_F[1:100,], colSums(Aomori_count_F[101:111,]))
Aomori_count_male = rbind(Aomori_count_M[1:100,], colSums(Aomori_count_M[101:111,]))
Aomori_count_total = rbind(Aomori_count_T[1:100,], colSums(Aomori_count_T[101:111,]))
rm(dum)
# Iwate
dum = read.jpn_death("03", "Iwate")$Deaths
Iwate_count_F = matrix(dum[,3], nrow=111)
Iwate_count_M = matrix(dum[,4], nrow=111)
Iwate_count_T = matrix(dum[,5], nrow=111)
Iwate_count_female = rbind(Iwate_count_F[1:100,], colSums(Iwate_count_F[101:111,]))
Iwate_count_male = rbind(Iwate_count_M[1:100,], colSums(Iwate_count_M[101:111,]))
Iwate_count_total = rbind(Iwate_count_T[1:100,], colSums(Iwate_count_T[101:111,]))
rm(dum)
# Miyagi
dum = read.jpn_death("04", "Miyagi")$Deaths
Miyagi_count_F = matrix(dum[,3], nrow=111)
Miyagi_count_M = matrix(dum[,4], nrow=111)
Miyagi_count_T = matrix(dum[,5], nrow=111)
Miyagi_count_female = rbind(Miyagi_count_F[1:100,], colSums(Miyagi_count_F[101:111,]))
Miyagi_count_male = rbind(Miyagi_count_M[1:100,], colSums(Miyagi_count_M[101:111,]))
Miyagi_count_total = rbind(Miyagi_count_T[1:100,], colSums(Miyagi_count_T[101:111,]))
rm(dum)
# Akita
dum = read.jpn_death("05", "Akita")$Deaths
Akita_count_F = matrix(dum[,3], nrow=111)
Akita_count_M = matrix(dum[,4], nrow=111)
Akita_count_T = matrix(dum[,5], nrow=111)
Akita_count_female = rbind(Akita_count_F[1:100,], colSums(Akita_count_F[101:111,]))
Akita_count_male = rbind(Akita_count_M[1:100,], colSums(Akita_count_M[101:111,]))
Akita_count_total = rbind(Akita_count_T[1:100,], colSums(Akita_count_T[101:111,]))
rm(dum)
# Yamagata
dum = read.jpn_death("06", "Yamagata")$Deaths
Yamagata_count_F = matrix(dum[,3], nrow=111)
Yamagata_count_M = matrix(dum[,4], nrow=111)
Yamagata_count_T = matrix(dum[,5], nrow=111)
Yamagata_count_female = rbind(Yamagata_count_F[1:100,], colSums(Yamagata_count_F[101:111,]))
Yamagata_count_male = rbind(Yamagata_count_M[1:100,], colSums(Yamagata_count_M[101:111,]))
Yamagata_count_total = rbind(Yamagata_count_T[1:100,], colSums(Yamagata_count_T[101:111,]))
rm(dum)
# Fukushima
dum = read.jpn_death("07", "Fukushima")$Deaths
Fukushima_count_F = matrix(dum[,3], nrow=111)
Fukushima_count_M = matrix(dum[,4], nrow=111)
Fukushima_count_T = matrix(dum[,5], nrow=111)
Fukushima_count_female = rbind(Fukushima_count_F[1:100,], colSums(Fukushima_count_F[101:111,]))
Fukushima_count_male = rbind(Fukushima_count_M[1:100,], colSums(Fukushima_count_M[101:111,]))
Fukushima_count_total = rbind(Fukushima_count_T[1:100,], colSums(Fukushima_count_T[101:111,]))
rm(dum)
# Ibaraki
dum = read.jpn_death("08", "Ibaraki")$Deaths
Ibaraki_count_F = matrix(dum[,3], nrow=111)
Ibaraki_count_M = matrix(dum[,4], nrow=111)
Ibaraki_count_T = matrix(dum[,5], nrow=111)
Ibaraki_count_female = rbind(Ibaraki_count_F[1:100,], colSums(Ibaraki_count_F[101:111,]))
Ibaraki_count_male = rbind(Ibaraki_count_M[1:100,], colSums(Ibaraki_count_M[101:111,]))
Ibaraki_count_total = rbind(Ibaraki_count_T[1:100,], colSums(Ibaraki_count_T[101:111,]))
rm(dum)
# Tochigi
dum = read.jpn_death("09", "Tochigi")$Deaths
Tochigi_count_F = matrix(dum[,3], nrow=111)
Tochigi_count_M = matrix(dum[,4], nrow=111)
Tochigi_count_T = matrix(dum[,5], nrow=111)
Tochigi_count_female = rbind(Tochigi_count_F[1:100,], colSums(Tochigi_count_F[101:111,]))
Tochigi_count_male = rbind(Tochigi_count_M[1:100,], colSums(Tochigi_count_M[101:111,]))
Tochigi_count_total = rbind(Tochigi_count_T[1:100,], colSums(Tochigi_count_T[101:111,]))
rm(dum)
# Gunma
dum = read.jpn_death("10", "Gunma")$Deaths
Gunma_count_F = matrix(dum[,3], nrow=111)
Gunma_count_M = matrix(dum[,4], nrow=111)
Gunma_count_T = matrix(dum[,5], nrow=111)
Gunma_count_female = rbind(Gunma_count_F[1:100,], colSums(Gunma_count_F[101:111,]))
Gunma_count_male = rbind(Gunma_count_M[1:100,], colSums(Gunma_count_M[101:111,]))
Gunma_count_total = rbind(Gunma_count_T[1:100,], colSums(Gunma_count_T[101:111,]))
rm(dum)
# Saitama
dum = read.jpn_death("11", "Saitama")$Deaths
Saitama_count_F = matrix(dum[,3], nrow=111)
Saitama_count_M = matrix(dum[,4], nrow=111)
Saitama_count_T = matrix(dum[,5], nrow=111)
Saitama_count_female = rbind(Saitama_count_F[1:100,], colSums(Saitama_count_F[101:111,]))
Saitama_count_male = rbind(Saitama_count_M[1:100,], colSums(Saitama_count_M[101:111,]))
Saitama_count_total = rbind(Saitama_count_T[1:100,], colSums(Saitama_count_T[101:111,]))
rm(dum)
# Chiba
dum = read.jpn_death("12", "Chiba")$Deaths
Chiba_count_F = matrix(dum[,3], nrow=111)
Chiba_count_M = matrix(dum[,4], nrow=111)
Chiba_count_T = matrix(dum[,5], nrow=111)
Chiba_count_female = rbind(Chiba_count_F[1:100,], colSums(Chiba_count_F[101:111,]))
Chiba_count_male = rbind(Chiba_count_M[1:100,], colSums(Chiba_count_M[101:111,]))
Chiba_count_total = rbind(Chiba_count_T[1:100,], colSums(Chiba_count_T[101:111,]))
rm(dum)
# Tokyo
dum = read.jpn_death("13", "Tokyo")$Deaths
Tokyo_count_F = matrix(dum[,3], nrow=111)
Tokyo_count_M = matrix(dum[,4], nrow=111)
Tokyo_count_T = matrix(dum[,5], nrow=111)
Tokyo_count_female = rbind(Tokyo_count_F[1:100,], colSums(Tokyo_count_F[101:111,]))
Tokyo_count_male = rbind(Tokyo_count_M[1:100,], colSums(Tokyo_count_M[101:111,]))
Tokyo_count_total = rbind(Tokyo_count_T[1:100,], colSums(Tokyo_count_T[101:111,]))
rm(dum)
# Kanagawa
dum = read.jpn_death("14", "Kanagawa")$Deaths
Kanagawa_count_F = matrix(dum[,3], nrow=111)
Kanagawa_count_M = matrix(dum[,4], nrow=111)
Kanagawa_count_T = matrix(dum[,5], nrow=111)
Kanagawa_count_female = rbind(Kanagawa_count_F[1:100,], colSums(Kanagawa_count_F[101:111,]))
Kanagawa_count_male = rbind(Kanagawa_count_M[1:100,], colSums(Kanagawa_count_M[101:111,]))
Kanagawa_count_total = rbind(Kanagawa_count_T[1:100,], colSums(Kanagawa_count_T[101:111,]))
rm(dum)
# Niigata
dum = read.jpn_death("15", "Niigata")$Deaths
Niigata_count_F = matrix(dum[,3], nrow=111)
Niigata_count_M = matrix(dum[,4], nrow=111)
Niigata_count_T = matrix(dum[,5], nrow=111)
Niigata_count_female = rbind(Niigata_count_F[1:100,], colSums(Niigata_count_F[101:111,]))
Niigata_count_male = rbind(Niigata_count_M[1:100,], colSums(Niigata_count_M[101:111,]))
Niigata_count_total = rbind(Niigata_count_T[1:100,], colSums(Niigata_count_T[101:111,]))
rm(dum)
# Toyama
dum = read.jpn_death("16", "Toyama")$Deaths
Toyama_count_F = matrix(dum[,3], nrow=111)
Toyama_count_M = matrix(dum[,4], nrow=111)
Toyama_count_T = matrix(dum[,5], nrow=111)
Toyama_count_female = rbind(Toyama_count_F[1:100,], colSums(Toyama_count_F[101:111,]))
Toyama_count_male = rbind(Toyama_count_M[1:100,], colSums(Toyama_count_M[101:111,]))
Toyama_count_total = rbind(Toyama_count_T[1:100,], colSums(Toyama_count_T[101:111,]))
rm(dum)
# Ishikawa
dum = read.jpn_death("17", "Ishikawa")$Deaths
Ishikawa_count_F = matrix(dum[,3], nrow=111)
Ishikawa_count_M = matrix(dum[,4], nrow=111)
Ishikawa_count_T = matrix(dum[,5], nrow=111)
Ishikawa_count_female = rbind(Ishikawa_count_F[1:100,], colSums(Ishikawa_count_F[101:111,]))
Ishikawa_count_male = rbind(Ishikawa_count_M[1:100,], colSums(Ishikawa_count_M[101:111,]))
Ishikawa_count_total = rbind(Ishikawa_count_T[1:100,], colSums(Ishikawa_count_T[101:111,]))
rm(dum)
# Fukui
dum = read.jpn_death("18", "Fukui")$Deaths
Fukui_count_F = matrix(dum[,3], nrow=111)
Fukui_count_M = matrix(dum[,4], nrow=111)
Fukui_count_T = matrix(dum[,5], nrow=111)
Fukui_count_female = rbind(Fukui_count_F[1:100,], colSums(Fukui_count_F[101:111,]))
Fukui_count_male = rbind(Fukui_count_M[1:100,], colSums(Fukui_count_M[101:111,]))
Fukui_count_total = rbind(Fukui_count_T[1:100,], colSums(Fukui_count_T[101:111,]))
rm(dum)
# Yamanashi
dum = read.jpn_death("19", "Yamanashi")$Deaths
Yamanashi_count_F = matrix(dum[,3], nrow=111)
Yamanashi_count_M = matrix(dum[,4], nrow=111)
Yamanashi_count_T = matrix(dum[,5], nrow=111)
Yamanashi_count_female = rbind(Yamanashi_count_F[1:100,], colSums(Yamanashi_count_F[101:111,]))
Yamanashi_count_male = rbind(Yamanashi_count_M[1:100,], colSums(Yamanashi_count_M[101:111,]))
Yamanashi_count_total = rbind(Yamanashi_count_T[1:100,], colSums(Yamanashi_count_T[101:111,]))
rm(dum)
# Nagano
dum = read.jpn_death("20", "Nagano")$Deaths
Nagano_count_F = matrix(dum[,3], nrow=111)
Nagano_count_M = matrix(dum[,4], nrow=111)
Nagano_count_T = matrix(dum[,5], nrow=111)
Nagano_count_female = rbind(Nagano_count_F[1:100,], colSums(Nagano_count_F[101:111,]))
Nagano_count_male = rbind(Nagano_count_M[1:100,], colSums(Nagano_count_M[101:111,]))
Nagano_count_total = rbind(Nagano_count_T[1:100,], colSums(Nagano_count_T[101:111,]))
rm(dum)
# Gifu
dum = read.jpn_death("21", "Gifu")$Deaths
Gifu_count_F = matrix(dum[,3], nrow=111)
Gifu_count_M = matrix(dum[,4], nrow=111)
Gifu_count_T = matrix(dum[,5], nrow=111)
Gifu_count_female = rbind(Gifu_count_F[1:100,], colSums(Gifu_count_F[101:111,]))
Gifu_count_male = rbind(Gifu_count_M[1:100,], colSums(Gifu_count_M[101:111,]))
Gifu_count_total = rbind(Gifu_count_T[1:100,], colSums(Gifu_count_T[101:111,]))
rm(dum)
# Shizuoka
dum = read.jpn_death("22", "Shizuoka")$Deaths
Shizuoka_count_F = matrix(dum[,3], nrow=111)
Shizuoka_count_M = matrix(dum[,4], nrow=111)
Shizuoka_count_T = matrix(dum[,5], nrow=111)
Shizuoka_count_female = rbind(Shizuoka_count_F[1:100,], colSums(Shizuoka_count_F[101:111,]))
Shizuoka_count_male = rbind(Shizuoka_count_M[1:100,], colSums(Shizuoka_count_M[101:111,]))
Shizuoka_count_total = rbind(Shizuoka_count_T[1:100,], colSums(Shizuoka_count_T[101:111,]))
rm(dum)
# Aichi
dum = read.jpn_death("23", "Aichi")$Deaths
Aichi_count_F = matrix(dum[,3], nrow=111)
Aichi_count_M = matrix(dum[,4], nrow=111)
Aichi_count_T = matrix(dum[,5], nrow=111)
Aichi_count_female = rbind(Aichi_count_F[1:100,], colSums(Aichi_count_F[101:111,]))
Aichi_count_male = rbind(Aichi_count_M[1:100,], colSums(Aichi_count_M[101:111,]))
Aichi_count_total = rbind(Aichi_count_T[1:100,], colSums(Aichi_count_T[101:111,]))
rm(dum)
# Mie
dum = read.jpn_death("24", "Mie")$Deaths
Mie_count_F = matrix(dum[,3], nrow=111)
Mie_count_M = matrix(dum[,4], nrow=111)
Mie_count_T = matrix(dum[,5], nrow=111)
Mie_count_female = rbind(Mie_count_F[1:100,], colSums(Mie_count_F[101:111,]))
Mie_count_male = rbind(Mie_count_M[1:100,], colSums(Mie_count_M[101:111,]))
Mie_count_total = rbind(Mie_count_T[1:100,], colSums(Mie_count_T[101:111,]))
rm(dum)
# Shiga
dum = read.jpn_death("25", "Shiga")$Deaths
Shiga_count_F = matrix(dum[,3], nrow=111)
Shiga_count_M = matrix(dum[,4], nrow=111)
Shiga_count_T = matrix(dum[,5], nrow=111)
Shiga_count_female = rbind(Shiga_count_F[1:100,], colSums(Shiga_count_F[101:111,]))
Shiga_count_male = rbind(Shiga_count_M[1:100,], colSums(Shiga_count_M[101:111,]))
Shiga_count_total = rbind(Shiga_count_T[1:100,], colSums(Shiga_count_T[101:111,]))
rm(dum)
# Kyoto
dum = read.jpn_death("26", "Kyoto")$Deaths
Kyoto_count_F = matrix(dum[,3], nrow=111)
Kyoto_count_M = matrix(dum[,4], nrow=111)
Kyoto_count_T = matrix(dum[,5], nrow=111)
Kyoto_count_female = rbind(Kyoto_count_F[1:100,], colSums(Kyoto_count_F[101:111,]))
Kyoto_count_male = rbind(Kyoto_count_M[1:100,], colSums(Kyoto_count_M[101:111,]))
Kyoto_count_total = rbind(Kyoto_count_T[1:100,], colSums(Kyoto_count_T[101:111,]))
rm(dum)
# Osaka
dum = read.jpn_death("27", "Osaka")$Deaths
Osaka_count_F = matrix(dum[,3], nrow=111)
Osaka_count_M = matrix(dum[,4], nrow=111)
Osaka_count_T = matrix(dum[,5], nrow=111)
Osaka_count_female = rbind(Osaka_count_F[1:100,], colSums(Osaka_count_F[101:111,]))
Osaka_count_male = rbind(Osaka_count_M[1:100,], colSums(Osaka_count_M[101:111,]))
Osaka_count_total = rbind(Osaka_count_T[1:100,], colSums(Osaka_count_T[101:111,]))
rm(dum)
# Hyogo
dum = read.jpn_death("28", "Hyogo")$Deaths
Hyogo_count_F = matrix(dum[,3], nrow=111)
Hyogo_count_M = matrix(dum[,4], nrow=111)
Hyogo_count_T = matrix(dum[,5], nrow=111)
Hyogo_count_female = rbind(Hyogo_count_F[1:100,], colSums(Hyogo_count_F[101:111,]))
Hyogo_count_male = rbind(Hyogo_count_M[1:100,], colSums(Hyogo_count_M[101:111,]))
Hyogo_count_total = rbind(Hyogo_count_T[1:100,], colSums(Hyogo_count_T[101:111,]))
rm(dum)
# Nara
dum = read.jpn_death("29", "Nara")$Deaths
Nara_count_F = matrix(dum[,3], nrow=111)
Nara_count_M = matrix(dum[,4], nrow=111)
Nara_count_T = matrix(dum[,5], nrow=111)
Nara_count_female = rbind(Nara_count_F[1:100,], colSums(Nara_count_F[101:111,]))
Nara_count_male = rbind(Nara_count_M[1:100,], colSums(Nara_count_M[101:111,]))
Nara_count_total = rbind(Nara_count_T[1:100,], colSums(Nara_count_T[101:111,]))
rm(dum)
# Wakayama
dum = read.jpn_death("30", "Wakayama")$Deaths
Wakayama_count_F = matrix(dum[,3], nrow=111)
Wakayama_count_M = matrix(dum[,4], nrow=111)
Wakayama_count_T = matrix(dum[,5], nrow=111)
Wakayama_count_female = rbind(Wakayama_count_F[1:100,], colSums(Wakayama_count_F[101:111,]))
Wakayama_count_male = rbind(Wakayama_count_M[1:100,], colSums(Wakayama_count_M[101:111,]))
Wakayama_count_total = rbind(Wakayama_count_T[1:100,], colSums(Wakayama_count_T[101:111,]))
rm(dum)
# Tottori
dum = read.jpn_death("31", "Tottori")$Deaths
Tottori_count_F = matrix(dum[,3], nrow=111)
Tottori_count_M = matrix(dum[,4], nrow=111)
Tottori_count_T = matrix(dum[,5], nrow=111)
Tottori_count_female = rbind(Tottori_count_F[1:100,], colSums(Tottori_count_F[101:111,]))
Tottori_count_male = rbind(Tottori_count_M[1:100,], colSums(Tottori_count_M[101:111,]))
Tottori_count_total = rbind(Tottori_count_T[1:100,], colSums(Tottori_count_T[101:111,]))
rm(dum)
# Shimane
dum = read.jpn_death("32", "Shimane")$Deaths
Shimane_count_F = matrix(dum[,3], nrow=111)
Shimane_count_M = matrix(dum[,4], nrow=111)
Shimane_count_T = matrix(dum[,5], nrow=111)
Shimane_count_female = rbind(Shimane_count_F[1:100,], colSums(Shimane_count_F[101:111,]))
Shimane_count_male = rbind(Shimane_count_M[1:100,], colSums(Shimane_count_M[101:111,]))
Shimane_count_total = rbind(Shimane_count_T[1:100,], colSums(Shimane_count_T[101:111,]))
rm(dum)
# Okayama
dum = read.jpn_death("33", "Okayama")$Deaths
Okayama_count_F = matrix(dum[,3], nrow=111)
Okayama_count_M = matrix(dum[,4], nrow=111)
Okayama_count_T = matrix(dum[,5], nrow=111)
Okayama_count_female = rbind(Okayama_count_F[1:100,], colSums(Okayama_count_F[101:111,]))
Okayama_count_male = rbind(Okayama_count_M[1:100,], colSums(Okayama_count_M[101:111,]))
Okayama_count_total = rbind(Okayama_count_T[1:100,], colSums(Okayama_count_T[101:111,]))
rm(dum)
# Hiroshima
dum = read.jpn_death("34", "Hiroshima")$Deaths
Hiroshima_count_F = matrix(dum[,3], nrow=111)
Hiroshima_count_M = matrix(dum[,4], nrow=111)
Hiroshima_count_T = matrix(dum[,5], nrow=111)
Hiroshima_count_female = rbind(Hiroshima_count_F[1:100,], colSums(Hiroshima_count_F[101:111,]))
Hiroshima_count_male = rbind(Hiroshima_count_M[1:100,], colSums(Hiroshima_count_M[101:111,]))
Hiroshima_count_total = rbind(Hiroshima_count_T[1:100,], colSums(Hiroshima_count_T[101:111,]))
rm(dum)
# Yamaguchi
dum = read.jpn_death("35", "Yamaguchi")$Deaths
Yamaguchi_count_F = matrix(dum[,3], nrow=111)
Yamaguchi_count_M = matrix(dum[,4], nrow=111)
Yamaguchi_count_T = matrix(dum[,5], nrow=111)
Yamaguchi_count_female = rbind(Yamaguchi_count_F[1:100,], colSums(Yamaguchi_count_F[101:111,]))
Yamaguchi_count_male = rbind(Yamaguchi_count_M[1:100,], colSums(Yamaguchi_count_M[101:111,]))
Yamaguchi_count_total = rbind(Yamaguchi_count_T[1:100,], colSums(Yamaguchi_count_T[101:111,]))
rm(dum)
# Tokushima
dum = read.jpn_death("36", "Tokushima")$Deaths
Tokushima_count_F = matrix(dum[,3], nrow=111)
Tokushima_count_M = matrix(dum[,4], nrow=111)
Tokushima_count_T = matrix(dum[,5], nrow=111)
Tokushima_count_female = rbind(Tokushima_count_F[1:100,], colSums(Tokushima_count_F[101:111,]))
Tokushima_count_male = rbind(Tokushima_count_M[1:100,], colSums(Tokushima_count_M[101:111,]))
Tokushima_count_total = rbind(Tokushima_count_T[1:100,], colSums(Tokushima_count_T[101:111,]))
rm(dum)
# Kagawa
dum = read.jpn_death("37", "Kagawa")$Deaths
Kagawa_count_F = matrix(dum[,3], nrow=111)
Kagawa_count_M = matrix(dum[,4], nrow=111)
Kagawa_count_T = matrix(dum[,5], nrow=111)
Kagawa_count_female = rbind(Kagawa_count_F[1:100,], colSums(Kagawa_count_F[101:111,]))
Kagawa_count_male = rbind(Kagawa_count_M[1:100,], colSums(Kagawa_count_M[101:111,]))
Kagawa_count_total = rbind(Kagawa_count_T[1:100,], colSums(Kagawa_count_T[101:111,]))
rm(dum)
# Ehime
dum = read.jpn_death("38", "Ehime")$Deaths
Ehime_count_F = matrix(dum[,3], nrow=111)
Ehime_count_M = matrix(dum[,4], nrow=111)
Ehime_count_T = matrix(dum[,5], nrow=111)
Ehime_count_female = rbind(Ehime_count_F[1:100,], colSums(Ehime_count_F[101:111,]))
Ehime_count_male = rbind(Ehime_count_M[1:100,], colSums(Ehime_count_M[101:111,]))
Ehime_count_total = rbind(Ehime_count_T[1:100,], colSums(Ehime_count_T[101:111,]))
rm(dum)
# Death counts for the remaining prefectures (Kochi .. Okinawa).
# The earlier prefectures (Hokkaido .. Ehime) are handled above by the
# identical per-prefecture pattern; this loop replaces nine verbatim copies.
# For each prefecture it creates exactly the same global objects as before:
#   <pref>_count_F / _M / _T          raw 111 x year count matrices (ages 0-110)
#   <pref>_count_female/_male/_total  ages 0-99 plus a pooled open 100+ group
death_codes <- c(Kochi = "39", Fukuoka = "40", Saga = "41", Nagasaki = "42",
                 Kumamoto = "43", Oita = "44", Miyazaki = "45",
                 Kagoshima = "46", Okinawa = "47")
for (pref in names(death_codes)) {
  dum <- read.jpn_death(death_codes[[pref]], pref)$Deaths
  # columns 3/4/5 of the Deaths table are female / male / total counts
  cnt_F <- matrix(dum[, 3], nrow = 111)
  cnt_M <- matrix(dum[, 4], nrow = 111)
  cnt_T <- matrix(dum[, 5], nrow = 111)
  assign(paste0(pref, "_count_F"), cnt_F)
  assign(paste0(pref, "_count_M"), cnt_M)
  assign(paste0(pref, "_count_T"), cnt_T)
  # collapse ages 100-110 into a single open-ended 100+ age group
  assign(paste0(pref, "_count_female"), rbind(cnt_F[1:100, ], colSums(cnt_F[101:111, ])))
  assign(paste0(pref, "_count_male"), rbind(cnt_M[1:100, ], colSums(cnt_M[101:111, ])))
  assign(paste0(pref, "_count_total"), rbind(cnt_T[1:100, ], colSums(cnt_T[101:111, ])))
}
rm(dum, cnt_F, cnt_M, cnt_T, pref, death_codes)
# rate
# National series: ages 0-100, restricted to 1975-2018.
Japan = extract.years(extract.ages(read.jpn("00", "Japan"), 0:100), 1975:2018)
# The 47 prefectures in official order; the JMD area code of prefecture k is
# simply the zero-padded index sprintf("%02d", k), so one loop replaces the
# 47 near-identical assignments.
jpn_prefectures <- c("Hokkaido", "Aomori", "Iwate", "Miyagi", "Akita",
                     "Yamagata", "Fukushima", "Ibaraki", "Tochigi", "Gunma",
                     "Saitama", "Chiba", "Tokyo", "Kanagawa", "Niigata",
                     "Toyama", "Ishikawa", "Fukui", "Yamanashi", "Nagano",
                     "Gifu", "Shizuoka", "Aichi", "Mie", "Shiga",
                     "Kyoto", "Osaka", "Hyogo", "Nara", "Wakayama",
                     "Tottori", "Shimane", "Okayama", "Hiroshima", "Yamaguchi",
                     "Tokushima", "Kagawa", "Ehime", "Kochi", "Fukuoka",
                     "Saga", "Nagasaki", "Kumamoto", "Oita", "Miyazaki",
                     "Kagoshima", "Okinawa")
for (ip in seq_along(jpn_prefectures)) {
  assign(jpn_prefectures[ip],
         extract.ages(read.jpn(sprintf("%02d", ip), jpn_prefectures[ip]), 0:100))
}
rm(ip, jpn_prefectures)
# check that the last available year is 2018 for all 47 prefectures
# ("state" is the vector of prefecture object names, defined elsewhere in this file)
year_store = vector("numeric",47)
for(ik in 1:47)
{
year_store[ik] = tail(get(state[ik])$year, 1)
}
all(year_store == 2018)
# smoothed functional curves using penalized regression spline with monotonic constraint
# (48 series: presumably Japan plus the 47 prefectures -- confirm against the
# definition of "state"/"state_smooth"; results are stored under the names in
# "state_smooth" via assign)
for(ik in 1:48)
{
assign(state_smooth[ik], smooth.demogdata(get(state[ik])))
print(ik)
}
##############
# Image plots
##############
require(RColorBrewer)
# Log ratio between each prefecture's mortality rate and the national (Japan)
# rate. Dimensions 101 ages x 44 years x 47 prefectures; 44 matches Japan's
# 1975:2018 span -- assumes each prefecture's rate matrix covers the same
# years (TODO confirm upstream extraction).
prefecture_total = prefecture_female = prefecture_male = array(, dim = c(101, 44, 47))
for(iw in 2:48)
{
# zero rates would produce log(0) = -Inf, so blank them out first
gettotal <- get(state[iw])$rate$total
gettotal[gettotal==0] <- NA
getmale <- get(state[iw])$rate$male
getmale[getmale==0] <- NA
getfemale <- get(state[iw])$rate$female
getfemale[getfemale==0] <- NA
prefecture_total[,,iw-1] = log(gettotal/Japan$rate$total)
prefecture_female[,,iw-1] = log(getfemale/Japan$rate$female)
prefecture_male[,,iw-1] = log(getmale/Japan$rate$male)
}
|
## Loop through the attached directories named in `where` (default: the whole
## search path) looking for objects whose names match a regular expression.
## Returns a named list: one character vector of matching names per directory.
objip <-
    function(pattern, where = search(), all.names=FALSE, mode="any", class, ## , sorted=TRUE (requires R-3.2.0)
             ls.function=if (mode != "any" || !missing(class)) "ls.str" else "ls")
{
  ## ls.str is needed whenever we filter on mode or class; plain ls otherwise.
  ls.function <- match.arg(ls.function, c("ls", "ls.str"))
  result <- list()
  for (w in where) {
    ## Translate the environment name into its current search() position.
    ## (The previous version indexed `where` by the search() position, which
    ## is wrong whenever `where` is not a prefix of search().)
    i <- match(w, search())
    if (is.na(i)) next  ## silently skip entries that are not attached
    obj <-
      if (ls.function=="ls")
        ls(pos=i, pattern = pattern, all.names=all.names) ## , sorted=sorted
      else
        ls.str(pos=i, pattern = pattern, all.names=all.names, mode=mode)
    if (length(obj) > 0)
      result[[w]] <- obj
  }
  ## Optional second pass: keep only objects inheriting from `class`.
  if (ls.function=="ls.str" && !missing(class))
    for (nm in names(result)) {
      keep <- sapply(result[[nm]], function(x, class) is(get(x), class), class)
      result[[nm]] <- result[[nm]][keep]
      if (length(result[[nm]]) == 0) result[[nm]] <- NULL
    }
  result
}
## Usage examples, kept out of the run-time path: the condition is literally
## FALSE, so this block is parsed but never executed.
if (FALSE) {
objip(pat="AE")
objip(pat="AE", class="data.frame")
objip(pat="AE", mode="function")
objip(pat="AE", class="function")
}
| /R/objip.R | no_license | cran/HH | R | false | false | 1,162 | r | ## Loop through all attached directories looking for
## regular expression pattern.
objip <-
function(pattern, where = search(), all.names=FALSE, mode="any", class, ## , sorted=TRUE (requires R-3.2.0)a
ls.function=if (mode != "any" || !missing(class)) "ls.str" else "ls")
{
ls.function <- match.arg(ls.function, c("ls", "ls.str"))
result <- list()
for(i in match(where, search())) {
obj <-
if (ls.function=="ls")
ls(pos=i, pattern = pattern, all.names=all.names) ## , sorted=sorted
else
ls.str(pos=i, pattern = pattern, all.names=all.names, mode=mode)
if(length(obj) > 0)
result[[where[i]]] <- obj
}
if (ls.function=="ls.str" && !missing(class))
for (i in names(result)) {
keep <- sapply(result[[i]], function(x, class) is(get(x), class), class)
result[[i]] <- result[[i]][keep]
if (length(result[[i]]) == 0) result[[i]] <- NULL
}
result
}
if (FALSE) {
objip(pat="AE")
objip(pat="AE", class="data.frame")
objip(pat="AE", mode="function")
objip(pat="AE", class="function")
}
|
#' @importFrom utils packageDescription
.onAttach <- function(libname = find.package("ipmr"), pkgname = "ipmr") {
  # One-time banner shown when the package is attached. Emitted through
  # packageStartupMessage() so suppressPackageStartupMessages() can silence it.
  startup_note <- "Welcome to `ipmr`! `browseVignettes('ipmr')` to get started."
  packageStartupMessage(startup_note)
}
| /R/on_load_on_attach.R | permissive | padrinoDB/ipmr | R | false | false | 221 | r | #' @importFrom utils packageDescription
.onAttach <- function(libname = find.package("ipmr"), pkgname = "ipmr") {
packageStartupMessage(
"Welcome to `ipmr`! `browseVignettes('ipmr')` to get started."
)
}
|
# Tests for live::fit_explanation() and its plot method, on a simulated
# 50 x 10 standard-normal data set.
context("Fitting and plotting explanations")
set.seed(1)
X <- tibble::as_tibble(MASS::mvrnorm(50, rep(0, 10), diag(1, 10)))
# Build a local explanation of V1 around observation 3.
sim_neighbourhood <- live::sample_locally(
  data = X,
  explained_instance = X[3, ],
  explained_var = "V1",
  size = 50
)
sim_with_preds <- live::add_predictions(sim_neighbourhood, "regr.lm", X)
explanation <- live::fit_explanation(sim_with_preds, "regr.lm")
test_that("White box model is fitted correctly", {
  expect_is(explanation, "live_explainer")
  expect_is(mlr::getLearnerModel(explanation$model), "lm")
})
test_that("Plots are created without problems", {
  # regexp = NA asserts that the call produces no output
  expect_output(plot(explanation, type = "waterfall"), regexp = NA)
  expect_output(plot(explanation, type = "forest"), regexp = NA)
})
| /tests/testthat/test_explaining.R | no_license | GapData/live | R | false | false | 791 | r | context("Fitting and plotting explanations")
set.seed(1)
X <- tibble::as_tibble(MASS::mvrnorm(50, rep(0, 10), diag(1, 10)))
local <- live::sample_locally(data = X,
explained_instance = X[3, ],
explained_var = "V1",
size = 50)
local1 <- live::add_predictions(local, "regr.lm", X)
local_explained <- live::fit_explanation(local1, "regr.lm")
test_that("White box model is fitted correctly", {
expect_is(local_explained, "live_explainer")
expect_is(mlr::getLearnerModel(local_explained$model), "lm")
})
test_that("Plots are created without problems", {
expect_output(plot(local_explained, type = "waterfall"), regexp = NA)
expect_output(plot(local_explained, type = "forest"), regexp = NA)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neptune_operations.R
\name{neptune_copy_db_cluster_parameter_group}
\alias{neptune_copy_db_cluster_parameter_group}
\title{Copies the specified DB cluster parameter group}
\usage{
neptune_copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier,
TargetDBClusterParameterGroupIdentifier,
TargetDBClusterParameterGroupDescription, Tags)
}
\arguments{
\item{SourceDBClusterParameterGroupIdentifier}{[required] The identifier or Amazon Resource Name (ARN) for the source DB cluster
parameter group. For information about creating an ARN, see
\href{https://docs.aws.amazon.com/neptune/latest/userguide/#tagging.ARN.Constructing}{Constructing an Amazon Resource Name (ARN)}.
Constraints:
\itemize{
\item Must specify a valid DB cluster parameter group.
\item If the source DB cluster parameter group is in the same AWS Region
as the copy, specify a valid DB parameter group identifier, for
example \code{my-db-cluster-param-group}, or a valid ARN.
\item If the source DB parameter group is in a different AWS Region than
the copy, specify a valid DB cluster parameter group ARN, for
example
\code{arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1}.
}}
\item{TargetDBClusterParameterGroupIdentifier}{[required] The identifier for the copied DB cluster parameter group.
Constraints:
\itemize{
\item Cannot be null, empty, or blank
\item Must contain from 1 to 255 letters, numbers, or hyphens
\item First character must be a letter
\item Cannot end with a hyphen or contain two consecutive hyphens
}
Example: \code{my-cluster-param-group1}}
\item{TargetDBClusterParameterGroupDescription}{[required] A description for the copied DB cluster parameter group.}
\item{Tags}{The tags to be assigned to the copied DB cluster parameter group.}
}
\value{
A list with the following syntax:\preformatted{list(
DBClusterParameterGroup = list(
DBClusterParameterGroupName = "string",
DBParameterGroupFamily = "string",
Description = "string",
DBClusterParameterGroupArn = "string"
)
)
}
}
\description{
Copies the specified DB cluster parameter group.
}
\section{Request syntax}{
\preformatted{svc$copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier = "string",
TargetDBClusterParameterGroupIdentifier = "string",
TargetDBClusterParameterGroupDescription = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
| /cran/paws.database/man/neptune_copy_db_cluster_parameter_group.Rd | permissive | TWarczak/paws | R | false | true | 2,524 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neptune_operations.R
\name{neptune_copy_db_cluster_parameter_group}
\alias{neptune_copy_db_cluster_parameter_group}
\title{Copies the specified DB cluster parameter group}
\usage{
neptune_copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier,
TargetDBClusterParameterGroupIdentifier,
TargetDBClusterParameterGroupDescription, Tags)
}
\arguments{
\item{SourceDBClusterParameterGroupIdentifier}{[required] The identifier or Amazon Resource Name (ARN) for the source DB cluster
parameter group. For information about creating an ARN, see
\href{https://docs.aws.amazon.com/neptune/latest/userguide/#tagging.ARN.Constructing}{Constructing an Amazon Resource Name (ARN)}.
Constraints:
\itemize{
\item Must specify a valid DB cluster parameter group.
\item If the source DB cluster parameter group is in the same AWS Region
as the copy, specify a valid DB parameter group identifier, for
example \code{my-db-cluster-param-group}, or a valid ARN.
\item If the source DB parameter group is in a different AWS Region than
the copy, specify a valid DB cluster parameter group ARN, for
example
\code{arn:aws:rds:us-east-1:123456789012:cluster-pg:custom-cluster-group1}.
}}
\item{TargetDBClusterParameterGroupIdentifier}{[required] The identifier for the copied DB cluster parameter group.
Constraints:
\itemize{
\item Cannot be null, empty, or blank
\item Must contain from 1 to 255 letters, numbers, or hyphens
\item First character must be a letter
\item Cannot end with a hyphen or contain two consecutive hyphens
}
Example: \code{my-cluster-param-group1}}
\item{TargetDBClusterParameterGroupDescription}{[required] A description for the copied DB cluster parameter group.}
\item{Tags}{The tags to be assigned to the copied DB cluster parameter group.}
}
\value{
A list with the following syntax:\preformatted{list(
DBClusterParameterGroup = list(
DBClusterParameterGroupName = "string",
DBParameterGroupFamily = "string",
Description = "string",
DBClusterParameterGroupArn = "string"
)
)
}
}
\description{
Copies the specified DB cluster parameter group.
}
\section{Request syntax}{
\preformatted{svc$copy_db_cluster_parameter_group(
SourceDBClusterParameterGroupIdentifier = "string",
TargetDBClusterParameterGroupIdentifier = "string",
TargetDBClusterParameterGroupDescription = "string",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lap.R
\name{LAPJV}
\alias{LAPJV}
\title{Solve linear assignment problem using LAPJV}
\usage{
LAPJV(x)
}
\arguments{
\item{x}{Square matrix of costs.}
}
\value{
A list with two entries: \code{score}, the score of the optimal matching;
and \code{matching}, the columns matched to each row of the matrix in turn.
}
\description{
Use the algorithm of Jonker & Volgenant (1987) to solve the
\href{http://www.assignmentproblems.com/doc/LSAPIntroduction.pdf}{Linear Sum Assignment Problem}.
}
\details{
The Linear Assignment Problem seeks to match each row of a matrix with a
column, such that the cost of the matching is minimized.
The Jonker & Volgenant approach is a faster alternative to the Hungarian
algorithm (Munkres 1957), which is implemented in \code{clue::solve_LSAP()}.
Note: the JV algorithm expects integers. In order to apply the function
to a non-integer \emph{n}, as in the tree distance calculations in this package,
each \emph{n} is multiplied by the largest available integer before applying
the JV algorithm. If two values of \emph{n} exhibit a trivial difference --
e.g. due to floating point errors -- then this can lead to interminable
run times. (If numbers of the magnitude of billions differ only in their
last significant digit, then the JV algorithm may undergo billions of
iterations.) To avoid this, integers over 2^22 that differ by a value of
8 or less are treated as equal.
NB. At present, only square matrices are supported; if you need support for
non-square matrices, drop a note at
\href{https://github.com/ms609/TreeDist/issues/25}{issue #25}
and I'll prioritize development.
}
\examples{
problem <- matrix(c(7, 9, 8, 9,
2, 8, 5, 7,
1, 6, 6, 9,
3, 6, 2, 2), 4, 4, byrow=TRUE)
LAPJV(problem)
}
\references{
\insertRef{Jonker1987}{TreeDist}
\insertRef{Munkres1957}{TreeDist}
}
\author{
\href{https://github.com/yongyanghz/LAPJV-algorithm-c/blob/master/LAPJV/lap.cpp}{C++ code}
by Roy Jonker, MagicLogic Optimization Inc. \href{mailto:roy_jonker@magiclogic.com}{roy_jonker@magiclogic.com},
with contributions from Yong Yang \href{mailto:yongyanglink@gmail.com}{yongyanglink@gmail.com}, after
\href{https://uk.mathworks.com/matlabcentral/profile/authors/69713-yi-cao}{Yi Cao}
}
| /fuzzedpackages/TreeDist/man/LAPJV.Rd | no_license | akhikolla/testpackages | R | false | true | 2,413 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lap.R
\name{LAPJV}
\alias{LAPJV}
\title{Solve linear assignment problem using LAPJV}
\usage{
LAPJV(x)
}
\arguments{
\item{x}{Square matrix of costs.}
}
\value{
A list with two entries: \code{score}, the score of the optimal matching;
and \code{matching}, the columns matched to each row of the matrix in turn.
}
\description{
Use the algorithm of Jonker & Volgenant (1987) to solve the
\href{http://www.assignmentproblems.com/doc/LSAPIntroduction.pdf}{Linear Sum Assignment Problem}.
}
\details{
The Linear Assignment Problem seeks to match each row of a matrix with a
column, such that the cost of the matching is minimized.
The Jonker & Volgenant approach is a faster alternative to the Hungarian
algorithm (Munkres 1957), which is implemented in \code{clue::solve_LSAP()}.
Note: the JV algorithm expects integers. In order to apply the function
to a non-integer \emph{n}, as in the tree distance calculations in this package,
each \emph{n} is multiplied by the largest available integer before applying
the JV algorithm. If two values of \emph{n} exhibit a trivial difference --
e.g. due to floating point errors -- then this can lead to interminable
run times. (If numbers of the magnitude of billions differ only in their
last significant digit, then the JV algorithm may undergo billions of
iterations.) To avoid this, integers over 2^22 that differ by a value of
8 or less are treated as equal.
NB. At present, only square matrices are supported; if you need support for
non-square matrices, drop a note at
\href{https://github.com/ms609/TreeDist/issues/25}{issue #25}
and I'll prioritize development.
}
\examples{
problem <- matrix(c(7, 9, 8, 9,
2, 8, 5, 7,
1, 6, 6, 9,
3, 6, 2, 2), 4, 4, byrow=TRUE)
LAPJV(problem)
}
\references{
\insertRef{Jonker1987}{TreeDist}
\insertRef{Munkres1957}{TreeDist}
}
\author{
\href{https://github.com/yongyanghz/LAPJV-algorithm-c/blob/master/LAPJV/lap.cpp}{C++ code}
by Roy Jonker, MagicLogic Optimization Inc. \href{mailto:roy_jonker@magiclogic.com}{roy_jonker@magiclogic.com},
with contributions from Yong Yang \href{mailto:yongyanglink@gmail.com}{yongyanglink@gmail.com}, after
\href{https://uk.mathworks.com/matlabcentral/profile/authors/69713-yi-cao}{Yi Cao}
}
|
# measures defined in Ren et al. 2008
# with equidistant grid

# Row-wise numerical integration: for each row of X, integrates that curve
# over the grid locations in xind using Simpson's rule (default), the
# trapezoidal rule, or a simple Riemann sum.
#   X           n x nxgrid matrix of function values, one curve per row
#   xind        grid locations, same shape as X; defaults to 1..nxgrid per row
#   integration one of "simpson", "trapezoidal", "riemann"
# Returns a numeric vector of length n: rowSums(weights * X).
integrate_fun <- function(X,
                          n = nrow(X),
                          nxgrid = ncol(X),
                          xind = matrix(as.vector(1:ncol(X)),
                                        nrow=nrow(X),
                                        ncol=nxgrid,
                                        byrow=T),
                          integration = "simpson")
{
  # copied from refund:::pffr
  # credits to Fabian Scheipl
  # L holds the per-point quadrature weights; the integral is rowSums(L * X).
  L <- switch(integration,
              "simpson" = {
                # \int^b_a f(t) dt = (b-a)/gridlength/3 * [f(a) + 4*f(t_1) + 2*f(t_2) + 4*f(t_3) +
                # 2*f(t_3) +...+ f(b)]
                # NOTE(review): step is (b-a)/nxgrid rather than (b-a)/(nxgrid-1);
                # kept as-is to match the refund:::pffr implementation.
                ((xind[,nxgrid]-xind[,1])/nxgrid)/3 *
                  matrix(c(1, rep(c(4, 2), length=nxgrid-2), 1), nrow=n, ncol=nxgrid, byrow=T)
              },
              "trapezoidal" = {
                # \int^b_a f(t) dt = .5* sum_i (t_i - t_{i-1}) f(t_i) + f(t_{i-1}) =
                # (t_2 - t_1)/2 * f(a=t_1) + sum^{nx-1}_{i=2} ((t_i - t_i-1)/2 + (t_i+1 - t_i)/2) * f(t_i) +
                # ... +
                # + (t_nx - t_{nx-1})/2 * f(b=t_n)
                diffs <- t(apply(xind, 1, diff))
                # NOTE(review): `filter` here relies on stats::filter (moving sum
                # of adjacent spacings); would break if dplyr masks it -- consider
                # qualifying as stats::filter.
                .5 * cbind(diffs[,1],
                           t(apply(diffs, 1, filter, filter=c(1,1)))[,-(nxgrid-1)],
                           diffs[,(nxgrid-1)])
              },
              "riemann" = {
                # simple quadrature rule:
                # \int^b_a f(t) dt = sum_i (t_i-t_{i-1})*(f(t_i))
                diffs <- t(apply(xind, 1, diff))
                #assume delta(t_0=a, t_1) = avg. delta
                cbind(rep(mean(diffs),n), diffs)
              }
  )
  apply(L*X,1,sum)
}
# Root (integrated) mean squared error per curve: integrate the squared
# pointwise error over the grid, then normalise by the grid span.
RMSE <- function(actual_mat, pred_mat, time_diff = ncol(actual_mat)-1, ...)
{
  sq_err <- (actual_mat - pred_mat)^2
  sqrt(integrate_fun(sq_err, ...) / time_diff)
}
# RMSE expressed as a percentage of the average range of the actual and
# predicted curves (per row).
relRMSE <- function(actual_mat, pred_mat, ...)
{
  rmse_val <- RMSE(actual_mat, pred_mat, ...)
  row_range <- function(mat) apply(mat, 1, function(x) diff(range(x)))
  avg_range <- 0.5 * (row_range(actual_mat) + row_range(pred_mat))
  (rmse_val / avg_range) * 100
}
# Row-wise Pearson correlation between matching rows of the two matrices.
# seq_len() (rather than 1:nrow) keeps the zero-row case well defined, and
# vapply() guarantees a numeric vector regardless of input size, unlike sapply.
cor_fun <- function(actual_mat, pred_mat)
{
  vapply(seq_len(nrow(actual_mat)),
         function(i) cor(actual_mat[i, ], pred_mat[i, ]),
         numeric(1))
}
# Convenience wrapper: one row per curve with all three accuracy measures.
all_measures <- function(actual_mat, pred_mat, ...)
{
  rmse_vals <- RMSE(actual_mat, pred_mat, ...)
  rel_vals <- relRMSE(actual_mat, pred_mat, ...)
  cor_vals <- cor_fun(actual_mat, pred_mat)
  data.frame(RMSE = rmse_vals, relRMSE = rel_vals, cor = cor_vals)
}
# with equidistant grid
integrate_fun <- function(X,
n = nrow(X),
nxgrid = ncol(X),
xind = matrix(as.vector(1:ncol(X)),
nrow=nrow(X),
ncol=nxgrid,
byrow=T),
integration = "simpson")
{
# copied from refund:::pffr
# credits to Fabian Scheipl
L <- switch(integration,
"simpson" = {
# \int^b_a f(t) dt = (b-a)/gridlength/3 * [f(a) + 4*f(t_1) + 2*f(t_2) + 4*f(t_3) +
# 2*f(t_3) +...+ f(b)]
((xind[,nxgrid]-xind[,1])/nxgrid)/3 *
matrix(c(1, rep(c(4, 2), length=nxgrid-2), 1), nrow=n, ncol=nxgrid, byrow=T)
},
"trapezoidal" = {
# \int^b_a f(t) dt = .5* sum_i (t_i - t_{i-1}) f(t_i) + f(t_{i-1}) =
# (t_2 - t_1)/2 * f(a=t_1) + sum^{nx-1}_{i=2} ((t_i - t_i-1)/2 + (t_i+1 - t_i)/2) * f(t_i) +
# ... +
# + (t_nx - t_{nx-1})/2 * f(b=t_n)
diffs <- t(apply(xind, 1, diff))
.5 * cbind(diffs[,1],
t(apply(diffs, 1, filter, filter=c(1,1)))[,-(nxgrid-1)],
diffs[,(nxgrid-1)])
},
"riemann" = {
# simple quadrature rule:
# \int^b_a f(t) dt = sum_i (t_i-t_{i-1})*(f(t_i))
diffs <- t(apply(xind, 1, diff))
#assume delta(t_0=a, t_1) = avg. delta
cbind(rep(mean(diffs),n), diffs)
}
)
apply(L*X,1,sum)
}
RMSE <- function(actual_mat, pred_mat, time_diff = ncol(actual_mat)-1, ...)
{
sqrt(integrate_fun((actual_mat - pred_mat)^2, ...)/time_diff)
}
relRMSE <- function(actual_mat, pred_mat, ...)
{
nom <- RMSE(actual_mat, pred_mat, ...)
denom <- 0.5 * (apply(actual_mat, 1, function(x) diff(range(x))) +
apply(pred_mat, 1, function(x) diff(range(x))))
return((nom/denom)*100)
}
cor_fun <- function(actual_mat, pred_mat)
{
sapply(1:nrow(actual_mat), function(i) cor(actual_mat[i,], pred_mat[i,]))
}
all_measures <- function(actual_mat, pred_mat, ...)
{
data.frame(RMSE = RMSE(actual_mat, pred_mat, ...),
relRMSE = relRMSE(actual_mat, pred_mat, ...),
cor = cor_fun(actual_mat, pred_mat))
} |
###############################################################
# Heatmap of the MCPcounter cell-type scores, KIRC cohort
###############################################################
load("8.KIRC/3.immune.sets.kirc/mcpCounter/socre.Mcpcounter.kirc.RData")

# samples x cell types, z-scored per cell type
mcpscore.kirc <- as.data.frame(t(score.mcp.kirc))
mcpscore.kirc <- as.data.frame(scale(mcpscore.kirc))
head(mcpscore.kirc)

# attach each sample's kaps group (rows aligned through rownames)
ht.mcpscore.kirc <- cbind(stat.kirc.kaps.vali$kaps.group.kirc,
                          mcpscore.kirc[rownames(stat.kirc.kaps.vali), ])
colnames(ht.mcpscore.kirc)[1] <- "kaps.group.kirc"
head(ht.mcpscore.kirc)

# Kruskal-Wallis test of every cell type against the kaps groups
datalist <- list()
for (i in names(ht.mcpscore.kirc[, 2:ncol(ht.mcpscore.kirc)])) {
  datalist[[i]] <- kruskal.test(formula(paste(i, "~ kaps.group.kirc")),
                                data = ht.mcpscore.kirc)
}
pvalue.ht.mcp.kirc <- do.call(rbind, datalist)
pvalue.ht.mcp.kirc <- as.data.frame(pvalue.ht.mcp.kirc)

# heatmap matrix: median z-score per kaps group (cell types x groups)
ht.stat.mcp.kirc <- ht.mcpscore.kirc %>%
  group_by(kaps.group.kirc) %>%
  summarise_all(median, na.rm = TRUE)
ht.stat.mcp.kirc <- as.data.frame(ht.stat.mcp.kirc)
rownames(ht.stat.mcp.kirc) <- ht.stat.mcp.kirc$kaps.group.kirc
ht.stat.mcp.kirc <- ht.stat.mcp.kirc[, -1]
ht.stat.mcp.kirc <- as.data.frame(t(ht.stat.mcp.kirc))
head(ht.stat.mcp.kirc)
save(pvalue.ht.mcp.kirc, ht.stat.mcp.kirc,
     file = "8.KIRC/3.immune.sets.kirc/mcpCounter/ht.cell.type.MCPcounter.kirc.RData")

# colour scale, symmetric around zero
min_cor <- min(as.vector(ht.stat.mcp.kirc))
max_cor <- max(as.vector(ht.stat.mcp.kirc))
range_cor <- seq(min(min_cor, 0 - max_cor),
                 max(abs(min_cor), abs(max_cor)), length.out = 50)
# NOTE(review): the first palette is immediately overwritten by the second
col.pal_cor <- colorRamp2(range_cor, colorRampPalette(rev(brewer.pal(11, "RdBu")))(50))
col.pal_cor <- colorRamp2(range_cor, colorRampPalette(c("#3288bd", "white", "#ae017e"))(50))

# row annotation: Kruskal-Wallis p-value of each cell type
# NOTE(review): 10e-5 appears twice among the colour breaks -- confirm intended cut points
row_ha.left.mcp.kirc <- rowAnnotation(
  kruskal.pvalue = as.numeric(pvalue.ht.mcp.kirc$p.value),
  col = list(
    kruskal.pvalue = colorRamp2(c(0.05, 10e-5, 10e-5, 10e-10, 10e-20, 10e-30),
                                c("#ffffcc", "#d9f0a3", "#addd8e", "#78c679", "#31a354", "#006837"))
  ),
  show_annotation_name = FALSE)

# column annotation: kaps group colours
col_ha_top.mcp.kirc <- columnAnnotation(
  kaps.group = colnames(ht.stat.mcp.kirc),
  col = list(kaps.group = c("set4" = "#d73027", "set3" = "#E69F00",
                            "set2" = "#756bb1", "set1" = "#00AFBB")),
  show_annotation_name = FALSE, gp = gpar(col = "black"))

mcpht.kirc <- Heatmap(ht.stat.mcp.kirc, name = "mean.of.z.score",
                      width = unit(2, "cm"),
                      border = F,
                      col = col.pal_cor,
                      show_column_names = T, show_row_names = T,
                      cluster_columns = F, cluster_rows = F,
                      row_names_gp = gpar(fontsize = 5),
                      column_names_gp = gpar(fontsize = 5),
                      top_annotation = col_ha_top.mcp.kirc,
                      show_row_dend = F, show_column_dend = F,
                      left_annotation = row_ha.left.mcp.kirc,
                      column_title = "cell type fraction from MCPcounter.KIRC",
                      column_title_gp = gpar(fontsize = 8))

pdf("8.KIRC/3.immune.sets.kirc/mcpCounter/ht.cell.type.MCPcounter.kirc.pdf",
    width = 5, height = 6)
draw(mcpht.kirc, padding = unit(c(30, 5, 30, 5), "mm"),  # bottom, left, top, right
     annotation_legend_side = "right",
     heatmap_legend_side = "right")
dev.off()
| /8.KIRC/3.immune.sets.kirc/mcpCounter/ht.cell.type.MCPcounter.kirc.R | no_license | saisaitian/immune.overdrive | R | false | false | 3,494 | r | ######draw heatmap of MCPcounter 10 cell types
######
load("8.KIRC/3.immune.sets.kirc/mcpCounter/socre.Mcpcounter.kirc.RData")
mcpscore.kirc=as.data.frame(t(score.mcp.kirc))
mcpscore.kirc=as.data.frame(scale(mcpscore.kirc))
head(mcpscore.kirc)
#ids=intersect(rownames(kaps.td), rownames(mcpscore))
ht.mcpscore.kirc=cbind(stat.kirc.kaps.vali$kaps.group.kirc, mcpscore.kirc[rownames(stat.kirc.kaps.vali),])
colnames(ht.mcpscore.kirc)[1]="kaps.group.kirc"
head(ht.mcpscore.kirc)
######
######pvalue
datalist <- list()
for(i in names(ht.mcpscore.kirc[,2:ncol(ht.mcpscore.kirc)])){
datalist[[i]] <- kruskal.test(formula(paste(i, "~ kaps.group.kirc")), data = ht.mcpscore.kirc)
}
pvalue.ht.mcp.kirc=do.call(rbind, datalist)
pvalue.ht.mcp.kirc=as.data.frame(pvalue.ht.mcp.kirc)
######matrix
ht.stat.mcp.kirc= ht.mcpscore.kirc %>% group_by(kaps.group.kirc) %>% summarise_all(median,na.rm = TRUE)
ht.stat.mcp.kirc=as.data.frame(ht.stat.mcp.kirc)
rownames(ht.stat.mcp.kirc)=ht.stat.mcp.kirc$kaps.group.kirc
ht.stat.mcp.kirc=ht.stat.mcp.kirc[,-1]
ht.stat.mcp.kirc=as.data.frame(t(ht.stat.mcp.kirc))
#ht.stat.mcp.kirc=ht.stat.mcp.kirc[,c(4,3,2,1)]
head(ht.stat.mcp.kirc)
save(pvalue.ht.mcp.kirc, ht.stat.mcp.kirc,file="8.KIRC/3.immune.sets.kirc/mcpCounter/ht.cell.type.MCPcounter.kirc.RData")
#######
#colore set
min_cor = min(as.vector(ht.stat.mcp.kirc))
max_cor = max(as.vector(ht.stat.mcp.kirc))
range_cor = seq(min(min_cor,0-max_cor),max(abs(min_cor),abs(max_cor)),length.out=50)
col.pal_cor = colorRamp2(range_cor,colorRampPalette(rev(brewer.pal(11, "RdBu")))(50))
col.pal_cor = colorRamp2(range_cor,colorRampPalette(c("#3288bd", "white","#ae017e"))(50))
#
row_ha.left.mcp.kirc = rowAnnotation(kruskal.pvalue=as.numeric(pvalue.ht.mcp.kirc$p.value),
col=list(
kruskal.pvalue=colorRamp2(c(0.05,10e-5,10e-5,10e-10,10e-20,10e-30),
c("#ffffcc","#d9f0a3","#addd8e","#78c679","#31a354","#006837"))
),show_annotation_name = FALSE)
col_ha_top.mcp.kirc = columnAnnotation(
kaps.group=colnames(ht.stat.mcp.kirc),
col=list(kaps.group=c("set4"="#d73027","set3"="#E69F00","set2"="#756bb1","set1"="#00AFBB" )),
show_annotation_name = FALSE,gp = gpar(col = "black"))
####
mcpht.kirc=Heatmap(ht.stat.mcp.kirc, name = "mean.of.z.score",
#col = colorRampPalette(c("#00F5FF", "white","#FF3E96"))(256),
#col = rev(viridis(10)),
width = unit(2, "cm"),
#height = unit(12, "cm"),
border = F,
col=col.pal_cor,
show_column_names = T,show_row_names = T,
cluster_columns = F,cluster_rows = F,
row_names_gp = gpar(fontsize = 5),
column_names_gp = gpar(fontsize = 5),
top_annotation = col_ha_top.mcp.kirc,
#right_annotation = row_ha.right,
show_row_dend = F,show_column_dend = F,
#row_names_side = "left",
left_annotation = row_ha.left.mcp.kirc,
column_title="cell type fraction from MCPcounter.KIRC",
column_title_gp = gpar(fontsize = 8)
)
pdf("8.KIRC/3.immune.sets.kirc/mcpCounter/ht.cell.type.MCPcounter.kirc.pdf",width = 5,height = 6)
draw(mcpht.kirc, padding = unit(c(30, 5, 30,5), "mm"), #bottom, left, top, right paddings,
annotation_legend_side = "right"
,heatmap_legend_side = "right"
)
dev.off()
|
# DB-Lytix Example.
Renv <- new.env(parent = globalenv())
FLenv <- as.FL(Renv)
Renv$tbl <- iris
Renv$tbl$Species <- as.numeric(Renv$tbl$Species)
FLenv$tbl <- as.FLTable(Renv$tbl,tableName = getOption("TestTempTableName"),
temporary=F, drop = TRUE)
#'
#' fliris <- as.FL(rtbl)
#' flirispca <- prcomp(Species~., data = fliris)
Renv$mod <- princomp(Renv$tbl[,-1])
FLenv$mod <- prcomp(Species~.,FLenv$tbl)
eval_expect_equal({
fit <- prcomp(data = tbl)
},Renv,FLenv,
noexpectation = "fit")
##FLexpect_equal(FLenv$mod$rotation, as.matrix(Renv$mod$loading[1:4,1:4]))
FLexpect_equal(FLenv$mod$sdev, as.numeric(Renv$mod$sdev),tolerance = .05)
| /tests/next/test_FLPCA.R | no_license | phani-srikar/AdapteR | R | false | false | 647 | r | # DB-Lytix Example.
Renv <- new.env(parent = globalenv())
FLenv <- as.FL(Renv)
Renv$tbl <- iris
Renv$tbl$Species <- as.numeric(Renv$tbl$Species)
FLenv$tbl <- as.FLTable(Renv$tbl,tableName = getOption("TestTempTableName"),
temporary=F, drop = TRUE)
#'
#' fliris <- as.FL(rtbl)
#' flirispca <- prcomp(Species~., data = fliris)
Renv$mod <- princomp(Renv$tbl[,-1])
FLenv$mod <- prcomp(Species~.,FLenv$tbl)
eval_expect_equal({
fit <- prcomp(data = tbl)
},Renv,FLenv,
noexpectation = "fit")
##FLexpect_equal(FLenv$mod$rotation, as.matrix(Renv$mod$loading[1:4,1:4]))
FLexpect_equal(FLenv$mod$sdev, as.numeric(Renv$mod$sdev),tolerance = .05)
|
# the following needs to be repeated 100 times
#' Run one replicate of the simulate -> fit -> validate pipeline.
#'
#' Generates one synthetic "truth" plus structured and unstructured samples via
#' genDataFunctions(), fits the model selected by `model_type` (each branch
#' sources its own model script), and validates the fit twice (absolute and
#' relative error).
#'
#' NOTE(review): the defaults `parameter = parameter` and `biasfield = biasfield`
#' are self-referential promises -- calling this function without supplying them
#' explicitly raises "promise already under evaluation". They appear to be
#' always passed by the caller; confirm before relying on the defaults.
#' NOTE(review): the `biasfield` argument is immediately overwritten by
#' g1$biasfield below, so the passed value is never used.
#' NOTE(review): the `plotting` argument is ignored -- each branch hard-codes
#' plotting = TRUE or FALSE in its model/validation calls.
#'
#' @return list of: relative validation results, `parameter`, number of
#'   structured presences, number of unstructured records. The absolute
#'   `validation` object is computed in every branch but NOT returned.
run_function_multiple <- function(resolution=c(10,10),
                        model_type = c("unstructured", "structured", "joint", "unstructuredcov", "jointcov", "jointtwo"),
                        plotting = FALSE, summary_results = FALSE,
                        nsamp = NULL, seed = NULL, dim = NULL, lambda = NULL, env.beta = NULL,
                        kappa = NULL, sigma2x = NULL, strata = NULL, rows = NULL, cols = NULL,
                        probs = NULL, plot = FALSE, plotdat = FALSE, qsize = NULL, rho = NULL,
                        parameter = parameter, correlation = FALSE,
                        biasfield = biasfield){
  # removing so they can all have same truth
  source("Functions to generate data and sample.R")
  # Generate truth surface plus both sampling schemes in one call.
  g1 <- genDataFunctions(dim = dim,
                         lambda = lambda,
                         env.beta = env.beta,
                         seed = seed,
                         kappa = kappa,
                         sigma2x = sigma2x,
                         strata = strata,
                         rows = rows,
                         cols = cols,
                         probs = probs,
                         nsamp = nsamp,
                         plot = FALSE,
                         plotdat = FALSE,
                         qsize = qsize,
                         rho = rho,
                         correlated = correlation)
  # Unpack generated data; note biasfield from g1 replaces the argument.
  structured_data <- g1$structured_data
  unstructured_data <- g1$unstructured_data
  biasfield <- g1$biasfield
  dat1 <- g1$dat1
  biascov <- g1$biascov
  # NOTE(review): num_presence is unused; the return statement recomputes it.
  num_presence <- sum(structured_data$presence)
  # One branch per model type; each sources its fitting script, fits, then
  # validates with absolute=TRUE and absolute=FALSE.
  if(model_type == "structured"){
    source("Run models structured.R")
    mod_1 <- structured_model(structured_data, dat1, biasfield, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_1[[2]], resolution=c(10,10), join.stack=mod_1[[1]], model_type="structured",
                                      structured_data = structured_data, dat1 = dat1, summary_results=T, qsize = 1, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_1[[2]], resolution=c(10,10), join.stack=mod_1[[1]], model_type="structured",
                                        structured_data = structured_data, dat1 = dat1, summary_results=T, qsize = 1, absolute=FALSE, plotting = FALSE, dim = dim)
  }
  if(model_type == "unstructured"){
    source("Run models.R")
    mod_2 <- unstructured_model(unstructured_data, dat1, biasfield, dim = dim, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_2[[2]], resolution=c(10,10), join.stack=mod_2[[1]], model_type="unstructured",
                                      unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_2[[2]], resolution=c(10,10), join.stack=mod_2[[1]], model_type="unstructured",
                                        unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=FALSE, plotting = FALSE, dim = dim)
  }
  if(model_type == "joint"){
    source("Run models joint.R")
    mod_3 <- joint_model(structured_data, unstructured_data, dat1, biasfield, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_3[[2]], resolution=c(10,10), join.stack=mod_3[[1]], model_type="joint",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_3[[2]], resolution=c(10,10), join.stack=mod_3[[1]], model_type="joint",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute=FALSE, dim = dim, plotting = FALSE)
  }
  # NOTE(review): the three bias-aware branches below hard-code plotting = TRUE,
  # unlike the branches above -- confirm this asymmetry is intentional.
  if(model_type == "unstructuredcov"){
    source("Run models unstructured bias covariate.R")
    mod_4 <- unstructured_model_cov(unstructured_data, dat1, biasfield, dim = dim, plotting = TRUE, biascov=biascov)
    source("validation_function.R")
    validation <- validation_function(result=mod_4[[2]], resolution=c(10,10), join.stack=mod_4[[1]], model_type="unstructuredcov",
                                      unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_4[[2]], resolution=c(10,10), join.stack=mod_4[[1]], model_type="unstructuredcov",
                                        unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=FALSE, dim = dim, plotting = TRUE)
  }
  if(model_type == "jointcov"){
    source("Run models joint covariate for bias.R")
    mod_5 <- joint_model_cov(structured_data, unstructured_data, dat1, biasfield, biascov=biascov)
    source("validation_function.R")
    validation <- validation_function(result=mod_5[[2]], resolution=c(10,10), join.stack=mod_5[[1]], model_type="jointcov",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute = TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_5[[2]], resolution=c(10,10), join.stack=mod_5[[1]], model_type="jointcov",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute = FALSE, dim = dim, plotting = TRUE)
  }
  if(model_type == "jointtwo"){
    source("Run models joint second spatial field.R")
    mod_6 <- joint_model2(structured_data, unstructured_data, dat1, biasfield)
    source("validation_function.R")
    validation <- validation_function(result=mod_6[[2]], resolution=c(10,10), join.stack=mod_6[[1]], model_type="jointtwo",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute = TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_6[[2]], resolution=c(10,10), join.stack=mod_6[[1]], model_type="jointtwo",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute = FALSE, dim = dim, plotting = TRUE)
  }
  # Only the relative validation is returned; `validation` is discarded.
  return(list(validation_r, parameter, sum(structured_data$presence), length(unstructured_data[,1])))
}
#' Run one replicate of the simulate -> fit -> validate pipeline.
#'
#' Generates one synthetic "truth" plus structured and unstructured samples via
#' genDataFunctions(), fits the model selected by `model_type` (each branch
#' sources its own model script), and validates the fit twice (absolute and
#' relative error).
#'
#' NOTE(review): the defaults `parameter = parameter` and `biasfield = biasfield`
#' are self-referential promises -- calling this function without supplying them
#' explicitly raises "promise already under evaluation". They appear to be
#' always passed by the caller; confirm before relying on the defaults.
#' NOTE(review): the `biasfield` argument is immediately overwritten by
#' g1$biasfield below, so the passed value is never used.
#' NOTE(review): the `plotting` argument is ignored -- each branch hard-codes
#' plotting = TRUE or FALSE in its model/validation calls.
#'
#' @return list of: relative validation results, `parameter`, number of
#'   structured presences, number of unstructured records. The absolute
#'   `validation` object is computed in every branch but NOT returned.
run_function_multiple <- function(resolution=c(10,10),
                        model_type = c("unstructured", "structured", "joint", "unstructuredcov", "jointcov", "jointtwo"),
                        plotting = FALSE, summary_results = FALSE,
                        nsamp = NULL, seed = NULL, dim = NULL, lambda = NULL, env.beta = NULL,
                        kappa = NULL, sigma2x = NULL, strata = NULL, rows = NULL, cols = NULL,
                        probs = NULL, plot = FALSE, plotdat = FALSE, qsize = NULL, rho = NULL,
                        parameter = parameter, correlation = FALSE,
                        biasfield = biasfield){
  # removing so they can all have same truth
  source("Functions to generate data and sample.R")
  # Generate truth surface plus both sampling schemes in one call.
  g1 <- genDataFunctions(dim = dim,
                         lambda = lambda,
                         env.beta = env.beta,
                         seed = seed,
                         kappa = kappa,
                         sigma2x = sigma2x,
                         strata = strata,
                         rows = rows,
                         cols = cols,
                         probs = probs,
                         nsamp = nsamp,
                         plot = FALSE,
                         plotdat = FALSE,
                         qsize = qsize,
                         rho = rho,
                         correlated = correlation)
  # Unpack generated data; note biasfield from g1 replaces the argument.
  structured_data <- g1$structured_data
  unstructured_data <- g1$unstructured_data
  biasfield <- g1$biasfield
  dat1 <- g1$dat1
  biascov <- g1$biascov
  # NOTE(review): num_presence is unused; the return statement recomputes it.
  num_presence <- sum(structured_data$presence)
  # One branch per model type; each sources its fitting script, fits, then
  # validates with absolute=TRUE and absolute=FALSE.
  if(model_type == "structured"){
    source("Run models structured.R")
    mod_1 <- structured_model(structured_data, dat1, biasfield, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_1[[2]], resolution=c(10,10), join.stack=mod_1[[1]], model_type="structured",
                                      structured_data = structured_data, dat1 = dat1, summary_results=T, qsize = 1, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_1[[2]], resolution=c(10,10), join.stack=mod_1[[1]], model_type="structured",
                                        structured_data = structured_data, dat1 = dat1, summary_results=T, qsize = 1, absolute=FALSE, plotting = FALSE, dim = dim)
  }
  if(model_type == "unstructured"){
    source("Run models.R")
    mod_2 <- unstructured_model(unstructured_data, dat1, biasfield, dim = dim, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_2[[2]], resolution=c(10,10), join.stack=mod_2[[1]], model_type="unstructured",
                                      unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_2[[2]], resolution=c(10,10), join.stack=mod_2[[1]], model_type="unstructured",
                                        unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=FALSE, plotting = FALSE, dim = dim)
  }
  if(model_type == "joint"){
    source("Run models joint.R")
    mod_3 <- joint_model(structured_data, unstructured_data, dat1, biasfield, plotting = FALSE)
    source("validation_function.R")
    validation <- validation_function(result=mod_3[[2]], resolution=c(10,10), join.stack=mod_3[[1]], model_type="joint",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = FALSE)
    validation_r <- validation_function(result=mod_3[[2]], resolution=c(10,10), join.stack=mod_3[[1]], model_type="joint",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute=FALSE, dim = dim, plotting = FALSE)
  }
  # NOTE(review): the three bias-aware branches below hard-code plotting = TRUE,
  # unlike the branches above -- confirm this asymmetry is intentional.
  if(model_type == "unstructuredcov"){
    source("Run models unstructured bias covariate.R")
    mod_4 <- unstructured_model_cov(unstructured_data, dat1, biasfield, dim = dim, plotting = TRUE, biascov=biascov)
    source("validation_function.R")
    validation <- validation_function(result=mod_4[[2]], resolution=c(10,10), join.stack=mod_4[[1]], model_type="unstructuredcov",
                                      unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_4[[2]], resolution=c(10,10), join.stack=mod_4[[1]], model_type="unstructuredcov",
                                        unstructured_data = unstructured_data, dat1 = dat1, summary_results=T, absolute=FALSE, dim = dim, plotting = TRUE)
  }
  if(model_type == "jointcov"){
    source("Run models joint covariate for bias.R")
    mod_5 <- joint_model_cov(structured_data, unstructured_data, dat1, biasfield, biascov=biascov)
    source("validation_function.R")
    validation <- validation_function(result=mod_5[[2]], resolution=c(10,10), join.stack=mod_5[[1]], model_type="jointcov",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute = TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_5[[2]], resolution=c(10,10), join.stack=mod_5[[1]], model_type="jointcov",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute = FALSE, dim = dim, plotting = TRUE)
  }
  if(model_type == "jointtwo"){
    source("Run models joint second spatial field.R")
    mod_6 <- joint_model2(structured_data, unstructured_data, dat1, biasfield)
    source("validation_function.R")
    validation <- validation_function(result=mod_6[[2]], resolution=c(10,10), join.stack=mod_6[[1]], model_type="jointtwo",
                                      unstructured_data = unstructured_data, structured_data = structured_data,
                                      dat1 = dat1, summary_results=T, absolute = TRUE, dim = dim, plotting = TRUE)
    validation_r <- validation_function(result=mod_6[[2]], resolution=c(10,10), join.stack=mod_6[[1]], model_type="jointtwo",
                                        unstructured_data = unstructured_data, structured_data = structured_data,
                                        dat1 = dat1, summary_results=T, absolute = FALSE, dim = dim, plotting = TRUE)
  }
  # Only the relative validation is returned; `validation` is discarded.
  return(list(validation_r, parameter, sum(structured_data$presence), length(unstructured_data[,1])))
}
% File rmongodb/man/mongo.shorthand.Rd
\name{mongo.shorthand}
\alias{mongo.shorthand}
\title{Define shorthand for BSON and GridFS}
\description{
Define shorthand names for BSON and GridFS functions and constants.
All symbols dealing with BSON and GridFS are defined by this function excluding the "mongo." prefix.
They will still be available by the original names also. For clarity, the symbols in the mongo module
(for communication with the server) are not shortened.
}
\usage{
mongo.shorthand()
}
\value{
TRUE
}
\examples{
mongo.shorthand()
buf <- bson.buffer.create()
bson.buffer.append(buf, "name", "Alice")
b <- bson.from.buffer(buf)
}
\seealso{
\link{mongo.bson},\cr
\link{mongo.gridfs},\cr
\code{\link{mongo.bson.buffer.create}},\cr
\code{\link{mongo.bson.buffer.append}},\cr
\code{\link{mongo.bson.from.buffer}}.
}
| /rstuff/rmongodb/rmongodb/man/mongo.shorthand.Rd | permissive | BigBlueBox/GoodAndBad | R | false | false | 833 | rd | % File rmongodb/man/mongo.shorthand.Rd
\name{mongo.shorthand}
\alias{mongo.shorthand}
\title{Define shorthand for BSON and GridFS}
\description{
Define shorthand names for BSON and GridFS functions and constants.
All symbols dealing with BSON and GridFS are defined by this function excluding the "mongo." prefix.
They will still be available by the original names also. For clarity, the symbols in the mongo module
(for communication with the server) are not shortened.
}
\usage{
mongo.shorthand()
}
\value{
TRUE
}
\examples{
mongo.shorthand()
buf <- bson.buffer.create()
bson.buffer.append(buf, "name", "Alice")
b <- bson.from.buffer(buf)
}
\seealso{
\link{mongo.bson},\cr
\link{mongo.gridfs},\cr
\code{\link{mongo.bson.buffer.create}},\cr
\code{\link{mongo.bson.buffer.append}},\cr
\code{\link{mongo.bson.from.buffer}}.
}
|
/ShinyBeginnings/data/helpers.R | no_license | Angnar1997/ShinyPath | R | false | false | 1,420 | r | ||
tabPanel('Box Plot', value = 'tab_box_plot_1',
fluidPage(
fluidRow(
column(12, align = 'left',
h4('Box Plot - I')
)
),
hr(),
fluidRow(
column(12,
tabsetPanel(type = 'tabs',
tabPanel('plotly',
fluidRow(
column(2,
selectInput('boxly1_select_x', 'Variable 1: ',
choices = "", selected = ""),
textInput(inputId = "boxly1_xlabel", label = "X Axes Label: ",
value = "label")
),
column(2,
textInput(inputId = "boxly1_title", label = "Title: ",
value = "title"),
textInput(inputId = "boxly1_ylabel", label = "Y Axes Label: ",
value = "label")
),
column(8, align = 'center',
plotly::plotlyOutput('boxly1_plot_1', height = '600px')
)
)
),
tabPanel('rbokeh',
fluidRow(
column(2,
selectInput('bobox1_select_x', 'Variable: ',
choices = "", selected = ""),
textInput(inputId = "bobox1_xlabel", label = "X Axes Label: ",
value = "label"),
textInput(inputId = "bobox1_color", label = "Color: ",
value = ""),
numericInput(inputId = "bobox1_oshape", label = "Outlier Shape: ",
value = 1, min = 0, max = 25, step = 1),
numericInput(inputId = "bobox1_width", label = "Width: ",
value = 0.9, min = 0, max = 1, step = 0.1),
selectInput('bobox1_xgrid', 'X Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE")
),
column(2,
textInput(inputId = "bobox1_title", label = "Title: ",
value = "title"),
textInput(inputId = "bobox1_ylabel", label = "Y Axes Label: ",
value = "label"),
numericInput(inputId = "bobox1_alpha", label = "Alpha: ",
value = 1, min = 0, max = 1, step = 0.1),
numericInput(inputId = "bobox1_osize", label = "Outlier Size: ",
value = 10, min = 0, step = 1),
textInput(inputId = "bobox1_lcolor", label = "Line Color: ",
value = ""),
selectInput('bobox1_ygrid', 'Y Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE")
),
column(8, align = 'center',
rbokeh::rbokehOutput('bobox1_plot_1', height = '600px')
)
)
)
)
)
)
)
) | /inst/app-visualize/ui/ui_box_plot_1.R | no_license | cran/xplorerr | R | false | false | 2,658 | r | tabPanel('Box Plot', value = 'tab_box_plot_1',
fluidPage(
fluidRow(
column(12, align = 'left',
h4('Box Plot - I')
)
),
hr(),
fluidRow(
column(12,
tabsetPanel(type = 'tabs',
tabPanel('plotly',
fluidRow(
column(2,
selectInput('boxly1_select_x', 'Variable 1: ',
choices = "", selected = ""),
textInput(inputId = "boxly1_xlabel", label = "X Axes Label: ",
value = "label")
),
column(2,
textInput(inputId = "boxly1_title", label = "Title: ",
value = "title"),
textInput(inputId = "boxly1_ylabel", label = "Y Axes Label: ",
value = "label")
),
column(8, align = 'center',
plotly::plotlyOutput('boxly1_plot_1', height = '600px')
)
)
),
tabPanel('rbokeh',
fluidRow(
column(2,
selectInput('bobox1_select_x', 'Variable: ',
choices = "", selected = ""),
textInput(inputId = "bobox1_xlabel", label = "X Axes Label: ",
value = "label"),
textInput(inputId = "bobox1_color", label = "Color: ",
value = ""),
numericInput(inputId = "bobox1_oshape", label = "Outlier Shape: ",
value = 1, min = 0, max = 25, step = 1),
numericInput(inputId = "bobox1_width", label = "Width: ",
value = 0.9, min = 0, max = 1, step = 0.1),
selectInput('bobox1_xgrid', 'X Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE")
),
column(2,
textInput(inputId = "bobox1_title", label = "Title: ",
value = "title"),
textInput(inputId = "bobox1_ylabel", label = "Y Axes Label: ",
value = "label"),
numericInput(inputId = "bobox1_alpha", label = "Alpha: ",
value = 1, min = 0, max = 1, step = 0.1),
numericInput(inputId = "bobox1_osize", label = "Outlier Size: ",
value = 10, min = 0, step = 1),
textInput(inputId = "bobox1_lcolor", label = "Line Color: ",
value = ""),
selectInput('bobox1_ygrid', 'Y Axis Grid: ',
choices = c("TRUE" = TRUE, "FALSE" = FALSE), selected = "TRUE")
),
column(8, align = 'center',
rbokeh::rbokehOutput('bobox1_plot_1', height = '600px')
)
)
)
)
)
)
)
) |
#=============================================================
# Load required packages
#=============================================================
require(h2oEnsemble)
require(ggplot2)
require(h2o)
#=============================================================
# Init H2O (connect to a running H2O cluster)
#=============================================================
h2o.init(port = 54321)
#h2o.removeAll()
#=============================================================
# Load data
#=============================================================
data_frame <-
h2o.importFile(
path = "http://www.dataminingconsultant.com/data/churn.txt",
sep = ",",
destination_frame = "data_frame")
# remove special characters from column names
colnames(data_frame) <- gsub(" ", "_", trimws(gsub("[[:punct:]]", " ", names(data_frame))))
#=============================================================
# Force classification
#=============================================================
data_frame$Churn <- as.factor(data_frame$Churn)
#=============================================================
# Split data into training and validation
#=============================================================
split_df <- h2o.splitFrame(data_frame, 0.7,
destination_frames = c("train_frame","valid_frame"),
seed=2016)
train_frame <- split_df[[1]]
valid_frame <- split_df[[2]]
#=============================================================
# Target and predictors
#=============================================================
y <- "Churn"
x <- setdiff(names(data_frame), y)
#=============================================================
# Specify base learners
#=============================================================
#' GBM base learner 1 for h2o.ensemble: 50 shallow (depth-3) trees with a slow
#' learn rate. Extra arguments in `...` are forwarded to h2o.gbm.wrapper
#' unchanged.
#'
#' Bug fix: the original default `seed = seed` was self-referential -- the
#' default promise looks up `seed` in the function's own environment, so
#' evaluating it without an explicit `seed` argument raised a "promise already
#' under evaluation" error. The default is now -1 (h2o's own default, meaning
#' no fixed seed), which is backward compatible for every caller that passed
#' `seed` explicitly.
h2o.gbm.1 <-
  function(...,
           ntrees = 50, learn_rate = 0.03,
           max_depth = 3, col_sample_rate = 0.65,
           sample_rate = 0.8, seed = -1,
           stopping_rounds = 150, stopping_metric = "AUC",
           stopping_tolerance = 0.0005)
    h2o.gbm.wrapper(...,
                    ntrees = ntrees, learn_rate = learn_rate,
                    max_depth = max_depth, col_sample_rate = col_sample_rate,
                    sample_rate = sample_rate, seed = seed,
                    stopping_rounds = stopping_rounds, stopping_metric = stopping_metric,
                    stopping_tolerance = stopping_tolerance)
#' GBM base learner 2 for h2o.ensemble: 25 deeper (depth-5) trees with a slow
#' learn rate. Extra arguments in `...` are forwarded to h2o.gbm.wrapper
#' unchanged.
#'
#' Bug fix: the original default `seed = seed` was self-referential and raised
#' a "promise already under evaluation" error when `seed` was not supplied.
#' It now defaults to -1 (h2o's default: no fixed seed); callers that passed
#' `seed` explicitly are unaffected.
h2o.gbm.2 <-
  function(...,
           ntrees = 25, learn_rate = 0.03,
           max_depth = 5, col_sample_rate = 0.65,
           sample_rate = 0.8, seed = -1,
           stopping_rounds = 150, stopping_metric = "AUC",
           stopping_tolerance = 0.0005)
    h2o.gbm.wrapper(...,
                    ntrees = ntrees, learn_rate = learn_rate,
                    max_depth = max_depth, col_sample_rate = col_sample_rate,
                    sample_rate = sample_rate, seed = seed,
                    stopping_rounds = stopping_rounds, stopping_metric = stopping_metric,
                    stopping_tolerance = stopping_tolerance)
#' Random-forest base learner 1 for h2o.ensemble: 50 shallow (depth-3) trees
#' with binomial double trees enabled. Extra arguments in `...` are forwarded
#' to h2o.randomForest.wrapper unchanged.
h2o.drf.1 <- function(...,
                      binomial_double_trees = TRUE,
                      ntrees = 50, max_depth = 3,
                      sample_rate = 0.8, stopping_rounds = 150,
                      stopping_metric = "AUC", stopping_tolerance = 0.0005) {
  h2o.randomForest.wrapper(
    ...,
    binomial_double_trees = binomial_double_trees,
    ntrees = ntrees,
    max_depth = max_depth,
    sample_rate = sample_rate,
    stopping_rounds = stopping_rounds,
    stopping_metric = stopping_metric,
    stopping_tolerance = stopping_tolerance
  )
}
#' Random-forest base learner 2 for h2o.ensemble: 20 deeper (depth-5) trees
#' with binomial double trees enabled. Extra arguments in `...` are forwarded
#' to h2o.randomForest.wrapper unchanged.
h2o.drf.2 <- function(...,
                      binomial_double_trees = TRUE,
                      ntrees = 20, max_depth = 5,
                      sample_rate = 0.8, stopping_rounds = 150,
                      stopping_metric = "AUC", stopping_tolerance = 0.0005) {
  h2o.randomForest.wrapper(
    ...,
    binomial_double_trees = binomial_double_trees,
    ntrees = ntrees,
    max_depth = max_depth,
    sample_rate = sample_rate,
    stopping_rounds = stopping_rounds,
    stopping_metric = stopping_metric,
    stopping_tolerance = stopping_tolerance
  )
}
#' Deep-learning base learner 1 for h2o.ensemble: small 10x10 tanh network
#' with 20% dropout on inputs and both hidden layers. Extra arguments in `...`
#' are forwarded to h2o.deeplearning.wrapper unchanged.
h2o.dl.1 <-
  function(...,
           activation = "TanhWithDropout", epochs = 600,
           hidden = c(10,10), hidden_dropout_ratios = c(0.2,0.2),
           input_dropout_ratio = 0.2, score_interval = 0.0001)
    h2o.deeplearning.wrapper(...,
                             activation = activation, epochs = epochs ,
                             hidden = hidden, hidden_dropout_ratios = hidden_dropout_ratios,
                             input_dropout_ratio = input_dropout_ratio, score_interval = score_interval)
#' Deep-learning base learner 2 for h2o.ensemble: larger 50x50 tanh network
#' with heavier (50%) hidden-layer dropout. Extra arguments in `...` are
#' forwarded to h2o.deeplearning.wrapper unchanged.
h2o.dl.2 <-
  function(...,
           activation = "TanhWithDropout", epochs = 600,
           hidden = c(50,50), hidden_dropout_ratios = c(0.5,0.5),
           input_dropout_ratio = 0.2, score_interval = 0.0001)
    h2o.deeplearning.wrapper(...,
                             activation = activation, epochs = epochs ,
                             hidden = hidden, hidden_dropout_ratios = hidden_dropout_ratios,
                             input_dropout_ratio = input_dropout_ratio, score_interval = score_interval)
#=============================================================
# Create base learner vector
#=============================================================
learner <- c("h2o.gbm.1", "h2o.gbm.2",
"h2o.drf.1", "h2o.drf.2",
"h2o.dl.1", "h2o.dl.2")
#=============================================================
# Specify meta-learner
#=============================================================
# metalearner <- "SL.glm"
metalearner <- "h2o.glm.wrapper"
#=============================================================
# Fit Stacked learners
#=============================================================
family <- "binomial"
ensemble.fit <- h2o.ensemble(
x = x,
y = y,
training_frame = train_frame,
family = family,
learner = learner,
metalearner = metalearner,
cvControl = list(V = 5, shuffle = TRUE))
#=============================================================
# Save the models if you want
#=============================================================
# h2o.save_ensemble(fit, path = "ens_models/", export_levelone = TRUE)
perf <- h2o.ensemble_performance(
object = ensemble.fit,
newdata = valid_frame,
score_base_models = TRUE)
#=============================================================
# Print stacked fit AUC score
#=============================================================
print(perf, metric = "AUC")
#=============================================================
# Print stacked fit MSE score
#=============================================================
print(perf, metric = "MSE")
| /Stacking_H2O.R | no_license | rajkstats/h2o_examples | R | false | false | 7,414 | r | #=============================================================
# Load required packages
#=============================================================
require(h2oEnsemble)
require(ggplot2)
require(h2o)
#=============================================================
# Init H2O (connect to a running H2O cluster)
#=============================================================
h2o.init(port = 54321)
#h2o.removeAll()
#=============================================================
# Load data
#=============================================================
data_frame <-
h2o.importFile(
path = "http://www.dataminingconsultant.com/data/churn.txt",
sep = ",",
destination_frame = "data_frame")
# remove special characters from column names
colnames(data_frame) <- gsub(" ", "_", trimws(gsub("[[:punct:]]", " ", names(data_frame))))
#=============================================================
# Force classification
#=============================================================
data_frame$Churn <- as.factor(data_frame$Churn)
#=============================================================
# Split data into training and validation
#=============================================================
split_df <- h2o.splitFrame(data_frame, 0.7,
destination_frames = c("train_frame","valid_frame"),
seed=2016)
train_frame <- split_df[[1]]
valid_frame <- split_df[[2]]
#=============================================================
# Target and predictors
#=============================================================
y <- "Churn"
x <- setdiff(names(data_frame), y)
#=============================================================
# Specify base learners
#=============================================================
#' GBM base learner 1 for h2o.ensemble: 50 shallow (depth-3) trees with a slow
#' learn rate. Extra arguments in `...` are forwarded to h2o.gbm.wrapper
#' unchanged.
#'
#' Bug fix: the original default `seed = seed` was self-referential -- the
#' default promise looks up `seed` in the function's own environment, so
#' evaluating it without an explicit `seed` argument raised a "promise already
#' under evaluation" error. The default is now -1 (h2o's own default, meaning
#' no fixed seed), which is backward compatible for every caller that passed
#' `seed` explicitly.
h2o.gbm.1 <-
  function(...,
           ntrees = 50, learn_rate = 0.03,
           max_depth = 3, col_sample_rate = 0.65,
           sample_rate = 0.8, seed = -1,
           stopping_rounds = 150, stopping_metric = "AUC",
           stopping_tolerance = 0.0005)
    h2o.gbm.wrapper(...,
                    ntrees = ntrees, learn_rate = learn_rate,
                    max_depth = max_depth, col_sample_rate = col_sample_rate,
                    sample_rate = sample_rate, seed = seed,
                    stopping_rounds = stopping_rounds, stopping_metric = stopping_metric,
                    stopping_tolerance = stopping_tolerance)
#' GBM base learner 2 for h2o.ensemble: 25 deeper (depth-5) trees with a slow
#' learn rate. Extra arguments in `...` are forwarded to h2o.gbm.wrapper
#' unchanged.
#'
#' Bug fix: the original default `seed = seed` was self-referential and raised
#' a "promise already under evaluation" error when `seed` was not supplied.
#' It now defaults to -1 (h2o's default: no fixed seed); callers that passed
#' `seed` explicitly are unaffected.
h2o.gbm.2 <-
  function(...,
           ntrees = 25, learn_rate = 0.03,
           max_depth = 5, col_sample_rate = 0.65,
           sample_rate = 0.8, seed = -1,
           stopping_rounds = 150, stopping_metric = "AUC",
           stopping_tolerance = 0.0005)
    h2o.gbm.wrapper(...,
                    ntrees = ntrees, learn_rate = learn_rate,
                    max_depth = max_depth, col_sample_rate = col_sample_rate,
                    sample_rate = sample_rate, seed = seed,
                    stopping_rounds = stopping_rounds, stopping_metric = stopping_metric,
                    stopping_tolerance = stopping_tolerance)
#' Random-forest base learner 1 for h2o.ensemble: 50 shallow (depth-3) trees
#' with binomial double trees enabled. Extra arguments in `...` are forwarded
#' to h2o.randomForest.wrapper unchanged.
h2o.drf.1 <-
  function(...,
           binomial_double_trees = TRUE,
           ntrees = 50, max_depth = 3,
           sample_rate = 0.8, stopping_rounds = 150,
           stopping_metric = "AUC", stopping_tolerance = 0.0005)
    h2o.randomForest.wrapper(...,
                             binomial_double_trees = binomial_double_trees,
                             ntrees = ntrees, max_depth = max_depth,
                             sample_rate = sample_rate, stopping_rounds = stopping_rounds,
                             stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
#' Random-forest base learner 2 for h2o.ensemble: 20 deeper (depth-5) trees
#' with binomial double trees enabled. Extra arguments in `...` are forwarded
#' to h2o.randomForest.wrapper unchanged.
h2o.drf.2 <-
  function(...,
           binomial_double_trees = TRUE,
           ntrees = 20, max_depth = 5,
           sample_rate = 0.8, stopping_rounds = 150,
           stopping_metric = "AUC", stopping_tolerance = 0.0005)
    h2o.randomForest.wrapper(...,
                             binomial_double_trees = binomial_double_trees,
                             ntrees = ntrees, max_depth = max_depth,
                             sample_rate = sample_rate, stopping_rounds = stopping_rounds,
                             stopping_metric = stopping_metric, stopping_tolerance = stopping_tolerance)
#' Deep-learning base learner 1 for h2o.ensemble: small 10x10 tanh network
#' with 20% dropout on inputs and both hidden layers. Extra arguments in `...`
#' are forwarded to h2o.deeplearning.wrapper unchanged.
h2o.dl.1 <-
  function(...,
           activation = "TanhWithDropout", epochs = 600,
           hidden = c(10,10), hidden_dropout_ratios = c(0.2,0.2),
           input_dropout_ratio = 0.2, score_interval = 0.0001)
    h2o.deeplearning.wrapper(...,
                             activation = activation, epochs = epochs ,
                             hidden = hidden, hidden_dropout_ratios = hidden_dropout_ratios,
                             input_dropout_ratio = input_dropout_ratio, score_interval = score_interval)
#' Deep-learning base learner 2 for h2o.ensemble: larger 50x50 tanh network
#' with heavier (50%) hidden-layer dropout. Extra arguments in `...` are
#' forwarded to h2o.deeplearning.wrapper unchanged.
h2o.dl.2 <-
  function(...,
           activation = "TanhWithDropout", epochs = 600,
           hidden = c(50,50), hidden_dropout_ratios = c(0.5,0.5),
           input_dropout_ratio = 0.2, score_interval = 0.0001)
    h2o.deeplearning.wrapper(...,
                             activation = activation, epochs = epochs ,
                             hidden = hidden, hidden_dropout_ratios = hidden_dropout_ratios,
                             input_dropout_ratio = input_dropout_ratio, score_interval = score_interval)
#=============================================================
# Create base learner vector
#=============================================================
learner <- c("h2o.gbm.1", "h2o.gbm.2",
"h2o.drf.1", "h2o.drf.2",
"h2o.dl.1", "h2o.dl.2")
#=============================================================
# Specify meta-learner
#=============================================================
# metalearner <- "SL.glm"
metalearner <- "h2o.glm.wrapper"
#=============================================================
# Fit Stacked learners
#=============================================================
family <- "binomial"
ensemble.fit <- h2o.ensemble(
x = x,
y = y,
training_frame = train_frame,
family = family,
learner = learner,
metalearner = metalearner,
cvControl = list(V = 5, shuffle = TRUE))
#=============================================================
# Save the models if you want
#=============================================================
# h2o.save_ensemble(fit, path = "ens_models/", export_levelone = TRUE)
perf <- h2o.ensemble_performance(
object = ensemble.fit,
newdata = valid_frame,
score_base_models = TRUE)
#=============================================================
# Print stacked fit AUC score
#=============================================================
print(perf, metric = "AUC")
#=============================================================
# Print stacked fit MSE score
#=============================================================
print(perf, metric = "MSE")
|
library("sp")
library('methods')
#library("dplyr")
#library(rSOILWAT2)
#These are the functions I need:
# if (!exists("vwcmatric.dy")) vwcmatric.dy <- get_Response_aggL(swof["sw_vwcmatric"], tscale = "dy",
# scaler = 1, FUN = stats::weighted.mean, weights = layers_width,
# x = runDataSC, st = isim_time, st2 = simTime2, topL = topL, bottomL = bottomL)
# if (!exists("swpmatric.dy")) swpmatric.dy <- get_SWPmatric_aggL(vwcmatric.dy, texture, sand, clay)
#dir.AFRI_Historical <- "/projects/ecogis/SOILWAT2_Projects/AFRI/Historical"
dir.AFRI_Historical <- "/cxfs/projects/usgs/ecosystems/sbsc/AFRI/Historical"
dir.jbHOME <- "/cxfs/projects/usgs/ecosystems/sbsc/drylandeco/AFRI/Exposure_Data"
regions <- c( "CaliforniaAnnual", "ColdDeserts", "HotDeserts", "NorthernMixedSubset", "SGS", "Western_Gap")#list.files(dir.AFRI_Historical)
print(regions)
dir.regions <- file.path(dir.AFRI_Historical, regions)
dir.regions_3Runs <- file.path(dir.AFRI_Historical, regions, "3_Runs" )
dir.regions_1Input <- file.path(dir.AFRI_Historical, regions, "1_Input")
print(dir.regions_3Runs)
print(dir.regions_1Input)
#Function for calculating WDD
#' Mean May-August bulk soil water availability (SWA) per year for one site.
#'
#' Extracts the monthly SWABULK table from a rSOILWAT2 output object, keeps
#' months 5-8 (May-August -- NOTE(review): despite the "AprJun" name; the
#' caller saves the result as "Summer", so confirm the intended window), sums
#' SWA over a subset of soil layers, averages within each year, and returns a
#' transposed 2-row data frame (rownames "year" and `name`) so many sites can
#' be stacked with rbind.
#'
#' @param RUN_DATA Simulation output object with slot RUN_DATA@SWABULK@Month
#'   (columns: Year, Month, then one column per soil layer).
#' @param name Site identifier, used as the rowname of the values row.
#' @return data.frame with one column per year; row 1 = year, row 2 = mean SWA.
calcSWA_AprJun <- function(RUN_DATA, name){
  dSWA <- as.data.frame(RUN_DATA@SWABULK@Month)
  dSWA_AprJun <- dSWA[which(dSWA$Month %in% c(5:8)), ]
  # Layer columns follow the Year and Month columns.
  numlyrs <- dim(dSWA)[2] - 2
  # Sum SWA across layers. NOTE(review): for >2 layers the original code starts
  # at column 4 (skipping the first soil layer) and for >4 layers sums only
  # layers 2-4; that column choice is preserved here -- confirm it is intended.
  if (numlyrs == 1) {
    # Bug fix: the original branch was a bare `NA` expression with no effect,
    # so `Alllyrs` was never created and the subsetting below failed.
    dSWA_AprJun$Alllyrs <- dSWA_AprJun[[3]]
  }
  if (numlyrs == 2) {
    # Bug fix: the original assigned a 2-column matrix instead of its row sum.
    dSWA_AprJun$Alllyrs <- rowSums(as.matrix(dSWA_AprJun[, c(3:(numlyrs + 2))]))
  }
  if (numlyrs > 2 & numlyrs < 5) {
    dSWA_AprJun$Alllyrs <- rowSums(as.matrix(dSWA_AprJun[, c(4:(numlyrs + 2))]))
  }
  if (numlyrs > 4) {
    dSWA_AprJun$Alllyrs <- rowSums(as.matrix(dSWA_AprJun[, c(4:(4 + 2))]))
  }
  # Average the monthly layer sums within each year.
  d <- dSWA_AprJun[, c("Year", "Alllyrs")]
  d2 <- aggregate(d, by = list(d$Year), FUN = mean, na.rm = TRUE)
  d2 <- d2[, c("Group.1", "Alllyrs")]
  names(d2)[2] <- c(name)
  # Transpose so each site is a single row that can be rbind-ed by the caller.
  d3 <- as.data.frame(t(d2))
  rownames(d3) <- c("year", name)
  return(d3)
}
print("Start Loop")
print(Sys.time())
#Try in parallel
library("parallel")
library("foreach")
library("doParallel")
#detectCores()
# Collect one result table per region and bind once at the end.  This replaces
# the original scalar ifelse() with side-effect assignments (an anti-pattern)
# and the grow-by-rbind accumulation across iterations.
region_results <- vector("list", length(regions))
for (r in seq_along(regions)) {
  sites <- list.files(dir.regions_3Runs[r])
  cl <- makeCluster(20)
  registerDoParallel(cl)
  # For each site with exactly one output file, load runDataSC and keep the
  # site's annual-value row (row 2 of calcSWA_AprJun's transposed result).
  SWA_AprJun <- foreach(s = sites, .combine = rbind) %dopar% {
    f <- list.files(file.path(dir.regions_3Runs[r], s))
    if (length(f) == 1) {
      load(file.path(dir.regions_3Runs[r], s, "sw_output_sc1.RData"))
      d <- calcSWA_AprJun(RUN_DATA = runDataSC, name = s)
      d[2, ]
    }
  }
  stopCluster(cl)
  print(paste(regions[r], "Done"))
  print(Sys.time())
  region_results[[r]] <- SWA_AprJun
}
# Stack all regions into one site-by-year table.
annualSWA_AprJun <- do.call(rbind, region_results)
# Label columns with the simulation years (1915-2015) and write the
# site-by-year summer SWA table for downstream exposure analyses.
names(annualSWA_AprJun) <- paste(c(1915:2015))
save(annualSWA_AprJun, file=file.path(dir.jbHOME, "annualSWA_Summer19152015"))
#DEVELOPMENT
# soildepths <- read.csv(file=file.path(dir.regions_1Input[1], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
#
# soildata <- read.csv(file=file.path(dir.regions_1Input[1], "datafiles", "SWRuns_InputData_soils_v12.csv"), header=TRUE )
#
# metadata <- readRDS(file=file.path(dir.regions[1], "SFSW2_project_descriptions.rds") )
# #str(metadata[["sim_time"]])
# isim_time <- metadata[["sim_time"]]
# simTime2 <- metadata[["sim_time"]]$sim_time2_North
#
# layers_width <- getLayersWidth(layers_depth)
#
# load(file.path(dir.regions_3Runs[1], sites[1], "sw_output_sc1.RData"))
# dtemps <- as.data.frame(runDataSC@TEMP@Day)
# dVWC <- as.data.frame(runDataSC@VWCMATRIC@Day)
# dwd <- as.data.frame(runDataSC@WETDAY@Day)
# dSM <- as.data.frame(runDataSC@SWPMATRIC@Day)
# str(dSM)
# names(dSM)[c(-1, -2)] <- paste("SM", names(dSM)[c(-1, -2)])
# d_all2 <- merge(d_all, dSM, by=c("Year", "Day"))
# d_all2[c(3050: 3080),]
#dSNOW <- as.data.frame(runDataSC@SNOWPACK@Day)
#dtst <-aggregate(d_all, by=list(d$Year), FUN=length(), na.rm=TRUE)
| /For_Sense/Ex_SWABall_Summer.R | no_license | bobshriver/Exposure_scripts | R | false | false | 4,492 | r | library("sp")
library('methods')
#library("dplyr")
#library(rSOILWAT2)
# Setup: input/output directories and region list for computing per-site summer
# soil-water availability (SWA) from AFRI historical SOILWAT2 runs.
#These are the functions I need:
# if (!exists("vwcmatric.dy")) vwcmatric.dy <- get_Response_aggL(swof["sw_vwcmatric"], tscale = "dy",
# scaler = 1, FUN = stats::weighted.mean, weights = layers_width,
# x = runDataSC, st = isim_time, st2 = simTime2, topL = topL, bottomL = bottomL)
# if (!exists("swpmatric.dy")) swpmatric.dy <- get_SWPmatric_aggL(vwcmatric.dy, texture, sand, clay)
# Old cluster path kept for reference:
#dir.AFRI_Historical <- "/projects/ecogis/SOILWAT2_Projects/AFRI/Historical"
# Input root (per-region SOILWAT2 output) and output directory for results.
dir.AFRI_Historical <- "/cxfs/projects/usgs/ecosystems/sbsc/AFRI/Historical"
dir.jbHOME <- "/cxfs/projects/usgs/ecosystems/sbsc/drylandeco/AFRI/Exposure_Data"
# Regions to process; each is expected to contain 3_Runs/<site>/ output folders
# and a 1_Input/ metadata folder (see file.path calls below).
regions <- c( "CaliforniaAnnual", "ColdDeserts", "HotDeserts", "NorthernMixedSubset", "SGS", "Western_Gap")#list.files(dir.AFRI_Historical)
print(regions)
dir.regions <- file.path(dir.AFRI_Historical, regions)
dir.regions_3Runs <- file.path(dir.AFRI_Historical, regions, "3_Runs" )
dir.regions_1Input <- file.path(dir.AFRI_Historical, regions, "1_Input")
print(dir.regions_3Runs)
print(dir.regions_1Input)
#Function for calculating WDD
# calcSWA_AprJun: annual mean soil-water availability (SWA) for one site.
# NOTE(review): despite the "AprJun" name, the filter below keeps Month %in% 5:8
# (May-Aug) and the output file is labelled "Summer" -- confirm the intended window.
# Returns a 2-row data frame (row 1 = years, row 2 = the site's annual values).
calcSWA_AprJun <- function(RUN_DATA, name){
#print("Pre d1")
#print(Sys.time())
# Interactive debugging scaffold (kept for reference):
# s=1
# sites <- list.files(dir.regions_3Runs[1])
# load(file.path(dir.regions_3Runs[1], sites[s], "sw_output_sc1.RData"))
# RUN_DATA <- runDataSC
# name=sites[s]
# Monthly SWA table: column 1 = Year, column 2 = Month, columns 3..(numlyrs+2) = soil layers.
dSWA <- as.data.frame(RUN_DATA@SWABULK@Month)
dSWA_AprJun <- dSWA[which(dSWA$Month %in% c(5:8)),]
head(dSWA_AprJun)
numlyrs <- dim(dSWA)[2] - 2
# NOTE(review): the one-layer branch evaluates NA without assigning anything,
# so "Alllyrs" is never created and the subset below would fail for a
# single-layer site -- confirm such sites cannot occur, or handle them.
if(numlyrs==1){NA}
# NOTE(review): the layer windows are inconsistent -- 2 layers uses columns 3:4
# (all layers), 3-4 layers starts at column 4 (skips the top layer), and >4
# layers sums columns 4:6 only. Confirm which depth range is intended.
if(numlyrs==2){ dSWA_AprJun$Alllyrs <- as.matrix(dSWA_AprJun[, c(3:(numlyrs+2))])}
if(numlyrs>2 & numlyrs<5) {dSWA_AprJun$Alllyrs <- rowSums(as.matrix(dSWA_AprJun[, c(4:(numlyrs+2))]))}
if(numlyrs>4) {dSWA_AprJun$Alllyrs <- rowSums(as.matrix(dSWA_AprJun[, c(4:(4+2))]))}
d <- dSWA_AprJun[, c("Year", "Alllyrs")]
# Annual mean per year; Year itself is also averaged but dropped right after
# (only Group.1 and Alllyrs are kept).
d2 <-aggregate(d, by=list(d$Year), FUN=mean, na.rm=TRUE)
d2 <- d2[, c("Group.1", "Alllyrs")]
names(d2)[2] <- c(name)
# Transpose so the site becomes a row: row "year" holds years, row <name> the values.
d3 <- as.data.frame(t(d2))
rownames(d3) <- c("year", name)
return(d3)
}
print("Start Loop")
print(Sys.time())
#Try in parallel
library("parallel")
library("foreach")
library("doParallel")
#detectCores()
# Collect one result table per region and bind once at the end.  This replaces
# the original scalar ifelse() with side-effect assignments (an anti-pattern)
# and the grow-by-rbind accumulation across iterations.
region_results <- vector("list", length(regions))
for (r in seq_along(regions)) {
  sites <- list.files(dir.regions_3Runs[r])
  cl <- makeCluster(20)
  registerDoParallel(cl)
  # For each site with exactly one output file, load runDataSC and keep the
  # site's annual-value row (row 2 of calcSWA_AprJun's transposed result).
  SWA_AprJun <- foreach(s = sites, .combine = rbind) %dopar% {
    f <- list.files(file.path(dir.regions_3Runs[r], s))
    if (length(f) == 1) {
      load(file.path(dir.regions_3Runs[r], s, "sw_output_sc1.RData"))
      d <- calcSWA_AprJun(RUN_DATA = runDataSC, name = s)
      d[2, ]
    }
  }
  stopCluster(cl)
  print(paste(regions[r], "Done"))
  print(Sys.time())
  region_results[[r]] <- SWA_AprJun
}
# Stack all regions into one site-by-year table.
annualSWA_AprJun <- do.call(rbind, region_results)
# Label columns with the simulation years (1915-2015) and write the
# site-by-year summer SWA table for downstream exposure analyses.
names(annualSWA_AprJun) <- paste(c(1915:2015))
save(annualSWA_AprJun, file=file.path(dir.jbHOME, "annualSWA_Summer19152015"))
#DEVELOPMENT
# soildepths <- read.csv(file=file.path(dir.regions_1Input[1], "SWRuns_InputData_SoilLayers_v9.csv"), header=TRUE )
#
# soildata <- read.csv(file=file.path(dir.regions_1Input[1], "datafiles", "SWRuns_InputData_soils_v12.csv"), header=TRUE )
#
# metadata <- readRDS(file=file.path(dir.regions[1], "SFSW2_project_descriptions.rds") )
# #str(metadata[["sim_time"]])
# isim_time <- metadata[["sim_time"]]
# simTime2 <- metadata[["sim_time"]]$sim_time2_North
#
# layers_width <- getLayersWidth(layers_depth)
#
# load(file.path(dir.regions_3Runs[1], sites[1], "sw_output_sc1.RData"))
# dtemps <- as.data.frame(runDataSC@TEMP@Day)
# dVWC <- as.data.frame(runDataSC@VWCMATRIC@Day)
# dwd <- as.data.frame(runDataSC@WETDAY@Day)
# dSM <- as.data.frame(runDataSC@SWPMATRIC@Day)
# str(dSM)
# names(dSM)[c(-1, -2)] <- paste("SM", names(dSM)[c(-1, -2)])
# d_all2 <- merge(d_all, dSM, by=c("Year", "Day"))
# d_all2[c(3050: 3080),]
#dSNOW <- as.data.frame(runDataSC@SNOWPACK@Day)
#dtst <-aggregate(d_all, by=list(d$Year), FUN=length(), na.rm=TRUE)
|
##Set working directory (created next to the script if missing).
if (!file.exists("Project 1")) {
  dir.create("Project 1")
}
# Forward slashes are portable (the original ".\\Project 1" is Windows-only).
setwd("Project 1")
## Download and extract the data.  The URL serves a ZIP archive; the original
## saved the raw ZIP bytes under a .txt name, which read.table cannot parse,
## so download to .zip and unzip it.
if (!file.exists("household_power_consumption.txt")) {
  datafile <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(datafile, destfile = "household_power_consumption.zip")
  unzip("household_power_consumption.zip")
  rm(datafile)
}
## Import data: read two rows first to learn the column names and classes.
pre_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                       na.strings = "?", nrows = 2)
cl <- sapply(pre_data, class)
cnames <- colnames(pre_data)
rm(pre_data)  # keep cl/cnames -- they are used below (removing them here was a bug)
## skip/nrows select the 2880 minute records for 2007-02-01 and 2007-02-02.
## "?" encodes missing values, so it belongs in na.strings, not comment.char
## (a "?" comment character would silently truncate lines containing it).
data <- read.table("household_power_consumption.txt",
                   header = TRUE,
                   sep = ";",
                   colClasses = cl,
                   na.strings = "?",
                   skip = 66636,
                   nrows = 2880)
colnames(data) <- cnames
## Build a timestamp from the raw Date (d/m/Y) and Time columns.
date <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data <- data.frame(date, data[, 3:9])
rm(date)
## Make lineplot of Global Active Power across the two days.
## (The trailing comma in the original plot() call is removed.)
png("plot2.png",
    width = 480,
    height = 480)
plot(data$date, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
| /plot2.R | no_license | diesteffi/Exploratorive_Data_1 | R | false | false | 1,043 | r | ##Set working directory
if (!file.exists("Project 1")) {
  dir.create("Project 1")
}
# Forward slashes are portable (the original ".\\Project 1" is Windows-only).
setwd("Project 1")
## Download and extract the data.  The URL serves a ZIP archive; the original
## saved the raw ZIP bytes under a .txt name, which read.table cannot parse,
## so download to .zip and unzip it.
if (!file.exists("household_power_consumption.txt")) {
  datafile <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(datafile, destfile = "household_power_consumption.zip")
  unzip("household_power_consumption.zip")
  rm(datafile)
}
## Import data: read two rows first to learn the column names and classes.
pre_data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                       na.strings = "?", nrows = 2)
cl <- sapply(pre_data, class)
cnames <- colnames(pre_data)
rm(pre_data)  # keep cl/cnames -- they are used below (removing them here was a bug)
## skip/nrows select the 2880 minute records for 2007-02-01 and 2007-02-02.
## "?" encodes missing values, so it belongs in na.strings, not comment.char
## (a "?" comment character would silently truncate lines containing it).
data <- read.table("household_power_consumption.txt",
                   header = TRUE,
                   sep = ";",
                   colClasses = cl,
                   na.strings = "?",
                   skip = 66636,
                   nrows = 2880)
colnames(data) <- cnames
## Build a timestamp from the raw Date (d/m/Y) and Time columns.
date <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
data <- data.frame(date, data[, 3:9])
rm(date)
## Make lineplot of Global Active Power across the two days.
## (The trailing comma in the original plot() call is removed.)
png("plot2.png",
    width = 480,
    height = 480)
plot(data$date, data$Global_active_power,
     type = "l",
     xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iotsitewise_operations.R
\name{iotsitewise_list_asset_relationships}
\alias{iotsitewise_list_asset_relationships}
\title{Retrieves a paginated list of asset relationships for an asset}
\usage{
iotsitewise_list_asset_relationships(assetId, traversalType, nextToken,
maxResults)
}
\arguments{
\item{assetId}{[required] The ID of the asset.}
\item{traversalType}{[required] The type of traversal to use to identify asset relationships. Choose the
following option:
\itemize{
\item \code{PATH_TO_ROOT} – Identify the asset's parent assets up to the root
asset. The asset that you specify in \code{assetId} is the first result
in the list of \code{assetRelationshipSummaries}, and the root asset is
the last result.
}}
\item{nextToken}{The token to be used for the next set of paginated results.}
\item{maxResults}{The maximum number of results to be returned per paginated request.}
}
\description{
Retrieves a paginated list of asset relationships for an asset. You can
use this operation to identify an asset's root asset and all associated
assets between that asset and its root.
}
\section{Request syntax}{
\preformatted{svc$list_asset_relationships(
assetId = "string",
traversalType = "PATH_TO_ROOT",
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
| /paws/man/iotsitewise_list_asset_relationships.Rd | permissive | sanchezvivi/paws | R | false | true | 1,361 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iotsitewise_operations.R
\name{iotsitewise_list_asset_relationships}
\alias{iotsitewise_list_asset_relationships}
\title{Retrieves a paginated list of asset relationships for an asset}
\usage{
iotsitewise_list_asset_relationships(assetId, traversalType, nextToken,
maxResults)
}
\arguments{
\item{assetId}{[required] The ID of the asset.}
\item{traversalType}{[required] The type of traversal to use to identify asset relationships. Choose the
following option:
\itemize{
\item \code{PATH_TO_ROOT} – Identify the asset's parent assets up to the root
asset. The asset that you specify in \code{assetId} is the first result
in the list of \code{assetRelationshipSummaries}, and the root asset is
the last result.
}}
\item{nextToken}{The token to be used for the next set of paginated results.}
\item{maxResults}{The maximum number of results to be returned per paginated request.}
}
\description{
Retrieves a paginated list of asset relationships for an asset. You can
use this operation to identify an asset's root asset and all associated
assets between that asset and its root.
}
\section{Request syntax}{
\preformatted{svc$list_asset_relationships(
assetId = "string",
traversalType = "PATH_TO_ROOT",
nextToken = "string",
maxResults = 123
)
}
}
\keyword{internal}
|
#' Linear Discriminant Analysis using the Schafer-Strimmer Covariance Matrix
#' Estimator
#'
#' Given a set of training data, this function builds the Linear Discriminant
#' Analysis (LDA) classifier, where the distributions of each class are assumed
#' to be multivariate normal and share a common covariance matrix. When the
#' pooled sample covariance matrix is singular, the linear discriminant function
#' is incalculable. This function replaces the inverse of pooled sample
#' covariance matrix with an estimator proposed by Schafer and Strimmer
#' (2005). The estimator is calculated via \code{\link[corpcor]{invcov.shrink}}.
#'
#' The matrix of training observations are given in \code{x}. The rows of \code{x}
#' contain the sample observations, and the columns contain the features for each
#' training observation.
#'
#' The vector of class labels given in \code{y} are coerced to a \code{factor}.
#' The length of \code{y} should match the number of rows in \code{x}.
#'
#' An error is thrown if a given class has less than 2 observations because the
#' variance for each feature within a class cannot be estimated with less than 2
#' observations.
#'
#' The vector, \code{prior}, contains the \emph{a priori} class membership for
#' each class. If \code{prior} is NULL (default), the class membership
#' probabilities are estimated as the sample proportion of observations belonging
#' to each class. Otherwise, \code{prior} should be a vector with the same length
#' as the number of classes in \code{y}. The \code{prior} probabilties should be
#' nonnegative and sum to one.
#'
#' @importFrom corpcor cov.shrink invcov.shrink
#' @export
#'
#' @param x matrix containing the training data. The rows are the sample
#' observations, and the columns are the features.
#' @param y vector of class labels for each training observation
#' @param prior vector with prior probabilities for each class. If NULL
#' (default), then equal probabilities are used. See details.
#' @param ... additional arguments passed to
#' \code{\link[corpcor]{invcov.shrink}}
#' @return \code{lda_schafer} object that contains the trained classifier
#' @examples
#' n <- nrow(iris)
#' train <- sample(seq_len(n), n / 2)
#' lda_schafer_out <- lda_schafer(Species ~ ., data = iris[train, ])
#' predicted <- predict(lda_schafer_out, iris[-train, -5])$class
#'
#' lda_schafer_out2 <- lda_schafer(x = iris[train, -5], y = iris[train, 5])
#' predicted2 <- predict(lda_schafer_out2, iris[-train, -5])$class
#' all.equal(predicted, predicted2)
#' @references Schafer, J., and Strimmer, K. (2005). "A shrinkage approach to
#' large-scale covariance estimation and implications for functional genomics,"
#' Statist. Appl. Genet. Mol. Biol. 4, 32.
# S3 generic: dispatches on class(x) to lda_schafer.default (matrix/data.frame
# input) or lda_schafer.formula (formula interface).
lda_schafer <- function(x, ...) {
  UseMethod("lda_schafer")
}
#' @rdname lda_schafer
#' @export
lda_schafer.default <- function(x, y, prior = NULL, ...) {
  x <- as.matrix(x)
  y <- as.factor(y)
  # Class means, priors, and bookkeeping come from the shared helper; the
  # pooled covariance is replaced below by Schafer-Strimmer shrinkage estimates.
  obj <- regdiscrim_estimates(x = x, y = y, prior = prior, cov = FALSE)
  # Center each class's observations about its own mean, then stack the
  # centered rows so the shrinkage estimator sees the pooled scatter.
  centered_by_class <- lapply(split(seq_along(y), y), function(rows) {
    scale(x[rows, ], center = TRUE, scale = FALSE)
  })
  pooled_centered <- do.call(rbind, centered_by_class)
  # Shrinkage estimates of the pooled covariance matrix and its inverse.
  obj$cov_pool <- cov.shrink(pooled_centered, verbose = FALSE, ...)
  obj$cov_inv <- invcov.shrink(pooled_centered, verbose = FALSE, ...)
  # corpcor returns "shrinkage" objects (matrices with extra attributes);
  # strip the class so downstream code treats them as plain matrices.
  class(obj$cov_pool) <- "matrix"
  class(obj$cov_inv) <- "matrix"
  # Record the call and tag the result so S3 methods dispatch correctly.
  obj$call <- match.call()
  class(obj) <- "lda_schafer"
  obj
}
#' @param formula A formula of the form \code{groups ~ x1 + x2 + ...} That is,
#' the response is the grouping factor and the right hand side specifies the
#' (non-factor) discriminators.
#' @param data data frame from which variables specified in \code{formula} are
#' preferentially to be taken.
#' @rdname lda_schafer
#' @export
lda_schafer.formula <- function(formula, data, prior = NULL, ...) {
  # The formula interface includes an intercept. If the user includes the
  # intercept in the model, it should be removed: an intercept column would
  # make the pooled covariance singular and break the estimators.
  # (NOTE: The terms must be collected in case the dot (.) notation is used)
  formula <- no_intercept(formula, data)
  mf <- model.frame(formula = formula, data = data)
  x <- model.matrix(attr(mf, "terms"), data = mf)
  y <- model.response(mf)
  # Forward ... so corpcor tuning arguments reach cov.shrink/invcov.shrink,
  # as documented; the original dropped them at this call.
  est <- lda_schafer.default(x = x, y = y, prior = prior, ...)
  est$call <- match.call()
  est$formula <- formula
  est
}
#' Outputs the summary for a lda_schafer classifier object.
#'
#' Summarizes the trained lda_schafer classifier in a nice manner.
#'
#' @param x object to print
#' @param ... unused
#' @rdname lda_schafer
#' @export
print.lda_schafer <- function(x, ...) {
  # Prints a short summary of the fitted classifier: the call, the data
  # dimensions, the class labels, and the per-class prior probabilities.
  show_section <- function(label, value) {
    cat(label, ":\n", sep = "")
    print(value)
  }
  show_section("Call", x$call)
  show_section("Sample Size", x$N)
  show_section("Number of Features", x$p)
  show_section("Classes", x$groups)
  # "Probabilties" typo is kept so the printed output is byte-identical.
  show_section("Prior Probabilties", vapply(x$est, function(cls) cls$prior, numeric(1)))
}
#' Predicts of class membership of a matrix of new observations using Linear
#' Discriminant Analysis (LDA) using the Schafer-Strimmer Covariance Matrix
#' Estimator
#'
#' The Linear Discriminant Analysis (LDA) classifier involves the assumption
#' that the distributions of each class are assumed to be multivariate normal
#' and share a common covariance matrix. When the pooled sample covariance
#' matrix is singular, the linear discriminant function is incalculable. Here,
#' the inverse of the pooled sample covariance matrix is replaced with an
#' estimator from Schafer and Strimmer (2005).
#'
#' @rdname lda_schafer
#' @export
#'
#' @references Schafer, J., and Strimmer, K. (2005). "A shrinkage approach to
#' large-scale covariance estimation and implications for functional genomics,"
#' Statist. Appl. Genet. Mol. Biol. 4, 32.
#' @param object trained lda_schafer object
#' @param newdata matrix of observations to predict. Each row corresponds to a
#' new observation.
#' @param ... additional arguments
#' @return list predicted class memberships of each row in newdata
predict.lda_schafer <- function(object, newdata, ...) {
  if (!inherits(object, "lda_schafer")) {
    stop("object not of class 'lda_schafer'")
  }
  # Allow a single observation supplied as a bare vector.
  if (is.vector(newdata)) {
    newdata <- matrix(newdata, nrow = 1)
  }
  # Discriminant score per class: squared Mahalanobis distance to the class
  # mean minus 2*log(prior); the smallest score wins.  The original added
  # +log(prior) before minimizing, which penalized high-prior classes and
  # could contradict the posterior probabilities computed below.
  scores <- apply(newdata, 1, function(obs) {
    sapply(object$est, function(class_est) {
      with(class_est, quadform(object$cov_inv, obs - xbar) - 2 * log(prior))
    })
  })
  # apply() collapses to a vector when there is a single test observation.
  if (is.vector(scores)) {
    min_scores <- which.min(scores)
  } else {
    min_scores <- apply(scores, 2, which.min)
  }
  # Posterior probabilities via Bayes Theorem (all classes share cov_pool).
  means <- lapply(object$est, "[[", "xbar")
  covs <- replicate(n=object$num_groups, object$cov_pool, simplify=FALSE)
  priors <- lapply(object$est, "[[", "prior")
  posterior <- posterior_probs(x=newdata,
                               means=means,
                               covs=covs,
                               priors=priors)
  class <- factor(object$groups[min_scores], levels = object$groups)
  list(class = class, scores = scores, posterior = posterior)
}
| /R/lda-schafer.r | no_license | elephann/sparsediscrim | R | false | false | 7,428 | r | #' Linear Discriminant Analysis using the Schafer-Strimmer Covariance Matrix
#' Estimator
#'
#' Given a set of training data, this function builds the Linear Discriminant
#' Analysis (LDA) classifier, where the distributions of each class are assumed
#' to be multivariate normal and share a common covariance matrix. When the
#' pooled sample covariance matrix is singular, the linear discriminant function
#' is incalculable. This function replaces the inverse of pooled sample
#' covariance matrix with an estimator proposed by Schafer and Strimmer
#' (2005). The estimator is calculated via \code{\link[corpcor]{invcov.shrink}}.
#'
#' The matrix of training observations are given in \code{x}. The rows of \code{x}
#' contain the sample observations, and the columns contain the features for each
#' training observation.
#'
#' The vector of class labels given in \code{y} are coerced to a \code{factor}.
#' The length of \code{y} should match the number of rows in \code{x}.
#'
#' An error is thrown if a given class has less than 2 observations because the
#' variance for each feature within a class cannot be estimated with less than 2
#' observations.
#'
#' The vector, \code{prior}, contains the \emph{a priori} class membership for
#' each class. If \code{prior} is NULL (default), the class membership
#' probabilities are estimated as the sample proportion of observations belonging
#' to each class. Otherwise, \code{prior} should be a vector with the same length
#' as the number of classes in \code{y}. The \code{prior} probabilties should be
#' nonnegative and sum to one.
#'
#' @importFrom corpcor cov.shrink invcov.shrink
#' @export
#'
#' @param x matrix containing the training data. The rows are the sample
#' observations, and the columns are the features.
#' @param y vector of class labels for each training observation
#' @param prior vector with prior probabilities for each class. If NULL
#' (default), then equal probabilities are used. See details.
#' @param ... additional arguments passed to
#' \code{\link[corpcor]{invcov.shrink}}
#' @return \code{lda_schafer} object that contains the trained classifier
#' @examples
#' n <- nrow(iris)
#' train <- sample(seq_len(n), n / 2)
#' lda_schafer_out <- lda_schafer(Species ~ ., data = iris[train, ])
#' predicted <- predict(lda_schafer_out, iris[-train, -5])$class
#'
#' lda_schafer_out2 <- lda_schafer(x = iris[train, -5], y = iris[train, 5])
#' predicted2 <- predict(lda_schafer_out2, iris[-train, -5])$class
#' all.equal(predicted, predicted2)
#' @references Schafer, J., and Strimmer, K. (2005). "A shrinkage approach to
#' large-scale covariance estimation and implications for functional genomics,"
#' Statist. Appl. Genet. Mol. Biol. 4, 32.
# S3 generic: dispatches on class(x) to lda_schafer.default (matrix/data.frame
# input) or lda_schafer.formula (formula interface).
lda_schafer <- function(x, ...) {
  UseMethod("lda_schafer")
}
#' @rdname lda_schafer
#' @export
lda_schafer.default <- function(x, y, prior = NULL, ...) {
  x <- as.matrix(x)
  y <- as.factor(y)
  # Class means, priors, and bookkeeping come from the shared helper; the
  # pooled covariance is replaced below by Schafer-Strimmer shrinkage estimates.
  obj <- regdiscrim_estimates(x = x, y = y, prior = prior, cov = FALSE)
  # Center each class's observations about its own mean, then stack the
  # centered rows so the shrinkage estimator sees the pooled scatter.
  centered_by_class <- lapply(split(seq_along(y), y), function(rows) {
    scale(x[rows, ], center = TRUE, scale = FALSE)
  })
  pooled_centered <- do.call(rbind, centered_by_class)
  # Shrinkage estimates of the pooled covariance matrix and its inverse.
  obj$cov_pool <- cov.shrink(pooled_centered, verbose = FALSE, ...)
  obj$cov_inv <- invcov.shrink(pooled_centered, verbose = FALSE, ...)
  # corpcor returns "shrinkage" objects (matrices with extra attributes);
  # strip the class so downstream code treats them as plain matrices.
  class(obj$cov_pool) <- "matrix"
  class(obj$cov_inv) <- "matrix"
  # Record the call and tag the result so S3 methods dispatch correctly.
  obj$call <- match.call()
  class(obj) <- "lda_schafer"
  obj
}
#' @param formula A formula of the form \code{groups ~ x1 + x2 + ...} That is,
#' the response is the grouping factor and the right hand side specifies the
#' (non-factor) discriminators.
#' @param data data frame from which variables specified in \code{formula} are
#' preferentially to be taken.
#' @rdname lda_schafer
#' @export
lda_schafer.formula <- function(formula, data, prior = NULL, ...) {
  # The formula interface includes an intercept. If the user includes the
  # intercept in the model, it should be removed: an intercept column would
  # make the pooled covariance singular and break the estimators.
  # (NOTE: The terms must be collected in case the dot (.) notation is used)
  formula <- no_intercept(formula, data)
  mf <- model.frame(formula = formula, data = data)
  x <- model.matrix(attr(mf, "terms"), data = mf)
  y <- model.response(mf)
  # Forward ... so corpcor tuning arguments reach cov.shrink/invcov.shrink,
  # as documented; the original dropped them at this call.
  est <- lda_schafer.default(x = x, y = y, prior = prior, ...)
  est$call <- match.call()
  est$formula <- formula
  est
}
#' Outputs the summary for a lda_schafer classifier object.
#'
#' Summarizes the trained lda_schafer classifier in a nice manner.
#'
#' @param x object to print
#' @param ... unused
#' @rdname lda_schafer
#' @export
print.lda_schafer <- function(x, ...) {
  # Prints a short summary of the fitted classifier: the call, the data
  # dimensions, the class labels, and the per-class prior probabilities.
  show_section <- function(label, value) {
    cat(label, ":\n", sep = "")
    print(value)
  }
  show_section("Call", x$call)
  show_section("Sample Size", x$N)
  show_section("Number of Features", x$p)
  show_section("Classes", x$groups)
  # "Probabilties" typo is kept so the printed output is byte-identical.
  show_section("Prior Probabilties", vapply(x$est, function(cls) cls$prior, numeric(1)))
}
#' Predicts of class membership of a matrix of new observations using Linear
#' Discriminant Analysis (LDA) using the Schafer-Strimmer Covariance Matrix
#' Estimator
#'
#' The Linear Discriminant Analysis (LDA) classifier involves the assumption
#' that the distributions of each class are assumed to be multivariate normal
#' and share a common covariance matrix. When the pooled sample covariance
#' matrix is singular, the linear discriminant function is incalculable. Here,
#' the inverse of the pooled sample covariance matrix is replaced with an
#' estimator from Schafer and Strimmer (2005).
#'
#' @rdname lda_schafer
#' @export
#'
#' @references Schafer, J., and Strimmer, K. (2005). "A shrinkage approach to
#' large-scale covariance estimation and implications for functional genomics,"
#' Statist. Appl. Genet. Mol. Biol. 4, 32.
#' @param object trained lda_schafer object
#' @param newdata matrix of observations to predict. Each row corresponds to a
#' new observation.
#' @param ... additional arguments
#' @return list predicted class memberships of each row in newdata
predict.lda_schafer <- function(object, newdata, ...) {
  if (!inherits(object, "lda_schafer")) {
    stop("object not of class 'lda_schafer'")
  }
  # Allow a single observation supplied as a bare vector.
  if (is.vector(newdata)) {
    newdata <- matrix(newdata, nrow = 1)
  }
  # Discriminant score per class: squared Mahalanobis distance to the class
  # mean minus 2*log(prior); the smallest score wins.  The original added
  # +log(prior) before minimizing, which penalized high-prior classes and
  # could contradict the posterior probabilities computed below.
  scores <- apply(newdata, 1, function(obs) {
    sapply(object$est, function(class_est) {
      with(class_est, quadform(object$cov_inv, obs - xbar) - 2 * log(prior))
    })
  })
  # apply() collapses to a vector when there is a single test observation.
  if (is.vector(scores)) {
    min_scores <- which.min(scores)
  } else {
    min_scores <- apply(scores, 2, which.min)
  }
  # Posterior probabilities via Bayes Theorem (all classes share cov_pool).
  means <- lapply(object$est, "[[", "xbar")
  covs <- replicate(n=object$num_groups, object$cov_pool, simplify=FALSE)
  priors <- lapply(object$est, "[[", "prior")
  posterior <- posterior_probs(x=newdata,
                               means=means,
                               covs=covs,
                               priors=priors)
  class <- factor(object$groups[min_scores], levels = object$groups)
  list(class = class, scores = scores, posterior = posterior)
}
|
\name{GomezTenureStatus}
\alias{GomezTenureStatus}
\docType{data}
\title{GomezTenureStatus}
\description{Land tenure status (owner, fixed-rent, or share-rent) and adopter/non-adopter classification for 187 farmers.}
\usage{data(GomezTenureStatus)}
\format{
A data frame with 187 observations on the following 2 variables.
\describe{
\item{\code{TenureStatus}}{a factor with levels \code{fixed-rent} \code{owner} \code{share-rent}}
\item{\code{FarmerClassif}}{a factor with levels \code{adopter} \code{nonadopter}}
}
}
\keyword{datasets}
| /R Package Creation/STAR/man/GomezTenureStatus.Rd | no_license | djnpisano/RScriptLibrary | R | false | false | 459 | rd | \name{GomezTenureStatus}
\alias{GomezTenureStatus}
\docType{data}
\title{GomezTenureStatus}
\description{Land tenure status (owner, fixed-rent, or share-rent) and adopter/non-adopter classification for 187 farmers.}
\usage{data(GomezTenureStatus)}
\format{
A data frame with 187 observations on the following 2 variables.
\describe{
\item{\code{TenureStatus}}{a factor with levels \code{fixed-rent} \code{owner} \code{share-rent}}
\item{\code{FarmerClassif}}{a factor with levels \code{adopter} \code{nonadopter}}
}
}
\keyword{datasets}
|
###### Read in libraries ######
library(jsonlite)
###### Specify study IDs ######
studies <- c(6000000010, 6000000034)
###### Download, reformat, and save JSON file for each study ######
# Sys.getenv() returns "" (never NULL) for an unset variable, so test for an
# empty string; the original is.null() check meant the fallback never fired.
base_url <- Sys.getenv("BASEURL")
if (!nzchar(base_url)) {
  base_url <- "https://terraref.org"
}
study_base_url <- paste0(base_url, "/brapi/v1/studies")
for (study in studies) {
  # Fetch the study record from the BrAPI endpoint and rewrap it in the
  # target schema: studies -> [ {studyDbId, data} ].
  study_url <- paste0(study_base_url, "/", study)
  study_list <- fromJSON(study_url)
  study_json <- list(studyDbId = as.character(study), data = study_list)
  study_json_final <- list(studies = list(study_json))
  write(toJSON(study_json_final, pretty = TRUE, auto_unbox = TRUE),
        file = paste0("study_", study, ".json"))
}
| /download_studies.R | no_license | frostbytten/ardn-terra-ref | R | false | false | 699 | r | ###### Read in libraries ######
library(jsonlite)
###### Specify study IDs ######
studies <- c(6000000010, 6000000034)
###### Download, reformat, and save JSON file for each study ######
# Sys.getenv() returns "" (never NULL) for an unset variable, so test for an
# empty string; the original is.null() check meant the fallback never fired.
base_url <- Sys.getenv("BASEURL")
if (!nzchar(base_url)) {
  base_url <- "https://terraref.org"
}
study_base_url <- paste0(base_url, "/brapi/v1/studies")
for (study in studies) {
  # Fetch the study record from the BrAPI endpoint and rewrap it in the
  # target schema: studies -> [ {studyDbId, data} ].
  study_url <- paste0(study_base_url, "/", study)
  study_list <- fromJSON(study_url)
  study_json <- list(studyDbId = as.character(study), data = study_list)
  study_json_final <- list(studies = list(study_json))
  write(toJSON(study_json_final, pretty = TRUE, auto_unbox = TRUE),
        file = paste0("study_", study, ".json"))
}
|
## Course Project 1
## Plot 4
# Force English weekday names portably: the "C" locale is English on every
# platform, whereas "English" is only recognized on Windows.
Sys.setlocale("LC_TIME", "C")
# Load the data ("?" encodes missing values)
hpc <- read.table("./household_power_consumption.txt", sep=";", header=T, na.strings="?")
hpc$Date <- as.Date(hpc$Date,format="%d/%m/%Y")
# Getting the subset we need: 2007-02-01 and 2007-02-02
sub_hpc <- hpc[hpc$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Combine date and time into a single POSIXct timestamp (POSIXct is safe to
# store in a data frame, unlike the list-based POSIXlt strptime returns)
td <- paste(sub_hpc$Date, sub_hpc$Time)
sub_hpc$DateTime <- as.POSIXct(td, format="%Y-%m-%d %H:%M:%S")
png("./plot4.png", width=480, height=480, units="px")
# Making 4 plots in a 2x2 panel, filled column-first
par(mfcol=c(2,2))
# Upperleft plot: global active power over time
plot(sub_hpc$DateTime, sub_hpc$Global_active_power, type='l',
     ylab="Global Active Power (kilowatts)", xlab='')
# Lowerleft plot: the three sub-metering series with a legend
with(sub_hpc, {
  plot(DateTime, Sub_metering_1, type='l', ylab="Energy sub metering", xlab='', bg=NULL)
  lines(DateTime, Sub_metering_2, col="red")
  lines(DateTime, Sub_metering_3, col="blue")
  legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
         col=c("black","red","blue"),
         lty=c(1,1,1), bty="n", cex=0.9)
})
# Upperright plot: voltage
with(sub_hpc, {
  plot(DateTime, Voltage, type='l', ylab="Voltage")
})
# Lowerright plot: global reactive power
with(sub_hpc, {
  plot(DateTime, Global_reactive_power, type='l', ylab="Global_reactive_power")
})
dev.off()
| /plot4.R | no_license | ErnaOudman/ExData_Plotting1 | R | false | false | 1,377 | r | ## Course Project 1
## Plot 4
# Force English weekday names portably: the "C" locale is English on every
# platform, whereas "English" is only recognized on Windows.
Sys.setlocale("LC_TIME", "C")
# Load the data ("?" encodes missing values)
hpc <- read.table("./household_power_consumption.txt", sep=";", header=T, na.strings="?")
hpc$Date <- as.Date(hpc$Date,format="%d/%m/%Y")
# Getting the subset we need: 2007-02-01 and 2007-02-02
sub_hpc <- hpc[hpc$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Combine date and time into a single POSIXct timestamp (POSIXct is safe to
# store in a data frame, unlike the list-based POSIXlt strptime returns)
td <- paste(sub_hpc$Date, sub_hpc$Time)
sub_hpc$DateTime <- as.POSIXct(td, format="%Y-%m-%d %H:%M:%S")
png("./plot4.png", width=480, height=480, units="px")
# Making 4 plots in a 2x2 panel, filled column-first
par(mfcol=c(2,2))
# Upperleft plot: global active power over time
plot(sub_hpc$DateTime, sub_hpc$Global_active_power, type='l',
     ylab="Global Active Power (kilowatts)", xlab='')
# Lowerleft plot: the three sub-metering series with a legend
with(sub_hpc, {
  plot(DateTime, Sub_metering_1, type='l', ylab="Energy sub metering", xlab='', bg=NULL)
  lines(DateTime, Sub_metering_2, col="red")
  lines(DateTime, Sub_metering_3, col="blue")
  legend("topright", c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
         col=c("black","red","blue"),
         lty=c(1,1,1), bty="n", cex=0.9)
})
# Upperright plot: voltage
with(sub_hpc, {
  plot(DateTime, Voltage, type='l', ylab="Voltage")
})
# Lowerright plot: global reactive power
with(sub_hpc, {
  plot(DateTime, Global_reactive_power, type='l', ylab="Global_reactive_power")
})
dev.off()
|
# NOTE(review): the original @format listed "doa" twice; the second entry
# (date of discharge) is assumed to be column "dod" -- confirm against the data.
#' @title Stroke dataset
#' @description Data from in-patients with acute stroke
#' @format A dataframe with one row per patient and the following columns:
#' \describe{
#'   \item{sex}{sex of patient}
#'   \item{race}{race of patient}
#'   \item{doa}{date of admission}
#'   \item{dod}{date of discharge}
#' }
"stroke"
| /R/stroke.R | no_license | drkamarul/data4medic | R | false | false | 286 | r | #' @title Stroke dataset
#' @description Data from in-patients with acute stroke
#' @format A dataframe with one row per patient and the following columns
#' (the discharge-date item was originally mislabelled "doa"; "dod" is assumed):
#' \describe{
#'   \item{sex}{sex of patient}
#'   \item{race}{race of patient}
#'   \item{doa}{date of admission}
#'   \item{dod}{date of discharge}
#' }
"stroke"
|
# NOTE(review): @format claims 7 variables but only 6 are described below --
# confirm which column is missing from the \describe block.
#' Water Use Data for the City of Santa Monica
#'
#' A dataset containing metered water use data for customers of the
#' City of Santa Monica public works department. Data has been modified slightly
#' from its original format to drop extra columns and rename columns to simpler
#' descriptive names.
#'
#' @format A data frame with 218067 rows and 7 variables:
#' \describe{
#'   \item{cust_id}{customer identifier}
#'   \item{usage_ccf}{monthly water use, in hundred cubic feet (CCF)}
#'   \item{usage_month}{the month the customer was billed}
#'   \item{usage_year}{the year the customer was billed}
#'   \item{cust_class}{the general customer class (RESIDENTIAL_SINGLE, IRRIGATION, etc)}
#'   \item{usage_date}{a date created from as.Date(usage_year, usage_month, 1)}
#' }
#' @source \url{https://data.smgov.net/Public-Services/Water-Usage/4nnq-5vzx}
"santamonica"
| /R/data.R | permissive | California-Data-Collaborative/RateParser | R | false | false | 869 | r | #' Water Use Data for the City of Santa Monica
#'
#' A dataset containing metered water use data for customers of the
#' City of Santa Monica public works department. Data has been modified slightly
#' from its original format to drop extra columns and rename columns to simpler
#' descriptive names.
#'
#' @format A data frame with 218067 rows and 7 variables
#' (note: only six are described here; confirm the missing column):
#' \describe{
#'   \item{cust_id}{customer identifier}
#'   \item{usage_ccf}{monthly water use, in hundred cubic feet (CCF)}
#'   \item{usage_month}{the month the customer was billed}
#'   \item{usage_year}{the year the customer was billed}
#'   \item{cust_class}{the general customer class (RESIDENTIAL_SINGLE, IRRIGATION, etc)}
#'   \item{usage_date}{a date created from as.Date(usage_year, usage_month, 1)}
#' }
#' @source \url{https://data.smgov.net/Public-Services/Water-Usage/4nnq-5vzx}
"santamonica"
|
#-------------------------------------------------------
# MDS - Fundamentals
# Multivariate distributions
# Practical 4: bivariate normal distribution
#-------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; kept as-is
# because it is part of the original teaching script, but avoid in production.
rm(list=ls())
library(plot3D)
library(scatterplot3d)
#----------------------------------------------
# Bivariate normal
# Definition of the joint density function.
# Specify: means, standard deviations and the correlation.
#----------------------------------------------
m1<-2 # mean of X
m2<-2.5 # mean of Y
s1<-0.55 # standard deviation of X
s2<-0.45 # standard deviation of Y
r<-0.71 # correlation between X and Y
# Evaluation grid: each axis spans its mean +/- 3 standard deviations.
x<-seq(m1-3*s1,m1+3*s1,0.1)
y<-seq(m2-3*s2,m2+3*s2,0.1)
# Q: quadratic form in the exponent; its level sets are the density's ellipses.
Q<-function(x,y){((x-m1)/s1)^2+((y-m2)/s2)^2-2*r*(x-m1)*(y-m2)/(s1*s2)}
# f: bivariate normal density built from Q.
f<-function(x,y){(1/(2*pi*s1*s2*sqrt(1-r^2)))*exp(-Q(x,y)/(2*(1-r^2)))}
#----------------------------------------------
# Graphical representation
# With outer() + persp() and with contour plots
#----------------------------------------------
# outer() evaluates f over the whole x-y grid (spreadsheet-style).
z<-outer(x,y,f)
z
# 3-D surface of the density.
persp(x,y,z,col="lightgreen")
# Level curves (contours of constant density).
contour(x,y,z)
# Filled contours with a rainbow palette.
filled.contour(x,y,z,col=rainbow(256),nlevels=(256))
#----------------------------------------------
# With scatterplot3d
# Alternative rendering: the density drawn as a cloud of (x, y, f(x, y)) points.
#----------------------------------------------
Datosx<-seq(m1-3*s1,m1+3*s1,0.1)
Datosy<-seq(m2-3*s2,m2+3*s2,0.1)
# All grid combinations as rows of a data frame.
Datos<-expand.grid(Datosx,Datosy)
x<-Datos[,1]
y<-Datos[,2]
z<-f(x,y)
scatterplot3d(x, y, z, highlight.3d=TRUE,col.axis="blue",col.grid="lightblue")
| /Fundamentos_de_DS/Practica Semana 2/Multivariantes_Practica_4.R | no_license | octadelsueldo/Master_DS_CUNEF | R | false | false | 1,846 | r | #-------------------------------------------------------
# MDS - Fundamentals
# Multivariate distributions
# Practice 4: the bivariate normal distribution
#-------------------------------------------------------
rm(list=ls()) # clear the workspace before starting the practice session
library(plot3D)
library(scatterplot3d)
#----------------------------------------------
# Bivariate normal
# Definition of the joint density function
# Specify: means, variances, and correlation (or covariance)
#----------------------------------------------
m1<-2 #mean 1
m2<-2.5 #mean 2
s1<-0.55 #std. deviation 1
s2<-0.45 #std. deviation 2
r<-0.71 #correlation
x<-seq(m1-3*s1,m1+3*s1,0.1) # evaluation grid covering mean +/- 3 standard deviations
y<-seq(m2-3*s2,m2+3*s2,0.1)
Q<-function(x,y){((x-m1)/s1)^2+((y-m2)/s2)^2-2*r*(x-m1)*(y-m2)/(s1*s2)} #quadratic form of the density; its level curves are the ellipses
f<-function(x,y){(1/(2*pi*s1*s2*sqrt(1-r^2)))*exp(-Q(x,y)/(2*(1-r^2)))} #joint density function
#----------------------------------------------
# Graphical representation
# With outer + persp and with contour plots
#----------------------------------------------
z<-outer(x,y,f) #outer works like a spreadsheet: it evaluates f at every (x, y) pair of the grid
z # print the evaluated density grid to the console
persp(x,y,z,col="lightgreen") #surface built from the means, variances and correlation of the two variables
contour(x,y,z) #contour lines (level curves): slices of constant density
# Coloured contours
filled.contour(x,y,z,col=rainbow(256),nlevels=(256))
#----------------------------------------------
# With a scatter plot
# An alternative way of drawing the surface with points (without joining them)
#----------------------------------------------
Datosx<-seq(m1-3*s1,m1+3*s1,0.1)
Datosy<-seq(m2-3*s2,m2+3*s2,0.1)
Datos<-expand.grid(Datosx,Datosy) # all (x, y) combinations of the grid
x<-Datos[,1]
y<-Datos[,2]
z<-f(x,y)
scatterplot3d(x, y, z, highlight.3d=TRUE,col.axis="blue",col.grid="lightblue")
|
## When adding new options, be sure to update the VALID_OPTIONS list
## (define your own custom validators by assigning a function)
## and update the default_opts() function + documentation in 'get_opts()' below
## Registry of recognized packrat options. Each entry is either:
##  - a list of the admissible values (checked with %in% by validateOptions), or
##  - a validator function that returns TRUE for acceptable input.
## NOTE: the order of names here defines the order options are written to
## packrat.opts (see write_opts), so do not reorder casually.
VALID_OPTIONS <- list(
  # boolean options are expressed as value lists, consistently
  auto.snapshot = list(TRUE, FALSE),
  use.cache = list(TRUE, FALSE),
  print.banner.on.startup = list(TRUE, FALSE, "auto"),
  vcs.ignore.lib = list(TRUE, FALSE),
  vcs.ignore.src = list(TRUE, FALSE),
  # NULL (unset) or a character vector of package names
  external.packages = function(x) {
    is.null(x) || is.character(x)
  },
  # NULL (unset) or a character vector of directories
  local.repos = function(x) {
    is.null(x) || is.character(x)
  },
  load.external.packages.on.startup = list(TRUE, FALSE),
  # NULL (unset) or a character vector of package names
  ignored.packages = function(x) {
    is.null(x) || is.character(x)
  }
)
## Factory for the complete set of packrat defaults. Used at project
## initialisation and to backfill options missing from packrat.opts.
default_opts <- function() {
  # external.packages may be seeded from the environment; everything else is fixed
  env_external <- Sys.getenv("R_PACKRAT_EXTERNAL_PACKAGES", unset = "")
  list(
    auto.snapshot = TRUE,
    use.cache = FALSE,
    print.banner.on.startup = "auto",
    vcs.ignore.lib = TRUE,
    vcs.ignore.src = FALSE,
    external.packages = env_external,
    local.repos = "",
    load.external.packages.on.startup = TRUE,
    ignored.packages = NULL
  )
}
## Write the supplied options (the defaults unless overridden) into the
## project's packrat.opts file via set_opts().
initOptions <- function(project = NULL, options = default_opts()) {
  args <- c(project = getProjectDir(project), options)
  do.call(set_opts, args)
}
##' Get/set packrat project options
##'
##' Get and set options for the current packrat-managed project.
##'
##' @section Valid Options:
##'
##' \itemize{
##' \item \code{auto.snapshot}: Perform automatic, asynchronous snapshots when running interactively?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{use.cache}:
##' Install packages into a global cache, which is then shared across projects? The
##' directory to use is read through \code{Sys.getenv("R_PACKRAT_CACHE_DIR")}.
##' (EXPERIMENTAL; defaults to \code{FALSE})
##' \item \code{print.banner.on.startup}:
##' Print the banner on startup? Can be one of \code{TRUE} (always print),
##' \code{FALSE} (never print), and \code{'auto'} (do the right thing)
##' (defaults to \code{"auto"})
##' \item \code{vcs.ignore.lib}:
##' Add the packrat private library to your version control system ignore?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{vcs.ignore.src}:
##' Add the packrat private sources to your version control system ignore?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{FALSE})
##' \item \code{external.packages}:
##' Packages which should be loaded from the user library. This can be useful for
##' very large packages which you don't want duplicated across multiple projects,
##' e.g. BioConductor annotation packages, or for package development scenarios
##' wherein you want to use e.g. \code{devtools} and \code{roxygen2} for package
##' development, but do not want your package to depend on these packages.
##' (EXPERIMENTAL; defaults to \code{Sys.getenv("R_PACKRAT_EXTERNAL_PACKAGES")})
##' \item \code{local.repos}:
##' Ad-hoc local 'repositories'; i.e., directories containing package sources within
##' sub-directories. (Character vector; empty by default)
##' \item \code{load.external.packages.on.startup}:
##' Load any packages specified within \code{external.packages} on startup?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{ignored.packages}:
##' Prevent packrat from tracking certain packages. Dependencies of these packages
##' will also not be tracked.
##' }
##'
##' @param options A character vector of valid option names.
##' @param simplify Boolean; \code{unlist} the returned options? Useful for when retrieving
##' a single option.
##' @param project The project directory. When in packrat mode, defaults to the current project;
##' otherwise, defaults to the current working directory.
##' @param ... Entries of the form \code{key = value}, used for setting packrat project options.
##' @rdname packrat-options
##' @name packrat-options
##' @export
##' @examples \dontrun{
##' ## use 'devtools' and 'knitr' from the user library
##' packrat::set_opts(external.packages = c("devtools", "knitr"))
##'
##' ## set local repository
##' packrat::set_opts(local.repos = c("~/projects/R"))
##'
##' ## get the set of 'external packages'
##' packrat::opts$external.packages()
##'
##' ## set the external packages
##' packrat::opts$external.packages(c("devtools", "knitr"))
##' }
get_opts <- function(options = NULL, simplify = TRUE, project = NULL) {
  project <- getProjectDir(project)
  # Use the in-memory cache when populated; otherwise read packrat.opts from
  # disk and prime the cache. (The original fetched the cached value and then
  # fetched it a second time when it was already populated.)
  opts <- get("options", envir = .packrat)
  if (is.null(opts)) {
    opts <- read_opts(project = project)
    assign("options", opts, envir = .packrat)
  }
  if (is.null(options)) {
    # No filter requested: return every option
    opts
  } else {
    # Subset to the requested option names
    result <- opts[names(opts) %in% options]
    if (simplify) unlist(unname(result))
    else result
  }
}
## Build an accessor closure for a single option: calling it with no argument
## reads the option via get_opts(); calling it with a value writes it via
## set_opts().
make_setter <- function(name) {
  force(name)  # evaluate now so the closure captures the intended option name
  function(x) {
    if (missing(x)) {
      get_opts(name)
    } else {
      do.call(set_opts, setNames(list(x), name))
    }
  }
}
##' @rdname packrat-options
##' @name packrat-options
##' @export
set_opts <- function(..., project = NULL) {
  project <- getProjectDir(project)
  optsPath <- packratOptionsFilePath(project)
  # Make sure the packrat.opts file (and its directory) exists before
  # attempting to read or merge options.
  if (!file.exists(optsPath)) {
    dir.create(dirname(optsPath), recursive = TRUE, showWarnings = FALSE)
    file.create(optsPath)
  }
  # Options arrive as name = value pairs through '...'
  dots <- list(...)
  validateOptions(dots)
  keys <- names(dots)
  values <- dots
  # Merge the new values over whatever is currently on disk
  opts <- read_opts(project = project)
  for (i in seq_along(keys)) {
    if (is.null(values[[i]]))
      # Single-bracket assignment of list(NULL) keeps the key with a NULL
      # value; opts[[key]] <- NULL would delete the entry instead.
      opts[keys[[i]]] <- list(NULL)
    else
      opts[[keys[[i]]]] <- values[[i]]
  }
  write_opts(opts, project = project)
  updateSettings(project)
  invisible(opts)  # return the merged options without printing
}
##' @rdname packrat-options
##' @format NULL
##' @export
opts <- setNames(lapply(names(VALID_OPTIONS), make_setter), names(VALID_OPTIONS))
## Check a named list of candidate options against VALID_OPTIONS, stopping
## with an informative error on the first unknown key or unacceptable value.
validateOptions <- function(opts) {
  for (i in seq_along(opts)) {
    key <- names(opts)[[i]]
    value <- opts[[i]]
    if (!(key %in% names(VALID_OPTIONS))) {
      stop("'", key, "' is not a valid packrat option", call. = FALSE)
    }
    validator <- VALID_OPTIONS[[key]]
    if (is.list(validator)) {
      # Enumerated options accept exactly one of the listed values. The
      # length() guard keeps NULL or multi-valued input from producing a
      # zero-length / vectorized condition in the 'if' below (which used to
      # crash with "argument is of length zero" instead of a clear error).
      ok <- length(value) == 1 && isTRUE(value %in% validator)
    } else if (is.function(validator)) {
      # Custom validator: anything other than an unqualified TRUE fails.
      ok <- isTRUE(validator(value))
    } else {
      ok <- TRUE
    }
    if (!ok) {
      stop("'", paste(value, collapse = ", "),
           "' is not a valid setting for packrat option '", key, "'",
           call. = FALSE)
    }
  }
}
## Read an options file with fields unparsed
## Returns a named list: each "name: ..." line starts a field whose value is
## the inline remainder plus any following indented continuation lines.
readOptsFile <- function(path) {
  fileLines <- readLines(path)
  keyPattern <- "^[[:alnum:]\\_\\.]*:"
  keyIdx <- grep(keyPattern, fileLines, perl = TRUE)
  if (!length(keyIdx)) return(list())
  # Each field runs from its key line up to the line before the next key
  blockEnd <- c(keyIdx[-1] - 1, length(fileLines))
  fields <- Map(function(from, to) {
    inline <- sub(".*:\\s*", "", fileLines[[from]])
    if (to > from) {
      continuation <- gsub("^\\s*", "", fileLines[(from + 1):to], perl = TRUE)
      values <- c(if (inline != "") inline, continuation)
    } else {
      values <- inline
    }
    # drop empty entries (e.g. a bare "key:" line)
    values[values != ""]
  }, keyIdx, blockEnd)
  names(fields) <- unlist(lapply(strsplit(fileLines[keyIdx], ":", fixed = TRUE), `[[`, 1))
  fields
}
## Read and parse an options file
## Reads the project's packrat.opts and coerces the literal strings
## "TRUE"/"FALSE"/"NA" back into their R values; every other field stays a
## character vector.
read_opts <- function(project = NULL) {
  project <- getProjectDir(project)
  optsFile <- packratOptionsFilePath(project)
  if (!file.exists(optsFile)) return(invisible(NULL))
  fields <- readOptsFile(optsFile)
  if (!length(fields)) return(list())
  fields[] <- lapply(fields, function(value) {
    if (identical(value, "TRUE")) TRUE
    else if (identical(value, "FALSE")) FALSE
    else if (identical(value, "NA")) NA
    else value
  })
  fields
}
## Serialize a named list of options to the project's packrat.opts file,
## backfilling any missing options with their defaults and refreshing the
## in-memory cache along the way.
write_opts <- function(options, project = NULL) {
  project <- getProjectDir(project)
  if (!is.list(options))
    stop("Expecting options as an R list of values")
  # Fill options that are left out
  defaultOpts <- default_opts()
  missingOptionNames <- setdiff(names(defaultOpts), names(options))
  for (optionName in missingOptionNames) {
    opt <- defaultOpts[[optionName]]
    if (is.null(opt)) {
      # Single-bracket assignment keeps the key with a NULL value;
      # double-bracket assignment of NULL would drop the entry.
      options[optionName] <- list(NULL)
    } else {
      options[[optionName]] <- opt
    }
  }
  # Preserve order
  options <- options[names(VALID_OPTIONS)]
  labels <- names(options)
  # Split a comma-separated external.packages entry into a character vector
  if ("external.packages" %in% names(options)) {
    oep <- as.character(options$external.packages)
    options$external.packages <-
      as.character(unlist(strsplit(oep, "\\s*,\\s*", perl = TRUE)))
  }
  # Update the in-memory options cache
  assign("options", options, envir = .packrat)
  # Multi-valued fields are written one value per indented line ("key:\n  v"),
  # scalars inline ("key: v") -- the format readOptsFile() parses back.
  sep <- ifelse(
    unlist(lapply(options, length)) > 1,
    ":\n",
    ": "
  )
  options[] <- lapply(options, function(x) {
    if (length(x) == 0) ""
    else if (length(x) == 1) as.character(x)
    else paste(" ", x, sep = "", collapse = "\n")
  })
  output <- character(length(labels))
  for (i in seq_along(labels)) {
    output[[i]] <- paste(labels[[i]], options[[i]], sep = sep[[i]])
  }
  cat(output, file = packratOptionsFilePath(project), sep = "\n")
}
| /R/options.R | no_license | vasiliosz/packrat | R | false | false | 9,234 | r | ## When adding new options, be sure to update the VALID_OPTIONS list
## (define your own custom validators by assigning a function)
## and update the default_opts() function + documentation in 'get_opts()' below
## Registry of recognized packrat options. Each entry is either:
##  - a list of the admissible values (checked with %in% by validateOptions), or
##  - a validator function that returns TRUE for acceptable input.
## NOTE: the order of names here defines the order options are written to
## packrat.opts (see write_opts), so do not reorder casually.
VALID_OPTIONS <- list(
  # boolean options are expressed as value lists, consistently
  auto.snapshot = list(TRUE, FALSE),
  use.cache = list(TRUE, FALSE),
  print.banner.on.startup = list(TRUE, FALSE, "auto"),
  vcs.ignore.lib = list(TRUE, FALSE),
  vcs.ignore.src = list(TRUE, FALSE),
  # NULL (unset) or a character vector of package names
  external.packages = function(x) {
    is.null(x) || is.character(x)
  },
  # NULL (unset) or a character vector of directories
  local.repos = function(x) {
    is.null(x) || is.character(x)
  },
  load.external.packages.on.startup = list(TRUE, FALSE),
  # NULL (unset) or a character vector of package names
  ignored.packages = function(x) {
    is.null(x) || is.character(x)
  }
)
## Factory for the complete set of packrat defaults. Used at project
## initialisation and to backfill options missing from packrat.opts.
default_opts <- function() {
  # external.packages may be seeded from the environment; everything else is fixed
  env_external <- Sys.getenv("R_PACKRAT_EXTERNAL_PACKAGES", unset = "")
  list(
    auto.snapshot = TRUE,
    use.cache = FALSE,
    print.banner.on.startup = "auto",
    vcs.ignore.lib = TRUE,
    vcs.ignore.src = FALSE,
    external.packages = env_external,
    local.repos = "",
    load.external.packages.on.startup = TRUE,
    ignored.packages = NULL
  )
}
## Write the supplied options (the defaults unless overridden) into the
## project's packrat.opts file via set_opts().
initOptions <- function(project = NULL, options = default_opts()) {
  args <- c(project = getProjectDir(project), options)
  do.call(set_opts, args)
}
##' Get/set packrat project options
##'
##' Get and set options for the current packrat-managed project.
##'
##' @section Valid Options:
##'
##' \itemize{
##' \item \code{auto.snapshot}: Perform automatic, asynchronous snapshots when running interactively?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{use.cache}:
##' Install packages into a global cache, which is then shared across projects? The
##' directory to use is read through \code{Sys.getenv("R_PACKRAT_CACHE_DIR")}.
##' (EXPERIMENTAL; defaults to \code{FALSE})
##' \item \code{print.banner.on.startup}:
##' Print the banner on startup? Can be one of \code{TRUE} (always print),
##' \code{FALSE} (never print), and \code{'auto'} (do the right thing)
##' (defaults to \code{"auto"})
##' \item \code{vcs.ignore.lib}:
##' Add the packrat private library to your version control system ignore?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{vcs.ignore.src}:
##' Add the packrat private sources to your version control system ignore?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{FALSE})
##' \item \code{external.packages}:
##' Packages which should be loaded from the user library. This can be useful for
##' very large packages which you don't want duplicated across multiple projects,
##' e.g. BioConductor annotation packages, or for package development scenarios
##' wherein you want to use e.g. \code{devtools} and \code{roxygen2} for package
##' development, but do not want your package to depend on these packages.
##' (EXPERIMENTAL; defaults to \code{Sys.getenv("R_PACKRAT_EXTERNAL_PACKAGES")})
##' \item \code{local.repos}:
##' Ad-hoc local 'repositories'; i.e., directories containing package sources within
##' sub-directories. (Character vector; empty by default)
##' \item \code{load.external.packages.on.startup}:
##' Load any packages specified within \code{external.packages} on startup?
##' (\code{TRUE} / \code{FALSE}; defaults to \code{TRUE})
##' \item \code{ignored.packages}:
##' Prevent packrat from tracking certain packages. Dependencies of these packages
##' will also not be tracked.
##' }
##'
##' @param options A character vector of valid option names.
##' @param simplify Boolean; \code{unlist} the returned options? Useful for when retrieving
##' a single option.
##' @param project The project directory. When in packrat mode, defaults to the current project;
##' otherwise, defaults to the current working directory.
##' @param ... Entries of the form \code{key = value}, used for setting packrat project options.
##' @rdname packrat-options
##' @name packrat-options
##' @export
##' @examples \dontrun{
##' ## use 'devtools' and 'knitr' from the user library
##' packrat::set_opts(external.packages = c("devtools", "knitr"))
##'
##' ## set local repository
##' packrat::set_opts(local.repos = c("~/projects/R"))
##'
##' ## get the set of 'external packages'
##' packrat::opts$external.packages()
##'
##' ## set the external packages
##' packrat::opts$external.packages(c("devtools", "knitr"))
##' }
get_opts <- function(options = NULL, simplify = TRUE, project = NULL) {
  project <- getProjectDir(project)
  # Use the in-memory cache when populated; otherwise read packrat.opts from
  # disk and prime the cache. (The original fetched the cached value and then
  # fetched it a second time when it was already populated.)
  opts <- get("options", envir = .packrat)
  if (is.null(opts)) {
    opts <- read_opts(project = project)
    assign("options", opts, envir = .packrat)
  }
  if (is.null(options)) {
    # No filter requested: return every option
    opts
  } else {
    # Subset to the requested option names
    result <- opts[names(opts) %in% options]
    if (simplify) unlist(unname(result))
    else result
  }
}
## Build an accessor closure for a single option: calling it with no argument
## reads the option via get_opts(); calling it with a value writes it via
## set_opts().
make_setter <- function(name) {
  force(name)  # evaluate now so the closure captures the intended option name
  function(x) {
    if (missing(x)) {
      get_opts(name)
    } else {
      do.call(set_opts, setNames(list(x), name))
    }
  }
}
##' @rdname packrat-options
##' @name packrat-options
##' @export
set_opts <- function(..., project = NULL) {
  project <- getProjectDir(project)
  optsPath <- packratOptionsFilePath(project)
  # Make sure the packrat.opts file (and its directory) exists before
  # attempting to read or merge options.
  if (!file.exists(optsPath)) {
    dir.create(dirname(optsPath), recursive = TRUE, showWarnings = FALSE)
    file.create(optsPath)
  }
  # Options arrive as name = value pairs through '...'
  dots <- list(...)
  validateOptions(dots)
  keys <- names(dots)
  values <- dots
  # Merge the new values over whatever is currently on disk
  opts <- read_opts(project = project)
  for (i in seq_along(keys)) {
    if (is.null(values[[i]]))
      # Single-bracket assignment of list(NULL) keeps the key with a NULL
      # value; opts[[key]] <- NULL would delete the entry instead.
      opts[keys[[i]]] <- list(NULL)
    else
      opts[[keys[[i]]]] <- values[[i]]
  }
  write_opts(opts, project = project)
  updateSettings(project)
  invisible(opts)  # return the merged options without printing
}
##' @rdname packrat-options
##' @format NULL
##' @export
opts <- setNames(lapply(names(VALID_OPTIONS), make_setter), names(VALID_OPTIONS))
## Check a named list of candidate options against VALID_OPTIONS, stopping
## with an informative error on the first unknown key or unacceptable value.
validateOptions <- function(opts) {
  for (i in seq_along(opts)) {
    key <- names(opts)[[i]]
    value <- opts[[i]]
    if (!(key %in% names(VALID_OPTIONS))) {
      stop("'", key, "' is not a valid packrat option", call. = FALSE)
    }
    validator <- VALID_OPTIONS[[key]]
    if (is.list(validator)) {
      # Enumerated options accept exactly one of the listed values. The
      # length() guard keeps NULL or multi-valued input from producing a
      # zero-length / vectorized condition in the 'if' below (which used to
      # crash with "argument is of length zero" instead of a clear error).
      ok <- length(value) == 1 && isTRUE(value %in% validator)
    } else if (is.function(validator)) {
      # Custom validator: anything other than an unqualified TRUE fails.
      ok <- isTRUE(validator(value))
    } else {
      ok <- TRUE
    }
    if (!ok) {
      stop("'", paste(value, collapse = ", "),
           "' is not a valid setting for packrat option '", key, "'",
           call. = FALSE)
    }
  }
}
## Read an options file with fields unparsed
## Returns a named list: each "name: ..." line starts a field whose value is
## the inline remainder plus any following indented continuation lines.
readOptsFile <- function(path) {
  fileLines <- readLines(path)
  keyPattern <- "^[[:alnum:]\\_\\.]*:"
  keyIdx <- grep(keyPattern, fileLines, perl = TRUE)
  if (!length(keyIdx)) return(list())
  # Each field runs from its key line up to the line before the next key
  blockEnd <- c(keyIdx[-1] - 1, length(fileLines))
  fields <- Map(function(from, to) {
    inline <- sub(".*:\\s*", "", fileLines[[from]])
    if (to > from) {
      continuation <- gsub("^\\s*", "", fileLines[(from + 1):to], perl = TRUE)
      values <- c(if (inline != "") inline, continuation)
    } else {
      values <- inline
    }
    # drop empty entries (e.g. a bare "key:" line)
    values[values != ""]
  }, keyIdx, blockEnd)
  names(fields) <- unlist(lapply(strsplit(fileLines[keyIdx], ":", fixed = TRUE), `[[`, 1))
  fields
}
## Read and parse an options file
## Reads the project's packrat.opts and coerces the literal strings
## "TRUE"/"FALSE"/"NA" back into their R values; every other field stays a
## character vector.
read_opts <- function(project = NULL) {
  project <- getProjectDir(project)
  optsFile <- packratOptionsFilePath(project)
  if (!file.exists(optsFile)) return(invisible(NULL))
  fields <- readOptsFile(optsFile)
  if (!length(fields)) return(list())
  fields[] <- lapply(fields, function(value) {
    if (identical(value, "TRUE")) TRUE
    else if (identical(value, "FALSE")) FALSE
    else if (identical(value, "NA")) NA
    else value
  })
  fields
}
## Serialize a named list of options to the project's packrat.opts file,
## backfilling any missing options with their defaults and refreshing the
## in-memory cache along the way.
write_opts <- function(options, project = NULL) {
  project <- getProjectDir(project)
  if (!is.list(options))
    stop("Expecting options as an R list of values")
  # Fill options that are left out
  defaultOpts <- default_opts()
  missingOptionNames <- setdiff(names(defaultOpts), names(options))
  for (optionName in missingOptionNames) {
    opt <- defaultOpts[[optionName]]
    if (is.null(opt)) {
      # Single-bracket assignment keeps the key with a NULL value;
      # double-bracket assignment of NULL would drop the entry.
      options[optionName] <- list(NULL)
    } else {
      options[[optionName]] <- opt
    }
  }
  # Preserve order
  options <- options[names(VALID_OPTIONS)]
  labels <- names(options)
  # Split a comma-separated external.packages entry into a character vector
  if ("external.packages" %in% names(options)) {
    oep <- as.character(options$external.packages)
    options$external.packages <-
      as.character(unlist(strsplit(oep, "\\s*,\\s*", perl = TRUE)))
  }
  # Update the in-memory options cache
  assign("options", options, envir = .packrat)
  # Multi-valued fields are written one value per indented line ("key:\n  v"),
  # scalars inline ("key: v") -- the format readOptsFile() parses back.
  sep <- ifelse(
    unlist(lapply(options, length)) > 1,
    ":\n",
    ": "
  )
  options[] <- lapply(options, function(x) {
    if (length(x) == 0) ""
    else if (length(x) == 1) as.character(x)
    else paste(" ", x, sep = "", collapse = "\n")
  })
  output <- character(length(labels))
  for (i in seq_along(labels)) {
    output[[i]] <- paste(labels[[i]], options[[i]], sep = sep[[i]])
  }
  cat(output, file = packratOptionsFilePath(project), sep = "\n")
}
|
#' Import MCMC samples into a ggs object than can be used by all ggs_* graphical functions.
#'
#' This function manages MCMC samples from different sources (JAGS, MCMCpack, STAN -both via rstan and via csv files-) and converts them into a data frame tbl. The resulting data frame has four columns (Iteration, Chain, Parameter, value) and six attributes (nChains, nParameters, nIterations, nBurnin, nThin and description). The ggs object returned is then used as the input of the ggs_* functions to actually plot the different convergence diagnostics.
#'
#' @param S Either a \code{mcmc.list} object with samples from JAGS, a \code{mcmc} object with samples from MCMCpack, a \code{stanfit} object with samples from rstan, or a list with the filenames of \code{csv} files generated by stan outside rstan (where the order of the files is assumed to be the order of the chains). ggmcmc guesses what is the original object and tries to import it accordingly. rstan is not expected to be in CRAN soon, and so coda::mcmc is used to extract stan samples instead of the more canonical rstan::extract.
#' @param family Name of the family of parameters to process, as given by a character vector or a regular expression. A family of parameters is considered to be any group of parameters with the same name but different numerical value between square brackets (as beta[1], beta[2], etc).
#' @param description Character vector giving a short descriptive text that identifies the model.
#' @param burnin Logical or numerical value. When logical and TRUE (the default), the number of samples in the burnin period will be taken into account, if it can be guessed by the extracting process. Otherwise, iterations will start counting from 1. If a numerical vector is given, the user then supplies the length of the burnin period.
#' @param par_labels data frame with two colums. One named "Parameter" with the same names of the parameters of the model. Another named "Label" with the label of the parameter. When missing, the names passed to the model are used for representation. When there is no correspondence between a Parameter and a Label, the original name of the parameter is used. The order of the levels of the original Parameter does not change.
#' @param inc_warmup Logical. When dealing with stanfit objects from rstan, logical value whether the warmup samples are included. Defaults to FALSE.
#' @param stan_include_auxiliar Logical value to include "lp__" parameter in rstan, and "lp__", "treedepth__" and "stepsize__" in stan running without rstan. Defaults to FALSE.
#' @export
#' @return D A data frame tbl with the data arranged and ready to be used by the rest of the \code{ggmcmc} functions. The data frame has four columns, namely: Iteration, Chain, Parameter and value, and six attributes: nChains, nParameters, nIterations, nBurnin, nThin and description. A data frame tbl is a wrapper to a local data frame, behaves like a data frame and its advantage is related to printing, which is compact. For more details, see \code{tbl_df()} in package \code{dplyr}.
#' @examples
#' # Assign 'D' to be a data frame suitable for \code{ggmcmc} functions from
#' # a coda object called S
#' data(linear)
#' S <- ggs(s) # s is a coda object
#'
#' # Get samples from 'beta' parameters only
#' S <- ggs(s, family = "beta")
ggs <- function(S, family=NA, description=NA, burnin=TRUE, par_labels=NA, inc_warmup=FALSE, stan_include_auxiliar=FALSE) {
  processed <- FALSE # set by default that there has not been any processed samples
  #
  # Manage stanfit objects
  # Manage stan output first because it is firstly converted into an mcmc.list
  #
  if (class(S)=="stanfit") {
    # Extract chain by chain
    nChains <- S@sim$chains
    D <- NULL
    for (l in 1:nChains) {
      sdf <- as.data.frame(S@sim$samples[[l]])
      sdf$Iteration <- 1:dim(sdf)[1]
      # Long format: one row per (Iteration, Parameter), with the chain id attached
      s <- tidyr::gather(sdf, Parameter, value, -Iteration) %>%
        dplyr::mutate(Chain = l) %>%
        dplyr::select(Iteration, Chain, Parameter, value)
      D <- dplyr::bind_rows(D, s)
    }
    if (!inc_warmup) {
      # Drop warmup draws and renumber the remaining iterations from 1
      D <- dplyr::filter(D, Iteration > S@sim$warmup)
      D$Iteration <- D$Iteration - S@sim$warmup
      nBurnin <- S@sim$warmup
    } else {
      nBurnin <- 0
    }
    # Exclude, by default, lp parameter
    if (!stan_include_auxiliar) {
      D <- dplyr::filter(D, Parameter!="lp__") # delete lp__
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
    }
    nThin <- S@sim$thin
    mDescription <- S@model_name
    processed <- TRUE
    D <- dplyr::tbl_df(D)
  }
  #
  # Manage csv files that contain stan samples
  # Also converted first to an mcmc.list
  #
  if (class(S)=="list") {
    D <- NULL
    for (i in 1:length(S)) {
      samples.c <- dplyr::tbl_df(read.table(S[[i]], sep=",", header=TRUE,
        colClasses="numeric", check.names=FALSE))
      D <- dplyr::bind_rows(D,
        tidyr::gather(samples.c, Parameter) %>%
          dplyr::mutate(Iteration=rep(1:(dim(samples.c)[1]), dim(samples.c)[2]), Chain=i) %>%
          dplyr::select(Iteration, Chain, Parameter, value))
    }
    # Exclude, by default, lp parameter and other auxiliary ones
    if (!stan_include_auxiliar) {
      D <- D[grep("__$", D$Parameter, invert=TRUE),]
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
    }
    # Burnin and thinning are read from the csv header comments written by stan
    # (NOTE(review): assumes warmup= on line 13 and thin= on line 14 of the csv
    # header -- confirm against the files actually produced)
    nBurnin <- as.integer(gsub("warmup=", "", scan(S[[i]], "", skip=12, nlines=1, quiet=TRUE)[2]))
    nThin <- as.integer(gsub("thin=", "", scan(S[[i]], "", skip=13, nlines=1, quiet=TRUE)[2]))
    processed <- TRUE
  }
  #
  # Manage mcmc.list and mcmc objects
  #
  if (class(S)=="mcmc.list" | class(S)=="mcmc" | processed) { # JAGS typical output or MCMCpack (or previously processed stan samples)
    if (!processed) { # only in JAGS or MCMCpack, using coda
      lS <- length(S)
      D <- NULL
      if (lS == 1 | class(S)=="mcmc") { # Single chain or MCMCpack
        if (lS == 1 & class(S)=="mcmc.list") { # single chain
          s <- S[[1]]
        } else { # MCMCpack
          s <- S
        }
        # Process a single chain
        D <- dplyr::mutate(ggs_chain(s), Chain=1) %>%
          dplyr::select(Iteration, Chain, Parameter, value)
        # Get information from mcpar (burnin period, thinning)
        nBurnin <- (attributes(s)$mcpar[1])-(1*attributes(s)$mcpar[3])
        nThin <- attributes(s)$mcpar[3]
      } else {
        # Process multiple chains
        for (l in 1:lS) {
          s <- S[l][[1]]
          D <- dplyr::bind_rows(D, dplyr::mutate(ggs_chain(s), Chain=l))
        }
        D <- dplyr::select(D, Iteration, Chain, Parameter, value)
        # Get information from mcpar (burnin period, thinning). Taking the last
        # chain is fine. All chains are assumed to have the same structure.
        nBurnin <- (attributes(s)$mcpar[1])-(1*attributes(s)$mcpar[3])
        nThin <- attributes(s)$mcpar[3]
      }
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
      D <- dplyr::arrange(D, Parameter, Chain, Iteration)
    }
    # Set several attributes to the object, to avoid computations afterwards
    # Number of chains
    attr(D, "nChains") <- length(unique(D$Chain))
    # Number of parameters
    attr(D, "nParameters") <- length(unique(D$Parameter))
    # Number of Iterations really present in the sample
    attr(D, "nIterations") <- max(D$Iteration)
    # Number of burnin iterations previously discarded
    if (is.numeric(burnin) & length(burnin)==1) {
      attr(D, "nBurnin") <- burnin
    } else if (is.logical(burnin)) {
      if (burnin) {
        attr(D, "nBurnin") <- nBurnin
      } else {
        attr(D, "nBurnin") <- 0
      }
    } else {
      stop("burnin must be either logical (TRUE/FALSE) or a numerical vector of length one.")
    }
    # Thinning interval
    attr(D, "nThin") <- nThin
    # Descriptive text
    if (is.character(description)) { # if the description is given, use it when it is a character string
      attr(D, "description") <- description
    } else {
      if (!is.na(description)) { # if it is not a character string and not NA, show an informative message
        print("description is not a text string. The name of the imported object is used instead.")
      }
      if (exists("mDescription")) { # In case of stan model names
        attr(D, "description") <- mDescription
      } else {
        attr(D, "description") <- as.character(sys.call()[2]) # use the name of the source object
      }
    }
    # Manage subsetting a family of parameters
    # In order to save memory, the exclusion of parameters would be done ideally
    # at the beginning of the processing, but then it has to be done for all
    # input types.
    if (!is.na(family)) {
      D <- get_family(D, family=family)
    }
    # Change the names of the parameters if par_labels argument has been passed
    if (class(par_labels)=="data.frame") {
      if (length(which(c("Parameter", "Label") %in% names(par_labels))) == 2) {
        aD <- attributes(D)
        # Relabel the factor levels in place, keeping their original order
        levels(D$Parameter)[which(levels(D$Parameter) %in% par_labels$Parameter)] <-
          as.character(par_labels$Label[
            match(levels(D$Parameter)[which(levels(D$Parameter) %in% par_labels$Parameter)], par_labels$Parameter)])
        # Keep the original name available in a ParameterOriginal column
        D <- suppressWarnings(dplyr::left_join(D, data.frame(Parameter=par_labels$Label, ParameterOriginal=par_labels$Parameter),
          by="Parameter")) %>%
          dplyr::select(Iteration, Chain, Parameter, value, ParameterOriginal)
        if (class(D$Parameter) == "character") {
          D$Parameter <- factor(D$Parameter, levels=custom.sort(D$Parameter))
        }
        # Unfortunately, the attributes are not inherited in left_join(), so they have to be manually passed again
        attr(D, "nChains") <- aD$nChains
        attr(D, "nParameters") <- aD$nParameters
        attr(D, "nIterations") <- aD$nIterations
        attr(D, "nBurnin") <- aD$nBurnin
        attr(D, "nThin") <- aD$nThin
        attr(D, "description") <- aD$description
        # Keep the rest of the variables passed if the data frame has more than Parameter and Label
        if (dim(par_labels)[2] > 2) {
          aD <- attributes(D)
          D <- dplyr::left_join(D, dplyr::select(dplyr::tbl_df(par_labels), -Parameter), by=c("Parameter"="Label"))
          if (class(D$Parameter) == "character") {
            D$Parameter <- factor(D$Parameter, levels=custom.sort(D$Parameter))
          }
        }
        # Unfortunately, the attributes are not inherited in left_join(), so they have to be manually passed again (for second time).
        attr(D, "nChains") <- aD$nChains
        attr(D, "nParameters") <- aD$nParameters
        attr(D, "nIterations") <- aD$nIterations
        attr(D, "nBurnin") <- aD$nBurnin
        attr(D, "nThin") <- aD$nThin
        attr(D, "description") <- aD$description
      } else {
        stop("par_labels must include at least columns called 'Parameter' and 'Label'.")
      }
    } else {
      if (!is.na(par_labels)) {
        stop("par_labels must be a data frame.")
      }
    }
    # Once everything is ready, return the processed object
    return(D)
  } else {
    stop("ggs is not able to transform the input object into a ggs object suitable for ggmcmc.")
  }
}
#' Auxiliary function that extracts information from a single chain.
#'
#' @param s a single chain to convert into a data frame
#' @return A data frame tbl in long format (Iteration, Parameter, value)
ggs_chain <- function(s) {
  # One row index per stored iteration of the chain
  iterations <- seq_len(dim(s)[1])
  # Wide layout: one column per parameter, plus the iteration index
  wide <- data.frame(Iteration = iterations, as.matrix(unclass(s)), check.names = FALSE)
  # Melt to long layout and hand back a tbl_df so dplyr verbs work on it
  long <- tidyr::gather(wide, Parameter, value, -Iteration)
  dplyr::tbl_df(long)
}
#' Auxiliary function that sorts Parameter names taking into account numeric values
#'
#' Family names (the part before any "[") are ordered alphabetically, and
#' indexed parameters within a family (e.g. beta[2], beta[10]) are ordered by
#' their numeric index rather than lexicographically.
#'
#' @param x a character vector of parameter names to sort
#' @return A character vector sorted by family and then by numeric index
custom.sort <- function(x) {
  x <- as.character(unique(x))
  # Family name = parameter name with any bracketed index stripped
  family <- gsub("\\[.+\\]", "", x)
  ordered.groups <- lapply(sort(unique(family)), function(f) {
    members <- x[family == f]
    if (any(grepl("\\[", members))) {
      # Order indexed members by the numeric value between the brackets
      idx <- as.numeric(gsub("]", "", gsub("(.+)\\[", "", members)))
      members <- members[order(idx)]
    }
    members
  })
  # unlist() on an empty list yields NULL, matching the original behaviour;
  # the original also duplicated the append in both if/else branches and grew
  # the result inside the loop.
  unlist(ordered.groups)
}
| /ggmcmc/R/ggs.R | no_license | ingted/R-Examples | R | false | false | 12,505 | r | #' Import MCMC samples into a ggs object than can be used by all ggs_* graphical functions.
#'
#' This function manages MCMC samples from different sources (JAGS, MCMCpack, STAN -both via rstan and via csv files-) and converts them into a data frame tbl. The resulting data frame has four columns (Iteration, Chain, Parameter, value) and six attributes (nChains, nParameters, nIterations, nBurnin, nThin and description). The ggs object returned is then used as the input of the ggs_* functions to actually plot the different convergence diagnostics.
#'
#' @param S Either a \code{mcmc.list} object with samples from JAGS, a \code{mcmc} object with samples from MCMCpack, a \code{stanfit} object with samples from rstan, or a list with the filenames of \code{csv} files generated by stan outside rstan (where the order of the files is assumed to be the order of the chains). ggmcmc guesses what is the original object and tries to import it accordingly. rstan is not expected to be in CRAN soon, and so coda::mcmc is used to extract stan samples instead of the more canonical rstan::extract.
#' @param family Name of the family of parameters to process, as given by a character vector or a regular expression. A family of parameters is considered to be any group of parameters with the same name but different numerical value between square brackets (as beta[1], beta[2], etc).
#' @param description Character vector giving a short descriptive text that identifies the model.
#' @param burnin Logical or numerical value. When logical and TRUE (the default), the number of samples in the burnin period will be taken into account, if it can be guessed by the extracting process. Otherwise, iterations will start counting from 1. If a numerical vector is given, the user then supplies the length of the burnin period.
#' @param par_labels data frame with two colums. One named "Parameter" with the same names of the parameters of the model. Another named "Label" with the label of the parameter. When missing, the names passed to the model are used for representation. When there is no correspondence between a Parameter and a Label, the original name of the parameter is used. The order of the levels of the original Parameter does not change.
#' @param inc_warmup Logical. When dealing with stanfit objects from rstan, logical value whether the warmup samples are included. Defaults to FALSE.
#' @param stan_include_auxiliar Logical value to include "lp__" parameter in rstan, and "lp__", "treedepth__" and "stepsize__" in stan running without rstan. Defaults to FALSE.
#' @export
#' @return D A data frame tbl with the data arranged and ready to be used by the rest of the \code{ggmcmc} functions. The data frame has four columns, namely: Iteration, Chain, Parameter and value, and six attributes: nChains, nParameters, nIterations, nBurnin, nThin and description. A data frame tbl is a wrapper to a local data frame, behaves like a data frame and its advantage is related to printing, which is compact. For more details, see \code{tbl_df()} in package \code{dplyr}.
#' @examples
#' # Assign 'D' to be a data frame suitable for \code{ggmcmc} functions from
#' # a coda object called S
#' data(linear)
#' S <- ggs(s) # s is a coda object
#'
#' # Get samples from 'beta' parameters only
#' S <- ggs(s, family = "beta")
ggs <- function(S, family=NA, description=NA, burnin=TRUE, par_labels=NA, inc_warmup=FALSE, stan_include_auxiliar=FALSE) {
  processed <- FALSE # set by default that there has not been any processed samples
  #
  # Manage stanfit objects
  # Manage stan output first because it is firstly converted into an mcmc.list
  # NOTE: class checks below use inherits()/is.*() instead of class(x)=="y" so
  # that subclasses are recognized and so that if() never receives a condition
  # of length > 1 (an error as of R 4.2). Scalar conditions use &&/|| likewise.
  #
  if (inherits(S, "stanfit")) {
    # Extract chain by chain
    nChains <- S@sim$chains
    D <- NULL
    for (l in 1:nChains) {
      sdf <- as.data.frame(S@sim$samples[[l]])
      sdf$Iteration <- 1:dim(sdf)[1]
      s <- tidyr::gather(sdf, Parameter, value, -Iteration) %>%
        dplyr::mutate(Chain = l) %>%
        dplyr::select(Iteration, Chain, Parameter, value)
      D <- dplyr::bind_rows(D, s)
    }
    if (!inc_warmup) {
      # Drop warmup draws and renumber iterations from 1
      D <- dplyr::filter(D, Iteration > S@sim$warmup)
      D$Iteration <- D$Iteration - S@sim$warmup
      nBurnin <- S@sim$warmup
    } else {
      nBurnin <- 0
    }
    # Exclude, by default, lp parameter
    if (!stan_include_auxiliar) {
      D <- dplyr::filter(D, Parameter!="lp__") # delete lp__
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
    }
    nThin <- S@sim$thin
    mDescription <- S@model_name
    processed <- TRUE
    D <- dplyr::tbl_df(D)
  }
  #
  # Manage csv files that contain stan samples
  # Also converted first to an mcmc.list
  #
  if (inherits(S, "list")) {
    D <- NULL
    for (i in 1:length(S)) {
      samples.c <- dplyr::tbl_df(read.table(S[[i]], sep=",", header=TRUE,
        colClasses="numeric", check.names=FALSE))
      D <- dplyr::bind_rows(D,
        tidyr::gather(samples.c, Parameter) %>%
        dplyr::mutate(Iteration=rep(1:(dim(samples.c)[1]), dim(samples.c)[2]), Chain=i) %>%
        dplyr::select(Iteration, Chain, Parameter, value))
    }
    # Exclude, by default, lp parameter and other auxiliar ones
    if (!stan_include_auxiliar) {
      D <- D[grep("__$", D$Parameter, invert=TRUE),]
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
    }
    # Warmup and thinning are read from the fixed-position header comments that
    # stan writes at the top of its csv output (lines 13 and 14).
    nBurnin <- as.integer(gsub("warmup=", "", scan(S[[i]], "", skip=12, nlines=1, quiet=TRUE)[2]))
    nThin <- as.integer(gsub("thin=", "", scan(S[[i]], "", skip=13, nlines=1, quiet=TRUE)[2]))
    processed <- TRUE
  }
  #
  # Manage mcmc.list and mcmc objects
  #
  if (inherits(S, "mcmc.list") || inherits(S, "mcmc") || processed) { # JAGS typical output or MCMCpack (or previously processed stan samples)
    if (!processed) { # only in JAGS or MCMCpack, using coda
      lS <- length(S)
      D <- NULL
      if (lS == 1 || inherits(S, "mcmc")) { # Single chain or MCMCpack
        if (lS == 1 && inherits(S, "mcmc.list")) { # single chain
          s <- S[[1]]
        } else { # MCMCpack
          s <- S
        }
        # Process a single chain
        D <- dplyr::mutate(ggs_chain(s), Chain=1) %>%
          dplyr::select(Iteration, Chain, Parameter, value)
        # Get information from mcpar (burnin period, thinning)
        nBurnin <- (attributes(s)$mcpar[1])-(1*attributes(s)$mcpar[3])
        nThin <- attributes(s)$mcpar[3]
      } else {
        # Process multiple chains
        for (l in 1:lS) {
          s <- S[l][[1]]
          D <- dplyr::bind_rows(D, dplyr::mutate(ggs_chain(s), Chain=l))
        }
        D <- dplyr::select(D, Iteration, Chain, Parameter, value)
        # Get information from mcpar (burnin period, thinning). Taking the last
        # chain is fine. All chains are assumed to have the same structure.
        nBurnin <- (attributes(s)$mcpar[1])-(1*attributes(s)$mcpar[3])
        nThin <- attributes(s)$mcpar[3]
      }
      D$Parameter <- factor(as.character(D$Parameter), levels=custom.sort(D$Parameter))
      D <- dplyr::arrange(D, Parameter, Chain, Iteration)
    }
    # Set several attributes to the object, to avoid computations afterwards
    # Number of chains
    attr(D, "nChains") <- length(unique(D$Chain))
    # Number of parameters
    attr(D, "nParameters") <- length(unique(D$Parameter))
    # Number of Iterations really present in the sample
    attr(D, "nIterations") <- max(D$Iteration)
    # Number of burnin periods: either user-supplied (numeric) or guessed above
    if (is.numeric(burnin) && length(burnin)==1) {
      attr(D, "nBurnin") <- burnin
    } else if (is.logical(burnin)) {
      if (burnin) {
        attr(D, "nBurnin") <- nBurnin
      } else {
        attr(D, "nBurnin") <- 0
      }
    } else {
      stop("burnin must be either logical (TRUE/FALSE) or a numerical vector of length one.")
    }
    # Thinning interval
    attr(D, "nThin") <- nThin
    # Descriptive text
    if (is.character(description)) { # if the description is given, use it when it is a character string
      attr(D, "description") <- description
    } else {
      if (!identical(description, NA)) { # if it is not a character string and not NA, show an informative message
        print("description is not a text string. The name of the imported object is used instead.")
      }
      # inherits = FALSE: only use a model name set in THIS call (stan branch),
      # never a stray mDescription found in an enclosing environment.
      if (exists("mDescription", inherits = FALSE)) { # In case of stan model names
        attr(D, "description") <- mDescription
      } else {
        attr(D, "description") <- as.character(sys.call()[2]) # use the name of the source object
      }
    }
    # Manage subsetting a family of parameters
    # In order to save memory, the exclusion of parameters would be done ideally
    # at the beginning of the processing, but then it has to be done for all
    # input types. (family may be a character vector, so compare against the
    # NA default with identical() rather than is.na().)
    if (!identical(family, NA)) {
      # get_family() is defined elsewhere in the package
      D <- get_family(D, family=family)
    }
    # Change the names of the parameters if par_labels argument has been passed
    if (is.data.frame(par_labels)) {
      if (all(c("Parameter", "Label") %in% names(par_labels))) {
        aD <- attributes(D)
        # Relabel only the factor levels that appear in par_labels; unmatched
        # parameters keep their original names.
        levels(D$Parameter)[which(levels(D$Parameter) %in% par_labels$Parameter)] <-
          as.character(par_labels$Label[
            match(levels(D$Parameter)[which(levels(D$Parameter) %in% par_labels$Parameter)], par_labels$Parameter)])
        D <- suppressWarnings(dplyr::left_join(D, data.frame(Parameter=par_labels$Label, ParameterOriginal=par_labels$Parameter),
          by="Parameter")) %>%
          dplyr::select(Iteration, Chain, Parameter, value, ParameterOriginal)
        if (is.character(D$Parameter)) {
          D$Parameter <- factor(D$Parameter, levels=custom.sort(D$Parameter))
        }
        # Unfortunately, the attributes are not inherited in left_join(), so they have to be manually passed again
        attr(D, "nChains") <- aD$nChains
        attr(D, "nParameters") <- aD$nParameters
        attr(D, "nIterations") <- aD$nIterations
        attr(D, "nBurnin") <- aD$nBurnin
        attr(D, "nThin") <- aD$nThin
        attr(D, "description") <- aD$description
        # Keep the rest of the variables passed if the data frame has more than Parameter and Label
        if (dim(par_labels)[2] > 2) {
          aD <- attributes(D)
          D <- dplyr::left_join(D, dplyr::select(dplyr::tbl_df(par_labels), -Parameter), by=c("Parameter"="Label"))
          if (is.character(D$Parameter)) {
            D$Parameter <- factor(D$Parameter, levels=custom.sort(D$Parameter))
          }
        }
        # Unfortunately, the attributes are not inherited in left_join(), so they have to be manually passed again (for second time).
        attr(D, "nChains") <- aD$nChains
        attr(D, "nParameters") <- aD$nParameters
        attr(D, "nIterations") <- aD$nIterations
        attr(D, "nBurnin") <- aD$nBurnin
        attr(D, "nThin") <- aD$nThin
        attr(D, "description") <- aD$description
      } else {
        stop("par_labels must include at least columns called 'Parameter' and 'Label'.")
      }
    } else {
      # Anything that is neither a data frame nor the NA default is an error.
      # identical() avoids a length > 1 condition when par_labels is a vector.
      if (!identical(par_labels, NA)) {
        stop("par_labels must be a data frame.")
      }
    }
    # Once everything is ready, return the processed object
    return(D)
  } else {
    stop("ggs is not able to transform the input object into a ggs object suitable for ggmcmc.")
  }
}
#' Auxiliary function that extracts information from a single chain.
#'
#' @param s a single chain to convert into a data frame
#' @return D data frame with the chain arranged
ggs_chain <- function(s) {
  # Iterations of this chain are numbered sequentially from 1.
  iterations <- 1:dim(s)[1]
  # Strip the mcmc class and bind the iteration index to the raw draws; keep
  # parameter names exactly as given (check.names = FALSE).
  samples.df <- data.frame(Iteration = iterations, as.matrix(unclass(s)),
                           check.names = FALSE)
  # Reshape wide -> long (one row per Iteration x Parameter) and hand back a
  # tbl_df so downstream dplyr verbs print and behave nicely.
  dplyr::tbl_df(tidyr::gather(samples.df, Parameter, value, -Iteration))
}
#' Auxiliary function that sorts Parameter names taking into account numeric values
#'
#' @param x a character vector to which we want to sort elements
#' @return X a character vector sorted with family parametrs first and then numeric values
custom.sort <- function(x) {
x <- as.character(unique(x))
family <- gsub("\\[.+\\]", "", x)
Families <- sort(unique(family))
X <- NULL
for (f in Families) {
x.family <- x[family == f]
if (length(grep("\\[", x.family)) > 0) {
x.family <- x.family[order(as.numeric((gsub("]", "", gsub("(.+)\\[", "", x.family)))))]
X <- c(X, x.family)
} else {
X <- c(X, x.family)
}
}
return(X)
}
|
\name{sofaes}
\alias{sofaes}
\title{sofaes default}
\usage{
sofaes(x, ...)
}
\arguments{
\item{x}{input}
\item{...}{more stuff}
}
\description{
sofaes default
}
| /man/sofaes.Rd | no_license | ramnathv/sofa | R | false | false | 171 | rd | \name{sofaes}
\alias{sofaes}
\title{sofaes default}
\usage{
sofaes(x, ...)
}
\arguments{
\item{x}{input}
\item{...}{more stuff}
}
\description{
sofaes default
}
|
library(dplyr)
# Force English locale so weekday labels on the time axis are not localized
Sys.setlocale("LC_ALL", 'en_US.UTF-8') # important, because my OS isn't in English
if (!file.exists('./data')) {
  dir.create('./data')
}
## Download and unzip the dataset only if it is not already present in the
## current working directory
if (!file.exists('./data/household_power_consumption.txt')) {
  file_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  download.file(file_url, destfile = "./data/household_power_consumption.zip", mode = "wb", method = "curl")
  dateDownloaded <- date()
  unzip('./data/household_power_consumption.zip', exdir = 'data')
}
## To avoid re-reading the large file, create power_data only if it does not
## already exist in the environment. Keep only the two target days and build a
## combined POSIXlt date-time column from the Date and Time fields.
if (!exists("power_data")) {
  power_data <- read.table('./data/household_power_consumption.txt', header = TRUE, sep = ";", na.strings = '?')
  power_data <- filter(power_data, Date == '1/2/2007' | Date == '2/2/2007')
  power_data <- transform(power_data, date_time = paste(Date, Time))
  power_data$date_time <-strptime(power_data$date_time, "%d/%m/%Y %H:%M:%S")
}
# Write a 2x2 panel of plots to plot4.png
png(file = "plot4.png")
par(mfrow = c(2, 2))
# plot 1: global active power over time
plot(power_data$date_time, power_data$Global_active_power, type ='n', xlab = '', ylab = 'Global Active Power')
lines(power_data$date_time, power_data$Global_active_power)
# plot 2: voltage over time
plot(power_data$date_time, power_data$Voltage, type ='n', xlab = 'datetime', ylab = 'Voltage')
lines(power_data$date_time, power_data$Voltage)
# plot 3: the three sub-metering series overlaid, with a legend
plot(power_data$date_time, power_data$Sub_metering_1, type ='n', xlab = '', ylab = 'Energy sub metering')
lines(power_data$date_time, power_data$Sub_metering_1)
lines(power_data$date_time, power_data$Sub_metering_2, col = 'red')
lines(power_data$date_time, power_data$Sub_metering_3, col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), col = c('black' ,'red', 'blue'), lwd = 1, bty = 'n')
# plot 4: global reactive power over time
plot(power_data$date_time, power_data$Global_reactive_power, type ='n', xlab = 'datetime', ylab = 'Global_reactive_power')
lines(power_data$date_time, power_data$Global_reactive_power)
dev.off()
| /plot4.R | no_license | tuliocasagrande/ExData_Plotting1 | R | false | false | 2,148 | r | library(dplyr)
Sys.setlocale("LC_ALL", 'en_US.UTF-8') # important, because my OS isn't in English
if (!file.exists('./data')) {
dir.create('./data')
}
## It will download and unzip the data only if there isn't the dataset in the
## current working directory
if (!file.exists('./data/household_power_consumption.txt')) {
file_url <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
download.file(file_url, destfile = "./data/household_power_consumption.zip", mode = "wb", method = "curl")
dateDownloaded <- date()
unzip('./data/household_power_consumption.zip', exdir = 'data')
}
## To avoid unnecessary computation, it will create some variables only if they
## don't exist in the environment
if (!exists("power_data")) {
power_data <- read.table('./data/household_power_consumption.txt', header = TRUE, sep = ";", na.strings = '?')
power_data <- filter(power_data, Date == '1/2/2007' | Date == '2/2/2007')
power_data <- transform(power_data, date_time = paste(Date, Time))
power_data$date_time <-strptime(power_data$date_time, "%d/%m/%Y %H:%M:%S")
}
png(file = "plot4.png")
par(mfrow = c(2, 2))
# plot 1
plot(power_data$date_time, power_data$Global_active_power, type ='n', xlab = '', ylab = 'Global Active Power')
lines(power_data$date_time, power_data$Global_active_power)
# plot 2
plot(power_data$date_time, power_data$Voltage, type ='n', xlab = 'datetime', ylab = 'Voltage')
lines(power_data$date_time, power_data$Voltage)
# plot 3
plot(power_data$date_time, power_data$Sub_metering_1, type ='n', xlab = '', ylab = 'Energy sub metering')
lines(power_data$date_time, power_data$Sub_metering_1)
lines(power_data$date_time, power_data$Sub_metering_2, col = 'red')
lines(power_data$date_time, power_data$Sub_metering_3, col = 'blue')
legend('topright', legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), col = c('black' ,'red', 'blue'), lwd = 1, bty = 'n')
# plot 4
plot(power_data$date_time, power_data$Global_reactive_power, type ='n', xlab = 'datetime', ylab = 'Global_reactive_power')
lines(power_data$date_time, power_data$Global_reactive_power)
dev.off()
|
## Plot the 30-day mortality rates for heart attack
## Read the outcome data into R via the read.csv function and look at the first few rows.
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)
## There are many columns in this dataset. You can see how many by typing ncol(outcome) (you can see
## the number of rows with the nrow function). In addition, you can see the names of each column by typing
names(outcome)
## The names are also in the PDF document.
## To make a simple histogram of the 30-day death rates from heart attack (column 11 in the outcome dataset),
## run
outcome[, 11] <- as.numeric(outcome[, 11])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[, 11])
## Note on the coercion above:
## Because we originally read the data in as character (by specifying colClasses = "character"), we need to
## coerce the column to be numeric. You may get a warning about NAs being introduced but that is okay.
| /no_submit.R | no_license | bernieh2005/ProgrammingAssignment3Templates | R | false | false | 965 | r | ## Plot the 30-day mortality rates for heart attack
## Read the outcome data into R via the read.csv function and look at the first few rows.
outcome <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
head(outcome)
## There are many columns in this dataset. You can see how many by typing ncol(outcome) (you can see
## the number of rows with the nrow function). In addition, you can see the names of each column by typing
names(outcome)
## The names are also in the PDF document.
## To make a simple histogram of the 30-day death rates from heart attack (column 11 in the outcome dataset),
## run
outcome[, 11] <- as.numeric(outcome[, 11])
## You may get a warning about NAs being introduced; that is okay
hist(outcome[, 11])
## 1
## Because we originally read the data in as character (by specifying colClasses = "character" we need to
## coerce the column to be numeric. You may get a warning about NAs being introduced but that is okay.
|
\name{wmlung5070US}
\alias{wmlung5070US}
\docType{data}
\title{
U.S. lung cancer mortality data for white males, 1950-1969 and 1970-1994
}
\description{
Count and age-adjusted rate of lung cancer mortality among white men for the total U.S.,
aggregated for 1950-69 and 1970-94.
}
\usage{wmlung5070US}
\format{
A data frame with 1 observations on the following 5 variables.
\describe{
\item{RATEWM_50}{a numeric vector, US age adjusted mortality rates for 1950-1969}
\item{COUNTWM_50}{a numeric vector, US number of cases from 1950-1969}
\item{RATEWM_70}{a numeric vector, US age adjusted mortality rates for 1970-1994}
\item{COUNTWM_70}{a numeric vector, US number of cases from 1970-1994}
\item{PERCENT}{a numeric vector, change from 1950-1969 to 1970-1994 US rates.}
}
}
\details{
See \code{wmlung5070} for further details.
The row name is always \var{US} indicating US rates.
This dataset is used by the \var{micromapSEER} examples using the border group of "USStatesDF".
}
\author{Linda W. Pickle and Jim Pearson of StatNet Consulting, LLC, Gaithersburg, MD}
\references{
None
}
\keyword{datasets} | /man/wmlung5070US.Rd | no_license | Suppaman/micromapST | R | false | false | 1,163 | rd | \name{wmlung5070US}
\alias{wmlung5070US}
\docType{data}
\title{
U.S. lung cancer mortality data for white males, 1950-1969 and 1970-1994
}
\description{
Count and age-adjusted rate of lung cancer mortality among white men for the total U.S.,
aggregated for 1950-69 and 1970-94.
}
\usage{wmlung5070US}
\format{
A data frame with 1 observations on the following 5 variables.
\describe{
\item{RATEWM_50}{a numeric vector, US age adjusted mortality rates for 1950-1969}
\item{COUNTWM_50}{a numeric vector, US number of cases from 1950-1969}
\item{RATEWM_70}{a numeric vector, US age adjusted mortality rates for 1970-1994}
\item{COUNTWM_70}{a numeric vector, US number of cases from 1970-1994}
\item{PERCENT}{a numeric vector, change from 1950-1969 to 1970-1994 US rates.}
}
}
\details{
see wmlung5070 for further details.
The row name is always \var{US} indicating US rates.
This dataset is used by the \var{micromapSEER} examples using the border group of "USStatesDF".
}
\author{Linda W. Pickle and Jim Pearson of StatNet Consulting, LLC, Gaithersburg, MD}
\references{
None
}
\keyword{datasets} |
dat = read.csv("saerela.csv")
library(boot)
# function that takes data and indices as input and generates an estimate of the mean outcome under
# always treat - mean outcome under never treat strategy
# Bootstrap statistic for boot::boot(): estimates the marginal effect of
# always treating vs never treating across three time points, using a
# marginal structural model fitted with stabilized inverse-probability-of-
# treatment (IPT) weights.
# NOTE(review): assumes binary treatments Z1-Z3, binary outcome Y, and
# time-varying covariates X11-X15, X21-X25, X31-X35 in `data`
# (columns of saerela.csv) -- TODO confirm schema against the data file.
standard = function(data, indices) {
  # Bootstrap resample selected by boot()
  d = data[indices,]
  #d$interv = -1
  # Denominator models for the IPT weights: P(Zt | past treatment, current covariates)
  tmodel1 <- glm(Z1 ~ X11 + X12 + X13 + X14 + X15, data = d, family=binomial(link=logit))
  tmodel2 <- glm(Z2 ~ Z1 + X21 + X22 + X23 + X24 + X25, data = d, family=binomial(link=logit))
  tmodel3 <- glm(Z3 ~ Z2 + X31 + X32 + X33 + X34 + X35, data = d, family=binomial(link=logit))
  # Probability of the treatment actually received at each time point
  p1 <- dbinom(d$Z1, 1, prob = (predict(tmodel1, type = "response")))
  p2 <- dbinom(d$Z2, 1, prob = (predict(tmodel2, type = "response")))
  p3 <- dbinom(d$Z3, 1, prob = (predict(tmodel3, type = "response")))
  # Joint probability of the observed treatment history (weight denominator)
  pt <- p1 * p2 * p3
  # Numerator models for weight stabilization: P(Zt | past treatment only)
  smodel1 <- glm(Z1 ~ 1, data = d, family=binomial(link=logit))
  smodel2 <- glm(Z2 ~ Z1, data = d, family=binomial(link=logit))
  smodel3 <- glm(Z3 ~ Z1 + Z2, data = d, family=binomial(link=logit))
  sp1 <- dbinom(d$Z1, 1, prob = (predict(smodel1, type = "response")))
  sp2 <- dbinom(d$Z2, 1, prob = (predict(smodel2, type = "response")))
  sp3 <- dbinom(d$Z3, 1, prob = (predict(smodel3, type = "response")))
  # Stabilization factor (weight numerator)
  sc <- sp1 * sp2 * sp3
  # Unstabilized weights: 1 / P(observed treatment history)
  iptw <- 1.0/pt
  # Stabilized weights: numerator * unstabilized weight
  iptws <- sc * iptw
  # s = cumulative number of treated periods (0..3)
  d$s = with(d, Z1 + Z2 + Z3)
  # Weighted MSM for the outcome as a function of cumulative treatment
  fit = (glm(Y ~ s, data = d, weights = iptws, family = quasibinomial))
  # Predicted mean outcome under always treat (s=3) and never treat (s=0)
  ey111 = predict(fit, newdata = data.frame(s=3), type = "response")
  ey000 = predict(fit, newdata = data.frame(s=0), type = "response")
  return(ey111-ey000)
}
#resultsip <- boot(data = dat, statistic = standard, R = 100)
#resip = data.frame(res = (resultsip$t))
#write.csv(resip, file = "simresults/resip.csv")
| /iptw.R | no_license | DBomber60/is.causal | R | false | false | 1,751 | r | dat = read.csv("saerela.csv")
library(boot)
# function that takes data and indices as input and generates an estimate of the mean outcome under
# always treat - mean outcome under never treat strategy
standard = function(data, indices) {
d = data[indices,]
#d$interv = -1
# IPT weights:
tmodel1 <- glm(Z1 ~ X11 + X12 + X13 + X14 + X15, data = d, family=binomial(link=logit))
tmodel2 <- glm(Z2 ~ Z1 + X21 + X22 + X23 + X24 + X25, data = d, family=binomial(link=logit))
tmodel3 <- glm(Z3 ~ Z2 + X31 + X32 + X33 + X34 + X35, data = d, family=binomial(link=logit))
p1 <- dbinom(d$Z1, 1, prob = (predict(tmodel1, type = "response")))
p2 <- dbinom(d$Z2, 1, prob = (predict(tmodel2, type = "response")))
p3 <- dbinom(d$Z3, 1, prob = (predict(tmodel3, type = "response")))
pt <- p1 * p2 * p3
smodel1 <- glm(Z1 ~ 1, data = d, family=binomial(link=logit))
smodel2 <- glm(Z2 ~ Z1, data = d, family=binomial(link=logit))
smodel3 <- glm(Z3 ~ Z1 + Z2, data = d, family=binomial(link=logit))
sp1 <- dbinom(d$Z1, 1, prob = (predict(smodel1, type = "response")))
sp2 <- dbinom(d$Z2, 1, prob = (predict(smodel2, type = "response")))
sp3 <- dbinom(d$Z3, 1, prob = (predict(smodel3, type = "response")))
sc <- sp1 * sp2 * sp3
# never treat
# always treat
iptw <- 1.0/pt
iptws <- sc * iptw
d$s = with(d, Z1 + Z2 + Z3)
fit = (glm(Y ~ s, data = d, weights = iptws, family = quasibinomial))
ey111 = predict(fit, newdata = data.frame(s=3), type = "response")
ey000 = predict(fit, newdata = data.frame(s=0), type = "response")
return(ey111-ey000)
}
#resultsip <- boot(data = dat, statistic = standard, R = 100)
#resip = data.frame(res = (resultsip$t))
#write.csv(resip, file = "simresults/resip.csv")
|
# Karan app: Shiny server that reads an uploaded power-consumption csv and
# plots the power series, optionally restricted to a date or date range.
library(shiny)
library(data.table)
library(fasttime) #for fastPOSIXct, a fast timestamp parser
library(ggplot2)
library(scales)
suppressWarnings(library(plotly))
# Allow uploads up to 100 MB
options(shiny.maxRequestSize=100*1024^2)
# NOTE(review): rm(list=ls()) in a sourced app file clears the calling
# environment as a side effect -- consider removing.
rm(list=ls())
Sys.setenv(TZ="Asia/Kolkata")
shinyServer(
  function(input, output) {
    # Reactive: load the uploaded csv (columns: timestamp, power) and parse
    # timestamps. 19800 s = 5 h 30 min, presumably correcting fastPOSIXct's
    # UTC parse to IST (matches TZ set above) -- TODO confirm.
    dframe <- reactive( {
      #inputfile <- input$infilepower
      validate(
        need(input$infilepower != "","Please select a data set")
      )
      dframe <- fread(input$infilepower$datapath, header = TRUE, sep = ",")
      dframe$timestamp <- fastPOSIXct(dframe$timestamp)-19800
      # dframe_xts <- xts(dframe$power,dframe$timestamp)
      dframe
    } )
    # Reactive: subset the loaded data to the selected date range or single
    # day; returns the full data when neither option is checked.
    df <- reactive( {
      dframe <- dframe()
      if (input$specdaterange| input$specdate){
        if(input$specdaterange) {
          # Date range: span from start of first day to end of last day
          start_date = input$seldaterange[1]
          end_date =input$seldaterange[2]
          startdate <- fastPOSIXct(paste0(start_date,' ',"00:00:00"))-19800
          enddate <- fastPOSIXct(paste0(end_date,' ',"23:59:59"))-19800
        } else {
          # Single day: span the whole selected day
          datex = input$seldate
          startdate <- fastPOSIXct(paste0(datex,' ',"00:00:00"))-19800
          enddate <- fastPOSIXct(paste0(datex,' ',"23:59:59"))-19800
        }
        dframe <- dframe[dframe$timestamp >= startdate & dframe$timestamp <= enddate,] #reduced
      }
      dfs <- dframe
      dfs
    } )
    # Output: interactive line plot of power vs time for the (possibly
    # date-filtered) data returned by df().
    output$lineplt1 <- renderPlotly({
      df_sub <- df()
      colnames(df_sub) <- c("timestamp","Power")
      g <- ggplot(df_sub, aes(timestamp, Power))
      g <- g + geom_line() + labs(x = "", y ="power (Watts)")
      g <- g + scale_x_datetime(labels = date_format("%Y-%d-%b %H:%M",tz="Asia/Kolkata")) # use scales package
      g <- g + theme(axis.text.x = element_text(angle = 90,hjust = 1))
      ggplotly(g)
      #g
    })
  } )
} ) | /Karan_app/server.R | no_license | loneharoon/R_apps | R | false | false | 1,982 | r | # Karan app
library(shiny)
library(data.table)
library(fasttime) #for fastPosixct
library(ggplot2)
library(scales)
suppressWarnings(library(plotly))
options(shiny.maxRequestSize=100*1024^2)
rm(list=ls())
Sys.setenv(TZ="Asia/Kolkata")
shinyServer(
function(input, output) {
dframe <- reactive( {
#inputfile <- input$infilepower
validate(
need(input$infilepower != "","Please select a data set")
)
dframe <- fread(input$infilepower$datapath, header = TRUE, sep = ",")
dframe$timestamp <- fastPOSIXct(dframe$timestamp)-19800
# dframe_xts <- xts(dframe$power,dframe$timestamp)
dframe
} )
df <- reactive( {
dframe <- dframe()
if (input$specdaterange| input$specdate){
if(input$specdaterange) {
start_date = input$seldaterange[1]
end_date =input$seldaterange[2]
startdate <- fastPOSIXct(paste0(start_date,' ',"00:00:00"))-19800
enddate <- fastPOSIXct(paste0(end_date,' ',"23:59:59"))-19800
} else {
datex = input$seldate
startdate <- fastPOSIXct(paste0(datex,' ',"00:00:00"))-19800
enddate <- fastPOSIXct(paste0(datex,' ',"23:59:59"))-19800
}
dframe <- dframe[dframe$timestamp >= startdate & dframe$timestamp <= enddate,] #reduced
}
dfs <- dframe
dfs
} )
# this function calls df()[it keeps check on time range for displaying] which in turn call dframe[this function loads intially data from file]
output$lineplt1 <- renderPlotly({
df_sub <- df()
colnames(df_sub) <- c("timestamp","Power")
g <- ggplot(df_sub, aes(timestamp, Power))
g <- g + geom_line() + labs(x = "", y ="power (Watts)")
g <- g + scale_x_datetime(labels = date_format("%Y-%d-%b %H:%M",tz="Asia/Kolkata")) # use scales package
g <- g + theme(axis.text.x = element_text(angle = 90,hjust = 1))
ggplotly(g)
#g
})
} ) |
# Auto-extracted example for circumplex::instruments(): lists the circumplex
# instruments bundled with the package.
library(circumplex)
### Name: instruments
### Title: List all available instruments
### Aliases: instruments
### ** Examples
instruments()
| /data/genthat_extracted_code/circumplex/examples/instruments.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 146 | r | library(circumplex)
### Name: instruments
### Title: List all available instruments
### Aliases: instruments
### ** Examples
instruments()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fa_i.R
\name{fa_i}
\alias{fa_i}
\title{Generate a Font Awesome \verb{<i>} tag}
\usage{
fa_i(name, class = NULL, ..., html_dependency = NULL, verify_fa = TRUE)
}
\arguments{
\item{name}{The name of the Font Awesome icon. This could be as a short name
(e.g., \code{"npm"}, \code{"drum"}, etc.), or, a full name (e.g., \code{"fab fa-npm"},
\code{"fas fa-drum"}, etc.). The names should correspond to current Version 5
Font Awesome names. A list of short and full names can be accessed through
the \code{\link[=fa_metadata]{fa_metadata()}} function with \code{fa_metadata()$icon_names} and
\code{fa_metadata()$icon_names_full}. If supplying a Version 4 icon name, it
will be internally translated to the Version 5 icon name and a Version 5
icon will be returned. A data frame containing the short names that changed
from version 4 (\code{v4_name}) to version 5 (\code{v5_name}) can be obtained by
using \code{fa_metadata()$v4_v5_name_tbl}.}
\item{class}{Additional classes to customize the style of the icon (see the
usage examples for details on supported styles).}
\item{...}{Arguments passed to the \verb{<i>} tag of \link[htmltools:builder]{htmltools::tags}.}
\item{html_dependency}{Provides an opportunity to use a custom
\code{html_dependency} object (created via a call to
\code{\link[htmltools:htmlDependency]{htmltools::htmlDependency()}}) instead of one supplied by the function
(which uses Font Awesome's free assets and are bundled in the package). A
custom \code{html_dependency} object is useful when you have paid icons from
Font Awesome or would otherwise like to customize exactly which icon assets
are used (e.g., woff, woff2, eot, etc.). By default, this is \code{NULL} where
the function internally generates an \code{html_dependency}.}
\item{verify_fa}{An option to verify the provided icon \code{name}. If \code{TRUE} (the
default), internal checks will take place and issue messages should the
\code{name} be a Font Awesome 4 icon name (the message will provide the Version
5 name), or, if the icon name cannot be found in either Font Awesome 4 or
5.}
}
\value{
An icon element.
}
\description{
The \code{fa_i()} function creates a Font Awesome \verb{<i>} tag and not an SVG as with
\code{\link[=fa]{fa()}}. The primary use case for \code{fa_i()} is for legacy Shiny applications
that use the \code{shiny::icon()} function. This function is called within a
\code{shiny::icon()} call and all HTML dependencies to support icon generation are
hosted in the \strong{fontawesome} package.
}
\examples{
if (interactive()) {
# Create a Font Awesome icon object
fa_i(name = "r-project")
}
}
| /man/fa_i.Rd | permissive | fiorenzino/fontawesome | R | false | true | 2,686 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fa_i.R
\name{fa_i}
\alias{fa_i}
\title{Generate a Font Awesome \verb{<i>} tag}
\usage{
fa_i(name, class = NULL, ..., html_dependency = NULL, verify_fa = TRUE)
}
\arguments{
\item{name}{The name of the Font Awesome icon. This could be as a short name
(e.g., \code{"npm"}, \code{"drum"}, etc.), or, a full name (e.g., \code{"fab fa-npm"},
\code{"fas fa-drum"}, etc.). The names should correspond to current Version 5
Font Awesome names. A list of short and full names can be accessed through
the \code{\link[=fa_metadata]{fa_metadata()}} function with \code{fa_metadata()$icon_names} and
\code{fa_metadata()$icon_names_full}. If supplying a Version 4 icon name, it
will be internally translated to the Version 5 icon name and a Version 5
icon will be returned. A data frame containing the short names that changed
from version 4 (\code{v4_name}) to version 5 (\code{v5_name}) can be obtained by
using \code{fa_metadata()$v4_v5_name_tbl}.}
\item{class}{Additional classes to customize the style of the icon (see the
usage examples for details on supported styles).}
\item{...}{Arguments passed to the \verb{<i>} tag of \link[htmltools:builder]{htmltools::tags}.}
\item{html_dependency}{Provides an opportunity to use a custom
\code{html_dependency} object (created via a call to
\code{\link[htmltools:htmlDependency]{htmltools::htmlDependency()}}) instead of one supplied by the function
(which uses Font Awesome's free assets and are bundled in the package). A
custom \code{html_dependency} object is useful when you have paid icons from
Font Awesome or would otherwise like to customize exactly which icon assets
are used (e.g., woff, woff2, eot, etc.). By default, this is \code{NULL} where
the function internally generates an \code{html_dependency}.}
\item{verify_fa}{An option to verify the provided icon \code{name}. If \code{TRUE} (the
default), internal checks will take place and messages will be issued if the
\code{name} is a Font Awesome 4 icon name (in which case the message will
provide the corresponding Version 5 name), or if the icon name cannot be
found in either Font Awesome 4 or 5.}
}
\value{
An icon element.
}
\description{
The \code{fa_i()} function creates a Font Awesome \verb{<i>} tag and not an SVG as with
\code{\link[=fa]{fa()}}. The primary use case for \code{fa_i()} is for legacy Shiny applications
that use the \code{shiny::icon()} function. This function is called within a
\code{shiny::icon()} call and all HTML dependencies to support icon generation are
hosted in the \strong{fontawesome} package.
}
\examples{
if (interactive()) {
# Create a Font Awesome icon object
fa_i(name = "r-project")
}
}
|
# R ships with packages that must be installed and loaded; many of them also
# bundle example data sets for practice.
# Built-in datasets in R
data()  # lists every built-in dataset; opens in a separate viewer tab
# Load the mtcars dataset into the workspace
data(mtcars)
# Print the first 6 rows
head(mtcars, 6)
| /4.6 Built in Data in R.R | no_license | Vaibhavtomar20/Learning-R | R | false | false | 247 | r | # R has various packages which need to be install and also contain various data sets
# Built-in datasets in R
data()# List of built-in Datasets in R. Open in different tab.
# Loading
data(mtcars)
# Print the first 6 rows
head(mtcars, 6)
|
# Shared packages for this helper file: data.table for fast table handling,
# DT for interactive tables in the UI, purrr for functional iteration.
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(DT))
suppressPackageStartupMessages(library(purrr))
# Reference table of the standard vocabularies accepted per OMOP domain;
# used by the mapped vs. direct search strategies to decide which concept
# vocabularies may be searched in each domain table.
standard_concepts <- data.table("domain_type"= c("Measurement","Condition","Drug","Observation","Device","Procedure"),"concepts"= c("LOINC,SNOMED,CPT4","SNOMED","RxNorm,CPT4,NDC","SNOMED,CPT4,LOINC,HCPCS","SNOMED,HCPCS","SNOMED,CPT4,HCPCS"))
##################
### CONNECTION ###
##################
# Build the text of a DBI::dbConnect() call for the given credentials.
# The returned string is later eval(parse())-ed by the caller in a scope
# where a driver object named `drv` exists. Empty username/password/host/port
# arguments are omitted from the generated call.
# BUG FIX: single quotes inside credential values are now escaped so they
# cannot break (or inject arbitrary code into) the generated R expression.
# @param username,password,host,dbname,port connection parameters; any of
#   username/password/host/port may be "" to be left out of the call.
# @return A single string containing a complete DBI::dbConnect() call.
setConnectFunction <- function(username, password, host, dbname, port) {
  # escape ' so the value stays inside its single-quoted literal
  quote_arg <- function(x) gsub("'", "\\\\'", x)
  connectString <- paste0("dbname='", quote_arg(dbname), "'")
  if (username != "") {
    connectString <- paste0(connectString, ", user='", quote_arg(username), "'")
  }
  if (password != "") {
    connectString <- paste0(connectString, ", password='", quote_arg(password), "'")
  }
  if (host != "") {
    connectString <- paste0(connectString, ", host='", quote_arg(host), "'")
  }
  if (port != "") {
    connectString <- paste0(connectString, ", port= ", as.integer(port))
  }
  fullConnectString <- paste0('DBI::dbConnect(drv, ', connectString, ')')
  return(fullConnectString)
}
# Try to open a database connection with the supplied credentials and report
# whether it succeeded (TRUE/FALSE). MySQL goes through dbDriver("MySQL")
# plus a generated DBI::dbConnect() call; every other driver goes through
# DatabaseConnector::connect(). Any warning or error raised during the
# attempt is deliberately swallowed (both handlers return NULL), which is
# what marks the check as failed. A successfully opened connection is
# closed when the function exits.
checkOMOPconnection <- function(driver, username, password, host, dbname, port) {
  status<- tryCatch(
    {
      if (driver=="mysql") {
        drv <- dbDriver("MySQL")
        # NOTE(review): the generated string references the local `drv`
        # object, so the eval() must happen in this function's scope.
        fullConnectString <- setConnectFunction(username, password, host, dbname, port)
        con <- eval(parse(text = fullConnectString))
      } else {
        con <- DatabaseConnector::connect(dbms = driver,
                                          server = host,
                                          user = username,
                                          password = password,
                                          schema = dbname,
                                          port = port)
      }
    },
    warning = function(w) {
      # ignore: a warning during connect counts as a failed check
    },
    error = function(e) {
      # ignore: an error during connect counts as a failed check
    }
  )
  # status is the connection object on success, NULL when a handler ran
  if(!is.null(status)){
    out <- TRUE
    # close the probe connection when this function returns
    if (driver=="mysql") {
      on.exit(dbDisconnect(con))
    } else {
      on.exit(DatabaseConnector::disconnect(con))
    }
  }else{
    out <- FALSE
  }
  return(out)
}
# Verify that the connected OMOP CDM database exposes every table this app
# reads from and that none of them is empty.
# @return list with two entries: missingTables (tables not present at all)
#   and emptyTables (tables present but with zero rows), each a list of
#   table names.
# NOTE(review): the per-table row probe goes through the global sqlQuery()
# helper rather than the `con` opened here — confirm both point at the
# same schema.
checkOMOPtables <- function(driver, username, password, host, dbname, port) {
  # every CDM table the explorer requires
  necessaryTables = c("concept","concept_ancestor","concept_relationship","condition_occurrence","death","device_exposure","drug_exposure","measurement","observation","person","procedure_occurrence","visit_occurrence")
  if (driver=="mysql") {
    drv <- dbDriver("MySQL")
    fullConnectString <- setConnectFunction(username, password, host, dbname, port)
    con <- eval(parse(text = fullConnectString))
  } else {
    # creating connection object using DatabaseConnector
    con <- DatabaseConnector::connect(dbms = driver,
                                      server = host,
                                      user = username,
                                      password = password,
                                      schema = dbname,
                                      port = port)
  }
  # lower-case for case-insensitive table-name comparison
  foundTablesData <- tolower(dbListTables(con))
  # close the probe connection when this function returns
  if (driver=="mysql") {
    on.exit(dbDisconnect(con))
  } else {
    on.exit(DatabaseConnector::disconnect(con))
  }
  missingTables <- list()
  emptyTables <-list()
  for (tbls in necessaryTables) {
    if (!tbls %in% foundTablesData) { # check if table exists
      missingTables <- c(missingTables, tbls)
    } else { # check if any data in found table
      # MySQL uses LIMIT; the other supported dialects use TOP
      if (driver=="mysql") {
        dataCheckQuery <- paste0("SELECT * FROM " , tbls , " LIMIT 1;")
      } else {
        dataCheckQuery <- paste0("SELECT TOP 1 * FROM " , tbls, ";")
      }
      dataCheck <- sqlQuery(dataCheckQuery)
      if (nrow(dataCheck)==0) {
        emptyTables <- c(emptyTables, tbls)
      }
    }
  }
  return(list("missingTables" = missingTables, "emptyTables" = emptyTables))
}
#### check if patient exists for search function
# Return TRUE when the given person_id is present in the preloaded
# demographics table (pts_demographics, a global built at app start-up),
# FALSE otherwise.
check_if_pt_exists <- function(ptid) {
  ptid %in% pts_demographics$person_id
}
##################
### PRE - LOAD ###
##################
# Data Ontology
# Load the concept dictionary ("data ontology") used to translate OMOP
# concept ids into names/vocabularies. The table is cached on disk as an
# RDS file (path prefix from getOption("currentPath")); the database is
# only queried on the first call, after which the cached copy is read back.
make_data_ontology <- function() {
  cache_file <- paste0(getOption("currentPath"), "dataOntology.rds")
  if (file.exists(cache_file)) {
    # cached copy available: skip the expensive concept-table query
    return(readRDS(cache_file))
  }
  conceptQuery <- "SELECT concept_id, concept_name, domain_id, vocabulary_id, concept_class_id, concept_code FROM concept WHERE (invalid_reason = '' OR invalid_reason IS NULL);"
  ontology <- data.table(sqlQuery(conceptQuery))
  # normalize concept names to UTF-8 for consistent matching/display
  ontology$concept_name <- enc2utf8(ontology$concept_name)
  saveRDS(ontology, cache_file)  # persist for subsequent calls
  return(ontology)
}
# Demographic data
## pre-load demographic data for all patients to save in memory to map to cohort from searches
# Load demographics for every patient in the CDM person table, resolve the
# gender/race/ethnicity concept ids against the data ontology, compute a
# current age from year_of_birth, join in death dates and derive an
# Alive/Deceased status. The result is held in memory and used to map
# search results to a cohort.
# NOTE(review): age is (current calendar year - birth year), computed even
# for deceased patients — confirm this is the intended definition.
getDemographics <-function() { # patient list will restrict search
  queryStatement <- "SELECT person_id, year_of_birth, gender_concept_id, ethnicity_concept_id, race_concept_id FROM person"
  deathqueryStatement <-"SELECT person_id, death_date FROM death"
  # first get main patient data
  ptDemo <- sqlQuery(queryStatement)
  ptDemo <- data.table(ptDemo) # convert to data.table
  current_year <- as.numeric(format(Sys.Date(),"%Y")) # get current year to calculate age
  ptDemo$age <- current_year - ptDemo$year_of_birth # calculate age
  # map concepts to reference table; unresolved ids become "Unknown"
  ptDemo <- merge(ptDemo, dataOntology[domain_id=="Gender",c("concept_id","concept_name")], by.x ="gender_concept_id", by.y = "concept_id" ,all.x=T) # Gender
  names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Gender' # rename column
  ptDemo=markNAasUnknown(ptDemo,"Gender")
  ptDemo <- merge(ptDemo, dataOntology[domain_id=="Race",c("concept_id","concept_name")], by.x ="race_concept_id", by.y = "concept_id" ,all.x=T) # Race
  names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Race' # rename column
  ptDemo=markNAasUnknown(ptDemo,"Race")
  ptDemo <- merge(ptDemo, dataOntology[domain_id=="Ethnicity",c("concept_id","concept_name")], by.x ="ethnicity_concept_id", by.y = "concept_id" ,all.x=T) # Ethnicity
  names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Ethnicity' # rename column
  ptDemo <- markNAasUnknown(ptDemo,"Ethnicity")
  ### clean up extra columns (raw concept ids are no longer needed)
  ptDemo <- ptDemo[,-c("ethnicity_concept_id","race_concept_id","gender_concept_id")]
  # add in death date
  ptDeath <- sqlQuery(deathqueryStatement)
  ptDeath <- data.table(ptDeath) # convert to data.table
  # merge with patient data (left join: most patients have no death row)
  ptDemo <- merge(ptDemo, ptDeath,by="person_id",all.x=T)
  # mark Alive/Deceased based on presence of a death date
  ptDemo$Status <- ifelse(is.na(ptDemo$death_date),"Alive","Deceased")
  return(ptDemo)
}
####################
#### FORMATTING ####
####################
## unpack vocabularies and codes for search function
# Split "VOCABULARY:CODE" search tokens and map them onto the data
# ontology, keeping only rows where the requested vocabulary matches the
# ontology's vocabulary for that concept code.
# @param vocab_term_list character vector of "VOCAB:CODE" strings.
# @return data.table of matching ontology rows for the requested codes.
unpackAndMap <- function(vocab_term_list) {
  pieces <- str_split(vocab_term_list, ":")
  dataCriteria <- data.table::data.table(
    vocabularies = map_chr(pieces, function(p) p[1]),
    codes = map_chr(pieces, function(p) p[2])
  )
  # join the requested codes onto the ontology by concept code ...
  mapped <- merge(dataCriteria, dataOntology, by.x = "codes", by.y = "concept_code")
  # ... then keep only rows whose vocabulary agrees with the request
  mapped[vocabularies == vocabulary_id]
}
# for 'Mapped' straegy; map input concept codes to common ontology
# Map the given concept ids to their standard-vocabulary synonyms via the
# concept_relationship table, keeping only valid "Maps to" links and
# RxNorm/SNOMED cross-vocabulary links (the standard-concept mappings used
# by the mapped search strategy).
# @param codesFormatted comma-separated concept ids, ready for an SQL IN().
# @return data.table with concept_id_1, concept_id_2, relationship_id.
identifySynonyms <- function(codesFormatted) {
  synonymQuery <- paste0('SELECT concept_id_1, concept_id_2, relationship_id, invalid_reason FROM concept_relationship WHERE concept_id_1 IN (',codesFormatted,');')
  synonymData <- sqlQuery(synonymQuery)
  synonymData <- data.table::data.table(synonymData)
  # BUG FIX: valid rows may carry NA (SQL NULL) instead of "" in
  # invalid_reason; the previous `invalid_reason == ""` filter silently
  # dropped those rows. This now matches the data-ontology query's
  # "(invalid_reason = '' OR invalid_reason IS NULL)" semantics.
  synonymData <- synonymData[is.na(invalid_reason) | invalid_reason == ""]
  synonymData <- synonymData[,-"invalid_reason"]
  # check for "Maps to" or "%- RxNorm%" or "%- SNOMED%" | standard concepts
  synonymDataFiltered <- synonymData[(relationship_id == "Maps to") | (grepl("- RxNorm",relationship_id)) | (grepl("- SNOMED",relationship_id)) ]
  return(synonymDataFiltered)
}
# for 'Mapped' straegy; map input concept codes (from common ontology) to common ontology descendants
# Fetch every ontology descendant of the given (standard) concept ids from
# concept_ancestor and annotate each descendant with its concept details
# from the data ontology.
# @param synonymCodes comma-separated concept ids for an SQL IN() clause.
# @return data.table of descendant concepts joined to ontology metadata.
identifyMappings <- function(synonymCodes) {
  descendantQuery <- paste0(
    'SELECT ancestor_concept_id, descendant_concept_id FROM concept_ancestor A WHERE A.ancestor_concept_id IN (',
    synonymCodes, ' );'
  )
  descendants <- data.table::data.table(sqlQuery(descendantQuery))
  # inner join: descendants missing from the ontology are dropped
  merge(descendants, dataOntology, by.x = "descendant_concept_id", by.y = "concept_id")
}
# identify tables to seach for concepts of interest (direct strategy)
# Direct search strategy: bucket the mapped criteria concept ids by OMOP
# domain so each domain table can be searched with its own id list.
# @param criteriaTable data.table of mapped criteria (domain_id, concept_id).
# @return named list: domain type -> vector of concept ids (possibly empty).
identifyTablesDirect <- function(criteriaTable) {
  searchTable <- list()
  for (dom in unique(standard_concepts$domain_type)) {
    # all criteria concept ids belonging to this domain
    searchTable[[dom]] <- criteriaTable[domain_id == dom]$concept_id
  }
  searchTable
}
# identify tables to seach for concepts of interest (mapped strategy)
# Mapped search strategy: bucket descendant concept ids by OMOP domain,
# restricted to the standard vocabularies that domain's table actually uses
# (per the standard_concepts reference table).
# @param mappingDataInfo data.table of descendant concepts with ontology info.
# @return named list: domain type -> vector of concept ids (possibly empty).
identifyTablesMapped <- function(mappingDataInfo) {
  searchTable <- list()
  for (dom in unique(standard_concepts$domain_type)) {
    domainRows <- mappingDataInfo[domain_id == dom]
    # the comma-separated vocabulary list becomes a regex alternation
    vocabPattern <- gsub(",", "|", standard_concepts[domain_type == dom, concepts])
    domainRows <- domainRows[(grep(vocabPattern, vocabulary_id))]
    searchTable[[dom]] <- domainRows$concept_id
  }
  searchTable
}
### identifyPatients based on function
# function = OR (union)
# Union (OR) search strategy: a patient qualifies if they appear in ANY of
# the per-domain hit tables. NULL tables (domains that were not searched)
# are skipped.
# @return vector of unique person_ids, or NULL when nothing matched.
identifyPatientsOR <- function(pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure) {
  domain_hits <- list(pts_condition, pts_observation, pts_measurement,
                      pts_device, pts_drug, pts_procedure)
  patient_list <- c()
  for (hits in domain_hits) {
    if (!is.null(hits)) {
      patient_list <- union(patient_list, unique(hits$person_id))
    }
  }
  return(patient_list)
}
# function = AND (intersect)
# To identify overlapping patients, we have to backmap the descendant terms to the original concepts
# Intersection (AND) search strategy: a patient must match EVERY original
# input concept. Descendant/synonym concept ids are back-mapped to the
# original criteria codes, a per-code id dictionary (code_map) is built,
# each code collects its matching patients across all searched domains, and
# the final cohort is the intersection of the per-code patient sets.
# @param criteriaMapped original criteria mapped to the ontology (codes, concept_id, domain_id).
# @param synonymDataFiltered output of identifySynonyms().
# @param mappingDataInfo output of identifyMappings().
# @param pts_* per-domain hit tables (NULL when that domain was not searched).
# @return vector of person_ids present in every per-code patient set.
identifyPatientsAND <- function(criteriaMapped, synonymDataFiltered, mappingDataInfo, pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure) {
  names(mappingDataInfo)[names(mappingDataInfo) == 'vocabulary_id'] <- 'mapped_vocabulary_id'
  names(mappingDataInfo)[names(mappingDataInfo) == 'concept_name'] <- 'mapped_concept_name'
  # descendants reached via a synonym ("Maps to" etc.) link
  synonymMapped <- merge(mappingDataInfo[,c("descendant_concept_id","ancestor_concept_id","mapped_vocabulary_id","mapped_concept_name")], synonymDataFiltered[,c("concept_id_1","concept_id_2")], by.x = "ancestor_concept_id", by.y = "concept_id_2", allow.cartesian=TRUE)
  synonymMapped <- synonymMapped[!duplicated(synonymMapped)]
  combinedMapped <- merge(synonymMapped, criteriaMapped, by.x = "concept_id_1", by.y = "concept_id", allow.cartesian=TRUE)
  combinedMapped <- combinedMapped[!duplicated(combinedMapped)]
  # descendants reached directly from the criteria concept ids
  combinedDirect <- merge(mappingDataInfo, criteriaMapped, by.x = "ancestor_concept_id", by.y = "concept_id", allow.cartesian=TRUE)
  combinedDirect <- combinedDirect[!duplicated(combinedDirect)]
  ### derive patient list by concept_codes
  # create code dictionary per original concept input
  unique_codes <- unique(criteriaMapped$codes)
  code_map = list()
  patient_list = list()
  for(c in unique_codes) {
    # BUG FIX: the seed ids were previously collapsed into one
    # comma-separated string via paste(collapse=","), so the %in%
    # comparisons below could only ever match criteria with exactly one
    # concept id. Keep them as an id vector instead.
    seed_codes <- criteriaMapped[codes == c]$concept_id
    code_map[[c]] <- c(seed_codes) # initialize list with original concept ids (i.e. in case of ATC category)
    code_map[[c]] <- c(code_map[[c]], combinedDirect[ancestor_concept_id %in% seed_codes]$descendant_concept_id) # add in direct mapped descendants
    code_map[[c]] <- c(code_map[[c]], combinedMapped[concept_id_1 %in% seed_codes]$descendant_concept_id) # add in synonym codes and descendants
    patient_list[[c]] <- c()
  }
  # collect matching patients per original code, one domain at a time
  if (!is.null(pts_condition)) { #Condition
    condition_codes <- unique(criteriaMapped[domain_id=="Condition"]$codes)
    for(c in condition_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_condition[condition_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  if (!is.null(pts_observation)) { #Observation
    observation_codes <- unique(criteriaMapped[domain_id=="Observation"]$codes)
    for(c in observation_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_observation[observation_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  if (!is.null(pts_measurement)) { #Measurement
    measurement_codes <- unique(criteriaMapped[domain_id=="Measurement"]$codes)
    for(c in measurement_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_measurement[measurement_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  if (!is.null(pts_device)) {#Device
    device_codes <- unique(criteriaMapped[domain_id=="Device"]$codes)
    for(c in device_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_device[device_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  if (!is.null(pts_drug)) { #Drug
    drug_codes = unique(criteriaMapped[domain_id=="Drug"]$codes)
    for(c in drug_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_drug[drug_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  if (!is.null(pts_procedure)) {#Procedure
    procedure_codes <- unique(criteriaMapped[domain_id=="Procedure"]$codes)
    for(c in procedure_codes) {
      patient_list[[c]] <- union(patient_list[[c]], pts_procedure[procedure_concept_id %in% code_map[[c]]]$person_id)
    }
  }
  # a patient must appear in every per-code set to satisfy AND semantics
  patient_list_intersected = Reduce(intersect,patient_list)
  return(patient_list_intersected)
}
### mark any empty fields as Unknown
# Replace NA entries of one column of a data.table with the literal string
# "Unknown" and return the table. Columns that do not exist, or contain no
# NAs, are left untouched.
# (Simplified: the previous version kept an unused `missing_rows` local and
# tested any(is.na(...)) over the whole NA-row subset, which is equivalent
# to checking the target column directly.)
# @param tbl a data.table.
# @param ColToUse column name (string) to clean.
# @return tbl, with NAs in ColToUse replaced by "Unknown".
markNAasUnknown <- function(tbl, ColToUse) {
  if (ColToUse %in% colnames(tbl) && anyNA(tbl[[ColToUse]])) {
    # := modifies tbl by reference; only rows where the column is NA change
    tbl[is.na(get(ColToUse)), eval(ColToUse) := "Unknown"]
  }
  return(tbl)
}
#### generate patient background and summary for report header
# Build the HTML fragments shown in the patient report header: status, age,
# age of death (NA when the patient is alive), ethnicity, race and gender,
# each prefixed with a <strong> label.
# @param pt_background one-row record with death_date, Status, age,
#   year_of_birth, Ethnicity, Race and Gender fields.
# @return list of six HTML strings.
generate_pt_background <- function(pt_background) {
  age_of_death <- NA
  if (!is.na(pt_background$death_date)) {
    # age at death = death year minus birth year
    age_of_death <- as.numeric(year(as.Date(pt_background$death_date))) -
      as.numeric(pt_background$year_of_birth)
  }
  list(
    paste0("<strong>Status:</strong> ", pt_background$Status),
    paste0("<strong>Age:</strong> ", pt_background$age),
    paste0("<strong>Age of Death:</strong> ", age_of_death),
    paste0("<strong>Ethnicity:</strong> ", pt_background$Ethnicity),
    paste0("<strong>Race:</strong> ", pt_background$Race),
    paste0("<strong>Gender:</strong> ", pt_background$Gender)
  )
}
# Build the two blocks of HTML summary lines for the patient report header:
# encounter-level stats (dates/counts) and per-modality record counts.
# Each modality is first collapsed to unique (visit, concept) pairs so a
# concept recorded several times in one visit is only counted once.
# @param pt_data named list of data.tables from get_all_pt_data().
# @return list of two character vectors of HTML fragments.
generate_pt_summary <- function(pt_data) {
  # collapse to the given columns and drop duplicated rows
  dedup <- function(tbl, cols) {
    sliced <- tbl[, cols, with = FALSE]
    sliced[!duplicated(sliced), ]
  }
  encounters <- pt_data$Encounters
  deduped_encounters <- dedup(encounters, c("visit_occurrence_id", "visit_concept"))
  deduped_observations <- dedup(pt_data$Observations, c("visit_occurrence_id", "observation_concept_name"))
  deduped_conditions <- dedup(pt_data$Conditions, c("visit_occurrence_id", "condition_concept_name"))
  deduped_procedures <- dedup(pt_data$Procedures, c("visit_occurrence_id", "procedure_concept_name"))
  deduped_medications <- dedup(pt_data$Medications, c("visit_occurrence_id", "medication_concept_name"))
  deduped_measurements <- dedup(pt_data$Measurements, c("visit_occurrence_id", "measurement_concept_name"))
  deduped_devices <- dedup(pt_data$Devices, c("visit_occurrence_id", "device_concept_name"))
  # first and last visit dates across all encounters
  earliest_date <- as.Date(encounters$visit_start_date[order(encounters$visit_start_date, decreasing = FALSE)[1]])
  recent_date <- as.Date(encounters$visit_start_date[order(encounters$visit_start_date, decreasing = TRUE)[1]])
  str1a <- paste0("<strong>Earliest encounter:</strong> ", earliest_date)
  str2a <- paste0("<strong>Most recent encounter:</strong> ", recent_date)
  str3a <- paste0("<strong># unique encounter types:</strong> ", length(unique(deduped_encounters$visit_concept)))
  str4a <- paste0("<strong># Encounters:</strong> ", nrow(deduped_encounters))
  str5a <- paste0("<strong># Outpatient encounters:</strong> ", nrow(deduped_encounters[which(deduped_encounters$visit_concept == "Outpatient Visit"), ]))
  # BUG FIX: this previously filtered on a non-existent column
  # (Encounter_Is_Inpatient), so the inpatient count was always 0; it now
  # mirrors the outpatient filter on visit_concept.
  str6a <- paste0("<strong># Inpatient encounters:</strong> ", nrow(deduped_encounters[which(deduped_encounters$visit_concept == "Inpatient Visit"), ]))
  strsa <- c(str1a, str2a, str3a, str4a, str5a, str6a)
  # per-modality totals and unique-concept counts (NA concepts excluded)
  str1b <- paste0("<strong># observations:</strong> ", nrow(deduped_observations))
  str2b <- paste0("<strong># unique observation concepts:</strong> ", length(unique(deduped_observations[!is.na(observation_concept_name)]$observation_concept_name)))
  str3b <- paste0("<strong># conditions:</strong> ", nrow(deduped_conditions))
  str4b <- paste0("<strong># unique condition concepts:</strong> ", length(unique(deduped_conditions[!is.na(condition_concept_name)]$condition_concept_name)))
  str5b <- paste0("<strong># procedures:</strong> ", nrow(deduped_procedures))
  str6b <- paste0("<strong># unique procedure concepts:</strong> ", length(unique(deduped_procedures[!is.na(procedure_concept_name)]$procedure_concept_name)))
  str7b <- paste0("<strong># medication prescriptions:</strong> ", nrow(deduped_medications))
  str8b <- paste0("<strong># unique medication concepts:</strong> ", length(unique(deduped_medications[!is.na(medication_concept_name)]$medication_concept_name)))
  str9b <- paste0("<strong># measurements:</strong> ", nrow(deduped_measurements))
  str10b <- paste0("<strong># unique measurement concepts:</strong> ", length(unique(deduped_measurements[!is.na(measurement_concept_name)]$measurement_concept_name)))
  str11b <- paste0("<strong># devices:</strong> ", nrow(deduped_devices))
  str12b <- paste0("<strong># unique device concepts:</strong> ", length(unique(deduped_devices[!is.na(device_concept_name)]$device_concept_name)))
  strsb <- c(str1b, str2b, str3b, str4b, str5b, str6b, str7b, str8b, str9b, str10b, str11b, str12b)
  return(list(strsa, strsb))
}
#### format data for patient report
# Flatten all of a patient's clinical data into one chronological report
# table with a common schema: Date, Date_end, Type, Event, Value. Each
# modality is reduced to its date/name/value columns, de-duplicated,
# stripped of rows lacking a date or a concept name, renamed to the common
# schema and row-bound together.
# @param pt_data named list of data.tables from get_all_pt_data().
# @return data.table with columns Date, Date_end, Type, Event, Value.
generate_pt_report<-function(pt_data){
  # initialize master report table (defines the common schema)
  master_report=data.table(
    Date = as.Date(character()),
    Date_end = character(),
    Type = character(),
    Event = character(),
    Value = character()
  )
  # extract table-specific data
  observations_original = pt_data$Observations
  conditions_original = pt_data$Conditions
  procedures_original = pt_data$Procedures
  medications_original = pt_data$Medications
  measurements_original = pt_data$Measurements
  devices_original = pt_data$Devices
  ## format observations (point events: no end date)
  observations=observations_original[,c("observation_date","observation_concept_name", "value_as_number")]
  observations$Type = "Observation"
  observations$Date_end <- NA
  observations=observations[!duplicated(observations),]
  observations=observations[!is.na(observations$observation_date),]
  observations=observations[!is.na(observations$observation_concept_name),]
  # zero readings are treated as "no value"
  # NOTE(review): this subset-then-$<- replacement form relies on
  # data.table's `[<-` semantics — confirm it updates the underlying rows
  # on the installed data.table version.
  observations[value_as_number==0]$value_as_number <- NA
  observations=observations[,c("observation_date","Date_end","Type","observation_concept_name","value_as_number")]
  colnames(observations)=c("Date","Date_end","Type","Event","Value")
  ## format conditions (range events: start and end dates)
  conditions=conditions_original[,c("condition_start_date","condition_end_date","condition_concept_name","condition_source_value")]
  conditions$Type = "Condition"
  conditions=conditions[!duplicated(conditions),]
  conditions=conditions[!is.na(conditions$condition_start_date),]
  conditions=conditions[!is.na(conditions$condition_concept_name),]
  conditions=conditions[,c("condition_start_date","condition_end_date","Type","condition_concept_name","condition_source_value")]
  colnames(conditions)=c("Date","Date_end","Type","Event","Value")
  ## format procedures (point events)
  procedures=procedures_original[,c("procedure_date","procedure_concept_name","procedure_source_value")]
  procedures$Type = "Procedure"
  procedures$Date_end <- NA
  procedures=procedures[!duplicated(procedures),]
  procedures=procedures[!is.na(procedures$procedure_date),]
  procedures=procedures[!is.na(procedures$procedure_concept_name),]
  procedures=procedures[,c("procedure_date","Date_end","Type","procedure_concept_name","procedure_source_value")]
  colnames(procedures)=c("Date","Date_end","Type","Event","Value")
  ## format Medications (range events: exposure start/end)
  medications=medications_original[,c("drug_exposure_start_date","drug_exposure_end_date","medication_concept_name","dose_unit_source_value")]
  medications$Type = "Medication"
  medications=medications[!duplicated(medications),]
  medications=medications[!is.na(medications$drug_exposure_start_date),]
  medications=medications[!is.na(medications$medication_concept_name),]
  medications=medications[,c("drug_exposure_start_date","drug_exposure_end_date","Type","medication_concept_name","dose_unit_source_value")]
  colnames(medications)=c("Date","Date_end","Type","Event","Value")
  ## format Measurements (point events)
  measurements=measurements_original[,c("measurement_date","measurement_concept_name","value_as_number")]
  measurements$Type = "Measurement"
  measurements$Date_end <- NA
  measurements=measurements[!duplicated(measurements),]
  measurements=measurements[!is.na(measurements$measurement_date),]
  measurements=measurements[!is.na(measurements$measurement_concept_name),]
  measurements=measurements[,c("measurement_date","Date_end","Type","measurement_concept_name","value_as_number")]
  colnames(measurements)=c("Date","Date_end","Type","Event","Value")
  ## format Devices (range events: exposure start/end)
  devices=devices_original[,c("device_exposure_start_date","device_exposure_end_date", "device_concept_name","device_source_value")]
  devices$Type = "Device"
  devices=devices[!duplicated(devices),]
  devices=devices[!is.na(devices$device_exposure_start_date),]
  devices=devices[!is.na(devices$device_concept_name),]
  devices=devices[,c("device_exposure_start_date","device_exposure_end_date","Type","device_concept_name","device_source_value")]
  colnames(devices)=c("Date","Date_end","Type","Event","Value")
  ## rbind all data modalities together
  master_report=rbind(master_report,observations,conditions,procedures,medications,measurements,devices)
  # verify Events are characters
  master_report$Event = as.character(master_report$Event)
  # verify Dates are dates
  master_report$Date = as.Date(as.character(master_report$Date))
  master_report$Date_end = as.Date(as.character(master_report$Date_end),format="%Y-%m-%d")
  return(master_report)
}
### format data for multiplex timeline
# Convert the master patient report into the structure expected by the
# vis.js-style timeline widget: id, content, start, end, group, type, plus
# Value kept for display on click. Events whose end equals their start (or
# have no end) are rendered as points; events with a distinct end date
# become ranges.
# NOTE(review): the `[cond]$col <- value` replacement form relies on
# data.table's `[<-` semantics — confirm it updates rows as intended on
# the installed data.table version.
format_multiplex_timeline <- function(pt_data_report){
  multiplex_timeline <- pt_data_report
  multiplex_timeline$id = row.names(multiplex_timeline)  # widget item ids
  multiplex_timeline$type <- as.character(NA)
  multiplex_timeline = multiplex_timeline[,c("id","Event","Date","Date_end","Type","type", "Value")] # keep Value to display when clicked
  colnames(multiplex_timeline) = c("id","content","start","end","group","type","Value")
  # if end date same as start, set end to NA
  multiplex_timeline[start==end]$end <- NA
  # if end date is not NA, set type to range
  multiplex_timeline[!is.na(end)]$type <- "range"
  # otherwise set type to point
  multiplex_timeline[is.na(end)]$type <- "point"
  return(multiplex_timeline)
}
####################
### LOADING DATA ###
####################
# Wrapper for domain-specific getData functions (e.g., getObservations). Produces a list of tables for all relevant domains.
# Pull every clinical data modality for one patient. Thin wrapper around
# the per-domain getters; returns a named list of tables so downstream
# report/timeline code can address each modality by name.
# @param pt_id the patient's person_id.
# @return named list: Encounters, Observations, Conditions, Procedures,
#   Medications, Measurements, Devices.
get_all_pt_data <- function(pt_id) {
  list(
    Encounters   = getEncounters(pt_id),
    Observations = getObservations(pt_id),
    Conditions   = getConditions(pt_id),
    Procedures   = getProcedures(pt_id),
    Medications  = getMedications(pt_id),
    Measurements = getMeasurements(pt_id),
    Devices      = getDevices(pt_id)
  )
}
# modality specific get functions (utilized in get_all_pt_data)
# Load all visit_occurrence rows for one patient and resolve the visit /
# source / admitting / discharge concept ids to readable names via the
# data ontology (left joins; unresolved ids yield NA names).
# @param pt_id the patient's person_id.
# @return data.table of the patient's encounters with concept-name columns.
getEncounters <- function(pt_id) {
  queryStatement <- paste0('SELECT person_id, visit_occurrence_id, visit_concept_id, visit_start_date, visit_end_date, visit_source_concept_id, visit_source_value, admitting_source_concept_id, discharge_to_concept_id FROM visit_occurrence WHERE person_id = ', pt_id)
  # get visit data
  ptEncs <- sqlQuery(queryStatement)
  ptEncs <- data.table(ptEncs) # convert to data.table
  # convert NA source_concept_ids to 0 so the ontology merges keep the rows
  ptEncs[is.na(admitting_source_concept_id)]$admitting_source_concept_id <- 0
  ptEncs[is.na(discharge_to_concept_id)]$discharge_to_concept_id <- 0
  # merge in relevant information concept ids
  ptEncs <- merge(ptEncs,dataOntology[,c("concept_id","concept_name")], by.x="visit_concept_id", by.y="concept_id", all.x=TRUE)
  names(ptEncs)[names(ptEncs) == 'concept_name'] <- 'visit_concept' # rename column
  ptEncs <- ptEncs[,-"visit_concept_id"]
  ptEncs <- merge(ptEncs,dataOntology[,c("concept_id","concept_name")], by.x="visit_source_concept_id", by.y="concept_id", all.x=TRUE)
  names(ptEncs)[names(ptEncs) == 'concept_name'] <- 'visit_source_concept' # rename column
  ptEncs <- ptEncs[,-"visit_source_concept_id"]
  ptEncs <- merge(ptEncs,dataOntology[,c("concept_id","concept_name")], by.x="admitting_source_concept_id", by.y="concept_id", all.x=TRUE)
  names(ptEncs)[names(ptEncs) == 'concept_name'] <- 'admitting_concept' # rename column
  ptEncs <- ptEncs[,-"admitting_source_concept_id"]
  ptEncs <- merge(ptEncs,dataOntology[,c("concept_id","concept_name")], by.x="discharge_to_concept_id", by.y="concept_id", all.x=TRUE)
  names(ptEncs)[names(ptEncs) == 'concept_name'] <- 'discharge_concept' # rename column
  ptEncs <- ptEncs[,-"discharge_to_concept_id"]
  # normalize the start date for downstream date arithmetic/sorting
  ptEncs$visit_start_date <- as.Date(ptEncs$visit_start_date)
  return(ptEncs)
}
# Load all observation rows for one patient and resolve the observation,
# source, type and value concept ids to codes/names via the data ontology
# (left joins; unresolved ids yield NA names).
# @param pt_id the patient's person_id.
# @return data.table of the patient's observations with concept-name columns.
getObservations <- function(pt_id) {
  queryStatement <- paste0('SELECT person_id, observation_concept_id, observation_source_concept_id, observation_date, observation_type_concept_id, value_as_number, value_as_string, value_as_concept_id, visit_occurrence_id, observation_source_value, unit_source_value FROM observation WHERE person_id = ', pt_id)
  ptObsData <- sqlQuery(queryStatement)
  ptObsData <- data.table(ptObsData) # convert to data.table
  # obtain table specific ontology (Observation-domain concepts only)
  observationTableOntology <- dataOntology[domain_id=="Observation"]
  # format clinical data: main observation concept
  ptObsData <- merge(ptObsData, observationTableOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="observation_concept_id",by.y="concept_id",all.x=TRUE)
  names(ptObsData)[names(ptObsData) == 'concept_code'] <- 'observation_concept_code' # rename column
  names(ptObsData)[names(ptObsData) == 'concept_name'] <- 'observation_concept_name' # rename column
  names(ptObsData)[names(ptObsData) == 'vocabulary_id'] <- 'observation_concept_vocabulary' # rename column
  ptObsData <- ptObsData[,-"observation_concept_id"]
  # source concept as recorded in the original data
  ptObsData <- merge(ptObsData, observationTableOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="observation_source_concept_id",by.y="concept_id",all.x=TRUE)
  names(ptObsData)[names(ptObsData) == 'concept_code'] <- 'observation_source_code' # rename column
  names(ptObsData)[names(ptObsData) == 'concept_name'] <- 'observation_source_name' # rename column
  names(ptObsData)[names(ptObsData) == 'vocabulary_id'] <- 'observation_source_vocabulary' # rename column
  ptObsData <- ptObsData[,-"observation_source_concept_id"]
  # format metadata: observation type and coded value (full ontology lookup)
  ptObsData <- merge(ptObsData,dataOntology[,c("concept_id","concept_name")],by.x="observation_type_concept_id",by.y="concept_id", all.x=TRUE)
  names(ptObsData)[names(ptObsData) == 'concept_name'] <- 'observation_type' # rename column
  ptObsData <- ptObsData[,-"observation_type_concept_id"]
  ptObsData=merge(ptObsData,dataOntology[,c("concept_id","concept_name")],by.x="value_as_concept_id",by.y="concept_id", all.x=TRUE)
  names(ptObsData)[names(ptObsData) == 'concept_name'] <- 'value_concept' # rename column
  ptObsData <- ptObsData[,-"value_as_concept_id"]
  # normalize the observation date for downstream date handling
  ptObsData$observation_date <- as.Date(ptObsData$observation_date)
  return(ptObsData)
}
# Load all condition_occurrence rows for one patient and resolve the
# condition, source, type and status concept ids to codes/names via the
# data ontology (left joins; unresolved ids yield NA names).
# @param pt_id the patient's person_id.
# @return data.table of the patient's conditions with concept-name columns.
getConditions <- function(pt_id) {
  queryStatement <- paste0('SELECT person_id, condition_concept_id, condition_start_date, condition_end_date, visit_occurrence_id, condition_type_concept_id, condition_source_value, condition_source_concept_id, condition_status_concept_id FROM condition_occurrence WHERE person_id = ', pt_id)
  ptCondData <- sqlQuery(queryStatement)
  ptCondData <- data.table(ptCondData) # convert to data.table
  # obtain table specific ontology; grep also catches compound domains
  # whose names contain "Condition"
  conditionTableOntology <- dataOntology[grep("Condition",domain_id)]
  # format clinical data: main condition concept
  ptCondData <- merge(ptCondData, conditionTableOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="condition_concept_id",by.y="concept_id",all.x=TRUE)
  names(ptCondData)[names(ptCondData) == 'concept_code'] <- 'condition_concept_code' # rename column
  names(ptCondData)[names(ptCondData) == 'concept_name'] <- 'condition_concept_name' # rename column
  names(ptCondData)[names(ptCondData) == 'vocabulary_id'] <- 'condition_concept_vocabulary' # rename column
  ptCondData <- ptCondData[,-"condition_concept_id"]
  # source concept as recorded in the original data
  ptCondData <- merge(ptCondData, conditionTableOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="condition_source_concept_id",by.y="concept_id",all.x=TRUE)
  names(ptCondData)[names(ptCondData) == 'concept_code'] <- 'condition_source_code' # rename column
  names(ptCondData)[names(ptCondData) == 'concept_name'] <- 'condition_source_name' # rename column
  names(ptCondData)[names(ptCondData) == 'vocabulary_id'] <- 'condition_source_vocabulary' # rename column
  ptCondData <- ptCondData[,-"condition_source_concept_id"]
  # format metadata: condition type and status (full ontology lookup)
  ptCondData <- merge(ptCondData,dataOntology[,c("concept_id","concept_name")],by.x="condition_type_concept_id",by.y="concept_id", all.x=TRUE)
  names(ptCondData)[names(ptCondData) == 'concept_name'] <- 'condition_type' # rename column
  ptCondData <- ptCondData[,-"condition_type_concept_id"]
  ptCondData <- merge(ptCondData,dataOntology[,c("concept_id","concept_name")],by.x="condition_status_concept_id",by.y="concept_id", all.x=TRUE)
  names(ptCondData)[names(ptCondData) == 'concept_name'] <- 'condition_status_type' # rename column
  ptCondData <- ptCondData[,-"condition_status_concept_id"]
  # normalize the start date for downstream date handling
  ptCondData$condition_start_date <- as.Date(ptCondData$condition_start_date)
  return(ptCondData)
}
getProcedures <- function(pt_id) {
  # Retrieve one patient's procedure_occurrence records and annotate the
  # concept-id columns with readable codes/names/vocabularies from the
  # global data ontology.
  #
  # Args:
  #   pt_id: OMOP person_id to query.
  # Returns:
  #   data.table of procedure rows with the *_concept_id columns replaced
  #   by concept code/name/vocabulary columns; procedure date as Date.
  queryStatement <- paste0('SELECT person_id, procedure_concept_id, procedure_date, quantity, visit_occurrence_id, procedure_type_concept_id, procedure_source_value, procedure_source_concept_id FROM procedure_occurrence WHERE person_id = ', pt_id)
  procData <- data.table(sqlQuery(queryStatement))
  # ontology subset for the Procedure domain
  procOntology <- dataOntology[domain_id == "Procedure"]
  # annotate the standard concept id, then drop the raw id
  procData <- merge(procData,
                    procOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "procedure_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(procData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("procedure_concept_code", "procedure_concept_name", "procedure_concept_vocabulary"))
  procData <- procData[, -"procedure_concept_id"]
  # annotate the source concept id, then drop the raw id
  procData <- merge(procData,
                    procOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "procedure_source_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(procData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("procedure_source_code", "procedure_source_name", "procedure_source_vocabulary"))
  procData <- procData[, -"procedure_source_concept_id"]
  # metadata: procedure type concept
  procData <- merge(procData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "procedure_type_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(procData, "concept_name", "procedure_type")
  procData <- procData[, -"procedure_type_concept_id"]
  procData$procedure_date <- as.Date(procData$procedure_date)
  return(procData)
}
getMedications <- function(pt_id) {
  # Retrieve one patient's drug_exposure records and annotate the
  # concept-id columns with readable codes/names/vocabularies from the
  # global data ontology.
  #
  # Args:
  #   pt_id: OMOP person_id to query.
  # Returns:
  #   data.table of medication rows with the *_concept_id columns replaced
  #   by concept code/name/vocabulary columns; exposure start date as Date.
  queryStatement <- paste0('SELECT person_id, drug_concept_id, drug_exposure_start_date, drug_exposure_end_date, drug_type_concept_id, stop_reason, refills, quantity, days_supply, sig, route_concept_id, dose_unit_source_value, visit_occurrence_id, drug_source_value, drug_source_concept_id, route_source_value FROM drug_exposure WHERE person_id = ', pt_id)
  medsData <- data.table(sqlQuery(queryStatement))
  # ontology subset for the Drug domain
  drugOntology <- dataOntology[domain_id == "Drug"]
  # annotate the standard concept id, then drop the raw id
  medsData <- merge(medsData,
                    drugOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "drug_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(medsData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("medication_concept_code", "medication_concept_name", "medication_concept_vocabulary"))
  medsData <- medsData[, -"drug_concept_id"]
  # annotate the source concept id, then drop the raw id
  medsData <- merge(medsData,
                    drugOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "drug_source_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(medsData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("medication_source_code", "medication_source_name", "medication_source_vocabulary"))
  medsData <- medsData[, -"drug_source_concept_id"]
  # metadata: drug type and administration route concepts
  medsData <- merge(medsData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "drug_type_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(medsData, "concept_name", "drug_type")
  medsData <- medsData[, -"drug_type_concept_id"]
  medsData <- merge(medsData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "route_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(medsData, "concept_name", "route_concept")
  medsData <- medsData[, -"route_concept_id"]
  medsData$drug_exposure_start_date <- as.Date(medsData$drug_exposure_start_date)
  return(medsData)
}
getMeasurements <- function(pt_id) {
  # Retrieve one patient's measurement records and annotate the concept-id
  # columns with readable codes/names/vocabularies from the global data
  # ontology.
  #
  # Args:
  #   pt_id: OMOP person_id to query.
  # Returns:
  #   data.table of measurement rows with the *_concept_id columns replaced
  #   by concept code/name/vocabulary columns; measurement date as Date.
  queryStatement <- paste0('SELECT person_id, measurement_concept_id, measurement_date, measurement_type_concept_id, value_as_number, value_as_concept_id, unit_concept_id, visit_occurrence_id, measurement_source_value, measurement_source_concept_id FROM measurement WHERE person_id = ', pt_id)
  measData <- data.table(sqlQuery(queryStatement))
  # ontology subset for the Measurement domain
  measOntology <- dataOntology[domain_id == "Measurement"]
  # annotate the standard concept id, then drop the raw id
  measData <- merge(measData,
                    measOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "measurement_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(measData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("measurement_concept_code", "measurement_concept_name", "measurement_concept_vocabulary"))
  measData <- measData[, -"measurement_concept_id"]
  # annotate the source concept id, then drop the raw id
  measData <- merge(measData,
                    measOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                    by.x = "measurement_source_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(measData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("measurement_source_code", "measurement_source_name", "measurement_source_vocabulary"))
  measData <- measData[, -"measurement_source_concept_id"]
  # metadata: measurement type, coded value, and unit concepts
  measData <- merge(measData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "measurement_type_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(measData, "concept_name", "measurement_type")
  measData <- measData[, -"measurement_type_concept_id"]
  measData <- merge(measData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "value_as_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(measData, "concept_name", "value_concept")
  measData <- measData[, -"value_as_concept_id"]
  measData <- merge(measData, dataOntology[, c("concept_id", "concept_name")],
                    by.x = "unit_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(measData, "concept_name", "unit_concept")
  measData <- measData[, -"unit_concept_id"]
  measData$measurement_date <- as.Date(measData$measurement_date)
  return(measData)
}
getDevices <- function(pt_id) {
  # Retrieve one patient's device_exposure records and annotate the
  # concept-id columns with readable codes/names/vocabularies from the
  # global data ontology.
  #
  # Args:
  #   pt_id: OMOP person_id to query.
  # Returns:
  #   data.table of device rows with the *_concept_id columns replaced by
  #   concept code/name/vocabulary columns; exposure start date as Date.
  queryStatement <- paste0('SELECT person_id, device_concept_id, device_exposure_start_date, device_exposure_end_date, device_type_concept_id, device_source_value, visit_occurrence_id, device_source_concept_id FROM device_exposure WHERE person_id = ', pt_id)
  deviceData <- data.table(sqlQuery(queryStatement))
  # ontology subset for the Device domain
  deviceOntology <- dataOntology[grep("Device", domain_id)]
  # annotate the standard concept id, then drop the raw id
  deviceData <- merge(deviceData,
                      deviceOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                      by.x = "device_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(deviceData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("device_concept_code", "device_concept_name", "device_concept_vocabulary"))
  deviceData <- deviceData[, -"device_concept_id"]
  # annotate the source concept id, then drop the raw id
  deviceData <- merge(deviceData,
                      deviceOntology[, c("concept_id", "vocabulary_id", "concept_code", "concept_name")],
                      by.x = "device_source_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(deviceData,
           c("concept_code", "concept_name", "vocabulary_id"),
           c("device_source_code", "device_source_name", "device_source_vocabulary"))
  deviceData <- deviceData[, -"device_source_concept_id"]
  # metadata: device type concept
  deviceData <- merge(deviceData, dataOntology[, c("concept_id", "concept_name")],
                      by.x = "device_type_concept_id", by.y = "concept_id", all.x = TRUE)
  setnames(deviceData, "concept_name", "device_type")
  deviceData <- deviceData[, -"device_type_concept_id"]
  deviceData$device_exposure_start_date <- as.Date(deviceData$device_exposure_start_date)
  return(deviceData)
}
#####################
### FIND PATIENTS ###
#####################
findPatients <- function(selected_terms, func_type, search_strat) {
  # Identify patients matching the selected vocabulary:code criteria.
  #
  # Args:
  #   selected_terms: character vector of "VOCABULARY:CODE" strings.
  #   func_type: "or" (union of patients across terms) or
  #              "and" (intersection of patients across terms).
  #   search_strat: "direct" (match <table>_source_concept_id as entered) or
  #                 "mapped" (expand to standard-concept synonyms and
  #                 descendants, then match <table>_concept_id).
  # Returns:
  #   vector of person_ids satisfying the criteria.
  dataCriteriaMapped <- unpackAndMap(selected_terms)
  if (search_strat == "direct") {
    useSource <- "_source"  # search <table>_source_concept_id columns
    searchTable <- identifyTablesDirect(dataCriteriaMapped)
  } else if (search_strat == "mapped") {
    useSource <- ""  # search standard <table>_concept_id columns
    dataCodesFormatted <- paste0(dataCriteriaMapped$concept_id, collapse = ",")
    # get common ontology synonyms
    synonymDataFiltered <- identifySynonyms(dataCodesFormatted)
    synonymData <- merge(synonymDataFiltered[, "concept_id_2"], dataOntology[, c("concept_id", "domain_id", "vocabulary_id")], by.x = "concept_id_2", by.y = "concept_id")
    colnames(synonymData) <- c("concept_id", "domain_id", "vocabulary_id")
    # keep original codes in the ancestor query (b/c of scenarios with ATC)
    synonymCodes <- paste(union(dataCriteriaMapped$concept_id, synonymDataFiltered$concept_id_2), collapse = ",")
    # get descendants
    mappingDataInfo <- identifyMappings(synonymCodes)
    mappingData <- mappingDataInfo[, c("descendant_concept_id", "domain_id", "vocabulary_id")]
    colnames(mappingData) <- c("concept_id", "domain_id", "vocabulary_id")
    conceptsCombined <- rbind(dataCriteriaMapped[, c("concept_id", "domain_id", "vocabulary_id")], synonymData)
    conceptsCombined <- rbind(conceptsCombined, mappingData)
    conceptsCombined <- conceptsCombined[!duplicated(conceptsCombined), ]
    # get tables to search for mapped concepts
    searchTable <- identifyTablesMapped(conceptsCombined)
  }
  # query each clinical table that has any applicable codes
  if (length(searchTable$Condition) > 0) {
    condition_codes <- paste(searchTable$Condition, collapse = ",")
    pts_condition <- searchCondition(useSource, condition_codes)
  } else {
    pts_condition <- NULL
  }
  if (length(searchTable$Observation) > 0) {
    observation_codes <- paste(searchTable$Observation, collapse = ",")
    pts_observation <- searchObservation(useSource, observation_codes)
  } else {
    pts_observation <- NULL
  }
  if (length(searchTable$Measurement) > 0) {
    measurement_codes <- paste(searchTable$Measurement, collapse = ",")
    pts_measurement <- searchMeasurement(useSource, measurement_codes)
  } else {
    pts_measurement <- NULL
  }
  if (length(searchTable$Drug) > 0) {
    drug_codes <- paste(searchTable$Drug, collapse = ",")
    pts_drug <- searchDrug(useSource, drug_codes)
  } else {
    pts_drug <- NULL
  }
  if (length(searchTable$Device) > 0) {
    # BUG FIX: previously built device_codes from searchTable$Drug, so device
    # searches queried drug codes instead of device codes.
    device_codes <- paste(searchTable$Device, collapse = ",")
    pts_device <- searchDevice(useSource, device_codes)
  } else {
    pts_device <- NULL
  }
  if (length(searchTable$Procedure) > 0) {
    procedure_codes <- paste(searchTable$Procedure, collapse = ",")
    pts_procedure <- searchProcedure(useSource, procedure_codes)
  } else {
    pts_procedure <- NULL
  }
  # combine per-table hits according to the requested logic
  if (func_type == "or") {
    patient_list <- identifyPatientsOR(pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure)
  } else if (func_type == "and") {
    # NOTE(review): synonymDataFiltered/mappingDataInfo are only defined when
    # search_strat == "mapped"; "direct" + "and" would error here — confirm
    # callers never use that combination.
    patient_list <- identifyPatientsAND(dataCriteriaMapped, synonymDataFiltered, mappingDataInfo, pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure)
  }
  return(patient_list)
}
### specific table search functions (used in Find Patients function)
searchCondition <- function(useSource, codes) {
  # Find distinct (person_id, condition_concept_id) rows in
  # condition_occurrence whose condition{useSource}_concept_id is in the
  # comma-separated `codes` list. `useSource` is "" or "_source".
  conditionQuery <- paste0('SELECT person_id, condition_concept_id FROM condition_occurrence WHERE condition', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(conditionQuery)))
}
searchObservation <- function(useSource, codes) {
  # Find distinct (person_id, observation_concept_id) rows in observation
  # whose observation{useSource}_concept_id is in the comma-separated
  # `codes` list. `useSource` is "" or "_source".
  observationQuery <- paste0('SELECT person_id, observation_concept_id FROM observation WHERE observation', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(observationQuery)))
}
searchMeasurement <- function(useSource, codes) {
  # Find distinct (person_id, measurement_concept_id) rows in measurement
  # whose measurement{useSource}_concept_id is in the comma-separated
  # `codes` list. `useSource` is "" or "_source".
  measurementQuery <- paste0('SELECT person_id, measurement_concept_id FROM measurement WHERE measurement', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(measurementQuery)))
}
searchDrug <- function(useSource, codes) {
  # Find distinct (person_id, drug_concept_id) rows in drug_exposure whose
  # drug{useSource}_concept_id is in the comma-separated `codes` list.
  # `useSource` is "" or "_source".
  drugQuery <- paste0('SELECT person_id, drug_concept_id FROM drug_exposure WHERE drug', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(drugQuery)))
}
searchDevice <- function(useSource, codes) {
  # Find distinct (person_id, device_concept_id) rows in device_exposure
  # whose device{useSource}_concept_id is in the comma-separated `codes`
  # list. `useSource` is "" or "_source".
  deviceQuery <- paste0('SELECT person_id, device_concept_id FROM device_exposure WHERE device', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(deviceQuery)))
}
searchProcedure <- function(useSource, codes) {
  # Find distinct (person_id, procedure_concept_id) rows in
  # procedure_occurrence whose procedure{useSource}_concept_id is in the
  # comma-separated `codes` list. `useSource` is "" or "_source".
  procedureQuery <- paste0('SELECT person_id, procedure_concept_id FROM procedure_occurrence WHERE procedure', useSource, '_concept_id IN (', codes, ') ')
  # unique() on a data.table drops duplicate rows
  unique(data.table(sqlQuery(procedureQuery)))
}
| /PatientExploreR-OMOP_functions.R | permissive | BenGlicksberg/PatientExploreR | R | false | false | 48,196 | r | suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(DT))
suppressPackageStartupMessages(library(purrr))
# sets standard concepts for use with mapped vs. direct search options
standard_concepts <- data.table("domain_type"= c("Measurement","Condition","Drug","Observation","Device","Procedure"),"concepts"= c("LOINC,SNOMED,CPT4","SNOMED","RxNorm,CPT4,NDC","SNOMED,CPT4,LOINC,HCPCS","SNOMED,HCPCS","SNOMED,CPT4,HCPCS"))
##################
### CONNECTION ###
##################
setConnectFunction <- function(username, password, host, dbname, port) {
connectString <- paste0("dbname='",dbname,"'")
if (username != ""){
connectString <- paste0(connectString, ", user='", username,"'")
}
if (password != ""){
connectString <- paste0(connectString, ", password='", password,"'")
}
if (host != ""){
connectString <- paste0(connectString, ", host='", host,"'")
}
if (port != ""){
connectString <- paste0(connectString, ", port= ", as.integer(port))
}
fullConnectString <- paste0('DBI::dbConnect(drv, ', connectString , ')')
return(fullConnectString)
}
checkOMOPconnection <- function(driver, username, password, host, dbname, port) {
status<- tryCatch(
{
if (driver=="mysql") {
drv <- dbDriver("MySQL")
fullConnectString <- setConnectFunction(username, password, host, dbname, port)
con <- eval(parse(text = fullConnectString))
} else {
con <- DatabaseConnector::connect(dbms = driver,
server = host,
user = username,
password = password,
schema = dbname,
port = port)
}
},
warning = function(w) {
# ignore
},
error = function(e) {
# ignore
}
)
if(!is.null(status)){
out <- TRUE
if (driver=="mysql") {
on.exit(dbDisconnect(con))
} else {
on.exit(DatabaseConnector::disconnect(con))
}
}else{
out <- FALSE
}
return(out)
}
checkOMOPtables <- function(driver, username, password, host, dbname, port) {
necessaryTables = c("concept","concept_ancestor","concept_relationship","condition_occurrence","death","device_exposure","drug_exposure","measurement","observation","person","procedure_occurrence","visit_occurrence")
if (driver=="mysql") {
drv <- dbDriver("MySQL")
fullConnectString <- setConnectFunction(username, password, host, dbname, port)
con <- eval(parse(text = fullConnectString))
} else {
# creating connection object using DatabaseConnector
con <- DatabaseConnector::connect(dbms = driver,
server = host,
user = username,
password = password,
schema = dbname,
port = port)
}
foundTablesData <- tolower(dbListTables(con))
if (driver=="mysql") {
on.exit(dbDisconnect(con))
} else {
on.exit(DatabaseConnector::disconnect(con))
}
missingTables <- list()
emptyTables <-list()
for (tbls in necessaryTables) {
if (!tbls %in% foundTablesData) { # check if table exists
missingTables <- c(missingTables, tbls)
} else { # check if any data in found table
if (driver=="mysql") {
dataCheckQuery <- paste0("SELECT * FROM " , tbls , " LIMIT 1;")
} else {
dataCheckQuery <- paste0("SELECT TOP 1 * FROM " , tbls, ";")
}
dataCheck <- sqlQuery(dataCheckQuery)
if (nrow(dataCheck)==0) {
emptyTables <- c(emptyTables, tbls)
}
}
}
return(list("missingTables" = missingTables, "emptyTables" = emptyTables))
}
#### check if patient exists for search function
check_if_pt_exists<- function(ptid){
found=FALSE
if(ptid%in% pts_demographics$person_id){
found = TRUE
}
return(found)
}
##################
### PRE - LOAD ###
##################
# Data Ontology
make_data_ontology <- function(){
if (file.exists(paste0(getOption("currentPath"), "dataOntology.rds")) ) {
# if Data Ontology exists, load it
dataOntology = readRDS(paste0(getOption("currentPath"), "dataOntology.rds"))
}else{
# if not, create it, then save
conceptQuery <- "SELECT concept_id, concept_name, domain_id, vocabulary_id, concept_class_id, concept_code FROM concept WHERE (invalid_reason = '' OR invalid_reason IS NULL);"
dataOntology <- sqlQuery(conceptQuery)
dataOntology <- data.table(dataOntology)
dataOntology$concept_name <- enc2utf8(dataOntology$concept_name)
saveRDS(dataOntology, paste0(getOption("currentPath"), "dataOntology.rds")) # save Data Ontology
}
return(dataOntology)
}
# Demographic data
## pre-load demographic data for all patients to save in memory to map to cohort from searches
getDemographics <-function() { # patient list will restrict search
queryStatement <- "SELECT person_id, year_of_birth, gender_concept_id, ethnicity_concept_id, race_concept_id FROM person"
deathqueryStatement <-"SELECT person_id, death_date FROM death"
# first get main patient data
ptDemo <- sqlQuery(queryStatement)
ptDemo <- data.table(ptDemo) # convert to data.table
current_year <- as.numeric(format(Sys.Date(),"%Y")) # get current year to calculate age
ptDemo$age <- current_year - ptDemo$year_of_birth # calculate age
# map concepts to reference table
ptDemo <- merge(ptDemo, dataOntology[domain_id=="Gender",c("concept_id","concept_name")], by.x ="gender_concept_id", by.y = "concept_id" ,all.x=T) # Gender
names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Gender' # rename column
ptDemo=markNAasUnknown(ptDemo,"Gender")
ptDemo <- merge(ptDemo, dataOntology[domain_id=="Race",c("concept_id","concept_name")], by.x ="race_concept_id", by.y = "concept_id" ,all.x=T) # Race
names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Race' # rename column
ptDemo=markNAasUnknown(ptDemo,"Race")
ptDemo <- merge(ptDemo, dataOntology[domain_id=="Ethnicity",c("concept_id","concept_name")], by.x ="ethnicity_concept_id", by.y = "concept_id" ,all.x=T) # Ethnicity
names(ptDemo)[names(ptDemo) == 'concept_name'] <- 'Ethnicity' # rename column
ptDemo <- markNAasUnknown(ptDemo,"Ethnicity")
### clean up extra columns
ptDemo <- ptDemo[,-c("ethnicity_concept_id","race_concept_id","gender_concept_id")]
# add in death date
ptDeath <- sqlQuery(deathqueryStatement)
ptDeath <- data.table(ptDeath) # convert to data.table
# merge with patient data
ptDemo <- merge(ptDemo, ptDeath,by="person_id",all.x=T)
# mark Alive/Deceased
ptDemo$Status <- ifelse(is.na(ptDemo$death_date),"Alive","Deceased")
return(ptDemo)
}
####################
#### FORMATTING ####
####################
## unpack vocabularies and codes for search function
unpackAndMap <- function(vocab_term_list) {
vocabularies <- str_split(vocab_term_list, ":") %>% map_chr(`[`, 1)
codes <- str_split(vocab_term_list, ":") %>% map_chr(`[`, 2)
# # match to one another
dataCriteria <- data.table::data.table(vocabularies = vocabularies, codes = codes)
# # map inclusion criteria to dataOntology
dataCriteriaMapped <- merge(dataCriteria, dataOntology, by.x= "codes", by.y = "concept_code")
dataCriteriaMapped <- dataCriteriaMapped[vocabularies==vocabulary_id]
return(dataCriteriaMapped)
}
# for 'Mapped' straegy; map input concept codes to common ontology
identifySynonyms <- function(codesFormatted) {
synonymQuery <- paste0('SELECT concept_id_1, concept_id_2, relationship_id, invalid_reason FROM concept_relationship WHERE concept_id_1 IN (',codesFormatted,');')
synonymData <- sqlQuery(synonymQuery)
synonymData <- data.table::data.table(synonymData)
synonymData <- synonymData[invalid_reason == ""]
synonymData <- synonymData[,-"invalid_reason"]
# check for "Maps to" or "%- RxNorm%" or "%- SNOMED%" | standard concepts
synonymDataFiltered <- synonymData[(relationship_id == "Maps to") | (grepl("- RxNorm",relationship_id)) | (grepl("- SNOMED",relationship_id)) ]
return(synonymDataFiltered)
}
# for 'Mapped' straegy; map input concept codes (from common ontology) to common ontology descendants
identifyMappings <- function(synonymCodes) {
mappingQuery <- paste0('SELECT ancestor_concept_id, descendant_concept_id FROM concept_ancestor A WHERE A.ancestor_concept_id IN (', synonymCodes,' );')
mappingData <- sqlQuery(mappingQuery)
mappingData <- data.table::data.table(mappingData)
mappingDataInfo <- merge(mappingData,dataOntology, by.x = "descendant_concept_id", by.y = "concept_id")
return(mappingDataInfo)
}
# identify tables to seach for concepts of interest (direct strategy)
identifyTablesDirect <- function(criteriaTable) {
searchTable = list()
for(d in unique(standard_concepts$domain_type)){ # scan through all domain types
mappingData = criteriaTable[domain_id == d]
mappingCodes = mappingData[domain_id == d]$concept_id
searchTable[[d]] <- mappingCodes # compile codes per domain type into one table
}
return(searchTable)
}
# identify tables to seach for concepts of interest (mapped strategy)
identifyTablesMapped <- function(mappingDataInfo) {
searchTable = list()
for(d in unique(standard_concepts$domain_type)) { # scan through all domain types
mappingDataInfoFiltered <- mappingDataInfo[domain_id==d]
mappingDataInfoFiltered <- mappingDataInfoFiltered[(grep(gsub(",","|",standard_concepts[domain_type==d,concepts]),vocabulary_id))] # map to common concepts specifically used to the domain
mappingCodes <- mappingDataInfoFiltered$concept_id
searchTable[[d]] <- mappingCodes
}
return(searchTable)
}
### identifyPatients based on function
# function = OR (union)
identifyPatientsOR <- function(pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure) {
patient_list=c()
if (!is.null(pts_condition)) {
patient_list = union(patient_list, unique(pts_condition$person_id))
}
if (!is.null(pts_observation)) {
patient_list = union(patient_list, unique(pts_observation$person_id))
}
if (!is.null(pts_measurement)) {
patient_list = union(patient_list, unique(pts_measurement$person_id))
}
if (!is.null(pts_device)) {
patient_list = union(patient_list, unique(pts_device$person_id))
}
if (!is.null(pts_drug)) {
patient_list = union(patient_list, unique(pts_drug$person_id))
}
if (!is.null(pts_procedure)) {
patient_list = union(patient_list, unique(pts_procedure$person_id))
}
return(patient_list)
}
# function = AND (intersect)
# To identify overlapping patients, we have to backmap the descendant terms to the original concepts
identifyPatientsAND <- function(criteriaMapped, synonymDataFiltered, mappingDataInfo, pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure) {
names(mappingDataInfo)[names(mappingDataInfo) == 'vocabulary_id'] <- 'mapped_vocabulary_id'
names(mappingDataInfo)[names(mappingDataInfo) == 'concept_name'] <- 'mapped_concept_name'
synonymMapped <- merge(mappingDataInfo[,c("descendant_concept_id","ancestor_concept_id","mapped_vocabulary_id","mapped_concept_name")], synonymDataFiltered[,c("concept_id_1","concept_id_2")], by.x = "ancestor_concept_id", by.y = "concept_id_2", allow.cartesian=TRUE)
synonymMapped <- synonymMapped[!duplicated(synonymMapped)]
combinedMapped <- merge(synonymMapped, criteriaMapped, by.x = "concept_id_1", by.y = "concept_id", allow.cartesian=TRUE)
combinedMapped <- combinedMapped[!duplicated(combinedMapped)]
combinedDirect <- merge(mappingDataInfo, criteriaMapped, by.x = "ancestor_concept_id", by.y = "concept_id", allow.cartesian=TRUE)
combinedDirect <- combinedDirect[!duplicated(combinedDirect)]
### derive patient list by concept_codes
# create code dictionary per original concept input
# initializepatient_list
unique_codes <- unique(criteriaMapped$codes)
code_map = list()
patient_list = list()
for(c in unique_codes) {
seed_codes = paste(criteriaMapped[codes == c]$concept_id,collapse=",")
code_map[[c]] <- c(seed_codes) # initialize list with original concept code (i.e. in case of ATC category)
code_map[[c]] <- c(code_map[[c]], combinedDirect[ancestor_concept_id %in% seed_codes]$descendant_concept_id) # add in direct mapped descendants
code_map[[c]] <- c(code_map[[c]], combinedMapped[concept_id_1 %in% seed_codes]$descendant_concept_id) # add in synonym codes and descendants
patient_list[[c]] <- c()
}
if (!is.null(pts_condition)) { #Condition
condition_codes <- unique(criteriaMapped[domain_id=="Condition"]$codes)
for(c in condition_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_condition[condition_concept_id %in% code_map[[c]]]$person_id)
}
}
if (!is.null(pts_observation)) { #Observation
observation_codes <- unique(criteriaMapped[domain_id=="Observation"]$codes)
for(c in observation_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_observation[observation_concept_id %in% code_map[[c]]]$person_id)
}
}
if (!is.null(pts_measurement)) { #Measurement
measurement_codes <- unique(criteriaMapped[domain_id=="Measurement"]$codes)
for(c in measurement_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_measurement[measurement_concept_id %in% code_map[[c]]]$person_id)
}
}
if (!is.null(pts_device)) {#Device
device_codes <- unique(criteriaMapped[domain_id=="Device"]$codes)
for(c in device_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_device[device_concept_id %in% code_map[[c]]]$person_id)
}
}
if (!is.null(pts_drug)) { #Drug
drug_codes = unique(criteriaMapped[domain_id=="Drug"]$codes)
for(c in drug_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_drug[drug_concept_id %in% code_map[[c]]]$person_id)
}
}
if (!is.null(pts_procedure)) {#Procedure
procedure_codes <- unique(criteriaMapped[domain_id=="Procedure"]$codes)
for(c in procedure_codes) {
patient_list[[c]] <- union(patient_list[[c]], pts_procedure[procedure_concept_id %in% code_map[[c]]]$person_id)
}
}
# get intersected list
patient_list_intersected = Reduce(intersect,patient_list)
return(patient_list_intersected)
}
### mark any empty fields as Unknown
markNAasUnknown <- function(tbl, ColToUse) {
if (ColToUse %in% colnames(tbl)) {
if (any(is.na(tbl[is.na(get(ColToUse))]))) {
missing_rows=tbl[is.na(get(ColToUse))]
tbl[is.na(get(ColToUse)),eval(ColToUse):="Unknown"]
}
}
return(tbl)
}
#### generate patient background and summary for report header
generate_pt_background <- function(pt_background) {
  # Build the HTML-formatted demographic lines shown in the report header.
  # Returns a list of six strings: status, age, age of death, ethnicity,
  # race and gender. Age of death is NA when the patient has no death date.
  # NOTE: year() is assumed to come from lubridate, loaded by the app.
  if (is.na(pt_background$death_date)) {
    age_of_death <- NA
  } else {
    age_of_death <- as.numeric(year(as.Date(pt_background$death_date))) - as.numeric(pt_background$year_of_birth)
  }
  list(
    paste0("<strong>Status:</strong> ", pt_background$Status),
    paste0("<strong>Age:</strong> ", pt_background$age),
    paste0("<strong>Age of Death:</strong> ", age_of_death),
    paste0("<strong>Ethnicity:</strong> ", pt_background$Ethnicity),
    paste0("<strong>Race:</strong> ", pt_background$Race),
    paste0("<strong>Gender:</strong> ", pt_background$Gender)
  )
}
generate_pt_summary <- function(pt_data) {
  # Build two vectors of HTML-formatted strings for the report header:
  #   [[1]] encounter-level statistics, [[2]] per-domain record/concept counts.
  # `pt_data` is the named list returned by get_all_pt_data().
  encounters <- pt_data$Encounters
  observations <- pt_data$Observations
  conditions <- pt_data$Conditions
  procedures <- pt_data$Procedures
  medications <- pt_data$Medications
  measurements <- pt_data$Measurements
  devices <- pt_data$Devices
  # count distinct per-visit events, not raw rows
  deduped_encounters <- unique(encounters[, c("visit_occurrence_id", "visit_concept")])
  deduped_observations <- unique(observations[, c("visit_occurrence_id", "observation_concept_name")])
  deduped_conditions <- unique(conditions[, c("visit_occurrence_id", "condition_concept_name")])
  deduped_procedures <- unique(procedures[, c("visit_occurrence_id", "procedure_concept_name")])
  deduped_medications <- unique(medications[, c("visit_occurrence_id", "medication_concept_name")])
  deduped_measurements <- unique(measurements[, c("visit_occurrence_id", "measurement_concept_name")])
  deduped_devices <- unique(devices[, c("visit_occurrence_id", "device_concept_name")])
  # number of distinct non-NA values in a column (works on data.frame and data.table)
  n_unique <- function(x) length(unique(x[!is.na(x)]))
  # order() drops NA dates, unlike min()/max() without na.rm
  earliest_date <- as.Date(encounters$visit_start_date[order(encounters$visit_start_date, decreasing = FALSE)[1]])
  recent_date <- as.Date(encounters$visit_start_date[order(encounters$visit_start_date, decreasing = TRUE)[1]])
  str1a <- paste0("<strong>Earliest encounter:</strong> ", earliest_date)
  str2a <- paste0("<strong>Most recent encounter:</strong> ", recent_date)
  str3a <- paste0("<strong># unique encounter types:</strong> ", length(unique(deduped_encounters$visit_concept)))
  str4a <- paste0("<strong># Encounters:</strong> ", nrow(deduped_encounters))
  str5a <- paste0("<strong># Outpatient encounters:</strong> ", nrow(deduped_encounters[which(deduped_encounters$visit_concept == "Outpatient Visit"), ]))
  # BUG FIX: previously this tested the nonexistent column Encounter_Is_Inpatient
  # (deduped_encounters only has visit_occurrence_id/visit_concept), so the
  # inpatient count was always 0; compare visit_concept as the outpatient line does.
  str6a <- paste0("<strong># Inpatient encounters:</strong> ", nrow(deduped_encounters[which(deduped_encounters$visit_concept == "Inpatient Visit"), ]))
  strsa <- c(str1a, str2a, str3a, str4a, str5a, str6a)
  str1b <- paste0("<strong># observations:</strong> ", nrow(deduped_observations))
  str2b <- paste0("<strong># unique observation concepts:</strong> ", n_unique(deduped_observations$observation_concept_name))
  str3b <- paste0("<strong># conditions:</strong> ", nrow(deduped_conditions))
  str4b <- paste0("<strong># unique condition concepts:</strong> ", n_unique(deduped_conditions$condition_concept_name))
  str5b <- paste0("<strong># procedures:</strong> ", nrow(deduped_procedures))
  str6b <- paste0("<strong># unique procedure concepts:</strong> ", n_unique(deduped_procedures$procedure_concept_name))
  str7b <- paste0("<strong># medication prescriptions:</strong> ", nrow(deduped_medications))
  str8b <- paste0("<strong># unique medication concepts:</strong> ", n_unique(deduped_medications$medication_concept_name))
  str9b <- paste0("<strong># measurements:</strong> ", nrow(deduped_measurements))
  str10b <- paste0("<strong># unique measurement concepts:</strong> ", n_unique(deduped_measurements$measurement_concept_name))
  str11b <- paste0("<strong># devices:</strong> ", nrow(deduped_devices))
  str12b <- paste0("<strong># unique device concepts:</strong> ", n_unique(deduped_devices$device_concept_name))
  strsb <- c(str1b, str2b, str3b, str4b, str5b, str6b, str7b, str8b, str9b, str10b, str11b, str12b)
  return(list(strsa, strsb))
}
#### format data for patient report
generate_pt_report<-function(pt_data){
# Flatten one patient's clinical data (from get_all_pt_data) into a single
# event table with columns Date, Date_end, Type, Event, Value -- one row per
# de-duplicated record. Point-like domains (observations, procedures,
# measurements) get Date_end = NA; interval domains (conditions, medications,
# devices) keep their real end-date column.
# initialize master report table
master_report=data.table(
Date = as.Date(character()),
Date_end = character(),
Type = character(),
Event = character(),
Value = character()
)
# extract table-specific data
observations_original = pt_data$Observations
conditions_original = pt_data$Conditions
procedures_original = pt_data$Procedures
medications_original = pt_data$Medications
measurements_original = pt_data$Measurements
devices_original = pt_data$Devices
## format observations
observations=observations_original[,c("observation_date","observation_concept_name", "value_as_number")]
observations$Type = "Observation"
# observations are point events: no end date
observations$Date_end <- NA
observations=observations[!duplicated(observations),]
# drop rows lacking a date or a concept name (not plottable / not labelable)
observations=observations[!is.na(observations$observation_date),]
observations=observations[!is.na(observations$observation_concept_name),]
# zero observation values are treated as "no value"
# (data.table subset-replacement; rows where value_as_number is NA are untouched)
observations[value_as_number==0]$value_as_number <- NA
observations=observations[,c("observation_date","Date_end","Type","observation_concept_name","value_as_number")]
colnames(observations)=c("Date","Date_end","Type","Event","Value")
## format conditions
# conditions span an interval, so condition_end_date becomes Date_end
conditions=conditions_original[,c("condition_start_date","condition_end_date","condition_concept_name","condition_source_value")]
conditions$Type = "Condition"
conditions=conditions[!duplicated(conditions),]
conditions=conditions[!is.na(conditions$condition_start_date),]
conditions=conditions[!is.na(conditions$condition_concept_name),]
conditions=conditions[,c("condition_start_date","condition_end_date","Type","condition_concept_name","condition_source_value")]
colnames(conditions)=c("Date","Date_end","Type","Event","Value")
## format procedures
procedures=procedures_original[,c("procedure_date","procedure_concept_name","procedure_source_value")]
procedures$Type = "Procedure"
# procedures are point events: no end date
procedures$Date_end <- NA
procedures=procedures[!duplicated(procedures),]
procedures=procedures[!is.na(procedures$procedure_date),]
procedures=procedures[!is.na(procedures$procedure_concept_name),]
procedures=procedures[,c("procedure_date","Date_end","Type","procedure_concept_name","procedure_source_value")]
colnames(procedures)=c("Date","Date_end","Type","Event","Value")
## format Medications
# drug exposures span an interval, so drug_exposure_end_date becomes Date_end
medications=medications_original[,c("drug_exposure_start_date","drug_exposure_end_date","medication_concept_name","dose_unit_source_value")]
medications$Type = "Medication"
medications=medications[!duplicated(medications),]
medications=medications[!is.na(medications$drug_exposure_start_date),]
medications=medications[!is.na(medications$medication_concept_name),]
medications=medications[,c("drug_exposure_start_date","drug_exposure_end_date","Type","medication_concept_name","dose_unit_source_value")]
colnames(medications)=c("Date","Date_end","Type","Event","Value")
## format Measurements
measurements=measurements_original[,c("measurement_date","measurement_concept_name","value_as_number")]
measurements$Type = "Measurement"
# measurements are point events: no end date
measurements$Date_end <- NA
measurements=measurements[!duplicated(measurements),]
measurements=measurements[!is.na(measurements$measurement_date),]
measurements=measurements[!is.na(measurements$measurement_concept_name),]
measurements=measurements[,c("measurement_date","Date_end","Type","measurement_concept_name","value_as_number")]
colnames(measurements)=c("Date","Date_end","Type","Event","Value")
## format Devices
# device exposures span an interval, so device_exposure_end_date becomes Date_end
devices=devices_original[,c("device_exposure_start_date","device_exposure_end_date", "device_concept_name","device_source_value")]
devices$Type = "Device"
devices=devices[!duplicated(devices),]
devices=devices[!is.na(devices$device_exposure_start_date),]
devices=devices[!is.na(devices$device_concept_name),]
devices=devices[,c("device_exposure_start_date","device_exposure_end_date","Type","device_concept_name","device_source_value")]
colnames(devices)=c("Date","Date_end","Type","Event","Value")
## rbind all data modalities together
# NOTE(review): rbind here relies on rbind coercing the mixed Date/character
# and numeric/character columns across domains; the as.character/as.Date
# round-trips below appear intended to normalize the result -- confirm.
master_report=rbind(master_report,observations,conditions,procedures,medications,measurements,devices)
# verify Events are characters
master_report$Event = as.character(master_report$Event)
# verify Dates are dates
master_report$Date = as.Date(as.character(master_report$Date))
master_report$Date_end = as.Date(as.character(master_report$Date_end),format="%Y-%m-%d")
return(master_report)
}
### format data for multiplex timeline
format_multiplex_timeline <- function(pt_data_report){
# Convert the flat report table (from generate_pt_report) into the
# id/content/start/end/group/type column layout used by the timeline widget.
# The original Value column is carried along so it can be displayed on click.
multiplex_timeline <- pt_data_report
# row names serve as stable item ids for the widget
multiplex_timeline$id = row.names(multiplex_timeline)
multiplex_timeline$type <- as.character(NA)
multiplex_timeline = multiplex_timeline[,c("id","Event","Date","Date_end","Type","type", "Value")] # keep Value to display when clicked
colnames(multiplex_timeline) = c("id","content","start","end","group","type","Value")
# if end date same as start, set end to NA
# (data.table subset-replacement: rows where start or end is NA compare to NA
# and are left unchanged)
multiplex_timeline[start==end]$end <- NA
# if end date is not NA, set type to range
multiplex_timeline[!is.na(end)]$type <- "range"
# otherwise set type to point
multiplex_timeline[is.na(end)]$type <- "point"
return(multiplex_timeline)
}
####################
### LOADING DATA ###
####################
# Wrapper for domain-specific getData functions (e.g., getObservations). Produces a list of tables for all relevant domains.
get_all_pt_data <- function(pt_id){
  # Fetch every clinical domain for a single patient and return them as a
  # named list keyed by domain (consumed by generate_pt_summary / _report).
  # Domain getters are called in the same order as the list entries.
  list(
    Encounters = getEncounters(pt_id),
    Observations = getObservations(pt_id),
    Conditions = getConditions(pt_id),
    Procedures = getProcedures(pt_id),
    Medications = getMedications(pt_id),
    Measurements = getMeasurements(pt_id),
    Devices = getDevices(pt_id)
  )
} # END get_data function
# modality specific get functions (utilized in get_all_pt_data)
getEncounters <- function(pt_id) {
  # Pull all visit_occurrence rows for one patient and replace the raw concept
  # ids (visit, visit source, admitting source, discharge target) with
  # human-readable concept names from the global `dataOntology` table.
  encQuery <- paste0('SELECT person_id, visit_occurrence_id, visit_concept_id, visit_start_date, visit_end_date, visit_source_concept_id, visit_source_value, admitting_source_concept_id, discharge_to_concept_id FROM visit_occurrence WHERE person_id = ', pt_id)
  encData <- data.table(sqlQuery(encQuery))
  # missing admission/discharge concept ids are treated as concept 0
  encData[is.na(admitting_source_concept_id), admitting_source_concept_id := 0]
  encData[is.na(discharge_to_concept_id), discharge_to_concept_id := 0]
  # merge the concept name for one id column, rename it, then drop the id column
  addConceptName <- function(dt, idCol, nameCol) {
    dt <- merge(dt, dataOntology[, c("concept_id", "concept_name")], by.x = idCol, by.y = "concept_id", all.x = TRUE)
    setnames(dt, "concept_name", nameCol)
    dt[, (idCol) := NULL]
    dt
  }
  encData <- addConceptName(encData, "visit_concept_id", "visit_concept")
  encData <- addConceptName(encData, "visit_source_concept_id", "visit_source_concept")
  encData <- addConceptName(encData, "admitting_source_concept_id", "admitting_concept")
  encData <- addConceptName(encData, "discharge_to_concept_id", "discharge_concept")
  encData$visit_start_date <- as.Date(encData$visit_start_date)
  return(encData)
}
getObservations <- function(pt_id) {
  # Pull all observation rows for one patient and decorate them with code /
  # name / vocabulary details for the mapped and source observation concepts,
  # plus names for the record-type and coded-value concepts (`dataOntology`).
  obsQuery <- paste0('SELECT person_id, observation_concept_id, observation_source_concept_id, observation_date, observation_type_concept_id, value_as_number, value_as_string, value_as_concept_id, visit_occurrence_id, observation_source_value, unit_source_value FROM observation WHERE person_id = ', pt_id)
  obsData <- data.table(sqlQuery(obsQuery))
  # restrict to Observation-domain concepts for the clinical merges
  obsOntology <- dataOntology[domain_id=="Observation"]
  # mapped concept -> observation_concept_* columns
  obsData <- merge(obsData, obsOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="observation_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(obsData, c("concept_code","concept_name","vocabulary_id"),
           c("observation_concept_code","observation_concept_name","observation_concept_vocabulary"))
  obsData[, observation_concept_id := NULL]
  # source concept -> observation_source_* columns
  obsData <- merge(obsData, obsOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="observation_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(obsData, c("concept_code","concept_name","vocabulary_id"),
           c("observation_source_code","observation_source_name","observation_source_vocabulary"))
  obsData[, observation_source_concept_id := NULL]
  # record metadata: how it was recorded, and the coded value (if any)
  obsData <- merge(obsData, dataOntology[,c("concept_id","concept_name")], by.x="observation_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(obsData, "concept_name", "observation_type")
  obsData[, observation_type_concept_id := NULL]
  obsData <- merge(obsData, dataOntology[,c("concept_id","concept_name")], by.x="value_as_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(obsData, "concept_name", "value_concept")
  obsData[, value_as_concept_id := NULL]
  obsData$observation_date <- as.Date(obsData$observation_date)
  return(obsData)
}
getConditions <- function(pt_id) {
  # Pull all condition_occurrence rows for one patient and decorate them with
  # code / name / vocabulary details for the mapped and source condition
  # concepts, plus names for the record-type and status concepts.
  condQuery <- paste0('SELECT person_id, condition_concept_id, condition_start_date, condition_end_date, visit_occurrence_id, condition_type_concept_id, condition_source_value, condition_source_concept_id, condition_status_concept_id FROM condition_occurrence WHERE person_id = ', pt_id)
  condData <- data.table(sqlQuery(condQuery))
  # any domain containing "Condition" (matches the original grep-based filter)
  condOntology <- dataOntology[grep("Condition",domain_id)]
  # mapped concept -> condition_concept_* columns
  condData <- merge(condData, condOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="condition_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(condData, c("concept_code","concept_name","vocabulary_id"),
           c("condition_concept_code","condition_concept_name","condition_concept_vocabulary"))
  condData[, condition_concept_id := NULL]
  # source concept -> condition_source_* columns
  condData <- merge(condData, condOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="condition_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(condData, c("concept_code","concept_name","vocabulary_id"),
           c("condition_source_code","condition_source_name","condition_source_vocabulary"))
  condData[, condition_source_concept_id := NULL]
  # record metadata: how it was recorded, and the condition status
  condData <- merge(condData, dataOntology[,c("concept_id","concept_name")], by.x="condition_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(condData, "concept_name", "condition_type")
  condData[, condition_type_concept_id := NULL]
  condData <- merge(condData, dataOntology[,c("concept_id","concept_name")], by.x="condition_status_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(condData, "concept_name", "condition_status_type")
  condData[, condition_status_concept_id := NULL]
  condData$condition_start_date <- as.Date(condData$condition_start_date)
  return(condData)
}
getProcedures <- function(pt_id){
  # Pull all procedure_occurrence rows for one patient and decorate them with
  # code / name / vocabulary details for the mapped and source procedure
  # concepts, plus the record-type concept name.
  procQuery <- paste0('SELECT person_id, procedure_concept_id, procedure_date, quantity, visit_occurrence_id, procedure_type_concept_id, procedure_source_value, procedure_source_concept_id FROM procedure_occurrence WHERE person_id = ', pt_id)
  procData <- data.table(sqlQuery(procQuery))
  # restrict to Procedure-domain concepts for the clinical merges
  procOntology <- dataOntology[domain_id=="Procedure"]
  # mapped concept -> procedure_concept_* columns
  procData <- merge(procData, procOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="procedure_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(procData, c("concept_code","concept_name","vocabulary_id"),
           c("procedure_concept_code","procedure_concept_name","procedure_concept_vocabulary"))
  procData[, procedure_concept_id := NULL]
  # source concept -> procedure_source_* columns
  procData <- merge(procData, procOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="procedure_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(procData, c("concept_code","concept_name","vocabulary_id"),
           c("procedure_source_code","procedure_source_name","procedure_source_vocabulary"))
  procData[, procedure_source_concept_id := NULL]
  # record metadata: how the procedure was recorded
  procData <- merge(procData, dataOntology[,c("concept_id","concept_name")], by.x="procedure_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(procData, "concept_name", "procedure_type")
  procData[, procedure_type_concept_id := NULL]
  procData$procedure_date <- as.Date(procData$procedure_date)
  return(procData)
}
getMedications <- function(pt_id) {
  # Pull all drug_exposure rows for one patient and decorate them with code /
  # name / vocabulary details for the mapped and source drug concepts (renamed
  # to medication_*), plus names for the record-type and route concepts.
  medQuery <- paste0('SELECT person_id, drug_concept_id, drug_exposure_start_date, drug_exposure_end_date, drug_type_concept_id, stop_reason, refills, quantity, days_supply, sig, route_concept_id, dose_unit_source_value, visit_occurrence_id, drug_source_value, drug_source_concept_id, route_source_value FROM drug_exposure WHERE person_id = ', pt_id)
  medData <- data.table(sqlQuery(medQuery))
  # restrict to Drug-domain concepts for the clinical merges
  drugOntology <- dataOntology[domain_id=="Drug"]
  # mapped drug concept -> medication_concept_* columns
  medData <- merge(medData, drugOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="drug_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(medData, c("concept_code","concept_name","vocabulary_id"),
           c("medication_concept_code","medication_concept_name","medication_concept_vocabulary"))
  medData[, drug_concept_id := NULL]
  # source drug concept -> medication_source_* columns
  medData <- merge(medData, drugOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="drug_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(medData, c("concept_code","concept_name","vocabulary_id"),
           c("medication_source_code","medication_source_name","medication_source_vocabulary"))
  medData[, drug_source_concept_id := NULL]
  # record metadata: how the exposure was recorded, and the route
  medData <- merge(medData, dataOntology[,c("concept_id","concept_name")], by.x="drug_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(medData, "concept_name", "drug_type")
  medData[, drug_type_concept_id := NULL]
  medData <- merge(medData, dataOntology[,c("concept_id","concept_name")], by.x="route_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(medData, "concept_name", "route_concept")
  medData[, route_concept_id := NULL]
  medData$drug_exposure_start_date <- as.Date(medData$drug_exposure_start_date)
  return(medData)
}
getMeasurements <- function(pt_id) {
  # Pull all measurement rows for one patient and decorate them with code /
  # name / vocabulary details for the mapped and source measurement concepts,
  # plus names for the record-type, coded-value and unit concepts.
  measQuery <- paste0('SELECT person_id, measurement_concept_id, measurement_date, measurement_type_concept_id, value_as_number, value_as_concept_id, unit_concept_id, visit_occurrence_id, measurement_source_value, measurement_source_concept_id FROM measurement WHERE person_id = ', pt_id)
  measData <- data.table(sqlQuery(measQuery))
  # restrict to Measurement-domain concepts for the clinical merges
  measOntology <- dataOntology[domain_id=="Measurement"]
  # mapped concept -> measurement_concept_* columns
  measData <- merge(measData, measOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="measurement_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(measData, c("concept_code","concept_name","vocabulary_id"),
           c("measurement_concept_code","measurement_concept_name","measurement_concept_vocabulary"))
  measData[, measurement_concept_id := NULL]
  # source concept -> measurement_source_* columns
  measData <- merge(measData, measOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="measurement_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(measData, c("concept_code","concept_name","vocabulary_id"),
           c("measurement_source_code","measurement_source_name","measurement_source_vocabulary"))
  measData[, measurement_source_concept_id := NULL]
  # record metadata: how it was recorded, coded value, and unit
  measData <- merge(measData, dataOntology[,c("concept_id","concept_name")], by.x="measurement_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(measData, "concept_name", "measurement_type")
  measData[, measurement_type_concept_id := NULL]
  measData <- merge(measData, dataOntology[,c("concept_id","concept_name")], by.x="value_as_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(measData, "concept_name", "value_concept")
  measData[, value_as_concept_id := NULL]
  measData <- merge(measData, dataOntology[,c("concept_id","concept_name")], by.x="unit_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(measData, "concept_name", "unit_concept")
  measData[, unit_concept_id := NULL]
  measData$measurement_date <- as.Date(measData$measurement_date)
  return(measData)
}
getDevices <- function(pt_id) {
  # Pull all device_exposure rows for one patient and decorate them with code /
  # name / vocabulary details for the mapped and source device concepts, plus
  # the record-type concept name.
  devQuery <- paste0('SELECT person_id, device_concept_id, device_exposure_start_date, device_exposure_end_date, device_type_concept_id, device_source_value, visit_occurrence_id, device_source_concept_id FROM device_exposure WHERE person_id = ', pt_id)
  devData <- data.table(sqlQuery(devQuery))
  # any domain containing "Device" (matches the original grep-based filter)
  devOntology <- dataOntology[grep("Device",domain_id)]
  # mapped concept -> device_concept_* columns
  devData <- merge(devData, devOntology[,c("concept_id","vocabulary_id","concept_code","concept_name")], by.x="device_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(devData, c("concept_code","concept_name","vocabulary_id"),
           c("device_concept_code","device_concept_name","device_concept_vocabulary"))
  devData[, device_concept_id := NULL]
  # source concept -> device_source_* columns
  devData <- merge(devData, devOntology[,c("concept_id","vocabulary_id", "concept_code","concept_name")], by.x="device_source_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(devData, c("concept_code","concept_name","vocabulary_id"),
           c("device_source_code","device_source_name","device_source_vocabulary"))
  devData[, device_source_concept_id := NULL]
  # record metadata: how the exposure was recorded
  devData <- merge(devData, dataOntology[,c("concept_id","concept_name")], by.x="device_type_concept_id", by.y="concept_id", all.x=TRUE)
  setnames(devData, "concept_name", "device_type")
  devData[, device_type_concept_id := NULL]
  devData$device_exposure_start_date <- as.Date(devData$device_exposure_start_date)
  return(devData)
}
#####################
### FIND PATIENTS ###
#####################
findPatients <- function(selected_terms, func_type, search_strat) {
  # Identify patient ids whose records match the selected ontology terms.
  #
  #   selected_terms: terms chosen in the UI; unpacked/mapped via unpackAndMap().
  #   func_type:      "or"  -> patients matching ANY term,
  #                   "and" -> patients matching ALL terms.
  #   search_strat:   "direct" -> match the *_source_concept_id columns as-is;
  #                   "mapped" -> expand terms with common-ontology synonyms and
  #                               descendant concepts, then match *_concept_id.
  dataCriteriaMapped <- unpackAndMap(selected_terms)
  if (search_strat == "direct") {
    useSource <- "_source" # search _source_concept_id
    searchTable <- identifyTablesDirect(dataCriteriaMapped)
  } else if (search_strat == "mapped") {
    useSource <- "" # search _concept_id
    dataCodesFormatted <- paste0(dataCriteriaMapped$concept_id, collapse = ",")
    # get common ontology synonyms
    synonymDataFiltered <- identifySynonyms(dataCodesFormatted)
    synonymData <- merge(synonymDataFiltered[, "concept_id_2"], dataOntology[, c("concept_id", "domain_id", "vocabulary_id")], by.x = "concept_id_2", by.y = "concept_id")
    colnames(synonymData) <- c("concept_id", "domain_id", "vocabulary_id")
    synonymCodes <- paste(union(dataCriteriaMapped$concept_id, synonymDataFiltered$concept_id_2), collapse = ",") ## adds original codes into ancestor query (b/c of scenarios with ATC)
    # get descendants of the originals plus their synonyms
    mappingDataInfo <- identifyMappings(synonymCodes)
    mappingData <- mappingDataInfo[, c("descendant_concept_id", "domain_id", "vocabulary_id")]
    colnames(mappingData) <- c("concept_id", "domain_id", "vocabulary_id")
    # original + synonym + descendant concepts, de-duplicated
    conceptsCombined <- rbind(dataCriteriaMapped[, c("concept_id", "domain_id", "vocabulary_id")], synonymData)
    conceptsCombined <- rbind(conceptsCombined, mappingData)
    conceptsCombined <- conceptsCombined[!duplicated(conceptsCombined), ]
    # get tables to search for mapped concepts
    searchTable <- identifyTablesMapped(conceptsCombined)
  }
  # run one search per domain; NULL when that domain has no codes to search
  if (length(searchTable$Condition) > 0) {
    condition_codes <- paste(searchTable$Condition, collapse = ",")
    pts_condition <- searchCondition(useSource, condition_codes)
  } else {
    pts_condition <- NULL
  }
  if (length(searchTable$Observation) > 0) {
    observation_codes <- paste(searchTable$Observation, collapse = ",")
    pts_observation <- searchObservation(useSource, observation_codes)
  } else {
    pts_observation <- NULL
  }
  if (length(searchTable$Measurement) > 0) {
    measurement_codes <- paste(searchTable$Measurement, collapse = ",")
    pts_measurement <- searchMeasurement(useSource, measurement_codes)
  } else {
    pts_measurement <- NULL
  }
  if (length(searchTable$Drug) > 0) {
    drug_codes <- paste(searchTable$Drug, collapse = ",")
    pts_drug <- searchDrug(useSource, drug_codes)
  } else {
    pts_drug <- NULL
  }
  if (length(searchTable$Device) > 0) {
    # BUG FIX: this previously collapsed searchTable$Drug, so device searches
    # ran with drug concept ids; use the Device code list instead.
    device_codes <- paste(searchTable$Device, collapse = ",")
    pts_device <- searchDevice(useSource, device_codes)
  } else {
    pts_device <- NULL
  }
  if (length(searchTable$Procedure) > 0) {
    procedure_codes <- paste(searchTable$Procedure, collapse = ",")
    pts_procedure <- searchProcedure(useSource, procedure_codes)
  } else {
    pts_procedure <- NULL
  }
  # combine the per-domain hits
  if (func_type == "or") {
    patient_list <- identifyPatientsOR(pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure)
  } else if (func_type == "and") {
    # NOTE(review): synonymDataFiltered and mappingDataInfo are only defined in
    # the "mapped" branch above, so search_strat == "direct" combined with
    # func_type == "and" would error here -- confirm that combination is unused.
    patient_list <- identifyPatientsAND(dataCriteriaMapped, synonymDataFiltered, mappingDataInfo, pts_condition, pts_observation, pts_measurement, pts_device, pts_drug, pts_procedure)
  }
  return(patient_list)
}
### specific table search functions (used in Find Patients function)
searchCondition <- function(useSource, codes) {
  # Distinct (person_id, condition_concept_id) pairs whose condition records
  # match `codes`; `useSource` is "" (mapped search) or "_source" (direct).
  conditionQuery <- paste0('SELECT person_id, condition_concept_id FROM condition_occurrence WHERE condition',useSource,'_concept_id IN (',codes,') ')
  conditionHits <- data.table(sqlQuery(conditionQuery))
  return(unique(conditionHits))
}
searchObservation <- function(useSource, codes) {
  # Distinct (person_id, observation_concept_id) pairs whose observation
  # records match `codes`; `useSource` is "" (mapped) or "_source" (direct).
  observationQuery <- paste0('SELECT person_id, observation_concept_id FROM observation WHERE observation',useSource,'_concept_id IN (',codes,') ')
  observationHits <- data.table(sqlQuery(observationQuery))
  return(unique(observationHits))
}
searchMeasurement <- function(useSource, codes) {
  # Distinct (person_id, measurement_concept_id) pairs whose measurement
  # records match `codes`; `useSource` is "" (mapped) or "_source" (direct).
  measurementQuery <- paste0('SELECT person_id, measurement_concept_id FROM measurement WHERE measurement',useSource,'_concept_id IN (',codes,') ')
  measurementHits <- data.table(sqlQuery(measurementQuery))
  return(unique(measurementHits))
}
searchDrug <- function(useSource, codes) {
  # Distinct (person_id, drug_concept_id) pairs whose drug exposures match
  # `codes`; `useSource` is "" (mapped search) or "_source" (direct).
  drugQuery <- paste0('SELECT person_id, drug_concept_id FROM drug_exposure WHERE drug',useSource,'_concept_id IN (',codes,') ')
  drugHits <- data.table(sqlQuery(drugQuery))
  return(unique(drugHits))
}
searchDevice <- function(useSource, codes) {
  # Distinct (person_id, device_concept_id) pairs whose device exposures match
  # `codes`; `useSource` is "" (mapped search) or "_source" (direct).
  deviceQuery <- paste0('SELECT person_id, device_concept_id FROM device_exposure WHERE device',useSource,'_concept_id IN (',codes,') ')
  deviceHits <- data.table(sqlQuery(deviceQuery))
  return(unique(deviceHits))
}
searchProcedure <- function(useSource, codes) {
  # Distinct (person_id, procedure_concept_id) pairs whose procedure records
  # match `codes`; `useSource` is "" (mapped search) or "_source" (direct).
  procedureQuery <- paste0('SELECT person_id, procedure_concept_id FROM procedure_occurrence WHERE procedure',useSource,'_concept_id IN (',codes,') ')
  procedureHits <- data.table(sqlQuery(procedureQuery))
  return(unique(procedureHits))
}
|
#' @title Round a numeric vector; halves will be rounded up, ala Microsoft Excel.
#'
#' @description
#' Base R \code{round()} uses banker's rounding: halves go to the nearest even
#' number, so 11.5 and 12.5 both become 12. This function always rounds halves
#' away from zero instead, e.g., 12.5 becomes 13 (with \code{digits = 0}) and
#' -0.5 becomes -1.
#'
#' This may skew subsequent statistical analysis of the data, but may be
#' desirable in certain contexts. This function is implemented exactly from
#' \url{http://stackoverflow.com/a/12688836}; see that question and comments
#' for discussion of this issue.
#'
#' @param x a numeric vector to round.
#' @param digits how many digits should be displayed after the decimal point?
#' @export
#' @examples
#' round_half_up(12.5)
#' round_half_up(1.125, 2)
#' round_half_up(1.125, 1)
#' round_half_up(-0.5, 0) # negatives get rounded away from zero
#'
round_half_up <- function(x, digits = 0) {
  scale <- 10 ^ digits
  # work on magnitudes so halves always move away from zero, then restore sign
  sign(x) * trunc(abs(x) * scale + 0.5) / scale
}
#' @title Round a numeric vector to the specified number of significant digits; halves will be rounded up.
#'
#' @description
#' In base R \code{signif()}, halves are rounded to even, e.g.,
#' \code{signif(11.5, 2)} and \code{signif(12.5, 2)} are both rounded to 12.
#' This function rounds 12.5 to 13 (assuming \code{digits = 2}). Negative halves
#' are rounded away from zero, e.g., \code{signif(-2.5, 1)} is rounded to -3.
#'
#' This may skew subsequent statistical analysis of the data, but may be
#' desirable in certain contexts. This function is implemented from
#' \url{https://stackoverflow.com/a/1581007}; see that question and
#' comments for discussion of this issue.
#'
#' @param x a numeric vector to round.
#' @param digits integer indicating the number of significant digits to be used.
#' @export
#' @examples
#' signif_half_up(12.5, 2)
#' signif_half_up(1.125, 3)
#' signif_half_up(-2.5, 1) # negatives get rounded away from zero
#'
signif_half_up <- function(x, digits = 6) {
  # Entries that are zero, NA or infinite pass through unchanged;
  # the rest are rescaled so `round_half_up` acts on the requested
  # number of significant digits.
  keep <- which(x != 0 & !is.na(x) & !is.infinite(x))
  result <- x
  scale <- rep(0, length(x))
  scale[keep] <- 10 ^ (digits - ceiling(log10(abs(x[keep]))))
  result[keep] <- round_half_up(x[keep] * scale[keep]) / scale[keep]
  return(result)
} | /R/round_half_up.R | permissive | martinctc/janitor | R | false | false | 2,258 | r | #' @title Round a numeric vector; halves will be rounded up, ala Microsoft Excel.
#'
#' @description
#' In base R \code{round()}, halves are rounded to even, e.g., 12.5 and 11.5 are both rounded to 12. This function rounds 12.5 to 13 (assuming \code{digits = 0}). Negative halves are rounded away from zero, e.g., -0.5 is rounded to -1.
#'
#' This may skew subsequent statistical analysis of the data, but may be desirable in certain contexts. This function is implemented exactly from \url{http://stackoverflow.com/a/12688836}; see that question and comments for discussion of this issue.
#'
#' @param x a numeric vector to round.
#' @param digits how many digits should be displayed after the decimal point?
#' @export
#' @examples
#' round_half_up(12.5)
#' round_half_up(1.125, 2)
#' round_half_up(1.125, 1)
#' round_half_up(-0.5, 0) # negatives get rounded away from zero
#'
round_half_up <- function(x, digits = 0) {
  # Remember each sign so negative halves round away from zero,
  # then shift, add one half, truncate, and shift back.
  signs <- sign(x)
  shifted <- abs(x) * 10 ^ digits + 0.5
  trunc(shifted) / 10 ^ digits * signs
}
#' @title Round a numeric vector to the specified number of significant digits; halves will be rounded up.
#'
#' @description
#' In base R \code{signif()}, halves are rounded to even, e.g.,
#' \code{signif(11.5, 2)} and \code{signif(12.5, 2)} are both rounded to 12.
#' This function rounds 12.5 to 13 (assuming \code{digits = 2}). Negative halves
#' are rounded away from zero, e.g., \code{signif(-2.5, 1)} is rounded to -3.
#'
#' This may skew subsequent statistical analysis of the data, but may be
#' desirable in certain contexts. This function is implemented from
#' \url{https://stackoverflow.com/a/1581007}; see that question and
#' comments for discussion of this issue.
#'
#' @param x a numeric vector to round.
#' @param digits integer indicating the number of significant digits to be used.
#' @export
#' @examples
#' signif_half_up(12.5, 2)
#' signif_half_up(1.125, 3)
#' signif_half_up(-2.5, 1) # negatives get rounded away from zero
#'
signif_half_up <- function(x, digits = 6) {
  # Entries that are zero, NA or infinite pass through unchanged;
  # the rest are rescaled so `round_half_up` acts on the requested
  # number of significant digits.
  keep <- which(x != 0 & !is.na(x) & !is.infinite(x))
  result <- x
  scale <- rep(0, length(x))
  scale[keep] <- 10 ^ (digits - ceiling(log10(abs(x[keep]))))
  result[keep] <- round_half_up(x[keep] * scale[keep]) / scale[keep]
  return(result)
}
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rap.R
\name{statistics.classNRI}
\alias{statistics.classNRI}
\title{Reclassification metrics with classes (ordinals) as inputs.}
\usage{
statistics.classNRI(c1, c2, y, s1, s2)
}
\arguments{
\item{c1}{Risk class of Reference model (ordinal)}
\item{c2}{Risk class of the new model (ordinal)}
\item{y}{Binary of outcome of interest. Must be 0 or 1.}
\item{s1}{The savings or benefit when an event is reclassified to a higher group by the new model}
\item{s2}{The benefit when a non-event is reclassified to a lower group}
}
\value{
A matrix of metrics for use within CI.classNRI
}
\description{
The function statistics.classNRI calculates the NRI metrics for reclassification of data already in classes. For use by CI.classNRI.
}
| /R_RiskAssessmentPlot/man/statistics.classNRI.Rd | no_license | aabojana/InterpretableBreastCancerPrognosis | R | false | false | 816 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/rap.R
\name{statistics.classNRI}
\alias{statistics.classNRI}
\title{Reclassification metrics with classes (ordinals) as inputs.}
\usage{
statistics.classNRI(c1, c2, y, s1, s2)
}
\arguments{
\item{c1}{Risk class of Reference model (ordinal)}
\item{c2}{Risk class of the new model (ordinal)}
\item{y}{Binary of outcome of interest. Must be 0 or 1.}
\item{s1}{The savings or benefit when an event is reclassified to a higher group by the new model}
\item{s2}{The benefit when a non-event is reclassified to a lower group}
}
\value{
A matrix of metrics for use within CI.classNRI
}
\description{
The function statistics.classNRI calculates the NRI metrics for reclassification of data already in classes. For use by CI.classNRI.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relation.R
\name{joinOperatorFromJson}
\alias{joinOperatorFromJson}
\title{joinOperatorFromJson}
\usage{
joinOperatorFromJson(json)
}
\description{
Deserializes a join operator object from its JSON representation.
}
| /man/joinOperatorFromJson.Rd | no_license | tercen/rtercen | R | false | true | 249 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relation.R
\name{joinOperatorFromJson}
\alias{joinOperatorFromJson}
\title{joinOperatorFromJson}
\usage{
joinOperatorFromJson(json)
}
\description{
Deserializes a join operator object from its JSON representation.
}
|
#' Enrichment score
#'
#' Calculates an enrichment score for a gene set.
#' @param L Vector with one expression value per gene.
#' @param S Vector of same length as L where 1 indicates that the gene in L is present in the gene set and 0 indicates that it is not.
#' @param p Weight. Default is 1.
#' @param doplot If TRUE, the running sum is plotted. Default is FALSE.
#' @param index If TRUE, the index at which the ES occurs in the sorted list is returned. Default is FALSE.
#' @details See Subramanian et al. for details.
#' @return \item{E }{Enrichment score.}
#' \item{ind }{Index of the enrichment score (if \code{index=TRUE})}.
#' @references Subramanian, A., Tamayo, P., Mootha, V. K., Mukherjee, S., Ebert, B. L.,
#' Gillette, M. A., Paulovich, A., Pomeroy, S. L., Golub, T. R., Lander, E. S.
#' and Mesirov, J. P (2005) Gene set enrichment analysis: A knowledge-based
#' approach for interpreting genome-wide expression profiles, \emph{PNAS}, \bold{102},15545-15550.
#' @author Solve Sabo, Guro Dorum
#' @importFrom graphics abline hist plot
#' @export
es <-
  function(L, S, p = 1, doplot = FALSE, index = FALSE) {
    # Rank the expression values in decreasing order and carry the
    # set-membership indicator along with them
    ranking <- sort(L, decreasing = TRUE, index.return = TRUE)
    L <- ranking$x
    S <- S[ranking$ix]
    n_in_set <- sum(S)
    n_genes <- length(L)
    # A score is undefined when the gene set is empty or covers everything
    if (n_in_set == 0)
      stop("No genes are member of the gene set")
    if (n_in_set == length(S))
      stop("All genes are members of the gene set")
    # Total weight of the hits (N_R in Subramanian et al., 2005)
    total_hit_weight <- sum(S * abs(L)^p)
    p_miss <- cumsum(1 - S) / (n_genes - n_in_set)
    p_hit <- cumsum(S * abs(L)^p) / total_hit_weight
    # Running sum; the enrichment score is its largest deviation from zero
    running <- p_hit - p_miss
    peak <- which.max(abs(running))
    E <- running[peak]
    names(E) <- NULL
    if (doplot) {
      plot(1:n_genes, running, "l", col = 2, lwd = 2,
           xlab = "L", ylab = "Phit-Pmiss", main = "Running sum")
      abline(h = 0)
      abline(v = peak, lty = 3)
    }
    if (index) return(list(E = E, ind = peak))
    else return(E)
  }
| /R/es.R | no_license | gdorum/GSEArot | R | false | false | 2,174 | r | #' Enrichment score
#'
#' Calculates an enrichment score for a gene set.
#' @param L Vector with one expression value per gene.
#' @param S Vector of same length as L where 1 indicates that the gene in L is present in the gene set and 0 indicates that it is not.
#' @param p Weight. Default is 1.
#' @param doplot If TRUE, the running sum is plotted. Default is FALSE.
#' @param index If TRUE, the index at which the ES occurs in the sorted list is returned. Default is FALSE.
#' @details See Subramanian et al. for details.
#' @return \item{E }{Enrichment score.}
#' \item{ind }{Index of the enrichment score (if \code{index=TRUE})}.
#' @references Subramanian, A., Tamayo, P., Mootha, V. K., Mukherjee, S., Ebert, B. L.,
#' Gillette, M. A., Paulovich, A., Pomeroy, S. L., Golub, T. R., Lander, E. S.
#' and Mesirov, J. P (2005) Gene set enrichment analysis: A knowledge-based
#' approach for interpreting genome-wide expression profiles, \emph{PNAS}, \bold{102},15545-15550.
#' @author Solve Sabo, Guro Dorum
#' @importFrom graphics abline hist plot
#' @export
es <-
  function(L, S, p = 1, doplot = FALSE, index = FALSE) {
    # Rank the expression values in decreasing order and carry the
    # set-membership indicator along with them
    ranking <- sort(L, decreasing = TRUE, index.return = TRUE)
    L <- ranking$x
    S <- S[ranking$ix]
    n_in_set <- sum(S)
    n_genes <- length(L)
    # A score is undefined when the gene set is empty or covers everything
    if (n_in_set == 0)
      stop("No genes are member of the gene set")
    if (n_in_set == length(S))
      stop("All genes are members of the gene set")
    # Total weight of the hits (N_R in Subramanian et al., 2005)
    total_hit_weight <- sum(S * abs(L)^p)
    p_miss <- cumsum(1 - S) / (n_genes - n_in_set)
    p_hit <- cumsum(S * abs(L)^p) / total_hit_weight
    # Running sum; the enrichment score is its largest deviation from zero
    running <- p_hit - p_miss
    peak <- which.max(abs(running))
    E <- running[peak]
    names(E) <- NULL
    if (doplot) {
      plot(1:n_genes, running, "l", col = 2, lwd = 2,
           xlab = "L", ylab = "Phit-Pmiss", main = "Running sum")
      abline(h = 0)
      abline(v = peak, lty = 3)
    }
    if (index) return(list(E = E, ind = peak))
    else return(E)
  }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesian_code_final.R
\name{consensus_cluster}
\alias{consensus_cluster}
\title{consensus_cluster function}
\usage{
consensus_cluster(R, kmin = 2, kmax, clusterAlg = "hclust_ward", B = 10,
prop_resamp = 0.8)
}
\arguments{
\item{R}{a correlation matrix}
\item{kmin}{the minimum number of clusters}
\item{kmax}{the maximum number of clusters}
\item{clusterAlg}{the clustering algorithm, one of the following kmeans, hclust_ward,hclust_average,hclust_complet,hclust_single}
\item{B}{the number of resampling steps}
\item{prop_resamp}{the proportion of the data that we want to resample}
}
\value{
best k (number of clusters) chosen by this method.
}
\description{
This function performs consensus clustering.
}
| /man/consensus_cluster.Rd | no_license | azolling/EBmodules | R | false | true | 793 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bayesian_code_final.R
\name{consensus_cluster}
\alias{consensus_cluster}
\title{consensus_cluster function}
\usage{
consensus_cluster(R, kmin = 2, kmax, clusterAlg = "hclust_ward", B = 10,
prop_resamp = 0.8)
}
\arguments{
\item{R}{a correlation matrix}
\item{kmin}{the minimum number of clusters}
\item{kmax}{the maximum number of clusters}
\item{clusterAlg}{the clustering algorithm, one of the following kmeans, hclust_ward,hclust_average,hclust_complet,hclust_single}
\item{B}{the number of resampling steps}
\item{prop_resamp}{the proportion of the data that we want to resample}
}
\value{
best k (number of clusters) chosen by this method.
}
\description{
This function performs consensus clustering.
}
|
# Method: "regarima" for the function summary
#' @export
#' @export
summary.regarima <- function(object, ...){
if (is.null(object$arma)) {
result <- list(arma_orders = NULL, results_spec = NULL,
coefficients = list(arima = NULL,
regression = NULL,
fixed_out = NULL,
fixed_var = NULL),
loglik = NULL, residuals_st_err = NULL)
class(result) <- "summary.regarima"
return(result)
}
arma <- object$arma
arima_coef <- object$arima.coefficients
reg_coef <- object$regression.coefficients
rslt_spec <- object$model$spec_rslt
loglik <- object$loglik
res_err <- object$residuals.stat$st.error
usr_spec <- object$specification$regression$userdef$specification
out <- s_preOut(object)
var <- s_preVar(object)$description
fvar <- fout <- NULL
if (!is.null(arima_coef)){
a_tvalues=matrix(2*(1 - pt(abs(arima_coef[,3]), loglik[3])),ncol=1)
colnames(a_tvalues)=c("Pr(>|t|)")
arima_coef <- cbind(arima_coef,a_tvalues)
}
if (!is.null(reg_coef)){
r_tvalues=matrix(2*(1 - pt(abs(reg_coef[,3]), loglik[3])),ncol=1)
colnames(r_tvalues)=c("Pr(>|t|)")
reg_coef <- cbind(reg_coef, r_tvalues)
}
if (usr_spec[1] & usr_spec[2]){
out <- out[out[,3]!=0,]
if (dim(out)[1]!=0){
out_t <- as.character(out[,1])
out_y <- substr(out[,2],1,4)
out_m <- as.character(as.numeric(substr(out[,2],6,7)))
out_dsc <- paste(out_t," (",out_m,"-",out_y,")",sep = "")
colnames(out) <- c("","","Coefficients")
rownames(out) <- out_dsc
fout <- out[3]
fout <- cbind(fout, NA)
colnames(fout)[ncol(fout)] <- "Pr(>|t|)"
}
}
if (usr_spec[3] & usr_spec[4]){
nvar0 <- dim(var)[1]
var <- cbind(var,c(1:nvar0))
var[is.na(var[,2]), 2] <- 0
var <- var[var[,2]!=0,]
nvar <- dim(var)[1]
if (nvar!=0){
colnames(var) <- c("","Coefficients")
fvar <- var[2]
rownames(fvar) <- sprintf("r.%s", rownames(fvar))
fvar <- cbind(fvar, NA)
colnames(fvar)[ncol(fvar)] <- "Pr(>|t|)"
}
}
result <- list(arma_orders = arma,
results_spec = rslt_spec,
coefficients = list(arima = arima_coef,
regression = reg_coef,
fixed_out = fout,
fixed_var = fvar),
loglik = loglik,
residuals_st_err = res_err)
class(result) <- "summary.regarima"
result
}
#' Print method for \code{summary.regarima} objects
#'
#' Displays the ARIMA orders, the specification summary, the coefficient
#' tables (with significance stars) and the likelihood statistics.
#'
#' @param x a \code{summary.regarima} object.
#' @param digits minimum number of significant digits to print.
#' @param signif.stars logical; if \code{TRUE}, significance stars are printed.
#' @param ... further arguments passed to \code{printCoefmat}.
#' @return \code{x}, invisibly.
#' @export
print.summary.regarima <- function (x, digits = max(3L, getOption("digits") - 3L), signif.stars = getOption("show.signif.stars"), ...){
  if (is.null(x$arma_orders)) {
    cat("No pre-processing")
    return(invisible(x))
  }
  cat("y = regression model + arima ",gsub("c","",deparse(as.numeric(x$arma_orders))),sep="")
  cat("\n\n")
  cat("Model:",x$results_spec[1,"Model"],sep=" ")
  cat("\n")
  cat("Estimation span:",x$results_spec[1,"T.span"],sep=" ")
  cat("\n")
  cat("Log-transformation:",if(x$results_spec[1,"Log transformation"]) {"yes"} else {"no"},sep=" ")
  cat("\n")
  cat("Regression model:",if(x$results_spec[1,"Mean"]) {"mean"} else {"no mean"},sep=" ")
  if(x$results_spec[1,"Trading days"]==0) {cat(", no trading days effect")} else {cat(", trading days effect(",x$results_spec[1,"Trading days"],")",sep="")}
  cat(if(x$results_spec[1,"Leap year"]) {", leap year effect"} else {", no leap year effect"},sep="")
  cat(if(x$results_spec[1,"Easter"]) {", Easter effect"} else {", no Easter effect"},sep="")
  if(x$results_spec[1,"Outliers"]==0) {cat(", no outliers")} else {cat(", outliers(",x$results_spec[1,"Outliers"],")",sep="")}
  cat("\n\n")
  cat("Coefficients:")
  if (!is.null(x$coefficients$arima)){
    cat("\n")
    cat("ARIMA:","\n")
    printCoefmat(x$coefficients$arima, digits = digits, signif.stars = signif.stars,
                 na.print = "NA", ...)
  }
  if (!is.null(x$coefficients$regression)){
    cat("\n")
    cat("Regression model:","\n")
    printCoefmat(x$coefficients$regression, digits = digits, signif.stars = signif.stars,
                 na.print = "NA", ...)
  }
  if (!is.null(x$coefficients$fixed_out)){
    # fixed: section header added for consistency with the fixed_var branch
    # below and with print.regarima
    cat("\n")
    cat("Fixed outliers:","\n")
    printCoefmat(x$coefficients$fixed_out[, -ncol(x$coefficients$fixed_out), drop = FALSE],
                 digits = digits, P.values= FALSE, na.print = "NA")
  }
  if (!is.null(x$coefficients$fixed_var)){
    cat("\n")
    cat("Fixed other regression effects:","\n")
    printCoefmat(x$coefficients$fixed_var[,-ncol(x$coefficients$fixed_var), drop = FALSE],
                 digits = digits, P.values= FALSE, na.print = "NA", ...)
  }
  loglik <- x$loglik
  # fixed: removed stray `class(result) <- "summary.regarima"` -- `result` is
  # never defined in this function, so the assignment aborted every print
  cat("\n\n")
  cat("Residual standard error:",
      formatC(x$residuals_st_err,digits = digits),
      "on",
      loglik["neffectiveobs",] - loglik["np",], "degrees of freedom", sep = " ")
  cat("\n")
  cat("Log likelihood = ", formatC(loglik["logvalue",], digits = digits),
      ", aic = ",formatC(loglik["aic", ], digits = digits),
      ", aicc = ", formatC(loglik["aicc", ], digits = digits),
      ", bic(corrected for length) = ", formatC(loglik["bicc", ],digits = digits),
      sep = "")
  cat("\n\n")
  invisible(x)
}
# Method: "regarima" for the function print
#' @export
print.regarima <- function (x, digits = max(3L, getOption("digits") - 3L), ...){
if (is.null(x$arma)) {
cat("No pre-processing")
return(invisible(x))
}
arma <- x$arma
arima_coef <- x$arima.coefficients
reg_coef <- x$regression.coefficients
loglik <- x$loglik
res_err <- x$residuals.stat$st.error
usr_spec <- x$specification$regression$userdef$specification
out <- s_preOut(x)
var <- s_preVar(x)$description
rslt_spec <- x$model$spec_rslt
cat("y = regression model + arima ",gsub("c","",deparse(as.numeric(arma))),sep="")
cat("\n")
cat("Log-transformation:",if(rslt_spec[1,"Log transformation"]) {"yes"} else {"no"},sep=" ")
cat("\n")
cat("Coefficients:")
if (!is.null(arima_coef)){
if (!is.matrix(arima_coef[,-3])){
tab.arima=t(as.matrix(arima_coef[,-3]))
rownames(tab.arima)=rownames(arima_coef)
}else{
tab.arima=arima_coef[,-3]
}
cat("\n")
printCoefmat(tab.arima, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
if (!is.null(reg_coef)){
if (!is.matrix(reg_coef[,-3])){
tab.reg=t(as.matrix(reg_coef[,-3]))
rownames(tab.reg)=rownames(reg_coef)
}else{
tab.reg=reg_coef[,-3]
}
cat("\n")
printCoefmat(tab.reg, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
if (usr_spec[1] & usr_spec[2]){
out <- out[out[,3]!=0,]
if (dim(out)[1]!=0){
out_t <- as.character(out[,1])
out_y <- substr(out[,2],1,4)
out_m <- as.character(as.numeric(substr(out[,2],6,7)))
out_dsc <- paste(out_t," (",out_m,"-",out_y,")",sep = "")
fout <- out[3]
colnames(fout) <- "Coefficients"
rownames(fout) <- out_dsc
cat("\n")
cat("Fixed outliers:","\n")
printCoefmat(fout, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
}
if (usr_spec[3] & usr_spec[4]){
nvar0 <- dim(var)[1]
var <- cbind(var,c(1:nvar0))
var[is.na(var[,2]), 2] <- 0
var <- var[var[,2]!=0, ]
nvar <- dim(var)[1]
if (nvar!=0){
var_dsc <- if (nvar0==1){c("r.userdef")} else {paste("r.userdef",var[,3],sep="_")}
colnames(var) <- c("","Coefficients")
# rownames(var) <- var_dsc
fvar <- var[2]
rownames(fvar) <- sprintf("r.%s", rownames(fvar))
cat("\n")
cat("Fixed other regression effects:","\n")
printCoefmat(fvar, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
}
cat("\n\n")
cat("Residual standard error:",formatC(res_err,digits = digits),
"on",loglik["neffectiveobs",] - loglik["np",],"degrees of freedom", sep = " ")
cat("\n")
cat("Log likelihood = ", formatC(loglik["logvalue",], digits = digits),
", aic = ", formatC(loglik["aic",], digits = digits),
" aicc = ", formatC(loglik["aicc",], digits = digits),
", bic(corrected for length) = ",formatC(loglik["bicc", ],digits = digits), sep = "")
cat("\n\n")
invisible(x)
}
# Method: "regarima_rtest" for the print
#' @export
print.regarima_rtests=function (x, digits = max(3L, getOption("digits") - 3L), ...){
doublestar<-paste0("\u002A","\u002A")
triplestar<-paste0("\u002A","\u002A","\u002A")
stat <- x[,1]
pval <- x[,2]
sigcode=vector(mode = "character", length = 6)
sigcode[pval >=0.1] = triplestar
sigcode[pval < 0.1 & pval >= 0.05] = doublestar
sigcode[pval < 0.05] = " "
tabstat=data.frame(stat,pval,sigcode)
rownames(tabstat)=rownames(x)
colnames(tabstat)=c("Statistic","P.value","")
tabstat[,1]=format(tabstat[,1], digits = digits)
tabstat[,2]=format(round(tabstat[,2],max(4,digits)))
cat("\n")
cat("\033[1mNormality\033[22m")
cat("\n")
print (tabstat[1:3,])
cat("\n")
cat("Signif. codes: H0 (normality of residuals) is not rejected at","\n")
usestring<-paste0("significance levels: 0.1 ",triplestar,"0.05 ", doublestar,"\n")
cat(usestring)
cat("\n")
cat("\033[1mIndependence\033[22m")
cat("\n")
print(tabstat[c(4,5),])
cat("\n")
cat("Signif. codes: H0 (independence of residuals) is not rejected at","\n")
cat(usestring)
cat("\n")
cat("\033[1mLinearity\033[22m")
cat("\n")
print(tabstat[6,])
cat("\n")
cat("Signif. codes: H0 (no conditional heteroscedasticity of residuals) is not rejected at","\n")
cat(usestring)
invisible(x)
}
| /R/regarima_print.R | no_license | jdemetra/rjdemetra | R | false | false | 9,624 | r | # Method: "regarima" for the function summary
#' Summary method for \code{regarima} objects
#'
#' Collects the ARMA orders, the specification results, the coefficient
#' tables (augmented with two-sided p-values computed from the Student t
#' distribution), the fixed outlier / user-defined regressor coefficients
#' and the likelihood statistics into a \code{summary.regarima} object.
#'
#' @param object a \code{regarima} object.
#' @param ... unused, kept for S3 consistency.
#' @return an object of class \code{"summary.regarima"}.
#' @export
summary.regarima <- function(object, ...){
  # When no pre-processing was performed, return an empty summary skeleton
  if (is.null(object$arma)) {
    result <- list(arma_orders = NULL, results_spec = NULL,
                   coefficients = list(arima = NULL,
                                       regression = NULL,
                                       fixed_out = NULL,
                                       fixed_var = NULL),
                   loglik = NULL, residuals_st_err = NULL)
    class(result) <- "summary.regarima"
    return(result)
  }
  arma <- object$arma
  arima_coef <- object$arima.coefficients
  reg_coef <- object$regression.coefficients
  rslt_spec <- object$model$spec_rslt
  loglik <- object$loglik
  res_err <- object$residuals.stat$st.error
  usr_spec <- object$specification$regression$userdef$specification
  out <- s_preOut(object)
  var <- s_preVar(object)$description
  fvar <- fout <- NULL
  # Append two-sided p-values; column 3 of each table holds the t-statistics.
  # NOTE(review): the degrees of freedom are taken from loglik[3] -- confirm
  # that the third entry of loglik really is the residual df.
  if (!is.null(arima_coef)){
    a_tvalues=matrix(2*(1 - pt(abs(arima_coef[,3]), loglik[3])),ncol=1)
    colnames(a_tvalues)=c("Pr(>|t|)")
    arima_coef <- cbind(arima_coef,a_tvalues)
  }
  if (!is.null(reg_coef)){
    r_tvalues=matrix(2*(1 - pt(abs(reg_coef[,3]), loglik[3])),ncol=1)
    colnames(r_tvalues)=c("Pr(>|t|)")
    reg_coef <- cbind(reg_coef, r_tvalues)
  }
  # Fixed (pre-specified) outliers: keep only those with a non-zero coefficient
  if (usr_spec[1] & usr_spec[2]){
    out <- out[out[,3]!=0,]
    if (dim(out)[1]!=0){
      # Build row labels "type (month-year)"; assumes the date column is
      # formatted "YYYY-MM..." -- TODO confirm
      out_t <- as.character(out[,1])
      out_y <- substr(out[,2],1,4)
      out_m <- as.character(as.numeric(substr(out[,2],6,7)))
      out_dsc <- paste(out_t," (",out_m,"-",out_y,")",sep = "")
      colnames(out) <- c("","","Coefficients")
      rownames(out) <- out_dsc
      fout <- out[3]
      # Pad with an NA p-value column so printCoefmat lines up with the
      # estimated-coefficient tables
      fout <- cbind(fout, NA)
      colnames(fout)[ncol(fout)] <- "Pr(>|t|)"
    }
  }
  # Fixed user-defined regression variables with a non-zero coefficient
  if (usr_spec[3] & usr_spec[4]){
    nvar0 <- dim(var)[1]
    var <- cbind(var,c(1:nvar0))
    var[is.na(var[,2]), 2] <- 0
    var <- var[var[,2]!=0,]
    nvar <- dim(var)[1]
    if (nvar!=0){
      colnames(var) <- c("","Coefficients")
      fvar <- var[2]
      rownames(fvar) <- sprintf("r.%s", rownames(fvar))
      fvar <- cbind(fvar, NA)
      colnames(fvar)[ncol(fvar)] <- "Pr(>|t|)"
    }
  }
  result <- list(arma_orders = arma,
                 results_spec = rslt_spec,
                 coefficients = list(arima = arima_coef,
                                     regression = reg_coef,
                                     fixed_out = fout,
                                     fixed_var = fvar),
                 loglik = loglik,
                 residuals_st_err = res_err)
  class(result) <- "summary.regarima"
  result
}
#' Print method for \code{summary.regarima} objects
#'
#' Displays the ARIMA orders, the specification summary, the coefficient
#' tables (with significance stars) and the likelihood statistics.
#'
#' @param x a \code{summary.regarima} object.
#' @param digits minimum number of significant digits to print.
#' @param signif.stars logical; if \code{TRUE}, significance stars are printed.
#' @param ... further arguments passed to \code{printCoefmat}.
#' @return \code{x}, invisibly.
#' @export
print.summary.regarima <- function (x, digits = max(3L, getOption("digits") - 3L), signif.stars = getOption("show.signif.stars"), ...){
  if (is.null(x$arma_orders)) {
    cat("No pre-processing")
    return(invisible(x))
  }
  cat("y = regression model + arima ",gsub("c","",deparse(as.numeric(x$arma_orders))),sep="")
  cat("\n\n")
  cat("Model:",x$results_spec[1,"Model"],sep=" ")
  cat("\n")
  cat("Estimation span:",x$results_spec[1,"T.span"],sep=" ")
  cat("\n")
  cat("Log-transformation:",if(x$results_spec[1,"Log transformation"]) {"yes"} else {"no"},sep=" ")
  cat("\n")
  cat("Regression model:",if(x$results_spec[1,"Mean"]) {"mean"} else {"no mean"},sep=" ")
  if(x$results_spec[1,"Trading days"]==0) {cat(", no trading days effect")} else {cat(", trading days effect(",x$results_spec[1,"Trading days"],")",sep="")}
  cat(if(x$results_spec[1,"Leap year"]) {", leap year effect"} else {", no leap year effect"},sep="")
  cat(if(x$results_spec[1,"Easter"]) {", Easter effect"} else {", no Easter effect"},sep="")
  if(x$results_spec[1,"Outliers"]==0) {cat(", no outliers")} else {cat(", outliers(",x$results_spec[1,"Outliers"],")",sep="")}
  cat("\n\n")
  cat("Coefficients:")
  if (!is.null(x$coefficients$arima)){
    cat("\n")
    cat("ARIMA:","\n")
    printCoefmat(x$coefficients$arima, digits = digits, signif.stars = signif.stars,
                 na.print = "NA", ...)
  }
  if (!is.null(x$coefficients$regression)){
    cat("\n")
    cat("Regression model:","\n")
    printCoefmat(x$coefficients$regression, digits = digits, signif.stars = signif.stars,
                 na.print = "NA", ...)
  }
  if (!is.null(x$coefficients$fixed_out)){
    # fixed: section header added for consistency with the fixed_var branch
    # below and with print.regarima
    cat("\n")
    cat("Fixed outliers:","\n")
    printCoefmat(x$coefficients$fixed_out[, -ncol(x$coefficients$fixed_out), drop = FALSE],
                 digits = digits, P.values= FALSE, na.print = "NA")
  }
  if (!is.null(x$coefficients$fixed_var)){
    cat("\n")
    cat("Fixed other regression effects:","\n")
    printCoefmat(x$coefficients$fixed_var[,-ncol(x$coefficients$fixed_var), drop = FALSE],
                 digits = digits, P.values= FALSE, na.print = "NA", ...)
  }
  loglik <- x$loglik
  # fixed: removed stray `class(result) <- "summary.regarima"` -- `result` is
  # never defined in this function, so the assignment aborted every print
  cat("\n\n")
  cat("Residual standard error:",
      formatC(x$residuals_st_err,digits = digits),
      "on",
      loglik["neffectiveobs",] - loglik["np",], "degrees of freedom", sep = " ")
  cat("\n")
  cat("Log likelihood = ", formatC(loglik["logvalue",], digits = digits),
      ", aic = ",formatC(loglik["aic", ], digits = digits),
      ", aicc = ", formatC(loglik["aicc", ], digits = digits),
      ", bic(corrected for length) = ", formatC(loglik["bicc", ],digits = digits),
      sep = "")
  cat("\n\n")
  invisible(x)
}
# Method: "regarima" for the function print
#' @export
print.regarima <- function (x, digits = max(3L, getOption("digits") - 3L), ...){
if (is.null(x$arma)) {
cat("No pre-processing")
return(invisible(x))
}
arma <- x$arma
arima_coef <- x$arima.coefficients
reg_coef <- x$regression.coefficients
loglik <- x$loglik
res_err <- x$residuals.stat$st.error
usr_spec <- x$specification$regression$userdef$specification
out <- s_preOut(x)
var <- s_preVar(x)$description
rslt_spec <- x$model$spec_rslt
cat("y = regression model + arima ",gsub("c","",deparse(as.numeric(arma))),sep="")
cat("\n")
cat("Log-transformation:",if(rslt_spec[1,"Log transformation"]) {"yes"} else {"no"},sep=" ")
cat("\n")
cat("Coefficients:")
if (!is.null(arima_coef)){
if (!is.matrix(arima_coef[,-3])){
tab.arima=t(as.matrix(arima_coef[,-3]))
rownames(tab.arima)=rownames(arima_coef)
}else{
tab.arima=arima_coef[,-3]
}
cat("\n")
printCoefmat(tab.arima, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
if (!is.null(reg_coef)){
if (!is.matrix(reg_coef[,-3])){
tab.reg=t(as.matrix(reg_coef[,-3]))
rownames(tab.reg)=rownames(reg_coef)
}else{
tab.reg=reg_coef[,-3]
}
cat("\n")
printCoefmat(tab.reg, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
if (usr_spec[1] & usr_spec[2]){
out <- out[out[,3]!=0,]
if (dim(out)[1]!=0){
out_t <- as.character(out[,1])
out_y <- substr(out[,2],1,4)
out_m <- as.character(as.numeric(substr(out[,2],6,7)))
out_dsc <- paste(out_t," (",out_m,"-",out_y,")",sep = "")
fout <- out[3]
colnames(fout) <- "Coefficients"
rownames(fout) <- out_dsc
cat("\n")
cat("Fixed outliers:","\n")
printCoefmat(fout, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
}
if (usr_spec[3] & usr_spec[4]){
nvar0 <- dim(var)[1]
var <- cbind(var,c(1:nvar0))
var[is.na(var[,2]), 2] <- 0
var <- var[var[,2]!=0, ]
nvar <- dim(var)[1]
if (nvar!=0){
var_dsc <- if (nvar0==1){c("r.userdef")} else {paste("r.userdef",var[,3],sep="_")}
colnames(var) <- c("","Coefficients")
# rownames(var) <- var_dsc
fvar <- var[2]
rownames(fvar) <- sprintf("r.%s", rownames(fvar))
cat("\n")
cat("Fixed other regression effects:","\n")
printCoefmat(fvar, digits = digits, P.values= FALSE, na.print = "NA", ...)
}
}
cat("\n\n")
cat("Residual standard error:",formatC(res_err,digits = digits),
"on",loglik["neffectiveobs",] - loglik["np",],"degrees of freedom", sep = " ")
cat("\n")
cat("Log likelihood = ", formatC(loglik["logvalue",], digits = digits),
", aic = ", formatC(loglik["aic",], digits = digits),
" aicc = ", formatC(loglik["aicc",], digits = digits),
", bic(corrected for length) = ",formatC(loglik["bicc", ],digits = digits), sep = "")
cat("\n\n")
invisible(x)
}
# Method: "regarima_rtest" for the print
#' @export
print.regarima_rtests=function (x, digits = max(3L, getOption("digits") - 3L), ...){
doublestar<-paste0("\u002A","\u002A")
triplestar<-paste0("\u002A","\u002A","\u002A")
stat <- x[,1]
pval <- x[,2]
sigcode=vector(mode = "character", length = 6)
sigcode[pval >=0.1] = triplestar
sigcode[pval < 0.1 & pval >= 0.05] = doublestar
sigcode[pval < 0.05] = " "
tabstat=data.frame(stat,pval,sigcode)
rownames(tabstat)=rownames(x)
colnames(tabstat)=c("Statistic","P.value","")
tabstat[,1]=format(tabstat[,1], digits = digits)
tabstat[,2]=format(round(tabstat[,2],max(4,digits)))
cat("\n")
cat("\033[1mNormality\033[22m")
cat("\n")
print (tabstat[1:3,])
cat("\n")
cat("Signif. codes: H0 (normality of residuals) is not rejected at","\n")
usestring<-paste0("significance levels: 0.1 ",triplestar,"0.05 ", doublestar,"\n")
cat(usestring)
cat("\n")
cat("\033[1mIndependence\033[22m")
cat("\n")
print(tabstat[c(4,5),])
cat("\n")
cat("Signif. codes: H0 (independence of residuals) is not rejected at","\n")
cat(usestring)
cat("\n")
cat("\033[1mLinearity\033[22m")
cat("\n")
print(tabstat[6,])
cat("\n")
cat("Signif. codes: H0 (no conditional heteroscedasticity of residuals) is not rejected at","\n")
cat(usestring)
invisible(x)
}
|
#' @aliases NULL
#' @import purrr
#' @import stats
#' @import utils
#' @importFrom readr read_csv
#' @importFrom furrr future_map
#' @importFrom magrittr %>%
#' @details
#' Linear Regression with Little Bag of Bootstraps
"_PACKAGE"
#' @details
#' Regression models with Little Bag of Bootstraps
"_PACKAGE"
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# from https://github.com/jennybc/googlesheets/blob/master/R/googlesheets.R
utils::globalVariables(c("."))
#' global read_data function
#' @param folder directory containing the csv files to load
#' @return a list with one data frame per csv file found in `folder`
read_data <- function(folder){
  # List every file ending in "csv" and read each one
  csv_names <- list.files(folder, pattern = "csv$")
  map(file.path(folder, csv_names), read_csv)
}
# function 1: linear regression
#' linear regression using LBB
#' @param formula regression formula
#' @param data data frame, or a directory of csv files (one subsample per file)
#' @param m splitted data to m parts, default 10 splits (ignored when `data` is a directory)
#' @param B numbers of bootstrap, default 5000
#' @return an object of class \code{blblm} holding the per-subsample bootstrap
#'   estimates and the model formula
#' @export
blblm <- function(formula, data, m = 10, B = 5000) {
  # Accept a directory of csv files, consistent with par_blblm/blbglm
  if (is.character(data)) {
    data_list <- read_data(data)
  } else {
    data_list <- split_data(data, m)
  }
  # BLB bootstraps each subsample with n = size of the FULL dataset; for a
  # data-frame input this equals nrow(data), preserving the previous behaviour
  n <- sum(vapply(data_list, nrow, numeric(1)))
  # regression on each subsample
  estimates <- map(
    data_list,
    ~ lm_each_subsample(formula = formula, data = ., n = n, B = B))
  # residuals / results container
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blblm"
  invisible(res)
}
#function 2: linear regression with parallelization
#' linear regression using LBB, with parallelization
#' @param formula regression model
#' @param data data frame, or a directory of csv files (one subsample per file)
#' @param m number of splits (ignored when `data` is a directory)
#' @param B number of bootstraps
#' @return an object of class \code{blblm}
#' @export
par_blblm <- function(formula, data, m = 10, B = 5000) {
  if (is.character(data)) {
    data_list <- read_data(data)
  } else {
    data_list <- split_data(data, m)
  }
  # fixed: BLB requires n to be the size of the FULL dataset; the previous
  # code passed nrow(.) (the subsample size), inconsistent with blblm()
  n <- sum(vapply(data_list, nrow, numeric(1)))
  # linear regression, one future per subsample
  estimates <- future_map(
    data_list,
    ~ lm_each_subsample(formula = formula, data = ., n = n, B = B))
  # store estimates and formula
  res <- list(estimates = estimates, formula = formula)
  # assign class for further investigation
  class(res) <- "blblm"
  invisible(res)
}
# function 3: generalized linear regression
#' @param formula regression model
#' @param data data frame
#' @param m number of splits
#' @param B number of bootstrap
#' @param family some glm family
#' @export
blbglm <- function(formula, data, m = 10, B = 5000, family) {
  # `data` is either a directory path (character) of CSV files, or a data
  # frame to split into m subsamples.
  # BUG FIX: is.character() instead of `class(data) == "character"`; class()
  # may return a length > 1 vector (tibbles), which errors in if() on
  # R >= 4.2.
  if (is.character(data)) {
    data_list <- read_data(data)
  }
  else {
    data_list <- split_data(data, m)
  }
  # Fit B bootstrap GLMs on each subset; pass `family` by name for clarity
  # and to stay robust against signature reordering.
  estimates <- map(
    data_list,
    ~ glm_each_subsample(formula = formula, data = ., n = nrow(.), B = B,
                         family = family))
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blbglm"
  invisible(res)
}
# function 4
#' generalized linear regression with parrel
#' @param formula regression model
#' @param data data frame
#' @param m number of splits
#' @param B number of bootstraps
#' @param family some glm family to use
#' @export
par_blbglm <- function(formula, data, m = 10, B = 5000, family) {
  # `data` is either a directory path (character) of CSV files, or a data
  # frame to split into m subsamples.
  # BUG FIX: is.character() instead of `class(data) == "character"` —
  # class() can be length > 1 (tibbles), which errors in if() on R >= 4.2.
  if (is.character(data)) {
    data_list <- read_data(data)
  }
  else {
    data_list <- split_data(data, m)
  }
  # Fit B bootstrap GLMs on each subset in parallel via furrr.
  estimates <- future_map(
    data_list,
    ~ glm_each_subsample(formula = formula, data = ., n = nrow(.), B = B,
                         family = family))
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blbglm"
  invisible(res)
}
#' split data into m parts of approximated equal sizes
#' @param data data frame
#' @param m number of splits
split_data <- function(data, m) {
  # Assign each row to one of m groups uniformly at random, then split the
  # data frame by that group id.  Groups are only approximately equal-sized.
  group_id <- sample.int(m, nrow(data), replace = TRUE)
  split(data, group_id)
}
##########################the following three are about lm
#' lm for each subsample
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to use
#' @param B numbers of bootstrap
lm_each_subsample <- function(formula, data, n, B) {
  # Repeat the single-bootstrap fit B times; always return a plain list
  # (the index argument is unused, matching replicate(simplify = FALSE)).
  lapply(seq_len(B), function(b) lm_each_boot(formula, data, n))
}
#' compute lm for each bootstrap
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to draw and use
lm_each_boot <- function(formula, data, n) {
  # Draw one multinomial vector of frequency weights: n total draws spread
  # uniformly over the rows of this subsample (the BLB resampling step).
  boot_freqs <- rmultinom(1, n, rep(1, nrow(data)))
  lm1(formula, data, boot_freqs)
}
#' lm for each bootstrap, specifying frequency
#' @param formula regression model
#' @param data data frame
#' @param freqs weights for each linear regressor
lm1 <- function(formula, data, freqs) {
  # Re-home the formula in this function's environment so that lm() resolves
  # `freqs` (the bootstrap frequency weights) here, rather than in the
  # formula's original closure where a different variable of the same name
  # could be picked up by mistake.
  environment(formula) <- environment()
  fit <- lm(formula, data, weights = freqs)
  # Keep only the statistics the BLB procedure needs from each fit.
  list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
##########################the following three are about glm
#' lm for each subsample
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to use
#' @param B numbers of bootstrap
#' @param family glm family
glm_each_subsample <- function(formula, data, n, B, family) {
  # Repeat the single-bootstrap GLM fit B times, returning a plain list
  # (equivalent to replicate(..., simplify = FALSE)).
  lapply(seq_len(B), function(b) glm_each_boot(formula, data, n, family))
}
#' compute glm for each bootstrap
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to draw and use
#' @param family glm family
glm_each_boot <- function(formula, data, n, family) {
  # One BLB resample: multinomial frequency weights totalling n draws,
  # spread uniformly over the rows of this subsample.
  boot_freqs <- rmultinom(1, n, rep(1, nrow(data)))
  glm1(formula, data, boot_freqs, family)
}
#' glm for each bootstrap, specifying frequency
#' @param formula regression model
#' @param data data frame
#' @param freqs weights for each linear regressor
#' @param family glm family
glm1 <- function(formula, data, freqs, family) {
  # Re-home the formula in this function's environment so that glm() resolves
  # `freqs` (the bootstrap frequency weights) here, rather than in the
  # formula's original closure where a same-named variable could shadow it.
  environment(formula) <- environment()
  fit <- glm(formula, data,weights = freqs, family = family)
  # Keep only the statistics the BLB procedure needs from each fit.
  list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
#' compute the coefficients from fit
#' @param fit regression result fit
blbcoef <- function(fit) {
  # Thin wrapper: delegate to the generic coefficient extractor.
  stats::coef(fit)
}
#' compute sigma from fit
#' @param fit regression result fit
blbsigma <- function(fit) {
  # Weighted residual standard deviation with a degrees-of-freedom
  # correction: sqrt( sum(w * e^2) / (sum(w) - rank) ).
  # Relies on the fit having been made with weights and model = TRUE
  # (lm/glm defaults), so fit$weights and fit$model are available.
  rank_p <- fit$rank
  response <- model.extract(fit$model, "response")
  resid_vec <- fitted(fit) - response
  wts <- fit$weights
  sqrt(sum(wts * resid_vec^2) / (sum(wts) - rank_p))
}
#' @export
#' @method print blblm
#' @param x regression result fit
#' @param ... other customized arguments
print.blblm <- function(x, ...) {
  # Show the model formula as stored on the fitted object, then a newline.
  formula_text <- capture.output(x$formula)
  cat("blblm model:", formula_text)
  cat("\n")
}
#' complute sigma for bootstrap regression fit
#' @export
#' @method sigma blblm
#' @param object LBB regression
#' @param confidence logical/boolean value
#' @param level overall intented confidence level for sigma's CI
#' @param ... other customized arguments
sigma.blblm <- function(object, confidence = FALSE, level = 0.95, ...) {
  est <- object$estimates
  # Point estimate: average sigma over bootstrap replicates within each
  # subsample, then over subsamples.
  sigma <- mean(map_dbl(est, ~ mean(map_dbl(., "sigma"))))
  if (confidence) {
    # BUG FIX: the interval width must follow the `level` argument; the
    # previous code hard-coded `1 - 0.95` and silently ignored `level`.
    alpha <- 1 - level
    # Percentile interval of the bootstrap sigmas, averaged over subsamples.
    limits <- est %>%
      map_mean(~ quantile(map_dbl(., "sigma"), c(alpha / 2, 1 - alpha / 2))) %>%
      set_names(NULL)
    return(c(sigma = sigma, lwr = limits[1], upr = limits[2]))
  } else {
    return(sigma)
  }
}
#' coefficients for bootstrap lm
#' @export
#' @method coef blblm
#' @param object fit
#' @param ... arguments
coef.blblm <- function(object, ...) {
  # Average the bootstrap coefficient vectors within each subsample
  # (columns of map_cbind), then average those means across subsamples.
  per_subsample <- lapply(object$estimates,
                          function(boots) rowMeans(map_cbind(boots, "coef")))
  Reduce(`+`, per_subsample) / length(per_subsample)
}
#' confidence interval for each terms
#' @export
#' @method confint blblm
#' @param object fit
#' @param parm boolean
#' @param level confidence level
#' @param ... arguments
confint.blblm <- function(object, parm = NULL, level = 0.95, ...) {
  # Default to every term of the model formula when no terms are requested.
  if (is.null(parm)) {
    parm <- attr(terms(object$formula), "term.labels")
  }
  alpha <- 1 - level
  est <- object$estimates
  # For each requested term: take the percentile interval of its bootstrap
  # replicates within each subsample, then average those intervals across
  # subsamples; stack one row per term.
  out <- map_rbind(parm, function(p) {
    map_mean(est, ~ map_dbl(., list("coef", p)) %>% quantile(c(alpha / 2, 1 - alpha / 2)))
  })
  # A single term yields a bare vector; coerce to a one-row matrix so the
  # return shape is consistent.
  if (is.vector(out)) {
    out <- as.matrix(t(out))
  }
  dimnames(out)[[1]] <- parm
  out
}
#' @export
#' @method predict blblm
#' @param object fit
#' @param new_data data frame, list or environment
#' @param confidence boolean
#' @param level confidence level
#' @param ... customized arguments
predict.blblm <- function(object, new_data, confidence = FALSE, level = 0.95, ...) {
  est <- object$estimates
  # Rebuild the design matrix for new_data from the stored model terms.
  X <- model.matrix(reformulate(attr(terms(object$formula), "term.labels")), new_data)
  if (confidence) {
    # Per observation: point prediction plus percentile confidence limits
    # over the bootstrap replicates, averaged across subsamples.
    map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>%
      apply(1, mean_lwr_upr, level = level) %>%
      t())
  } else {
    # Point predictions only: mean over bootstrap replicates, then averaged
    # across subsamples.
    map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>% rowMeans())
  }
}
mean_lwr_upr <- function(x, level = 0.95) {
  # Summarise bootstrap draws as c(fit = mean, lwr, upr) using the
  # equal-tailed percentile interval at the requested level.
  alpha <- 1 - level
  ci <- quantile(x, c(alpha / 2, 1 - alpha / 2))
  c(fit = mean(x), setNames(ci, c("lwr", "upr")))
}
# Apply .f over .x and average the results elementwise.
# NOTE: purrr::map() is required here (NOT lapply): callers pass extractor
# shorthands such as "coef" or list("coef", p) as .f, which only purrr
# interprets as pluck-style extraction.
map_mean <- function(.x, .f, ...) {
  (map(.x, .f, ...) %>% reduce(`+`)) / length(.x)
}
# Apply .f over .x and bind the results together as columns.
map_cbind <- function(.x, .f, ...) {
  map(.x, .f, ...) %>% reduce(cbind)
}
# Apply .f over .x and bind the results together as rows.
map_rbind <- function(.x, .f, ...) {
  map(.x, .f, ...) %>% reduce(rbind)
}
| /R/blblm.R | no_license | ivahuang/blblm--2020STA141C | R | false | false | 8,809 | r | #' @aliases NULL
#' @import purrr
#' @import stats
#' @import utils
#' @importFrom readr read_csv
#' @importFrom furrr future_map
#' @importFrom magrittr %>%
#' @details
#' Linear Regression with Little Bag of Bootstraps
"_PACKAGE"
#' @details
#' Regression models with Little Bag of Bootstraps
"_PACKAGE"
## quiets concerns of R CMD check re: the .'s that appear in pipelines
# from https://github.com/jennybc/googlesheets/blob/master/R/googlesheets.R
utils::globalVariables(c("."))
#' global read_data function
#' @param folder directory
read_data = function(folder){
file.path(folder, list.files(folder, pattern = "csv$")) %>%
map(read_csv)
}
# function 1: linear regression
#' linear regression using LBB
#' @param formula regression formula
#' @param data data frame
#' @param m splitted data to m parts, default 10 splits
#' @param B numbers of bootstrap, default 5000
#' @export
blblm <- function(formula, data, m = 10, B = 5000) {
data_list <- split_data(data, m)
#regression
estimates <- map(
data_list,
~ lm_each_subsample(formula = formula, data = ., n = nrow(data), B = B))
#residuals
res <- list(estimates = estimates, formula = formula)
class(res) <- "blblm"
invisible(res)
}
#function 2: linear regression wl parrael
#' linear regression using LLB, with parallelization
#' @param formula regression model
#' @param data data frame
#' @param m number of splits
#' @param B number of bootstraps
#' @export
par_blblm <- function(formula, data, m = 10, B = 5000) {
  # `data` is either a directory path (character) holding CSV files, or an
  # in-memory data frame to be split into m subsamples.
  # BUG FIX: use is.character() instead of `class(data) == "character"`.
  # class() can return a vector of length > 1 (e.g. for tibbles), which
  # makes the if() condition non-scalar — an error on R >= 4.2.
  if (is.character(data)) {
    data_list <- read_data(data)
  }
  else {
    data_list <- split_data(data, m)
  }
  # Fit the bootstrap linear models on each subset in parallel via furrr.
  estimates <- future_map(
    data_list,
    ~ lm_each_subsample(formula = formula, data = ., n = nrow(.), B = B))
  # Store estimates and formula; class tag enables S3 dispatch.
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blblm"
  invisible(res)
}
# function 3: generalized linear regression
#' @param formula regression model
#' @param data data frame
#' @param m number of splits
#' @param B number of bootstrap
#' @param family some glm family
#' @export
blbglm <- function(formula, data, m = 10, B = 5000, family) {
  # `data` is either a directory path (character) of CSV files, or a data
  # frame to split into m subsamples.
  # BUG FIX: is.character() instead of `class(data) == "character"`; class()
  # may return a length > 1 vector (tibbles), which errors in if() on
  # R >= 4.2.
  if (is.character(data)) {
    data_list <- read_data(data)
  }
  else {
    data_list <- split_data(data, m)
  }
  # Fit B bootstrap GLMs on each subset; pass `family` by name for clarity.
  estimates <- map(
    data_list,
    ~ glm_each_subsample(formula = formula, data = ., n = nrow(.), B = B,
                         family = family))
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blbglm"
  invisible(res)
}
# function 4
#' generalized linear regression with parrel
#' @param formula regression model
#' @param data data frame
#' @param m number of splits
#' @param B number of bootstraps
#' @param family some glm family to use
#' @export
par_blbglm <- function(formula, data, m = 10, B = 5000, family) {
  # `data` is either a directory path (character) of CSV files, or a data
  # frame to split into m subsamples.
  # BUG FIX: is.character() instead of `class(data) == "character"` —
  # class() can be length > 1 (tibbles), which errors in if() on R >= 4.2.
  if (is.character(data)) {
    data_list <- read_data(data)
  }
  else {
    data_list <- split_data(data, m)
  }
  # Fit B bootstrap GLMs on each subset in parallel via furrr.
  estimates <- future_map(
    data_list,
    ~ glm_each_subsample(formula = formula, data = ., n = nrow(.), B = B,
                         family = family))
  res <- list(estimates = estimates, formula = formula)
  class(res) <- "blbglm"
  invisible(res)
}
#' split data into m parts of approximated equal sizes
#' @param data data frame
#' @param m number of splits
split_data <- function(data, m) {
idx <- sample.int(m, nrow(data), replace = TRUE)
data %>% split(idx)
}
##########################the following three are about lm
#' lm for each subsample
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to use
#' @param B numbers of bootstrap
lm_each_subsample <- function(formula, data, n, B) {
replicate(B, lm_each_boot(formula, data, n), simplify = FALSE)
}
#' compute lm for each bootstrap
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to draw and use
lm_each_boot <- function(formula, data, n) {
freqs <- rmultinom(1, n, rep(1, nrow(data)))
lm1(formula, data, freqs)
}
#' lm for each bootstrap, specifying frequency
#' @param formula regression model
#' @param data data frame
#' @param freqs weights for each linear regressor
lm1 <- function(formula, data, freqs) {
# drop the original closure of formula,
# otherwise the formula will pick a wront variable from the global scope.
environment(formula) <- environment()
fit <- lm(formula, data, weights = freqs)
list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
##########################the following three are about glm
#' lm for each subsample
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to use
#' @param B numbers of bootstrap
#' @param family glm family
glm_each_subsample <- function(formula, data, n, B, family) {
replicate(B, glm_each_boot(formula, data, n,family), simplify = FALSE)
}
#' compute glm for each bootstrap
#' @param formula regression model
#' @param data data frame
#' @param n how many vectors to draw and use
#' @param family glm family
glm_each_boot <- function(formula, data, n, family) {
freqs <- rmultinom(1, n, rep(1, nrow(data)))
glm1(formula, data, freqs, family)
}
#' glm for each bootstrap, specifying frequency
#' @param formula regression model
#' @param data data frame
#' @param freqs weights for each linear regressor
#' @param family glm family
glm1 <- function(formula, data, freqs, family) {
# drop the original closure of formula,
# otherwise the formula will pick a wront variable from the global scope.
environment(formula) <- environment()
fit <- glm(formula, data,weights = freqs, family = family)
list(coef = blbcoef(fit), sigma = blbsigma(fit))
}
#' compute the coefficients from fit
#' @param fit regression result fit
blbcoef <- function(fit) {
coef(fit)
}
#' compute sigma from fit
#' @param fit regression result fit
blbsigma <- function(fit) {
p <- fit$rank
y <- model.extract(fit$model, "response")
e <- fitted(fit) - y
w <- fit$weights
sqrt(sum(w * (e^2)) / (sum(w) - p))
}
#' @export
#' @method print blblm
#' @param x regression result fit
#' @param ... other customized arguments
print.blblm <- function(x, ...) {
cat("blblm model:", capture.output(x$formula))
cat("\n")
}
#' complute sigma for bootstrap regression fit
#' @export
#' @method sigma blblm
#' @param object LBB regression
#' @param confidence logical/boolean value
#' @param level overall intented confidence level for sigma's CI
#' @param ... other customized arguments
sigma.blblm <- function(object, confidence = FALSE, level = 0.95, ...) {
  est <- object$estimates
  # Point estimate: average sigma over bootstrap replicates within each
  # subsample, then over subsamples.
  sigma <- mean(map_dbl(est, ~ mean(map_dbl(., "sigma"))))
  if (confidence) {
    # BUG FIX: the interval width must follow the `level` argument; the
    # previous code hard-coded `1 - 0.95` and silently ignored `level`.
    alpha <- 1 - level
    # Percentile interval of the bootstrap sigmas, averaged over subsamples.
    limits <- est %>%
      map_mean(~ quantile(map_dbl(., "sigma"), c(alpha / 2, 1 - alpha / 2))) %>%
      set_names(NULL)
    return(c(sigma = sigma, lwr = limits[1], upr = limits[2]))
  } else {
    return(sigma)
  }
}
#' coefficients for bootstrap lm
#' @export
#' @method coef blblm
#' @param object fit
#' @param ... arguments
coef.blblm <- function(object, ...) {
est <- object$estimates
map_mean(est, ~ map_cbind(., "coef") %>% rowMeans())
}
#' confidence interval for each terms
#' @export
#' @method confint blblm
#' @param object fit
#' @param parm boolean
#' @param level confidence level
#' @param ... arguments
confint.blblm <- function(object, parm = NULL, level = 0.95, ...) {
if (is.null(parm)) {
parm <- attr(terms(object$formula), "term.labels")
}
alpha <- 1 - level
est <- object$estimates
out <- map_rbind(parm, function(p) {
map_mean(est, ~ map_dbl(., list("coef", p)) %>% quantile(c(alpha / 2, 1 - alpha / 2)))
})
if (is.vector(out)) {
out <- as.matrix(t(out))
}
dimnames(out)[[1]] <- parm
out
}
#' @export
#' @method predict blblm
#' @param object fit
#' @param new_data data frame, list or environment
#' @param confidence boolean
#' @param level confidence level
#' @param ... customized arguments
predict.blblm <- function(object, new_data, confidence = FALSE, level = 0.95, ...) {
est <- object$estimates
X <- model.matrix(reformulate(attr(terms(object$formula), "term.labels")), new_data)
if (confidence) {
map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>%
apply(1, mean_lwr_upr, level = level) %>%
t())
} else {
map_mean(est, ~ map_cbind(., ~ X %*% .$coef) %>% rowMeans())
}
}
mean_lwr_upr <- function(x, level = 0.95) {
alpha <- 1 - level
c(fit = mean(x), quantile(x, c(alpha / 2, 1 - alpha / 2)) %>% set_names(c("lwr", "upr")))
}
map_mean <- function(.x, .f, ...) {
(map(.x, .f, ...) %>% reduce(`+`)) / length(.x)
}
map_cbind <- function(.x, .f, ...) {
map(.x, .f, ...) %>% reduce(cbind)
}
map_rbind <- function(.x, .f, ...) {
map(.x, .f, ...) %>% reduce(rbind)
}
|
## Read a text file and translate to edge descriptions
##' Read and write text representations of models
##'
##' The functions \code{read.digraph} and \code{parse.digraph} read a model
##' description from a text file and a string respectively, while
##' \code{write.digraph} writes a text representation of the model to and file.
##'
##' These functions recognize the following text format. Each line corresponds
##' to an edge, and must consist of two node labels separated by an arrow. An
##' arrow consists of one of the character sequences "<","*","<>" or "" on the
##' left and ">","*","<>" or "" on the right, separated by a sequence of dashes
##' "-". The number of dashes used in the arrow defines the group number of the
##' edge.
##'
##' @title Text Representations of Models
##' @param file the name of the file to read or write
##' @param lines a string representation of the model
##' @param labels the sequence of labels to use for the nodes
##' @param edges an edge list.
##' @return The \code{write.digraph} function invisibly returns the
##' text that was written to the file.
##'
##' The functions \code{read.digraph} and \code{parse.digraph} return an
##' edge list - a data frame with columns
##'
##' \item{\code{From}}{a factor indicating the origin of each edge (the node
##' that effects)}
##' \item{\code{To}}{a factor indicating the destination of each edge (the node
##' that is effected)}
##' \item{\code{Group}}{an integer vector that indicates the group each edge
##' belongs to}
##' \item{\code{Type}}{a factor indicating the edge type -
##' "N" (negative) ,"P" (positive),"U" (unknown) or "Z" (zero)}
##' \item{\code{Pair}}{an integer vector that indicates the pairing of
##' directed edges}
##'
##' Each edge of the text specification is separated into two directed edges,
##' and every row of an edge list corresponds to a single directed edge.
##'
##' @examples
##' edges <- parse.digraph(c("A <-* B","C *-> A","C <- D",
##' "D -> B","B *--* C","A <--- D"))
##' edges
##' deparse.digraph(edges)
##' @export
read.digraph <- function(file,labels=NULL) {
  # Read the model description one line per edge and delegate all parsing
  # to parse.digraph().
  parse.digraph(readLines(file),labels=labels)
}
##' @rdname read.digraph
##' @export
parse.digraph <- function(lines,labels=NULL) {
  ## Attempt to parse specifications.  Each line must look like
  ## "<from> <tail><dashes><head> <to>", e.g. "A <-* B"; captures are:
  ## 2 = from label, 3 = tail symbol, 4 = dashes, 5 = head symbol, 6 = to.
  m <- regexec("^([^\\*<>-]+)(\\*|<|<>)?(-+)(\\*|>|<>)?([^\\*<>-]+)$",lines)
  err <- sapply(m,"[[",1)== -1
  if(any(err)) {
    ## Warn about (and drop) lines the regex could not match.
    warning("Could not parse edges: ",paste(lines[err],collapse=", "))
    lines <- lines[!err]
    m <- m[!err]
  }
  m <- regmatches(lines,m)
  ## Trim surrounding whitespace from the node labels.
  from <- gsub("^\\s+|\\s+$","",lapply(m,"[[",2))
  to <- gsub("^\\s+|\\s+$","",lapply(m,"[[",6))
  tail <- sapply(m,"[[",3)
  line <- sapply(m,"[[",4)
  head <- sapply(m,"[[",5)
  ## An arrow with neither head nor tail symbol denotes a zero edge.
  if(any(head=="" & tail==""))
    warning("Zero edges specified: ",
            paste(lines[head=="" & tail==""],collapse=", "))
  ## Construct edge dataframe; default node ordering is alphabetical.
  if(is.null(labels)) labels <- sort(unique(c(from,to)))
  from <- factor(from,levels=labels)
  to <- factor(to,levels=labels)
  ## Group number = number of dashes minus one.
  group <- nchar(line)-1
  ## Arrow symbols map to edge types: * -> N, </> -> P, <> -> U, "" -> Z.
  type <- c("N","P","U","Z")
  backward.type <- type[match(tail,c("*","<","<>",""))]
  forward.type <- type[match(head,c("*",">","<>",""))]
  ## Each text edge becomes two directed edges sharing a Pair id.
  edges <- rbind(data.frame(From=from,
                            To=to,
                            Group=group,
                            Type=factor(forward.type,type),
                            Pair=seq_along(lines)),
                 data.frame(From=to,
                            To=from,
                            Group=group,
                            Type=factor(backward.type,type),
                            Pair=seq_along(lines)))
  ## Drop zero weight edges
  edges <- edges[edges$Type!="Z",,drop=FALSE]
  ## Add node labels
  attr(edges,"node.labels") <- labels
  edges
}
##' @rdname read.digraph
##' @export
deparse.digraph <- function(edges) {
  ## Render one Pair/Group of directed edges back into "A <-* B" text form.
  make.edge <- function(edge) {
    ## Order so the forward direction (P before N before U) comes first.
    edge <- edge[order(match(edge$Type,c("P","N","U","Z"),4),
                       edge$From),]
    from <- edge$From[1]
    to <- edge$To[1]
    fwd <- (edge$From==from & edge$To==to)
    ## Group number n renders as n+1 dashes.
    line <- paste(rep("-",edge$Group[1]+1),collapse="")
    ## Map edge types back to arrow symbols; nomatch = 4 yields "" for a
    ## missing direction (a dropped zero edge).
    symb <- c(">","*","<>","")
    head <- symb[match(edge$Type[fwd],c("P","N","U","Z"),4)]
    symb <- c("<","*","<>","")
    tail <- symb[match(edge$Type[!fwd],c("P","N","U","Z"),4)]
    paste(from," ",tail,line,head," ",to,sep="")
  }
  sapply(split(edges,interaction(edges$Pair,edges$Group,drop=TRUE)),make.edge)
}
##' @rdname read.digraph
##' @export
write.digraph <- function(edges,file="") {
  # Render the edge list to its text form, write one edge per line, and
  # return the text invisibly so the call stays silent at the prompt.
  text_repr <- deparse.digraph(edges)
  cat(text_repr, sep = "\n", file = file)
  invisible(text_repr)
}
##' Parse a text representation of (directed) edges, return the index of the
##' directed edge within the edge list.
##'
##' Each directed edge is represented as a string consisting of two node labels
##' separated by an arrow, where the arrow consists of a sequence of dashes "-"
##' followed by one of the character sequences ">","*","<>". The number of
##' dashes used in the arrow is ignored.
##'
##' @title Indices of (Directed) Edges
##' @param lines a vector of strings representing directed edges
##' @param edges an edge list
##' @return the indices of the directed edges within the edge list
##' @examples
##' ## Sample model
##' edges <- parse.digraph(c(
##' "E *-> D",
##' "D *-> C",
##' "C -> E",
##' "E *-> B",
##' "B *-> A",
##' "A -> E",
##' "D --> B"))
##' edges <- enforce.limitation(edges)
##' parse.edge(c("E->D","D-*E","A-*B"),edges)
##' @export
parse.edge <- function(lines,edges) {
  ## Parse lines of the form "<from><dashes><head><to>"; captures are:
  ## 2 = from label, 3 = dashes, 4 = head symbol ("*" or ">"), 5 = to.
  ## NOTE(review): the regex only accepts "*" or ">" as the head, so the
  ## "<>" case listed in the match() below appears unreachable — confirm.
  m <- regexec("([^\\*<>-]+)(-+)(\\*|>)?([^\\*<>-]+)",lines)
  err <- sapply(m,"[[",1)== -1
  if(any(err)) {
    ## Warn about (and drop) lines the regex could not match.
    warning("Could not parse constraints: ",paste(lines[err],collapse=", "))
    lines <- lines[!err]
    m <- m[!err]
  }
  m <- regmatches(lines,m)
  ## Trim surrounding whitespace from the node labels.
  from <- gsub("^\\s+|\\s+$","",lapply(m,"[[",2))
  to <- gsub("^\\s+|\\s+$","",lapply(m,"[[",5))
  line <- sapply(m,"[[",3)
  head <- sapply(m,"[[",4)
  ## Extract edges: map parsed labels/symbols onto the edge list's factors.
  labels <- node.labels(edges)
  from <- factor(from,levels=labels)
  to <- factor(to,levels=labels)
  group <- nchar(line)-1
  type <- c("N","P","U","Z")
  type <- type[match(head,c("*",">","<>",""))]
  ## For each requested edge, find the first matching row in the edge list.
  es <- mapply(function(from,to,type)
    match(TRUE,edges$From==from & edges$To==to & edges$Type==type),
    from,to,type)
  if(any(is.na(es)))
    warning("Encountered undefined edges: ",
            paste(lines[is.na(es)],collapse=", "))
  es
}
##' Write a DOT specification of the model.
##'
##' Write a DOT specification of the model in a form suitable for use with
##' \code{grViz} from \pkg{DiagrammeR}.
##'
##' @title Export to DOT
##' @param edges An edge list
##' @param name The name of the digraph
##' @param fontsize Fontsize for node labels.
##' @param node.style The node style.
##' @param node.shape The node shape.
##' @param node.color The node color.
##' @param edge.color The edge color.
##' @return Returns a string.
##' @export
grviz.digraph <- function(edges,name="web",
                          fontsize=10,node.style="filled",
                          node.shape="oval",node.color="DarkOrange",
                          edge.color="DarkGrey") {
  ## Render one Pair of directed edges as a single DOT edge statement with
  ## explicit arrowhead/arrowtail shapes.
  make.edge <- function(edge) {
    edge <- edge[order(match(edge$Type,c("P","N","U","Z"),4),
                       edge$From),]
    from <- edge$From[1]
    to <- edge$To[1]
    fwd <- (edge$From==from & edge$To==to)
    ## Line styles by group; arrow shapes by edge type (P, N, U, Z).
    ## NOTE(review): the arrowtail match() below has no nomatch default,
    ## unlike the arrowhead one — a "Z" tail would yield NA; confirm intended.
    ln <- c("solid","dashed","dotted","bold")
    hd <- c("normal","dot","diamond","none")
    tl <- c("normal","dot","diamond","none")
    paste(
      "  ",
      from[1],"->",to," [",
      "style=",ln[edge$Group[1]+1],
      ",",
      "arrowtail=",
      if(any(!fwd)) tl[match(edge$Type[!fwd],c("P","N","U","Z"))] else "none",
      ",",
      "arrowhead=",
      if(any(fwd)) hd[match(edge$Type[fwd],c("P","N","U","Z"),4)] else "none",
      "]",
      sep="")
  }
  ## Node definitions: one DOT line per node, recycling the style vectors.
  ntxt <- paste("  ",
                levels(edges$From),
                " [",
                "style=",rep(node.style,length.out=nlevels(edges$From)),
                ",",
                "shape=",rep(node.shape,length.out=nlevels(edges$From)),
                ",",
                "color=",rep(node.color,length.out=nlevels(edges$From)),
                "]",
                sep="",collapse="\n")
  ## Edge definitions: a global edge attribute line, then one line per Pair.
  etxt <- paste("  ",
                "edge [dir=both,color=",edge.color,"]\n",
                paste(sapply(split(edges,edges$Pair),make.edge),collapse="\n"),
                sep="",collapse="")
  ## Graph definition: assemble the final "digraph name { ... }" string.
  paste("digraph ",name," {\n",
        "  ",
        "graph [fontsize=",fontsize,"]\n",
        ntxt,
        "\n\n",
        etxt,
        "\n}\n",
        sep="")
}
| /R/text.R | no_license | SWotherspoon/QPress | R | false | false | 8,702 | r | ## Read a text file and translate to edge descriptions
##' Read and write text representations of models
##'
##' The functions \code{read.digraph} and \code{parse.digraph} read a model
##' description from a text file and a string respectively, while
##' \code{write.digraph} writes a text representation of the model to and file.
##'
##' These functions recognize the following text format. Each line corresponds
##' to an edge, and must consist of two node labels separated by an arrow. An
##' arrow consists of one of the character sequences "<","*","<>" or "" on the
##' left and ">","*","<>" or "" on the right, separated by a sequence of dashes
##' "-". The number of dashes used in the arrow defines the group number of the
##' edge.
##'
##' @title Text Representations of Models
##' @param file the name of the file to read or write
##' @param lines a string representation of the model
##' @param labels the sequence of labels to use for the nodes
##' @param edges an edge list.
##' @return The \code{write.digraph} function invisibly returns the
##' text that was written to the file.
##'
##' The functions \code{read.digraph} and \code{parse.digraph} return an
##' edge list - a data frame with columns
##'
##' \item{\code{From}}{a factor indicating the origin of each edge (the node
##' that effects)}
##' \item{\code{To}}{a factor indicating the destination of each edge (the node
##' that is effected)}
##' \item{\code{Group}}{an integer vector that indicates the group each edge
##' belongs to}
##' \item{\code{Type}}{a factor indicating the edge type -
##' "N" (negative) ,"P" (positive),"U" (unknown) or "Z" (zero)}
##' \item{\code{Pair}}{an integer vector that indicates the pairing of
##' directed edges}
##'
##' Each edge of the text specification is separated into two directed edges,
##' and every row of an edge list corresponds to a single directed edge.
##'
##' @examples
##' edges <- parse.digraph(c("A <-* B","C *-> A","C <- D",
##' "D -> B","B *--* C","A <--- D"))
##' edges
##' deparse.digraph(edges)
##' @export
read.digraph <- function(file,labels=NULL) {
parse.digraph(readLines(file),labels=labels)
}
##' @rdname read.digraph
##' @export
parse.digraph <- function(lines,labels=NULL) {
## Attempt to parse specifications
m <- regexec("^([^\\*<>-]+)(\\*|<|<>)?(-+)(\\*|>|<>)?([^\\*<>-]+)$",lines)
err <- sapply(m,"[[",1)== -1
if(any(err)) {
warning("Could not parse edges: ",paste(lines[err],collapse=", "))
lines <- lines[!err]
m <- m[!err]
}
m <- regmatches(lines,m)
from <- gsub("^\\s+|\\s+$","",lapply(m,"[[",2))
to <- gsub("^\\s+|\\s+$","",lapply(m,"[[",6))
tail <- sapply(m,"[[",3)
line <- sapply(m,"[[",4)
head <- sapply(m,"[[",5)
if(any(head=="" & tail==""))
warning("Zero edges specified: ",
paste(lines[head=="" & tail==""],collapse=", "))
## Construct edge dataframe
if(is.null(labels)) labels <- sort(unique(c(from,to)))
from <- factor(from,levels=labels)
to <- factor(to,levels=labels)
group <- nchar(line)-1
type <- c("N","P","U","Z")
backward.type <- type[match(tail,c("*","<","<>",""))]
forward.type <- type[match(head,c("*",">","<>",""))]
edges <- rbind(data.frame(From=from,
To=to,
Group=group,
Type=factor(forward.type,type),
Pair=seq_along(lines)),
data.frame(From=to,
To=from,
Group=group,
Type=factor(backward.type,type),
Pair=seq_along(lines)))
## Drop zero weight edges
edges <- edges[edges$Type!="Z",,drop=FALSE]
## Add node labels
attr(edges,"node.labels") <- labels
edges
}
##' @rdname read.digraph
##' @export
deparse.digraph <- function(edges) {
make.edge <- function(edge) {
edge <- edge[order(match(edge$Type,c("P","N","U","Z"),4),
edge$From),]
from <- edge$From[1]
to <- edge$To[1]
fwd <- (edge$From==from & edge$To==to)
line <- paste(rep("-",edge$Group[1]+1),collapse="")
symb <- c(">","*","<>","")
head <- symb[match(edge$Type[fwd],c("P","N","U","Z"),4)]
symb <- c("<","*","<>","")
tail <- symb[match(edge$Type[!fwd],c("P","N","U","Z"),4)]
paste(from," ",tail,line,head," ",to,sep="")
}
sapply(split(edges,interaction(edges$Pair,edges$Group,drop=TRUE)),make.edge)
}
##' @rdname read.digraph
##' @export
write.digraph <- function(edges,file="") {
txt <- deparse.digraph(edges)
cat(txt,sep="\n",file=file)
invisible(txt)
}
##' Parse a text representation of (directed) edges, return the index of the
##' directed edge within the edge list.
##'
##' Each directed edge is represented as a string consisting of two node labels
##' separated by an arrow, where the arrow consists of a sequence of dashes "-"
##' followed by one of the character sequences ">","*","<>". The number of
##' dashes used in the arrow is ignored.
##'
##' @title Indices of (Directed) Edges
##' @param lines a vector of strings representing directed edges
##' @param edges an edge list
##' @return the indices of the directed edges within the edge list
##' @examples
##' ## Sample model
##' edges <- parse.digraph(c(
##' "E *-> D",
##' "D *-> C",
##' "C -> E",
##' "E *-> B",
##' "B *-> A",
##' "A -> E",
##' "D --> B"))
##' edges <- enforce.limitation(edges)
##' parse.edge(c("E->D","D-*E","A-*B"),edges)
##' @export
parse.edge <- function(lines,edges) {
## Parse lines
m <- regexec("([^\\*<>-]+)(-+)(\\*|>)?([^\\*<>-]+)",lines)
err <- sapply(m,"[[",1)== -1
if(any(err)) {
warning("Could not parse constraints: ",paste(lines[err],collapse=", "))
lines <- lines[!err]
m <- m[!err]
}
m <- regmatches(lines,m)
from <- gsub("^\\s+|\\s+$","",lapply(m,"[[",2))
to <- gsub("^\\s+|\\s+$","",lapply(m,"[[",5))
line <- sapply(m,"[[",3)
head <- sapply(m,"[[",4)
## Extract edges
labels <- node.labels(edges)
from <- factor(from,levels=labels)
to <- factor(to,levels=labels)
group <- nchar(line)-1
type <- c("N","P","U","Z")
type <- type[match(head,c("*",">","<>",""))]
es <- mapply(function(from,to,type)
match(TRUE,edges$From==from & edges$To==to & edges$Type==type),
from,to,type)
if(any(is.na(es)))
warning("Encountered undefined edges: ",
paste(lines[is.na(es)],collapse=", "))
es
}
##' Write a DOT specification of the model.
##'
##' Write a DOT specification of the model in a form suitable for use with
##' \code{grViz} from \pkg{DiagrammeR}.
##'
##' @title Export to DOT
##' @param edges An edge list
##' @param name The name of the digraph
##' @param fontsize Fontsize for node labels.
##' @param node.style The node style.
##' @param node.shape The node shape.
##' @param node.color The node color.
##' @param edge.color The edge color.
##' @return Returns a string.
##' @export
grviz.digraph <- function(edges, name = "web",
                          fontsize = 10, node.style = "filled",
                          node.shape = "oval", node.color = "DarkOrange",
                          edge.color = "DarkGrey") {

  ## Render the DOT statement for one edge pair.  Rows are ordered so
  ## that the strongest arrow type (P before N before U before Z, then
  ## the From node) decides which direction is written as the statement's
  ## head; the other direction, if present, supplies the tail arrow.
  render_pair <- function(pair) {
    pair <- pair[order(match(pair$Type, c("P", "N", "U", "Z"), 4),
                       pair$From), ]
    head_node <- pair$From[1]
    tail_node <- pair$To[1]
    forward <- pair$From == head_node & pair$To == tail_node
    line_style <- c("solid", "dashed", "dotted", "bold")[pair$Group[1] + 1]
    arrow_shapes <- c("normal", "dot", "diamond", "none")
    tail_arrow <- if (any(!forward)) {
      arrow_shapes[match(pair$Type[!forward], c("P", "N", "U", "Z"))]
    } else {
      "none"
    }
    head_arrow <- if (any(forward)) {
      arrow_shapes[match(pair$Type[forward], c("P", "N", "U", "Z"), 4)]
    } else {
      "none"
    }
    paste0("  ", head_node, "->", tail_node,
           " [style=", line_style,
           ",arrowtail=", tail_arrow,
           ",arrowhead=", head_arrow, "]")
  }

  ## Node definitions, one per line; style/shape/color are recycled to
  ## the number of node levels.
  n_nodes <- nlevels(edges$From)
  node_txt <- paste(
    paste0("  ", levels(edges$From),
           " [style=", rep(node.style, length.out = n_nodes),
           ",shape=", rep(node.shape, length.out = n_nodes),
           ",color=", rep(node.color, length.out = n_nodes), "]"),
    collapse = "\n")

  ## Edge definitions: a global edge-attribute line, then one DOT
  ## statement per edge pair.
  edge_txt <- paste0(
    "  edge [dir=both,color=", edge.color, "]\n",
    paste(sapply(split(edges, edges$Pair), render_pair), collapse = "\n"))

  ## Assemble the complete digraph specification.
  paste0("digraph ", name, " {\n",
         "  graph [fontsize=", fontsize, "]\n",
         node_txt, "\n\n", edge_txt, "\n}\n")
}
|
\name{LGG-v2.0.1}
\alias{ LGG-v2.0.1 }
\docType{data}
\title{ Brain Lower Grade Glioma }
\description{
A document describing the TCGA cancer code
}
\details{
\preformatted{
> experiments( LGG )
ExperimentList class object of length 13:
[1] LGG_CNASeq-20160128: RaggedExperiment with 6360 rows and 104 columns
[2] LGG_CNASNP-20160128: RaggedExperiment with 411918 rows and 1015 columns
[3] LGG_CNVSNP-20160128: RaggedExperiment with 79791 rows and 1015 columns
[4] LGG_GISTIC_AllByGene-20160128: SummarizedExperiment with 24776 rows and 513 columns
[5] LGG_GISTIC_Peaks-20160128: RangedSummarizedExperiment with 46 rows and 513 columns
[6] LGG_GISTIC_ThresholdedByGene-20160128: SummarizedExperiment with 24776 rows and 513 columns
[7] LGG_miRNASeqGene-20160128: SummarizedExperiment with 1046 rows and 526 columns
[8] LGG_mRNAArray-20160128: SummarizedExperiment with 17814 rows and 27 columns
[9] LGG_Mutation-20160128: RaggedExperiment with 9885 rows and 286 columns
[10] LGG_RNASeq2Gene-20160128: SummarizedExperiment with 20501 rows and 530 columns
[11] LGG_RNASeq2GeneNorm-20160128: SummarizedExperiment with 20501 rows and 530 columns
[12] LGG_RPPAArray-20160128: SummarizedExperiment with 201 rows and 435 columns
[13] LGG_Methylation-20160128: SummarizedExperiment with 485577 rows and 530 columns
> rownames( LGG )
CharacterList of length 13
[["LGG_CNASeq-20160128"]] character(0)
[["LGG_CNASNP-20160128"]] character(0)
[["LGG_CNVSNP-20160128"]] character(0)
[["LGG_GISTIC_AllByGene-20160128"]] character(0)
[["LGG_GISTIC_Peaks-20160128"]] 21 22 1 2 23 3 24 25 ... 45 16 17 46 18 47 48
[["LGG_GISTIC_ThresholdedByGene-20160128"]] character(0)
[["LGG_miRNASeqGene-20160128"]] hsa-let-7a-1 hsa-let-7a-2 ... hsa-mir-99b
[["LGG_mRNAArray-20160128"]] ELMO2 CREB3L1 RPS11 PNMA1 ... SNRPD2 AQP7 CTSC
[["LGG_Mutation-20160128"]] character(0)
[["LGG_RNASeq2Gene-20160128"]] A1BG A1CF A2BP1 A2LD1 ... ZZZ3 psiTPTE22 tAKR
...
<3 more elements>
> colnames( LGG )
CharacterList of length 13
[["LGG_CNASeq-20160128"]] TCGA-CS-4938-01B-11D-1891-02 ...
[["LGG_CNASNP-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_CNVSNP-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_AllByGene-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_Peaks-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_ThresholdedByGene-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_miRNASeqGene-20160128"]] TCGA-CS-4938-01B-11R-1895-13 ...
[["LGG_mRNAArray-20160128"]] TCGA-CS-4942-01A-01R-1470-07 ...
[["LGG_Mutation-20160128"]] TCGA-CS-4938-01B-11D-1893-08 ...
[["LGG_RNASeq2Gene-20160128"]] TCGA-CS-4938-01B-11R-1896-07 ...
...
<3 more elements>
Sizes of each ExperimentList element:
assay size.Mb
1 LGG_CNASeq-20160128 0.2 Mb
2 LGG_CNASNP-20160128 11.3 Mb
3 LGG_CNVSNP-20160128 2.4 Mb
4 LGG_GISTIC_AllByGene-20160128 100.5 Mb
5 LGG_GISTIC_Peaks-20160128 0.3 Mb
6 LGG_GISTIC_ThresholdedByGene-20160128 100.4 Mb
7 LGG_miRNASeqGene-20160128 4.4 Mb
8 LGG_mRNAArray-20160128 5.9 Mb
9 LGG_Mutation-20160128 4.2 Mb
10 LGG_RNASeq2Gene-20160128 85.5 Mb
11 LGG_RNASeq2GeneNorm-20160128 85.5 Mb
12 LGG_RPPAArray-20160128 0.8 Mb
13 LGG_Methylation-20160128 75.1 Mb
---------------------------
Overall survival time-to-event summary (in years):
---------------------------
Call: survfit(formula = survival::Surv(colDat$days_to_death/365, colDat$vital_status) ~
-1)
391 observations deleted due to missingness
n events median 0.95LCL 0.95UCL
125.00 125.00 2.23 1.87 2.83
---------------------------
Available sample meta-data:
---------------------------
years_to_birth:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
14.00 32.00 41.00 42.93 53.00 86.00 2
vital_status:
0 1 NA's
389 126 1
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
7 438 814 1219 1547 5166 391
days_to_last_followup:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
-1.0 384.0 629.0 880.1 1147.0 6423.0 127
tumor_tissue_site:
central nervous system NA's
515 1
gender:
female male NA's
230 285 1
date_of_initial_pathologic_diagnosis:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
1992 2008 2011 2009 2012 2013 1
radiation_therapy:
no yes NA's
186 296 34
karnofsky_performance_score:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
40.00 80.00 90.00 86.64 100.00 100.00 209
histological_type:
astrocytoma oligoastrocytoma oligodendroglioma NA's
194 130 191 1
race:
american indian or alaska native asian
1 8
black or african american white
21 475
NA's
11
ethnicity:
hispanic or latino not hispanic or latino NA's
32 449 35
Including an additional 1764 columns
}}
\keyword{datasets}
| /man/LGG-v2.0.1.Rd | no_license | waldronlab/curatedTCGAData | R | false | false | 5,511 | rd | \name{LGG-v2.0.1}
\alias{ LGG-v2.0.1 }
\docType{data}
\title{ Brain Lower Grade Glioma }
\description{
A document describing the TCGA cancer code
}
\details{
\preformatted{
> experiments( LGG )
ExperimentList class object of length 13:
[1] LGG_CNASeq-20160128: RaggedExperiment with 6360 rows and 104 columns
[2] LGG_CNASNP-20160128: RaggedExperiment with 411918 rows and 1015 columns
[3] LGG_CNVSNP-20160128: RaggedExperiment with 79791 rows and 1015 columns
[4] LGG_GISTIC_AllByGene-20160128: SummarizedExperiment with 24776 rows and 513 columns
[5] LGG_GISTIC_Peaks-20160128: RangedSummarizedExperiment with 46 rows and 513 columns
[6] LGG_GISTIC_ThresholdedByGene-20160128: SummarizedExperiment with 24776 rows and 513 columns
[7] LGG_miRNASeqGene-20160128: SummarizedExperiment with 1046 rows and 526 columns
[8] LGG_mRNAArray-20160128: SummarizedExperiment with 17814 rows and 27 columns
[9] LGG_Mutation-20160128: RaggedExperiment with 9885 rows and 286 columns
[10] LGG_RNASeq2Gene-20160128: SummarizedExperiment with 20501 rows and 530 columns
[11] LGG_RNASeq2GeneNorm-20160128: SummarizedExperiment with 20501 rows and 530 columns
[12] LGG_RPPAArray-20160128: SummarizedExperiment with 201 rows and 435 columns
[13] LGG_Methylation-20160128: SummarizedExperiment with 485577 rows and 530 columns
> rownames( LGG )
CharacterList of length 13
[["LGG_CNASeq-20160128"]] character(0)
[["LGG_CNASNP-20160128"]] character(0)
[["LGG_CNVSNP-20160128"]] character(0)
[["LGG_GISTIC_AllByGene-20160128"]] character(0)
[["LGG_GISTIC_Peaks-20160128"]] 21 22 1 2 23 3 24 25 ... 45 16 17 46 18 47 48
[["LGG_GISTIC_ThresholdedByGene-20160128"]] character(0)
[["LGG_miRNASeqGene-20160128"]] hsa-let-7a-1 hsa-let-7a-2 ... hsa-mir-99b
[["LGG_mRNAArray-20160128"]] ELMO2 CREB3L1 RPS11 PNMA1 ... SNRPD2 AQP7 CTSC
[["LGG_Mutation-20160128"]] character(0)
[["LGG_RNASeq2Gene-20160128"]] A1BG A1CF A2BP1 A2LD1 ... ZZZ3 psiTPTE22 tAKR
...
<3 more elements>
> colnames( LGG )
CharacterList of length 13
[["LGG_CNASeq-20160128"]] TCGA-CS-4938-01B-11D-1891-02 ...
[["LGG_CNASNP-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_CNVSNP-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_AllByGene-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_Peaks-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_GISTIC_ThresholdedByGene-20160128"]] TCGA-CS-4938-01B-11D-1892-01 ...
[["LGG_miRNASeqGene-20160128"]] TCGA-CS-4938-01B-11R-1895-13 ...
[["LGG_mRNAArray-20160128"]] TCGA-CS-4942-01A-01R-1470-07 ...
[["LGG_Mutation-20160128"]] TCGA-CS-4938-01B-11D-1893-08 ...
[["LGG_RNASeq2Gene-20160128"]] TCGA-CS-4938-01B-11R-1896-07 ...
...
<3 more elements>
Sizes of each ExperimentList element:
assay size.Mb
1 LGG_CNASeq-20160128 0.2 Mb
2 LGG_CNASNP-20160128 11.3 Mb
3 LGG_CNVSNP-20160128 2.4 Mb
4 LGG_GISTIC_AllByGene-20160128 100.5 Mb
5 LGG_GISTIC_Peaks-20160128 0.3 Mb
6 LGG_GISTIC_ThresholdedByGene-20160128 100.4 Mb
7 LGG_miRNASeqGene-20160128 4.4 Mb
8 LGG_mRNAArray-20160128 5.9 Mb
9 LGG_Mutation-20160128 4.2 Mb
10 LGG_RNASeq2Gene-20160128 85.5 Mb
11 LGG_RNASeq2GeneNorm-20160128 85.5 Mb
12 LGG_RPPAArray-20160128 0.8 Mb
13 LGG_Methylation-20160128 75.1 Mb
---------------------------
Overall survival time-to-event summary (in years):
---------------------------
Call: survfit(formula = survival::Surv(colDat$days_to_death/365, colDat$vital_status) ~
-1)
391 observations deleted due to missingness
n events median 0.95LCL 0.95UCL
125.00 125.00 2.23 1.87 2.83
---------------------------
Available sample meta-data:
---------------------------
years_to_birth:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
14.00 32.00 41.00 42.93 53.00 86.00 2
vital_status:
0 1 NA's
389 126 1
days_to_death:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
7 438 814 1219 1547 5166 391
days_to_last_followup:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
-1.0 384.0 629.0 880.1 1147.0 6423.0 127
tumor_tissue_site:
central nervous system NA's
515 1
gender:
female male NA's
230 285 1
date_of_initial_pathologic_diagnosis:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
1992 2008 2011 2009 2012 2013 1
radiation_therapy:
no yes NA's
186 296 34
karnofsky_performance_score:
Min. 1st Qu. Median Mean 3rd Qu. Max. NA's
40.00 80.00 90.00 86.64 100.00 100.00 209
histological_type:
astrocytoma oligoastrocytoma oligodendroglioma NA's
194 130 191 1
race:
american indian or alaska native asian
1 8
black or african american white
21 475
NA's
11
ethnicity:
hispanic or latino not hispanic or latino NA's
32 449 35
Including an additional 1764 columns
}}
\keyword{datasets}
|
## Assignment 3: compare hotel room prices and capacity utilisation
## across Norwegian counties using two SSB (Statistics Norway) JSON-stat
## datasets (95274 and 95276).
library(readr)
## NOTE(review): calling install.packages() inside an analysis script
## modifies the user's library on every run; consider removing these and
## documenting the dependencies instead.
install.packages("httr")
library(httr)
install.packages("rjstat")
library(rjstat)
library(tidyverse)
library(tidyverse)
library(dplyr)
## Download dataset 1 (95274) as JSON-stat and keep its first table.
set1json <- "http://data.ssb.no/api/v0/dataset/95274.json?lang=no"
dftemp <- GET(set1json)
dfjson <- fromJSONstat(content(dftemp, "text"))
df1 <- dfjson[[1]]
## Split the month column ("YYYYMmm") into year ("år") and month ("måned").
df1 <- separate(df1, måned, c("år", "måned"), sep = "M")
## Download dataset 2 (95276) the same way.
set2json <- "http://data.ssb.no/api/v0/dataset/95276.json?lang=no"
dftemp2 <- GET(set2json)
dfjson2 <- fromJSONstat(content(dftemp2, "text"))
df2 <- dfjson2 [[1]]
## Split the month column into two new columns, year and month.
df2 <- separate(df2, måned, c("år", "måned"), sep = "M")
## Merge the two datasets on year, month and statistic variable.
dfmerged <- left_join(df1, df2, by = c("år", "måned", "statistikkvariabel"))
## Drop rows with zero (or negative) values.
dfmerged <- dfmerged %>%
  filter(!(value.x <= 0))
## Keep only room-price rows ("Pris per rom (kr)"), then compute the means.
rompris <- dfmerged %>%
  filter(statistikkvariabel == "Pris per rom (kr)")
mean(rompris$value.x)
mean(rompris$value.y)
## Group by county (region) to get the mean room price per county.
gjennomsnitt_x <- rompris %>%
  group_by(region.x) %>%
  summarise(gjennomsnitt_verdi = mean(value.x))
## NOTE(review): this compares the whole data frame (including the
## character region column) against a scalar; presumably only the numeric
## mean column was intended -- confirm (the line below does it correctly).
gjennomsnitt_x > mean(rompris$value.y)
## Compare each county's mean room price against the national mean.
større_en_gjennomsnitt <- gjennomsnitt_x$gjennomsnitt_verdi >= mean(rompris$value.y)
cbind(gjennomsnitt_x, større_en_gjennomsnitt)
## Capacity utilisation (beds and rooms), restricted to 1992 onwards.
kappasitet_seng <- dfmerged %>%
  filter(statistikkvariabel == "Kapasitetsutnytting av senger (prosent)") %>%
  filter( år >= 1992)
kappasitet_rom <- dfmerged %>%
  filter(statistikkvariabel == "Kapasitetsutnytting av rom (prosent)") %>%
  filter( år >= 1992)
cor.test(kappasitet_seng$value.x, rompris$value.x)
cor.test(kappasitet_rom$value.x, rompris$value.x)
# svake korrelasjoner på begge, men litt sterkere på kappasitet rom | /Obligatorisk_innlevering_3.R | no_license | Johnrejor/lecture_3.0 | R | false | false | 2,029 | r | library(readr)
install.packages("httr")
library(httr)
install.packages("rjstat")
library(rjstat)
library(tidyverse)
library(dplyr)
set1json <- "http://data.ssb.no/api/v0/dataset/95274.json?lang=no"
dftemp <- GET(set1json)
dfjson <- fromJSONstat(content(dftemp, "text"))
df1 <- dfjson[[1]]
df1 <- separate(df1, måned, c("år", "måned"), sep = "M")
set2json <- "http://data.ssb.no/api/v0/dataset/95276.json?lang=no"
dftemp2 <- GET(set2json)
dfjson2 <- fromJSONstat(content(dftemp2, "text"))
df2 <- dfjson2 [[1]]
#Her separer vi kolonen måned, så vi får 2 nye, år og måned.
df2 <- separate(df2, måned, c("år", "måned"), sep = "M")
# Her merger vi datasettene
dfmerged <- left_join(df1, df2, by = c("år", "måned", "statistikkvariabel"))
# Her filtrer vi får å få vekk 0 verdier.
dfmerged <- dfmerged %>%
filter(!(value.x <= 0))
#her filtrer vi får å få et dataset bare med rompris, får så å regne gjennomsnittet.
rompris <- dfmerged %>%
filter(statistikkvariabel == "Pris per rom (kr)")
mean(rompris$value.x)
mean(rompris$value.y)
#Her grupperer vi med fylke, for å finne gjennomsnittlig rompris per fylke
gjennomsnitt_x <- rompris %>%
group_by(region.x) %>%
summarise(gjennomsnitt_verdi = mean(value.x))
gjennomsnitt_x > mean(rompris$value.y)
# Her lager vi et nytt dataframe for å sammenligne gjennomsnittet per fylke, med landets gjennomsnitt.
større_en_gjennomsnitt <- gjennomsnitt_x$gjennomsnitt_verdi >= mean(rompris$value.y)
cbind(gjennomsnitt_x, større_en_gjennomsnitt)
kappasitet_seng <- dfmerged %>%
filter(statistikkvariabel == "Kapasitetsutnytting av senger (prosent)") %>%
filter( år >= 1992)
kappasitet_rom <- dfmerged %>%
filter(statistikkvariabel == "Kapasitetsutnytting av rom (prosent)") %>%
filter( år >= 1992)
cor.test(kappasitet_seng$value.x, rompris$value.x)
cor.test(kappasitet_rom$value.x, rompris$value.x)
# svake korrelasjoner på begge, men litt sterkere på kappasitet rom |
## Auto-generated fuzz/regression case for esreg::G1_fun (presumably from
## a libFuzzer/valgrind harness -- confirm against the harness that wrote
## it).  z is a subnormal double (~1.26e-321), i.e. an extreme boundary
## input chosen by the fuzzer.
testlist <- list(type = 13565952L, z = 1.26480805335359e-321)
result <- do.call(esreg::G1_fun,testlist)
## Print the result's structure so the harness output captures it.
str(result)
result <- do.call(esreg::G1_fun,testlist)
str(result) |
library(SummarizedExperiment)
library(BuenColors)
library(dplyr)
library(Matrix)
library(BuenColors)
library(data.table)
library(ggbeeswarm)
library(dplyr)
library(matrixStats)
library(ComplexHeatmap)
library(circlize)
library(stringr)
# Import signac procssed stuff
## Load the Signac-processed per-cell metadata for the CRC tumor sample
## and keep the cell barcode as an explicit column.
d <- readRDS("../output/21March2020_signac_process.rds")
d$barcode <- rownames(d)
# Compute baseline features per bin: counts-per-100k normalisation of the
# public PBMC reference, then the per-bin mean and sd used for z-scoring.
base_pbmcs <- readRDS("../../cnv_compute/output/atac_public_pbmcs_cnv.rds"); base_pbmcs[is.na(base_pbmcs)] <- 0
cpm_norm <- (t(t(base_pbmcs)/colSums(base_pbmcs)) * 100000)
row_means <- rowMeans(cpm_norm)
row_std <- sqrt(rowVars(cpm_norm))
# Subset the observed tumor count matrix to the filtered (valid) barcodes.
mat <- readRDS("../../cnv_compute/output/CRC_tumor_cnv.rds"); mat[is.na(mat)] <- 0
bcs <- fread("../output/CRC_filtered_barcodes.tsv", header = FALSE)[[1]]
mat <- mat[,bcs]
# Functions to make Z score and log2 change w.r.t. baseline from 10x
makeZscoreMat <- function(mat) {
  ## Normalise each cell (column) to counts per 100,000, then z-score
  ## each bin (row) against the baseline mean/sd (`row_means`,
  ## `row_std`) computed above from the public PBMC reference.
  counts_per_100k <- t(t(mat) / colSums(mat)) * 100000
  ## invisible() mirrors the original, whose last statement was an
  ## assignment and therefore returned its value invisibly.
  invisible((counts_per_100k - row_means) / row_std)
}
## Log2 ratio of a counts-per-100k-normalised matrix against a per-bin
## baseline profile.
##
## Args:
##   mat: numeric count matrix (bins x cells).
##   baseline: per-bin baseline vector; defaults to the row medians of
##     the global `cpm_norm` PBMC reference matrix computed above.
## Returns: matrix of log2(normalised / (baseline + 1)) values.
makeLog2Mat <- function(mat, baseline = apply(cpm_norm, 1, median)) {
  ## Same counts-per-100k normalisation as makeZscoreMat above.
  mat_cpm_norm <- (t(t(mat)/colSums(mat)) * 100000)
  ## BUG FIX: the original referenced `row_medians`, which is never
  ## defined anywhere in this script (only `row_means` and `row_std`
  ## exist), so every call failed with "object 'row_medians' not found".
  ## The baseline is now an explicit argument defaulting to the per-bin
  ## medians of the reference matrix, which is the apparent intent.
  log2(mat_cpm_norm/(baseline + 1))
}
# Compute Zscores
# Compute Z-scores for the tumor matrix (and for the PBMC baseline itself).
# NOTE(review): score_base is computed but never used below -- confirm
# whether it was intended for a comparison plot.
zscore <- makeZscoreMat(mat)
score_base <- makeZscoreMat(base_pbmcs)
# Split the "_"-delimited bin names into columns; column 1 (presumably the
# chromosome) is used below to split the heatmap rows.
region_meta <- str_split_fixed(rownames(zscore), "_", 4)
# Cap extreme z-scores at +/-3 for visualization.
zscore[zscore > 3] <- 3
zscore[zscore < -3] <- -3
# keep == TRUE selects all rows, i.e. no region filtering is applied here.
keep <- TRUE
ordering <- factor(region_meta[keep,1], levels = unique(region_meta[keep,1]))
# Colors for Seurat clusters 0 and 1.
colVec <- c("dodgerblue3", "purple4")
names(colVec) <- as.character(c(0,1))
# Restrict to cells in clusters 0/1, ordered by cluster for the heatmap
# column annotation below.
d_ss <- d %>% filter(seurat_clusters %in% c(0,1)) %>% arrange(seurat_clusters)
ha1 <- HeatmapAnnotation(df = data.frame(clusters = as.character(d_ss$seurat_clusters)),
                         col = list(clusters = colVec)
)
# Render the CNV z-score heatmap (rows split by region_meta column 1,
# columns in cluster order) to a PDF.
pdf(paste0("../plots/CRC_tumor_CNV.pdf"), width=3, height=5)
Heatmap(zscore[keep,as.character(d_ss$barcode)],
        col=as.character(jdb_palette("solar_basic",type="continuous")),
        show_row_names = FALSE,
        cluster_columns = FALSE,
        name = "CNV zscore",
        row_names_gp = gpar(fontsize = 3),
        cluster_rows = FALSE,
        split = ordering,
        top_annotation = ha1,
        show_column_names = FALSE)
dev.off()
# QC scatter plots on all cells: DNase signal vs. mtDNA depth, and vs.
# chr12 CNV signal, colored by cluster (palette covers up to 7 clusters).
ggplot(d, aes(y = pct_reads_in_DNase, x =log10_mtDNA_depth, color = seurat_clusters)) +
  geom_point() + scale_color_manual(values = c("dodgerblue3", "purple4", "forestgreen", "orange3", "firebrick", "orange", "green2"))
ggplot(d, aes(x = pct_reads_in_DNase, y = chr12, color = seurat_clusters)) + geom_point() +
  scale_color_manual(values = c("dodgerblue3", "purple4", "forestgreen", "orange3", "firebrick", "orange", "green2"))
| /figure_CRC/code/02_cnv_analysis.R | no_license | ChenPeizhan/mtscATACpaper_reproducibility | R | false | false | 2,714 | r | library(SummarizedExperiment)
library(BuenColors)
library(dplyr)
library(Matrix)
library(BuenColors)
library(data.table)
library(ggbeeswarm)
library(dplyr)
library(matrixStats)
library(ComplexHeatmap)
library(circlize)
library(stringr)
# Import signac procssed stuff
d <- readRDS("../output/21March2020_signac_process.rds")
d$barcode <- rownames(d)
# Compute baseline features per bin
base_pbmcs <- readRDS("../../cnv_compute/output/atac_public_pbmcs_cnv.rds"); base_pbmcs[is.na(base_pbmcs)] <- 0
cpm_norm <- (t(t(base_pbmcs)/colSums(base_pbmcs)) * 100000)
row_means <- rowMeans(cpm_norm)
row_std <- sqrt(rowVars(cpm_norm))
# Subset observed new matrix to valid barcodes
mat <- readRDS("../../cnv_compute/output/CRC_tumor_cnv.rds"); mat[is.na(mat)] <- 0
bcs <- fread("../output/CRC_filtered_barcodes.tsv", header = FALSE)[[1]]
mat <- mat[,bcs]
# Functions to make Z score and log2 change w.r.t. baseline from 10x
makeZscoreMat <- function(mat){
mat_cpm_norm <- (t(t(mat)/colSums(mat)) * 100000)
zscore_mat <- (mat_cpm_norm - row_means)/row_std
}
## Log2 ratio of a counts-per-100k-normalised matrix against a per-bin
## baseline profile.
##
## Args:
##   mat: numeric count matrix (bins x cells).
##   baseline: per-bin baseline vector; defaults to the row medians of
##     the global `cpm_norm` PBMC reference matrix computed above.
## Returns: matrix of log2(normalised / (baseline + 1)) values.
makeLog2Mat <- function(mat, baseline = apply(cpm_norm, 1, median)) {
  ## Same counts-per-100k normalisation as makeZscoreMat above.
  mat_cpm_norm <- (t(t(mat)/colSums(mat)) * 100000)
  ## BUG FIX: the original referenced `row_medians`, which is never
  ## defined anywhere in this script (only `row_means` and `row_std`
  ## exist), so every call failed with "object 'row_medians' not found".
  ## The baseline is now an explicit argument defaulting to the per-bin
  ## medians of the reference matrix, which is the apparent intent.
  log2(mat_cpm_norm/(baseline + 1))
}
# Compute Zscores
zscore <- makeZscoreMat(mat)
score_base <- makeZscoreMat(base_pbmcs)
region_meta <- str_split_fixed(rownames(zscore), "_", 4)
# Cap for visualiation
zscore[zscore > 3] <- 3
zscore[zscore < -3] <- -3
keep <- TRUE
ordering <- factor(region_meta[keep,1], levels = unique(region_meta[keep,1]))
colVec <- c("dodgerblue3", "purple4")
names(colVec) <- as.character(c(0,1))
d_ss <- d %>% filter(seurat_clusters %in% c(0,1)) %>% arrange(seurat_clusters)
ha1 <- HeatmapAnnotation(df = data.frame(clusters = as.character(d_ss$seurat_clusters)),
col = list(clusters = colVec)
)
pdf(paste0("../plots/CRC_tumor_CNV.pdf"), width=3, height=5)
Heatmap(zscore[keep,as.character(d_ss$barcode)],
col=as.character(jdb_palette("solar_basic",type="continuous")),
show_row_names = FALSE,
cluster_columns = FALSE,
name = "CNV zscore",
row_names_gp = gpar(fontsize = 3),
cluster_rows = FALSE,
split = ordering,
top_annotation = ha1,
show_column_names = FALSE)
dev.off()
ggplot(d, aes(y = pct_reads_in_DNase, x =log10_mtDNA_depth, color = seurat_clusters)) +
geom_point() + scale_color_manual(values = c("dodgerblue3", "purple4", "forestgreen", "orange3", "firebrick", "orange", "green2"))
ggplot(d, aes(x = pct_reads_in_DNase, y = chr12, color = seurat_clusters)) + geom_point() +
scale_color_manual(values = c("dodgerblue3", "purple4", "forestgreen", "orange3", "firebrick", "orange", "green2"))
|
## run_analysis: merge the UCI HAR train/test sets, keep only the
## mean()/std() features, attach activity names and subjects, and write a
## per-activity/subject summary of column means.
require(dplyr)
## Get the training and test data read into R
test <- read.table("test/X_test.txt", header = FALSE)
train <- read.table("train/X_train.txt", header = FALSE)
ytrain <- read.table("train/y_train.txt", header = FALSE,
                     stringsAsFactors = FALSE)
ytest <- read.table("test/y_test.txt", header = FALSE,
                    stringsAsFactors = FALSE)
subtrain <- read.table("train/subject_train.txt", header = FALSE,
                       stringsAsFactors = FALSE)
subtest <- read.table("test/subject_test.txt", header = FALSE,
                      stringsAsFactors = FALSE)
## Now we have both dfs, let's add the rows from 'test' to the
## bottom of the 'train' df (and the matching activity/subject rows).
final <- rbind(train, test)
final_y <- rbind(ytrain, ytest)
final_sub <- rbind(subtrain, subtest)
## Now we have to get only the mean and std for each measurement.
## To do this, we need to check the features and get the columns
## for which we have either mean() or std() in the name
features <- read.table("features.txt", header = FALSE,
                       stringsAsFactors = FALSE)
selectcols <- grep("mean\\(\\)|std\\(\\)", features$V2) # Regex the means and stds
## Now cut the df cols out that we want, according to selectcols
final <- final[,selectcols]
## Let's get the activity names and apply them to the rows
activitylabels <- read.table("activity_labels.txt", header = FALSE,
                             stringsAsFactors = FALSE)
## This one is tricky, we can go through the whole final_y list, and get the
## correct activity name by converting the activity number into a row
## in the activitylables df
## NOTE(review): final_y is a one-column data frame, so sapply iterates
## over its single column and returns a one-column matrix; this works with
## cbind below but is fragile if the file format changes.
activitylist <- sapply(final_y, function(x){activitylabels[x, 2]})
final <- cbind(final, activitylist) # Add a column in the df for activity
final <- cbind(final, final_sub) # Add another column for subject
## Let's pull the names from the features list and apply them to the columns
colnamesfeatures <- features$V2[selectcols]
colnames(final) <- c(colnamesfeatures, "Activity", "Subject")
## Now we are in a position to make a new data set, and summarize it as
## required by question 5: the mean of every feature, grouped by
## Activity and Subject.
final_summary <- as_tibble(final)
final_summary <- group_by(final_summary, Activity, Subject)
final_summary <- summarize_all(final_summary, mean)
write.table(final_summary, "final_summary.txt", row.names = FALSE)
## And we are done!
| /run_analysis.R | no_license | adamazoulay/GettingAndCleaningData | R | false | false | 2,426 | r | require(dplyr)
## Get the training and test data read into R
test <- read.table("test/X_test.txt", header = FALSE)
train <- read.table("train/X_train.txt", header = FALSE)
ytrain <- read.table("train/y_train.txt", header = FALSE,
stringsAsFactors = FALSE)
ytest <- read.table("test/y_test.txt", header = FALSE,
stringsAsFactors = FALSE)
subtrain <- read.table("train/subject_train.txt", header = FALSE,
stringsAsFactors = FALSE)
subtest <- read.table("test/subject_test.txt", header = FALSE,
stringsAsFactors = FALSE)
## Now we have both dfs, let's add the rows from 'test' to the
## bottom of the 'train' df
final <- rbind(train, test)
final_y <- rbind(ytrain, ytest)
final_sub <- rbind(subtrain, subtest)
## Now we have to get only the mean and std for each measurement.
## To do this, we need to check the features and get the columns
## for which we have either mean() or std() in the name
features <- read.table("features.txt", header = FALSE,
stringsAsFactors = FALSE)
selectcols <- grep("mean\\(\\)|std\\(\\)", features$V2) # Regex the means and stds
## Now cut the df cols out that we want, according to selectcols
final <- final[,selectcols]
## Let's get the activity names and apply them to the rows
activitylabels <- read.table("activity_labels.txt", header = FALSE,
stringsAsFactors = FALSE)
## This one is tricky, we can go through the whole final_y list, and get the
## correct activity name by converting the activity number into a row
## in the activitylables df
activitylist <- sapply(final_y, function(x){activitylabels[x, 2]})
final <- cbind(final, activitylist) # Add a column in the df for activity
final <- cbind(final, final_sub) # Add another column for subject
## Let's pull the names from the features list and apply them to the columns
colnamesfeatures <- features$V2[selectcols]
colnames(final) <- c(colnamesfeatures, "Activity", "Subject")
## Now we are in a position to make a new data set, and summarize it as
## required by question 5.
final_summary <- as_tibble(final)
final_summary <- group_by(final_summary, Activity, Subject)
final_summary <- summarize_all(final_summary, mean)
write.table(final_summary, "final_summary.txt", row.names = FALSE)
## And we are done!
|
# Mock learner 1: a classification learner whose predict step always
# fails with error "foo".  Used to exercise error handling on the
# predict path.
makeRLearner.classif.__mlrmocklearners__1 = function() {
  makeRLearnerClassif(
    cl = "classif.__mlrmocklearners__1", package = character(0L), par.set = makeParamSet(),
    properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
  )
}
# Training succeeds trivially with an empty model object.
trainLearner.classif.__mlrmocklearners__1 = function(.learner, .task, .subset, .weights = NULL, ...) list()
# Prediction always errors.
predictLearner.classif.__mlrmocklearners__1 = function(.learner, .model, .newdata, ...) stop("foo")
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "classif.__mlrmocklearners__1", makeRLearner.classif.__mlrmocklearners__1)
registerS3method("trainLearner", "classif.__mlrmocklearners__1", trainLearner.classif.__mlrmocklearners__1)
registerS3method("predictLearner", "classif.__mlrmocklearners__1", predictLearner.classif.__mlrmocklearners__1)
# Mock learner 2: for tuning tests -- produces errors en masse.  Training
# fails whenever the hyperparameter alpha < 0.5, so roughly half of a
# uniform tuning grid errors out.
makeRLearner.classif.__mlrmocklearners__2 = function() {
  makeRLearnerClassif(
    cl = "classif.__mlrmocklearners__2", package = character(0L),
    par.set = makeParamSet(
      makeNumericLearnerParam("alpha", lower = 0, upper = 1)
    ),
    properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
  )
}
# Training errors with "foo" for alpha < 0.5, otherwise returns an empty
# model.
trainLearner.classif.__mlrmocklearners__2 = function(.learner, .task, .subset, .weights = NULL, alpha, ...) {
  if (alpha < 0.5)
    stop("foo")
  list()
}
# Prediction returns uniformly random class labels.
predictLearner.classif.__mlrmocklearners__2 = function(.learner, .model, .newdata, ...) {
  as.factor(sample(.model$task.desc$class.levels, nrow(.newdata), replace = TRUE))
}
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "classif.__mlrmocklearners__2", makeRLearner.classif.__mlrmocklearners__2)
registerS3method("trainLearner", "classif.__mlrmocklearners__2", trainLearner.classif.__mlrmocklearners__2)
registerS3method("predictLearner", "classif.__mlrmocklearners__2", predictLearner.classif.__mlrmocklearners__2)
# Mock learner 3: a classification learner whose train step always fails
# with error "foo".  Used to exercise error handling on the train path.
makeRLearner.classif.__mlrmocklearners__3 = function() {
  makeRLearnerClassif(
    cl = "classif.__mlrmocklearners__3", package = character(0L), par.set = makeParamSet(),
    properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
  )
}
# Training always errors.
trainLearner.classif.__mlrmocklearners__3 = function(.learner, .task, .subset, .weights = NULL, ...) stop("foo")
# Prediction is never reached (training fails first); returns a dummy 1L.
predictLearner.classif.__mlrmocklearners__3 = function(.learner, .model, .newdata, ...) 1L
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "classif.__mlrmocklearners__3", makeRLearner.classif.__mlrmocklearners__3)
registerS3method("trainLearner", "classif.__mlrmocklearners__3", trainLearner.classif.__mlrmocklearners__3)
registerS3method("predictLearner", "classif.__mlrmocklearners__3", predictLearner.classif.__mlrmocklearners__3)
# Mock learner 4: a regression learner whose hyperparameters use
# different "when" settings (p1 only at train time, p2 only at predict
# time, p3 at both), so tests can verify parameters are routed to the
# right stage.
makeRLearner.regr.__mlrmocklearners__4 = function() {
  makeRLearnerRegr(
    cl = "regr.__mlrmocklearners__4", package = character(0L),
    par.set = makeParamSet(
      makeNumericLearnerParam("p1", when = "train"),
      makeNumericLearnerParam("p2", when = "predict"),
      makeNumericLearnerParam("p3", when = "both")
    ),
    properties = c("missings", "numerics", "factors")
  )
}
# The "model" is just p1 + p3, making the parameter routing observable.
trainLearner.regr.__mlrmocklearners__4 = function(.learner, .task, .subset, .weights = NULL, p1, p3, ...) {
  list(foo = p1 + p3)
}
# Prediction is constant per row: (p1 + p3 from training) + p2 + p3.
predictLearner.regr.__mlrmocklearners__4 = function(.learner, .model, .newdata, p2, p3) {
  y = rep(1, nrow(.newdata))
  y * .model$learner.model$foo + p2 + p3
}
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "regr.__mlrmocklearners__4", makeRLearner.regr.__mlrmocklearners__4)
registerS3method("trainLearner", "regr.__mlrmocklearners__4", trainLearner.regr.__mlrmocklearners__4)
registerS3method("predictLearner", "regr.__mlrmocklearners__4", predictLearner.regr.__mlrmocklearners__4)
# Mock learner 5: checks that a learner cannot use expression() (instead
# of quote()) in a parameter's `requires`: parameter b requires a == "x",
# given here as an expression on purpose.
makeRLearner.classif.__mlrmocklearners__5 = function() {
  makeRLearnerClassif(
    cl = "classif.__mlrmocklearners__5",
    package = "mlr",
    par.set = makeParamSet(
      makeDiscreteLearnerParam(id = "a", values = c("x", "y")),
      makeNumericLearnerParam(id = "b", lower = 0.0, upper = 1.0, requires = expression(a == "x"))
    ),
    properties = c("twoclass", "multiclass", "numerics", "factors", "prob")
  )
}
# Training is a no-op (returns NULL).
trainLearner.classif.__mlrmocklearners__5 = function(.learner, .task, .subset, .weights = NULL, ...) { }
# Prediction always returns the target's first factor level for all rows.
predictLearner.classif.__mlrmocklearners__5 = function(.learner, .model, .newdata) {
  rep(factor(.model$factor.levels[[.model$task.desc$target]][1]), nrow(.newdata))
}
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "classif.__mlrmocklearners__5", makeRLearner.classif.__mlrmocklearners__5)
registerS3method("trainLearner", "classif.__mlrmocklearners__5", trainLearner.classif.__mlrmocklearners__5)
registerS3method("predictLearner", "classif.__mlrmocklearners__5", predictLearner.classif.__mlrmocklearners__5)
# Mock learner 6: stores the observation weights inside the fitted model
# so tests can check whether they are correctly passed down to
# trainLearner.
makeRLearner.regr.__mlrmocklearners__6 = function() {
  makeRLearnerRegr(
    cl = "regr.__mlrmocklearners__6", package = character(0L),
    par.set = makeParamSet(),
    properties = c("missings", "numerics", "factors", "weights")
  )
}
# The "model" is just the weight vector received from mlr.
trainLearner.regr.__mlrmocklearners__6 = function(.learner, .task, .subset, .weights = NULL, ...) {
  list(weights = .weights)
}
# Prediction is a constant 1 for every row.
predictLearner.regr.__mlrmocklearners__6 = function(.learner, .model, .newdata ) {
  rep(1, nrow(.newdata))
}
# Register the S3 methods so mlr's generic dispatch finds this mock.
registerS3method("makeRLearner", "regr.__mlrmocklearners__6", makeRLearner.regr.__mlrmocklearners__6)
registerS3method("trainLearner", "regr.__mlrmocklearners__6", trainLearner.regr.__mlrmocklearners__6)
registerS3method("predictLearner", "regr.__mlrmocklearners__6", predictLearner.regr.__mlrmocklearners__6)
| /tests/testthat/helper_mock_learners.R | no_license | HeidiSeibold/mlr | R | false | false | 5,718 | r | # learner with error "foo" in predict
makeRLearner.classif.__mlrmocklearners__1 = function() {
makeRLearnerClassif(
cl = "classif.__mlrmocklearners__1", package = character(0L), par.set = makeParamSet(),
properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
)
}
trainLearner.classif.__mlrmocklearners__1 = function(.learner, .task, .subset, .weights = NULL, ...) list()
predictLearner.classif.__mlrmocklearners__1 = function(.learner, .model, .newdata, ...) stop("foo")
registerS3method("makeRLearner", "classif.__mlrmocklearners__1", makeRLearner.classif.__mlrmocklearners__1)
registerS3method("trainLearner", "classif.__mlrmocklearners__1", trainLearner.classif.__mlrmocklearners__1)
registerS3method("predictLearner", "classif.__mlrmocklearners__1", predictLearner.classif.__mlrmocklearners__1)
# for tuning, produces errors en masse
makeRLearner.classif.__mlrmocklearners__2 = function() {
makeRLearnerClassif(
cl = "classif.__mlrmocklearners__2", package = character(0L),
par.set = makeParamSet(
makeNumericLearnerParam("alpha", lower = 0, upper = 1)
),
properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
)
}
trainLearner.classif.__mlrmocklearners__2 = function(.learner, .task, .subset, .weights = NULL, alpha, ...) {
if (alpha < 0.5)
stop("foo")
list()
}
predictLearner.classif.__mlrmocklearners__2 = function(.learner, .model, .newdata, ...) {
as.factor(sample(.model$task.desc$class.levels, nrow(.newdata), replace = TRUE))
}
registerS3method("makeRLearner", "classif.__mlrmocklearners__2", makeRLearner.classif.__mlrmocklearners__2)
registerS3method("trainLearner", "classif.__mlrmocklearners__2", trainLearner.classif.__mlrmocklearners__2)
registerS3method("predictLearner", "classif.__mlrmocklearners__2", predictLearner.classif.__mlrmocklearners__2)
# learner with error "foo" in train
makeRLearner.classif.__mlrmocklearners__3 = function() {
makeRLearnerClassif(
cl = "classif.__mlrmocklearners__3", package = character(0L), par.set = makeParamSet(),
properties = c("twoclass", "multiclass", "missings", "numerics", "factors", "prob")
)
}
trainLearner.classif.__mlrmocklearners__3 = function(.learner, .task, .subset, .weights = NULL, ...) stop("foo")
predictLearner.classif.__mlrmocklearners__3 = function(.learner, .model, .newdata, ...) 1L
registerS3method("makeRLearner", "classif.__mlrmocklearners__3", makeRLearner.classif.__mlrmocklearners__3)
registerS3method("trainLearner", "classif.__mlrmocklearners__3", trainLearner.classif.__mlrmocklearners__3)
registerS3method("predictLearner", "classif.__mlrmocklearners__3", predictLearner.classif.__mlrmocklearners__3)
# learner with different "when" settings for hyperpars
makeRLearner.regr.__mlrmocklearners__4 = function() {
makeRLearnerRegr(
cl = "regr.__mlrmocklearners__4", package = character(0L),
par.set = makeParamSet(
makeNumericLearnerParam("p1", when = "train"),
makeNumericLearnerParam("p2", when = "predict"),
makeNumericLearnerParam("p3", when = "both")
),
properties = c("missings", "numerics", "factors")
)
}
trainLearner.regr.__mlrmocklearners__4 = function(.learner, .task, .subset, .weights = NULL, p1, p3, ...) {
list(foo = p1 + p3)
}
predictLearner.regr.__mlrmocklearners__4 = function(.learner, .model, .newdata, p2, p3) {
y = rep(1, nrow(.newdata))
y * .model$learner.model$foo + p2 + p3
}
registerS3method("makeRLearner", "regr.__mlrmocklearners__4", makeRLearner.regr.__mlrmocklearners__4)
registerS3method("trainLearner", "regr.__mlrmocklearners__4", trainLearner.regr.__mlrmocklearners__4)
registerS3method("predictLearner", "regr.__mlrmocklearners__4", predictLearner.regr.__mlrmocklearners__4)
# Learner cannot use expression in param requires
# Mock classification learner whose parameter "b" declares its `requires`
# dependency via expression() instead of quote() -- the invalid form the
# surrounding tests presumably check is rejected (confirm in the tests).
makeRLearner.classif.__mlrmocklearners__5 = function() {
  makeRLearnerClassif(
    cl = "classif.__mlrmocklearners__5",
    package = "mlr",
    par.set = makeParamSet(
      makeDiscreteLearnerParam(id = "a", values = c("x", "y")),
      makeNumericLearnerParam(id = "b", lower = 0.0, upper = 1.0, requires = expression(a == "x"))
    ),
    properties = c("twoclass", "multiclass", "numerics", "factors", "prob")
  )
}
# Training is a no-op; the stored model is NULL.
trainLearner.classif.__mlrmocklearners__5 = function(.learner, .task, .subset, .weights = NULL, ...) { }
# Always predicts the first factor level of the target, once per row.
predictLearner.classif.__mlrmocklearners__5 = function(.learner, .model, .newdata) {
  rep(factor(.model$factor.levels[[.model$task.desc$target]][1]), nrow(.newdata))
}
# Explicit S3 registration, as for the other mock learners in this file.
registerS3method("makeRLearner", "classif.__mlrmocklearners__5", makeRLearner.classif.__mlrmocklearners__5)
registerS3method("trainLearner", "classif.__mlrmocklearners__5", trainLearner.classif.__mlrmocklearners__5)
registerS3method("predictLearner", "classif.__mlrmocklearners__5", predictLearner.classif.__mlrmocklearners__5)
# stores weights internally so we can see wether they are correctly passed down
# Mock regression learner advertising the "weights" property.  trainLearner()
# stashes the .weights vector it receives inside the model object, so tests
# can inspect model$learner.model$weights to confirm observation weights are
# forwarded through mlr correctly.
makeRLearner.regr.__mlrmocklearners__6 = function() {
  makeRLearnerRegr(
    cl = "regr.__mlrmocklearners__6", package = character(0L),
    par.set = makeParamSet(),
    properties = c("missings", "numerics", "factors", "weights")
  )
}
# The "model" is just the weight vector that was passed in (NULL if none).
trainLearner.regr.__mlrmocklearners__6 = function(.learner, .task, .subset, .weights = NULL, ...) {
  list(weights = .weights)
}
# Constant prediction of 1 per row; only the stored weights matter for tests.
predictLearner.regr.__mlrmocklearners__6 = function(.learner, .model, .newdata ) {
  rep(1, nrow(.newdata))
}
# Explicit S3 registration, as for the other mock learners in this file.
registerS3method("makeRLearner", "regr.__mlrmocklearners__6", makeRLearner.regr.__mlrmocklearners__6)
registerS3method("trainLearner", "regr.__mlrmocklearners__6", trainLearner.regr.__mlrmocklearners__6)
registerS3method("predictLearner", "regr.__mlrmocklearners__6", predictLearner.regr.__mlrmocklearners__6)
|
## OPTION1 (NOT USED) Reading the entire table and selecting the two days of interest
## Data <- read.table("household_power_consumption.txt", header=TRUE, sep =";")
## Data <- subset(Data, Date=="1/2/2007" | Date=="2/2/2007")

# OPTION2: read only the two relevant dates straight from disk; the SQL
# filter avoids loading the full ~2M-row file into memory.
library(sqldf)
Data2 <- read.csv.sql("household_power_consumption.txt", header=TRUE, sep=";",
                      sql="select * from file where Date ='1/2/2007' OR Date ='2/2/2007' ")

# creating a new variable with date and time
datetime <- paste(Data2$Date, Data2$Time, sep=" ")
# converting character vector into time format (day/month/year H:M:S)
Data2$datetime <- strptime(datetime, "%e/%m/%Y %H:%M:%S")

# Date in English: force English weekday labels on the plot axis regardless
# of the system locale.
Sys.setlocale("LC_TIME", "English")

# creating the line plot: one line per sub-metering channel
with(Data2, plot(datetime, Sub_metering_1, type="l", xlab=" ", ylab="Energy sub metering"))
with(Data2, lines(datetime, Sub_metering_2, col="red"))
with(Data2, lines(datetime, Sub_metering_3, col="blue"))

# The legend.
# BUG FIX: the original call passed a bare, undefined symbol `fill` as the
# second positional argument, which aborts with "object 'fill' not found".
# A line legend only needs `col`, `legend` and `lty`.
legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty=1)

## generating a PNG plot called "Plot3.png"
dev.copy(png, file = "plot3.png")
dev.off() | /plot3.R | no_license | ADOL1961/ExData_Plotting1 | R | false | false | 1,222 | r | ## OPTION1 (NOT USED) Reading the entire table and selecting the two days of interest
## Data <- read.table("household_power_consumption.txt", header=TRUE, sep =";")
## Data <- subset(Data, Date=="1/2/2007" | Date=="2/2/2007")

# OPTION2: read only the two relevant dates straight from disk; the SQL
# filter avoids loading the full ~2M-row file into memory.
library(sqldf)
Data2 <- read.csv.sql("household_power_consumption.txt", header=TRUE, sep=";",
                      sql="select * from file where Date ='1/2/2007' OR Date ='2/2/2007' ")

# creating a new variable with date and time
datetime <- paste(Data2$Date, Data2$Time, sep=" ")
# converting character vector into time format (day/month/year H:M:S)
Data2$datetime <- strptime(datetime, "%e/%m/%Y %H:%M:%S")

# Date in English: force English weekday labels on the plot axis regardless
# of the system locale.
Sys.setlocale("LC_TIME", "English")

# creating the line plot: one line per sub-metering channel
with(Data2, plot(datetime, Sub_metering_1, type="l", xlab=" ", ylab="Energy sub metering"))
with(Data2, lines(datetime, Sub_metering_2, col="red"))
with(Data2, lines(datetime, Sub_metering_3, col="blue"))

# The legend.
# BUG FIX: the original call passed a bare, undefined symbol `fill` as the
# second positional argument, which aborts with "object 'fill' not found".
# A line legend only needs `col`, `legend` and `lty`.
legend("topright", col=c("black", "red", "blue"), legend=c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"), lty=1)

## generating a PNG plot called "Plot3.png"
dev.copy(png, file = "plot3.png")
dev.off() |
#load Data
# Reads the full power-consumption file, then keeps only the two target days.
# NOTE(review): header=T should be header=TRUE (T is reassignable), and
# `subset` shadows base::subset; both left unchanged to preserve behaviour.
data <- read.table("household_power_consumption.txt", header=T, sep=";")
subset <- data[data["Date"]=="1/2/2007" | data["Date"]=="2/2/2007",]
# Combine Date and Time into a single POSIXlt timestamp for the x axes.
subset$NewDate <- strptime(paste(subset$Date,subset$Time), "%d/%m/%Y %H:%M:%S");
#Open Device
# Render straight into plot4.png.
png(filename="plot4.png")
#Set Parameter
# 2x2 grid of panels, filled column-wise.
par(mfcol=c(2,2))
#Plot 1
plot(subset$NewDate, as.numeric(as.character(subset$Global_active_power)), type="l", xlab="", ylab="Global Active Power (Kilowatts)")
#Plot 2
# Empty frame first (type="n"), then one line per sub-metering channel.
# as.numeric(as.character(...)) guards against factor columns from read.table.
with(subset, plot(subset$NewDate, as.numeric(as.character(subset$Sub_metering_1)), type="n", xlab="", ylab="Energy sub metering"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_1)), type="l", col="black"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_2)), type="l", col="red"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_3)), type="l", col="blue"))
#legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd="0")
# bty="n" draws the legend without a surrounding box.
legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd="0", bty="n")
#Plot3
plot(subset$NewDate, as.numeric(as.character(subset$Voltage)), type="l", xlab="datetime", ylab="Voltage")
#plot4
plot(subset$NewDate, as.numeric(as.character(subset$Global_reactive_power)), type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() | /plot4.R | no_license | dabergueiro/ExData_Plotting1 | R | false | false | 1,429 | r | #load Data
data <- read.table("household_power_consumption.txt", header=T, sep=";")
subset <- data[data["Date"]=="1/2/2007" | data["Date"]=="2/2/2007",]
subset$NewDate <- strptime(paste(subset$Date,subset$Time), "%d/%m/%Y %H:%M:%S");
#Open Device
png(filename="plot4.png")
#Set Parameter
par(mfcol=c(2,2))
#Plot 1
plot(subset$NewDate, as.numeric(as.character(subset$Global_active_power)), type="l", xlab="", ylab="Global Active Power (Kilowatts)")
#Plot 2
with(subset, plot(subset$NewDate, as.numeric(as.character(subset$Sub_metering_1)), type="n", xlab="", ylab="Energy sub metering"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_1)), type="l", col="black"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_2)), type="l", col="red"))
with(subset, points(subset$NewDate, as.numeric(as.character(subset$Sub_metering_3)), type="l", col="blue"))
#legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd="0")
legend("topright", col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), lwd="0", bty="n")
#Plot3
plot(subset$NewDate, as.numeric(as.character(subset$Voltage)), type="l", xlab="datetime", ylab="Voltage")
#plot4
plot(subset$NewDate, as.numeric(as.character(subset$Global_reactive_power)), type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off() |
####
# calculates distribution of
# Rural, Town, and Urban residents in sample (subjects only)
# based on current and childhood residences
# and plots as two side-by-side histograms
####
# NOTE(review): rm(list=ls()) wipes the caller's workspace -- acceptable for
# a standalone script, hostile if this file is ever source()d from a session.
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
# read in data
# inread() is a project-local loader defined in inread.r; presumably it
# returns a data frame keyed by Subject_Code -- confirm in that file.
source('inread.r')
almost_everything <- inread('data/2015_lmsim_data.csv')
# Keep the subject id plus the two residence columns; per their use below,
# Urban_Status.x is the current residence and Urban_Status.y the childhood one.
df <- almost_everything %>%
  select(Subject_Code, Urban_Status.x, Urban_Status.y)
# set up rural-to-urban ordering
# Recode the "UNKNOWN" sentinel to NA so those rows can be dropped later.
df$Urban_Status.x[df$Urban_Status.x %in% "UNKNOWN"] <- NA
df$Urban_Status.y[df$Urban_Status.y %in% "UNKNOWN"] <- NA
# Map raw residence codes to an ordered factor Rural < Town < Urban.
# Values outside the three known codes (including NA) come back as NA.
orderXZC <- function(myvar){
  residence_codes <- c("XIANG_RURAL", "ZHEN_TOWN", "CHENG_CITY")
  residence_labels <- c("Rural", "Town", "Urban")
  factor(myvar, levels = residence_codes, labels = residence_labels, ordered = TRUE)
}
df$Urban_Status.x <- orderXZC(df$Urban_Status.x)
df$Urban_Status.y <- orderXZC(df$Urban_Status.y)
# find and remove rows with incomplete data
# Counts kept for reference: interviewees with a known residence at each
# time point, before the NA rows are dropped.
no.loc.then <- length(df$Urban_Status.y[is.na(df$Urban_Status.y)])
total.then <- length(df$Subject_Code) - no.loc.then
no.loc.now <- length(df$Urban_Status.x[is.na(df$Urban_Status.x)])
total.now <- length(df$Subject_Code) - no.loc.now
df <- na.omit(df) # to get rid of all missing Location data
en <- length(df$Subject_Code)
# long format for plot
# One count row per (residence type, time point); time 1 = childhood
# (Urban_Status.y), time 2 = current (Urban_Status.x).
then <- as.data.frame(table(df$Urban_Status.y))
then$time <- 1
now <- as.data.frame(table(df$Urban_Status.x))
now$time <- 2
df2 <- rbind(then, now)
colnames(df2) <- c("res", "n", "time")
df2$time <- factor(df2$time,
                   labels = c("Childhood Residence", "Current Residence"))
# save plot data to text
write.csv(df2, file="analysis/urb_dist.csv",
          fileEncoding = "UTF-8",
          row.names=FALSE)
# plot elements
colors <- c("#57a3ad", "#dea73a") #teal, gold
#colors <- c("gray25", "gray75")
X.lab <- "Residence Type"
Y.lab <- "Number of Interviewees"
n <- en
nphrase <- paste("N =", en, "interviewees", sep=" ")
title <- "Rural, town and urban residents in the sample"
# Side-by-side bar charts, one facet per time point; the fill legend is
# suppressed because the facet strips already label each panel.
p <-
  ggplot(data=df2, aes(x = res, y=n, fill=time)) +
  geom_bar(stat = "identity") +
  scale_fill_manual(values=colors, guide = FALSE)+
  facet_wrap( ~ time) +
  labs(title = paste(title, nphrase, sep = "\n")) +
  theme(plot.title = element_text(hjust = 0.5)) + # center
  theme(axis.text=element_text(size=12),
        axis.title=element_text(size=12)) +
  theme(strip.text = element_text(size=12)) +
  xlab(X.lab) + ylab(Y.lab)
# plot out
# Same figure written twice: vector PDF and raster JPG.
ggsave(plot=p, file="figures/urb_dist.pdf",
       width=8, height=5, units="in")
ggsave(plot=p, file="figures/urb_dist.jpg",
       width=8, height=5, units="in")
| /urb_dist.r | permissive | saralakumari/lmsim | R | false | false | 2,592 | r | ####
# calculates distribution of
# Rural, Town, and Urban residents in sample (subjects only)
# based on current and childhood residences
# and plots as two side-by-side histograms
####
rm(list=ls())
library(dplyr)
library(tidyr)
library(ggplot2)
# read in data
source('inread.r')
almost_everything <- inread('data/2015_lmsim_data.csv')
df <- almost_everything %>%
select(Subject_Code, Urban_Status.x, Urban_Status.y)
# set up rural-to-urban ordering
df$Urban_Status.x[df$Urban_Status.x %in% "UNKNOWN"] <- NA
df$Urban_Status.y[df$Urban_Status.y %in% "UNKNOWN"] <- NA
orderXZC <- function(myvar){
x <- factor(myvar,
levels = c("XIANG_RURAL","ZHEN_TOWN","CHENG_CITY"),
labels=c("Rural","Town","Urban"),
ordered=TRUE)
return(x)
}
df$Urban_Status.x <- orderXZC(df$Urban_Status.x)
df$Urban_Status.y <- orderXZC(df$Urban_Status.y)
# find and remove rows with incomplete data
no.loc.then <- length(df$Urban_Status.y[is.na(df$Urban_Status.y)])
total.then <- length(df$Subject_Code) - no.loc.then
no.loc.now <- length(df$Urban_Status.x[is.na(df$Urban_Status.x)])
total.now <- length(df$Subject_Code) - no.loc.now
df <- na.omit(df) # to get rid of all missing Location data
en <- length(df$Subject_Code)
# long format for plot
then <- as.data.frame(table(df$Urban_Status.y))
then$time <- 1
now <- as.data.frame(table(df$Urban_Status.x))
now$time <- 2
df2 <- rbind(then, now)
colnames(df2) <- c("res", "n", "time")
df2$time <- factor(df2$time,
labels = c("Childhood Residence", "Current Residence"))
# save plot data to text
write.csv(df2, file="analysis/urb_dist.csv",
fileEncoding = "UTF-8",
row.names=FALSE)
# plot elements
colors <- c("#57a3ad", "#dea73a") #teal, gold
#colors <- c("gray25", "gray75")
X.lab <- "Residence Type"
Y.lab <- "Number of Interviewees"
n <- en
nphrase <- paste("N =", en, "interviewees", sep=" ")
title <- "Rural, town and urban residents in the sample"
p <-
ggplot(data=df2, aes(x = res, y=n, fill=time)) +
geom_bar(stat = "identity") +
scale_fill_manual(values=colors, guide = FALSE)+
facet_wrap( ~ time) +
labs(title = paste(title, nphrase, sep = "\n")) +
theme(plot.title = element_text(hjust = 0.5)) + # center
theme(axis.text=element_text(size=12),
axis.title=element_text(size=12)) +
theme(strip.text = element_text(size=12)) +
xlab(X.lab) + ylab(Y.lab)
# plot out
ggsave(plot=p, file="figures/urb_dist.pdf",
width=8, height=5, units="in")
ggsave(plot=p, file="figures/urb_dist.jpg",
width=8, height=5, units="in")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permutations.R
\name{runPermutations}
\alias{runPermutations}
\title{Create permutation files}
\usage{
runPermutations(file, trait, cpus, work.dir = getwd(),
topfile = file.path(work.dir, paste(basename(file_path_sans_ext(file)),
"topfile", sep = ".")), out.prefix = file.path(work.dir, "permutations",
paste(basename(file_path_sans_ext(file)), "perm", sep = "_")))
}
\arguments{
\item{file}{[\code{string}]\cr
File path of input MB-MDR file.}
\item{trait}{[\code{string}]\cr
Type of trait. "binary", "continuous" or "survival".}
\item{cpus}{[\code{integer}]\cr
Sets the total amount of CPUs to be used.}
\item{work.dir}{[\code{string}]\cr
Working directory for MB-MDR. Defaults to current working directory.}
\item{topfile}{[\code{string}]\cr
Path of topfile. Defaults to <\code{work.dir}>/<\code{file}>.topfile.}
\item{out.prefix}{[\code{string}]\cr
Path for saving the permutation files. Defaults to <\code{work.dir}>/permutations/<\code{file}>_perm.}
}
\value{
System output of MB-MDR executable.
}
\description{
Third step of parallel workflow of MB-MDR. Run permutations on multiple CPUs.
}
| /man/runPermutations.Rd | no_license | imbs-hl/mbmdR | R | false | true | 1,186 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/permutations.R
\name{runPermutations}
\alias{runPermutations}
\title{Create permutation files}
\usage{
runPermutations(file, trait, cpus, work.dir = getwd(),
topfile = file.path(work.dir, paste(basename(file_path_sans_ext(file)),
"topfile", sep = ".")), out.prefix = file.path(work.dir, "permutations",
paste(basename(file_path_sans_ext(file)), "perm", sep = "_")))
}
\arguments{
\item{file}{[\code{string}]\cr
File path of input MB-MDR file.}
\item{trait}{[\code{string}]\cr
Type of trait. "binary", "continuous" or "survival".}
\item{cpus}{[\code{integer}]\cr
Sets the total amount of CPUs to be used.}
\item{work.dir}{[\code{string}]\cr
Working directory for MB-MDR. Defaults to current working directory.}
\item{topfile}{[\code{string}]\cr
Path of topfile. Defaults to <\code{work.dir}>/<\code{file}>.topfile.}
\item{out.prefix}{[\code{string}]\cr
Path for saving the permutation files. Defaults to <\code{work.dir}>/permutations/<\code{file}>_perm.}
}
\value{
System output of MB-MDR executable.
}
\description{
Third step of parallel workflow of MB-MDR. Run permutations on multiple CPUs.
}
|
#' orthoBlast
#'
#' Remove duplicated hits per query and subset a BLAST table by a coverage
#' and an identity cutoff, keeping the best-scoring hit (highest bit score,
#' column 12) for each query.
#'
#' Expected column layout: 1 = query id, 3 = percent identity,
#' 12 = bit score, 13 = coverage.
#'
#' @param raw.blast raw blast table obtained with the ReadBlastTables function
#' @param coverage numeric coverage cutoff (column 13)
#' @param id.cutoff numeric, identity percent cutoff (column 3)
#'
#' @import dplyr
#' @import magrittr
#' @return a data frame with one best-scoring row per query
#' @export
#'
#' @examples orthoBlast(raw.blast = rawblast, coverage = 50, id.cutoff = 70)
#'
#' # if the tables are in a list
#'
#' sapply(raw.blast.list, orthoBlast, coverage = 70, id.cutoff = 70, simplify = F, USE.NAMES = T)
orthoBlast <- function(raw.blast, coverage, id.cutoff){
  # NOTE(review): the bare `.` inside each verb relies on magrittr binding
  # the pipe's LHS; group_by(.[[1]]) creates a grouping column literally
  # named `.[[1]]`, which is why it is dropped again with select(-`.[[1]]`).
  df.out <- raw.blast %>%
    filter(.[[13]] >= coverage) %>%      # coverage cutoff
    filter(.[[3]] >= id.cutoff) %>%      # identity cutoff
    arrange(.[[1]], desc(.[[12]])) %>%   # best bit score first within query
    group_by(.[[1]]) %>% slice(1) %>%    # keep one (best) row per query
    ungroup() %>%
    select(-`.[[1]]`)
  return(df.out)
}
# Batch, base-R counterpart of orthoBlast(): applies the same coverage /
# identity filtering and per-query deduplication to every BLAST table in a
# list.
#
# raw.blast: list of BLAST result data frames (column 1 = query id,
#            column 3 = percent identity, column 12 = bit score,
#            column 13 = coverage).
# coverage:  numeric, minimum coverage (column 13) to keep a hit.
# id.cutoff: numeric, minimum percent identity (column 3) to keep a hit.
#
# Returns a list (same names/order as raw.blast) where each element keeps,
# per query, only the best-scoring hit that passes both cutoffs.
orthoBlast.batch <- function(raw.blast, coverage, id.cutoff){
  lapply(raw.blast, function(tbl){
    # Drop hits below the coverage and identity thresholds.
    tbl <- tbl[tbl[[13]] >= coverage, ]
    tbl <- tbl[tbl[[3]] >= id.cutoff, ]
    # Sort so the highest bit score comes first within each query
    # (fixes `decreasing = T`; TRUE is the non-reassignable literal) ...
    tbl <- tbl[order(tbl[[1]], tbl[[12]], decreasing = TRUE), ]
    # ... then keep only that first (best) hit per query.  Returning the
    # value directly replaces the original dangling `t3 <- ...` assignment.
    tbl[!duplicated(tbl[[1]]), ]
  })
}
#' clean.by.sub
#'
#' Clean a BLAST table by subject: apply coverage and percent-identity
#' cutoffs, then keep only the best-scoring (highest bit score) hit for
#' each subject sequence.
#'
#' @param df data frame with a BLAST table (column 2 = subject id,
#'   column 3 = percent identity, column 12 = bit score, column 13 = coverage)
#' @param cov coverage cutoff to apply
#' @param id percent identity cutoff to apply
#'
#' @return the filtered data frame with one (best) row per subject
#' @export
#'
#' @examples clean.by.sub(df = blast.table, cov = 50, id = 70)
clean.by.sub <- function(df, cov, id){
  # Drop hits below the coverage and identity thresholds.
  df <- df[df[[13]] >= cov, ]
  df <- df[df[[3]] >= id, ]
  # Sort so the highest bit score comes first within each subject
  # (fixes `decreasing = T` -> TRUE), then keep that first occurrence only.
  df <- df[order(df[[2]], df[[12]], decreasing = TRUE), ]
  df[!duplicated(df[[2]]), ]
}
#' orthology.pairs
#'
#' Remove duplicated query hits, apply coverage and identity cutoffs, and
#' return only the first two columns (query, subject) -- i.e. the putative
#' orthologous pairs.
#'
#' @param df data freame of the blast table (column 1 = query id,
#'   column 3 = percent identity, column 12 = bit score, column 13 = coverage)
#' @param id numeric, percent identity cutoff
#' @param cov numeric, coverage cutoff
#'
#' @import dplyr
#' @import magrittr
#'
#' @return data frame of two columns with the orthologous pairs
#' @export
#'
#' @examples orthology.pairs(blast.table, id = 70, cov = 50)
orthology.pairs <- function(df, id, cov){
  # distinct(.[[1]], .keep_all = TRUE) keeps the first row per query, which
  # after arrange() is the highest-scoring hit; select(c(1, 2)) then returns
  # the original query and subject columns.
  df.out <- df %>%
    filter(.[[13]] >= cov) %>%
    filter(.[[3]] >= id) %>%
    arrange(.[[1]], desc(.[[12]])) %>%
    distinct(.[[1]], .keep_all = T)%>%
    select(c(1,2))
  return(df.out)
}
#' clean.by.query
#'
#' Clean a BLAST table by query: apply percent-identity and coverage
#' cutoffs, then keep only the best-scoring (highest bit score) hit for
#' each query sequence.
#'
#' @param df data frame with the BLAST table (column 1 = query id,
#'   column 3 = percent identity, column 12 = bit score, column 13 = coverage)
#' @param id numeric, identity percent cutoff
#' @param cov numeric, coverage cutoff
#'
#' @return a data frame with the blast table cleaned: one (best) row per query
#' @export
#'
#' @examples clean.by.query(df = blast.table, id = 70, cov = 50)
clean.by.query <- function(df, id, cov){
  # Drop hits below the coverage and identity thresholds.
  # (The original checked `is.null(df)` here, but subsetting a data frame
  # never yields NULL, so that branch was dead code and has been removed.)
  df <- df[df[[13]] >= cov, ]
  df <- df[df[[3]] >= id, ]
  # Sort so the highest bit score comes first within each query
  # (fixes `decreasing = T` -> TRUE), then keep that first occurrence only.
  df <- df[order(df[[1]], df[[12]], decreasing = TRUE), ]
  df[!duplicated(df[[1]]), ]
}
| /R/BlastTools.R | no_license | torresmanno/BioTools | R | false | false | 3,002 | r | #' orthoBlast
#'
#' remove duplicated in query and subject and subset by a coverage and id.cutoff
#'
#' @param raw.blast raw blast tables obtained with ReadBlastTables functino
#' @param coverage numeric coverage cutoff
#' @param id.cutoff numeric, identity percent cutoff
#'
#' @import dplyr
#' @import magrittr
#' @return return a list with the data frames with the tables
#' @export
#'
#' @examples orthoBlast(raw.blast = rawblast, coverage = 50, id.cutoff = 70)
#'
#' # if the tables are in a list
#'
#' sapply(raw.blast.list, orthoBlast, coverage = 70, id.cutoff = 70, simplify = F, USE.NAMES = T)
orthoBlast <- function(raw.blast, coverage, id.cutoff){
df.out <- raw.blast %>%
filter(.[[13]] >= coverage) %>%
filter(.[[3]] >= id.cutoff) %>%
arrange(.[[1]], desc(.[[12]])) %>%
group_by(.[[1]]) %>% slice(1) %>%
ungroup() %>%
select(-`.[[1]]`)
return(df.out)
}
orthoBlast.batch <- function(raw.blast, coverage, id.cutoff){
lapply(raw.blast, function(b){
t1 <- b[b[[13]] >= coverage, ]
t2 <- t1[t1[[3]] >= id.cutoff, ]
t2 <- t2[order(t2[[1]], t2[[12]], decreasing = T), ]
t3 <- t2[!duplicated(t2[[1]]), ]
})
}
#' clean.by.sub
#'
#' clean the blast tables by subject and coverage and identity percent cutoff
#'
#' @param df data frame to be subset
#' @param cov coverage cutoff to apply
#' @param id percent identity cutoff to apply
#'
#' @return return the data frame subseted
#' @export
#'
#' @examples clean.by.sub(df = blast.table, cov = 50, id = 70)
clean.by.sub <- function(df, cov, id){
df <- df[df[[13]] >= cov, ]
df <- df[df[[3]] >=id,]
df <- df[order(df[[2]], df[[12]], decreasing = T), ]
return(df[!duplicated(df[[2]]),])}
#' orthology.pairs
#'
#' remove duplicated in query and subject and subset by a coverage and id.cutoff but return the a table with the pair of orthologs
#'
#' @param df data freame of the blast table
#' @param id numeric, percent identity cutoff
#' @param cov numeric, coverage cutoff
#'
#' @import dplyr
#' @import magrittr
#'
#' @return data frame of two columns with the orthologous pairs
#' @export
#'
#' @examples orthology.pairs(blast.table, id = 70, cov = 50)
orthology.pairs <- function(df, id, cov){
df.out <- df %>%
filter(.[[13]] >= cov) %>%
filter(.[[3]] >= id) %>%
arrange(.[[1]], desc(.[[12]])) %>%
distinct(.[[1]], .keep_all = T)%>%
select(c(1,2))
return(df.out)
}
#' clean.by.query
#'
#' remove the duplicated query hits and apply a cutoff in identity percent and coverage
#'
#' @param df data frame with the blast table
#' @param id numeric, identity percent cutoff
#' @param cov numeric, coverage cutoff
#'
#' @return a data freame with the blast table cleaned
#' @export
#'
#' @examples clean.by.query(df = blast.table, id = 70, cov = 50)
clean.by.query <- function(df, id, cov){
df <- df[df[[13]] >= cov, ]
if(is.null(df)){
stop()
}
df <- df[df[[3]] >=id,]
df <- df[order(df[[1]], df[[12]], decreasing = T), ]
return(df[!duplicated(df[[1]]),])}
|
# get list of single unique words from all sources
# blog + news
# full_join keeps words present in either table; n.x / n.y are the
# per-source counts created by the join.
# NOTE(review): a word missing from one source yields NA in n.x or n.y, so
# its combined count is NA; downstream sums use na.rm = TRUE to compensate.
blog.news.freq <- full_join(blog.freq, news.freq, by = "word") %>%
  mutate(blog.news.n = n.x + n.y) %>%
  select(-c(n.x, n.y))
# blog + news + twitter
all.freq <- full_join(blog.news.freq, twitter.freq, by = "word") %>%
  mutate(freq = blog.news.n + n) %>%
  select(word, freq)
# calculate 50%, 90% of word instances
# Total word instances across all three sources, then the 50% / 90% targets.
sum.freq <- colSums(all.freq[,"freq"], na.rm = TRUE)
fifty.perc <- sum.freq * 0.5
ninety.perc <- sum.freq * 0.9
# create a while loop that adds up words in the list to its own table until freq hits 50%, 90%
# setCoverage: number of top-ranked words needed to reach a target coverage.
#
# df:  data frame (or tibble) of words with a numeric `freq` column, assumed
#      ordered by descending frequency -- TODO confirm against the caller.
#      NA frequencies count as zero, matching the original `na.rm = TRUE`.
# cov: target coverage as a fraction of the total frequency (e.g. 0.5, 0.9).
#
# Returns the smallest row count i such that the first i rows account for at
# least `cov` of the total frequency; if the target is never reached
# (e.g. cov > 1), returns nrow(df), exactly as the original loop did.
#
# Rewritten with cumsum(): the original re-sliced df[1:i, ] and re-summed
# the entire column on every iteration (O(n^2)), and its colSums() call only
# worked on tibbles (`df[, "freq"]` drops to a bare vector on a base
# data.frame, which colSums rejects).  This version is O(n) and handles both.
setCoverage <- function(df, cov) {
  freq <- df[["freq"]]
  freq[is.na(freq)] <- 0            # mirror na.rm = TRUE in the original sums
  running <- cumsum(freq) / sum(freq)
  reached <- which(running >= cov)
  if (length(reached) == 0) nrow(df) else reached[1]
}
# get list of single unique words from all sources
# blog + news
blog.news.freq <- full_join(blog.freq, news.freq, by = "word") %>%
mutate(blog.news.n = n.x + n.y) %>%
select(-c(n.x, n.y))
# blog + news + twitter
all.freq <- full_join(blog.news.freq, twitter.freq, by = "word") %>%
mutate(freq = blog.news.n + n) %>%
select(word, freq)
# calculate 50%, 90% of word instances
sum.freq <- colSums(all.freq[,"freq"], na.rm = TRUE)
fifty.perc <- sum.freq * 0.5
ninety.perc <- sum.freq * 0.9
# create a while loop that adds up words in the list to its own table until freq hits 50%, 90%
setCoverage <- function(df, cov) {
for(i in 1:nrow(df)) {
tmp <- df[1:i, ]
full.coverage <- colSums(df[,"freq"], na.rm = TRUE)
coverage <- colSums(tmp[,"freq"], na.rm = TRUE) / full.coverage
if (coverage >= cov)
break
}
return(nrow(tmp))
} |
#' @rdname scheduler
#' @section Plotters:
#' \code{plotPrior:} Method to plot prior distributions.
#' @export
plotPrior <- function(scheduler){
  # Suggested-only plotting dependencies; packageCheck() is a package-local
  # helper that presumably errors informatively when they are missing.
  packageCheck("ggplot2")
  packageCheck("patchwork")
  sch <- scheduler
  # x-axis limits: at least [-3, 3], stretched to span all observed rewards.
  xmin <- min(-3, min(unlist(sch@rewards)))
  xmax <- max(3, max(unlist(sch@rewards)))
  # One vertical (coord_flip) density panel per experimental arm, drawn from
  # the arm's prior df/mean/var slots.  alt_dt is presumably a location-scale
  # t density defined elsewhere in this package -- confirm before changing
  # its arguments.  NOTE(review): aes_string() is deprecated in recent
  # ggplot2; it still works but may warn.
  plots <- lapply(1:sch@K.arms, function(arm){
    plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
      ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
      ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm)) +
      ggplot2::stat_function(fun = alt_dt, n = 1001,
                             args = list(df = scheduler@prior.df[arm],
                                         mean = scheduler@prior.mean[arm],
                                         sd = sqrt(scheduler@prior.var[arm])))
    return(plot_arm)
  })
  # Arrange the per-arm panels in a single row.
  patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotPosterior:} Method to plot the most recent posterior distributions.
#' @export
plotPosterior <- function(scheduler){
  # Suggested-only plotting dependencies; packageCheck() is a package-local
  # helper that presumably errors informatively when they are missing.
  packageCheck("ggplot2")
  packageCheck("patchwork")
  sch <- scheduler
  # x-axis limits: at least [-3, 3], stretched to span all observed rewards.
  xmin <- min(-3, min(unlist(sch@rewards)))
  xmax <- max(3, max(unlist(sch@rewards)))
  # One vertical (coord_flip) density panel per arm, drawn from the current
  # posterior df/mean/var slots.  alt_dt is presumably a location-scale t
  # density defined elsewhere in this package -- confirm before changing it.
  plots <- lapply(1:sch@K.arms, function(arm){
    plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
      ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
      ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm)) +
      ggplot2::stat_function(fun = alt_dt, n = 1001,
                             args = list(df = scheduler@post.df[arm],
                                         mean = scheduler@post.mean[arm],
                                         sd = sqrt(scheduler@post.var[arm])))
    return(plot_arm)
  })
  # Arrange the per-arm panels in a single row.
  patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotHistory:} Method to plot all posterior distributions.
#' @export
plotHistory <- function(scheduler){
  # Suggested-only plotting dependencies; packageCheck() is a package-local
  # helper that presumably errors informatively when they are missing.
  packageCheck("ggplot2")
  packageCheck("patchwork")
  sch <- scheduler
  # One colour per recorded time step (0 .. sch@step); the terrain palette is
  # reversed so later steps take colours from the end of the palette.
  cols <- rev(terrain.colors(sch@step+1))
  steps <- 0:(length(cols)-1)
  # x-axis limits: at least [-3, 3], stretched to span all observed rewards.
  xmin <- min(-3, min(unlist(sch@rewards)))
  xmax <- max(3, max(unlist(sch@rewards)))
  plots <- lapply(1:sch@K.arms, function(arm){
    plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
      ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
      ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm))
    # Overlay one posterior density per step, read from the history.post
    # slot and colour-coded by step (alt_dt as in the other plotters).
    for(t in steps){
      t.post <- sch@history.post[sch@history.post$step == t,]
      plot_arm <- plot_arm +
        ggplot2::stat_function(fun = alt_dt, n = 1001,
                               args = list(df = t.post$df[arm],
                                           mean = t.post$mean[arm],
                                           sd = sqrt(t.post$var[arm])),
                               colour = cols[t+1])
    }
    return(plot_arm)
  })
  # Arrange the per-arm panels in a single row.
  patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotAllocation:} Method to plot the allocation ratios for each time step.
#' @export
plotAllocation <- function(scheduler){
  packageCheck("ggplot2")
  sch <- scheduler
  # Trajectory of (next allocation ratio, posterior mean) over time, one
  # facet per arm; each point is labelled with its time step and fades in
  # as the step increases (alpha mapped to step).
  # NOTE(review): "time" is not a real ggplot2 aesthetic; aes_string()
  # passes it through with no visual effect -- presumably a leftover.
  plot_arm <- ggplot2::ggplot(data = sch@history.post,
                              ggplot2::aes_string(x = "next_ratio", y = "mean", time = "step", label = "step", alpha = "step")) +
    ggplot2::geom_text() + ggplot2::geom_path() + ggplot2::facet_grid(arm ~ .) + ggplot2::xlim(0, 1) +
    ggplot2::ylab("Posterior Mean") + ggplot2::xlab("Subsequent Allocation Ratio") +
    ggplot2::labs(label = "Time Step", alpha = "Time Step") +
    ggplot2::theme_bw()
  plot_arm
}
| /R/3-scheduler-viz.R | no_license | zhenxuanzhang/rarsim | R | false | false | 3,889 | r | #' @rdname scheduler
#' @section Plotters:
#' \code{plotPrior:} Method to plot prior distributions.
#' @export
plotPrior <- function(scheduler){
packageCheck("ggplot2")
packageCheck("patchwork")
sch <- scheduler
xmin <- min(-3, min(unlist(sch@rewards)))
xmax <- max(3, max(unlist(sch@rewards)))
plots <- lapply(1:sch@K.arms, function(arm){
plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm)) +
ggplot2::stat_function(fun = alt_dt, n = 1001,
args = list(df = scheduler@prior.df[arm],
mean = scheduler@prior.mean[arm],
sd = sqrt(scheduler@prior.var[arm])))
return(plot_arm)
})
patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotPosterior:} Method to plot the most recent posterior distributions.
#' @export
plotPosterior <- function(scheduler){
packageCheck("ggplot2")
packageCheck("patchwork")
sch <- scheduler
xmin <- min(-3, min(unlist(sch@rewards)))
xmax <- max(3, max(unlist(sch@rewards)))
plots <- lapply(1:sch@K.arms, function(arm){
plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm)) +
ggplot2::stat_function(fun = alt_dt, n = 1001,
args = list(df = scheduler@post.df[arm],
mean = scheduler@post.mean[arm],
sd = sqrt(scheduler@post.var[arm])))
return(plot_arm)
})
patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotHistory:} Method to plot all posterior distributions.
#' @export
plotHistory <- function(scheduler){
packageCheck("ggplot2")
packageCheck("patchwork")
sch <- scheduler
cols <- rev(terrain.colors(sch@step+1))
steps <- 0:(length(cols)-1)
xmin <- min(-3, min(unlist(sch@rewards)))
xmax <- max(3, max(unlist(sch@rewards)))
plots <- lapply(1:sch@K.arms, function(arm){
plot_arm <- ggplot2::ggplot(data = data.frame(x = c(xmin, xmax)), ggplot2::aes_string("x")) +
ggplot2::scale_y_continuous(breaks = NULL) + ggplot2::coord_flip() + ggplot2::theme_bw() +
ggplot2::ylab("") + ggplot2::xlab(paste0("Experimental Arm ", arm))
for(t in steps){
t.post <- sch@history.post[sch@history.post$step == t,]
plot_arm <- plot_arm +
ggplot2::stat_function(fun = alt_dt, n = 1001,
args = list(df = t.post$df[arm],
mean = t.post$mean[arm],
sd = sqrt(t.post$var[arm])),
colour = cols[t+1])
}
return(plot_arm)
})
patchwork::wrap_plots(plots, nrow = 1)
}
#' @rdname scheduler
#' @section Plotters:
#' \code{plotAllocation:} Method to plot the allocation ratios for each time step.
#' @export
plotAllocation <- function(scheduler){
packageCheck("ggplot2")
sch <- scheduler
plot_arm <- ggplot2::ggplot(data = sch@history.post,
ggplot2::aes_string(x = "next_ratio", y = "mean", time = "step", label = "step", alpha = "step")) +
ggplot2::geom_text() + ggplot2::geom_path() + ggplot2::facet_grid(arm ~ .) + ggplot2::xlim(0, 1) +
ggplot2::ylab("Posterior Mean") + ggplot2::xlab("Subsequent Allocation Ratio") +
ggplot2::labs(label = "Time Step", alpha = "Time Step") +
ggplot2::theme_bw()
plot_arm
}
|
# This is the user-interface definition of a Shiny web application.
library(shiny)
# Fix tag("div", list(...)) : could not find function "showOut…
# (rCharts provides showOutput(), used by the "By year" tab below.)
library(rCharts)
shinyUI(
  navbarPage("Storm Database Explorer",
             # Main tab: a year-range slider plus server-rendered event-type
             # controls drive the three views in the tabset below.
             tabPanel("Plot",
                      sidebarPanel(
                        # Year range of the NOAA storm database (1950-2011).
                        # NOTE(review): sliderInput(format=) was removed in
                        # modern shiny; it may warn or be ignored there.
                        sliderInput("range", 
                                    "Range:", 
                                    min = 1950, 
                                    max = 2011, 
                                    value = c(1993, 2011),
                                    format="####"),
                        # Event-type checkboxes are rendered server-side.
                        uiOutput("evtypeControls"),
                        actionButton(inputId = "clear_all", label = "Clear selection", icon = icon("check-square")),
                        actionButton(inputId = "select_all", label = "Select all", icon = icon("check-square-o"))
                      ),
                      mainPanel(
                        tabsetPanel(
                          # Data by state
                          tabPanel(p(icon("map-marker"), "By state"),
                                   column(3,
                                          wellPanel(
                                            radioButtons(
                                              "populationCategory",
                                              "Population impact category:",
                                              c("Both" = "both", "Injuries" = "injuries", "Fatalities" = "fatalities"))
                                          )
                                   ),
                                   column(3,
                                          wellPanel(
                                            radioButtons(
                                              "economicCategory",
                                              "Economic impact category:",
                                              c("Both" = "both", "Property damage" = "property", "Crops damage" = "crops"))
                                          )
                                   ),
                                   column(7,
                                          plotOutput("populationImpactByState"),
                                          plotOutput("economicImpactByState")
                                   )
                          ),
                          # Time series data
                          # Three nvd3 charts rendered by rCharts::showOutput.
                          tabPanel(p(icon("line-chart"), "By year"),
                                   h4('Number of events by year', align = "center"),
                                   showOutput("eventsByYear", "nvd3"),
                                   h4('Population impact by year', align = "center"),
                                   showOutput("populationImpact", "nvd3"),
                                   h4('Economic impact by year', align = "center"),
                                   showOutput("economicImpact", "nvd3")
                          ),
                          # Data
                          # Raw table view with a CSV-style download button.
                          tabPanel(p(icon("table"), "Data"),
                                   dataTableOutput(outputId="table"),
                                   downloadButton('downloadData', 'Download')
                          )
                        )
                      )
             ),
             # Static about page rendered from a markdown file.
             tabPanel("About",
                      mainPanel(
                        includeMarkdown("include.md")
                      )
             )
  )
)
| /ui.R | no_license | renrenang/Developing-Data-Products---Shiny | R | false | false | 3,246 | r | # This is the user-interface definition of a Shiny web application.
library(shiny)
# Fix tag("div", list(...)) : could not find function "showOut…
library(rCharts)
shinyUI(
navbarPage("Storm Database Explorer",
tabPanel("Plot",
sidebarPanel(
sliderInput("range",
"Range:",
min = 1950,
max = 2011,
value = c(1993, 2011),
format="####"),
uiOutput("evtypeControls"),
actionButton(inputId = "clear_all", label = "Clear selection", icon = icon("check-square")),
actionButton(inputId = "select_all", label = "Select all", icon = icon("check-square-o"))
),
mainPanel(
tabsetPanel(
# Data by state
tabPanel(p(icon("map-marker"), "By state"),
column(3,
wellPanel(
radioButtons(
"populationCategory",
"Population impact category:",
c("Both" = "both", "Injuries" = "injuries", "Fatalities" = "fatalities"))
)
),
column(3,
wellPanel(
radioButtons(
"economicCategory",
"Economic impact category:",
c("Both" = "both", "Property damage" = "property", "Crops damage" = "crops"))
)
),
column(7,
plotOutput("populationImpactByState"),
plotOutput("economicImpactByState")
)
),
# Time series data
tabPanel(p(icon("line-chart"), "By year"),
h4('Number of events by year', align = "center"),
showOutput("eventsByYear", "nvd3"),
h4('Population impact by year', align = "center"),
showOutput("populationImpact", "nvd3"),
h4('Economic impact by year', align = "center"),
showOutput("economicImpact", "nvd3")
),
# Data
tabPanel(p(icon("table"), "Data"),
dataTableOutput(outputId="table"),
downloadButton('downloadData', 'Download')
)
)
)
),
tabPanel("About",
mainPanel(
includeMarkdown("include.md")
)
)
)
)
|
# Fuzzer-generated (AFL) regression input for the internal routine
# mcga:::ByteVectorToDoubles, which -- per its name -- presumably converts a
# byte/integer vector into doubles (confirm in the mcga sources).  The check
# is simply that the call completes without crashing (run under valgrind).
testlist <- list(b = c(-1667457875L, -1667457892L, 2100493L, 2100493L, -1835887972L ))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) | /mcga/inst/testfiles/ByteVectorToDoubles/AFL_ByteVectorToDoubles/ByteVectorToDoubles_valgrind_files/1613108319-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 153 | r | testlist <- list(b = c(-1667457875L, -1667457892L, 2100493L, 2100493L, -1835887972L ))
result <- do.call(mcga:::ByteVectorToDoubles,testlist)
str(result) |
# Manual checkpoint of the working data set: `dstemp` keeps a copy of `ds`
# so it can be restored after destructive transformations; the immediate
# re-assignment restores `ds` from that backup (a no-op on the first run).
dstemp <- ds
ds <- dstemp
# NOTE(review): three library() calls on one line work, but one call per
# line is the usual convention.
library(Hmisc); library(ggplot2); library(caret);
# FUNCTIONS
# Sum up detailed data per (campaign, app) couple
# Aggregate raw per-event measures to one row per (CampaignID, AppOriginID,
# Country[, Date][, subcriteria...]) combination.
#
# Args:
#   data:        data.frame of raw stats; must contain CampaignID,
#                AppOriginID, Country, CostPerBid and the *Glob measure
#                columns summed below. A Date column, when present, is
#                added to the grouping key.
#   subcriteria: optional character vector of extra column names to group
#                by (e.g. "CampaignTargetTypeName").
#
# Returns: data.frame with the grouping columns followed by the summed
#          measures (earnings, displays, clicks, installs, leads, sessions,
#          new_users) and costPerBid (mean CostPerBid of the campaign).
groupMeasuresPerCampPerApp <- function(data, subcriteria){
  # Grouping key: fixed dimensions, then optional Date, then any
  # caller-supplied sub-criteria (same column order the old merge() chain
  # produced via append(naming, subcriteria, insertPos)).
  group_cols <- c("CampaignID", "AppOriginID", "Country")
  if ("Date" %in% colnames(data)) {
    group_cols <- c(group_cols, "Date")
  }
  if (!missing(subcriteria)) {
    group_cols <- c(group_cols, subcriteria)
  }
  # Output measure name -> raw input column that gets summed.
  measure_cols <- c(earnings  = "earningsGlob",
                    displays  = "displaysGlob",
                    clicks    = "clicksGlob",
                    installs  = "installsGlob",
                    leads     = "leadsGlob",
                    sessions  = "sessionsGlob",
                    new_users = "newUsersGlob")
  # One aggregate() call replaces the previous seven aggregate() calls plus
  # a nested merge() chain. The old chain also contained a 'susbsetTo' typo
  # that left the column-dropping indices (subsetSel) stale for vector
  # sub-criteria, and incremented them twice when Date was present.
  res <- aggregate(data[, unname(measure_cols), drop = FALSE],
                   by = data[, group_cols, drop = FALSE],
                   FUN = sum)
  names(res) <- c(group_cols, names(measure_cols))
  # Mean bid cost per campaign, matched by value rather than by factor
  # level index (as.numeric() on a factor compares level codes, which is
  # fragile when the two factors are coded differently).
  cpb <- vapply(as.character(res$CampaignID),
                function(cid) mean(data$CostPerBid[as.character(data$CampaignID) == cid]),
                numeric(1), USE.NAMES = FALSE)
  res <- cbind(res, costPerBid = cpb)
  res
}
# Build per-(campaign, app, country) KPIs on top of the aggregated measures.
#
# Args:
#   data:        raw stats data.frame (see groupMeasuresPerCampPerApp).
#   subcriteria: optional extra grouping column names, forwarded as-is.
#
# Returns: unique rows of the aggregated measures extended with
#          dispPerSess, CPM, CTR, CVR and CPMnorm (CPM divided by the
#          campaign's mean cost per bid).
groupKPIsPerCampPerApp <- function(data, subcriteria){
  msrs <- groupMeasuresPerCampPerApp(data, subcriteria);
  msrs <- cbind(msrs,
                # Guard on the divisor (sessions), not on displays: the old
                # guard produced Inf for rows with displays but no sessions,
                # and NaN where 0/sessions = 0 was well defined.
                dispPerSess = ifelse(msrs$sessions > 0, msrs$displays / msrs$sessions, NaN),
                # Earnings per thousand displays.
                CPM = ifelse(msrs$displays > 0, msrs$earnings / msrs$displays * 1000, NaN),
                CTR = ifelse(msrs$displays > 0, msrs$clicks / msrs$displays, NaN),
                # Conversions (installs + leads) per click.
                CVR = ifelse(msrs$clicks > 0, (msrs$installs + msrs$leads) / msrs$clicks, NaN),
                # CPM normalised by the campaign's bid cost.
                CPMnorm = ifelse(msrs$displays > 0, (msrs$earnings / msrs$displays * 1000)/msrs$costPerBid, NaN)
  )
  unique(msrs)
}
# Fit an LDA classifier (via caret) on binned CPMnorm as a function of
# displays, on a 70/30 train/test split.
# NOTE(review): reads the global dsSplitPerSubCriteria rather than taking an
# argument; requires the caret package; name keeps the original "baysian"
# spelling for compatibility with existing callers.
baysianPredict <- function(){
#library(e1071)
#library(caret)
# NOTE(review): 'rm.na=T' is passed to cbind() and therefore creates a
# literal column named "rm.na" — presumably na.rm was intended; verify.
# CPMnorm_ bins CPMnorm into unit-wide intervals (-1,0], (0,1], ..., (5,6].
dsSplitPerSubCriteriaCPMgrouped <- cbind(dsSplitPerSubCriteria, CPMnorm_ = cut(dsSplitPerSubCriteria$CPMnorm, breaks=-1:6), rm.na=T)#(max(dsSplitPerSubCriteria$CPMnorm)+1)
#cut(dsSplitPerSubCriteria$CPMnorm, breaks=-1:(max(dsSplitPerSubCriteria$CPMnorm)+1))
# 70% stratified training partition on the (continuous) CPMnorm values.
datasplit <- createDataPartition(y=dsSplitPerSubCriteriaCPMgrouped$CPMnorm, list=F, p=.7)
trainset <- dsSplitPerSubCriteriaCPMgrouped[datasplit,]
testset <- dsSplitPerSubCriteriaCPMgrouped[-datasplit,]
# Linear discriminant analysis: predict the CPMnorm bin from displays only.
modelLDA <- train(CPMnorm_ ~ displays, data=trainset, method='lda')
# Returned (invisibly, as the value of the assignment): held-out predictions.
plda <- predict(modelLDA, newdata=testset)
}
# Displays-weighted average of the normalised CPM over all rows.
#
# Args:
#   data: data.frame with numeric columns 'displays' and 'CPMnorm'.
#
# Returns: sum(displays * CPMnorm) / sum(displays); NaN when the input is
#          empty or every display count is zero.
getAverageCPM <- function(data){
  # Vectorised replacement of the per-row accumulation loop, which also
  # iterated over 1:dim(data)[1] and therefore ran once (on an NA row)
  # for a zero-row input.
  sum(data$displays * data$CPMnorm) / sum(data$displays)
}
# Drop identifier, raw-measure and outcome columns so that only the model
# feature columns remain.
#
# Args:
#   ds: full stats data.frame.
#
# Returns: ds restricted (in original column order) to the feature columns.
prepareDS <- function(ds){
  # Columns that identify rows or leak the outcome — never features.
  excluded <- c("ConnectionTypeName", "AppOriginID", "CampaignID", "Country", "AppAdvertisedID", "requestsGlob", "displaysGlob", "clicksGlob", "installsGlob", "leadsGlob", "earningsGlob", "spendingsGlob", "CPM", "CTR", "CVR") #, "CostPerBid"
  kept <- setdiff(names(ds), excluded)
  ds[, kept, drop = FALSE]
}
# Load (or accept) the raw stats table and normalise its column types.
#
# Args:
#   csvFile: path of a CSV file to read, or an already-loaded data.frame.
#            (The original version never assigned 'ds' for non-character
#            input and therefore errored on a data.frame argument.)
#
# Returns: the data.frame with identifier columns converted to factors,
#          Date parsed to Date class (when present) and a CPMnorm column
#          appended (CPM / CostPerBid, NA when the bid cost is not > 0).
setDS <- function(csvFile){
  if(is.character(csvFile)){
    ds <- read.csv(csvFile)
  } else {
    # Allow a pre-loaded data.frame to be passed straight through.
    ds <- csvFile
  }
  # Keep rows with a non-negative display count. (The original's extra
  # filters on top-30 apps/campaigns were commented out and are dropped.)
  ds <- subset(ds, displaysGlob >= 0)
  # Identifier columns are categorical, not numeric.
  ds$CampaignID <- as.factor(ds$CampaignID)
  ds$AppOriginID <- as.factor(ds$AppOriginID)
  # Fix the cost-type level order so plots/models use a stable ordering.
  ds$CampaignCostTypeName <- factor(ds$CampaignCostTypeName, levels=c(
    "CPM",
    "CPC (multiple / campaign)",
    "CPI (unique / campaign)",
    "CPL (multiple / campaign / productID) - any kind of goals reached"
  ))
  if("Date" %in% colnames(ds))
  {
    ds$Date <- as.Date(as.POSIXct(ds$Date, 'GMT'))
  }
  ds$AppAdvertisedID <- as.factor(ds$AppAdvertisedID)
  ds$AppOriginOrientation <- as.factor(ds$AppOriginOrientation)
  ds$AppAdvertisedOrientation <- as.factor(ds$AppAdvertisedOrientation)
  # Normalise CPM by the campaign bid cost; undefined when CostPerBid <= 0.
  ds <- cbind(ds, CPMnorm = ifelse(ds$CostPerBid > 0, (ds$CPM / ds$CostPerBid), NA))
  ds
}
# Fit a one-predictor linear model outcome ~ criteria (column names given as
# strings) and plot observed values plus fitted values, annotating each level
# of the predictor with its observation count. Prints the lm object.
# NOTE(review): 'add=TRUE' is not a plot.default() argument, so the second
# plot() call presumably opens a fresh plot instead of overlaying — verify.
simpleRegression<- function(data, outcome, criteria){
# Build the formula from the string column names via symbol evaluation.
LM<- lm(eval(as.symbol(outcome))~eval(as.symbol(criteria)), data)
# Observed values, y-axis clipped to the 5%-95% quantile band.
plot(data[[criteria]], data[[outcome]], ylim=c(quantile(data[[outcome]], .05,na.rm = T),quantile(data[[outcome]], .95,na.rm = T)))#, geom="boxplot"), col=colorRamp(c('red', 'blue'))(0.5)
#plot(data[[criteria]], data[[outcome]], ylim=c(0,1))#, geom="boxplot"), col=colorRamp(c('red', 'blue'))(0.5)
# Fitted values as large diamonds.
plot(data[[criteria]], LM$fitted, pch=18, cex=3, col=5, add=TRUE) #col=colorRamp(c('red', 'blue'))(0.5),
# Label each predictor level with its total observation count.
for(i in 1:length(unique(data[[criteria]]))){
currentMod <- unique(data[[criteria]])[i];
sel <- data[[criteria]]
text(sel[sel== currentMod], min(data[[outcome]]), paste('Tot:',length(sel[sel== currentMod]), sep=":"), col=i)
}
print(LM)
}
# Fit outcome ~ all remaining feature columns, plot fitted values against one
# criteria column, and print per-group predicted/mean/median CPMnorm.
# NOTE(review): depends on globals defined elsewhere in the script —
# '%ni%', nonFeaturesCol, selection and similarCampaigns — so it only works
# after the surrounding script has been sourced; verify before reuse.
multipleRegression <- function(data, outcome, criteria){
# Regress the outcome on every column except identifiers and the columns
# already fixed by the 'selection' filter.
LM<- lm(eval(as.symbol(outcome))~., data=subset(data, select=names(data) %ni% c(nonFeaturesCol, names(selection))))
#plot(data[[criteria]], LM$fitted, type="l", lwd=3, col="red")
qplot(data[[criteria]], LM$fitted)
# compare prediction and mean
# For each level of 'criteria': weighted prediction vs raw mean/median.
lapply(split(similarCampaigns, similarCampaigns[criteria]),
function(x){
print(x[1, criteria])
print(list( pred=getAverageCPM(x), mean = mean(x$CPMnorm), med = median(x$CPMnorm[x$CPMnorm>0])))
})
}
## END FUNCTIONS
#feature plot#
featurePlot(x=ds[,c('CampaignTargetTypeName', 'AppAdvertisedCategory', 'AppOriginCategory')], y=ds$CPMnorm, plot="pairs")
ds <- setDS('AppsStatsPerDaySinceFirstJuly.csv')#('StatsOverFullTimeWithConnType.csv')#('StatsForLaunchingPeriodWithConnType.csv')
# we focus on only 1 appOrigin
ds1app <- ds[ds$AppOriginID == '2300',]
# selection of campaigns being exactly the same
selection=list(
CampaignTypeName= "Full screen interstitial (in-app)"
,CampaignSubTypeName = "[PAID] AdDeals user acquisition campaign [quality users]"
,PlatformName = "Windows [Phone/Tablets/PCs]"
,AdTypeName="AdDeals Interstitials Full Screen (Native)"
,AppAdvertisedCategory = "Games"
,AppAdvertisedOrientation = "1"
,AppOriginCategory = "Games"
,AppOriginOrientation = "2"
#,CampaignCostTypeName = "CPI (unique / campaign)"
#,CampaignTargetTypeName = "Web mobile"
#,ConnectionTypeName = "Ethernet"
#,WeekDay = "Mond./Tues."
#,grepl("US", CountrySet) == T
)
selectionNames <- names(selection)
#similarCampaigns <- ds; lapply(selectionNames, function(nameindex){similarCampaigns <- subset(similarCampaigns, similarCampaigns[nameindex]==selection[[nameindex]])}) #ds[x.name]=
similarCampaigns <- subset(ds,
CampaignTypeName== "Full screen interstitial (in-app)"
& CampaignSubTypeName == "[PAID] AdDeals user acquisition campaign [quality users]"
& PlatformName == "Windows [Phone/Tablets/PCs]"
#& AppAdvertisedCategory == "Games"
#& AppOriginCategory == "Games"
& AdTypeName=="AdDeals Interstitials Full Screen (Native)"
#& AppAdvertisedOrientation == "1"
#& AppOriginOrientation == "2"
#& CampaignCostTypeName == "CPI (unique / campaign)"
#& CampaignTargetTypeName == "Web mobile"
#& ConnectionTypeName == "Ethernet"
#& WeekDay == "Satu./Sund."#"Mond./Tues."
& (grepl("US", CountrySet) == T)# || grepl("US", CountrySet) == T)
)# settle a dynamic criteria of study
criteria = c("AppAdvertisedOrientation","CampaignTargetTypeName", "CampaignCostTypeName", "AppOriginCategory" )#"CampaignTargetTypeName"#"CostPerBid"#"WeekDay" #"CampaignTargetTypeName"
#criteria = "AppAdvertisedID"
##dsSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns, criteria),displays>1000)
ds1appSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(ds1app, criteria),displays>1000)
table <- with(ds1appSplitPerSubCriteria, tapply(CTR, list(CampaignID, Date), FUN=mean))
# seeking for CTR per Campaigns..
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR)) + geom_density(aes(color=Country)) + facet_grid(. ~ CampaignID)
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR, AppAdvertisedID)) + geom_point(aes(color=CampaignID))
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR)) +
geom_histogram(aes(color=CampaignID)) +
facet_grid(. ~ AppAdvertisedID) +
ggtitle('Daily CTR per AppAdvertisedID per Campaign') +
geom_smooth(aes(y=sessions/median(sessions)), colour='#ff9422', se=T) + #/10000
geom_smooth(aes(y=new_users/median(new_users)), colour='#422ff9', linetype='dotted') #/100
#+ scale_line_manual(labels = c("NewUsers", "Sessions"), values = c("#422ff9", "#ff9422"))
#ggsave(paste(gsub(' ', '_', 'Daily CTR per Country per Campaign'), 'jpg', sep="."))
# Fit a linear CPMnorm model (caret, method='lm') on a 70/30 split and
# return the training residuals.
# NOTE(review): the partition indices are computed on the USA-only subset
# but then applied to the full global dsSplitPerSubCriteria — the train and
# test sets are therefore NOT restricted to USA rows; verify intent.
lmPerCountry <- function(){
usaData <- dsSplitPerSubCriteria[dsSplitPerSubCriteria$Country=='USA', ];
# 70% stratified partition (on the USA subset's CPMnorm values).
partCountry <- createDataPartition(p=0.7,y=usaData$CPMnorm, list =F)
trainsetCountry <- dsSplitPerSubCriteria[partCountry,];
testsetCountry <- dsSplitPerSubCriteria[-partCountry,]
lmCountry <- train(CPMnorm ~ AppAdvertisedOrientation + AppOriginCategory + CampaignCostTypeName, data = trainsetCountry, method='lm')
# Held-out predictions are computed but not returned.
predictCountry <- predict(lmCountry, newdata = testsetCountry)
summary(lmCountry)
# Return value: residuals of the final fitted model.
lmCountry$finalModel$residuals
}
predictPerCountry <- lmPerCountry()
predict(lm, newdata = testset)
# Re-aggregate per (cid, aid), keeping rows having nbDisplays > 1000
#dsSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns, c(criteria, "AppOriginOrientation")),displays>1000)
ggplot(data=dsSplitPerSubCriteria, eval(as.symbol(criteria)), CPMnorm) + #aes(CPM, CPMnorm)) +
geom_point(aes(color=AppOriginID, shape=eval(as.symbol(criteria)), size=displays), alpha = 0.6) +
coord_cartesian(xlim=c(0,4), ylim=c(0,7.5)) +
facet_wrap(~CampaignTargetTypeName)
#+ ggtitle("CampaignTypeName: Full screen interstitial (in-app) / CampaignSubTypeName: [PAID] AdDeals user acquisition campaign [quality users] / PlatformName == Windows [Phone/Tablets/PCs] / AppAdvertisedCategory: Games/ AppOriginCategory: Games / AdTypeName: AdDeals Interstitials Full Screen (Native) / AppAdvertisedOrientation: 1 / AppOriginOrientation: 2 / CampaignCostTypeName: CPI (unique / campaign)")
#+ facet_wrap(~CampaignID)
simpleRegression(dsSplitPerSubCriteria, 'CPMnorm', criteria)
dsSplitPerCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns),displays>1000)
ggplot(data=dsSplitPerCriteria, aes(shape=eval(as.symbol(criteria)), CPMnorm)) #+ geom_point(aes(WeekDay,CPMnorm), pch=5, cex=5)
+ geom_point(aes(color=AppOriginID, size=displays), alpha = 0.6)
+ geom_smooth(method = "lm")
#cut2(similarCampaigns$CPMnorm, g=5)
# returns the value of predicted CPMnorm with various criteria modalities in column
comparePred <- sapply(split(similarCampaigns, similarCampaigns[criteria]), getAverageCPM)
cor(as.data.frame(comparePred))
var(split(similarCampaigns, similarCampaigns[criteria])[1], split(similarCampaigns, similarCampaigns[criteria])[2])
# Side-by-side histograms of the normalised (CPMnorm) and raw CPM
# distributions, coloured by campaign cost type.
#
# Args:
#   data: KPI data.frame with CPMnorm, CPM and CampaignCostTypeName columns.
#         (The original ignored this argument and read the global
#         dsSplitPerSubCriteria instead — fixed to use the parameter.)
#
# Returns: the second qplot object (note: ggplot objects built inside a
#          function are only drawn when the result is printed).
dataHistpresentation <- function(data){
  par(mfrow=c(1,2))
  qplot(data$CPMnorm, ylim=c(0,20), xlim=c(0,25), binwidth=0.1, color=data$CampaignCostTypeName)
  qplot(data$CPM, ylim=c(0,30), xlim=c(0,10), binwidth=0.1, color=data$CampaignCostTypeName)
}
isWeb <- list('Web mobile')
#creating subset to split between WebMobile and others #
dsWeb <- subset(ds, ds$CampaignTargetType %in% isWeb)
dsApp <- subset(ds, !(ds$CampaignTargetType %in% isWeb))
#creating subset per campaignCostType#
dsCPI <- subset(dsApp, grepl('CPI',dsApp$CampaignCostType))
boxplot(data=dsCPI, dsCPI$CPMnorm)
dsCPC <- subset(ds, grepl('CPC',ds$CampaignCostType))
boxplot(data=dsCPC, dsCPC$CPMnorm)
dsCPM <- subset(ds, grepl('CPM',ds$CampaignCostType))
dsCPL <- subset(dsWeb, grepl('CPL',dsWeb$CampaignCostType))
#plot CPM along campaignTargetType#
ggplot(data=ds[with(ds,CPMnorm<10),], aes(CampaignTargetTypeName, CPMnorm)) + geom_point(aes(fill=CTR, shape=AppOriginTypeName), alpha=1/2, size=1.5, pch=1) + labs(title='CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000')
# Aggregate displays and earnings for every (campaign, app) combination
displaysGlobPerCampPerApp = aggregate(displaysGlob ~ campaignID + appOriginID, FUN = sum, data=ds)
earningsGlobPerCampPerApp = aggregate(earningsGlob ~ campaignID + appOriginID, FUN = sum, data=ds)
calculCPMGlobPerCampPerApp <- function(rawdata){aggregate(earningsGlob ~ campaignID + appOriginID, FUN=function(x){ifelse(x$CostPerBid > 0, (x$CPM / x$CostPerBid), NA)}, data=rawdata)}
cpmsGlob <- calculCPMGlobPerCampPerApp(similarCampaigns)
#CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000#
ggplot(data=ds[with(ds,CPMnorm<10 && ConnectionTypeName != 'Unknown'),], aes(abbreviate(CampaignCostTypeName, 12), CPMnorm)) + geom_point(aes(shape=AppAdvertisedypeName, color=CostPerBid, size=earningsGlob), alpha=1/2, pch=1) +
labs(x='CampaignCostType', title='CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000') + geom_smooth(method='lm') +
facet_grid(ConnectionTypeName~CampaignTargetTypeName) + coord_cartesian(ylim=c(0,5))
# compute kmodes and assign each combinaison to cluster#
kmodes <- kmodes(data=na.omit(subset(ds, select = -c(apporiginID, campaignID, AppAdvertisedID, CTR, CVR))), modes=4, iter.max=20, weighted=F)
ds <- cbind(ds,kmodes$cluster)
names(ds)[length(ds)] <- 'cluster';
split(ds, ds$cluster)[1]
# display CPM per cluster over various dimensions #
qplot(x=cluster, y=CPM, data=ds, color=AppAdvertisedCategory, shape=CampaignTargetTypeName, ylim=c(0,10))
# tracing the plot of displays = f(AppAdvertisedCategory, AppOriginCategory)#
catVScat <-qplot(data=ds, x=abbreviate(AppAdvertisedCategory_, 7), y=AppOriginCategory_, size=installsGlob, color=displaysGlob, xlab='promotedApp');
trainer <- createDataPartition(y=t$CPM, p=0.75, list=F) #displaysGlob#
trainingset <- t[trainer,]
train <- train(CPM~., data=trainingset, method='glm'); # ~ AppOriginType + AppAdvertisedType#
predict(newdata=t[-trainer, ], train)
library('klaR');
qplot(geom='density', x=CPM, data=ds ,xlim=c(0,4), color=CampaignTargetTypeName);
qplot(x=cluster, y=CPM, data=ds, color=AppOriginCategory ,ylim=c(0,20), shape=CampaignTargetTypeName) | /clustering_over_campaign_AppOriginID.R | no_license | ArnOrgelet/AdNetwork-analysis-with-R | R | false | false | 20,438 | r | dstemp <- ds
ds <- dstemp
library(Hmisc); library(ggplot2); library(caret);
# FUNCTIONS
#sum up detailled data per couple (campaign, App)
# Aggregate raw per-event measures to one row per (CampaignID, AppOriginID,
# Country[, Date][, subcriteria...]) combination.
#
# Args:
#   data:        data.frame of raw stats; must contain CampaignID,
#                AppOriginID, Country, CostPerBid and the *Glob measure
#                columns summed below. A Date column, when present, is
#                added to the grouping key.
#   subcriteria: optional character vector of extra column names to group
#                by (e.g. "CampaignTargetTypeName").
#
# Returns: data.frame with the grouping columns followed by the summed
#          measures (earnings, displays, clicks, installs, leads, sessions,
#          new_users) and costPerBid (mean CostPerBid of the campaign).
groupMeasuresPerCampPerApp <- function(data, subcriteria){
  # Grouping key: fixed dimensions, then optional Date, then any
  # caller-supplied sub-criteria (same column order the old merge() chain
  # produced via append(naming, subcriteria, insertPos)).
  group_cols <- c("CampaignID", "AppOriginID", "Country")
  if ("Date" %in% colnames(data)) {
    group_cols <- c(group_cols, "Date")
  }
  if (!missing(subcriteria)) {
    group_cols <- c(group_cols, subcriteria)
  }
  # Output measure name -> raw input column that gets summed.
  measure_cols <- c(earnings  = "earningsGlob",
                    displays  = "displaysGlob",
                    clicks    = "clicksGlob",
                    installs  = "installsGlob",
                    leads     = "leadsGlob",
                    sessions  = "sessionsGlob",
                    new_users = "newUsersGlob")
  # One aggregate() call replaces the previous seven aggregate() calls plus
  # a nested merge() chain. The old chain also contained a 'susbsetTo' typo
  # that left the column-dropping indices (subsetSel) stale for vector
  # sub-criteria, and incremented them twice when Date was present.
  res <- aggregate(data[, unname(measure_cols), drop = FALSE],
                   by = data[, group_cols, drop = FALSE],
                   FUN = sum)
  names(res) <- c(group_cols, names(measure_cols))
  # Mean bid cost per campaign, matched by value rather than by factor
  # level index (as.numeric() on a factor compares level codes, which is
  # fragile when the two factors are coded differently).
  cpb <- vapply(as.character(res$CampaignID),
                function(cid) mean(data$CostPerBid[as.character(data$CampaignID) == cid]),
                numeric(1), USE.NAMES = FALSE)
  res <- cbind(res, costPerBid = cpb)
  res
}
# Build per-(campaign, app, country) KPIs on top of the aggregated measures.
#
# Args:
#   data:        raw stats data.frame (see groupMeasuresPerCampPerApp).
#   subcriteria: optional extra grouping column names, forwarded as-is.
#
# Returns: unique rows of the aggregated measures extended with
#          dispPerSess, CPM, CTR, CVR and CPMnorm (CPM divided by the
#          campaign's mean cost per bid).
groupKPIsPerCampPerApp <- function(data, subcriteria){
  msrs <- groupMeasuresPerCampPerApp(data, subcriteria);
  msrs <- cbind(msrs,
                # Guard on the divisor (sessions), not on displays: the old
                # guard produced Inf for rows with displays but no sessions,
                # and NaN where 0/sessions = 0 was well defined.
                dispPerSess = ifelse(msrs$sessions > 0, msrs$displays / msrs$sessions, NaN),
                # Earnings per thousand displays.
                CPM = ifelse(msrs$displays > 0, msrs$earnings / msrs$displays * 1000, NaN),
                CTR = ifelse(msrs$displays > 0, msrs$clicks / msrs$displays, NaN),
                # Conversions (installs + leads) per click.
                CVR = ifelse(msrs$clicks > 0, (msrs$installs + msrs$leads) / msrs$clicks, NaN),
                # CPM normalised by the campaign's bid cost.
                CPMnorm = ifelse(msrs$displays > 0, (msrs$earnings / msrs$displays * 1000)/msrs$costPerBid, NaN)
  )
  unique(msrs)
}
baysianPredict <- function(){
#library(e1071)
#library(caret)
dsSplitPerSubCriteriaCPMgrouped <- cbind(dsSplitPerSubCriteria, CPMnorm_ = cut(dsSplitPerSubCriteria$CPMnorm, breaks=-1:6), rm.na=T)#(max(dsSplitPerSubCriteria$CPMnorm)+1)
#cut(dsSplitPerSubCriteria$CPMnorm, breaks=-1:(max(dsSplitPerSubCriteria$CPMnorm)+1))
datasplit <- createDataPartition(y=dsSplitPerSubCriteriaCPMgrouped$CPMnorm, list=F, p=.7)
trainset <- dsSplitPerSubCriteriaCPMgrouped[datasplit,]
testset <- dsSplitPerSubCriteriaCPMgrouped[-datasplit,]
modelLDA <- train(CPMnorm_ ~ displays, data=trainset, method='lda')
plda <- predict(modelLDA, newdata=testset)
}
# Displays-weighted average of the normalised CPM over all rows.
#
# Args:
#   data: data.frame with numeric columns 'displays' and 'CPMnorm'.
#
# Returns: sum(displays * CPMnorm) / sum(displays); NaN when the input is
#          empty or every display count is zero.
getAverageCPM <- function(data){
  # Vectorised replacement of the per-row accumulation loop, which also
  # iterated over 1:dim(data)[1] and therefore ran once (on an NA row)
  # for a zero-row input.
  sum(data$displays * data$CPMnorm) / sum(data$displays)
}
# Drop identifier, raw-measure and outcome columns so that only the model
# feature columns remain.
#
# Args:
#   ds: full stats data.frame.
#
# Returns: ds restricted (in original column order) to the feature columns.
prepareDS <- function(ds){
  # Columns that identify rows or leak the outcome — never features.
  excluded <- c("ConnectionTypeName", "AppOriginID", "CampaignID", "Country", "AppAdvertisedID", "requestsGlob", "displaysGlob", "clicksGlob", "installsGlob", "leadsGlob", "earningsGlob", "spendingsGlob", "CPM", "CTR", "CVR") #, "CostPerBid"
  kept <- setdiff(names(ds), excluded)
  ds[, kept, drop = FALSE]
}
# Load (or accept) the raw stats table and normalise its column types.
#
# Args:
#   csvFile: path of a CSV file to read, or an already-loaded data.frame.
#            (The original version never assigned 'ds' for non-character
#            input and therefore errored on a data.frame argument.)
#
# Returns: the data.frame with identifier columns converted to factors,
#          Date parsed to Date class (when present) and a CPMnorm column
#          appended (CPM / CostPerBid, NA when the bid cost is not > 0).
setDS <- function(csvFile){
  if(is.character(csvFile)){
    ds <- read.csv(csvFile)
  } else {
    # Allow a pre-loaded data.frame to be passed straight through.
    ds <- csvFile
  }
  # Keep rows with a non-negative display count. (The original's extra
  # filters on top-30 apps/campaigns were commented out and are dropped.)
  ds <- subset(ds, displaysGlob >= 0)
  # Identifier columns are categorical, not numeric.
  ds$CampaignID <- as.factor(ds$CampaignID)
  ds$AppOriginID <- as.factor(ds$AppOriginID)
  # Fix the cost-type level order so plots/models use a stable ordering.
  ds$CampaignCostTypeName <- factor(ds$CampaignCostTypeName, levels=c(
    "CPM",
    "CPC (multiple / campaign)",
    "CPI (unique / campaign)",
    "CPL (multiple / campaign / productID) - any kind of goals reached"
  ))
  if("Date" %in% colnames(ds))
  {
    ds$Date <- as.Date(as.POSIXct(ds$Date, 'GMT'))
  }
  ds$AppAdvertisedID <- as.factor(ds$AppAdvertisedID)
  ds$AppOriginOrientation <- as.factor(ds$AppOriginOrientation)
  ds$AppAdvertisedOrientation <- as.factor(ds$AppAdvertisedOrientation)
  # Normalise CPM by the campaign bid cost; undefined when CostPerBid <= 0.
  ds <- cbind(ds, CPMnorm = ifelse(ds$CostPerBid > 0, (ds$CPM / ds$CostPerBid), NA))
  ds
}
simpleRegression<- function(data, outcome, criteria){
LM<- lm(eval(as.symbol(outcome))~eval(as.symbol(criteria)), data)
plot(data[[criteria]], data[[outcome]], ylim=c(quantile(data[[outcome]], .05,na.rm = T),quantile(data[[outcome]], .95,na.rm = T)))#, geom="boxplot"), col=colorRamp(c('red', 'blue'))(0.5)
#plot(data[[criteria]], data[[outcome]], ylim=c(0,1))#, geom="boxplot"), col=colorRamp(c('red', 'blue'))(0.5)
plot(data[[criteria]], LM$fitted, pch=18, cex=3, col=5, add=TRUE) #col=colorRamp(c('red', 'blue'))(0.5),
for(i in 1:length(unique(data[[criteria]]))){
currentMod <- unique(data[[criteria]])[i];
sel <- data[[criteria]]
text(sel[sel== currentMod], min(data[[outcome]]), paste('Tot:',length(sel[sel== currentMod]), sep=":"), col=i)
}
print(LM)
}
multipleRegression <- function(data, outcome, criteria){
LM<- lm(eval(as.symbol(outcome))~., data=subset(data, select=names(data) %ni% c(nonFeaturesCol, names(selection))))
#plot(data[[criteria]], LM$fitted, type="l", lwd=3, col="red")
qplot(data[[criteria]], LM$fitted)
# compare prediction and mean
lapply(split(similarCampaigns, similarCampaigns[criteria]),
function(x){
print(x[1, criteria])
print(list( pred=getAverageCPM(x), mean = mean(x$CPMnorm), med = median(x$CPMnorm[x$CPMnorm>0])))
})
}
## END FUNCTIONS
#feature plot#
featurePlot(x=ds[,c('CampaignTargetTypeName', 'AppAdvertisedCategory', 'AppOriginCategory')], y=ds$CPMnorm, plot="pairs")
ds <- setDS('AppsStatsPerDaySinceFirstJuly.csv')#('StatsOverFullTimeWithConnType.csv')#('StatsForLaunchingPeriodWithConnType.csv')
# we focus on only 1 appOrigin
ds1app <- ds[ds$AppOriginID == '2300',]
# selection of campaigns being exactly the same
selection=list(
CampaignTypeName= "Full screen interstitial (in-app)"
,CampaignSubTypeName = "[PAID] AdDeals user acquisition campaign [quality users]"
,PlatformName = "Windows [Phone/Tablets/PCs]"
,AdTypeName="AdDeals Interstitials Full Screen (Native)"
,AppAdvertisedCategory = "Games"
,AppAdvertisedOrientation = "1"
,AppOriginCategory = "Games"
,AppOriginOrientation = "2"
#,CampaignCostTypeName = "CPI (unique / campaign)"
#,CampaignTargetTypeName = "Web mobile"
#,ConnectionTypeName = "Ethernet"
#,WeekDay = "Mond./Tues."
#,grepl("US", CountrySet) == T
)
selectionNames <- names(selection)
#similarCampaigns <- ds; lapply(selectionNames, function(nameindex){similarCampaigns <- subset(similarCampaigns, similarCampaigns[nameindex]==selection[[nameindex]])}) #ds[x.name]=
similarCampaigns <- subset(ds,
CampaignTypeName== "Full screen interstitial (in-app)"
& CampaignSubTypeName == "[PAID] AdDeals user acquisition campaign [quality users]"
& PlatformName == "Windows [Phone/Tablets/PCs]"
#& AppAdvertisedCategory == "Games"
#& AppOriginCategory == "Games"
& AdTypeName=="AdDeals Interstitials Full Screen (Native)"
#& AppAdvertisedOrientation == "1"
#& AppOriginOrientation == "2"
#& CampaignCostTypeName == "CPI (unique / campaign)"
#& CampaignTargetTypeName == "Web mobile"
#& ConnectionTypeName == "Ethernet"
#& WeekDay == "Satu./Sund."#"Mond./Tues."
& (grepl("US", CountrySet) == T)# || grepl("US", CountrySet) == T)
)# settle a dynamic criteria of study
criteria = c("AppAdvertisedOrientation","CampaignTargetTypeName", "CampaignCostTypeName", "AppOriginCategory" )#"CampaignTargetTypeName"#"CostPerBid"#"WeekDay" #"CampaignTargetTypeName"
#criteria = "AppAdvertisedID"
##dsSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns, criteria),displays>1000)
ds1appSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(ds1app, criteria),displays>1000)
table <- with(ds1appSplitPerSubCriteria, tapply(CTR, list(CampaignID, Date), FUN=mean))
# seeking for CTR per Campaigns..
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR)) + geom_density(aes(color=Country)) + facet_grid(. ~ CampaignID)
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR, AppAdvertisedID)) + geom_point(aes(color=CampaignID))
ggplot(data = ds1appSplitPerSubCriteria, aes(CTR)) +
geom_histogram(aes(color=CampaignID)) +
facet_grid(. ~ AppAdvertisedID) +
ggtitle('Daily CTR per AppAdvertisedID per Campaign') +
geom_smooth(aes(y=sessions/median(sessions)), colour='#ff9422', se=T) + #/10000
geom_smooth(aes(y=new_users/median(new_users)), colour='#422ff9', linetype='dotted') #/100
#+ scale_line_manual(labels = c("NewUsers", "Sessions"), values = c("#422ff9", "#ff9422"))
#ggsave(paste(gsub(' ', '_', 'Daily CTR per Country per Campaign'), 'jpg', sep="."))
lmPerCountry <- function(){
usaData <- dsSplitPerSubCriteria[dsSplitPerSubCriteria$Country=='USA', ];
partCountry <- createDataPartition(p=0.7,y=usaData$CPMnorm, list =F)
trainsetCountry <- dsSplitPerSubCriteria[partCountry,];
testsetCountry <- dsSplitPerSubCriteria[-partCountry,]
lmCountry <- train(CPMnorm ~ AppAdvertisedOrientation + AppOriginCategory + CampaignCostTypeName, data = trainsetCountry, method='lm')
predictCountry <- predict(lmCountry, newdata = testsetCountry)
summary(lmCountry)
lmCountry$finalModel$residuals
}
predictPerCountry <- lmPerCountry()
predict(lm, newdata = testset)
#reagregate per cai, aid having nbDiqsplays > 1000
#dsSplitPerSubCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns, c(criteria, "AppOriginOrientation")),displays>1000)
ggplot(data=dsSplitPerSubCriteria, eval(as.symbol(criteria)), CPMnorm) + #aes(CPM, CPMnorm)) +
geom_point(aes(color=AppOriginID, shape=eval(as.symbol(criteria)), size=displays), alpha = 0.6) +
coord_cartesian(xlim=c(0,4), ylim=c(0,7.5)) +
facet_wrap(~CampaignTargetTypeName)
#+ ggtitle("CampaignTypeName: Full screen interstitial (in-app) / CampaignSubTypeName: [PAID] AdDeals user acquisition campaign [quality users] / PlatformName == Windows [Phone/Tablets/PCs] / AppAdvertisedCategory: Games/ AppOriginCategory: Games / AdTypeName: AdDeals Interstitials Full Screen (Native) / AppAdvertisedOrientation: 1 / AppOriginOrientation: 2 / CampaignCostTypeName: CPI (unique / campaign)")
#+ facet_wrap(~CampaignID)
simpleRegression(dsSplitPerSubCriteria, 'CPMnorm', criteria)
dsSplitPerCriteria <- subset(groupKPIsPerCampPerApp(similarCampaigns),displays>1000)
ggplot(data=dsSplitPerCriteria, aes(shape=eval(as.symbol(criteria)), CPMnorm)) #+ geom_point(aes(WeekDay,CPMnorm), pch=5, cex=5)
+ geom_point(aes(color=AppOriginID, size=displays), alpha = 0.6)
+ geom_smooth(method = "lm")
#cut2(similarCampaigns$CPMnorm, g=5)
# returns the value of predicted CPMnorm with various criteria modalities in column
comparePred <- sapply(split(similarCampaigns, similarCampaigns[criteria]), getAverageCPM)
cor(as.data.frame(comparePred))
var(split(similarCampaigns, similarCampaigns[criteria])[1], split(similarCampaigns, similarCampaigns[criteria])[2])
# Side-by-side histograms of the normalised (CPMnorm) and raw CPM
# distributions, coloured by campaign cost type.
#
# Args:
#   data: KPI data.frame with CPMnorm, CPM and CampaignCostTypeName columns.
#         (The original ignored this argument and read the global
#         dsSplitPerSubCriteria instead — fixed to use the parameter.)
#
# Returns: the second qplot object (note: ggplot objects built inside a
#          function are only drawn when the result is printed).
dataHistpresentation <- function(data){
  par(mfrow=c(1,2))
  qplot(data$CPMnorm, ylim=c(0,20), xlim=c(0,25), binwidth=0.1, color=data$CampaignCostTypeName)
  qplot(data$CPM, ylim=c(0,30), xlim=c(0,10), binwidth=0.1, color=data$CampaignCostTypeName)
}
# Campaign target types considered "web" (everything else is in-app).
isWeb <- list('Web mobile')
# split the dataset between Web-mobile campaigns and the rest
dsWeb <- subset(ds, ds$CampaignTargetType %in% isWeb)
dsApp <- subset(ds, !(ds$CampaignTargetType %in% isWeb))
# one subset per campaign cost type (CPI only makes sense for apps,
# CPL only for web campaigns)
dsCPI <- subset(dsApp, grepl('CPI',dsApp$CampaignCostType))
boxplot(data=dsCPI, dsCPI$CPMnorm)
dsCPC <- subset(ds, grepl('CPC',ds$CampaignCostType))
boxplot(data=dsCPC, dsCPC$CPMnorm)
dsCPM <- subset(ds, grepl('CPM',ds$CampaignCostType))
dsCPL <- subset(dsWeb, grepl('CPL',dsWeb$CampaignCostType))
# scatter of normalized CPM per campaign target type (rows with CPMnorm < 10)
ggplot(data=ds[with(ds,CPMnorm<10),], aes(CampaignTargetTypeName, CPMnorm)) + geom_point(aes(fill=CTR, shape=AppOriginTypeName), alpha=1/2, size=1.5, pch=1) + labs(title='CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000')
# Aggregate global displays and earnings per (campaign, app-origin) pair.
# Use `<-` for assignment (R style convention) instead of `=`.
displaysGlobPerCampPerApp <- aggregate(displaysGlob ~ campaignID + appOriginID, FUN = sum, data=ds)
earningsGlobPerCampPerApp <- aggregate(earningsGlob ~ campaignID + appOriginID, FUN = sum, data=ds)
# Intended: per-(campaign, app) CPM divided by the bid cost.
# NOTE(review): aggregate()'s FUN receives a plain numeric VECTOR of
# earningsGlob values, not a data frame, so `x$CostPerBid` / `x$CPM` inside
# FUN will fail ("$ operator is invalid for atomic vectors"). This likely
# never worked; a merge of the two per-pair aggregates above followed by a
# vectorized division is probably what was meant. TODO confirm intent.
calculCPMGlobPerCampPerApp <- function(rawdata){aggregate(earningsGlob ~ campaignID + appOriginID, FUN=function(x){ifelse(x$CostPerBid > 0, (x$CPM / x$CostPerBid), NA)}, data=rawdata)}
# NOTE(review): this call exercises the broken FUN above and is expected
# to error -- see the note on calculCPMGlobPerCampPerApp.
cpmsGlob <- calculCPMGlobPerCampPerApp(similarCampaigns)
# CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000
# CPM vs (abbreviated) campaign cost type, faceted by connection type and
# campaign target type.
# FIX: the row filter must use the vectorized `&`, not the scalar `&&` --
# `&&` inspects only the first element (and errors outright on length > 1
# conditions in R >= 4.3), so the subset was wrong/failing.
# FIX: `AppAdvertisedypeName` looked like a typo for `AppAdvertisedTypeName`
# (the file elsewhere uses AppAdvertised*/AppOriginTypeName) -- confirm the
# column name against the dataset.
ggplot(data=ds[with(ds, CPMnorm < 10 & ConnectionTypeName != 'Unknown'),], aes(abbreviate(CampaignCostTypeName, 12), CPMnorm)) + geom_point(aes(shape=AppAdvertisedTypeName, color=CostPerBid, size=earningsGlob), alpha=1/2, pch=1) +
  labs(x='CampaignCostType', title='CPM per (Campaign/AppOrigin) since February having nbDisplays > 3000') + geom_smooth(method='lm') +
  facet_grid(ConnectionTypeName~CampaignTargetTypeName) + coord_cartesian(ylim=c(0,5))
# Cluster (campaign, app) combinations with k-modes and attach the cluster id.
# NOTE(review): `kmodes <- kmodes(...)` shadows klaR's kmodes() function with
# its result; prefer a different variable name. Also `weighted=F` should be
# spelled `weighted = FALSE` (T/F are reassignable).
kmodes <- kmodes(data=na.omit(subset(ds, select = -c(apporiginID, campaignID, AppAdvertisedID, CTR, CVR))), modes=4, iter.max=20, weighted=F)
ds <- cbind(ds,kmodes$cluster)
names(ds)[length(ds)] <- 'cluster';
# peek at the first cluster's rows
split(ds, ds$cluster)[1]
# display CPM per cluster over various dimensions
qplot(x=cluster, y=CPM, data=ds, color=AppAdvertisedCategory, shape=CampaignTargetTypeName, ylim=c(0,10))
# displays as a function of (AppAdvertisedCategory, AppOriginCategory)
catVScat <-qplot(data=ds, x=abbreviate(AppAdvertisedCategory_, 7), y=AppOriginCategory_, size=installsGlob, color=displaysGlob, xlab='promotedApp');
# NOTE(review): `t` shadows base::t here, and `train <- train(...)` shadows
# caret::train with its result -- both work but are fragile; rename. TODO.
trainer <- createDataPartition(y=t$CPM, p=0.75, list=F) #displaysGlob#
trainingset <- t[trainer,]
train <- train(CPM~., data=trainingset, method='glm'); # ~ AppOriginType + AppAdvertisedType#
predict(newdata=t[-trainer, ], train)
library('klaR');
qplot(geom='density', x=CPM, data=ds ,xlim=c(0,4), color=CampaignTargetTypeName);
qplot(x=cluster, y=CPM, data=ds, color=AppOriginCategory ,ylim=c(0,20), shape=CampaignTargetTypeName) |
# load libraries
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org")
if(!require(randomForest)) install.packages("randomForest", repos = "http://cran.us.r-project.org")
# This dataset has been downloaded from
# https://www.kaggle.com/jealousleopard/goodreadsbooks
# A few records had special characters in the title (, and ")
# and were throwing parsing errors.
# The errors have been corrected in the local copy of the CSV file.
# read the CSV file (readr; columns are type-guessed from the data)
dataset <- read_delim("books.csv", delim = ",")
# This returns the structure of the file which will show 11,127 records
str(dataset)
# Examine the dataset to see if any records have 0 average ratings
dataset %>% filter(average_rating == 0) %>% summarize(n = n())
# 26 records have a zero average rating (unrated books, not genuinely
# zero-star books), so we exclude them from the data set
dataset <- dataset %>% filter(average_rating != 0)
# checking row count to see if we now have 11,101 records
nrow(dataset)
# Now splitting the data set into 3 sections.
# Based on various articles researched online,
# a common split for training/testing/validation is 70/15/15.
# We will first carve off 85% for the training & testing partition
# and keep 15% as a held-out validation set.
# sample.kind = "Rounding" reproduces the pre-R-3.6 sampling RNG so
# results match runs made on older R versions.
set.seed(1983, sample.kind = "Rounding")
test_index_1 <- createDataPartition(y = dataset$average_rating, times = 1,
                                    p = 0.85, list = FALSE)
partition <- dataset[test_index_1,]
validation <- dataset[-test_index_1,]
# Now splitting partition into a training set and a testing set.
# 70/85 of the 85% partition = 70% of the full data for training.
# NOTE: not re-seeded -- this draw continues the RNG stream above, so it
# is reproducible only when the two calls run in this exact order.
test_index_2 <- createDataPartition(y = partition$average_rating, times = 1,
                                    p = 70/85, list = FALSE)
train_set <- partition[test_index_2,]
test_set <- partition[-test_index_2,]
# checking to see if the number of rows is what we expected
nrow(train_set)
nrow(test_set)
nrow(validation)
# Quick univariate screening: Pearson correlation of each candidate
# feature with the book rating (training set only, to avoid leakage).
# checking correlation between number of pages and book rating
cor(train_set$average_rating, train_set$num_pages)
# checking correlation between number of ratings and book rating
cor(train_set$average_rating, train_set$ratings_count)
# checking correlation between number of reviews and book rating
cor(train_set$average_rating, train_set$text_reviews_count)
# adding a new column to calculate review rate (% of people who posted a review)
# out of the people who rated it; guarded against division by zero
temp <- train_set %>%
  mutate(review_rate = ifelse(ratings_count == 0, 0,
                              text_reviews_count / ratings_count))
# checking correlation between review rate and book rating
cor(train_set$average_rating, temp$review_rate)
# adding a new column to calculate length of the book's title
temp <- temp %>% mutate(title_length = nchar(title))
# checking correlation between length of the book's title and book rating
cor(train_set$average_rating, temp$title_length)
# Overall mean book rating in the training set -- the baseline that every
# bias term below is measured against. Use `<-` for assignment (R style).
overall_avg <- mean(train_set$average_rating)
# format_df: add the derived feature columns used by the linear, random
# forest and bias models. Pure row-wise / per-category feature engineering:
# it never reads the book ratings or data beyond the rows it is given, so
# it is safe to apply identically to train, test and validation sets.
#
# df: data frame with title, ratings_count, text_reviews_count, num_pages
#     and language_code columns.
# Returns: df with review_rate, title_length, num_pages_group,
#     review_rate_group, language_group and title_length_group added.
#
# FIX: use `<-` instead of `=` for the function assignment, and return the
# pipeline directly rather than via a redundant inner `df <-` reassignment
# (same value; previously it was returned invisibly).
format_df <- function(df) {
  df %>%
    # share of raters who also left a text review (0 when nobody rated)
    mutate(review_rate = ifelse(ratings_count == 0, 0,
                                text_reviews_count / ratings_count)) %>%
    # length of the book title in characters
    mutate(title_length = nchar(title)) %>%
    # bucket page counts into 100-page groups
    mutate(num_pages_group = round(num_pages/100)) %>%
    # review rates below 0.15 keep 2-decimal resolution; the sparse tail
    # above 0.15 is lumped into a single 0.16 bucket
    mutate(review_rate_group = case_when(
      review_rate < 0.15 ~ round(review_rate, 2),
      TRUE ~ 0.16)) %>%
    # merge US/GB English into "eng"; languages with <= 10 books in this
    # data frame are lumped into "other" (n() is the per-language count)
    group_by(language_code) %>%
    mutate(language_group = case_when(
      language_code == "en-US" ~ "eng",
      language_code == "en-GB" ~ "eng",
      n() > 10 ~ language_code,
      TRUE ~ "other")) %>%
    ungroup() %>%
    # bucket title lengths into 10-character bands
    mutate(title_length_group = case_when(
      title_length <= 10 ~ "0-10",
      title_length <= 20 ~ "11-20",
      title_length <= 30 ~ "21-30",
      title_length <= 40 ~ "31-40",
      title_length <= 50 ~ "41-50",
      title_length <= 60 ~ "51-60",
      title_length <= 70 ~ "61-70",
      title_length <= 80 ~ "71-80",
      title_length <= 90 ~ "81-90",
      title_length <= 100 ~ "91-100",
      TRUE ~ "101+"
    ))
}
# add data transformations to training set
train_set <- format_df(train_set)
# add data transformations to test set
test_set <- format_df(test_set)
# Baseline model 1: ordinary least squares on review rate, page count and
# title length.
fit <- lm(average_rating ~ title_length + review_rate +
            num_pages, data = train_set)
y_hat_lm <- predict(fit, test_set)
# calculate RMSE for linear model prediction
sqrt(mean((y_hat_lm - test_set$average_rating)^2))
# Baseline model 2: random forest on the same numeric features plus the
# authors and language_group categoricals.
set.seed(1983, sample.kind = "Rounding")
# NOTE(review): randomForest rejects factor predictors with > 53 levels and
# character columns -- `authors` is high-cardinality, so confirm this call
# actually runs on the full dataset.
train_rf <- randomForest(average_rating ~ title_length + review_rate +
                           num_pages + authors + language_group,
                         data = train_set,
                         ntree = 180,
                         importance=TRUE)
# see importance of each variable
varImp(train_rf)
#
# predict test set using RandomForest
y_hat_rf <- predict(train_rf, test_set)
# calculate RMSE
sqrt(mean((y_hat_rf - test_set$average_rating)^2))
# Bias (effect) model: sequentially estimate one additive effect per
# category, each computed on the residual left after subtracting the
# overall average and all previously-estimated effects. Order matters:
# language -> authors -> pages -> title length -> review rate.
train_set <- train_set %>%
  # calculate effect of language
  group_by(language_group) %>%
  mutate(b_language = mean(average_rating - overall_avg)) %>%
  ungroup() %>%
  # calculate effect of author, after deducting language
  group_by(authors) %>%
  mutate(b_authors = mean(average_rating - overall_avg - b_language)) %>%
  ungroup %>%
  # calculate effect of number of pages, after deducting language
  # and author effect  (NOTE: bare `ungroup` works in a pipe, but
  # `ungroup()` is the consistent spelling used elsewhere in this chain)
  group_by(num_pages_group) %>%
  mutate(b_num_pages = mean(average_rating - overall_avg - b_language -
                              b_authors)) %>%
  ungroup %>%
  # calculate effect of title length after deducting the other effects
  group_by(title_length_group) %>%
  mutate(b_title_length = mean(average_rating - overall_avg - b_language -
                                 b_num_pages - b_authors)) %>%
  ungroup() %>%
  # calculate effect of review rate, after deducting the other effects
  group_by(review_rate_group) %>%
  mutate(b_review_rate = mean(average_rating - overall_avg - b_language -
                                b_num_pages - b_authors - b_title_length)) %>%
  ungroup()
# One row per language group carrying its language effect. b_language is
# constant within each group, so mean() simply collapses the duplicates.
language_averages <-
  summarize(group_by(train_set, language_group),
            b_language = mean(b_language))
# Collapse each per-row effect column into a one-row-per-category lookup
# table (the b_* values are constant within their group, so mean() just
# deduplicates). These tables are joined onto test/validation rows below.
# extracting author effects from the training set
authors_averages <- train_set %>%
  group_by(authors) %>%
  summarize(b_authors = mean(b_authors))
# extracting title length effects from the training set
title_length_averages <- train_set %>%
  group_by(title_length_group) %>%
  summarize(b_title_length = mean(b_title_length))
# extracting number of pages effect from the training set
num_pages_averages <- train_set %>%
  group_by(num_pages_group) %>%
  summarize(b_num_pages = mean(b_num_pages))
# extracting review rate effects from the training set
review_rate_averages <- train_set %>%
  group_by(review_rate_group) %>%
  summarize(b_review_rate = mean(b_review_rate))
# Score the test set: join every effect table onto each row, default any
# category unseen in training to a 0 effect, sum the effects onto the
# overall average, then cap the result to the valid rating range.
predicted_ratings <- test_set %>%
  # integrate language effect as extracted from training set
  left_join(language_averages, by='language_group') %>%
  # applying a default value (0) for categories not found in training
  mutate(b_language_clean = ifelse(is.na(b_language), 0, b_language)) %>%
  # integrate author effect as extracted from training set
  left_join(authors_averages, by='authors') %>%
  mutate(b_authors_clean = ifelse(is.na(b_authors), 0, b_authors)) %>%
  # integrate number of pages effect as extracted from training set
  left_join(num_pages_averages, by='num_pages_group') %>%
  mutate(b_num_pages_clean = ifelse(is.na(b_num_pages), 0,
                                    b_num_pages)) %>%
  # integrate title length effect as extracted from training set
  left_join(title_length_averages, by='title_length_group') %>%
  mutate(b_title_length_clean = ifelse(is.na(b_title_length), 0,
                                       b_title_length)) %>%
  # integrate review rate effect as extracted from training set
  left_join(review_rate_averages, by='review_rate_group') %>%
  mutate(b_review_rate_clean = ifelse(is.na(b_review_rate), 0,
                                      b_review_rate)) %>%
  # prediction = baseline + sum of all category effects
  mutate(pred = overall_avg +
           b_language_clean +
           b_authors_clean +
           b_num_pages_clean +
           b_title_length_clean +
           b_review_rate_clean) %>%
  # clamp predictions to the valid Goodreads rating range [0.5, 5.0]
  mutate(pred_capped = ifelse(pred < 0.5, 0.5, ifelse(pred > 5.0, 5.0, pred)))
# calculate RMSE for test set
sqrt(mean((predicted_ratings$pred_capped - test_set$average_rating)^2))
# Exploratory bar/line charts for the two engineered groupings.
# NOTE(review): title_length_group is a character column, so the x axis
# sorts lexicographically ("0-10", "101+", "11-20", ...) -- convert to an
# ordered factor if the natural order matters. TODO confirm intent.
# number of books based on title length
train_set %>% group_by(title_length_group) %>%
  summarize(books = n()) %>%
  ggplot(aes(x = title_length_group, y = books)) +
  geom_col(fill = "red", alpha=0.5)
# average rating of books based on title length
train_set %>% group_by(title_length_group) %>%
  summarize(avg_rating = mean(average_rating)) %>%
  ggplot(aes(x = title_length_group, y = avg_rating)) +
  geom_col(fill = "red", alpha=0.5)
# number of books based on review rate (factor() makes the axis discrete)
train_set %>% group_by(review_rate_group) %>%
  summarize(books = n()) %>%
  ggplot(aes(x = factor(review_rate_group), y = books)) +
  geom_col(fill = "red", alpha=0.5)
# average rating of books based on review rate, as a continuous line
train_set %>% group_by(review_rate_group) %>%
  summarize(avg_rating = mean(average_rating)) %>%
  ggplot(aes(x = review_rate_group, y = avg_rating)) +
  # geom_col(fill = "red", alpha=0.5) +
  geom_line()
# Final evaluation: apply the identical scoring pipeline to the held-out
# validation set (transformed with format_df only now, so no leakage).
# NOTE(review): this duplicates the test-set pipeline line for line --
# consider extracting a score_df(df) helper to keep the two in sync.
predicted_ratings_2 <- format_df(validation) %>%
  # integrate language effect as extracted from training set
  left_join(language_averages, by='language_group') %>%
  # applying a default value (0) for categories not found in training
  mutate(b_language_clean = ifelse(is.na(b_language), 0, b_language)) %>%
  # integrate author effect as extracted from training set
  left_join(authors_averages, by='authors') %>%
  mutate(b_authors_clean = ifelse(is.na(b_authors), 0, b_authors)) %>%
  # integrate number of pages effect as extracted from training set
  left_join(num_pages_averages, by='num_pages_group') %>%
  mutate(b_num_pages_clean = ifelse(is.na(b_num_pages), 0,
                                    b_num_pages)) %>%
  # integrate title length effect as extracted from training set
  left_join(title_length_averages, by='title_length_group') %>%
  mutate(b_title_length_clean = ifelse(is.na(b_title_length), 0,
                                       b_title_length)) %>%
  # integrate review rate effect as extracted from training set
  left_join(review_rate_averages, by='review_rate_group') %>%
  mutate(b_review_rate_clean = ifelse(is.na(b_review_rate), 0,
                                      b_review_rate)) %>%
  # prediction = baseline + sum of all category effects
  mutate(pred = overall_avg +
           b_language_clean +
           b_authors_clean +
           b_num_pages_clean +
           b_title_length_clean +
           b_review_rate_clean) %>%
  # clamp predictions to the valid Goodreads rating range [0.5, 5.0]
  mutate(pred_capped = ifelse(pred < 0.5, 0.5, ifelse(pred > 5.0, 5.0, pred)))
# calculate RMSE for the validation set (final reported metric)
sqrt(mean((predicted_ratings_2$pred_capped - validation$average_rating)^2))
| /Books.R | no_license | blockshade83/Goodreads_Project | R | false | false | 12,255 | r | # load libraries
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org")
if(!require(randomForest)) install.packages("randomForest", repos = "http://cran.us.r-project.org")
# This dataset has been downloaded from
# https://www.kaggle.com/jealousleopard/goodreadsbooks
# A few records had special characters in the title (, and ")
# and were throwing parsing errors.
# The errors have been corrected in the local copy of the CSV file
# read the CSV file
dataset <- read_delim("books.csv", delim = ",")
# This returns the structure of the file which will show 11,127 records
str(dataset)
# Examine the dataset to see if any records have 0 average ratings
dataset %>% filter(average_rating == 0) %>% summarize(n = n())
# 26 records have a zero average rating, we will exclude this from the data set
dataset <- dataset %>% filter(average_rating != 0)
# checking row count to see if we now have 11,101 records
nrow(dataset)
# Now splitting the data set into 3 sections
# Based on various articles researched online,
# a common split for training/testing/validation is 70/15/15
# We will first consider 85% for the training & testing partition set
#and 15% for the validation set
set.seed(1983, sample.kind = "Rounding")
test_index_1 <- createDataPartition(y = dataset$average_rating, times = 1,
p = 0.85, list = FALSE)
partition <- dataset[test_index_1,]
validation <- dataset[-test_index_1,]
# Now splitting partition into a training set and a testing set
test_index_2 <- createDataPartition(y = partition$average_rating, times = 1,
p = 70/85, list = FALSE)
train_set <- partition[test_index_2,]
test_set <- partition[-test_index_2,]
#checking to see if the number of rows is what we expected
nrow(train_set)
nrow(test_set)
nrow(validation)
# checking correlation between number of pages and book rating
cor(train_set$average_rating, train_set$num_pages)
# checking correlation between number of ratings and book rating
cor(train_set$average_rating, train_set$ratings_count)
# checking correlation between number of reviews and book rating
cor(train_set$average_rating, train_set$text_reviews_count)
# adding a new column to calculate review rate (% of people who posted a review)
# out of the people who rated it
temp <- train_set %>%
mutate(review_rate = ifelse(ratings_count == 0, 0,
text_reviews_count / ratings_count))
# checking correlation between review rate and book rating
cor(train_set$average_rating, temp$review_rate)
# adding a new column to calculate length of the book's title
temp <- temp %>% mutate(title_length = nchar(title))
# checking correlation between length of the book's title and book rating
cor(train_set$average_rating, temp$title_length)
# store overall average of book ratings in the training set
overall_avg = mean(train_set$average_rating)
# define function to format any data set so that we can apply it
# to all data sets. Note that none of these transformations are making use
# of the book ratings or data beyond the specific row in the data
format_df = function(df) {
df <- df %>%
# add review rate (number of text ratings / number of ratings)
mutate(review_rate = ifelse(ratings_count == 0, 0,
text_reviews_count / ratings_count)) %>%
# add length of book title
mutate(title_length = nchar(title)) %>%
# add grouping for number of pages
mutate(num_pages_group = round(num_pages/100)) %>%
# add grouping for review rate
mutate(review_rate_group = case_when(
review_rate < 0.15 ~ round(review_rate, 2),
TRUE ~ 0.16)) %>%
# regroup by language. Languages with low book count grouped together.
group_by(language_code) %>%
mutate(language_group = case_when(
language_code == "en-US" ~ "eng",
language_code == "en-GB" ~ "eng",
n() > 10 ~ language_code,
TRUE ~"other")) %>%
ungroup() %>%
# group title length by category
mutate(title_length_group = case_when(
title_length <= 10 ~ "0-10",
title_length <= 20 ~ "11-20",
title_length <= 30 ~ "21-30",
title_length <= 40 ~ "31-40",
title_length <= 50 ~ "41-50",
title_length <= 60 ~ "51-60",
title_length <= 70 ~ "61-70",
title_length <= 80 ~ "71-80",
title_length <= 90 ~ "81-90",
title_length <= 100 ~ "91-100",
TRUE ~ "101+"
))
}
# add data transformations to training set
train_set <- format_df(train_set)
# add data transformations to test set
test_set <- format_df(test_set)
# We will add to the linear prediction model: Review Rate, Number of pages,
# and length of the book's title
fit <- lm(average_rating ~ title_length + review_rate +
num_pages, data = train_set)
y_hat_lm <- predict(fit, test_set)
# calculate RMSE for linear model prediction
sqrt(mean((y_hat_lm - test_set$average_rating)^2))
# # train model using randomForest
set.seed(1983, sample.kind = "Rounding")
train_rf <- randomForest(average_rating ~ title_length + review_rate +
num_pages + authors + language_group,
data = train_set,
ntree = 180,
importance=TRUE)
# # see importance of each variable
varImp(train_rf)
#
# # predict test set using RandomForest
y_hat_rf <- predict(train_rf, test_set)
# # calculate RMSE
sqrt(mean((y_hat_rf - test_set$average_rating)^2))
# calculate biases within the training set
train_set <- train_set %>%
# calculate effect of language
group_by(language_group) %>%
mutate(b_language = mean(average_rating - overall_avg)) %>%
ungroup() %>%
# calculate effect of author, after deducting language
group_by(authors) %>%
mutate(b_authors = mean(average_rating - overall_avg - b_language)) %>%
ungroup %>%
# calculate effect of number of pages, after deducting language
# and author effect
group_by(num_pages_group) %>%
mutate(b_num_pages = mean(average_rating - overall_avg - b_language -
b_authors)) %>%
ungroup %>%
# calculate effect of title length after deducting the other effects
group_by(title_length_group) %>%
mutate(b_title_length = mean(average_rating - overall_avg - b_language -
b_num_pages - b_authors)) %>%
ungroup() %>%
# calculate effect of review rate, after deducting the other effects
group_by(review_rate_group) %>%
mutate(b_review_rate = mean(average_rating - overall_avg - b_language -
b_num_pages - b_authors - b_title_length)) %>%
ungroup()
# extracting language effects from the training set
language_averages <- train_set %>%
group_by(language_group) %>%
summarize(b_language = mean(b_language))
# extracting author effects from the training set
authors_averages <- train_set %>%
group_by(authors) %>%
summarize(b_authors = mean(b_authors))
# extracting title length effects from the training set
title_length_averages <- train_set %>%
group_by(title_length_group) %>%
summarize(b_title_length = mean(b_title_length))
# extracting number of pages effect from the training set
num_pages_averages <- train_set %>%
group_by(num_pages_group) %>%
summarize(b_num_pages = mean(b_num_pages))
# extracting review rate effects from the training set
review_rate_averages <- train_set %>%
group_by(review_rate_group) %>%
summarize(b_review_rate = mean(b_review_rate))
# calculate predicted ratings for the test set
predicted_ratings <- test_set %>%
# integrate language effect as extracted from training set
left_join(language_averages, by='language_group') %>%
# applying a default value for categories not found
mutate(b_language_clean = ifelse(is.na(b_language), 0, b_language)) %>%
# integrate author effect as extracted from training set
left_join(authors_averages, by='authors') %>%
mutate(b_authors_clean = ifelse(is.na(b_authors), 0, b_authors)) %>%
# integrate number of pages effect as extracted from training set
left_join(num_pages_averages, by='num_pages_group') %>%
mutate(b_num_pages_clean = ifelse(is.na(b_num_pages), 0,
b_num_pages)) %>%
# integrate title length effect as extracted from training set
left_join(title_length_averages, by='title_length_group') %>%
mutate(b_title_length_clean = ifelse(is.na(b_title_length), 0,
b_title_length)) %>%
# integrate review rate effect as extracted from training set
left_join(review_rate_averages, by='review_rate_group') %>%
mutate(b_review_rate_clean = ifelse(is.na(b_review_rate), 0,
b_review_rate)) %>%
# calculate prediction
mutate(pred = overall_avg +
b_language_clean +
b_authors_clean +
b_num_pages_clean +
b_title_length_clean +
b_review_rate_clean) %>%
# limit prediction to 0.5 to 5.0 range to avoid going outside the range
mutate(pred_capped = ifelse(pred < 0.5, 0.5, ifelse(pred > 5.0, 5.0, pred)))
# calculate RMSE for test set
sqrt(mean((predicted_ratings$pred_capped - test_set$average_rating)^2))
# number of books based on title length
train_set %>% group_by(title_length_group) %>%
summarize(books = n()) %>%
ggplot(aes(x = title_length_group, y = books)) +
geom_col(fill = "red", alpha=0.5)
# average rating of books based on title length
train_set %>% group_by(title_length_group) %>%
summarize(avg_rating = mean(average_rating)) %>%
ggplot(aes(x = title_length_group, y = avg_rating)) +
geom_col(fill = "red", alpha=0.5)
# number of books based on review rate
train_set %>% group_by(review_rate_group) %>%
summarize(books = n()) %>%
ggplot(aes(x = factor(review_rate_group), y = books)) +
geom_col(fill = "red", alpha=0.5)
# average rating of books based on review rate
train_set %>% group_by(review_rate_group) %>%
summarize(avg_rating = mean(average_rating)) %>%
ggplot(aes(x = review_rate_group, y = avg_rating)) +
# geom_col(fill = "red", alpha=0.5) +
geom_line()
# calculate predicted ratings for the validation set
predicted_ratings_2 <- format_df(validation) %>%
# integrate language effect as extracted from training set
left_join(language_averages, by='language_group') %>%
# applying a default value for categories not found
mutate(b_language_clean = ifelse(is.na(b_language), 0, b_language)) %>%
# integrate author effect as extracted from training set
left_join(authors_averages, by='authors') %>%
mutate(b_authors_clean = ifelse(is.na(b_authors), 0, b_authors)) %>%
# integrate number of pages effect as extracted from training set
left_join(num_pages_averages, by='num_pages_group') %>%
mutate(b_num_pages_clean = ifelse(is.na(b_num_pages), 0,
b_num_pages)) %>%
# integrate title length effect as extracted from training set
left_join(title_length_averages, by='title_length_group') %>%
mutate(b_title_length_clean = ifelse(is.na(b_title_length), 0,
b_title_length)) %>%
# integrate review rate effect as extracted from training set
left_join(review_rate_averages, by='review_rate_group') %>%
mutate(b_review_rate_clean = ifelse(is.na(b_review_rate), 0,
b_review_rate)) %>%
# calculate prediction
mutate(pred = overall_avg +
b_language_clean +
b_authors_clean +
b_num_pages_clean +
b_title_length_clean +
b_review_rate_clean) %>%
# limit prediction to 0.5 to 5.0 range to avoid going outside the range
mutate(pred_capped = ifelse(pred < 0.5, 0.5, ifelse(pred > 5.0, 5.0, pred)))
# calculate RMSE for test set
sqrt(mean((predicted_ratings_2$pred_capped - validation$average_rating)^2))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jagsRun.R
\name{jagsRun}
\alias{jagsRun}
\title{Run JAGS}
\usage{
jagsRun(jagsData, jagsModel, jagsInits, params, jagsID, jagsDsc, db_hash,
n_chain = 3, n_adapt = 5000, n_burn, n_draw, n_thin = 1,
DEBUG = FALSE, EXTRA = FALSE, RANDOM = FALSE, Rhat_max = 1.05,
n_rburn = 0, n_max = NULL, params_extra = params,
params_report = params, ppc = NULL, obj_out = FALSE,
save_data = FALSE, report = TRUE)
}
\arguments{
\item{jagsData}{List containing data to feed to JAGS}
\item{jagsModel}{JAGS model file}
\item{jagsInits}{Initial values for JAGS model. Should be a list of lists (number of embedded lists should equal the number of chains being run in the model). NOTE: each chain should specify a different starting value for a particular parameter and/or use a different seed/RNG to avoid identical chains.}
\item{params}{Character string or vector of character strings specifying which parameters to track}
\item{jagsID}{OPTIONAL. Character string with name of jags run (e.g., 'Run_1')}
\item{jagsDsc}{OPTIONAL. Character string with description of the jags run (e.g., 'First model run')}
\item{db_hash}{OPTIONAL. Character string with description of data version which will be printed in the output file. Could be latest git commit hash.}
\item{n_chain}{Numeric specifying number of chains to be run}
\item{n_adapt}{Numeric specifying how many iterations to use for adaptation}
\item{n_burn}{Numeric specifying how many iterations to use for burn-in}
\item{n_draw}{Numeric specifying how many iterations to use for draw (iterations to be kept beyond adaptation and burn-in)}
\item{n_thin}{Numeric specifying thinning rate}
\item{DEBUG}{Logical used to specify whether DEBUG mode should be used. If \code{TRUE}, \code{jags.model} is called which begins adaptation with adapt = 2. This ensures that the likelihood can be calculated and the model run (priors and inits are appropriate).}
\item{EXTRA}{Logical used to specify whether extra iterations should be run if convergence is not met. If \code{TRUE}, another set of iterations (\code{n_draw}) is drawn (up to \code{n_max}) until convergence is reached (specified by \code{Rhat_max})}
\item{RANDOM}{Logical specifying whether to use script to generate random inits. If \code{TRUE}, \code{jagsInits} should be a function that generates random initial values.}
\item{Rhat_max}{Numeric specifying the maximum Rhat value allowed when \code{EXTRA = TRUE}}
\item{n_rburn}{Numeric specifying how many samples to use for burn in if \code{EXTRA = TRUE} and convergence (defined by \code{Rhat_max}) has not been reached.}
\item{n_max}{Numeric specifying the maximum number of samples to be drawn when \code{EXTRA = TRUE}. The total number of iterations will not exceed this value (\code{n_burn}, \code{n_draw}, and \code{n_rburn} values are included in this total). If left blank, \code{n_max} is set to \code{n_burn} + (\code{n_rburn} + \code{n_draw})*2.}
\item{params_extra}{Character string or vector of character strings specifying which parameters to evaluate convergence for when \code{EXTRA = TRUE}. Must be a subset of \code{params}.}
\item{params_report}{Character string or vector of character strings specifying which parameters to report. Must be a subset of \code{params}.}
\item{ppc}{Character string or vector of character strings specifying the name of elements used for the posterior predictive check (PPC). If specified, the summary information for these elements will be output in the report.}
\item{obj_out}{Logical specifying whether MCMC.list object should be output}
\item{save_data}{Logical specifying whether input data to function should be saved as a .rds object}
\item{report}{Logical specifying whether to generate directory with report and .rds object - if FALSE, MCMC.list object is output}
}
\description{
Run JAGS in parallel and output results of interest. Number of cores used equals number of chains specified. Be sure that your machine has an adequate number of (virtual) cores available to run the model. Function creates a directory with \code{jagsID} name, saves .rds file with model output, and produces output summary in text file format.
}
\section{Notes}{
jagsData should be formatted as such: XXXXX. jagsInits should be formatted as such: XXXXX. Jags params should be formatted as such: XXXXX.
}
| /man/jagsRun.Rd | no_license | caseyyoungflesh/jagsRun | R | false | true | 4,391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jagsRun.R
\name{jagsRun}
\alias{jagsRun}
\title{Run JAGS}
\usage{
jagsRun(jagsData, jagsModel, jagsInits, params, jagsID, jagsDsc, db_hash,
n_chain = 3, n_adapt = 5000, n_burn, n_draw, n_thin = 1,
DEBUG = FALSE, EXTRA = FALSE, RANDOM = FALSE, Rhat_max = 1.05,
n_rburn = 0, n_max = NULL, params_extra = params,
params_report = params, ppc = NULL, obj_out = FALSE,
save_data = FALSE, report = TRUE)
}
\arguments{
\item{jagsData}{List containing data to feed to JAGS}
\item{jagsModel}{JAGS model file}
\item{jagsInits}{Initial values for JAGS model. Should be a list of lists (number of embedded lists should equal the number of chains being run in the model). NOTE: each chain should specify a different starting value for a particular parameter and/or use a different seed/RNG to avoid identical chains.}
\item{params}{Character string or vector of character strings specifying which parameters to track}
\item{jagsID}{OPTIONAL. Character string with name of jags run (e.g., 'Run_1')}
\item{jagsDsc}{OPTIONAL. Character string with description of the jags run (e.g., 'First model run')}
\item{db_hash}{OPTIONAL. Character string with description of data version which will be printed in the output file. Could be latest git commit hash.}
\item{n_chain}{Numeric specifying number of chains to be run}
\item{n_adapt}{Numeric specifying how many iterations to use for adaptation}
\item{n_burn}{Numeric specifying how many iterations to use for burn-in}
\item{n_draw}{Numeric specifying how many iterations to use for draw (iterations to be kept beyond adaptation and burn-in)}
\item{n_thin}{Numeric specifying thinning rate}
\item{DEBUG}{Logical used to specify whether DEBUG mode should be used. If \code{TRUE}, \code{jags.model} is called which begins adaptation with adapt = 2. This ensures that the likelihood can be calculated and the model run (priors and inits are appropriate).}
\item{EXTRA}{Logical used to specify whether extra iterations should be run if convergence is not met. If \code{TRUE}, another set of iterations (\code{n_draw}) is drawn (up to \code{n_max}) until convergence is reached (specified by \code{Rhat_max})}
\item{RANDOM}{Logical specifying whether to use script to generate random inits. If \code{TRUE}, \code{jagsInits} should be a function that generates random initial values.}
\item{Rhat_max}{Numeric specifying the maximum Rhat value allowed when \code{EXTRA = TRUE}}
\item{n_rburn}{Numeric specifying how many samples to use for burn in if \code{EXTRA = TRUE} and convergence (defined by \code{Rhat_max}) has not been reached.}
\item{n_max}{Numeric specifying the maximum number of samples to be drawn when \code{EXTRA = TRUE}. The total number of iterations will not exceed this value (\code{n_burn}, \code{n_draw}, and \code{n_rburn} values are included in this total). If left blank, \code{n_max} is set to \code{n_burn} + (\code{n_rburn} + \code{n_draw})*2.}
\item{params_extra}{Character string or vector of character strings specifying which parameters to evaluate convergence for when \code{EXTRA = TRUE}. Must be a subset of \code{params}.}
\item{params_report}{Character string or vector of character strings specifying which parameters to report. Must be a subset of \code{params}.}
\item{ppc}{Character string or vector of character strings specifying the name of elements used for the posterior predictive check (PPC). If specified, the summary information for these elements will be output in the report.}
\item{obj_out}{Logical specifying whether MCMC.list object should be output}
\item{save_data}{Logical specifying whether input data to function should be saved as a .rds object}
\item{report}{Logical specifying whether to generate directory with report and .rds object - if FALSE, MCMC.list object is output}
}
\description{
Run JAGS in parallel and output results of interest. Number of cores used equals number of chains specified. Be sure that your machine has an adequate number of (virtual) cores available to run the model. Function creates a directory with \code{jagsID} name, saves .rds file with model output, and produces output summary in text file format.
}
\section{Notes}{
jagsData should be formatted as such: XXXXX. jagsInits should be formatted as such: XXXXX. Jags params should be formatted as such: XXXXX.
}
|
#' @title A Subset of the Ames Data Set with Imputed Values
#' @description A randomly selected subset of the Ames data set.
#' The dataset has had its values imputed and any remaining NA values removed.
#' @name AmesImp
#' @docType data
#' @usage AmesImp
#' @format A data frame with 2047 observations on 74 variables.
#' @keywords datasets
#' @source \url{https://ww2.amstat.org/publications/jse/v19n3/Decock/DataDocumentation.txt}
#' @examples
#' str(AmesImp)
#' plot(AmesImp$Neighborhood, y = AmesImp$SalePrice)
NULL
| /R/AmesImp.R | no_license | pikos90/tree.bins | R | false | false | 521 | r | #' @title A Subset of the Ames Data Set with Imputed Values
#' @description A randomly selected subset of the Ames data set.
#' The dataset has had its values imputed and any remaining NA values removed.
#' @name AmesImp
#' @docType data
#' @usage AmesImp
#' @format A data frame with 2047 observations on 74 variables.
#' @keywords datasets
#' @source \url{https://ww2.amstat.org/publications/jse/v19n3/Decock/DataDocumentation.txt}
#' @examples
#' str(AmesImp)
#' plot(AmesImp$Neighborhood, y = AmesImp$SalePrice)
NULL
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_functions.R
\name{get_col}
\alias{get_col}
\title{Select columns by indices}
\usage{
get_col(.m, cols)
}
\arguments{
\item{.m}{A 2d matrix}
\item{cols}{Numeric or character vector or empty (missing)}
}
\value{
Subset of columns of matrix.
}
\description{
Select columns by indices
}
\examples{
get_col(matrix(1:6, nrow = 2), 2)
get_col(matrix(1:6, nrow = 2), c(2,3))
get_col(matrix(1:6, nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("x", "y", "z"))), "z")
}
| /man/get_col.Rd | permissive | user01/uvadsi | R | false | true | 554 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matrix_functions.R
\name{get_col}
\alias{get_col}
\title{Select columns by indices}
\usage{
get_col(.m, cols)
}
\arguments{
\item{.m}{A 2d matrix}
\item{cols}{Numeric or character vector or empty (missing)}
}
\value{
Subset of columns of matrix.
}
\description{
Select columns by indices
}
\examples{
get_col(matrix(1:6, nrow = 2), 2)
get_col(matrix(1:6, nrow = 2), c(2,3))
get_col(matrix(1:6, nrow = 2, byrow = TRUE, dimnames = list(c("a", "b"), c("x", "y", "z"))), "z")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/googleLoginModule.R
\name{googleLoginModule}
\alias{googleLoginModule}
\title{Generate a google login environment, with preprocessing function to tailor developer app}
\usage{
googleLoginModule()
}
\value{
}
\description{
Generate a google login environment, with preprocessing function to tailor developer app
}
\examples{
none
}
| /man/googleLoginModule.Rd | permissive | tpemartin/webtemplate | R | false | true | 410 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/googleLoginModule.R
\name{googleLoginModule}
\alias{googleLoginModule}
\title{Generate a google login environment, with preprocessing function to tailor developer app}
\usage{
googleLoginModule()
}
\value{
}
\description{
Generate a google login environment, with preprocessing function to tailor developer app
}
\examples{
none
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_RegAttrPages.R
\name{FontFaceElementStemvAttribute}
\alias{FontFaceElementStemvAttribute}
\title{stemv}
\description{
Sets the vertical stem width.
}
\section{Available Attribute Values}{
The value is defined as follows:
\describe{
\item{<number>}{Specifies dominant vertical stem width of the \emph{glyphs} via \emph{units per em}.}
}
}
\section{Animatable}{
Not Animatable
}
\section{Used by the Elements}{
\describe{
\item{\emph{Uncategorized Elements}}{\code{\link[=font-face]{font.face}}}
}
}
\keyword{internal}
| /man/FontFaceElementStemvAttribute.Rd | permissive | mslegrand/svgR | R | false | true | 605 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/doc_RegAttrPages.R
\name{FontFaceElementStemvAttribute}
\alias{FontFaceElementStemvAttribute}
\title{stemv}
\description{
Sets the vertical stem width.
}
\section{Available Attribute Values}{
The value is defined as follows:
\describe{
\item{<number>}{Specifies dominant vertical stem width of the \emph{glyphs} via \emph{units per em}.}
}
}
\section{Animatable}{
Not Animatable
}
\section{Used by the Elements}{
\describe{
\item{\emph{Uncategorized Elements}}{\code{\link[=font-face]{font.face}}}
}
}
\keyword{internal}
|
# ABCD Activities questionnaire data
# Summarizes participation in one arts activity (music/dance/art/drama/crafts)
# from the ABCD Sports and Activities questionnaire (NDA17 release).
# NOTE(review): the aggregation logic below implies this script is run once per
# artIdx value (1..5); `allData` accumulates results across runs via full_join.
library(dtplyr)
library(dplyr)
library(tidyr)
library(data.table)
# load data ---------------------------------------------------------------
activitiesFile <- "/Users/jri/Documents/ Research/Projects/simphony/ABCD/Sports and Activities NDA17 Release data.csv"
# subject-level fields carried alongside the activity-specific fields
subjFields <- c("id_redcap","site_name","asnt_timestamp","tlfb_age_calc_inmonths_l","demo_gender_id_v2_l","hr_score","select_language___1")
# which activity to analyze on this run (index into `arts`)
artIdx <- 5
arts <- c("music", "dance", "art", "drama", "crafts")
theArt = arts[artIdx]
# questionnaire column suffixes shared by every activity (e.g. music_school, music_nyr, ...)
artFieldsBase <- c("_school", "_outside", "_private", "_self", "_nyr", "_nmonth", "_perwk", "_tspent", "_p12")
artFields <- paste0(theArt, artFieldsBase)
data = read.csv(activitiesFile)
names(data) = sub("sai_p_","",names(data)) #remove the prefix
mdata <- data[,c(subjFields, artFields)]
#make names generic for rest of code
names(mdata) = sub(paste0(theArt,"_"), "", names(mdata)) #remove art-specific prefix
setnames(mdata,old=c("school","outside"),new=c("Group_school","Group_outside"))
# TRUE if any of the four participation columns is TRUE (NA-propagating OR)
mdata$anyArt = mdata$Group_school | mdata$Group_outside | mdata$private | mdata$self
#mdata["anyArt"][is.na(mdata["anyArt"])] <- FALSE
#convert perwk and tspent to real numbers
# days per week (answer codes 8-10 encode sub-weekly frequencies)
mdata$perwk[mdata$perwk==8] <- 0.5 #Once every 2 weeks
mdata$perwk[mdata$perwk==9] <- 0.25 #One day a month (once every 4 weeks)
mdata$perwk[mdata$perwk==10] <- 0.1 #less than one day a month (arbitrary, small number akin to once every 2.5 months)
mdata$perwk[mdata$perwk==0] <- NA
# minutes per session - convert from code to actual minutes
tspent_minutes = c(NA, 15,30,45, 60, 90, 120, 150, 180, 240) #conversion from answercode+1 to minutes (from SAIQP_ABCD_Instrument.pdf)
tspent_ok = !is.na(mdata$tspent)
mdata$tspent[tspent_ok] = tspent_minutes[mdata$tspent[tspent_ok]+1]
#save for aggregation-------------------------------------------------------
# keep the overall and past-12-months participation flags, renamed per activity,
# then merge into `allData` (accumulated across successive runs of this script)
shortData <- mdata[,c("id_redcap", "anyArt", "p12")]
setnames(shortData, old=c("anyArt", "p12"), new=c(theArt, paste0(theArt,"_p12")))
if (artIdx==1) {
allData <- shortData
} else {
allData = full_join(allData, shortData)
}
# Descriptive analysis-------------------------------------------------------
print(paste("======== ", theArt, " ========"))
#%music overall
# whole-sample participation rate and counts (mutate without group_by)
mdata3 <- mdata %>% mutate(percArt = 100 * sum(anyArt,na.rm=TRUE)/n(), percArtP12= 100 * sum(p12,na.rm=TRUE)/n(), artN = sum(anyArt,na.rm=TRUE), siteN = n())
print('=== participation ===')
print(paste("N=",mdata3[1,"siteN"], ", artN=", mdata3[1,"artN"] , ", percent=", mdata3[1,"percArt"], ", active=", mdata3[1,"percArtP12"]))
# music in past 12 months (assumes music at some point in life--question only asked if they indicate some music)
table(mdata[,"p12"],exclude = NULL)
# 0 1 <NA>
# 255 1656 2613
# implies 1656+255 = 1911 have some music ---42% of total 4524
# relationships between four types of music activity
table(mdata[,c("Group_school","Group_outside")])
table(mdata[,c("private","self")])
table(mdata[,c("Group_school","Group_outside","private","self")])
cor(mdata[,c("Group_school","Group_outside","private","self")], use = "complete.obs")
#mean practice amounts
# restrict to participants before summarizing practice variables
onlymdata <- subset(mdata,anyArt==TRUE)
print('=== mean practice amounts ===')
print(summary(onlymdata[,c("nyr","nmonth","perwk","tspent")]))
#my.f = function(x) c(mean = mean(x,na.rm=TRUE), median = median(x,na.rm=TRUE))
#onlymdata[, sapply(.SD, my.f), .SDcols = c("nyr","nmonth","perwk","tspent"), by = site_name]
#music % by region
mdata2 <- mdata %>% group_by(site_name) %>% mutate(percArt = 100 * sum(anyArt,na.rm=TRUE)/n(), artN = sum(anyArt,na.rm=TRUE), siteN = n())
print('=== participation by region ===')
print(arrange(aggregate(cbind(siteN,artN, percArt) ~ site_name, data=mdata2, FUN=mean), desc(percArt)), row.names=FALSE)
# summary of demographics
mdata$demo_gender_id_v2_l = factor(mdata$demo_gender_id_v2_l)
print(summary(mdata[,c("tlfb_age_calc_inmonths_l", "demo_gender_id_v2_l")]))
#analyze aggregation (enable after all five activities have been merged into allData)
if (FALSE) {
allData$anyArt = allData$music | allData$dance | allData$art | allData$drama | allData$crafts
allData$anyArt_p12 = allData$music_p12 | allData$dance_p12 | allData$art_p12 | allData$drama_p12 | allData$crafts_p12
allData$nArt = rowSums(allData[,arts], na.rm=TRUE)
allData$nArt_p12 = rowSums(allData[,paste0(arts,"_p12")], na.rm=TRUE)
print(table(allData[,"nArt"],exclude = NULL))
print(table(allData[,"nArt_p12"],exclude = NULL))
print(paste("N art=",sum(allData$anyArt, na.rm=TRUE), ", N art p12=", sum(allData$anyArt_p12, na.rm=TRUE)))
}
| /AnalyzeActivities_Arts_NDA17.R | no_license | jiversen/ABCD-saiq-music | R | false | false | 4,536 | r | # ABCD Activities questionnaire data
# ABCD Activities questionnaire data.
# Summarizes participation in one arts activity (music/dance/art/drama/crafts)
# from the ABCD Sports and Activities questionnaire (NDA17 release).
# NOTE(review): the aggregation logic below implies this script is run once per
# artIdx value (1..5); `allData` accumulates results across runs via full_join.
library(dtplyr)
library(dplyr)
library(tidyr)
library(data.table)
# load data ---------------------------------------------------------------
activitiesFile <- "/Users/jri/Documents/ Research/Projects/simphony/ABCD/Sports and Activities NDA17 Release data.csv"
# subject-level fields carried alongside the activity-specific fields
subjFields <- c("id_redcap","site_name","asnt_timestamp","tlfb_age_calc_inmonths_l","demo_gender_id_v2_l","hr_score","select_language___1")
# which activity to analyze on this run (index into `arts`)
artIdx <- 5
arts <- c("music", "dance", "art", "drama", "crafts")
theArt = arts[artIdx]
# questionnaire column suffixes shared by every activity (e.g. music_school, music_nyr, ...)
artFieldsBase <- c("_school", "_outside", "_private", "_self", "_nyr", "_nmonth", "_perwk", "_tspent", "_p12")
artFields <- paste0(theArt, artFieldsBase)
data = read.csv(activitiesFile)
names(data) = sub("sai_p_","",names(data)) #remove the prefix
mdata <- data[,c(subjFields, artFields)]
#make names generic for rest of code
names(mdata) = sub(paste0(theArt,"_"), "", names(mdata)) #remove art-specific prefix
setnames(mdata,old=c("school","outside"),new=c("Group_school","Group_outside"))
# TRUE if any of the four participation columns is TRUE (NA-propagating OR)
mdata$anyArt = mdata$Group_school | mdata$Group_outside | mdata$private | mdata$self
#mdata["anyArt"][is.na(mdata["anyArt"])] <- FALSE
#convert perwk and tspent to real numbers
# days per week (answer codes 8-10 encode sub-weekly frequencies)
mdata$perwk[mdata$perwk==8] <- 0.5 #Once every 2 weeks
mdata$perwk[mdata$perwk==9] <- 0.25 #One day a month (once every 4 weeks)
mdata$perwk[mdata$perwk==10] <- 0.1 #less than one day a month (arbitrary, small number akin to once every 2.5 months)
mdata$perwk[mdata$perwk==0] <- NA
# minutes per session - convert from code to actual minutes
tspent_minutes = c(NA, 15,30,45, 60, 90, 120, 150, 180, 240) #conversion from answercode+1 to minutes (from SAIQP_ABCD_Instrument.pdf)
tspent_ok = !is.na(mdata$tspent)
mdata$tspent[tspent_ok] = tspent_minutes[mdata$tspent[tspent_ok]+1]
#save for aggregation-------------------------------------------------------
# keep the overall and past-12-months participation flags, renamed per activity,
# then merge into `allData` (accumulated across successive runs of this script)
shortData <- mdata[,c("id_redcap", "anyArt", "p12")]
setnames(shortData, old=c("anyArt", "p12"), new=c(theArt, paste0(theArt,"_p12")))
if (artIdx==1) {
allData <- shortData
} else {
allData = full_join(allData, shortData)
}
# Descriptive analysis-------------------------------------------------------
print(paste("======== ", theArt, " ========"))
#%music overall
# whole-sample participation rate and counts (mutate without group_by)
mdata3 <- mdata %>% mutate(percArt = 100 * sum(anyArt,na.rm=TRUE)/n(), percArtP12= 100 * sum(p12,na.rm=TRUE)/n(), artN = sum(anyArt,na.rm=TRUE), siteN = n())
print('=== participation ===')
print(paste("N=",mdata3[1,"siteN"], ", artN=", mdata3[1,"artN"] , ", percent=", mdata3[1,"percArt"], ", active=", mdata3[1,"percArtP12"]))
# music in past 12 months (assumes music at some point in life--question only asked if they indicate some music)
table(mdata[,"p12"],exclude = NULL)
# 0 1 <NA>
# 255 1656 2613
# implies 1656+255 = 1911 have some music ---42% of total 4524
# relationships between four types of music activity
table(mdata[,c("Group_school","Group_outside")])
table(mdata[,c("private","self")])
table(mdata[,c("Group_school","Group_outside","private","self")])
cor(mdata[,c("Group_school","Group_outside","private","self")], use = "complete.obs")
#mean practice amounts
# restrict to participants before summarizing practice variables
onlymdata <- subset(mdata,anyArt==TRUE)
print('=== mean practice amounts ===')
print(summary(onlymdata[,c("nyr","nmonth","perwk","tspent")]))
#my.f = function(x) c(mean = mean(x,na.rm=TRUE), median = median(x,na.rm=TRUE))
#onlymdata[, sapply(.SD, my.f), .SDcols = c("nyr","nmonth","perwk","tspent"), by = site_name]
#music % by region
mdata2 <- mdata %>% group_by(site_name) %>% mutate(percArt = 100 * sum(anyArt,na.rm=TRUE)/n(), artN = sum(anyArt,na.rm=TRUE), siteN = n())
print('=== participation by region ===')
print(arrange(aggregate(cbind(siteN,artN, percArt) ~ site_name, data=mdata2, FUN=mean), desc(percArt)), row.names=FALSE)
# summary of demographics
mdata$demo_gender_id_v2_l = factor(mdata$demo_gender_id_v2_l)
print(summary(mdata[,c("tlfb_age_calc_inmonths_l", "demo_gender_id_v2_l")]))
#analyze aggregation (enable after all five activities have been merged into allData)
if (FALSE) {
allData$anyArt = allData$music | allData$dance | allData$art | allData$drama | allData$crafts
allData$anyArt_p12 = allData$music_p12 | allData$dance_p12 | allData$art_p12 | allData$drama_p12 | allData$crafts_p12
allData$nArt = rowSums(allData[,arts], na.rm=TRUE)
allData$nArt_p12 = rowSums(allData[,paste0(arts,"_p12")], na.rm=TRUE)
print(table(allData[,"nArt"],exclude = NULL))
print(table(allData[,"nArt_p12"],exclude = NULL))
print(paste("N art=",sum(allData$anyArt, na.rm=TRUE), ", N art p12=", sum(allData$anyArt_p12, na.rm=TRUE)))
}
|
# regression bern-gamma for vector
# Fit a Bernoulli-Gamma ("hurdle") model in Stan: each observation is zero with
# probability 1-theta, otherwise follows Gamma(shape, rate). Data are simulated
# with qmap::rberngamma; the model is fit twice -- once with an unrestricted
# gamma and once with the gamma truncated above at 1 (Stan's T[,1]).
graphics.off() # This closes all of R's graphics windows.
rm(list=ls()) # Careful! This clears all of R's memory!
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(rstan)
require('qmap')
set.seed(123)
n <- 100
prob <- 0.8 # non-zero probability
shape <- 0.6
rate <- 2.0
scale <- 1/rate # rberngamma is parameterized by scale; Stan's gamma uses rate
y <- rberngamma(n, prob, scale=scale, shape=shape)
dataList = list(
y = y,
N = length(y)
)
# Stan model: zeros come from a Bernoulli(1-theta) hurdle; positive values get
# a gamma likelihood. Half-Cauchy priors on all three parameters.
model_string = "
data {
int<lower=0> N;
vector<lower=0>[N] y; //
}
parameters {
real<lower=0> shape; // shape
real<lower=0> rate; // rate
real<lower=0, upper=1> theta; // non-zero probability
}
model {
shape ~ cauchy(0, 2.5); # half-cauchy
rate ~ cauchy(0, 2.5); # half-cauchy
theta ~ cauchy(0, 2.5); # half-cauchy
for (n in 1:N) {
(y[n] == 0) ~ bernoulli(1-theta);
if (y[n] > 0)
y[n] ~ gamma(shape, rate); //
}
}
"
stanDso <- stan_model( model_code = model_string)
nChains = 4
iterSteps = 5000
burnInSteps = 500
# Get MC sample of posterior:
stanFit <- sampling( object=stanDso ,
data = dataList ,
#pars = parameters , # optional
chains = nChains ,
iter = iterSteps ,
warmup = burnInSteps ,
#init = initsList , # optional
thin = 1 )
sampled <- extract(stanFit) # list
paramNames <- names(sampled)
# drop the last element of the extract() list (presumably lp__) so only model
# parameters are printed/plotted -- TODO confirm
paramNames <- paramNames[-length(paramNames)]
print(stanFit, digits_summary=3, pars=paramNames, probs=c(0.025, 0.5, 0.975))
traceplot(stanFit, pars=paramNames)
# try with truncated gamma
# keep only observations below 1 to match the T[,1] upper truncation below
y1 <- y[y<1.0]
dataList = list(
y = y1,
N = length(y1)
)
# Same hurdle model, but the gamma likelihood is truncated above at 1 via T[,1].
model_string_tr = "
data {
int<lower=0> N;
vector<lower=0, upper=1>[N] y; //
}
parameters {
real<lower=0> shape; // shape
real<lower=0> rate; // rate
real<lower=0, upper=1> theta; // non-zero probability
}
model {
shape ~ cauchy(0, 2.5); # half-cauchy
rate ~ cauchy(0, 2.5); # half-cauchy
theta ~ cauchy(0, 2.5); # half-cauchy
for (n in 1:N) {
(y[n] == 0) ~ bernoulli(1-theta);
if (y[n] > 0)
y[n] ~ gamma(shape, rate) T[,1]; //
}
}
"
stanDso <- stan_model( model_code = model_string_tr)
nChains = 4
iterSteps = 5000
burnInSteps = 500
# Get MC sample of posterior:
stanFit <- sampling( object=stanDso ,
data = dataList ,
#pars = parameters , # optional
chains = nChains ,
iter = iterSteps ,
warmup = burnInSteps ,
#init = initsList , # optional
thin = 1 )
print(stanFit, digits_summary=3, pars=paramNames, probs=c(0.025, 0.5, 0.975))
traceplot(stanFit, pars=paramNames)
| /code/berngamma_y.R | no_license | dynaryu/fatality | R | false | false | 2,768 | r | # regression bern-gamma for vector
# regression bern-gamma for vector.
# Fit a Bernoulli-Gamma ("hurdle") model in Stan: each observation is zero with
# probability 1-theta, otherwise follows Gamma(shape, rate). Data are simulated
# with qmap::rberngamma; the model is fit twice -- once with an unrestricted
# gamma and once with the gamma truncated above at 1 (Stan's T[,1]).
graphics.off() # This closes all of R's graphics windows.
rm(list=ls()) # Careful! This clears all of R's memory!
# NOTE(review): require() returns FALSE instead of erroring when a package is
# missing; library() would fail fast here.
require(rstan)
require('qmap')
set.seed(123)
n <- 100
prob <- 0.8 # non-zero probability
shape <- 0.6
rate <- 2.0
scale <- 1/rate # rberngamma is parameterized by scale; Stan's gamma uses rate
y <- rberngamma(n, prob, scale=scale, shape=shape)
dataList = list(
y = y,
N = length(y)
)
# Stan model: zeros come from a Bernoulli(1-theta) hurdle; positive values get
# a gamma likelihood. Half-Cauchy priors on all three parameters.
model_string = "
data {
int<lower=0> N;
vector<lower=0>[N] y; //
}
parameters {
real<lower=0> shape; // shape
real<lower=0> rate; // rate
real<lower=0, upper=1> theta; // non-zero probability
}
model {
shape ~ cauchy(0, 2.5); # half-cauchy
rate ~ cauchy(0, 2.5); # half-cauchy
theta ~ cauchy(0, 2.5); # half-cauchy
for (n in 1:N) {
(y[n] == 0) ~ bernoulli(1-theta);
if (y[n] > 0)
y[n] ~ gamma(shape, rate); //
}
}
"
stanDso <- stan_model( model_code = model_string)
nChains = 4
iterSteps = 5000
burnInSteps = 500
# Get MC sample of posterior:
stanFit <- sampling( object=stanDso ,
data = dataList ,
#pars = parameters , # optional
chains = nChains ,
iter = iterSteps ,
warmup = burnInSteps ,
#init = initsList , # optional
thin = 1 )
sampled <- extract(stanFit) # list
paramNames <- names(sampled)
# drop the last element of the extract() list (presumably lp__) so only model
# parameters are printed/plotted -- TODO confirm
paramNames <- paramNames[-length(paramNames)]
print(stanFit, digits_summary=3, pars=paramNames, probs=c(0.025, 0.5, 0.975))
traceplot(stanFit, pars=paramNames)
# try with truncated gamma
# keep only observations below 1 to match the T[,1] upper truncation below
y1 <- y[y<1.0]
dataList = list(
y = y1,
N = length(y1)
)
# Same hurdle model, but the gamma likelihood is truncated above at 1 via T[,1].
model_string_tr = "
data {
int<lower=0> N;
vector<lower=0, upper=1>[N] y; //
}
parameters {
real<lower=0> shape; // shape
real<lower=0> rate; // rate
real<lower=0, upper=1> theta; // non-zero probability
}
model {
shape ~ cauchy(0, 2.5); # half-cauchy
rate ~ cauchy(0, 2.5); # half-cauchy
theta ~ cauchy(0, 2.5); # half-cauchy
for (n in 1:N) {
(y[n] == 0) ~ bernoulli(1-theta);
if (y[n] > 0)
y[n] ~ gamma(shape, rate) T[,1]; //
}
}
"
stanDso <- stan_model( model_code = model_string_tr)
nChains = 4
iterSteps = 5000
burnInSteps = 500
# Get MC sample of posterior:
stanFit <- sampling( object=stanDso ,
data = dataList ,
#pars = parameters , # optional
chains = nChains ,
iter = iterSteps ,
warmup = burnInSteps ,
#init = initsList , # optional
thin = 1 )
print(stanFit, digits_summary=3, pars=paramNames, probs=c(0.025, 0.5, 0.975))
traceplot(stanFit, pars=paramNames)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.