Dataset schema: content (large_string, 0-6.46M chars) | path (large_string, 3-331 chars) | license_type (large_string, 2 classes) | repo_name (large_string, 5-125 chars) | language (large_string, 1 class) | is_vendor (bool, 2 classes) | is_generated (bool, 2 classes) | length_bytes (int64, 4-6.46M) | extension (large_string, 75 classes) | text (string, 0-6.46M chars)
`km` <-
function(formula = ~1, design, response, covtype = "matern5_2",
coef.trend = NULL, coef.cov = NULL, coef.var = NULL,
nugget = NULL, nugget.estim = FALSE, noise.var = NULL,
estim.method = "MLE", penalty = NULL,
optim.method = "BFGS", lower = NULL, upper = NULL,
parinit = NULL, multistart = 1, control = NULL, gr = TRUE,
iso = FALSE, scaling = FALSE, knots = NULL, kernel = NULL) {
if (!is.null(kernel)){
covtype <- "covUser"
nugget.estim <- FALSE
}
model <- new("km")
model@call <- match.call()
## formula : remove automatically the response from it
data <- data.frame(design)
model@trend.formula <- formula <- drop.response(formula, data = data)
F <- model.matrix(formula, data=data)
# X <- as.matrix(design)
X <- as.matrix(data)
y <- as.matrix(response)
model@X <- X
model@y <- y
model@d <- ncol(X)
model@n <- nrow(X)
model@F <- F
model@p <- ncol(F)
model@noise.flag <- (length(noise.var) != 0)
model@noise.var <- as.numeric(noise.var)
isTrend <- length(coef.trend) != 0
isCov <- length(coef.cov) != 0
isVar <- length(coef.var) != 0
if ((isTrend && isCov && isVar) || (covtype == "covUser")) {
known.param <- "All"
nugget.estim <- FALSE
} else if ((isTrend) && ((!isCov) || (!isVar))) {
known.param <- "Trend"
} else if ((!isTrend) && isCov && isVar) {
known.param <- "CovAndVar"
nugget.estim <- FALSE
} else { # In the other cases: All parameters are estimated (at this stage)
known.param <- "None"
coef.var <- coef.cov <- NULL
}
if (isCov) { # curious : why 'known.covparam' is not a boolean ??
known.covparam <- "All"
} else {
known.covparam <- "None"
}
model@covariance <- covStruct.create(covtype = covtype, d = model@d,
known.covparam = known.covparam, var.names = colnames(X),
coef.cov=coef.cov, coef.var=coef.var, nugget = nugget,
nugget.estim = nugget.estim,
nugget.flag = ((length(nugget) != 0) || nugget.estim),
iso = iso, scaling = scaling, knots = knots, kernel = kernel)
model@known.param <- known.param
## Now, at least some parameters are unknown
if (known.param=="All") {
model@trend.coef <- as.numeric(coef.trend)
model@param.estim <- FALSE
validObject(model)
model <- computeAuxVariables(model)
return(model)
}
if (known.param=="CovAndVar") {
model@param.estim <- TRUE
validObject(model)
model <- computeAuxVariables(model)
x <- backsolve(t(model@T), model@y, upper.tri = FALSE)
beta <- compute.beta.hat(x=x, M=model@M)
z <- compute.z(x=x, M=model@M, beta=beta)
model@z <- z
model@trend.coef <- beta
return(model)
}
if (known.param=="Trend") {
model@trend.coef <- as.numeric(coef.trend)
}
if (length(penalty) == 0) {
if (is.element(estim.method, c("MLE", "LOO"))) {
model@method <- estim.method
} else {
stop("estim.method must be: 'MLE' or 'LOO'")
}
} else {
if (covtype != "gauss") {
stop("At this stage, Penalized Maximum Likelihood is coded only for Gaussian covariance")
}
penalty.set<- c("SCAD")
if (!is.element(penalty$fun, penalty.set)) {
stop("At this stage, the penalty #function has to be one of : SCAD")
}
if (length(penalty$value) == 0) {
penalty$value <- sqrt(2*log(model@n)/model@n)*seq(from = 1, by = 0.5, length = 15)
}
penalty$fun.derivative <- paste(penalty$fun, ".derivative", sep = "")
model@penalty <- penalty
model@method <- "PMLE"
}
model@param.estim <- TRUE
model@optim.method <- as.character(optim.method)
if ((length(lower) == 0) || (length(upper) == 0)) {
bounds <- covParametersBounds(model@covariance, design)
if (length(lower) == 0) lower <- bounds$lower
if (length(upper) == 0) upper <- bounds$upper
}
if ((multistart>1) && (optim.method=="gen")){
warning("The 'multistart' argument is not used when 'optim.method' is 'gen'.")
multistart <- 1
}
control$multistart <- multistart
model@lower <- as.numeric(lower)
model@upper <- as.numeric(upper)
model@parinit <- as.numeric(parinit)
if (optim.method == "BFGS") {
if (length(control$pop.size) == 0) control$pop.size <- 20
control$pop.size <- max(control$pop.size, multistart)
if (identical(control$trace, FALSE)) control$trace <- 0
if ((length(control$trace) == 0) || (identical(control$trace, TRUE))) {
control$trace <- 3
}
}
if (optim.method == "gen") {
d <- ncol(design)
if (length(control$pop.size) == 0) control$pop.size <- min(20, floor(4 + 3*log(d)))
if (length(control$max.generations) == 0) control$max.generations <- 5
if (length(control$wait.generations) == 0) control$wait.generations <- 2
if (length(control$BFGSburnin)==0) control$BFGSburnin <- 0
if (identical(control$trace, FALSE)) {
control$trace <- 0}
else control$trace <- 1
}
upper.alpha <- control$upper.alpha
if (length(upper.alpha) == 0) {
control$upper.alpha <- 1 - 1e-8
} else if ((upper.alpha < 0) || (upper.alpha > 1)) {
control$upper.alpha <- 1 - 1e-8
}
model@control <- control
model@gr <- as.logical(gr)
envir.logLik <- new.env()
validObject(model, complete=TRUE)
varStationaryClass <- c("covTensorProduct", "covScaling", "covAffineScaling", "covIso")
if (length(noise.var)!=0) { # noisy observations
model@case <- "LLconcentration_beta"
} else if (!is.element(class(model@covariance), varStationaryClass)) {
model@case <- "LLconcentration_beta"
} else { # variance-stationary kernels
knownNugget <- ((length(nugget) > 0) && (!nugget.estim))
if (nugget.estim) { # then concentrate / beta, v=sigma^2+tau^2 and alpha=sigma^2/v
model@case <- "LLconcentration_beta_v_alpha"
} else if (knownNugget) {
model@case <- "LLconcentration_beta"
} else {
model@case <- "LLconcentration_beta_sigma2"
}
}
# if ((length(noise.var) != 0) || ((length(nugget) != 0) && (!nugget.estim))) {
# model@case <- "Nuggets"
# }
#
# if ((length(nugget) == 0) && (!nugget.estim) && (length(noise.var) == 0)) {
# model@case <- "NoNugget"
# }
#
# if ((length(noise.var) == 0) && (nugget.estim)) {
# model@case <- "1Nugget"
# }
# knownNugget <- (length(nugget)>0) & (!nugget.estim)
if ((model@method=="LOO") & (model@case!="LLconcentration_beta_sigma2")) {
stop("leave-One-Out is not available for this model")
}
f <- kmEstimate
if (identical(model@method, "PMLE")) {
cv <- function(lambda, object, f) {
object@penalty$value <- lambda
object@control$trace <- 0
object <- f(object, envir = envir.logLik)
criterion <- sum((object@y-leaveOneOut.km(object, type = "UK")$mean)^2)
return(criterion)
}
lambda.val <- model@penalty$value
nval <- length(lambda.val)
u <- rep(0, nval)
for (i in 1L:nval) {
u[i] <- cv(lambda.val[i], object = model, f)
}
plot(lambda.val, u)
lambda <- lambda.val[which.min(u)]
model@penalty$value <- lambda
model <- f(model, envir = envir.logLik)
} else {
model <- f(model, envir = envir.logLik)
}
return(model)
}
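# A minimal usage sketch (hedged: assumes the DiceKriging package is attached,
# so helpers such as drop.response() and kmEstimate() are available, and uses
# its built-in branin() test function):
# design <- expand.grid(x1 = seq(0, 1, length = 4), x2 = seq(0, 1, length = 4))
# y <- apply(design, 1, branin)
# m <- km(~1, design = design, response = y, covtype = "matern5_2")
# # supplying coef.trend, coef.cov and coef.var together sets known.param to
# # "All" above, skipping estimation entirely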
path: /DiceKriging/R/km.R | license_type: no_license | repo_name: ingted/R-Examples | language: R | is_vendor: false | is_generated: false | length_bytes: 7,463 | extension: r
local_disable_cache()
describe("Compiling against theme", {
# Example CSS that includes one variable, one function call, one mixin
bs4_css <- ".foo { background-color: $primary; color: color-yiq($primary); width: 120px; height: 120px; }"
resolved_css <- ".foo { background-color: #007bff; color: #fff; width: 120px; height: 120px; }"
# Compare sass_partial(input1, theme) and sass(input2)
expect_bs4_equal <- function(input1, input2, options = sass_options(), theme = bs_theme()) {
expect_css(
sass_partial(input1, theme, options = options),
sass(input2, options = options)
)
}
it("automatically includes Bootstrap 4 variables/functions", {
expect_bs4_equal(bs4_css, resolved_css)
})
it("respects output_style", {
expect_bs4_equal(bs4_css, resolved_css, options = sass_options(output_style = "compressed"))
})
it("respects theme options", {
# Theme options are respected
expect_bs4_equal(
bs4_css, ".foo { background-color: #FF0000; color: #fff; width: 120px; height: 120px; }",
theme = bs_theme(primary = "red")
)
# Unless they're not
expect_bs4_equal(bs4_css, resolved_css)
})
})
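# A hedged note on expect_bs4_equal(): sass_partial() compiles input1 with the
# theme's Sass layers (variables, functions, mixins) in scope but without
# emitting the theme's own rules, so its output should match plain sass() run
# on the hand-resolved CSS in input2.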
path: /tests/testthat/test-bs4-deps.R | license_type: permissive | repo_name: Andreas-Lafferte/bslib | language: R | is_vendor: false | is_generated: false | length_bytes: 1,161 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/knitr.R
\name{guess_knitr_output_format}
\alias{guess_knitr_output_format}
\title{Guess knitr output format}
\usage{
guess_knitr_output_format()
}
\value{
'html', 'latex', or something else. If we are not in a knitr document, returns an empty string.
}
\description{
Convenience function which tries to guess the ultimate output from knitr and rmarkdown.
}
\examples{
\dontrun{
# in a knitr document
guess_knitr_output_format()
}
}
path: /man/guess_knitr_output_format.Rd | license_type: no_license | repo_name: md0u80c9/huxtable | language: R | is_vendor: false | is_generated: true | length_bytes: 510 | extension: rd
stopifnot(length(.dynLibs()) > 0)
pkgload::load_all("empty")
stopifnot(length(.dynLibs()) > 0)
gc()
stopifnot(length(.dynLibs()) > 0)
pkgload::load_all("empty")
stopifnot(length(.dynLibs()) > 0)
gc()
stopifnot(length(.dynLibs()) > 0)
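# (A hedged reading of this test: the repeated load_all()/gc() cycles check
# that garbage collection does not unregister loaded DLLs from .dynLibs();
# "empty" is presumably a minimal fixture package shipped with the tests.)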
path: /tests/wipe.R | license_type: no_license | repo_name: r-lib/pkgload | language: R | is_vendor: false | is_generated: false | length_bytes: 234 | extension: r
testlist <- list(id = NULL, id = NULL, booklet_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), person_id = integer(0))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result)
path: /dexterMST/inst/testfiles/is_person_booklet_sorted/libFuzzer_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1612726097-test.R | license_type: no_license | repo_name: akhikolla/updatedatatype-list1 | language: R | is_vendor: false | is_generated: false | length_bytes: 456 | extension: r
library(easyVerification)
### Name: veriApply
### Title: Apply Verification Metrics to Large Datasets
### Aliases: veriApply
### Keywords: utilities
### ** Examples
tm <- toyarray()
f.me <- veriApply('EnsMe', tm$fcst, tm$obs)
## find more examples and instructions in the vignette
## Not run:
##D devtools::install_github("MeteoSwiss/easyVerification", build_vignettes=TRUE)
##D library('easyVerification')
##D vignette('easyVerification')
## End(Not run)
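# (A hedged note: 'EnsMe' names the ensemble mean error metric, which
# veriApply() maps over the non-ensemble dimensions of tm$fcst and tm$obs.)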
path: /data/genthat_extracted_code/easyVerification/examples/veriApply.Rd.R | license_type: no_license | repo_name: surayaaramli/typeRrh | language: R | is_vendor: false | is_generated: false | length_bytes: 467 | extension: r
# title: "Run KNN"
# author: "Corne Oosthuizen - OSTAND005"
# date: "29 June 2017"
library(caret)
library(FNN)
train <- as.data.frame(readRDS('data/train_14_symAdd_pca.rds'))
test <- as.data.frame(readRDS('data/test_14_symAdd_pca.rds'))
names(train)[1] <- "label"
train[,1] <- as.factor(train[,1])
names(test)[1] <- "label"
test[,1] <- as.factor(test[,1])
mod.file <- "results/digit_knn_y111.rds"
mod.txt <- "results/digit_knn_y111.txt"
if (file.exists(mod.file)) {
mod <- readRDS(mod.file)
runTime <- NA # no timing recorded when reloading a cached model
} else {
startTime <- Sys.time()
mod <- knn( train = train[,-1], test = test[,-1], cl = train$label, k = 5, algorithm=c("kd_tree"))
endTime <- Sys.time()
runTime <- as.numeric(difftime(endTime, startTime, units = "secs"))
saveRDS(mod, file = mod.file)
}
cm <- confusionMatrix(mod, test$label)
accuracy <- cm$overall["Accuracy"]
accuracy
yhatTest = as.factor(as.matrix(mod))
a = (yhatTest == test$label)
saveRDS(a, file="results/digit_knn_y111_err.rds")
fc <- file(mod.txt)
writeLines (c(paste("RunTime: ", runTime), paste("Accuracy: ", accuracy)), fc)
close(fc)
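# A hedged note on the workflow above: FNN::knn() returns the predicted class
# labels as a factor, so `mod` feeds straight into confusionMatrix() and the
# as.factor(as.matrix(mod)) round-trip simply re-coerces those same labels.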
path: /run_digits_knn_Y111.R | license_type: no_license | repo_name: TurRil/STA5076Z-Supervised-Learning | language: R | is_vendor: false | is_generated: false | length_bytes: 1,093 | extension: r
# title: "Run KNN"
# author: "Corne Oosthuizen - OSTAND005"
# date: "29 June 2017"
library(caret)
library(FNN)
train <- as.data.frame(readRDS('data/train_14_symAdd_pca.rds'))
test <- as.data.frame(readRDS('data/test_14_symAdd_pca.rds'))
names(train)[1] <- "label"
train[,1] <- as.factor(train[,1])
names(test)[1] <- "label"
test[,1] <- as.factor(test[,1])
mod.file <- "results/digit_knn_y111.rds"
mod.txt <- "results/digit_knn_y111.txt"
if (file.exists(mod.file)) {
mod <- readRDS(mod.file)
} else {
startTime <- Sys.time()
mod <- knn( train = train[,-1], test = test[,-1], cl = train$label, k = 5, algorithm=c("kd_tree"))
endTime <- Sys.time()
runTime <- as.numeric(difftime(endTime, startTime, units = "secs"))
saveRDS(mod, file = mod.file)
}
cm <- confusionMatrix(mod, test$label)
accuracy <- cm$overall["Accuracy"]
accuracy
yhatTest = as.factor(as.matrix(mod))
a = (yhatTest == test$label)
saveRDS(a, file="results/digit_knn_y111_err.rds")
fc <- file(mod.txt)
writeLines (c(paste("RunTime: ", runTime), paste("Accuracy: ", accuracy)), fc)
close(fc)
|
altm <- function(x){
y <- dim(x)
z <- x
z[1,] <- x[1,]*x[y[1],]
return(z)
}
# function converts a naive post-breeding Leslie matrix
# into a post-breeding Leslie matrix
# by multiplying fecundity values in the first row by
# survival values in the last row
# essentially assumes that only adults breed
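# A quick illustration of the row-1 update above (hypothetical values):
# L <- matrix(c(0, 2, 4, 0.5, 0, 0, 0, 0.8, 0), nrow = 3, byrow = TRUE)
# altm(L) # row 1 becomes c(0, 2, 4) * c(0, 0.8, 0) = c(0, 1.6, 0)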
path: /R/functions/altm.R | license_type: no_license | repo_name: geryan/rfst | language: R | is_vendor: false | is_generated: false | length_bytes: 312 | extension: r
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 163800
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 163784
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 163784
c
c Input Parameter (command line, file):
c input filename QBFLIB/Wintersteiger/LinearBitvectorRankingFunction/audio_ddksynth_voice.cpp.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 48380
c no.of clauses 163800
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 163784
c
c QBFLIB/Wintersteiger/LinearBitvectorRankingFunction/audio_ddksynth_voice.cpp.qdimacs 48380 163800 E1 [1191 12594 24807 36210] 0 256 48119 163784 RED
path: /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Wintersteiger/LinearBitvectorRankingFunction/audio_ddksynth_voice.cpp/audio_ddksynth_voice.cpp.R | license_type: no_license | repo_name: arey0pushpa/dcnf-autarky | language: R | is_vendor: false | is_generated: false | length_bytes: 825 | extension: r
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vcovFPC.R
\name{getZt}
\alias{getZt}
\title{getZt - get Zt from an lme object analogous to
getME(object.merMod, "Zt"). See \code{\link{getME}}.}
\usage{
getZt(object)
}
\arguments{
\item{object}{An \code{lme} model}
}
\description{
getZt - get Zt from an lme object analogous to
getME(object.merMod, "Zt"). See \code{\link{getME}}.
}
\examples{
# see examples in \code{vcovFPC}
}
path: /man/getZt.Rd | license_type: no_license | repo_name: ICTatRTI/PersonAlytics | language: R | is_vendor: false | is_generated: true | length_bytes: 479 | extension: rd
#stats analysis phyllochron 1-2
#stats analysis phyllochron 1-3
#by Dana Looschelders
library(agricolae)
library(PMCMRplus)
library(PMCMR)
library(tidyverse)
library(MASS)
setwd("C:/00 Dana/Uni/Internship/Work")
dat=read.table("Phyllochron.csv", sep=";", dec=" ", header=T)
names(dat)=c("ID", "Variety", "Treatment", "Chamber", "Phyllo12", "Phyllo13", "Phyllo14", "Phyllo23", "Phyllo24", "Phyllo34", "Phyllo1S", "Type")
str(dat)
phyllo=cbind.data.frame("Variety"=dat$Variety,
"Treatment"=dat$Treatment,
"Phyllochron"=dat$Phyllo12,
"type"=dat$Type)
str(phyllo)
#write Master Table with significance results (only kruskal, not posthoc)
master_stats=data.frame(names(dat[,5:11]), "sig_treatment"=NA, "sig_Variety"=NA, "sig_type"=NA)
master_stats=master_stats[-7,]
#data exploration
boxplot(phyllo$Phyllochron~phyllo$type,
main="Phyllochron 1-2 among types",
xlab="Treatment", ylab="Phyllochron [d]")
boxplot(phyllo$Phyllochron~phyllo$Treatment,
main="Phyllochron 1-2 among Treatments",
xlab="Treatment", ylab="Phyllochron [d]")
boxplot(phyllo$Phyllochron~phyllo$Variety)
#test for normality
qqnorm(phyllo$Phyllochron)
qqline(phyllo$Phyllochron)
shapiro.test(phyllo$Phyllochron) #p-value is 8.838*10^-7
#data is not normally distributed
#use kruskal test
kruskal.test(phyllo$Phyllochron~phyllo$type) #p-value = 0.02205
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo12"]=0.02205
kruskal.test(phyllo$Phyllochron~phyllo$Variety) #p-value = 2.181e-06
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo12"]=2.18e-06
kruskal.test(phyllo$Phyllochron~phyllo$Treatment) #p-value = 0.03885
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo12"]=0.0389
#posthoc test
posthoc.kruskal.nemenyi.test(phyllo$Phyllochron~phyllo$type, dist="Chisquare")
posthoc.kruskal.nemenyi.test(phyllo$Phyllochron~phyllo$Treatment, dist="Chisquare") #no significance
posthoc.kruskal.nemenyi.test(phyllo$Phyllochron~phyllo$Variety, dist="Chisquare")
#glm
phyllo_glm=glm(phyllo$Phyllochron~phyllo$type+phyllo$Treatment+phyllo$Variety)
summary(phyllo_glm)
plot(phyllo_glm)
#glm
glm(phyllo$Phyllochron~phyllo$type)
#*******************************************************************************************************
#phyllochron 1-3
phyllo13=cbind.data.frame("Variety"=dat$Variety,
"Treatment"=dat$Treatment,
"Phyllochron"=dat$Phyllochron.1.3,
"type"=dat$type)
str(phyllo13)
#data exploration
hist(phyllo13$Phyllochron)
boxplot(phyllo13$Phyllochron~phyllo13$Treatment)
boxplot(phyllo13$Phyllochron~phyllo13$Variety)
boxplot(phyllo13$Phyllochron~phyllo13$type)
#test assumptions
qqnorm(phyllo13$Phyllochron)
qqline(phyllo13$Phyllochron)
shapiro.test(phyllo13$Phyllochron) #not normally distributed p-value: 0.00013
#kruskal test
kruskal.test(phyllo13$Phyllochron~phyllo13$Variety) #significant: p value 1.664e-06
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo13"]=1.66e-06
kruskal.test(phyllo13$Phyllochron~phyllo13$Treatment) #significant: p value 0.0005
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo13"]=0.0005
kruskal.test(phyllo13$Phyllochron~phyllo13$type) ##significant: p-value = 0.02428
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo13"]=0.02428
#posthoc tests
posthoc.kruskal.nemenyi.test(phyllo13$Phyllochron~phyllo13$Variety, dist="Chisquare")
posthoc.kruskal.nemenyi.test(phyllo13$Phyllochron~phyllo13$Treatment, dist="Chisquare")
posthoc.kruskal.nemenyi.test(phyllo13$Phyllochron~phyllo13$type, dist="Chisquare")
#fit a glm
summary(glm(phyllo13$Phyllochron~phyllo13$Treatment*phyllo13$Variety))
#try a boxcox transformation
test.lm=lm(phyllo13$Phyllochron~phyllo13$Treatment*phyllo13$Variety)
plot(test.lm)
bc=boxcox(test.lm)
#phyllochron 2-3
hist(dat$Phyllo23)
boxplot(dat$Phyllo23~dat$Variety)
boxplot(dat$Phyllo23~dat$Treatment)
boxplot(dat$Phyllo23~dat$Type)
#test for normality
qqnorm(dat$Phyllo23)
qqline(dat$Phyllo23)
shapiro.test(dat$Phyllo23)
#not normally distributed
kruskal.test(dat$Phyllo23~dat$Variety) #not significant
kruskal.test(dat$Phyllo23~dat$Treatment) #barely significant
kruskal.test(dat$Phyllo23~dat$Type) #not significant
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo23"]=0.0469
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo23"]="NOT"
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo23"]="NOT"
posthoc.kruskal.nemenyi.test(dat$Phyllo23~dat$Treatment) #no significance
#phyllo 34
hist(dat$Phyllo34)
boxplot(dat$Phyllo34~dat$Variety)
boxplot(dat$Phyllo34~dat$Treatment)
boxplot(dat$Phyllo34~dat$Type)
qqnorm(dat$Phyllo34)
qqline(dat$Phyllo34)
shapiro.test(dat$Phyllo34)
kruskal.test(dat$Phyllo34~dat$Variety) #significant
kruskal.test(dat$Phyllo34~dat$Treatment) #not significant
kruskal.test(dat$Phyllo34~dat$Type) #not significant
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo34"]="NOT"
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo34"]=0.0117
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo34"]="NOT"
#phyllo 14
hist(dat$Phyllo14)
boxplot(dat$Phyllo14~dat$Variety)
boxplot(dat$Phyllo14~dat$Treatment)
boxplot(dat$Phyllo14~dat$Type)
qqnorm(dat$Phyllo14)
qqline(dat$Phyllo14)
shapiro.test(dat$Phyllo14) #not normally distributed
kruskal.test(dat$Phyllo14~dat$Variety) #significant 4.29e-06
kruskal.test(dat$Phyllo14~dat$Treatment) #not significant
kruskal.test(dat$Phyllo14~dat$Type) #not significant
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo14"]="NOT"
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo14"]=4.29e-06
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo14"]="NOT"
#phyllo 24
hist(dat$Phyllo24)
boxplot(dat$Phyllo24~dat$Variety)
boxplot(dat$Phyllo24~dat$Treatment)
boxplot(dat$Phyllo24~dat$Type)
qqnorm(dat$Phyllo24)
qqline(dat$Phyllo24)
shapiro.test(dat$Phyllo24) #not normally distributed
kruskal.test(dat$Phyllo24~dat$Variety) #significant
kruskal.test(dat$Phyllo24~dat$Treatment) #not significant
kruskal.test(dat$Phyllo24~dat$Type) #not significant
master_stats$sig_treatment[master_stats$names.dat...5.11..=="Phyllo24"]="NOT"
master_stats$sig_Variety[master_stats$names.dat...5.11..=="Phyllo24"]=0.00144
master_stats$sig_type[master_stats$names.dat...5.11..=="Phyllo24"]="NOT"
write.csv(x=master_stats, file="master_stats.csv")
#write master table to aggregate data for all phyllochrons
agg_data_mean=aggregate(cbind(Phyllo12, Phyllo13, Phyllo14, Phyllo23, Phyllo24, Phyllo34)~Variety+Treatment, data=dat, FUN =mean)
agg_data_sd=aggregate(cbind(Phyllo12, Phyllo13, Phyllo14, Phyllo23, Phyllo24, Phyllo34)~Variety+Treatment, data=dat, FUN =sd)
names=names(agg_data_sd)
names_new=paste("sd_", names, sep="") # sep="" so columns become sd_Phyllo12 etc., not "sd_ Phyllo12"
names(agg_data_sd)=names_new
names2=names(agg_data_mean)
names(agg_data_mean)=paste("mean_", names2, sep="")
agg_data=cbind(agg_data_mean, agg_data_sd)
str(agg_data)
agg_data=agg_data[,-9]
agg_data=agg_data[,-9]
str(agg_data)
plot(agg_data$mean_Variety[agg_data$mean_Treatment=="CON"], agg_data$mean_Phyllo12[agg_data$mean_Treatment=="CON"],"l") # column names carry the mean_ prefix set above
summary(agg_data)
write.csv(agg_data, file="agg_data.csv")
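#The per-phyllochron blocks above could be collapsed into a loop; a hedged
#sketch reusing the column names already set on `dat`:
#for (col in c("Phyllo12","Phyllo13","Phyllo14","Phyllo23","Phyllo24","Phyllo34")) {
#  print(kruskal.test(dat[[col]] ~ dat$Variety))
#}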
path: /stats_analysis_phyllochron.R | license_type: no_license | repo_name: DanaLooschelders/Stats_Phenology | language: R | is_vendor: false | is_generated: false | length_bytes: 7,456 | extension: r
vplog <- getLogger('vis.preprocess')
detect_language <- function(text) {
lang_detected <- lapply(text, textcat)
# from russian-iso8859_5 only take russian
lang_detected <- unlist(lapply(lang_detected,
function(x) {vapply(strsplit(x,"-"), `[`, 1, FUN.VALUE=character(1))}
))
return(lang_detected)
}
filter_duplicates <- function(metadata, text, list_size) {
#If list_size is greater than -1 and smaller than the actual list size, deduplicate titles
if(list_size > -1) {
output = deduplicate_titles(metadata, list_size)
text = subset(text, !(id %in% output))
metadata = subset(metadata, !(id %in% output))
text = head(text, list_size)
metadata = head(metadata, list_size)
}
return(list(metadata=metadata, text=text))
}
deduplicate_titles <- function(metadata, list_size) {
output <- c()
metadata$oa_state[metadata$oa_state == "2"] <- 0
metadata = metadata[order(-as.numeric(metadata$oa_state),-stri_length(metadata$subject),
-stri_length(metadata$paper_abstract),-stri_length(metadata$authors),
-stri_length(metadata$published_in)),]
index = (grepl(" ", metadata$title) == FALSE | stri_length(metadata$title) < 15)
metadata$title[index] <- paste(metadata$title[index], metadata$authors[index], sep=" ")
num_items = length(metadata$id)
max_replacements = ifelse(num_items > list_size, num_items - list_size, -1)
ids = metadata$id
titles = metadata$title
titles = unlist(lapply(titles, tolower))
count = 1
lv_matrix = stringdistmatrix(titles, method="lv")
length_matrix <- stri_length(titles)
n = length(length_matrix)
str_matrix = matrix(length_matrix, n, n)
str_matrix_t <- t(str_matrix)
str_max_matrix = pmax(str_matrix, str_matrix_t)
lv_ratio_matrix = as.matrix(lv_matrix)/str_max_matrix
duplicates <- lv_ratio_matrix < 1/15.83
duplicates[lower.tri(duplicates, diag=TRUE)] <- NA
remove_ids <- which(apply(duplicates, 2, FUN=function(x){any(x)}))
output = ids[remove_ids]
vplog$info(paste0("Number of max. duplicate entries: ", length(output)))
if(max_replacements > -1) {
output = head(output, max_replacements)
}
vplog$info(paste0("Number of duplicate entries: ", length(output)))
return(output)
}
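# Worked example of the threshold above: for two titles of lengths 60 and 64,
# the pair is flagged when lv/64 < 1/15.83, i.e. when the Levenshtein distance
# is below 64/15.83 ~ 4.04 (at most 4 edits).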
replace_keywords_if_empty <- function(metadata, stops, service) {
missing_subjects = which(lapply(metadata$subject, function(x) {nchar(x)}) <= 1)
if (service == "linkedcat" || service == "linkedcat_authorview" || service == "linkedcat_browseview") {
metadata$subject[missing_subjects] <- metadata$bkl_caption[missing_subjects]
metadata$subject[is.na(metadata$subject)] <- ""
} else {
candidates = mapply(paste, metadata$title)
candidates = lapply(candidates, function(x)paste(removeWords(x, stops), collapse=""))
candidates = lapply(candidates, function(x) {gsub("[^[:alpha:]]", " ", x)})
candidates = lapply(candidates, function(x) {gsub(" +", " ", x)})
candidates_bigrams = lapply(lapply(candidates, function(x)unlist(lapply(ngrams(unlist(strsplit(x, split=" ")), 2), paste, collapse="_"))), paste, collapse=" ")
#candidates_trigrams = lapply(lapply(candidates, function(x)unlist(lapply(ngrams(unlist(strsplit(x, split=" ")), 3), paste, collapse="_"))), paste, collapse=" ")
candidates = mapply(paste, candidates, candidates_bigrams)
#candidates = lapply(candidates, function(x) {gsub('\\b\\d+\\s','', x)})
nn_corpus = Corpus(VectorSource(candidates))
nn_tfidf = TermDocumentMatrix(nn_corpus, control = list(tokenize = SplitTokenizer, weighting = function(x) weightSMART(x, spec="ntn")))
tfidf_top = apply(nn_tfidf, 2, function(x) {x2 <- sort(x, TRUE);x2[x2>=x2[3]]})
tfidf_top_names = lapply(tfidf_top, names)
replacement_keywords <- lapply(tfidf_top_names, function(x) filter_out_nested_ngrams(x, 3))
replacement_keywords = lapply(replacement_keywords, FUN = function(x) {paste(unlist(x), collapse="; ")})
replacement_keywords = gsub("_", " ", replacement_keywords)
metadata$subject[missing_subjects] <- replacement_keywords[missing_subjects]
}
return(metadata)
}
get_OHE_feature <-function(metadata, feature_name) {
ohe_encoder <- onehot(metadata[feature_name], stringsAsFactors = TRUE, max_levels = 100)
ohe_feat <- data.frame(predict(ohe_encoder, metadata[feature_name]))
rownames(ohe_feat) <- metadata$id
return(ohe_feat)
}
path: /R/preprocess.R | license_type: permissive | repo_name: sbalci/okm | language: R | is_vendor: false | is_generated: false | length_bytes: 4,439 | extension: r
# script to plot correlation/interaction plot
args<-commandArgs(TRUE)
if (!require("gplots")) {
install.packages("gplots", dependencies = TRUE)
library(gplots)
}
if (!require("RColorBrewer")) {
install.packages("RColorBrewer", dependencies = TRUE)
library(RColorBrewer)
}
valA=args[1] # e.g. eRNA.expression.tab
valB=args[2] # e.g. genes.expression.tab
posA=args[3] # e.g. eRNA position bed
posB=args[4] # e.g. genes position bed
regions=args[5]
bins=args[6] # 100
# debug
valA="eRNA.meanRPM.xls"; valB="/data/neurogen/rnaseq_PD/results/merged/genes.fpkm.cuffnorm.allSamples.uniq.xls";
posA="eRNA.bed"; posB="/data/neurogen/referenceGenome/Homo_sapiens/UCSC/hg19/Annotation/Genes/genes.bed";
regions="chr17_43583680_44506585"; bins=500;
message("reading data ...")
# ===============================================
valA=read.table(valA, header=T, stringsAsFactors =F) # e.g. eRNA.expression.tab
rownames(valA) = valA[,1]; valA=valA[,-1];
valB=read.table(valB, header=T, stringsAsFactors =F) # e.g. genes.expression.tab
rownames(valB) = valB[,1]; valB=valB[,-1];
colnames(valB)=gsub("FPKM.","",colnames(valB))
colnames(valB)=gsub("_0$","",colnames(valB))
# same order of columns for A and B
common = intersect(colnames(valA), colnames(valB))
valA=valA[,common]; valB=valB[,common]
posA=read.table(posA, header=F, stringsAsFactors =F) # e.g. eRNA position bed
colnames(posA) = c('chr','start','end','name') #,'score','strand')
posA$score=0; posA$strand=".";
posB=read.table(posB, header=F, stringsAsFactors =F) # e.g. genes position bed
colnames(posB) = c('chr','start','end','name','score','strand','symbol','type')
rownames(posA) = posA$name; rownames(posB) = posB$name
region = strsplit(gsub(",","",regions), "[:-_]")[[1]]
c=region[1]; s=as.numeric(region[2]); e=as.numeric(region[3])
step=(e-s)/bins
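# e.g. with the debug region chr17:43,583,680-44,506,585 and bins = 500:
# step = (44506585 - 43583680)/500 = 922905/500 = 1845.81 bp per bin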
message("calculating the binned correlation matrix ...")
# ===============================================
df=matrix(data=0, nrow=bins, ncol=bins)
for(i in 1:bins){
Si=s+(i-1)*step; Ei=Si+step; # 's' is the region start parsed above ('start' would hit the stats generic)
nms = subset(posA, chr==c & start<Ei & end>Si, select=name)[,1] # filter to the target chromosome
X=apply(valA[nms,],2,mean)
message(paste(" ",i,"bin..."))
for(j in 1:bins){
Sj=s+(j-1)*step; Ej=Sj+step;
nms = subset(posB, chr==c & start<Ej & end>Sj, select=name)[,1]
Y=apply(valB[nms,],2,mean)
df[i,j]=cor(X,Y,method='spearman')
}
}
message("plot the correlation matrix ...")
# ===============================================
# creates a 5 x 5 inch image
png(paste(gsub(",","",regions),"correlationplot","png", sep="."), # create PNG for the heat map
width = 5*300, # 5 x 300 pixels
height = 5*300,
res = 300, # 300 pixels per inch
pointsize = 8) # smaller font size
my_palette <- colorRampPalette(c("blue", "white", "red"))(n = 1000)
heatmap.2(df,
main = "Correlation", # heat map title
density.info="none", # turns off density plot inside color legend
trace="none", # turns off trace lines inside the heat map
margins =c(12,9), # widens margins around plot
col=my_palette, # use the color palette defined earlier
dendrogram="none", # draw no dendrograms
Colv = F,
Rowv = F,
symbreaks=T)
dev.off()
path: /modules/binned.correlation.plot.R | license_type: no_license | repo_name: sterding/BRAINcode | language: R | is_vendor: false | is_generated: false | length_bytes: 3,306 | extension: r
library(tidyverse)
library(readxl)
library(extrafont)
library(ggthemes)
lodi_weather <- read_xlsx("Lodi weather data 94 thru 99.xlsx")
#Keep date column, precipitation and temperature
lodi_weather <- lodi_weather %>% select(DATE,PRCP,TMAX,TMIN)
#Combine precipitation so I get total precipitation across each year
lodi_weather <- lodi_weather %>% separate(DATE,c("year", "month", "day"), sep="-", remove=F)
lodi_weather <- lodi_weather %>% group_by(year) %>% mutate(total_precip=cumsum(PRCP))
#turns out some dates are missing so I can't just order them by dates
lodi_weather <- lodi_weather %>% arrange(year, month, day) %>% mutate(date_order=1:length(year))
lodi_weather <- lodi_weather %>% select(date=DATE, year:day, max_temp=TMAX, min_temp=TMIN, total_precip:date_order)
#weather plots
#Set temps recorded as 0 to NA (treated as missing)
lodi_weather$min_temp <- gsub("\\<0\\>", NA, lodi_weather$min_temp)
lodi_weather$max_temp <- gsub("\\<0\\>", NA, lodi_weather$max_temp)
#precipitation
library(scales)
lodi_weather$date <- as.Date(lodi_weather$date)
year0 <- lodi_weather %>% filter(year=="1994") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
year1 <- lodi_weather %>% filter(year=="1995") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
year2 <- lodi_weather %>% filter(year=="1996") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
year3 <- lodi_weather %>% filter(year=="1997") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
year4 <- lodi_weather %>% filter(year=="1998") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
year5 <- lodi_weather %>% filter(year=="1999") %>% ggplot(aes(x=date, y=total_precip)) +
geom_line(size=1.5)+
theme_few()+
labs(x="Date", y="Cumultative Precipitation (in)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(0,32)
require(gridExtra)
pdf("figureS2_precip.pdf",width = 15, height=10,family="Arial")
grid.arrange(year0, year1, year2, year3,year4, year5, ncol=3)
dev.off()
#TEMPERATURE min
lodi_weather$min_temp <- as.numeric(lodi_weather$min_temp)
year0 <- lodi_weather %>% filter(year=="1994") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
year1 <- lodi_weather %>% filter(year=="1995") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
year2 <- lodi_weather %>% filter(year=="1996") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
year3 <- lodi_weather %>% filter(year=="1997") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
year4 <- lodi_weather %>% filter(year=="1998") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
year5 <- lodi_weather %>% filter(year=="1999") %>% ggplot(aes(x=date, y=min_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="min temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(20,74)
require(gridExtra)
pdf("figureS2_min_temp.pdf",width = 15, height=10,family="Arial")
grid.arrange(year0, year1, year2, year3,year4, year5, ncol=3)
dev.off()
#TEMPERATURE max
lodi_weather$max_temp <- as.numeric(lodi_weather$max_temp)
year0 <- lodi_weather %>% filter(year=="1994") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
year1 <- lodi_weather %>% filter(year=="1995") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
year2 <- lodi_weather %>% filter(year=="1996") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
year3 <- lodi_weather %>% filter(year=="1997") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
year4 <- lodi_weather %>% filter(year=="1998") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
year5 <- lodi_weather %>% filter(year=="1999") %>% ggplot(aes(x=date, y=max_temp)) +
geom_point()+
theme_few()+
labs(x="Date", y="max temp (F)") +
theme(axis.text=element_text(size=10, colour="black"),axis.title=element_text(size=12,face="bold", colour="black"))+
theme(legend.title = element_blank())+
ylim(38,106)
require(gridExtra)
pdf("figureS2_max_temp.pdf",width = 15, height=10,family="Arial")
grid.arrange(year0, year1, year2, year3,year4, year5, ncol=3)
dev.off()
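#A more compact alternative (sketch): the per-year blocks above differ only in
#year, variable, label, geom, and y-limits, so a small helper removes the
#repetition. Assumes the same lodi_weather object built above and a ggplot2
#version with the .data pronoun (>= 3.0).
plot_year <- function(yr, yvar, ylab, ylims, geom = geom_point()) {
  lodi_weather %>%
    filter(year == yr) %>%
    ggplot(aes(x = date, y = .data[[yvar]])) +
    geom +
    theme_few() +
    labs(x = "Date", y = ylab) +
    theme(axis.text = element_text(size = 10, colour = "black"),
          axis.title = element_text(size = 12, face = "bold", colour = "black"),
          legend.title = element_blank()) +
    ylim(ylims)
}
max_temp_plots <- lapply(as.character(1994:1999), plot_year,
                         yvar = "max_temp", ylab = "max temp (F)",
                         ylims = c(38, 106))
#grid.arrange(grobs = max_temp_plots, ncol = 3) #same layout as the pdf calls above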
|
/figureS2.R
|
permissive
|
zoemigicovsky/grapevine_rootstocks
|
R
| false
| false
| 7,777
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lexruntimeservice_operations.R
\name{lexruntimeservice_post_content}
\alias{lexruntimeservice_post_content}
\title{Sends user input (text or speech) to Amazon Lex}
\usage{
lexruntimeservice_post_content(botName, botAlias, userId,
sessionAttributes, requestAttributes, contentType, accept, inputStream,
activeContexts)
}
\arguments{
\item{botName}{[required] Name of the Amazon Lex bot.}
\item{botAlias}{[required] Alias of the Amazon Lex bot.}
\item{userId}{[required] The ID of the client application user. Amazon Lex uses this to identify
a user's conversation with your bot. At runtime, each request must
contain the \code{userID} field.
To decide the user ID to use for your application, consider the
following factors.
\itemize{
\item The \code{userID} field must not contain any personally identifiable
information of the user, for example, name, personal identification
numbers, or other end user personal information.
\item If you want a user to start a conversation on one device and
continue on another device, use a user-specific identifier.
\item If you want the same user to be able to have two independent
conversations on two different devices, choose a device-specific
identifier.
\item A user can't have two independent conversations with two different
versions of the same bot. For example, a user can't have a
conversation with the PROD and BETA versions of the same bot. If you
anticipate that a user will need to have conversation with two
different versions, for example, while testing, include the bot
alias in the user ID to separate the two conversations.
}}
\item{sessionAttributes}{You pass this value as the \code{x-amz-lex-session-attributes} HTTP header.
Application-specific information passed between Amazon Lex and a client
application. The value must be a JSON serialized and base64 encoded map
with string keys and values. The total size of the \code{sessionAttributes}
and \code{requestAttributes} headers is limited to 12 KB.
For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-session-attribs}{Setting Session Attributes}.}
\item{requestAttributes}{You pass this value as the \code{x-amz-lex-request-attributes} HTTP header.
Request-specific information passed between Amazon Lex and a client
application. The value must be a JSON serialized and base64 encoded map
with string keys and values. The total size of the \code{requestAttributes}
and \code{sessionAttributes} headers is limited to 12 KB.
The namespace \verb{x-amz-lex:} is reserved for special attributes. Don't
create any request attributes with the prefix \verb{x-amz-lex:}.
For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html#context-mgmt-request-attribs}{Setting Request Attributes}.}
\item{contentType}{[required] You pass this value as the \code{Content-Type} HTTP header.
Indicates the audio format or text. The header value must start with one
of the following prefixes:
\itemize{
\item PCM format, audio data must be in little-endian byte order.
\itemize{
\item audio/l16; rate=16000; channels=1
\item audio/x-l16; sample-rate=16000; channel-count=1
\item audio/lpcm; sample-rate=8000; sample-size-bits=16;
channel-count=1; is-big-endian=false
}
\item Opus format
\itemize{
\item audio/x-cbr-opus-with-preamble; preamble-size=0;
bit-rate=256000; frame-size-milliseconds=4
}
\item Text format
\itemize{
\item text/plain; charset=utf-8
}
}}
\item{accept}{You pass this value as the \code{Accept} HTTP header.
The message Amazon Lex returns in the response can be either text or
speech based on the \code{Accept} HTTP header value in the request.
\itemize{
\item If the value is \verb{text/plain; charset=utf-8}, Amazon Lex returns text
in the response.
\item If the value begins with \verb{audio/}, Amazon Lex returns speech in the
response. Amazon Lex uses Amazon Polly to generate the speech (using
the configuration you specified in the \code{Accept} header). For
example, if you specify \code{audio/mpeg} as the value, Amazon Lex
returns speech in the MPEG format.
\item If the value is \code{audio/pcm}, the speech returned is \code{audio/pcm} in
16-bit, little endian format.
\item The following are the accepted values:
\itemize{
\item audio/mpeg
\item audio/ogg
\item audio/pcm
\item text/plain; charset=utf-8
\item audio/* (defaults to mpeg)
}
}}
\item{inputStream}{[required] User input in PCM or Opus audio format or text format as described in
the \code{Content-Type} HTTP header.
You can stream audio data to Amazon Lex or you can create a local buffer
that captures all of the audio data before sending. In general, you get
better performance if you stream audio data rather than buffering the
data locally.}
\item{activeContexts}{A list of contexts active for the request. A context can be activated
when a previous intent is fulfilled, or by including the context in the
request.
If you don't specify a list of contexts, Amazon Lex will use the current
list of contexts for the session. If you specify an empty list, all
contexts for the session are cleared.}
}
\value{
A list with the following syntax:\preformatted{list(
contentType = "string",
intentName = "string",
nluIntentConfidence = "string",
alternativeIntents = "string",
slots = "string",
sessionAttributes = "string",
sentimentResponse = "string",
message = "string",
messageFormat = "PlainText"|"CustomPayload"|"SSML"|"Composite",
dialogState = "ElicitIntent"|"ConfirmIntent"|"ElicitSlot"|"Fulfilled"|"ReadyForFulfillment"|"Failed",
slotToElicit = "string",
inputTranscript = "string",
audioStream = raw,
botVersion = "string",
sessionId = "string",
activeContexts = "string"
)
}
}
\description{
Sends user input (text or speech) to Amazon Lex. Clients use this API to
send text and audio requests to Amazon Lex at runtime. Amazon Lex
interprets the user input using the machine learning model that it built
for the bot.
The \code{\link[=lexruntimeservice_post_content]{post_content}} operation supports
audio input at 8kHz and 16kHz. You can use 8kHz audio to achieve higher
speech recognition accuracy in telephone audio applications.
In response, Amazon Lex returns the next message to convey to the user.
Consider the following example messages:
\itemize{
\item For a user input "I would like a pizza," Amazon Lex might return a
response with a message eliciting slot data (for example,
\code{PizzaSize}): "What size pizza would you like?".
\item After the user provides all of the pizza order information, Amazon
Lex might return a response with a message to get user confirmation:
"Order the pizza?".
\item After the user replies "Yes" to the confirmation prompt, Amazon Lex
might return a conclusion statement: "Thank you, your cheese pizza
has been ordered.".
}
Not all Amazon Lex messages require a response from the user. For
example, conclusion statements do not require a response. Some messages
require only a yes or no response. In addition to the \code{message}, Amazon
Lex provides additional context about the message in the response that
you can use to enhance client behavior, such as displaying the
appropriate client user interface. Consider the following examples:
\itemize{
\item If the message is to elicit slot data, Amazon Lex returns the
following context information:
\itemize{
\item \code{x-amz-lex-dialog-state} header set to \code{ElicitSlot}
\item \code{x-amz-lex-intent-name} header set to the intent name in the
current context
\item \code{x-amz-lex-slot-to-elicit} header set to the slot name for which
the \code{message} is eliciting information
\item \code{x-amz-lex-slots} header set to a map of slots configured for
the intent with their current values
}
\item If the message is a confirmation prompt, the
\code{x-amz-lex-dialog-state} header is set to \code{Confirmation} and the
\code{x-amz-lex-slot-to-elicit} header is omitted.
\item If the message is a clarification prompt configured for the intent,
indicating that the user intent is not understood, the
\code{x-amz-dialog-state} header is set to \code{ElicitIntent} and the
\code{x-amz-slot-to-elicit} header is omitted.
}
In addition, Amazon Lex also returns your application-specific
\code{sessionAttributes}. For more information, see \href{https://docs.aws.amazon.com/lex/latest/dg/context-mgmt.html}{Managing Conversation Context}.
}
\section{Request syntax}{
\preformatted{svc$post_content(
botName = "string",
botAlias = "string",
userId = "string",
sessionAttributes = "string",
requestAttributes = "string",
contentType = "string",
accept = "string",
inputStream = raw,
activeContexts = "string"
)
}
}
\keyword{internal}
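% A possible call pattern (sketch, not generated by roxygen2): bot name, alias,
% and user ID below are hypothetical placeholders, and AWS credentials are
% assumed to be configured for the paws client.
% svc <- lexruntimeservice()
% resp <- svc$post_content(
%   botName = "OrderFlowers", botAlias = "prod", userId = "user-123",
%   contentType = "text/plain; charset=utf-8",
%   accept = "text/plain; charset=utf-8",
%   inputStream = charToRaw("I would like a pizza")
% )
% resp$message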
|
/cran/paws.machine.learning/man/lexruntimeservice_post_content.Rd
|
permissive
|
TWarczak/paws
|
R
| false
| true
| 8,761
|
rd
|
testlist <- list(Rs = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = numeric(0), temp = numeric(0))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
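#Sketch (not part of the generated test): this is an autogenerated fuzz/valgrind
#input, so the zero-length vectors are intentional. A hypothetical well-formed
#call, with made-up values, would look like:
#str(do.call(meteor:::ET0_Makkink, list(Rs = 15, atmp = 101.3, relh = 60, temp = 20)))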
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615850515-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 188
|
r
|
#' Cast a vector to a specified type
#'
#' Casting supports a wider range of transformations than are automatically
#' applied by coercion (e.g. with [vec_coerce()]).
#'
#' @section Casting rules:
#'
#' Casting is more flexible than coercion, and allows for the possibility of
#' information loss. This diagram summarises the possible casts: you can
#' `vec_cast()` from any type to any other type it is connected to, provided
#' that the arrows are followed in only one direction. For example you can
#' cast from logical to character, and from list to time, but you can not
#' cast from logical to datetime.
#'
#' \figure{cast.png}
#'
#' The rules for coercing from a list are fairly strict: each component of the
#' list must be of length 1, and must be coercible to type `to`.
#'
#' @param x Vector to cast.
#' @param to Type to cast to.
#' @return A vector the same length as `x` with the same type as `to`,
#' or an error if the cast is not possible. A warning is generated if
#' information is lost when casting between compatible types (i.e. when
#' there is no 1-to-1 mapping for a specific value).
#' @export
#' @keywords internal
#' @examples
#' # x is a double, but no information is lost
#' vec_cast(1, integer())
#'
#' # Information is lost so a warning is generated
#' vec_cast(1.5, integer())
#'
#' # No sensible coercion is possible so an error is generated
#' \dontrun{
#' vec_cast(1.5, factor("a"))
#' }
#'
vec_cast <- function(x, to) {
UseMethod("vec_cast", to)
}
# Base vectors --------------------------------------------------------------
#' @export
vec_cast.NULL <- function(x, to) {
x
}
#' @export
vec_cast.logical <- function(x, to) {
if (is_null(x)) {
x
} else if (is_bare_logical(x)) {
x
} else if (is_bare_integer(x)) {
warn_cast_lossy_vector(x, to, !x %in% c(0L, 1L))
vec_coerce_bare(x, "logical")
} else if (is_bare_double(x)) {
warn_cast_lossy_vector(x, to, !x %in% c(0, 1))
vec_coerce_bare(x, "logical")
} else if (is_bare_character(x)) {
warn_cast_lossy_vector(x, to, !toupper(x) %in% c("T", "F", "TRUE", "FALSE"))
vec_coerce_bare(x, "logical")
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.integer <- function(x, to) {
if (is_null(x)) {
x
} else if (is_bare_logical(x)) {
vec_coerce_bare(x, "integer")
} else if (is_bare_integer(x)) {
x
} else if (is_bare_double(x) || is_bare_character(x)) {
out <- suppressWarnings(vec_coerce_bare(x, "integer"))
warn_cast_lossy_vector(x, to, (out != x) | xor(is.na(x), is.na(out)))
out
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.double <- function(x, to) {
if (is_null(x)) {
x
} else if (is_bare_logical(x) || is_bare_integer(x)) {
vec_coerce_bare(x, "double")
} else if (is_bare_double(x)) {
x
} else if (is_bare_character(x)) {
out <- suppressWarnings(vec_coerce_bare(x, "double"))
warn_cast_lossy_vector(x, to, (out != x) | xor(is.na(x), is.na(out)))
out
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.character <- function(x, to) {
if (is_null(x)) {
x
} else if (is_bare_atomic(x)) {
vec_coerce_bare(x, "character")
} else if (inherits(x, "difftime")) {
paste(x, units(x))
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
as.character(x)
}
}
#' @export
vec_cast.list <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is_list_of(x)) {
warn_cast_lossy(from = x, to = to)
as.list(x)
} else {
as.list(x)
}
}
#' @export
vec_cast.list_of <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is_list_of(x) || is_bare_list(x)) {
as_list_of(x, .type = attr(to, "type"))
} else {
abort_no_cast(x, to)
}
}
# S3 vectors --------------------------------------------------------------
#' @export
vec_cast.factor <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is.character(x) || is.factor(x)) {
if (length(levels(to)) == 0L) {
factor(as.character(x), levels = unique(x), ordered = is.ordered(to))
} else {
warn_cast_lossy_vector(x, to, !x %in% levels(to))
factor(x, levels = levels(to), ordered = is.ordered(to))
}
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.Date <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is_bare_double(x)) {
as.Date(x, origin = "1970-01-01")
} else if (is_bare_character(x)) {
as.Date(x, format = "%Y-%m-%d")
} else if (inherits(x, "Date")) {
x
} else if (inherits(x, "POSIXt")) {
out <- as.Date(x)
warn_cast_lossy_vector(x, to, abs(x - as.POSIXct(out)) > 1e-9)
out
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.POSIXt <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is_bare_double(x)) {
x <- as.POSIXct(x, origin = "1970-01-01")
attr(x, "tzone") <- attr(to, "tzone")
x
} else if (is_bare_character(x)) {
as.POSIXct(x, tz = "UTC")
} else if (inherits(x, "Date")) {
x <- as.POSIXct(x)
attr(x, "tzone") <- attr(to, "tzone")
x
} else if (inherits(x, "POSIXt")) {
attr(x, "tzone") <- attr(to, "tzone")
x
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.difftime <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is_bare_double(x)) {
structure(
as.double(x), # strip attributes
class = "difftime",
units = units(to)
)
} else if (inherits(x, "difftime")) {
if (identical(units(x), units(to))) {
x
} else {
# Hack: I can't see any obvious way of changing the units
origin <- as.POSIXct(0, origin = "1970-01-01")
difftime(origin, origin - x, units = units(to))
}
} else if (is.list(x)) {
cast_from_list(x, to)
} else {
abort_no_cast(x, to)
}
}
#' @export
vec_cast.data.frame <- function(x, to) {
if (is_null(x)) {
NULL
} else if (is.data.frame(x)) {
# Coerce common columns
common <- intersect(names(x), names(to))
x[common] <- map2(x[common], to[common], vec_cast)
# Add new columns
from_type <- setdiff(names(to), names(x))
x[from_type] <- map(to[from_type], vec_na, n = vec_length(x))
# Warn about dropped columns
dropped <- setdiff(names(x), names(to))
if (length(dropped) > 0) {
warn_cast_lossy_dataframe(x, to, dropped)
}
x[names(to)]
} else {
abort_no_cast(x, to)
}
}
# Helpers -----------------------------------------------------------------
cast_from_list <- function(x, to) {
ns <- map_int(x, length)
if (any(ns != 1)) {
abort_no_cast(x, to, "All list elements are not length 1")
}
n <- length(x)
out <- vec_na(to, n)
for (i in seq_len(n)) {
out[[i]] <- vec_cast(x[[i]], to)
}
out
}
abort_no_cast <- function(from, to, details = NULL) {
from <- as_vec_type(from)
to <- as_vec_type(to)
msg <- glue::glue("Can't cast {from} to {to}")
if (!is.null(details)) {
msg <- paste0(msg, "\n", details)
}
abort(
"error_no_cast",
message = msg,
from = from,
to = to,
details = details
)
}
warn_cast_lossy <- function(message = NULL, .subclass = NULL, from, to, ..., class) {
from <- as_vec_type(from)
to <- as_vec_type(to)
if (is.null(message)) {
message <- glue::glue("Lossy conversion from {from} to {to}")
}
warn(
c(.subclass, "warning_cast_lossy"),
message = message,
from = from,
to = to,
...
)
}
warn_cast_lossy_vector <- function(from, to, is_lossy) {
which <- which(is_lossy)
if (length(which) == 0) {
return()
}
from <- as_vec_type(from)
to <- as_vec_type(to)
pos <- glue::glue_collapse(which, width = 80)
msg <- glue::glue("
Lossy conversion from {from} to {to}
At positions: {pos}"
)
warn_cast_lossy(
"warning_cast_lossy_vector",
message = msg,
from = from,
to = to,
which = which
)
}
warn_cast_lossy_dataframe <- function(from, to, dropped) {
from <- as_vec_type(from)
to <- as_vec_type(to)
vars <- glue::glue_collapse(dropped, width = 80)
msg <- glue::glue("
Lossy conversion from data.frame to data.frame
Dropped variables: {vars}"
)
warn_cast_lossy(
"warning_cast_lossy_dataframe",
message = msg,
from = from,
to = to,
dropped = dropped
)
}
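# Quick checks of the dispatch above (sketch; these assume the package's
# internal helpers such as vec_coerce_bare() and vec_na() are loaded, e.g. via
# devtools::load_all(), so they are left commented out):
# vec_cast(list(1, 2, 3), integer())  # routed through cast_from_list()
# vec_cast(c("1.5", "x"), double())   # warns: lossy conversion at position 2
# vec_cast(NULL, character())         # is_null() branch returns the NULL input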
|
/R/cast.R
|
no_license
|
gergness/vctrs
|
R
| false
| false
| 8,561
|
r
|
#' @title ATA Package Large Example Item Metadata
#' @description Sample data used to demonstrate automated test assembly.
#' @format A data frame with 1096 rows and 44 variables:
#' \describe{
#' \item{Item}{Unique item identifier, alpha-numeric index code.}
#'   \item{Content}{Content label, a factor identifying the content area ("A" through "Z").}
#' \item{Content_A}{Dummy code for content "A", 0 and 1 indicators.}
#' \item{Content_B}{Dummy code for content "B", 0 and 1 indicators.}
#' \item{Content_C}{Dummy code for content "C", 0 and 1 indicators.}
#' \item{Content_D}{Dummy code for content "D", 0 and 1 indicators.}
#' \item{Content_E}{Dummy code for content "E", 0 and 1 indicators.}
#' \item{Content_F}{Dummy code for content "F", 0 and 1 indicators.}
#' \item{Content_G}{Dummy code for content "G", 0 and 1 indicators.}
#' \item{Content_H}{Dummy code for content "H", 0 and 1 indicators.}
#' \item{Content_I}{Dummy code for content "I", 0 and 1 indicators.}
#' \item{Content_J}{Dummy code for content "J", 0 and 1 indicators.}
#' \item{Content_K}{Dummy code for content "K", 0 and 1 indicators.}
#' \item{Content_L}{Dummy code for content "L", 0 and 1 indicators.}
#' \item{Content_M}{Dummy code for content "M", 0 and 1 indicators.}
#' \item{Content_N}{Dummy code for content "N", 0 and 1 indicators.}
#' \item{Content_O}{Dummy code for content "O", 0 and 1 indicators.}
#' \item{Content_P}{Dummy code for content "P", 0 and 1 indicators.}
#' \item{Content_Q}{Dummy code for content "Q", 0 and 1 indicators.}
#' \item{Content_R}{Dummy code for content "R", 0 and 1 indicators.}
#' \item{Content_S}{Dummy code for content "S", 0 and 1 indicators.}
#' \item{Content_T}{Dummy code for content "T", 0 and 1 indicators.}
#' \item{Content_U}{Dummy code for content "U", 0 and 1 indicators.}
#' \item{Content_V}{Dummy code for content "V", 0 and 1 indicators.}
#' \item{Content_W}{Dummy code for content "W", 0 and 1 indicators.}
#' \item{Content_X}{Dummy code for content "X", 0 and 1 indicators.}
#' \item{Content_Y}{Dummy code for content "Y", 0 and 1 indicators.}
#' \item{Content_Z}{Dummy code for content "Z", 0 and 1 indicators.}
#' \item{p}{Item proportion correct responding.}
#'   \item{rpbis}{Item-total point biserial correlation, a rounded decimal correlation in the range -1.00 to 1.00.}
#' \item{iSx}{Item contribution to total composite standard deviation.}
#' \item{Time}{Observed median item response time, in seconds.}
#' \item{Choices}{Number of response choices in the multiple-choice question.}
#' \item{Answer}{Correct answer key. In the multiple-choice questions answer 1 = A, 2 = B, and so on.}
#' \item{Skill}{Formative insight skill classification.}
#' \item{Skill_1}{Formative skill insight dummy code for skill 1 (S1): Interpretive.}
#' \item{Skill_2}{Formative skill insight dummy code for skill 2 (S2): Factual.}
#' \item{Skill_3}{Formative skill insight dummy code for skill 3 (S3): Evaluative.}
#' \item{IIF_m2}{Item Response Theory (IRT) item information function value at a theta = -2.0.}
#' \item{IIF_m1}{Item Response Theory (IRT) item information function value at a theta = -1.0.}
#' \item{IIF_0}{Item Response Theory (IRT) item information function value at a theta = 0.0.}
#' \item{IIF_1}{Item Response Theory (IRT) item information function value at a theta = 1.0.}
#' \item{IIF_2}{Item Response Theory (IRT) item information function value at a theta = 2.0.}
#' \item{Parent0}{Passage based item set parent identification.}
#' }
#'
"metadata_large_example"
|
/R/metadata_large_example.R
|
no_license
|
cran/ata
|
R
| false
| false
| 3,625
|
r
|
household_power_consumption <- read.csv("~/Data_science_specialization/Exploratory_data_analysis/household_power_consumption.txt", sep=";", na.strings = "?")
hpc<- subset(household_power_consumption, Date=="1/2/2007" | Date== "2/2/2007")
#The Date and Time columns are combined and converted to a single date-time variable
hpc$DateTime<-strptime(paste(hpc$Date, hpc$Time), "%d/%m/%Y %H:%M:%S", tz="GMT")
hpc$Weekdays<-weekdays(hpc$DateTime)
#A png file is created
png(filename="plot2.png", width=480, height=480)
#The plot is created
with(hpc, plot(DateTime,Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)"))
dev.off()
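#Note (sketch): strptime() returns POSIXlt, which plot() accepts here, but
#as.POSIXct() is usually the safer class to keep in a data frame column. An
#equivalent construction would be:
#hpc$DateTime <- as.POSIXct(paste(hpc$Date, hpc$Time), format="%d/%m/%Y %H:%M:%S", tz="GMT")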
|
/plot2.R
|
no_license
|
jmzam-origins/ExData_Plotting1
|
R
| false
| false
| 642
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate.R
\name{calculate}
\alias{calculate}
\title{calculate greta arrays given fixed values}
\usage{
calculate(
...,
values = list(),
nsim = NULL,
seed = NULL,
precision = c("double", "single"),
trace_batch_size = 100
)
}
\arguments{
\item{...}{one or more greta_arrays for which to calculate the value}
\item{values}{a named list giving temporary values of the greta arrays with
which \code{target} is connected, or a \code{greta_mcmc_list} object
returned by \code{\link[=mcmc]{mcmc()}}.}
\item{nsim}{an optional positive integer scalar for the number of responses
to simulate if stochastic greta arrays are present in the model - see
Details.}
\item{seed}{an optional seed to be used in set.seed immediately before the
simulation so as to generate a reproducible sample}
\item{precision}{the floating point precision to use when calculating values.}
\item{trace_batch_size}{the number of posterior samples to process at a time
when \code{target} is a \code{greta_mcmc_list} object; reduce this to
reduce memory demands}
}
\value{
Values of the target greta array(s), given values of the greta arrays
on which they depend (either specified in \code{values} or sampled from
their priors). If \code{values} is a
\code{\link[=mcmc]{greta_mcmc_list()}} and \code{nsim = NULL}, this will
be a \code{greta_mcmc_list} object of posterior samples for the target
greta arrays. Otherwise, the result will be a named list of numeric R
arrays. If \code{nsim = NULL} the dimensions of returned numeric R arrays
will be the same as the corresponding greta arrays, otherwise an additional
dimension with \code{nsim} elements will be prepended, to represent
multiple simulations.
}
\description{
Calculate the values that greta arrays would take, given
temporary, or simulated values for the greta arrays on which they depend.
This can be used to check the behaviour of your model, make predictions to
new data after model fitting, or simulate datasets from either the prior or
posterior of your model.
}
\details{
The greta arrays named in \code{values} need not be variables; they
can also be other operations or even data.
At present, if \code{values} is a named list it must contain values for
\emph{all} of the variable greta arrays with which \code{target} is
connected, even if values are given for intermediate operations, or if the target
doesn't depend on the variable. That may be relaxed in a future release.
If the model contains stochastic greta arrays (those with a distribution),
calculate can be used to sample from these distributions (and all greta
arrays that depend on them) by setting the \code{nsim} argument to a
positive integer for the required number of samples. If \code{values} is
specified (either as a list of fixed values or as draws), those values will
be used, and remaining variables will be sampled conditional on them.
Observed data with distributions (i.e. response variables defined with
\code{distribution()}) can also be sampled, provided they are defined as
greta arrays. This behaviour can be used for a number of tasks, like
simulating datasets for known parameter sets, simulating parameters and
data from a set of priors, or simulating datasets from a model posterior.
See some examples of these below.
}
\examples{
\dontrun{
# define a variable greta array, and another that is calculated from it
# then calculate what value y would take for different values of x
x <- normal(0, 1, dim = 3)
a <- lognormal(0, 1)
y <- sum(x^2) + a
calculate(y, values = list(x = c(0.1, 0.2, 0.3), a = 2))
# by setting nsim, you can also sample values from their priors
calculate(y, nsim = 3)
# you can combine sampling and fixed values
calculate(y, values = list(a = 2), nsim = 3)
# if the greta array only depends on data,
# you can pass an empty list to values (this is the default)
x <- ones(3, 3)
y <- sum(x)
calculate(y)
# define a model
alpha <- normal(0, 1)
beta <- normal(0, 1)
sigma <- lognormal(1, 0.1)
y <- as_data(iris$Petal.Width)
mu <- alpha + iris$Petal.Length * beta
distribution(y) <- normal(mu, sigma)
m <- model(alpha, beta, sigma)
# sample values of the parameters, or different observation data (y), from
# the priors (useful for prior predictive checking) - see also
# ?simulate.greta_model
calculate(alpha, beta, sigma, nsim = 100)
calculate(y, nsim = 100)
# calculate intermediate greta arrays, given some parameter values (useful
# for debugging models)
calculate(mu[1:5], values = list(alpha = 1, beta = 2, sigma = 0.5))
calculate(mu[1:5], values = list(alpha = -1, beta = 0.2, sigma = 0.5))
# simulate datasets given fixed parameter values
calculate(y, values = list(alpha = -1, beta = 0.2, sigma = 0.5), nsim = 10)
# you can use calculate in conjunction with posterior samples from MCMC, e.g.
# sampling different observation datasets, given a random set of these
# posterior samples - useful for posterior predictive model checks
draws <- mcmc(m, n_samples = 500)
calculate(y, values = draws, nsim = 100)
# you can use calculate on greta arrays created even after the inference on
# the model - e.g. to plot response curves
petal_length_plot <- seq(min(iris$Petal.Length),
max(iris$Petal.Length),
length.out = 100
)
mu_plot <- alpha + petal_length_plot * beta
mu_plot_draws <- calculate(mu_plot, values = draws)
mu_est <- colMeans(mu_plot_draws[[1]])
plot(mu_est ~ petal_length_plot,
type = "n",
ylim = range(mu_plot_draws[[1]])
)
apply(mu_plot_draws[[1]], 1, lines,
x = petal_length_plot, col = grey(0.8)
)
lines(mu_est ~ petal_length_plot, lwd = 2)
# trace_batch_size can be changed to trade off speed against memory usage
# when calculating. These all produce the same result, but have increasing
# memory requirements:
mu_plot_draws_1 <- calculate(mu_plot,
values = draws,
trace_batch_size = 1
)
mu_plot_draws_10 <- calculate(mu_plot,
values = draws,
trace_batch_size = 10
)
mu_plot_draws_inf <- calculate(mu_plot,
values = draws,
trace_batch_size = Inf
)
}
}
|
/man/calculate.Rd
|
permissive
|
njtierney/greta
|
R
| false
| true
| 6,036
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots_SCE.R
\name{plot_abundance}
\alias{plot_abundance}
\title{Plot clonotype abundance}
\usage{
plot_abundance(
SCE_in,
clonotype_col = "cdr3_nt",
cluster_col = NULL,
calc_abundances = FALSE,
plot_type = "bar",
yaxis = "percent",
plot_colors = NULL,
plot_lvls = NULL,
label_col = "cdr3",
n_clonotypes = 5,
color_col = NULL,
label_aes = list(),
facet_rows = 1,
facet_scales = "free_x",
...
)
}
\arguments{
\item{SCE_in}{Seurat object containing V(D)J data}
\item{clonotype_col}{meta.data column containing clonotype IDs to use for
calculating clonotype abundance}
\item{cluster_col}{meta.data column containing cluster IDs to use for
grouping cells when calculating clonotype abundance}
\item{calc_abundances}{Default: FALSE. Set to TRUE if the clonotype abundances
are not part of the SCE_in yet.}
\item{plot_type}{Type of plot to create, can be 'bar' or 'line'}
\item{yaxis}{Units to plot on the y-axis, either "frequency" or "percent"}
\item{plot_colors}{Character vector containing colors for plotting}
\item{plot_lvls}{Character vector containing levels for ordering}
\item{label_col}{meta.data column containing labels to use for plot}
\item{n_clonotypes}{Number of clonotypes to label}
\item{color_col}{meta.data column to use for coloring bars}
\item{label_aes}{Named list providing additional label aesthetics (color,
size, etc.)}
\item{facet_rows}{The number of facet rows. Use this argument if plot_type =
'bar'}
\item{facet_scales}{If plot_type = 'bar', this argument passes a scales
specification to facet_wrap, can be "fixed", "free", "free_x", or "free_y"}
\item{...}{Additional arguments to pass to geom_line}
}
\value{
ggplot object
}
\description{
Plot clonotype abundance
}
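% A minimal usage sketch added for illustration: the object `sce` and the
% cluster column "seurat_clusters" are hypothetical, not part of the package.
\examples{
\dontrun{
# label the 5 most abundant clonotypes per cluster as a bar chart,
# computing the abundances on the fly
plot_abundance(
  sce,
  clonotype_col = "cdr3_nt",
  cluster_col = "seurat_clusters",
  calc_abundances = TRUE,
  plot_type = "bar",
  n_clonotypes = 5
)
}
}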
|
/man/plot_abundance.Rd
|
permissive
|
friedue/SCEdjvdj
|
R
| false
| true
| 1,816
|
rd
|
# mark AOIs by distance from their centers ========================== CHECK NEW AOIS
# make function (a sketch follows below)
data$aoi <- "out"
for(ia in 1:naoi){
  # label gaze points whose Euclidean distance from the center of AOI ia
  # falls within that AOI's radius (aoicorr)
  dist_ia <- sqrt((data$gazePointX - aoimat[1,1,ia])^2 + (data$gazePointY - aoimat[1,2,ia])^2)
  data$aoi[dist_ia <= aoicorr[1,ia]] <- aoinames[ia]
}
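# A minimal sketch of the function suggested above (hypothetical name and
# signature, added for illustration): wraps the labelling loop for reuse.
mark_circular_aois <- function(data, aoimat, aoicorr, aoinames, out_label = "out"){
  data$aoi <- out_label
  for(ia in seq_along(aoinames)){
    dist_ia <- sqrt((data$gazePointX - aoimat[1,1,ia])^2 +
                    (data$gazePointY - aoimat[1,2,ia])^2)
    data$aoi[dist_ia <= aoicorr[1,ia]] <- aoinames[ia]
  }
  data
}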
|
/markCircularAOIs.R
|
no_license
|
datigrezzi/rgaze_draft
|
R
| false
| false
| 255
|
r
|
28f340190b84e913d9fc666a1357283a dungeon_i25-m12-u3-v0.pddl_planlen=170.qdimacs 121289 1150637
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1+A1/Database/Kronegger-Pfandler-Pichler/dungeon/dungeon_i25-m12-u3-v0.pddl_planlen=170/dungeon_i25-m12-u3-v0.pddl_planlen=170.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 94
|
r
|
est_mc_cov_latent <-
function(S,X1=NULL,X2=NULL,yv=rep(1,nrow(S)),start=0,tol=10^-8,maxit=1000,out_se=FALSE,output=FALSE,fort=TRUE){
# Preliminaries
check_der = FALSE # to check derivatives
n = sum(yv)
sS = dim(S)
ns = sS[1]
TT = sS[2]
b = max(S)
if(min(S,na.rm=T)>0){
cat("|------------------- WARNING -------------------|\n")
cat("|The first response category must be coded as 0 |\n")
cat("|-----------------------------------------------|\n")
}
if(is.data.frame(S)) stop("Data frame not allowed for S")
if(ns!=length(yv)) stop("dimension mismatch between S and yv")
# Covariate structure and related matrices: initial probabilities
if((b+1) == 2) GBe = as.matrix(c(0,1)) else{
GBe = diag(b+1); GBe = GBe[,-1]
}
if(is.null(X1)){
nc1=0
Xlab = rep(1,ns)
nameBe = NULL
}else{
if(is.vector(X1)) X1 = matrix(X1,ns,1)
nc1 = dim(X1)[2] # number of covariates on the initial probabilities
if(ns!= dim(X1)[1]) stop("dimension mismatch between S and X1")
nameBe = colnames(X1)
out = aggr_data(X1,fort=fort)
Xdis = out$data_dis
if(nc1==1) Xdis = matrix(Xdis,length(Xdis),1)
Xlab = out$label
}
Xndis = max(Xlab)
XXdis = array(0,c(b+1,b*(nc1+1),Xndis))
for(i in 1:Xndis){
if(nc1==0) xdis = 1 else xdis = c(1,Xdis[i,])
XXdis[,,i] = GBe%*%(diag(b)%x%t(xdis))
}
# for the transition probabilities
if(is.null(X2)){
nc2 = 0
Zlab = rep(1,ns*(TT-1))
nameGa = NULL
Zndis = max(Zlab)
}else{
if(TT==2) X2 = array(X2,c(ns,1,dim(X2)[2]))
if(is.matrix(X2)) X2 = array(X2,c(ns,TT-1,1))
nc2 = dim(X2)[3] # number of covariates on the transition probabilities
if(ns!= dim(X2)[1]) stop("dimension mismatch between S and X2")
nameGa = colnames(aperm(X2,c(1,3,2)))
Z = NULL
for(t in 1:(TT-1)) Z = rbind(Z,X2[,t,])
if(nc2==1) Z = as.vector(X2)
out = aggr_data(Z,fort=fort); Zdis = out$data_dis; Zlab = out$label; Zndis = max(Zlab)
if(nc2==1) Zdis=matrix(Zdis,length(Zdis),1)
}
ZZdis = array(0,c(b+1,(b)*(nc2+1),Zndis,b+1))
for(h in 1:(b+1)){
if((b+1)==2){
if(h == 1) GGa = as.matrix(c(0,1)) else GGa = as.matrix(c(1,0))
}else{
GGa = diag(b+1); GGa = GGa[,-h]
}
for(i in 1:Zndis){
if(nc2==0) zdis = 1 else zdis = c(1,Zdis[i,])
ZZdis[,,i,h] = GGa%*%(diag(b)%x%t(zdis))
}
}
# parameters on initial probabilities
if(start==0) be = array(0,(nc1+1)*b)
else if(start==1){
be = c(rnorm(1),rep(0,nc1))
if((b+1)>2) for(h in 2:b) be = c(be,rnorm(1),rep(0,nc1))
}
out = prob_multilogit(XXdis,be,Xlab,fort)
Piv = out$P; Pivdis = out$Pdis
# parameters on transition probabilities
Ga = matrix(0,(nc2+1)*b,b+1)
if(start==0) Ga[1+(0:(b-1))*(nc2+1),] = -log(10)
else if(start==1) Ga[1+(0:(b-1))*(nc2+1),] = -abs(rnorm(b))
PIdis = array(0,c(Zndis,b+1,b+1)); PI = array(0,c(b+1,b+1,ns,TT))
for(h in 1:(b+1)){
tmp = ZZdis[,,,h]
if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
out = prob_multilogit(tmp,Ga[,h],Zlab,fort)
PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1))
}
#updating be
V = matrix(0,ns,b+1)
for(i in 1:ns) V[i,S[i,1]+1]=yv[i]
out = est_multilogit(V,XXdis,Xlab,be,Pivdis,fort=fort)
be = out$be; Pivdis = out$Pdis; Piv = out$P
if(out_se){
iFi = ginv(out$Fi)
sebe = sqrt(diag(iFi))
}
#Updating Ga
U = array(0,c(b+1,b+1,ns,TT))
for(i in 1:ns) for(t in 2:TT){
U[S[i,t-1]+1,S[i,t]+1,i,t] = yv[i]
}
if(out_se) sega = matrix(0,(nc2+1)*b,b+1)
for(h in 1:(b+1)){
UU = NULL
for(t in 2:TT) UU = rbind(UU,t(U[h,,,t]))
tmp = ZZdis[,,,h]
if(nc2==0) tmp = array(tmp,c(b+1,b,Zndis))
tmp2 = PIdis[,,h]
if(Zndis==1) tmp2 = matrix(tmp2,1,b+1)
out = est_multilogit(UU,tmp,Zlab,Ga[,h],tmp2,fort=fort)
PIdis[,,h] = out$Pdis; PI[h,,,2:TT] = array(as.vector(t(out$P)),c(1,b+1,ns,TT-1)); Ga[,h] = out$be
if(out_se){
iFi = ginv(out$Fi)
sega[,h] = sqrt(diag(iFi))
}
}
# Compute log-likelihood
lk = sum(V*log(Piv))+sum(U[,,,2:TT]*log(PI[,,,2:TT]))
# Compute number of parameters
np = b*(nc1+1)
np = np+b*(nc2+1)*(b+1)
aic = -2*lk+np*2
bic = -2*lk+np*log(n)
Be = matrix(be,nc1+1,b)
if (is.null(nameBe)){
if(nc1==0) nameBe = c("Intercept") else nameBe = c("intercept",paste("X1",1:nc1,sep=""))
}else{
nameBe = c("intercept",nameBe)
}
dimnames(Be) = list(nameBe,logit=2:(b+1))
if(out_se) {seBe = matrix(sebe,nc1+1,b); dimnames(seBe) = list(nameBe,logit=2:(b+1))}
if(is.null(nameGa)){
if(nc2==0) nameGa = c("Intercept") else nameGa = c("intercept", paste("X2",1:nc2,sep=""))
}else{
nameGa = c("intercept",nameGa)
}
if((b+1)>2) {
Ga = array(as.vector(Ga),c(nc2+1,b,b+1))
dimnames(Ga) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
}else if((b+1)==2){
dimnames(Ga) = list(nameGa,logit=1:(b+1))
}
if(out_se){
if((b+1)==2){
seGa = matrix(sega,nc2+1,2)
dimnames(seGa) = list(nameGa,logit=1:(b+1))
}else if((b+1)>2){
seGa = array(as.vector(sega),c(nc2+1,b,b+1))
dimnames(seGa) = list(nameGa,logit=2:(b+1),logit=1:(b+1))
}
}
# adjust output
lk = as.vector(lk)
if(output){
dimnames(Piv)=list(subject=1:ns,category=0:b)
dimnames(PI)=list(category=0:b,category=0:b,subject=1:ns,time=1:TT)
}
out = list(lk=lk,Be=Be,Ga=Ga,np=np,aic=aic,bic=bic,
call=match.call())
if(out_se){
out$seBe = seBe
out$seGa = seGa
}
# final output
if(output){
out$PI = PI
out$Piv = Piv
}
#cat(" |-------------|-------------|-------------|-------------|\n");
class(out)="MClatent"
return(out)
}
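# A minimal usage sketch (illustrative values; assumes the package internals
# aggr_data, prob_multilogit, est_multilogit and MASS::ginv are loaded):
#   ns <- 100; TT <- 5
#   S  <- matrix(sample(0:2, ns*TT, replace = TRUE), ns, TT)  # responses coded from 0
#   X1 <- matrix(rnorm(ns), ns, 1)                 # covariate on initial probabilities
#   X2 <- array(rnorm(ns*(TT-1)), c(ns, TT-1, 1))  # covariate on transition probabilities
#   fit <- est_mc_cov_latent(S, X1 = X1, X2 = X2, output = TRUE)
#   fit$Be; fit$Ga; fit$bic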
|
/R/est_mc_cov_latent.R
|
no_license
|
jmorten/LMest
|
R
| false
| false
| 5,633
|
r
|
#start time ALL, to see how long is processing
startTime_ALL <- Sys.time()
#' ## 1 Introduction
#'
#' This project aims to create a movie recommendation system using the MovieLens data set.
#' Information on the available scripts, the implementation system and the processing time can be found in the appendix at the end.
#' ## 2 Method and Analysis
#' ### 2.1 Download Data and Generate Data Sets
#' ### 2.1.1 Install packages and load library
#' First it is necessary to download, install and load the R packages used in this project.
#'
#2.1.1 Install packages and call library for the project
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(lubridate)) install.packages("lubridate", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
if(!require(data.table)) install.packages("data.table", repos = "http://cran.us.r-project.org")
if(!require(dslabs)) install.packages("dslabs", repos = "http://cran.us.r-project.org")
if(!require(ggthemes)) install.packages("ggthemes", repos = "http://cran.us.r-project.org")
if(!require(scales)) install.packages("scales", repos = "http://cran.us.r-project.org")
if(!require(recosystem)) install.packages("recosystem", repos = "http://cran.us.r-project.org")
if(!require(knitr)) install.packages("knitr", repos = "http://cran.us.r-project.org")
if(!require(kableExtra)) install.packages("kableExtra", repos = "http://cran.us.r-project.org")
if(!require(ggplot2)) install.packages("ggplot2", repos = "http://cran.us.r-project.org")
if(!require(gridExtra)) install.packages("gridExtra", repos = "http://cran.us.r-project.org")
if(!require(jjb)) install.packages("jjb", repos = "http://cran.us.r-project.org")
if(!require(naniar)) install.packages("naniar", repos = "http://cran.us.r-project.org")
#2.1.1 Load library
library(tidyverse)
library(lubridate)
library(caret)
library(data.table)
library(dslabs)
library(ggthemes)
library(scales)
library(recosystem)
library(knitr)
library(kableExtra)
library(ggplot2)
library(gridExtra)
library(jjb)
library(naniar)
#make directory figs to save figures
mkdir("figs")
#' ### 2.1.2 Download Data
#' The data source for the project is a 10M version of the MovieLens data sets
#' (http://files.grouplens.org/datasets/movielens/ml-10m.zip)
#' The 10M MovieLens files are downloaded so that they can be used to create the data sets used in the project.
#' ### 2.1.3 Generate Data Sets
#2.1.2 Download data
#download data
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
#2.1.3 Generate datasets
#Split the downloaded MovieLens dataset into
#edx set 90% and
#validation set 10%
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
## if using R 3.6 or earlier
##movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
## title = as.character(title),
## genres = as.character(genres))
# if using R 4.0 or later
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(movieId),
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
#Split edx set in two sets -
#train_edx with 80% and
#test_edx with 20% of edx set data
set.seed(1, sample.kind="Rounding")
edx_test_index <- createDataPartition(y = edx$rating, times = 1, p = 0.2, list = FALSE)
train_edx <- edx[-edx_test_index, ]
temp_2 <- edx[edx_test_index, ]
#Make sure userId and movieId in test_edx set are also in train_edx set
test_edx <- temp_2 %>%
semi_join(train_edx, by = "movieId") %>%
semi_join(train_edx, by = "userId")
# Add rows removed from test_edx set back into train set
remove <- anti_join(temp_2, test_edx)
train_edx <- rbind(train_edx, remove)
rm(edx_test_index, temp_2, remove)
#' ### 2.2 Exploration and Visualization
#' Insight into the basic characteristics and properties of the data
#' is obtained by inspecting the edx data set.
#head(edx)
kable(edx[1:5, ], caption="edx") %>% kable_styling(latex_options = "hold_position", font_size = 8)
str(edx)
#Classes ‘data.table’ and 'data.frame': 9000055 obs. of 6 variables:
# $ userId : int 1 1 1 1 1 1 1 1 1 1 ...
# $ movieId : num 122 185 292 316 329 355 356 362 364 370 ...
# $ rating : num 5 5 5 5 5 5 5 5 5 5 ...
# $ timestamp: int 838985046 838983525 838983421 838983392 838983392 838984474 838983653 ...
# $ title : chr "Boomerang (1992)" "Net, The (1995)" "Outbreak (1995)" ...
# $ genres : chr "Comedy|Romance" "Action|Crime|Thriller" "Action|Drama|Sci-Fi|Thriller" ...
#' ### 2.2.1 About Rating
#' To see how often each rating value is used:
edx %>% group_by(rating) %>%
summarise(count=n()) %>%
ggplot(aes(x=rating, y=count)) +
geom_col()+
xlab("rating") +
ylab("count") +
theme(text = element_text(size=9)) +
scale_y_continuous(labels = comma) +
scale_x_continuous(breaks=seq(0, 5, by= 0.5)) +
ggtitle("Rating Count")+
theme(plot.title = element_text(size = 9))
kable(edx %>% group_by(rating) %>% summarize(n=n()) %>% t(), format.args = list(big.mark = ",", scientific = FALSE) , caption= "Rating count", align = "llllllllll") %>% kable_styling(latex_options = "hold_position", font_size = 8)
#' ### 2.2.2 About MovieId
format(n_distinct(edx$movieId), big.mark= ',')
#' There are 10,677 different movies in the edx data set
#' and their distribution by number of ratings is shown in the graph below.
#Ratings distribution by movie
edx %>% group_by(movieId) %>%
summarise(n=n()) %>%
ggplot(aes(n)) +
geom_histogram(bins=20) +
ggtitle("Ratings distribution by movie")+
theme(plot.title = element_text(size = 9)) +
scale_x_log10() +
theme(text = element_text(size=9)) +
xlab("rating count") +
ylab("movieId count")
#' ### 2.2.3 About UserId
format(n_distinct(edx$userId), big.mark= ',')
#' There are 69,878 different users in the edx set.
#Ratings distribution by user
edx %>% group_by(userId) %>%
summarise(n=n()) %>%
ggplot(aes(n)) +
geom_histogram(bins=20) +
scale_x_log10() +
theme(text = element_text(size=9)) +
xlab("rating count") +
ylab("userId count") +
ggtitle("Ratings distribution by user")+
theme(plot.title = element_text(size = 9))
#' ### 2.2.4 About Year Rated (timestamp)
#'
#' The timestamp variable represents the time and date at which the rating was provided.
#Mean ratings by year rated
edx %>% mutate(year_rated = year(as_datetime(timestamp))) %>%
group_by(year_rated) %>%
summarise(rating =mean(rating)) %>%
ggplot(aes(year_rated,rating)) +
geom_point() +
geom_smooth(method = 'loess', se = FALSE, span = 2/3) +
theme(text = element_text(size=9)) +
ggtitle("Mean ratings by year rated") +
theme(plot.title = element_text(size = 9))
#' ### 2.2.5 About Genres
n_distinct((edx$genres))
#' The edx data set has 797 distinct values of the genres variable,
#' most of them combinations of several individual genres.
#' To analyse individual genres, we will split the genres information
#' into multiple rows, creating the genres_edx data set.
genres_edx <- edx %>% separate_rows(genres, sep ="\\|")
table_2 <- tibble(data_set="edx", rows=format(nrow(edx), big.mark= ','))
table_2 <- bind_rows(table_2, tibble(data_set="genres_edx", rows=format(nrow(genres_edx), big.mark= ',')))
print.data.frame(table_2)
#Number of ratings genres
p1 <- genres_edx %>%
group_by(genres) %>%
summarize(n=n()) %>%
mutate(genres=reorder(genres,n)) %>%
ggplot(aes(x=n, y=genres)) +
geom_bar(stat = "identity") +
ggtitle("Number of ratings genres") +
theme(plot.title = element_text(size = 7)) +
xlab("number of ratings") +
ylab("genres") +
scale_x_continuous(labels = comma) +
theme(text = element_text(size=7))
#Distinct movies by genres
p2 <- genres_edx %>%
group_by(genres) %>%
summarize(n=n_distinct(movieId)) %>%
mutate(genres=reorder(genres,n)) %>%
ggplot(aes(x=n, y=genres)) +
geom_bar(stat = "identity") +
ggtitle("Distinct movies by genres") +
theme(plot.title = element_text(size = 7)) +
xlab("distinct movies") +
ylab("genres") +
scale_x_continuous(labels = comma) +
theme(text = element_text(size=7))
grid.arrange(p1, p2, ncol = 2, widths=c(1, 1))
#' The following graph shows the distribution of ratings for each genre.
png(file="figs/fig_1.png", width=480, height=300)
#average rating per each genres
genres_edx %>%
group_by(genres) %>%
boxplot(rating ~ genres, ., las=2, cex.axis = 0.8)
dev.off()
include_graphics("figs/fig_1.png", auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 2.2.6 About Movie Release Year
#'
#' The title column in the edx data set contains the title of the movie and also its release year.
##movie release year
movie_relase_year_edx <- edx %>%
mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2)))
kable(movie_relase_year_edx[1:5, ] ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
#' The distribution of ratings according to the year in which the movie was released:
png(file="figs/fig_2.png", width=480, height=300)
#average rating of movies according to release year
movie_relase_year_edx %>%
group_by(movie_relase_year) %>%
boxplot(rating ~ movie_relase_year, ., las=2, cex.axis = 0.6)
dev.off()
include_graphics("figs/fig_2.png", auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#Mean ratings by movie release year
edx %>% mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2))) %>%
group_by(movie_relase_year) %>%
summarise(rating = mean(rating)) %>%
ggplot(aes(movie_relase_year, rating)) +
geom_point() +
geom_smooth(method = 'loess', span = 2/3) +
ggtitle("Mean ratings by movie relase year") +
theme(plot.title = element_text(size = 9))
#' ### 2.2.7 User - Movie Matrix
#user - movie rating matrix
matrix_all_edx <- edx %>%
select(userId, movieId, rating) %>%
mutate(rating = 1) %>%
spread(movieId, rating) %>% as.matrix()
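#Note: with 69,878 users and 10,677 movies this dense matrix holds roughly
#7.5e8 cells (about 6 GB of doubles), so this step is memory-hungry;
#a sparse representation would be far cheaper.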
#' The table below shows how much empty data there is in the matrix (matrix_all_edx).
#all
lenght_2 <- length(matrix_all_edx)
#empty
lenght_2_na <- length(matrix_all_edx[is.na(matrix_all_edx)])
#filled
lenght_2_not_na <- length(matrix_all_edx[!is.na(matrix_all_edx)])
table_2_7 <- tibble(data="all", rows=format(lenght_2, big.mark= ','))
table_2_7 <- bind_rows(table_2_7, tibble(data="empty", rows=format(lenght_2_na, big.mark= ',')))
table_2_7 <- bind_rows(table_2_7, tibble(data="filled", rows=format(lenght_2_not_na, big.mark= ',')))
print.data.frame(table_2_7)
#' The figure below shows, for 100 randomly selected users and movies,
#' which movies are rated by which users (filled fields); unrated movies appear as empty fields.
png(file="figs/fig_3.png", width=480, height=270)
#user - movie matrix
edx %>% filter(userId %in% sample(unique(edx$userId), 100)) %>%
select(userId, movieId, rating) %>%
mutate(rating = 1) %>%
spread(movieId, rating) %>%
select(sample(ncol(.), 100)) %>%
as.matrix() %>% t(.) %>%
image(1:100, 1:100, ., xlab="movies", ylab="users")
abline(h=0:100+0.5, v=0:100+0.5, col = "grey")
#title("user - movie matrix")
dev.off()
include_graphics("figs/fig_3.png", auto_pdf = getOption("knitr.graphics.auto_pdf", FALSE), dpi=NA)
#' ### 2.3 Preprocessing, Data Cleaning and Preparation
#preparing data sets
edx <- edx %>% select(userId, movieId, rating, title) %>%
mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2)))
train_edx <- train_edx %>% select(userId, movieId, rating, title) %>%
mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2)))
test_edx <- test_edx %>% select(userId, movieId, rating, title) %>%
mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2)))
validation <- validation %>% select(userId, movieId, rating, title) %>%
mutate (movie_relase_year=as.numeric(str_sub(title, -5, -2)))
kable(edx[1:5, ], caption='modeling information' ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
table_2_3_1 <- tibble(MovieLens_split="edx", rows=format(nrow(edx), big.mark= ',' ))
table_2_3_1 <- bind_rows(table_2_3_1, tibble(MovieLens_split="validation", rows=format(nrow(validation), big.mark= ',' )))
table_2_3_1b <- tibble(edx_split="train_edx", rows=format(nrow(train_edx), big.mark= ',' ))
table_2_3_1b <- bind_rows(table_2_3_1b, tibble(edx_split="test_edx", rows=format(nrow(test_edx), big.mark= ',' )))
kable( list(table_2_3_1, table_2_3_1b), caption = 'Data sets', booktabs = TRUE, valign = 't') %>% kable_styling(latex_options = "hold_position", font_size = 8)
#' ### 2.4 Modeling Approach
#' Each model is trained on "train_edx" and then tested on "test_edx" data
#' and finally the resulting Root Mean Square Error (RMSE) value of the model is calculated.
#function that calculates RMSE error
RMSE <- function(true_ratings, predicted_ratings){
sqrt(mean((true_ratings - predicted_ratings)^2))
}
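#quick sanity check of the RMSE function (illustrative values, not part of
#the analysis): a constant error of 0.5 should give an RMSE of 0.5
RMSE(c(3, 4, 5), c(3.5, 4.5, 5.5))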
#' ### 2.4.1 Model 1 - Baseline
#'
#' Model 1 is based only on the ratings data and predicts that every user will give the same rating to every movie.
#calculating the mean rating value
m1_mean <- mean(train_edx$rating)
m1_mean
#result for Model 1 - Baseline
m1_rmse <- RMSE(test_edx$rating, m1_mean)
m1_rmse
#RMSE calculation and listing in the results table
rmse_table <- tibble(Model="1 Baseline", RMSE=RMSE(test_edx$rating, m1_mean))
#print rmse_table
#kable(rmse_table, caption="RMSE results") %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.2 Model 2 - Movie Effect
#' Train Model 2 for movie effect on train_edx data set:
#train model for movie effect
m2_avgs_movie <-
train_edx %>%
group_by(movieId) %>%
summarize(avgs_movie = mean(rating - m1_mean), .groups = "drop")
#' Test Model 2 with movie effect on test_edx data set,
#test movie effect model
pred_m2_avgs_movie <- m1_mean + test_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
.$avgs_movie
#' and calculate RMSE:
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "2 Movie Effect",
RMSE=RMSE(test_edx$rating, pred_m2_avgs_movie)))
#print rmse_table
#kable(rmse_table, caption="RMSE results") %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.3 Model 3 – Movie and User Effects
#train model for movie and user effects
m3_avgs_user <-
train_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
group_by(userId) %>%
summarize(avgs_user = mean(rating - m1_mean - avgs_movie), .groups = "drop")
#test movie and user effects model
pred_m3_avgs_user <- test_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
mutate(pred_m3 = m1_mean + avgs_movie + avgs_user) %>%
.$pred_m3
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "3 Movie and User Effects",
RMSE=RMSE(test_edx$rating, pred_m3_avgs_user)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.4 Model 4 – Movie, User and Release Year Effects
#' Train Model 4 for movie, user and release year effects on train_edx data set:
#train model for movie, user and release year effects
m4_avgs_relase <-
train_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
group_by(movie_relase_year) %>%
summarize(avgs_relase = mean(rating - m1_mean - avgs_movie - avgs_user), .groups = "drop")
#test movie, user and release year effects model
pred_m4_avgs_relase <- test_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
left_join(m4_avgs_relase, by = "movie_relase_year") %>%
mutate(pred_m4 = m1_mean + avgs_movie + avgs_user+avgs_relase) %>%
.$pred_m4
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "4 Movie, User and Release Year Effects",
RMSE=RMSE(test_edx$rating,pred_m4_avgs_relase)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.5 Model 5 – Regularized Movie and User Effects
#regularization function with lambda parameter (movie and user effects)
regularization_m_u2 <- function(lambda, train_set, test_set){
#baseline model
m1_mean <- mean(train_set$rating)
#train movie effect regularized with lambda
m2_avgs_movie <-
train_set %>%
group_by(movieId) %>%
summarize(avgs_movie = sum(rating - m1_mean)/(n()+lambda), .groups = "drop")
#train user effect regularized with lambda
m3_avgs_user <-
train_set %>%
left_join(m2_avgs_movie, by = 'movieId') %>%
group_by(userId) %>%
summarize(avgs_user = sum(rating - m1_mean - avgs_movie)/(n()+lambda), .groups = "drop")
#test regularized movie and user effects model
pred_m3_avgs_user <-
test_set %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
mutate(pred_m3 = m1_mean + avgs_movie + avgs_user) %>%
.$pred_m3
#return RMSE for regularized model with lambda parameter
return(RMSE(pred_m3_avgs_user, test_set$rating))
}
#define a set of lambdas
lambda_set <- seq(0, 10, 0.25)
#calculate RMSE for each lambda
rmses_L <- sapply(lambda_set, regularization_m_u2,
train_set=train_edx, test_set=test_edx)
#take lambda which returns the lowest RMSE, best lambda
lambda <- lambda_set[which.min(rmses_L)]
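#optional visual check (illustrative, not required by the analysis): plot the
#tuning curve to confirm the selected lambda sits at the minimum RMSE
plot(lambda_set, rmses_L, type = "b", xlab = "lambda", ylab = "RMSE")
abline(v = lambda, lty = 2)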
#train Model 5 using best lambda parameter
#train baseline model
m1_mean <- mean(train_edx$rating)
#train regularized movie effect model
m2_avgs_movie <-
train_edx %>%
group_by(movieId) %>%
summarize(avgs_movie = sum(rating - m1_mean)/(n()+lambda), .groups = "drop")
#train regularized user effect model
m3_avgs_user <-
train_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
group_by(userId) %>%
summarize(avgs_user = sum(rating - m1_mean - avgs_movie)/(n()+lambda), .groups = "drop")
#test regularized model
pred_reg_m3_avgs_user <- test_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
mutate(pred_m3 = m1_mean + avgs_movie + avgs_user) %>%
.$pred_m3
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "5 Regularized Movie and User Effects",
RMSE=RMSE(test_edx$rating, pred_reg_m3_avgs_user)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.6 Model 6 – Regularized Movie, User and Release Year Effects
#regularization function with lambda parameter (movie, user, release effects)
regularization_m_u_r4 <- function(lambda, train_set, test_set){
#baseline model
m1_mean <- mean(train_set$rating)
#train movie effect regularized with lambda
m2_avgs_movie <-
train_set %>%
group_by(movieId) %>%
summarize(avgs_movie = sum(rating - m1_mean)/(n()+lambda), .groups = "drop")
#train user effect regularized with lambda
m3_avgs_user <-
train_set %>%
left_join(m2_avgs_movie, by = "movieId") %>%
group_by(userId) %>%
summarize(avgs_user = sum(rating - m1_mean - avgs_movie)/(n()+lambda), .groups = "drop")
#train release year effect regularized with lambda
m4_avgs_relase <-
train_set %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
group_by(movie_relase_year) %>%
summarize(avgs_relase = sum(rating - m1_mean - avgs_movie - avgs_user)/(n()+lambda), .groups = "drop")
#test regularized movie, user and release year effects model
pred_m4_avgs_relase <- test_set %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
left_join(m4_avgs_relase, by = "movie_relase_year") %>%
mutate(pred_m4 = m1_mean + avgs_movie + avgs_user+avgs_relase) %>%
.$pred_m4
#return RMSE for regularized model with lambda parameter
return(RMSE(pred_m4_avgs_relase, test_set$rating))
}
#define a set of lambdas
lambda_set <- seq(0, 10, 0.25)
#calculate RMSE for each lambda
rmses_L <- sapply(lambda_set, regularization_m_u_r4,
train_set=train_edx, test_set=test_edx)
#save best lambda for later, for Final validation
lamda_r_m_u_r <- lambda_set[which.min(rmses_L)]
#take lambda which returns the lowest RMSE, best lambda
lambda <- lambda_set[which.min(rmses_L)]
lambda
#train model using best lambda parameter
#train baseline model
m1_mean <- mean(train_edx$rating)
#train regularized movie effect model
m2_avgs_movie <-
train_edx %>%
group_by(movieId) %>%
summarize(avgs_movie = sum(rating - m1_mean)/(n()+lambda), .groups = "drop")
#train regularized user effect model
m3_avgs_user <-
train_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
group_by(userId) %>%
summarize(avgs_user = sum(rating - m1_mean - avgs_movie)/(n()+lambda), .groups = "drop")
#train regularized release year effect model
m4_avgs_relase <-
train_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
group_by(movie_relase_year) %>%
summarize(avgs_relase = sum(rating - m1_mean - avgs_movie - avgs_user)/(n()+lambda), .groups = "drop")
#test regularized Model 6
pred_reg_m4_avgs_relase <- test_edx %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
left_join(m4_avgs_relase, by = "movie_relase_year") %>%
mutate(pred_m4 = m1_mean + avgs_movie + avgs_user+avgs_relase) %>%
.$pred_m4
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "6 Regularized Movie, User and Release Effects",
RMSE=RMSE(test_edx$rating, pred_reg_m4_avgs_relase)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 2.4.7 Model 7 - Matrix Factorization
#' For matrix factorization the recosystem package is used.
#convert train_edx and test_edx to recosystem input format
mf_train_edx <- with(train_edx, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
mf_test_edx <- with(test_edx, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
#create model object
mf_reco <- recosystem::Reco()
#set seed for randomized values
#find best tuning parameters
#this can take VERY LONG execution time
set.seed(123, sample.kind = "Rounding")
opts_2 <- mf_reco$tune(mf_train_edx, opts = list(dim = c(10, 20, 30),
lrate = c(0.1, 0.2),
costp_l2 = c(0.01, 0.1),
costq_l2 = c(0.01, 0.1),
nthread = 1, niter = 10))
#train model calling train, with best parameters from tune
mf_reco$train(mf_train_edx, opts = c(opts_2$min, nthread = 1, niter = 20))
#test Model 7
pred_mf_reco <- mf_reco$predict(mf_test_edx, out_memory())
head(pred_mf_reco, 5)
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "7 Matrix Factorization",
RMSE=RMSE(test_edx$rating, pred_mf_reco)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ## 3 Results
#' ### 3.1 Final Validation for Model 6 – Regularized Movie, User and Release Year Effects
#Model 6 best lambda, lamda_r_m_u_r
lamda_r_m_u_r
#final train Model 6 using best lambda parameter on edx data set
#train baseline model on edx data set
edx_mean <- mean(edx$rating)
#train regularized movie effect model on edx data set
m2_avgs_movie <-
edx %>%
group_by(movieId) %>%
summarize(avgs_movie = sum(rating - edx_mean)/(n()+lamda_r_m_u_r), .groups = "drop")
#train regularized user effect model on edx data set
m3_avgs_user <-
edx %>%
left_join(m2_avgs_movie, by = 'movieId') %>%
group_by(userId) %>%
summarize(avgs_user = sum(rating - edx_mean - avgs_movie)/(n()+lamda_r_m_u_r), .groups = "drop")
#train regularized release year effect model on edx data set
m4_avgs_relase <-
edx %>%
left_join(m2_avgs_movie, by = 'movieId') %>%
left_join(m3_avgs_user, by = "userId") %>%
group_by(movie_relase_year) %>%
summarize(avgs_relase = sum(rating - edx_mean - avgs_movie - avgs_user)/(n()+lamda_r_m_u_r), .groups = "drop")
#test regularized Model 6 on validation data set
final_pred_reg_m4_avgs_relase <- validation %>%
left_join(m2_avgs_movie, by = "movieId") %>%
left_join(m3_avgs_user, by = "userId") %>%
left_join(m4_avgs_relase, by = "movie_relase_year") %>%
mutate(pred_m4 = edx_mean + avgs_movie + avgs_user + avgs_relase) %>%
.$pred_m4
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "8 Final Validation for Model 6 Regularization m+u+r",
RMSE=RMSE(validation$rating, final_pred_reg_m4_avgs_relase)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.2 Final Validation for Model 7 - Matrix Factorization
#convert edx and validation data sets to recosystem input format
mf_edx <- with(edx, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
mf_validation <- with(validation, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
#create model object
mf_reco_final <- recosystem::Reco()
#set seed for randomized values
#find best tuning parameters
#this can take VERY LONG execution time
set.seed(123, sample.kind = "Rounding")
opts_final <- mf_reco_final$tune(mf_edx, opts = list(dim = c(10, 20, 30),
lrate = c(0.1, 0.2),
costp_l2 = c(0.01, 0.1),
costq_l2 = c(0.01, 0.1),
nthread = 1, niter = 10))
#train model calling train, with best parameters from tune
mf_reco_final$train(mf_edx, opts = c(opts_final$min, nthread = 1, niter = 20))
#test Model 7 on validation data
pred_mf_reco_final <- mf_reco_final$predict(mf_validation, out_memory())
head(pred_mf_reco_final, 5)
#RMSE calculation and listing in the results table
rmse_table <- bind_rows(rmse_table, tibble(Model = "9 Final Validation for Model 7 Matrix Factorization",
RMSE=RMSE(validation$rating, pred_mf_reco_final)))
#print rmse_table
#kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "hold_position", font_size = 8)
kable(rmse_table, caption="RMSE results" ) %>% kable_styling(latex_options = "HOLD_position", font_size = 8)
#' ### 3.3 Rating Prediction with the Final Model 7 - Matrix Factorization
#'
#' With the best of all previously tested models,
#' a recommendation system based on matrix factorization,
#' rating predictions are made on the validation data set.
#prediction with matrix factorization
mf_prediction <- tibble(userId=validation$userId, movieId=validation$movieId,
title = validation$title, rating=validation$rating,
mf_pred_rating = pred_mf_reco_final) %>%
arrange(-mf_pred_rating)
#top 5, highest predicted rating value
head(mf_prediction, 5)
#predict, for userId 35305, ratings of the edx movies that this user did not rate
#create a list of movies from the edx data set that were not rated by userId=35305
#make a list with userId=35305, empty ratings (rating=NA), and the movieId of each movie
#user 35305 rated movies list
edx_35305 <- edx %>% filter(edx$userId==35305)
movieID_35305 <- unique(edx_35305$movieId)
#edx movie list
edx_movieId <- unique(edx$movieId)
#movies which user 35305 did not rate
a_b <- setdiff( unique(edx$movieId), unique(edx_35305$movieId))
predict_movieId <- data.table(movieId=a_b)
#add movie title information
predict_movieId <- left_join(x = predict_movieId, y = edx)
# Remove duplicate rows
predict_movieId <- predict_movieId %>% distinct(movieId, .keep_all= TRUE)
#set all list with userId=35305
predict_movieId$userId[predict_movieId$userId >= 1] <- 35305
#set all list with rating=NA
predict_movieId <- predict_movieId %>%
replace_with_na(replace = list(rating = c(0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5)))
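#Note: the replace_with_na() call above blanks every possible rating value;
#assigning predict_movieId$rating <- NA_real_ directly would give the same result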
#list of non rating movies for userId=35305, rating=NA, movieId, title
edx_movieId_set_35305 <- predict_movieId
#edx_movieId_set_35305 convert to recosystem format
mf_edx_movieId_set_35305 <- with(edx_movieId_set_35305, data_memory(user_index = userId,
item_index = movieId,
rating = rating))
#rating prediction for userId 35305
pred_mf_edx_movieId_set_35305 <- mf_reco_final$predict(mf_edx_movieId_set_35305, out_memory())
head(pred_mf_edx_movieId_set_35305, 5)
mf_prediction_edx_35305 <- tibble(userId=edx_movieId_set_35305$userId,
movieId=edx_movieId_set_35305$movieId,
title = edx_movieId_set_35305$title,
predicted__rating = pred_mf_edx_movieId_set_35305) %>%
arrange(-predicted__rating )
#top 5 predicted for userId 35305
head(mf_prediction_edx_35305 , 5)
#last 5 predicted for userId 35305
tail(mf_prediction_edx_35305 , 5)
#' ## 4 Appendix
#' The project used a Windows 10 computer with an i3 processor and 8 GB of RAM.
#' The program code is written in R (version 4.0.2) and RStudio (version 1.3.1073) was used for development.
#' On the described system,
#' the processing time is about 3.5 hours, with the most time-consuming part, about 90%,
#' being matrix factorization (2.4.7 Model 7 - Matrix Factorization
#' and 3.2 Final Validation for Model 7 - Matrix Factorization).
#end Time ALL
endTime_All <- Sys.time()
#processing time (start-end)
#start time ALL
startTime_ALL
#end Time ALL
endTime_All
|
/MovieLens.R
|
no_license
|
matej-s/MovieLens
|
R
| false
| false
| 33,885
|
r
|
#Benchmark: timing igraph's Erdos-Renyi G(n,m) generator at three sizes, five runs each.
#Note: newer igraph releases deprecate erdos.renyi.game() in favour of sample_gnm().
library(igraph)
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,10000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,10000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,10000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,10000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,10000000,"gnm")
end_time <- Sys.time()
end_time - start_time
print("end of exp1, start 1m, 100m")
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(1000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
print("end of exp2, start 10m 100m")
start_time <- Sys.time()
g <- erdos.renyi.game(10000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(10000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(10000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(10000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
start_time <- Sys.time()
g <- erdos.renyi.game(10000000,100000000,"gnm")
end_time <- Sys.time()
end_time - start_time
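#The five repetitions per configuration above could be collapsed into a helper;
#a sketch (function and object names are ours, not part of the original benchmark):
bench_gnm <- function(n, m, reps = 5) {
  #elapsed seconds for each of `reps` G(n,m) generations
  vapply(seq_len(reps), function(i) {
    system.time(erdos.renyi.game(n, m, "gnm"))[["elapsed"]]
  }, numeric(1))
}
#example at small sizes so it runs quickly:
bench_gnm(1e4, 1e5)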
## ---- end of file: /iGraph/er.r | repo: Yili0616/graph-analytics-comparison | permissive ----
library(numDeriv)
library(MASS)
#' Covariate Balancing Propensity Score (CBPS) for Marginal Structural Models
#'
#' \code{CBMSM} estimates propensity scores such that both covariate balance
#' and prediction of treatment assignment are maximized. With longitudinal
#' data, the method returns marginal structural model weights that can be
#' entered directly into a linear model. The method also handles multiple
#' binary treatments administered concurrently.
#'
#' Fits covariate balancing propensity scores for marginal structural models.
#'
#' ### @aliases CBMSM CBMSM.fit
#' @param formula A formula of the form treat ~ X. The same covariates are used
#' in each time period. At default values, a single set of coefficients is estimated
#' across all time periods. To allow a different set of coefficients for each
#' time period, set \code{time.vary = TRUE}. Data should be sorted by time.
#' @param id A vector which identifies the unit associated with each row of
#' treat and X.
#' @param time A vector which identifies the time period associated with each
#' row of treat and X. All data should be sorted by time.
#' @param data An optional data frame, list or environment (or object coercible
#' by as.data.frame to a data frame) containing the variables in the model. If
#' not found in data, the variables are taken from \code{environment(formula)},
#' typically the environment from which \code{CBMSM} is called. Data should be
#' sorted by time.
#' @param twostep Set to \code{TRUE} (the default) to use a two-step estimator,
#' which will run substantially faster than continuous-updating. Set to
#' \code{FALSE} to use the continuous-updating estimator described by Imai and
#' Ratkovic (2014).
#' @param msm.variance Default is \code{"approx"}, which uses the low-rank
#' approximation of the variance described in Imai and Ratkovic (2014). Set to
#' \code{"full"} to use the full variance matrix.
#' @param time.vary Default is \code{FALSE}, which uses the same coefficients
#' across all time periods. Set to \code{TRUE} to fit one set per time period.
#' @param type "MSM" for a marginal structural model with multiple time
#' periods, or "MultiBin" for multiple binary treatments at the same time
#' period.
#' @param init Default is \code{"opt"}, which uses CBPS and logistic regression
#' starting values and chooses the one that achieves the best balance. Other
#' options are "glm" and "CBPS".
#' @param ... Other parameters to be passed through to \code{optim()}
#' @return \item{weights}{The optimal weights.} \item{fitted.values}{The fitted
#' propensity score for each observation.} \item{y}{The treatment vector used.}
#' \item{x}{The covariate matrix.} \item{id}{The vector id used in CBMSM.fit.}
#' \item{time}{The vector time used in CBMSM.fit.} \item{model}{The model
#' frame.} \item{call}{The matched call.} \item{formula}{The formula supplied.}
#' \item{data}{The data argument.} \item{treat.hist}{A matrix of the treatment
#' history, with each observation in rows and time in columns.}
#' \item{treat.cum}{A vector of the cumulative treatment history, by
#' individual.}
#' @author Marc Ratkovic, Christian Fong, and Kosuke Imai; The CBMSM function
#' is based on the code for version 2.15.0 of the glm function implemented in
#' the stats package, originally written by Simon Davies. This documentation is
#' likewise modeled on the documentation for glm and borrows its language where
#' the arguments and values are the same.
#' @seealso \link{plot.CBMSM}
#' @references
#'
#' Imai, Kosuke and Marc Ratkovic. 2014. ``Covariate Balancing Propensity
#' Score.'' Journal of the Royal Statistical Society, Series B (Statistical
#' Methodology). \url{http://imai.princeton.edu/research/CBPS.html}
#'
#' Imai, Kosuke and Marc Ratkovic. 2015. ``Robust Estimation of Inverse
#' Probability Weights for Marginal Structural Models.'' Journal of the
#' American Statistical Association.
#' \url{http://imai.princeton.edu/research/MSM.html}
#' @examples
#'
#'
#' ##Load Blackwell data
#'
#' data(Blackwell)
#'
#' ## Quickly fit a short model to test
#' form0 <- "d.gone.neg ~ d.gone.neg.l1 + camp.length"
#' fit0<-CBMSM(formula = form0, time=Blackwell$time,id=Blackwell$demName,
#' data=Blackwell, type="MSM", iterations = NULL, twostep = TRUE,
#' msm.variance = "approx", time.vary = FALSE)
#'
#' \dontrun{
#' ##Fitting the models in Imai and Ratkovic (2014)
#' ##Warning: may take a few minutes; setting time.vary to FALSE
#' ##results in a quicker fit but with poorer balance.
#' ##Usually, it is best to use time.vary = TRUE.
#' form1<-"d.gone.neg ~ d.gone.neg.l1 + d.gone.neg.l2 + d.neg.frac.l3 +
#' camp.length + deminc + base.poll + year.2002 +
#' year.2004 + year.2006 + base.und + office"
#'
#' ##Note that init="glm" gives the published results but the default is now init="opt"
#' fit1<-CBMSM(formula = form1, time=Blackwell$time,id=Blackwell$demName,
#' data=Blackwell, type="MSM", iterations = NULL, twostep = TRUE,
#' msm.variance = "full", time.vary = TRUE, init="glm")
#'
#' fit2<-CBMSM(formula = form1, time=Blackwell$time,id=Blackwell$demName,
#' data=Blackwell, type="MSM", iterations = NULL, twostep = TRUE,
#' msm.variance = "approx", time.vary = TRUE, init="glm")
#'
#'
#' ##Assessing balance
#'
#' bal1<-balance.CBMSM(fit1)
#' bal2<-balance.CBMSM(fit2)
#'
#' ##Effect estimation: Replicating Effect Estimates in
#' ##Table 3 of Imai and Ratkovic (2014)
#'
#' lm1<-lm(demprcnt[time==1]~fit1$treat.hist,data=Blackwell,
#' weights=fit1$glm.weights)
#' lm2<-lm(demprcnt[time==1]~fit1$treat.hist,data=Blackwell,
#' weights=fit1$weights)
#' lm3<-lm(demprcnt[time==1]~fit1$treat.hist,data=Blackwell,
#' weights=fit2$weights)
#'
#' lm4<-lm(demprcnt[time==1]~fit1$treat.cum,data=Blackwell,
#' weights=fit1$glm.weights)
#' lm5<-lm(demprcnt[time==1]~fit1$treat.cum,data=Blackwell,
#' weights=fit1$weights)
#' lm6<-lm(demprcnt[time==1]~fit1$treat.cum,data=Blackwell,
#' weights=fit2$weights)
#'
#'
#'
#' ### Example: Multiple Binary Treatments Administered at the Same Time
#' n<-200
#' k<-4
#' set.seed(1040)
#' X1<-cbind(1,matrix(rnorm(n*k),ncol=k))
#'
#' betas.1<-betas.2<-betas.3<-c(2,4,4,-4,3)/5
#' probs.1<-probs.2<-probs.3<-(1+exp(-X1 %*% betas.1))^-1
#'
#' treat.1<-rbinom(n=length(probs.1),size=1,probs.1)
#' treat.2<-rbinom(n=length(probs.2),size=1,probs.2)
#' treat.3<-rbinom(n=length(probs.3),size=1,probs.3)
#' treat<-c(treat.1,treat.2,treat.3)
#' X<-rbind(X1,X1,X1)
#' time<-c(rep(1,nrow(X1)),rep(2,nrow(X1)),rep(3,nrow(X1)))
#' id<-c(rep(1:nrow(X1),3))
#' y<-cbind(treat.1,treat.2,treat.3) %*% c(2,2,2) +
#' X1 %*% c(-2,8,7,6,2) + rnorm(n,sd=5)
#'
#' multibin1<-CBMSM(treat~X,id=id,time=time,type="MultiBin",twostep=TRUE)
#' summary(lm(y~-1+treat.1+treat.2+treat.3+X1, weights=multibin1$w))
#' }
#'
#' @export CBMSM
#'
CBMSM<-function(formula, id, time, data, type="MSM", twostep = TRUE, msm.variance = "approx", time.vary = FALSE, init="opt",...){
if (missing(data))
data <- environment(formula)
call <- match.call()
family <- binomial()
mf <- match.call(expand.dots = FALSE)
m <- match(c("formula", "data"), names(mf), 0L)
mf <- mf[c(1L, m)]
mf$drop.unused.levels <- TRUE
mf[[1L]] <- as.name("model.frame")
mf <- eval(mf, parent.frame())
mt <- attr(mf, "terms")
Y <- model.response(mf, "any")
if (length(dim(Y)) == 1L) {
nm <- rownames(Y)
dim(Y) <- NULL
if (!is.null(nm))
names(Y) <- nm
}
X <- if (!is.empty.model(mt)) model.matrix(mt, mf)#[,-2]
else matrix(, NROW(Y), 0L)
##Format treatment matrix
id<-as.numeric(as.factor(id))
unique.id<-sort(unique(id))
unique.time<-sort(unique(time))
treat.hist<-matrix(NA,nrow=length(unique.id),ncol=length(unique.time))
colnames(treat.hist)<-unique.time
rownames(treat.hist)<-unique.id
#index time by position so codings other than 1..T also work
for(i in seq_along(unique.id)) for(j in seq_along(unique.time))
{
treat.hist[i,j]<-Y[id==unique.id[i] & time==unique.time[j]]
}
#treat.hist.fac<-apply(treat.hist,1,function(x) paste(x, collapse="+"))
cm.treat<-rowSums(treat.hist)
if(type=="MSM") {
MultiBin.fit<-FALSE
}
if(type=="MultiBin"){
MultiBin.fit<-TRUE
X<-cbind(1,X[,apply(X,2,sd)>0])
names.X<-c("Intercept",colnames(X)[-1])
}
fit <- eval(call("CBMSM.fit", treat = Y, X = X, id = id, time=time,
MultiBin.fit = MultiBin.fit, twostep = twostep, msm.variance = msm.variance,
time.vary = time.vary, init = init))
fit$call<-call
fit$formula<-formula
fit$y<-Y
fit$x<-X
fit$id<-id
fit$time<-time
fit$model<-mf
fit$data<-data
fit$treat.hist<-treat.hist
fit$treat.cum<-rowSums(treat.hist)
fit$weights<-fit$weights[time==min(time)]
fit
}
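## A toy illustration (ours, not package code) of the treatment-history reshape
## performed above: with data sorted by time, the treat vector folds column-major
## into a units-by-periods matrix.
if (FALSE) {
  toy.id    <- c(1, 2, 3, 1, 2, 3)   # three units observed in two periods
  toy.time  <- c(1, 1, 1, 2, 2, 2)   # sorted by time, as CBMSM expects
  toy.treat <- c(0, 1, 0, 1, 1, 0)
  toy.hist  <- matrix(toy.treat, nrow = 3, ncol = 2,
                      dimnames = list(unique(toy.id), unique(toy.time)))
  toy.hist            # rows = units, columns = time periods
  rowSums(toy.hist)   # cumulative treatment, as in treat.cum
}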
########################
###Calls loss function
########################
#' CBMSM.fit
#'
#' @param treat A vector of treatment assignments. For N observations over T
#' time periods, the length of treat should be N*T.
#' @param X A covariate matrix. For N observations over T time periods, X
#' should have N*T rows.
#' @param id A vector which identifies the unit associated with each row of
#' treat and X.
#' @param time A vector which identifies the time period associated with each
#' row of treat and X.
#' @param MultiBin.fit A parameter for whether the multiple binary treatments
#' occur concurrently (\code{FALSE}) or over consecutive time periods
#' (\code{TRUE}) as in a marginal structural model. Setting type = "MultiBin"
#' when calling \code{CBMSM} will set MultiBin.fit to \code{TRUE} when
#' CBMSM.fit is called.
#' @param twostep Set to \code{TRUE} to use a two-step estimator, which will
#' run substantially faster than continuous-updating; set to \code{FALSE} to
#' use the continuous-updating estimator described by Imai and Ratkovic
#' (2014).
#' @param msm.variance Default is \code{"approx"}, which uses the low-rank
#' approximation of the variance described in Imai and Ratkovic (2014). Set to
#' \code{"full"} to use the full variance matrix.
#' @param time.vary Default is \code{FALSE}, which uses the same coefficients
#' across all time periods. Set to \code{TRUE} to fit one set per time period.
#' @param init Default is \code{"opt"}, which uses CBPS and logistic regression
#' starting values and chooses the one that achieves the best balance. Other
#' options are "glm" and "CBPS".
#' @param ... Other parameters to be passed through to \code{optim()}
#'
CBMSM.fit<-function(treat, X, id, time, MultiBin.fit, twostep, msm.variance, time.vary, init, ...){
id0<-id
id<-as.numeric(as.factor(id0))
if(msm.variance=="approx") {
full.var<-FALSE
} else if(msm.variance=="full") {
full.var<-TRUE
} else {
stop("msm.variance must be 'approx' or 'full'")
}
X.mat<-X
X.mat<-X.mat[,apply(X.mat,2,sd)>0, drop = FALSE]
##Format design matrix, run glm
glm1<-glm(treat~X.mat,family="binomial")
glm1$coefficients<-CBPS(treat~X.mat, ATT=0,method="exact")$coefficients
##################
##Make SVD matrix of covariates
##and matrix of treatment history
##################
#if(time.vary==FALSE){
X.svd<-X.mat
#X.svd<-apply(X.svd,2,FUN=function(x) (x-mean(x))/sd(x), drop=FALSE)
X.svd<-scale(X.svd) # Edit by Christian; this was causing an error
#X.svd[,c(1,2,7)]<-X.svd[,c(1,2,7)]*10
#keep only left singular vectors with non-negligible singular values (svd computed once)
svd.X<-svd(X.svd)
X.svd<-svd.X$u%*%diag(svd.X$d>0.0001)
X.svd<-X.svd[,apply(X.svd,2,sd)>0,drop=FALSE]
#starting values: logistic regression and CBPS coefficients on the reduced design
glm1<-glm(treat~X.svd,family="binomial")
glm.cb<-CBPS(treat~X.svd, ATT=0,method="exact")$coefficients
glm1<-glm1$coefficients
if(time.vary==TRUE){
#} else{
X.svd<-NULL
for(i in sort(unique(time))){
X.sub<-X.mat[time==i,,drop=FALSE]
#X.sub<-apply(X.sub,2,FUN=function(x) (x-mean(x))/sd(x))
X.sub <- scale(X.sub) # Edit by Christian; this was causing an error
X.sub[is.na(X.sub)]<-0
X.sub<-svd(X.sub)$u%*%diag(svd(X.sub)$d>0.0001)
X.sub<-X.sub[,apply(X.sub,2,sd)>0,drop=FALSE]
X.svd<-rbind(X.svd,X.sub)
}
##Make matrix of time-varying glm starting vals
cbps.coefs<-glm.coefs<-NULL
n.time<-length(unique(time))
for(i in 1:n.time){
glm1<-summary(glm(treat~X.svd, subset=(time==i)))$coefficients[,1]
glm.cb<-CBPS(treat[time==i]~X.svd[time==i,], ATT=0,method="exact")$coefficients
glm.coefs<-cbind(glm.coefs,glm1)
cbps.coefs<-cbind(cbps.coefs,glm.cb)
}
glm.coefs[is.na(glm.coefs)]<-0
cbps.coefs[is.na(cbps.coefs)]<-0
glm1<-as.vector(glm.coefs)
glm.cb<-as.vector(cbps.coefs)
}
##################
## Start optimization
##################
#Twostep is true
msm.loss1<-function(x,...) msm.loss.func(betas=x, X=cbind(1,X.svd), treat=treat, time=time,...)$loss
glm.fit<-msm.loss.func(glm1,X=cbind(1,X.svd),time=time,treat=treat,full.var=full.var,twostep=FALSE)
cb.fit<-msm.loss.func(glm.cb,X=cbind(1,X.svd),time=time,treat=treat,full.var=full.var,twostep=FALSE)
type.fit<-"Returning Estimates from Logistic Regression\n"
if((cb.fit$loss<glm.fit$loss & init=="opt")|init=="CBPS") {
glm1<-glm.cb
glm.fit<-cb.fit
type.fit<-"Returning Estimates from CBPS\n"
}
##Twostep is true; full variance option is passed
#Run twostep regardless for starting vals
#if(twostep==TRUE){
#"$Var.inv" written out in full; the original "$V" relied on R's partial matching
Vcov.inv<-glm.fit
msm.opt<-optim(glm1,msm.loss1,full.var=full.var,Vcov.inv=Vcov.inv$Var.inv,bal.only=TRUE,twostep=TRUE,method="BFGS")
msm.twostep<-msm.fit<-msm.loss.func(msm.opt$par,X=cbind(1,X.svd), treat=treat, time=time, full.var=full.var,Vcov.inv=Vcov.inv$Var.inv,bal.only=TRUE,twostep=TRUE)
l3<-msm.loss.func(glm1,X=cbind(1,X.svd), treat=treat, time=time, full.var=full.var,Vcov.inv=Vcov.inv$Var.inv,bal.only=TRUE,twostep=TRUE)
if((l3$loss<msm.fit$loss) && init=="opt") {
msm.fit<-l3
warning("Optimization did not improve over initial estimates\n")
cat(type.fit)
}
if(twostep==FALSE) {
#start continuous updating from the two-step optimum; the original started from
#msm.fit$par when init=="opt", but msm.loss.func returns no "par" element
msm.opt<-optim(msm.opt$par,msm.loss1,full.var=full.var,bal.only=TRUE,twostep=FALSE,method="BFGS")
msm.fit<-msm.loss.func(msm.opt$par,X=cbind(1,X.svd), treat=treat, time=time, full.var=full.var,Vcov.inv=Vcov.inv$Var.inv,bal.only=TRUE,twostep=FALSE)
l3<-msm.loss.func(glm1,X=cbind(1,X.svd), treat=treat, time=time, full.var=full.var,
Vcov.inv=Vcov.inv$Var.inv,bal.only=TRUE,twostep=FALSE)
if((l3$loss<msm.fit$loss) && init=="opt") {
msm.fit<-l3
warning("Optimization did not improve over initial estimates\n")
cat(type.fit)
}
}
##################
## Calculate unconditional probs and treatment matrix
##################
n.obs<-length(unique(id))
n.time<-length(unique(time))
treat.hist<-matrix(NA,nrow=n.obs,ncol=n.time)
name.cands<-sort(unique(id))
for(i in 1:n.obs) for(j in 1:n.time) treat.hist[i,j]<-treat[id==name.cands[i] & time==sort(unique(time))[j]]
treat.hist.unique<-unique(treat.hist,MAR=1)
treat.unique<-rep(NA,n.obs)
for(i in 1:n.obs) treat.unique[i]<- which(apply(treat.hist.unique,1,FUN=function(x) sum((x-treat.hist[i,])^2) )==0)
treat.unique<-as.factor(treat.unique)
uncond.probs.cand<-rep(0,n.obs)
for(i in 1:n.obs) {for(j in 1:n.obs) {
check<-mean(treat.hist[j,]==treat.hist[i,])==1
if(check) uncond.probs.cand[i]<-uncond.probs.cand[i]+1
}
}
uncond.probs.cand<-uncond.probs.cand/n.obs
###########
##Produce Weights
###########
#"$probs" written out in full; the original "$pr" relied on R's partial matching
wts.out<-rep(uncond.probs.cand/msm.fit$probs,n.time)[time==1]
probs.out<-msm.fit$probs
uncond.probs<-uncond.probs.cand
loss.glm<-glm.fit$loss
loss.msm<-msm.fit$loss
if(loss.glm<loss.msm){
warning("CBMSM fails to improve covariate balance relative to MLE. \n GLM loss: ", glm.fit$loss, "\n CBMSM loss: ", msm.fit$loss, "\n")
}
# I know I'm putting probs.out in the weights and wts.out in the fitted values, but that
# is what Marc said to do
out<-list("weights"=probs.out,"fitted.values"=wts.out,"id"=id0[1:n.obs],"glm.g"=glm.fit$g.all,"msm.g"=msm.fit$g.all,"glm.weights"=(uncond.probs/glm.fit$probs)[time==1])
class(out)<-c("CBMSM","list")
return(out)
}
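## A sketch (ours) of the weight arithmetic used above: a stabilized MSM weight
## divides the empirical probability of a unit's full treatment history by the
## product of its per-period propensities, with toy numbers standing in for
## uncond.probs.cand and msm.fit$probs.
if (FALSE) {
  probs.obs.toy <- c(0.8, 0.6, 0.7)           # P(T_t = observed value | X), t = 1..3
  uncond.toy    <- 0.25                       # empirical P(this treatment history)
  w.toy <- uncond.toy / prod(probs.obs.toy)   # stabilized weight for this unit
  w.toy
}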
########################
###Loss function for MSM
########################
msm.loss.func<-function(betas,X=X,treat=treat,time=time,bal.only=F,time.sub=0,twostep=FALSE, Vcov.inv=NULL,full.var=FALSE,
constant.var=FALSE){
if((length(betas)==dim(X)[2]) ) betas<-rep(betas, dim(X)[2]/length(betas))
time<-time-min(time)+1
unique.time<-sort(unique(time))
n.t<-length(unique.time)
n<-dim(X)[1]/n.t
treat.use<-betas.use<-NULL
X.t<-NULL
for(i in 1:n.t){
betas.use<-cbind(betas.use,betas[1:dim(X)[2]+(i-1)*dim(X)[2] ])
treat.use<-cbind(treat.use,treat[time==unique.time[i]])
X.t<-cbind(X.t,X[time==unique.time[i],])
}
betas<-betas.use
betas[is.na(betas)]<-0
treat<-treat.use
thetas<-NULL
for(i in 1:n.t)
thetas<-cbind(thetas,X[time==i,]%*%betas[,i] )
probs.trim<-.0001
probs<-(1+exp(-thetas))^(-1)
probs<-pmax(probs,probs.trim)
probs<-pmin(probs,1-probs.trim)
probs.obs<-treat*probs+(1-treat)*(1-probs)
w.each<-treat/probs+(1-treat)/(1-probs)#+(treat-probs)^2/(probs*(1-probs))
w.all<-apply(w.each,1,prod)#*probs.uncond
bin.mat<-matrix(0,nrow=(2^n.t-1),ncol=n.t)
for(i in 1:(2^n.t-1)) bin.mat[i,(n.t-length(integer.base.b(i))+1):n.t]<-
integer.base.b(i)
num.valid.outer<-constr.mat.outer<-NULL
for(i.time in 1:n.t){
num.valid<-rep(0,dim(treat)[1])
constr.mat.prop<-constr.mat<-matrix(0,nrow=dim(treat)[1],ncol=dim(bin.mat)[1])
for(i in 1:dim(bin.mat)[1]){
is.valid<-sum(bin.mat[i,(i.time):dim(bin.mat)[2]])>0
if(is.valid){
#for(i.wt in i.time:n.t) w.all.now<-w.all.now*1/(1+3*probs[,i.wt]*(1-probs[,i.wt]))
constr.mat[,i]<-(w.all*(-1)^(treat%*%bin.mat[i,]))
num.valid<-num.valid+1
}else{
constr.mat[,i]<-0
}
}
num.valid.outer<-c(num.valid.outer,num.valid)
constr.mat.outer<-rbind(constr.mat.outer,constr.mat)
}
if(twostep==FALSE){
if(full.var==TRUE){
var.big<-0
X.t.big<-matrix(NA,nrow=n.t*dim(X.t)[1],ncol=dim(X.t)[2])
for(i in 1:n.t){X.t.big[1:dim(X.t)[1]+(i-1)*dim(X.t)[1],]<-X.t}
for(i in 1:dim(X.t.big)[1]){
mat1<-(X.t.big[i,])%*%t(X.t.big[i,])
mat2<-constr.mat.outer[i,]%*%t(constr.mat.outer[i,])
var.big<-var.big+mat2 %x%mat1
}
}
}
X.wt<-X.prop<-g.wt<-g.prop<-NULL
for(i in 1:n.t){
g.prop<-c(g.prop, 1/n*t(X[time==i,])%*%(treat[,i]-probs[,i]))
g.wt<-rbind(g.wt,1/n*t(X[time==i,])%*%cbind(constr.mat.outer[time==i,])*(i>time.sub))
X.prop.curr<-matrix(0,ncol=n,nrow=dim(X)[2])
X.wt.curr<-matrix(0,ncol=n,nrow=dim(X)[2])
X.prop<-rbind(X.prop,1/n^.5*t((X[time==i,]*(probs.obs[,i]*(1-probs.obs[,i]))^.5)))
if(bal.only){
X.wt<-rbind(X.wt,1/n^.5*t(X[time==i,]*unique(num.valid.outer)[i]^.5)) } else{
X.wt<-rbind(X.wt,1/n^.5*t(X[time==i,]*w.all^.5*unique(num.valid.outer)[i]^.5))
}
}
mat.prop<-matrix(0,nrow=n, ncol=dim(X.wt)[2])
mat.prop[,1]<-1
g.prop.all<-0*g.wt
g.prop.all[,1]<-g.prop
#g.prop.all<-g.prop
if(bal.only==T) g.prop.all<-0*g.prop.all
g.all<-rbind(g.prop.all,g.wt)
X.all<-rbind(X.prop*(1-bal.only),X.wt)
if(twostep==TRUE){
var.X.inv<-Vcov.inv
if(constant.var==TRUE) var.X.inv<-Vcov.inv*0
} else{
if(full.var==FALSE){
var.X.inv<-ginv((X.all)%*%t(X.all))}else{
var.X.inv<-ginv(var.big/n)
}
}
length.zero<-dim(g.prop.all)[2]#length(g.prop)
#var.X[(length.zero+1):(2*length.zero),1:length.zero]<-0
#var.X[1:length.zero,(length.zero+1):(2*length.zero)]<-0
#print(dim(g.all))
#print(dim(var.X.inv))
if(full.var==TRUE) g.all<-as.vector(g.wt)
loss<-t(g.all)%*%var.X.inv%*%g.all
out=list("loss"=(sum(diag(loss)))*n,"Var.inv"=var.X.inv,"probs"=w.all,"g.all"=g.all)
#t(g.prop)%*%ginv(X.prop%*%t(X.prop))%*%g.prop +sum(diag(t(g.wt)%*%ginv(X.wt%*%t(X.wt))%*%g.wt ))
}#closes msm.loss.func
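## msm.loss.func evaluates a GMM-style quadratic form, loss = n * g' V^- g.
## A minimal self-contained sketch (ours) with toy moment conditions; ginv()
## comes from MASS, loaded at the top of this file.
if (FALSE) {
  g.toy <- c(0.05, -0.02, 0.01)                 # toy sample moment conditions
  V.toy <- diag(3) * 0.1                        # toy covariance of the moments
  drop(t(g.toy) %*% ginv(V.toy) %*% g.toy)      # scalar loss, up to the factor n
}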
########################
###Makes binary representation
########################
integer.base.b <-
function(x, b=2){
xi <- as.integer(x)
if(any(is.na(xi) | ((x-xi)!=0)))
print(list(ERROR="x not integer", x=x))
N <- length(x)
xMax <- max(x)
ndigits <- (floor(logb(xMax, base=b))+1)  #use the requested base, not hard-coded 2
Base.b <- array(NA, dim=c(N, ndigits))
for(i in 1:ndigits){
Base.b[, ndigits-i+1] <- (x %% b)
x <- (x %/% b)
}
if(N ==1) Base.b[1, ] else Base.b
}
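## Usage sketch (ours): binary digits, padded to the widest value in the input.
if (FALSE) {
  integer.base.b(5)         # 1 0 1
  integer.base.b(c(1, 6))   # rows 0 0 1 and 1 1 0
}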
#' @export
balance.CBMSM<-function(object, ...)
{
treat.hist<-matrix(NA,nrow=length(unique(object$id)),ncol=length(unique(object$time)))
ids<-sort(unique(object$id))
times<-sort(unique(object$time))
#index time by its sorted value so codings other than 1..T also work
for(i in 1:length(ids)) {
for(j in 1:length(times)){
treat.hist[i,j]<-object$y[object$id==ids[i] & object$time==times[j]]
}
}
treat.hist.fac<-apply(treat.hist,1,function(x) paste(x, collapse="+"))
bal<-matrix(NA,nrow=(ncol(object$x)-1),ncol=length(unique(treat.hist.fac))*2)
baseline<-matrix(NA,nrow=(ncol(object$x)-1),ncol=length(unique(treat.hist.fac))*2)
cnames<-array()
for (i in 1:length(unique(treat.hist.fac)))
{
for (j in 2:ncol(object$x))
{
bal[j-1,i]<-sum((treat.hist.fac==unique(treat.hist.fac)[i])*object$x[which(object$time == times[1]),j]*object$w)/sum(object$w*(treat.hist.fac == unique(treat.hist.fac)[i]))
#bal[j-1,i]<-sum((treat.hist.fac==unique(treat.hist.fac)[i])*object$x[,j]*object$w)/sum(object$w*(treat.hist.fac == unique(treat.hist.fac)[i]))
# print(c(j,i,bal[j-1,i]))
bal[j-1,i+length(unique(treat.hist.fac))]<-bal[j-1,i]/sd(object$w*object$x[which(object$time == times[1]),j])
#bal[j-1,i+length(unique(treat.hist.fac))]<-bal[j-1,i]/sd(object$w*object$x[,j])
baseline[j-1,i]<-sum((treat.hist.fac==unique(treat.hist.fac)[i])*object$x[which(object$time == times[1]),j]*object$glm.w)/sum(object$glm.w*(treat.hist.fac == unique(treat.hist.fac)[i]))
baseline[j-1,i+length(unique(treat.hist.fac))]<-bal[j-1,i]/sd(object$glm.w*object$x[which(object$time == times[1]),j])
#baseline[j-1,i]<-sum((treat.hist.fac==unique(treat.hist.fac)[i])*object$x[,j]*object$glm.w)/sum(object$glm.w*(treat.hist.fac == unique(treat.hist.fac)[i]))
#baseline[j-1,i+length(unique(treat.hist.fac))]<-bal[j-1,i]/sd(object$glm.w*object$x[,j])
}
bal[is.na(bal)]<-0
baseline[is.na(baseline)]<-0
cnames[i]<-paste0(unique(treat.hist.fac)[i],".mean")
cnames[i+length(unique(treat.hist.fac))]<-paste0(unique(treat.hist.fac)[i],".std.mean")
}
colnames(bal)<-cnames
rnames<-colnames(object$x)[-1]
rownames(bal)<-rnames
colnames(baseline)<-cnames
rownames(baseline)<-rnames
statbal<-sum((bal-bal[,1])*(bal!=0)^2)
statloh<-sum((baseline-baseline[,1])*(baseline!=0)^2)
#return the summary statistics by name; the original list ended with the bare
#string "StatBal" (the "StatUnweighted" name below is our addition)
list("Balanced"=bal, "Unweighted"=baseline, "StatBal"=statbal, "StatUnweighted"=statloh)
}
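## Typical use (a sketch; `fit` stands for a CBMSM object as in the examples above):
if (FALSE) {
  bal <- balance.CBMSM(fit)
  bal$Balanced     # weighted covariate means by treatment history
  bal$Unweighted   # the same means under the baseline GLM weights
}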
#' Plotting CBPS Estimation for Marginal Structural Models
#'
#' Plots the absolute difference in standardized means before and after
#' weighting.
#'
#' Covariate balance is improved if the plot's points are below the plotted
#' line of y=x.
#'
#' @param x an object of class \dQuote{CBMSM}.
#' @param covars Indices of the covariates to be plotted (excluding the
#' intercept). For example, if only the first two covariates from
#' \code{balance} are desired, set \code{covars} to 1:2. The default is
#' \code{NULL}, which plots all covariates.
#' @param silent If set to \code{FALSE}, returns the absolute imbalance for
#' each treatment history pair before and after weighting. This helps the user
#' to create his or her own customized plot. Default is \code{TRUE}, which
#' returns nothing.
#' @param boxplot If set to \code{TRUE}, returns a boxplot summarizing the
#' imbalance on the covariates instead of a point for each covariate. Useful
#' if there are many covariates.
#' @param ... Additional arguments to be passed to plot.
#' @return The x-axis gives the imbalance for each covariate-treatment history
#' pair without any weighting, and the y-axis gives the imbalance for each
#' covariate-treatment history pair after CBMSM weighting. Imbalance is
#' measured as the absolute difference in standardized means for the two
#' treatment histories. Means are standardized by the standard deviation of
#' the covariate in the full sample.
#' @author Marc Ratkovic and Christian Fong
#' @seealso \link{CBMSM}, \link{plot}
#'
#' @export
#'
plot.CBMSM<-function(x, covars = NULL, silent = TRUE, boxplot = FALSE, ...)
{
bal.out<-balance.CBMSM(x)
bal<-bal.out$Balanced
baseline<-bal.out$Unweighted
no.treats<-ncol(bal)/2
if (is.null(covars))
{
covars<-1:nrow(bal)
}
covarlist<-c()
contrast<-c()
bal.std.diff<-c()
baseline.std.diff<-c()
treat.hist.names<-sapply(colnames(bal)[1:no.treats],function(s) substr(s, 1, nchar(s)-5))
for (i in covars)
{
for (j in 1:(no.treats-1))
{
for (k in (j+1):no.treats)
{
covarlist<-c(covarlist, rownames(bal)[i])
contrast<-c(contrast, paste(treat.hist.names[j],treat.hist.names[k],sep=":",collapse=""))
bal.std.diff<-c(bal.std.diff,abs(bal[i,no.treats+j] - bal[i,no.treats+k]))
baseline.std.diff<-c(baseline.std.diff,abs(baseline[i,no.treats+j] - baseline[i,no.treats+k]))
}
}
}
range.x<-range.y<-range(c(bal.std.diff,baseline.std.diff))
if (!boxplot){
plot(x=baseline.std.diff,y=bal.std.diff,asp=1,xlab="Unweighted Regression Imbalance",ylab="CBMSM Imbalance",
xlim=range.x, ylim = range.y, main = "Difference in Standardized Means", ...)
abline(0,1)
}
else{
boxplot(baseline.std.diff, bal.std.diff, horizontal = TRUE, yaxt = 'n', xlab = "Difference in Standardized Means", ...)
axis(side=2, at=c(1,2),c("CBMSM Weighted", "Unweighted"))
}
if(!silent) return(data.frame("Covariate" = covarlist, "Contrast"=contrast, "Unweighted"=baseline.std.diff, "Balanced"=bal.std.diff))
}
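## Typical use (a sketch; `fit` stands for a CBMSM object as in the examples above):
if (FALSE) {
  plot(fit)                          # imbalance before (x) vs after (y) weighting
  plot(fit, boxplot = TRUE)          # boxplot summary, useful with many covariates
  imb <- plot(fit, silent = FALSE)   # also returns the imbalance table
}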
## ---- end of file: /R/CBMSM.R | repo: kosukeimai/CBPS | no_license ----
rm(list = ls(all = TRUE))
x<-date()
print(x)
###########################################################################################
### PROJECT: Immune Repertoire. Analysis B cells antibodies
###
### CITATION:
###
### PROCESS:
###
### DESCRIP: Analysis of the VDJ data
###
###
### Author: Silvia Pineda
### Date: November, 2016
############################################################################################
library("ggplot2")
library(lme4)
library("RColorBrewer")
library("randomForest")
library("VSURF")
library(pheatmap)
library(vcd)
library(gridGraphics)
library(gridExtra)
##################
### Analysis in the clone data matrix
#################
setwd("/Users/Pinedasans/VDJ/ResultsAllClones//")
load("~/VDJ/Data/clones_gDNA.Rdata")
###Clones individual analysis across outcome and time points
##Delete Individual 8
clone_type_gDNA_df_no8<-clone_type_gDNA_df[which(clone_type_gDNA_df$individual_id!="Individual8"),]
id_sample<-match(rownames(clone_type_gDNA_df_no8),rownames(clone_type_gDNA_num_reduced))
clone_type_gDNA_num_reduced_no8<-clone_type_gDNA_num_reduced[id_sample,]
###PCA Analysis
pca<-prcomp(clone_type_gDNA_num_reduced_no8[which(clone_type_gDNA_df_no8$time==24),])
plot(pca, type = "l")
SPP <- clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)]
COLOR <- c("chartreuse4", "dodgerblue3","darkorange2")
pc <- c(1,2)
plot(pca$x[,pc[1]], pca$x[,pc[2]], col=COLOR[SPP],pch=20,xlab="PCA1",ylab="PCA2")
legend(-110,-100, legend=levels(SPP), col=COLOR,pch=20,cex=0.8) #levels.SPP was undefined
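##Note (added): the legend coordinates above are hard-coded for this dataset's PCA score range;
##legend("topright", ...) would be a position-independent alternative.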
#############
### Analysis 1: present/no-present
############
matrix_clones_presence<-apply(clone_type_gDNA_num_reduced_no8,1,function(x) ifelse(x==0,"No Present","Present"))
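#NB: apply(..., 1, ...) transposes the result, so clones index the rows and samples the columns of matrix_clones_presence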
clone_type_gDNA_df_no8$time<-replace(clone_type_gDNA_df_no8$time,clone_type_gDNA_df_no8$time==12,6) #recode the 12-month samples as 6 months so they share a time point
matrix_clones_presence_time0<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==0)]
matrix_clones_presence_time6<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==6)]
matrix_clones_presence_time24<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==24)]
p_value_0=NULL
p_value_6=NULL
p_value_24=NULL
for(i in 1:dim(matrix_clones_presence_time0)[1]){
print(i)
tab<-table(matrix_clones_presence_time0[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)])
if(dim(tab)[1]>1){
p_value_0[i]=fisher.test(tab)$p.value
}
tab<-table(matrix_clones_presence_time6[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)])
if(dim(tab)[1]>1){
p_value_6[i]=fisher.test(tab)$p.value
}
tab<-table(matrix_clones_presence_time24[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)])
if(dim(tab)[1]>1){
p_value_24[i]=fisher.test(tab)$p.value
}
}
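##Note (added): thousands of clones are tested above with no multiplicity correction,
##so unadjusted p<0.05 will include many false positives; a BH/FDR adjustment is one option (sketch):
#q_value_0 <- p.adjust(p_value_0, method = "BH")
#q_value_6 <- p.adjust(p_value_6, method = "BH")
#q_value_24 <- p.adjust(p_value_24, method = "BH")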
splitpop<-strsplit(rownames(matrix_clones_presence_time24),"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
counts<-matrix(c(16, 66, 10348, 107875), nrow = 2) #p-value = 0.5
chisq.test(counts)
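##The 2x2 table above presumably contrasts one V gene against all others between two sequence sets;
##the counts are hard-coded from the table(xx) tallies rather than computed in place.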
##time0
matrix_clones_presence_significant_time0<-matrix_clones_presence_time0[which(p_value_0<0.05),] #8
results_time0<-list()
results_time0_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time0)[1]){
tab<-table(matrix_clones_presence_significant_time0[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)])
id<-match(names(which(matrix_clones_presence_significant_time0[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time0)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time0)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time0_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
results_time0[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
#re-enabled (was commented out) so that the grid.arrange() call below has plots to draw
clone_status<-matrix_clones_presence_significant_time0[i,]
clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)]
tab_str<-structable(clone_status~clinical_outcome)
mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time0)[i], gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time0.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],plots[[5]],plots[[6]], plots[[7]],
plots[[8]],ncol=4)
dev.off()
p_value_0[which(p_value_0<0.05)]
names(results_time0)<-rownames(matrix_clones_presence_significant_time0)
capture.output(print(results_time0), file="clones_fisher_time0.txt") #the cat() wrapper was a no-op
##time6
matrix_clones_presence_significant_time6<-matrix_clones_presence_time6[which(p_value_6<0.05),] #4
results_time6<-list()
results_time6_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time6)[1]){
tab<-table(matrix_clones_presence_significant_time6[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)])
id<-match(names(which(matrix_clones_presence_significant_time6[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time6)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time6)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time6[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
results_time6_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
#re-enabled (was commented out) so that the grid.arrange() call below has plots to draw
clone_status<-matrix_clones_presence_significant_time6[i,]
clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)]
tab_str<-structable(clone_status~clinical_outcome)
mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time6)[i],
gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time6.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],ncol=4)
dev.off()
p_value_6[which(p_value_6<0.05)]
names(results_time6)<-rownames(matrix_clones_presence_significant_time6)
capture.output(print(results_time6), file="clones_fisher_time6.txt") #the cat() wrapper was a no-op
write.csv(names(results_time6),file="clones_results_time6.csv")
##time24
matrix_clones_presence_significant_time24<-matrix_clones_presence_time24[which(p_value_24<0.05),] #21
results_time24<-list()
results_time24_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time24)[1]){
tab<-table(matrix_clones_presence_significant_time24[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)])
id<-match(names(which(matrix_clones_presence_significant_time24[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time24)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time24)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time24_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
results_time24[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
#re-enabled (was commented out) so that the grid.arrange() call below has plots to draw
clone_status<-matrix_clones_presence_significant_time24[i,]
clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)]
tab_str<-structable(clone_status~clinical_outcome)
mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time24)[i],
gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time24.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],plots[[5]],plots[[6]], plots[[7]],
plots[[8]],plots[[9]],plots[[10]],plots[[11]],plots[[12]],plots[[13]],plots[[14]],
plots[[15]],plots[[16]],plots[[17]],plots[[18]],plots[[19]],plots[[20]],plots[[21]],ncol=5)
dev.off()
p_value_24[which(p_value_24<0.05)]
names(results_time24)<-rownames(matrix_clones_presence_significant_time24)
capture.output(print(results_time24), file="clones_fisher_time24.txt") #the cat() wrapper was a no-op
write.csv(names(results_time24),file="clones_results_time24.csv")
###Are IGHV3-23 over-represented among the shared clones?
##NOTE: data_gDNA_long_qc is only constructed in the "Plotting persistence" section below; run that block first
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,names(results_time24))
table(is.na(id)==F) ##268 sequences belong to a persistent clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #49 sequences are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #28750 sequences are IGHV3-23
counts<-matrix(c(49, 268, 297829, 326578), nrow = 2) #p-value = 2.8e-30
chisq.test(counts)
###############
## Look for longitudinal patterns
##############
##Consider only the samples that pass the QC filters
reads_clones_annot_gDNA_qc<-reads_clones_annot[which(reads_clones_annot$clones_gDNA>100),]
clone_type_gDNA_df_no8_qc<-clone_type_gDNA_df_no8[na.omit(match(reads_clones_annot_gDNA_qc$specimen_id,rownames(clone_type_gDNA_df_no8))),]
clone_type_gDNA_num_reduced_no8_qc<-clone_type_gDNA_num_reduced_no8[na.omit(match(reads_clones_annot_gDNA_qc$specimen_id,rownames(clone_type_gDNA_num_reduced_no8))),]
##Obtain the persistence by sample
##Each sample needs at least two time points; persistence is counted across time points
###Delete the time-9 sample (7_S63)
clone_type_gDNA_df_no8_qc_notime9<-clone_type_gDNA_df_no8_qc[which(rownames(clone_type_gDNA_df_no8_qc)!="7_S63"),]
clone_type_gDNA_num_reduced_no8_qc_notime9<-clone_type_gDNA_num_reduced_no8_qc[which(rownames(clone_type_gDNA_num_reduced_no8_qc)!="7_S63"),]
###Measure which individuals have clones across time points
sample<-unique(clone_type_gDNA_df_no8_qc$individual_id)
#drop the individuals (positions 5 and 7) that lack enough time points to assess persistence
sample_filter<-sample[-c(5,7)]
persistance<-list()
for (i in 1:length(sample_filter)){
print(i)
clone_matrix_sample<-clone_type_gDNA_num_reduced_no8_qc[which(clone_type_gDNA_df_no8_qc$individual_id==sample_filter[i]),]
if(dim(clone_matrix_sample)[1]==3){
time0<-clone_matrix_sample[1,which(clone_matrix_sample[1,]!=0)]
time6<-clone_matrix_sample[2,which(clone_matrix_sample[2,]!=0)]
time24<-clone_matrix_sample[3,which(clone_matrix_sample[3,]!=0)]
persistance[[i]]<-intersect(names(time0),names(time6))
persistance[[i]]<-unique(c(persistance[[i]],intersect(names(time6),names(time24))))
persistance[[i]]<-unique(c(persistance[[i]],intersect(names(time0),names(time24))))
} else {
time0<-clone_matrix_sample[1,which(clone_matrix_sample[1,]!=0)]
time6<-clone_matrix_sample[2,which(clone_matrix_sample[2,]!=0)]
persistance[[i]]<-intersect(names(time0),names(time6))
}
}
names(persistance)<-sample_filter
###Are IGHV3-23 over-represented among the persistent clones?
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,unlist(persistance))
table(is.na(id)==F) ##3205 sequences belong to a persistent clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #397 sequences are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #28750 sequences are IGHV3-23
counts<-matrix(c(397, 2808, 297829, 326578), nrow = 2) #p-value < 2.2e-16
chisq.test(counts)
###Number of clones that persist
persistance_number<-NULL
for (i in 1:length(persistance)){
persistance_number[i]<-length(persistance[[i]])
}
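#Equivalent one-liner (base R >= 3.2): persistance_number <- lengths(persistance)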
clinical_outcome<-c("NP","NP","NP","NP","NP","NP","NP","PNR","PNR","PNR","PNR","PNR","PNR","PNR","PNR","PNR",
"PR","PR","PR","PR","PR","PR","PR")
COLOR=c("chartreuse4", "dodgerblue3","darkorange2")
summary(glm(persistance_number~clinical_outcome))
tiff("Boxplot_number_persistance_clones.tiff",res=300,h=1500,w=1000)
boxplot(persistance_number~clinical_outcome,col=c("chartreuse4", "dodgerblue3","darkorange2"),ylab="Number of persistent clones")
dev.off()
##Plotting persistence
#For this analysis we apply a cut-off on clone counts because V-gene usage can be biased at low clonality
id<-match(data_gDNA_long$specimen_label,reads_clones_annot_gDNA_qc$specimen_id)
data_gDNA_long_qc<-data_gDNA_long[which(is.na(id)==F),]
data_gDNA_long_qc$specimen_label<-factor(data_gDNA_long_qc$specimen_label)
clone_df<-data.frame(table(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,data_gDNA_long_qc$specimen_label))
colnames(clone_df)<-c("clone","specimen","count")
colnames(reads_clones_annot)[4]<-c("specimen")
clone_merge<-merge(clone_df,reads_clones_annot[,c(1,3,4,6)],by = "specimen")
clone_df_noceros<-clone_merge[which(clone_merge$count!=0),] #120,574
unique_clones<-unique(unlist(persistance))
id<-NULL
for (i in 1:length(unique_clones)){
print(i)
id<-c(id,grep(unique_clones[i],clone_df_noceros$clone))
}
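##Note (added): grep() does substring matching, so a clone ID that is a prefix of another
##ID can over-match; an exact-match alternative (sketch):
#id <- which(clone_df_noceros$clone %in% unique_clones)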
clone_df_persistace<-clone_df_noceros[id,]
clone_df_persistace$time<-replace(clone_df_persistace$time,clone_df_persistace$time=="32","24")
clone_df_persistace$time<-as.numeric(as.character(clone_df_persistace$time))
g1<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="NP"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
g2<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="PNR"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
g3<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="PR"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
tiff("Clonal_persistant_gDNA_long.tiff",res=300,h=1700,w=2700)
multiplot(g1,g2,g3)
dev.off()
###Expanded clones
expansion_NP<-clone_df_persistace$count[which(clone_df_persistace$clin=="NP")]
expansion_PNR<-clone_df_persistace$count[which(clone_df_persistace$clin=="PNR")]
expansion_PR<-clone_df_persistace$count[which(clone_df_persistace$clin=="PR")]
expansion<-c(expansion_NP,expansion_PNR,expansion_PR)
clinical_outcome_expansion<-c(rep("NP",length(expansion_NP)),rep("PNR",length(expansion_PNR)),rep("PR",length(expansion_PR)))
summary(glm(expansion~clinical_outcome_expansion))
tiff("Boxplot_expansion_persistance_clones.tiff",res=300,h=1500,w=1000)
boxplot(expansion~clinical_outcome_expansion,col=c("chartreuse4", "dodgerblue3","darkorange2"),ylab="Clonal expansion of persistent clones")
dev.off()
##Common clones that persist
library(plyr)
persistance_df <- ldply(persistance, data.frame)
colnames(persistance_df)<-c("Individual","clone")
persistance_df$clin<-c(rep("NP",55),rep("PNR",75),rep("PR",133))
table(duplicated(persistance_df$clone))
colnames(persistance_df)<-c("individual_id","V_J_lenghCDR3_CloneId","clin")
clones<-unique(as.character(persistance_df[which(duplicated(persistance_df$V_J_lenghCDR3_CloneId)==T),"V_J_lenghCDR3_CloneId"]))
clones_list_shared<-list()
for(i in 1:length(clones)){
clones_list_shared[[i]]<-persistance_df[grep(clones[i],persistance_df$V_J_lenghCDR3_CloneId),]
}
###Are IGHV3-23 over-represented among the shared persistent clones?
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,clones)
table(is.na(id)==F) ##1051 sequences belong to a shared persistent clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #140 sequences are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #28750 sequences are IGHV3-23
counts<-matrix(c(140, 1051, 297829, 326578), nrow = 2) #p-value < 2.2e-16
chisq.test(counts)
###Shared sequences before and after transplantation
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,clones)
table(is.na(id)==F) ##1051 sequences belong to a shared persistent clone
shared<-data_gDNA_long_qc[which(is.na(id)==F),c("V_J_lenghCDR3_CloneId","time","clin")]
#NP
shared_NP<-shared[which(shared$clin=="NP"),]
pre<-table(shared_NP[which(shared_NP$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_NP[which(shared_NP$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="NP" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="NP" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
#PNR
shared_PNR<-shared[which(shared$clin=="PNR"),]
pre<-table(shared_PNR[which(shared_PNR$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_PNR[which(shared_PNR$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PNR" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PNR" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
#PR
shared_PR<-shared[which(shared$clin=="PR"),]
pre<-table(shared_PR[which(shared_PR$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_PR[which(shared_PR$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PR" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PR" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
clones_list_shared_df<-do.call(rbind.data.frame, clones_list_shared)
clones_list_shared_df_cdr3aa<-merge(clones_list_shared_df,data_gDNA_long_qc[,c("individual_id","V_J_lenghCDR3_CloneId","clin","cdr3_seq_aa_q")],by=c("individual_id","V_J_lenghCDR3_CloneId","clin"))
clones_list_shared_df_cdr3aa_unique<-unique(clones_list_shared_df_cdr3aa)
clones_list_shared_df_cdr3aa_order<-clones_list_shared_df_cdr3aa_unique[order(clones_list_shared_df_cdr3aa_unique$V_J_lenghCDR3_CloneId),]
write.csv(clones_list_shared_df_cdr3aa_order,"clones_persistence_table3.csv")
/Clone_type_Analysis_gDNA.R | no_license | silviapineda/VDJ | R | false | false | 18,528 | r
rm(list = ls(all = TRUE))
x<-date()
print(x)
###########################################################################################
### PROJECT: Immune Repertoire. Analysis B cells antibodies
###
### CITATION:
###
### PROCESS:
###
### DESCRIP: Analysis of the VDJ data
###
###
### Author: Silvia Pineda
### Date: November, 2016
############################################################################################
library("ggplot2")
library(lme4)
library("RColorBrewer")
library("randomForest")
library("VSURF")
library(pheatmap)
library(vcd)
library(gridGraphics)
library(gridExtra)
##################
### Analysis in the clone data matrix
#################
setwd("/Users/Pinedasans/VDJ/ResultsAllClones//")
load("~/VDJ/Data/clones_gDNA.Rdata")
###Clones individual analysis across outcome and time points
##Delete Individual 8
clone_type_gDNA_df_no8<-clone_type_gDNA_df[which(clone_type_gDNA_df$individual_id!="Individual8"),]
id_sample<-match(rownames(clone_type_gDNA_df_no8),rownames(clone_type_gDNA_num_reduced))
clone_type_gDNA_num_reduced_no8<-clone_type_gDNA_num_reduced[id_sample,]
###PCA Analysis
pca<-prcomp(clone_type_gDNA_num_reduced_no8[which(clone_type_gDNA_df_no8$time==24),])
plot(pca, type = "l")
SPP <- clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)]
COLOR <- c("chartreuse4", "dodgerblue3","darkorange2")
pc <- c(1,2)
plot(pca$x[,pc[1]], pca$x[,pc[2]], col=COLOR[SPP],pch=20,xlab="PCA1",ylab="PCA2")
legend(-110,-100, legend=levels(levels.SPP), col=COLOR,pch=20,cex=0.8)
#############
### Analysis 1: present/no-present
############
matrix_clones_presence<-apply(clone_type_gDNA_num_reduced_no8,1,function(x) ifelse(x==0,"No Present","Present"))
clone_type_gDNA_df_no8$time<-replace(clone_type_gDNA_df_no8$time,clone_type_gDNA_df_no8$time==12,6)
matrix_clones_presence_time0<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==0)]
matrix_clones_presence_time6<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==6)]
matrix_clones_presence_time24<-matrix_clones_presence[,which(clone_type_gDNA_df_no8$time==24)]
p_value_0=NULL
p_value_6=NULL
p_value_24=NULL
for(i in 1:dim(matrix_clones_presence_time0)[1]){
print(i)
tab<-table(matrix_clones_presence_time0[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)])
if(dim(tab)[1]>1){
p_value_0[i]=fisher.test(tab)$p.value
}
tab<-table(matrix_clones_presence_time6[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)])
if(dim(tab)[1]>1){
p_value_6[i]=fisher.test(tab)$p.value
}
tab<-table(matrix_clones_presence_time24[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)])
if(dim(tab)[1]>1){
p_value_24[i]=fisher.test(tab)$p.value
}
}
splitpop<-strsplit(rownames(matrix_clones_presence_time24),"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
counts = (matrix(data = c(16, 66, 10348, 107875), nrow = 2)) #p-value = 0.5
chisq.test(counts)
##time0
matrix_clones_presence_significant_time0<-matrix_clones_presence_time0[which(p_value_0<0.05),] #8
results_time0<-list()
results_time0_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time0)[1]){
tab<-table(matrix_clones_presence_significant_time0[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)])
id<-match(names(which(matrix_clones_presence_significant_time0[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time0)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time0)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time0_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
results_time0[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
#clone_status<-matrix_clones_presence_significant_time0[i,]
#clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==0)]
#tab_str<-structable(clone_status~clinical_outcome)
#mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time0)[i], gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
#plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time0.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],plots[[5]],plots[[6]], plots[[7]],
plots[[8]],ncol=4)
dev.off()
p_value_0[which(p_value_0<0.05)]
names(results_time0)<-rownames(matrix_clones_presence_significant_time0)
cat(capture.output(print(results_time0), file="clones_fisher_time0.txt"))
##time6
matrix_clones_presence_significant_time6<-matrix_clones_presence_time6[which(p_value_6<0.05),] #4
results_time6<-list()
results_time6_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time6)[1]){
tab<-table(matrix_clones_presence_significant_time6[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)])
id<-match(names(which(matrix_clones_presence_significant_time6[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time6)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time6)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time6[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
results_time6_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
#clone_status<-matrix_clones_presence_significant_time6[i,]
#clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==6)]
#tab_str<-structable(clone_status~clinical_outcome)
#mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time6)[i],
# gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
#plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time6.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],ncol=4)
dev.off()
p_value_6[which(p_value_6<0.05)]
names(results_time6)<-rownames(matrix_clones_presence_significant_time6)
cat(capture.output(print(results_time6), file="clones_fisher_time6.txt"))
write.csv(names(results_time6),file="clones_results_time6.csv")
##time24
matrix_clones_presence_significant_time24<-matrix_clones_presence_time24[which(p_value_24<0.05),] #21
results_time24<-list()
results_time24_cdr3<-list()
plots<-list()
for(i in 1:dim(matrix_clones_presence_significant_time24)[1]){
tab<-table(matrix_clones_presence_significant_time24[i,],clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)])
id<-match(names(which(matrix_clones_presence_significant_time24[i,]=="Present")),rownames(clone_type_gDNA_df_no8))
id_clone<-match(rownames(matrix_clones_presence_significant_time24)[i],colnames(clone_type_gDNA_df_no8))
id_clone2<-grep(rownames(matrix_clones_presence_significant_time24)[i],data_gDNA_long$V_J_lenghCDR3_CloneId)
results_time24_cdr3[[i]]<-unique(as.character(data_gDNA_long[id_clone2,"cdr3_seq_aa_q"]))
results_time24[[i]]<-clone_type_gDNA_df_no8[id,c(1:3,id_clone)]
#clone_status<-matrix_clones_presence_significant_time24[i,]
#clinical_outcome<-clone_type_gDNA_df_no8$clin[which(clone_type_gDNA_df_no8$time==24)]
#tab_str<-structable(clone_status~clinical_outcome)
#mosaic(tab_str,shade=T,main= rownames(matrix_clones_presence_significant_time24)[i],
# gp = shading_hcl, gp_args = list(interpolate = c(1, 1.8)))
#plots[[i]]<-grid.grab()
}
grid.newpage()
tiff("mosaicplot_time24.tiff",res=300,h=5500,w=5500)
grid.arrange(plots[[1]],plots[[2]],plots[[3]],plots[[4]],plots[[5]],plots[[6]], plots[[7]],
plots[[8]],plots[[9]],plots[[10]],plots[[11]],plots[[12]],plots[[13]],plots[[14]],
plots[[15]],plots[[16]],plots[[17]],plots[[18]],plots[[19]],plots[[20]],plots[[21]],ncol=5)
dev.off()
p_value_24[which(p_value_24<0.05)]
names(results_time24)<-rownames(matrix_clones_presence_significant_time24)
cat(capture.output(print(results_time24), file="clones_fisher_time24.txt"))
write.csv(names(results_time24),file="clones_results_time24.csv")
###Are IGHV3-23 over-repressented among the shared clones??
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,names(results_time24))
table(is.na(id)==F) ##268 sequences belong to a persistnece clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx)#49 sequence are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx)#28750 sequence are IGHV3-23
counts = (matrix(data = c(49, 268, 297829, 326578), nrow = 2)) #p-value = 2.8*10-30
chisq.test(counts)
###############
## Find if there is something longitudinally
##############
##Consider only this that pass the filters
reads_clones_annot_gDNA_qc<-reads_clones_annot[which(reads_clones_annot$clones_gDNA>100),]
clone_type_gDNA_df_no8_qc<-clone_type_gDNA_df_no8[na.omit(match(reads_clones_annot_gDNA_qc$specimen_id,rownames(clone_type_gDNA_df_no8))),]
clone_type_gDNA_num_reduced_no8_qc<-clone_type_gDNA_num_reduced_no8[na.omit(match(reads_clones_annot_gDNA_qc$specimen_id,rownames(clone_type_gDNA_num_reduced_no8))),]
##Obtain the persistance by sample
##Each sample need to have at least two time points and we count the persistance across time points
###Delete the time 9
clone_type_gDNA_df_no8_qc_notime9<-clone_type_gDNA_df_no8_qc[which(rownames(clone_type_gDNA_df_no8_qc)!="7_S63"),]
clone_type_gDNA_num_reduced_no8_qc_notime9<-clone_type_gDNA_num_reduced_no8_qc[which(rownames(clone_type_gDNA_num_reduced_no8_qc)!="7_S63"),]
###To measure which individuals have clones across data points
sample<-unique(clone_type_gDNA_df_no8_qc$individual_id)
#filter by the ones who has three time points to study the persistance
sample_filter<-sample[-c(5,7)]
persistance<-list()
for (i in 1:length(sample_filter)){
print(i)
clone_matrix_sample<-clone_type_gDNA_num_reduced_no8_qc[which(clone_type_gDNA_df_no8_qc$individual_id==sample_filter[i]),]
if(dim(clone_matrix_sample)[1]==3){
time0<-clone_matrix_sample[1,which(clone_matrix_sample[1,]!=0)]
time6<-clone_matrix_sample[2,which(clone_matrix_sample[2,]!=0)]
time24<-clone_matrix_sample[3,which(clone_matrix_sample[3,]!=0)]
persistance[[i]]<-intersect(names(time0),names(time6))
persistance[[i]]<-unique(c(persistance[[i]],intersect(names(time6),names(time24))))
persistance[[i]]<-unique(c(persistance[[i]],intersect(names(time0),names(time24))))
} else {
time0<-clone_matrix_sample[1,which(clone_matrix_sample[1,]!=0)]
time6<-clone_matrix_sample[2,which(clone_matrix_sample[2,]!=0)]
persistance[[i]]<-intersect(names(time0),names(time6))
}
}
names(persistance)<-sample_filter
###Are IGHV3-23 over-repressented among the persistence clones??
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,unlist(persistance))
table(is.na(id)==F) ##3205 sequences belong to a persistnece clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx)#397 sequence are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx)#28750 sequence are IGHV3-23
counts = (matrix(data = c(397, 2808, 297829, 326578), nrow = 2)) #p-value < 2.2*10-16
chisq.test(counts)
###Number of clones that persist
persistance_number<-NULL
for (i in 1:length(persistance)){
persistance_number[i]<-length(persistance[[i]])
}
clinical_outcome<-c("NP","NP","NP","NP","NP","NP","NP","PNR","PNR","PNR","PNR","PNR","PNR","PNR","PNR","PNR",
"PR","PR","PR","PR","PR","PR","PR")
COLOR=c("chartreuse4", "dodgerblue3","darkorange2")
summary(glm(persistance_number~clinical_outcome))
tiff("Boxplot_number_persistance_clones.tiff",res=300,h=1500,w=1000)
boxplot(persistance_number~clinical_outcome,col=c("chartreuse4", "dodgerblue3","darkorange2"),ylab="Number of persistence clones")
dev.off()
##plotting persistence
#For this analysis we are putting a cut-off on clones because v-genes can be biased at low clonality
id<-match(data_gDNA_long$specimen_label,reads_clones_annot_gDNA_qc$specimen_id)
data_gDNA_long_qc<-data_gDNA_long[which(is.na(id)==F),]
data_gDNA_long_qc$specimen_label<-factor(data_gDNA_long_qc$specimen_label)
clone_df<-data.frame(table(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,data_gDNA_long_qc$specimen_label))
colnames(clone_df)<-c("clone","specimen","count")
colnames(reads_clones_annot)[4]<-c("specimen")
clone_merge<-merge(clone_df,reads_clones_annot[,c(1,3,4,6)],by = "specimen")
clone_df_noceros = clone_merge[which(clone_merge$count!=0),] #120,574
unique_clones<-unique(unlist(persistance))
id<-NULL
for (i in 1:length(unique_clones)){
print(i)
id<-c(id,grep(unique_clones[i],clone_df_noceros$clone))
}
clone_df_persistace<-clone_df_noceros[id,]
clone_df_persistace$time<-replace(clone_df_persistace$time,clone_df_persistace$time=="32","24")
clone_df_persistace$time<-as.numeric(as.character(clone_df_persistace$time))
g1<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="NP"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
g2<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="PNR"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
g3<-ggplot(clone_df_persistace[which(clone_df_persistace$clin=="PR"),], aes(x=time,y=count,group=clone,fill=clone)) +
scale_x_continuous(breaks = c(0,6,12,24)) + scale_y_continuous(limits = c(0,200)) + geom_area(aes(fill=clone)) + theme(legend.position="none") +
facet_grid(clin ~ Individual.id) + labs(x = "time", y = "Clonal persistence")
tiff("Clonal_persistant_gDNA_long.tiff",res=300,h=1700,w=2700)
multiplot(g1,g2,g3)
dev.off()
###Expanded clones
expansion_NP<-clone_df_persistace$count[which(clone_df_persistace$clin=="NP")]
expansion_PNR<-clone_df_persistace$count[which(clone_df_persistace$clin=="PNR")]
expansion_PR<-clone_df_persistace$count[which(clone_df_persistace$clin=="PR")]
expansion<-c(expansion_NP,expansion_PNR,expansion_PR)
clinical_outcome_expansion<-c(rep("NP",length(expansion_NP)),rep("PNR",length(expansion_PNR)),rep("PR",length(expansion_PR)))
summary(glm(expansion~clinical_outcome_expansion))
tiff("Boxplot_expansion_persistance_clones.tiff",res=300,h=1500,w=1000)
boxplot(expansion~clinical_outcome_expansion,col=c("chartreuse4", "dodgerblue3","darkorange2"),ylab="Size of persistent clones")
dev.off()
##Common clones that persist
library(plyr)
persistance_df <- ldply (persistance, data.frame)
colnames(persistance_df)<-c("Individual","clone")
persistance_df$clin<-c(rep("NP",55),rep("PNR",75),rep("PR",133))
table(duplicated(persistance_df$clone))
colnames(persistance_df)<-c("individual_id","V_J_lenghCDR3_CloneId","clin")
clones<-unique(as.character(persistance_df[which(duplicated(persistance_df$V_J_lenghCDR3_CloneId)==T),"V_J_lenghCDR3_CloneId"]))
clones_list_shared<-list()
for(i in 1:length(clones)){
clones_list_shared[[i]]<-persistance_df[grep(clones[i],persistance_df$V_J_lenghCDR3_CloneId),]
}
###Are IGHV3-23 over-represented among the shared persistence clones?
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,clones)
table(is.na(id)==F) ##1051 sequences belong to a persistence clone
data_gDNA_long_qc[which(is.na(id)==F),"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[which(is.na(id)==F),"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #140 sequences are IGHV3-23
##Total
data_gDNA_long_qc[,"v_gene"]
splitpop<-strsplit(data_gDNA_long_qc[,"v_gene"],"_")
xx<-unlist(lapply(splitpop, `[[`, 1))
table(xx) #28750 sequences are IGHV3-23
counts <- matrix(data = c(140, 1051, 297829, 326578), nrow = 2) #p-value < 2.2e-16
chisq.test(counts)
###Shared sequences before and after transplantation
id<-match(data_gDNA_long_qc$V_J_lenghCDR3_CloneId,clones)
table(is.na(id)==F) ##1051 sequences belong to a persistence clone
shared<-data_gDNA_long_qc[which(is.na(id)==F),c("V_J_lenghCDR3_CloneId","time","clin")]
#NP
shared_NP<-shared[which(shared$clin=="NP"),]
pre<-table(shared_NP[which(shared_NP$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_NP[which(shared_NP$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="NP" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="NP" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
#PNR
shared_PNR<-shared[which(shared$clin=="PNR"),]
pre<-table(shared_PNR[which(shared_PNR$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_PNR[which(shared_PNR$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PNR" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PNR" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
#PR
shared_PR<-shared[which(shared$clin=="PR"),]
pre<-table(shared_PR[which(shared_PR$time==0),"V_J_lenghCDR3_CloneId"])
post<-table(shared_PR[which(shared_PR$time>0),"V_J_lenghCDR3_CloneId"])
freqpre<-pre/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PR" & data_gDNA_long_qc$time==0),])[1]
sum(freqpre)
freqpost<-post/dim(data_gDNA_long_qc[which(data_gDNA_long_qc$clin=="PR" & data_gDNA_long_qc$time>0),])[1]
sum(freqpost)
clones_list_shared_df<-do.call(rbind.data.frame, clones_list_shared)
clones_list_shared_df_cdr3aa<-merge(clones_list_shared_df,data_gDNA_long_qc[,c("individual_id","V_J_lenghCDR3_CloneId","clin","cdr3_seq_aa_q")],by=c("individual_id","V_J_lenghCDR3_CloneId","clin"))
clones_list_shared_df_cdr3aa_unique<-unique(clones_list_shared_df_cdr3aa)
clones_list_shared_df_cdr3aa_order<-clones_list_shared_df_cdr3aa_unique[order(clones_list_shared_df_cdr3aa_unique$V_J_lenghCDR3_CloneId),]
write.csv(clones_list_shared_df_cdr3aa_order,"clones_persistence_table3.csv")
|
#' Get the coordinates (i.e., row, column, (layer)) of a matrix or array
#' element given by its index.
#'
#' This function is an inverse function to [index()].
#'
#' @param m A matrix or an array whose element coordinates are to be computed.
#' @param index The index of the element.
#' @param ... Further arguments passed to methods.
#' @return For a matrix, a vector of two values (row, column). For an
#' array, a vector of three values (row, column, layer).
#' @seealso [index()]
#' @export
point <- function(m, index, ...) {
UseMethod('point')
}
#' @rdname point
#' @export
point.matrix <- function(m, index, ...) {
assert_that(is.hexmatrix(m))
assert_that(is.index(m, index))
c((index - 1) %% nrow(m) + 1,
((index - 1) %/% nrow(m)) + 1)
}
#' @rdname point
#' @export
point.array <- function(m, index, ...) {
assert_that(is.hexarray(m))
assert_that(is.index(m, index))
rowcol <- nrow(m) * ncol(m)
c((index - 1) %% nrow(m) + 1,
((index - 1) %% rowcol) %/% nrow(m) + 1,
(index - 1) %/% rowcol + 1)
}
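## Round-trip sketch (assumption: index() computes the column-major linear
## index that these methods invert, i.e. for a matrix
## index = (col - 1) * nrow(m) + row); plain objects used for illustration:
# local({
#   m <- matrix(seq_len(12), nrow = 3)
#   i <- 8
#   p <- c((i - 1) %% nrow(m) + 1, (i - 1) %/% nrow(m) + 1)  # as in point.matrix()
#   stopifnot((p[2] - 1) * nrow(m) + p[1] == i)              # point() inverts index()
# })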
#' @rdname point
#' @export
is.point <- function(m, point) {
UseMethod('is.point')
}
#' @rdname point
#' @export
is.point.matrix <- function(m, point) {
is.vector(point) &&
length(point) == 2 &&
is.count(point[1]) &&
is.count(point[2]) &&
point[1] <= nrow(m) &&
point[2] <= ncol(m)
}
#' @rdname point
#' @export
is.point.array <- function(m, point) {
is.vector(point) &&
length(point) == 3 &&
is.count(point[1]) &&
is.count(point[2]) &&
is.count(point[3]) &&
point[1] <= nrow(m) &&
point[2] <= ncol(m) &&
point[3] <= nlayer(m)
}
|
/R/point.R
|
no_license
|
beerda/hexmatrix
|
R
| false
| false
| 1,608
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{word_scr}
\alias{word_scr}
\alias{word_stacfis}
\title{Creates an R Markdown Word NAFO-formatted document}
\usage{
word_scr(...)
word_stacfis(...)
}
\arguments{
\item{...}{arguments to pass to \code{\link[bookdown:word_document2]{bookdown::word_document2()}}}
}
\value{
A Word Document based on the NAFO SCR word template.
}
\description{
This function is called in the output field of the YAML header of the
driver Rmd file to specify the creation of a Microsoft Word version of an
SCR or a STACFIS report.
}
|
/man/word_scr.Rd
|
permissive
|
ursusbob/NAFOdown
|
R
| false
| true
| 588
|
rd
|
library(testthat)
context("listSpecialSets")
test_that("lists", {
expect_silent(listSpecialSets(TRUE))
expect_silent(listSpecialSets(FALSE))
})
|
/tests/testthat/test_misc_listspecialset.R
|
permissive
|
xoopR/set6
|
R
| false
| false
| 143
|
r
|
#' urlr
#'
#' Convert the selected text into a markdown link
#'
#' @return a markdown link
#' @export
#' @importFrom rstudioapi getSourceEditorContext modifyRange
#'
urlr <- function() {
adc <- rstudioapi::getSourceEditorContext()
before_last_space <- '(\\s)(?!.*\\s)(.*?)$'
after_last_space <- '^(.*?)(\\s)(?!.*\\s)'
txt <- adc$selection[[1]]$text
txt_last <- gsub(after_last_space,'',txt, perl = TRUE)
txt_last <- ifelse(is_link(txt_last), txt_last, "# Error : selection is not a link")
# strc() and is_link() are helpers from this package; markdown links are
# written [text](url), matching the empty '[]()' template in the '0' case
content <- switch(as.character(strc(txt = txt)),
                  '0' = '[]()' ,
                  '-1' = {
                    if(is_link(txt_last)){
                      sprintf('[%s](%s)', txt_last, txt_last)
                    }else{
                      sprintf('[%s]()', txt_last)
                    }
                  },
                  sprintf('[%s](%s)', gsub(before_last_space, '', txt, perl = TRUE), txt_last)
)
rstudioapi::modifyRange(location = adc$selection[[1]]$range, text = content, id = adc$id)
}
|
/R/urlr.R
|
no_license
|
aoles/remedy
|
R
| false
| false
| 1,069
|
r
|
# This function calculates the null distribution of colonization timing.
# ode() below requires the deSolve package.
library(deSolve)
colnllcal <- function(states,pars,tot,dt) {
func0 <- function (time,state1,par) {
with(as.list(c(state1,par)), {
sB <- par[1]
sA <- par[2]
sAB <- par[3]
xB <- par[4]
xA <- par[5]
dB <- par[6]
dA <- par[7]
deA <- -(sA+dA+xA)*eA+xA+dA*eAB+sA*eA*eA
deB <- -(sB+dB+xB)*eB+xB+dB*eAB+sB*eB*eB
deAB <- -(sA+sB+sAB+xA+xB)*eAB+xA*eB+xB*eA+sA*eAB*eA+sB*eAB*eB+sAB*eA*eB
dnA <- -(sA+dA+xA)*nA+dA*nAB+2*sA*nA*eA
dnB <- -(sB+dB+xB)*nB+0+2*sB*nB*eB
dnAB <- -(sA+sB+sAB+xA+xB)*nAB+xA*nB+xB*nA+sA*(eA*nAB+eAB*nA)+sB*(eB*nAB+eAB*nB)+sAB*(eA*nB+eB*nA)
return(list(c(dnA,dnB,dnAB,deA,deB,deAB)))
})
}
func <- function (time,state1,par) {
with(as.list(c(state1,par)), {
sB <- par[1]
sA <- par[2]
sAB <- par[3]
xB <- par[4]
xA <- par[5]
dB <- par[6]
dA <- par[7]
deA <- -(sA+dA+xA)*eA+xA+dA*eAB+sA*eA*eA
deB <- -(sB+dB+xB)*eB+xB+dB*eAB+sB*eB*eB
deAB <- -(sA+sB+sAB+xA+xB)*eAB+xA*eB+xB*eA+sA*eAB*eA+sB*eAB*eB+sAB*eA*eB
dnA <- -(sA+dA+xA)*nA+dA*nAB+2*sA*nA*eA
dnB <- -(sB+dB+xB)*nB+dB*nAB+2*sB*nB*eB
dnAB <- -(sA+sB+sAB+xA+xB)*nAB+xA*nB+xB*nA+sA*(eA*nAB+eAB*nA)+sB*(eB*nAB+eAB*nB)+sAB*(eA*nB+eB*nA)
return(list(c(dnA,dnB,dnAB,deA,deB,deAB)))
})
}
dtcal2 <- function (state,pars,dt,t) {
Tt <- trunc(t/dt)
state1 <- matrix(NA,Tt+1,6)
tmp <- ode(c(nA=state[1],nB=state[2],nAB=state[3],eA=state[4],eB=state[5],eAB=state[6]),c(0,t),func,pars)
state1[1,] <- as.numeric(c(tmp[2,2:4],tmp[2,5:7]))
for (i in 1:Tt) {
tmp <- ode(c(nA=state[1],nB=state[2],nAB=state[3],eA=state[4],eB=state[5],eAB=state[6]),c(0,i*dt),func0,pars)
state2 <- as.numeric(c(tmp[2,2:4],tmp[2,5:7]))
tmp <- ode(c(nA=state2[1],nB=state2[2],nAB=state2[3],eA=state2[4],eB=state2[5],eAB=state2[6]),c(i*dt,t),func,pars)
state1[i+1,] <- as.numeric(c(tmp[2,2:4],tmp[2,5:7]))
}
list(state1)
}
RES <- dtcal2(states,pars,dt,tot)
prob <- RES[[1]][,2]*RES[[1]][,2]/(RES[[1]][,1]+RES[[1]][,2]+RES[[1]][,3])+RES[[1]][,1]*RES[[1]][,1]/(RES[[1]][,1]+RES[[1]][,2]+RES[[1]][,3])+RES[[1]][,3]*RES[[1]][,3]/(RES[[1]][,1]+RES[[1]][,2]+RES[[1]][,3])
prob <- prob[-length(prob)]-prob[-1]
prob[prob<0] <- 0
prob <- prob/sum(prob)
}
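## Usage sketch (hypothetical parameter values; `states` is the initial
## (nA, nB, nAB, eA, eB, eAB) vector and `pars` holds the seven rates
## (sB, sA, sAB, xB, xA, dB, dA) in the order unpacked above):
# pars   <- c(0.1, 0.1, 0.05, 0.02, 0.02, 0.01, 0.01)
# states <- c(1, 0, 0, 0, 0, 0)
# p <- colnllcal(states, pars, tot = 10, dt = 0.5)
# sum(p)  # the returned per-bin probabilities sum to 1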
|
/colnllcal.R
|
no_license
|
huaxia1985/TestingConstantSpeciesAddition
|
R
| false
| false
| 2,410
|
r
|
## 1. Explore the features of the gamma distribution
## 1a. Graph the probability density function for the gamma distribution
## with a range of shape values, keeping the rate parameter constant
curve( dgamma( x, 0.5, 1 ), from=0, to=10, ylab="pdf" )
curve( dgamma( x, 1, 1 ), from=0, to=10, col=2, add=TRUE )
curve( dgamma( x, 2, 1 ), from=0, to=10, col=3, add=TRUE )
curve( dgamma( x, 4, 1 ), from=0, to=10, col=4, add=TRUE )
curve( dgamma( x, 8, 1 ), from=0, to=10, col=5, add=TRUE )
## 1b. Graph the probability density function for the gamma distribution
## with a range of rate values, keeping the shape parameter constant
curve( dgamma( x, 2, 8 ), from=0, to=10, ylab="pdf" )
curve( dgamma( x, 2, 4 ), from=0, to=10, col=2, add=TRUE )
curve( dgamma( x, 2, 2 ), from=0, to=10, col=3, add=TRUE )
curve( dgamma( x, 2, 1 ), from=0, to=10, col=4, add=TRUE )
curve( dgamma( x, 2, 0.5 ), from=0, to=10, col=5, add=TRUE )
## 1c. Graph the probability density function for the gamma distribution
## with a range of shape values, keeping the mean of the distribution
## constant (mean = shape/rate)
curve( dgamma( x, 0.5, 0.5/4 ), from=0, to=10, ylab="pdf" )
curve( dgamma( x, 1, 1/4 ), from=0, to=10, col=2, add=TRUE )
curve( dgamma( x, 2, 2/4 ), from=0, to=10, col=3, add=TRUE )
curve( dgamma( x, 4, 4/4 ), from=0, to=10, col=4, add=TRUE )
curve( dgamma( x, 8, 8/4 ), from=0, to=10, col=5, add=TRUE )
## 1d. Graph the cumulative distribution function for the gamma
## distribution with a range of shape values, keeping the rate
## parameter constant
curve( pgamma( x, 0.5, 1 ), from=0, to=10, ylab="cdf" )
curve( pgamma( x, 1, 1 ), from=0, to=10, col=2, add=TRUE )
curve( pgamma( x, 2, 1 ), from=0, to=10, col=3, add=TRUE )
curve( pgamma( x, 4, 1 ), from=0, to=10, col=4, add=TRUE )
curve( pgamma( x, 8, 1 ), from=0, to=10, col=5, add=TRUE )
## 1e. Compute percentiles of a gamma distribution with shape 2
## and rate 1: 5th, 25th, 50th, 75th, and 95th percentiles
qgamma( c(0.05, 0.25, 0.50, 0.75, 0.95), 2, 1 )
## 1f. Simulate 20 random numbers from a gamma distribution
## with shape 0.5 and rate 0.5. Compute the histogram
## of the simulated values. How does the histogram compare
## to the density function? Repeat for 100 random numbers,
## 1000 random numbers, 10,000 random numbers.
par(mfrow=c(2,2))
hist( rgamma( 20, 0.5, 0.5 ), freq=FALSE )
curve( dgamma( x, 0.5, 0.5 ), add=TRUE, col=2 )
hist( rgamma( 100, 0.5, 0.5 ), freq=FALSE )
curve( dgamma( x, 0.5, 0.5 ), add=TRUE, col=2 )
hist( rgamma( 1000, 0.5, 0.5 ), freq=FALSE )
curve( dgamma( x, 0.5, 0.5 ), add=TRUE, col=2 )
hist( rgamma( 10000, 0.5, 0.5 ), freq=FALSE )
curve( dgamma( x, 0.5, 0.5 ), add=TRUE, col=2 )
## 2. Explore the features of the beta distribution
## 2a. Look at the pdf of the beta distribution
## for several sets of parameter values (shape1, shape2):
## (0.5,0.5); (0.5,2); (1,1); (5,2); (4,1); (5,10)
par(mfrow=c(1,1))
curve( dbeta( x, 0.5, 0.5 ), from=0, to=1, ylab="pdf", ylim=c(0,3.5) )
curve( dbeta( x, 0.5, 2 ), from=0, to=1, col=2, add=TRUE )
curve( dbeta( x, 1, 1 ), from=0, to=1, col=3, add=TRUE )
curve( dbeta( x, 5, 2 ), from=0, to=1, col=4, add=TRUE )
curve( dbeta( x, 4, 1 ), from=0, to=1, col=5, add=TRUE )
curve( dbeta( x, 5, 10 ), from=0, to=1, col=6, add=TRUE )
## 2b. Estimate the mean, standard deviation, and skewness
## of a beta distribution based on a random sample
## from a beta distribution
x <- rbeta( 1000, 4, 8 )
mean(x)
sd(x)
skewness <- function(z){
thirdcentralmoment <- mean( ( z - mean(z) )^3 )
secondcentralmoment <- mean( ( z - mean(z) )^2 )
skewness <- thirdcentralmoment / secondcentralmoment^1.5
return( skewness )
}
skewness(x)
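## For comparison, the theoretical skewness of Beta(a, b) is
## 2 * (b - a) * sqrt(a + b + 1) / ((a + b + 2) * sqrt(a * b))
a <- 4; b <- 8
2 * (b - a) * sqrt(a + b + 1) / ((a + b + 2) * sqrt(a * b))  # ~0.36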
## 3. Explore the sample function
## 3a. Randomly choose 5 distinct values from the integers from 1 to 20
sample( 1:20, 5, replace=FALSE )
## 3b. Randomly choose 30 values from the integers from 1 to 20
sample( 1:20, 30, replace=TRUE )
## Bonus - bootstrap
## 3c. Construct a bootstrap estimate for the standard error of
## the sample skewness of a beta distribution with shape parameters
## 4 and 8, based on 30 samples
## 3c1. Generate a sample from the distribution
x <- rbeta( 30, 4, 8 )
## 3c2. Construct many resamples of the data
## This code will generate 1000 new re-samples of the data
## each of sample size 30
xresample <- matrix( sample( x, 1000 * 30, replace=TRUE ), ncol=30 )
## 3c3. Compute the skewness for each resample
xresampleskew <- apply( xresample, 1, skewness )
## 3c4. Look at the bootstrap distribution of the skewness
hist( xresampleskew )
## 3c5. Compute the bootstrap standard error of the skewness
sd( xresampleskew )
## Note: the boot package provides a one-line command for constructing
## bootstrap estimates
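## For example (a sketch using the boot package, if installed):
# library(boot)
# b <- boot(x, statistic = function(d, i) skewness(d[i]), R = 1000)
# sd(b$t)   # bootstrap standard error, comparable to sd(xresampleskew)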
## 4. Test out setting random number seeds
## 4a. Set the random number seed the same as your neighbor
## and generate 5 cauchy random variables.
##     Check that the 5 numbers match your neighbor's
set.seed(1234)
rcauchy( 5 )
## 4b. Repeat a and show that you get the exact same results
set.seed(1234)
rcauchy( 5 )
## 4c. Generate 5 more and show that they're different if you
## don't reset the random number seed
rcauchy( 5 )
## 4d. Set the random number seed differently from your neighbor
## and compare a histogram of 50 cauchy random variables
## You:
set.seed(1234)
hist( rcauchy( 50 ) )
## Your neighbor:
set.seed(12345)
hist( rcauchy( 50 ) )
## Note: The cauchy distribution has extremely long tails and thus
## produces occasional values that are very high or very low
## 5. Probability Integral Transform and Multivariate
## 5a. Generate a (sub-standard) random sample from the normal
## distribution by generating uniform random variates and
## using the probability integral transform
u <- runif( 1000 )
x <- qnorm( u, 0, 1 )
hist( x )
## 5b. Generate a (sub-standard) random sample from the
## uniform distribution by generating normal random
##     variates and transforming to uniform
x <- rnorm( 1000, 0, 1 )
u <- pnorm( x, 0, 1 )
## 5c. Generate a set of multivariate normal random variates
##     with dimension 3 that has correlations of:
# x.y: 0.6, x.z: 0.9, y.z: 0.2
means <- c( 0, 0, 0 )
cormat <- matrix( c( 1, 0.6, 0.9,
0.6, 1, 0.2,
0.9, 0.2, 1 ), ncol=3 )
xyz <- MASS::mvrnorm( 1000, means, cormat )
colnames( xyz ) <- c("x","y","z")
pairs( xyz )
cor( xyz )
## 5d. Generate a set of correlated uniform random variates
## by transforming the correlated normal variates.
## What is the correlation now?
uvw <- pnorm( xyz )
colnames( uvw ) <- c("u","v","w")
pairs( uvw )
cor( uvw )
## 5e. Generate a set of correlated gamma random variates
## by transforming the correlated uniforms.
## What is the correlation now?
abc <- data.frame( a=qgamma( uvw[,1], 1, 0.5 ),
b=qgamma( uvw[,2], 2, 0.1 ),
c=qgamma( uvw[,3], 0.5, 2 ) )
pairs( abc )
cor( abc )
## 5f. Compute the derived quantity a / ( b + c ) for the
## gamma random variates. Look at the distribution, and
## calculate summary statistics
d <- abc$a / ( abc$b + abc$c )
hist( d )
summary( d )
sd( d )
## 5g. Repeat 5f, but with uncorrelated random variates.
## How do the results differ?
nabc <- data.frame( a=rgamma( 1000, 1, 0.5 ),
b=rgamma( 1000, 2, 0.1 ),
c=rgamma( 1000, 0.5, 2 ) )
e <- nabc$a / ( nabc$b + nabc$c )
brks <- seq( 0, max(d,e)+0.1, 0.05 )
par( mfrow=c(2,1) )
hist( d, breaks=brks )
hist( e, breaks=brks )
summary( e )
sd( e )
## Reset graphing window:
par( mfrow=c(1,1) )
## 6. Monte Carlo integration
## 6a. Compute a Monte Carlo estimate of the mean value of
##     exp(x)/x^4, based on 100 random variates from the gamma
##     distribution with shape 6 and rate 6
##     Correct answer = 15.552
x <- rgamma( 100, 6, 6 )
mean( exp(x)/x^4 )
## 6b. Increase the sample size to 1000 and re-compute
x <- c( x, rgamma( 1000, 6, 6 ) )
mean( exp(x)/x^4 )
## 6c. Increase the sample size to 10,000 and re-compute
x <- c( x, rgamma( 10000, 6, 6 ) )
mean( exp(x)/x^4 )
## 6d. Increase the sample size to 100,000 and re-compute
x <- c( x, rgamma( 100000, 6, 6 ) )
mean( exp(x)/x^4 )
## 6e. Repeat 6a-6d, using a quasi-random sequence
library(gsl)
q <- qrng_alloc( dim=1, type="sobol" )
u <- qrng_get( q, 100 )
y <- qgamma( u, 6, 6 )
mean( exp(y)/y^4 )
u <- c( u, qrng_get( q, 1000 ) )
y <- qgamma( u, 6, 6 )
mean( exp(y)/y^4 )
u <- c( u, qrng_get( q, 10000 ) )
y <- qgamma( u, 6, 6 )
mean( exp(y)/y^4 )
u <- c( u, qrng_get( q, 100000 ) )
y <- qgamma( u, 6, 6 )
mean( exp(y)/y^4 )
## 6f. Which result looks better?
## The expected value of this function depends very heavily
## on the tails of the distribution.
## With the pseudo-random sequence, you might have gotten
## closer to the answer quickly, but likely the answer did
## not stabilize quickly as sample size was increased
## With the quasi-random sequence, the small sample size
## answers are relatively poor, because the tails of the
## distribution are not "sampled" yet. But as the sample
## size is increased, the estimate generally improves
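## Optional check (a sketch): plot the running pseudo-random estimate
## against sample size; it should settle towards the true value 15.552
# running_est <- cumsum( exp(x)/x^4 ) / seq_along(x)
# plot( seq_along(x), running_est, type="l", log="x",
#       xlab="sample size", ylab="running estimate" )
# abline( h=15.552, lty=2 )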
|
/RStuff/NeptuneAdvTraining_Athens/1Simulation/ExSimKP.R
|
no_license
|
puruckertom/swat_gof
|
R
| false
| false
| 9,232
|
r
|
## Loading the required libraries
library(dplyr)
library(datasets)
library(reshape2)
## First of all, we are going to read in all the data given in the form of .txt files
## Reading all activity IDs and their names and appropriately labeling the columns
labels <- read.table("./UCI HAR Dataset/activity_labels.txt",col.names=c("activity_id","activity_name"))
## Reading all the dataframe's column names
features <- read.table("./UCI HAR Dataset/features.txt")
feature_names <- features[,2]
## Reading the X values of training data and labeling the dataframe's columns
train_data <- read.table("./UCI HAR Dataset/train/X_train.txt")
colnames(train_data) <- feature_names
## Reading the subject ids and labeling the columns
train_subject_id <- read.table("./UCI HAR Dataset/train/subject_train.txt")
colnames(train_subject_id) <- "subject_id"
## Now reading the activity id's of the Y values' training data file and labeling the columns
train_activity_id <- read.table("./UCI HAR Dataset/train/Y_train.txt")
colnames(train_activity_id) <- "activity_id"
## Reading the X values of test data and labeling the dataframe's columns
test_data <- read.table("./UCI HAR Dataset/test/X_test.txt")
colnames(test_data) <- feature_names
## Reading the subject ids and labeling the columns
test_subject_id <- read.table("./UCI HAR Dataset/test/subject_test.txt")
colnames(test_subject_id) <- "subject_id"
## Now reading the activity id's of the Y values' test data file and labeling the columns
test_activity_id <- read.table("./UCI HAR Dataset/test/Y_test.txt")
colnames(test_activity_id) <- "activity_id"
## Now merging the X and Y values of the test file and the train file
traindata <- cbind(train_subject_id , train_activity_id , train_data)
testdata <- cbind(test_subject_id , test_activity_id , test_data)
## Now merging the train and test data
all_data <- rbind(traindata,testdata)
## Extracting the columns referring to mean() or std() values
mean_col_idx <- grep("mean",names(all_data),ignore.case = TRUE)
mean_col_names <- names(all_data)[mean_col_idx]
std_col_idx <- grep("std",names(all_data),ignore.case = TRUE)
std_col_names <- names(all_data)[std_col_idx]
meanstddata <-all_data[,c("subject_id","activity_id",mean_col_names,std_col_names)]
## Merging the activities dataset with the mean/std values dataset to get one dataset with descriptive activity names
descrnames <- merge(labels,meanstddata,by.x="activity_id",by.y="activity_id",all=TRUE)
## And melting the dataset with the descriptive activity names for better handling using melt()
data_melt <- melt(descrnames,id=c("activity_id","activity_name","subject_id"))
## Now casting the melted dataset according to the average of each variable for each activity and each subject
## using dcast()
mean_data <- dcast(data_melt,activity_id + activity_name + subject_id ~ variable,mean)
## Create a file with the new tidy dataset
write.table(mean_data,"./UCI HAR Dataset/tidy_data.txt", row.names = FALSE)
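## Quick sanity check (a sketch): read the tidy set back in; with 30 subjects
## and 6 activities we expect 6 x 30 = 180 rows
# tidy <- read.table("./UCI HAR Dataset/tidy_data.txt", header = TRUE)
# nrow(tidy)   # expected: 180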
|
/run_analysis.R
|
no_license
|
v-nayjack/Getting-and-Cleaning-Data-Course_Project
|
R
| false
| false
| 3,024
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/network.generic.R
\docType{methods}
\name{network.glmnet.private,ANY,matrix-method}
\alias{network.glmnet.private,ANY,matrix-method}
\title{Calculate GLM model with network-based regularization}
\usage{
\S4method{network.glmnet.private}{ANY,matrix}(fun, xdata, ydata, network,
network.options = network.options.default(), ...)
}
\arguments{
\item{xdata}{matrix.}
}
\value{
an object of the same class as returned by glmnet
}
\description{
Calculate GLM model with network-based regularization
}
|
/man/network.glmnet.private-ANY-matrix-method.Rd
|
no_license
|
averissimo/network.cox
|
R
| false
| true
| 550
|
rd
|
xdf_mlbSub <- read.table("mlb_filter_example.tsv", header=TRUE, sep="\t", quote="\"")
tail(xdf_mlbSub)
dim(xdf_mlbSub)
############## the field PID_ndx gives the row index for the last
############## appearance of pitcher
############## gives zero if first appearance in data
############
x <- as.vector(xdf_mlbSub[ , "x" ])
###########
xndx <- xdf_mlbSub[ , "PID_ndx" ]
N <- nrow(xdf_mlbSub)
###################### g is our filter's 'gain'
g <- rep(1, N)
###################### this vector will hold our a posteriori predictions
xhat <- numeric(N)
###################### this vector will hold our a priori predictions
xhat_forecast <- numeric(N)
################### signal to noise ratio. Sometimes called rho.
################### controls amount of 'forgetting'
################### when zero, filter 'remembers' everything
################### the larger the value, the more the filter 'forgets' the past
################### this is really the only tuning parameter in this filter
SNR <- 1/1000
for(ii in 1:N) {
xthis_ndx <- xndx[ ii ]
######################## if first appearance of pitcher, set to initial values
if(xthis_ndx == 0) {
xprevious_xhat <- 0
xprevious_g <- 1
} else { ############## otherwise get previous values
xprevious_xhat <- xhat[ xthis_ndx ]
xprevious_g <- g[ xthis_ndx ]
}
xhat_forecast[ ii ] <- xprevious_xhat
xthis <- x[ ii ]
xhat[ ii ] <- xprevious_xhat + xprevious_g * (xthis - xprevious_xhat)
g[ ii ] <- (xprevious_g + SNR) / (xprevious_g + SNR + 1)
}
sqrt( mean( ( x - xhat_forecast )^2 ) )
############# question: what value of SNR gives the lowest forecast RMSE ?
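############# one way to answer (a sketch): wrap the filter above in a
############# function of SNR and grid-search for the minimum forecast RMSE
# rmse_for_snr <- function(SNR) {
#     g <- rep(1, N) ; xhat <- numeric(N) ; xf <- numeric(N)
#     for(ii in 1:N) {
#         k <- xndx[ ii ]
#         ph <- if(k == 0) 0 else xhat[ k ]
#         pg <- if(k == 0) 1 else g[ k ]
#         xf[ ii ] <- ph
#         xhat[ ii ] <- ph + pg * (x[ ii ] - ph)
#         g[ ii ] <- (pg + SNR) / (pg + SNR + 1)
#     }
#     sqrt( mean( (x - xf)^2 ) )
# }
# snr_grid <- 10^seq(-5, 0, by=0.25)
# rmses <- sapply(snr_grid, rmse_for_snr)
# snr_grid[ which.min(rmses) ]   # SNR with the lowest forecast RMSE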
|
/_07_localLevel_MLB_example_01.R
|
no_license
|
davezes/Stats_141SL
|
R
| false
| false
| 1,727
|
r
|
#' Alpine newt body temperatures and associated operative temperatures
#'
#' Active field body temperature data of Belgian alpine newts
#' (\emph{Ichthyosaura alpestris}) and associated operative temperature data
#' from agar models, collected during nocturnal surveys in spring. Data are in
#' degrees Celsius.
#'
#' @docType data
#'
#' @usage data(ichalp)
#'
#' @source Wouter Beukema \email{wouter.beukema@@gmail.com}
#'
"ichalp"
|
/R/ichalp-data.R
|
no_license
|
wouterbeukema/ectotemp
|
R
| false
| false
| 453
|
r
|
errate = function(fun) {
function(...) {
warn <- err <- NULL
result <- withCallingHandlers(
tryCatch(
fun(...),
error=function(e) {
err <<- conditionMessage(e)
NULL
}),
warning=function(w) {
warn <<- append(warn, conditionMessage(w))
invokeRestart("muffleWarning")
})
return(list(result=result, err=err, warn=warn))
}
}
hasError = function(x) {
!is.null(x[["err"]])
}
hasWarning = function(x) {
!is.null(x[["warn"]])
}
hasErrata = function(x) {
(hasError(x) || hasWarning(x))
}
errata = function(x) {
list(err = x[["err"]],
warn = x[["warn"]])
# vs. x[["result"]] = NULL
}
unErrate = function(x) {
x[["result"]]
}
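# Usage sketch:
# safe_log <- errate(log)
# r1 <- safe_log(-1)    # NaN result plus a captured "NaNs produced" warning
# hasWarning(r1)        # TRUE
# r2 <- safe_log("a")   # non-numeric argument -> captured error, NULL result
# hasError(r2)          # TRUE
# unErrate(r2)          # NULL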
# factory <- function(fun)
# function(...) {
# warn <- err <- NULL
# val <- withCallingHandlers(
# tryCatch(fun(...), error=function(e) {
# err <<- conditionMessage(e)
# NULL
# }), warning=function(w) {
# warn <<- append(warn, conditionMessage(w))
# invokeRestart("muffleWarning")
# })
# list(value=val, warn=warn, err=err)
# }
#
# .has <- function(x, what)
# !sapply(lapply(x, "[[", what), is.null)
# hasWarning <- function(x) .has(x, "warn")
# hasError <- function(x) .has(x, "err")
# isClean <- function(x) !(hasError(x) | hasWarning(x))
# cleanv <- function(x) sapply(x[isClean(x)], "[[", 1)
# factoryValue <- function(x) sapply(x, "[[", 1)
# factoryStatus <- function(x) sapply(x, "[[", c(2, 3))
#
#
# withWarnings <- function(expr) {
# myWarnings <- NULL
# wHandler <- function(w) {
# myWarnings <<- c(myWarnings, list(w))
# invokeRestart("muffleWarning")
# }
# val <- withCallingHandlers(expr, warning = wHandler)
# list(value = val, warn = myWarnings)
# }
#
|
/R/errate.R
|
permissive
|
spgolden/mynr
|
R
| false
| false
| 2,173
|
r
|
library(geoSpectral)
### Name: spc.getwavelengths
### Title: Extract wavelengths of a 'Spectra' object
### Aliases: spc.getwavelengths spc.getwavelengths,Spectra-method
### ** Examples
x <- spc.example_spectra()
spc.getwavelengths(x)
|
/data/genthat_extracted_code/geoSpectral/examples/spc.getwavelengths.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 246
|
r
|
##-------------------------------------
## fit a drift term in a Tweedie model
## CM for discussion with Louise
## 22/06/2021
##-------------------------------------
library(icesDatras)
library(ggplot2); theme_set(theme_bw())
library(mgcv)
library(tweedie)
## Norway pout WoRMS ID
aphia_id <- 126444
##------------------------
## HH data - haul details
##------------------------
all_hh <- NULL
s <- "IE-IGFS"
available_years <- getSurveyYearList(s)
for(y in available_years){
tmp_hh <- getDATRAS(record = "HH", s, y, quarters = 4)
all_hh <- rbind(all_hh, tmp_hh)
}
## get valid tows
all_hh <- subset(all_hh, HaulVal == "V")
with(all_hh, plot(ShootLong, ShootLat))
## keeping all for now but can restrict areas etc
##-------------------------------
## HL data - biological sampling
##-------------------------------
all_hl <- NULL
for(y in available_years){
tmp_hl <- getDATRAS(record = "HL", s, y, quarters = 4)
tmp_hl <- subset(tmp_hl, Valid_Aphia == aphia_id)
all_hl <- rbind(all_hl, tmp_hl)
}
table(all_hl$LngtCode) ## all in mm
## outlying sub-sampling factors?
## keeping all for now
## convert to mass
## using North Sea W = 0.00450 L^3.148 from
## https://www.fishbase.se/popdyn/LWRelationshipList.php?ID=1023&GenusName=Trisopterus&SpeciesName=esmarkii&fc=183
a <- 0.00450
b <- 3.148
## raised mass per length class per haul
all_hl$mass_g <- with(all_hl, HLNoAtLngt * SubFactor * a * (LngtClass / 10)^b)
## in kgs
all_hl$mass_kg <- all_hl$mass_g / 1e3
## sum to the haul level - ignoring swept area here
haul_hl <- aggregate(mass_kg ~ Year + HaulNo, FUN = sum, data = all_hl)
## bring in the zeros
vars2keep <- c("Year", "HaulNo")
all_dat <- merge(haul_hl, all_hh[, vars2keep], all = TRUE)
all_dat$mass_kg[is.na(all_dat$mass_kg)] <- 0
with(all_dat, plot(jitter(Year), mass_kg))
## fit the models
library(TMB)
compile("tweedie_ts.cpp")
dyn.load(dynlib("tweedie_ts"))
data <- list(y = all_dat$mass_kg,
idx = all_dat$Year - min(all_dat$Year))
data$Y <- max(data$idx)
## Parameter initial guess
parameters <- list(logmu = rep(log(mean(data$y)), data$Y + 1),
logsdmu = log(0.1),
d = 0,
logphi = log(1),
logitp = qlogis(0.5))
## Fit model
obj <- MakeADFun(data,
parameters,
DLL = "tweedie_ts",
random = c("logmu"))
obj$fn()
obj$gr()
opt <- nlminb(obj$par, obj$fn, obj$gr)
rep <- sdreport(obj)
srep <- summary(rep)
## profile the drift term
prof <- tmbprofile(obj, "d")
plot(prof)
confint(prof)
p_hat <- 1.0001 + 0.9999 * plogis(srep["logitp", "Estimate"])
phi_hat <- exp(srep["logphi", "Estimate"])
mu_hat <- exp(srep["logmu", "Estimate"])
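## Sanity check (a sketch): a Tweedie model implies Var(Y) = phi * mu^p,
## so the implied per-year variance of haul-level catches is
# var_hat <- phi_hat * mu_hat^p_hat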
library(cplm)
fit <- cpglm(mass_kg ~ -1 + factor(Year), data = all_dat, link = "log")
tmp <- summary(fit)$coefficients
rownames(tmp) <- NULL
pred_R <- data.frame(Year = available_years,
est = exp(tmp[, "Estimate"]),
lwr = exp(tmp[, "Estimate"] - 2 * tmp[, "Std. Error"]),
upr = exp(tmp[, "Estimate"] + 2 * tmp[, "Std. Error"]),
mod = "R Tweedie")
## add own
tmp <- srep[grep("logmu", rownames(srep)), ]
pred_own <- data.frame(Year = available_years,
est = exp(tmp[, "Estimate"]),
lwr = exp(tmp[, "Estimate"] - 2 * tmp[, "Std. Error"]),
upr = exp(tmp[, "Estimate"] + 2 * tmp[, "Std. Error"]),
mod = "Own C++ Tweedie with drift")
all_pred <- rbind(pred_R, pred_own)
pdf("Tweedie_drift_model_example.pdf", height = 7, width = 8)
ggplot(all_pred, aes(x = Year, y = est, colour = mod)) +
geom_line(size = 1) +
geom_point() +
geom_line(aes(y = lwr), lty = 2) +
geom_line(aes(y = upr), lty = 2) +
scale_colour_manual("Model", values = c("blue", "slategrey")) +
theme(legend.position = "bottom") +
xlab("Year") +
ylab("Mean CPUE (kg/tow)") +
ggtitle("Norway pout")
##
plot(prof, xlab = "Drift", ylab = "Profile likelihood", bty = "l", main = "Norway pout drift term profile likelihood")
abline(v = 0, lty = 2)
dev.off()
|
/coilin_sandbox/tweedie_drift/tweedie.R
|
no_license
|
IrishMarineInstitute/ClimFish
|
R
| false
| false
| 4,231
|
r
|
GetMeta <- function(StudyName, subCol, groups=TRUE){
if(grepl("Stanley", StudyName)){
collection = gsub("Stanley", "", StudyName)
StanleyStudies <- read.table("StanleyStudies.txt", sep="\t", header=T)
## NB: `path` (location of MetadataStanley.csv) is assumed to be defined in the calling environment
MetaAll <- read.table(paste0(path, "MetadataStanley.csv"), header = T, sep="\t")
names(MetaAll) <- sapply(names(MetaAll), function(x) gsub("\\.*", "", x))
Metadata <- subset(MetaAll, MetaAll$CollectionType == collection)
for(meta in list.files(path = StudyName, pattern = "Study.*?csv")){
assign(paste0("Meta", sapply(meta, function(x) strsplit(x, "\\.")[[1]][1])),
read.table(paste0(StudyName, "/", meta), header = T, sep="\t"))
}
#######################################################################################################
#Create data_frame matching the subject ids from different studies
samples <- rep(NA, length = nrow(Metadata))
SampleIDs <- as.data.frame(sapply(ls(pat="MetaStudy"),
function(x) {
temp <- eval(as.name(x))
names(temp) <- gsub("\\.", "", names(temp))
samples[match(temp$StanleyID, Metadata$StanleyID)] <- as.character(temp$Filename)
return(samples)
}, simplify = "array" ))
rm(samples)
rownames(SampleIDs) <- Metadata$StanleyID
names(SampleIDs) <- gsub("Meta", "", names(SampleIDs))
Metadata <- cbind(SampleIDs, Metadata)
Metadata <- Metadata[order(Metadata$Profile),]
Metadata %<>% melt(measure.vars = grep("Study", names(.)), variable.name="Study", value.name="Filename", na.rm=T)
Metadata$Filename <- sapply(Metadata$Filename, function(filename) list.files(paste0(name, "/data/"),
pattern=paste0("^",filename)))
Metadata$Filename <- sapply(Metadata$Filename, function(x) gsub(".cel", "", x, ignore.case = T))
Metadata$Series_sample_id <- Metadata$Filename
Metadata$Series_sample_id <- sapply(Metadata$Series_sample_id, function(x) strsplit(x, "\\.|_")[[1]][1])
names(Metadata)[grep("pmi", names(Metadata), ignore.case=TRUE)] <- "PMI"
names(Metadata)[grep("gender", names(Metadata), ignore.case=TRUE)] <- "Sex"
names(Metadata)[grep("ph", names(Metadata), ignore.case=TRUE)] <- "pH"
#Set up the region
Metadata$Region <- StanleyStudies$Region[match(sapply(Metadata$Study,
function(x) gsub("Study.[0-9]?", "", x)),
StanleyStudies$Investigator)]
#Set up the platform
Metadata$Platform <- StanleyStudies$ArrayType[match(sapply(Metadata$Study,
function(x) gsub("Study.[0-9]?", "", x)),
StanleyStudies$Investigator)]
Metadata$Platform <- sapply(Metadata$Platform, function(x) {
if(grepl("hgu133a", tolower(x))){
"GPL570"
} else if (grepl("hgu133p", tolower(x))){
"GPL96"
} else if (grepl("hgu95av2", tolower(x))){
"GPL8300"
} else {
NA
}
}, simplify=TRUE)
} else {
if(length(list.files(name, pattern="Metadata.tsv")) == 0){
if(length(list.files(path = paste0(StudyName,"/data"), pattern=".soft")) == 0){
softDown(StudyName, paste0(StudyName, "/data/",paste0(StudyName, ".soft")))
}
Metadata <- ReadSoft(paste0(name,"/data/", list.files(paste0(name,"/data/"), pattern=".soft")))
if(name == "GSE1138"){
names(Metadata)[grepl("stage.of.illness", names(Metadata))] <- "Stage_of_illness"
Metadata$Profile = sapply(Metadata$Stage_of_illness, function(x) strsplit(as.character(x), " ?- ?")[[1]][2])
Metadata$Stage_of_illness <- sapply(Metadata$Stage_of_illness, function(x) strsplit(as.character(x), " ?- ?")[[1]][1]) %>% factor
}
write.table(Metadata, paste0(name,"/Metadata.tsv"), sep="\t", row.names=FALSE)
}
Metadata <- read.table(paste0(name,"/Metadata.tsv"), sep="\t", header=T)
names(Metadata)[grep("pmi|mortem", names(Metadata), ignore.case=TRUE)] <- "PMI"
names(Metadata)[grep("ph$|tissue.ph", names(Metadata), ignore.case=TRUE)] <- "pH"
names(Metadata)[grep("^age$|age.*?death", names(Metadata), ignore.case=TRUE)] <- "Age"
names(Metadata)[grep("disease|Diagnosis|profile|phenotype", names(Metadata), ignore.case=TRUE)] <- "Profile"
names(Metadata)[grep("gender|sex", names(Metadata), ignore.case=TRUE)] <- "Sex"
names(Metadata)[grep("platform", names(Metadata), ignore.case = TRUE)] <- "Platform"
Metadata$Filename = Metadata$Series_sample_id
names(Metadata) <- sapply(names(Metadata), function(x) gsub(" ", "_", x))
}
if(groups & sum(grepl("disease|diagnosis|phenotype|profile", tolower(names(Metadata)))) == 0){
print("No group information, modify Metadata file")
} else if (groups & sum(grepl("disease|diagnosis|phenotype|profile", tolower(names(Metadata)))) > 0){
# Harmonize profile names
Metadata$Profile <- sapply(Metadata$Profile, function(x) {
if(grepl("bipolar", tolower(x))){
"BP"
} else if (grepl("control|unaffected|normal", tolower(x))){
"Cont"
} else if (grepl("schizo|scz", tolower(x))){
"SCZ"
} else if (grepl("depres", tolower(x))){
"MD"
} else {
NA
}
}, simplify=TRUE) %>% factor
#remove MD subjects
Metadata %<>% filter(!is.na(Profile), Profile != "MD") %>% droplevels()
Metadata <- Metadata[order(Metadata$Profile),]
} else {
Metadata$Profile <- "Cont"
}
Metadata <- GetCommonName(Metadata,char=c("age", "sex", "PMI", "ph"))
OrgRegion <- grep("(tissue|region|brain.?region)", names(Metadata),ignore.case = TRUE, value = T)
if(length(OrgRegion) == 0){
browser()
print("No brain region specified, modify Metadata file")
} else if(length(OrgRegion) > 1) {
print(paste0("Multiple columns define region (", paste0(OrgRegion, collapse = ","), ") , modify Metadata file and OrgRegion object"))
browser()
}
Metadata$OrgRegion <- Metadata %>%
select_(.dots = OrgRegion) %>% unlist
Metadata %<>% mutate(NeuExpRegion = OrgRegion)
Metadata$NeuExpRegion <- sapply(Metadata$NeuExpRegion, function(x) {
if (grepl("cerebe",tolower(x))){
"Cerebellum"
} else if (grepl("cortex|pfc|dlpfc|frontalba|^ba|gyrus|^an?cc$|^an?cg$",tolower(x))){
"Cortex"
} else if (grepl("hippocampus|hip|hpc",tolower(x))){
"Hippocampus"
} else if (grepl("thalamus",tolower(x))) {
"Thalamus"
} else if (grepl("str",tolower(x))){
"Striatum"
} else if (grepl("putamen",tolower(x))){
"Putamen"
} else if (grepl("nigra",tolower(x))){
"SubstantiaNigra"
} else if (grepl("brain",tolower(x))){
"Cortex"
} else {
NA
}
}, simplify=TRUE) %>% factor
if(all(Metadata[[OrgRegion]] %>% unique %in% c("brain", "Brain"))){
warning("No specific brain region specified, region was set to Cortex")
}
if(length(Metadata[[OrgRegion]] %>% unique) > length(Metadata$NeuExpRegion %>% unique)){
subCol = OrgRegion
}
#If there are replicates, change their names accordingly
repSamples <- sapply(unique(Metadata[[subCol]]), function(subdata){
subMeta <- Metadata %>% filter_(.dots = paste0(subCol, " =='", subdata, "'"))
RepSample <- subMeta$CommonName[!duplicated(subMeta$CommonName)]
DF <- subMeta %>% filter(CommonName %in% RepSample) %>% select(Series_sample_id, CommonName)
RepName <- sapply(as.character(unique(DF$CommonName)), function(SampName){
names <- subMeta %>% filter(CommonName == SampName) %>% select(Series_sample_id, CommonName)
cbind(as.character(names$Series_sample_id), paste0(unique(names$CommonName), "rep", seq_len(nrow(names))))
}, simplify=FALSE) %>% do.call(rbind, .) %>% data.frame()
DF$RepName <- RepName$X2[match(DF$Series_sample_id, RepName$X1)]
DF
}, simplify=FALSE)
names(repSamples) <- unique(Metadata[[subCol]])
Metadata$CommonName <- as.character(Metadata$CommonName)
for(region in names(repSamples)){
Metadata$CommonName[match(repSamples[[region]]$Series_sample_id,
Metadata$Series_sample_id)] <- as.character(repSamples[[region]]$RepName)
}
Metadata$CommonName <- as.factor(Metadata$CommonName)
Metadata[[name]] <- Metadata$Series_sample_id
return(Metadata)
}
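## ReadCell: read and annotate CEL files for the given platform (affy or
## oligo pipeline) and return the expression table plus scan dates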
ReadCell <- function(path, QC = 0, platform, CelFiles=list.files(paste0(name, "/data"))){ # QC = 1 runs arrayQualityMetrics (slow); QC = 0 skips it
Anno_file <- GetAnnoFiles(platform)
if(platform %in% c("GPL96", "GPL97", "GPL570", "GPL1352", "GPL1261", "GPL390")){
source(paste0(GenScriptPath,"Read_and_annotate_CELaffy.R"), local = T)
#Add Gene symbol and annotation
aned <- add_symbols(ned, Anno_file)
names(aned) <- sapply(names(aned), function(x) strsplit(x, "\\.")[[1]][1])
#Identify scanDate
scanDate <- protocolData(affydata)$ScanDate
scanDate <- sapply(scanDate, function(x) strsplit(x, " |T")[[1]][1])
names(scanDate) <- rownames(pData(affydata))
names(scanDate) <- sapply(names(scanDate), function(x) strsplit(x, "\\.")[[1]][1])
} else if (platform %in% c("GPL6244", "GPL5175", "GPL5188")) {
source(paste0(GenScriptPath,"Read_and_annotate_CELoligo.R"), local = T)
#Add Gene symbol and annotation
aned <- add_symbols(exp_value, Anno_file)
names(aned) <- sapply(names(aned), function(x) strsplit(x, "\\.|_")[[1]][1])
#Identify scanDate
scanDate <- pData(protocolData(data))$dates %>% as.character
scanDate <- sapply(scanDate, function(x) strsplit(x, "T| ")[[1]][1])
names(scanDate) <- rownames(pData(protocolData(data)))
names(scanDate) <- sapply(names(scanDate), function(x) strsplit(x, "\\.|_")[[1]][1])
}
aned[,-c(1:3)] <- apply(aned[,-c(1:3)], 2,
function(x) as.numeric(as.character(x)))
study <-list("aned" = aned, "scanDate" = scanDate)
return(study)
}
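## makeSet: build an ExpressionSet from the annotated expression table,
## matching metadata rows to expression columns and adding scan dates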
makeSet <- function(data, meta, name=NULL){
expData <- data$aned
#Add probeset names as rownames
if(is.null(rownames(expData))){
rownames(expData) <- expData$Probe
}
#Add the scan date to the meta file
meta$ScanDate <- data$scanDate[pmatch(meta$Series_sample_id, names(data$scanDate))]
studyName <- name
if(is.null(name)){
studyName <- readline("What is study name?")
}
index <- grep(studyName, names(meta), ignore.case=T) #get the relevant column for study sample names
meta <- meta[!is.na(meta[index]),]
names(meta) <- sapply(names(meta), function(x) strsplit(x, "\\.\\.")[[1]][1])
rownames(meta) <- as.character(meta$CommonName)
expr <- as.matrix(expData[,sapply(expData[1,], function(x) is.numeric(x))])
phenoData <- AnnotatedDataFrame(meta[match(colnames(expr), rownames(meta)),])
featureData <- AnnotatedDataFrame(expData[,c(1:3)])
eSetObj <- ExpressionSet(assayData=expr,
phenoData = phenoData, featureData=featureData )
return(eSetObj)
}
PreProcces <- function(eSet, study=NULL){
aned <- as.data.frame(cbind(pData(featureData(eSet)),exprs(eSet)))
Metadata <- pData(eSet)
source(paste0(GenScriptPath,"pre-proccess.R"), local=T)
output <- list(aned_high, aned_good, aned_low, MaxNoise,Z_scores_High,
exclude_samples_low, exclude_samples_high, exclude_samples, Metadata)
names(output) <- c("aned_high", "aned_good", "aned_low", "NoiseThreshold", "Z_scores_High",
"exclude_samples_low", "exclude_samples_high", "exclude_samples", "Metadata")
return(output)
}
datasGenerate <- function(genes, exp = "aned_good"){
datas <- lapply(sapply(names(studyFinal), function(s) studyFinal[[s]][[exp]],
simplify=FALSE),
function(x) subset(x, GeneSymbol %in% genes & Probe != "243712_at"))
names(datas) <- names(studyFinal)
return(datas)
}
HeatMapGen <- function(datas, Meta, path=NULL, save = 1){
out <- sapply(names(datas), function(x) {
if(nrow(datas[[x]]) > 1){
study <- x
x <- datas[[x]]
dummy <- data.frame("CommonName"=names(x)[sapply(x[1,], function(y) is.numeric(y))])
KmeanGen <- GeneSex(x, dummy )
KmeanGen$BioGender[KmeanGen$BioGender=="F"] <- "grey"; KmeanGen$BioGender[KmeanGen$BioGender=="M"] <- "black"
KmeanSex <- KmeanGen$BioGender
data <- t(x[sapply(x[1,], function(y) is.numeric(y))])
values <- data.frame(row.names=Meta$CommonName)
values[,1:ncol(data)] <- NA
values[match(rownames(data), rownames(values)),] <- data
MetaSex = as.character(Meta$Sex[match(rownames(data), Meta$CommonName)])
MetaSex[grepl("F|female|^wom", MetaSex, ignore.case=T)] <- "deeppink"
MetaSex[grepl("^M|^male|^man", MetaSex, ignore.case=T)] <- "darkslateblue"
Row_col = as.character(x$GeneSymbol)
AllSexGen <- data.frame(gene = unique(Row_col))
AllSexGen$sex <- sapply(unique(Row_col), function(gene){
if(grepl("KDM5D|RPS4Y1", gene)){
"(Male)"
} else if (grepl("XIST", gene)){
"(Female)"
} else{
NA
}
})
AllSexGen$color <- sapply(AllSexGen$gene, function(x){
switch(as.character(x),
"XIST" = "darkred",
"KDM5D" = "blue",
"RPS4Y1|RPS4Y2" = "darkblue",
"RPS4Y1" = "darkblue")
})
Row_col <- sapply(Row_col, function(x){
switch(x,
"XIST" = "darkred",
"KDM5D" = "blue",
"RPS4Y1|RPS4Y2" = "darkblue",
"RPS4Y1" = "darkblue")
})
Row_col <- t(as.matrix(Row_col))
Col_col <- cbind(MetaSex, KmeanSex)
colnames(Col_col) <- c("Metadata Gender", "Kmeans cluster")
rownames(Row_col) <- "Gene"
myPalette <- colorRampPalette(c("skyblue1", "#123766"))(99)
if(save==1){
pdf(paste0(path,study, "Gender.pdf"),width=20, height=15, useDingbats = FALSE)
cex1 = 2
cex2 = 2
cexCol = 2
cex.axis=3
} else if (save==2) {
cex1 = 0.8
cex2 = 1
cexCol=1
cex.axis=1.5
}
heatmap.2b(t(scale(data)),
dendrogram="none",Rowv="none", density.info="density",
ColSideColorsSize = 2, cexCol = cexCol,
col=myPalette, RowSideColors=Row_col,
ColSideColors = Col_col,
symbreaks=T, trace="none", symkey=T,
margins = c(10, 10), na.color="black",
key=T, keysize = 0.8, key.title = "", KeyValueName="Normalized expression",
na.rm=T, cexRow = 1.5 , cex.axis=cex.axis)
legend("left", y.intersp=2, x.intersp = 0.2, cex=cex1, bty="n",
legend=apply(AllSexGen, 1, function(x){
paste(x[1:2], collapse="\n")
}),
fill=AllSexGen$color)
legend("top", xjust=0.5,legend=c("Male\n(Metadata)", "Female\n(Metadata)", "","Male\n(cluster)", "Female\n(cluster)"),
fill=c("darkslateblue", "deeppink", "white", "black", "grey"), bty="n",
border=FALSE, x.intersp=0.8, cex=cex2, horiz=T)
if (save==1){
dev.off()
}
} else {
print("Only one sex gene, better use a plot")
}
})
names(out) <- names(datas)
}
HeatMapGen2 <- function(datas, Meta, missmatched, path=NULL, save = 1){
if(length(names(datas)) >1){
Combined <- lapply(datas, function(x) {
rownames(x) <- x$Probe
data <- t(x[sapply(x[1,], function(y) is.numeric(y))])
values <- data.frame(row.names=Meta$CommonName)
values[,1:ncol(data)] <- NA
values[match(rownames(data), rownames(values)),] <- data
colnames(values) <- colnames(data)
values
})
Features <- lapply(datas, function(x) {
Probes <- x[sapply(x[1,], function(y) !is.numeric(y))]
Probes
})
AllData <- do.call(cbind, args=Combined)
#Add region/study to colnames in case it is not there already
if(!grepl("\\.", names(AllData)[1])){
names(AllData) <- paste(names(datas), names(AllData), sep=".")
}
#Remove samples that were excluded in all of the datasets
SampleRM <- apply(AllData, 1, complete.cases) %>% t %>% rowSums
SampleRM <- names(SampleRM)[SampleRM == 0]
AllData <- subset(AllData, !rownames(AllData) %in% SampleRM)
Meta <- subset(Meta, !CommonName %in% SampleRM)
AllData <- AllData[order(Meta$Sex),]
AllFeatures <- unique(do.call(rbind, args=Features))
MetaSex = as.character(Meta$Sex[match(rownames(AllData), Meta$CommonName)])
MetaSex[MetaSex == "F"] <- "deeppink" ; MetaSex[MetaSex == "M"] <- "darkslateblue"
Probe_col <- sapply(names(AllData), function(x) strsplit(as.character(x), "\\.")[[1]][2])
Probe_col <- as.character(AllFeatures$GeneSymbol[match(Probe_col, AllFeatures$Probe)])
AllSexGen <- data.frame(gene = unique(Probe_col))
AllSexGen$sex <- sapply(unique(Probe_col), function(gene){
if(grepl("KDM5D|RPS4Y1", gene)){
"(Male)"
} else if (grepl("XIST", gene)){
"(Female)"
} else{
NA
}
})
AllSexGen$color <- sapply(AllSexGen$gene, function(x){
switch(as.character(x),
"XIST" = "darkred",
"KDM5D" = "blue",
"RPS4Y1|RPS4Y2" = "darkblue",
"RPS4Y1" = "darkblue")
})
Probe_col <- sapply(Probe_col, function(x){
switch(x,
"XIST" = "darkred",
"KDM5D" = "blue",
"RPS4Y1|RPS4Y2" = "darkblue",
"RPS4Y1" = "darkblue")
})
Study <- sapply(colnames(AllData), function(x)
strsplit(x, "\\.")[[1]][1])
Study_col <- c("palegreen4", "black", "palevioletred4", "steelblue", "aquamarine3")[as.factor(Study)]
Row_col <- rbind(Study_col, Probe_col)
Col_col <- as.matrix(MetaSex)
names(AllData) <- sapply(names(AllData), function(x) strsplit(x, "\\.")[[1]][2])
colnames(Col_col) <- "Metadata Gender"
rownames(Row_col) <- c("Study", "Gene")
myPalette <- colorRampPalette(c("skyblue1", "#123766"))(99)
SampleCol <- rep("grey", nrow(AllData))
StudyNum <- unique(tolower(sapply(Study, function(x) strsplit(x, "\\.")[[1]][1])))
sapply(StudyNum, function(x){
MM <- sapply(missmatched[[grep(x, names(missmatched), ignore.case=T)]],
function(subject) grep(paste0(subject,"$"), rownames(AllData)), simplify=T)
if(length(MM)>0){
studies <- unique(tolower(Study))
SampleCol[MM] <<- unique(Study_col)[grep(x, studies)]
}
})
if(save==1){
pdf(paste0(path,"CombinedGenderHeatmap.pdf"),width=20, height=15, useDingbats = FALSE)
cex1 = 2
cex2 = 2
cexCol = 2
cex.axis=3
} else if (save==2) {
cex1 = 0.8
cex2 = 1
cexCol=1
cex.axis=1.5
}
heatmap.2b(t(scale(AllData)), density.info="density",
dendrogram="none",Rowv="none", Colv="none",
colCol=SampleCol, colRow = "black",
RowSideColorsSize = 1.5,
cexCol=cexCol, cexRow=2.5,
col=myPalette, RowSideColors=Row_col,
ColSideColors = Col_col,
symbreaks=T, trace="none",
margins = c(12, 12), na.color="grey80",
key=T, keysize = 0.8, key.title = "", KeyValueName="Normalized expression",
na.rm=F, cex.axis=cex.axis)
legend("left", y.intersp=2, x.intersp = 0.2, cex=cex1, bty="n",
legend=apply(AllSexGen, 1, function(x){
paste(x[1:2], collapse="\n")
}),
fill=AllSexGen$color)
legend("bottomleft", y.intersp=2, x.intersp = 0.2, cex=cex1, bty="n",
legend=unique(sapply(Study,
function(x) {
name <- strsplit(x, "\\.")[[1]][1]
name <- strsplit(name, "_")[[1]]
if(length(name) > 1){
paste0(name[1],"\n",name[2])
} else{
name
}
})),
fill=unique(Study_col))
legend("top", xjust=0.5, legend=c("Female", "Male"),
fill=c("deeppink", "darkslateblue"), bty="n",
border=FALSE, x.intersp=0.8, cex=cex2, horiz=T)
if (save==1){
dev.off()
}
} else {
print("Only one dataset")
}
}
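## PlotAllStudies: for each cell type, collect subjects' first principal
## component across studies and draw ordered strip/box charts by group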
PlotAllStudies <- function(Cells=CellType_genes, data, Meta=MetaConsort, remove = "Marker", main=NULL) {
Cells[[remove]] <- NULL #remove the non-relevant list elements
Data <- lapply(data, function(x) x$modified)
PC_combined <- list()
for(i in names(Cells)){
print(i)
PC1 <- lapply(Data, function(dat){
pc_1 <- data.frame(row.names=Meta$CommonName)
pc_1[,1] <- NA
pc_1[match(rownames(dat[[i]]$x), as.character(Meta$CommonName)),1] <- dat[[i]]$x[,1]
pc_1
})
all_PCA <- as.data.frame(PC1)
colnames(all_PCA) <- c("study1", "study3", "study5", "study7")
exclude <- apply(all_PCA, 1, function(x) sum(is.na(x)))
all_PCA <- all_PCA[!exclude %in% c(3,4),]
if(nrow(all_PCA) > 0){
all_PCA <- all_PCA[order(apply(all_PCA,1, function(x) median(x, na.rm=T))),]
stripchart(as.data.frame(t(all_PCA)),
main=paste(strsplit(i, "_Genes")[[1]][1], main),
vertical=T,
col="white",
xaxt="none",
las=2)
boxplot(t(all_PCA[grep("BP", rownames(all_PCA)),]),
col="brown4",
at=grep("BP", rownames(all_PCA)),
add=T, xaxt="none",
yaxt="none")
boxplot(t(all_PCA[grep("Cont", rownames(all_PCA)),]),
col="burlywood",
add=T,
at=grep("Cont", rownames(all_PCA)),
xaxt="none",
yaxt="none")
boxplot(t(all_PCA[grep("SCZ", rownames(all_PCA)),]),
col="chartreuse4",
at=grep("SCZ", rownames(all_PCA)),
xaxt="none",
yaxt="none",
add=T)
stripchart(as.data.frame(t(all_PCA)),
cex=0.7,
pch=16,
vertical=T,
xaxt="none",
yaxt="none",
add=T)
axis(side=1, at=grep("SCZ", rownames(all_PCA)),
labels=rownames(all_PCA)[grep("SCZ", rownames(all_PCA))],
cex.axis=0.6, las=2, col.axis="chartreuse4")
axis(side=1, at=grep("BP", rownames(all_PCA)),
labels=rownames(all_PCA)[grep("BP", rownames(all_PCA))],
cex.axis=0.6, las=2, col.axis="brown4")
axis(side=1, at=which(rownames(all_PCA) %in% BP_II),
labels=paste("***", rownames(all_PCA)[which(rownames(all_PCA) %in% BP_II)]),
cex.axis=0.6, las=2, col.axis="brown4")
axis(side=1, at=grep("Cont", rownames(all_PCA)),
labels=rownames(all_PCA)[grep("Cont", rownames(all_PCA))],
cex.axis=0.6, las=2, col.axis="burlywood")
legend("bottomright", legend=c("Control","Bipolar", "Schizophrenia"), fill=c("burlywood", "brown4", "chartreuse4"), cex=1.5)
PC_combined[[i]] <- all_PCA
}
}
return(PC_combined)
}
FindRep <- function(Metadata, char){
names(Metadata) <- tolower(names(Metadata))
char <- tolower(char)
char <- char[char %in% names(Metadata)]
AllChar <- Metadata %>% select_(.dots=char)
Metadata$CharVec <- apply(AllChar, 1, function(x) paste0(x, collapse="_"))
RepTable <- table(Metadata$CharVec)
Replicates <- Metadata %>% filter(CharVec %in% names(RepTable[RepTable > 1])) %>% arrange(CharVec)
Unique <- Metadata %>% filter(!CharVec %in% names(RepTable[RepTable > 1]))
return(list(Replic = Replicates,
Unique = Unique))
}
GetCommonName <- function(Metadata, char=char){
SameSub <- FindRep(Metadata=Metadata, char=char)
UniqueName <- rbind(SameSub$Replic[!duplicated(SameSub$Replic$CharVec),],
SameSub$Unique) %>% arrange(profile)
UniqueName$CommonName <- UniqueName$profile
sampleSum <- table(UniqueName$CommonName)
UniqueName$CommonName <- melt(sapply(names(sampleSum), function(x) {
paste(x, seq_len(sum(UniqueName$CommonName == x)), sep="_")
}))$value
Metadata$CharVec <- sapply(Metadata$Filename,
function(x) rbind(SameSub$Replic, SameSub$Unique) %>%
filter(filename == x) %>%
.$CharVec) %>% unlist
Metadata$CommonName <- UniqueName$CommonName[match(Metadata$CharVec, UniqueName$CharVec)]
return(Metadata)
}
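## GeneMGPcombined: merge per-sample MGP estimates with expression of the
## requested genes; gene columns are suffixed with "_gene"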
GeneMGPcombined <- function(dataGenes, metaGenes, dataMGP, NameVarMGP="CommonName", GeneVar = "GeneSymbol", GeneList){
rownames(dataMGP) <- dataMGP[[NameVarMGP]] %>% as.character
GeneExp <- dataGenes %>% filter_(paste0(GeneVar, " %in% ", paste0("c(",paste0("'",GeneList,"'", collapse=","), ")")))
rownames(GeneExp) <- dataGenes[[GeneVar]][dataGenes[[GeneVar]] %in% GeneList] %>% as.character
rownames(GeneExp) <- paste0(rownames(GeneExp), "_gene")
GeneExp <- t(GeneExp[sapply(names(GeneExp), function(x) is.numeric(GeneExp[[x]]))])
dataCombined <- merge(dataMGP, GeneExp, by = "row.names")
return(dataCombined)
}
plotGeneMGPcor <- function(dataGenes, dataMGP, GeneList,
ListName = NULL,
grpVar = "Profile",CellVar=NULL,
CellExtend = "_Genes", CellName = NULL,
grpRef = "Cont",
groups=c("Cont", "BP", "SCZ")){
temp <- GeneMGPcombined(dataGenes = dataGenes, dataMGP = dataMGP , GeneList = GeneList)
GeneMGPcor <- sapply(groups, function(grp){
sapply(names(temp)[grepl("_gene", names(temp))], function(gene){
cor.test(formula(paste0("~", CellVar, "+", gene)), data=temp %>% filter_(paste0(grpVar,"=='", grp,"'")))$estimate
})
}) %>% data.frame
GeneMGPcor$GeneSymbol <- sapply(rownames(GeneMGPcor), function(x) strsplit(x, "_gene")[[1]][1])
GeneMGPcor %<>% arrange(desc(.[[grpRef]]))
GeneMGPcor <- melt(GeneMGPcor, id.vars="GeneSymbol", variable.name = grpVar, value.name="Cor")
GeneMGPcor$GeneSymbol <- factor(GeneMGPcor$GeneSymbol, levels = unique(GeneMGPcor$GeneSymbol))
grpColors = c("burlywood3", "cornflowerblue", "indianred4")
ggplot(GeneMGPcor, aes(x = GeneSymbol, y=Cor))+
theme_bw(base_size = 12) +
theme(axis.text.y = element_text(size = rel(0.8)),
axis.text.x = element_text(size = rel(0.8), angle=90),
panel.grid = element_blank()) +
labs(title = CellName, x = ListName, y=paste0("Correlation to ",CellName, " MGP"))+
scale_color_manual(values = grpColors, name="Group") +
geom_point(aes_string(color=grpVar), size=3, shape=16)
}
getCerebellumAstro <- function(data){
AstroGenes <- neuroExpressoAnalysis::mouseMarkerGenes$Cortex$Astrocyte
AstroHuman <- mouse2human(AstroGenes)$humanGene
BregmannGenes <- neuroExpressoAnalysis::mouseMarkerGenes$Cerebellum$Bergmann
BregmannHuman <- mouse2human(BregmannGenes)$humanGene
dataAstro <- data %>% filter(GeneSymbol %in% AstroHuman)
dataAstro$Mean <- apply(dataAstro[sapply(names(data), function(x) is.numeric(data[[x]]))], 1, mean)
dataAstro$Cell <- "Astrocyte"
dataBregmann <- data %>% filter(GeneSymbol %in% BregmannHuman)
dataBregmann$Mean <- apply(dataBregmann[sapply(names(data), function(x) is.numeric(data[[x]]))], 1, mean)
dataBregmann$Cell <- "BregmannGlia"
dataBoth <- rbind(dataAstro %>% select(GeneSymbol, Mean, Cell),
dataBregmann %>% select(GeneSymbol, Mean, Cell))
return(dataBoth)
}
PlotAllStudyOneGeneMGPcor <- function(exclGRP = "MD", gene, MGPname = "GabaPV_Genes"){
#get correlations
corStat <- sapply(ls(pat="^Cortex", .GlobalEnv), function(std){
study = eval(as.name(std))
exp <- study$aned_high[,!grepl(exclGRP , names(study$aned_high))]
meta <- study$Metadata %>% filter(Profile != exclGRP) %>% droplevels()
meta <- meta[match(names(exp)[-c(1:3)], meta$CommonName),]
exp <- exp %>% filter(GeneSymbol == gene) %>% unlist %>% .[-c(1:3)] %>% as.numeric
corStat = cor(exp, meta[[MGPname]], method="spearman", use="complete.obs") %>% round(digits=2)
paste0("rho = ", corStat)
}, simplify=FALSE)
#combine all studies
allStudyCor <- sapply(ls(pat="^Cortex", .GlobalEnv), function(std){
study = eval(as.name(std))
exp <- study$aned_high[,!grepl(exclGRP, names(study$aned_high))]
meta <- study$Metadata %>% filter(Profile != exclGRP) %>% droplevels() %>%
select_(.dots = c("CommonName", "Profile", MGPname))
exp <- exp %>% filter(GeneSymbol == gene) %>% .[,-c(1:3)] %>% t
colnames(exp) <- gene
data <- merge(meta, exp, by.x="CommonName", by.y="row.names")
}, simplify=FALSE) %>% do.call(rbind, .)
allStudyCor$Study <- gsub("Cortex|\\..*", "", rownames(allStudyCor)) %>% as.factor
#plot
p <- ggplot(allStudyCor, aes_string(gene, MGPname, color = "Profile"))
MGPname2 = gsub("_Genes", "", MGPname)
txtSize = 12
plot <- p + labs(x = paste0(gene, " expression (log2)"), y=paste0(MGPname2, " relative MGP"), fill="Profile")+
theme_grey(base_size = txtSize) +
theme(axis.text.y = element_text(size = rel(0.8)),
axis.text.x = element_text(size = rel(1.2)),
panel.grid = element_blank()) +
scale_color_manual(values = c("burlywood3", "cornflowerblue", "indianred4")) +
geom_point(pch=16) +
facet_wrap(~Study) +
annotate("text", label = unlist(corStat), size = 0.25*txtSize, x = 6.5, y = 0.9)
ggsave(paste0(gene, "_", MGPname2, ".pdf"), width = 8, height = 6, units = "in", plot = plot, dpi=300)
}
CreateMGPcellTypeDF <- function(cellData, cellMeta, MGPcorGenes, sampleRegex = "GSM",
cellTypeMetaVar = "PyramidalDeep",
idVars = c("Probe", "Gene.Symbol", "GeneNames"),title = NULL){
dataMGPcorGenes <- sapply(names(MGPcorGenes), function(cellType){
cellGenes <- na.omit(MGPcorGenes[[cellType]]) %>% as.vector
data <- cellData %>% filter(Gene.Symbol %in% cellGenes)
data
}, simplify = FALSE) %>% rbindlist(use.names = TRUE, idcol = "GeneType")
#Add information regarding whether a gene is a marker
dataMGPcorGenes$Marker <- NA
for(i in 1:nrow(dataMGPcorGenes)){
gene = dataMGPcorGenes$Gene.Symbol[i]
mgp = dataMGPcorGenes[i,] %>% .$GeneType %>% gsub("_MGP", "", .)
if(gene %in% markerGenes[[mgp]]){
dataMGPcorGenes$Marker[i] <- "YES"
} else {
dataMGPcorGenes$Marker[i] <- "NO"
}
}
#Normalize signals 0-1
dataMGPcorGenesNorm <- apply(dataMGPcorGenes %>% select(matches(sampleRegex)), 1, function(gene){
rescale(gene, c(0,1))
}) %>% t
dataMGPcorGenesNorm <- cbind(dataMGPcorGenes %>% select(-matches(sampleRegex)), dataMGPcorGenesNorm)
dataMGPcorGenesNorm$Gene.Symbol <- factor(dataMGPcorGenes$Gene.Symbol,
levels = unique(dataMGPcorGenes$Gene.Symbol))
dataMGPcorGenesMelt <- melt(dataMGPcorGenesNorm, id.vars = c("GeneType", idVars, "Marker"),
variable.name = "SampleName", value.name = "Exp")
dataMGPcorGenesMelt$CellType <- factor(cellMeta[[cellTypeMetaVar]][match(dataMGPcorGenesMelt$SampleName, cellMeta$sampleNameCol)],
levels = c("Astrocyte", "Microglia", "Oligo", "GabaPV", "GabaVIPReln",
"Pyramidal_S100a10", "PyramidalCorticoThalam"))
dataMGPcorGenesMelt %<>% filter(!CellType %in% c("GabaRelnCalb", "Pyramidal_Glt_25d2", "Pyramidal_Thy1",
"Microglia_activation_MGP", "Microglia_deactivation_MGP")) %>% droplevels()
dataMGPcorGenesMelt$GeneType <- factor(dataMGPcorGenesMelt$GeneType, levels = dataMGPcorGenesMelt$GeneType %>% unique())
return(dataMGPcorGenesMelt)
}
PlotMGPcellTypes <- function(data, title=NULL, ylab="Normalized expression",
size=12, width=0.1, save = FALSE, path = NULL,
CellColors = c("goldenrod1", "grey", "darkolivegreen4",
"firebrick1", "firebrick", "dodgerblue3",
"deepskyblue2")){
GeneNum <- group_by(data %>% filter(SampleName==levels(data$SampleName)[1]), GeneType) %>% summarise(n = n()) %>% data.frame
data$GeneType2 <- sapply(data$GeneType, function(genetype){
num = GeneNum %>% filter(GeneType == genetype) %>% .$n
paste0("Top correlated genes - ", genetype, " (", num, ")")
}) %>% factor(levels = unique(.))
plot <- ggplot(data, aes(CellType, Exp)) +
labs(title = title, y=ylab) +
theme_bw(base_size = size) +
theme(axis.text.x = element_text(angle = 40,hjust = 1),
panel.grid = element_blank()) +
scale_fill_manual(values = CellColors) +
geom_violin(alpha=0.8, aes(fill = CellType)) +
geom_boxplot(width=width, outlier.size = 0) +
facet_wrap(~GeneType2, nrow = length(levels(data$CellType)))
if(!save){
print(plot)
} else {
ggsave(paste0(path, "/MGPcellTypes_", title, ".pdf"),
plot = plot, width = 12, height = 8, units = "in",
dpi=300)
}
}
|
/ProjectScripts/projectFunc.R
|
no_license
|
ltoker/CellularChangesPsychiatry
|
R
| false
| false
| 33,905
|
r
|
## File Name: rasch.mml2.R
## File Version: 7.4619
# Semiparametric Maximum Likelihood Estimation in the Rasch type Model
# item discrimination and guessing parameter can be fixed
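## Hypothetical usage sketch (assumes the sirt package and its bundled
## data.read example; for illustration only, not part of this file):
## mod <- rasch.mml2(dat=data.read)
## summary(mod)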
rasch.mml2 <- function( dat, theta.k=seq(-6,6,len=21), group=NULL, weights=NULL,
constraints=NULL, glob.conv=10^(-5), parm.conv=10^(-4), mitermax=4,
mmliter=1000, progress=TRUE, fixed.a=rep(1,ncol(dat)),
fixed.c=rep(0,ncol(dat)), fixed.d=rep(1,ncol(dat)), fixed.K=rep(3,ncol(dat)),
b.init=NULL, est.a=NULL, est.b=NULL, est.c=NULL, est.d=NULL, min.b=-99,
max.b=99, min.a=-99, max.a=99, min.c=0, max.c=1, min.d=0, max.d=1,
prior.b=NULL, prior.a=NULL, prior.c=NULL, prior.d=NULL, est.K=NULL,
min.K=1, max.K=20, beta.init=NULL, min.beta=-8, pid=1:(nrow(dat)),
trait.weights=NULL, center.trait=TRUE, center.b=FALSE, alpha1=0, alpha2=0,
est.alpha=FALSE, equal.alpha=FALSE, designmatrix=NULL, alpha.conv=parm.conv,
numdiff.parm=0.00001, numdiff.alpha.parm=numdiff.parm,
distribution.trait="normal", Qmatrix=NULL,
variance.fixed=NULL, variance.init=NULL,
mu.fixed=cbind(seq(1,ncol(Qmatrix)),rep(0,ncol(Qmatrix))),
irtmodel="raschtype", npformula=NULL, npirt.monotone=TRUE,
use.freqpatt=is.null(group), delta.miss=0, est.delta=rep(NA,ncol(dat)),
... )
{
# specifications
conv1 <- parm.conv
nplausible <- 5
dat <- as.matrix(dat)
adaptive.quadrature <- FALSE
CALL <- match.call()
# a0 <- Sys.time()
# models
npirt <- ramsay.qm <- FALSE
I <- ncol(dat)
# if ( esttype=="pseudoll" ){
# if ( is.null( group) ){
# group <- rep(1,nrow(dat) )
# }
# }
# m1r <- FALSE
# if (irtmodel=="missing1r"){
# m1r <- TRUE
# irtmodel <- "missing1"
# }
if ( irtmodel=="ramsay.qm" ){
ramsay.qm <- TRUE
kG <- NULL
}
if ( irtmodel=="npirt" ){
npirt <- TRUE
I <- ncol(dat)
if ( ! is.null(npformula) ){
if ( length( npformula)==1 ){
npformula <- rep( npformula, I )
}
npformula0 <- npformula
npformula <- list( 1:I)
for (ii in 1:I){
npformula[[ii]] <- stats::as.formula( npformula0[ii] )
}
}
npmodel <- list(1:I)
}
D <- 1
if (irtmodel=="missing1"){
D <- 2
theta.k <- expand.grid( theta.k, theta.k )
dat[ dat==9 ] <- 2
dat.resp <- 1 - is.na(dat)
# init b parameters
b.init <- - stats::qlogis( colMeans( dat==1, na.rm=TRUE ) )
# init beta parameters
if ( is.null(beta.init) ){
beta <- stats::qlogis( colMeans( dat==2, na.rm=TRUE ) + 1E-3 )
} else {
beta <- beta.init
}
}
# multidimensional model
if ( ! is.null( Qmatrix ) ){
D <- ncol(Qmatrix)
# build the D-dimensional quadrature grid as the D-fold product of theta.k
if ( D > 1 ){ theta.k <- expand.grid( rep( list(theta.k), D ) ) }
# Qmatrix <- sapply( 1:max(item.dim), FUN=function(dd){
# 1*(item.dim==dd) } )
}
if (is.data.frame(theta.k)){
theta.k <- as.matrix(theta.k)
}
if (D > 1){
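# identification: when item discriminations are estimated, fix all trait
# variances to 1 (unless the user supplied variance.fixed)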
if ( is.null(variance.fixed) & ( sum(est.a) > 0) ){
variance.fixed <- as.matrix( cbind( 1:D, 1:D, 1 ) )
}
}
Sigma.cov <- diag(D)
if ( ! is.null(variance.init) ){
Sigma.cov <- variance.init
}
mu <- rep(0,D)
# cat("114") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
# ramsay.qm <- FALSE
if ( ! ramsay.qm) { raschtype <- TRUE }
if (ramsay.qm | npirt ){
raschtype <- FALSE
# no alpha, a, c or d parameters can be estimated
est.alpha <- FALSE
est.a <- est.c <- est.d <- NULL
pow.qm <- 1 # This parameter is ignored in analyses
}
# computation time
s1 <- Sys.time()
if (est.alpha){
if (is.null(alpha1) ){ alpha1 <- 0 }
if (is.null(alpha2) ){ alpha2 <- 0 }
}
#*** some data checks
ag1 <- NULL
if( max( colMeans( is.na( dat ) ) )==1 ){
stop("Remove items which have no observations!")
}
if ( ! is.null(group) ){
t1 <- table(sort(group) )
group.orig <- group
group <- match( group.orig, sort(unique( group.orig)) )
ag1 <- stats::aggregate( group, list( group.orig), mean )
colnames(ag1) <- c("group", "groupindex" )
}
# center trait: if there exists constraints, then do not center
if ( is.null( colnames(dat) ) ){
colnames(dat) <- paste( "I", 1:ncol(dat), sep="")
}
if ( ! is.null( constraints ) ){
center.trait <- FALSE
if( ! is.numeric( constraints[,1] ) ){
constraints[,1] <- match( paste(constraints[,1]), colnames(dat) )
}
constraints <- na.omit(constraints)
constraints <- constraints[ constraints[,1] <=ncol(dat),, drop=FALSE]
}
if ( ! is.null( designmatrix) ){
if ( ncol(dat) !=nrow(designmatrix) ){
stop( "Row dimension of designmatrix should be equal to number of items")
}
}
# est.b parameters
if ( ! is.null(est.b) ){
# bG <- unique( est.b )
bG <- unique( setdiff( est.b,0 ))
if ( is.null( b.init) ){
b <- rep(0, I ) } else { b <- b.init }
designmatrix <- matrix( 0, ncol(dat), length(bG) )
for (bb in bG){
# bb <- bG[1]
# designmatrix[ which( est.b==bb ), bb ] <- 1
designmatrix[ which( est.b==bb ), match(bb,bG) ] <- 1
}
}
# set starting values for estimated c and d parameters
if ( ( sum(est.c) > 0 ) & is.null(fixed.c) ){
fixed.c[ est.c > 0 ] <- .10
}
if ( ( sum(est.d) > 0 ) & is.null(fixed.d) ){
fixed.d[ est.d > 0 ] <- .95
}
#****************************************************************************************
WLE <- FALSE
pure.rasch <- -9 # retained only for historical reasons of this program
# specify weights
if ( is.null(weights) ){ weights <- rep( 1, nrow(dat) ) }
# display
if ( progress & ( npirt ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Nonparametric IRT Model (Rossi, Wang & Ramsay, 2002) \n")
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & ( irtmodel=="missing1" ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Missing Data Item Response Model (Mislevy & Wu, 1996) \n")
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & ( ramsay.qm ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Quotient Model (Ramsay, 1989) \n")
# if (normal.trait){ cat("Normal trait distribution \n") } else { cat("Nonparametric trait distribution \n") }
# if (ramsay.qm){ cat("Log Normal Distribution of Theta with Power of", pow.qm, "\n") }
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & (raschtype) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
if ( est.alpha ){
cat(paste( "Raschtype Model with generalized logistic link function: Estimation of alpha1 and alpha2 \n") )
} else {
cat(paste( "Raschtype Model with generalized logistic link function: alpha1=",alpha1, ", alpha2=", alpha2, " \n") )
}
if ( sum(est.c) > 0){ cat(paste( "Estimated guessing parameter groups \n") )} ## estimated guessing parameters
if ( sum(est.d) > 0){ cat(paste( "Estimated slipping parameter groups \n") )} ## estimated slipping parameters
cat("------------------------------------------------------------\n")
flush.console()
}
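# Item response function of the Rasch-type model with generalized logistic
# link (cf. Stukel, 1988):
#   P( X_ij=1 | theta ) = c_j + ( d_j - c_j ) * Psi_{alpha1,alpha2}( a_j*( theta - b_j ) )
# alpha1=alpha2=0 gives the ordinary logistic link, i.e. the Rasch model
# when a_j=1, c_j=0, d_j=1.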
# revise guessing parameter (if necessary)
if ( !is.null(fixed.c) ){
# calculate item means
itemmean <- colMeans( dat, na.rm=TRUE )
if ( any( itemmean < fixed.c) ){
cat ( "revise fixed guessing estimates\n")
fixed.c[ itemmean < fixed.c] <- 0
}
}
# data preparations
if ( ! is.null(group) ){
use.freqpatt <- FALSE
}
if ( irtmodel !="missing1" ){
dp <- .data.prep( dat, weights=weights, use.freqpatt=use.freqpatt)
dat1 <- dp$dat1
dat2 <- dp$dat2
dat2.resp <- dp$dat2.resp
freq.patt <- dp$freq.patt
n <- dp$n
I <- dp$I
}
se.delta <- NULL
if ( irtmodel=="missing1" ){
# dat1 <- dp$dat # frequency patterns
dat1 <- as.data.frame( cbind( "P", weights ) )
for (ii in 1:I){
l1 <- dat[,ii]
l1 <- ifelse ( dat.resp[,ii]==0, 9, l1 )
dat1[, 1 ] <- paste0( dat1[,1], l1 )
}
colnames(dat1) <- c("pattern","Freq")
dat1 <- as.data.frame(dat1)
freq.patt <- dat1$pattern
dat1$Freq <- weights
dat1$mean <- rowMeans( dat==1 )
dat2 <- dat
dat2.resp <- dat.resp
n <- nrow(dat2)
I <- ncol(dat2)
}
#*** pseudolikelihood estimation?
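# responses strictly between 0 and 1 (fractional responses, e.g. from
# multiple imputation) switch estimation to a pseudo-likelihood criterion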
fracresp <- "pseudoll"
pseudoll <- 0
i1 <- sum( ( dat2 > 0 ) * ( dat2 < 1), na.rm=TRUE )
if (i1 > 10E-10 ){
if ( fracresp=="pseudoll"){
pseudoll <- 1
}
if ( fracresp=="fuzzyll"){
pseudoll <- 2
}
if ( is.null(group) ){
group <- rep( 1, nrow(dat2) )
}
}
# probability weights at theta.k
if (D==1){
pi.k <- sirt_dnorm_discrete( x=theta.k )
}
if (D > 1){
pi.k <- sirt_dmvnorm_discrete( x=theta.k, mean=rep(0,D), sigma=Sigma.cov )
}
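# pi.k holds the prior trait distribution evaluated on the discrete grid
# theta.k: the (multivariate) normal density discretized so that the
# weights sum to 1 within each group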
G <- 1
pi.k <- matrix( pi.k, nrow=length(pi.k), ncol=G )
# group calculations
if ( !is.null( group )){
G <- length( unique( group ) )
pi.k0 <- pi.k
pi.k <- matrix( 0, nrow=length(pi.k0), ncol=G)
for (gg in 1:G){
pi.k[,gg] <- pi.k0
}
}
sd.trait <- mean.trait <- rep(0,G)
# initial estimates for item difficulties
if ( is.null(b.init) & is.null(est.b) ){
b <- - stats::qlogis( colMeans( dat, na.rm=T ) )
if ( FALSE ){
# if ( ramsay.qm ){
b <- - log( ( fixed.K * colSums( dat, na.rm=TRUE ) ) /
( colSums( 1 - dat, na.rm=TRUE ) ) )
}
}
if ( (!is.null(b.init) ) & is.null(est.b) ){
b <- b.init
}
if ( G==1 ){ group <- rep(1, nrow(dat1)) }
# missing data indicators
ind.ii.list <- list(1:I)
for (ii in 1:I){
ind.ii.list[[ii]] <- which( dat2.resp[,ii]==1 )
}
mean.trait <- rep(0,G)
sd.trait <- rep(1,G)
# initial iteration index
iter <- 0
par.change <- dev.change <- 3
dev <- 99
apmax <- 0
maxalphachange <- 1
# display
disp <- "...........................................................\n"
# old_increment.d <- old_increment.c <- rep( .2, I )
if( sum( est.d ) > 0 ){
old_increment.d <- rep( .2, length( unique( est.d[ est.d > 0 ] ) ) )
}
if( sum( est.c ) > 0 ){
old_increment.c <- rep( .2, length( unique( est.c[ est.c > 0 ] ) ) )
}
old_increment_b <- rep( 2, I )
h <- numdiff.parm
# initialize standard errors
se.alpha <- se.K <- se.b <- se.a <- se.c <- se.d <- NULL
# Ramsay QM
# if ( irtmodel=="ramsay.qm" ){ normal.trait <- TRUE }
#****
# preparations npirt and npformula
ICC_model_matrix <- NULL
if ( ( npirt) & ( !is.null(npformula) ) ){
for (ii in 1:I){
dfr1 <- data.frame( "theta"=theta.k, "y"=1, "wgt"=NA )
dfr0 <- data.frame( "theta"=theta.k, "y"=0, "wgt"=NA )
dafr <- data.frame( rbind( dfr0, dfr1 ) )
theta <- dafr$theta # the column in dafr is named 'theta'
ICC_model_matrix[[ii]] <- stats::model.matrix( npformula[[ii]], dafr )
}
}
# inits theta.k
theta.k0 <- as.matrix(theta.k)
dat2 <- as.matrix(dat2)
dat2.resp <- as.matrix(dat2.resp)
# inits probs
if ( irtmodel=="missing1"){
TP <- nrow( theta.k)
CC <- 3
pjk <- array( 0, dim=c(I,CC,TP ) )
# if ( is.null(group) ){
group_ <- rep(0,nrow(dat2) )
# } else {
# group_ <- group
# }
raschtype <- FALSE
G <- length(unique(group))
}
#***** module missing1
# if ( ! is.null( est.delta ) ){
est_delta <- sum( 1-is.na(est.delta) ) > 0
if ( center.b & is.null(Qmatrix) ){
Qmatrix <- matrix( 1, nrow=I, ncol=1)
theta.k <- matrix( theta.k, ncol=1 )
center.trait <- FALSE
}
#-- indicators of estimated parameters
est_parameters <- list( a=sum(est.a)>0, c=sum(est.c)>0, d=sum(est.d)>0)
#******************************************************
#*************** MML Iteration Algorithm **************
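# EM algorithm: the E-step computes expected counts given current parameters
# ( n.jk=expected number of persons at theta node k for item j,
#   r.jk=expected number of correct responses ); the M-step updates item
# parameters and the trait distribution; iteration stops when deviance and
# parameter changes fall below glob.conv / parm.conv, or at mmliter.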
while ( ( dev.change > glob.conv | par.change > conv1 | maxalphachange > alpha.conv ) & iter < mmliter ){
if (progress){
cat(disp)
cat("Iteration", iter+1, " ", paste( Sys.time() ), "\n" )
utils::flush.console()
}
zz0 <- Sys.time()
b0 <- b
if ( irtmodel=="missing1" ){ beta0 <- beta }
dev0 <- dev
#-------------- E-step --------------
if ( irtmodel=="missing1"){
e1 <- .e.step.missing1( dat2, dat2.resp, theta.k, b, beta, delta.miss, I, CC,
TP, group_, pi.k, pjk, weights )
n.ik <- e1$n.ik
e1$ll <- e1$LL
}
if ( ramsay.qm ){
e1 <- .e.step.ramsay( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.K, group, pow.qm=pow.qm, ind.ii.list )
}
if (raschtype & D==1){
e1 <- rasch_mml2_estep_raschtype( dat1=dat1, dat2=dat2, dat2.resp=dat2.resp,
theta.k=theta.k, pi.k=pi.k, I=I, n=n, b=b, fixed.a=fixed.a, fixed.c=fixed.c,
fixed.d=fixed.d, alpha1=alpha1, alpha2=alpha2, group=group, pseudoll=pseudoll )
}
if (raschtype & D>1){
e1 <- .e.step.raschtype.mirt( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.a, fixed.c, fixed.d, alpha1, alpha2, group,
mu, Sigma.cov, Qmatrix, pseudoll)
}
if (npirt){
if (iter==0){
pjk <- stats::plogis( outer( theta.k, b, "-" ) )
}
e1 <- .e.step.ramsay( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.K, group, pow.qm=pow.qm, ind.ii.list,
pjk=pjk )
}
n.k <- e1$n.k
n.jk <- e1$n.jk
r.jk <- e1$r.jk
pjk <- e1$pjk
f.qk.yi <- e1$f.qk.yi
f.yi.qk <- e1$f.yi.qk
dev <- -2*e1$ll
# cat("e step") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#-------------- M-step --------------
# Ramsay QM
if ( ramsay.qm ){
m1 <- .m.step.ramsay( theta.k, b, n.k, n, n.jk, r.jk, I,
conv1, constraints,
mitermax, pure.rasch, trait.weights, fixed.K,
designmatrix=designmatrix, group=group,
numdiff.parm=numdiff.parm, pow.qm=pow.qm )
se.b <- m1$se.b
}
# generalized Rasch type model
if (raschtype){
m1 <- rasch_mml2_mstep_raschtype( theta.k=theta.k, b=b, n.k=n.k, n=n, n.jk=n.jk,
r.jk=r.jk, pi.k=pi.k, I=I, conv1=conv1, constraints=constraints,
mitermax=mitermax, pure.rasch=pure.rasch, trait.weights=trait.weights,
fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d, alpha1=alpha1,
alpha2=alpha2, designmatrix=designmatrix, group=group,
numdiff.parm=numdiff.parm, Qmatrix=Qmatrix, old_increment=old_increment_b,
est.b=est.b, center.b=center.b, min.b=min.b, max.b=max.b,
prior.b=prior.b)
se.b <- m1$se.b
}
# nonparametric IRT model
if (npirt ){
pjk0 <- pjk
res <- .mstep.mml.npirt( pjk, r.jk, n.jk, theta.k,
npformula, npmodel, G, I, npirt.monotone,
ICC_model_matrix )
pjk <- res$pjk
npmodel <- res$npmodel
apmax <- max( pi.k[,1]*abs( pjk - pjk0)/.40 )
m1 <- list( "b"=b, "G"=G, "pi.k"=pi.k, "center"=FALSE )
}
# missing data IRT model
if ( irtmodel=="missing1" ){
m1 <- .mstep.mml.missing1( theta.k, n.ik, mitermax, conv1,
b, beta, delta.miss, pjk, numdiff.parm,
constraints, est.delta, min.beta=min.beta, est_delta )
b <- m1$b
se.b <- m1$se.b
beta <- m1$beta
se.beta <- m1$se.beta
delta.miss <- m1$delta.miss
se.delta <- m1$se.delta
m1$dev <- dev
a1beta <- max( abs( beta - beta0 ) )
}
# cat("m step") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#***************************************
# update mean and covariance in multidimensional models
if ( D > 1){
theta.k <- as.matrix(theta.k)
# delta.theta <- (theta.k[2,1] - theta.k[1,1])^D
delta.theta <- 1
hwt <- e1$f.qk.yi
hwt <- hwt / rowSums(hwt)
thetabar <- hwt%*%theta.k
# calculation of mu
mu <- colSums( thetabar * dat1$Freq ) / sum( dat1$Freq )
if ( ! is.null(mu.fixed ) ){
if (is.matrix(mu.fixed) ){
mu0 <- mu
mu[ mu.fixed[,1] ] <- mu.fixed[,2]
if ( ( sum( as.vector(mu.fixed[1,1:2]) - c(1,0))==0 ) &
( nrow(mu.fixed)==1 ) ){
mu[-1] <- -mu0[1] + mu[-1]
}
}
# if ( mu.fixed=="center"){
# mu <- mu - mean(mu)
# }
}
# calculation of the covariance matrix
theta.k.adj <- theta.k - matrix( mu, nrow=nrow(theta.k),
ncol=ncol(theta.k), byrow=TRUE)
for (dd1 in 1:D){
for (dd2 in dd1:D){
tk <- theta.k.adj[,dd1]*theta.k.adj[,dd2]
h1 <- dat1$Freq * ( hwt %*% tk ) * delta.theta
Sigma.cov[dd1,dd2] <- sum( h1 ) / sum( dat1$Freq )
if (dd1 < dd2 ){ Sigma.cov[dd2,dd1] <- Sigma.cov[dd1,dd2] }
}
}
if ( ! is.null(variance.fixed ) ){
Sigma.cov[ variance.fixed[,1:2,drop=FALSE] ] <- variance.fixed[,3]
Sigma.cov[ variance.fixed[,c(2,1),drop=FALSE] ] <- variance.fixed[,3]
}
diag(Sigma.cov) <- diag(Sigma.cov) + 10^(-10)
# if (m1r){
# d11 <- sqrt( Sigma.cov[1,1]*Sigma.cov[2,2] )- .001
# Sigma.cov[2,1] <- Sigma.cov[1,2] <- d11
# }
# adaptive estimation
if ( adaptive.quadrature ){
theta.k <- mu + theta.k0 %*% chol(Sigma.cov)
}
pi.k <- sirt_dmvnorm_discrete( theta.k, mean=mu, sigma=Sigma.cov, as_matrix=TRUE )
m1$pi.k <- pi.k
}
# end MIRT
#*****
b <- m1$b
# distribution
G <- m1$G
pi.k <- m1$pi.k
if (!is.null( trait.weights) ){
pi.k <- matrix( trait.weights, ncol=1 )
}
#****************************************************
# latent ability distribution
if (distribution.trait=="normal" & D==1){
delta.theta <- 1
# delta.theta <- theta.k[2] - theta.k[1]
# sd.trait <- mean.trait <- rep(0,G)
h <- .0001
for (gg in 1:G){
pi.k0 <- pi.k
f.yi.qk.gg <- e1$f.yi.qk[group==gg,]
dat1.gg <- dat1[group==gg,2]
X1 <- rep(1,nrow(f.yi.qk.gg) )
if ( gg > 1 | ( ! center.trait ) ){
#*********************************
# mean estimation
d.change <- .est.mean( dat1.gg, f.yi.qk.gg, X1, pi.k, pi.k0, gg,
mean.trait, sd.trait, theta.k, h)
mean.trait[gg] <- mean.trait[gg] + d.change
pi.k[,gg] <- sirt_dnorm_discrete( theta.k, mean=mean.trait[gg], sd=sd.trait[gg] )
}
if (center.trait){ mean.trait[1] <- 0 }
#*********************************
# SD estimation
if ( ( gg > 1 ) | ( sum(est.a)==0 ) ){
d.change <- .est.sd( dat1.gg, f.yi.qk.gg, X1, pi.k, pi.k0, gg,
mean.trait, sd.trait, theta.k, h )
sd.trait[gg] <- sd.trait[gg] + d.change
}
if ( ( ! is.null(est.a) ) | ( irtmodel=="npirt" ) ){
sd.trait[1] <- 1
}
pi.k[,gg] <- sirt_dnorm_discrete( theta.k, mean=mean.trait[gg], sd=sd.trait[gg] )
}
} # end normal distribution
#######################################
if (distribution.trait!="normal" & D==1){
for (gg in 1:G){
pik1 <- n.k[,gg] / sum(n.k[,gg] )
pik1 <- pik1 + 10e-10
lpik1 <- log( pik1 )
tk <- theta.k
if ( distribution.trait=="smooth2"){
formula1 <- lpik1 ~ tk + I(tk^2)
}
if ( distribution.trait=="smooth3"){
formula1 <- lpik1 ~ tk + I(tk^2) + I(tk^3)
}
if ( distribution.trait=="smooth4"){
formula1 <- lpik1 ~ tk + I(tk^2) + I(tk^3)+I(tk^4)
}
mod <- stats::lm( formula1, weights=pik1 )
pik2 <- exp( stats::fitted(mod))
pi.k[,gg] <- pik2 / sum(pik2)
if (center.trait & gg==1){
mmm1 <- stats::weighted.mean( theta.k, pik2 )
theta.k <- theta.k - mmm1
}
if ( ( ! is.null(est.a) ) | ( irtmodel=="npirt" ) ){
if (gg==1){
sd1 <- sqrt( sum( theta.k^2 * pi.k[,1] ) - sum( theta.k * pi.k[,1] )^2 )
theta.k <- theta.k / sd1
}
}
}
} # end non-normal distribution
# cat("trait distribution estimation") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#---- estimation of alpha, c and d parameters
alpha.change <- 0
maxalphachange <- 0
a1a <- a1b <- 0
a1K <- a1c <- 0
#--- estimation of a parameters
if ( sum(est.a) > 0 & raschtype ){
fixed.a0 <- fixed.a
aG <- setdiff(unique( est.a ), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.a, min_val=min.a,
max_val=max.a, iter=iter, old_increment=.3,
Qmatrix=Qmatrix, parameter="a", prior=prior.a)
fixed.a <- res$parm
se.a <- res$se
a1a <- max( abs( fixed.a - fixed.a0 ) )
}
#--- estimation of c parameter
if ( sum(est.c) > 0 & raschtype ){
fixed.c0 <- fixed.c
cG <- setdiff( unique(est.c), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.c, min_val=min.c,
max_val=max.c, iter=iter, old_increment=old_increment.c,
Qmatrix=Qmatrix, parameter="c", prior=prior.c)
fixed.c <- res$parm
se.c <- res$se
a1b <- max( abs( fixed.c - fixed.c0 ) )
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# estimation of d parameters
if ( sum( est.d ) > 0 & raschtype ){
fixed.d0 <- fixed.d
dG <- setdiff( unique( est.d ), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.d, min_val=min.d,
max_val=max.d, iter=iter, old_increment=old_increment.d,
Qmatrix=Qmatrix, parameter="d", prior=prior.d)
fixed.d <- res$parm
se.d <- res$se
a1c <- max( abs( fixed.d - fixed.d0 ) )
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# estimation of K parameters in Ramsay's quotient model
if ( sum( est.K ) > 0 & ramsay.qm ){
h <- numdiff.parm
fixed.K0 <- fixed.K
# identify different c parameter groups
kG <- setdiff( unique( est.K ), 0 )
res <- .mml.ramsay.est.K( theta.k, b, fixed.a, fixed.c, fixed.d,
fixed.K, pjk, alpha1, alpha2, h, G, I, r.jk, n.jk, est.K,
min.K, max.K, iter, pow.qm )
fixed.K <- res$fixed.K
se.K <- res$se.K
# convergence is indicated in metric guess.K=1 / ( fixed.K + 1 )
a1K <- max( abs( 1/(1+fixed.K) - 1/(1+fixed.K0) ) )
}
#***************************
# estimation of alpha
if ( est.alpha ){
alpha1.old <- alpha1
h <- numdiff.alpha.parm
#-- alpha1
calc_prob_args <- list( theta.k=theta.k, b=b, fixed.a=fixed.a, fixed.c=fixed.c,
fixed.d=fixed.d, alpha1=alpha1, alpha2=alpha2, Qmatrix=Qmatrix )
pjk.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha1 + h
calc_prob_args$alpha1 <- alpha1 + h
pjk1.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha1 - h
calc_prob_args$alpha1 <- alpha1 - h
pjk2.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- log likelihood
ll0a1 <- ll0 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk.M, n.jk=n.jk, r.jk=r.jk )
ll1a1 <- ll1 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk1.M, n.jk=n.jk, r.jk=r.jk )
ll2a1 <- ll2 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk2.M, n.jk=n.jk, r.jk=r.jk )
#--- derivatives
res <- rasch_mml2_difference_quotient( ll0=ll0, ll1=ll1, ll2=ll2, h=h )
d1 <- res$d1
d2 <- res$d2
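# Newton-Raphson update based on central difference quotients:
#   d1=( ll(alpha+h) - ll(alpha-h) ) / (2h)
#   d2=( ll(alpha+h) + ll(alpha-h) - 2*ll(alpha) ) / h^2
# increment=-d1/d2, bounded to |increment| <=.1 below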
# change in item difficulty
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha1 <- alpha1 + alpha.change
a1 <- abs(alpha.change )
se.alpha <- sqrt( 1 / abs(d2) )
#-- alpha2
calc_prob_args$alpha1 <- alpha1
pjk.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha2 + h
calc_prob_args$alpha2 <- alpha2 + h
pjk1.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha2 - h
calc_prob_args$alpha2 <- alpha2 - h
pjk2.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- log likelihood
ll0a1 <- ll0 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk.M, n.jk=n.jk, r.jk=r.jk )
ll1a1 <- ll1 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk1.M, n.jk=n.jk, r.jk=r.jk )
ll2a1 <- ll2 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk2.M, n.jk=n.jk, r.jk=r.jk )
#--- derivatives
res <- rasch_mml2_difference_quotient( ll0=ll0, ll1=ll1, ll2=ll2, h=h )
d1 <- res$d1
d2 <- res$d2
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha2 <- alpha2 + alpha.change
a2 <- abs(alpha.change)
maxalphachange <- max(a1, a2)
se.alpha <- c( se.alpha, sqrt( 1 / abs(d2) ) )
if (equal.alpha){
ll0 <- ll0a1 + ll0
ll1 <- ll1a1 + ll1
ll2 <- ll2a1 + ll2
d1 <- ( ll1 - ll2 ) / ( 2 * h ) # negative sign?
d2 <- ( ll1 + ll2 - 2*ll0 ) / h^2
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha2 <- alpha1 <- alpha1.old + alpha.change
a2 <- abs(alpha.change)
maxalphachange <- max(a2)
se.alpha <- sqrt( 1 / abs(d2) )
}
}
# cat("distribution / rest") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#
##***** output
# iteration index
dev.change <- abs( ( dev - dev0)/ dev0 )
par.change <- max( c( abs(b - b0 ), abs(alpha.change ), a1a, a1b,
a1c, a1K, apmax) )
if (irtmodel=="missing1"){
par.change <- max( c( par.change, a1beta ))
}
# display convergence
if (progress){
cat( paste( " Deviance=", round( dev, 4 ),
if (iter > 0 ){ " | Deviance change=" } else {""},
if( iter>0){round( - dev + dev0, 6 )} else { ""} ,"\n",sep=""))
if ( ! npirt ){
cat( paste0( " Maximum b parameter change", "=",
round( max(abs(b - b0 )), 6 ), " \n" ) )
}
if ( est.alpha ){
cat( paste0( " alpha1=", round(alpha1,3), " | alpha2=", round( alpha2,3),
" | max alpha change ", round( maxalphachange,7 ), "\n", sep=""))
}
if ( sum(est.a) > 0 ){
cat( paste0( " Maximum a parameter change", "=",
paste( round(a1a,6), collapse=" " ), "\n", sep=""))
}
if ( irtmodel=="missing1" ){
cat( paste0( " Maximum beta parameter change=",
paste0( round(a1beta,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.c) > 0 ){
cat( paste0( " Maximum c parameter change=",
paste( round(a1b,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.d) > 0 ){
cat( paste0( " Maximum d parameter change=",
paste( round(a1c,6), collapse=" " ), "\n", sep=""))
}
if ( npirt ){
cat( paste0( " Maximum weighted ICC change=",
paste( round(apmax,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.K) > 0 ){
cat( paste0( " Maximum K parameter change=",
paste( round(a1K,6), collapse=" " ), "\n", sep=""))
}
if ( D > 1 ){
cat(" Mean | " )
cat( round(as.vector(mu),3))
cat("\n Covariance Matrix | " )
cat( round(Sigma.cov[!upper.tri(Sigma.cov)],3))
cat("\n")
}
if ( irtmodel=="missing1" ){
cat(" Delta=" )
r1 <- sort( unique( as.vector(delta.miss) ) )
h1 <- ""
if ( length(r1) > 5 ){
r1 <- r1[1:5]
h1 <- " ... "
}
cat( round( r1,3))
cat( h1, "\n")
}
utils::flush.console()
}
iter <- iter + 1
}
####################################### end iterations #####################
############################################################################
##**************************************************************************
if ( irtmodel=="missing1"){
m1$center <- FALSE
G <- 1
}
if (npirt & ( ! is.null(npformula ) ) ){
item <- NULL
for (ii in 1:I){
item.ii <- data.frame( "item"=colnames(dat)[ii] )
smod.ii <- summary(npmodel[[ii]])
item.ii <- data.frame( cbind( item.ii, rownames(smod.ii$coef),
smod.ii$coef[,1:2] ) )
colnames(item.ii)[-1] <- c("par", "est", "se" )
rownames(item.ii) <- NULL
item <- rbind( item, item.ii )
}
}
#**********************************************
# standard error for item parameter
# ...
# calculations for information criteria
ic <- list( "deviance"=dev, "n"=nrow(dat) )
# number of parameters to be estimated
# these formulas hold when assuming normal distributions
# ic$traitpars <- ic$itempars <- NA
if ( distribution.trait=="normal"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 )
}
if ( distribution.trait=="smooth2"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 )
}
if ( distribution.trait=="smooth3"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 ) + G
}
if ( distribution.trait=="smooth4"){
ic[[ "np" ]]<- ( G - 1 ) + ncol(dat) + ( G - 0 ) + 2*G
}
# ic$itempars <- ic$traitpars - ncol(dat)
# subtract fixed constraints
if ( ! is.null( constraints) ){
ic$np <- ic$np - nrow(constraints)
# ic$itempars <- ic$itempars - nrow(constraints)
}
# subtract constraints due to designmatrix
if ( ! is.null( designmatrix ) ){
ic$np <- ic$np - ncol(dat) + ncol(designmatrix)
# ic$itempars <- ic$itempars - ncol(dat) + ncol(designmatrix)
}
# alpha estimation
ic$np <- ic$np + est.alpha * 2 - equal.alpha *1
# ic$itempars <- ic$itempars + est.alpha * 2 - equal.alpha *1
# guessing, slipping and discrimination parameter estimation
if ( sum(est.c) > 0 ){
ic$np <- ic$np + length(cG)
}
if ( sum(est.d) > 0 ){
ic$np <- ic$np + length(dG)
}
if ( sum(est.a) > 0 ){
ic$np <- ic$np + length(aG) - 1
}
if ( sum(est.K) > 0 ){
ic$np <- ic$np + length(kG)
}
if ( irtmodel=="missing1"){
ic$np <- ic$np + I
if ( est_delta ){
v1 <- unique( est.delta )
v1 <- v1[ ! is.na(v1) ]
ic$np <- ic$np + length(v1)
}
}
# parameters for multiple dimensions
if (D>1){
# mean vector
MM <- nrow(mu.fixed )
# if ( mu.fixed=="center" ){ MM <- 1 }
if ( is.null(mu.fixed) ){ MM <- 0 }
ic$np <- ic$np + length(mu) - MM
# covariance matrix
ic$np <- ic$np - 1*(sum(est.a)==0) + D*(D+1)/2 # SD's
if ( ! is.null(variance.fixed)){ ic$np <- ic$np - nrow( variance.fixed ) }
}
# item parameter for nonparametric models
if (npirt & ( ! is.null(npformula ) ) ){
ic$np <- nrow(item) }
if (npirt & ( is.null(npformula ) ) ){
ic$np <- prod( dim(pjk)) }
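# information criteria based on deviance=-2*log-likelihood and np estimated parameters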
# AIC
ic$AIC <- dev + 2*ic$np
# BIC
ic$BIC <- dev + ( log(ic$n) )*ic$np
# CAIC (consistent AIC)
ic$CAIC <- dev + ( log(ic$n) + 1 )*ic$np
# corrected AIC
ic$AICc <- ic$AIC + 2*ic$np * ( ic$np + 1 ) / ( ic$n - ic$np - 1 )
# item statistics
if ( npirt & ( ! is.null(npformula ) ) ){ item0 <- item }
item <- data.frame( "item"=colnames(dat), "N"=colSums( weights*(1 - is.na(dat)) ),
"p"=colSums( weights*dat, na.rm=T) / colSums( weights*(1 - is.na(dat)) ),
"b"=b )
if ( ! is.null( constraints) ){
est.b <- 1:I
est.b[ constraints[,1] ] <- 0
item$est.b <- est.b
}
if ( npirt & ( ! is.null(npformula ) ) ){ item <- merge( x=item[,1:3], y=item0, by="item" ) }
if ( ! npirt ){
if (is.null(est.b)){ item$est.b=seq(1,I) } else { item$est.b <- est.b }
# fixed parameters
item$a <- fixed.a
if ( ! is.null( est.a) ){ item$est.a <- est.a } else { item$est.a <- rep(0,I) }
# include threshold
item$thresh <- item$a*item$b
# guessing parameter
item$c <- fixed.c
if ( ! is.null( est.c) ){ item$est.c <- est.c } else { item$est.c <- rep(0,I) }
item$d <- fixed.d
if ( ! is.null( est.d) ){ item$est.d <- est.d } else { item$est.d <- rep(0,I) }
if (m1$center){
if ( ! is.null(constraints) ){ item[ constraints[,1], 4 ] <- NA }
}
rownames(item) <- colnames(dat)
}
# latent ability distribution
skewness.trait <- sd.trait <- mean.trait <- rep(0,G)
if ( D==1){
for (gg in 1:G){
mean.trait[gg] <- weighted.mean( theta.k, pi.k[,gg] )
sd.trait[gg] <- sqrt( weighted.mean( ( theta.k - mean.trait[gg] )^2, pi.k[,gg] ) )
skewness.trait[gg] <- sum( ( theta.k - mean.trait[gg] )^3 * pi.k[,gg] ) / sd.trait[gg]^3
if (gg==1 & npirt ){ sd.trait[gg] <- 1 }
}
}
# center trait distribution
# if ( center.trait & G < 1 ){
# theta.k <- theta.k - mean.trait
# b <- b - mean.trait
# item$itemdiff <- b
# mean.trait <- 0
# }
trait.distr <- data.frame( "theta.k"=theta.k, "pi.k"=pi.k )
# item response pattern
if ( D==1 ){
if ( is.matrix(theta.k) ){
theta.k <- as.vector( theta.k)
}
ability.est <- data.frame( dat1, theta.k[ whichrowMaxs( f.qk.yi )$arg ] )
colnames(ability.est) <- c("pattern", "AbsFreq", "mean", "MAP" )
}
if (D>1){
ability.est <- data.frame( dat1, theta.k[ whichrowMaxs( f.qk.yi )$arg,] )
colnames(ability.est) <- c("pattern", "AbsFreq", "mean",
paste("MAP.Dim",1:D,sep="") )
}
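# EAP: posterior mean of theta given the observed response pattern,
#   EAP_i=sum_k theta_k * P( theta_k | y_i )
#   SE.EAP_i=sqrt( E[ theta^2 | y_i ] - EAP_i^2 )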
if (D==1){
ability.est$EAP <- rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k ) )
ability.est$SE.EAP <- sqrt( rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)),
theta.k^2 ) ) - ability.est$EAP^2 )
}
if (D>1){
for (dd in 1:D){
ability.est[, paste("EAP.Dim",dd,sep="")] <-
rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k[,dd] ) )
ability.est[, paste("SE.EAP.Dim",dd,sep="")] <-
sqrt( rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k[,dd]^2 ) ) -
ability.est[,paste("EAP.Dim",dd,sep="")]^2 )
}
}
# posterior distribution
rownames(f.qk.yi) <- dat1[,1]
# merging ability estimates
# if ( ! is.null(group)){
if ( G > 1 ){
ability.est2 <- cbind( freq.patt, ability.est[,-1] )
} else {
if (irtmodel !="missing1" ){
ability.est2 <- merge( freq.patt, ability.est, 1, 1 )
} else {
ability.est2 <- ability.est
ability.est2$index <- seq(1, nrow(ability.est) )
}
}
ability.est2 <- ability.est2[ order(ability.est2$index), -c(3:5) ]
# EAP reliability estimate
reliability <- NULL
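# EAP reliability: Rel=Var(EAP) / ( Var(EAP) + mean(SE.EAP^2) ),
# written below as 1 - mean(SE^2) / ( mean(SE^2) + Var(EAP) )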
if (D==1){
reliability$eap.reliability <-
1 - mean(ability.est2$SE.EAP^2) / ( mean(ability.est2$SE.EAP^2) + var(ability.est2$EAP) )
}
if (D>1){
r1 <- rep(0,D)
for (dd in 1:D){
r1[dd] <- 1 - mean(ability.est2[,paste("SE.EAP.Dim",dd,sep="")]^2) /
( mean(ability.est2[,paste("SE.EAP.Dim",dd,sep="")]^2) +
stats::var(ability.est2[,paste("EAP.Dim",dd,sep="")]) )
}
if ( is.null( colnames(Qmatrix) ) ){
dimnamesPars <- paste( "Dim",1:D, sep="")
} else { dimnamesPars <- colnames(Qmatrix) }
names(r1) <- dimnamesPars
reliability$eap.reliability <- r1
names(mu) <- dimnamesPars
rownames(Sigma.cov) <- colnames(Sigma.cov) <- dimnamesPars
}
# include person ID
ability.est2$pid <- pid
# match ability patterns
if (irtmodel !="missing1" ){
ind1 <- match( ability.est2$freq.patt, ability.est$pattern )
ability.est <- ability.est[ ind1, ]
f.qk.yi <- f.qk.yi[ind1,]
f.yi.qk <- f.yi.qk[ind1,]
}
#*****
# item table for missing data IRT model
if ( irtmodel=="missing1"){
item$thresh <- item$est.a <- item$est.c <- item$est.d <- NULL
# missing proportion
item$pmiss <- colSums( dat2.resp * ( dat2==2 ), na.rm=TRUE) / colSums( dat2.resp, na.rm=TRUE)
item$beta <- beta
item$delta.miss <- delta.miss
}
# output fixed.a and fixed.c
if ( is.null(fixed.a ) & is.null(fixed.c) ){ fixed.a <- rep(1,I) ; fixed.c <- rep(0,I) }
# include item discrimination
if (D==1){
i1 <- item$emp.discrim <- round( item.discrim( dat, ability.est2$MAP ), 3 )
}
if (npirt){
i1 <- data.frame( "item"=colnames(dat), "emp.discrim"=i1 )
item$emp.discrim <- NULL
item <- merge( x=item, y=i1, by="item" )
}
if ( ! npirt ){
item$alpha1 <- alpha1
item$alpha2 <- alpha2
}
#---------------------------------------------------------
# item summary Ramsay QM
item2 <- NULL
if ( ramsay.qm){
if ( is.null(est.K) ){ est.K <- rep(0,I) }
item2 <- data.frame( "item"=item$item, "N"=item$N , "p"=item$p,
"K"=fixed.K, "est.K"=est.K,
"b"=exp(b), "log_b"=b, "est.b"=item$est.b,
"guess.K"=1/(fixed.K+1),
"emp.discrim"=item$emp.discrim )
}
##################################################
# item response probabilities
d1 <- dim(pjk)
if ( length(d1)==2 ){
rprobs <- array( 0, dim=c( d1[2], 2, d1[1] ) )
rprobs[,2,] <- t( pjk )
rprobs[,1,] <- 1 - t(pjk)
} else {
rprobs <- pjk
}
dimnames(rprobs)[[1]] <- colnames(dat)
#- collect information about priors
priors <- rasch_mml2_prior_information(prior.a, prior.b, prior.c, prior.d)
#--- result
res <- list( dat=dat, item=item, item2=item2, trait.distr=trait.distr,
mean.trait=mean.trait, sd.trait=sd.trait, skewness.trait=skewness.trait,
deviance=dev, pjk=pjk, rprobs=rprobs, person=ability.est2, pid=pid,
ability.est.pattern=ability.est, f.qk.yi=f.qk.yi, f.yi.qk=f.yi.qk,
pure.rasch=pure.rasch, fixed.a=fixed.a, fixed.c=fixed.c, G=G, alpha1=alpha1,
alpha2=alpha2, se.b=se.b, se.a=se.a, se.c=se.c, se.d=se.d, se.alpha=se.alpha,
se.K=se.K, se.delta=se.delta, iter=iter, reliability=reliability,
ramsay.qm=ramsay.qm, irtmodel=irtmodel, D=D, mu=mu, Sigma.cov=Sigma.cov,
est_parameters=est_parameters, priors=priors,
theta.k=theta.k, trait.weights=trait.weights, pi.k=pi.k, CALL=CALL )
class(res) <- "rasch.mml"
res$ic <- ic
res$est.c <- est.c
res$groupindex <- ag1
res$n.jk <- n.jk
res$r.jk <- r.jk
res$esttype <- "ll"
if ( pseudoll ){ res$esttype <- "pseudoll" }
# computation time
s2 <- Sys.time()
res$s1 <- s1
res$s2 <- s2
res$Rfcttype <- "rasch.mml2"
if (progress){
cat("------------------------------------------------------------\n")
cat("Start:", paste( s1), "\n")
cat("End:", paste(s2), "\n")
cat("Difference:", print(s2 -s1), "\n")
cat("------------------------------------------------------------\n")
}
return(res)
}
#---------------------------------------------------------------------------
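## --- Usage sketch (illustrative; assumes the full sirt package with its
## internal helpers such as .data.prep and sirt_dnorm_discrete is available;
## all object names below are placeholders) ---
# set.seed(987)
# N <- 2000 ; I <- 12
# b.true <- seq( -2, 2, length=I )
# theta <- stats::rnorm(N)
# # simulate Rasch responses: P(X=1)=plogis( theta - b )
# dat <- 1 * ( stats::plogis( outer( theta, b.true, "-" ) ) >
#                 matrix( stats::runif(N*I), nrow=N ) )
# colnames(dat) <- paste0( "I", 1:I )
# mod <- rasch.mml2( dat )          # Rasch model, normal trait distribution
# mod$item                          # item statistics including estimated b
# mod$reliability$eap.reliability   # EAP reliability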
|
/sirt/R/rasch.mml2.R
|
no_license
|
akhikolla/TestedPackages-NoIssues
|
R
| false
| false
| 51,960
|
r
|
## File Name: rasch.mml2.R
## File Version: 7.4619
# Semiparametric Maximum Likelihood Estimation in the Rasch type Model
# item discrimination and guessing parameter can be fixed
rasch.mml2 <- function( dat, theta.k=seq(-6,6,len=21), group=NULL, weights=NULL,
constraints=NULL, glob.conv=10^(-5), parm.conv=10^(-4), mitermax=4,
mmliter=1000, progress=TRUE, fixed.a=rep(1,ncol(dat)),
fixed.c=rep(0,ncol(dat)), fixed.d=rep(1,ncol(dat)), fixed.K=rep(3,ncol(dat)),
b.init=NULL, est.a=NULL, est.b=NULL, est.c=NULL, est.d=NULL, min.b=-99,
max.b=99, min.a=-99, max.a=99, min.c=0, max.c=1, min.d=0, max.d=1,
prior.b=NULL, prior.a=NULL, prior.c=NULL, prior.d=NULL, est.K=NULL,
min.K=1, max.K=20, beta.init=NULL, min.beta=-8, pid=1:(nrow(dat)),
trait.weights=NULL, center.trait=TRUE, center.b=FALSE, alpha1=0, alpha2=0,
est.alpha=FALSE, equal.alpha=FALSE, designmatrix=NULL, alpha.conv=parm.conv,
numdiff.parm=0.00001, numdiff.alpha.parm=numdiff.parm,
distribution.trait="normal", Qmatrix=NULL,
variance.fixed=NULL, variance.init=NULL,
mu.fixed=cbind(seq(1,ncol(Qmatrix)),rep(0,ncol(Qmatrix))),
irtmodel="raschtype", npformula=NULL, npirt.monotone=TRUE,
use.freqpatt=is.null(group), delta.miss=0, est.delta=rep(NA,ncol(dat)),
... )
{
# specifications
conv1 <- parm.conv
nplausible=5
dat <- as.matrix(dat)
adaptive.quadrature <- FALSE
CALL <- match.call()
# a0 <- Sys.time()
# models
npirt <- ramsay.qm <- FALSE
I <- ncol(dat)
# if ( esttype=="pseudoll" ){
# if ( is.null( group) ){
# group <- rep(1,nrow(dat) )
# }
# }
# m1r <- FALSE
# if (irtmodel=="missing1r"){
# m1r <- TRUE
# irtmodel <- "missing1"
# }
if ( irtmodel=="ramsay.qm" ){
ramsay.qm <- TRUE
kG <- NULL
}
if ( irtmodel=="npirt" ){
npirt <- TRUE
I <- ncol(dat)
if ( ! is.null(npformula) ){
if ( length( npformula)==1 ){
npformula <- rep( npformula, I )
}
npformula0 <- npformula
npformula <- list( 1:I)
for (ii in 1:I){
npformula[[ii]] <- stats::as.formula( npformula0[ii] )
}
}
npmodel <- list(1:I)
}
D <- 1
if (irtmodel=="missing1"){
D <- 2
theta.k <- expand.grid( theta.k, theta.k )
dat[ dat==9 ] <- 2
dat.resp <- 1 - is.na(dat)
# init b parameters
b.init <- - stats::qlogis( colMeans( dat==1, na.rm=TRUE ) )
# init beta parameters
if ( is.null(beta.init) ){
beta <- stats::qlogis( colMeans( dat==2, na.rm=TRUE ) + 1E-3 )
} else {
beta <- beta.init
}
}
# multidimensional model
if ( ! is.null( Qmatrix ) ){
D <- ncol(Qmatrix)
if ( D==2){ theta.k <- expand.grid( theta.k, theta.k ) }
if ( D==3){ theta.k <- expand.grid( theta.k, theta.k, theta.k) }
if ( D==4){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k) }
if ( D==5){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k, theta.k) }
if ( D==6){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k, theta.k, theta.k) }
if ( D==7){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k) }
if ( D==8){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k) }
if ( D==9){ theta.k <- expand.grid( theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k, theta.k) }
# Qmatrix <- sapply( 1:max(item.dim), FUN=function(dd){
# 1*(item.dim==dd) } )
}
if (is.data.frame(theta.k)){
theta.k <- as.matrix(theta.k)
}
if (D > 1){
if ( is.null(variance.fixed) & ( sum(est.a) > 0) ){
variance.fixed <- as.matrix( cbind( 1:D, 1:D, 1 ) )
}
}
Sigma.cov <- diag(D)
if ( ! is.null(variance.init) ){
Sigma.cov <- variance.init
}
mu <- rep(0,D)
# cat("114") ; a1 <- Sys.time(); print(a1-a0) ; a0 <- a1
# ramsay.qm <- FALSE
if ( ! ramsay.qm) { raschtype <- TRUE }
if (ramsay.qm | npirt ){
raschtype <- FALSE
# no alpha, a, c or d parameters can be estimated
est.alpha <- FALSE
est.a <- est.c <- est.d <- NULL
pow.qm <- 1 # This parameter is ignored in analyses
}
# computation time
s1 <- Sys.time()
if (est.alpha){
if (is.null(alpha1) ){ alpha1 <- 0 }
if (is.null(alpha2) ){ alpha2 <- 0 }
}
#*** some data checks
ag1 <- NULL
if( max( colMeans( is.na( dat ) ) )==1 ){
stop("Remove items which have no observations!")
}
if ( ! is.null(group) ){
t1 <- table(sort(group) )
group.orig <- group
group <- match( group.orig, sort(unique( group.orig)) )
ag1 <- stats::aggregate( group, list( group.orig), mean )
colnames(ag1) <- c("group", "groupindex" )
}
# center trait: if there exists constraints, then do not center
if ( is.null( colnames(dat) ) ){
colnames(dat) <- paste( "I", 1:ncol(dat), sep="")
}
if ( ! is.null( constraints ) ){
center.trait <- FALSE
if( ! is.numeric( constraints[,1] ) ){
constraints[,1] <- match( paste(constraints[,1]), colnames(dat) )
}
constraints <- na.omit(constraints)
constraints <- constraints[ constraints[,1] <=ncol(dat),, drop=FALSE]
}
if ( ! is.null( designmatrix) ){
if ( ncol(dat) !=nrow(designmatrix) ){
stop( "Row dimension of designmatrix should be equal to number of items")
}
}
# est.b parameters
if ( ! is.null(est.b) ){
# bG <- unique( est.b )
bG <- unique( setdiff( est.b,0 ))
if ( is.null( b.init) ){
b <- rep(0, I ) } else { b <- b.init }
designmatrix <- matrix( 0, ncol(dat), length(bG) )
for (bb in bG){
# bb <- bG[1]
# designmatrix[ which( est.b==bb ), bb ] <- 1
designmatrix[ which( est.b==bb ), match(bb,bG) ] <- 1
}
}
# set starting values for estimated c and d parameters
if ( ( sum(est.c) > 0 ) & is.null(fixed.c) ){
fixed.c[ est.c > 0 ] <- .10
}
if ( ( sum(est.d) > 0 ) & is.null(fixed.d) ){
fixed.d[ est.d > 0 ] <- .95
}
#****************************************************************************************
WLE <- FALSE
pure.rasch <- -9 # this parameter is only included for historical reasons of this program.
# specify weights
if ( is.null(weights) ){ weights <- rep( 1, nrow(dat) ) }
# display
if ( progress & ( npirt ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Nonparametric IRT Model (Rossi, Wang & Ramsay, 2002) \n")
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & ( npirt ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Missing Data Item Response Model (Mislevy & Wu, 1996) \n")
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & ( ramsay.qm ) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
cat("Quotient Model (Ramsay, 1989) \n")
# if (normal.trait){ cat("Normal trait distribution \n") } else { cat("Nonparametric trait distribution \n") }
# if (ramsay.qm){ cat("Log Normal Distribution of Theta with Power of", pow.qm, "\n") }
cat("------------------------------------------------------------\n")
flush.console()
}
if ( progress & (raschtype) ){
cat("------------------------------------------------------------\n")
cat("Semiparametric Marginal Maximum Likelihood Estimation \n")
if ( est.alpha ){
cat(paste( "Raschtype Model with generalized logistic link function: Estimation of alpha1 and alpha2 \n") )
} else {
cat(paste( "Raschtype Model with generalized logistic link function: alpha1=",alpha1, ", alpha2=", alpha2, " \n") )
}
if ( sum(est.c) > 0){ cat(paste( "Estimated guessing parameter groups \n") )} ## estimated guessing parameters
if ( sum(est.d) > 0){ cat(paste( "Estimated slipping parameter groups \n") )} ## estimated slipping parameters
cat("------------------------------------------------------------\n")
flush.console()
}
# revise guessing parameter (if necessary)
if ( !is.null(fixed.c) ){
# calculate item means
itemmean <- colMeans( dat, na.rm=TRUE )
if ( any( itemmean < fixed.c) ){
cat ( "revise fixed guessing estimates\n")
fixed.c[ itemmean < fixed.c] <- 0
}
}
# data preparations
if ( ! is.null(group) ){
use.freqpatt <- FALSE
}
if ( irtmodel !="missing1" ){
dp <- .data.prep( dat, weights=weights, use.freqpatt=use.freqpatt)
dat1 <- dp$dat1
dat2 <- dp$dat2
dat2.resp <- dp$dat2.resp
freq.patt <- dp$freq.patt
n <- dp$n
I <- dp$I
}
se.delta <- NULL
if ( irtmodel=="missing1" ){
# dat1 <- dp$dat # frequency patterns
dat1 <- as.data.frame( cbind( "P", weights ) )
for (ii in 1:I){
l1 <- dat[,ii]
l1 <- ifelse ( dat.resp[,ii]==0, 9, l1 )
dat1[, 1 ] <- paste0( dat1[,1], l1 )
}
colnames(dat1) <- c("pattern","Freq")
dat1 <- as.data.frame(dat1)
freq.patt <- dat1$pattern
dat1$Freq <- weights
dat1$mean <- rowMeans( dat==1 )
dat2 <- dat
dat2.resp <- dat.resp
n <- nrow(dat2)
I <- ncol(dat2)
}
#*** pseudolikelihood estimation?
fracresp <- "pseudoll"
pseudoll <- 0
i1 <- sum( ( dat2 > 0 ) * ( dat2 < 1), na.rm=TRUE )
if (i1 > 10E-10 ){
if ( fracresp=="pseudoll"){
pseudoll <- 1
}
if ( fracresp=="fuzzyll"){
pseudoll <- 2
}
if ( is.null(group) ){
group <- rep( 1, nrow(dat2) )
}
}
# probability weights at theta.k
if (D==1){
pi.k <- sirt_dnorm_discrete( x=theta.k )
}
if (D > 1){
pi.k <- sirt_dmvnorm_discrete( x=theta.k, mean=rep(0,D), sigma=Sigma.cov )
}
G <- 1
pi.k <- matrix( pi.k, nrow=length(pi.k), ncol=G )
# group calculations
if ( !is.null( group )){
G <- length( unique( group ) )
pi.k0 <- pi.k
pi.k <- matrix( 0, nrow=length(pi.k0), ncol=G)
for (gg in 1:G){
pi.k[,gg] <- pi.k0
}
}
sd.trait <- mean.trait <- rep(0,G)
# initial estimates for item difficulties
if ( is.null(b.init) & is.null(est.b) ){
b <- - stats::qlogis( colMeans( dat, na.rm=T ) )
if ( FALSE ){
# if ( ramsay.qm ){
b <- - log( ( fixed.K * colSums( dat, na.rm=TRUE ) ) /
( colSums( 1 - dat, na.rm=TRUE ) ) )
}
}
if ( (!is.null(b.init) ) & is.null(est.b) ){
b <- b.init
}
if ( G==1 ){ group <- rep(1, nrow(dat1)) }
# missing data indicators
ind.ii.list <- list(1:I)
for (ii in 1:I){
ind.ii.list[[ii]] <- which( dat2.resp[,ii]==1 )
}
mean.trait <- rep(0,G)
sd.trait <- rep(1,G)
# initial iteration index
iter <- 0
par.change <- dev.change <- 3
dev <- 99
apmax <- 0
maxalphachange <- 1
# display
disp <- "...........................................................\n"
# old_increment.d <- old_increment.c <- rep( .2, I )
if( sum( est.d ) > 0 ){
old_increment.d <- rep( .2, length( unique( est.d[ est.d > 0 ] ) ) )
}
if( sum( est.c ) > 0 ){
old_increment.c <- rep( .2, length( unique( est.c[ est.c > 0 ] ) ) )
}
old_increment_b <- rep( 2, I )
h <- numdiff.parm
# initialize standard errors
se.alpha <- se.K <- se.b <- se.a <- se.c <- se.d <- NULL
# Ramsay QM
# if ( irtmodel=="ramsay.qm" ){ normal.trait <- TRUE }
#****
# preparations npirt and npformula
ICC_model_matrix <- NULL
if ( ( npirt) & ( !is.null(npformula) ) ){
for (ii in 1:I){
dfr1 <- data.frame( "theta"=theta.k, "y"=1, "wgt"=NA )
dfr0 <- data.frame( "theta"=theta.k, "y"=0, "wgt"=NA )
dafr <- data.frame( rbind( dfr0, dfr1 ) )
theta <- dafr$theta.k
ICC_model_matrix[[ii]] <- stats::model.matrix( npformula[[ii]], dafr )
}
}
# inits theta.k
theta.k0 <- as.matrix(theta.k)
dat2 <- as.matrix(dat2)
dat2.resp <- as.matrix(dat2.resp)
# inits probs
if ( irtmodel=="missing1"){
TP <- nrow( theta.k)
CC <- 3
pjk <- array( 0, dim=c(I,CC,TP ) )
# if ( is.null(group) ){
group_ <- rep(0,nrow(dat2) )
# } else {
# group_ <- group
# }
raschtype <- FALSE
G <- length(unique(group))
}
#***** module missing1
# if ( ! is.null( est.delta ) ){
est_delta <- sum( 1-is.na(est.delta) ) > 0
if ( center.b & is.null(Qmatrix) ){
Qmatrix <- matrix( 1, nrow=I, ncol=1)
theta.k <- matrix( theta.k, ncol=1 )
center.trait <- FALSE
}
#-- indicators of estimated parameters
est_parameters <- list( a=sum(est.a)>0, c=sum(est.c)>0, d=sum(est.d)>0)
#******************************************************
#*************** MML Iteration Algorithm **************
while ( ( dev.change > glob.conv | par.change > conv1 | maxalphachange > alpha.conv ) & iter < mmliter ){
if (progress){
cat(disp)
cat("Iteration", iter+1, " ", paste( Sys.time() ), "\n" )
utils::flush.console()
}
zz0 <- Sys.time()
b0 <- b
if ( irtmodel=="missing1" ){ beta0 <- beta }
dev0 <- dev
#-------------- E-step --------------
if ( irtmodel=="missing1"){
e1 <- .e.step.missing1( dat2, dat2.resp, theta.k, b, beta, delta.miss, I, CC,
TP, group_, pi.k, pjk, weights )
n.ik <- e1$n.ik
e1$ll <- e1$LL
}
if ( ramsay.qm ){
e1 <- .e.step.ramsay( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.K, group, pow.qm=pow.qm, ind.ii.list )
}
if (raschtype & D==1){
e1 <- rasch_mml2_estep_raschtype( dat1=dat1, dat2=dat2, dat2.resp=dat2.resp,
theta.k=theta.k, pi.k=pi.k, I=I, n=n, b=b, fixed.a=fixed.a, fixed.c=fixed.c,
fixed.d=fixed.d, alpha1=alpha1, alpha2=alpha2, group=group, pseudoll=pseudoll )
}
if (raschtype & D>1){
e1 <- .e.step.raschtype.mirt( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.a, fixed.c, fixed.d, alpha1, alpha2, group,
mu, Sigma.cov, Qmatrix, pseudoll)
}
if (npirt){
if (iter==0){
pjk <- stats::plogis( outer( theta.k, b, "-" ) )
}
e1 <- .e.step.ramsay( dat1, dat2, dat2.resp, theta.k, pi.k, I, n, b,
fixed.K, group, pow.qm=pow.qm, ind.ii.list,
pjk=pjk )
}
n.k <- e1$n.k
n.jk <- e1$n.jk
r.jk <- e1$r.jk
pjk <- e1$pjk
f.qk.yi <- e1$f.qk.yi
f.yi.qk <- e1$f.yi.qk
dev <- -2*e1$ll
# cat("e step") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#-------------- M-step --------------
# Ramsay QM
if ( ramsay.qm ){
m1 <- .m.step.ramsay( theta.k, b, n.k, n, n.jk, r.jk, I,
conv1, constraints,
mitermax, pure.rasch, trait.weights, fixed.K,
designmatrix=designmatrix, group=group,
numdiff.parm=numdiff.parm, pow.qm=pow.qm )
se.b <- m1$se.b
}
# generalized Rasch type model
if (raschtype){
m1 <- rasch_mml2_mstep_raschtype( theta.k=theta.k, b=b, n.k=n.k, n=n, n.jk=n.jk,
r.jk=r.jk, pi.k=pi.k, I=I, conv1=conv1, constraints=constraints,
mitermax=mitermax, pure.rasch=pure.rasch, trait.weights=trait.weights,
fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d, alpha1=alpha1,
alpha2=alpha2, designmatrix=designmatrix, group=group,
numdiff.parm=numdiff.parm, Qmatrix=Qmatrix, old_increment=old_increment_b,
est.b=est.b, center.b=center.b, min.b=min.b, max.b=max.b,
prior.b=prior.b)
se.b <- m1$se.b
}
# nonparametric IRT model
if (npirt ){
pjk0 <- pjk
res <- .mstep.mml.npirt( pjk, r.jk, n.jk, theta.k,
npformula, npmodel, G, I, npirt.monotone,
ICC_model_matrix )
pjk <- res$pjk
npmodel <- res$npmodel
apmax <- max( pi.k[,1]*abs( pjk - pjk0)/.40 )
m1 <- list( "b"=b, "G"=G, "pi.k"=pi.k, "center"=FALSE )
}
# missing data IRT model
if ( irtmodel=="missing1" ){
m1 <- .mstep.mml.missing1( theta.k, n.ik, mitermax, conv1,
b, beta, delta.miss, pjk, numdiff.parm,
constraints, est.delta, min.beta=min.beta, est_delta )
b <- m1$b
se.b <- m1$se.b
beta <- m1$beta
se.beta <- m1$se.beta
delta.miss <- m1$delta.miss
se.delta <- m1$se.delta
m1$dev <- dev
a1beta <- max( abs( beta - beta0 ) )
}
# cat("m step") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#***************************************
# update mean and covariance in multidimensional models
if ( D > 1){
theta.k <- as.matrix(theta.k)
# delta.theta <- (theta.k[2,1] - theta.k[1,1])^D
delta.theta <- 1
hwt <- e1$f.qk.yi
hwt <- hwt / rowSums(hwt)
thetabar <- hwt%*%theta.k
# calculation of mu
mu <- colSums( thetabar * dat1$Freq ) / sum( dat1$Freq )
if ( ! is.null(mu.fixed ) ){
if (is.matrix(mu.fixed) ){
mu0 <- mu
mu[ mu.fixed[,1] ] <- mu.fixed[,2]
if ( ( sum( as.vector(mu.fixed[1,1:2]) - c(1,0))==0 ) &
( nrow(mu.fixed)==1 ) ){
mu[-1] <- -mu0[1] + mu[-1]
}
}
# if ( mu.fixed=="center"){
# mu <- mu - mean(mu)
# }
}
# calculation of the covariance matrix
theta.k.adj <- theta.k - matrix( mu, nrow=nrow(theta.k),
ncol=ncol(theta.k), byrow=TRUE)
for (dd1 in 1:D){
for (dd2 in dd1:D){
tk <- theta.k.adj[,dd1]*theta.k.adj[,dd2]
h1 <- dat1$Freq * ( hwt %*% tk ) * delta.theta
Sigma.cov[dd1,dd2] <- sum( h1 ) / sum( dat1$Freq )
if (dd1 < dd2 ){ Sigma.cov[dd2,dd1] <- Sigma.cov[dd1,dd2] }
}
}
if ( ! is.null(variance.fixed ) ){
Sigma.cov[ variance.fixed[,1:2,drop=FALSE] ] <- variance.fixed[,3]
Sigma.cov[ variance.fixed[,c(2,1),drop=FALSE] ] <- variance.fixed[,3]
}
diag(Sigma.cov) <- diag(Sigma.cov) + 10^(-10)
# if (m1r){
# d11 <- sqrt( Sigma.cov[1,1]*Sigma.cov[2,2] )- .001
# Sigma.cov[2,1] <- Sigma.cov[1,2] <- d11
# }
# adaptive estimation
if ( adaptive.quadrature ){
theta.k <- mu + theta.k0 %*% chol(Sigma.cov)
}
pi.k <- sirt_dmvnorm_discrete( theta.k, mean=mu, sigma=Sigma.cov, as_matrix=TRUE )
m1$pi.k <- pi.k
}
# end MIRT
#*****
b <- m1$b
# distribution
G <- m1$G
pi.k <- m1$pi.k
if (!is.null( trait.weights) ){
pi.k <- matrix( trait.weights, ncol=1 )
}
#****************************************************
# latent ability distribution
if (distribution.trait=="normal" & D==1){
delta.theta <- 1
# delta.theta <- theta.k[2] - theta.k[1]
# sd.trait <- mean.trait <- rep(0,G)
h <- .0001
for (gg in 1:G){
pi.k0 <- pi.k
f.yi.qk.gg <- e1$f.yi.qk[group==gg,]
dat1.gg <- dat1[group==gg,2]
X1 <- rep(1,nrow(f.yi.qk.gg) )
if ( gg > 1 | ( ! center.trait ) ){
#*********************************
# mean estimation
d.change <- .est.mean( dat1.gg, f.yi.qk.gg, X1, pi.k, pi.k0, gg,
mean.trait, sd.trait, theta.k, h)
mean.trait[gg] <- mean.trait[gg] + d.change
pi.k[,gg] <- sirt_dnorm_discrete( theta.k, mean=mean.trait[gg], sd=sd.trait[gg] )
}
if (center.trait){ mean.trait[1] <- 0 }
#*********************************
# SD estimation
if ( ( gg > 1 ) | ( sum(est.a)==0 ) ){
d.change <- .est.sd( dat1.gg, f.yi.qk.gg, X1, pi.k, pi.k0, gg,
mean.trait, sd.trait, theta.k, h )
sd.trait[gg] <- sd.trait[gg] + d.change
}
if ( ( ! is.null(est.a) ) | ( irtmodel=="npirt" ) ){
sd.trait[1] <- 1
}
pi.k[,gg] <- sirt_dnorm_discrete( theta.k, mean=mean.trait[gg], sd=sd.trait[gg] )
}
} # end normal distribution
#######################################
if (distribution.trait!="normal" & D==1){
for (gg in 1:G){
pik1 <- n.k[,gg] / sum(n.k[,gg] )
pik1 <- pik1 + 10e-10
lpik1 <- log( pik1 )
tk <- theta.k
if ( distribution.trait=="smooth2"){
formula1 <- lpik1 ~ tk + I(tk^2)
}
if ( distribution.trait=="smooth3"){
formula1 <- lpik1 ~ tk + I(tk^2) + I(tk^3)
}
if ( distribution.trait=="smooth4"){
formula1 <- lpik1 ~ tk + I(tk^2) + I(tk^3)+I(tk^4)
}
mod <- stats::lm( formula1, weights=pik1 )
pik2 <- exp( stats::fitted(mod))
pi.k[,gg] <- pik2 / sum(pik2)
if (center.trait & gg==1){
mmm1 <- stats::weighted.mean( theta.k, pik2 )
theta.k <- theta.k - mmm1
}
if ( ( ! is.null(est.a) ) | ( irtmodel=="npirt" ) ){
if (gg==1){
sd1 <- sqrt( sum( theta.k^2 * pi.k[,1] ) - sum( theta.k * pi.k[,1] )^2 )
theta.k <- theta.k / sd1
}
}
}
} # end non-normal distribution
# cat("trait distribution estimation") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#---- estimation of alpha, c and d parameters
alpha.change <- 0
maxalphachange <- 0
a1a <- a1b <- 0
a1K <- a1c <- 0
#--- estimation of a parameters
if ( sum(est.a) > 0 & raschtype ){
fixed.a0 <- fixed.a
aG <- setdiff(unique( est.a ), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.a, min_val=min.a,
max_val=max.a, iter=iter, old_increment=.3,
Qmatrix=Qmatrix, parameter="a", prior=prior.a)
fixed.a <- res$parm
se.a <- res$se
a1a <- max( abs( fixed.a - fixed.a0 ) )
}
#--- estimation of c parameter
if ( sum(est.c) > 0 & raschtype ){
fixed.c0 <- fixed.c
cG <- setdiff( unique(est.c), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.c, min_val=min.c,
max_val=max.c, iter=iter, old_increment=old_increment.c,
Qmatrix=Qmatrix, parameter="c", prior=prior.c)
fixed.c <- res$parm
se.c <- res$se
a1b <- max( abs( fixed.c - fixed.c0 ) )
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# estimation of d parameters
if ( sum( est.d ) > 0 & raschtype ){
fixed.d0 <- fixed.d
dG <- setdiff( unique( est.d ), 0 )
res <- rasch_mml2_raschtype_mstep_parameter_group( theta.k=theta.k,
b=b, fixed.a=fixed.a, fixed.c=fixed.c, fixed.d=fixed.d,
pjk=pjk, alpha1=alpha1, alpha2=alpha2, h=numdiff.parm, G=G, I=I,
r.jk=r.jk, n.jk=n.jk, est_val=est.d, min_val=min.d,
max_val=max.d, iter=iter, old_increment=old_increment.d,
Qmatrix=Qmatrix, parameter="d", prior=prior.d)
fixed.d <- res$parm
se.d <- res$se
a1c <- max( abs( fixed.d - fixed.d0 ) )
}
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# estimation of K parameters in Ramsay's quotient model
if ( sum( est.K ) > 0 & ramsay.qm ){
h <- numdiff.parm
fixed.K0 <- fixed.K
# identify different c parameter groups
kG <- setdiff( unique( est.K ), 0 )
res <- .mml.ramsay.est.K( theta.k, b, fixed.a, fixed.c, fixed.d,
fixed.K, pjk, alpha1, alpha2, h, G, I, r.jk, n.jk, est.K,
min.K, max.K, iter, pow.qm )
fixed.K <- res$fixed.K
se.K <- res$se.K
# convergence is indicated in metric guess.K=1 / ( fixed.K + 1 )
a1K <- max( abs( 1/(1+fixed.K) - 1/(1+fixed.K0) ) )
}
#***************************
# estimation of alpha
if ( est.alpha ){
alpha1.old <- alpha1
h <- numdiff.alpha.parm
#-- alpha1
calc_prob_args <- list( theta.k=theta.k, b=b, fixed.a=fixed.a, fixed.c=fixed.c,
fixed.d=fixed.d, alpha1=alpha1, alpha2=alpha2, Qmatrix=Qmatrix )
pjk.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha1 + h
calc_prob_args$alpha1 <- alpha1 + h
pjk1.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha1 - h
calc_prob_args$alpha1 <- alpha1 - h
pjk2.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- log likelihood
ll0a1 <- ll0 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk.M, n.jk=n.jk, r.jk=r.jk )
ll1a1 <- ll1 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk1.M, n.jk=n.jk, r.jk=r.jk )
ll2a1 <- ll2 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk2.M, n.jk=n.jk, r.jk=r.jk )
#--- derivatives
res <- rasch_mml2_difference_quotient( ll0=ll0, ll1=ll1, ll2=ll2, h=h )
d1 <- res$d1
d2 <- res$d2
# change in item difficulty
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha1 <- alpha1 + alpha.change
a1 <- abs(alpha.change )
se.alpha <- sqrt( 1 / abs(d2) )
#-- alpha2
calc_prob_args$alpha1 <- alpha1
pjk.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha2 + h
calc_prob_args$alpha2 <- alpha2 + h
pjk1.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- alpha2 - h
calc_prob_args$alpha2 <- alpha2 - h
pjk2.M <- do.call( "rasch_mml2_calc_prob", args=calc_prob_args )
#-- log likelihood
ll0a1 <- ll0 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk.M, n.jk=n.jk, r.jk=r.jk )
ll1a1 <- ll1 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk1.M, n.jk=n.jk, r.jk=r.jk )
ll2a1 <- ll2 <- rasch_mml2_mstep_calc_likelihood( G=G, pjk.M=pjk2.M, n.jk=n.jk, r.jk=r.jk )
#--- derivatives
res <- rasch_mml2_difference_quotient( ll0=ll0, ll1=ll1, ll2=ll2, h=h )
d1 <- res$d1
d2 <- res$d2
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha2 <- alpha2 + alpha.change
a2 <- abs(alpha.change)
maxalphachange <- max(a1, a2)
se.alpha <- c( se.alpha, sqrt( 1 / abs(d2) ) )
if (equal.alpha){
ll0 <- ll0a1 + ll0
ll1 <- ll1a1 + ll1
ll2 <- ll2a1 + ll2
                d1 <- ( ll1 - ll2 ) / ( 2 * h )    # central first difference quotient
d2 <- ( ll1 + ll2 - 2*ll0 ) / h^2
alpha.change <- - d1 / d2
alpha.change <- ifelse( abs( alpha.change ) > .1, .1*sign(alpha.change), alpha.change )
alpha2 <- alpha1 <- alpha1.old + alpha.change
a2 <- abs(alpha.change)
maxalphachange <- max(a2)
se.alpha <- sqrt( 1 / abs(d2) )
}
}
# cat("distribution / rest") ; zz1 <- Sys.time(); print(zz1-zz0) ; zz0 <- zz1
#
##***** output
# iteration index
dev.change <- abs( ( dev - dev0)/ dev0 )
par.change <- max( c( abs(b - b0 ), abs(alpha.change ), a1a, a1b,
a1c, a1K, apmax) )
if (irtmodel=="missing1"){
par.change <- max( c( par.change, a1beta ))
}
# display convergence
if (progress){
cat( paste( " Deviance=", round( dev, 4 ),
if (iter > 0 ){ " | Deviance change=" } else {""},
if( iter>0){round( - dev + dev0, 6 )} else { ""} ,"\n",sep=""))
if ( ! npirt ){
cat( paste0( " Maximum b parameter change", "=",
round( max(abs(b - b0 )), 6 ), " \n" ) )
}
if ( est.alpha ){
cat( paste0( " alpha1=", round(alpha1,3), " | alpha2=", round( alpha2,3),
" | max alpha change ", round( maxalphachange,7 ), "\n", sep=""))
}
if ( sum(est.a) > 0 ){
cat( paste0( " Maximum a parameter change", "=",
paste( round(a1a,6), collapse=" " ), "\n", sep=""))
}
if ( irtmodel=="missing1" ){
cat( paste0( " Maximum beta parameter change=",
paste0( round(a1beta,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.c) > 0 ){
cat( paste0( " Maximum c parameter change=",
paste( round(a1b,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.d) > 0 ){
cat( paste0( " Maximum d parameter change=",
paste( round(a1c,6), collapse=" " ), "\n", sep=""))
}
if ( npirt ){
cat( paste0( " Maximum weighted ICC change=",
paste( round(apmax,6), collapse=" " ), "\n", sep=""))
}
if ( sum(est.K) > 0 ){
cat( paste0( " Maximum K parameter change=",
paste( round(a1K,6), collapse=" " ), "\n", sep=""))
}
if ( D > 1 ){
cat(" Mean | " )
cat( round(as.vector(mu),3))
cat("\n Covariance Matrix | " )
cat( round(Sigma.cov[!upper.tri(Sigma.cov)],3))
cat("\n")
}
if ( irtmodel=="missing1" ){
cat(" Delta=" )
r1 <- sort( unique( as.vector(delta.miss) ) )
h1 <- ""
if ( length(r1) > 5 ){
r1 <- r1[1:5]
h1 <- " ... "
}
cat( round( r1,3))
cat( h1, "\n")
}
utils::flush.console()
}
iter <- iter + 1
}
####################################### end iterations #####################
############################################################################
##**************************************************************************
if ( irtmodel=="missing1"){
m1$center <- FALSE
G <- 1
}
if (npirt & ( ! is.null(npformula ) ) ){
item <- NULL
for (ii in 1:I){
item.ii <- data.frame( "item"=colnames(dat)[ii] )
smod.ii <- summary(npmodel[[ii]])
item.ii <- data.frame( cbind( item.ii, rownames(smod.ii$coef),
smod.ii$coef[,1:2] ) )
colnames(item.ii)[-1] <- c("par", "est", "se" )
rownames(item.ii) <- NULL
item <- rbind( item, item.ii )
}
}
#**********************************************
# standard error for item parameter
# ...
# calculations for information criteria
ic <- list( "deviance"=dev, "n"=nrow(dat) )
# number of parameters to be estimated
# these formulas hold when assuming normal distributions
# ic$traitpars <- ic$itempars <- NA
if ( distribution.trait=="normal"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 )
}
if ( distribution.trait=="smooth2"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 )
}
if ( distribution.trait=="smooth3"){
ic[[ "np" ]] <- ( G - 1 ) + ncol(dat) + ( G - 0 ) + G
}
if ( distribution.trait=="smooth4"){
ic[[ "np" ]]<- ( G - 1 ) + ncol(dat) + ( G - 0 ) + 2*G
}
# ic$itempars <- ic$traitpars - ncol(dat)
# subtract fixed constraints
if ( ! is.null( constraints) ){
ic$np <- ic$np - nrow(constraints)
# ic$itempars <- ic$itempars - nrow(constraints)
}
# subtract constraints due to designmatrix
if ( ! is.null( designmatrix ) ){
ic$np <- ic$np - ncol(dat) + ncol(designmatrix)
# ic$itempars <- ic$itempars - ncol(dat) + ncol(designmatrix)
}
# alpha estimation
ic$np <- ic$np + est.alpha * 2 - equal.alpha *1
# ic$itempars <- ic$itempars + est.alpha * 2 - equal.alpha *1
# guessing, slipping and discrimination parameter estimation
if ( sum(est.c) > 0 ){
ic$np <- ic$np + length(cG)
}
if ( sum(est.d) > 0 ){
ic$np <- ic$np + length(dG)
}
if ( sum(est.a) > 0 ){
ic$np <- ic$np + length(aG) - 1
}
if ( sum(est.K) > 0 ){
ic$np <- ic$np + length(kG)
}
if ( irtmodel=="missing1"){
ic$np <- ic$np + I
if ( est_delta ){
v1 <- unique( est.delta )
v1 <- v1[ ! is.na(v1) ]
ic$np <- ic$np + length(v1)
}
}
# parameters for multiple dimensions
if (D>1){
# mean vector
MM <- nrow(mu.fixed )
# if ( mu.fixed=="center" ){ MM <- 1 }
if ( is.null(mu.fixed) ){ MM <- 0 }
ic$np <- ic$np + length(mu) - MM
# covariance matrix
ic$np <- ic$np - 1*(sum(est.a)==0) + D*(D+1)/2 # SD's
if ( ! is.null(variance.fixed)){ ic$np <- ic$np - nrow( variance.fixed ) }
}
# item parameter for nonparametric models
if (npirt & ( ! is.null(npformula ) ) ){
ic$np <- nrow(item) }
if (npirt & ( is.null(npformula ) ) ){
ic$np <- prod( dim(pjk)) }
# AIC
ic$AIC <- dev + 2*ic$np
# BIC
ic$BIC <- dev + ( log(ic$n) )*ic$np
    # CAIC (consistent AIC)
ic$CAIC <- dev + ( log(ic$n) + 1 )*ic$np
# corrected AIC
ic$AICc <- ic$AIC + 2*ic$np * ( ic$np + 1 ) / ( ic$n - ic$np - 1 )
# item statistics
if ( npirt & ( ! is.null(npformula ) ) ){ item0 <- item }
item <- data.frame( "item"=colnames(dat), "N"=colSums( weights*(1 - is.na(dat)) ),
"p"=colSums( weights*dat, na.rm=T) / colSums( weights*(1 - is.na(dat)) ),
"b"=b )
if ( ! is.null( constraints) ){
est.b <- 1:I
est.b[ constraints[,1] ] <- 0
item$est.b <- est.b
}
if ( npirt & ( ! is.null(npformula ) ) ){ item <- merge( x=item[,1:3], y=item0, by="item" ) }
if ( ! npirt ){
if (is.null(est.b)){ item$est.b=seq(1,I) } else { item$est.b <- est.b }
# fixed parameters
item$a <- fixed.a
if ( ! is.null( est.a) ){ item$est.a <- est.a } else { item$est.a <- rep(0,I) }
# include threshold
item$thresh <- item$a*item$b
# guessing parameter
item$c <- fixed.c
if ( ! is.null( est.c) ){ item$est.c <- est.c } else { item$est.c <- rep(0,I) }
item$d <- fixed.d
if ( ! is.null( est.d) ){ item$est.d <- est.d } else { item$est.d <- rep(0,I) }
        if (m1$center){
            if ( is.null(constraints) ){
                # item[I,4] <- NA
            } else {
                item[ constraints[,1], 4] <- NA
            }
        }
rownames(item) <- colnames(dat)
}
# latent ability distribution
skewness.trait <- sd.trait <- mean.trait <- rep(0,G)
if ( D==1){
for (gg in 1:G){
mean.trait[gg] <- weighted.mean( theta.k, pi.k[,gg] )
sd.trait[gg] <- sqrt( weighted.mean( ( theta.k - mean.trait[gg] )^2, pi.k[,gg] ) )
skewness.trait[gg] <- sum( ( theta.k - mean.trait[gg] )^3 * pi.k[,gg] ) / sd.trait[gg]^3
if (gg==1 & npirt ){ sd.trait[gg] <- 1 }
}
}
# center trait distribution
# if ( center.trait & G < 1 ){
# theta.k <- theta.k - mean.trait
# b <- b - mean.trait
# item$itemdiff <- b
# mean.trait <- 0
# }
trait.distr <- data.frame( "theta.k"=theta.k, "pi.k"=pi.k )
# item response pattern
if ( D==1 ){
if ( is.matrix(theta.k) ){
theta.k <- as.vector( theta.k)
}
ability.est <- data.frame( dat1, theta.k[ whichrowMaxs( f.qk.yi )$arg ] )
colnames(ability.est) <- c("pattern", "AbsFreq", "mean", "MAP" )
}
if (D>1){
ability.est <- data.frame( dat1, theta.k[ whichrowMaxs( f.qk.yi )$arg,] )
colnames(ability.est) <- c("pattern", "AbsFreq", "mean",
paste("MAP.Dim",1:D,sep="") )
}
if (D==1){
ability.est$EAP <- rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k ) )
ability.est$SE.EAP <- sqrt( rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)),
theta.k^2 ) ) - ability.est$EAP^2 )
}
if (D>1){
for (dd in 1:D){
ability.est[, paste("EAP.Dim",dd,sep="")] <-
rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k[,dd] ) )
ability.est[, paste("SE.EAP.Dim",dd,sep="")] <-
sqrt( rowSums( f.qk.yi * outer( rep(1,nrow(ability.est)), theta.k[,dd]^2 ) ) -
ability.est[,paste("EAP.Dim",dd,sep="")]^2 )
}
}
# posterior distribution
rownames(f.qk.yi) <- dat1[,1]
# merging ability estimates
# if ( ! is.null(group)){
if ( G > 1 ){
ability.est2 <- cbind( freq.patt, ability.est[,-1] )
} else {
if (irtmodel !="missing1" ){
ability.est2 <- merge( freq.patt, ability.est, 1, 1 )
} else {
ability.est2 <- ability.est
ability.est2$index <- seq(1, nrow(ability.est) )
}
}
ability.est2 <- ability.est2[ order(ability.est2$index), -c(3:5) ]
# EAP reliability estimate
reliability <- NULL
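    # EAP reliability: 1 - mean(SE.EAP^2) / ( mean(SE.EAP^2) + var(EAP) ),
    # i.e. the share of total variance attributable to true-score variance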
if (D==1){
reliability$eap.reliability <-
1 - mean(ability.est2$SE.EAP^2) / ( mean(ability.est2$SE.EAP^2) + var(ability.est2$EAP) )
}
if (D>1){
r1 <- rep(0,D)
for (dd in 1:D){
r1[dd] <- 1 - mean(ability.est2[,paste("SE.EAP.Dim",dd,sep="")]^2) /
( mean(ability.est2[,paste("SE.EAP.Dim",dd,sep="")]^2) +
stats::var(ability.est2[,paste("EAP.Dim",dd,sep="")]) )
}
if ( is.null( colnames(Qmatrix) ) ){
dimnamesPars <- paste( "Dim",1:D, sep="")
} else { dimnamesPars <- colnames(Qmatrix) }
names(r1) <- dimnamesPars
reliability$eap.reliability <- r1
names(mu) <- dimnamesPars
rownames(Sigma.cov) <- colnames(Sigma.cov) <- dimnamesPars
}
# include person ID
ability.est2$pid <- pid
# match ability patterns
if (irtmodel !="missing1" ){
ind1 <- match( ability.est2$freq.patt, ability.est$pattern )
ability.est <- ability.est[ ind1, ]
f.qk.yi <- f.qk.yi[ind1,]
f.yi.qk <- f.yi.qk[ind1,]
}
#*****
# item table for missing data IRT model
if ( irtmodel=="missing1"){
item$thresh <- item$est.a <- item$est.c <- item$est.d <- NULL
# missing proportion
item$pmiss <- colSums( dat2.resp * ( dat2==2 ), na.rm=TRUE) / colSums( dat2.resp, na.rm=TRUE)
item$beta <- beta
item$delta.miss <- delta.miss
}
# output fixed.a and fixed.c
if ( is.null(fixed.a ) & is.null(fixed.c) ){ fixed.a <- rep(1,I) ; fixed.c <- rep(0,I) }
# include item discrimination
if (D==1){
i1 <- item$emp.discrim <- round( item.discrim( dat, ability.est2$MAP ), 3 )
}
if (npirt){
i1 <- data.frame( "item"=colnames(dat), "emp.discrim"=i1 )
item$emp.discrim <- NULL
item <- merge( x=item, y=i1, by="item" )
}
if ( ! npirt ){
item$alpha1 <- alpha1
item$alpha2 <- alpha2
}
#---------------------------------------------------------
# item summary Ramsay QM
item2 <- NULL
if ( ramsay.qm){
if ( is.null(est.K) ){ est.K <- rep(0,I) }
item2 <- data.frame( "item"=item$item, "N"=item$N , "p"=item$p,
"K"=fixed.K, "est.K"=est.K,
"b"=exp(b), "log_b"=b, "est.b"=item$est.b,
"guess.K"=1/(fixed.K+1),
"emp.discrim"=item$emp.discrim )
}
##################################################
# item response probabilities
d1 <- dim(pjk)
if ( length(d1)==2 ){
rprobs <- array( 0, dim=c( d1[2], 2, d1[1] ) )
rprobs[,2,] <- t( pjk )
rprobs[,1,] <- 1 - t(pjk)
} else {
rprobs <- pjk
}
dimnames(rprobs)[[1]] <- colnames(dat)
#- collect information about priors
priors <- rasch_mml2_prior_information(prior.a, prior.b, prior.c, prior.d)
#--- result
res <- list( dat=dat, item=item, item2=item2, trait.distr=trait.distr,
mean.trait=mean.trait, sd.trait=sd.trait, skewness.trait=skewness.trait,
deviance=dev, pjk=pjk, rprobs=rprobs, person=ability.est2, pid=pid,
ability.est.pattern=ability.est, f.qk.yi=f.qk.yi, f.yi.qk=f.yi.qk,
pure.rasch=pure.rasch, fixed.a=fixed.a, fixed.c=fixed.c, G=G, alpha1=alpha1,
alpha2=alpha2, se.b=se.b, se.a=se.a, se.c=se.c, se.d=se.d, se.alpha=se.alpha,
se.K=se.K, se.delta=se.delta, iter=iter, reliability=reliability,
ramsay.qm=ramsay.qm, irtmodel=irtmodel, D=D, mu=mu, Sigma.cov=Sigma.cov,
est_parameters=est_parameters, priors=priors,
theta.k=theta.k, trait.weights=trait.weights, pi.k=pi.k, CALL=CALL )
class(res) <- "rasch.mml"
res$ic <- ic
res$est.c <- est.c
res$groupindex <- ag1
res$n.jk <- n.jk
res$r.jk <- r.jk
res$esttype <- "ll"
if ( pseudoll ){ res$esttype <- "pseudoll" }
# computation time
s2 <- Sys.time()
res$s1 <- s1
res$s2 <- s2
res$Rfcttype <- "rasch.mml2"
if (progress){
cat("------------------------------------------------------------\n")
cat("Start:", paste( s1), "\n")
cat("End:", paste(s2), "\n")
cat("Difference:", print(s2 -s1), "\n")
cat("------------------------------------------------------------\n")
}
return(res)
}
#---------------------------------------------------------------------------
|
## slices <- c(1210529,37000,344148,22500,12000,406544)
## slices <- slices[order(c(1210529,37000,344148,22500,12000,406544))]
## lbls <- c("Personnel","Travel","Consumables","Publication","Training","Indirect costs")
## lbls <- lbls[order(c(1210529,37000,344148,22500,12000,406544))]
## pct <- round(slices/sum(slices)*100)
## pie(pct,labels = lbls, col=rainbow(length(lbls)),clockwise=TRUE,radius = 0.5, cex = 1.5)
## df <- data.frame(lbls,slices)
## library(RColorBrewer)
## colors <- brewer.pal(7, name="BuGn")
## colors[2:7]
## library(ggplot2)
## bar <- ggplot(df, aes(x = "", y = pct, fill = lbls)) +
## geom_bar(width = 1, stat = "identity", color = "white")
## pie <- bar+coord_polar("y",start=0)+scale_fill_manual(values = colors[2:7])+
## theme_void()+
## theme(legend.text=element_text(size=30),legend.title=element_blank())+
## guides(fill=guide_legend(
## keywidth=0.7,
## keyheight=0.7,
## default.unit="inch")
## )
## png(filename = "Pie.png",
## width = 700, height = 700, units = "px", pointsize = 12,
## bg = "white")
## print(pie)
## dev.off()
## Using plotrix based on these examples:
## https://www.r-bloggers.com/how-to-draw-venn-pie-agram-multi-layer-pie-chart-in-r/
##https://stackoverflow.com/questions/26748069/ggplot2-pie-and-donut-chart-on-same-plot
library(plotrix)
total <- 213764
CG <- 118532
CHG <- total-CG
Genes.without.TEs <- 32307
Genes.without.TEs.CG <- 19106
Genes.without.TEs.CHG <- Genes.without.TEs - Genes.without.TEs.CG
InterGenes.without.TEs <- 93290
InterGenes.without.TEs.CG <- 50637
InterGenes.without.TEs.CHG <- InterGenes.without.TEs - InterGenes.without.TEs.CG
TEs.In.Genes <- 22031
TEs.In.Genes.CG <- 12994
TEs.In.Genes.CHG <- TEs.In.Genes - TEs.In.Genes.CG
TEs.In.InterGenes <- 66136
TEs.In.InterGenes.CG <- 35795
TEs.In.InterGenes.CHG <- TEs.In.InterGenes - TEs.In.InterGenes.CG
Genes <- Genes.without.TEs + TEs.In.Genes
InterGenes <- InterGenes.without.TEs + TEs.In.InterGenes
# parameter for pie chart
iniR=0.2 # initial radius
#colors=list(NO='white',total='black',mtRNA='#e5f5e0',rRNA='#a1d99b',genic=,intergenic='#fec44f',introns=',exons=,upstream='#ffeda0',downstream='#fee0d2',not_near_genes=)
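# nested rings, drawn from the outside in: radius 4*iniR marks the TE slices
# (genic TEs blue, intergenic TEs red), 3*iniR splits genic vs intergenic,
# and 2*iniR splits the CG vs CHG methylation contexts within each slice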
png(filename = "Pie.png",
width = 700, height = 700, units = "px", pointsize = 12,
bg = "white")
#0 circle: blank
pie(1, radius=iniR, init.angle=90, col=c('white'), border = NA, labels='')
#TEs
floating.pie(0,0, c(Genes.without.TEs,TEs.In.Genes,TEs.In.InterGenes,InterGenes.without.TEs), radius=4*iniR, startpos=pi/2, col=c("white",'#9ecae1','#fc9272',"white"),border = "white")
#1 circle: for genes and intergenes
floating.pie(0,0, c(Genes,InterGenes), radius=3*iniR, col=c('#3182bd','#d95f0e'),startpos=pi/2, border = "white")
floating.pie(0,0, c(Genes.without.TEs.CG,
Genes.without.TEs.CHG,
TEs.In.Genes.CHG,
TEs.In.Genes.CG,
TEs.In.InterGenes.CG,
TEs.In.InterGenes.CHG,
InterGenes.without.TEs.CHG,
InterGenes.without.TEs.CG
), radius=2*iniR, col=c("lightgray","darkgray","darkgray","lightgray","lightgray","darkgray","darkgray","lightgray"),startpos=pi/2, border = "white")
legend(-3, 5*iniR, c("genic","intergenic","genic TEs","intergenic TEs","CG","CHG"), col=c('#3182bd','#d95f0e','#9ecae1','#fc9272',"lightgray","darkgray"), pch=19,bty='n', ncol=2,cex=2)
dev.off()
|
/figures/PieChart.r
|
permissive
|
alj1983/2019HongKongPresentation
|
R
| false
| false
| 3,558
|
r
|
\name{magcon}
\alias{magcon}
\title{
2D quantile images and contours
}
\description{
This function generates pretty images and contours that reflect the 2D quantile levels of the data. This means the user can immediately assess the 2D regime that contains an arbitrary percentage of the data. This function was designed particularly with the output of MCMC posteriors in mind, where visualising the location of the 68\% and 95\% 2D quantiles for covariant parameters is a necessary part of the post-MCMC analysis.
}
\usage{
magcon(x, y, h, doim = TRUE, docon = TRUE, dobar = TRUE, ngrid = 100, add = FALSE,
xlab = '', ylab = '', imcol = c(NA,rev(rainbow(1000, start = 0, end = 2/3))),
conlevels = c(0.5, pnorm(1) - pnorm(-1), 0.95), barposition = "topright",
barorient = "v",bartitle = "Contained \%", bartitleshift = 0, xlim = NULL, ylim = NULL,
weights = NULL,...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
x values to contour. If x is a two (or more) column matrix or data.frame and y is missing as an argument, then the first column is used for x and the second column for y.
}
\item{y}{
y values to contour.
}
\item{h}{
Smoothing parameter to pass to kde2d. Can take 1 or 2 arguments for x and optionally y smoothing.
}
\item{doim}{
Should an image be generated.
}
\item{docon}{
Should contours be overlain.
}
\item{dobar}{
Should a magbar colour bar be added describing the image levels (doim must also be true for this to appear).
}
\item{ngrid}{
The ngrid to send to kde2d / sm.density to determine the resolution of the smoothing.
}
\item{add}{
Should the output of this function be added to the current plot. If FALSE then a new plot is generated.
}
\item{xlab}{
Label for x-axis, only used if add=FALSE.
}
\item{ylab}{
Label for y-axis, only used if add=FALSE.
}
\item{imcol}{
The colour palette to use for the image (this is also sent to magbar).
}
\item{conlevels}{
Specific quantile contours to add. Default is for 50\%, 68\% and 95\% contours, i.e. these contours contain that percentage of the data.
}
\item{barposition}{
The position to use for magbar. See magbar help for more details.
}
\item{barorient}{
The orientation to use for magbar. See magbar help for more details.
}
\item{bartitle}{
Title to use for magbar.
}
\item{bartitleshift}{
Control of how far the magbar title is shifted away from its default position.
}
\item{xlim}{
The x limits to use for the data. Default of NULL calculates the range based on the provided x data vector. Data will be clipped between the extremes given. If xlim[1]>xlim[2] plotted axes will be flipped compared to default.
}
\item{ylim}{
The y limits to use for the data. Default of NULL calculates the range based on the provided y data vector. Data will be clipped between the extremes given. If ylim[1]>ylim[2] plotted axes will be flipped compared to default.
}
\item{weights}{
A vector of weights to pass onto sm.density (that does the 2D density estimate). This must be the same length as the x and y vectors if specified.
}
\item{\dots}{
Other arguments to pass to the \code{\link{contour}} function, e.g. lty=c(2,1,3).
}
}
\details{
This function is particularly designed to assess the output for MCMC posteriors since it highlights the confidence regimes quite clearly. More generally it can show the quantile distributions for any 2D data.
}
\value{
Called for the side effect of generating images and contours representing quantiles in 2D data.
}
\author{
Aaron Robotham
}
\seealso{
\code{\link{magplot}},\code{\link{magaxis}},\code{\link{maglab}},\code{\link{magmap}},\code{\link{magrun}},\code{\link{magbar}}
}
\examples{
temp=cbind(rnorm(1e3),rnorm(1e3))
magcon(temp[,1],temp[,2])
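# a further sketch using only the documented arguments: two contour levels,
# no colour bar, and line types passed through to contour via '...'
magcon(temp[,1], temp[,2], conlevels=c(0.5,0.95), dobar=FALSE, lty=c(2,1))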
}
\keyword{quantile}
\keyword{contour}
|
/magicaxis/man/magcon.Rd
|
no_license
|
rtobar/SS19B-Robotham
|
R
| false
| false
| 3,771
|
rd
|
datafile<-"./Exploratory Data Analysis/household_power_consumption.txt"
data<-read.table(datafile,sep=";",header=TRUE,na.strings="?") #read data ("?" marks missing values in this dataset)
subdata<-subset(data,data$Date %in% c("1/2/2007","2/2/2007")) #subset data from 1/2/2007 to 2/2/2007
submetering1<-as.numeric(as.character(subdata$Sub_metering_1)) #via character in case the columns were read as factors
submetering2<-as.numeric(as.character(subdata$Sub_metering_2))
submetering3<-as.numeric(as.character(subdata$Sub_metering_3))
subdata$datetime <- strptime(paste(subdata$Date, subdata$Time), "%d/%m/%Y %H:%M:%S")
plot(subdata$datetime,submetering1,type = "l",xlab="",ylab="Energy Submetering")
lines(subdata$datetime,submetering2,type="l",col="red")
lines(subdata$datetime,submetering3,type="l",col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col = c("black","red","blue"))
dev.copy(png,file="plot3.png")
dev.off()
|
/plot3.R
|
no_license
|
TWOSECOND/ExData_Plotting1
|
R
| false
| false
| 805
|
r
|
#' Document vectorization object
#'
#' This class contains a vectorization model for textual documents.
#' @name vectorizer-class
#' @slot vectorizer The vectorizer.
#' @slot transform The transformation to be applied after vectorization (normalization, TF-IDF).
#' @slot phrases The phrase detection method.
#' @slot tfidf The TF-IDF transformation.
#' @slot lsa The LSA transformation.
#' @slot tokens The tokens from the original documents.
#' @exportClass vectorizer
#' @seealso \code{\link{vectorize.docs}}, \code{\link{query.docs}}
setClass ("vectorizer",
representation (vectorizer = "function",
transform = "character",
phrases = "ANY",
tfidf = "ANY",
lsa = "ANY",
tokens = "ANY"))
#' Text mining object
#'
#' Object used for text mining.
#' @name textmining-class
#' @slot vectorizer The vectorizer.
#' @slot vectors The vectorized dataset.
#' @slot res The result of the text mining method.
#' @exportClass textmining
#' @seealso \code{\link{TEXTMINING}}, \code{\link{vectorize.docs}}
setClass ("textmining",
representation (vectorizer = "function",
vectors = "matrix",
res = "ANY"))
#' @keywords internal
addphrases <-
function (it, mincount = 50, maxiter = 10)
{
vocab = text2vec::create_vocabulary (it, stopwords = stopwords::stopwords ("en"))
vocab = text2vec::prune_vocabulary (vocab, term_count_min = mincount)
model = text2vec::Collocations$new(vocabulary = vocab, collocation_count_min = mincount, pmi_min = 0)
model$fit (it)
nphrases = 0
iter = 0
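  # alternately prune weak collocations and refit until the phrase set
  # stabilises or maxiter is reached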
while ((nphrases != nrow (model$collocation_stat)) && (iter < maxiter))
{
iter = iter + 1
nphrases = nrow (model$collocation_stat)
model$prune (pmi_min = 8, gensim_min = 10, lfmd_min = -25)
model$partial_fit (it)
}
return (model)
}
#' @keywords internal
cleanup <-
function (corpus)
{
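  # lowercase, replace non-alphanumerics with spaces, drop single-character
  # tokens, then squeeze runs of whitespace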
res = sapply (corpus, function (text) tolower (text))
res = sapply (res, function (text) gsub ("[^[:alnum:]]", " ", text))
res = sapply (res, function (text) gsub ("\\b[[:alnum:]]{1}\\b", "", text))
res = sapply (res, function (text) gsub ("\\s+", " ", text))
return (res)
}
#' @keywords internal
createiterator <-
function (corpus, lang, minphrasecount = NULL)
{
it = tokens (corpus, lang = lang)
phrases = NULL
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
return (it)
}
#' @keywords internal
createvectorizer <-
function (corpus, it = NULL, phrases = NULL, vocab = NULL, lang, stopwords = lang, ngram = 1, mincount = 10, minphrasecount = NULL,
transform = c ("none", "l1", "tfidf", "lsa"), latentdim = 50)
{
if (is.null (it))
it = createiterator (corpus, lang, minphrasecount)
if (is.null (vocab))
vocab = getvocab (corpus, mincount, minphrasecount, ngram, stopwords, it = it, lang = lang)
vectorizer = text2vec::vocab_vectorizer (vocab)
res = list (vectorizer = vectorizer, transform = transform [1], minphrasecount = minphrasecount, tokens = it, phrases = phrases)
if (transform [1] == "tfidf")
{
dtm = text2vec::create_dtm (it, vectorizer)
tfidf = text2vec::TfIdf$new()
dtm = text2vec::fit_transform(dtm, tfidf)
res$tfidf = tfidf
}
else if (transform [1] == "lsa")
{
dtm = text2vec::create_dtm (it, vectorizer)
tfidf = text2vec::TfIdf$new()
dtm = text2vec::fit_transform(dtm, tfidf)
res$tfidf = tfidf
lsa = text2vec::LSA$new(n_topics = latentdim)
dtm = text2vec::fit_transform(dtm, lsa)
res$lsa = lsa
}
class (res) = "vectorizer"
return (res)
}
#' Frequent words
#'
#' Most frequent words of the corpus.
#' @name frequentwords
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @param nb The number of words to be returned.
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param ngram maximum size of n-grams.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @return The most frequent words of the corpus.
#' @export
#' @seealso \code{\link{getvocab}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' frequentwords (text, 100)
#' vocab = getvocab (text)
#' frequentwords (vocab, 100)
#' }
frequentwords <-
function (corpus, nb, mincount = 5, minphrasecount = NULL, ngram = 1, lang = "en", stopwords = lang)
{
vocab = NULL
if ("text2vec_vocabulary" %in% class (corpus))
vocab = corpus
else
vocab = getvocab (corpus, mincount = mincount, minphrasecount = minphrasecount, ngram = ngram, lang = lang, stopwords = stopwords)
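  # the vocabulary is sorted by ascending term_count, so the frequency cutoff
  # for the nb most frequent terms sits at row nrow(vocab) + 1 - nb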
return (vocab [vocab [, "term_count"] >= vocab [nrow (vocab) + 1 - nb, "term_count"], "term"])
}
#' Extract words and phrases from a corpus
#'
#' Extract words and phrases from a corpus of documents.
#' @name getvocab
#' @param corpus The corpus of documents (a vector of characters).
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param ngram maximum size of n-grams.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @return The vocabulary used in the corpus of documents.
#' @export
#' @seealso \code{\link{plotzipf}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{create_vocabulary}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' vocab1 = getvocab (text) # With stemming
#' nrow (vocab1)
#' vocab2 = getvocab (text, lang = NULL) # Without stemming
#' nrow (vocab2)
#' }
getvocab <-
function (corpus, mincount = 5, minphrasecount = NULL, ngram = 1, lang = "en", stopwords = lang, ...)
{
dots = list (...)
it = NULL
if (!is.null (dots$it))
it = dots$it
else
{
it = tokens (corpus, lang = lang)
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
}
sw = character(0)
if (!is.null (stopwords))
{
if (length (stopwords) == 1)
sw = stopwords::stopwords (stopwords)
else
sw = stopwords
}
vocab = text2vec::create_vocabulary (it, ngram = c (1, ngram), stopwords = sw)
vocab = text2vec::prune_vocabulary (vocab, term_count_min = mincount)
return (vocab)
}
#' load a text file
#'
#' (Down)Load a text file (and extract it if it is in a zip file).
#' @name loadtext
#' @param file The path or URL of the text file.
#' @param dir The (temporary) directory, where the file is downloaded. The file is deleted at the end of this function.
#' @param collapse Indicates whether the lines of each document should be collapsed together.
#' @param sep Separator between text fields.
#' @param categories Columns that should be considered as categorical data.
#' @return The text contained in the downloaded file.
#' @export
#' @seealso \code{\link[utils]{download.file}}, \code{\link[utils]{unzip}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' }
loadtext <-
function (file = file.choose (), dir = "~/", collapse = TRUE, sep = NULL, categories = NULL)
{
mainfile = file
download = grepl ("^https?://", file)
if (download)
{
mainfile = paste (dir, tail (strsplit (file, "/") [[1]], 1), sep = "")
utils::download.file (file, mainfile)
}
ext = tail (strsplit (mainfile, ".", fixed = TRUE) [[1]], 1)
files = NULL
if (ext %in% c ("zip"))
{
files = utils::unzip (mainfile, exdir = dir, list = TRUE) [, 1]
utils::unzip (mainfile, exdir = dir, files = files)
files = paste (dir, files, sep = "")
}
else
files = mainfile
corpus = NULL
if (is.null (sep))
{
corpus = as.vector (sapply (files, function (file)
{
text = readLines (file, n = -1, warn = FALSE)
if (collapse)
text = paste (text, collapse = " ")
return (text)
}))
corpus = corpus [!sapply (corpus, function (text) grepl ("^\\s*$", text))]
}
else
{
corpus = lapply (files, function (file)
{
text = utils::read.table (file, sep = sep, quote = "")
return (text)
})
if (length (corpus) > 1)
corpus = do.call (rbind, corpus)
corpus [] = lapply(corpus, as.character)
if (!is.null (categories))
corpus [categories] = lapply(corpus [categories], factor)
}
if (download)
file.remove (mainfile)
if (ext %in% c ("zip"))
sapply (files, function (file) file.remove (file))
return (corpus)
}
#' Plot word cloud
#'
#' Plot a word cloud based on the word frequencies in the documents.
#' @name plotcloud
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @param k A categorical variable (vector or factor).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @export
#' @seealso \code{\link{plotzipf}}, \code{\link{getvocab}}, \code{\link[wordcloud]{wordcloud}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' plotcloud (text)
#' vocab = getvocab (text, mincount = 1, lang = NULL, stopwords = "en")
#' plotcloud (vocab)
#' }
plotcloud <-
function (corpus, k = NULL, stopwords = "en", ...)
{
l = NULL
labels = NULL
kk = 1
if (is.null (k))
l = list (corpus)
else
{
kk = sort (unique (k))
for (i in kk)
l = c (l, list (corpus [k == i]))
if (is.factor (k))
labels = levels (k)
else
labels = paste ("Cluster", kk)
}
n = length (kk)
nrow = round (sqrt (n))
ncol = ceiling (n / nrow)
graphics::layout (matrix (1:(nrow * ncol), ncol = ncol, byrow = TRUE))
on.exit (graphics::layout (1))
for (i in 1:n)
{
vocab = NULL
freq = NULL
words = NULL
if ("text2vec_vocabulary" %in% class (l [[i]]))
{
words = l [[i]] [, "term"]
freq = l [[i]] [, "term_count"]
}
else
{
vocab = getvocab (l [[i]], mincount = 1, stopwords = stopwords, lang = NULL)
words = vocab [, "term"]
freq = vocab [, "term_count"]
}
maxfreq = max (freq)
col = unique (grDevices::gray (1 - ((tail (freq, 200) + maxfreq) / (maxfreq * 2))))
wordcloud::wordcloud (words = words, freq = freq, min.freq = 1, max.words = 200, random.order = FALSE, rot.per = 1 / 3, colors = col)
graphics::title (main = labels [i])
}
}
#' Plot rank versus frequency
#'
#' Plot the frequency of words in a document against the ranks of those words. It also plots Zipf's law.
#' @name plotzipf
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @export
#' @seealso \code{\link{plotcloud}}, \code{\link{getvocab}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' plotzipf (text)
#' vocab = getvocab (text, mincount = 1, lang = NULL)
#' plotzipf (vocab)
#' }
plotzipf <-
function (corpus)
{
freq = NULL
if ("text2vec_vocabulary" %in% class (corpus))
freq = corpus [, "term_count"]
else
freq = getvocab (corpus, mincount = 1, stopwords = NULL, lang = NULL) [, "term_count"]
rank = 1:length (freq)
freq = freq [rev (rank)]
logd = data.frame (logrank = log2 (rank), logfreq = log2 (freq))
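  # fit log2(freq) ~ log2(rank), weighted by frequency; under Zipf's law the
  # slope of this line is close to -1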
model = stats::lm (logfreq ~ logrank, weights = freq, data = logd)
options (scipen = freq [1])
graphics::plot (x = rank, y = freq, log = "xy", xlab = "Rank", ylab = "Frequency", t = "l")
graphics::lines (rank, 2^model$coefficients [1] / rank^(-model$coefficients [2]), col = "red", lty = 2)
graphics::legend ("topright", col = 1:2, legend = c ("Observations", "Zipf's law"), lty = 1:2, bty = "n")
}
#' Model predictions
#'
#' This function predicts values based upon a model trained for text mining.
#' @name predict.textmining
#' @param object The classification model (of class \code{\link{textmining-class}}), created by \code{\link{TEXTMINING}}.
#' @param test The test set (a \code{data.frame})
#' @param fuzzy A boolean indicating whether fuzzy classification is used or not.
#' @return A vector of predicted values (\code{factor}).
#' @param ... Other parameters.
#' @export
#' @method predict textmining
#' @seealso \code{\link{TEXTMINING}}, \code{\link{textmining-class}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' model = TEXTMINING (d$train.x, NB, labels = d$train.y, mincount = 50)
#' pred = predict (model, d$test.x)
#' evaluation (pred, d$test.y)
#' }
predict.textmining <- function (object, test, fuzzy = FALSE, ...)
{
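  # project the raw test documents with the vectorizer fitted on the training
  # corpus, then delegate the prediction to the wrapped model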
test = vectorize.docs (corpus = test, vectorizer = object$vectorizer)
return (predict (object$res, as.matrix (test), fuzzy, ...))
}
#' Document query
#'
#' Search for documents similar to the query.
#' @name query.docs
#' @param docvectors The vectorized documents.
#' @param query The query (vectorized or raw text).
#' @param vectorizer The vectorizer that has been used to vectorize the documents.
#' @param nres The number of results.
#' @return The indices of the documents the most similar to the query.
#' @export
#' @seealso \code{\link{vectorize.docs}}, \code{\link[text2vec]{sim2}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data (movie_review)
#' vectorizer = vectorize.docs (corpus = movie_review$review,
#' minphrasecount = 50, returndata = FALSE)
#' docs = vectorize.docs (corpus = movie_review$review, vectorizer = vectorizer)
#' query.docs (docs, movie_review$review [1], vectorizer)
#' query.docs (docs, docs [1, ], vectorizer)
#' }
query.docs <-
function (docvectors, query, vectorizer, nres = 5)
{
if (is.character (query))
query = vectorize.docs (vectorizer, query)
taboo = apply (docvectors, 1, function (v) all (v == query))
return (names (head (sort (text2vec::sim2 (x = docvectors [!taboo, ], y = matrix (query, nrow = 1), method = "cosine", norm = "l2") [, 1], decreasing = TRUE), nres)))
}
#' Word query
#'
#' Search for words similar to the query.
#' @name query.words
#' @param wordvectors The vectorized words
#' @param origin The query (character).
#' @param sub Words to be subtracted from the origin.
#' @param add Words to be added to the origin.
#' @param nres The number of results.
#' @param lang The language of the words (NULL if no stemming).
#' @return The words most similar to the query.
#' @export
#' @seealso \code{\link{vectorize.words}}, \code{\link[text2vec]{sim2}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' words = vectorize.words (text, minphrasecount = 50)
#' query.words (words, origin = "paris", sub = "france", add = "germany")
#' query.words (words, origin = "berlin", sub = "germany", add = "france")
#' query.words (words, origin = "new_zealand")
#' }
query.words <-
function (wordvectors, origin, sub = NULL, add = NULL, nres = 5, lang = "en")
{
words = rownames (wordvectors)
origin = intersect (words, SnowballC::wordStem (tolower (origin), language = lang))
if (length (origin) == 0)
return (character (0))
if (!is.null (sub))
sub = intersect (words, SnowballC::wordStem (tolower (sub), language = lang))
if (!is.null (add))
add = intersect (words, SnowballC::wordStem (tolower (add), language = lang))
taboo = which (words %in% c (origin, sub, add))
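  # analogy arithmetic: q = v(origin) - sum of v(sub) + sum of v(add);
  # the query terms themselves (taboo) are excluded from the results below,
  # which are ranked by cosine similarity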
q = wordvectors [origin [1], , drop = FALSE]
if ((!is.null (sub)) && (length (sub) > 0))
q = q - apply (wordvectors [sub, , drop = FALSE], 2, sum)
if ((!is.null (add)) && (length (add) > 0))
q = q + apply (wordvectors [add, , drop = FALSE], 2, sum)
return (names (head (sort (text2vec::sim2 (x = wordvectors [-taboo, ], y = q, method = "cosine", norm = "l2") [, 1], decreasing = TRUE), nres)))
}
#' @keywords internal
stemtokenizer <-
function (x, lang = "en")
{
tokens = text2vec::word_tokenizer (x)
res = lapply (tokens, SnowballC::wordStem, language = lang)
return (res)
}
#' Text mining
#'
#' Apply a data mining function to vectorized text
#' @name TEXTMINING
#' @param corpus The corpus.
#' @param miningmethod The data mining method.
#' @param vector Indicates the type of vectorization, documents (TF-IDF) or words (GloVe).
#' @param ... Parameters passed to the vectorization and to the data mining method.
#' @return The result of the data mining method.
#' @export
#' @seealso \code{\link{predict.textmining}}, \code{\link{textmining-class}}, \code{\link{vectorize.docs}}, \code{\link{vectorize.words}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' model = TEXTMINING (d$train.x, NB, labels = d$train.y, mincount = 50)
#' pred = predict (model, d$test.x)
#' evaluation (pred, d$test.y)
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' clusters = TEXTMINING (text, HCA, vector = "words", k = 9, maxwords = 100)
#' plotclus (clusters$res, text, type = "tree", labels = TRUE)
#' }
TEXTMINING <-
function (corpus, miningmethod, vector = c ("docs", "words"), ...)
{
if (vector [1] == "docs")
{
vectorizer = vectorize.docs (corpus = corpus, returndata = FALSE, ...)
d = as.matrix (vectorize.docs (corpus = corpus, vectorizer = vectorizer))
res = miningmethod (d, ...)
res = list (vectorizer = vectorizer, vectors = d, res = res)
class (res) = "textmining"
}
else
{
d = as.matrix (vectorize.words (corpus = corpus, ...))
res = miningmethod (d, ...)
res = list (vectors = d, res = res)
class (res) = "textmining"
}
return (res)
}
#' @keywords internal
tokens <-
function (corpus, lang = NULL)
{
ids = NULL
if (length (corpus) > 1)
ids = 1:length (corpus)
tokenizer = text2vec::word_tokenizer
if (!is.null (lang))
tokenizer = stemtokenizer
  return (text2vec::itoken (corpus, preprocessor = cleanup, tokenizer = tokenizer, ids = ids, progressbar = FALSE, lang = lang))
}
#' Document vectorization
#'
#' Vectorize a corpus of documents.
#' @name vectorize.docs
#' @param vectorizer The document vectorizer.
#' @param corpus The corpus of documents (a vector of characters).
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ngram maximum size of n-grams.
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param transform Transformation (TF-IDF, LSA, L1 normalization, or nothing).
#' @param latentdim Number of latent dimensions if LSA transformation is performed.
#' @param returndata If true, the vectorized documents are returned. If false, a "vectorizer" is returned.
#' @param ... Other parameters.
#' @return The vectorized documents.
#' @export
#' @seealso \code{\link{query.docs}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{vectorizers}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' # Clustering
#' docs = vectorize.docs (corpus = movie_review$review, transform = "tfidf")
#' km = KMEANS (docs [sample (nrow (docs), 100), ], k = 10)
#' # Classification
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' vectorizer = vectorize.docs (corpus = d$train.x,
#' returndata = FALSE, mincount = 50)
#' train = vectorize.docs (corpus = d$train.x, vectorizer = vectorizer)
#' test = vectorize.docs (corpus = d$test.x, vectorizer = vectorizer)
#' model = NB (as.matrix (train), d$train.y)
#' pred = predict (model, as.matrix (test))
#' evaluation (pred, d$test.y)
#' }
vectorize.docs <-
function (vectorizer = NULL, corpus = NULL, lang = "en", stopwords = lang, ngram = 1, mincount = 10, minphrasecount = NULL, transform = c ("tfidf", "lsa", "l1", "none"), latentdim = 50, returndata = TRUE, ...)
{
if (is.null (vectorizer))
vectorizer = createvectorizer (corpus, lang = lang, stopwords = stopwords, ngram = ngram, mincount = mincount, minphrasecount = minphrasecount, transform = transform, latentdim = latentdim)
if (returndata)
{
it = NULL
if (is.null (corpus))
it = vectorizer$tokens
else
{
it = tokens (corpus, lang = lang)
if (!is.null (vectorizer$phrases))
it = vectorizer$phrases$transform (it)
}
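    # build the document-term matrix, then replay whichever transformation
    # (L1 / TF-IDF / TF-IDF + LSA) was fitted when the vectorizer was created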
dtm = text2vec::create_dtm (it, vectorizer$vectorizer)
if (vectorizer$transform == "l1")
dtm = text2vec::normalize (dtm, "l1")
else if (vectorizer$transform == "tfidf")
dtm = vectorizer$tfidf$transform(dtm)
else if (vectorizer$transform == "lsa")
{
dtm = vectorizer$tfidf$transform(dtm)
dtm = vectorizer$lsa$transform(dtm)
}
return (as.data.frame (as.matrix (dtm)))
}
else
return (vectorizer)
}
#' Word vectorization
#'
#' Vectorize words from a corpus of documents.
#' @name vectorize.words
#' @param corpus The corpus of documents (a vector of characters).
#' @param ndim The number of dimensions of the vector space.
#' @param maxwords The maximum number of words.
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param window Window for term-co-occurence matrix construction.
#' @param maxcooc Maximum number of co-occurrences to use in the weighting function.
#' @param maxiter The maximum number of iteration to fit the GloVe model.
#' @param epsilon Defines early stopping strategy when fit the GloVe model.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @return The vectorized words.
#' @export
#' @seealso \code{\link{query.words}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{vectorizers}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' words = vectorize.words (text, minphrasecount = 50)
#' query.words (words, origin = "paris", sub = "france", add = "germany")
#' query.words (words, origin = "berlin", sub = "germany", add = "france")
#' query.words (words, origin = "new_zealand")
#' }
vectorize.words <-
function (corpus = NULL, ndim = 50, maxwords = NULL, mincount = 5, minphrasecount = NULL, window = 5, maxcooc = 10, maxiter = 10, epsilon = 0.01, lang = "en", stopwords = lang, ...)
{
it = createiterator (corpus, lang = lang)
phrases = NULL
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
vocab = getvocab (corpus, mincount = mincount, minphrasecount = minphrasecount, ngram = 1, stopwords = stopwords, it = it, lang = lang)
vectorizer = createvectorizer (corpus, it = it, phrases = phrases, vocab = vocab, stopwords = stopwords, ngram = 1, mincount = mincount, minphrasecount = minphrasecount)
tcm = text2vec::create_tcm (vectorizer$tokens, vectorizer$vectorizer, skip_grams_window = window)
glove = text2vec::GlobalVectors$new (rank = ndim, x_max = maxcooc)
words = glove$fit_transform (tcm, n_iter = maxiter, convergence_tol = epsilon)
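  # combine the main and context embeddings; summing them is the usual choice
  # for the final GloVe word vectors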
words = words + t (glove$components)
if (!is.null (maxwords))
{
fw = frequentwords (vocab, maxwords)
words = words [fw, ]
}
return (as.data.frame (words))
}
|
/R/text.R
|
no_license
|
cran/fdm2id
|
R
| false
| false
| 24,713
|
r
|
#' Document vectorization object
#'
#' This class contains a vectorization model for textual documents.
#' @name vectorizer-class
#' @slot vectorizer The vectorizer.
#' @slot transform The transformation to be applied after vectorization (normalization, TF-IDF).
#' @slot phrases The phrase detection method.
#' @slot tfidf The TF-IDF transformation.
#' @slot lsa The LSA transformation.
#' @slot tokens The token from the original document.
#' @exportClass vectorizer
#' @seealso \code{\link{vectorize.docs}}, \code{\link{query.docs}}
setClass ("vectorizer",
representation (vectorizer = "function",
transform = "character",
phrases = "ANY",
tfidf = "ANY",
lsa = "ANY",
tokens = "ANY"))
#' Text mining object
#'
#' Object used for text mining.
#' @name textmining-class
#' @slot vectorizer The vectorizer.
#' @slot vectors The vectorized dataset.
#' @slot res The result of the text mining method.
#' @exportClass textmining
#' @seealso \code{\link{TEXTMINING}}, \code{\link{vectorize.docs}}
setClass ("textmining",
representation (vectorizer = "function",
vectors = "matrix",
res = "ANY"))
#' @keywords internal
addphrases <-
function (it, mincount = 50, maxiter = 10)
{
vocab = text2vec::create_vocabulary (it, stopwords = stopwords::stopwords ("en"))
vocab = text2vec::prune_vocabulary (vocab, term_count_min = mincount)
model = text2vec::Collocations$new(vocabulary = vocab, collocation_count_min = mincount, pmi_min = 0)
model$fit (it)
nphrases = 0
iter = 0
while ((nphrases != nrow (model$collocation_stat)) && (iter < maxiter))
{
iter = iter + 1
nphrases = nrow (model$collocation_stat)
model$prune (pmi_min = 8, gensim_min = 10, lfmd_min = -25)
model$partial_fit (it)
}
return (model)
}
#' @keywords internal
cleanup <-
function (corpus)
{
res = sapply (corpus, function (text) tolower (text))
res = sapply (res, function (text) gsub ("[^[:alnum:]]", " ", text))
res = sapply (res, function (text) gsub ("\\b[[:alnum:]]{1}\\b", "", text))
res = sapply (res, function (text) gsub ("\\s+", " ", text))
return (res)
}
#' @keywords internal
createiterator <-
function (corpus, lang, minphrasecount = NULL)
{
it = tokens (corpus, lang = lang)
phrases = NULL
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
return (it)
}
#' @keywords internal
createvectorizer <-
function (corpus, it = NULL, phrases = NULL, vocab = NULL, lang, stopwords = lang, ngram = 1, mincount = 10, minphrasecount = NULL,
transform = c ("none", "l1", "tfidf", "lsa"), latentdim = 50)
{
if (is.null (it))
it = createiterator (corpus, lang, minphrasecount)
if (is.null (vocab))
vocab = getvocab (corpus, mincount, minphrasecount, ngram, stopwords, it = it, lang = lang)
vectorizer = text2vec::vocab_vectorizer (vocab)
res = list (vectorizer = vectorizer, transform = transform [1], minphrasecount = minphrasecount, tokens = it, phrases = phrases)
if (transform [1] == "tfidf")
{
dtm = text2vec::create_dtm (it, vectorizer)
tfidf = text2vec::TfIdf$new()
dtm = text2vec::fit_transform(dtm, tfidf)
res$tfidf = tfidf
}
else if (transform [1] == "lsa")
{
dtm = text2vec::create_dtm (it, vectorizer)
tfidf = text2vec::TfIdf$new()
dtm = text2vec::fit_transform(dtm, tfidf)
res$tfidf = tfidf
lsa = text2vec::LSA$new(n_topics = latentdim)
dtm = text2vec::fit_transform(dtm, lsa)
res$lsa = lsa
}
class (res) = "vectorizer"
return (res)
}
#' Frequent words
#'
#' Most frequent words of the corpus.
#' @name frequentwords
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @param nb The number of words to be returned.
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param ngram maximum size of n-grams.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @return The most frequent words of the corpus.
#' @export
#' @seealso \code{\link{getvocab}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' frequentwords (text, 100)
#' vocab = getvocab (text)
#' frequentwords (vocab, 100)
#' }
frequentwords <-
function (corpus, nb, mincount = 5, minphrasecount = NULL, ngram = 1, lang = "en", stopwords = lang)
{
vocab = NULL
if ("text2vec_vocabulary" %in% class (corpus))
vocab = corpus
else
vocab = getvocab (corpus, mincount = mincount, minphrasecount = minphrasecount, ngram = ngram, lang = lang, stopwords = stopwords)
return (vocab [vocab [, "term_count"] >= vocab [nrow (vocab) + 1 - nb, "term_count"], "term"])
}
#' Extract words and phrases from a corpus
#'
#' Extract words and phrases from a corpus of documents.
#' @name getvocab
#' @param corpus The corpus of documents (a vector of characters).
#' @param mincount Minimum word count to be considered as frequent.
#' @param minphrasecount Minimum collocation of words count to be considered as frequent.
#' @param ngram maximum size of n-grams.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @return The vocabulary used in the corpus of documents.
#' @export
#' @seealso \code{\link{plotzipf}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{create_vocabulary}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' vocab1 = getvocab (text) # With stemming
#' nrow (vocab1)
#' vocab2 = getvocab (text, lang = NULL) # Without stemming
#' nrow (vocab2)
#' }
getvocab <-
function (corpus, mincount = 5, minphrasecount = NULL, ngram = 1, lang = "en", stopwords = lang, ...)
{
dots = list (...)
it = NULL
if (!is.null (dots$it))
it = dots$it
else
{
it = tokens (corpus, lang = lang)
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
}
sw = character(0)
if (!is.null (stopwords))
{
if (length (stopwords) == 1)
sw = stopwords::stopwords (stopwords)
else
sw = stopwords
}
vocab = text2vec::create_vocabulary (it, ngram = c (1, ngram), stopwords = sw)
vocab = text2vec::prune_vocabulary (vocab, term_count_min = mincount)
return (vocab)
}
#' load a text file
#'
#' (Down)Load a text file (and extract it if it is in a zip file).
#' @name loadtext
#' @param file The path or URL of the text file.
#' @param dir The (temporary) directory where the file is downloaded. The downloaded file is deleted at the end of this function.
#' @param collapse Indicates whether the lines of each document should be collapsed together.
#' @param sep Separator between text fields.
#' @param categories Columns that should be considered as categorical data.
#' @return The text contained in the downloaded file.
#' @export
#' @seealso \code{\link[utils]{download.file}}, \code{\link[utils]{unzip}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' }
loadtext <-
function (file = file.choose (), dir = "~/", collapse = TRUE, sep = NULL, categories = NULL)
{
mainfile = file
download = grepl ("^https?://", file)
if (download)
{
mainfile = paste (dir, tail (strsplit (file, "/") [[1]], 1), sep = "")
utils::download.file (file, mainfile)
}
ext = tail (strsplit (mainfile, ".", fixed = TRUE) [[1]], 1)
files = NULL
if (ext %in% c ("zip"))
{
files = utils::unzip (mainfile, exdir = dir, list = TRUE) [, 1]
utils::unzip (mainfile, exdir = dir, files = files)
files = paste (dir, files, sep = "")
}
else
files = mainfile
corpus = NULL
if (is.null (sep))
{
corpus = as.vector (sapply (files, function (file)
{
text = readLines (file, n = -1, warn = FALSE)
if (collapse)
text = paste (text, collapse = " ")
return (text)
}))
corpus = corpus [!sapply (corpus, function (text) grepl ("^\\s*$", text))]
}
else
{
corpus = lapply (files, function (file)
{
text = utils::read.table (file, sep = sep, quote = "")
return (text)
})
if (length (corpus) > 1)
corpus = do.call (rbind, corpus)
corpus [] = lapply(corpus, as.character)
if (!is.null (categories))
corpus [categories] = lapply(corpus [categories], factor)
}
if (download)
file.remove (mainfile)
if (ext %in% c ("zip"))
sapply (files, function (file) file.remove (file))
return (corpus)
}
#' Plot word cloud
#'
#' Plot a word cloud based on the word frequencies in the documents.
#' @name plotcloud
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @param k A categorical variable (vector or factor).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @export
#' @seealso \code{\link{plotzipf}}, \code{\link{getvocab}}, \code{\link[wordcloud]{wordcloud}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' plotcloud (text)
#' vocab = getvocab (text, mincount = 1, lang = NULL, stopwords = "en")
#' plotcloud (vocab)
#' }
plotcloud <-
function (corpus, k = NULL, stopwords = "en", ...)
{
l = NULL
labels = NULL
kk = 1
if (is.null (k))
l = list (corpus)
else
{
kk = sort (unique (k))
for (i in kk)
l = c (l, list (corpus [k == i]))
if (is.factor (k))
labels = levels (k)
else
labels = paste ("Cluster", kk)
}
n = length (kk)
nrow = round (sqrt (n))
ncol = ceiling (n / nrow)
graphics::layout (matrix (1:(nrow * ncol), ncol = ncol, byrow = TRUE))
on.exit (graphics::layout (1))
for (i in 1:n)
{
vocab = NULL
freq = NULL
words = NULL
if ("text2vec_vocabulary" %in% class (l [[i]]))
{
words = l [[i]] [, "term"]
freq = l [[i]] [, "term_count"]
}
else
{
vocab = getvocab (l [[i]], mincount = 1, stopwords = stopwords, lang = NULL)
words = vocab [, "term"]
freq = vocab [, "term_count"]
}
maxfreq = max (freq)
col = unique (grDevices::gray (1 - ((tail (freq, 200) + maxfreq) / (maxfreq * 2))))
wordcloud::wordcloud (words = words, freq = freq, min.freq = 1, max.words = 200, random.order = FALSE, rot.per = 1 / 3, colors = col)
graphics::title (main = labels [i])
}
}
#' Plot rank versus frequency
#'
#' Plot the frequency of words in a document against the ranks of those words. It also plots Zipf's law.
#' @name plotzipf
#' @param corpus The corpus of documents (a vector of characters) or the vocabulary of the documents (result of function \code{getvocab}).
#' @export
#' @seealso \code{\link{plotcloud}}, \code{\link{getvocab}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' plotzipf (text)
#' vocab = getvocab (text, mincount = 1, lang = NULL)
#' plotzipf (vocab)
#' }
plotzipf <-
function (corpus)
{
freq = NULL
if ("text2vec_vocabulary" %in% class (corpus))
freq = corpus [, "term_count"]
else
freq = getvocab (corpus, mincount = 1, stopwords = NULL, lang = NULL) [, "term_count"]
rank = 1:length (freq)
freq = freq [rev (rank)]
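	# Zipf's law: freq is roughly C / rank^s, i.e. a straight line on a
	# log-log scale, so a (frequency-weighted) linear fit of log2(freq)
	# on log2(rank) estimates the constant C and the exponent s.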
logd = data.frame (logrank = log2 (rank), logfreq = log2 (freq))
model = stats::lm (logfreq ~ logrank, weights = freq, data = logd)
options (scipen = freq [1])
graphics::plot (x = rank, y = freq, log = "xy", xlab = "Rank", ylab = "Frequency", t = "l")
graphics::lines (rank, 2^model$coefficients [1] / rank^(-model$coefficients [2]), col = "red", lty = 2)
graphics::legend ("topright", col = 1:2, legend = c ("Observations", "Zipf's law"), lty = 1:2, bty = "n")
}
#' Model predictions
#'
#' This function predicts values based upon a model trained for text mining.
#' @name predict.textmining
#' @param object The classification model (of class \code{\link{textmining-class}}, created by \code{\link{TEXTMINING}}).
#' @param test The test set (a \code{data.frame})
#' @param fuzzy A boolean indicating whether fuzzy classification is used or not.
#' @return A vector of predicted values (\code{factor}).
#' @param ... Other parameters.
#' @export
#' @method predict textmining
#' @seealso \code{\link{TEXTMINING}}, \code{\link{textmining-class}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' model = TEXTMINING (d$train.x, NB, labels = d$train.y, mincount = 50)
#' pred = predict (model, d$test.x)
#' evaluation (pred, d$test.y)
#' }
predict.textmining <- function (object, test, fuzzy = FALSE, ...)
{
test = vectorize.docs (corpus = test, vectorizer = object$vectorizer)
return (predict (object$res, as.matrix (test), fuzzy, ...))
}
#' Document query
#'
#' Search for documents similar to the query.
#' @name query.docs
#' @param docvectors The vectorized documents.
#' @param query The query (vectorized or raw text).
#' @param vectorizer The vectorizer that has been used to vectorize the documents.
#' @param nres The number of results.
#' @return The indices of the documents the most similar to the query.
#' @export
#' @seealso \code{\link{vectorize.docs}}, \code{\link[text2vec]{sim2}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data (movie_review)
#' vectorizer = vectorize.docs (corpus = movie_review$review,
#' minphrasecount = 50, returndata = FALSE)
#' docs = vectorize.docs (corpus = movie_review$review, vectorizer = vectorizer)
#' query.docs (docs, movie_review$review [1], vectorizer)
#' query.docs (docs, docs [1, ], vectorizer)
#' }
query.docs <-
function (docvectors, query, vectorizer, nres = 5)
{
if (is.character (query))
query = vectorize.docs (vectorizer, query)
taboo = apply (docvectors, 1, function (v) all (v == query))
return (names (head (sort (text2vec::sim2 (x = docvectors [!taboo, ], y = matrix (query, nrow = 1), method = "cosine", norm = "l2") [, 1], decreasing = TRUE), nres)))
}
#' Word query
#'
#' Search for words similar to the query.
#' @name query.words
#' @param wordvectors The vectorized words.
#' @param origin The query (character).
#' @param sub Words to be subtracted from the origin.
#' @param add Words to be added to the origin.
#' @param nres The number of results.
#' @param lang The language of the words (NULL if no stemming).
#' @return The words most similar to the query.
#' @export
#' @seealso \code{\link{vectorize.words}}, \code{\link[text2vec]{sim2}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' words = vectorize.words (text, minphrasecount = 50)
#' query.words (words, origin = "paris", sub = "france", add = "germany")
#' query.words (words, origin = "berlin", sub = "germany", add = "france")
#' query.words (words, origin = "new_zealand")
#' }
query.words <-
function (wordvectors, origin, sub = NULL, add = NULL, nres = 5, lang = "en")
{
	words = rownames (wordvectors)
	# Stem only when a language is given (lang = NULL means no stemming, as documented)
	stem = function (w) if (is.null (lang)) tolower (w) else SnowballC::wordStem (tolower (w), language = lang)
	origin = intersect (words, stem (origin))
	if (length (origin) == 0)
		return (character (0))
	if (!is.null (sub))
		sub = intersect (words, stem (sub))
	if (!is.null (add))
		add = intersect (words, stem (add))
taboo = which (words %in% c (origin, sub, add))
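	# Classic word-analogy arithmetic: subtract the 'sub' vectors from the
	# origin vector, add the 'add' vectors, then rank the remaining words
	# by cosine similarity to the resulting query vector.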
q = wordvectors [origin [1], , drop = FALSE]
if ((!is.null (sub)) && (length (sub) > 0))
q = q - apply (wordvectors [sub, , drop = FALSE], 2, sum)
if ((!is.null (add)) && (length (add) > 0))
q = q + apply (wordvectors [add, , drop = FALSE], 2, sum)
return (names (head (sort (text2vec::sim2 (x = wordvectors [-taboo, ], y = q, method = "cosine", norm = "l2") [, 1], decreasing = TRUE), nres)))
}
#' @keywords internal
stemtokenizer <-
function (x, lang = "en")
{
tokens = text2vec::word_tokenizer (x)
res = lapply (tokens, SnowballC::wordStem, language = lang)
return (res)
}
#' Text mining
#'
#' Apply a data mining function to vectorized text.
#' @name TEXTMINING
#' @param corpus The corpus.
#' @param miningmethod The data mining method.
#' @param vector Indicates the type of vectorization, documents (TF-IDF) or words (GloVe).
#' @param ... Parameters passed to the vectorization and to the data mining method.
#' @return The result of the data mining method.
#' @export
#' @seealso \code{\link{predict.textmining}}, \code{\link{textmining-class}}, \code{\link{vectorize.docs}}, \code{\link{vectorize.words}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' model = TEXTMINING (d$train.x, NB, labels = d$train.y, mincount = 50)
#' pred = predict (model, d$test.x)
#' evaluation (pred, d$test.y)
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' clusters = TEXTMINING (text, HCA, vector = "words", k = 9, maxwords = 100)
#' plotclus (clusters$res, text, type = "tree", labels = TRUE)
#' }
TEXTMINING <-
function (corpus, miningmethod, vector = c ("docs", "words"), ...)
{
if (vector [1] == "docs")
{
vectorizer = vectorize.docs (corpus = corpus, returndata = FALSE, ...)
d = as.matrix (vectorize.docs (corpus = corpus, vectorizer = vectorizer))
res = miningmethod (d, ...)
res = list (vectorizer = vectorizer, vectors = d, res = res)
class (res) = "textmining"
}
else
{
d = as.matrix (vectorize.words (corpus = corpus, ...))
res = miningmethod (d, ...)
res = list (vectors = d, res = res)
class (res) = "textmining"
}
return (res)
}
#' @keywords internal
tokens <-
function (corpus, lang = NULL)
{
ids = NULL
if (length (corpus) > 1)
ids = 1:length (corpus)
tokenizer = text2vec::word_tokenizer
if (!is.null (lang))
tokenizer = stemtokenizer
	return (text2vec::itoken (corpus, preprocessor = cleanup, tokenizer = tokenizer, ids = ids, progressbar = FALSE, lang = lang))
}
#' Document vectorization
#'
#' Vectorize a corpus of documents.
#' @name vectorize.docs
#' @param vectorizer The document vectorizer.
#' @param corpus The corpus of documents (a vector of characters).
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ngram Maximum size of n-grams.
#' @param mincount Minimum count for a word to be considered frequent.
#' @param minphrasecount Minimum count for a collocation of words to be considered frequent.
#' @param transform Transformation (TF-IDF, LSA, L1 normalization, or nothing).
#' @param latentdim Number of latent dimensions if LSA transformation is performed.
#' @param returndata If true, the vectorized documents are returned. If false, a "vectorizer" is returned.
#' @param ... Other parameters.
#' @return The vectorized documents.
#' @export
#' @seealso \code{\link{query.docs}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{vectorizers}}
#' @examples
#' \dontrun{
#' require (text2vec)
#' data ("movie_review")
#' # Clustering
#' docs = vectorize.docs (corpus = movie_review$review, transform = "tfidf")
#' km = KMEANS (docs [sample (nrow (docs), 100), ], k = 10)
#' # Classification
#' d = movie_review [, 2:3]
#' d [, 1] = factor (d [, 1])
#' d = splitdata (d, 1)
#' vectorizer = vectorize.docs (corpus = d$train.x,
#' returndata = FALSE, mincount = 50)
#' train = vectorize.docs (corpus = d$train.x, vectorizer = vectorizer)
#' test = vectorize.docs (corpus = d$test.x, vectorizer = vectorizer)
#' model = NB (as.matrix (train), d$train.y)
#' pred = predict (model, as.matrix (test))
#' evaluation (pred, d$test.y)
#' }
vectorize.docs <-
function (vectorizer = NULL, corpus = NULL, lang = "en", stopwords = lang, ngram = 1, mincount = 10, minphrasecount = NULL, transform = c ("tfidf", "lsa", "l1", "none"), latentdim = 50, returndata = TRUE, ...)
{
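	# Two modes: with returndata = FALSE, fit and return a reusable vectorizer
	# (so train and test sets share one vocabulary and one TF-IDF/LSA fit);
	# with returndata = TRUE, apply the vectorizer to the corpus and return
	# the document-term matrix.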
if (is.null (vectorizer))
vectorizer = createvectorizer (corpus, lang = lang, stopwords = stopwords, ngram = ngram, mincount = mincount, minphrasecount = minphrasecount, transform = transform, latentdim = latentdim)
if (returndata)
{
it = NULL
if (is.null (corpus))
it = vectorizer$tokens
else
{
it = tokens (corpus, lang = lang)
if (!is.null (vectorizer$phrases))
it = vectorizer$phrases$transform (it)
}
dtm = text2vec::create_dtm (it, vectorizer$vectorizer)
if (vectorizer$transform == "l1")
dtm = text2vec::normalize (dtm, "l1")
else if (vectorizer$transform == "tfidf")
dtm = vectorizer$tfidf$transform(dtm)
else if (vectorizer$transform == "lsa")
{
dtm = vectorizer$tfidf$transform(dtm)
dtm = vectorizer$lsa$transform(dtm)
}
return (as.data.frame (as.matrix (dtm)))
}
else
return (vectorizer)
}
#' Word vectorization
#'
#' Vectorize words from a corpus of documents.
#' @name vectorize.words
#' @param corpus The corpus of documents (a vector of characters).
#' @param ndim The number of dimensions of the vector space.
#' @param maxwords The maximum number of words.
#' @param mincount Minimum count for a word to be considered frequent.
#' @param minphrasecount Minimum count for a collocation of words to be considered frequent.
#' @param window Window for term co-occurrence matrix construction.
#' @param maxcooc Maximum number of co-occurrences to use in the weighting function.
#' @param maxiter The maximum number of iterations used to fit the GloVe model.
#' @param epsilon Convergence tolerance defining the early stopping strategy when fitting the GloVe model.
#' @param lang The language of the documents (NULL if no stemming).
#' @param stopwords Stopwords, or the language of the documents. NULL if stop words should not be removed.
#' @param ... Other parameters.
#' @return The vectorized words.
#' @export
#' @seealso \code{\link{query.words}}, \code{\link[stopwords]{stopwords}}, \code{\link[text2vec]{vectorizers}}
#' @examples
#' \dontrun{
#' text = loadtext ("http://mattmahoney.net/dc/text8.zip")
#' words = vectorize.words (text, minphrasecount = 50)
#' query.words (words, origin = "paris", sub = "france", add = "germany")
#' query.words (words, origin = "berlin", sub = "germany", add = "france")
#' query.words (words, origin = "new_zealand")
#' }
vectorize.words <-
function (corpus = NULL, ndim = 50, maxwords = NULL, mincount = 5, minphrasecount = NULL, window = 5, maxcooc = 10, maxiter = 10, epsilon = 0.01, lang = "en", stopwords = lang, ...)
{
it = createiterator (corpus, lang = lang)
phrases = NULL
if ((!is.null (minphrasecount)) && (minphrasecount > 0))
{
phrases = addphrases (it, mincount = minphrasecount)
it = phrases$transform (it)
}
vocab = getvocab (corpus, mincount = mincount, minphrasecount = minphrasecount, ngram = 1, stopwords = stopwords, it = it, lang = lang)
	vectorizer = createvectorizer (corpus, it = it, phrases = phrases, vocab = vocab, lang = lang, stopwords = stopwords, ngram = 1, mincount = mincount, minphrasecount = minphrasecount)
tcm = text2vec::create_tcm (vectorizer$tokens, vectorizer$vectorizer, skip_grams_window = window)
glove = text2vec::GlobalVectors$new (rank = ndim, x_max = maxcooc)
words = glove$fit_transform (tcm, n_iter = maxiter, convergence_tol = epsilon)
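	# GloVe learns two sets of vectors per word (main and context, the latter
	# in glove$components); summing them is the usual way to obtain the final
	# word vectors.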
words = words + t (glove$components)
if (!is.null (maxwords))
{
fw = frequentwords (vocab, maxwords)
words = words [fw, ]
}
return (as.data.frame (words))
}
% File: /man/binary_tree.Rd (asmagen/hierarchicalSingleCell)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tree_functions.R
\name{binary_tree}
\alias{binary_tree}
\title{Create a binary tree object}
\usage{
binary_tree(label, left = NULL, right = NULL)
}
\arguments{
\item{label}{The label of the node}
\item{left}{The left binary tree}
\item{right}{The right binary tree}
}
\value{
A list representing the tree
}
\description{
Create a binary tree object
}
% File: /man/rndpoints.Rd (cran/GmAMisc)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rndpoints.r
\docType{data}
\name{rndpoints}
\alias{rndpoints}
\title{Dataset: random points}
\format{
SpatialPointsDataFrame
}
\usage{
data(rndpoints)
}
\description{
A SpatialPointsDataFrame representing random locations.
}
\keyword{datasets}
# File: /R/PortSummary.r (LobsterScience/bio.lobster)
#' @export
PortSummary<-function(lfa='34',bins=seq(50,200,5),output='graph'){
loadfunctions('lobster')
lobster.db('atSea')
atSea<-addSYEAR(subset(atSea,LFA==lfa))
atSea$YEAR<-year(atSea$SDATE)
atSea$SYEAR[month(atSea$SDATE)<12&atSea$YEAR<2001]<-atSea$YEAR[month(atSea$SDATE)<12&atSea$YEAR<2001]
atSea$SYEAR[month(atSea$SDATE)==12&atSea$YEAR<2000]<-atSea$YEAR[month(atSea$SDATE)==12&atSea$YEAR<2000]-1
PortSamp<-PortSamplesProcess(lfa=lfa,min.size=0)
logs<-subset(read.csv(file.path( project.datadirectory('bio.lobster'), "data","products","logsInSeason.csv")),LFA==lfa)
allPorts<-subset(read.csv(file.path( project.datadirectory('bio.lobster'), "data","inputs","Ports.csv")),LFA==lfa)
#PSports<-subset(PS$portsamples,!duplicated(PORT_CODE))$PORT_CODE
#ASports<-subset(atSea,!duplicated(PORT))$PORT
#SLports<-subset(logs,!duplicated(COMMUNITY_CODE))$COMMUNITY_CODE
if(output=='graph'){
ports<-allPorts$Port_Code[allPorts$Port_Code%in%logs$COMMUNITY_CODE&allPorts$Port_Code%in%PortSamp$portsamples$PORT_CODE&allPorts$Port_Code%in%atSea$PORT]
#browser()
for(i in 1:length(ports)){
pdf(file.path(project.datadirectory('bio.lobster'),"figures","PortSummary",paste0("PortSummary",ports[i],".pdf")))
#browser()
# at Sea sampling
atSeaDat<-subset(atSea,!is.na(SYEAR)&PORT==ports[i],c("SYEAR","CARLENGTH"))
if(nrow(atSeaDat)>0){
atSeaCLF<-CLF(atSeaDat,bins=bins,ID="atSea")
BarPlotCLF(atSeaCLF$CLF,yrs=atSeaCLF$yrs,bins=bins,col='grey',pdf=F,rel=T,LS=83)
}
# port sampling
IDs<-subset(PortSamp[[1]],PORT_CODE==ports[i])$SAMPLE_ID
portDat<-subset(PortSamp$portlengths,SAMPLE_ID%in%IDs,c("SYEAR","LENGTH"))
if(nrow(portDat)>0){
portCLF<-CLF(portDat,bins=bins,ID="Port")
BarPlotCLF(portCLF$CLF,yrs=portCLF$yrs,bins=bins,col='grey',pdf=F,rel=T,LS=83)
}
# logs
catch<-with(subset(logs,COMMUNITY_CODE==ports[i]),tapply(WEIGHT_KG,SYEAR,sum))
effort<-with(subset(logs,COMMUNITY_CODE==ports[i]),tapply(NUM_OF_TRAPS,SYEAR,sum))
cpue.dat<-merge(data.frame(year=as.numeric(names(catch)),catch=catch),data.frame(year=as.numeric(names(effort)),effort=effort),all=T)
cpue.dat$cpue<-cpue.dat$catch/cpue.dat$effort
par(mfrow=c(3,1),las=1)
plot(catch~year,cpue.dat,type='o',pch=16,col='blue',lwd=2,ylim=c(0,max(catch)),ylab="Catch (Kg)",xlab="Year")
plot(effort~year,cpue.dat,type='o',pch=16,col='blue',lwd=2,ylim=c(0,max(effort)),ylab="Effort (TH)",xlab="Year")
plot(cpue~year,cpue.dat,type='o',pch=16,col='blue',lwd=2,ylim=c(0,max(cpue)),ylab="CPUE (Kg/TH)",xlab="Year")
dev.off()
}
}
if(output=='table'){
port.table<-list()
	years<-sort(unique(logs$SYEAR)) # 'years' was undefined in the original; derived from the logs (assumed intent)
	for(i in 1:length(years)){
AS<-with(subset(atSea,SYEAR==years[i]),tapply(CARLENGTH,PORT,length))
PS<-with(merge(subset(PortSamp$portsamples,SYEAR==years[i],c('PORT_CODE','SAMPLE_ID')),subset(PortSamp$portlengths,SYEAR==years[i])),tapply(LENGTH,PORT_CODE,length))
SL<-round(with(subset(logs,SYEAR==years[i]),tapply(WEIGHT_KG,COMMUNITY_CODE,sum)))
port.table[[i]]<-merge(allPorts,merge(data.frame(Port_Code=names(SL),landed.kg=SL),merge(data.frame(Port_Code=names(AS),at.Sea=AS),data.frame(Port_Code=names(PS),at.Port=PS),all=T),all=T),all.y=T)
}
}
	if(output=='table') port.table # only defined when output=='table'
}
# File: /full simulation/12.7.2017/power test/variance0.25/power_0.25_8_grp50_rep20.R (wma9/FMRI-project)
# Briefly, the model is
# Y_ij = intercept + dummyX_ij * (beta + beta_i) + Z_ij * (b + b_i) + error_ij,
# where 'beta' and 'b' are fixed effects while 'beta_i' and 'b_i' are random slope effects.
# We test H_0: variance of b_i = 0 vs. H_a: variance of b_i > 0 through exactRLRT (package RLRsim).
# The simulation uses parallel computing to reduce the running time.
# The random-slope variance is set to different values to estimate power.
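# A minimal sketch (not run) of the RLRsim workflow used below, on hypothetical
# toy data: exactRLRT takes the model containing only the tested random effect,
# the full model, and the null model without that effect.
if (FALSE) {
  library(lme4); library(RLRsim)
  d <- data.frame(y = rnorm(100), x = rnorm(100), g = factor(rep(1:10, each = 10)))
  full <- lmer(y ~ x + (1 | g) + (0 + x | g), data = d) # random intercept + random slope
  m.x <- lmer(y ~ x + (0 + x | g), data = d)            # contains only the tested slope
  m0 <- update(full, . ~ . - (0 + x | g))               # null model: no random slope
  exactRLRT(m.x, full, m0)                              # tests H_0: Var(random slope) = 0
}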
# Setups
library(parallel)
simRep <- 20000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.25) # The set of variances of the random slopes b_i
nRandCovariate <- 8
cores <- 10
r.sim <- b.var
# Below is the function defined for each node in parallel
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
# library(MASS)
set.seed(iter)
nGroup <- 50 # Group number
nRep.sim <- 20 # Duplication in each group n_i
epsilon.sd <- 1 # or 0.5
intercept.true <- 0.5
# nRandCovariate <- 2
# r.sim <- 0
b.sim <- 2
beta.sim <- 2
betaVar.sim <- 1
z.mean <- 0
totalN <- nGroup * nRep.sim
z.var <- c(1)
ID.sim <- rep(1:nGroup, each = nRep.sim)
error.sim <- rnorm(n = totalN, mean = 0, sd = epsilon.sd)
z.sim <- mapply(rnorm, totalN, z.mean, rep(sqrt(z.var), nRandCovariate))
bV.sim <- mapply(rnorm, nGroup, b.sim, rep(sqrt(r.sim), nRandCovariate))
bV.sim <- bV.sim[rep(1:nrow(bV.sim), each = nRep.sim), ]
bV.sim <- bV.sim * z.sim
bV.sim <- rowSums(bV.sim)
betaV.sim <- mapply(rnorm, nGroup, beta.sim, rep(sqrt(betaVar.sim), 1))
betaV.sim <- betaV.sim[rep(1:nrow(betaV.sim), each = nRep.sim), ]
betaV2.sim <- mapply(rnorm, nGroup, beta.sim, rep(sqrt(betaVar.sim), 1))
betaV2.sim <- betaV2.sim[rep(1:nrow(betaV2.sim), each = nRep.sim), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # Shift dummyX
Y.sim <- (intercept.true + bV.sim) + dummyX * betaV.sim + (dummyX - 1) * betaV2.sim + error.sim # NEW add 'dummyX'
ID = ID.sim
Y = Y.sim
npc = nRandCovariate
##12.7.2017
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nGroup*nRandCovariate),
nrow = nRandCovariate,
ncol = nGroup),
matrix(0, nrow = nRep.sim - nRandCovariate, ncol = nGroup)))
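  # Per-group SVD trick: rotate each group's response and covariates by the
  # eigenvectors of Z_i Z_i', collapsing the nRandCovariate random slopes into
  # a single scaled random effect (z.sim.uni) with its own pseudo-grouping
  # factor (ID.uni); this puts the model in the one-variance-component form
  # required by exactRLRT.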
for(k in 1:nGroup){
svd <- svd(z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ] %*% t(z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:nRandCovariate]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep.sim+1):(k*nRep.sim)] <- u.tra %*% Y[((k-1)*nRep.sim+1):(k*nRep.sim)]
dummyX[((k-1)*nRep.sim+1):(k*nRep.sim), ] <- u.tra %*% dummyX[((k-1)*nRep.sim+1):(k*nRep.sim), ]
z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ] <- rbind(u.tra[1:nRandCovariate, ] %*% z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ],
matrix(0,
nrow = nRep.sim - nRandCovariate,
ncol = nRandCovariate))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep.sim - nRandCovariate))
}
##12.7.2017
designMatrix.pdIdent <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
a.score = z.sim,
z.sim.uni = z.sim.uni)
# 'lme' model with 'pdIdent'
#fullReml.pdIdent <- NA
#noAScoreReml.pdIdent <- NA
#notempReml.pdIdent <- NA
# if(npc == 1){
# fullReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + dummyX),
# ID = pdIdent(~ 0 + z.sim)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noZReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + dummyX)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noDummyXReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + z.sim)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# }else
#if(npc == 2){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 3){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 4){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
# }else if(npc == 5){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 6){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 7){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 8){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 9){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 10){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else{
# additive0.sim <- paste(1:pca.npc, collapse = " + a.score.")
# modelFix.sim <- as.formula(paste("rating ~ 1 + temp + a.score.",
# additive0.sim,
# sep = ""))
# modelRan.sim <- as.formula(paste("~ 0 + a.score.",
# additive0.sim,
# sep = ""))
# fullReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(~ 0 + temp),
# ID = pdIdent(modelRan.sim)),
# data = designMatrix.pdIdent)
# noAScoreReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(~ 0 + temp)),
# data = designMatrix.pdIdent)
# notempReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(modelRan.sim)),
# data = designMatrix.pdIdent)
# }
# tests1 <- exactRLRT(notempReml.pdIdent, fullReml.pdIdent, noAScoreReml.pdIdent) # , nsim = 100000
# tests1 <- exactRLRT(notempReml.pdIdent) # , nsim = 100000
# 'lmer' model
designMatrix.lmm <- designMatrix.pdIdent
additive0.sim <- paste(1:npc, collapse = " + a.score.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + a.score.")
  # Build the full-model formula (fixed effects plus the three random terms)
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
# tests2 <- list()
# for(i in 1:npc){
# ii <- paste("a.score.", i, sep = "")
# f0 <- as.formula(paste(" . ~ . - (0 + ", ii, "| ID)"))
# m0 <- update(fullReml, f0)
# f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.", additive0.sim, " + (0 +", ii, " | ID)",
# sep = ""))
# m.slope <- lmer(f.slope, data = designMatrix.lmm)
# tests2[[i]] <- exactRLRT(m.slope, fullReml, m0)
# }
# multiTest1 <- sapply(tests2, function(x) {
# c(statistic = x$statistic[1],
# "p-value" = x$p[1])})
# pvalues.bonf <- p.adjust(multiTest1[2,], "bonferroni")
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1]
# fullReml <- lmer(Y ~ 1 + dummyX + z.sim + (0 + dummyX | ID) + (0 + z.sim | ID), data = designMatrix) ## + (1 | temp : ID)
# m0 <- update(fullReml, . ~ . - (0 + z.sim | ID))
# m.slope <- update(fullReml, . ~ . - (0 + dummyX | ID))
# tests2 <- list()
# tests2[[1]] <- exactRLRT(m.slope, fullReml, m0) # , nsim = 100000
#
#
# tests2 <- sapply(tests2, function(x){c(statistic = x$statistic[1], "p-value" = x$p[1])})
# pvalues.bonf <- p.adjust(tests2[2,], "bonferroni")
return(list(realTau = r.sim,
#pvalue = tests1$p[1],
pvalues.bonf = pvalues.bonf,
#tests1 = tests1,
tests2 = tests2))
}
# Setup parallel
# cores <- detectCores()
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("power_", b.var, "_grp50-rep20-", nRandCovariate, ".RData", sep = "") # Saving file's name (nGroup = 50, nRep.sim = 20)
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
clusterExport(cluster, c("r.sim")) # casting the coefficient parameter on the random effects' covariance function
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
pvalues.bonf = x$pvalues.bonf)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
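# Empirical power: the proportion of the simRep replications whose RLRT
# p-value falls at or below the nominal level pvalue.true.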
table2.sim <- sapply(result2.sim, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster)
|
/full simulation/12.7.2017/power test/variance0.25/power_0.25_8_grp50_rep20.R
|
no_license
|
wma9/FMRI-project
|
R
| false
| false
| 23,536
|
r
|
# For your information, briefly, the model is that, as below,
# Y_ij = intercept + dummyX_ij * (beta + beta_i) + Z_ij * (b + b_i) + error_ij,
# where 'beta' and 'b' are fixed effects while 'b and 'b_i' are random slope effects.
# We conduct the test that H_0: variance of b_i = 0 v.s. H_a: variance of b_i > 0 through exactRLR.
# In this simulation, we use parallel computing in order to reduce the performing time.
# set the random covariance as different values to do the power test
# Setups
library(parallel)
simRep <- 20000 # Replication times in one simulation
pvalue.true <- .05 # Testing type I error
b.var <- c(0.25) # The set of varaince of random covariates b as random slope
nRandCovariate <- 8
cores <- 10
r.sim <- b.var
# Below is the function defined for each node in parallel
run_one_sample <- function(iter){
library(refund)
library(lme4)
library(nlme)
library(arm)
library(RLRsim)
# library(MASS)
set.seed(iter)
nGroup <- 50 # Group number
nRep.sim <- 20 # Duplication in each group n_i
epsilon.sd <- 1 # or 0.5
intercept.true <- 0.5
# nRandCovariate <- 2
# r.sim <- 0
b.sim <- 2
beta.sim <- 2
betaVar.sim <- 1
z.mean <- 0
totalN <- nGroup * nRep.sim
z.var <- c(1)
ID.sim <- rep(1:nGroup, each = nRep.sim)
error.sim <- rnorm(n = totalN, mean = 0, sd = epsilon.sd)
z.sim <- mapply(rnorm, totalN, z.mean, rep(sqrt(z.var), nRandCovariate))
bV.sim <- mapply(rnorm, nGroup, b.sim, rep(sqrt(r.sim), nRandCovariate))
bV.sim <- bV.sim[rep(1:nrow(bV.sim), each = nRep.sim), ]
bV.sim <- bV.sim * z.sim
bV.sim <- rowSums(bV.sim)
betaV.sim <- mapply(rnorm, nGroup, beta.sim, rep(sqrt(betaVar.sim), 1))
betaV.sim <- betaV.sim[rep(1:nrow(betaV.sim), each = nRep.sim), ]
betaV2.sim <- mapply(rnorm, nGroup, beta.sim, rep(sqrt(betaVar.sim), 1))
betaV2.sim <- betaV2.sim[rep(1:nrow(betaV2.sim), each = nRep.sim), ]
dummyX <- rbinom(n = totalN, size = 1, prob = 0.5) # Shift dummyX
Y.sim <- (intercept.true + bV.sim) + dummyX * betaV.sim + (dummyX - 1) * betaV2.sim + error.sim # NEW add 'dummyX'
ID = ID.sim
Y = Y.sim
npc = nRandCovariate
##12.7.2017
dummyX <- cbind(dummyX, -dummyX + 1)
z.sim.uni = c()
ID.uni <- c(rbind(matrix(1:(nGroup*nRandCovariate),
nrow = nRandCovariate,
ncol = nGroup),
matrix(0, nrow = nRep.sim - nRandCovariate, ncol = nGroup)))
for(k in 1:nGroup){
svd <- svd(z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ] %*% t(z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ])) #SVD on A_i
u.tra <- t(svd$v)
u <- svd$u
d <- (svd$d)[1:nRandCovariate]
# u <- cbind(u, Null(u))
Y[((k-1)*nRep.sim+1):(k*nRep.sim)] <- u.tra %*% Y[((k-1)*nRep.sim+1):(k*nRep.sim)]
dummyX[((k-1)*nRep.sim+1):(k*nRep.sim), ] <- u.tra %*% dummyX[((k-1)*nRep.sim+1):(k*nRep.sim), ]
z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ] <- rbind(u.tra[1:nRandCovariate, ] %*% z.sim[((k-1)*nRep.sim+1):(k*nRep.sim), ],
matrix(0,
nrow = nRep.sim - nRandCovariate,
ncol = nRandCovariate))
z.sim.uni <- c(z.sim.uni, sqrt(d), rep(0, nRep.sim - nRandCovariate))
}
##12.7.2017
designMatrix.pdIdent <- data.frame(rating = Y,
temp.1 = dummyX[, 1],
temp.2 = dummyX[, 2],
ID = as.factor(ID),
ID.uni = as.factor(ID.uni),
a.score = z.sim,
z.sim.uni = z.sim.uni)
# 'lme' model with 'pdIdent'
#fullReml.pdIdent <- NA
#noAScoreReml.pdIdent <- NA
#notempReml.pdIdent <- NA
# if(npc == 1){
# fullReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + dummyX),
# ID = pdIdent(~ 0 + z.sim)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noZReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + dummyX)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noDummyXReml.pdIdent <- lme(fixed = Y ~ 1 + dummyX + z.sim,
# random = list(ID = pdIdent(~ 0 + z.sim)),
# data = designMatrix,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# }else
#if(npc == 2){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 3){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 4){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 + a.score.4,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
# }else if(npc == 5){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 6){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 7){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 8){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 9){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else if(npc == 10){
# fullReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni),
# ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# noAScoreReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID = pdIdent(~ 0 + temp.1 + temp.2)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
# notempReml.pdIdent <- lme(fixed = rating ~ 1 + temp.1 + temp.2 + a.score.1 + a.score.2 + a.score.3 +
# a.score.4 + a.score.5 + a.score.6 + a.score.7 + a.score.8 + a.score.9 + a.score.10,
# random = list(ID.uni = pdIdent(~ 0 + z.sim.uni)),
# data = designMatrix.pdIdent,
# control = lmeControl(msVerbose = TRUE, opt = 'optim', singular.ok=TRUE, returnObject=TRUE))
#
#}else{
# additive0.sim <- paste(1:pca.npc, collapse = " + a.score.")
# modelFix.sim <- as.formula(paste("rating ~ 1 + temp + a.score.",
# additive0.sim,
# sep = ""))
# modelRan.sim <- as.formula(paste("~ 0 + a.score.",
# additive0.sim,
# sep = ""))
# fullReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(~ 0 + temp),
# ID = pdIdent(modelRan.sim)),
# data = designMatrix.pdIdent)
# noAScoreReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(~ 0 + temp)),
# data = designMatrix.pdIdent)
# notempReml.pdIdent <- lme(fixed = modelFix.sim,
# random = list(ID = pdIdent(modelRan.sim)),
# data = designMatrix.pdIdent)
# }
# tests1 <- exactRLRT(notempReml.pdIdent, fullReml.pdIdent, noAScoreReml.pdIdent) # , nsim = 100000
# tests1 <- exactRLRT(notempReml.pdIdent) # , nsim = 100000
# 'lmer' model
designMatrix.lmm <- designMatrix.pdIdent
additive0.sim <- paste(1:npc, collapse = " + a.score.")
additive.sim <- paste(1:npc, collapse = " | ID) + (0 + a.score.")
# Build the 'lmer' formula by pasting the score terms together
# (note: 'additive.sim' defined above is not used below)
model.sim <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.",
additive0.sim,
" + (0 + temp.1 | ID) + (0 + temp.2 | ID) + (0 + z.sim.uni | ID.uni)",
sep = ""))
fullReml <- lmer(model.sim, data = designMatrix.lmm)
# tests2 <- list()
# for(i in 1:npc){
# ii <- paste("a.score.", i, sep = "")
# f0 <- as.formula(paste(" . ~ . - (0 + ", ii, "| ID)"))
# m0 <- update(fullReml, f0)
# f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.", additive0.sim, " + (0 +", ii, " | ID)",
# sep = ""))
# m.slope <- lmer(f.slope, data = designMatrix.lmm)
# tests2[[i]] <- exactRLRT(m.slope, fullReml, m0)
# }
# multiTest1 <- sapply(tests2, function(x) {
# c(statistic = x$statistic[1],
# "p-value" = x$p[1])})
# pvalues.bonf <- p.adjust(multiTest1[2,], "bonferroni")
f.slope <- as.formula(paste("rating ~ 1 + temp.1 + temp.2 + a.score.",
additive0.sim,
" + (0 + z.sim.uni | ID.uni)",
sep = ""))
m.slope <- lmer(f.slope, data = designMatrix.lmm)
f0 <- as.formula(" . ~ . - (0 + z.sim.uni | ID.uni)")
m0 <- update(fullReml, f0)
tests2 <- exactRLRT(m.slope, fullReml, m0)
pvalues.bonf <- tests2$p[1] # a single test here, so no Bonferroni adjustment is actually applied
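# Note on the call above (added): with three arguments, exactRLRT(m, mA, m0)
# tests the variance component present in mA but absent from m0, where m is a
# model containing only the random effect under test (see the RLRsim docs).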
# fullReml <- lmer(Y ~ 1 + dummyX + z.sim + (0 + dummyX | ID) + (0 + z.sim | ID), data = designMatrix) ## + (1 | temp : ID)
# m0 <- update(fullReml, . ~ . - (0 + z.sim | ID))
# m.slope <- update(fullReml, . ~ . - (0 + dummyX | ID))
# tests2 <- list()
# tests2[[1]] <- exactRLRT(m.slope, fullReml, m0) # , nsim = 100000
#
#
# tests2 <- sapply(tests2, function(x){c(statistic = x$statistic[1], "p-value" = x$p[1])})
# pvalues.bonf <- p.adjust(tests2[2,], "bonferroni")
return(list(realTau = r.sim,
#pvalue = tests1$p[1],
pvalues.bonf = pvalues.bonf,
#tests1 = tests1,
tests2 = tests2))
}
# Setup parallel
# cores <- detectCores()
cluster <- makeCluster(cores)
clusterSetRNGStream(cluster, 20170822)
# for(nRandCovariate in 1 * 2){ # START out-outer loop
clusterExport(cluster, c("nRandCovariate")) # casting the coefficient parameter on the random effects' covariance function
fileName <- paste("power_", b.var, "_grp20-rep50-", nRandCovariate,".RData", sep = "") # Saving file's name
# run the simulation
loopIndex <- 1
# resultDoubleList.sim <- list()
#power1.sim <- list()
power2.sim <- list()
# for(r.sim in b.var){ # START outer loop
clusterExport(cluster, c("r.sim")) # casting the coefficient parameter on the random effects' covariance function
node_results <- parLapply(cluster, 1:simRep, run_one_sample)
# result1.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
# pvalue = x$pvalue)})
result2.sim <- lapply(node_results, function(x) {list(realTau = x$realTau,
pvalues.bonf = x$pvalues.bonf)})
#resultDoubleList.sim[[loopIndex]] <- node_results
#save.image(file=fileName) # Auto Save
#table1.sim <- sapply(result1.sim, function(x) {
# c(sens = (sum(x$pvalue <= pvalue.true) > 0))})
#Power1 <- mean(table1.sim)
#cat("nRandCovariate: ", nRandCovariate, fill = TRUE)
#cat("Power1: ", Power1, fill = TRUE)
#power1.sim[[loopIndex]] <- list(Power = Power1, realTau = r.sim)
table2.sim <- sapply(result2.sim, function(x) {
c(overall.sens = (sum(x$pvalues.bonf <= pvalue.true) > 0))})
Power2 <- mean(table2.sim)
#cat("Power2: ", Power2, fill = TRUE)
power2.sim[[loopIndex]] <- list(Power = Power2, realTau = r.sim)
# loopIndex <- loopIndex + 1
# } # End outer loop
save.image(file=fileName) # Auto Save
# par(mfrow=c(2,1))
# Histogram plots
# hist(sapply(result1.sim, function(x) x$pvalue),
# main = "Histogram of p-value for lme model",
# xlab = "p-value")
# hist(sapply(result2.sim, function(x) x$pvalues.bonf),
# main = "Histogram of p-value for lmer model",
# xlab = "p-value")
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests1)$statistic[1]),
# breaks = (0:110)/10,
# main = "Histogram of test-statistic for lme model",
# xlab = "Test Statistics")
#
# hist(sapply(resultDoubleList.sim[[1]], function(x) (x$tests2)[1,1]),
# breaks = (0:100)/10,
# main = "Histogram of test-statistic for lmer model",
# xlab = "Test Statistics")
#} # End out-outer loop
stopCluster(cluster)
# Exponential dispersion families
# density / probability mass function of the form
# f(y) = c(y, phi) * exp((y*theta - A(theta))/phi)
# Examples: binomial (n fixed, parameter p), normal (mu, sigma^2),
# exponential, gamma, Poisson, chi-squared
# Counterexample: the uniform distribution does not fit this form
# The chi-squared distribution is a special case of the gamma distribution
# f(y) = 1/(S^a * Gamma(a)) * y^(a-1) * exp(-y/S)   for y > 0
#      = exp(-y/S + (a-1)*log(y) - a*log(S) - log(Gamma(a)))
# We need to identify theta and phi in
# exp((y*theta)/phi - A(theta)/phi) * c(y, phi):
# phi = 1/a
# theta = -1/(a*S)
# f(y) = exp(y*(-1/(a*S))/(1/a)) * S^(-a) * y^(a-1)/Gamma(a)
# ...
# f(y) = (a^a * y^(a-1))/Gamma(a) * exp((y*(-1/(a*S)) - log(a*S))/(1/a))
# A(theta) = log(a*S) = log(-1/theta) = -log(-theta)
# c(y, phi) = a^a/Gamma(a) * y^(a-1)
# Moments of the gamma distribution:
# mu = E(Y) = A'(theta) = -1/theta = a*S
# Var(Y) = phi * A''(theta) = (1/a) * (1/theta^2) = (1/a) * a^2*S^2 = a*S^2
# variance function: v(mu) = mu^2
# Var(Y) = phi * mu^2
### Gamma distribution
# the derivation above, illustrated in R
?rgamma
x = seq(0,10,0.01)
plot(x, dgamma(x, shape=1, scale=2), type="l")
points(x, dgamma(x, shape=3, scale=2), type="l", col = 2)
points(x, dgamma(x, shape=5, scale=2), type="l", col = 3)
# Here the polynomial factor y^(a-1) comes into play:
# it shifts the curve and moves the maximum -> hence the name "shape"
# the distribution is skewed, which follows naturally from the lower bound at zero
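# A quick numerical check of the variance function (added; not in the original
# notes): for shape a and scale S we expect E(Y) = a*S and
# Var(Y) = a*S^2 = (1/a)*(a*S)^2, i.e. Var(Y) = phi*mu^2 with phi = 1/a.
a.chk <- 3; S.chk <- 2
y.chk <- rgamma(1e5, shape = a.chk, scale = S.chk)
c(mean(y.chk), a.chk*S.chk)    # both close to 6
c(var(y.chk), a.chk*S.chk^2)   # both close to 12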
# Geometric distribution
# independent, identical trials with success probability p
# waiting time until the first success
# support: {1, 2, 3, ...}
# P(Y=k) = (1-p)^(k-1)*p
# P(Y=1) = p
# P(Y=2) = (1-p)*p
# Special case of a negative binomial distribution
# - variant 1: number of trials until the r-th success
# - variant 2: number of failures before the r-th success
# P(Y=k) = choose(k+r-1, k) * (1-p)^k * p^r = (k+r-1)!/(k!*(r-1)!) * (1-p)^k * p^r
# Generalization: r need not be a natural number; any real r > 0 works:
# -> P(Y=k) = (Gamma(k+r)/(Gamma(r)*k!)) * (1-p)^k * p^r   (Gamma = gamma function)
# r fixed and known:
# P(Y=y) = Gamma(y+r)/(Gamma(r)*y!) * (1-p)^y * p^r
#        = Gamma(y+r)/(Gamma(r)*y!) * exp(y*log(1-p) + r*log(p))
# theta = log(1-p)
# exp(theta) = 1-p
# p = 1 - exp(theta)
# A(theta) = -r*log(p) = -r*log(1-exp(theta))
# Expected value:
# mu = E(Y) = A'(theta) = -r * (1/(1-exp(theta))) * (-exp(theta)) = r * exp(theta)/(1-exp(theta))
# inverting: theta = log(mu/(mu+r))
# mu = r*(1-p)/p
# Second derivative:
# Var(Y) = A''(theta) = r * (exp(theta)*(1-exp(theta)) - exp(theta)*(-exp(theta)))/(1-exp(theta))^2
#        = r*exp(theta)/(1-exp(theta))^2
# Var(Y) = mu + mu^2/r > mu: a discrete distribution with more variance than the Poisson
# -> the negative binomial suits situations with greater variability (overdispersion)
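# A small simulation check (added): rnbinom() parameterizes by size = r and
# prob = p and counts failures before the r-th success (variant 2 above).
r.chk <- 2.5; p.chk <- 0.4
y.nb.chk <- rnbinom(1e5, size = r.chk, prob = p.chk)
mu.chk <- r.chk*(1-p.chk)/p.chk
c(mean(y.nb.chk), mu.chk)                    # both close to 3.75
c(var(y.nb.chk), mu.chk + mu.chk^2/r.chk)    # both close to 9.375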
# Example: soil respiration
# y_i = log(soil respiration)
# x_i = temperature
# so far:
# E(y_i) = beta_0 + beta_1*x_i = mu_i  with  Y_i ~ N(mu_i, sigma^2)
# now:
# Y_i ~ gamma distribution with a suitable mean and suitable variance,
# i.e. Gamma(shape = a, scale = S_i)
soil.data <- read.csv("soilrespiration1.csv", sep=";")
x <- soil.data$temp
y <- log(soil.data$resp)
b0 = 4.3
b1 = 0.075
sigma = sqrt(0.07)
means = b0+b1*x
y.sim = rnorm(76, mean=means, sd=sigma)
y.simgamma = rgamma(76, shape= 1, scale= means)
plot(x,y, ylim=c(0,20))
points(x,y.simgamma, pch = 16, col = 2)
# we only match the mean here and ignore a for now
# E(y_i) = beta_0 + beta_1*x_i = a * S_i
# Var(Y_i) = a * S_i^2
# choose a so that the gamma variance matches the previous normal-model
# variance at an average x_i:
# x_i = 17.5
b0+b1*17.5
# mu_i = beta_0 + beta_1*x_i = 5.6125 = a * S_i
# Var(Y_i) = 0.07 = a * S_i^2
# S_i = (a * S_i^2) / (a * S_i) = 0.07/5.6125
# a = 5.6125 / S_i
s_i = 0.07 / 5.6125
a = 5.6125 / s_i
# scale = mu_i/a
y.simgamma = rgamma(76, shape= a, scale= means / a)
points(x,y.simgamma, pch = 16, col = 5)
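# Instead of matching moments by hand, the same mean structure could be fit
# as a gamma GLM with identity link (a sketch added here; 'start' values and
# a strictly positive response are assumptions):
if (all(y > 0)) {
  soil.glm <- glm(y ~ x, family = Gamma(link = "identity"), start = c(b0, b1))
  print(summary(soil.glm))  # the dispersion estimate plays the role of 1/a
}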
# Source: /Uebung9/uebung9.R (repo: asver12/Statistische-Verfahren)
# Packages
require(mlr)
require(tweeDEseq)
source('functions/functions.r')
load('../data/filter_genes.RData')
print('Load and match datasets ...')
data = create.data(path.brca = '../data/brca.rds', path.luad = '../data/luad.rds')
target = as.character(data$target)
rnames = colnames(data)
i = intersect(colnames(data), genes)
xdata = data[,i]
# Normalize by TMM approach
data_tmm = normalizeCounts(xdata, method = 'TMM')
xdata = as.data.frame(cbind(data_tmm, target))
xdata$target = as.character(xdata$target)
names(xdata) = make.names(names(xdata))
xdata[] <- lapply(xdata, type.convert, as.is = TRUE)
print('Running Feature Selection. Method: Kruskal Test ...')
# Feature Selection (Filter univariate)
task = makeClassifTask(data = xdata, target = 'target')
# nfeat = 20
nfeat = c(10, 20, 40, 80)
fs.type = 'kruskal.test'
tasks = lapply(nfeat, function(x) filterFeatures(task, method = fs.type, abs = x))
for (i in 1:length(nfeat)) {
tasks[[i]]$task.desc$id = paste(fs.type, ncol(tasks[[i]]$env$data) - 1 , sep = "_")
}
tdata = list()
for (i in 1:length(tasks)) {
tdata[[i]] = tasks[[i]]$env$data
names(tdata)[[i]] = paste(fs.type, nfeat[i], sep = '_')
}
# Machine Learning (Random Forest and Glmnet)
source('functions/machineLearning.r')
bmr = execute.ml(list.data = tdata, win = F) # when running on Windows, set win = T for the parallelisation
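# A quick tabular summary of the benchmark before plotting (added; assumes
# the measures configured inside execute.ml):
print(getBMRAggrPerformances(bmr, as.df = TRUE))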
# Plot the results
png(filename = '../plots/BMRsummary.png')
plotBMRSummary(bmr)
dev.off()
png(filename = '../plots/BMRboxplot.png')
plotBMRBoxplots(bmr, style = 'violin')
dev.off()
png(filename = '../plots/varImp-glmnet.png')
varImp.glmnet(bmr, n.model = 2)
dev.off()
png(filename = '../plots/varImp-rf.png')
varImp.rf(bmr, n.model = 2)
dev.off()
# Source: /R/run-MachineLearning-analysis.r (repo: jlinaresb/RNAseqML)
# Script for generating synthetic item response data using the Gaussian
# copula factor and Accelerated Bayesian Additive Regression Trees models
library(bfa.mod)
library(XBART)
library(plyr)
out_of_sample <- FALSE
subpopulation <- FALSE
# paths to item response data for fitting models
IR_data_dir <- "simulated_data"
output_dir <- "output"
if (out_of_sample) {
IR_data_train_path <- file.path(IR_data_dir, "item_response_data_train.csv")
IR_data_test_path <- file.path(IR_data_dir, "item_response_data_test.csv")
} else {
IR_data_path <- file.path(IR_data_dir, "item_response_data_all.csv")
}
# directories for model and data storage
folder <- ifelse(out_of_sample,
file.path(output_dir, "out_of_sample"),
file.path(output_dir, "in_sample"))
folder <- file.path(folder, ifelse(subpopulation, "subpopulation", "all"))
data_dir <- file.path(folder, "synthetic_data")
model_dir <- file.path(folder, "model_fits")
dir.create(data_dir, recursive = TRUE)
dir.create(model_dir, recursive = TRUE)
########################## Hyperparameters ##############################
# Sampling parameters
n_mcmc <- 1000 # number of "sample populations" to draw, D
n_samp <- 1000 # number of data points in each sample population, N
n_prune_samp <- 100
# BFA parameters for fitting f(X)
num_factor <- 3
nburn <- 5000
if (subpopulation){
cond_vars <- list(Age = 15) # conditioning variables for subpopulation
cond_type <- list(">=")
} else {
cond_vars <- NA
cond_type <- NA
}
# XBART.multinomial parameters for fitting f(Y|X)
num_trees <- 30
burnin <- 100
Nmin <- 4
max_depth <- 250
num_cutpoints <- 7
weight <- 1
########################## Data Preparation ##############################
# read in data
if(out_of_sample){
data_train <- read.csv(IR_data_train_path)
data_test <- read.csv(IR_data_test_path)
} else {
data_train <- read.csv(IR_data_path)
data_test <- data_train
}
n_train <- nrow(data_train)
n_test <- nrow(data_test)
n_cond <- ifelse(is.na(cond_vars), 0, length(cond_vars))
p <- ncol(data_train) - 1 - n_cond # num columns in synthetic X matrix
item_demo_cols <- which(colnames(data_train) != "y")
item_cols <- which(!(colnames(data_train) %in% c("y", names(cond_vars))))
########################## Model Fitting ##############################
# check for Gaussian copula factor model & fit if not present
BFA_model_file <- file.path(model_dir, "fit_BFA")
if(file.exists(BFA_model_file)){
cat("Loading BFA model...\n")
load(BFA_model_file)
} else{
cat("Fitting BFA model...\n")
fit_BFA <- bfa_copula(~., data=data_train[,item_demo_cols],
num.factor = num_factor,
factor.scales = FALSE,
keep.scores = FALSE,
nburn = nburn,
nsim = 2*n_mcmc,
loading.prior = "gdp",
imh = FALSE)
save(fit_BFA, file = BFA_model_file, ascii=TRUE)
}
##### fit XBART model #####
# first compute needed params
p_categorical <- length(item_cols)
mtry <- p_categorical + 1
XB_num_sweeps <- 2*n_mcmc + burnin
XB_postburn_idx <- (burnin + 1):XB_num_sweeps
# fit model; XBART crashed with all categorical inputs -> added dummy rnorm column
cat("Fitting XBART model...\n")
fit_XBART <- XBART.multinomial(y = as.matrix(data_train$y),
num_class = 2,
X = as.matrix(cbind(rnorm(n_train), data_train[,item_cols])),
Xtest = as.matrix(cbind(rnorm(n_test), data_test[,item_cols])),
num_trees = num_trees,
num_sweeps = XB_num_sweeps,
max_depth = max_depth,
Nmin = Nmin,
num_cutpoints = num_cutpoints,
alpha = 0.95,
beta = 1.25,
tau_a = 1,
tau_b = 1,
no_split_penality = 1,
burnin = burnin,
mtry = mtry,
p_categorical = p_categorical,
verbose = FALSE,
parallel = FALSE,
random_seed = NULL,
sample_weights_flag = TRUE,
separate_tree = FALSE,
weight = weight)
save(fit_XBART, file = file.path(model_dir, "fit_XBART"), ascii=TRUE)
# Predict on train and test data and save output
XB_pred_train <- colMeans(fit_XBART$yhats_train[XB_postburn_idx,,2])
XB_pred_test <- colMeans(fit_XBART$yhats_test[XB_postburn_idx,,2])
XB_predict <- list(train = XB_pred_train, test = XB_pred_test)
save(XB_predict, file = file.path(data_dir, "XB_predict"), ascii = TRUE)
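# Optional sanity check (added, not part of the original pipeline; assumes y
# is coded 0/1): training accuracy of the posterior-mean class probabilities
# at a 0.5 threshold.
cat(sprintf("XBART training accuracy: %.3f\n",
            mean((XB_pred_train > 0.5) == (data_train$y == 1))))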
########################## Sampling ##############################
# Draw posterior samples from the fitted Gaussian copula factor model;
# match up posterior indices to draw probabilities and risk class from the fitted XBART model;
# Also compute posterior mean probability \bar{E}(Y|x) for fitting regression tree
synth_data_treefitting <- array(NA, dim=c(n_mcmc, n_samp, p+4))
synth_data_uncertainty <- array(NA, dim=c(n_mcmc, n_samp, p+4))
prune_data_treefitting <- array(NA, dim=c(n_mcmc, n_prune_samp, p+4))
prune_data_uncertainty <- array(NA, dim=c(n_mcmc, n_prune_samp, p+4))
for (j in 1:(2*n_mcmc)) {
cat(sprintf("============ Predicting for iteration %d out of %d ============\n", j, 2*n_mcmc))
# Draw samples
Xtilde <- predict_idx(fit_BFA, post.idx = j, n.samp = n_samp + n_prune_samp,
cond.vars = cond_vars, cond.type = cond_type)
X_item_cols <- which(colnames(Xtilde) != "Age")
p_XBART_draw <- predict.XBARTmultinomial(fit_XBART,
X=as.matrix(cbind(rnorm(nrow(Xtilde)),
Xtilde[,X_item_cols])),
iteration = as.integer(j+burnin))
p_XBART_draw <- p_XBART_draw$yhats[,,2]
Ytilde_draw <- rbinom(n = length(p_XBART_draw), size=1, prob=p_XBART_draw)
p_XBART_mean_pred <- predict.XBARTmultinomial(fit_XBART,
X=as.matrix(cbind(rnorm(nrow(Xtilde)),
Xtilde[,X_item_cols])))
p_XBART_mean <- colMeans(p_XBART_mean_pred$yhats[XB_postburn_idx,,2])
Ytilde_mean <- rbinom(n = length(p_XBART_mean), size=1, prob=p_XBART_mean)
# Store data
temp <- as.matrix(cbind(Xtilde, p_XBART_mean, Ytilde_mean, p_XBART_draw, Ytilde_draw))
if(j%%2 == 0) {
synth_data_treefitting[j/2,,] <- temp[1:n_samp,]
prune_data_treefitting[j/2,,] <- temp[(n_samp+1):(n_samp + n_prune_samp),]
} else {
synth_data_uncertainty[(j+1)/2,,] <- temp[1:n_samp,]
prune_data_uncertainty[(j+1)/2,,] <- temp[(n_samp+1):(n_samp + n_prune_samp),]
}
}
# Data postprocessing and saving
synth_treefitting_XB <- adply(synth_data_treefitting, .margins = 1, .id="post.idx")
synth_uncertainty_XB <- adply(synth_data_uncertainty, .margins = 1, .id="post.idx")
prune_treefitting_XB <- adply(prune_data_treefitting, .margins = 1, .id="post.idx")
prune_uncertainty_XB <- adply(prune_data_uncertainty, .margins = 1, .id="post.idx")
Xtilde_cols <- which(!(colnames(data_train) %in% c("y", names(cond_vars))))
Xtilde_colnames <- colnames(data_train)[Xtilde_cols]
colnames(synth_treefitting_XB) <- c('post.idx', Xtilde_colnames, 'phat.mean', 'y.mean', 'phat.draw', 'y.draw')
colnames(synth_uncertainty_XB) <- c('post.idx', Xtilde_colnames, 'phat.mean', 'y.mean', 'phat.draw', 'y.draw')
colnames(prune_treefitting_XB) <- c('post.idx', Xtilde_colnames, 'phat.mean', 'y.mean', 'phat.draw', 'y.draw')
colnames(prune_uncertainty_XB) <- c('post.idx', Xtilde_colnames, 'phat.mean', 'y.mean', 'phat.draw', 'y.draw')
write.csv(synth_treefitting_XB, file.path(data_dir, "synth_treefitting_XB.csv"), row.names = FALSE)
write.csv(synth_uncertainty_XB, file.path(data_dir, "synth_uncertainty_XB.csv"), row.names = FALSE)
write.csv(prune_treefitting_XB, file.path(data_dir, "prune_treefitting_XB.csv"), row.names = FALSE)
write.csv(prune_uncertainty_XB, file.path(data_dir, "prune_uncertainty_XB.csv"), row.names = FALSE)
# Source: /code/2-draw_synth_XB.R (repo: chelsea-k/adaptive-tests)
url = "http://www.randomservices.org/random/data/Challenger2.txt"
oring=read.table(url,header=T)
attach(oring)
# attach() makes the data frame's columns available by name
#
plot(T,I)
oring.lm = lm (I ~T)
# use summary() to see the regression details
summary(oring.lm)
# add fitted line to scatterplot
lines(T,fitted(oring.lm))
# From the summary, the slope estimate is -0.24337
# with a Std. Error of 0.06349
# 95% posterior interval for the slope
-0.24337 - 0.06349*qt(.975,21)
-0.24337 + 0.06349*qt(.975,21)
# note that these are the same as the frequentist confidence intervals
# the Challenger launch was at 31 degrees Fahrenheit
# how much o-ring damage would we predict?
# y-hat
18.36508-0.24337*31
coef(oring.lm)
# use the coefficients to compute the fitted value at T = 31
coef(oring.lm)[1] + coef(oring.lm)[2]*31
# posterior prediction interval (same as frequentist)
predict(oring.lm,data.frame(T=31),interval="predict")
# expected output:  fit = 10.82052, lwr = 4.048269, upr = 17.59276
# i.e. the fitted value with its 95% lower and upper prediction bounds
# verify this by hand (2.102 is the residual standard error from summary(oring.lm), with 21 df)
10.82052-2.102*qt(.975,21)*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))
# posterior probability that damage index is greater than zero
1-pt((0-10.82052)/(2.102*sqrt(1+1/23+((31-mean(T))^2/22/var(T)))),21)
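# The same by-hand interval, computed from the fitted model (added):
sigma.hat <- summary(oring.lm)$sigma   # residual standard error, about 2.102
se.pred <- sigma.hat*sqrt(1 + 1/23 + (31-mean(T))^2/(22*var(T)))
10.82052 + c(-1, 1)*qt(.975, 21)*se.pred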
# Galton's seminal data on predicting the height of children from the heights of the parents, all in inches
url1 = "http://www.randomservices.org/random/data/Galton.txt"
heights=read.table(url1,header=T)
attach(heights)
names(heights)
pairs(heights)
summary(lm(Height~Father+Mother+Gender+Kids))
# From the results, the estimate for Kids is -0.04382, which is small relative to its Std. Error, so its interval contains zero.
# We therefore refit the regression without Kids as a predictor.
summary(lm(Height~Father+Mother+Gender))
# Now all the variables have strong effects
heights.lm=lm(Height~Father+Mother+Gender)
# each extra inch of the father's height is associated with 0.4 inch of extra height in the child
# each extra inch of the mother's height is associated with 0.3 inch of extra height in the child
# a male child is on average 5.2 inches taller than a female child
# 95% posterior interval for the difference in height by gender
5.226 - 0.144*qt(.975,894)
5.226 + 0.144*qt(.975,894)
# posterior prediction interval (same as frequentist)
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="M"),interval="predict")
predict(heights.lm,data.frame(Father=68,Mother=64,Gender="F"),interval="predict")
# Source: /LinearRegression.R (repo: SaladeDeFruits/Bayesian_Statistics_From_Concept_to_Data_Analysis)
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ddo_addTransform.R
\name{applyTransform}
\alias{applyTransform}
\title{Applies the transformation function(s)}
\usage{
applyTransform(transFns, x, env = NULL)
}
\arguments{
\item{transFns}{from the "transforms" attribute of a ddo object}
\item{x}{a subset of the object}
\item{env}{the environment in which to evaluate the function (should be instantiated from calling \code{\link{setupTransformEnv}}) - if \code{NULL}, the environment will be set up for you}
}
\description{
This is called internally in the map phase of datadr MapReduce jobs. It is not meant for use outside of there, but is exported for convenience.
}
% Source: /man/applyTransform.Rd (repo: jrounds/datadr)
# Multiple Testing --------------------------------------------------------
# Source: /Chapter_13.R (repo: jSoboil/Introduction-to-Statistical-Learning)
context("Test to make sure lat checks and long checks are ok")
test_that("lat and long checks work correctly", {
expect_false(test_lat(-1900))
expect_true(test_lat(-89))
expect_false(test_long(-1800))
expect_true(test_long(-89))
})
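# A minimal sketch of the bounds check these helpers presumably implement
# (hypothetical; the real test_lat/test_long are defined in the package):
test_lat_sketch <- function(lat) is.numeric(lat) && lat >= -90 && lat <= 90
test_long_sketch <- function(long) is.numeric(long) && long >= -180 && long <= 180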
context("Testing the aggregate function")
test_that("aggregates work correctly", {
dat <- structure(list(departement = structure(c(5L, 47L, 18L, 9L, 14L,
9L), .Label = c("AIN", "AISNE", "ALLIER", "ARDENNES", "AUBE",
"BAS-RHIN", "C\xd4TE-D'OR", "CALVADOS", "CHARENTE", "CHER", "CREUSE",
"DEUX-S\xc8VRES", "DORDOGNE", "EURE", "EURE-ET-LOIR", "GIRONDE",
"HAUT-RHIN", "HAUTE-MARNE", "HAUTE-SA\xd4NE", "HAUTE-VIENNE",
"INDRE", "INDRE-ET-LOIRE", "JURA", "LOIR-ET-CHER", "LOIRET",
"MAINE-ET-LOIRE", "MANCHE", "MARNE", "MAYENNE", "MEURTHE-ET-MOSELLE",
"MEUSE", "MOSELLE", "NI\xc8VRE", "NORD", "OISE", "ORNE", "PAS-DE-CALAIS",
"PUY-DE-D\xd4ME", "PYR\xc9N\xc9ES-ATLANTIQUES", "RH\xd4NE", "SA\xd4NE-ET-LOIRE",
"SARTHE", "SAVOIE", "SEINE-ET-MARNE", "SEINE-MARITIME", "SOMME",
"VIENNE", "VOSGES", "YONNE"), class = "factor"), commune = structure(c(992L,
522L, 810L, 740L, 116L, 756L), .Label = c("\xc9pen\xe8de", "\xc9perrais",
"\xc9poisses", "\xc9riseul", "\xc9tagnac", "\xc9talleville",
"\xc9tr\xe9aupont", "\xc9treillers", "\xc9troeungt", "abilly",
"Abzac", "ABZAC", "ACY", "Adriers", "ADRIERS", "AFFLEVILLE",
"Agincourt", "Ailly-le-Haut-Clocher", "Aisey-et-Richecourt",
"Aixe-sur-Vienne", "Aizy Jouy", "Alincthun", "ALLICHAMPS", "ALLOUE",
"Allouville-Bellefosse", "Ambernac", "Andilly-en-Bassigny", "Andryes",
"Angely", "ANOST", "Anrosey", "Ansac-sur-Vienne", "ANSAC-SUR-VIENNE",
"ANSAUVILLERS", "Anv\xe9ville", "Archigny", "ARGILLIERES", "ARGY",
"ARMES", "Arnac-la-Poste", "Arrest", "Artannes-sur-Indre", "Asni\xe8res-sur-Blour",
"ASNIERES-SUR-BLOUR", "ASPACH", "Attin", "Atton", "Auberville-la-Renault",
"AUCHY-LEZ-ORCHIES", "Audincthun", "AUDINGHEN", "Audruicq", "AULT",
"Aunay-sur-Odon", "Authe", "Authieule", "Autigny-la-Tour", "Autrey",
"AUTREY", "AUXAIS", "Availles-Limouzine", "AVAILLES-LIMOUZINE",
"Avelin", "Avermes", "Avesnelles", "AVESNES-LE-SEC", "Avrecourt",
"AVROULT", "Awoingt", "AYDOILLES", "AZAT-LE-RIS", "Azay-le-Ferron",
"AZAY-LE-FERRON", "AZAY-LE-RIDEAU", "Azoudange", "AZY-LE-VIF",
"B\xe9court", "B\xe9nouville", "B\xe9thines", "Bacourt", "Bacqueville-en-Caux",
"Bagneux", "Bailleul", "BAINVILLE-SUR-MADON", "BALLAY", "Balledent",
"BALLEDENT", "Baons-le-Comte", "BARBAS", "Basse-sur-le-Rupt",
"Bauzemont", "BAVINCOURT", "Bay-sur Aube", "BAZELAT", "BEAULIEU",
"Beauquesne", "Beaurepaire", "Beaurepaire-en-Bresse", "Beauvilliers",
"BEGROLLES-EN-MAUGES", "Bellac", "Belrupt", "Belval-sous-Ch\xe2tillon",
"BERCK", "BEREZIAT", "Berneuil", "Bernouville", "BERTHEN", "BERUGES",
"BESSINES-SUR-GARTEMPE", "BETHINES", "Bettoncourt-le-Haut", "Beurizot",
"Beurville", "Beux", "Beuzeville", "Beuzeville-la-Grenier", "Bezaumont",
"Biarre", "Billy-sous-les-C\xf4tes", "BISLEE", "Bissey-sous-Cruchaud",
"Biville-la-Baignarde", "BIZIAT", "BLANCHEFOSSE-ET-BAY", "Blanzac",
"BLANZAC", "BLARINGHEM", "Blessonville", "Blicourt", "Blond",
"Bolbec", "Bolleville", "Bonneuil", "Bonny-sur-Loire", "Bossancourt",
"BOUESSE", "Bougey", "BOULAY-MOSELLE", "Bourbonne-les-Bains",
"Bouresse", "BOURESSE", "Bourg-Archambault", "BOURG-ARCHAMBAULT",
"Boz", "Br\xe9aut\xe9", "Br\xe9m\xe9nil", "Br\xe9moncourt", "Braux",
"BREVES", "Briantes", "Bricquebec", "BRICQUEBEC", "Brigueil-le-Chantre",
"BRIGUEIL-LE-CHANTRE", "Brigueuil", "BRIGUEUIL", "Brillac", "BRILLAC",
"BRIXEY-AUX-CHANOINES", "Brulange", "Brunehamel", "Brutelles",
"Bucamps", "Buffign\xe9court", "Buhl-Lorraine", "BURTHECOURT-AUX-CHENES",
"Bussi\xe8re-Poitevine", "BUSSIERE-BOFFY", "BUSSIERE-POITEVINE",
"Buxi\xe8res-d'Aillac", "BUXIERES-LES-MINES", "C\xe9lon", "CABANAC-ET-VILLAGRAINS",
"Cambremer", "Cannectancourt", "Canville-les-Deux-\xc9glises",
"Carignan", "CARQUEBUT", "CATIGNY", "Catillon-sur-Sambre", "Caunay",
"Cavron-Saint-Martin", "CERE-LA-RONDE", "CERENCES", "CERISY-LA-SALLE",
"Ch\xe2teau-Garnier", "ch\xe2teau-salins", "Ch\xe2teauponsac",
"Ch\xe2tenay-Vaudin", "Ch\xe2tenois", "Ch\xe9zeaux", "Ch\xe9zy-en-Orxois",
"Chablis", "CHABRIS", "Chaillac", "CHAILLAC", "CHAILLAC-SUR-VIENNE",
"Chailly-sur-Arman\xe7on", "CHALAIS", "Chalindrey", "CHALLERANGE",
"CHAMARANDES-CHOIGNES", "Chamboret", "Chambrey", "CHAMPAGNE-SAINT-HILAIRE",
"Champigny-sous-Varennes", "CHAMPS", "CHAMPSEVRAINE", "Chantelle",
"Chaouilley", "Charbuy", "CHARLEVILLE-SOUS-BOIS", "Charmes-la-Grande",
"CHARMES-LA-GRANDE", "charroux", "Chassey-l\xe8s-Montbozon",
"CHATEAU-CHINON(CAMPAGNE)", "CHATEAU-GARNIER", "CHATEAUPONSAC",
"Chaudenay", "CHAUMONT-LA-VILLE", "Chaumont-Porcien", "Chauvigny",
"CHAVEYRIAT", "Chazelet", "CHAZELLES", "CHEHERY", "Chemery",
"CHEMERY", "Chemin-d'Aisey", "CHENIERS", "Cherbourg", "CHERVES-CHATELARS",
"Chevannes", "CHEVERNY", "Chevillon", "CHEVROCHES", "Chicourt",
"CHIRAC", "Cieux", "Ciron", "Civrieux", "Cizay-la-Madeleine",
"Cl\xe9rey-sur-Br\xe9non", "Cl\xe9zentaine", "CLAIRFONTAINE",
"Clamecy", "CLAMECY", "CLERE-DU-BOIS", "Clinchamps-sur-Orne",
"CLION", "Coiffy-le-Haut", "COINGT", "Colombey-les-Belles", "COLOMBEY-LES-DEUX-EGLISES",
"COLONDANNES", "Combres-sous-les-C\xf4tes", "COMBREUX", "Cond\xe9-Northen",
"CONDE-NORTHEN", "CONFOLENS", "Confran\xe7on", "CONGE-SUR-ORNE",
"Conthil", "Corpoyer-la-Chapelle", "COSSAYE", "Cossesseville",
"Coulandon", "COULLEMONT", "Coulmier-le-Sec", "Coulonges", "COULONGES",
"Coulonges-sur-Sarthe", "Coupelle-Vieille", "COURCELLES-LES-SEMUR",
"COURGEON", "COURTESOULT-ET-GATEY", "Coussay-les-Bois", "COUVERTPUIS",
"Coyviller", "Cr\xe9quy", "CRECY-SUR-SERRE", "CREQUY", "CRIEL-SUR-MER",
"Crion", "CROISY", "CROMAC", "CRUGNY", "Crupilly", "CULAN", "Cussay",
"Cuves", "D\xe9deling", "Dammartin-sur-Meuse", "Darnac", "DARNAC",
"Delettes", "DELETTES", "Dercy", "Desvres", "DIEVAL", "Dinsac",
"DOMBROT-SUR-VAIR", "Domecy-sur-Cure", "Domgermain", "Domjulien",
"DOMMARTIN-DAMPIERRE", "Dompierre-les-\xc9glises", "Dompierre-sur-Authie",
"Domptail", "Donzy", "Doudeauville", "Douvrend", "DOVILLE", "Dracy",
"Dracy-Saint-Loup", "Drambon", "Droisy", "Droux", "Dunet", "ECUEILLE",
"ECUIRES", "ECULLEVILLE", "Emberm\xe9nil", "Empur\xe9", "Englesqueville-en-Auge",
"Envronville", "EPAIGNES", "epinac", "EPINAL", "Escles", "Escombres-et-le-Chesnois",
"Esley", "Esse", "ESSE", "Euilly-et-Lombut", "EXIDEUIL", "Eywiller",
"FAULQUEMONT", "Faulx", "FAULX", "Faveraye-Machelles", "FAYE-L'ABBESSE",
"FAYL-BILLOT", "FEYTIAT", "Fill\xe9", "FLAGEY", "Fleury-la-For\xeat",
"FLIGNY", "Flor\xe9mont", "Fluqui\xe8res", "FOAMEIX-ORNEL", "Fontaine-au-Bois",
"Fontaine-l\xe8s-Vervins", "Fontaine-le-Dun", "FONTAINE-LES-VERVINS",
"Fontaines", "Fontangy", "FONTENELLE", "formigny", "Fossieux",
"Foucart", "Foulcrey", "Fouligny", "Fourcigny", "FOURS", "Fr\xe9monville",
"Fr\xe9vent", "Fr\xf4lois", "Fraignot-et-Vesvrotte", "Framicourt",
"Fraquelfing", "Freb\xe9court", "Frenelle-la-Grande", "Freneuse-sur-Risle",
"Fresnes-en-Saulnois", "Fresnes-en-Tardenois", "FRESNOY-FOLNY",
"Fromy", "FRONTENARD", "G\xe9lucourt", "GAJOUBERT", "GARGILESSE-DAMPIERRE",
"Gemmelaincourt", "GEMONVILLE", "Genouillac", "Gerb\xe9viller",
"Gergny", "GERMINY", "Gevrolles", "Ghyvelde", "GIEVILLE", "ginai",
"Giraumont", "Givron", "Gizay", "Glanges", "GLOS-LA-FERRIERE",
"GODERVILLE", "GOLLEVILLE", "GONCOURT", "Gonneville", "GONNEVILLE-EN-AUGE",
"Gouberville", "Gouex", "GOUEX", "Gournay", "GOURNAY-EN-BRAY",
"Goviller", "Grand", "Grand Rullecourt", "GRAND-CAMP", "Grandcourt",
"Greuville", "GREZ", "Grez-en-Bou\xe8re", "Grivy-Loisy", "GROSSOUVRE",
"Grosville", "Gu\xe9bling", "GUILLON", "Guilly", "Guincourt",
"GUIPY", "H\xe9nam\xe9nil", "Haboudange", "Haillainville", "Hambye",
"Hammeville", "HANNACHES", "HARGICOURT", "Hary", "HAUT-LIEU",
"HAUTE-AMANCE", "Haute-Kontz", "HAUTEFONTAINE", "Hauteroche",
"HAUTTEVILLE-BOCAGE", "HAZEBROUCK", "Hennezel", "Hermanville",
"Heugleville-sur-Scie", "HEUGNES", "Honnechy", "Honskirch", "HORNAING",
"HORNOY-LE-BOURG", "Hou\xe9ville", "Houdelaincourt", "HOUECOURT",
"Hugleville-en-Caux", "Hurecourt", "Ibigny", "ILLKIRCH-GRAFFENSTADEN",
"Imbleville", "Incourt", "INCOURT", "Insming", "Is-en-Bassigny",
"ISENAY", "Jaulges", "Jeantes", "JEANTES", "JEU-LES-BOIS", "Jouac",
"Jouhet", "JOUHET", "JOURNET", "JOUSSE", "JUGY", "JUVIGNIES",
"KIRRWILLER-BOSSELSHAUSEN", "L'\xc9tang-Bertrand", "L'Isle-Jourdain",
"La Bazeuge", "LA BERTHENOUX", "LA BESACE", "La Blouti\xe8re",
"La Celle-Gu\xe9nand", "LA CELLE-SUR-LOIRE", "La Cerlangue",
"La Chapelle-Bertrand", "La Chapelle-Montreuil", "LA CHAPELLE-MONTREUIL",
"La Chapelle-Th\xe8cle", "LA COLOMBE", "LA COMTE", "LA COURBE",
"La Croix-sur-Gartempe", "LA CROIX-SUR-GARTEMPE", "LA FEREE",
"LA FERMETE", "La Ferri\xe8re-en-Parthenay", "La Fert\xe9-Loupi\xe8re",
"La Gaillarde", "La Godefroy", "La Haye-du-Puits", "La Loge",
"LA MANCELIERE", "La Mothe-Saint-H\xe9ray", "La P\xe9rouille",
"LA PERCHE", "LA PERNELLE", "LA PUYE", "La Roche-Chalais", "La Roche-en-Brenil",
"La Roche-Posay", "LA ROCHE-VANNEAU", "La Souterraine", "LA SUZE-SUR-SARTHE",
"La Trimouille", "LA TRIMOUILLE", "La Vineuse", "Lagarde", "Laitre-sous-Amance",
"Lamarche", "Landange", "LANDOUZY-LA-VILLE", "LANGRUNE-SUR-MER",
"LANTAGES", "LATHUS-SAINT-REMY", "Launstroff", "Lavall\xe9e",
"LAVAUSSEAU", "Le B\xe9ny-Bocage", "Le Blanc", "LE BLANC", "Le Bourg-d'Hem",
"LE CATEAU-CAMBRESIS", "LE CHATELET", "LE DORAT", "Le Doulieu",
"Le F\xeate", "LE LOUROUX", "LE MAY-SUR-EVRE", "LE MENIL-GUYON",
"Le Mesnil-Eudes", "Le Mesnil-Raoult", "LE MESNIL-ROGUES", "Le Mesnilbus",
"Le Nouvion-en-Thi\xe9rache", "LE PECHEREAU", "LE PIN", "LE PLESSIER-SUR-BULLES",
"Le Reculey", "LE SOUICH", "Le Torpt", "Le Val-Saint-P\xe8re",
"Le Vigeant", "LE VIGEANT", "Le Vr\xe9tot", "Leignes-sur-Fontaine",
"Leintrey", "Lengronne", "LENT", "Les Forges", "Les Grands Ch\xe9zeaux",
"LES LOGES", "Les Thons", "LES TROIS-PIERRES", "LESMENILS", "Lessac",
"Lesterps", "LEULINGHEM", "Levoncourt", "Levroux", "Liernais",
"LIESVILLE-SUR-DOUVE", "LIFFOL-LE-GRAND", "Liglet", "LIGLET",
"Lignac", "LIGNAC", "LIGNEREUIL", "LIGNIERES-CHATELAIN", "Lignol-le-Ch\xe2teau",
"LIMANTON", "LIMOGES", "Limonest", "Linselles", "Lironville",
"LIVERDUN", "Lochwiller", "Loison", "Long", "Longeville-en-Barrois",
"Longeville-sur-la-Laines", "Longpr\xe9-les-Corps-Saints", "Longvillers",
"LORMES", "Lottinghen", "Louvencourt", "Louvi\xe8res", "Lu\xe7ay-le-M\xe2le",
"LUANT", "LUCE-SOUS-BALLON", "Luchapt", "Lussac", "LUSSAC", "Lussac-les-\xc9glises",
"Lussac-les-Ch\xe2teaux", "LUSSAC-LES-EGLISES", "Luttange", "LUZERET",
"LYE", "M\xe9nil-en-Xaintois", "M\xe9nil-sur-Belvitte", "M\xe9obecq",
"M\xe9ry-la-Bataille", "M\xe9zi\xe8res-sur-Issoire", "Magnac-Laval",
"MAGNAC-LAVAL", "Magneville", "Magny", "Magny-le-Freule", "Mailhac-sur-Benaize",
"MAILHAC-SUR-BENAIZE", "MAILLET", "Mailly-sur-Seille", "MAISON-MAUGIS",
"MAISONCELLE-TUILERIE", "Maixe", "Maizi\xe8res", "Malaincourt-sur-Meuse",
"MALICORNAY", "Man\xe9houville", "MANDRES-EN-BARROIS", "Manerbe",
"Manglieu", "MANNEVILLE-LA-PIPARD", "Manoncourt-en-Vermois",
"Manoncourt-sur-Seille", "MARAINVILLER", "MARANGE-ZONDRANGE",
"Marault", "Marchais-en-Brie", "MARCHESEUIL", "Marcilly-en-Bassigny",
"MARCILLY-EN-BASSIGNY", "MARIGNY", "MARIGNY-BRIZAY", "Marigny-l'\xc9glise",
"Marnay", "Martigny", "Martincourt", "MARTIZAY", "Marville",
"Marzy", "MASSIGNAC", "Mattexey", "Maupr\xe9voir", "MAUPREVOIR",
"MAUX", "Mavilly-Mandelot", "Mazerolles", "Mazingarbe", "Meillant",
"MELLEROY", "MENETOU-RATEL", "MENEVILLERS", "MERLAUT", "Mers-sur-Indre",
"MERS-SUR-INDRE", "Mesnil-Domqueur", "Metz-Robert", "Meuvy",
"Mietesheim", "MILLERY", "MIRECOURT", "MISSE", "MONCEAU-SAINT-WAAST",
"MONCHIET", "Mons-Boubert", "MONS-EN-LAONNOIS", "Mont-Bonvillers",
"MONT-ET-MARRE", "MONT-LES-LAMARCHE", "Montauban-de-Picardie",
"MONTBOUY", "MONTCAVREL", "MONTCHEVRIER", "Montheries", "Montiers-sur-Saulx",
"MONTIERS-SUR-SAULX", "MONTIGNY-EN-CAMBRESIS", "Montigny-l\xe8s-Vaucouleurs",
"Montigny-Montfort", "Montlou\xe9", "Montmartin-en-Graignes",
"MONTMARTIN-EN-GRAIGNES", "Montmeillant", "MONTMEILLANT", "Montmorillon",
"Montr\xe9al", "Montreuil-sur-Lozon", "MONTROL-SENARD", "MONTROLLET",
"MORGNY-EN-THIERACHE", "MORLAC", "Morterolles-sur-Semme", "Morval",
"MORVILLE", "Morvilliers", "Mosnay", "Mouhers", "MOUHERS", "MOUHET",
"Moulines", "Moulismes", "MOULISMES", "MOUSSAC", "moussey", "Mousson",
"Mouterre-sur-Blourde", "MOUTERRE-SUR-BLOURDE", "Moutiers-Saint-Jean",
"Moyenvic", "Murs", "N\xe9rignac", "NANTIAT", "NANTILLOIS", "Narb\xe9fontaine",
"NEAUPHE-SUR-DIVE", "NEDONCHEL", "NERONDES", "Neufgrange", "NEUILLAY-LES-BOIS",
"NEUVILLE-LES-DAMES", "NEUVILLE-LEZ-BEAULIEU", "NEUVILLE-SUR-AUTHOU",
"Neuville-sur-Touques", "Neuvy-Saint-S\xe9pulchre", "NEUVY-SAINT-SEPULCHRE",
"NEVILLE-SUR-MER", "Nibas", "NIBAS", "NIBELLE", "Nielles-l\xe8s-Bl\xe9quin",
"Nieuil", "Niherne", "Nogent", "Noirterre", "Nomain", "Norroy",
"Notre-Dame-d'\xc9pine", "NOUAILLE-MAUPERTUIS", "Nouans-les-Fontaines",
"Nouic", "Nully", "Nuret-le-Ferron", "NURET-LE-FERRON", "Ocqueville",
"OCTEVILLE", "Ollainville", "Oradour-Fanais", "Oradour-Saint-Genest",
"ORADOUR-SAINT-GENEST", "ORDIARP", "Origny-en-Thi\xe9rache",
"Origny-Sainte-Beno\xeete", "ORMES", "ORMES-ET-VILLE", "Ormoy",
"Orsennes", "Orsinval", "ORVILLE", "Osmoy", "OULCHES", "Outines",
"Outrem\xe9court", "OZERAILLES", "PAGEAS", "PALLUAU-SUR-INDRE",
"PAREY-SAINT-CESAIRE", "Parfondeval", "PARNAC", "PARTHENAY",
"PAS-EN-ARTOIS", "Payroux", "PAYROUX", "Pel-et-Der", "Persac",
"PERSAC", "PETERSBACH", "Peyrat-de-Bellac", "PEYRILHAC", "Pindray",
"PINDRAY", "PISSY-POVILLE", "PLAISANCE", "Pleuville", "PLOMB",
"Pommer\xe9val", "POMMIERS", "PONT-FARCY", "PONT-SUR-SAMBRE",
"Pouillenay", "Pouligny-Notre-Dame", "Pouligny-Saint-Pierre",
"POULIGNY-SAINT-PIERRE", "Pourrain", "Poursac", "Pouru-aux-Bois",
"Pr\xe9cy-sous-Thil", "Pr\xe9cy-sur-Vrin", "Pr\xe9ny", "Pr\xe9tot-Sainte-Suzanne",
"Pr\xe9tot-Vicquemare", "Prangey", "PREMONT", "Pressac", "PRESSAC",
"Pressignac", "Prissac", "PRISSAC", "Proussy", "Provency", "PROVENCY",
"Pulligny", "Punerot", "Puttelange-l\xe8s-Thionville", "Qua\xebdypre",
"Queaux", "QUESTRECQUES", "QUIBOU", "Quoeux-Haut-Mainil", "R\xe9chicourt-la-Petite",
"R\xe9chicourt-le-Ch\xe2teau", "R\xe9moville", "RADINGHEM", "Radonvilliers",
"Raffetot", "Ran\xe7onni\xe8res", "Rancon", "RANCON", "Rangecourt",
"RAUVILLE-LA-BIGOT", "RECHICOURT-LE-CHATEAU", "REMILLY", "REMILLY-SUR-LOZON",
"Rennepont", "RENNEPONT", "Renneval", "Repel", "RESSON", "REUMONT",
"Ricarville", "RICHELING", "Rigny-la-Salle", "Rimsdorf", "Rocheville",
"ROMAGNE", "Romain-sur-Meuse", "Ronch\xe8res", "Rousies", "Roussac",
"ROUSSAC", "ROUSSINES", "Roussy-le-Village", "Rouvres-sous-Meilly",
"Rouvrois-sur-Meuse", "ROYERES", "Ruffec", "RUFFEC", "Rumegies",
"Rupt-devant-Saint-Mihiel", "S\xe9meries", "S\xe9nill\xe9", "Sacierges-Saint-Martin",
"Saint-\xc9pain", "Saint-Agnan", "SAINT-ALGIS", "Saint-Andr\xe9-de-Bohon",
"SAINT-ANDRE-LE-BOUCHOUX", "Saint-Ao\xfbt", "SAINT-AOUT", "SAINT-AVRE",
"Saint-Barbant", "SAINT-BARBANT", "Saint-Bonnet-de-Bellac", "SAINT-BONNET-DE-BELLAC",
"SAINT-BRICE", "SAINT-CHARTIER", "SAINT-CHRISTOPHE", "SAINT-CHRISTOPHE-EN-BAZELLE",
"Saint-Clair-sur-les-Monts", "Saint-Cyr-de-Salerne", "Saint-D\xe9sir\xe9",
"Saint-Denis-de l'H\xf4tel", "SAINT-DENIS-DE-JOUHET", "Saint-Denoeux",
"SAINT-EBREMOND-DE-BONFOSSE", "Saint-Eustache-la-For\xeat", "SAINT-FIRMIN-DES-BOIS",
"SAINT-FIRMIN-SUR-LOIRE", "Saint-G\xe2tien-des-Bois", "Saint-Georges",
"Saint-Georges-d'Elle", "Saint-Georges-les-Landes", "SAINT-GEORGES-LES-LANDES",
"SAINT-GERMAIN-DE-LIVET", "SAINT-GERMAIN-DES-PRES", "SAINT-GERMAIN-LES-SENAILLY",
"Saint-Germain-sous-Cailly", "Saint-Germain-sur-Bresle", "Saint-Hilaire-de-Court",
"SAINT-HILAIRE-EN-LIGNIERES", "SAINT-HILAIRE-EN-MORVAN", "Saint-Hilaire-La-Treille",
"SAINT-HILAIRE-LA-TREILLE", "Saint-Hilaire-sur-B\xe9naize", "SAINT-HILAIRE-SUR-BENAIZE",
"SAINT-HIPPOLYTE", "SAINT-JEAN-DE-BOEUF", "Saint-Jean-de-la-Neuville",
"Saint-Jean-le-Blanc", "SAINT-JOIRE", "Saint-Julien-sur-Sarthe",
"Saint-Juvin", "Saint-L\xe9ger-Magnazeix", "Saint-L\xe9omer",
"SAINT-LEGER-DU-BOIS", "SAINT-LEGER-MAGNAZEIX", "SAINT-LEGER-SOUS-CHOLET",
"SAINT-LEGER-SUR-DHEUNE", "SAINT-LEOMER", "SAINT-LEONARD", "Saint-Lupien",
"Saint-Martial-sur-Isop", "Saint-Martin-aux-Chartrains", "Saint-Martin-de-Lamps",
"Saint-Martin-en-Campagne", "Saint-Martin-l'Ars", "SAINT-MARTIN-L'ARS",
"Saint-Martin-l'Hortier", "Saint-Martin-sur-Ouanne", "Saint-Mary",
"SAINT-MAUR", "Saint-Maurice-des-Lions", "SAINT-MAURICE-DES-LIONS",
"Saint-Maurice-la-Clou\xe8re", "SAINT-MAURICE-LA-CLOUERE", "Saint-Maurice-la-Souterraine",
"SAINT-MAURICE-LA-SOUTERRAINE", "SAINT-MAURICE-SUR-AVEYRON",
"Saint-Menoux", "SAINT-MICHEL-EN-BRENNE", "Saint-Ouen-l\xe8s-Parey",
"Saint-Ouen-sur-Gartempe", "SAINT-OUEN-SUR-GARTEMPE", "SAINT-OUEN-SUR-LOIRE",
"Saint-P\xe8re", "Saint-Pierre-de-Fursac", "Saint-Pierre-de-Maill\xe9",
"SAINT-PIERRE-DE-MAILLE", "Saint-Pierre-de-Varennes", "Saint-Pierre-le-Vieux",
"Saint-Pierre-le-Viger", "Saint-Pierremont", "Saint-Priest-Ligoure",
"SAINT-PRIEST-TAURION", "Saint-Quentin-des-Pr\xe9s", "Saint-S\xe9condin",
"Saint-Saturnin-du-Bois", "Saint-Saulge", "SAINT-SAUVEUR-LE-VICOMTE",
"Saint-Sornin-la-Marche", "Saint-Sornin-Leulac", "Saint-Sulpice",
"Saint-Sulpice-le-Dunois", "Saint-Sulpice-les-Feuilles", "SAINT-SYMPHORIEN",
"Saint-Symphorien-sur-Couze", "SAINT-THURIEN", "Saint-Vaast-d'\xc9quiqueville",
"Saint-Val\xe9ry-sur-Somme", "Saint-Victor-sur-Ouche", "Saint-Vincent-Cramesnil",
"Sainte-Colombe", "SAINTE-COLOMBE-SUR-LOING", "SAINTE-CROIX",
"Sainte-Croix-aux-Mines", "Sainte-H\xe9l\xe8ne-Bondeville", "Sainte-Maure-de-Touraine",
"SAINTE-MAURE-DE-TOURAINE", "Sainte-Menehould", "Sainte-Radegonde",
"Sainte-S\xe9v\xe8re-sur-Indre", "Saints", "SAIZY", "Salon",
"Sandaucourt", "SANDAUCOURT", "SANGATTE", "Sarcey", "SARDY-LES-EPIRY",
"SARNOIS", "Sarralbe", "Saulg\xe9", "SAULGOND", "Saulxures",
"SAULXURES-LES-VANNES", "Sauzelles", "Savigny-en-Terre-Plaine",
"Savilly", "Schillersdorf", "Schleithal", "Schopperten", "SEMIDE",
"semoutiers", "Senaide", "Senonges", "Seraumont", "Sermaize-les-Bains",
"Serqueux", "SERVINS", "Sexey-aux-Forges", "Signy-le-Petit",
"Sincey", "Sivry", "Sogny-en-l'Angle", "SOIZE", "Sologny", "SOLOGNY",
"SOMMAING", "Sommeval", "Sorcy-Saint-Martin", "Suisse", "SURIS",
"Suzanne", "TACONNAY", "TANNAY", "Tannerre-en-Puisaye", "Tendu",
"TERNAS", "Tersannes", "TERSANNES", "Th\xe9norgues", "Theillay",
"Thenay", "They-sous-Vaud\xe9mont", "Thiat", "THIEFFRAIN", "THIEULLOY-L'ABBAYE",
"Thollet", "THOLLET", "Thonne-le-Thil", "Thorey-Lyautey", "Thors",
"Tign\xe9court", "Tilly-sur-Meuse", "Touffreville-sur-Eu", "Tourlaville",
"Tourmignies", "Tourni\xe8res", "Tr\xe9mont-sur-Saulx", "Tr\xe9monzey",
"Tramayes", "Tranzault", "TRANZAULT", "TREVOL", "TUPIGNY", "Uchizy",
"Urville", "Usson-du-Poitou", "USSON-DU-POITOU", "VACQUERIE-LE-BOUCQ",
"Vains", "VAINS", "Val-de-Meuse", "Valfroicourt", "VANDENESSE-EN-AUXOIS",
"Vandy", "VARANGEVILLE", "VARENGUEBEC", "VARESNES", "VARZY",
"VASLES", "Vaud\xe9mont", "Vaudeville", "VAUDOY-EN-BRIE", "Vaulry",
"Vault-de-Lugny", "VAUX", "Vaux-sur-Aure", "Vaux-sur-Blaise",
"VAZEILLES-LIMANDRE", "Velles", "Vendoeuvres", "VENESMES", "VENTES-SAINT-REMY",
"VERNEUIL", "Verneuil-Moustiers", "VERNEUIL-MOUSTIERS", "Vernon",
"VERRIERES", "VERTEUIL-SUR-CHARENTE", "Veuvey-sur-Ouche", "Veyrac",
"VEZELISE", "VIC-DE-CHASSENAY", "VICQ-SUR-GARTEMPE", "Vierzy",
"Vieux", "Vigneulles", "Vigoux", "VIGOUX", "Vill\xe9cloye", "VILLE-LE-MARCLET",
"Villemotier", "VILLENTROIS", "VILLERS-AU-BOIS", "Villers-Br\xfblin",
"Villers-Outr\xe9aux", "VILLEY-SAINT-ETIENNE", "Villiers-le-Sec",
"VILLIEU-LOYES-MOLLON", "VIMENIL", "Vimoutiers", "VINEUIL", "Vingt-Hanaps",
"Viriat", "Vironchaux", "Vitrey", "Voncq", "Waldhambach", "WANNEHAIN",
"Wi\xe8ge-Faty", "WIEGE-FATY", "Wignehies", "WILLERONCOURT",
"Wimy", "Winnezeele", "Wissant", "Woincourt"), class = "factor"),
unit.type = structure(c(1L, 1L, 1L, 1L, 1L, 1L), .Label = "Farm", class = "factor"),
location = structure(c(1036L, 548L, 846L, 775L, 122L, 791L
), .Label = c("\xc9pen\xe8de", "\xc9perrais", "\xc9poisses",
"\xc9riseul", "\xc9tagnac", "\xc9talleville", "\xc9tr\xe9aupont",
"\xc9treillers", "\xc9troeungt", "abilly", "Abzac", "ABZAC",
"Abzac n\xb01", "Abzac n\xb02", "ACY", "Adriers", "ADRIERS",
"Adriers n\xb01", "Adriers n\xb02", "AFFLEVILLE", "Agincourt",
"Ailly-le-Haut-Clocher", "Aisey-et-Richecourt", "Aixe-sur-Vienne",
"Aizy Jouy", "Alincthun", "ALLICHAMPS", "ALLOUE", "Allouville-Bellefosse",
"Ambernac", "Andilly-en-Bassigny", "Andryes", "Angely", "ANOST",
"Anrosey", "Ansac-sur-Vienne", "ANSAC-SUR-VIENNE", "ANSAUVILLERS",
"Anv\xe9ville", "Archigny", "ARGILLIERES", "ARGY", "ARMES",
"Arnac-la-Poste", "Arnac-la-Poste n\xb01", "Arnac-la-Poste n\xb02",
"Arrest", "Artannes-sur-Indre", "Asni\xe8res-sur-Blour",
"ASNIERES-SUR-BLOUR", "ASPACH", "Attin", "Atton", "Auberville-la-Renault",
"AUCHY-LEZ-ORCHIES", "Audincthun", "AUDINGHEN", "Audruicq",
"AULT", "Aunay-sur-Odon", "Authe", "Authieule", "Autigny-la-Tour",
"Autrey", "AUTREY", "AUXAIS", "Availles-Limouzine", "AVAILLES-LIMOUZINE",
"Avelin", "Avermes", "Avesnelles", "AVESNES-LE-SEC", "Avrecourt",
"AVROULT", "Awoingt", "AYDOILLES", "AZAT-LE-RIS", "Azay-le-Ferron",
"AZAY-LE-FERRON", "AZAY-LE-RIDEAU", "Azoudange", "AZY-LE-VIF",
"B\xe9court", "B\xe9nouville", "B\xe9thines", "Bacourt",
"Bacqueville-en-Caux", "Bagneux", "Bailleul", "BAINVILLE-SUR-MADON",
"BALLAY", "Balledent", "BALLEDENT", "Baons-le-Comte", "BARBAS",
"Basse-sur-le-Rupt", "Bauzemont", "BAVINCOURT", "Bay-sur Aube",
"BAZELAT", "BEAULIEU", "Beauquesne", "Beaurepaire", "Beaurepaire-en-Bresse",
"Beauvilliers", "BEGROLLES-EN-MAUGES", "Bellac", "Belrupt",
"Belval-sous-Ch\xe2tillon", "BERCK", "BEREZIAT", "Berneuil",
"Bernouville", "BERTHEN", "BERUGES", "BESSINES-SUR-GARTEMPE",
"BETHINES", "Bettoncourt-le-Haut", "Beurizot", "Beurville",
"Beux", "Beuzeville", "Beuzeville-la-Grenier", "Bezaumont",
"Biarre", "Billy-sous-les-C\xf4tes", "BISLEE", "Bissey-sous-Cruchaud",
"Biville-la-Baignarde", "BIZIAT", "BLANCHEFOSSE-ET-BAY",
"Blanzac", "BLANZAC", "BLARINGHEM", "Blessonville", "Blicourt",
"Blond", "Bolbec", "Bolleville", "Bonneuil", "Bonneuil n\xb01",
"Bonneuil n\xb02", "Bonny-sur-Loire", "Bossancourt", "BOUESSE",
"Bougey", "BOULAY-MOSELLE", "Bourbonne-les-Bains", "Bouresse",
"BOURESSE", "Bourg-Archambault", "BOURG-ARCHAMBAULT", "Boz",
"Br\xe9aut\xe9", "Br\xe9m\xe9nil", "Br\xe9moncourt", "Braux",
"BREVES", "Briantes", "Bricquebec", "BRICQUEBEC", "Brigueil-le-Chantre",
"BRIGUEIL-LE-CHANTRE", "Brigueil-le-Chantre n\xb01", "Brigueil-le-Chantre n\xb02",
"Brigueuil", "BRIGUEUIL", "Brillac", "BRILLAC", "BRIXEY-AUX-CHANOINES",
"Brulange", "Brunehamel", "Brutelles", "Bucamps", "Buffign\xe9court",
"Buhl-Lorraine", "BURTHECOURT-AUX-CHENES", "Bussi\xe8re-Poitevine",
"BUSSIERE-BOFFY", "BUSSIERE-POITEVINE", "Buxi\xe8res-d'Aillac",
"BUXIERES-LES-MINES", "C\xe9lon", "CABANAC-ET-VILLAGRAINS",
"Cambremer", "Cannectancourt", "Canville-les-Deux-\xc9glises",
"Carignan", "Carignan bis", "CARQUEBUT", "CATIGNY", "Catillon-sur-Sambre",
"Caunay", "Cavron-Saint-Martin", "CERE-LA-RONDE", "CERENCES",
"CERISY-LA-SALLE", "Ch\xe2teau-Garnier", "Ch\xe2teau-Garnier n\xb01",
"Ch\xe2teau-Garnier n\xb02", "ch\xe2teau-salins", "Ch\xe2teauponsac",
"Ch\xe2tenay-Vaudin", "Ch\xe2tenay-Vaudin n\xb01", "Ch\xe2tenois",
"Ch\xe9zeaux", "Ch\xe9zy-en-Orxois", "Chablis", "CHABRIS",
"Chaillac", "CHAILLAC", "Chaillac n\xb01", "Chaillac n\xb02",
"Chaillac n\xb03", "CHAILLAC-SUR-VIENNE", "Chailly-sur-Arman\xe7on",
"CHALAIS", "Chalindrey", "CHALLERANGE", "CHAMARANDES-CHOIGNES",
"Chamboret", "Chambrey", "CHAMPAGNE-SAINT-HILAIRE", "Champigny-sous-Varennes",
"CHAMPS", "CHAMPSEVRAINE", "Chantelle", "Chaouilley", "Charbuy",
"CHARLEVILLE-SOUS-BOIS", "Charmes-la-Grande", "CHARMES-LA-GRANDE",
"charroux", "Chassey-l\xe8s-Montbozon", "CHATEAU-CHINON(CAMPAGNE)",
"CHATEAU-GARNIER", "CHATEAUPONSAC", "Chaudenay", "CHAUMONT-LA-VILLE",
"Chaumont-Porcien", "Chauvigny", "CHAVEYRIAT", "Chazelet",
"CHAZELLES", "CHEHERY", "Chemery", "CHEMERY", "Chemin-d'Aisey",
"CHENIERS", "Cherbourg", "CHERVES-CHATELARS", "Chevannes",
"CHEVERNY", "Chevillon", "CHEVROCHES", "Chicourt", "CHIRAC",
"Cieux", "Ciron", "Civrieux", "Cizay-la-Madeleine", "Cl\xe9rey-sur-Br\xe9non",
"Cl\xe9zentaine", "CLAIRFONTAINE", "Clamecy", "CLAMECY",
"CLERE-DU-BOIS", "Clinchamps-sur-Orne", "CLION", "Coiffy-le-Haut",
"COINGT", "Colombey-les-Belles", "COLOMBEY-LES-DEUX-EGLISES",
"COLONDANNES", "Combres-sous-les-C\xf4tes", "COMBREUX", "Cond\xe9-Northen",
"CONDE-NORTHEN", "CONFOLENS", "Confran\xe7on", "CONGE-SUR-ORNE",
"Conthil", "Corpoyer-la-Chapelle", "COSSAYE", "Cossesseville",
"Coulandon", "COULLEMONT", "Coulmier-le-Sec", "Coulonges",
"COULONGES", "COULONGES n\xb01", "COULONGES n\xb02", "Coulonges-sur-Sarthe",
"Coupelle-Vieille", "COURCELLES-LES-SEMUR", "COURGEON", "COURTESOULT-ET-GATEY",
"Coussay-les-Bois", "COUVERTPUIS", "Coyviller", "Cr\xe9quy",
"CRECY-SUR-SERRE", "CREQUY", "CRIEL-SUR-MER", "Crion", "CROISY",
"CROMAC", "CRUGNY", "Crupilly", "CULAN", "Cussay", "Cuves",
"D\xe9deling", "Dammartin-sur-Meuse", "Darnac", "DARNAC",
"Delettes", "DELETTES", "Dercy", "Desvres", "DIEVAL", "Dinsac",
"DOMBROT-SUR-VAIR", "Domecy-sur-Cure", "Domgermain", "Domjulien",
"DOMMARTIN-DAMPIERRE", "Dompierre-les-\xc9glises", "Dompierre-sur-Authie",
"Domptail", "Donzy", "Doudeauville", "Douvrend", "DOVILLE",
"Dracy", "Dracy-Saint-Loup", "Drambon", "Droisy", "Droux",
"Dunet", "ECUEILLE", "ECUIRES", "ECULLEVILLE", "Emberm\xe9nil",
"Empur\xe9", "Englesqueville-en-Auge", "Envronville", "EPAIGNES",
"epinac", "EPINAL", "Escles", "Escombres-et-le-Chesnois",
"Esley", "Esse", "ESSE", "Esse n\xb01", "Esse n\xb02", "Euilly-et-Lombut n\xb01",
"Euilly-et-Lombut n\xb02", "EXIDEUIL", "Eywiller", "FAULQUEMONT",
"Faulx", "FAULX", "Faveraye-Machelles", "FAYE-L'ABBESSE",
"FAYL-BILLOT", "FEYTIAT", "Fill\xe9", "FLAGEY", "Fleury-la-For\xeat",
"FLIGNY", "Flor\xe9mont", "Fluqui\xe8res", "FOAMEIX-ORNEL",
"Fontaine-au-Bois", "Fontaine-l\xe8s-Vervins", "Fontaine-le-Dun",
"FONTAINE-LES-VERVINS", "Fontaines", "Fontangy", "FONTENELLE",
"formigny", "Fossieux", "Foucart", "Foulcrey", "Fouligny",
"Fourcigny", "FOURS", "Fr\xe9monville", "Fr\xe9vent", "Fr\xf4lois",
"Fraignot-et-Vesvrotte", "Framicourt", "Fraquelfing", "Freb\xe9court",
"Frenelle-la-Grande", "Freneuse-sur-Risle", "Fresnes-en-Saulnois",
"Fresnes-en-Tardenois", "FRESNOY-FOLNY", "Fromy", "FRONTENARD",
"G\xe9lucourt", "GAJOUBERT", "GARGILESSE-DAMPIERRE", "Gemmelaincourt",
"GEMONVILLE", "Genouillac", "Gerb\xe9viller", "Gergny", "GERMINY",
"Gevrolles", "Ghyvelde", "GIEVILLE", "ginai", "Giraumont",
"Givron n\xb01", "Givron n\xb02", "Gizay", "Glanges", "GLOS-LA-FERRIERE",
"GODERVILLE", "GOLLEVILLE", "GONCOURT", "Gonneville", "GONNEVILLE-EN-AUGE",
"Gouberville", "Gouex", "GOUEX", "Gournay", "GOURNAY-EN-BRAY",
"Goviller", "Grand", "Grand Rullecourt", "GRAND-CAMP", "Grandcourt",
"Greuville", "GREZ", "Grez-en-Bou\xe8re", "Grivy-Loisy",
"GROSSOUVRE", "Grosville", "Gu\xe9bling", "GUILLON", "Guilly",
"Guincourt", "GUIPY", "H\xe9nam\xe9nil", "Haboudange", "Haillainville",
"Hambye", "Hammeville", "HANNACHES", "HARGICOURT", "Hary",
"HAUT-LIEU", "HAUTE-AMANCE", "Haute-Kontz", "HAUTEFONTAINE",
"Hauteroche", "HAUTTEVILLE-BOCAGE", "HAZEBROUCK", "Hennezel",
"Hermanville", "Heugleville-sur-Scie", "HEUGNES", "Honnechy",
"Honskirch", "HORNAING", "HORNOY-LE-BOURG", "Hou\xe9ville",
"Houdelaincourt", "HOUECOURT", "Hugleville-en-Caux", "Hurecourt",
"Ibigny", "ILLKIRCH-GRAFFENSTADEN", "Imbleville", "Incourt",
"INCOURT", "Insming", "Is-en-Bassigny", "ISENAY", "Jaulges",
"Jeantes", "JEANTES", "JEU-LES-BOIS", "Jouac", "Jouhet",
"JOUHET", "JOURNET", "JOUSSE", "JUGY", "JUVIGNIES", "KIRRWILLER-BOSSELSHAUSEN",
"L'\xc9tang-Bertrand", "L'Isle-Jourdain n\xb01", "L'Isle-Jourdain n\xb02",
"La Bazeuge", "LA BERTHENOUX", "LA BESACE", "La Blouti\xe8re",
"La Celle-Gu\xe9nand", "LA CELLE-SUR-LOIRE", "La Cerlangue",
"La Chapelle-Bertrand", "La Chapelle-Montreuil", "LA CHAPELLE-MONTREUIL",
"La Chapelle-Th\xe8cle", "LA COLOMBE", "LA COMTE", "LA COURBE",
"La Croix-sur-Gartempe", "LA CROIX-SUR-GARTEMPE", "LA FEREE",
"LA FERMETE", "La Ferri\xe8re-en-Parthenay", "La Fert\xe9-Loupi\xe8re",
"La Gaillarde", "La Godefroy", "La Haye-du-Puits", "La Loge",
"LA MANCELIERE", "La Mothe-Saint-H\xe9ray", "La P\xe9rouille",
"LA PERCHE", "LA PERNELLE", "LA PUYE", "La Roche-Chalais",
"La Roche-en-Brenil", "La Roche-Posay", "LA ROCHE-VANNEAU",
"La Souterraine", "LA SUZE-SUR-SARTHE", "La Trimouille",
"LA TRIMOUILLE", "La Trimouille n\xb01", "La Trimouille n\xb02",
"La Vineuse", "Lagarde", "Laitre-sous-Amance", "Lamarche",
"Landange", "LANDOUZY-LA-VILLE", "LANGRUNE-SUR-MER", "LANTAGES",
"LATHUS-SAINT-REMY", "LATHUS-SAINT-REMY n\xb01", "LATHUS-SAINT-REMY n\xb02",
"Launstroff", "Lavall\xe9e", "LAVAUSSEAU", "Le B\xe9ny-Bocage",
"Le Blanc", "LE BLANC", "Le Bourg-d'Hem", "LE CATEAU-CAMBRESIS",
"LE CHATELET", "LE DORAT", "Le Doulieu", "Le F\xeate", "LE LOUROUX",
"LE MAY-SUR-EVRE", "LE MENIL-GUYON", "Le Mesnil-Eudes", "Le Mesnil-Raoult",
"LE MESNIL-ROGUES", "Le Mesnilbus", "Le Nouvion-en-Thi\xe9rache",
"LE PECHEREAU", "LE PIN", "LE PLESSIER-SUR-BULLES", "Le Reculey",
"LE SOUICH", "Le Torpt", "Le Val-Saint-P\xe8re", "Le Vigeant",
"LE VIGEANT", "Le Vigeant n\xb01", "Le Vigeant n\xb02", "Le Vr\xe9tot",
"Leignes-sur-Fontaine", "Leintrey", "Lengronne", "LENT",
"Les Forges", "Les Grands Ch\xe9zeaux", "LES LOGES", "Les Thons",
"LES TROIS-PIERRES", "LESMENILS", "Lessac", "Lesterps", "LEULINGHEM",
"Levoncourt n\xb01", "Levoncourt n\xb02", "Levroux", "Liernais",
"LIESVILLE-SUR-DOUVE", "LIFFOL-LE-GRAND", "Liglet", "LIGLET",
"Lignac", "LIGNAC", "Lignac n\xb01", "Lignac n\xb02", "LIGNEREUIL",
"LIGNIERES-CHATELAIN", "Lignol-le-Ch\xe2teau", "LIMANTON",
"LIMOGES", "Limonest", "Linselles", "Lironville", "LIVERDUN",
"Lochwiller", "Loison", "Long", "Longeville-en-Barrois",
"Longeville-sur-la-Laines", "Longpr\xe9-les-Corps-Saints",
"Longvillers", "LORMES", "Lottinghen", "Louvencourt", "Louvi\xe8res",
"Lu\xe7ay-le-M\xe2le", "LUANT", "LUCE-SOUS-BALLON", "Luchapt",
"Lussac", "LUSSAC", "Lussac-les-\xc9glises", "Lussac-les-Ch\xe2teaux",
"LUSSAC-LES-EGLISES", "Luttange", "LUZERET", "LYE", "M\xe9nil-en-Xaintois",
"M\xe9nil-sur-Belvitte", "M\xe9obecq", "M\xe9ry-la-Bataille",
"M\xe9zi\xe8res-sur-Issoire", "Magnac-Laval", "MAGNAC-LAVAL",
"Magneville", "Magny", "Magny-le-Freule", "Mailhac-sur-Benaize",
"MAILHAC-SUR-BENAIZE", "MAILLET", "Mailly-sur-Seille", "MAISON-MAUGIS",
"MAISONCELLE-TUILERIE", "Maixe", "Maizi\xe8res", "Malaincourt-sur-Meuse",
"MALICORNAY", "Man\xe9houville", "MANDRES-EN-BARROIS", "Manerbe",
"Manglieu", "MANNEVILLE-LA-PIPARD", "Manoncourt-en-Vermois",
"Manoncourt-sur-Seille", "MARAINVILLER", "MARANGE-ZONDRANGE",
"Marault", "Marchais-en-Brie", "MARCHESEUIL", "Marcilly-en-Bassigny",
"MARCILLY-EN-BASSIGNY", "MARIGNY", "MARIGNY-BRIZAY", "Marigny-l'\xc9glise",
"Marnay", "Martigny", "Martincourt", "MARTIZAY", "Marville",
"Marzy", "MASSIGNAC", "Mattexey", "Maupr\xe9voir", "MAUPREVOIR",
"MAUX", "Mavilly-Mandelot", "Mazerolles", "Mazingarbe", "Meillant",
"MELLEROY", "MENETOU-RATEL", "MENEVILLERS", "MERLAUT", "Mers-sur-Indre",
"MERS-SUR-INDRE", "Mesnil-Domqueur", "Metz-Robert", "Meuvy",
"Mietesheim", "MILLERY", "MIRECOURT", "MISSE", "MONCEAU-SAINT-WAAST",
"MONCHIET", "Mons-Boubert", "MONS-EN-LAONNOIS", "Mont-Bonvillers",
"MONT-ET-MARRE", "MONT-LES-LAMARCHE", "Montauban-de-Picardie",
"MONTBOUY", "MONTCAVREL", "MONTCHEVRIER", "Montheries", "Montiers-sur-Saulx",
"MONTIERS-SUR-SAULX", "MONTIGNY-EN-CAMBRESIS", "Montigny-l\xe8s-Vaucouleurs",
"Montigny-Montfort", "Montlou\xe9", "Montmartin-en-Graignes",
"MONTMARTIN-EN-GRAIGNES", "Montmeillant", "MONTMEILLANT",
"Montmorillon", "Montr\xe9al", "Montreuil-sur-Lozon", "MONTROL-SENARD",
"MONTROLLET", "MORGNY-EN-THIERACHE", "MORLAC", "Morterolles-sur-Semme",
"Morval", "MORVILLE", "Morvilliers", "Mosnay", "Mouhers",
"MOUHERS", "MOUHET", "Moulines", "Moulismes", "MOULISMES",
"Moulismes n\xb01", "Moulismes n\xb02", "MOUSSAC", "moussey",
"Mousson", "Mouterre-sur-Blourde", "MOUTERRE-SUR-BLOURDE",
"Moutiers-Saint-Jean", "Moyenvic", "Murs", "N\xe9rignac",
"NANTIAT", "NANTILLOIS", "Narb\xe9fontaine", "NEAUPHE-SUR-DIVE",
"NEDONCHEL", "NERONDES", "Neufgrange", "NEUILLAY-LES-BOIS",
"NEUVILLE-LES-DAMES", "NEUVILLE-LEZ-BEAULIEU", "NEUVILLE-SUR-AUTHOU",
"Neuville-sur-Touques", "Neuvy-Saint-S\xe9pulchre", "NEUVY-SAINT-SEPULCHRE",
"NEVILLE-SUR-MER", "Nibas", "NIBAS", "NIBELLE", "Nielles-l\xe8s-Bl\xe9quin",
"Nieuil", "Niherne", "Nogent", "Noirterre", "Nomain", "Norroy",
"Notre-Dame-d'\xc9pine", "NOUAILLE-MAUPERTUIS", "Nouans-les-Fontaines",
"Nouic", "Nully", "Nuret-le-Ferron", "NURET-LE-FERRON", "Ocqueville",
"OCTEVILLE", "Ollainville", "Oradour-Fanais", "Oradour-Saint-Genest",
"ORADOUR-SAINT-GENEST", "ORDIARP", "Origny-en-Thi\xe9rache",
"Origny-Sainte-Beno\xeete", "ORMES", "ORMES-ET-VILLE", "Ormoy",
"Orsennes", "Orsinval", "ORVILLE", "Osmoy", "OULCHES", "Outines",
"Outrem\xe9court", "OZERAILLES", "PAGEAS", "PALLUAU-SUR-INDRE",
"PAREY-SAINT-CESAIRE", "Parfondeval", "PARNAC", "PARTHENAY",
"PAS-EN-ARTOIS", "Payroux", "PAYROUX", "Pel-et-Der", "Persac",
"PERSAC", "PETERSBACH", "Peyrat-de-Bellac", "PEYRILHAC",
"PINDRAY", "Pindray n\xb01", "Pindray n\xb02", "PISSY-POVILLE",
"PLAISANCE", "Pleuville", "PLOMB", "Pommer\xe9val", "POMMIERS",
"PONT-FARCY", "PONT-SUR-SAMBRE", "Pouillenay", "Pouligny-Notre-Dame",
"Pouligny-Saint-Pierre", "POULIGNY-SAINT-PIERRE", "Pourrain",
"Poursac", "Pouru-aux-Bois", "Pr\xe9cy-sous-Thil", "Pr\xe9cy-sur-Vrin",
"Pr\xe9ny", "Pr\xe9tot-Sainte-Suzanne", "Pr\xe9tot-Vicquemare",
"Prangey", "PREMONT", "Pressac", "PRESSAC", "Pressignac",
"Prissac", "PRISSAC", "Proussy", "Provency", "PROVENCY",
"Pulligny", "Punerot", "Puttelange-l\xe8s-Thionville", "Qua\xebdypre",
"Queaux", "QUESTRECQUES", "QUIBOU", "Quoeux-Haut-Mainil",
"R\xe9chicourt-la-Petite", "R\xe9chicourt-le-Ch\xe2teau",
"R\xe9moville", "RADINGHEM", "Radonvilliers", "Raffetot",
"Ran\xe7onni\xe8res", "Rancon", "RANCON", "Rangecourt", "RAUVILLE-LA-BIGOT",
"RECHICOURT-LE-CHATEAU", "REMILLY", "REMILLY-SUR-LOZON",
"Rennepont", "RENNEPONT", "Renneval", "Repel", "RESSON",
"REUMONT", "Ricarville", "RICHELING", "Rigny-la-Salle", "Rimsdorf",
"Rocheville", "ROMAGNE", "Romain-sur-Meuse", "Ronch\xe8res",
"Rousies", "Roussac", "ROUSSAC", "ROUSSINES", "Roussy-le-Village",
"Rouvres-sous-Meilly", "Rouvrois-sur-Meuse", "ROYERES", "RUFFEC",
"Ruffec n\xb01", "Ruffec n\xb02", "Rumegies", "Rupt-devant-Saint-Mihiel",
"S\xe9meries", "S\xe9nill\xe9", "Sacierges-Saint-Martin",
"Saint-\xc9pain", "Saint-Agnan", "SAINT-ALGIS", "Saint-Andr\xe9-de-Bohon",
"SAINT-ANDRE-LE-BOUCHOUX", "Saint-Ao\xfbt", "SAINT-AOUT",
"SAINT-AVRE", "Saint-Barbant", "SAINT-BARBANT", "Saint-Bonnet-de-Bellac",
"SAINT-BONNET-DE-BELLAC", "Saint-Bonnet-de-Bellac n\xb01",
"Saint-Bonnet-de-Bellac n\xb02", "SAINT-BRICE", "SAINT-CHARTIER n\xb01",
"SAINT-CHARTIER n\xb02", "SAINT-CHRISTOPHE", "SAINT-CHRISTOPHE-EN-BAZELLE",
"Saint-Clair-sur-les-Monts", "Saint-Cyr-de-Salerne", "Saint-D\xe9sir\xe9",
"Saint-Denis-de l'H\xf4tel", "SAINT-DENIS-DE-JOUHET", "Saint-Denoeux",
"SAINT-EBREMOND-DE-BONFOSSE", "Saint-Eustache-la-For\xeat",
"SAINT-FIRMIN-DES-BOIS", "SAINT-FIRMIN-SUR-LOIRE", "Saint-G\xe2tien-des-Bois",
"Saint-Georges", "Saint-Georges-d'Elle", "SAINT-GEORGES-LES-LANDES",
"Saint-Georges-les-Landes n\xb01", "Saint-Georges-les-Landes n\xb02",
"SAINT-GERMAIN-DE-LIVET", "SAINT-GERMAIN-DES-PRES", "SAINT-GERMAIN-LES-SENAILLY",
"Saint-Germain-sous-Cailly", "Saint-Germain-sur-Bresle",
"Saint-Hilaire-de-Court", "SAINT-HILAIRE-EN-LIGNIERES", "SAINT-HILAIRE-EN-MORVAN",
"Saint-Hilaire-La-Treille", "SAINT-HILAIRE-LA-TREILLE", "Saint-Hilaire-sur-B\xe9naize",
"SAINT-HILAIRE-SUR-BENAIZE", "SAINT-HIPPOLYTE", "SAINT-JEAN-DE-BOEUF",
"Saint-Jean-de-la-Neuville", "Saint-Jean-le-Blanc", "SAINT-JOIRE",
"Saint-Julien-sur-Sarthe", "Saint-Juvin", "Saint-L\xe9ger-Magnazeix",
"Saint-L\xe9ger-Magnazeix n\xb01", "Saint-L\xe9ger-Magnazeix n\xb02",
"Saint-L\xe9omer", "SAINT-LEGER-DU-BOIS", "SAINT-LEGER-MAGNAZEIX",
"SAINT-LEGER-SOUS-CHOLET", "SAINT-LEGER-SUR-DHEUNE", "SAINT-LEOMER",
"SAINT-LEONARD", "Saint-Lupien", "Saint-Martial-sur-Isop",
"Saint-Martin-aux-Chartrains", "Saint-Martin-de-Lamps", "Saint-Martin-en-Campagne",
"Saint-Martin-l'Ars", "SAINT-MARTIN-L'ARS", "Saint-Martin-l'Hortier",
"Saint-Martin-sur-Ouanne", "Saint-Mary", "SAINT-MAUR", "Saint-Maurice-des-Lions",
"Saint-Maurice-des-Lions n\xb01", "Saint-Maurice-des-Lions n\xb02",
"Saint-Maurice-la-Clou\xe8re", "SAINT-MAURICE-LA-CLOUERE",
"Saint-Maurice-la-Souterraine", "SAINT-MAURICE-LA-SOUTERRAINE",
"SAINT-MAURICE-SUR-AVEYRON", "Saint-Menoux", "SAINT-MICHEL-EN-BRENNE",
"Saint-Ouen-l\xe8s-Parey", "Saint-Ouen-sur-Gartempe", "SAINT-OUEN-SUR-GARTEMPE",
"SAINT-OUEN-SUR-LOIRE", "Saint-P\xe8re", "Saint-Pierre-de-Fursac",
"Saint-Pierre-de-Maill\xe9", "SAINT-PIERRE-DE-MAILLE", "Saint-Pierre-de-Varennes",
"Saint-Pierre-le-Vieux", "Saint-Pierre-le-Viger", "Saint-Pierremont",
"Saint-Priest-Ligoure", "SAINT-PRIEST-TAURION", "Saint-Quentin-des-Pr\xe9s",
"Saint-S\xe9condin", "Saint-Saturnin-du-Bois", "Saint-Saulge",
"SAINT-SAUVEUR-LE-VICOMTE", "Saint-Sornin-la-Marche", "Saint-Sornin-Leulac",
"Saint-Sulpice", "Saint-Sulpice-le-Dunois", "Saint-Sulpice-les-Feuilles",
"SAINT-SYMPHORIEN", "Saint-Symphorien-sur-Couze", "SAINT-THURIEN",
"Saint-Vaast-d'\xc9quiqueville", "Saint-Val\xe9ry-sur-Somme",
"Saint-Victor-sur-Ouche", "Saint-Vincent-Cramesnil", "Sainte-Colombe",
"SAINTE-COLOMBE-SUR-LOING", "SAINTE-CROIX", "Sainte-Croix-aux-Mines",
"Sainte-H\xe9l\xe8ne-Bondeville", "Sainte-Maure-de-Touraine",
"SAINTE-MAURE-DE-TOURAINE", "Sainte-Menehould", "Sainte-Radegonde",
"Sainte-S\xe9v\xe8re-sur-Indre", "Saints", "SAIZY", "Salon",
"Sandaucourt", "SANDAUCOURT", "SANGATTE", "Sarcey", "SARDY-LES-EPIRY",
"SARNOIS", "Sarralbe", "Saulg\xe9", "Saulg\xe9 n\xb01", "Saulg\xe9 n\xb02",
"Saulg\xe9 n\xb03", "Saulg\xe9 n\xb04", "Saulg\xe9 n\xb05",
"SAULGOND", "Saulxures", "SAULXURES-LES-VANNES", "Sauzelles",
"Savigny-en-Terre-Plaine", "Savilly", "Schillersdorf", "Schleithal",
"Schopperten", "SEMIDE", "semoutiers", "Senaide", "Senonges",
"Seraumont", "Sermaize-les-Bains", "Serqueux", "SERVINS",
"Sexey-aux-Forges", "Signy-le-Petit", "Sincey", "Sivry",
"Sivry n\xb02", "Sogny-en-l'Angle", "SOIZE", "Sologny", "SOLOGNY",
"SOMMAING", "Sommeval", "Sorcy-Saint-Martin", "Suisse", "SURIS",
"Suzanne", "TACONNAY", "TANNAY", "Tannerre-en-Puisaye", "Tendu",
"TERNAS", "Tersannes", "TERSANNES", "Th\xe9norgues", "Theillay",
"Thenay", "They-sous-Vaud\xe9mont", "Thiat", "THIEFFRAIN",
"THIEULLOY-L'ABBAYE", "Thollet", "THOLLET", "Thonne-le-Thil",
"Thorey-Lyautey", "Thors", "Tign\xe9court", "Tilly-sur-Meuse",
"Touffreville-sur-Eu", "Tourlaville", "Tourmignies", "Tourni\xe8res",
"Tr\xe9mont-sur-Saulx", "Tr\xe9monzey", "Tramayes", "Tranzault",
"TRANZAULT", "TREVOL", "TUPIGNY", "Uchizy", "Urville", "Usson-du-Poitou",
"USSON-DU-POITOU", "VACQUERIE-LE-BOUCQ", "Vains", "VAINS",
"Val-de-Meuse", "Valfroicourt", "VANDENESSE-EN-AUXOIS", "Vandy",
"VARANGEVILLE", "VARENGUEBEC", "VARESNES", "VARZY", "VASLES",
"Vaud\xe9mont", "Vaudeville", "VAUDOY-EN-BRIE", "Vaulry",
"Vault-de-Lugny", "VAUX", "Vaux-sur-Aure", "Vaux-sur-Blaise",
"VAZEILLES-LIMANDRE", "Velles", "Vendoeuvres", "VENESMES",
"VENTES-SAINT-REMY", "VERNEUIL", "Verneuil-Moustiers", "VERNEUIL-MOUSTIERS",
"Vernon", "VERRIERES", "VERTEUIL-SUR-CHARENTE", "Veuvey-sur-Ouche",
"Veyrac", "VEZELISE", "VIC-DE-CHASSENAY", "VICQ-SUR-GARTEMPE",
"Vierzy", "Vieux", "Vigneulles", "Vigoux", "VIGOUX", "Vill\xe9cloye",
"VILLE-LE-MARCLET", "Villemotier", "VILLENTROIS", "VILLERS-AU-BOIS",
"Villers-Br\xfblin", "Villers-Outr\xe9aux", "VILLEY-SAINT-ETIENNE",
"Villiers-le-Sec", "VILLIEU-LOYES-MOLLON", "VIMENIL", "Vimoutiers",
"VINEUIL", "Vingt-Hanaps", "Viriat", "Vironchaux", "Vitrey",
"Voncq", "Waldhambach", "WANNEHAIN", "Wi\xe8ge-Faty", "WIEGE-FATY",
"Wignehies", "WILLERONCOURT", "Wimy", "Winnezeele", "Wissant",
"Woincourt"), class = "factor"), latitude = c(48.633333,
46.383333, 47.733333, 45.883333, 49.333333, 46.116667), longitude = c(4,
0.883333, 5.266667, 0.5, 0.35, 0.783333), start.date = structure(c(57L,
57L, 57L, 57L, 57L, 57L), .Label = c("01/02/2012", "01/03/2012",
"01/04/2012", "02/02/2012", "02/03/2012", "02/04/2012", "03/02/2012",
"03/03/2012", "03/04/2012", "04/03/2012", "04/04/2012", "05/02/2012",
"05/03/2012", "05/04/2012", "06/02/2012", "06/03/2012", "06/04/2012",
"07/02/2012", "07/03/2012", "07/04/2012", "08/02/2012", "08/03/2012",
"08/04/2012", "09/02/2012", "09/03/2012", "09/04/2012", "10/02/2012",
"10/03/2012", "10/04/2012", "11/02/2012", "11/03/2012", "11/04/2012",
"12/02/2012", "12/03/2012", "12/04/2012", "13/02/2012", "13/03/2012",
"13/04/2012", "14/02/2012", "14/03/2012", "14/04/2012", "15/02/2012",
"15/03/2012", "15/04/2012", "16/02/2012", "16/03/2012", "16/04/2012",
"17/02/2012", "17/03/2012", "17/04/2012", "18/02/2012", "18/03/2012",
"18/04/2012", "19/02/2012", "19/03/2012", "19/04/2012", "20/01/2012",
"20/02/2012", "20/03/2012", "20/04/2012", "21/01/2012", "21/02/2012",
"21/03/2012", "22/02/2012", "22/03/2012", "23/01/2012", "23/02/2012",
"23/03/2012", "23/04/2012", "24/01/2012", "24/02/2012", "24/03/2012",
"25/01/2012", "25/02/2012", "25/03/2012", "26/01/2012", "26/02/2012",
"26/03/2012", "27/02/2012", "27/03/2012", "28/02/2012", "28/03/2012",
"29/02/2012", "29/03/2012", "30/01/2012", "30/03/2012", "31/01/2012",
"31/03/2012"), class = "factor"), sheep.measuring.units = structure(c(1L,
1L, 1L, 1L, NA, 1L), .Label = "Animals", class = "factor"),
sheep.susceptible = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), sheep.cases = c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), sheep.deaths = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), sheep.destroyed = c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), sheep.slaughtered = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), cattle.measuring.units = structure(c(NA,
NA, NA, NA, 1L, NA), .Label = "Animals", class = "factor"),
cattle.susceptible = c(NA, NA, NA, NA, NA, NA), cattle.cases = c(NA,
NA, NA, NA, NA, NA), cattle.deaths = c(NA, NA, NA, NA, NA,
NA), cattle.destroyed = c(NA, NA, NA, NA, NA, NA), cattle.slaughtered = c(NA,
NA, NA, NA, NA, NA), goat.measuring.units = structure(c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), .Label = "Animals", class = "factor"), goat.susceptible = c(NA,
NA, NA, NA, NA, NA), goat.cases = c(NA, NA, NA, NA, NA, NA
), goat.deaths = c(NA, NA, NA, NA, NA, NA), goat.destroyed = c(NA,
NA, NA, NA, NA, NA), goat.slaughtered = c(NA, NA, NA, NA,
NA, NA), remark = structure(c(1L, 1L, 1L, 1L, 1L, 1L), .Label = c("Affected Population",
"Affected Population newborn lamb", "Affected Population newborn lambs"
), class = "factor"), code = structure(c(10L, 87L, 53L, 16L,
26L, 16L), .Label = c("01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "13", "14", "15", "16",
"17", "18", "19", "21", "22", "23", "24", "25", "26", "27",
"28", "29", "2A", "2B", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45",
"46", "47", "48", "49", "50", "51", "52", "53", "54", "55",
"56", "57", "58", "59", "60", "61", "62", "63", "64", "65",
"66", "67", "68", "69", "70", "71", "72", "73", "74", "75",
"76", "77", "78", "79", "80", "81", "82", "83", "84", "85",
"86", "87", "88", "89", "90", "91", "92", "93", "94", "95"
), class = "factor")), .Names = c("departement", "commune",
"unit.type", "location", "latitude", "longitude", "start.date",
"sheep.measuring.units", "sheep.susceptible", "sheep.cases",
"sheep.deaths", "sheep.destroyed", "sheep.slaughtered", "cattle.measuring.units",
"cattle.susceptible", "cattle.cases", "cattle.deaths", "cattle.destroyed",
"cattle.slaughtered", "goat.measuring.units", "goat.susceptible",
"goat.cases", "goat.deaths", "goat.destroyed", "goat.slaughtered",
"remark", "code"), row.names = c(NA, 6L), class = "data.frame")
# agg_summaries() is exercised only for its return type here: aggregating
# "latitude" over the levels of "code" with mean should yield a data.frame
expect_is(agg_summaries(dat, var = "latitude", group = "code", mean), "data.frame")
})
/tests/testthat/test-mapData.r | no_license | Hackout2/mapData | R | false | false | 46,073 | r
"Saint-Maurice-des-Lions n\xb01", "Saint-Maurice-des-Lions n\xb02",
"Saint-Maurice-la-Clou\xe8re", "SAINT-MAURICE-LA-CLOUERE",
"Saint-Maurice-la-Souterraine", "SAINT-MAURICE-LA-SOUTERRAINE",
"SAINT-MAURICE-SUR-AVEYRON", "Saint-Menoux", "SAINT-MICHEL-EN-BRENNE",
"Saint-Ouen-l\xe8s-Parey", "Saint-Ouen-sur-Gartempe", "SAINT-OUEN-SUR-GARTEMPE",
"SAINT-OUEN-SUR-LOIRE", "Saint-P\xe8re", "Saint-Pierre-de-Fursac",
"Saint-Pierre-de-Maill\xe9", "SAINT-PIERRE-DE-MAILLE", "Saint-Pierre-de-Varennes",
"Saint-Pierre-le-Vieux", "Saint-Pierre-le-Viger", "Saint-Pierremont",
"Saint-Priest-Ligoure", "SAINT-PRIEST-TAURION", "Saint-Quentin-des-Pr\xe9s",
"Saint-S\xe9condin", "Saint-Saturnin-du-Bois", "Saint-Saulge",
"SAINT-SAUVEUR-LE-VICOMTE", "Saint-Sornin-la-Marche", "Saint-Sornin-Leulac",
"Saint-Sulpice", "Saint-Sulpice-le-Dunois", "Saint-Sulpice-les-Feuilles",
"SAINT-SYMPHORIEN", "Saint-Symphorien-sur-Couze", "SAINT-THURIEN",
"Saint-Vaast-d'\xc9quiqueville", "Saint-Val\xe9ry-sur-Somme",
"Saint-Victor-sur-Ouche", "Saint-Vincent-Cramesnil", "Sainte-Colombe",
"SAINTE-COLOMBE-SUR-LOING", "SAINTE-CROIX", "Sainte-Croix-aux-Mines",
"Sainte-H\xe9l\xe8ne-Bondeville", "Sainte-Maure-de-Touraine",
"SAINTE-MAURE-DE-TOURAINE", "Sainte-Menehould", "Sainte-Radegonde",
"Sainte-S\xe9v\xe8re-sur-Indre", "Saints", "SAIZY", "Salon",
"Sandaucourt", "SANDAUCOURT", "SANGATTE", "Sarcey", "SARDY-LES-EPIRY",
"SARNOIS", "Sarralbe", "Saulg\xe9", "Saulg\xe9 n\xb01", "Saulg\xe9 n\xb02",
"Saulg\xe9 n\xb03", "Saulg\xe9 n\xb04", "Saulg\xe9 n\xb05",
"SAULGOND", "Saulxures", "SAULXURES-LES-VANNES", "Sauzelles",
"Savigny-en-Terre-Plaine", "Savilly", "Schillersdorf", "Schleithal",
"Schopperten", "SEMIDE", "semoutiers", "Senaide", "Senonges",
"Seraumont", "Sermaize-les-Bains", "Serqueux", "SERVINS",
"Sexey-aux-Forges", "Signy-le-Petit", "Sincey", "Sivry",
"Sivry n\xb02", "Sogny-en-l'Angle", "SOIZE", "Sologny", "SOLOGNY",
"SOMMAING", "Sommeval", "Sorcy-Saint-Martin", "Suisse", "SURIS",
"Suzanne", "TACONNAY", "TANNAY", "Tannerre-en-Puisaye", "Tendu",
"TERNAS", "Tersannes", "TERSANNES", "Th\xe9norgues", "Theillay",
"Thenay", "They-sous-Vaud\xe9mont", "Thiat", "THIEFFRAIN",
"THIEULLOY-L'ABBAYE", "Thollet", "THOLLET", "Thonne-le-Thil",
"Thorey-Lyautey", "Thors", "Tign\xe9court", "Tilly-sur-Meuse",
"Touffreville-sur-Eu", "Tourlaville", "Tourmignies", "Tourni\xe8res",
"Tr\xe9mont-sur-Saulx", "Tr\xe9monzey", "Tramayes", "Tranzault",
"TRANZAULT", "TREVOL", "TUPIGNY", "Uchizy", "Urville", "Usson-du-Poitou",
"USSON-DU-POITOU", "VACQUERIE-LE-BOUCQ", "Vains", "VAINS",
"Val-de-Meuse", "Valfroicourt", "VANDENESSE-EN-AUXOIS", "Vandy",
"VARANGEVILLE", "VARENGUEBEC", "VARESNES", "VARZY", "VASLES",
"Vaud\xe9mont", "Vaudeville", "VAUDOY-EN-BRIE", "Vaulry",
"Vault-de-Lugny", "VAUX", "Vaux-sur-Aure", "Vaux-sur-Blaise",
"VAZEILLES-LIMANDRE", "Velles", "Vendoeuvres", "VENESMES",
"VENTES-SAINT-REMY", "VERNEUIL", "Verneuil-Moustiers", "VERNEUIL-MOUSTIERS",
"Vernon", "VERRIERES", "VERTEUIL-SUR-CHARENTE", "Veuvey-sur-Ouche",
"Veyrac", "VEZELISE", "VIC-DE-CHASSENAY", "VICQ-SUR-GARTEMPE",
"Vierzy", "Vieux", "Vigneulles", "Vigoux", "VIGOUX", "Vill\xe9cloye",
"VILLE-LE-MARCLET", "Villemotier", "VILLENTROIS", "VILLERS-AU-BOIS",
"Villers-Br\xfblin", "Villers-Outr\xe9aux", "VILLEY-SAINT-ETIENNE",
"Villiers-le-Sec", "VILLIEU-LOYES-MOLLON", "VIMENIL", "Vimoutiers",
"VINEUIL", "Vingt-Hanaps", "Viriat", "Vironchaux", "Vitrey",
"Voncq", "Waldhambach", "WANNEHAIN", "Wi\xe8ge-Faty", "WIEGE-FATY",
"Wignehies", "WILLERONCOURT", "Wimy", "Winnezeele", "Wissant",
"Woincourt"), class = "factor"), latitude = c(48.633333,
46.383333, 47.733333, 45.883333, 49.333333, 46.116667), longitude = c(4,
0.883333, 5.266667, 0.5, 0.35, 0.783333), start.date = structure(c(57L,
57L, 57L, 57L, 57L, 57L), .Label = c("01/02/2012", "01/03/2012",
"01/04/2012", "02/02/2012", "02/03/2012", "02/04/2012", "03/02/2012",
"03/03/2012", "03/04/2012", "04/03/2012", "04/04/2012", "05/02/2012",
"05/03/2012", "05/04/2012", "06/02/2012", "06/03/2012", "06/04/2012",
"07/02/2012", "07/03/2012", "07/04/2012", "08/02/2012", "08/03/2012",
"08/04/2012", "09/02/2012", "09/03/2012", "09/04/2012", "10/02/2012",
"10/03/2012", "10/04/2012", "11/02/2012", "11/03/2012", "11/04/2012",
"12/02/2012", "12/03/2012", "12/04/2012", "13/02/2012", "13/03/2012",
"13/04/2012", "14/02/2012", "14/03/2012", "14/04/2012", "15/02/2012",
"15/03/2012", "15/04/2012", "16/02/2012", "16/03/2012", "16/04/2012",
"17/02/2012", "17/03/2012", "17/04/2012", "18/02/2012", "18/03/2012",
"18/04/2012", "19/02/2012", "19/03/2012", "19/04/2012", "20/01/2012",
"20/02/2012", "20/03/2012", "20/04/2012", "21/01/2012", "21/02/2012",
"21/03/2012", "22/02/2012", "22/03/2012", "23/01/2012", "23/02/2012",
"23/03/2012", "23/04/2012", "24/01/2012", "24/02/2012", "24/03/2012",
"25/01/2012", "25/02/2012", "25/03/2012", "26/01/2012", "26/02/2012",
"26/03/2012", "27/02/2012", "27/03/2012", "28/02/2012", "28/03/2012",
"29/02/2012", "29/03/2012", "30/01/2012", "30/03/2012", "31/01/2012",
"31/03/2012"), class = "factor"), sheep.measuring.units = structure(c(1L,
1L, 1L, 1L, NA, 1L), .Label = "Animals", class = "factor"),
sheep.susceptible = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), sheep.cases = c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), sheep.deaths = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), sheep.destroyed = c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), sheep.slaughtered = c(NA_integer_, NA_integer_, NA_integer_,
NA_integer_, NA_integer_, NA_integer_), cattle.measuring.units = structure(c(NA,
NA, NA, NA, 1L, NA), .Label = "Animals", class = "factor"),
cattle.susceptible = c(NA, NA, NA, NA, NA, NA), cattle.cases = c(NA,
NA, NA, NA, NA, NA), cattle.deaths = c(NA, NA, NA, NA, NA,
NA), cattle.destroyed = c(NA, NA, NA, NA, NA, NA), cattle.slaughtered = c(NA,
NA, NA, NA, NA, NA), goat.measuring.units = structure(c(NA_integer_,
NA_integer_, NA_integer_, NA_integer_, NA_integer_, NA_integer_
), .Label = "Animals", class = "factor"), goat.susceptible = c(NA,
NA, NA, NA, NA, NA), goat.cases = c(NA, NA, NA, NA, NA, NA
), goat.deaths = c(NA, NA, NA, NA, NA, NA), goat.destroyed = c(NA,
NA, NA, NA, NA, NA), goat.slaughtered = c(NA, NA, NA, NA,
NA, NA), remark = structure(c(1L, 1L, 1L, 1L, 1L, 1L), .Label = c("Affected Population",
"Affected Population newborn lamb", "Affected Population newborn lambs"
), class = "factor"), code = structure(c(10L, 87L, 53L, 16L,
26L, 16L), .Label = c("01", "02", "03", "04", "05", "06",
"07", "08", "09", "10", "11", "12", "13", "14", "15", "16",
"17", "18", "19", "21", "22", "23", "24", "25", "26", "27",
"28", "29", "2A", "2B", "30", "31", "32", "33", "34", "35",
"36", "37", "38", "39", "40", "41", "42", "43", "44", "45",
"46", "47", "48", "49", "50", "51", "52", "53", "54", "55",
"56", "57", "58", "59", "60", "61", "62", "63", "64", "65",
"66", "67", "68", "69", "70", "71", "72", "73", "74", "75",
"76", "77", "78", "79", "80", "81", "82", "83", "84", "85",
"86", "87", "88", "89", "90", "91", "92", "93", "94", "95"
), class = "factor")), .Names = c("departement", "commune",
"unit.type", "location", "latitude", "longitude", "start.date",
"sheep.measuring.units", "sheep.susceptible", "sheep.cases",
"sheep.deaths", "sheep.destroyed", "sheep.slaughtered", "cattle.measuring.units",
"cattle.susceptible", "cattle.cases", "cattle.deaths", "cattle.destroyed",
"cattle.slaughtered", "goat.measuring.units", "goat.susceptible",
"goat.cases", "goat.deaths", "goat.destroyed", "goat.slaughtered",
"remark", "code"), row.names = c(NA, 6L), class = "data.frame")
expect_is(agg_summaries(dat, var = "latitude", group = "code", mean), "data.frame")
})
|
library(Cascade)
### Name: compare-methods
### Title: Some basic criteria of comparison between actual and inferred
### network.
### Aliases: compare-methods compare compare,network,network,numeric-method
### ** Examples
data(simul)
#Comparing true and inferred networks
F_score=NULL
#Here are the cutoff levels tested
test.seq<-seq(0,max(abs(Net_inf@network*0.9)),length.out=200)
for(u in test.seq){
F_score<-rbind(F_score,Cascade::compare(Net,Net_inf,u))
}
matplot(test.seq,F_score,type="l",ylab="criterion value",xlab="cutoff level",lwd=2)
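#A hedged follow-up sketch (assumes the first column of F_score holds the
#criterion to maximise; actual column meanings depend on Cascade::compare):
#best.cutoff <- test.seq[which.max(F_score[, 1])]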
|
/data/genthat_extracted_code/Cascade/examples/compare-methods.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 554
|
r
|
#' Sort_major_minor
#'
#' Reorders two columns row by row so that \code{col1} always holds the
#' larger (major) value and \code{col2} the smaller (minor) value
#' @param data a data frame (or matrix) containing the two columns
#' @param col1 index of the column that should end up with the major value
#' @param col2 index of the column that should end up with the minor value
#' @export
#' @return The input data with the two column values swapped where needed
Sort_major_minor<-function(data, col1, col2){
for (i in 1:dim(data)[1]) {
if (data[i,col1] < data[i,col2]) {
save = data[i,col2]
data[i,col2] = data[i,col1]
data[i,col1] = save
}
}
return(data)
}
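# Minimal usage sketch (hypothetical data; commented out so the package file
# stays side-effect free). After the call, column 1 holds the row-wise major
# value and column 2 the minor:
# ex <- data.frame(a = c(1, 5), b = c(3, 2))
# Sort_major_minor(ex, col1 = 1, col2 = 2)
# #   a b
# # 1 3 1
# # 2 5 2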
|
/eSNPKaryotyping/R/Sort_major_minor.R
|
no_license
|
BenvenLab/eSNPKaryotyping
|
R
| false
| false
| 370
|
r
|
context("erfc function")
test_that("check mode of x", {
x <- rnorm(4L)
expect_match(mode(erfc(x)), "numeric")
})
test_that("erfc length is constant", {
x <- rnorm(4L)
y <- 5.
expect_equal(length(x), length(erfc(x)))
})
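# Hedged extra check -- assumes erfc implements the standard complementary
# error function, which satisfies erfc(x) = 2 * pnorm(x * sqrt(2), lower.tail = FALSE).
test_that("erfc matches the pnorm identity", {
  x <- rnorm(4L)
  expect_equal(erfc(x), 2 * pnorm(x * sqrt(2), lower.tail = FALSE))
})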
|
/data/genthat_extracted_code/tsensembler/tests/test-erfc.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 231
|
r
|
context("erfc function")
test_that("check mode of x", {
x <- rnorm(4L)
expect_match(mode(erfc(x)), "numeric")
})
test_that("erfc length is constant", {
x <- rnorm(4L)
y <- 5.
expect_equal(length(x), length(erfc(x)))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/K_KernelObj.R
\name{KernelObj-methods}
\alias{KernelObj-methods}
\alias{.getKernelX,data.frame,KernelObj-method}
\alias{.kernel,KernelObj,ANY,ANY-method}
\alias{.kernel,KernelObj,missing,missing-method}
\alias{.kernel,KernelObj,missing,ANY-method}
\alias{.kernel,KernelObj,ANY,missing-method}
\alias{print,KernelObj-method}
\alias{show,KernelObj-method}
\alias{summary,KernelObj-method}
\title{Methods Available for Objects of Class \code{KernelObj}}
\usage{
\S4method{.getKernelX}{data.frame,KernelObj}(data, object)
\S4method{.kernel}{KernelObj,ANY,ANY}(object, x1, x2, ...)
\S4method{.kernel}{KernelObj,missing,missing}(object, x1, x2, ...)
\S4method{.kernel}{KernelObj,missing,ANY}(object, x1, x2, ...)
\S4method{.kernel}{KernelObj,ANY,missing}(object, x1, x2, ...)
\S4method{print}{KernelObj}(x, ...)
\S4method{show}{KernelObj}(object)
\S4method{summary}{KernelObj}(object, ...)
}
\description{
Methods Available for Objects of Class \code{KernelObj}
\code{.getKernelX}
not allowed.
\code{.kernel}
not allowed.
\code{print}
prints kernel model. Includes "Kernel" as header.
\code{show}
displays kernel model. Includes "Kernel" as header.
\code{summary}
not allowed.
}
\keyword{internal}
|
/man/KernelObj-methods.Rd
|
no_license
|
cran/DynTxRegime
|
R
| false
| true
| 1,292
|
rd
|
## These functions cache the inverse of a matrix so that repeated requests
## for the inverse of the same matrix are served from the cache instead of
## being recomputed.
# makeCacheMatrix creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y) {
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(solve) inverse <<- solve
getinverse <- function() inverse
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
# cacheSolve computes the inverse of the special "matrix" returned by
# makeCacheMatrix. It first checks whether the inverse has already been
# calculated; if so, it retrieves the inverse from the cache and skips the
# computation. Otherwise, it computes the inverse of the underlying matrix
# and stores it in the cache via the setinverse function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inverse <- x$getinverse()
if(!is.null(inverse)) {
message("getting cached data")
return(inverse)
}
data <- x$get()
inverse <- solve(data, ...)
x$setinverse(inverse)
inverse
}
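# Minimal usage sketch (hypothetical session, not part of the assignment spec):
m <- makeCacheMatrix(matrix(c(2, 0, 0, 2), 2, 2))
cacheSolve(m)  # computes the inverse and caches it
cacheSolve(m)  # prints "getting cached data" and returns the cached inverse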
|
/cachematrix.R
|
no_license
|
senekanei/ProgrammingAssignment2
|
R
| false
| false
| 1,855
|
r
|
dataset <- 'MERGE.clean.FINAL'
#load("Cross_Validation.Rdata")
load("resid.models.Rdata") # load total phenotype with covariates for all subjects
total <- total[(!is.na(total$IID) & !is.na(total$BMI) & !is.na(total$SEX) & !is.na(total$AGE) & !is.na(total$BATCH) & !is.na(total$C1)),] # remove any subjects with missing data
# Read in list of all genes that have PCAs:
####################################################################################################
#system("ls *.min_PCA | sed 's/\\(.*\\)......../\\1/' > PCA.genes") # write list of all genes that have .min_PCA format to file
PCA.genes <- read.table("PCA.genes",header=FALSE)
names(PCA.genes) <- c('NAME')
gene.count <- nrow(PCA.genes)
# Read in eigenSNPs for each gene and place dataframes in list
##############################################################
read_gene <- function(gene){ # Function that reads gene listed in data.frame of gene names (1 per row)
gene.PCAs.temp <- read.table(paste("",gene,".min_PCA",sep=""),header=FALSE) # read in eigenSNPs for gene
gene.PCAs.temp <- gene.PCAs.temp[,2:ncol(gene.PCAs.temp)]
eigen.names <- paste(rep('EIG',(ncol(gene.PCAs.temp)-1)),1:(ncol(gene.PCAs.temp)-1),sep="")
names(gene.PCAs.temp) <- c('IID',eigen.names)
gene.PCAs.temp <- as.matrix(gene.PCAs.temp)
return(gene.PCAs.temp)
}
gene.PCAs <- apply(PCA.genes, 1, read_gene) # apply read_gene function to PCA.genes list of genes data.frame
names(gene.PCAs) <- PCA.genes$NAME
lin.mod <- function(gene.PCA,resid.phe){ # Function that returns a z-value for one gene
gene.covars <- merge(gene.PCA,resid.phe,by='IID')#
eigen.SNP.text <- c(paste(rep('EIG',(ncol(gene.PCA)-1)),1:(ncol(gene.PCA)-1),sep=""))
eigen.SNP.variable <- paste(eigen.SNP.text,collapse=" + ")
model.A <- eval(parse(text=(paste("lm(PHE ~ ",eigen.SNP.variable,", data=gene.covars)",sep="")))) # evaluate effects of each eigenSNP after controlling for covariates
pval <- as.numeric(pf(summary(model.A)$fstatistic[1],summary(model.A)$fstatistic[2],summary(model.A)$fstatistic[3],lower.tail=FALSE))
zval <- -1*qnorm(pval/2) # store z-value
return(zval)
}
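# Hypothetical single-gene call (assumes a 'training.set' data frame with
# FID/IID/PHE columns, as built in the loop below):
# z1 <- lin.mod(gene.PCAs[[1]], training.set)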
# Start permutation loop where 'perms' (here 100) random 50% splits of the data are created and evaluated:
#########################
perms <- 100
# START FOR LOOP
for(i in 1:perms){ # iterate through perms
# Designate 50% of sample to training set and 50% to test set:
training.index <- sample(1:nrow(total),(nrow(total)/2),replace=FALSE)
training.mod <- lm(formula = as.numeric(BMI) ~ as.numeric(AGE) + as.factor(SEX) +
as.numeric(C1) + as.numeric(C2) + as.numeric(C3) + as.numeric(C4) +
as.numeric(C5) + as.numeric(C6) + as.numeric(C7) + as.numeric(C8) +
as.numeric(C9) + as.numeric(C10) + as.numeric(C11) + as.numeric(C12) +
as.numeric(C13) + as.numeric(C14) + as.numeric(C15) + as.numeric(C16) +
as.numeric(C17) + as.numeric(C18) + as.numeric(C19) + as.numeric(C20) +
as.factor(set) + as.factor(BATCH), data = total[training.index,])
resid.phe.train <- resid(training.mod)
test.mod <- lm(formula = as.numeric(BMI) ~ as.numeric(AGE) + as.factor(SEX) +
as.numeric(C1) + as.numeric(C2) + as.numeric(C3) + as.numeric(C4) +
as.numeric(C5) + as.numeric(C6) + as.numeric(C7) + as.numeric(C8) +
as.numeric(C9) + as.numeric(C10) + as.numeric(C11) + as.numeric(C12) +
as.numeric(C13) + as.numeric(C14) + as.numeric(C15) + as.numeric(C16) +
as.numeric(C17) + as.numeric(C18) + as.numeric(C19) + as.numeric(C20) +
as.factor(set) + as.factor(BATCH), data = total[c(-training.index),])
resid.phe.test <- resid(test.mod)
training.set <- cbind.data.frame(rep(0,length(resid.phe.train)),total[training.index,1],resid.phe.train)
names(training.set) <- c('FID','IID','PHE')
test.set <- cbind.data.frame(rep(0,length(resid.phe.test)),total[c(-training.index),1],resid.phe.test)
names(test.set) <- c('FID','IID','PHE')
training.stat <- as.matrix(unlist(lapply(gene.PCAs,lin.mod,training.set)))
test.stat <- as.matrix(unlist(lapply(gene.PCAs,lin.mod,test.set)))
names(training.stat) <- c('p.z')
names(test.stat) <- c('p.z')
# write out each permutation:
write.table(training.stat,file=paste('',dataset,'.cv_train_stat',i,'',sep=""),quote=FALSE,row.names=FALSE,col.names=TRUE)
write.table(test.stat,file=paste('',dataset,'.cv_test_stat',i,'',sep=""),quote=FALSE,row.names=FALSE,col.names=TRUE)
#
} # END LOOP i
#
# Read in temporary files for training and test set permutations
################################################################
test.stats <- as.data.frame(matrix(0,nrow=7161,ncol=(perms))) # matrix to be filled
training.stats <- as.data.frame(matrix(0,nrow=7161,ncol=(perms))) # matrix to be filled
for (i in 1:perms){
test.stats[,i] <- read.table(paste('',dataset,'.cv_test_stat',i,'',sep=""),header=TRUE)
training.stats[,i] <- read.table(paste('',dataset,'.cv_train_stat',i,'',sep=""),header=TRUE)
print(i)
}
# Run GSEA on training set and test set data
############################################
z.permutes <- read.table(paste('',dataset,'.gene_perms',sep=""))## read in matrix with permutation values
gene.names <- as.data.frame(PCA.genes$NAME) # names of all genes
names(gene.names) <- c('NAME')
gene.info.pre <- read.table('clean.genelist.FINAL',header=TRUE) # read in list of genes
# order gene list correctly:
gene.info <- merge(gene.names,gene.info.pre,by='NAME')
load(paste('',dataset,'.gene.set.Rdata',sep="")) # read in gene set information
#
Gene2Set <- function(gene, set){
set.idx <- lapply(set, function(x){
idx <- NULL
for (i in x) idx <- c(idx, which(gene==i)) # loop through each position in pathway and find index with name of gene from list
return(idx)
})
return(set.idx)
} # END FUNCTION 'Gene2Set'
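# Worked toy example (hypothetical inputs):
# Gene2Set(c("A", "B", "C"), list(p1 = c("B", "C"))) returns list(p1 = c(2L, 3L)),
# i.e. the positions of each pathway's genes within the gene vector.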
transform <- function(gene.set){
values <- as.matrix(gene.set[[1]])[,1] # Transform gene-pathway list into correct format
return(values)
} # END FUNCTION 'transform'
gene.sets <- lapply(gene.sets.final,transform)
set.idx <- Gene2Set(gene.info[,1], gene.sets) # indices, within the gene list, of the genes in each pathway set
#
stats0 <- z.permutes[,2:ncol(z.permutes)]# assign z-value from each gene for each perm to column of dat.frame
path.names <- names(gene.sets)
################################################
# Get p-values for training sets and test sets #
################################################
getES <- function(gene.stats,set.idx, p=1){ # Applied below to each column of gene-level statistics; returns each pathway's enrichment score, i.e. its deviation from what is expected by random chance
ES=rep(0,length(set.idx))
rk <- rank(-gene.stats,ties.method="first") # return order index based on ranked z-values (first of a tie is chosen as ordered first); greatest to least
N=length(gene.stats) ## total number of genes
for (i in 1:length(set.idx)){
path.idx=set.idx[[i]] ## the gene indices for the i-th pathway
Nh=length(path.idx) ## number of genes in this pathway
oo <- sort(rk[path.idx]) # sort ranks of genes for path i
ES.all= -(1:N)/(N-Nh) # 1 through total number of genes, divided by total genes - genes in pathway
statj=gene.stats[path.idx]
statj=-sort(-statj)
Nr=sum(abs(statj)^p) #
for (j in 1:(Nh-1)){ # loop through number of genes in pathway
jj=sum(abs(statj[1:j])^p)
ES.all[oo[j]:(oo[j+1]-1)] = ES.all[oo[j]:(oo[j+1]-1)]+jj/Nr+j/(N-Nh)
}
ES.all[N]=0
ES[i]=max(ES.all)
}
return(ES)
}
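# Sketch of the statistic (a weighted KS running sum, cf. Subramanian et al. 2005):
# each hit at its rank adds abs(statj)^p / Nr to the running sum, each miss
# subtracts 1/(N - Nh), and ES is the maximum the running sum attains.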
ES.test <- apply(test.stats,2,getES,set.idx) # Enrichment Scores for test sets
ES.train <- apply(training.stats,2,getES,set.idx) # Enrichment Scores for training sets
ES0=NULL
for (i in 1:ncol(stats0)){
s0=stats0[,i]
ES0=cbind(ES0,getES(set.idx,gene.stats=s0)) # get null Enrichment Scores for null data
}
mES=apply(ES0,1,mean) # find the mean Enrichment Scores of null
sdES=apply(ES0,1,sd) # find standard deviation of null Enrichment Scores
NES.test=(ES.test-mES)/sdES # normalized observed enrichment scores for observed data; ONE FOR EACH PATHWAY
NES.train=(ES.train-mES)/sdES
NES0 <- (ES0-mES)/sdES # normalized null
# Calculate uncorrected p-values:
get_pval <- function(ob,null){
pct.a<- mean(null >= ob)
M <- length(null)
B1 <- pct.a*M
pval <- (B1+1)/(M+1)
return(pval)
}
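# Worked example (hypothetical numbers): with null = c(1, 2, 3, 4) and ob = 2.5,
# pct.a = 0.5, M = 4, B1 = 2, so pval = (2 + 1) / (4 + 1) = 0.6.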
uc.pvals.train <- matrix(0,nrow=512,ncol=perms) # store uncorrected p-values
uc.pvals.test <- matrix(0,nrow=512,ncol=perms)
cross_val_p <- function(data){
return(sapply(data,get_pval,NES0))
}
uc.pvals.train <- apply(NES.train,2,cross_val_p)
uc.pvals.test <- apply(NES.test,2,cross_val_p)
save.image(file="Cross_Validation.Rdata")
# Examine the correlation across cross validation samples of p-values from pathways
#########################################################################################
below_per <- function(train,test,cutoff,path.dat){ # returns % of times pathway has p-value below cutoff in both training set AND test set, also p-value for percentage based on empirical null
train.below <- train
train.below[which(train<=cutoff)] <- 1
train.below[which(train>cutoff)] <- 0
test.below <- test
test.below[which(test<=cutoff)] <- 1
test.below[which(test>cutoff)] <- 0
total.below <- test.below + train.below
total.below[which(total.below<2)] <- 0
total.below[which(total.below==2)] <- 2
per.2 <- function(x){return(mean(x==2))}
percent.overlap <- apply(total.below,1,per.2)
paths.below <- cbind.data.frame(path.dat,percent.overlap)
names(paths.below) <- c('PATHWAY','PERCENT')
return(paths.below) # what % of the time is data equal to or less than specified cutoff
}
cutoff <- .05
dat <- below_per(uc.pvals.train,uc.pvals.test,cutoff,path.names)
pval.data <- read.table("MERGE.clean.FINAL.GSEA",header=TRUE)
names(pval.data) <- c('PATHWAY','P.VAL','Q.VAL')
sig.paths <- pval.data[which(pval.data[,2] <= cutoff ),1]
path.names <- names(gene.sets)
sig.path.index <- match(sig.paths,path.names)
null.paths <- pval.data[which(pval.data[,2] > cutoff ),1]
path.names <- names(gene.sets)
null.path.index <- match(null.paths,path.names)
top.paths <- dat[sig.path.index,]
bottom.paths <- dat[null.path.index,]
t.test(top.paths[,2],bottom.paths[,2]) # compare match rates in those above and below cutoff
#
# Create Plot of pathway p-values:
total.paths.pre <- merge(rbind(top.paths,bottom.paths),pval.data,by='PATHWAY')
tot.pval.index <- order(total.paths.pre[,3])
total.paths <- cbind.data.frame(total.paths.pre[tot.pval.index,],1:512)
plot(total.paths[,2],xlab="Pathway Ranked By Total Sample P-Value",ylab="% Training and Test Both Below Cutoff",main="GSEA Pathway Cross-Validation",pch=19,bg='grey')
p <- .05/512
null.05 <- mean(uc.pvals.train<=.05)*mean(uc.pvals.test<=.05) # probability any path cross validation replicates by chance
bon.p <- qexp(p,1/null.05,lower.tail=FALSE) # .05 cutoff before bonferroni
abline(h=bon.p,col="red",lty=5,lwd=2)
legend(300,.2, # places a legend at the appropriate place
c("Positive","Type I","Type II","Negative","p = 0.05"), # puts text in the legend
pch=c(19,19,19,19,NA),
lty=c(NA,NA,NA,NA,5), # gives the legend appropriate symbols (lines)
lwd=c(NA,NA,NA,NA,2),
col=c("blue","green3","orange","black","red"))
points(total.paths[((total.paths[,3]<=.05) & (total.paths[,2]>=bon.p)),5],total.paths[((total.paths[,3]<=.05) & (total.paths[,2]>=bon.p)),2], col="blue",pch=19,bg='grey') # True discoveries
points(total.paths[((total.paths[,3]<=.05) & (total.paths[,2]<bon.p)),5],total.paths[((total.paths[,3]<=.05) & (total.paths[,2]<bon.p)),2], col="green3",pch=19) # Type I errors
points(total.paths[((total.paths[,3]>.05) & (total.paths[,2]>=bon.p)),5],total.paths[((total.paths[,3]>.05) & (total.paths[,2]>=bon.p)),2], col="orange",pch=19) # Type II errors
#
Validated.paths <- total.paths[((total.paths[,3]<=.05) & (total.paths[,2]>=bon.p)),]
save(Validated.paths,file=paste('',dataset,'.GSEA.cv.Rdata',sep="")) # save
save.image(file="Cross_Validation.Rdata")
|
/Cross_Validation_GSEA.r
|
no_license
|
MatthewASimonson/gene_set_scripts
|
R
| false
| false
| 12,117
|
r
|
#' Retrieve the Active Project
#'
#' Retrieve the path to the active project (if any).
#'
#' @param default The value to return when no project is
#' currently active. Defaults to `NULL`.
#'
#' @export
#'
#' @return The active project directory, as a length-one character vector.
#'
#' @examples
#' \dontrun{
#'
#' # get the currently-active renv project
#' renv::project()
#'
#' }
project <- function(default = NULL) {
renv_project(default = default)
}
renv_project <- function(default = getwd()) {
project <- Sys.getenv("RENV_PROJECT", unset = NA)
if (is.na(project))
return(default)
project
}
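# Usage sketch (hypothetical session): with RENV_PROJECT unset, renv_project()
# returns its default (getwd()); after Sys.setenv(RENV_PROJECT = "~/proj"),
# renv_project() returns "~/proj".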
renv_project_initialized <- function(project) {
lockfile <- renv_lockfile_path(project)
if (file.exists(lockfile))
return(TRUE)
library <- renv_paths_library(project = project)
if (file.exists(library))
return(TRUE)
FALSE
}
renv_project_type <- function(path) {
# check for R package projects
descpath <- file.path(path, "DESCRIPTION")
if (!file.exists(descpath))
return("unknown")
desc <- catch(renv_description_read(descpath))
if (inherits(desc, "error")) {
warning(desc)
return("unknown")
}
# check for explicitly recorded type
type <- desc$Type
if (!is.null(type))
return(tolower(type))
  # otherwise, infer the type from the 'Package' field
package <- desc$Package
if (!is.null(package))
return("package")
}
renv_project_records <- function(project) {
# if this project has a DESCRIPTION file, use it to provide records
descpath <- file.path(project, "DESCRIPTION")
if (file.exists(descpath))
return(renv_project_records_description(project, descpath))
}
renv_project_records_description <- function(project, descpath) {
# first, parse remotes (if any)
remotes <- renv_project_records_description_remotes(project, descpath)
# next, find packages mentioned in the DESCRIPTION file
fields <- c("Depends", "Imports", "Suggests", "LinkingTo")
deps <- renv_dependencies_discover_description(descpath, fields)
specs <- split(deps, deps$Package)
# drop ignored specs
ignored <- renv_project_ignored_packages(project = project)
specs <- specs[setdiff(names(specs), c("R", ignored))]
# if any Roxygen fields are included,
# infer a dependency on roxygen2 and devtools
desc <- renv_description_read(descpath)
if (any(grepl("^Roxygen", names(desc)))) {
for (package in c("devtools", "roxygen2")) {
specs[[package]] <-
specs[[package]] %||%
list(Package = package, Require = "", Version = "")
}
}
# now, try to resolve the packages
records <- enumerate(specs, function(package, spec) {
# use remote if supplied
if (!is.null(remotes[[package]]))
return(remotes[[package]])
# check for explicit version requirement
explicit <- spec[spec$Require == "==", ]
if (nrow(explicit) == 0)
return(renv_remotes_resolve(package))
version <- spec$Version[[1]]
if (!nzchar(version))
return(renv_remotes_resolve(package))
entry <- paste(package, version, sep = "@")
renv_remotes_resolve(entry)
})
# return records
records
}
renv_project_records_description_remotes <- function(project, descpath) {
desc <- renv_description_read(descpath)
remotes <- desc$Remotes
if (is.null(desc$Remotes))
return(list())
splat <- strsplit(remotes, "\\s*,\\s*")[[1]]
resolved <- lapply(splat, renv_remotes_resolve)
names(resolved) <- extract_chr(resolved, "Package")
resolved
}
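# Illustrative input (hypothetical DESCRIPTION field):
#   Remotes: r-lib/rlang, user/pkg@v1.0
# is split on commas, each remote resolved, and the results named by package.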
renv_project_ignored_packages <- function(project) {
# if we don't have a project, nothing to do
if (is.null(project))
return(character())
# read base set of ignored packages
ignored <- settings$ignored.packages(project = project)
# for R package projects, ensure the project itself is ignored
if (renv_project_type(project) == "package") {
desc <- renv_description_read(project)
ignored <- c(ignored, desc[["Package"]])
}
# return collected set of ignored packages
ignored
}
|
/R/project.R
|
no_license
|
philipp-baumann/renv
|
R
| false
| false
| 3,999
|
r
|
library(PBImisc)
### Name: eden
### Title: European day hospital evaluation
### Aliases: eden
### Keywords: eden
### ** Examples
data(eden)
library(lattice)
xyplot(BPRS.Average~MANSA|center, data=eden, type=c("p","g","smooth"))
|
/data/genthat_extracted_code/PBImisc/examples/eden.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 235
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/praise_bien.R
\name{praise_bien}
\alias{praise_bien}
\title{Create Mexican-style phrases of success}
\usage{
praise_bien(exclamaciones = exclamacion_positiva,
frases = frase_positiva, refranes = refranes_positivos)
}
\arguments{
\item{exclamaciones}{A character vector containing a collection of exclamations that express happiness and enthusiasm}
\item{frases}{A character vector containing a collection of celebratory phrases}
\item{refranes}{A character vector made up of everyday celebratory sayings}
}
\value{
A character vector
}
\description{
This function generates upbeat phrases from a random sample of exclamations and phrases, or sayings, common in everyday Mexican vocabulary.
}
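% Minimal usage sketch (hedged: assumes the default phrase vectors ship with
% the package and are available at run time):
\examples{
\dontrun{
praise_bien()
}
}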
\seealso{
Other crear_frases: \code{\link{praise_error}}
}
\concept{crear_frases}
|
/man/praise_bien.Rd
|
no_license
|
ComunidadBioInfo/praiseMX
|
R
| false
| true
| 906
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nagelR2.r
\name{nagelR2}
\alias{nagelR2}
\title{Nagelkerke's / Cragg & Uhler's R2}
\usage{
nagelR2(likeNull, likeFull, n)
}
\arguments{
\item{likeNull}{Likelihood (not log-likelihood) of the null model or an object of class \code{logLik} with log-likelihood of the null model (usually an intercept-only model).}
\item{likeFull}{Likelihood (not log-likelihood) of the "full" model or an object of class \code{logLik} with log-likelihood of the "full" model (usually a model with covariates).}
\item{n}{Sample size.}
}
\value{
Numeric.
}
\description{
Nagelkerke's / Cragg & Uhler's R2
}
\examples{
# create data
x <- 1:100
y <- 2 + 1.7 * x + rnorm(100, 0, 30)
# models
nullModel <- lm(y ~ 1)
fullModel <- lm(y ~ x)
# plot
plot(x, y)
abline(nullModel, col='red')
abline(fullModel, col='blue')
legend('bottomright', legend=c('Null', 'Full'), lwd=1, col=c('red', 'blue'))
# R2
likeNull <- exp(as.numeric(logLik(nullModel)))
likeFull <- exp(as.numeric(logLik(fullModel)))
nagelR2(likeNull, likeFull, 100)
}
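% For reference (a sketch of the standard definition, not taken from this file):
% R2 = (1 - (likeNull/likeFull)^(2/n)) / (1 - likeNull^(2/n)).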
|
/man/nagelR2.Rd
|
no_license
|
adamlilith/statisfactory
|
R
| false
| true
| 1,086
|
rd
|
\name{xml.rpc}
\alias{xml.rpc}
\title{Invoke XML-RPC method from R}
\description{
This function can be used to invoke a method provided by an XML-RPC
(remote procedure call) server.
It can pass R objects in the request by serializing them to XML format
and also converts the result back to R.
}
\usage{
xml.rpc(url, method, ..., .args = list(...), .opts = list(),
.defaultOpts = list(httpheader = c("Content-Type" = "text/xml"),
followlocation = TRUE,
useragent = useragent),
.convert = TRUE, .curl = getCurlHandle(),
useragent = "R-XMLRPC")
}
\arguments{
\item{url}{the URL of the XML-RPC server}
\item{method}{a string giving the name of the XML-RPC method to invoke}
  \item{\dots}{a collection of argument values}
\item{.args}{an alternative way to specify the collection (list) of arguments}
\item{.opts}{a list of options passed on to
\code{\link[RCurl]{postForm}}. This is for the caller to specify
server-specific curl options as opposed to general XML-RPC options
which are set via \code{.defaultOpts}.
}
\item{.defaultOpts}{standard/default RCurl options used when making
this call}
\item{.convert}{either a logical value indicating whether to perform
    the default conversion (via \code{convertToR}) or not,
or alternatively a function which is called with a string
giving the body of the HTTP response of the XML-RPC call.}
\item{.curl}{a CURLHandle object that the caller can specify to allow
reusing existing handles and connections. This can greatly improve
efficiency.}
\item{useragent}{the string identifying the application that is
reported to the Web server as making the request.}
}
\value{
If \code{.convert} is a logical value and \code{TRUE}, an R object
giving the result of the XML-RPC method invocation. If \code{.convert}
is \code{FALSE}, a string giving the body of the response.
If \code{.convert} is a function, it is called with the body of the
XML-RPC response as a string.
}
\references{\url{http://www.xmlrpc.com/spec}
\url{http://www.cafeconleche.org/books/xmljava/chapters/ch02s05.html}
for a DTD for XML-RPC and examples and discussion.
}
\author{Duncan Temple Lang }
\seealso{
\code{\link[RCurl]{postForm}}
\code{\link[RCurl]{getURL}} and REST Web services
\code{SSOAP} package.
}
\examples{
# See http://www.advogato.org/xmlrpc.html
xml.rpc('http://www.advogato.org/XMLRPC', 'test.square', 9L)
xml.rpc('http://www.advogato.org/XMLRPC', 'test.sumprod', 9L, 10L)
xml.rpc('http://www.advogato.org/XMLRPC', 'test.strlen', 'abcdef')
xml.rpc('http://www.advogato.org/XMLRPC', 'test.capitalize', 'abcdef')
xml.rpc('http://www.advogato.org/XMLRPC', 'user.exists', 'duncan')
xml.rpc('http://www.advogato.org/XMLRPC', 'cert.get', 'duncan')
xml.rpc('http://www.advogato.org/XMLRPC', 'diary.len', 'duncan')
xml.rpc('http://www.advogato.org/XMLRPC', 'diary.get', 'duncan', 1L)
xml.rpc('http://www.advogato.org/XMLRPC', 'diary.getDates', 'duncan', 4L)
xml.rpc("http://xmlrpc-c.sourceforge.net/api/sample.php", "sample.sumAndDifference", 3L, 4L)
# Doesn't work
# xml.rpc('http://ws2.webservices.nl', 'system.methodHelp', 'addressReeksPostcodeSearch')
# xml.rpc('http://www.cookcomputing.com/xmlrpcsamples/RPC2.ashx', 'example.getStateName', 2L)
}
\keyword{IO}
\keyword{programming}
|
/man/xml.rpc.Rd
|
no_license
|
duncantl/XMLRPC
|
R
| false
| false
| 3,445
|
rd
|
library(ropercenter)
### Name: roper_download
### Title: Download datasets from the Roper Center
### Aliases: roper_download
### ** Examples
## Not run:
##D roper_download(file_id = c("CNCIPO1996-96010", "CNCIPO2000-02"))
## End(Not run)
|
/data/genthat_extracted_code/ropercenter/examples/roper_download.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 248
|
r
|
.onLoad <- function(libname, pkgname) {
options(
civis.ml_train_template_id = get_train_template_id(),
civis.default_database = NULL
)
invisible()
}
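# Resolve the id of the newest "train" CivisML template. If an API key is
# available but the newest template is not visible to this user (e.g. it is
# still in internal release), walk back through older versions until one is.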
get_train_template_id <- function() {
versions <- unique(CIVIS_ML_TEMPLATE_IDS$version)
latest <- max(versions)
id <- CIVIS_ML_TEMPLATE_IDS[CIVIS_ML_TEMPLATE_IDS$version == latest &
CIVIS_ML_TEMPLATE_IDS$name == "train", "id"]
if (!inherits(try(api_key(), silent = TRUE), "try-error")) {
i <- 1
while (length(scripts_list_custom(id)) == 0 &&
i < length(versions)) {
# use the previous version; assume that only the latest version is in internal release
prev <- versions[length(versions) - i]
id <- CIVIS_ML_TEMPLATE_IDS[CIVIS_ML_TEMPLATE_IDS$version == prev &
CIVIS_ML_TEMPLATE_IDS$name == "train", "id"]
i <- i + 1
}
}
return(id)
}
|
/R/zzz.R
|
no_license
|
JosiahParry/civis-r
|
R
| false
| false
| 923
|
r
|
# Resample the hurricane wind hazard map to be at the same resolution/extent as the Zillow data
library(tidyverse)
library(raster)
library(googledrive)
# devtools::install_github(repo = "JoshOBrien/rasterDT")
library(rasterDT)
library(gdalUtils)
# make the functions available to download the Zillow grid
source("R/download_grid.R")
# Ensure the directory structure for data output is present
if(!dir.exists(file.path("output", "hazards"))) {
dir.create(file.path("output", "hazards"), recursive = TRUE)
}
# Get the empty template grid of the Zillow dataset
empty_grid <-
download_grid() %>%
raster()
# These set up the variables to be used to get the hazard data and name
# new output files appropriately
hazard_name <- "hurricane-wind"
# hazard_file <- "CycloneFrequency_1980_2000_projected/gdcyc_NAD.tif"
# zip_path <- file.path("data", "hazards", "CycloneFrequency_1980_2000_projected.zip")
#
# # The hurricane wind data is on the Google Drive
# hazard_id <- "1REzIWNeq4zwwZdiTT2YBa7UYXTYA-r2s"
hazard_file <- "gdcyc/gdcyc.asc"
zip_path <- file.path("data", "hazards", "gdcyc_cyclone.zip")
hazard_id <- "1whh-JSmF7v6vJm35lgQAAt5bs01Phb_t"
# Names of the files (to read and manipulate, and then what to call it upon
# export)
hazard_path_src <- file.path("data", "hazards", hazard_name, hazard_file)
hazard_path_out <- file.path("output", "hazards", paste0(hazard_name, "_zillow-grid.tif"))
overwrite <- FALSE
if(!file.exists(hazard_path_out) | overwrite) {
# download the raw data from Google Drive
hazard_metadata <- googledrive::drive_get(id = hazard_id)
googledrive::drive_download(hazard_metadata, path = zip_path)
# unzip the data file
unzip(zip_path, overwrite = TRUE, exdir = file.path("data", "hazards", hazard_name))
unlink(zip_path)
hazard_path_tmp <- file.path("data", "hazards", hazard_name, paste0(hazard_name, "_temp.tif"))
hazard_orig <- raster::raster(hazard_path_src)
gdalwarp(srcfile = hazard_path_src,
dstfile = hazard_path_tmp,
t_srs = crs(empty_grid),
tr = c(250, 250),
overwrite = TRUE,
s_srs = crs(hazard_orig),
r = "bilinear")
hazard <- gdalUtils::align_rasters(unaligned = hazard_path_tmp,
reference = empty_grid@file@name,
dstfile = hazard_path_out,
overwrite = TRUE,
output_Raster = TRUE)
unlink(hazard_path_tmp)
# Mask out the pixels outside of CONUS using the water mask derived from the
# USAboundaries package high resolution CONUS shapefile (rasterized to the Zillow
# grid) and the flood hazard layer, with all values of 999 masked out (representing
# persistent water bodies)
if(!file.exists(file.path("output", "water-mask_zillow-grid.tif"))) {
source("R/configure-flood.R")
}
mask <- raster::raster("output/water-mask_zillow-grid.tif")
hazard <- raster::mask(x = hazard, mask = mask)
# This source represents records for 21 years (Jan 1, 1980 to Dec 31, 2000)
# https://sedac.ciesin.columbia.edu/data/set/ndh-cyclone-hazard-frequency-distribution
# One caveat is that there is a mask applied to 1 km grid cells to exclude
# cells with <5 people per square km or without significant agriculture
hazard_rate <- hazard / 21
# assume an exponential distribution of waiting times, and then we can use
# the cumulative distribution function for the exponential to ask what
# is the probability that an event occurred before a specified waiting time
# (one year, in our case) given a rate of the hazard (which we figured
# empirically)
hazard_prob <- 1 - exp(-1 * hazard_rate)
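# e.g. a cell with 2.1 recorded events over the 21 years has a rate of 0.1/yr,
# giving an annual probability of 1 - exp(-0.1) ~= 0.095 (hypothetical cell
# value, for illustration only)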
# write to disk
raster::writeRaster(x = hazard_prob, filename = hazard_path_out, overwrite = TRUE)
}
# Alternative source?
# hazard_file <- "gdcyc/gdcyc.asc"
# hazard_id <- '1whh-JSmF7v6vJm35lgQAAt5bs01Phb_t'
# zip_path <- file.path("data", "hazards", "gdcyc_cyclone.zip")
|
/R/configure-hurricane-wind.R
|
permissive
|
mikoontz/twensday
|
R
| false
| false
| 4,016
|
r
|
ssplotM <- function(x, hidden.paths=NULL,
plots="obs", type="d",
sortv=NULL, sort.channel=1, dist.method="OM",
with.missing=FALSE,
title=NA, title.n=TRUE, cex.title=1, title.pos=1,
withlegend="auto", ncol.legend="auto",
with.missing.legend="auto",
legend.prop=0.3, cex.legend=1,
hidden.states.colors="auto", hidden.states.labels="auto",
xaxis=TRUE, xlab=NA, xtlab=NULL, xlab.pos=1,
yaxis = FALSE, ylab="auto", hidden.states.title="Hidden states",
ylab.pos="auto",
cex.lab=1, cex.axis=1, ...){
# plot.new()
grid.newpage()
arguments <- as.list(match.call())[-1]
if (length(arguments$x) == 1) {
arguments$x <- arguments$x[[1]]
}
do.call(SSPlotter, args = do.call(ssp, args = arguments))
}
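# Hypothetical usage sketch (assumes `seqs` is a sequence object accepted by
# ssp(), e.g. a TraMineR stslist; `seqs` is illustrative, not defined here):
# ssplotM(seqs, plots = "obs", type = "d", title = "State distributions")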
|
/R/ssplotM.R
|
no_license
|
zencoding/seqHMM
|
R
| false
| false
| 959
|
r
|
library(package = "arules")
transactions = read.transactions(file = file("stdin"), format = "basket", sep = ",")
rules = apriori(transactions, parameter = list(minlen=1, sup = 0.001, conf = 0.001))
itemsets <- unique(generatingItemsets(rules))
itemsets.df <- as(itemsets, "data.frame")
frequentItemsets <- itemsets.df[with(itemsets.df, order(-support,items)),]
names(frequentItemsets)[1] <- "itemset"
write.table(frequentItemsets, file = "", sep = ",", row.names = FALSE)
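# Hypothetical invocation (the script reads basket-format transactions from
# stdin and writes the frequent itemsets as CSV to stdout):
# Rscript Mining.R < transactions.csv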
|
/Lab4/Mining.R
|
no_license
|
anishi-mehta/Data-Mining
|
R
| false
| false
| 472
|
r
|
library(package = "arules")
transactions = read.transactions(file = file("stdin"), format = "basket", sep = ",")
rules = apriori(transactions, parameter = list(minlen=1, sup = 0.001, conf = 0.001))
itemsets <- unique(generatingItemsets(rules))
itemsets.df <- as(itemsets, "data.frame")
frequentItemsets <- itemsets.df[with(itemsets.df, order(-support,items)),]
names(frequentItemsets)[1] <- "itemset"
write.table(frequentItemsets, file = "", sep = ",", row.names = FALSE)
|
# Script to make example degree days and path plot
library(tidyr)
library(dplyr)
library(lubridate)
library(reshape2)
library(geosphere)
library(ggplot2)
#devtools::install_github("thomasp85/patchwork")
#install.packages("scales")
#devtools::install_github('thomasp85/gganimate')
#library(gganimate)
library(tools)
#install.packages("rgdal")
library(rgdal)
library(sp)
library(raster)
library(stringr)
library(tm)
library(data.table)
library(zoo)
library(cowplot)
library(scales)
#library(reshape)
#library(maptools)
library(ggpubr)
total_data <- readRDS("D:/Reproduction/katana output/2019 Run/Daily/2003_Lat-30.0_For_with_mortality.rds")
total_data <- subset(total_data, degree_days <=530)
str(total_data)
# single <- subset(total_data, Particle =="2003_Lat-30.0_For.155678") # alternative candidate (overwritten below, kept for reference)
single <- subset(total_data, Particle =="2003_Lat-30.0_For.214756")
bounds <- read.csv("ROMS Boundaries.csv", header = T)
dots <- read.csv("forward_release_summary _table.csv", header = T)
#Load map data
Aus <- readOGR(dsn = "Shape files/australia",layer = "cstauscd_r")
#plot(Aus)
Aus_coast <- subset(Aus, FEAT_CODE != "sea")
#plot(Aus_coast)
min_lon <- 145
max_lon <- 160
min_lat <- -40
max_lat <- -24
geo_bounds <- c(left = min_lon, bottom = min_lat, right = max_lon, top = max_lat)
Sites.grid <- expand.grid(lon_bound = c(geo_bounds[1], geo_bounds[3]),
lat_bound = c(geo_bounds[2], geo_bounds[4]))
coordinates(Sites.grid) <- ~ lon_bound + lat_bound
Aus_crop <- crop(Aus_coast, extent(Sites.grid)) #rgeos must be installed to run
shelf <- read.csv("200m_contour.csv", header = T)
shelf <- subset(shelf, Var2 >= -39)
shelf <- subset(shelf, Var2 <= -25.4)
shelf <- subset(shelf, Var1 > 145)
g <- ggplot(total_data) + geom_polygon(data = Aus_crop, aes(x=long, y = lat, group = group), fill = "grey60") +
geom_path(data = single, aes(x = Longtitude, y = Latitude, col=Temperature), size = 1) + theme_classic() + coord_map("mercator") +
coord_cartesian(xlim = c(145, 160), ylim = c(-20, -40), expand = TRUE)
g
p1 <- ggplot(total_data) + geom_path(data = single, aes(x = Longtitude, y = Latitude, col=Temperature), size = 1) +
theme_classic() +
labs(x=expression(paste("Longitude (",degree, ")", sep="")), y=expression(paste("Latitude (", degree, ")"))) +
scale_x_continuous(expand = c(0,0)) + scale_y_continuous(expand = c(0,0)) +
#scale_fill_distiller(palette = "Spectral", na.value=NA, trans="log10") +
coord_map() + #coord_quickmap() + # # this line could be very slow
geom_path(data=bounds, aes(x=Long, y = Lat), colour = "black", lty="dashed", show.legend = FALSE) +
geom_polygon(data = Aus_crop, aes(x=long, y = lat, group = group), fill = "gray40", colour = "black")+
#geom_point(data = dots, aes(x = Longitude, y = Latitude), size = 2, col = "black")+
geom_path(data=shelf, aes(x=Var1, y = Var2)) +
scale_color_gradient(low="blue", high = "red")+
theme(axis.title.x = element_text(face="bold", colour="black", size = 18),
axis.text.x = element_text(colour="black", size = 12),
axis.title.y = element_text(face="bold", colour="black", size = 18),
axis.text.y = element_text(colour="black", size = 14),
axis.ticks = element_line(colour="black"),
strip.text = element_text(colour="black", face = "bold", size = 14),
strip.background = element_rect(colour = "white"),
legend.justification=c(1,0), legend.position="right",
panel.border = element_rect(colour = "black", fill=NA, size = 1),
legend.key.size = unit(1, "cm"),
legend.title = element_text(face = "bold", size = 14),
legend.text = element_text(size = 12, face = "bold"))
p1
head(single)
p2 <- ggplot(single, aes(DayofLife, degree_days, col = Temperature)) + geom_line(size=2) + theme_classic() +
scale_color_gradient(low="blue", high = "red") +
theme(axis.title.x = element_text(face="bold", colour="black", size = 18),
axis.text.x = element_text(colour="black", size = 12),
axis.title.y = element_text(face="bold", colour="black", size = 18),
axis.text.y = element_text(colour="black", size = 14),
axis.ticks = element_line(colour="black"),
strip.text = element_text(colour="black", face = "bold", size = 14),
strip.background = element_rect(colour = "white"),
legend.justification=c(1,0), legend.position="right",
panel.border = element_rect(colour = "black", fill=NA, size = 1),
legend.key.size = unit(1, "cm"),
legend.title = element_text(face = "bold", size = 14),
legend.text = element_text(size = 12, face = "bold")) +
ylab("Degree-days") + xlab("Days")
p2
ggarrange(p1, p2,
labels = "auto",
ncol = 2, label.y = 1, label.x = 0, common.legend = TRUE, widths = c(1.5,1))
ggsave("Output/Degree-day real example Plot.pdf", width = 21, height = 14.8, units = "cm")
ggsave("Output/Degree-day real example Plot.png", width = 21, height = 14.8, units = "cm")
# How to identify particle to plot
dat <- total_data %>% group_by(Particle) %>% summarise(Temp_range = (max(Temperature) -min(Temperature)))
dat <- arrange(dat, desc(Temp_range)) # widest temperature range first; sort() does not work on data frames
|
/3_Figures/Figure_S2.R
|
no_license
|
jaseeverett/TailorParticleTracking
|
R
| false
| false
| 5,306
|
r
|
## ----setup, include=FALSE-----------------------------------------------------
library(knitr)
knitr::opts_chunk$set(
fig.align = "center",
fig.height = 5.5,
fig.width = 6,
warning = FALSE,
collapse = TRUE,
dev.args = list(pointsize = 10),
out.width = "65%"
)
## ---- message = FALSE, echo = FALSE-------------------------------------------
library(smmR)
library(DiscreteWeibull)
## ----echo = FALSE, out.width = '400px', fig.cap = 'Waste treatment for a textile factory'----
knitr::include_graphics("Waste_treatment.png")
## ----echo = FALSE, out.width = '400px', fig.cap = 'Three-state discrete-time semi-Markov system modelization'----
knitr::include_graphics("Three_state_modelization.png")
## -----------------------------------------------------------------------------
states <- c("1", "2", "3") # State space
alpha <- c(1, 0, 0) # Initial distribution
p <- matrix(data = c(0, 1, 0,
0.95, 0, 0.05,
1, 0, 0), nrow = 3, byrow = TRUE) # Transition matrix
distr <- matrix(c(NA, "geom", NA,
"dweibull", NA, "dweibull",
"dweibull", NA, NA),
nrow = 3, ncol = 3, byrow = TRUE) # Distribution matrix
param1 <- matrix(c(NA, 0.8, NA,
0.3, NA, 0.5,
0.6, NA, NA),
nrow = 3, ncol = 3, byrow = TRUE)
param2 <- matrix(c(NA, NA, NA,
0.5, NA, 0.7,
0.9, NA, NA),
nrow = 3, ncol = 3, byrow = TRUE)
parameters <- array(c(param1, param2), c(3, 3, 2))
factory <- smmparametric(states = states, init = alpha, ptrans = p,
type.sojourn = "fij", distr = distr, param = parameters)
## -----------------------------------------------------------------------------
M <- 10000
seq <- simulate(object = factory, nsim = M)
## -----------------------------------------------------------------------------
estimate <- fitsmm(sequences = seq, states = states, type.sojourn = "fij")
## -----------------------------------------------------------------------------
print(x = estimate$ptrans, digits = 2)
## -----------------------------------------------------------------------------
plot(x = estimate, i = "2", j = "3", type = "l", col = "blue")
lines(x = 1:estimate$kmax, y = ddweibull(x = 1:estimate$kmax, q = 0.5, beta = 0.7),
col = "red", pch = "x")
legend(x = "topright",
legend = c("True value", "Estimate"),
col = c("red", "blue"), lty = c(1, 1))
## -----------------------------------------------------------------------------
k <- 300
upstates <- c("1", "2") # Working states of the semi-Markov system
## -----------------------------------------------------------------------------
trueReliab <- reliability(x = factory, k = k, upstates = upstates)
estReliab <- reliability(x = estimate, k = k, upstates = upstates)
## -----------------------------------------------------------------------------
plot(x = 0:k, y = trueReliab[, 1], type = "l", cex = 2.5, ylim = c(0, 1),
col = "red", main = "Reliability", xlab = "k", ylab = "R(k)")
lines(x = estReliab[, 1], col = "blue")
lines(x = estReliab[, 3], lty = 4, col = "blue")
lines(x = estReliab[, 4], lty = 4, col = "blue")
legend(x = "topright",
legend = c("True value", "Estimated value", "95% confidence interval"),
col = c("red", "blue", "blue"), lty = c(1, 1, 4))
## -----------------------------------------------------------------------------
trueAvail <- availability(x = factory, k = k, upstates = upstates)
estAvail <- availability(x = estimate, k = k, upstates = upstates)
## -----------------------------------------------------------------------------
plot(x = 0:k, y = trueAvail[, 1], type = "l", cex = 2.5, ylim = c(0.95, 1),
col = "red", main = "Availability", xlab = "k", ylab = "A(k)")
lines(x = estAvail[, 1], col = "blue")
lines(x = estAvail[, 3], lty = 4, col = "blue")
lines(x = estAvail[, 4], lty = 4, col = "blue")
legend(x = "topright",
legend = c("True value", "Estimated value", "95% confidence interval"),
col = c("red", "blue", "blue"), lty = c(1, 1, 4))
## -----------------------------------------------------------------------------
trueBMP <- failureRate(x = factory, k = k, upstates = upstates)
estBMP <- failureRate(x = estimate, k = k, upstates = upstates)
## -----------------------------------------------------------------------------
plot(x = 0:k, y = trueBMP[, 1], type = "l", cex = 2.5, ylim = c(0, 0.025),
col = "red", main = "BMP-failure rate", xlab = "k", ylab = bquote(lambda(k)))
lines(x = estBMP[, 1], col = "blue")
lines(x = estBMP[, 3], lty = 4, col = "blue")
lines(x = estBMP[, 4], lty = 4, col = "blue")
legend(x = "topright",
legend = c("True value", "Estimated value", "95% confidence interval"),
col = c("red", "blue", "blue"), lty = c(1, 1, 4))
## -----------------------------------------------------------------------------
trueRG <- failureRate(x = factory, k = k, upstates = upstates, failure.rate = "RG")
estRG <- failureRate(x = estimate, k = k, upstates = upstates, failure.rate = "RG")
## -----------------------------------------------------------------------------
plot(x = 0:k, y = trueRG[, 1], type = "l", cex = 2.5, ylim = c(0, 0.03),
col = "red", main = "RG-failure rate", xlab = "k", ylab = "r(k)")
lines(x = estRG[, 1], col = "blue")
lines(x = estRG[, 3], lty = 4, col = "blue")
lines(x = estRG[, 4], lty = 4, col = "blue")
legend(x = "topright",
legend = c("True value", "Estimated value", "95% confidence interval"),
col = c("red", "blue", "blue"), lty = c(1, 1, 4))
## -----------------------------------------------------------------------------
trueMTTF <- mttf(x = factory, upstates = upstates)
estMTTF <- mttf(x = estimate, upstates = upstates)
## -----------------------------------------------------------------------------
print(trueMTTF)
print(estMTTF)
## -----------------------------------------------------------------------------
trueMTTR <- mttr(x = factory, upstates = upstates)
estMTTR <- mttr(x = estimate, upstates = upstates)
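## (hypothetical follow-up, mirroring the MTTF block above) --------------------
# print(trueMTTR)
# print(estMTTR)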
|
/inst/doc/Textile-Factory.R
|
no_license
|
cran/smmR
|
R
| false
| false
| 6,142
|
r
|
#########################################################
## Reads Report Files ##
## and pulls out required quantities ##
## Written by: Chantel Wetzel ##
## Date: 4-18-2014 ##
#########################################################
Rep_Summary<- function(rep.new, y, pre.fishery.yrs, do.forecast)
{
tot.yrs <- 1:y #1:(y-pre.fishery.yrs)
if (do.forecast > 0){
ofl.yrs <- (tot.yrs[length(tot.yrs)]+1):(tot.yrs[length(tot.yrs)] + 4)
OFL = mapply(function(x) OFL = as.numeric(strsplit(rep.new[grep(paste("OFLCatch_",x,sep=""),rep.new)], " ")[[1]][3]), x = ofl.yrs)
ACL = mapply(function(x) OFL = as.numeric(strsplit(rep.new[grep(paste("ForeCatch_",x,sep=""),rep.new)], " ")[[1]][3]), x = ofl.yrs)
ForeCatch = mapply(function(x) ForeCatch = as.numeric(strsplit(rep.new[grep(paste("ForeCatch_",x,sep=""),rep.new)], " ")[[1]][3]), x = ofl.yrs)
}
SB = mapply(function(x) SB = as.numeric(strsplit(rep.new[grep(paste("SPB_",x,sep=""),rep.new)]," ")[[1]][3]), x = tot.yrs)
SB.virgin = as.numeric(strsplit(rep.new[grep("SPB_Virgin",rep.new)]," ")[[1]][3])
Recruits = mapply(function(x) TotBio = as.numeric(strsplit(rep.new[grep(paste(1, x,"TIME",sep=" "),rep.new)]," ")[[1]][8]),
x = 1:tot.yrs[length(tot.yrs)-1])
TotBio= mapply(function(x) TotBio = as.numeric(strsplit(rep.new[grep(paste(1, x,"TIME",sep=" "),rep.new)]," ")[[1]][5]),
x = 1:tot.yrs[length(tot.yrs)-1])
FMSY = as.numeric(strsplit(rep.new[grep("Fstd_MSY",rep.new)], " ")[[1]][3])
FSPR = as.numeric(strsplit(rep.new[grep("Fstd_SPR",rep.new)], " ")[[1]][3])
#LLsurvey = as.numeric(strsplit(rep.new[grep("TOTAL",rep.new)+2], " ")[[1]][2])
#CrashPen = as.numeric(strsplit(rep.new[grep("TOTAL",rep.new)+8], " ")[[1]][2])#NEW HERE
R0 = as.numeric(strsplit(rep.new[grep("R0",rep.new)], " ")[[1]][3])
M.f = as.numeric(strsplit(rep.new[grep("NatM_p_1_Fem_GP",rep.new)], " ")[[1]][3])
M.m = as.numeric(strsplit(rep.new[grep("NatM_p_1_Mal_GP",rep.new)], " ")[[1]][3])
Lmin = as.numeric(strsplit(rep.new[grep("L_at_Amin_Fem_GP",rep.new)], " ")[[1]][3])
Lmax = as.numeric(strsplit(rep.new[grep("L_at_Amax_Fem_GP",rep.new)], " ")[[1]][3])
k = as.numeric(strsplit(rep.new[grep("VonBert_K_Fem_GP",rep.new)], " ")[[1]][3])
cv.young = as.numeric(strsplit(rep.new[grep("CV_young",rep.new)], " ")[[1]][3])
cv.old = as.numeric(strsplit(rep.new[grep("CV_old",rep.new)], " ")[[1]][3])
#Selectivity
F.Selex.1 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_1_Fishery",rep.new)], " ")[[1]][3])
F.Selex.2 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_2_Fishery",rep.new)], " ")[[1]][3])
F.Selex.3 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_3_Fishery",rep.new)], " ")[[1]][3])
F.Selex.4 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_4_Fishery",rep.new)], " ")[[1]][3])
F.Selex.5 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_5_Fishery",rep.new)], " ")[[1]][3])
F.Selex.6 = as.numeric(strsplit(rep.new[grep("SizeSel_1P_6_Fishery",rep.new)], " ")[[1]][3])
S.Selex.1 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_1_Survey",rep.new)], " ")[[1]][3])
S.Selex.2 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_2_Survey",rep.new)], " ")[[1]][3])
S.Selex.3 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_3_Survey",rep.new)], " ")[[1]][3])
S.Selex.4 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_4_Survey",rep.new)], " ")[[1]][3])
S.Selex.5 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_5_Survey",rep.new)], " ")[[1]][3])
S.Selex.6 = as.numeric(strsplit(rep.new[grep("SizeSel_2P_6_Survey",rep.new)], " ")[[1]][3])
if (need.blocks == TRUE){
F.Selex.1.adj = ifelse(overfished.counter == 0, 0,
as.numeric(strsplit(rep.new[grep("SizeSel_1P_1_Fishery_BLK1",rep.new)], " ")[[1]][3]) )
}
#x = start.survey:(y-pre.fishery.yrs - 1)
x = seq(start.survey,(y - 1), 2)
VulBioEst <- mapply(function(x)
VulBioEst = as.numeric(strsplit(rep.new[grep(paste(2,"Survey",x,sep=" "),rep.new)], " ")[[1]][6]), x = x)
Depl = SB/SB.virgin
RepSummary <- list()
RepSummary$SB <- SB
RepSummary$SB.virgin <- SB.virgin
RepSummary$TotBio <- TotBio
if (do.forecast > 0 ){
RepSummary$OFL <- OFL
RepSummary$ACL <- ACL
RepSummary$ForeCatch <- ForeCatch
}
RepSummary$VulBioEst <- VulBioEst
RepSummary$Depl <- Depl
RepSummary$FSPR <- FSPR
RepSummary$FMSY <-FMSY
RepSummary$FSelex<- c(F.Selex.1, F.Selex.2, F.Selex.3, F.Selex.4, F.Selex.5, F.Selex.6)
RepSummary$R0<- R0
RepSummary$SSelex<- c(S.Selex.1, S.Selex.2, S.Selex.3, S.Selex.4, S.Selex.5, S.Selex.6)
RepSummary$M<- cbind(M.f,M.m)
RepSummary$Recruits<- Recruits
RepSummary$Lmin<- Lmin
RepSummary$Lmax<- Lmax
RepSummary$k <- k
RepSummary$cv.young <- cv.young
RepSummary$cv.old <- cv.old
if(need.blocks == TRUE){
RepSummary$F.selex.1.adj <- F.Selex.1.adj
}
return(RepSummary)
}
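# Hypothetical usage sketch (assumes rep.new holds the lines of a Stock
# Synthesis report file, and that the globals this function reads from the
# calling environment -- need.blocks, overfished.counter, start.survey -- are
# defined):
# rep.new <- readLines("Report.sso")
# out <- Rep_Summary(rep.new, y = 100, pre.fishery.yrs = 50, do.forecast = 1)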
|
/archive_10_15/functions/Rep_Summary.R
|
no_license
|
chantelwetzel-noaa/Ch3_DataLoss
|
R
| false
| false
| 5,034
|
r
|
library(tidyverse)
library(here)
library(brms)
library(mice)
library(magrittr) # for the %<>% compound-assignment pipe used below (not attached by library(tidyverse))
bashInput <- commandArgs(trailingOnly = TRUE)
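# Expects a single integer command-line argument (1-7) selecting which model
# to fit below, e.g.:  Rscript fitModelsCluster.R 2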
#Function Defining
split_facets <- function(x) {
facet_expr <- unlist(x[["facet"]][["params"]][c("cols", "rows", "facets")])
facet_levels <- lapply(facet_expr, rlang::eval_tidy, data = x[["data"]])
facet_id <- do.call(interaction, facet_levels)
panel_data <- split(x[["data"]], facet_id)
plots <- vector("list", length(panel_data))
for (ii in seq_along(plots)) {
plots[[ii]] <- x
plots[[ii]][["data"]] <- panel_data[[ii]]
plots[[ii]][["facet"]] <- facet_null()
}
plots
}
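# Hypothetical usage sketch (mtcars example is illustrative, not from this project):
# p <- ggplot(mtcars, aes(mpg, wt)) + geom_point() + facet_wrap(~cyl)
# panels <- split_facets(p)  # list of 3 single-panel plots, one per cyl level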
# I don't need this anymore:
#rstan_options(auto_write = TRUE)
linMap <- function(x, from, to) {
# Shifting the vector so that min(x) == 0
x <- x - min(x)
# Scaling to the range of [0, 1]
x <- x / max(x)
# Scaling to the needed amplitude
x <- x * (to - from)
# Shifting to the needed level
x + from
}
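# e.g. linMap(c(1, 3, 5), from = 0, to = 1) returns 0.0 0.5 1.0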
# this transforms my age regressors.
#ARQs<-readRDS("A_RawData/ARQs.rds")
ARQs<-readRDS("A_RawData/ARQs.rds")
ARQs$age=as.numeric(as.character(ARQs$age))
# Here I bring the quadratic and linear age terms onto the same scale as the
# other predictors, so that we can better interpret the betas.
ARQs$LinearAgeTrans<-(poly(ARQs$age,2)[,1])
ARQs$QuadraticAgeTrans<-(poly(ARQs$age,2)[,2]*-1)
#TidyQuestionnaireLongSums$TestPart
#TidyQuestionnaireLongSums<-
# get only the Sumscores
# some datawrangling
normalEng<-ARQs%>%mutate(DiffEng=(HowOften-Recom)
)%>%mutate(
NormalizedDiffEng=as.vector(scale(DiffEng)),
Bin= case_when(
age<=13~1,
(age<=15 & age>13)~2,
(age<=17 & age>15)~2,
(age<=19 & age>17)~3,
(age>19)~3
#(age>=22)~4
),
SubScale=case_when(
Scale=="Rebellious"~"Rebellious",
Scale=="Reckless"~"Reckless",
Scale=="Antisocial"~"Antisocial",
Scale=="Thrill_Seeking"~"Thrill Seeking"
),
Presence=case_when(
Scale=="Rebellious"~"A",
Scale=="Reckless"~"A",
Scale=="Antisocial"~"A",
Scale=="Thrill_Seeking"~"B"
),
Bin=as.factor(Bin)
)
cftPath = "A_RawData/Covariates/cft/logs"
FilesCFT=list.files(path = cftPath)
cftResults=tibble(
cftScore=1:length(FilesCFT),
subject=1:length(FilesCFT)
)
for (i in 1:length(FilesCFT)){
cft=read.delim(paste0(cftPath,"/",FilesCFT[i]),header = FALSE)
#print(Files[i])
cftResults$cftScore[i]=as.numeric(as.character(cft[3,2]))
cftResults$subject[i]=strsplit(FilesCFT[i], "_")[[1]][3]
}
cftResults=unique(cftResults)
numbersPath = "A_RawData/Covariates/Numbers/logs"
FilesNumbers=list.files(path = numbersPath)
numbersResults=tibble(
numberFowards=1:length(FilesNumbers),
numberBackwards=1:length(FilesNumbers),
subject=1:length(FilesNumbers)
)
for (i in 1:length(FilesNumbers)){
numbers=read.delim(paste0(numbersPath,"/",FilesNumbers[i]),header = FALSE)
#print(Files[i])
numbersResults$numberFowards[i]=as.numeric(as.character(numbers[5,2]))
numbersResults$numberBackwards[i]=as.numeric(as.character(numbers[6,2]))
numbersResults$subject[i]=as.numeric(strsplit(FilesNumbers[i], "_")[[1]][3])
}
numbersResults=unique(numbersResults)
numbersResults<-numbersResults%>%mutate(numbersTotal=numberFowards+numberBackwards)
Eyes<-read_csv(file = "./A_RawData/Covariates/MindInEyesAll.csv")
Eyes%<>%mutate(subject=as.double(subject))%>%select(-X1)
cftResults<-cftResults%>%mutate(subject=as.double(subject))
normalEng%<>%as_tibble()%>%left_join(x=.,y=numbersResults%>%select(subject,numbersTotal),by="subject")%>%
left_join(x=.,y=cftResults,by="subject")%>%left_join(x=.,y=Eyes,by="subject")
# normalEng[is.na(normalEng$MinInEyesSum),]$MinInEyesSum=mean(normalEng$MinInEyesSum,na.rm=T)
# normalEng[is.na(normalEng$numbersTotal),]$numbersTotal=mean(normalEng$numbersTotal,na.rm=T)
# normalEng[is.na(normalEng$cftScore),]$cftScore=mean(normalEng$cftScore,na.rm=T)
if (as.numeric(bashInput)==1){
MultiNoIllegal <- brm(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany*LinearAgeTrans+HowMany*QuadraticAgeTrans+(1|p|subject)+(1|Scale),
data = normalEng[normalEng$Scale!="Thrill_Seeking" & normalEng$Scale!="Reckless",],
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10))
saveRDS(MultiNoIllegal,file="../ModelFits/MultiNoIllegal.rds")
} else if (as.numeric(bashInput)==2){
MultiIQ <- brm(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany*LinearAgeTrans+HowMany*QuadraticAgeTrans+Risk*LinearAgeTrans+Risk*QuadraticAgeTrans+(1|p|subject)+(1|Scale),
data = normalEng[normalEng$Scale!="Thrill_Seeking",],
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10)
)
saveRDS(MultiIQ,file="../ModelFits/MultiRiskAgeIna.rds")
} else if (as.numeric(bashInput)==3){
library(mice)
imp <- mice(normalEng[normalEng$Scale!="Thrill_Seeking",], m = 10)
MultiIQ <- brm_multiple(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany*LinearAgeTrans+HowMany*QuadraticAgeTrans+MinInEyesSum*LinearAgeTrans+MinInEyesSum*QuadraticAgeTrans+
cftScore*LinearAgeTrans+cftScore*QuadraticAgeTrans+numbersTotal*LinearAgeTrans+numbersTotal*QuadraticAgeTrans+(1|p|subject)+(1|Scale),
data = imp,
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10)
)
saveRDS(MultiIQ,file="../ModelFits/MultiThrillSeekQuadLinMapC_IQ.rds")
}else if (as.numeric(bashInput)==4){
# library(mice)
imp <- mice(normalEng[normalEng$Scale=="Thrill_Seeking",], m = 10, print = TRUE)
MultiIQNoThrill <- brm_multiple(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany*LinearAgeTrans+HowMany*QuadraticAgeTrans+MinInEyesSum*LinearAgeTrans+MinInEyesSum*QuadraticAgeTrans+
cftScore*LinearAgeTrans+cftScore*QuadraticAgeTrans+numbersTotal*LinearAgeTrans+numbersTotal*QuadraticAgeTrans+(1|p|subject)+(1|Scale),
data = imp,
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10)
)
saveRDS(MultiIQNoThrill,file="../ModelFits/MultiNoThrillSeekQuadLinMapC_IQ.rds")
}else if (as.numeric(bashInput)==5){
MultiNoAgeQ <- brm(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany+(1|p|subject)+(1|Scale),
data = normalEng[normalEng$Scale!="Thrill_Seeking",],
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10))
saveRDS(MultiNoAgeQ,file="../ModelFits/MultiNoAgeNoThrill.rds")
}else if (as.numeric(bashInput)==6){
MultiNoAgeQ <- brm(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany+(1|p|subject)+(1|Scale),
data = normalEng[normalEng$Scale=="Thrill_Seeking",],
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10))
saveRDS(MultiNoAgeQ,file="../ModelFits/MultiNoAgeThrill.rds")
} else if (as.numeric(bashInput)==7){
MultiIQ <- brm(
formula = mvbind(Recom+1,HowOften+1)~Risk+sex+HowMany*LinearAgeTrans+HowMany*QuadraticAgeTrans+Risk*LinearAgeTrans+Risk*QuadraticAgeTrans+(1|p|subject)+(1|Scale),
data = normalEng[normalEng$Scale=="Thrill_Seeking",],
family="cumulative",
# this next line is only to keep the example small in size!
chains = 6, cores = 6,init=0, iter = 20000,control = list(adapt_delta = 0.9999999999, stepsize = 0.01, max_treedepth = 10)
)
saveRDS(MultiIQ,file="../ModelFits/MultiRiskAgeIna_Thrill.rds")
}
|
/fitModelsCluster.R
|
no_license
|
NomisCiri/Risk_Gaps
|
R
| false
| false
| 8,010
|
r
|
##Load File:
data <- read.csv("subset_household_power_consumption.txt", sep=";")
data$DateTime <- strptime(paste(data$Date, data$Time), format='%d/%m/%Y %H:%M:%S')
##Plot Graph:
plot(data$DateTime, data$Global_active_power, type="l", xlab="",
ylab="Global Active Power (kilowatts)")
#Save picture to file:
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
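# Equivalent sketch: render straight into the png device instead of copying
# the screen device (avoids occasional dev.copy() sizing differences):
# png(file = "plot2.png", width = 480, height = 480)
# plot(data$DateTime, data$Global_active_power, type = "l", xlab = "",
#      ylab = "Global Active Power (kilowatts)")
# dev.off()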
|
/Plot2.R
|
no_license
|
dhelquist/ExData_Plotting1
|
R
| false
| false
| 378
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/normalize_isobaric.R
\name{normalize_isobaric}
\alias{normalize_isobaric}
\title{Examine and Apply Isobaric Normalization}
\usage{
normalize_isobaric(
omicsData,
exp_cname = NULL,
apply_norm = FALSE,
channel_cname = NULL,
refpool_channel = NULL,
refpool_cname = NULL,
refpool_notation = NULL
)
}
\arguments{
\item{omicsData}{an object of the class 'isobaricpepData'}
\item{exp_cname}{character string specifying the name of the column
containing the experiment/plate information in \code{f_data}}
\item{apply_norm}{logical, indicates whether normalization should be applied
to omicsData$e_data}
\item{channel_cname}{optional character string specifying the name of the
column containing the instrument channel a sample was run on in
\code{f_data}. This argument is optional. See Details for how to specify
information regarding reference pool samples. If using this argument, the
'refpool_channel' argument must also be specified; in this case,
'refpool_cname' and 'refpool_notation' should not be specified.}
\item{refpool_channel}{optional character string specifying which channel
contains the reference pool sample. Only used when the reference pool channel
is the same from experiment to experiment. This argument is optional. See Details for
how to specify information regarding reference pool samples. If using this
argument, the 'channel_cname' argument must also be specified; in this
case, 'refpool_cname' and 'refpool_notation' should not be specified.}
\item{refpool_cname}{optional character string specifying the name of the
column containing information about which samples are reference samples in
\code{f_data}. This argument is optional. See Details for how to specify
information regarding reference pool samples. If using this argument, the
'refpool_notation' argument must also be specified; in this case,
'channel_cname' and 'refpool_channel' should not be specified.}
\item{refpool_notation}{optional character string specifying the value in the
refpool_channel column which denotes that a sample is a reference sample.
This argument is optional. See Details for how to specify information
regarding reference pool samples. If using this argument, the
'refpool_cname' argument must also be specified; in this case,
'channel_cname' and 'refpool_channel' should not be specified.}
}
\description{
Examine reference pool samples and apply normalization of study samples to
their corresponding reference pool sample
}
\details{
There are two ways to specify the information needed for identifying
reference samples which should be used for normalization: \enumerate{ \item
specify \code{channel_cname} and \code{refpool_channel}. This should be
used when the reference sample for each experiment/plate was always located
in the same channel. Here \code{channel_cname} gives the column name for
the column in \code{f_data} which gives information about which channel
each sample was run on, and \code{refpool_channel} is a character string
specifying the value in \code{channel_colname} that corresponds to the
reference sample channel. \item specify \code{refpool_cname} and
\code{refpool_notation}. This should be used when the reference sample is
not in a consistent channel across experiments/plates. Here,
\code{refpool_cname} gives the name of the column in \code{f_data} which
indicates whether a sample is a reference or not, and
\code{refpool_notation} is a character string giving the value used to
denote a reference sample in that column. } In both cases you must specify
\code{exp_cname} which gives the column name for the column in
\code{f_data} containing information about which experiment/plate a sample
was run on.
}
\examples{
library(pmartRdata)
myiso <- edata_transform(isobaric_object, "log2")
# Don't apply the normalization quite yet; can use summary() and plot() to view reference pool samples
myiso_refpools <- normalize_isobaric(omicsData = myiso, exp_cname = "Plex",
apply_norm = FALSE,
refpool_cname = "Virus",
refpool_notation = "Pool")
summary(myiso_refpools)
# Now apply the normalization; can use plot() to view the study samples after reference pool normalization
myiso_norm <- normalize_isobaric(omicsData = myiso, exp_cname = "Plex",
apply_norm = TRUE,
refpool_cname = "Virus",
refpool_notation = "Pool")
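# Hedged sketch of the alternative specification (Details, item 1); the
# column name "Channel" and the value "Ref" are assumptions, not columns
# known to exist in isobaric_object:
# myiso_norm2 <- normalize_isobaric(omicsData = myiso, exp_cname = "Plex",
#                                   apply_norm = TRUE,
#                                   channel_cname = "Channel",
#                                   refpool_channel = "Ref")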
}
|
/man/normalize_isobaric.Rd
|
permissive
|
clabornd/pmartR
|
R
| false
| true
| 4,643
|
rd
|
rm(list = ls())
setwd("~/project/colon cancer/siOTX2/")
library(DESeq2)
library(ggplot2)
#inputf <- read.delim("tophat_featureCounts.txt",comment.char = "#")
inputf <- read.delim("star_featureCounts.txt",sep = "\t",comment.char = "#")
names(inputf) <- c("Geneid","Chr","Start","End","Starnd","Length","siKLF3-1","siKLF3-2","siNC-1","siNC-2","siOTX2-1","siOTX2-2")
rownames(inputf) <- inputf$Geneid
# OTX2
inputf <- inputf[,c(9:12)]
data <- inputf[rowSums(inputf > 3) > 2,]
cData <- data.frame(row.names = colnames(data),
condition = rep(factor(c("control","OTX2")),each=2))
# KLF3
inputf <- inputf[,c(7:10)]
data <- inputf[rowSums(inputf > 3) > 2,]
cData <- data.frame(row.names = colnames(data),
condition = rep(factor(c("KLF3","control")),each=2))
dds <- DESeqDataSetFromMatrix(countData = data,colData = cData,design = ~ condition)
dds <- DESeq(dds)
dds
res <- results(dds,contrast = c("condition","KLF3","control"))
res <- results(dds,contrast = c("condition","OTX2","control"))
res <- res[order(res$pvalue),]
summary(res)
resdata <- merge(as.data.frame(res),as.data.frame(counts(dds,normalized = TRUE)),by='row.names',sort=FALSE)
sig <- res[!is.na(res$pvalue) & res$pvalue < 0.05,]
sig.deseq <- rownames(sig)
table(res$padj < 0.05)
resdata$change <- as.factor(
ifelse(
resdata$pvalue < 0.05 & abs(resdata$log2FoldChange) > 0.5,
ifelse(resdata$log2FoldChange > 0.5,'Up','Down'), # 2^0.5 = 1.414
'Nd'
)
)
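# Added sketch: quick tally of the Up/Down/Nd classification
table(resdata$change)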
up <- resdata[resdata$change == "Up",]
down <- resdata[resdata$change == "Down",]
colnames(up)
up <- up[,c("Row.names","log2FoldChange","pvalue","siKLF3-1","siKLF3-2","siNC-1","siNC-2","change")]
down <- down[,c("Row.names","log2FoldChange","pvalue","siKLF3-1","siKLF3-2","siNC-1","siNC-2","change")]
write.table(up,file = "~/project/colon cancer/siOTX2/siKLF3_up_pvalue0.05_gene.txt",sep = "\t",quote = FALSE,row.names = FALSE)
write.table(down,file = "~/project/colon cancer/siOTX2/siKLF3_down_pvalue0.05_gene.txt",sep = "\t",quote = FALSE,row.names = FALSE)
up <- up[,c("Row.names","log2FoldChange","pvalue","siOTX2-1","siOTX2-2","siNC-1","siNC-2","change")]
down <- down[,c("Row.names","log2FoldChange","pvalue","siOTX2-1","siOTX2-2","siNC-1","siNC-2","change")]
write.table(up,file = "~/project/colon cancer/siOTX2/siOTX2_up_pvalue0.05_gene.txt",sep = "\t",quote = FALSE,row.names = FALSE)
write.table(down,file = "~/project/colon cancer/siOTX2/siOTX2-1_down_pvalue0.05_gene.txt",sep = "\t",quote = FALSE,row.names = FALSE)
klfdown <- read.table("~/project/colon cancer/siOTX2/siKLF3_down_pvalue0.05_gene.txt",header = TRUE)
otxdown <- read.table("~/project/colon cancer/siOTX2/siOTX2_down_pvalue0.05_gene.txt",header = TRUE)
a <- intersect(klfdown$Row.names,otxdown$Row.names)
b <- sort(a)
write.table(b,file = "~/project/colon cancer/siOTX2/siKLF3_siOTX2_down_overlap.txt",sep = "\t",quote = FALSE,row.names = FALSE,col.names = FALSE)
library(clusterProfiler)
bp <- enrichGO(gene = down$Row.names,
keyType = "SYMBOL",
OrgDb = "org.Hs.eg.db",
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.05,
qvalueCutoff = 0.05
)
bp_0.05 <- subset(bp@result,bp@result$pvalue < 0.05,)[c(1,3,4,6,7,8,9,10,11,12),]
plotbp <- function(data,tf) {
p <- ggplot(data,aes(x = -log10(pvalue),y = reorder(Description,-log10(pvalue)),fill=-log10(pvalue))) +
geom_bar(stat = "identity",width = 0.8,position = position_dodge(width = 0.8)) +
theme_classic(base_size = 18,base_family = "sans") +
scale_x_continuous(expand = c(0,0),limits = c(0,max(ceiling(-log10(bp_0.05$pvalue))))) +
scale_fill_gradient(low = "black",high = "black") +
labs(title = paste("BP of",tf,"down regulation gene",sep = " "),x="-log10(pValue)",y=NULL) +
guides(fill=FALSE) +
theme(plot.title = element_text(size = 20,hjust = 1.2,vjust = 0.5),
plot.margin = margin(t=0.3,r=0.3,b=0.3,l=0.3,unit = "cm"),
axis.text = element_text(colour = 'black',size = 18))
p
}
plotbp(data = bp_0.05,tf = "siKLF3")
ggsave(filename = paste("~/project/colon cancer/siOTX2/siKLF3_down_bp.pdf",sep = ""),height = 3.9,width = 7)
library(biomaRt)
human <- useMart(dataset = "hsapiens_gene_ensembl",biomart = "ensembl",host="http://uswest.ensembl.org")
entrezgene <- getBM(mart = human,attributes = c('external_gene_name','entrezgene_id'),filters = "external_gene_name",values = klfdown$Row.names)
kegg <- enrichKEGG(gene = entrezgene[,2],
keyType = "kegg",
organism = "hsa",
pAdjustMethod = "BH",
pvalueCutoff = 0.05,
qvalueCutoff = 0.05
)
library(scales)
kegg_0.05 <- kegg@result[kegg@result$pvalue < 0.05,]#[1:10,]
plotkegg <- function(data,tf){
p <- ggplot(data,aes(x = -log10(pvalue),y = reorder(Description,-log10(pvalue)),fill=-log10(pvalue))) +
geom_bar(stat = "identity",width = 0.8,position = position_dodge(width = 0.8)) +
theme_classic(base_size = 14,base_family = "sans") +
scale_x_continuous(expand = c(0,0),limits = c(0,3)) +#limits = c(0,max(ceiling(-log10(kegg@result$pvalue))))) +
scale_fill_gradient(low = "#08519C",high = "#08519C") +
labs(title = paste("KEGG pathway of",tf,"down regulation gene",sep = " "),x="-log10(pValue)",y=NULL) +
guides(fill=FALSE) +
theme(plot.title = element_text(size = 14,hjust = 1.1,vjust = 0.5),
plot.margin = margin(t=0,r=0.3,b=0,l=0,unit = "cm"),
axis.text = element_text(colour = 'black',size = 14))
p
}
plotkegg(data = kegg_0.05,tf = "siKLF3")
ggsave(filename = paste("~/project/colon cancer/siOTX2/siKLF3_down_kegg.pdf",sep = ""),height = 3.9,width = 4.8)
library(clusterProfiler)
bp <- enrichGO(gene = otxdown$Row.names,
keyType = "SYMBOL",
OrgDb = "org.Hs.eg.db",
ont = "BP",
pAdjustMethod = "BH",
pvalueCutoff = 0.05,
qvalueCutoff = 0.05
)
bp_0.05 <- subset(bp@result,bp@result$pvalue < 0.05,)[c(1,4,5,6,7,8,9,10,11,12),]
plotbp <- function(data,tf) {
p <- ggplot(data,aes(x = -log10(pvalue),y = reorder(Description,-log10(pvalue)),fill=-log10(pvalue))) +
geom_bar(stat = "identity",width = 0.8,position = position_dodge(width = 0.8)) +
theme_classic(base_size = 18,base_family = "sans") +
scale_x_continuous(expand = c(0,0),limits = c(0,max(ceiling(-log10(bp_0.05$pvalue))))) +
scale_fill_gradient(low = "black",high = "black") +
labs(title = paste("BP of",tf,"down regulation gene",sep = " "),x="-log10(pValue)",y=NULL) +
guides(fill=FALSE) +
theme(plot.title = element_text(size = 20,hjust = 1.2,vjust = 0.5),
plot.margin = margin(t=0.3,r=0.3,b=0.3,l=0.3,unit = "cm"),
axis.text = element_text(colour = 'black',size = 18))
p
}
plotbp(data = bp_0.05,tf = "siOTX2")
ggsave(filename = paste("~/project/colon cancer/siOTX2/siOTX2_down_bp.pdf",sep = ""),height = 5,width = 9)
library(biomaRt)
human <- useMart(dataset = "hsapiens_gene_ensembl",biomart = "ensembl",host="http://uswest.ensembl.org")
entrezgene <- getBM(mart = human,attributes = c('external_gene_name','entrezgene_id'),filters = "external_gene_name",values = otxdown$Row.names)
kegg <- enrichKEGG(gene = entrezgene[,2],
keyType = "kegg",
organism = "hsa",
pAdjustMethod = "BH",
pvalueCutoff = 0.05,
qvalueCutoff = 0.05
)
library(scales)
kegg_0.05 <- kegg@result[kegg@result$pvalue < 0.05,]#[1:10,]
plotkegg <- function(data,tf){
p <- ggplot(data,aes(x = -log10(pvalue),y = reorder(Description,-log10(pvalue)),fill=-log10(pvalue))) +
geom_bar(stat = "identity",width = 0.8,position = position_dodge(width = 0.8)) +
theme_classic(base_size = 14,base_family = "sans") +
scale_x_continuous(expand = c(0,0),limits = c(0,3)) +#limits = c(0,max(ceiling(-log10(kegg@result$pvalue))))) +
scale_fill_gradient(low = "#08519C",high = "#08519C") +
labs(title = paste("KEGG pathway of",tf,"down regulation gene",sep = " "),x="-log10(pValue)",y=NULL) +
guides(fill=FALSE) +
theme(plot.title = element_text(size = 14,hjust = 1.1,vjust = 0.5),
plot.margin = margin(t=0.3,r=0.3,b=0.3,l=0.3,unit = "cm"),
axis.text = element_text(colour = 'black',size = 14))
p
}
plotkegg(data = kegg_0.05,tf = "siOTX2")
ggsave(filename = paste("~/project/colon cancer/siOTX2/siOTX2_down_kegg.pdf",sep = ""),height = 3,width = 4.8)
vsd <- getVarianceStabilizedData(dds)
heatmap(cor(vsd),cexCol = 1,cexRow = 1)
pr <- prcomp(t(vsd))
plot(pr$x,col="white",main="PC plot")
text(pr$x[,1],pr$x[,2],labels=colnames(vsd),
cex=0.7)
biplot(pr,cex=c(1,0.5),main="KLF3",
col=c("black","grey"))
# HCT116 FPKM -------------------------------------------------------------
file <- read.delim("genes.fpkm_tracking",sep = "\t")
data <- file[,c(5,10,14,18,22,26,30)]
write.table(data,file = "siKLF3 and siOTX2 FPKM.txt",sep = "\t",quote = FALSE,col.names = TRUE,row.names = FALSE)
df <- data[data$gene_short_name == "KLF3",]
df <- data[data$gene_short_name == "OTX2",]
library(reshape2)
df <- melt(df[,1:5],id.vars="gene_short_name",variable.name = "sample",value.name = "fpkm")
df <- cbind(df,type=rep(c("siKLF3","siNC"),each=2))
ggplot(df,aes(x = factor(type),y = fpkm,color=type)) +
geom_point(shape=19,alpha=0.8) +
#geom_point(shape=21,colour="white",fill="#1B9E77",alpha=0.6) + #,position = position_jitter(width = 0.25,height = 0)
theme_classic(base_size = 18,base_family = "sans",base_line_size = 1.1) +
labs(title = "KLF3",x=NULL,y="Gene expression (FPKM)",color=NULL) +
scale_y_continuous(expand = c(0,0.01),limits = c(0,9),breaks = c(0,3,6,9)) +
#scale_x_discrete(breaks=times,labels=c(0,2,4,7,10)) +
theme(aspect.ratio = 1/0.618,
plot.margin = margin(t=2,b=2,l=0.5,r=0.5,unit = "cm"),
plot.title = element_text(size = 18,face = "bold",margin = margin(b=0,unit = "cm"),hjust = 0.5,vjust = 0.5),
axis.title.y = element_text(margin = margin(t = 0,b = 0,l = 0,r = 0.2,unit = "cm")),
axis.text = element_text(colour = "black",size = 18,margin = margin(t = 0.1,b = 0.1,l = 0.1,r = 0.1,unit = "cm")),
axis.text.x = element_text(angle = 0,hjust = 0.5)) +
guides(color=FALSE)
# overlap with chip-seq data ----------------------------------------------
klfdown <- read.table("~/project/colon cancer/siOTX2/siKLF3_down_pvalue0.05_gene.txt",header = TRUE)
otxdown <- read.table("~/project/colon cancer/siOTX2/siOTX2_down_pvalue0.05_gene.txt",header = TRUE)
klfup <- read.delim("~/project/colon cancer/siOTX2/siKLF3_up_pvalue0.05_gene.txt",header = TRUE)
otxup <- read.delim("~/project/colon cancer/siOTX2/siOTX2_up_pvalue0.05_gene.txt",header = TRUE)
###### Cistrome Data: human KLF3
file <- read.delim("~/project/colon cancer/siOTX2/klf3 target gene /38790_gene_score_5fold.txt",sep = "\t",comment.char = "#",header = FALSE)
colnames(file) <- c("chr","start","end","refseq","score","strand","symbol")
a <- unique(file$symbol)[1:500]
a <- unique(file$symbol)[500:1000]
a <- unique(file$symbol)[1000:1500]
a <- unique(file$symbol)[1500:2000]
b <- intersect(klfdown$Row.names,a)
b
c <- intersect(klfup$Row.names,a)
c
###### Cistrome Data: human OTX2
# 6540 cell human OTX2 epithelium
inputf <- read.delim("~/project/colon cancer/siOTX2/50896_gene_score_5fold.txt",comment.char = "#",header = FALSE)
colnames(inputf) <- c("chr","start","end","refseq","score","strand","symbol")
# D341 cell: human medulloblastoma
file1 <- read.delim("~/project/colon cancer/siOTX2/OTX2 human D341 cell/74350_gene_score_5fold.txt",comment.char = "#",header = FALSE)
colnames(file1) <- c("chr","start","end","refseq","score","strand","symbol")
file2 <- read.delim("~/project/colon cancer/siOTX2/OTX2 human D341 cell/74352_gene_score_5fold.txt",comment.char = "#",header = FALSE)
colnames(file2) <- c("chr","start","end","refseq","score","strand","symbol")
file3 <- read.delim("~/project/colon cancer/siOTX2/OTX2 human D341 cell/74358_gene_score_5fold.txt",comment.char = "#",header = FALSE)
colnames(file3) <- c("chr","start","end","refseq","score","strand","symbol")
list <- c(unique(file1$symbol)[1500:2000],unique(file2$symbol)[1500:2000],unique(file3$symbol)[1500:2000])
list <- c(unique(file1$symbol)[1:500],unique(file2$symbol)[1:500],unique(file3$symbol)[1:500])
list <- unique(list)
a <- unique(list)[1:500]
a <- unique(list)[500:1000]
a <- unique(list)[1000:1500]
a <- unique(list)[1500:2000]
b <- intersect(otxdown$Row.names,list)
b
c <- intersect(otxup$Row.names,list)
c
killDbConnections = function () {
all_cons = dbListConnections(MySQL())
for(con in all_cons){dbDisconnect(con)}
}
args=commandArgs(T)
signature = args[1] ## signature genelist
dataset = args[2] ## selected datasets
parameter = args[3] ## parameters
signatures = strsplit(signature,",")[[1]] ## split the signature genelist into array
if(is.na(signatures[4])){signatures[4] = ""}
datasets = strsplit(dataset,",")[[1]] ## split the datasets info into array
datasets = datasets[datasets != ""]
parameters = strsplit(parameter,",")[[1]]
dbs = "GE_SF" ## database
symbol1 = parameters[1]
symbol2 = parameters[2]
symbol3 = parameters[3]
symbol4 = parameters[4]
method = parameters[5] ## "pearson" or "spearman" or "kendall"
outputdir = parameters[6] ## outputdir
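## Example invocation (sketch; the gene IDs, table name and output path below
## are hypothetical placeholders, not values from this repository):
## Rscript corr.R "ENSG0001,ENSG0002,," "tcga_coad," "GENE1,GENE2,,,pearson,/tmp/out.pdf"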
.libPaths(c("--------",.libPaths())) ## add the specific directory of RMySQL R package
suppressPackageStartupMessages(library("RMySQL"))
killDbConnections()
#mydb = dbConnect(MySQL(), user='--------', password='--------', dbname=dbs) ## connect to mysql database
mydb = dbConnect(MySQL(), user='--------', password='--------', dbname=dbs,unix.socket="--------")
## Gene A:
if(signatures[3] != ""){
symbol1 = paste(symbol1,symbol3,sep = "/")
signatures_a = c(signatures[1],signatures[3])
}else{signatures_a = signatures[1]}
df_a = as.matrix(array(data = 0,dim = c(0,length(signatures_a))))
for(i in 1:length(datasets)){
table = datasets[i]
df_t=t(dbGetQuery(mydb,paste("SELECT * FROM ",table," WHERE geneid IN ('",paste(signatures_a, collapse = "','"),"')",sep="")))
colnames(df_t) = df_t[1,]
df_t = df_t[-1,,drop = F]
df_a = rbind(df_a,df_t)
}
storage.mode(df_a) = "numeric"
df_a = df_a[,signatures_a,drop = F]
if(signatures[3] != ""){
df_a = log2(df_a + 0.001)
df_a = df_a[,1,drop = F] - df_a[,2,drop = F]
df_a = 2^df_a
colnames(df_a) = signatures_a[1]
}
## Gene B:
if(signatures[4] != ""){
symbol2 = paste(symbol2,symbol4,sep = "/")
signatures_b = c(signatures[2],signatures[4])
}else{signatures_b = signatures[2]}
df_b = as.matrix(array(data = 0,dim = c(0,length(signatures_b))))
for(i in 1:length(datasets)){
table = datasets[i]
df_t=t(dbGetQuery(mydb,paste("SELECT * FROM ",table," WHERE geneid IN ('",paste(signatures_b, collapse = "','"),"')",sep="")))
colnames(df_t) = df_t[1,]
df_t = df_t[-1,,drop = F]
df_b = rbind(df_b,df_t)
}
storage.mode(df_b) = "numeric"
df_b = df_b[,signatures_b,drop = F]
if(signatures[4] != "" ){
df_b = log2(df_b + 0.001)
df_b = df_b[,1,drop = F] - df_b[,2,drop = F]
df_b = 2^df_b
colnames(df_b) = signatures_b[1]
}
df = cbind(df_a,df_b)
### build the integrated table
### single gene
colnames(df)[colnames(df) == signatures[1]] = symbol1
colnames(df)[colnames(df) == signatures[2]] = symbol2
pdf(file = outputdir,title="Result Display",width = 6,height = 5.5)
par(mar=c(4.5, 5.1, 1.1, 2.1))
options(warn=-1)
rvalue = signif(cor(x = df[,1], y = df[,2],method = method),2)
cpvalue = signif(as.numeric(cor.test(x = df[,1], y = df[,2],method = method)[3]),2)
plot(x = log2(df[,1] + 1),y = log2(df[,2] + 1),main = NULL,cex.lab = 1.5,
xlab = paste("log2(",colnames(df)[1]," TPM)",sep = ""),
ylab = paste("log2(",colnames(df)[2]," TPM)",sep = ""), pch = 19,cex=0.5)
range_y = range(log2(df[,2] + 1))
text(x = max(log2(df[,1] + 1)) * 1.03,y = max(log2(df[,2] + 1)) - (range_y[2] - range_y[1])/15,labels = paste("p-value = ",as.character(cpvalue),"\nR = ",as.character(rvalue),sep=""),cex = 1.3,col = "black",pos = 2)
a = dev.off()
|
/RNA-seq/siOTX2/siOTX2 and siKLF3 DESeq2.R
|
no_license
|
ZhaoChen96/CRC
|
R
| false
| false
| 16,194
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/logistic_pseudononmy.R
\name{future.logis}
\alias{future.logis}
\title{Calculate expected optimality for a given trajectory using sequential approach.
We assume a logistic model for the response.}
\usage{
future.logis(D.fix, n.r, z.probs, beta, int, sim, code = 0, lossfunc, ...)
}
\arguments{
\item{D.fix}{Design matrix constructed using the true covariates in the experiment so far}
\item{n.r}{length of trajectory to be simulated}
\item{z.probs}{vector of probabilities for each level of covariate z}
\item{beta}{current estimate of regression coefficients}
\item{int}{set to T if you allow for treatment-covariate interactions in the model, NULL otherwise}
\item{sim}{number of trajectories to simulate}
\item{code}{set to NULL if (-1,1) coding is used for the treatments. Set to 0 if (0, 1) is used.}
\item{lossfunc}{the objective function to minimize}
\item{...}{further arguments to be passed to <lossfunc>}
}
\value{
the loss of the design matrix which includes the trajectory
}
\description{
Calculate expected optimality for a given trajectory using sequential approach.
We assume a logistic model for the response.
}
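\examples{
## Hedged sketch only: D, b and my.loss are hypothetical objects illustrating
## the documented signature, not values shipped with the package.
\dontrun{
future.logis(D.fix = D, n.r = 5, z.probs = c(0.5, 0.5), beta = b,
             int = NULL, sim = 100, code = 0, lossfunc = my.loss)
}
}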
|
/man/future.logis.Rd
|
no_license
|
mst1g15/biasedcoin
|
R
| false
| true
| 1,241
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.sfn_operations.R
\name{describe_state_machine}
\alias{describe_state_machine}
\title{Describes a state machine}
\usage{
describe_state_machine(stateMachineArn)
}
\arguments{
\item{stateMachineArn}{[required] The Amazon Resource Name (ARN) of the state machine to describe.}
}
\description{
Describes a state machine.
}
\details{
This operation is eventually consistent. The results are best effort and may not reflect very recent updates and changes.
}
\section{Accepted Parameters}{
\preformatted{describe_state_machine(
stateMachineArn = "string"
)
}
}
|
/service/paws.sfn/man/describe_state_machine.Rd
|
permissive
|
CR-Mercado/paws
|
R
| false
| true
| 641
|
rd
|
# move pdfs from sub-folders into one folder
install.packages("stringi")
install.packages("filesstrings")
library(stringr)
library(stringi)
library(filesstrings)
# list all PDF files recursively through each sub-folder
pdf<-list.files("C:/Users/ASUS/Downloads/here",pattern = "\\.pdf$", full.names=T,recursive = TRUE)
for (i in 1:length(pdf)) {file.move(pdf[i], "C:/Users/ASUS/Downloads/here")}
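# Sketch (assumption): to collect the PDFs into a genuinely new folder,
# create it first and move there instead:
# dest <- "C:/Users/ASUS/Downloads/collected_pdfs"
# dir.create(dest, showWarnings = FALSE)
# for (f in pdf) file.move(f, dest)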
|
/moveFiles.R
|
no_license
|
teuzindahuz/spat_ecology
|
R
| false
| false
| 408
|
r
|
## Export: inla.version
## !\name{inla.version}
## !\alias{version}
## !\alias{inla.version}
## !
## !\title{Show the version of the INLA-package}
## !
## !\description{Show the version of the INLA-package}
## !
## !\usage{
## !inla.version(what = c("default", "version", "date"))
## !}
## !
## !\arguments{
## ! \item{what}{What to show version of}
## !}
## !
## !\value{%%
## ! \code{inla.version} displays the current version information using
## ! \code{cat} when \code{what = "default"}, and returns the requested
## ! value for the other specific requests.
## !}
## !%%
## !
## !\author{Havard Rue \email{hrue@r-inla.org}}
## !
## !\examples{
## !## Summary of all
## !inla.version()
## !## The build date
## !inla.version("date")
## !}
`inla.version` <- function(what = c("default", "version", "date")) {
`trim` <- function(string) {
string <- gsub("^[ \t]+", "", string)
string <- gsub("[ \t]+$", "", string)
return(string)
}
    date <- trim("Wed Jan 13 07:39:24 PM +03 2021")
    version <- trim("21.01.13")
what <- match.arg(what)
if (what %in% "default") {
cat("\n")
cat(paste("\n\tR-INLA version ..........: ", version, "\n", sep = ""))
cat(paste("\tDate ....................: ", date, "\n", sep = ""))
cat("\tMaintainers .............: Havard Rue <hrue@r-inla.org>\n")
cat("\t : Finn Lindgren <finn.lindgren@gmail.com>\n")
cat("\t : Elias Teixeira Krainski <elias.krainski@gmail.com>\n")
cat("\tMain web-page ...........: www.r-inla.org\n")
cat("\tDownload-page ...........: inla.r-inla-download.org\n")
cat("\tEmail support ...........: help@r-inla.org\n")
cat("\t : r-inla-discussion-group@googlegroups.com\n")
cat("\tSource-code .............: github.com/hrue/r-inla\n")
cat("\n")
return(invisible())
} else if (what %in% "date") {
return(date)
} else if (what %in% "version") {
return(version)
}
stop("This should not happen.")
}
|
/R/version.R
|
no_license
|
jdsimkin04/shinyinla
|
R
| false
| false
| 2,132
|
r
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relationships.R
\name{relationships}
\alias{relationships}
\alias{paths}
\title{Structural specification functions for seminr package}
\usage{
relationships(...)
paths(from,to)
}
\arguments{
\item{...}{A comma separated list of all the structural relationships in the model. These
paths take the form (from = c(construct_name), to = c(construct_name)).}
\item{to}{The destination construct of a structural path}
\item{from}{The source construct of a structural path}
\item{paths}{The function \code{paths} that specifies the source and destination constructs
for each of the model's structural paths.}
}
\description{
\code{paths} creates the structural paths of a PLS SEM model and \code{relationships} generates
the matrix of paths.
}
\examples{
mobi_sm <- relationships(
paths(from = "Image", to = c("Expectation", "Satisfaction", "Loyalty")),
paths(from = "Expectation", to = c("Quality", "Value", "Satisfaction")),
paths(from = "Quality", to = c("Value", "Satisfaction")),
paths(from = "Value", to = c("Satisfaction")),
paths(from = "Satisfaction", to = c("Complaints", "Loyalty")),
paths(from = "Complaints", to = "Loyalty")
)
}
|
/man/relationships.Rd
|
no_license
|
chendongyu0125/seminr
|
R
| false
| true
| 1,333
|
rd
|
# Naive Bayes
# Importing the dataset
dataset = read.csv('Social_Network_Ads.csv')
dataset = dataset[3:5]
# Encoding the target feature as factor
dataset$Purchased = factor(dataset$Purchased, levels = c(0, 1))
# Splitting the dataset into the Training set and Test set
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$Purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Feature Scaling
training_set[-3] = scale(training_set[-3])
test_set[-3] = scale(test_set[-3])
# Fitting classifier to the Training set
library(e1071)
classifier = naiveBayes(x = training_set[-3],
y = training_set$Purchased)
# Predicting the Test set results
y_pred = predict(classifier, newdata = test_set[-3])
# Making the Confusion Matrix
cm = table(test_set[, 3], y_pred)
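# Illustrative addition: overall accuracy can be read straight off the confusion matrix
accuracy = sum(diag(cm)) / sum(cm)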
# Visualising the Training set results
library(ElemStatLearn)
set = training_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3],
main = 'Naive Bayes (Training set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
# Visualising the Test set results
library(ElemStatLearn)
set = test_set
X1 = seq(min(set[, 1]) - 1, max(set[, 1]) + 1, by = 0.01)
X2 = seq(min(set[, 2]) - 1, max(set[, 2]) + 1, by = 0.01)
grid_set = expand.grid(X1, X2)
colnames(grid_set) = c('Age', 'EstimatedSalary')
y_grid = predict(classifier, newdata = grid_set)
plot(set[, -3], main = 'Naive Bayes (Test set)',
xlab = 'Age', ylab = 'Estimated Salary',
xlim = range(X1), ylim = range(X2))
contour(X1, X2, matrix(as.numeric(y_grid), length(X1), length(X2)), add = TRUE)
points(grid_set, pch = '.', col = ifelse(y_grid == 1, 'springgreen3', 'tomato'))
points(set, pch = 21, bg = ifelse(set[, 3] == 1, 'green4', 'red3'))
|
/Part 3 - Classification/Section 18 - Naive Bayes/naive_bayes_user.R
|
no_license
|
asjadaugust/machine-learning-toolkit
|
R
| false
| false
| 2,271
|
r
|
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fugu.R
\name{fOr}
\alias{fOr}
\title{function-level OR operator}
\usage{
fOr(...)
}
\arguments{
\item{...}{a list of functions}
}
\value{
f s.t. f(a,b,c) is true when f1(a,b,c) || f2(a,b,c) || ...
}
\description{
Return a new function which is true when at least one of the Fs is true
}
\keyword{functional}
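\examples{
## Illustrative sketch only (not from the package source); it assumes fOr
## combines its argument functions with ||, as the Value section describes.
\dontrun{
is.positive <- function(x) x > 0
is.even <- function(x) x \%\% 2 == 0
f <- fOr(is.positive, is.even)
f(3) # TRUE: positive
f(-2) # TRUE: even
f(-3) # FALSE: neither
}
}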
|
/man/fOr.Rd
|
no_license
|
VincentToups/fugu
|
R
| false
| true
| 373
|
rd
|
|
library(shiny)
### Name: HTML
### Title: Mark Characters as HTML
### Aliases: HTML
### ** Examples
el <- div(HTML("I like <u>turtles</u>"))
cat(as.character(el))
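## Expected output (approximately): <div>I like <u>turtles</u></div>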
|
/data/genthat_extracted_code/shiny/examples/HTML.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 169
|
r
|
|
#' Semiparametric location, shape and scale regression
#'
#' \code{lssVarReg.multi} performs a semiparametric location (\eqn{\xi} or xi), shape (\eqn{\nu} or nu) and scale (\eqn{\omega} or omega) regression model. This is designed for multiple covariates that are fit in the location, scale and shape models.
#' @param y Vector containing outcome data. Must be no missing data.
#' @param x Matrix containing the covariate data, same length as \code{y}. Must be no missing data.
#' @param locationmodel Vector to specify the location model to be fit for each covariate. Options: \code{"constant"} = constant model (intercept only), \code{"linear"} = linear term with x covariate, \code{"semi"} = semiparametric spline (specify with \code{knots.l}).
#' @param location.vars Vector to specify the column(s) in \code{x} referring to covariates to be fit in the location model, eg c(1,2) indicates columns 1 and 2 in \code{x}. Must be the same length as \code{locationmodel} which specifies if they are fit as linear/semi. If semi, use \code{knots.l} to specify knots.
#' @param scale2model Vector to specify the scale^2 model to be fit for each covariate. Options: \code{"constant"} = constant term only, \code{"linear"} = linear term with \code{x} covariate, \code{"semi"} = semiparametric spline (specify with \code{knots.sc})
#' @param scale2.vars Vector to specify the column(s) in \code{x} referring to covariates to be fit in the scale^2 model, eg c(1,2) indicates columns 1 and 2 in \code{x}. Must be the same length as \code{scale2model} which specifies if they are fit as linear/semi. If semi, use \code{knots.sc} to specify knots.
#' @param shapemodel Vector to specify the shape model to be fit for each covariate. Options: \code{"constant"} = constant shape model, \code{"linear"} = linear term with x covariate, \code{"semi"} = semiparametric spline (specify with \code{knots.sh}).
#' @param shape.vars Vector to specify the column(s) in \code{x} referring to covariates to be fit in the shape model, eg c(1,2) indicates columns 1 and 2 in \code{x}. Must be the same length as \code{shapemodel} which specifies if they are fit as linear/semi. If semi, use \code{knots.sh} to specify knots.
#' @param knots.l Vector indicating the number of internal knots to be fit in the location model for each covariate. Default is '2'. (Note that the knots are placed equidistantly over x.)
#' @param knots.sc Vector indicating the number of internal knots to be fit in the scale^2 model for each covariate. Default is '2'. (Note that the knots are placed equidistantly over x.)
#' @param knots.sh Vector indicating the number of internal knots to be fit in the shape model for each covariate. Default is '2'. (Note that the knots are placed equidistantly over x.)
#' @param degree Integer to indicate the degree of the splines fit in the location, scale and shape. Default is '2'.
#' @param location.init Vector of initial parameter estimates for the location model. Defaults to vector of 1's of appropriate length.
#' @param scale2.init Vector of initial parameter estimates for the scale^2 model. Defaults to vector of 1's of appropriate length.
#' @param shape.init Vector of initial parameter estimates for the shape model. Defaults to vector of 1's of appropriate length.
#' @param int.maxit Integer of maximum iterations for the internal location and scale EM algorithm. Default is 1000 iterations.
#' @param print.it Logical for printing progress of estimates through each iteration. Default is \code{FALSE}.
#' @param control List of control parameters for the algorithm. See \code{\link{VarReg.control}}.
#' @param ... arguments to be used to form the default control argument if it is not supplied
#' directly
#' @return
#' \code{lssVarReg.multi} returns an object of class \code{"lssVarReg"}, which inherits most of its structure from class
#' \code{"VarReg"}. This object of class \code{lssVarReg} is a list of the following components:
#' \itemize{
#' \item \code{modeltype}: Text indicating the model that was fit, always "LSS model" for this model.
#' \item \code{locationmodel}, \code{scale2model}, \code{shapemodel}, \code{knots.l}, \code{knots.sc},
#' \code{knots.sh}, \code{degree},\code{mono.scale} : Returning the input variables as described above
#' \item\code{converged}: Logical argument indicating if convergence occurred.
#' \item\code{iterations}: Total iterations performed of the main algorithm (not including the
#' internal EM algorithm).
#' \item\code{reldiff}: the positive convergence tolerance that occurred at the final iteration.
#' \item\code{loglik}: Numeric variable of the maximised log-likelihood.
#' \item\code{aic.c}: Akaike information criterion corrected for small samples
#' \item\code{aic}: Akaike information criterion
#' \item\code{bic}: Bayesian information criterion
#' \item\code{hqc}: Hannan-Quinn information criterion
#' \item\code{location}: Vector of the maximum likelihood estimates of the location parameters.
#' \item\code{scale2}: Vector of the maximum likelihood estimates of the scale (squared) parameters.
#' \item\code{shape}: Vector of the maximum likelihood estimates of the shape parameters.
#' \item\code{data}: Dataframe containing the variables included in the model.
#' }
#'
#'@seealso
#' \code{\link{VarReg.control}} \code{\link{plotlssVarReg}}
#'
#'@examples
#' ## not run
#' ## library(palmerpenguins)
#' ## cc<-na.omit(penguins)
#' ## y<-cc$body_mass_g
#' ## x<-as.data.frame(cbind(cc$bill_length_mm, cc$flipper_length_mm,cc$bill_depth_mm))
#' ## colnames(x) <-c("bill length mm", "flipper length mm","bill depth mm")
#' ## model1<-lssVarReg.multi(y, x,
#' ## locationmodel="linear", location.vars = 2,
#' ## scale2model="constant",
#' ## shapemodel=c("linear", "semi"), shape.vars = c(2,3),
#' ## knots.sh = 1, int.maxit=10 )
#' ## model1[-21] ## print model
#' @export
lssVarReg.multi<-function(y, x,
locationmodel=c("constant", "linear", "semi"),
location.vars = c(1),
scale2model=c("constant", "linear", "semi"),
scale2.vars = c(1),
shapemodel=c("constant", "linear", "semi"),
shape.vars = c(1),
knots.l=NULL, knots.sc=NULL, knots.sh=NULL, degree=2,
location.init=NULL, scale2.init=NULL,shape.init=NULL,
int.maxit=1000, print.it=FALSE, control=list(...), ...) {
control<-do.call(VarReg.control, control)
if (anyNA(x)){
stop("Error: x contains missing data")
}
if (anyNA(y)){
stop("Error: y contains missing data")
}
if (length(y)!=nrow(x)){
stop("Error: x and y not same length")
}
#check lengths of all mean model components are equal
if (locationmodel[1]=="constant"){
knots.l<-NULL
location.vars<-NULL
} else if (length(locationmodel)!=length(location.vars) ||
sum(locationmodel=="semi")!=length(knots.l) ){
stop("Error: locationmodel, knots.l and location.vars not equal length")
}
  if (sum(locationmodel=="semi")>0 &&
      sum(locationmodel=="semi")!=length(knots.l)){
    stop("Error: vector knots.l not long enough")
  }
if (scale2model[1]=="constant"){
knots.sc=NULL #ensure these are set correctly
scale2.vars=NULL
} else if (length(scale2model)!=length(scale2.vars) ||
sum(scale2model=="semi")!=length(knots.sc) ){
stop("Error: scale2model, knots.sc and scale2.vars not equal length")
}
  if (sum(scale2model=="semi")>0 &&
      sum(scale2model=="semi")!=length(knots.sc)){
    stop("Error: vector knots.sc not long enough")
  }
if (shapemodel[1]=="constant"){
knots.sh=NULL #ensure these are set correctly
shape.vars=NULL
} else if (length(shapemodel)!=length(shape.vars) ||
sum(shapemodel=="semi")!=length(knots.sh) ){
stop("Error: shapemodel, knots.sh and shape.vars not equal length")
}
  if (sum(shapemodel=="semi")>0 &&
      sum(shapemodel=="semi")!=length(knots.sh)){
    stop("Error: vector knots.sh not long enough")
  }
if ("semi" %in% locationmodel && length(which("semi"== locationmodel))!=length(knots.l)){
stop("Error: please specify the number of knots for each semiparametric variable in the location model")
}
if ("semi" %in% scale2model && length(which("semi"== scale2model))!=length(knots.sc)){
stop("Error: please specify the number of knots for each semiparametric variable in the scale2 model")
}
if ("semi" %in% shapemodel && length(which("semi"== shapemodel))!=length(knots.sh)){
stop("Error: please specify the number of knots for each semiparametric variable in the shape model")
}
n<-length(y)
alldat<-data.frame(y, mean.int=rep(1,n), x)
colnames(x)<-make.names(colnames(x))
#loop thru the location variables
mean.ind<-NULL
msemicounter<-0
if (locationmodel[1]=="constant"){
mean.ind<-c(2)
xiold<-1
}else if (length(locationmodel)>=1){
mean.ind<-c(2)
for (i in 1:length(locationmodel)){
#print(i)
if(locationmodel[i]=="semi"){
#print("semi")
msemicounter<-msemicounter+1
bmean<-splines::bs(x=x[,location.vars[i]], df=(degree+knots.l[msemicounter]), degree=degree)
colnames(bmean) <- paste(paste(paste(colnames(x)[location.vars[i]], "Knt", sep="_"),knots.l[msemicounter], sep = ""), paste("Base", colnames(bmean), sep=""), sep = "_")
alldat<-data.frame(alldat, bmean)
mean.ind[length(mean.ind)+1:(ncol(bmean))]<-which(colnames(alldat)%in%colnames(bmean))
}else if (locationmodel[i]=="linear"){
#print("linear")
mean.ind[length(mean.ind)+1]<-location.vars[i]+2 ##assign next free place in vector with covariate
}
}
if (is.null(location.init)==TRUE){
xiold<-rep(1,times = length(mean.ind))
}else if (length(location.init)==length(mean.ind)){
xiold<-location.init
}else{
stop("Error: check location.init is the correct length (expecting intercept + parameter starting estimates)")
}
}else {stop("Error: check locationmodel contains the appropriate strings")
}
#loop thru the scale2 variables
mono.scale<-"none"
var.ind<-NULL
vsemicounter<-0
if (scale2model[1]=="constant"){
knots.sc<-NULL
var.ind<-FALSE
omega2old<-1
}else if (length(scale2model)>=1){
var.ind<-NULL
for (i in 1:length(scale2model)){
#print(i)
if(scale2model[i]=="semi"){
#print("semi")
vsemicounter<-vsemicounter+1
bvar<-splines::bs(x=x[,scale2.vars[i]], df=(degree+knots.sc[vsemicounter]), degree=degree)
colnames(bvar) <- paste(paste(paste(colnames(x)[scale2.vars[i]],"Knt",sep="_"),knots.sc[vsemicounter], sep = ""), paste("Base", colnames(bvar), sep=""), sep = "_")
alldat<-data.frame(alldat, bvar)
var.ind[length(var.ind)+1:(ncol(bvar))]<-which(colnames(alldat)%in%colnames(bvar))
}else if (scale2model[i]=="linear"){
#print("linear")
var.ind[length(var.ind)+1]<-scale2.vars[i]+2 ##assign next free place in vector with covariate
}
}
if (is.null(scale2.init)==TRUE){
omega2old<-rep(1,times = 1+length(var.ind))
}else if (length(scale2.init)==1+length(var.ind)){
omega2old<-scale2.init
}else{
stop("Error: check scale2.init is the correct length (expecting intercept + parameter starting estimates)")
}
}else {stop("Error: check scale2model contains the appropriate strings")
}
ssemicounter<-0
nu.ind<-NULL
if (shapemodel[1]=="constant"){
nuold<-1
knots.sh<-NULL
nu.ind<-NULL
} else if (length(shapemodel)>=1){
nu.ind<-NULL
for (i in 1:length(shapemodel)){
#print(i)
if(shapemodel[i]=="semi"){
#print("semi")
ssemicounter<-ssemicounter+1
bsh<-splines::bs(x=x[,shape.vars[i]], df=(degree+knots.sh[ssemicounter]), degree=degree)
colnames(bsh) <- paste(paste(paste(colnames(x)[shape.vars[i]],"Knt",sep="_"),knots.sh[ssemicounter], sep = ""), paste("Base", colnames(bsh), sep=""), sep = "_")
alldat<-data.frame(alldat, bsh)
nu.ind[length(nu.ind)+1:(ncol(bsh))]<-which(colnames(alldat)%in%colnames(bsh))
}else if (shapemodel[i]=="linear"){
nu.ind[length(nu.ind)+1]<-shape.vars[i]+2 ##assign next free place in vector with covariate
}
}
if (is.null(shape.init)==TRUE){
nuold<-rep(1,times = 1+length(nu.ind))
}else if (length(shape.init)==1+length(nu.ind)){
nuold<-shape.init
}else{
stop("Error: check shape.init is the correct length (expecting intercept + parameter starting estimates)")
}
}else {stop("Error: check shapemodel contains the appropriate strings")
}
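  ## Main fit: loop_lss() (package-internal) runs the main algorithm; int.maxit
  ## bounds the internal location/scale EM iterations described in the notes above.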
l<-loop_lss(alldat,xiold,omega2old,nuold,mean.ind, var.ind, nu.ind, para.space="all",
maxit=control$maxit, eps=control$epsilon,int.maxit, print.it)
mean<-l$fitted.xi
variance<-unname(colSums(t(cbind(rep(1,n),alldat[1:n,var.ind]))*l$omega2new))
nu<-unname(colSums(t(cbind(rep(1,n),alldat[1:n,nu.ind]))*l$nunew))
d<-vector()
for (i in 1:n){
d[i]<-(sn::dsn(y[i], xi=l$fitted.xi[i], omega=sqrt(variance[i]), alpha=nu[i], log=TRUE))
}
loglik<-sum(d)
param<-length(l$xinew)+length(l$omega2new)+length(l$nunew)
ic<-criterion(n, loglik, param)
if (shapemodel[1]=="constant"){
names(l$nunew)<-"Intercept"
} else {
names(l$nunew)<-c("Intercept", colnames(alldat)[nu.ind])
}
out<-list(modeltype="LSS model", locationmodel=locationmodel, knots.l=knots.l, scale2model=scale2model, knots.sc=knots.sc, shapemodel=shapemodel, knots.sh=knots.sh, degree=degree, converged=l$conv, iterations=l$it,reldiff=l$reldiff, loglik=loglik, aic.c=ic$aicc, aic=ic$aic,bic=ic$bic, mono.scale=mono.scale, hqc=ic$hqc,
location=l$xinew,
scale2=l$omega2new,
shape=l$nunew, data=alldat)
class(out) <- c("lssVarReg")
return(out)
}
|
/R/lssVarReg_multi.R
|
no_license
|
cran/VarReg
|
R
| false
| false
| 14,420
|
r
|
|
/fund 2004-2017/07-buy group.R
|
no_license
|
shenfan2018/shenfan2018
|
R
| false
| false
| 1,736
|
r
| ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangeseller_functions.R
\docType{package}
\name{adexchangeseller_googleAuthR}
\alias{adexchangeseller_googleAuthR}
\alias{adexchangeseller_googleAuthR-package}
\title{Ad Exchange Seller API
Accesses the inventory of Ad Exchange seller users and generates reports.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2016-09-03 22:31:27
filename: /Users/mark/dev/R/autoGoogleAPI/googleadexchangesellerv1.auto/R/adexchangeseller_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/adexchange.seller
\item https://www.googleapis.com/auth/adexchange.seller.readonly
}
}
|
/googleadexchangesellerv1.auto/man/adexchangeseller_googleAuthR.Rd
|
permissive
|
Phippsy/autoGoogleAPI
|
R
| false
| true
| 750
|
rd
|
|
\name{transactions-class}
\docType{class}
\alias{transactions-class}
\alias{transactions}
%%%
\alias{initialize,transactions-method}
\alias{coerce,matrix,transactions-method}
\alias{coerce,list,transactions-method}
\alias{coerce,transactions,matrix-method}
\alias{coerce,transactions,list-method}
\alias{coerce,data.frame,transactions-method}
\alias{coerce,transactions,data.frame-method}
\alias{coerce,ngCMatrix,transactions-method}
%\alias{image,transactions-method}
\alias{labels,transactions-method}
\alias{dimnames,transactions-method}
\alias{dimnames<-,transactions,list-method}
\alias{show,transactions-method}
\alias{transactionInfo<-,transactions-method}
\alias{transactionInfo,transactions-method}
\alias{transactionInfo<-}
\alias{transactionInfo}
\alias{t,transactions-method}
%
\alias{summary,transactions-method}
\alias{summary.transactions-class}
\alias{show,summary.transactions-method}
\title{Class ``transactions'' --- Binary Incidence Matrix for
Transactions}
\description{
The \code{transactions} class represents transaction data used for
mining itemsets or rules. It is a direct extension of class
\code{\linkS4class{itemMatrix}} to store a binary incidence
matrix, item labels, and optionally transaction IDs and user IDs.
}
\section{Objects from the Class}{
Objects are created by coercion from objects of other classes
(see Examples section) or by
calls of the form \code{new("transactions", ...)}.
}
\section{Slots}{
\describe{
\item{\code{itemsetInfo}:}{a data.frame
with one row per transaction (each transaction is considered an
itemset). The data.frame
can hold columns with additional information, e.g.,
transaction IDs or user IDs for each transaction. \bold{Note:} this
slot is inherited from class \code{\linkS4class{itemMatrix}}, but
should be accessed in transactions with the
method \code{transactionInfo()}.}
\item{\code{data}:}{object of class
\code{\link[Matrix:ngCMatrix-class]{ngCMatrix}} to store the
binary incidence matrix (see
\code{\linkS4class{itemMatrix}} class)}
\item{\code{itemInfo}:}{a data.frame to store
item labels (see \code{\linkS4class{itemMatrix}} class)}
}
}
\section{Extends}{
Class \code{\linkS4class{itemMatrix}}, directly.
}
\details{
Transactions can be created by coercion from lists
containing transactions, but also from matrices and data.frames.
However, you will need to prepare your data first. Association rule mining can only use items and does not work with continuous variables.
For example, an item describing a person (i.e., the considered object called a transaction) could be \emph{tall}. The fact that the person is tall would be encoded in the transaction containing the item \emph{tall}. This is typically encoded in a transaction-by-items matrix by a \code{TRUE} value. This is why coercion to \code{transactions} can deal with logical columns, because each such column is assumed to stand for an item. The coercion can also convert columns with nominal values (i.e., factors) into a series of binary items (one for each level). So if you have nominal variables then you need to make sure they are factors (and not characters or numbers) using something like
\code{data[,"a_nominal_var"] <- factor(data[,"a_nominal_var"])}.
Continuous variables need to be discretized first. An item resulting from discretization might be \emph{age>18} and the column contains only \code{TRUE} or \code{FALSE}. Alternatively it can be a factor with levels \emph{age<=18}, \emph{50>=age>18} and \emph{age>50}. These will be automatically converted into 3 items, one for each level. Have a look at the function \code{\link{discretize}} for automatic discretization.
Complete examples for how to prepare data can be found in the man pages for \code{\link{Income}} and
\code{\link{Adult}}.
Transactions are represented as sparse binary matrices of class
\code{\link{itemMatrix}}. If you work with several transaction sets at the
same time, then the encoding (order of the items in the binary matrix) in the different sets is important.
See \code{\link{itemCoding}} to learn how to encode and recode transaction sets.
}
\section{Methods}{
\describe{
% \item{[}{\code{signature(x = "transactions")};
% extracts a subset from the incidence matrix. The first argument
% extracts transactions and the second argument can be used to
% extract a subset of items (using item IDs)}
\item{coerce}{\code{signature(from = "matrix", to = "transactions")};
produces a transactions data set from a binary incidence matrix.
The row names are used as item labels and the column names are
stored as transaction IDs.}
\item{coerce}{\code{signature(from = "transactions", to = "matrix")};
coerces the transactions data set into a binary incidence matrix.
}
\item{coerce}{\code{signature(from = "list", to = "transactions")};
produces a transactions data set from a list. The names of the
items in the list are used as item labels and the item IDs and the
incidence matrix is produced automatically.
}
\item{coerce}{\code{signature(from = "transactions", to = "list")};
coerces the transactions data set into a list of transactions.
Each transaction is a vector of character strings (names of the
contained items).
}
\item{coerce}{\code{signature(from = "data.frame", to = "transactions")};
recodes the data frame containing only categorical variables (factors)
or logicals all into a binary transaction data set. For binary variables
only TRUE values are converted into items and the item label is the
variable name. For factors, a dummy item for each level is
automatically generated. Item labels are generated by concatenating
variable names and levels with \samp{"="}.
The original variable names and levels are stored in the itemInfo
data frame
as the components \code{variables} and \code{levels}.
Note that \code{NAs} are ignored (i.e., do not generate an item).}
\item{coerce}{\code{signature(from = "transactions", to = "data.frame")};
represents the set of transactions in a printable form
as a data.frame.
Note that this does not reverse coercion from data.frame
to \code{transactions}.}
\item{coerce}{\code{signature(from = "ngCMatrix", to = "transactions")}; Note that the ngCMatrix needs to have the items as rows!}
\item{dimnames, rownames, colnames}{\code{signature(x = "transactions")};
returns row (transactionID) and column (item) names.}
\item{labels}{\code{signature(x = "transactions")};
returns the labels for the itemsets in each transaction
(see \code{itemMatrix}).}
\item{transactionInfo<-}{\code{signature(x = "transactions")};
replaces the transaction information with a new data.frame.}
\item{transactionInfo}{\code{signature(x = "transactions")};
returns the transaction information as a data.frame.}
\item{show}{\code{signature(object = "transactions")}}
\item{summary}{\code{signature(object = "transactions")}}
}
}
\seealso{
\code{\link{[-methods}},
\code{\link{discretize}},
\code{\link{LIST}},
\code{\link{write}},
\code{\link{c}},
\code{\link{image}},
\code{\link{inspect}},
\code{\link{itemCoding}},
\code{\link{read.transactions}},
\code{\link{random.transactions}},
\code{\link{sets}},
\code{\link{itemMatrix-class}}
}
\author{Michael Hahsler}
\examples{
## example 1: creating transactions from a list
a_list <- list(
c("a","b","c"),
c("a","b"),
c("a","b","d"),
c("c","e"),
c("a","b","d","e")
)
## set transaction names
names(a_list) <- paste("Tr",c(1:5), sep = "")
a_list
## coerce into transactions
trans1 <- as(a_list, "transactions")
## analyze transactions
summary(trans1)
image(trans1)
## example 2: creating transactions from a matrix
a_matrix <- matrix(c(
1,1,1,0,0,
1,1,0,0,0,
1,1,0,1,0,
0,0,1,0,1,
1,1,0,1,1
), ncol = 5)
## set dim names
dimnames(a_matrix) <- list(c("a","b","c","d","e"),
paste("Tr",c(1:5), sep = ""))
a_matrix
## coerce
trans2 <- as(a_matrix, "transactions")
trans2
inspect(trans2)
## example 3: creating transactions from data.frame
a_df <- data.frame(
age = as.factor(c(6, 8, NA, 9, 16)),
grade = as.factor(c("A", "C", "F", NA, "C")),
pass = c(TRUE, TRUE, FALSE, TRUE, TRUE))
## note: factors are translated differently to logicals and NAs are ignored
a_df
## coerce
trans3 <- as(a_df, "transactions")
inspect(trans3)
as(trans3, "data.frame")
## example 4: creating transactions from a data.frame with
## transaction IDs and items
a_df3 <- data.frame(
TID = c(1,1,2,2,2,3),
item=c("a","b","a","b","c", "b")
)
a_df3
trans4 <- as(split(a_df3[,"item"], a_df3[,"TID"]), "transactions")
trans4
inspect(trans4)
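## example 5 (illustrative sketch, not from the original file): continuous
## variables must be discretized before coercion, e.g., with discretize()
a_df2 <- data.frame(age = c(6, 8, 15, 9, 16))
a_df2$age <- discretize(a_df2$age)
trans5 <- as(a_df2, "transactions")
inspect(trans5)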
}
\keyword{classes}
|
/man/transactions-class.Rd
|
no_license
|
matmo/arules
|
R
| false
| false
| 8,923
|
rd
|
|
#install.packages('here')
#install.packages('xlsx', type='source', repos='http://cran.rstudio.com')
library(here)
library(haven)
library(foreign)
library(openxlsx)
library(dplyr) # select() used below comes from dplyr
path<-here::here("data", "municipal_population", "inegi","population_conteo")
# path<-here("data", "municipal_population", "pop2000_and_2010.dta")
# pop2000_2010<- read_dta(path)
# pop2000 <- subset(pop2000_2010, yearcenso==2000)
# pop2010 <- subset(pop2000_2010, yearcenso==2010)
#
# pop2010 %>%
# select(yearcenso, municipio,pob)
#
# pop2000 %>%
# select(yearcenso, municipio,pob)
path<-here::here("data", "inequality_poverty","gini.xlsx")
gini2010 <- read.xlsx(path, sheet = "2010")
gini2015 <- read.xlsx(path, sheet = "2015")
colnames(gini2010)[3] <- "id_municipal_gini"
colnames(gini2010)[4]<- "name_muni_gini"
colnames(gini2010)[5] <- "gini"
colnames(gini2010)[6] <- "income_ratio"
colnames(gini2015)[3] <- "id_municipal_gini"
colnames(gini2015)[4]<- "name_muni_gini"
colnames(gini2015)[5] <- "gini"
colnames(gini2015)[6] <- "income_ratio"
gini2010<-select( gini2010, -c("Clave.de.entidad", "Entidad.federativa") )
gini2015<-select( gini2015, -c("Clave.de.entidad", "Entidad.federativa") )
# for (datagini in (dfs)) {
# colnames(datagini)[3] <- "id_municipal_gini"
# colnames(datagini)[4]<- "name_muni_gini"
# colnames(datagini)[5] <- "gini"
# colnames(datagini)[6] <- "income_ratio"
# }
###### REZAGO #########
path<-here::here("data", "inequality_poverty","IRS_2000_2015_vf.xlsx")
irs <- read.xlsx(path, sheet = "Municipios", startRow = 5)
colnames(irs)[5] <- "pop2000"
colnames(irs)[6] <- "pop2005"
colnames(irs)[7] <- "pop2010"
colnames(irs)[8] <- "pop2015"
irs <- subset(irs[c(-9:-20)])
colnames(irs)[9] <- "pop_no_healthserv00"
colnames(irs)[10] <- "pop_no_healthserv05"
colnames(irs)[11] <- "pop_no_healthserv10"
colnames(irs)[12] <- "pop_no_healthserv15"
irs <- subset(irs[c(-13:-40)])
colnames(irs)[13] <- "irs00"
colnames(irs)[14] <- "irs05"
colnames(irs)[15] <- "irs10"
colnames(irs)[16] <- "irs15"
colnames(irs)[17] <- "degree00"
colnames(irs)[18] <- "degree05"
colnames(irs)[19] <- "degree10"
colnames(irs)[20] <- "degree15"
colnames(irs)[21] <- "rank00"
colnames(irs)[22] <- "rank05"
colnames(irs)[23] <- "rank10"
colnames(irs)[24] <- "rank15"
colnames(irs)[1] <- "cve_edo"
colnames(irs)[2] <- "state_name"
colnames(irs)[3] <- "municipal_id"
colnames(irs)[4] <- "municipal_name"
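# (Illustrative alternative) the repeated renames above could be written compactly:
# colnames(irs)[13:24] <- paste0(rep(c("irs", "degree", "rank"), each = 4),
#                                c("00", "05", "10", "15"))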
irs$pop2000<-as.numeric(irs$pop2000)
irs$pop2005<-as.numeric(irs$pop2005)
irs$pop2010<-as.numeric(irs$pop2010)
irs$pop2015<-as.numeric(irs$pop2015)
irs$pop_no_healthserv00<- as.numeric(irs$pop_no_healthserv00)
irs$pop_no_healthserv05<- as.numeric(irs$pop_no_healthserv05)
irs$pop_no_healthserv10<- as.numeric(irs$pop_no_healthserv10)
irs$pop_no_healthserv15<- as.numeric(irs$pop_no_healthserv15)
irs$irs00<-as.numeric(irs$irs00)
irs$irs05<-as.numeric(irs$irs05)
irs$irs10<-as.numeric(irs$irs10)
irs$irs15<-as.numeric(irs$irs15)
save(irs, file="irs.RData")
save(gini2010, file = "gini2010.RData")
save(gini2015, file = "gini2015.RData")
path_pop_cohort<-here::here("data", "population_age_cohorts","total_by_cohort.xlsx")
pop_by_cohort <- read.xlsx(path_pop_cohort, startRow = 1, sheet="clean_data")
save(pop_by_cohort, file="pop_by_cohort.RData")
head(pop_by_cohort)
|
/scripts/population_conteo.R
|
no_license
|
aleistermi/Mortality-and-Suicides-in-Mx-Viz
|
R
| false
| false
| 3,423
|
r
|
|
####**********************************************************************
####**********************************************************************
####
#### RANDOM FORESTS FOR SURVIVAL, REGRESSION, AND CLASSIFICATION (RF-SRC)
#### Version 2.4.1 (_PROJECT_BUILD_ID_)
####
#### Copyright 2016, University of Miami
####
#### This program is free software; you can redistribute it and/or
#### modify it under the terms of the GNU General Public License
#### as published by the Free Software Foundation; either version 3
#### of the License, or (at your option) any later version.
####
#### This program is distributed in the hope that it will be useful,
#### but WITHOUT ANY WARRANTY; without even the implied warranty of
#### MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#### GNU General Public License for more details.
####
#### You should have received a copy of the GNU General Public
#### License along with this program; if not, write to the Free
#### Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
#### Boston, MA 02110-1301, USA.
####
#### ----------------------------------------------------------------
#### Project Partially Funded By:
#### ----------------------------------------------------------------
#### Dr. Ishwaran's work was funded in part by DMS grant 1148991 from the
#### National Science Foundation and grant R01 CA163739 from the National
#### Cancer Institute.
####
#### Dr. Kogalur's work was funded in part by grant R01 CA163739 from the
#### National Cancer Institute.
#### ----------------------------------------------------------------
#### Written by:
#### ----------------------------------------------------------------
#### Hemant Ishwaran, Ph.D.
#### Director of Statistical Methodology
#### Professor, Division of Biostatistics
#### Clinical Research Building, Room 1058
#### 1120 NW 14th Street
#### University of Miami, Miami FL 33136
####
#### email: hemant.ishwaran@gmail.com
#### URL: http://web.ccs.miami.edu/~hishwaran
#### --------------------------------------------------------------
#### Udaya B. Kogalur, Ph.D.
#### Adjunct Staff
#### Department of Quantitative Health Sciences
#### Cleveland Clinic Foundation
####
#### Kogalur & Company, Inc.
#### 5425 Nestleway Drive, Suite L1
#### Clemmons, NC 27012
####
#### email: ubk@kogalur.com
#### URL: http://www.kogalur.com
#### --------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
adrop3d.last <- function(x, d, keepColNames = FALSE) {
if (!is.array(x)) {
x
}
else {
if (d > 1) {
x[,,1:d, drop = FALSE]
}
else {
if (dim(x)[1] == 1) {
rbind(x[,,1, drop = TRUE])
}
else {
if (dim(x)[2] == 1) {
if (keepColNames) {
xnew <- cbind(x[,,1, drop = TRUE])
colnames(xnew) <- colnames(x)
xnew
}
else {
cbind(x[,,1, drop = TRUE])
}
}
else {
x[,,1, drop = TRUE]
}
}
}
}
}
adrop2d.first <- function(x, d, keepColNames = FALSE) {
if (!is.array(x)) {
x
}
else {
if (d > 1) {
x[1:d,, drop = FALSE]
}
else {
x[1, , drop = TRUE]
}
}
}
adrop2d.last <- function(x, d, keepColNames = FALSE) {
if (!is.array(x)) {
x
}
else {
if (d > 1) {
x[,1:d, drop = FALSE]
}
else {
x[,1, drop = TRUE]
}
}
}
amatrix <- function(x, d, names) {
x <- matrix(x, d, dimnames = names)
if (ncol(x) > 1) {
x
}
else {
c(x)
}
}
amatrix.remove.names <- function(x) {
if (!is.null(dim(x)) && ncol(x) == 1) {
unlist(c(x), use.names = FALSE)
}
else {
x
}
}
atmatrix <- function(x, d, names, keep.names = FALSE) {
x <- t(matrix(x, ncol = d, dimnames = names))
if (ncol(x) > 1) {
x
}
else {
if (keep.names == FALSE) {
c(x)
}
else {
x.names <- rownames(x)
x <- c(x)
names(x) <- x.names
x
}
}
}
avector <- function(x, name = FALSE) {
if (!is.null(dim(x)) && nrow(x) > 1 && ncol(x) == 1) {
x.names <- rownames(x)
x <- unlist(c(x))
if (name) names(x) <- x.names else names(x) <- NULL
x
}
else if (!is.null(dim(x)) && nrow(x) == 1 && ncol(x) > 1) {
x.names <- colnames(x)
x <- unlist(c(x))
if (name) names(x) <- x.names else names(x) <- NULL
x
}
else if (!is.null(dim(x)) && nrow(x) == 1 && ncol(x) == 1) {
unlist(c(x))
}
else {
x
}
}
available <- function (package, lib.loc = NULL, quietly = TRUE)
{
package <- as.character(substitute(package))
installed <- package %in% installed.packages()
if (installed) {
require(package, quietly = TRUE, character.only = TRUE)
}
else {
return(invisible(FALSE))
}
}
bayes.rule <- function(prob) {
levels.class <- colnames(prob)
factor(levels.class[apply(prob, 1, function(x) {
if (!all(is.na(x))) {
resample(which(x == max(x, na.rm = TRUE)), 1)
}
else {
NA
}
})], levels = levels.class)
}
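## Illustrative usage sketch (not part of the original source): given a
## row-stochastic probability matrix whose column names are the class
## labels, bayes.rule() returns the per-row argmax class as a factor,
## breaking ties uniformly at random via resample().
##   p <- cbind(yes = c(0.7, 0.4), no = c(0.3, 0.6))
##   bayes.rule(p)   # factor: yes, no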
brier <- function(ytest, prob) {
cl <- colnames(prob)
J <- length(cl)
bs <- rep(NA, J)
nullO <- sapply(1:J, function(j) {
bs[j] <<- mean((1 * (ytest == cl[j]) - prob[, j]) ^ 2, na.rm = TRUE)
NULL
})
norm.const <- (J / (J - 1))
sum(bs * norm.const, na.rm = TRUE)
}
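## Worked example (assumed inputs): the multiclass Brier score sums, over
## the J classes, mean((1{ytest == class_j} - prob[, j])^2), then scales by
## J/(J - 1) so that the non-informative forecast prob = 1/J scores exactly 1.
##   p <- cbind(a = c(0.9, 0.1), b = c(0.1, 0.9))
##   brier(factor(c("a", "b")), p)   # (0.01 + 0.01) * 2 = 0.04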
cv.folds <- function (n, folds = 10) {
split(resample(1:n), rep(1:folds, length = n))
}
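## e.g. cv.folds(10, folds = 5) splits a random permutation of 1:10 into a
## list of 5 disjoint index vectors of length 2; fold sizes differ by at
## most one when folds does not divide n.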
data.matrix <- function(x) {
as.data.frame(lapply(x, function(xi) {
if (is.integer(xi) || is.numeric(xi)) {
xi
}
else if (is.logical(xi) || is.factor(xi)) {
as.integer(xi)
}
else {
as.numeric(xi)
}
}))
}
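## Note: this data.matrix() shadows base::data.matrix inside the package.
## Unlike the base version it returns a data.frame rather than a numeric
## matrix, coercing factors and logicals to their underlying integer codes
## while leaving numeric columns untouched.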
family.pretty <- function(fmly) {
switch(fmly,
"surv" = "RSF",
"surv-CR" = "RSF",
"regr" = "RF-R",
"class" = "RF-C",
"unsupv" = "RF-U",
"regr+" = "mRF-R",
"class+" = "mRF-C",
"mix+" = "mRF-RC"
)
}
finalizeFormula <- function(formula.obj, data) {
yvar.names <- formula.obj$yvar.names
all.names <- formula.obj$all.names
index <- length(yvar.names)
fmly <- formula.obj$family
ytry <- formula.obj$ytry
if (length(all.names) <= index) {
stop("formula is misspecified: total number of variables does not exceed total number of y-variables")
}
if (all.names[index + 1] == ".") {
if(index == 0) {
xvar.names <- names(data)
}
else {
xvar.names <- names(data)[!is.element(names(data), all.names[1:index])]
}
}
else {
if(index == 0) {
xvar.names <- all.names
}
else {
xvar.names <- all.names[-c(1:index)]
}
not.specified <- !is.element(xvar.names, names(data))
if (sum(not.specified) > 0) {
stop("formula is misspecified, object ", xvar.names[not.specified], " not found")
}
}
return (list(family=fmly, yvar.names=yvar.names, xvar.names=xvar.names, ytry=ytry))
}
finalizeData <- function(fnames, data, na.action, miss.flag = TRUE) {
data <- data[ , is.element(names(data), fnames), drop = FALSE]
factor.names <- unlist(lapply(data, is.factor))
if (sum(factor.names) > 0) {
data[, factor.names] <- data.matrix(data[, factor.names, drop = FALSE])
}
if (miss.flag == TRUE && na.action == "na.omit") {
if (any(is.na(data))) {
data <- na.omit(data)
}
}
if (nrow(data) == 0) {
stop("no records in the NA-processed data: consider using 'na.action=na.impute'")
}
logical.names <- unlist(lapply(data, is.logical))
if (sum(logical.names) > 0) {
data[, logical.names] <- 1 * data[, logical.names, drop = FALSE]
}
character.names <- unlist(lapply(data, is.character))
if (sum(character.names) > 0) {
stop("data types cannot be character: please convert all characters to factors")
}
return (data)
}
get.importance.xvar <- function(importance.xvar, importance, object) {
if (!is.null(importance)) {
if (missing(importance.xvar) || is.null(importance.xvar)) {
importance.xvar <- object$xvar.names
}
else {
importance.xvar <- unique(importance.xvar)
importance.xvar <- intersect(importance.xvar, object$xvar.names)
}
if (length(importance.xvar) == 0) {
stop("xvar names do not match object xvar matrix")
}
}
else {
importance.xvar <- NULL
}
return (importance.xvar)
}
get.nmiss <- function(xvar, yvar = NULL) {
if (!is.null(yvar)) {
sum(apply(yvar, 1, function(x){any(is.na(x))}) | apply(xvar, 1, function(x){any(is.na(x))}))
}
else {
sum(apply(xvar, 1, function(x){any(is.na(x))}))
}
}
get.outcome.target <- function(family, yvar.names, outcome.target) {
if (family == "regr" | family == "regr+" | family == "class" | family == "class+" | family == "mix+") {
if (is.null(outcome.target)) {
outcome.target <- yvar.names
}
outcome.target <- unique(outcome.target)
outcome.target <- intersect(outcome.target, yvar.names)
if (length(outcome.target) == 0) {
stop("yvar target names do not match object yvar names")
}
outcome.target <- match(outcome.target, yvar.names)
}
else {
outcome.target <- 0
}
}
get.grow.nodesize <- function(fmly, nodesize) {
if (fmly == "surv"){
if (is.null(nodesize)) {
nodesize <- 3
}
}
else if (fmly == "surv-CR"){
if (is.null(nodesize)) {
nodesize <- 6
}
}
else if (fmly == "class" | fmly == "class+") {
if (is.null(nodesize)) {
nodesize <- 1
}
}
else if (fmly == "regr" | fmly == "regr+") {
if (is.null(nodesize)) {
nodesize <- 5
}
}
else if (fmly == "mix+") {
if (is.null(nodesize)) {
nodesize <- 3
}
}
else if (fmly == "unsupv") {
if (is.null(nodesize)) {
nodesize <- 3
}
}
else if (is.null(nodesize)) {
stop("family is misspecified")
}
nodesize <- round(nodesize)
}
get.coerced.survival.fmly <- function(fmly, event.type, splitrule = NULL) {
if (grepl("surv", fmly)) {
coerced.fmly <- "surv"
if (!is.null(splitrule)) {
if ((length(event.type) > 1) & (splitrule != "logrankscore")) {
coerced.fmly <- "surv-CR"
}
}
else {
if (length(event.type) > 1) {
coerced.fmly <- "surv-CR"
}
}
}
else {
stop("attempt to coerce a non-survival family")
}
coerced.fmly
}
get.event.info <- function(obj, subset = NULL) {
if (grepl("surv", obj$family)) {
if (!is.null(obj$yvar)) {
if (is.null(subset)) {
subset <- (1:nrow(cbind(obj$yvar)))
}
r.dim <- 2
time <- obj$yvar[subset, 1]
cens <- obj$yvar[subset, 2]
if (!all(floor(cens) == abs(cens), na.rm = TRUE)) {
stop("for survival families censoring variable must be coded as a non-negative integer")
}
event <- na.omit(cens)[na.omit(cens) > 0]
event.type <- sort(unique(event))
}
else {
r.dim <- 0
      event <- event.type <- cens <- time <- NULL
}
time.interest <- obj$time.interest
}
else {
if ((obj$family == "regr+") | (obj$family == "class+")) {
r.dim <- dim(obj$yvar)[2]
}
else {
r.dim <- 1
}
    event <- event.type <- cens <- time.interest <- time <- NULL
}
return(list(event = event, event.type = event.type, cens = cens,
time.interest = time.interest, time = time, r.dim = r.dim))
}
get.grow.event.info <- function(yvar, fmly, need.deaths = TRUE, ntime) {
if (grepl("surv", fmly)) {
r.dim <- 2
time <- yvar[, 1]
cens <- yvar[, 2]
if (!all(floor(cens) == abs(cens), na.rm = TRUE)) {
stop("for survival families censoring variable must be coded as a non-negative integer (perhaps the formula is set incorrectly?)")
}
if (need.deaths & (all(na.omit(cens) == 0))) {
stop("no deaths in data!")
}
if (!all(na.omit(time) >= 0)) {
stop("time must be positive")
}
event.type <- unique(na.omit(cens))
if (sum(event.type >= 0) != length(event.type)) {
stop("censoring variable must be coded as NA, 0, or greater than 0.")
}
event <- na.omit(cens)[na.omit(cens) > 0]
event.type <- unique(event)
nonMissingOutcome <- which(!is.na(cens) & !is.na(time))
nonMissingDeathFlag <- (cens[nonMissingOutcome] != 0)
time.interest <- sort(unique(time[nonMissingOutcome[nonMissingDeathFlag]]))
if (!missing(ntime)) {
if (length(ntime) == 1 && length(time.interest) > ntime) {
time.interest <- time.interest[
unique(round(seq.int(1, length(time.interest), length.out = ntime)))]
}
if (length(ntime) > 1) {
time.interest <- unique(sapply(ntime, function(tt) {
time.interest[max(1, sum(tt >= time.interest, na.rm = TRUE))]
}))
}
}
}
else {
if ((fmly == "regr+") | (fmly == "class+") | (fmly == "mix+")) {
r.dim <- dim(yvar)[2]
}
else {
if (fmly == "unsupv") {
r.dim <- 0
}
else {
r.dim <- 1
}
}
    event <- event.type <- cens <- time.interest <- time <- NULL
}
return(list(event = event, event.type = event.type, cens = cens,
time.interest = time.interest,
time = time, r.dim = r.dim))
}
get.grow.splitinfo <- function (formula.detail, splitrule, nsplit, event.type) {
splitrule.names <- c("logrank",
"logrankscore",
"logrankCR",
"logrankACR",
"random",
"mse",
"mse.unwt",
"mse.hvwt",
"gini",
"gini.unwt",
"gini.hvwt",
"unsupv",
"mv.mse",
"mv.gini",
"custom")
fmly <- formula.detail$family
nsplit <- round(nsplit)
if (nsplit < 0) {
stop("Invalid nsplit value specified.")
}
cust.idx <- NULL
splitpass <- FALSE
if (!is.null(splitrule)) {
if(grepl("custom", splitrule)) {
splitrule.idx <- which(splitrule.names == "custom")
cust.idx <- as.integer(sub("custom", "", splitrule))
if (is.na(cust.idx)) cust.idx <- 1
splitpass <- TRUE
}
else if (splitrule == "random") {
splitrule.idx <- which(splitrule.names == "random")
nsplit <- 1
splitpass <- TRUE
}
}
if (!splitpass) {
if (grepl("surv", fmly)) {
if (is.null(splitrule)) {
if (length(event.type) == 1) {
splitrule.idx <- which(splitrule.names == "logrank")
}
else {
splitrule.idx <- which(splitrule.names == "logrankCR")
}
splitrule <- splitrule.names[splitrule.idx]
}
else {
splitrule.idx <- which(splitrule.names == splitrule)
if (length(splitrule.idx) != 1) {
stop("Invalid split rule specified: ", splitrule)
}
if ((length(event.type) == 1) & (splitrule.idx == which(splitrule.names == "logrankCR"))) {
stop("Cannot specify logrankCR splitting for right-censored data")
}
if ((length(event.type) > 1) & (splitrule.idx == which(splitrule.names == "logrank"))) {
splitrule.idx <- which(splitrule.names == "logrankACR")
}
}
}
if (fmly == "class") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "gini")
splitrule <- splitrule.names[splitrule.idx]
}
else {
if ((splitrule != "gini") &
(splitrule != "gini.unwt") &
(splitrule != "gini.hvwt")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
if (fmly == "regr") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "mse")
splitrule <- splitrule.names[splitrule.idx]
}
else {
if ((splitrule != "mse") &
(splitrule != "mse.unwt") &
(splitrule != "mse.hvwt")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
if (fmly == "regr+") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "mv.mse")
splitrule <- splitrule.names[splitrule.idx]
}
else {
if ((splitrule != "mv.mse")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
if (fmly == "class+") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "mv.gini")
splitrule <- splitrule.names[splitrule.idx]
}
else {
if ((splitrule != "mv.gini")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
if (fmly == "mix+") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "mv.mse")
splitrule <- "mv.mix"
}
else {
if ((splitrule != "mv.mix")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
if (fmly == "unsupv") {
if (is.null(splitrule)) {
splitrule.idx <- which(splitrule.names == "unsupv")
splitrule <- splitrule.names[splitrule.idx]
}
else {
if ((splitrule != "unsupv")) {
stop("Invalid split rule specified: ", splitrule)
}
splitrule.idx <- which(splitrule.names == splitrule)
}
}
}
splitinfo <- list(name = splitrule, index = splitrule.idx, cust = cust.idx, nsplit = nsplit)
return (splitinfo)
}
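## Resolution sketch under the defaults above: for a survival family with
## splitrule = NULL the rule becomes "logrank" when there is a single event
## type and "logrankCR" under competing risks; "custom1", "custom2", ...
## map to the "custom" slot, with cust.idx selecting the user split function.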
get.weight <- function(weight, n) {
if (!is.null(weight)) {
if (any(weight < 0) ||
all(weight == 0) ||
length(weight) != n ||
any(is.na(weight))) {
stop("Invalid weight vector specified.")
}
}
else {
weight <- rep(1, n)
}
return (weight)
}
get.grow.mtry <- function (mtry = NULL, n.xvar, fmly) {
if (!is.null(mtry)) {
mtry <- round(mtry)
if (mtry < 1 | mtry > n.xvar) mtry <- max(1, min(mtry, n.xvar))
}
else {
if (grepl("regr", fmly)) {
mtry <- max(ceiling(n.xvar/3), 1)
}
else {
mtry <- max(ceiling(sqrt(n.xvar)), 1)
}
}
return (mtry)
}
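## Worked default: with n.xvar = 25 and mtry = NULL, regression families
## get mtry = ceiling(25/3) = 9, all other families ceiling(sqrt(25)) = 5;
## an explicit mtry is rounded and clamped into [1, n.xvar].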
get.ytry <- function(f) {
}
get.xvar.type <- function(generic.types, xvar.names, coerce.factor = NULL) {
xvar.type <- generic.types
if (!is.null(coerce.factor$xvar.names)) {
xvar.type[is.element(xvar.names, coerce.factor$xvar.names)] <- "C"
}
xvar.type
}
get.xvar.nlevels <- function(nlevels, xvar.names, xvar, coerce.factor = NULL) {
xvar.nlevels <- nlevels
if (!is.null(coerce.factor$xvar.names)) {
pt <- is.element(xvar.names, coerce.factor$xvar.names)
xvar.nlevels[pt] <- sapply(coerce.factor$xvar.names, function(nn) {max(xvar[, nn])})
}
xvar.nlevels
}
get.yvar.type <- function(fmly, generic.types, yvar.names, coerce.factor = NULL) {
if (fmly == "unsupv") {
yvar.type <- NULL
}
else {
if (grepl("surv", fmly)) {
yvar.type <- c("T", "S")
}
else {
yvar.type <- generic.types
if (!is.null(coerce.factor$yvar.names)) {
yvar.type[is.element(yvar.names, coerce.factor$yvar.names)] <- "C"
}
}
}
yvar.type
}
get.yvar.nlevels <- function(fmly, nlevels, yvar.names, yvar, coerce.factor = NULL) {
if (fmly == "unsupv") {
yvar.nlevels <- NULL
}
else {
yvar.nlevels <- nlevels
if (!is.null(coerce.factor$yvar.names)) {
pt <- is.element(yvar.names, coerce.factor$yvar.names)
yvar.nlevels[pt] <- sapply(coerce.factor$yvar.names, function(nn) {max(yvar[, nn])})
}
}
yvar.nlevels
}
parseFormula <- function(f, data, ytry = NULL, coerce.factor = NULL) {
if (!inherits(f, "formula")) {
stop("'formula' is not a formula object.")
}
if (is.null(data)) {
stop("'data' is missing.")
}
if (!is.data.frame(data)) {
stop("'data' must be a data frame.")
}
fmly <- all.names(f, max.names = 1e7)[2]
all.names <- all.vars(f, max.names = 1e7)
yvar.names <- all.vars(formula(paste(as.character(f)[2], "~ .")), max.names = 1e7)
yvar.names <- yvar.names[-length(yvar.names)]
coerce.factor.org <- coerce.factor
coerce.factor <- vector("list", 2)
names(coerce.factor) <- c("xvar.names", "yvar.names")
if (!is.null(coerce.factor.org)) {
coerce.factor$yvar.names <- intersect(yvar.names, coerce.factor.org)
if (length(coerce.factor$yvar.names) == 0) {
coerce.factor$yvar.names <- NULL
}
coerce.factor$xvar.names <- intersect(setdiff(colnames(data), yvar.names), coerce.factor.org)
}
if ((fmly == "Surv")) {
if (sum(is.element(yvar.names, names(data))) != 2) {
stop("Survival formula incorrectly specified.")
}
family <- "surv"
ytry <- 2
}
else if ((fmly == "Multivar" || fmly == "cbind") && length(yvar.names) > 1) {
if (sum(is.element(yvar.names, names(data))) < length(yvar.names)) {
stop("Multivariate formula incorrectly specified: y's listed in formula are not in data.")
}
Y <- data[, yvar.names, drop = FALSE]
logical.names <- unlist(lapply(Y, is.logical))
if (sum(logical.names) > 0) {
Y[, logical.names] <- 1 * Y[, logical.names, drop = FALSE]
}
if ((sum(unlist(lapply(Y, is.factor))) +
length(coerce.factor$yvar.names)) == length(yvar.names)) {
family <- "class+"
}
else if ((sum(unlist(lapply(Y, is.factor))) +
length(coerce.factor$yvar.names)) == 0) {
family <- "regr+"
}
else if (((sum(unlist(lapply(Y, is.factor))) +
length(coerce.factor$yvar.names)) > 0) &&
((sum(unlist(lapply(Y, is.factor))) +
length(coerce.factor$yvar.names)) < length(yvar.names))) {
family <- "mix+"
}
else {
stop("y-outcomes must be either real or factors in multivariate forests.")
}
if (!is.null(ytry)) {
if ((ytry < 1) || (ytry > length(yvar.names))) {
stop("invalid value for ytry: ", ytry)
}
}
else {
ytry <- length(yvar.names)
}
}
else if (fmly == "Unsupervised") {
if (length(yvar.names) != 0) {
stop("Unsupervised forests require no y-responses")
}
family <- "unsupv"
yvar.names <- NULL
temp <- gsub(fmly, "", as.character(f)[2])
temp <- gsub("\\(|\\)", "", temp)
ytry <- as.integer(temp)
if (is.na(ytry)) {
ytry <- 1
}
else {
if (ytry <= 0) {
stop("Unsupervised forests require positive ytry value")
}
}
}
else {
if (sum(is.element(yvar.names, names(data))) != 1) {
stop("formula is incorrectly specified.")
}
Y <- data[, yvar.names]
if (is.logical(Y)) {
Y <- as.numeric(Y)
}
if (!(is.factor(Y) | is.numeric(Y))) {
stop("the y-outcome must be either real or a factor.")
}
if (is.factor.not.ordered(Y) || length(coerce.factor$yvar.names) == 1) {
family <- "class"
}
else {
family <- "regr"
}
ytry <- 1
}
return (list(all.names=all.names, family=family, yvar.names=yvar.names, ytry=ytry,
coerce.factor = coerce.factor))
}
is.all.na <- function(x) {all(is.na(x))}
parseMissingData <- function(formula.obj, data) {
yvar.names <- formula.obj$yvar.names
if (length(yvar.names) > 0) {
resp <- data[, yvar.names, drop = FALSE]
col.resp.na <- unlist(lapply(data[, yvar.names, drop = FALSE], is.all.na))
if (any(col.resp.na)) {
stop("All records are missing for the yvar(s)")
}
}
colPt <- unlist(lapply(data, is.all.na))
if (sum(colPt) > 0 && sum(colPt) >= (ncol(data) - length(yvar.names))) {
stop("All x-variables have all missing data: analysis not meaningful.")
}
data <- data[, !colPt, drop = FALSE]
rowPt <- apply(data, 1, is.all.na)
if (sum(rowPt) == nrow(data)) {
stop("Rows of the data have all missing data: analysis not meaningful.")
}
data <- data[!rowPt,, drop = FALSE]
return(data)
}
make.sample <- function(ntree, samp.size, boot.size = NULL) {
if (samp.size < 0) {
stop("samp.size cannot be negative:", samp.size)
}
if (is.null(boot.size)) {
boot.size <- samp.size
}
rbind(sapply(1:ntree, function(bb){
inb <- rep(0, samp.size)
smp <- sample(1:samp.size, size = boot.size, replace = TRUE)
frq <- tapply(smp, smp, length)
idx <- as.numeric(names(frq))
inb[idx] <- frq
inb
}))
}
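## Illustrative shape (output is random): make.sample(ntree = 3, samp.size = 5)
## yields a 5 x 3 matrix whose column bb holds the bootstrap in-bag counts
## for tree bb; each column sums to boot.size (here 5), and zeros mark the
## out-of-bag cases.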
resample <- function(x, size, ...) {
if (length(x) <= 1) {
if (!missing(size) && size == 0) x[FALSE] else x
}
else {
sample(x, size, ...)
}
}
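## Why the wrapper: base sample() treats a scalar as a range, so
## sample(5) returns a permutation of 1:5, whereas resample(5) returns 5.
## This keeps single-candidate draws (e.g. a unique argmax in bayes.rule)
## from silently sampling the wrong set.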
row.col.deleted <- function(dat, r.n, c.n)
{
which.r <- setdiff(r.n, rownames(dat))
if (length(which.r) > 0) {
which.r <- match(which.r, r.n)
}
else {
which.r <- NULL
}
which.c <- setdiff(c.n, colnames(dat))
if (length(which.c) > 0) {
which.c <- match(which.c, c.n)
}
else {
which.c <- NULL
}
return(list(row = which.r, col = which.c))
}
|
/R/data.utilities.R
|
no_license
|
ehrlinger/randomForestSRC
|
R
| false
| false
| 26,704
|
r
|
\name{PlotLogTheta}
\alias{PlotLogTheta}
\title{Plot of the log theta penalties}
\usage{
PlotLogTheta(results)
}
\arguments{
\item{results}{Object of class \code{"list"}: either the
output of \code{\link{pawl}} or of
\code{\link{adaptiveMH}}.}
}
\value{
The function returns a ggplot2 object.
}
\description{
This function takes the result of \code{\link{pawl}}, and
draws a trace plot of the log theta penalties along the
iterations.
}
\author{
Luke Bornn <bornn@stat.harvard.edu>, Pierre E. Jacob
<pierre.jacob.work@gmail.com>
}
\seealso{
\code{\link{ggplot}}
}
|
/man/PlotLogTheta.Rd
|
no_license
|
ericschulz/rpawl
|
R
| false
| false
| 587
|
rd
|
source('getData.R')
source('~/Dropbox/LNCC/Doutorado/Disciplinas/Ciência de Redes/Project/getData022018.R')
# Genre - Country (Top 5 Genres) #####################################################
# Take only the top 5 genres for each country, from a top 200 of artists
# Exclude genres that appear in every country
# Simplify the market.genre data frame
# Keep a single occurrence of each genre per country
# This makes it possible to check how many countries each genre appears in
market.genre.u <- unique(market.genre)
market.genre.u <- na.omit(market.genre.u); head(market.genre.u)
unique(market.genre.u$genre) ## 367
# How many countries each genre reaches
eachGenre.total <- as.data.frame(table(market.genre.u$genre))
eachGenre.total <- eachGenre.total[with(eachGenre.total, order(-Freq)), ]; head(eachGenre.total, 10)
# Exclude genres that connect a given number of countries (here, 51 or more)
genre.cntAll <- eachGenre.total[eachGenre.total$Freq>=51,]; nrow(genre.cntAll)
market.genre.u <- na.omit(market.genre); head(market.genre)
market.genre.u <- market.genre.u[!(market.genre.u$genre %in% genre.cntAll$Var1),]
# How many times each genre appears per market, ordered by country and frequency
freqGenre.eachMk <- as.data.frame(table(market.genre.u))
colnames(freqGenre.eachMk) <- c('market', 'genre', 'freq')
freqGenre.eachMk <- freqGenre.eachMk[with(freqGenre.eachMk, order(market, -freq)), ]; head(freqGenre.eachMk)
# Subset to keep only the 5 most frequent genres of each market
aux <- as.data.table(freqGenre.eachMk)
top5Genre.eachMk <- aux[, head(.SD, 5), by = 'market']; head(top5Genre.eachMk)
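## data.table idiom (data.table assumed loaded by the sourced scripts):
## .SD is the Subset of Data for each 'market' group, so head(.SD, 5)
## keeps the first 5 rows per market; since rows were ordered by
## descending freq above, these are each market's top-5 genres.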
# Exclude countries that have no genres
top5Genre.eachMk <- droplevels(top5Genre.eachMk[top5Genre.eachMk$freq>0,])
length(unique(top5Genre.eachMk$genre)) # 101 genres
# Adjacency matrix: 57 rows, 70 columns
market.top5genre.am <- xtabs(~ market + genre, top5Genre.eachMk)
market.top5genre.amdf <- as.data.frame(market.top5genre.am) #market, genre, #freq
# To get the differences
# market.genre10.amdf$Freq <- ifelse(market.genre10.amdf$Freq=="0","1","0")
# Graph from the incidence matrix
market.top5genre.g <- graph_from_incidence_matrix(market.top5genre.am)
market.top5genre.projec <- bipartite.projection(market.top5genre.g)
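## bipartite.projection() collapses the two-mode market-genre graph into
## two one-mode graphs: proj1 connects markets that share at least one
## genre, proj2 connects genres that co-occur in at least one market; by
## default, edge weights count the number of shared neighbors.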
# Projections (1 - Market, 2 - Genre) ############################
# Projection 1
edges.projec1 <- E(market.top5genre.projec$proj1) # 459
vertices.projec1 <- V(market.top5genre.projec$proj1) # 56
# Projection 2
edges.projec2 <- E(market.top5genre.projec$proj2) # 310
vertices.projec2 <- V(market.top5genre.projec$proj2) # 101
# Checking - Comparing markets #################################
# Yep!
market.top5genre.edges <- as.data.frame(get.edgelist(market.top5genre.projec$proj1)); head(market.top5genre.edges)
a <- top5Genre.eachMk[top5Genre.eachMk$market=='br', ]; nrow(a) # 5
b <- top5Genre.eachMk[top5Genre.eachMk$market=='pt', ]; nrow(b) # 5
c <- a[a$genre %in% b$genre,]; nrow(c) # 1
# Plot bipartite ###############################################
V(market.top5genre.g)$size <- 1
V(market.top5genre.g)$frame.color <- "white"
V(market.top5genre.g)$color <- colors[4]
#V(market.top5genre.g)$label <- NA
E(market.top5genre.g)$arrow.mode <- 0
plot(market.top5genre.g)
# Plot Projection 1 ################################################
V(market.top5genre.projec$proj1)$size <- 5
V(market.top5genre.projec$proj1)$frame.color <- "white"
V(market.top5genre.projec$proj1)$color <- colors[4]
V(market.top5genre.projec$proj1)$label <- NA
E(market.top5genre.projec$proj1)$arrow.mode <- 0
plot(market.top5genre.projec$proj1)
# Plot Projection 2 ################################################
V(market.top5genre.projec$proj2)$size <- 5
V(market.top5genre.projec$proj2)$frame.color <- "white"
V(market.top5genre.projec$proj2)$color <- colors[4]
V(market.top5genre.projec$proj2)$label <- NA
E(market.top5genre.projec$proj2)$arrow.mode <- 0
plot(market.top5genre.projec$proj2)
# Degree #######################################################
# The degree of a vertex is its most basic structural property,
# the number of its adjacent edges.
# Projection 1
market.top5genre.deg <- degree(market.top5genre.projec$proj1, mode="all"); market.top5genre.deg
max(market.top5genre.deg); min(market.top5genre.deg); mean(market.top5genre.deg)
#plot(market.genre.projec$proj1, vertex.size=market.genre.deg*3)
market.top5genre.deg.dist <- degree_distribution(market.top5genre.projec$proj1,
cumulative=F, mode="all")
ggplot(as.data.frame(market.top5genre.deg.dist),
aes(x = seq(1:length(market.top5genre.deg.dist)), y = market.top5genre.deg.dist)) +
geom_point(colour=colors[4],size = 2) +
theme_minimal() +
theme(panel.background = element_rect(colour = "grey50", size=0.3),
legend.title=element_blank(),
panel.grid.major = element_line(colour = "grey80"),
axis.text=element_text(size=11),
axis.title=element_text(size=12)) +
labs(
subtitle=" ",
y="Probabilidade",
x="Grau")
hist(market.top5genre.deg, breaks=1:vcount(market.top5genre.projec$proj1)-1,
main="Filtro 2",
xlab="Nós", ylab="Frequência", col=colors[4],
ylim=c(0, 35),
xlim = c(0,57), xaxt='n'); axis(side = 1, at=seq(0,55,5))
# Projection 2
market.top5genre.deg2 <- degree(market.top5genre.projec$proj2, mode="all"); market.top5genre.deg2
max(market.top5genre.deg2); min(market.top5genre.deg2); mean(market.top5genre.deg2)
# Random Network ################################################
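# Calibration sketch: sample_gnp(n, p) draws an Erdos-Renyi G(n, p) graph;
# taking p = mean.degree/(n - 1) makes its expected mean degree match the
# observed projection (16.1 for proj1 below, 6.1 for proj2), so diameter,
# density, and transitivity can be compared against a random benchmark.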
# Projection 1
gnp_proj1 <- sample_gnp(length(vertices.projec1), 16.1/(length(vertices.projec1)-1))
min.weight <- min(E(market.top5genre.projec$proj1)$weight)
max.weight <- max(E(market.top5genre.projec$proj1)$weight)
E(gnp_proj1)$weight <- sample(min.weight:max.weight, length(E(gnp_proj1)), replace=T)
degree(gnp_proj1); mean(degree(gnp_proj1)) # 16.21429
degree_distribution(gnp_proj1)
plot(x=0:max(degree(gnp_proj1)), y=degree_distribution(gnp_proj1), pch=19,
cex=1.2, col=colors[4], xlab="Grau", ylab="Frequência Acumulada",
xaxt='n'); axis(side = 1, at=seq(0,25,5))
# Projection 2
gnp_proj2 <- sample_gnp(length(vertices.projec2), 6.1/(length(vertices.projec2)-1))
min.weight <- min(E(market.top5genre.projec$proj2)$weight)
max.weight <- max(E(market.top5genre.projec$proj2)$weight)
E(gnp_proj2)$weight <- sample(min.weight:max.weight, length(E(gnp_proj2)), replace=T)
degree(gnp_proj2); mean(degree(gnp_proj2)) # 6.07
# Diameter #######################################################
# The diameter of a graph is the length of the longest geodesic.
# Projection 1
market.top5genre.dmt <- diameter(market.top5genre.projec$proj1, directed = F,
weights = E(market.top5genre.projec$proj1)$weight); market.top5genre.dmt # 5
gnp.dmt.projec1 <- diameter(gnp_proj1, directed = F, weights = E(gnp_proj1)$weight); gnp.dmt.projec1 # 5
# Projection 2
market.top5genre.dmt2 <- diameter(market.top5genre.projec$proj2, directed = F,
weights = E(market.top5genre.projec$proj2)$weight); market.top5genre.dmt2 # 7
gnp.dmt.projec2 <- diameter(gnp_proj2, directed = F, weights = E(gnp_proj2)$weight); gnp.dmt.projec2 # 41
# Density #######################################################
# The ratio of the number of edges and the number of possible edges.
# Projection 1
market.top5genre.dst <- edge_density(market.top5genre.projec$proj1); market.top5genre.dst # 0.294
gnp.dst.projec1 <- edge_density(gnp_proj1); gnp.dst.projec1 # 0.298
# Projection 2
market.top5genre.dst2 <- edge_density(market.top5genre.projec$proj2); market.top5genre.dst2 # 0.06
gnp.dst.projec2 <- edge_density(gnp_proj2); gnp.dst.projec2 # 0.06
# Transitivity #########################################
# Projection 1
market.top5genre.trans <- transitivity(market.top5genre.projec$proj1,
type="global"); market.top5genre.trans # 0.79
gnp.trans.projec1 <- transitivity(gnp_proj1, type="global"); gnp.trans.projec1 # 0.27
# Projection 1
market.top5genre.trans2 <- transitivity(market.top5genre.projec$proj2,
type="global"); market.top5genre.trans2 # 0.43
gnp.trans.projec2 <- transitivity(gnp_proj2, type="global"); gnp.trans.projec2 # 0.07
# Distance ##############################################
# Projection 1
distances(market.top5genre.projec$proj1, v = V(market.top5genre.projec$proj1),
to = V(market.top5genre.projec$proj1),
mode = "all", weights = E(market.top5genre.projec$proj1)$weight,
algorithm = "automatic")
market.top5genre.mdist <- mean_distance(market.top5genre.projec$proj1, directed=F) # 1.766
gnp.mdist.projec1 <- mean_distance(gnp_proj1, directed = F) # 1.70
# Projection 2
market.top5genre.mdist2 <- mean_distance(market.top5genre.projec$proj2, directed=F) # 2.6
gnp.mdist.projec2 <- mean_distance(gnp_proj2, directed = F) # 2.7
# Components ####################################################
# Projection 1
components(market.top5genre.projec$proj1)$no # 6
# Projection 2
components(market.top5genre.projec$proj2)$no # 6
# Communities ####################################################
new_cols <- c("white", "red", "black")[membership(wc)]
#market.genre.comunities <- cluster_edge_betweenness(market.top5genre.projec$proj1)
market.genre.comunities <- cluster_louvain(market.top5genre.projec$proj1,
weights = E(market.top5genre.projec$proj1)$weight)
#plot_dendrogram(market.genre.comunities, mode = 'dendrogram')
#plot(market.genre.comunities, market.top5genre.projec$proj1)
colors.c <- brewer.pal(10, "BuGn")[membership(market.genre.comunities)]
V(market.top5genre.projec$proj1)$community <- market.genre.comunities$membership
par(mar=c(0,0,0,0))
plot(market.genre.comunities,
market.top5genre.projec$proj1,
vertex.size = 6, col = colors[4],
mark.border = '#143662',
#edge.color = colors[2],
mark.col='white',
#vertex.label = NA,
vertex.label.family = "sans",
vertex.label.cex=.7
)
# Total of tracks, artists and genres (each country) ######################################
colnames(top200complete)
music.eachMk <- top200complete[,c(2,4)]
music.eachMk <- data.frame(table(music.eachMk$mk))
music.eachMk <- music.eachMk[with(music.eachMk, order(Var1)),]; head(music.eachMk)
music.eachMk['tipo'] <- as.factor('Músicas')
artist.eachMk <- top200complete[,c(2,1)]
artist.eachMk <- unique(artist.eachMk)
artist.eachMk <- data.frame(table(artist.eachMk$mk))
artist.eachMk <- artist.eachMk[with(artist.eachMk, order(Var1)),]; head(artist.eachMk)
artist.eachMk['tipo'] <- as.factor('Artistas')
genre.eachMk <- unique(market.genre)
genre.eachMk <- data.frame(table(genre.eachMk$market))
genre.eachMk <- genre.eachMk[with(genre.eachMk, order(Var1)),]; head(genre.eachMk)
genre.eachMk['tipo'] <- as.factor('Gêneros')
eachMk.merged <- rbind(music.eachMk, artist.eachMk, genre.eachMk)
colnames(eachMk.merged) <- c('mk', 'freq', 'tipo')
eachMk.merged <- merge(eachMk.merged, countries, by='mk')
eachMk.Plot <-
ggplot(eachMk.merged,
aes(market, freq, fill=tipo, colour=tipo),
na.rm = TRUE) +
geom_bar(stat= 'identity', position = 'stack') +
theme_minimal() +
scale_colour_manual(values=colors[c(1,4,3)]) +
scale_fill_manual(values=colors[c(1,4,3)]) +
theme(panel.background = element_rect(colour = "grey20", size=0.3),
legend.title=element_blank(),
legend.position="top",
legend.text = element_text(size = 12),
axis.text=element_text(size=11),
axis.title=element_text(size=12)) +
labs(subtitle=" ",
y="Quantidade",
x="Países") + scale_y_continuous(breaks = seq(0, 500, by = 50)) +
theme(axis.text.x=element_text(angle=60, hjust=1, size=10))
# Ranking Genres ############################################
# Closeness
closeness.genre <- data.frame(closeness(market.top5genre.projec$proj2,
vids = V(market.top5genre.projec$proj2),
mode = 'all',
weights = E(market.top5genre.projec$proj2)$weight,
normalized = TRUE))
closeness.genre <- cbind(V(market.top5genre.projec$proj2)$name, closeness.genre)
colnames(closeness.genre) <- c('genre', 'closeness')
closeness.genre <- closeness.genre[with(closeness.genre, order(-closeness)), ]
# Betweenness
betweeness.genre <- data.frame(betweenness(market.top5genre.projec$proj2,
                                           weights = E(market.top5genre.projec$proj2)$weight,
                                           normalized = TRUE, directed = FALSE))
betweeness.genre <- cbind(V(market.top5genre.projec$proj2)$name, betweeness.genre)
colnames(betweeness.genre) <- c('genre', 'betweenness')
betweeness.genre <- betweeness.genre[with(betweeness.genre, order(-betweenness)), ]
# Degree
degree.genre <- data.frame(degree(market.top5genre.projec$proj2))
degree.genre <- cbind(V(market.top5genre.projec$proj2)$name, degree.genre)
colnames(degree.genre) <- c('genre', 'degree')
degree.genre <- degree.genre[with(degree.genre, order(-degree)), ]
# Strength
strength.genre <- data.frame(strength(market.top5genre.projec$proj2,
vids = V(market.top5genre.projec$proj2),
mode = "all",
loops = TRUE,
weights = E(market.top5genre.projec$proj2)$weight))
strength.genre <- cbind(V(market.top5genre.projec$proj2)$name, strength.genre)
colnames(strength.genre) <- c('genre', 'strength')
strength.genre <- strength.genre[with(strength.genre, order(-strength)), ]
rank.genre <- data.frame(degree = degree.genre$genre[1:10],
strength = strength.genre$genre[1:10],
closeness = closeness.genre$genre[1:10],
betweenness = betweeness.genre$genre[1:10])
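# Aside (hedged): Spearman correlations between the four centrality scores,
# matched by genre name, show how much the rankings above agree.
cent <- Reduce(function(a, b) merge(a, b, by = 'genre'),
               list(degree.genre, strength.genre, closeness.genre, betweeness.genre))
cor(cent[, -1], method = 'spearman')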
layout(matrix(1:4, 2, 2, byrow = TRUE))  # 2x2 panel layout (only the first panel is drawn below)
par(mar=c(0,2,2,0))
# Vertex size scaled by degree, matching the title; the original multiplied by
# the sorted strength data frame, which does not evaluate.
plot(market.top5genre.projec$proj2,
     main="Degree", vertex.size=degree(market.top5genre.projec$proj2)/2)
# How many genres are original? #############################
origin_genre <- read.csv(paste0(dir,'origin_genre.csv'))
unique(origin_genre$mk) # 99
unique(droplevels(top5Genre.eachMk$market))
origin_genre <- merge(origin_genre, countries, by='market')
a <- origin_genre[,c(2,3)]
b <- top5Genre.eachMk[,c(2,1)]
colnames(a) <- colnames(b) <- c('genre', 'mk')
equal <- inner_join(a, b)
equal <- aggregate(genre ~ mk, data = equal, FUN = length)
b <- aggregate(genre ~ mk, data = b, FUN = length)
orig.df <- merge(equal, b, by='mk')  # renamed from 'c' to avoid masking base::c
orig.df <- rbind(orig.df, data.frame(mk = b$mk[which(!(b$mk %in% orig.df$mk))], genre.x = 0, genre.y = 5))
orig.df['percent'] <- (orig.df$genre.x/orig.df$genre.y)*100
orig.df <- merge(orig.df, countries, by = 'mk')
how.many <-
  ggplot(orig.df,
aes(market, percent),
na.rm = TRUE) +
geom_bar(stat= 'identity', fill = colors[4]) +
theme_minimal() +
scale_colour_manual(values=colors[4]) +
scale_fill_manual(values=colors[4]) +
theme(panel.background = element_rect(colour = "grey20", size=0.3),
legend.title=element_blank(),
legend.position="top",
legend.text = element_text(size = 12),
axis.text=element_text(size=11),
axis.title=element_text(size=12)) +
labs(subtitle=" ",
y="Porcentagem de origem",
x="Países") + scale_y_continuous(breaks = seq(0, 100, by = 20)) +
theme(axis.text.x=element_text(angle=60, hjust=1, size=10))
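# Aside (hedged): quick distribution check for the origin percentages computed above.
summary(orig.df$percent)
mean(orig.df$percent == 0) * 100  # share of countries with no locally-originated top-5 genre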
# Heat Map ######################################################
# Plot heat map - genre similarity between markets ##########
market.genre.edges <- as.data.frame(get.edgelist(market.top5genre.projec$proj1)); head(market.genre.edges)
# Get the genre counts for each country
market.genre.edges <- cbind(market.genre.edges, E(market.top5genre.projec$proj1)$weight)
colnames(market.genre.edges) <- c('market', 'to', 'w.sim'); head(market.genre.edges)
#market.genre.edges <- merge(market.genre.edges, genre.eachMk, by='market')
#colnames(market.genre.edges) <- c('from', 'market', 'w.sim'); head(market.genre.edges)
#market.genre.edges <- merge(market.genre.edges, genre.eachMk, by='market')
#colnames(market.genre.edges) <- c('from', 'to', 'w.sim', 'w.to', 'w.from')
# Compute the similarity percentage
#market.genre.edges['simPercent'] <- market.genre.edges$w.sim/(market.genre.edges$w.to + market.genre.edges$w.from)
# Make a new data.frame, simply reversing A and B
# (column names must match market.genre.edges, otherwise rbind() errors)
market.genre.edges.oh <- data.frame(market = market.genre.edges$to,
                                    to = market.genre.edges$market,
                                    w.sim = market.genre.edges$w.sim)
# Here's the diagonal: each country against itself, set to the maximum observed
# weight (an assumption; the original used 1, which is off-scale for counts)
mk.codes <- unique(c(market.genre.edges$market, market.genre.edges$to))
diagonal <- data.frame(market = mk.codes,
                       to = mk.codes,
                       w.sim = max(market.genre.edges$w.sim))
# Mash 'em all together
full <- rbind(market.genre.edges, market.genre.edges.oh, diagonal); head(full)
# Attach the full country names (one merge per endpoint)
colnames(full) <- c('mk', 'to', 'sim'); head(full)
full <- merge(full, countries, by='mk')
colnames(full) <- c('t', 'mk', 'sim', 'to'); head(full)
full <- merge(full, countries, by='mk')
colnames(full) <- c('f', 't', 'sim', 'from', 'to'); head(full)
full <- full[,c(-1,-2)]
heatMap.mg <-
ggplot(data = full, aes(x = to, y = from)) +
geom_tile(aes(fill = sim)) +
scale_fill_gradient(low = colors[4], high = colors[1], name = "Similarity"
) +
theme_minimal() +
labs(title="Similarity between countries based on musical genre",
y="Countries",
x="Countries") +
theme(axis.text.x=element_text(angle=60, hjust=1, size=10),
axis.text.y=element_text(size=8))
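# Aside (hedged): reordering the axes by hierarchical clustering of the
# similarity matrix usually makes block structure visible. Re-run the
# heatMap.mg call above after reordering so the new factor levels take effect.
sim.mat <- xtabs(sim ~ from + to, data = full)
ord <- hclust(dist(sim.mat))$order
full$from <- factor(full$from, levels = rownames(sim.mat)[ord])
full$to   <- factor(full$to,   levels = rownames(sim.mat)[ord])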
####### Scratch: degree distribution of projection 2 ###########
market.top5genre.deg.dist2 <- degree_distribution(market.top5genre.projec$proj2,
                                                  cumulative=F, mode="all")
plot(x=0:max(degree(market.top5genre.projec$proj2)), y=market.top5genre.deg.dist2, pch=19,
     cex=1.2, col=colors[4], xlab="Degree", ylab="Frequency",
     xaxt='n'); axis(side = 1, at=seq(0,35,5))
# Source: top200spotify/R/artistGenreTop5NWA.R (repo: mmondelli/network-science, no license)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/questionnaire.R
\name{kre.compute}
\alias{kre.compute}
\title{Computes the raw score for the KRE questionnaire}
\usage{
kre.compute(x, ...)
}
\description{
Computes the raw score for the KRE questionnaire
}
% Source: man/kre.compute.Rd (repo: boryspaulewicz/bp, no license)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.greengrass_operations.R
\name{associate_service_role_to_account}
\alias{associate_service_role_to_account}
\title{Associates a role with your account}
\usage{
associate_service_role_to_account(RoleArn = NULL)
}
\arguments{
\item{RoleArn}{The ARN of the service role you wish to associate with your account.}
}
\description{
Associates a role with your account. AWS IoT Greengrass will use the role to access your Lambda functions and AWS IoT resources. This is necessary for deployments to succeed. The role must have at least minimum permissions in the policy ''AWSGreengrassResourceAccessRolePolicy''.
}
\section{Accepted Parameters}{
\preformatted{associate_service_role_to_account(
RoleArn = "string"
)
}
}
% Source: service/paws.greengrass/man/associate_service_role_to_account.Rd (repo: CR-Mercado/paws, permissive license)