content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Combining.R: join OEM system data with coil characteristics workbooks.
library(readr)
library(tidyverse)
library(readxl)
# Read both workbooks from the working directory.
# NOTE(review): "Characterisitcs.xlsx" is misspelled -- confirm this matches
# the actual file name on disk before changing it.
OEM <- read_excel("OEM.xlsx")
Characteristics <- read_excel("Characterisitcs.xlsx")
# Sanitize column names so they are syntactically valid and unique.
names(OEM) <- make.names(names(OEM), unique=TRUE)
names(Characteristics) <- make.names(names(Characteristics), unique=TRUE)
# Inner join (merge default) on manufacturer / system model / coil model keys.
mydata <- merge(OEM, Characteristics, by.x=c("Manufacturer.Name","System.Model.Number","Coil.Model.Number.1"), by.y=c("Company","Model.Number","Coil.Model.Number"))
| /Combining.R | no_license | brucestewartjrAHRI/Rstudio | R | false | false | 426 | r | library(readr)
library(tidyverse)
library(readxl)
OEM <- read_excel("OEM.xlsx")
Characteristics <- read_excel("Characterisitcs.xlsx")
names(OEM) <- make.names(names(OEM), unique=TRUE)
names(Characteristics) <- make.names(names(Characteristics), unique=TRUE)
mydata <- merge(OEM, Characteristics, by.x=c("Manufacturer.Name","System.Model.Number","Coil.Model.Number.1"), by.y=c("Company","Model.Number","Coil.Model.Number"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize_embeddings.R
\name{keep_tokens}
\alias{keep_tokens}
\title{Filter Tokens}
\usage{
keep_tokens(embedding_df, tokens = "[CLS]")
}
\arguments{
\item{embedding_df}{A tbl_df of embedding vectors; the output of
\code{\link{extract_vectors_df}}.}
\item{tokens}{Character vector; which tokens to keep.}
}
\value{
The input tbl_df of embedding vectors, with the specified filtering
applied.
}
\description{
Keeps only specified tokens in the given table of embeddings.
}
\examples{
\dontrun{
# assuming something like the following has been run:
# feats <- RBERT::extract_features(...) # See RBERT documentation
# Then:
embeddings <- extract_vectors_df(feats$layer_outputs)
embeddings_layer12_cls <- embeddings \%>\%
filter_layer_embeddings(layer_indices = 12L) \%>\%
keep_tokens("[CLS]")
}
}
| /man/keep_tokens.Rd | permissive | IntuitionMachine/RBERTviz | R | false | true | 883 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize_embeddings.R
\name{keep_tokens}
\alias{keep_tokens}
\title{Filter Tokens}
\usage{
keep_tokens(embedding_df, tokens = "[CLS]")
}
\arguments{
\item{embedding_df}{A tbl_df of embedding vectors; the output of
\code{\link{extract_vectors_df}}.}
\item{tokens}{Character vector; which tokens to keep.}
}
\value{
The input tbl_df of embedding vectors, with the specified filtering
applied.
}
\description{
Keeps only specified tokens in the given table of embeddings.
}
\examples{
\dontrun{
# assuming something like the following has been run:
# feats <- RBERT::extract_features(...) # See RBERT documentation
# Then:
embeddings <- extract_vectors_df(feats$layer_outputs)
embeddings_layer12_cls <- embeddings \%>\%
filter_layer_embeddings(layer_indices = 12L) \%>\%
keep_tokens("[CLS]")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{classify_validname}
\alias{classify_validname}
\title{Classify worrms validated names}
\usage{
classify_validname(valid_name, ranks = c("phylum", "class", "order",
"family", "genus", "species"))
}
\arguments{
\item{valid_name}{a worrms validated name}
\item{ranks}{ranks of interest to be returned}
}
\value{
data.frame of ranks for worrms valid name
}
\description{
Classify worrms validated names
}
| /man/classify_validname.Rd | permissive | annakrystalli/seabirddiet.devtools | R | false | true | 499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/taxonomy.R
\name{classify_validname}
\alias{classify_validname}
\title{Classify worrms validated names}
\usage{
classify_validname(valid_name, ranks = c("phylum", "class", "order",
"family", "genus", "species"))
}
\arguments{
\item{valid_name}{a worrms validated name}
\item{ranks}{ranks of interest to be returned}
}
\value{
data.frame of ranks for worrms valid name
}
\description{
Classify worrms validated names
}
|
# Extracted example code for MLDS::make.ix.mat.
library(MLDS)
### Name: make.ix.mat
### Title: Create data.frame for Fitting Difference Scale by glm
### Aliases: make.ix.mat
### Keywords: manip
### ** Examples
# AutumnLab: difference-scaling data set bundled with MLDS.
data(AutumnLab)
# Build the indicator data.frame used to fit the difference scale by glm.
make.ix.mat(AutumnLab)
# Fit the difference scale at the given stimulus indices.
mlds(AutumnLab, c(1, seq(6, 30, 3)))
| /data/genthat_extracted_code/MLDS/examples/make.ix.mat.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 245 | r | library(MLDS)
### Name: make.ix.mat
### Title: Create data.frame for Fitting Difference Scale by glm
### Aliases: make.ix.mat
### Keywords: manip
### ** Examples
data(AutumnLab)
make.ix.mat(AutumnLab)
mlds(AutumnLab, c(1, seq(6, 30, 3)))
|
# Extracted example code for DescribeDisplay::plot.dd.
library(DescribeDisplay)
### Name: plot.dd
### Title: Draw dd plot Draw a complete describe display.
### Aliases: plot.dd
### Keywords: internal
### ** Examples
# Render the bundled example displays: a scatterplot, a 1-D tour, and a
# 2-D tour projection.
plot(dd_example("xyplot"))
plot(dd_example("tour1d"))
plot(dd_example("tour2d"))
| /data/genthat_extracted_code/DescribeDisplay/examples/plot.dd.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 249 | r | library(DescribeDisplay)
### Name: plot.dd
### Title: Draw dd plot Draw a complete describe display.
### Aliases: plot.dd
### Keywords: internal
### ** Examples
plot(dd_example("xyplot"))
plot(dd_example("tour1d"))
plot(dd_example("tour2d"))
|
# Cluster entries of a relationship matrix and visualize the clusters on the
# first two eigenvectors.
# NOTE(review): the hard-coded setwd() makes this script non-portable;
# consider running it from the project root instead.
setwd("~/Documents/GitRepo/SugarKelpBreeding/TraitAnalyses200125/Code")
library(magrittr)
library(ClusterR)
hMat <- readRDS(file="hMat_analyzeNH.rds")
# Keep only rows/columns 196:312 of the matrix ("GP" entries -- TODO confirm
# what this index range selects).
gpMat <- hMat[196:312, 196:312]
gpEig <- eigen(gpMat, symmetric=T)
# Scale eigenvectors by sqrt(eigenvalues) so Euclidean distance in the
# embedding reflects the relationship-matrix structure.
clustDat <- gpEig$vectors %*% diag(sqrt(gpEig$values))
# k-means (ClusterR's Rcpp implementation), 8 clusters, 100 random restarts.
tst <- KMeans_rcpp(clustDat, 8, num_init=100)
plot(gpEig$vectors[,1:2], col=tst$clusters, pch=16)
# Shared axis limits so the per-cluster panels below are comparable.
xlim <- range(gpEig$vectors[,1]); ylim <- range(gpEig$vectors[,2])
op <- par(mfrow=c(2, 2))
# One panel per cluster (1-4, then 5-8) on the first two eigenvectors.
for (clust in 1:4){
  plot(gpEig$vectors[tst$clusters == clust,1:2], pch=16, xlim=xlim, ylim=ylim)
}
for (clust in 5:8){
  plot(gpEig$vectors[tst$clusters == clust,1:2], pch=16, xlim=xlim, ylim=ylim)
}
par(op)
| /TraitAnalyses200125/Code/ClusterGPs.R | no_license | jeanlucj/SugarKelpBreeding | R | false | false | 673 | r | setwd("~/Documents/GitRepo/SugarKelpBreeding/TraitAnalyses200125/Code")
library(magrittr)
library(ClusterR)
hMat <- readRDS(file="hMat_analyzeNH.rds")
gpMat <- hMat[196:312, 196:312]
gpEig <- eigen(gpMat, symmetric=T)
clustDat <- gpEig$vectors %*% diag(sqrt(gpEig$values))
tst <- KMeans_rcpp(clustDat, 8, num_init=100)
plot(gpEig$vectors[,1:2], col=tst$clusters, pch=16)
xlim <- range(gpEig$vectors[,1]); ylim <- range(gpEig$vectors[,2])
op <- par(mfrow=c(2, 2))
for (clust in 1:4){
plot(gpEig$vectors[tst$clusters == clust,1:2], pch=16, xlim=xlim, ylim=ylim)
}
for (clust in 5:8){
plot(gpEig$vectors[tst$clusters == clust,1:2], pch=16, xlim=xlim, ylim=ylim)
}
par(op)
|
#library(xts)
library(data.table)
library(dplyr)
library(magrittr)
## calculate the number of hit in each game
# Summarise one season's play-by-play file into per-team, per-game hit
# totals and final scores.
#
# file: event-level CSV; characters 4-7 of the name encode the season year
#       (e.g. "all2013.csv" -> "2013").
# Returns one row per (game id, team) with columns id, hit, team, year, score.
teamhit = function(file = "all2013.csv"){
  # Season year is embedded in the file name.
  year = substr(file, 4, 7)
  filename = paste("../../../../data/", file, sep="")
  dat = fread(filename)
  # The event files are header-less; column names live in ../names.csv.
  name = fread("../names.csv", header=FALSE) %>% unlist
  dat1 = dat %>% setnames(name) %>%
    dplyr::select(GAME_ID, AWAY_TEAM_ID, BAT_HOME_ID, H_FL,
                  AWAY_SCORE_CT, HOME_SCORE_CT)
  dat_teamhit = dat1 %>%
    setnames(c("id", "away", "h_a", "h_fl", "away_score", "home_score")) %>%
    # Home team code is the first 3 characters of the game id.
    mutate(home= substr(id, 1,3)) %>%
    # h_fl > 0 marks a hit of any kind.
    mutate(hit = ifelse(h_fl > 0, 1, 0)) %>%
    # One group per game per batting side (h_a == 1 means home is batting).
    group_by(id, home, away, h_a) %>%
    dplyr::summarise(hit = sum(hit), year = year,
                     away_score = max(away_score),
                     home_score = max(home_score)) %>%
    mutate(team = ifelse(h_a==1, home, away)) %>%
    mutate(score = ifelse(home == team, home_score, away_score)) %>%
    # Drops the grouping. NOTE(review): group_by(add=FALSE) with no variables
    # is deprecated in newer dplyr; ungroup() expresses the same intent.
    group_by(add=FALSE) %>%
    dplyr::select(id, hit, team, year, score)
  return(dat_teamhit)
}
# Process every season file listed in files.txt and combine the per-game
# team-hit tables into one data set written to teamhit.csv.
files = fread("../../../../data/files.txt", header=FALSE) %>% unlist
# Collect each season's result in a preallocated list and bind once at the
# end: rbind() inside the loop re-copies the accumulated table on every
# iteration (quadratic), while a single rbindlist() is linear.
dat_list = vector("list", length(files))
for(i in seq_along(files)){
  print(paste("file:", files[i]))
  dat_list[[i]] = teamhit(files[i])
}
dat = rbindlist(dat_list)
dat %>% write.csv("teamhit.csv", quote = FALSE, row.names=FALSE)
dat
| /batting_data/game_analysis/team_hit/teamhit.R | no_license | gghatano/analyze_mlbdata_with_R | R | false | false | 1,335 | r | #library(xts)
library(data.table)
library(dplyr)
library(magrittr)
## calculate the number of hit in each game
teamhit = function(file = "all2013.csv"){
year = substr(file, 4, 7)
filename = paste("../../../../data/", file, sep="")
dat = fread(filename)
name = fread("../names.csv", header=FALSE) %>% unlist
dat1 = dat %>% setnames(name) %>%
dplyr::select(GAME_ID, AWAY_TEAM_ID, BAT_HOME_ID, H_FL,
AWAY_SCORE_CT, HOME_SCORE_CT)
dat_teamhit = dat1 %>%
setnames(c("id", "away", "h_a", "h_fl", "away_score", "home_score")) %>%
mutate(home= substr(id, 1,3)) %>%
mutate(hit = ifelse(h_fl > 0, 1, 0)) %>%
group_by(id, home, away, h_a) %>%
dplyr::summarise(hit = sum(hit), year = year,
away_score = max(away_score),
home_score = max(home_score)) %>%
mutate(team = ifelse(h_a==1, home, away)) %>%
mutate(score = ifelse(home == team, home_score, away_score)) %>%
group_by(add=FALSE) %>%
dplyr::select(id, hit, team, year, score)
return(dat_teamhit)
}
files = fread("../../../../data/files.txt", header=FALSE) %>% unlist
dat = data.table()
for(file in files){
print(paste("file:", file))
dat_tmp = teamhit(file)
dat = rbind(dat, dat_tmp)
}
dat %>% write.csv("teamhit.csv", quote = FALSE, row.names=FALSE)
dat
|
# mailer.R: send an e-mail alert when the last line of the trading log
# reports a new signal, and append the event to mailer.log.
# NOTE(review): the ".../" path prefixes, "mail@mail.com" addresses, and
# "..." SMTP credentials look like redacted placeholders -- fill in before use.
# #install.packages("mailR",repos="http://cran.r-project.org")
suppressWarnings(suppressMessages(require(mailR)))
# Only act when the most recent log line contains "New signal!".
if(grepl(tail(readLines(".../logs/outfile.txt"),1), pattern = "New signal!")){
  # Send the alert through Amazon SES SMTP on port 587 with TLS.
  send.mail(from = "mail@mail.com",
            to = "mail@mail.com",
            subject = "New signal",
            body = paste0("We have alert for your investment. ", tail(readLines(".../logs/outfile.txt"),1)),
            smtp = list(host.name = "email-smtp.us-east-1.amazonaws.com",
                        port = 587,
                        user.name = "...",
                        passwd = "...",
                        ssl = TRUE),
            authenticate = TRUE,
            send = TRUE)
  # Log a timestamped copy of the alert message (minute precision, UTC).
  cat(paste0(substr(as.POSIXct(Sys.time(), "UTC", format = "%Y-%m-%d %H:%M"), 1,16), ": ", paste0("We have alert for your investment. ", tail(readLines(".../logs/outfile.txt"),1))),file=".../logs/mailer.log",sep="\n",append = T)
}
| /mailer.R | no_license | egeor90/stock_alert | R | false | false | 935 | r | # #install.packages("mailR",repos="http://cran.r-project.org")
suppressWarnings(suppressMessages(require(mailR)))
if(grepl(tail(readLines(".../logs/outfile.txt"),1), pattern = "New signal!")){
send.mail(from = "mail@mail.com",
to = "mail@mail.com",
subject = "New signal",
body = paste0("We have alert for your investment. ", tail(readLines(".../logs/outfile.txt"),1)),
smtp = list(host.name = "email-smtp.us-east-1.amazonaws.com",
port = 587,
user.name = "...",
passwd = "...",
ssl = TRUE),
authenticate = TRUE,
send = TRUE)
cat(paste0(substr(as.POSIXct(Sys.time(), "UTC", format = "%Y-%m-%d %H:%M"), 1,16), ": ", paste0("We have alert for your investment. ", tail(readLines(".../logs/outfile.txt"),1))),file=".../logs/mailer.log",sep="\n",append = T)
}
|
# Unit tests (testthat) for predict.brokenstick().
# fit_200 and smocc_200 are a pre-fitted model object and example data set
# provided by the brokenstick package.
library("brokenstick")
context("predict.brokenstick()")
obj <- fit_200
dat <- smocc_200
# Reference sizes used by the row-count expectations below:
# n = observations, m = subjects, k = knots in the fitted model.
n <- nrow(dat)
m <- length(unique(dat$id))
k <- length(get_knots(obj))
# Predicting on training data keeps one row per observation; explicit x/y
# pairs yield one row per pair.
test_that("returns proper number of rows", {
  expect_equal(nrow(predict(obj, dat)), n)
  expect_equal(nrow(predict(obj, dat, x = NA, include_data = FALSE)), m)
  expect_equal(nrow(predict(obj, x = NA, y = 10)), 1L)
  expect_equal(nrow(predict(obj, x = c(NA, NA), y = c(-1, 10))), 2L)
})
# x = "knots" expands predictions to every knot for every subject.
test_that("returns proper number of rows with at = 'knots'", {
  expect_equal(nrow(predict(obj, dat, x = "knots", include_data = FALSE)), m * k)
  expect_equal(nrow(predict(obj, dat, x = NA, include_data = FALSE)), m)
  expect_equal(nrow(predict(obj, x = NA, y = 10)), 1L)
})
test_that("returns proper number of rows with both data & knots", {
  expect_equal(nrow(predict(obj, dat, x = "knots")), n + k * m)
  expect_equal(nrow(predict(obj, dat, x = NA, y = 10, group = 10001)), 11)
  expect_equal(nrow(predict(obj, dat, x = c(NA, NA), y = c(-1, 10), group = rep(10001, 2))), 12)
})
# shape = "vector" should return the same values as the ".pred" column of
# the long-format output.
# NOTE(review): the first expectation duplicates the fifth except for using
# expect_equivalent(), which is deprecated in testthat 3e.
test_that("output = 'vector' and output = 'long' are consistent", {
  expect_equivalent(
    predict(obj, dat)[[".pred"]],
    predict(obj, dat, shape = "vector")
  )
  expect_equal(
    predict(obj, dat, x = 1)[[".pred"]],
    predict(obj, dat, x = 1, shape = "vector")
  )
  expect_equal(
    predict(obj, x = c(NA, 1), y = c(1, NA))[[".pred"]],
    predict(obj, x = c(NA, 1), y = c(1, NA), 10, shape = "vector")
  )
  expect_equal(
    predict(obj, dat, x = "knots")[[".pred"]],
    predict(obj, dat, x = "knots", shape = "vector")
  )
  expect_equal(
    predict(obj, dat)[[".pred"]],
    predict(obj, dat, shape = "vector")
  )
  expect_equal(
    predict(obj, x = NA, y = 10)[[".pred"]],
    predict(obj, x = NA, y = 10, shape = "vector")
  )
})
# Second battery against a fresh copy of the fitted model.
# NOTE(review): `exp` masks base::exp() for the remainder of this file.
exp <- fit_200
dat <- smocc_200
test_that("returns proper number of rows", {
  expect_equal(nrow(predict(exp, dat, x = NA, include_data = FALSE)), 200L)
  expect_equal(nrow(predict(exp, dat, x = c(NA, NA), include_data = FALSE)), 400L)
  expect_equal(nrow(predict(exp, dat, x = NA, y = 1)), 1L)
  expect_equal(nrow(predict(exp, dat, x = c(NA, NA), y = c(-1, 10))), 2L)
  expect_equal(nrow(predict(exp, dat, x = "knots", include_data = FALSE, hide = "none")), 2200L)
  expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 10))), 10L)
  expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 11), hide = "none")), 11L)
  expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 2), hide = "internal")), 2L)
  expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 9), hide = "boundary")), 9L)
})
# The prediction at a query point should not depend on where the NA "query"
# entry sits within x, nor on the ordering of x.
test_that("accepts intermediate NA in x", {
  expect_equal(
    unlist(predict(exp, x = 1, y = -1)[1, ]),
    unlist(predict(exp, x = c(NA, 1), y = c(1, -1))[2, ])
  )
  expect_equal(
    unlist(predict(exp, x = c(1, NA), y = c(-1, 1))[1, ]),
    unlist(predict(exp, x = c(NA, 1), y = c(1, -1))[2, ])
  )
  expect_equal(
    unlist(predict(exp, x = c(1, 2, NA), y = c(NA, -1, 1))[2, ]),
    unlist(predict(exp, x = c(1, NA, 2), y = c(NA, 1, -1))[3, ])
  )
})
test_that("accepts unordered x", {
  expect_equal(
    round(predict(exp, x = c(1, 2, 3), y = c(-1, 1, 0))[1, 5], 5),
    round(predict(exp, x = c(2, 3, 1), y = c(1, 0, -1))[3, 5], 5)
  )
})
# newdata with every value missing should still predict silently at knots.
xz <- data.frame(
  id = c(NA_real_, NA_real_),
  age = c(NA_real_, NA_real_),
  hgt_z = c(NA_real_, NA_real_)
)
test_that("accepts all NA's in newdata", {
  expect_silent(predict(exp, newdata = xz, x = "knots"))
})
context("predict_brokenstick factor")
# NOTE(review): `fit` is assigned here but the test below uses `obj`.
fit <- fit_200
dat <- smocc_200
dat$id <- factor(dat$id)
test_that("works if id in newdata is a factor", {
  expect_silent(predict(obj, newdata = dat))
})
# We needed this to solve problem when newdata is a factor
# obj1 <- brokenstick(hgt_z ~ age | id, data = smocc_200, knots = 1:2)
# obj2 <- brokenstick(hgt_z ~ age | id, data = dat, knots = 1:2)
# test_that("brokenstick doesn't care about factors", {
# expect_identical(obj1, obj2)
# })
#
#
# z1 <- predict(obj1, newdata = dat)
# z2 <- predict(obj2, newdata = dat)
# identical(z1, z2)
| /tests/testthat/test-predict.brokenstick.R | permissive | growthcharts/brokenstick | R | false | false | 4,055 | r | library("brokenstick")
context("predict.brokenstick()")
obj <- fit_200
dat <- smocc_200
n <- nrow(dat)
m <- length(unique(dat$id))
k <- length(get_knots(obj))
test_that("returns proper number of rows", {
expect_equal(nrow(predict(obj, dat)), n)
expect_equal(nrow(predict(obj, dat, x = NA, include_data = FALSE)), m)
expect_equal(nrow(predict(obj, x = NA, y = 10)), 1L)
expect_equal(nrow(predict(obj, x = c(NA, NA), y = c(-1, 10))), 2L)
})
test_that("returns proper number of rows with at = 'knots'", {
expect_equal(nrow(predict(obj, dat, x = "knots", include_data = FALSE)), m * k)
expect_equal(nrow(predict(obj, dat, x = NA, include_data = FALSE)), m)
expect_equal(nrow(predict(obj, x = NA, y = 10)), 1L)
})
test_that("returns proper number of rows with both data & knots", {
expect_equal(nrow(predict(obj, dat, x = "knots")), n + k * m)
expect_equal(nrow(predict(obj, dat, x = NA, y = 10, group = 10001)), 11)
expect_equal(nrow(predict(obj, dat, x = c(NA, NA), y = c(-1, 10), group = rep(10001, 2))), 12)
})
test_that("output = 'vector' and output = 'long' are consistent", {
expect_equivalent(
predict(obj, dat)[[".pred"]],
predict(obj, dat, shape = "vector")
)
expect_equal(
predict(obj, dat, x = 1)[[".pred"]],
predict(obj, dat, x = 1, shape = "vector")
)
expect_equal(
predict(obj, x = c(NA, 1), y = c(1, NA))[[".pred"]],
predict(obj, x = c(NA, 1), y = c(1, NA), 10, shape = "vector")
)
expect_equal(
predict(obj, dat, x = "knots")[[".pred"]],
predict(obj, dat, x = "knots", shape = "vector")
)
expect_equal(
predict(obj, dat)[[".pred"]],
predict(obj, dat, shape = "vector")
)
expect_equal(
predict(obj, x = NA, y = 10)[[".pred"]],
predict(obj, x = NA, y = 10, shape = "vector")
)
})
exp <- fit_200
dat <- smocc_200
test_that("returns proper number of rows", {
expect_equal(nrow(predict(exp, dat, x = NA, include_data = FALSE)), 200L)
expect_equal(nrow(predict(exp, dat, x = c(NA, NA), include_data = FALSE)), 400L)
expect_equal(nrow(predict(exp, dat, x = NA, y = 1)), 1L)
expect_equal(nrow(predict(exp, dat, x = c(NA, NA), y = c(-1, 10))), 2L)
expect_equal(nrow(predict(exp, dat, x = "knots", include_data = FALSE, hide = "none")), 2200L)
expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 10))), 10L)
expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 11), hide = "none")), 11L)
expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 2), hide = "internal")), 2L)
expect_equal(nrow(predict(exp, dat, x = "knots", y = rep(1, 9), hide = "boundary")), 9L)
})
test_that("accepts intermediate NA in x", {
expect_equal(
unlist(predict(exp, x = 1, y = -1)[1, ]),
unlist(predict(exp, x = c(NA, 1), y = c(1, -1))[2, ])
)
expect_equal(
unlist(predict(exp, x = c(1, NA), y = c(-1, 1))[1, ]),
unlist(predict(exp, x = c(NA, 1), y = c(1, -1))[2, ])
)
expect_equal(
unlist(predict(exp, x = c(1, 2, NA), y = c(NA, -1, 1))[2, ]),
unlist(predict(exp, x = c(1, NA, 2), y = c(NA, 1, -1))[3, ])
)
})
test_that("accepts unordered x", {
expect_equal(
round(predict(exp, x = c(1, 2, 3), y = c(-1, 1, 0))[1, 5], 5),
round(predict(exp, x = c(2, 3, 1), y = c(1, 0, -1))[3, 5], 5)
)
})
xz <- data.frame(
id = c(NA_real_, NA_real_),
age = c(NA_real_, NA_real_),
hgt_z = c(NA_real_, NA_real_)
)
test_that("accepts all NA's in newdata", {
expect_silent(predict(exp, newdata = xz, x = "knots"))
})
context("predict_brokenstick factor")
fit <- fit_200
dat <- smocc_200
dat$id <- factor(dat$id)
test_that("works if id in newdata is a factor", {
expect_silent(predict(obj, newdata = dat))
})
# We needed this to solve problem when newdata is a factor
# obj1 <- brokenstick(hgt_z ~ age | id, data = smocc_200, knots = 1:2)
# obj2 <- brokenstick(hgt_z ~ age | id, data = dat, knots = 1:2)
# test_that("brokenstick doesn't care about factors", {
# expect_identical(obj1, obj2)
# })
#
#
# z1 <- predict(obj1, newdata = dat)
# z2 <- predict(obj2, newdata = dat)
# identical(z1, z2)
|
# High-dimensional binary-outcome simulation; the results folder name
# suggests a misspecified-propensity, "widened" setting. sigmoid() is
# defined in utils.R.
library(tidyverse)
library(glmnet)
library(doParallel)
source("utils.R")
results_folder <- "results/highdim/binary/miss_prop_widen/"
start_time <- Sys.time()
# NOTE(review): set.seed() does not control RNG inside %dopar% workers;
# use doRNG for fully reproducible parallel replicates.
set.seed(100)
# NOTE(review): dead initialization -- `results` is reassigned by foreach().
results <- tibble()
n <- 4 * 1000
n_sim <- 500
d <- 500
q <- 400 # dimension of hidden confounder z
p <- d - q # dimension of v
gamma <- 4*3 # number of non-zero predictors in v
beta <- 50
zeta <- beta-gamma # number of non-zero predictors in z
alpha <- 40 # sparsity in propensity
alpha_z <- 35
alpha_v <- 5
# Fold labels 1..4 (n/4 observations each) used for sample splitting.
s <- sort(rep(1:4, n / 4))
# parallelize
registerDoParallel(cores = 48)
# One task per replicate; each returns a one-row-per-method tibble of
# held-out MSEs, collected into a list by foreach().
results <- foreach(sim_num = 1:n_sim) %dopar% {
  # Observed covariates V enter both linearly and squared; Z stays hidden.
  v_first_order <- matrix(rnorm(n * p/2), n, p/2)
  v_second_order <- v_first_order^2
  z <- matrix(rnorm(n * q), n, q)
  x <- cbind(z, v_first_order, v_second_order)
  # True outcome probability mu0; nu drops the hidden-Z contribution from
  # mu0's linear index (the target of the second-stage fits below).
  mu0 <- sigmoid(as.numeric(z %*% rep(c(1, 0), c(zeta, q - zeta)) +
                              v_second_order %*% c(rep(c(1, -1), gamma/4), rep(0,p/2 - gamma/2))+
                              v_first_order %*% rep(c(1, 0), c(gamma/2, p/2 - gamma/2)))/sqrt(beta*0.02))
  nu <- sigmoid(as.numeric(v_second_order %*% c(rep(c(1, -1), gamma/4), rep(0,p/2 - gamma/2))+
                             v_first_order %*% rep(c(1, 0), c(gamma/2, p/2 - gamma/2)))/sqrt(beta*0.02))
  # Propensity loads on the first alpha_z hidden and alpha_v observed terms.
  prop <- sigmoid(as.numeric(x %*% rep(c(1, 0, 1, 0), c(alpha_z, q - alpha_z, alpha_v, p - alpha_v))) / sqrt(alpha))
  a <- rbinom(n, 1, prop)
  y0 <- rbinom(n, 1, mu0)
  # qplot(mu0[((s == 2) & (a == 0))])
  # stage 1
  # Nuisance fits on separate folds: outcome model on fold-2 controls,
  # propensity model on fold 1.
  mu_lasso <- cv.glmnet(x[((s == 2) & (a == 0)), ], y0[((s == 2) & (a == 0))], family = "binomial")
  muhat <- as.numeric(predict(mu_lasso, newx = x, type = "response", s = "lambda.min"))
  prop_lasso <- cv.glmnet(x[s == 1, ], a[s == 1], family = "binomial")
  prophat <- as.numeric(predict(prop_lasso, newx = x, type = "response", s = "lambda.min"))
  # Bias-corrected pseudo-outcomes under different combinations of estimated
  # vs. true nuisances, plus RCT-style constant-propensity variants.
  bchat <- (1 - a) * (y0 - muhat) / (1 - prophat) + muhat
  bc_true <- (1 - a) * (y0 - mu0) / (1 - prop) + mu0
  bc_true_prop <- (1 - a) * (y0 - muhat) / (1 - prop) + muhat
  bc_true_mu <- (1 - a) * (y0 - mu0) / (1 - prophat) + mu0
  bc_rct <- (1 - a) * (y0 - mu0) / (1 - mean(a)) + mu0
  bc_rct_muest <- (1 - a) * (y0 - muhat) / (1 - mean(a)) + muhat
  # stage 2
  # Second-stage regressions on observed first-order covariates only, fit on
  # fold 3; fold 4 is held out for evaluation against nu.
  conf_lasso <- cv.glmnet(v_first_order[((s == 3) & (a == 0)), ], y0[((s == 3) & (a == 0))], family = "binomial")
  conf <- predict(conf_lasso, newx = v_first_order, s = "lambda.min", type = "response")
  conf1se <- predict(conf_lasso, newx = v_first_order, type = "response")
  pl_lasso <- cv.glmnet(v_first_order[s == 3, ], muhat[s == 3])
  pl <- predict(pl_lasso, newx = v_first_order, s = "lambda.min")
  pl1se <- predict(pl_lasso, newx = v_first_order)
  bc_lasso <- cv.glmnet(v_first_order[s == 3, ], bchat[s == 3])
  bc <- predict(bc_lasso, newx = v_first_order, s = "lambda.min")
  bct_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true[s == 3])
  bct <- predict(bct_lasso, newx = v_first_order, s = "lambda.min")
  bctp_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true_prop[s == 3])
  bct_prop <- predict(bctp_lasso, newx = v_first_order, s = "lambda.min")
  bctm_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true_mu[s == 3])
  bct_mu <- predict(bctm_lasso, newx = v_first_order, s = "lambda.min")
  bcrt_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_rct[s == 3])
  bcr <- predict(bcrt_lasso, newx = v_first_order, s = "lambda.min")
  bcrt_muest_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_rct_muest[s == 3])
  bcr_muest <- predict(bcrt_muest_lasso, newx = v_first_order, s = "lambda.min")
  # Held-out (fold 4) MSE of each estimator against the target nu.
  tibble(
    "mse" = c(
      mean((conf - nu)[s == 4]^2),
      mean((pl - nu)[s == 4]^2),
      mean((bc - nu)[s == 4]^2),
      mean((bct - nu)[s == 4]^2),
      mean((bct_prop - nu)[s == 4]^2),
      mean((bct_mu - nu)[s == 4]^2),
      mean((bcr - nu)[s == 4]^2),
      mean((bcr_muest - nu)[s == 4]^2),
      mean((conf1se - nu)[s == 4]^2),
      mean((pl1se - nu)[s == 4]^2),
      mean((mu0 - nu)[s == 4]^2)
    ),
    "method" = c("conf", "pl", "bc", "bct", "bc_true_prop", "bc_true_mu", "bc_rt_true_mu", "bc_rt_muest", "conf1se", "pl1se", "regression_diff"),
    "sim" = sim_num,
    "prop_nnzero" = nnzero(coef(prop_lasso, s = prop_lasso$lambda.1se)),
    "mu_nnzero" = nnzero(coef(mu_lasso, s = mu_lasso$lambda.1se))
  )
}
# Persist the simulation parameters alongside the results so every run is
# self-describing.
# NOTE(review): "q" and "dim_z" record the same value; one is likely
# redundant.
saveRDS(tibble(
  "dim" = d,
  "n_in_each_fold" = n / 4,
  "q" = q,
  "dim_z" = q,
  "p" = p,
  "zeta" = zeta,
  "gamma" = gamma,
  "beta" = beta,
  "alpha_v" = alpha_v,
  "alpha_z" = alpha_z,
  "alpha" = alpha
), glue::glue(results_folder, "parameters.Rds"))
# Stack the per-replicate tibbles into one table and save.
saveRDS(bind_rows(results), glue::glue(results_folder, "results.Rds"))
# Wall-clock duration of the whole simulation.
task_time <- difftime(Sys.time(), start_time)
print(task_time) | /binary_misspec.R | no_license | mandycoston/confound_sim | R | false | false | 4,659 | r | library(tidyverse)
library(glmnet)
library(doParallel)
source("utils.R")
results_folder <- "results/highdim/binary/miss_prop_widen/"
start_time <- Sys.time()
set.seed(100)
results <- tibble()
n <- 4 * 1000
n_sim <- 500
d <- 500
q <- 400 # dimension of hidden confounder z
p <- d - q # dimension of v
gamma <- 4*3 # number of non-zero predictors in v
beta <- 50
zeta <- beta-gamma # number of non-zero predictors in z
alpha <- 40 # sparsity in propensity
alpha_z <- 35
alpha_v <- 5
s <- sort(rep(1:4, n / 4))
# parallelize
registerDoParallel(cores = 48)
results <- foreach(sim_num = 1:n_sim) %dopar% {
v_first_order <- matrix(rnorm(n * p/2), n, p/2)
v_second_order <- v_first_order^2
z <- matrix(rnorm(n * q), n, q)
x <- cbind(z, v_first_order, v_second_order)
mu0 <- sigmoid(as.numeric(z %*% rep(c(1, 0), c(zeta, q - zeta)) +
v_second_order %*% c(rep(c(1, -1), gamma/4), rep(0,p/2 - gamma/2))+
v_first_order %*% rep(c(1, 0), c(gamma/2, p/2 - gamma/2)))/sqrt(beta*0.02))
nu <- sigmoid(as.numeric(v_second_order %*% c(rep(c(1, -1), gamma/4), rep(0,p/2 - gamma/2))+
v_first_order %*% rep(c(1, 0), c(gamma/2, p/2 - gamma/2)))/sqrt(beta*0.02))
prop <- sigmoid(as.numeric(x %*% rep(c(1, 0, 1, 0), c(alpha_z, q - alpha_z, alpha_v, p - alpha_v))) / sqrt(alpha))
a <- rbinom(n, 1, prop)
y0 <- rbinom(n, 1, mu0)
# qplot(mu0[((s == 2) & (a == 0))])
# stage 1
mu_lasso <- cv.glmnet(x[((s == 2) & (a == 0)), ], y0[((s == 2) & (a == 0))], family = "binomial")
muhat <- as.numeric(predict(mu_lasso, newx = x, type = "response", s = "lambda.min"))
prop_lasso <- cv.glmnet(x[s == 1, ], a[s == 1], family = "binomial")
prophat <- as.numeric(predict(prop_lasso, newx = x, type = "response", s = "lambda.min"))
bchat <- (1 - a) * (y0 - muhat) / (1 - prophat) + muhat
bc_true <- (1 - a) * (y0 - mu0) / (1 - prop) + mu0
bc_true_prop <- (1 - a) * (y0 - muhat) / (1 - prop) + muhat
bc_true_mu <- (1 - a) * (y0 - mu0) / (1 - prophat) + mu0
bc_rct <- (1 - a) * (y0 - mu0) / (1 - mean(a)) + mu0
bc_rct_muest <- (1 - a) * (y0 - muhat) / (1 - mean(a)) + muhat
# stage 2
conf_lasso <- cv.glmnet(v_first_order[((s == 3) & (a == 0)), ], y0[((s == 3) & (a == 0))], family = "binomial")
conf <- predict(conf_lasso, newx = v_first_order, s = "lambda.min", type = "response")
conf1se <- predict(conf_lasso, newx = v_first_order, type = "response")
pl_lasso <- cv.glmnet(v_first_order[s == 3, ], muhat[s == 3])
pl <- predict(pl_lasso, newx = v_first_order, s = "lambda.min")
pl1se <- predict(pl_lasso, newx = v_first_order)
bc_lasso <- cv.glmnet(v_first_order[s == 3, ], bchat[s == 3])
bc <- predict(bc_lasso, newx = v_first_order, s = "lambda.min")
bct_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true[s == 3])
bct <- predict(bct_lasso, newx = v_first_order, s = "lambda.min")
bctp_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true_prop[s == 3])
bct_prop <- predict(bctp_lasso, newx = v_first_order, s = "lambda.min")
bctm_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_true_mu[s == 3])
bct_mu <- predict(bctm_lasso, newx = v_first_order, s = "lambda.min")
bcrt_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_rct[s == 3])
bcr <- predict(bcrt_lasso, newx = v_first_order, s = "lambda.min")
bcrt_muest_lasso <- cv.glmnet(v_first_order[s == 3, ], bc_rct_muest[s == 3])
bcr_muest <- predict(bcrt_muest_lasso, newx = v_first_order, s = "lambda.min")
tibble(
"mse" = c(
mean((conf - nu)[s == 4]^2),
mean((pl - nu)[s == 4]^2),
mean((bc - nu)[s == 4]^2),
mean((bct - nu)[s == 4]^2),
mean((bct_prop - nu)[s == 4]^2),
mean((bct_mu - nu)[s == 4]^2),
mean((bcr - nu)[s == 4]^2),
mean((bcr_muest - nu)[s == 4]^2),
mean((conf1se - nu)[s == 4]^2),
mean((pl1se - nu)[s == 4]^2),
mean((mu0 - nu)[s == 4]^2)
),
"method" = c("conf", "pl", "bc", "bct", "bc_true_prop", "bc_true_mu", "bc_rt_true_mu", "bc_rt_muest", "conf1se", "pl1se", "regression_diff"),
"sim" = sim_num,
"prop_nnzero" = nnzero(coef(prop_lasso, s = prop_lasso$lambda.1se)),
"mu_nnzero" = nnzero(coef(mu_lasso, s = mu_lasso$lambda.1se))
)
}
saveRDS(tibble(
"dim" = d,
"n_in_each_fold" = n / 4,
"q" = q,
"dim_z" = q,
"p" = p,
"zeta" = zeta,
"gamma" = gamma,
"beta" = beta,
"alpha_v" = alpha_v,
"alpha_z" = alpha_z,
"alpha" = alpha
), glue::glue(results_folder, "parameters.Rds"))
saveRDS(bind_rows(results), glue::glue(results_folder, "results.Rds"))
task_time <- difftime(Sys.time(), start_time)
print(task_time) |
# Run the RUnit suite for the asset-number allocation tests.
library('RUnit')
# NOTE(review): absolute Windows path makes this script machine-specific;
# a path relative to the project root would be more portable.
setwd("E://ACUO/projects/acuo-allocation/test/v0.0.2")
# The suite picks up assetNumberTests.R from the testAssetNumber directory.
test.suite = defineTestSuite("example",
                             dirs = file.path("testAssetNumber"),
                             testFileRegexp = 'assetNumberTests.R')
test.result <- runTestSuite(test.suite)
# Print a human-readable summary of the test results.
printTextProtocol(test.result)
| /test/v0.0.2/testAssetNumber.R | no_license | AcuoFS/acuo-allocation | R | false | false | 320 | r | library('RUnit')
setwd("E://ACUO/projects/acuo-allocation/test/v0.0.2")
test.suite = defineTestSuite("example",
dirs = file.path("testAssetNumber"),
testFileRegexp = 'assetNumberTests.R')
test.result <- runTestSuite(test.suite)
printTextProtocol(test.result)
|
# Metadata retrieval test
# source libraries
library(RJSONIO)
library(RCurl)
library(matlab)
crop_metadata <- function(metadata, exclude, unique_rows = FALSE) {
  # Drop unwanted columns from a tab-delimited metadata file.
  #
  # metadata:    path to a tab-delimited file whose FIRST ROW holds the
  #              field names (read with header = FALSE, so names stay in
  #              row 1 of the table).
  # exclude:     character vector of patterns; any column whose row-1 name
  #              matches one (via grep) is removed.
  # unique_rows: if TRUE, also drop columns (except the last) whose data
  #              rows all share a single value, i.e. carry no information.
  #
  # Side effects: writes the reduced table to "cropped_metadata" in the
  # working directory and exports the raw input as the global
  # `metadata_orginal` (name kept, typo and all, for backward compatibility).
  metadata <- read.delim(metadata, header = FALSE)
  metadata_orginal <<- metadata
  changeddata <- metadata
  if (unique_rows) {
    constant_cols <- c()
    # The last column is intentionally never considered for removal,
    # matching the original behavior.
    for (i in seq_len(ncol(metadata) - 1)) {
      if (length(unique(metadata[2:nrow(metadata), i])) == 1) {
        constant_cols <- c(constant_cols, i)
      }
    }
    # Guard: subsetting with an empty index (df[-integer(0)]) would select
    # ZERO columns, so only subset when something was actually flagged.
    if (length(constant_cols) > 0) {
      changeddata <- metadata[-constant_cols]
    }
  }
  for (pattern in exclude) {
    header_names <- as.character(unlist(changeddata[1, ]))
    matched <- grep(pattern, header_names)
    # BUG FIX: the original used `<<-` here, which updated a GLOBAL copy of
    # changeddata while the local one (written below) never lost the matched
    # columns -- the exclude filtering was silently discarded. Plain `<-`
    # keeps the filtering in the output. Also guard against zero matches.
    if (length(matched) > 0) {
      changeddata <- changeddata[-matched]
    }
  }
  write.table(changeddata, "cropped_metadata", sep = "\t",
              row.names = FALSE, col.names = FALSE, quote = FALSE)
}
# List the top-level expandable metadata sections ("endpoints") available
# under the GDC /cases/_mapping API.
get_mapping_options <- function(){
  #returns a vector of endpoints under mapping/expand
  # Shell out to curl and cache the mapping JSON in the working directory.
  # NOTE(review): this requires `curl` on PATH and leaves mapping_JSON.txt
  # behind; RCurl::getURL (already used elsewhere in this file) could fetch
  # it without the temp file.
  system("curl 'https://gdc-api.nci.nih.gov/cases/_mapping' > mapping_JSON.txt")
  endpoints <- fromJSON("mapping_JSON.txt")["expand"]
  #print(endpoints)
  nodes <- c()
  # Keep only the section name before the first "." of each expand entry
  # (e.g. "diagnoses.treatments" -> "diagnoses").
  for (i in 1:length(endpoints$expand)){
    a <- unlist(as.vector(strsplit(endpoints$expand[i],"\\.")))
    nodes <- c(nodes,a[1])
  }
  nodes <- unique(nodes)
  return(nodes)
}
# Cache the endpoint list once at load time (hitting the API per call is slow).
endpoints <- get_mapping_options()
############################
### ### ### MAIN ### ### ###
############################
# Retrieve GDC case metadata for a list of file UUIDs and assemble it into a
# fields-by-samples matrix.
#
# id_list:       path to a text file of GDC file UUIDs (whitespace-separated).
# my_rot:        "yes" rotates the final matrix 90 degrees.
# output:        "file" writes a tab-delimited table; anything else returns
#                the matrix.
# order_rows / order_columns: forwarded to combine_matrices_by_column().
# verbose:       currently unused (its only print is commented out below).
# debug:         print checkpoint messages and export merge intermediates.
#
# Relies on helpers not defined in this chunk (flatten_list,
# convert_fileUUID_to_name, combine_matrices_by_column, export_data) --
# presumably sourced alongside this script; verify before reuse.
get_GDC_metadata <- function(id_list, my_rot="no", output="file", order_rows=TRUE, order_columns=TRUE, verbose=FALSE, debug=FALSE){
  # import list of ids
  my_ids <- flatten_list(as.list(scan(file=id_list, what="character")))
  metadata_matrix <- matrix()
  for (i in 1:length(my_ids)){
    #if(verbose==TRUE){print(paste("Processing sample (", i, ")"))}
    print(paste("Processing sample (", i, ":", my_ids[i], ")"))
    raw_metadata_vector <- vector()
    # NOTE(review): missing "()" -- this assigns the *function* base::vector,
    # which later becomes a junk first element of unique_metadata_vector; the
    # metadata_matrix[-1,] near the bottom exists to strip that junk row.
    # Fixing this to vector() would also require removing that workaround.
    unique_metadata_vector <- vector
    if(debug==TRUE){print("Made it here (1)")}
    ###########CHANGED
    # Query every expandable section for this id.
    # NOTE(review): re-fetching the endpoint list on every iteration is slow;
    # the module-level `endpoints` computed at load time could be reused.
    endpoints <- get_mapping_options()
    # this line is under the library commands in order to perform it once
    for (endpoint in endpoints){
      temp <- vector()
      # Retry up to 50 times, 10 s apart: an atomic result means the API
      # returned an error string rather than a parsed record list.
      for(j in c(1:50)){
        temp <- metadata_cases(my_id=my_ids[i], dict=endpoint)
        if(is.atomic(temp))
          Sys.sleep(10)
        else
          break
      }
      raw_metadata_vector <- c(raw_metadata_vector,
                               flatten_list(temp$data))
    }
    ###########END CHANGED
    if(debug==TRUE){print("Made it here (2)")}
    # De-duplicate repeated field names by suffixing ".1", ".2", ... and keep
    # only unique (name, value) pairs per field.
    for (j in 1:length(unique(names(raw_metadata_vector)))){
      my_name <- unique(names(raw_metadata_vector))[j]
      #print(my_name)
      unique_name_value_pairs <- unique( raw_metadata_vector[ which( names(raw_metadata_vector) == my_name ) ] )
      name_list <- vector()
      #names(unique_name_value_pairs) <- rep(my_name, length(unique_name_value_pairs))
      for (k in 1:length(unique_name_value_pairs)){
        name_list <- c(name_list, (paste( my_name, ".", k ,sep="")))
      }
      names(unique_name_value_pairs) <- name_list
      unique_metadata_vector <- c(unique_metadata_vector, unique_name_value_pairs)
    }
    if(debug==TRUE){print("Made it here (3)")}
    if( i==1 ){ # on first sample, create the data matrix
      metadata_matrix <- matrix(unique_metadata_vector, ncol=1)
      rownames(metadata_matrix) <- names(unique_metadata_vector)
      colnames(metadata_matrix) <- convert_fileUUID_to_name(my_ids[i])
      if(debug==TRUE){print("Made it here (3.1)")}
    }else{ # for all additional samples add on to the existing matrix
      if(debug==TRUE){print("Made it here (3.2)")}
      sample_metadata_matrix <- matrix(unique_metadata_vector, ncol=1)
      if(debug==TRUE){print("Made it here (3.3)")}
      rownames(sample_metadata_matrix) <- names(unique_metadata_vector)
      if(debug==TRUE){print("Made it here (3.4)")}
      colnames(sample_metadata_matrix) <- convert_fileUUID_to_name(my_ids[i])
      if(debug==TRUE){
        print("Made it here (3.5)")
        # Export both merge operands globally so a failed merge can be
        # inspected interactively.
        matrix1 <<- metadata_matrix
        matrix2 <<- sample_metadata_matrix
      }
      if(debug==TRUE){print("Made it here (3.6)")}
      # place merge code here
      # Note - merging changes class of metadata_matrix from "matrix" to "data frame"; it's converted back below
      metadata_matrix <- combine_matrices_by_column(matrix1=metadata_matrix, matrix2=sample_metadata_matrix, func_order_rows=order_rows, func_order_columns=order_columns, func_debug=debug)
      if(debug==TRUE){
        print("Made it here (3.7)")
        print(paste("DATA_CLASS:", class(metadata_matrix)))
      }
    }
  }
  #ADDED
  #print(head(metadata_matrix))
  # Strips the junk first row introduced by the `vector` assignment above.
  metadata_matrix <- metadata_matrix[-1,]
  # temporary edit to get rid of junk metadata that was screwing up the table
  #END ADDED
  # convert data product from data.frame back to matrix
  metadata_matrix <- as.matrix(metadata_matrix)
  #ADDED
  #sorts matrix by name
  #metadata_matrix <- metadata_matrix[, order(colnames(metadata_matrix))]
  if(debug==TRUE){print("Made it here (4)")}
  # rotate the matrix if that option is selected
  if( identical(my_rot, "yes")==TRUE ){
    metadata_matrix <- rot90(rot90(rot90(metadata_matrix)))
  }
  if(debug==TRUE){print("Made it here (5)")}
  # output the matrix as a flat file (default) or return as matrix
  # create name for the output file
  if( identical(output, "file")==TRUE ){
    # date_tag is computed but no longer used in the file name (see the
    # commented-out output_name variant below).
    date_tag <- gsub(" ", "_", date())
    date_tag <- gsub("__", "_", date_tag)
    date_tag <- gsub(":", "-", date_tag)
    #output_name =paste(id_list, ".", date_tag, ".GDC_METADATA.txt", sep="")
    output_name = gsub(" ", "", paste(id_list,".GDC_METADATA.txt"))
    if(debug==TRUE){ print(paste("FILE_OUT: ", output_name)) }
    export_data(metadata_matrix, output_name)
  }else{
    return(metadata_matrix)
  }
  if(debug==TRUE){print("Made it here (6)")}
}
############################
############################
############################
############################
### ### ### SUBS ### ### ###
############################
# Query the GDC "cases" endpoint for the case record attached to one file UUID.
#
# Args:
#   before_id: URL prefix (percent-encoded filter JSON) up to the file UUID.
#   after_id:  URL suffix between the UUID and the "expand=" field list.
#   my_id:     GDC file UUID to look up.
#   dict:      mapping endpoint name appended after "expand=" (e.g. "demographic").
#
# Returns: the parsed JSON response as a nested R list
#          (RCurl::getURL + RJSONIO/RCurl-style fromJSON).
metadata_cases <- function(before_id="https://gdc-api.nci.nih.gov/cases?filters=%7b%0d%0a+++%22op%22+%3a+%22%3d%22+%2c%0d%0a+++%22content%22+%3a+%7b%0d%0a+++++++%22field%22+%3a+%22files.file_id%22+%2c%0d%0a+++++++%22value%22+%3a+%5b+%22",
                           after_id="%22+%5d%0d%0a+++%7d%0d%0a%7d&pretty=true&fields=case_id&expand=",
                           my_id="07218202-2cd3-4db1-93e7-071879e36f27",
                           dict="") {
  # Assemble the request URL; gsub strips the separator spaces that paste()
  # inserts between its arguments (the URL pieces contain no spaces).
  request_url <- gsub(" ", "", paste(before_id, my_id, after_id, dict), fixed = TRUE)
  # Fetch and parse the JSON payload.
  fromJSON(getURL(request_url))
}
# Write a matrix or data frame to a tab-delimited text file.
# col.names = NA emits a leading empty header cell so the header row lines
# up with the row-name column (standard "matrix dump" layout).
export_data <- function(data_object, file_name){
  write.table(
    data_object,
    file      = file_name,
    quote     = FALSE,
    sep       = "\t",
    eol       = "\n",
    row.names = TRUE,
    col.names = NA
  )
}
# Flatten a nested list into a named character vector, stripping the
# carriage-return / newline / tab characters that would otherwise corrupt
# the tab-delimited output files.
#
# Args:
#   some_list: arbitrarily nested list (e.g. parsed JSON).
#
# Returns: named vector with all "\r", "\n" and "\t" characters removed.
flatten_list <- function(some_list){
  flat_list <- unlist(some_list)
  # One pass over all three whitespace escapes (original ran three gsub calls).
  flat_list <- gsub("[\r\n\t]", "", flat_list)
  # Explicit return; the original relied on the invisible value of its
  # final assignment statement.
  return(flat_list)
}
# Merge two matrices on their ROW names (full outer join): the result has
# the union of the row names and the concatenation of the columns, with NA
# where a row is absent from one input.
#
# Args:
#   matrix1, matrix2:   matrices with row and column names.
#   func_order_rows:    sort the combined rows by row name?
#   func_order_columns: sort the combined columns by column name?
#   func_debug:         print checkpoints / export intermediates to the global
#                       environment.  NOTE: the default references a global
#                       `debug` object, so callers omitting this argument need
#                       `debug` defined in scope (kept for call compatibility).
#
# Returns: a data.frame (merge() coerces away the matrix class); callers
#          convert back with as.matrix() when needed.
combine_matrices_by_column <- function(matrix1, matrix2, func_order_rows=TRUE, func_order_columns=TRUE, func_debug=debug){
  # Full outer join on row names; the key arrives as a "Row.names" column.
  comb_matrix <- merge(data.frame(matrix1), data.frame(matrix2), by="row.names", all=TRUE)
  if(func_debug==TRUE){
    print("Made it here (3.6.1)")
    print(paste("MATRIX_1", dim(matrix1)))
    print(paste("MATRIX_2", dim(matrix2)))
    print(paste("MATRIX_C", dim(comb_matrix)))
    matrix3 <<- comb_matrix   # export intermediate for interactive inspection
  }
  # Promote the merge key back to real row names.
  rownames(comb_matrix) <- comb_matrix$Row.names
  comb_matrix$Row.names <- NULL
  if(func_debug==TRUE){
    # BUGFIX: this global export previously ran unconditionally, silently
    # creating/overwriting `matrix4` in the caller's global environment.
    matrix4 <<- comb_matrix
  }
  # merge() suffixes duplicate column names (.x/.y); restore the originals.
  colnames(comb_matrix) <- c(colnames(matrix1), colnames(matrix2))
  if( func_order_rows==TRUE){
    comb_matrix <- comb_matrix[order(rownames(comb_matrix)), , drop=FALSE]
  }
  if( func_order_columns==TRUE){
    comb_matrix <- comb_matrix[, order(colnames(comb_matrix)), drop=FALSE]
  }
  return(comb_matrix)
}
# Stack two matrices row-wise, aligning them on COLUMN names (full outer
# join on columns): the result has the union of the column names and the
# concatenation of the rows, with NA where a column is absent from one input.
#
# BUGFIX: the original called merge(..., by="col.names"), which is not a
# supported merge key (only "row.names"/0 is special-cased), so this function
# always failed.  The join is now performed on the transposed matrices.
#
# Args:
#   matrix1, matrix2:   matrices with row and column names.
#   pseudo_fudge:       unused; retained for call compatibility.
#   func_order_rows:    sort the combined rows by row name?
#   func_order_columns: sort the combined columns by column name?
#   func_debug:         unused; retained for call compatibility (its default
#                       references a global `debug` but is never evaluated).
#
# Returns: a matrix holding the combined data.
combine_matrices_by_row <- function(matrix1, matrix2, pseudo_fudge=10000, func_order_rows=TRUE, func_order_columns=TRUE, func_debug=debug){
  # Join on column names by merging the transposes on their row names.
  comb_t <- merge(data.frame(t(matrix1)), data.frame(t(matrix2)), by="row.names", all=TRUE)
  rownames(comb_t) <- comb_t$Row.names
  comb_t$Row.names <- NULL
  # merge()/data.frame() mangle duplicate names; restore the true row names.
  colnames(comb_t) <- c(rownames(matrix1), rownames(matrix2))
  # Transpose back: rows = rows of matrix1 then matrix2, cols = union of columns.
  comb_matrix <- t(as.matrix(comb_t))
  if( func_order_rows==TRUE){
    comb_matrix <- comb_matrix[order(rownames(comb_matrix)), , drop=FALSE]
  }
  if( func_order_columns==TRUE){
    comb_matrix <- comb_matrix[, order(colnames(comb_matrix)), drop=FALSE]
  }
  return(comb_matrix)
}
# Look up the file_name for a GDC file UUID and return its first 36
# characters (the UUID-length prefix used as a sample/column label).
convert_fileUUID_to_name <- function(UUID) {
  url_prefix <- "https://gdc-api.nci.nih.gov/files/"
  url_suffix <- "?&fields=file_name&pretty=true"
  # paste() inserts spaces between its pieces; gsub strips them back out.
  request_url <- gsub(" ", "", paste(url_prefix, UUID, url_suffix), fixed = TRUE)
  response <- fromJSON(getURL(request_url))
  substr(unname(response$data), 1, 36)
}
############################
############################
############################
############################
### ### ## NOTES ### ### ###
############################
## length(my_metadata)
## length(unique(my_metadata))
## length(names(my_metadata))
## length(unique(names(my_metadata)))
## length(unique_metadata)
## length(unique(unique_metadata))
## length(names(unique_metadata))
## length(unique(names(unique_metadata)))
############################
############################
############################
| /GDC_metadata_download.RandM.r | no_license | RonaldHShi/Ronald-and-Mert | R | false | false | 10,956 | r | # Metadata retrieval test
# source libraries
library(RJSONIO)
library(RCurl)
library(matlab)
# Crop a tab-delimited metadata table (as written by get_GDC_metadata).
#
# Args:
#   metadata:    path to the tab-delimited metadata file (no header row;
#                row 1 holds the field labels).
#   exclude:     character vector of patterns; any column whose row-1 label
#                matches a pattern (grep) is dropped.
#   unique_rows: if TRUE, first drop columns whose data rows (rows 2..n)
#                hold a single repeated value.
#
# Side effects: writes the cropped table to "cropped_metadata" in the
#               working directory and exports the raw table to the global
#               `metadata_orginal` (legacy name/typo preserved).
crop_metadata <- function(metadata,exclude,unique_rows=FALSE){
  metadata <- read.delim(metadata, header = FALSE)
  metadata_orginal <<- metadata   # legacy global export for inspection
  changeddata <- metadata
  if(unique_rows){
    thrash <- c()
    # NOTE: scans all but the last column, matching the original behavior.
    for (i in 1:(length(metadata[1,])-1)){
      if(length(unique(metadata[2:nrow(metadata),i]))==1){
        thrash <- c(thrash,i)
      }
    }
    # BUGFIX: guard the empty case -- df[-integer(0)] drops EVERY column.
    if(length(thrash) > 0){
      changeddata <- metadata[-thrash]
    }
  }
  for (x in exclude){
    labels <- as.character(unlist(changeddata[1,]))
    thrash2 <- grep(x, labels)
    # BUGFIX: the original used `<<-` here; superassignment skips the local
    # frame, so the exclusions never reached the written output (and leaked
    # a `changeddata` copy into the global environment).
    if(length(thrash2) > 0){
      changeddata <- changeddata[-thrash2]
    }
  }
  write.table(changeddata,"cropped_metadata",sep="\t",row.names = F,col.names = F,quote = F)
}
# Return the unique top-level endpoint names listed under "expand" in the
# GDC cases mapping (e.g. "demographic" from "demographic.xyz").
# Side effects: shells out to curl and writes mapping_JSON.txt in the
# working directory.
get_mapping_options <- function() {
  system("curl 'https://gdc-api.nci.nih.gov/cases/_mapping' > mapping_JSON.txt")
  mapping <- fromJSON("mapping_JSON.txt")["expand"]
  # Keep only the text before the first "." of each expand entry.
  first_piece <- vapply(
    mapping$expand,
    function(entry) strsplit(entry, ".", fixed = TRUE)[[1]][1],
    character(1)
  )
  unique(unname(first_piece))
}
endpoints <- get_mapping_options()
############################
### ### ### MAIN ### ### ###
############################
# Download GDC case metadata for each file UUID listed in `id_list` and
# assemble it into a fields-by-samples matrix.
#
# Args:
#   id_list:       path to a whitespace-delimited file of GDC file UUIDs.
#   my_rot:        "yes" rotates the finished matrix 90 degrees clockwise.
#   output:        "file" writes "<id_list>.GDC_METADATA.txt"; any other
#                  value returns the matrix to the caller.
#   order_rows:    alphabetize the metadata fields (rows)?
#   order_columns: alphabetize the samples (columns)?
#   verbose:       retained for call compatibility (currently unused).
#   debug:         print checkpoints and export intermediates to the
#                  global environment.
#
# Returns: the metadata matrix, unless output == "file" (then written to disk).
get_GDC_metadata <- function(id_list, my_rot="no", output="file", order_rows=TRUE, order_columns=TRUE, verbose=FALSE, debug=FALSE){
  # import list of ids
  my_ids <- flatten_list(as.list(scan(file=id_list, what="character")))
  metadata_matrix <- matrix()
  # Fetch the expandable endpoint names once; they are the same for every
  # sample (the original re-downloaded the mapping inside the sample loop).
  endpoints <- get_mapping_options()
  for (i in 1:length(my_ids)){
    print(paste("Processing sample (", i, ":", my_ids[i], ")"))
    raw_metadata_vector <- vector()
    # BUGFIX: this was `<- vector` (no parentheses), which stored the
    # *function* `vector` and injected a junk first row into every sample;
    # a later `metadata_matrix[-1,]` hack compensated for it.
    unique_metadata_vector <- vector()
    if(debug==TRUE){print("Made it here (1)")}
    for (endpoint in endpoints){
      temp <- vector()
      # Retry (up to 50 times, 10 s apart): the API intermittently returns
      # an atomic error payload instead of the expected list.
      for(j in c(1:50)){
        temp <- metadata_cases(my_id=my_ids[i], dict=endpoint)
        if(is.atomic(temp))
          Sys.sleep(10)
        else
          break
      }
      raw_metadata_vector <- c(raw_metadata_vector,
                               flatten_list(temp$data))
    }
    if(debug==TRUE){print("Made it here (2)")}
    # Deduplicate repeated field names: keep each field's unique values and
    # suffix the names with ".1", ".2", ... so row names stay unique.
    for (j in 1:length(unique(names(raw_metadata_vector)))){
      my_name <- unique(names(raw_metadata_vector))[j]
      unique_name_value_pairs <- unique( raw_metadata_vector[ which( names(raw_metadata_vector) == my_name ) ] )
      name_list <- vector()
      for (k in 1:length(unique_name_value_pairs)){
        name_list <- c(name_list, paste( my_name, ".", k, sep=""))
      }
      names(unique_name_value_pairs) <- name_list
      unique_metadata_vector <- c(unique_metadata_vector, unique_name_value_pairs)
    }
    if(debug==TRUE){print("Made it here (3)")}
    if( i==1 ){ # on first sample, create the data matrix
      metadata_matrix <- matrix(unique_metadata_vector, ncol=1)
      rownames(metadata_matrix) <- names(unique_metadata_vector)
      colnames(metadata_matrix) <- convert_fileUUID_to_name(my_ids[i])
    }else{ # for all additional samples, outer-join onto the existing matrix
      sample_metadata_matrix <- matrix(unique_metadata_vector, ncol=1)
      rownames(sample_metadata_matrix) <- names(unique_metadata_vector)
      colnames(sample_metadata_matrix) <- convert_fileUUID_to_name(my_ids[i])
      if(debug==TRUE){
        matrix1 <<- metadata_matrix          # exports for interactive inspection
        matrix2 <<- sample_metadata_matrix
      }
      # Note - merging changes the class from "matrix" to "data.frame";
      # it is converted back after the loop.
      metadata_matrix <- combine_matrices_by_column(matrix1=metadata_matrix, matrix2=sample_metadata_matrix, func_order_rows=order_rows, func_order_columns=order_columns, func_debug=debug)
    }
  }
  # (The former `metadata_matrix <- metadata_matrix[-1,]` "junk row" hack is
  # gone: it only existed to strip the row created by the `vector` bug above.)
  # convert the data product from data.frame back to matrix
  metadata_matrix <- as.matrix(metadata_matrix)
  if(debug==TRUE){print("Made it here (4)")}
  # rotate the matrix if that option is selected
  if( identical(my_rot, "yes")==TRUE ){
    metadata_matrix <- rot90(rot90(rot90(metadata_matrix)))
  }
  if(debug==TRUE){print("Made it here (5)")}
  # output the matrix as a flat file (default) or return it as a matrix
  if( identical(output, "file")==TRUE ){
    output_name = gsub(" ", "", paste(id_list, ".GDC_METADATA.txt"))
    if(debug==TRUE){ print(paste("FILE_OUT: ", output_name)) }
    export_data(metadata_matrix, output_name)
  }else{
    return(metadata_matrix)
  }
}
############################
############################
############################
############################
### ### ### SUBS ### ### ###
############################
# Query the GDC "cases" endpoint for the case record attached to one file UUID.
#
# Args:
#   before_id: URL prefix (percent-encoded filter JSON) up to the file UUID.
#   after_id:  URL suffix between the UUID and the "expand=" field list.
#   my_id:     GDC file UUID to look up.
#   dict:      mapping endpoint name appended after "expand=" (e.g. "demographic").
#
# Returns: the parsed JSON response as a nested R list
#          (RCurl::getURL + RJSONIO/RCurl-style fromJSON).
metadata_cases <- function(before_id="https://gdc-api.nci.nih.gov/cases?filters=%7b%0d%0a+++%22op%22+%3a+%22%3d%22+%2c%0d%0a+++%22content%22+%3a+%7b%0d%0a+++++++%22field%22+%3a+%22files.file_id%22+%2c%0d%0a+++++++%22value%22+%3a+%5b+%22",
                           after_id="%22+%5d%0d%0a+++%7d%0d%0a%7d&pretty=true&fields=case_id&expand=",
                           my_id="07218202-2cd3-4db1-93e7-071879e36f27",
                           dict="") {
  # Assemble the request URL; gsub strips the separator spaces that paste()
  # inserts between its arguments (the URL pieces contain no spaces).
  request_url <- gsub(" ", "", paste(before_id, my_id, after_id, dict), fixed = TRUE)
  # Fetch and parse the JSON payload.
  fromJSON(getURL(request_url))
}
# Write a matrix or data frame to a tab-delimited text file.
# col.names = NA emits a leading empty header cell so the header row lines
# up with the row-name column (standard "matrix dump" layout).
export_data <- function(data_object, file_name){
  write.table(
    data_object,
    file      = file_name,
    quote     = FALSE,
    sep       = "\t",
    eol       = "\n",
    row.names = TRUE,
    col.names = NA
  )
}
# Flatten a nested list into a named character vector, stripping the
# carriage-return / newline / tab characters that would otherwise corrupt
# the tab-delimited output files.
#
# Args:
#   some_list: arbitrarily nested list (e.g. parsed JSON).
#
# Returns: named vector with all "\r", "\n" and "\t" characters removed.
flatten_list <- function(some_list){
  flat_list <- unlist(some_list)
  # One pass over all three whitespace escapes (original ran three gsub calls).
  flat_list <- gsub("[\r\n\t]", "", flat_list)
  # Explicit return; the original relied on the invisible value of its
  # final assignment statement.
  return(flat_list)
}
# Merge two matrices on their ROW names (full outer join): the result has
# the union of the row names and the concatenation of the columns, with NA
# where a row is absent from one input.
#
# Args:
#   matrix1, matrix2:   matrices with row and column names.
#   func_order_rows:    sort the combined rows by row name?
#   func_order_columns: sort the combined columns by column name?
#   func_debug:         print checkpoints / export intermediates to the global
#                       environment.  NOTE: the default references a global
#                       `debug` object, so callers omitting this argument need
#                       `debug` defined in scope (kept for call compatibility).
#
# Returns: a data.frame (merge() coerces away the matrix class); callers
#          convert back with as.matrix() when needed.
combine_matrices_by_column <- function(matrix1, matrix2, func_order_rows=TRUE, func_order_columns=TRUE, func_debug=debug){
  # Full outer join on row names; the key arrives as a "Row.names" column.
  comb_matrix <- merge(data.frame(matrix1), data.frame(matrix2), by="row.names", all=TRUE)
  if(func_debug==TRUE){
    print("Made it here (3.6.1)")
    print(paste("MATRIX_1", dim(matrix1)))
    print(paste("MATRIX_2", dim(matrix2)))
    print(paste("MATRIX_C", dim(comb_matrix)))
    matrix3 <<- comb_matrix   # export intermediate for interactive inspection
  }
  # Promote the merge key back to real row names.
  rownames(comb_matrix) <- comb_matrix$Row.names
  comb_matrix$Row.names <- NULL
  if(func_debug==TRUE){
    # BUGFIX: this global export previously ran unconditionally, silently
    # creating/overwriting `matrix4` in the caller's global environment.
    matrix4 <<- comb_matrix
  }
  # merge() suffixes duplicate column names (.x/.y); restore the originals.
  colnames(comb_matrix) <- c(colnames(matrix1), colnames(matrix2))
  if( func_order_rows==TRUE){
    comb_matrix <- comb_matrix[order(rownames(comb_matrix)), , drop=FALSE]
  }
  if( func_order_columns==TRUE){
    comb_matrix <- comb_matrix[, order(colnames(comb_matrix)), drop=FALSE]
  }
  return(comb_matrix)
}
# Stack two matrices row-wise, aligning them on COLUMN names (full outer
# join on columns): the result has the union of the column names and the
# concatenation of the rows, with NA where a column is absent from one input.
#
# BUGFIX: the original called merge(..., by="col.names"), which is not a
# supported merge key (only "row.names"/0 is special-cased), so this function
# always failed.  The join is now performed on the transposed matrices.
#
# Args:
#   matrix1, matrix2:   matrices with row and column names.
#   pseudo_fudge:       unused; retained for call compatibility.
#   func_order_rows:    sort the combined rows by row name?
#   func_order_columns: sort the combined columns by column name?
#   func_debug:         unused; retained for call compatibility (its default
#                       references a global `debug` but is never evaluated).
#
# Returns: a matrix holding the combined data.
combine_matrices_by_row <- function(matrix1, matrix2, pseudo_fudge=10000, func_order_rows=TRUE, func_order_columns=TRUE, func_debug=debug){
  # Join on column names by merging the transposes on their row names.
  comb_t <- merge(data.frame(t(matrix1)), data.frame(t(matrix2)), by="row.names", all=TRUE)
  rownames(comb_t) <- comb_t$Row.names
  comb_t$Row.names <- NULL
  # merge()/data.frame() mangle duplicate names; restore the true row names.
  colnames(comb_t) <- c(rownames(matrix1), rownames(matrix2))
  # Transpose back: rows = rows of matrix1 then matrix2, cols = union of columns.
  comb_matrix <- t(as.matrix(comb_t))
  if( func_order_rows==TRUE){
    comb_matrix <- comb_matrix[order(rownames(comb_matrix)), , drop=FALSE]
  }
  if( func_order_columns==TRUE){
    comb_matrix <- comb_matrix[, order(colnames(comb_matrix)), drop=FALSE]
  }
  return(comb_matrix)
}
# Look up the file_name for a GDC file UUID and return its first 36
# characters (the UUID-length prefix used as a sample/column label).
convert_fileUUID_to_name <- function(UUID) {
  url_prefix <- "https://gdc-api.nci.nih.gov/files/"
  url_suffix <- "?&fields=file_name&pretty=true"
  # paste() inserts spaces between its pieces; gsub strips them back out.
  request_url <- gsub(" ", "", paste(url_prefix, UUID, url_suffix), fixed = TRUE)
  response <- fromJSON(getURL(request_url))
  substr(unname(response$data), 1, 36)
}
############################
############################
############################
############################
### ### ## NOTES ### ### ###
############################
## length(my_metadata)
## length(unique(my_metadata))
## length(names(my_metadata))
## length(unique(names(my_metadata)))
## length(unique_metadata)
## length(unique(unique_metadata))
## length(names(unique_metadata))
## length(unique(names(unique_metadata)))
############################
############################
############################
|
options(digits=4, width=70)
library(corrplot)
library(IntroCompFinR)
library(PerformanceAnalytics)
library(zoo)
# load data from file (Make sure you change the path to where you downloaded the file)
lab5returns.df = read.csv(file="/Users/cuijy/Desktop/econ424lab5_return_1.csv",
stringsAsFactors=FALSE)
# Fix to problem with the yearmon class
dates = seq(as.Date("1992-07-01"), as.Date("2000-10-01"), by="months")
lab5returns.df$Date = dates
# create zoo object
lab5returns.z = zoo(lab5returns.df[,-1], lab5returns.df$Date)
# Custom panel for multi-series zoo plots: draws each series and overlays a
# horizontal reference line at zero so sign changes are easy to read.
my.panel <- function(...) {
  lines(...)
  abline(h=0)
}
plot(lab5returns.z, lwd=2, col="blue", panel = my.panel)
# compute estimates of CER model and annualize
muhat.annual = apply(lab5returns.z,2,mean)*12
sigma2.annual = apply(lab5returns.z,2,var)*12
sigma.annual = sqrt(sigma2.annual)
covmat.annual = cov(lab5returns.z)*12
covhat.annual = cov(lab5returns.z)[1,2]*12
rhohat.annual = cor(lab5returns.z)[1,2]
mu.s = muhat.annual["rsbux"]
mu.m = muhat.annual["rmsft"]
sig2.s = sigma2.annual["rsbux"]
sig2.m = sigma2.annual["rmsft"]
sig.s = sigma.annual["rsbux"]
sig.m = sigma.annual["rmsft"]
sig.sm = covhat.annual
rho.sm = rhohat.annual
#
# create portfolios and plot
#
x.s = seq(from=-1, to=2, by=0.1)
x.m = 1 - x.s
mu.p = x.s*mu.s + x.m*mu.m
sig2.p = x.s^2 * sig2.s + x.m^2 * sig2.m + 2*x.s*x.m*sig.sm
sig.p = sqrt(sig2.p)
cbind(x.s, x.m, mu.p, sig.p, sig2.p)
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
# now compute portfolios with assets and T-bills as well as Sharpe slopes
r.f = 0.025
# T-bills + SBUX
x.s = seq(from=0, to=2, by=0.1)
mu.p.s = r.f + x.s*(mu.s - r.f)
mu.p.s
sig2.p.s = x.s*x.s*sig.s*sig.s
sig2.p.s
sig.p.s = x.s*sig.s
sig.p.s
sharpe.s = (mu.s - r.f)/sig.s
sharpe.s
# T-bills + MSFT
x.m = seq(from=0, to=2, by=0.1)
mu.p.m = r.f + x.m*(mu.m - r.f)
mu.p.m
sig2.p.m = x.m*x.m*sig.m*sig.m
sig2.p.m
sig.p.m = x.m*sig.m
sig.p.m
sharpe.m = (mu.m - r.f)/sig.m
sharpe.m
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
points(sig.p.s, mu.p.s, type="b", col="blue")
points(sig.p.m, mu.p.m, type="b", col="orange")
plot(sig.p, mu.p, type="b", pch=16, ylim=c(0, max(mu.p)), xlim=c(0, max(sig.p)),
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
points(sig.p.s, mu.p.s, type="b", col="blue")
points(sig.p.m, mu.p.m, type="b", col="orange")
# 2 compute global minimum variance portfolio
gmin.port = globalMin.portfolio(muhat.annual, covmat.annual)
gmin.port
summary(gmin.port, risk.free=0.025)
plot(gmin.port)
pie(gmin.port$weights)
sharpe.gmin = (gmin.port$er - r.f)/gmin.port$sd
sharpe.gmin
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
text(x=gmin.port$sd, y=gmin.port$er, labels="Global min", pos=4)
# Variance of the global minimum variance portfolio.
# FIX: was hard-coded as 0.327^2 (a value transcribed from one run's output);
# derive it from the estimated portfolio object instead.
sigma2.gim = gmin.port$sd^2
sigma2.gim
# compute tangency portfolio
tan.port = tangency.portfolio(muhat.annual, covmat.annual,risk.free=0.025)
tan.port
summary(tan.port,risk.free=0.025)
plot(tan.port)
pie(tan.port$weights)
# T-bills + tangency
x.t = seq(from=0, to=2, by=0.1)
mu.p.t = r.f + x.t*(tan.port$er - r.f)
mu.p.t
sig.p.t = x.t*tan.port$sd
sig.p.t
sig2.p.t = sig.p.t^2
sig2.p.t
sharpe.t = (tan.port$er - r.f)/tan.port$sd
sharpe.t
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
text(x=tan.port$sd, y=tan.port$er, labels="Tangency", pos=4)
points(sig.p.t, mu.p.t, type="b", col="blue", pch=16)
# part 2 Computing Efficient Portfolios Using Matrix Algebra
# Clear the the environment before part II
rm(list=ls())
options(digits=4, width=70)
library(zoo)
library(corrplot)
library(IntroCompFinR)
# load the data into a zoo object using the zoo function read.csv
lab5.df = read.csv(file="/Users/cuijy/Desktop/econ424lab5_return_2.csv",
stringsAsFactors=F)
colnames(lab5.df)
#
# Create zoo object from data and dates in lab5.df
#
lab5.z = zoo(x=lab5.df[, -1],
order.by=as.yearmon(lab5.df[, 1], format="%b-%y"))
start(lab5.z)
end(lab5.z)
colnames(lab5.z)
ret.mat = coredata(lab5.z)
#
# Create timePlots of data
#
# create custom panel function to draw horizontal
# line at zero in each panel of plot
my.panel <- function(...) {
lines(...)
abline(h=0)
}
plot(lab5.z, lwd=2, panel=my.panel, col="blue")
# all on the same graph
plot(lab5.z, plot.type = "single", main="lab5 returns",
col=1:4, lwd=2)
abline(h=0)
legend(x="bottomleft", legend=colnames(lab5.z), col=1:4, lwd=2)
#
# Compute pairwise scatterplots
#
pairs(coredata(lab5.z), col="blue", pch=16)
corrplot(cor(lab5.z), method="ellipse")
# clear the plots if use Rstudio
#
# Compute estimates of CER model parameters
#
muhat.vals = apply(ret.mat, 2, mean)
sigma2hat.vals = apply(ret.mat, 2, var)
sigmahat.vals = apply(ret.mat, 2, sd)
cov.mat = var(ret.mat)
cor.mat = cor(ret.mat)
covhat.vals = cov.mat[lower.tri(cov.mat)]
rhohat.vals = cor.mat[lower.tri(cor.mat)]
names(covhat.vals) <- names(rhohat.vals) <-
c("Nord,Boeing","SBUX,Boeing","MSFT,Boeing","SBUX,Nord",
"MSFT,Nord","MSFT,SBUX")
muhat.vals
sigma2hat.vals
sigmahat.vals
covhat.vals
rhohat.vals
cbind(muhat.vals,sigma2hat.vals,sigmahat.vals)
cbind(covhat.vals,rhohat.vals)
#
# Compute standard errors for estimated parameters
#
# compute estimated standard error for mean
nobs = nrow(ret.mat)
nobs
se.muhat = sigmahat.vals/sqrt(nobs)
se.muhat
cbind(muhat.vals,se.muhat)
# compute estimated standard errors for variance and sd
se.sigma2hat = sigma2hat.vals/sqrt(nobs/2)
se.sigma2hat
se.sigmahat = sigmahat.vals/sqrt(2*nobs)
se.sigmahat
cbind(sigma2hat.vals,se.sigma2hat)
cbind(sigmahat.vals,se.sigmahat)
# compute estimated standard errors for correlation
se.rhohat = (1-rhohat.vals^2)/sqrt(nobs)
se.rhohat
cbind(rhohat.vals,se.rhohat)
#
# Export means and covariance matrix to .csv file for import to Excel.
#
write.csv(muhat.vals, file="/Users/cuijy/Desktop/muhatvals.csv")
write.csv(cov.mat, file="/Users/cuijy/Desktop/covmat.csv")
#
# portfolio theory calculations
#
# compute global minimum variance portfolio with short sales
gmin.port = globalMin.portfolio(muhat.vals, cov.mat)
gmin.port
plot(gmin.port, col="blue")
# compute efficient portfolio with target return equal to highest average return
mu.target = max(muhat.vals)
e1.port = efficient.portfolio(muhat.vals, cov.mat, mu.target)
e1.port
plot(e1.port, col="blue")
gmin.port.w = gmin.port[[4]]
e1.port.w = e1.port[[4]]
cov = t(gmin.port.w)%*%cov.mat%*%e1.port.w
cov
# compute efficient portfolio with target return equal to highest average return
# but do not allow short sales
mu.target = max(muhat.vals)
e1.noshorts.port = efficient.portfolio(muhat.vals, cov.mat, mu.target, shorts=FALSE)
e1.noshorts.port
plot(e1.noshorts.port, col="blue")
# compute global minimum variance portfolio without short sales
gmin.noshort.port = globalMin.portfolio(muhat.vals, cov.mat, shorts = FALSE)
gmin.noshort.port
plot(gmin.noshort.port, col="blue")
# compute tangency portfolio with rf = 0.005
tan.port = tangency.portfolio(muhat.vals, cov.mat, risk.free=0.005)
summary(tan.port)
plot(tan.port, col="blue")
# compute tangency portfolio with rf = 0.005
tan.noshort.port = tangency.portfolio(muhat.vals, cov.mat,
risk.free=0.005, shorts = FALSE)
summary(tan.noshort.port)
plot(tan.noshort.port, col="blue")
# Plot the efficient frontier.
# BUGFIX: `e.frontier` was plotted below but never computed anywhere in the
# script; build it from the CER estimates with IntroCompFinR::efficient.frontier.
e.frontier = efficient.frontier(muhat.vals, cov.mat, nport = 20,
                                alpha.min = -0.5, alpha.max = 1.5)
plot(e.frontier, plot.assets = TRUE, col = "blue", pch = 16)
# Mark the global minimum variance and tangency portfolios on the frontier.
points(gmin.port$sd, gmin.port$er,
       col="green", pch=16, cex=2)
points(tan.port$sd, tan.port$er, col="red",
       pch=16, cex=2)
text(gmin.port$sd, gmin.port$er,
     labels="GLOBAL MIN", pos=2)
text(tan.port$sd, tan.port$er,
     labels="TANGENCY", pos=2)
# Sharpe ratio of the tangency portfolio (rf = 0.005) and the capital
# allocation line through it.
sharpe.tan = (tan.port$er - 0.005)/tan.port$sd
sharpe.tan
abline(a=0.005, b=sharpe.tan, col="green", lwd=2)
# efficient portfolio of T-bills + tangency that has the same SD as sbux
names(tan.port)
x.tan = sigmahat.vals["Starbucks"]/tan.port$sd
x.tan
mu.pe = 0.005 + x.tan*(tan.port$er - 0.005)
mu.pe
# VaR analysis: normal-quantile Value-at-Risk for a $50,000 position at the
# 5% and 1% levels, per asset and for the global minimum variance portfolio.
w0 = 50000
# Return quantiles under the CER/normal model: mu + sigma * z_alpha.
qhat.05 = muhat.vals + sigmahat.vals*qnorm(0.05)
qhat.01 = muhat.vals + sigmahat.vals*qnorm(0.01)
qhatGmin.05 = gmin.port$er + gmin.port$sd*qnorm(0.05)
qhatGmin.01 = gmin.port$er + gmin.port$sd*qnorm(0.01)
# Dollar VaR = initial wealth times the return quantile (negative = loss).
VaR.05 = w0*qhat.05
VaR.01 = w0*qhat.01
VaR.05
VaR.01
VaRgmin.05 = w0*qhatGmin.05
VaRgmin.01 = w0*qhatGmin.01
VaRgmin.05
VaRgmin.01
library(corrplot)
library(IntroCompFinR)
library(PerformanceAnalytics)
library(zoo)
# load data from file (Make sure you change the path to where you downloaded the file)
lab5returns.df = read.csv(file="/Users/cuijy/Desktop/econ424lab5_return_1.csv",
stringsAsFactors=FALSE)
# Fix to problem with the yearmon class
dates = seq(as.Date("1992-07-01"), as.Date("2000-10-01"), by="months")
lab5returns.df$Date = dates
# create zoo object
lab5returns.z = zoo(lab5returns.df[,-1], lab5returns.df$Date)
my.panel <- function(...) {
lines(...)
abline(h=0)
}
plot(lab5returns.z, lwd=2, col="blue", panel = my.panel)
# compute estimates of CER model and annualize
muhat.annual = apply(lab5returns.z,2,mean)*12
sigma2.annual = apply(lab5returns.z,2,var)*12
sigma.annual = sqrt(sigma2.annual)
covmat.annual = cov(lab5returns.z)*12
covhat.annual = cov(lab5returns.z)[1,2]*12
rhohat.annual = cor(lab5returns.z)[1,2]
mu.s = muhat.annual["rsbux"]
mu.m = muhat.annual["rmsft"]
sig2.s = sigma2.annual["rsbux"]
sig2.m = sigma2.annual["rmsft"]
sig.s = sigma.annual["rsbux"]
sig.m = sigma.annual["rmsft"]
sig.sm = covhat.annual
rho.sm = rhohat.annual
#
# create portfolios and plot
#
x.s = seq(from=-1, to=2, by=0.1)
x.m = 1 - x.s
mu.p = x.s*mu.s + x.m*mu.m
sig2.p = x.s^2 * sig2.s + x.m^2 * sig2.m + 2*x.s*x.m*sig.sm
sig.p = sqrt(sig2.p)
cbind(x.s, x.m, mu.p, sig.p, sig2.p)
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
# now compute portfolios with assets and T-bills as well as Sharpe slopes
r.f = 0.025
# T-bills + SBUX
x.s = seq(from=0, to=2, by=0.1)
mu.p.s = r.f + x.s*(mu.s - r.f)
mu.p.s
sig2.p.s = x.s*x.s*sig.s*sig.s
sig2.p.s
sig.p.s = x.s*sig.s
sig.p.s
sharpe.s = (mu.s - r.f)/sig.s
sharpe.s
# T-bills + MSFT
x.m = seq(from=0, to=2, by=0.1)
mu.p.m = r.f + x.m*(mu.m - r.f)
mu.p.m
sig2.p.m = x.m*x.m*sig.m*sig.m
sig2.p.m
sig.p.m = x.m*sig.m
sig.p.m
sharpe.m = (mu.m - r.f)/sig.m
sharpe.m
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
points(sig.p.s, mu.p.s, type="b", col="blue")
points(sig.p.m, mu.p.m, type="b", col="orange")
plot(sig.p, mu.p, type="b", pch=16, ylim=c(0, max(mu.p)), xlim=c(0, max(sig.p)),
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
points(sig.p.s, mu.p.s, type="b", col="blue")
points(sig.p.m, mu.p.m, type="b", col="orange")
# 2 compute global minimum variance portfolio
gmin.port = globalMin.portfolio(muhat.annual, covmat.annual)
gmin.port
summary(gmin.port, risk.free=0.025)
plot(gmin.port)
pie(gmin.port$weights)
sharpe.gmin = (gmin.port$er - r.f)/gmin.port$sd
sharpe.gmin
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
text(x=gmin.port$sd, y=gmin.port$er, labels="Global min", pos=4)
sigma2.gim = 0.327^2
sigma2.gim
# compute tangency portfolio
tan.port = tangency.portfolio(muhat.annual, covmat.annual,risk.free=0.025)
tan.port
summary(tan.port,risk.free=0.025)
plot(tan.port)
pie(tan.port$weights)
# T-bills + tangency
x.t = seq(from=0, to=2, by=0.1)
mu.p.t = r.f + x.t*(tan.port$er - r.f)
mu.p.t
sig.p.t = x.t*tan.port$sd
sig.p.t
sig2.p.t = sig.p.t^2
sig2.p.t
sharpe.t = (tan.port$er - r.f)/tan.port$sd
sharpe.t
plot(sig.p, mu.p, type="b", pch=16,
xlab=expression(sigma[p]), ylab=expression(mu[p]),
col=c(rep("green", 14), rep("red", 17)))
text(x=sig.s, y=mu.s, labels="SBUX", pos=4)
text(x=sig.m, y=mu.m, labels="MSFT", pos=4)
text(x=tan.port$sd, y=tan.port$er, labels="Tangency", pos=4)
points(sig.p.t, mu.p.t, type="b", col="blue", pch=16)
# part 2 Computing Efficient Portfolios Using Matrix Algebra
# Clear the the environment before part II
rm(list=ls())
options(digits=4, width=70)
library(zoo)
library(corrplot)
library(IntroCompFinR)
# load the data into a zoo object using the zoo function read.csv
lab5.df = read.csv(file="/Users/cuijy/Desktop/econ424lab5_return_2.csv",
stringsAsFactors=F)
colnames(lab5.df)
#
# Create zoo object from data and dates in lab5.df
#
lab5.z = zoo(x=lab5.df[, -1],
order.by=as.yearmon(lab5.df[, 1], format="%b-%y"))
start(lab5.z)
end(lab5.z)
colnames(lab5.z)
ret.mat = coredata(lab5.z)
#
# Create timePlots of data
#
# create custom panel function to draw horizontal
# line at zero in each panel of plot
# Custom panel for multi-series zoo plots: draws each series and overlays a
# horizontal reference line at zero so sign changes are easy to read.
my.panel <- function(...) {
  lines(...)
  abline(h=0)
}
plot(lab5.z, lwd=2, panel=my.panel, col="blue")
# all on the same graph
plot(lab5.z, plot.type = "single", main="lab5 returns",
col=1:4, lwd=2)
abline(h=0)
legend(x="bottomleft", legend=colnames(lab5.z), col=1:4, lwd=2)
#
# Compute pairwise scatterplots
#
pairs(coredata(lab5.z), col="blue", pch=16)
corrplot(cor(lab5.z), method="ellipse")
# clear the plots if use Rstudio
#
# Compute estimates of CER model parameters
#
muhat.vals = apply(ret.mat, 2, mean)
sigma2hat.vals = apply(ret.mat, 2, var)
sigmahat.vals = apply(ret.mat, 2, sd)
cov.mat = var(ret.mat)
cor.mat = cor(ret.mat)
covhat.vals = cov.mat[lower.tri(cov.mat)]
rhohat.vals = cor.mat[lower.tri(cor.mat)]
names(covhat.vals) <- names(rhohat.vals) <-
c("Nord,Boeing","SBUX,Boeing","MSFT,Boeing","SBUX,Nord",
"MSFT,Nord","MSFT,SBUX")
muhat.vals
sigma2hat.vals
sigmahat.vals
covhat.vals
rhohat.vals
cbind(muhat.vals,sigma2hat.vals,sigmahat.vals)
cbind(covhat.vals,rhohat.vals)
#
# Compute standard errors for estimated parameters
#
# compute estimated standard error for mean
nobs = nrow(ret.mat)
nobs
se.muhat = sigmahat.vals/sqrt(nobs)
se.muhat
cbind(muhat.vals,se.muhat)
# compute estimated standard errors for variance and sd
se.sigma2hat = sigma2hat.vals/sqrt(nobs/2)
se.sigma2hat
se.sigmahat = sigmahat.vals/sqrt(2*nobs)
se.sigmahat
cbind(sigma2hat.vals,se.sigma2hat)
cbind(sigmahat.vals,se.sigmahat)
# compute estimated standard errors for correlation
se.rhohat = (1-rhohat.vals^2)/sqrt(nobs)
se.rhohat
cbind(rhohat.vals,se.rhohat)
#
# Export means and covariance matrix to .csv file for import to Excel.
#
write.csv(muhat.vals, file="/Users/cuijy/Desktop/muhatvals.csv")
write.csv(cov.mat, file="/Users/cuijy/Desktop/covmat.csv")
#
# portfolio theory calculations
#
# compute global minimum variance portfolio with short sales
gmin.port = globalMin.portfolio(muhat.vals, cov.mat)
gmin.port
plot(gmin.port, col="blue")
# ---------------------------------------------------------------------------
# Mean-variance portfolio analysis.
# NOTE(review): this section uses objects defined earlier in the file:
#   muhat.vals / sigmahat.vals = estimated asset mean returns / SDs,
#   cov.mat = estimated covariance matrix, gmin.port = global minimum
#   variance portfolio, e.frontier = efficient frontier object, plus the
#   helper functions efficient.portfolio(), globalMin.portfolio() and
#   tangency.portfolio() sourced elsewhere -- confirm against the top of
#   the file.
# ---------------------------------------------------------------------------
# compute efficient portfolio with target return equal to highest average return
mu.target = max(muhat.vals)
e1.port = efficient.portfolio(muhat.vals, cov.mat, mu.target)
e1.port
plot(e1.port, col="blue")
# Covariance between the global-min portfolio and this efficient portfolio:
# w_gmin' %*% Sigma %*% w_e1 (weights are taken from the 4th list slot of
# each portfolio object).
gmin.port.w = gmin.port[[4]]
e1.port.w = e1.port[[4]]
# NOTE(review): 'cov' shadows stats::cov() from this point on.
cov = t(gmin.port.w)%*%cov.mat%*%e1.port.w
cov
# compute efficient portfolio with target return equal to highest average return
# but do not allow short sales
mu.target = max(muhat.vals)
e1.noshorts.port = efficient.portfolio(muhat.vals, cov.mat, mu.target, shorts=FALSE)
e1.noshorts.port
plot(e1.noshorts.port, col="blue")
# compute global minimum variance portfolio without short sales
gmin.noshort.port = globalMin.portfolio(muhat.vals, cov.mat, shorts = FALSE)
gmin.noshort.port
plot(gmin.noshort.port, col="blue")
# compute tangency portfolio with rf = 0.005 (short sales allowed)
tan.port = tangency.portfolio(muhat.vals, cov.mat, risk.free=0.005)
summary(tan.port)
plot(tan.port, col="blue")
# compute tangency portfolio with rf = 0.005, short sales not allowed
tan.noshort.port = tangency.portfolio(muhat.vals, cov.mat,
                                      risk.free=0.005, shorts = FALSE)
summary(tan.noshort.port)
plot(tan.noshort.port, col="blue")
# Plot the efficient frontier with the individual assets, then mark the
# global-minimum-variance and tangency portfolios in (SD, expected return)
# space.
plot(e.frontier, plot.assets=T, col="blue", pch=16)
points(gmin.port$sd, gmin.port$er,
       col="green", pch=16, cex=2)
points(tan.port$sd, tan.port$er, col="red",
       pch=16, cex=2)
text(gmin.port$sd, gmin.port$er,
     labels="GLOBAL MIN", pos=2)
text(tan.port$sd, tan.port$er,
     labels="TANGENCY", pos=2)
# Sharpe ratio of the tangency portfolio (excess return over rf per unit
# SD); abline() draws the capital allocation line through (0, rf).
sharpe.tan = (tan.port$er - 0.005)/tan.port$sd
sharpe.tan
# efficient portfolio of T-bills + tangency that has the same SD as sbux
abline(a=0.005, b=sharpe.tan, col="green", lwd=2)
# (Starbucks): x.tan is the weight placed on the tangency portfolio.
names(tan.port)
x.tan = sigmahat.vals["Starbucks"]/tan.port$sd
x.tan
# Expected return of that T-bill/tangency combination.
mu.pe = 0.005 + x.tan*(tan.port$er - 0.005)
mu.pe
# VaR analysis: normal-quantile estimates of the 5% and 1% return
# quantiles for each asset and for the global-min portfolio, scaled by a
# 50,000 initial investment (w0).
w0 = 50000
qhat.05 = muhat.vals + sigmahat.vals*qnorm(0.05)
qhat.01 = muhat.vals + sigmahat.vals*qnorm(0.01)
qhatGmin.05 = gmin.port$er + gmin.port$sd*qnorm(0.05)
qhatGmin.01 = gmin.port$er + gmin.port$sd*qnorm(0.01)
VaR.05 = w0*qhat.05
VaR.01 = w0*qhat.01
VaR.05
VaR.01
VaRgmin.05 = w0*qhatGmin.05
VaRgmin.01 = w0*qhatGmin.01
VaRgmin.05
VaRgmin.01 |
# CS555 Data Analysis and Visualization
# Homework 5 -- one-way ANOVA / ANCOVA of student IQ by academic group
# Jefferson Parker, japarker@bu.edu
# 2018-08-03

## Setup ----------------------------------------------------------------
library(car)      # Anova(): type III sums of squares for the ANCOVA
library(lsmeans)  # lsmeans(): adjusted group means / pairwise contrasts

## Read the tab-delimited student data ----------------------------------
dataDir <- "C:/Users/jparker/Code/Input"
setwd(dataDir)
students <- read.table(file = "studentIq.txt", header = TRUE, sep = "\t")

## 1. Group sizes; summarize test score and age by group ----------------
aggregate(students, by = list(students$group), summary)
boxplot(age ~ group, data = students, main = "Age per Student Group")
boxplot(iq ~ group, data = students, main = "IQ per Student Group")

## 2. Do test scores vary by group? -------------------------------------
# Critical F value at alpha = 0.05 with (2, 42) degrees of freedom.
qf(0.05, 2, 42, lower.tail = FALSE)
aovFit <- aov(iq ~ group, data = students)
summary(aovFit)
# Follow a significant overall model with pairwise testing.  The homework
# asks for Tukey's adjustment, which pairwise.t.test() does not offer, so
# both a Bonferroni-adjusted t-test and TukeyHSD() are run to compare the
# two methods.
pairwise.t.test(students$iq, students$group, p.adjust.method = 'bonferroni')
TukeyHSD(aovFit)

## 3. Dummy-coded one-way ANOVA via lm() --------------------------------
# 'Chemistry student' is the reference group, so only the gM and gP
# indicators enter the model (gC is kept for completeness).
students$gC <- ifelse(students$group == 'Chemistry student', 1, 0)
students$gM <- ifelse(students$group == 'Math student', 1, 0)
students$gP <- ifelse(students$group == 'Physics student', 1, 0)
dummyFit <- lm(iq ~ gM + gP, data = students)
summary(dummyFit)

## 4. One-way ANOVA adjusted for age (ANCOVA) ---------------------------
Anova(lm(iq ~ group + age, data = students), type = 3)
# Contrast coding for categorical predictors.
options(contrasts = c("contr.treatment", "contr.poly"))
# Pairwise group differences, accounting for differences in age.
lsmeans(lm(iq ~ group + age, data = students), pairwise ~ group, adjust = 'Tukey')
# Cross-check the same contrasts with emmeans.
# install.packages("emmeans");
library(emmeans)
emmeans(lm(iq ~ group + age, data = students), pairwise ~ group, adjust = 'Tukey')
| /CS555 Data Analysis and Visualization (R Code)/Homework5/Parker - Homework5.R | no_license | japarker02446/BUProjects | R | false | false | 2,244 | r | # CS555 Data Analysis and Visualization
# Homework5.R
# Jefferson Parker, japarker@bu.edu
# 20180803
# Load libraries.
library(car);
library(lsmeans);
# Save the student data to a file and load to R (tab-delimited, with header).
inputDir <- "C:/Users/jparker/Code/Input";
setwd(inputDir);
studentData <- read.table(file = "studentIq.txt", header = TRUE, sep = "\t");
# 1. How many students are in each group.
# Summarize the data relating to both test score and age by group.
aggregate(studentData, by = list(studentData$group), summary);
boxplot(age ~ group, data = studentData, main = "Age per Student Group");
boxplot(iq ~ group, data = studentData, main = "IQ per Student Group");
# 2. Do test scores vary by group?
# Critical F value at alpha = 0.05 with (2, 42) degrees of freedom.
qf(0.05, 2, 42, lower.tail = FALSE);
testModel <- aov(iq ~ group, data = studentData);
summary(testModel);
# If the overall model is significant, perform pairwise testing.
# Note of confusion. The homework says use Tukey's adjustment method but that is
# not an option for pairwise.t.test. I am using both pairwise.t.test and TukeyHSD
# to check both methods.
pairwise.t.test(studentData$iq, studentData$group, p.adjust.method = 'bonferroni');
TukeyHSD(testModel);
# 3. Create the appropriate number of dummy variables for student group
# and re-run the one way ANOVA using the lm function.
# Set 'Chemistry student' as the reference group: gC is the reference
# indicator and is deliberately left out of the lm() formula below.
studentData$gC <- ifelse(studentData$group == 'Chemistry student', 1, 0);
studentData$gM <- ifelse(studentData$group == 'Math student', 1, 0);
studentData$gP <- ifelse(studentData$group == 'Physics student', 1, 0);
lineModel <- lm(iq ~ gM + gP, data = studentData);
summary(lineModel);
# 4. Re-do the one way ANOVA adjusting for age (ANCOVA).
# NOTE(review): options(contrasts = ...) is set AFTER this Anova() call,
# and type III tests are sensitive to the contrast coding in effect --
# confirm the intended ordering of these two statements.
Anova(lm(iq ~ group + age, data = studentData), type = 3);
# set our categorical variable options (treatment contrasts for factors).
options(contrasts = c("contr.treatment", "contr.poly"));
# Measure pairwise differences between groups, accounting for differences in age.
lsmeans(lm(iq ~ group + age, data = studentData), pairwise ~ group, adjust = 'Tukey');
# Just checking the same contrasts with the emmeans package.
# install.packages("emmeans");
library(emmeans);
emmeans(lm(iq ~ group + age, data = studentData), pairwise ~ group, adjust = 'Tukey');
|
# Decision tree classification of the iris data set (rpart).

## Read the raw CSV (no header row) and name the columns ----------------
# read.csv() runs the names through make.names(), so the spaces become dots.
dataset <- read.csv('iris.csv',
                    col.names = c('sepal length', 'sepal width',
                                  'petal length', 'petal width', 'class'),
                    header = FALSE)

## Encode the target as a factor with the three species levels ----------
dataset$class <- factor(dataset$class,
                        levels = c('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'))

## 75/25 train/test split, stratified on the class label ----------------
# install.packages('caTools')
library(caTools)
set.seed(123)  # fixed seed so the split is reproducible
in_train <- sample.split(dataset$class, SplitRatio = 0.75)
training_set <- subset(dataset, in_train == TRUE)
test_set <- subset(dataset, in_train == FALSE)

## Fit a decision tree on the training set ------------------------------
# install.packages('rpart')
library(rpart)
classifier <- rpart(formula = class ~ ., data = training_set)

## Predict the held-out test set (column 5 is the label, so drop it) ----
y_pred <- predict(classifier, newdata = test_set[-5], type = 'class')

## Confusion matrix (rows = truth, columns = prediction) and accuracy ---
cm <- table(test_set[, 5], y_pred)
#                    Iris-setosa Iris-versicolor Iris-virginica
#   Iris-setosa               12               0              0
#   Iris-versicolor            0               9              3
#   Iris-virginica             0               1             11
ac <- sum(diag(cm)) / sum(cm)  # observed accuracy: 0.8888889

## Visualize the fitted tree --------------------------------------------
plot(classifier)
text(classifier) | /irisR.R | no_license | harshaljaiswal/DecisionTREE_classification | R | false | false | 1,367 | r | # Decision Tree Classification
# Importing the dataset: iris.csv has no header row, so column names are
# supplied here (read.csv applies make.names(), turning the spaces into dots).
dataset = read.csv('iris.csv',
                   col.names = c('sepal length','sepal width','petal length','petal width','class'),
                   header = FALSE)
# Encoding the target feature as factor with the three species levels
dataset$class = factor(dataset$class, levels = c('Iris-setosa', 'Iris-versicolor', 'Iris-virginica'))
# Splitting the dataset into the Training set and Test set
# (75/25, stratified on the class label; seed fixed for reproducibility)
# install.packages('caTools')
library(caTools)
set.seed(123)
split = sample.split(dataset$class, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Fitting Decision Tree Classification to the Training set
# install.packages('rpart')
library(rpart)
classifier = rpart(formula = class ~ .,
                   data = training_set)
# Predicting the Test set results (column 5 holds the label, so it is
# dropped from the predictors)
y_pred = predict(classifier, newdata = test_set[-5], type = 'class')
# Making the Confusion Matrix (rows = truth, columns = prediction)
cm = table(test_set[, 5], y_pred)
#                    Iris-setosa Iris-versicolor Iris-virginica
#   Iris-setosa               12               0              0
#   Iris-versicolor            0               9              3
#   Iris-virginica             0               1             11
# Getting the accuracy of the model: correct predictions / total predictions
ac= sum(diag(cm))/sum(cm)
# ac = 0.8888889
# visualizing the tree
plot(classifier)
text(classifier) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_model_drift.R
\name{calculate_model_drift}
\alias{calculate_model_drift}
\title{Calculate Model Drift for comparison of models trained on new/old data}
\usage{
calculate_model_drift(model_old, model_new, data_new, y_new,
predict_function = predict, max_obs = 100, scale = sd(y_new, na.rm
= TRUE))
}
\arguments{
\item{model_old}{model created on historical / `old` data}
\item{model_new}{model created on current / `new` data}
\item{data_new}{data frame with current / `new` data}
\item{y_new}{true values of target variable for current / `new` data}
\item{predict_function}{function that takes two arguments: model and new data and returns numeric vector with predictions, by default it's `predict`}
\item{max_obs}{if negative, then all observations are used for calculation of PDP; if positive, then only `max_obs` observations are used for calculation of PDP}
\item{scale}{scale parameter for calculation of scaled drift}
}
\value{
an object of a class `model_drift` (data.frame) with distances calculated based on Partial Dependency Plots
}
\description{
This function calculates differences between PDP curves calculated for new/old models
}
\examples{
library("DALEX")
model_old <- lm(m2.price ~ ., data = apartments)
model_new <- lm(m2.price ~ ., data = apartments_test[1:1000,])
calculate_model_drift(model_old, model_new,
apartments_test[1:1000,],
apartments_test[1:1000,]$m2.price)
\donttest{
library("ranger")
predict_function <- function(m,x,...) predict(m, x, ...)$predictions
model_old <- ranger(m2.price ~ ., data = apartments)
model_new <- ranger(m2.price ~ ., data = apartments_test)
calculate_model_drift(model_old, model_new,
apartments_test,
apartments_test$m2.price,
predict_function = predict_function)
# here we compare model created on male data
# with model applied to female data
# there is interaction with age, and it is detected here
predict_function <- function(m,x,...) predict(m, x, ..., probability=TRUE)$predictions[,1]
data_old = HR[HR$gender == "male", -1]
data_new = HR[HR$gender == "female", -1]
model_old <- ranger(status ~ ., data = data_old, probability=TRUE)
model_new <- ranger(status ~ ., data = data_new, probability=TRUE)
calculate_model_drift(model_old, model_new,
HR_test,
HR_test$status == "fired",
predict_function = predict_function)
# plot it
library("ingredients")
prof_old <- partial_dependency(model_old,
data = data_new[1:500,],
label = "model_old",
predict_function = predict_function,
grid_points = 101,
variable_splits = NULL)
prof_new <- partial_dependency(model_new,
data = data_new[1:500,],
label = "model_new",
predict_function = predict_function,
grid_points = 101,
variable_splits = NULL)
plot(prof_old, prof_new, color = "_label_")
}
}
| /man/calculate_model_drift.Rd | no_license | kexin997/drifter | R | false | true | 3,326 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculate_model_drift.R
\name{calculate_model_drift}
\alias{calculate_model_drift}
\title{Calculate Model Drift for comparison of models trained on new/old data}
\usage{
calculate_model_drift(model_old, model_new, data_new, y_new,
predict_function = predict, max_obs = 100, scale = sd(y_new, na.rm
= TRUE))
}
\arguments{
\item{model_old}{model created on historical / `old` data}
\item{model_new}{model created on current / `new` data}
\item{data_new}{data frame with current / `new` data}
\item{y_new}{true values of target variable for current / `new` data}
\item{predict_function}{function that takes two arguments: model and new data and returns numeric vector with predictions, by default it's `predict`}
\item{max_obs}{if negative, then all observations are used for calculation of PDP; if positive, then only `max_obs` observations are used for calculation of PDP}
\item{scale}{scale parameter for calculation of scaled drift}
}
\value{
an object of a class `model_drift` (data.frame) with distances calculated based on Partial Dependency Plots
}
\description{
This function calculates differences between PDP curves calculated for new/old models
}
\examples{
library("DALEX")
model_old <- lm(m2.price ~ ., data = apartments)
model_new <- lm(m2.price ~ ., data = apartments_test[1:1000,])
calculate_model_drift(model_old, model_new,
apartments_test[1:1000,],
apartments_test[1:1000,]$m2.price)
\donttest{
library("ranger")
predict_function <- function(m,x,...) predict(m, x, ...)$predictions
model_old <- ranger(m2.price ~ ., data = apartments)
model_new <- ranger(m2.price ~ ., data = apartments_test)
calculate_model_drift(model_old, model_new,
apartments_test,
apartments_test$m2.price,
predict_function = predict_function)
# here we compare model created on male data
# with model applied to female data
# there is interaction with age, and it is detected here
predict_function <- function(m,x,...) predict(m, x, ..., probability=TRUE)$predictions[,1]
data_old = HR[HR$gender == "male", -1]
data_new = HR[HR$gender == "female", -1]
model_old <- ranger(status ~ ., data = data_old, probability=TRUE)
model_new <- ranger(status ~ ., data = data_new, probability=TRUE)
calculate_model_drift(model_old, model_new,
HR_test,
HR_test$status == "fired",
predict_function = predict_function)
# plot it
library("ingredients")
prof_old <- partial_dependency(model_old,
data = data_new[1:500,],
label = "model_old",
predict_function = predict_function,
grid_points = 101,
variable_splits = NULL)
prof_new <- partial_dependency(model_new,
data = data_new[1:500,],
label = "model_new",
predict_function = predict_function,
grid_points = 101,
variable_splits = NULL)
plot(prof_old, prof_new, color = "_label_")
}
}
|
#!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=60G
#SBATCH --time=02:00:00
#SBATCH --job-name='delta_AF'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/delta_AF.stdout
#SBATCH -p koeniglab

# Compute per-site changes in minor allele frequency (delta AF) between
# generations and write one tab-delimited table per comparison set.

setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)
options(stringsAsFactors = FALSE)

# Headerless input: chromosome, position, then one frequency column per
# generation (named just below).
minor_frequencies <- read_delim("minor_frequencies", "\t",
                                col_names = FALSE, trim_ws = TRUE)
colnames(minor_frequencies) <- c("CHR", "POS", "F1", "F18", "F27", "F28", "F50", "F58")

# Change in minor allele frequency from generation `from` to generation `to`.
af_change <- function(to, from) minor_frequencies[[to]] - minor_frequencies[[from]]

# Parents (F1) versus every progeny generation ---------------------------
delta_AF <- minor_frequencies[, 1:2]
delta_AF$F1toF18 <- af_change("F18", "F1")
delta_AF$F1toF28 <- af_change("F28", "F1")
delta_AF$F1toF58 <- af_change("F58", "F1")
delta_AF$F1toF27 <- af_change("F27", "F1")
delta_AF$F1toF50 <- af_change("F50", "F1")
write_delim(delta_AF, "delta_AF_F1toALL", delim = "\t", col_names = TRUE)

# Between each Davis generation (F18 -> F28 -> F58) ----------------------
delta_AF <- minor_frequencies[, 1:2]
delta_AF$F18toF28 <- af_change("F28", "F18")
delta_AF$F28toF58 <- af_change("F58", "F28")
delta_AF$F18toF58 <- af_change("F58", "F18")
write_delim(delta_AF, "delta_AF_DAVIS", delim = "\t", col_names = TRUE)

# Between each Bozeman generation (F18 -> F27 -> F50) --------------------
delta_AF <- minor_frequencies[, 1:2]
delta_AF$F18toF27 <- af_change("F27", "F18")
delta_AF$F27toF50 <- af_change("F50", "F27")
delta_AF$F18toF50 <- af_change("F50", "F18")
write_delim(delta_AF, "delta_AF_BOZ", delim = "\t", col_names = TRUE)
| /scripts/allele_frequency/delta_AFs.R | no_license | jmmarzolino/CCII_BOZ | R | false | false | 1,616 | r | #!/usr/bin/env Rscript
#SBATCH --ntasks=1
#SBATCH --mem=60G
#SBATCH --time=02:00:00
#SBATCH --job-name='delta_AF'
#SBATCH --output=/rhome/jmarz001/bigdata/CCII_BOZ/scripts/delta_AF.stdout
#SBATCH -p koeniglab
# Computes per-site changes in minor allele frequency (delta AF) between
# generations and writes one tab-delimited table per comparison set.
setwd("/bigdata/koeniglab/jmarz001/CCII_BOZ/results")
library(readr)
options(stringsAsFactors = F)
# Headerless input: chromosome, position, then one allele-frequency
# column per generation (named on the next line).
minor_frequencies <- read_delim("minor_frequencies","\t", col_names = FALSE, trim_ws = TRUE)
colnames(minor_frequencies) <- c("CHR","POS","F1","F18","F27","F28","F50","F58")
# Each output table starts from the site identifiers (CHR, POS);
# delta = later generation minus earlier generation.
delta_AF <- minor_frequencies[,1:2]
# between all progeny and parents (F1)
delta_AF$F1toF18 <- minor_frequencies$F18 - minor_frequencies$F1
delta_AF$F1toF28 <- minor_frequencies$F28 - minor_frequencies$F1
delta_AF$F1toF58 <- minor_frequencies$F58 - minor_frequencies$F1
delta_AF$F1toF27 <- minor_frequencies$F27 - minor_frequencies$F1
delta_AF$F1toF50 <- minor_frequencies$F50 - minor_frequencies$F1
write_delim(delta_AF,"delta_AF_F1toALL",delim="\t",col_names = T)
# between each Davis generation (F18 -> F28 -> F58)
delta_AF <- minor_frequencies[,1:2]
delta_AF$F18toF28 <- minor_frequencies$F28 - minor_frequencies$F18
delta_AF$F28toF58 <- minor_frequencies$F58 - minor_frequencies$F28
delta_AF$F18toF58 <- minor_frequencies$F58 - minor_frequencies$F18
write_delim(delta_AF,"delta_AF_DAVIS",delim="\t",col_names = T)
# between each Bozeman generation (F18 -> F27 -> F50)
delta_AF <- minor_frequencies[,1:2]
delta_AF$F18toF27 <- minor_frequencies$F27 - minor_frequencies$F18
delta_AF$F27toF50 <- minor_frequencies$F50 - minor_frequencies$F27
delta_AF$F18toF50 <- minor_frequencies$F50 - minor_frequencies$F18
write_delim(delta_AF,"delta_AF_BOZ",delim="\t",col_names = T)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_functions.R
\docType{package}
\name{analytics_googleAuthR}
\alias{analytics_googleAuthR}
\alias{analytics_googleAuthR-package}
\title{Google Analytics API
Views and manages your Google Analytics data.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:25:02
filename: /Users/mark/dev/R/autoGoogleAPI/googleanalyticsv3.auto/R/analytics_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/analytics
\item https://www.googleapis.com/auth/analytics.edit
\item https://www.googleapis.com/auth/analytics.manage.users
\item https://www.googleapis.com/auth/analytics.manage.users.readonly
\item https://www.googleapis.com/auth/analytics.provision
\item https://www.googleapis.com/auth/analytics.readonly
}
}
| /googleanalyticsv3.auto/man/analytics_googleAuthR.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 903 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/analytics_functions.R
\docType{package}
\name{analytics_googleAuthR}
\alias{analytics_googleAuthR}
\alias{analytics_googleAuthR-package}
\title{Google Analytics API
Views and manages your Google Analytics data.}
\description{
Auto-generated code by googleAuthR::gar_create_api_skeleton
at 2017-03-05 19:25:02
filename: /Users/mark/dev/R/autoGoogleAPI/googleanalyticsv3.auto/R/analytics_functions.R
api_json: api_json
}
\details{
Authentication scopes used are:
\itemize{
\item https://www.googleapis.com/auth/analytics
\item https://www.googleapis.com/auth/analytics.edit
\item https://www.googleapis.com/auth/analytics.manage.users
\item https://www.googleapis.com/auth/analytics.manage.users.readonly
\item https://www.googleapis.com/auth/analytics.provision
\item https://www.googleapis.com/auth/analytics.readonly
}
}
|
\name{random_letter}
\alias{random_letter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
random letter
}
\description{
generates one random letter
}
\usage{
random_letter(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{Ignored. The function samples one letter from \code{LETTERS} without using \code{x}.}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A character vector of length one: a single uppercase letter sampled from \code{LETTERS}.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Hannah Butler
}
\note{
Simulation HW 5
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
letter <- sample(LETTERS, 1)
return(letter)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /simpack/man/random_letter.Rd | no_license | hbutler/sim_package | R | false | false | 1,266 | rd | \name{random_letter}
\alias{random_letter}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
random letter
}
\description{
generates one random letter
}
\usage{
random_letter(x)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{x}{Ignored. The function samples one letter from \code{LETTERS} without using \code{x}.}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
A character vector of length one: a single uppercase letter sampled from \code{LETTERS}.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Hannah Butler
}
\note{
Simulation HW 5
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
letter <- sample(LETTERS, 1)
return(letter)
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
/cumSeg/R/jumpoints.R | no_license | ingted/R-Examples | R | false | false | 13,303 | r | ||
# ---- Shiny app dependencies -------------------------------------------
library(tools)
library(parsedate)
library(reshape2)
library(ggplot2)
library(shiny)
library(DT)
library(shinycssloaders)
library(httr)
library(Hmisc)
library(DT)  # NOTE(review): duplicate of the library(DT) call above
library(xml2)
library(xslt)
library(RCurl)
# ---- Location of the study data and helper functions on GitHub --------
GitHubPath <- 'https://github.com/bripaisley/phuse-scripts-Lilly/tree/master'
dataPath <- 'data/send/8409511'
# dataPath <- 'data/send/CJUGSEND00'
functionPath <- 'contributed/Nonclinical/R/Functions/Functions.R'
#script <- getURL(paste(GitHubPath,functionPath,sep='/'), ssl.verifypeer = FALSE)
#eval(parse(text = script))
# NOTE(review): this sources a github.com '/tree/master' URL, which serves
# an HTML page rather than raw R source -- presumably a
# raw.githubusercontent.com URL is intended; confirm this source() works.
source(paste(GitHubPath,functionPath,sep='/'))
#source('/lrlhps/users/c143390/For_Janice/Functions.R')
#print(paste(GitHubPath,functionPath,sep='/'))
#source('https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Functions/Functions.R')
# SEND domains of interest used throughout the server logic.
DOIs <- c('cv','eg','vs')
# Domain column suffixes used when reshaping; the *INT variant adds the
# evaluation-interval columns present in some domains.
domainColumnsNoINT <- c('TEST','TESTCD','STRESN','STRESU','NOMDY','TPTNUM','ELTM','ELTMN','TPTREF')
domainColumnsINT <- c(domainColumnsNoINT,'EVLINT','STINT','ENINT','EVLINTN')
# Reactive storage shared between server expressions.
values <- reactiveValues()
# Whether to authenticate to GitHub when downloading the .xpt files.
Authenticate <- TRUE
server <- function(input, output,session) {
loadData <- reactive({
  # Download the study's SEND .xpt files from GitHub, then post-process the
  # domains of interest (DOIs): attach subject sex (from dm) and dosing
  # element (from se), and convert duration strings to numeric hours.
  #
  # SECURITY(review): a GitHub username and password are hard-coded in
  # plain text below.  Move them out of the source (e.g. environment
  # variables / credential store) and rotate the exposed password.
  #
  # NOTE(review): if Authenticate is FALSE, 'Data' is never created and
  # this reactive errors -- the non-authenticated branch is commented out.
  withProgress({
    if (Authenticate==TRUE) {
      Data <- load.GitHub.xpt.files(studyDir=input$dataPath,authenticate=T,User='bripaisley',Password='Derekhottie1!',showProgress=T)
    } #else {
    #  Data <- load.GitHub.xpt.files(studyDir=input$dataPath,authenticate=F,showProgress=T)
    # }
    setProgress(value=1,message='Processing Data...')
    # Remember the available domain names for the rest of the app.
    values$domainNames <- toupper(names(Data))
    # NOTE(review): 'subjects' and 'elementLevels' are computed but never
    # used inside this reactive.
    subjects <- unique(Data$dm$USUBJID)
    elementLevels <- levels(Data$se$ELEMENT)
    for (domain in DOIs) {
      # Merge in sex and (via the reference date) the dosing element.
      sexData <- Data$dm[,c('USUBJID','SEX')]
      elementData <- Data$se[,c('USUBJID','SESTDTC','ELEMENT')]
      Data[[domain]] <- merge(Data[[domain]],sexData,by='USUBJID')
      Data[[domain]] <- merge(Data[[domain]],elementData,by.x=c('USUBJID',paste0(toupper(domain),'RFTDTC')),by.y=c('USUBJID','SESTDTC'))
      Data[[domain]]$SEX <- factor(Data[[domain]]$SEX,levels=c('M','F'))
      # Elapsed time (--ELTM): convert duration strings to hours via the
      # DUR_to_seconds helper, keep a numeric copy (--ELTMN) and replace
      # --ELTM with an "<n> h" label ordered by time point number.
      ELTM <- Data[[domain]][[paste0(toupper(domain),'ELTM')]]
      ELTMnum <- sapply(ELTM,DUR_to_seconds)/3600
      Data[[domain]][[paste0(toupper(domain),'ELTM')]] <- paste(ELTMnum,'h')
      orderedELTM <- Data[[domain]][[paste0(toupper(domain),'ELTM')]][order(Data[[domain]][[paste0(toupper(domain),'TPTNUM')]])]
      orderedLevelsELTM <- unique(orderedELTM)
      Data[[domain]][[paste0(toupper(domain),'ELTM')]] <- factor(Data[[domain]][[paste0(toupper(domain),'ELTM')]],levels=orderedLevelsELTM)
      Data[[domain]][[paste0(toupper(domain),'ELTMN')]] <- ELTMnum
      # Domains with evaluation intervals (--STINT/--ENINT present): build
      # a "start to end h" label (--EVLINT) ordered by time point number,
      # plus the numeric interval midpoint (--EVLINTN).
      if (length(grep('INT',colnames(Data[[domain]])))>=2) {
        STINT <- Data[[domain]][[paste0(toupper(domain),'STINT')]]
        STINTnum <- sapply(STINT,DUR_to_seconds)/3600
        ENINT <- Data[[domain]][[paste0(toupper(domain),'ENINT')]]
        ENINTnum <- sapply(ENINT,DUR_to_seconds)/3600
        Data[[domain]][[paste0(toupper(domain),'EVLINT')]] <- paste0(STINTnum,' to ',ENINTnum,' h')
        orderedEVLINT <- Data[[domain]][[paste0(toupper(domain),'EVLINT')]][order(Data[[domain]][[paste0(toupper(domain),'TPTNUM')]])]
        orderedLevelsEVLINT <- unique(orderedEVLINT)
        Data[[domain]][[paste0(toupper(domain),'EVLINT')]] <- factor(Data[[domain]][[paste0(toupper(domain),'EVLINT')]],levels=orderedLevelsEVLINT)
        Data[[domain]][[paste0(toupper(domain),'EVLINTN')]] <- rowMeans(cbind(STINTnum,ENINTnum))
      }
    }
  })
  return(Data)
})
output$tests <- renderUI({
req(input$DOIs)
Data <- loadData()
Tests <- NULL
for (domain in input$DOIs) {
testNames <- levels(Data[[domain]][[paste0(toupper(domain),'TEST')]])[unique(Data[[domain]][[paste0(toupper(domain),'TEST')]])]
Tests <- c(Tests,testNames)
}
checkboxGroupInput('tests','Tests of Interest:',Tests,Tests)
})
output$doses <- renderUI({
Data <- loadData()
doses <- NULL
for (domain in DOIs) {
doses <- unique(c(doses,levels(Data[[domain]][['ELEMENT']])[Data[[domain]][['ELEMENT']]]))
}
dosesN <- as.numeric(lapply(strsplit(doses,' '),`[[`,1))
doses <- doses[order(dosesN)]
checkboxGroupInput('doses','Filter by Dose Level:',choices=doses,selected=doses)
})
output$subjects <- renderUI({
Data <- loadData()
subjects <- NULL
for (domain in DOIs) {
subjects <- unique(c(subjects,levels(Data[[domain]][['USUBJID']])[Data[[domain]][['USUBJID']]]))
}
checkboxGroupInput('subjects','Filter by Subject:',choices=subjects,selected=subjects)
})
output$days <- renderUI({
Data <- loadData()
days <- NULL
for (domain in DOIs) {
days <- unique(c(days,Data[[domain]][[paste0(toupper(domain),'NOMDY')]]))
}
checkboxGroupInput('days','Filter by Day:',choices=days,selected=days)
})
filterData <- reactive({
Data <- loadData()
for (domain in DOIs) {
testIndex <- which(levels(Data[[domain]][[paste0(toupper(domain),'TEST')]])[Data[[domain]][[paste0(toupper(domain),'TEST')]]]
%in% input$tests)
sexIndex <- which(Data[[domain]][['SEX']] %in% input$sex)
doseIndex <- which(Data[[domain]][['ELEMENT']] %in% input$doses)
subjectIndex <- which(Data[[domain]][['USUBJID']] %in% input$subjects)
dayIndex <- which(Data[[domain]][[paste0(toupper(domain),'NOMDY')]] %in% input$days)
index <- Reduce(intersect,list(testIndex,sexIndex,doseIndex,subjectIndex,dayIndex))
Data[[domain]] <- Data[[domain]][index,]
}
return(Data)
})
getIndividualTables <- reactive({
req(input$DOIs)
Data <- filterData()
individualTables <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]])
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
if (INTflag == T) {
testIndividualData <- dcast(testData,TEST+ELEMENT+SEX+NOMDY+USUBJID~EVLINT,value.var = 'STRESN')
} else {
testIndividualData <- dcast(testData,TEST+ELEMENT+SEX+NOMDY+USUBJID~ELTM,value.var = 'STRESN')
}
testIndividualData <- testIndividualData[order(testIndividualData$ELEMENT,testIndividualData$SEX,testIndividualData$NOMDY,testIndividualData$USUBJID,decreasing=F),]
individualTables[[paste(toupper(domain),testCD,sep='_')]] <- testIndividualData
}
}
return(individualTables)
})
getMeanTables <- reactive({
req(input$DOIs)
Data <- filterData()
meanTables <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
groupElement <- paste(testData$USUBJID,testData$ELEMENT,sep='_')
testData <- cbind(testData,groupElement)
if (INTflag == T) {
meanTestData <- dcast(testData,TEST+ELEMENT+SEX~EVLINT,value.var='STRESN',mean)
} else {
meanTestData <- dcast(testData,TEST+ELEMENT+SEX~ELTM,value.var='STRESN',mean)
}
meanTestData <- meanTestData[order(meanTestData$ELEMENT,meanTestData$SEX,decreasing=F),]
meanTables[[paste(toupper(domain),testCD,sep='_')]] <- meanTestData
}
}
return(meanTables)
})
observe({
req(input$tests)
values$nTests <- length(input$tests)
})
observe({
req(input$DOIs)
if (input$summary=='Individual Subjects') {
values$table <- getIndividualTables()
} else if (input$summary=='Group Means') {
values$table <- getMeanTables()
}
})
output$tables <- renderUI({
req(input$DOIs)
table_output_list <- lapply(seq(values$nTests), function(i) {
tableName <- paste0('table',i)
DT::dataTableOutput(tableName)
})
do.call(tagList, table_output_list)
})
observe({
lapply(seq(values$nTests),function(i) {
output[[paste0('table',i)]] <- DT::renderDataTable({
datatable({
Table <- values$table[[i]]
},options=list(autoWidth=T,scrollX=T,pageLength=100,paging=F,searching=F,#),
columnDefs=list(list(className='dt-center',width='100px',
targets=seq(0,(ncol(values$table[[i]])-1))))),
rownames=F)
})
})
})
getTestData <- reactive({
req(input$DOIs)
Data <- filterData()
testDataList <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
if (input$plotBy=='Subject') {
groupElement <- paste(testData$USUBJID,testData$ELEMENT,sep='_')
} else if (input$plotBy=='Day') {
groupElement <- paste(testData$NOMDY,testData$ELEMENT,sep='_')
}
testData <- cbind(testData,groupElement)
testDataList[[paste(toupper(domain),testCD,sep='_')]] <- testData
}
}
return(testDataList)
})
getMeanTestData <- reactive({
req(input$DOIs)
Data <- filterData()
meanTestDataList <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
sexElement <- paste(testData$SEX,testData$ELEMENT,sep='_')
if (INTflag == T) {
meanTables <- dcast(testData,TEST+sexElement+ELEMENT+SEX~EVLINTN,value.var='STRESN',mean)
} else {
meanTables <- dcast(testData,TEST+sexElement+ELEMENT+SEX~ELTMN,value.var='STRESN',mean)
}
meanTestData <- melt(meanTables,id=c('TEST','sexElement','ELEMENT','SEX'))
meanTestDataList[[paste(toupper(domain),testCD,sep='_')]] <- meanTestData
}
}
return(meanTestDataList)
})
output$plots <- renderUI({
req(input$DOIs)
plot_output_list <- lapply(seq(values$nTests), function(i) {
plotName <- paste("plot", i, sep="")
plotOutput(plotName,height='600px')
})
do.call(tagList, plot_output_list)
})
# Render one plot per selected test. "Individual Subjects" mode draws one
# line per subject (or per day) within each dose group; "Group Means" mode
# draws the sex-by-dose-group mean series from getMeanTestData().
observe({
lapply(seq(values$nTests),function(i) {
output[[paste0('plot',i)]] <- renderPlot({
pointSize <- 3
colorPalette <- c('black','blue','purple','red')
if (input$summary=='Individual Subjects') {
testDataList <- getTestData()
testData <- testDataList[[i]]
# Order nominal-day factor levels numerically for a sensible legend.
testData$NOMDY <- factor(as.character(testData$NOMDY),levels=(as.character(unique(testData$NOMDY)[order(unique(testData$NOMDY))])))
# Interval-based domains plot at the interval midpoint (EVLINTN);
# point-in-time domains plot at the elapsed time (ELTMN).
if (length(grep('INT',colnames(testData)))>=2) {
if (input$plotBy == 'Subject') {
p <- ggplot(testData,aes(x=EVLINTN,y=STRESN,group=groupElement,color=ELEMENT,shape=USUBJID)) + scale_shape_discrete('Subject')
} else if (input$plotBy == 'Day') {
p <- ggplot(testData,aes(x=EVLINTN,y=STRESN,group=groupElement,color=ELEMENT,shape=NOMDY)) + scale_shape_discrete('Day')
}
} else {
if (input$plotBy == 'Subject') {
p <- ggplot(testData,aes(x=ELTMN,y=STRESN,group=groupElement,color=ELEMENT,shape=USUBJID)) + scale_shape_discrete('Subject')
} else if (input$plotBy == 'Day') {
p <- ggplot(testData,aes(x=ELTMN,y=STRESN,group=groupElement,color=ELEMENT,shape=NOMDY)) + scale_shape_discrete('Day')
}
}
p <- p + geom_point(size=pointSize) + geom_line() + labs(title=testData$TEST[1],x='Time (h)',y=paste0(testData$TESTCD[1],' (',testData$STRESU[1],')'))
} else if (input$summary=='Group Means') {
testDataList <- getTestData()
testData <- testDataList[[i]]
meanTestDataList <- getMeanTestData()
meanTestData <- meanTestDataList[[i]]
# The time axis comes back from melt() as a factor of numeric labels;
# convert via the levels to recover the numeric values.
p <- ggplot(meanTestData,aes(x=as.numeric(levels(variable)[variable]),y=value,group=sexElement,color=ELEMENT,shape=SEX)) +
geom_point(size=pointSize) + geom_line() + labs(title=meanTestData$TEST[1],x='Time (h)',y=paste0(testData$TESTCD[1],' (',testData$STRESU[1],')')) +
scale_shape_discrete(name = 'Sex')
}
p <- p + theme_classic() +
theme(text=element_text(size=16),
axis.title.y=element_text(margin = margin(t=0,r=10,b=0,l=0)),
# BUG FIX: was 'r=' with no value, silently relying on the missing
# argument falling back to margin()'s default; state it explicitly.
axis.title.x=element_text(margin = margin(t=10,r=0,b=0,l=0)),
plot.title=element_text(hjust=0.5,margin=margin(t=0,r=0,b=10,l=0))) +
# BUG FIX: this manual scale replaces any earlier color scale, so the
# legend title must be set here or "Dose Group" is lost (ggplot2 keeps
# only the last color scale added). The now-redundant
# scale_color_discrete() calls above were removed.
scale_color_manual(values=colorPalette, name = "Dose Group")
p
})
})
})
# Build one tab per loaded SEND domain, each containing a DT placeholder
# ("datatable_<i>") that the observer below fills with the raw domain table.
output$SENDdomains <- renderUI({
domainTabs <- lapply(seq_len(length(values$domainNames)), function(idx) {
tabPanel(
values$domainNames[idx],
DT::dataTableOutput(paste0('datatable_', idx))
)
})
do.call(tabsetPanel, domainTabs)
})
# Fill each "datatable_<i>" placeholder with the unmodified (pre-merge)
# table for that SEND domain. seq() on a vector of length > 1 iterates
# over its indices, like seq_along().
observe({
lapply(seq(values$domainNames),function(i) {
output[[paste0('datatable_',i)]] <- DT::renderDataTable({
datatable({
Data <- loadData()
# Domain names are stored upper-case; the data list is keyed lower-case.
rawTable <- Data[[tolower(values$domainNames[i])]]
},options=list(autoWidth=T,scrollX=T,pageLength=10,paging=T,searching=T,
columnDefs=list(list(className='dt-center',targets='_all'))),
rownames=F)
})
})
})
# Render the study's define.xml: fetch the XML and its stylesheet from the
# repository, apply the XSL transform, write the resulting HTML under www/,
# and display it in an iframe.
# NOTE(review): 'www/temp.html' is a single shared file — concurrent sessions
# viewing different datasets would overwrite each other; confirm whether a
# per-session tempfile is needed.
output$define <- renderUI({
doc <- read_xml(paste(GitHubPath,input$dataPath,'define.xml',sep='/'))
style <- read_xml(paste(GitHubPath,input$dataPath,'define2-0-0.xsl',sep='/'))
html <- xml_xslt(doc,style)
cat(as.character(html),file='www/temp.html')
define <- tags$iframe(src='temp.html', height='700', width='100%')
define
})
}
ui <- shinyUI(
fluidPage(titlePanel(title='CDISC-SEND Safety Pharmacology Proof-of-Concept Pilot',
windowTitle = 'CDISC-SEND Safety Pharmacology Proof-of-Concept Pilot'),br(),
sidebarLayout(
sidebarPanel(width=3,
selectInput('dataPath','Select Dataset:',
choices = list(Lilly='data/send/8409511',
alsoLilly ='data/send/8409511')),
checkboxGroupInput('DOIs','Domains of Interest:',choiceNames=toupper(DOIs),choiceValues=DOIs,selected=DOIs),
uiOutput('tests'),
radioButtons('summary','Display:',c('Group Means','Individual Subjects')),
checkboxGroupInput('sex','Filter by Sex:',list(Male='M',Female='F'),selected=c('M','F')),
uiOutput('doses'),
uiOutput('subjects'),
uiOutput('days'),
conditionalPanel(condition='input.summary=="Individual Subjects"',
radioButtons('plotBy','Plot by:',c('Subject','Day'))
)
),
mainPanel(width=9,
tabsetPanel(
tabPanel('Tables',
withSpinner(uiOutput('tables'),type=5)
),
tabPanel('Figures',
withSpinner(uiOutput('plots'),type=5)
),
tabPanel('Source Data',
tabsetPanel(
tabPanel('SEND Domains',
withSpinner(uiOutput('SENDdomains'),type=5)
),
tabPanel('DEFINE',
withSpinner(htmlOutput('define'),type=5)
),
tabPanel('README',
column(11,
conditionalPanel(condition="input.dataPath=='data/send/8409511'",
includeMarkdown('https://github.com/bripaisley/phuse-scripts-Lilly/tree/master/data/send/CDISC-Safety-Pharmacology-POC/readme.txt')
#includeMarkdown('https://github.com/bripaisley/phuse-scripts-Lilly/tree/master/data/send/CDISC-Safety-Pharmacology-POC/readme.txt')
)
)
)
)
),
tabPanel('Algorithm Description',
column(11,
h4('A Latin square experimental design is used to control variation in an experiment across
two blocking factors, in this case: subject and time. In a traditional Latin square safety
pharmacology study, several findings e.g., heart rate, QT duration, temperature, are recorded
in each dog for a couple of hours predose and then for an extended period of time, e.g. 24
hours, following a single dose of the investigational therapy or vehicle. Doses are typically
followed by a one-week washout period. As shown in the example Latin square study design
below, each dog receives a single dose of each treatment level throughout the course of
the study, but no two dogs receive the same treatment on the same day:'),
br(),
HTML('<center><img src="Latin Square.png"></center>'),
br(),
br(),
h4('This type of study design was modeled in the proof-of-concept SEND dataset by describing the
treatment level in the ELEMENT field of the subject elements (SE) domain. As each ELEMENT
began at the time of dosing, treatment effects can be observed by matching, for each subject,
the start time of each ELEMENT (SESTDTC) with the reference dose time (--RFTDTC) of each finding
record. The following example of SQL code demonstrates the logic of this operation:'),
br(),
h4('SELECT * FROM CV, SE'),
h4('JOIN CV, SE'),
h4('ON CV.USUBJID = SE.USUBJID'),
h4('AND CV.CVRFTDTC = SE.SESTDTC'),
br(),
h4('Tables were created for each type of finding (--TEST) in each of the finding domains by
pivoting the table on the field encoding the duration of time elapsed between dosing and
each observation (--ELTM). If the finding of interest was collected over an interval of
time, then the start (--STINT) and end (ENINT) times of the recording interval for each
record were used to represent the recording interval in the place of the elapsed time point.
The order of time points/intervals was set chronologically using the --TPTNUM variable.
Mean values were calculated for each dosing group (ELEMENT) within each sex (SEX) for each
time point or interval of observation. Plots were generated using the same method, except
observations recorded over an interval of time were represented on the x-axis at the
mid-point of the interval.'),
br(),
h4('The proof-of-concept dataset used here was created by CDISC and is publicly available at:'),
tags$a(href='https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/data/send/CDISC-Safety-Pharmacology-POC/',
'https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/data/send/CDISC-Safety-Pharmacology-POC/'),
br(),br(),
h4('The source code for this R Shiny application is also publicly availalble at:'),
tags$a(href='https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Safety%20Pharmacology%20Pilot/Safety%20Pharmacology%20Analysis',
'https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Safety%20Pharmacology%20Pilot/Safety%20Pharmacology%20Analysis'),
br(),br()
)
)
)
)
)
)
)
# Run Shiny App
shinyApp(ui = ui, server = server)
| /contributed/Nonclinical/R/Safety Pharmacology Pilot/Safety Pharmacology Analysis.R | permissive | bripaisley/phuse-scripts-Lilly | R | false | false | 24,731 | r | library(tools)
library(parsedate)
library(reshape2)
library(ggplot2)
library(shiny)
library(DT)
library(shinycssloaders)
library(httr)
library(Hmisc)
library(DT)
library(xml2)
library(xslt)
library(RCurl)
GitHubPath <- 'https://github.com/bripaisley/phuse-scripts-Lilly/tree/master'
dataPath <- 'data/send/8409511'
# dataPath <- 'data/send/CJUGSEND00'
functionPath <- 'contributed/Nonclinical/R/Functions/Functions.R'
#script <- getURL(paste(GitHubPath,functionPath,sep='/'), ssl.verifypeer = FALSE)
#eval(parse(text = script))
source(paste(GitHubPath,functionPath,sep='/'))
#source('/lrlhps/users/c143390/For_Janice/Functions.R')
#print(paste(GitHubPath,functionPath,sep='/'))
#source('https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Functions/Functions.R')
DOIs <- c('cv','eg','vs')
domainColumnsNoINT <- c('TEST','TESTCD','STRESN','STRESU','NOMDY','TPTNUM','ELTM','ELTMN','TPTREF')
domainColumnsINT <- c(domainColumnsNoINT,'EVLINT','STINT','ENINT','EVLINTN')
values <- reactiveValues()
Authenticate <- TRUE
server <- function(input, output,session) {
# Reactive: download the study's SEND .xpt domains and derive the columns
# the rest of the app needs. For each domain of interest this merges in
# SEX (from DM) and the dose-group ELEMENT (from SE, matched on subject and
# reference dose time), then converts ISO-8601 durations (--ELTM, and
# --STINT/--ENINT for interval domains) into numeric hours, building
# factor levels ordered chronologically by --TPTNUM.
# SECURITY NOTE(review): GitHub username and password are hardcoded in
# source below — these credentials should be moved out of the code
# (e.g. environment variables) and rotated.
loadData <- reactive({
withProgress({
if (Authenticate==TRUE) {
Data <- load.GitHub.xpt.files(studyDir=input$dataPath,authenticate=T,User='bripaisley',Password='Derekhottie1!',showProgress=T)
} #else {
# Data <- load.GitHub.xpt.files(studyDir=input$dataPath,authenticate=F,showProgress=T)
# }
setProgress(value=1,message='Processing Data...')
values$domainNames <- toupper(names(Data))
subjects <- unique(Data$dm$USUBJID)
elementLevels <- levels(Data$se$ELEMENT)
for (domain in DOIs) {
sexData <- Data$dm[,c('USUBJID','SEX')]
elementData <- Data$se[,c('USUBJID','SESTDTC','ELEMENT')]
Data[[domain]] <- merge(Data[[domain]],sexData,by='USUBJID')
# Latin-square logic: a finding belongs to the dose group whose element
# start time (SESTDTC) equals the finding's reference dose time (--RFTDTC).
Data[[domain]] <- merge(Data[[domain]],elementData,by.x=c('USUBJID',paste0(toupper(domain),'RFTDTC')),by.y=c('USUBJID','SESTDTC'))
Data[[domain]]$SEX <- factor(Data[[domain]]$SEX,levels=c('M','F'))
# Convert elapsed-time durations to hours; keep both a labelled factor
# (--ELTM, ordered by --TPTNUM) and a numeric column (--ELTMN).
ELTM <- Data[[domain]][[paste0(toupper(domain),'ELTM')]]
ELTMnum <- sapply(ELTM,DUR_to_seconds)/3600
Data[[domain]][[paste0(toupper(domain),'ELTM')]] <- paste(ELTMnum,'h')
orderedELTM <- Data[[domain]][[paste0(toupper(domain),'ELTM')]][order(Data[[domain]][[paste0(toupper(domain),'TPTNUM')]])]
orderedLevelsELTM <- unique(orderedELTM)
Data[[domain]][[paste0(toupper(domain),'ELTM')]] <- factor(Data[[domain]][[paste0(toupper(domain),'ELTM')]],levels=orderedLevelsELTM)
Data[[domain]][[paste0(toupper(domain),'ELTMN')]] <- ELTMnum
# Interval domains additionally get a labelled interval factor (--EVLINT)
# and its numeric midpoint (--EVLINTN) for plotting.
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
STINT <- Data[[domain]][[paste0(toupper(domain),'STINT')]]
STINTnum <- sapply(STINT,DUR_to_seconds)/3600
ENINT <- Data[[domain]][[paste0(toupper(domain),'ENINT')]]
ENINTnum <- sapply(ENINT,DUR_to_seconds)/3600
Data[[domain]][[paste0(toupper(domain),'EVLINT')]] <- paste0(STINTnum,' to ',ENINTnum,' h')
orderedEVLINT <- Data[[domain]][[paste0(toupper(domain),'EVLINT')]][order(Data[[domain]][[paste0(toupper(domain),'TPTNUM')]])]
orderedLevelsEVLINT <- unique(orderedEVLINT)
Data[[domain]][[paste0(toupper(domain),'EVLINT')]] <- factor(Data[[domain]][[paste0(toupper(domain),'EVLINT')]],levels=orderedLevelsEVLINT)
Data[[domain]][[paste0(toupper(domain),'EVLINTN')]] <- rowMeans(cbind(STINTnum,ENINTnum))
}
}
})
return(Data)
})
# Checkbox group of all test names present in the selected domains,
# everything selected by default.
output$tests <- renderUI({
req(input$DOIs)
Data <- loadData()
Tests <- unlist(lapply(input$DOIs, function(domain) {
testCol <- Data[[domain]][[paste0(toupper(domain), 'TEST')]]
levels(testCol)[unique(testCol)]
}))
checkboxGroupInput('tests', 'Tests of Interest:', Tests, Tests)
})
# Checkbox group of dose levels (ELEMENT values such as "5 mg/kg"),
# sorted numerically by the leading number in the label.
output$doses <- renderUI({
Data <- loadData()
doses <- NULL
for (domain in DOIs) {
doses <- unique(c(doses,levels(Data[[domain]][['ELEMENT']])[Data[[domain]][['ELEMENT']]]))
}
# BUG FIX: as.numeric() on the *list* returned by lapply() errors for
# character elements ("'list' object cannot be coerced to type 'double'").
# Extract the leading token into a character vector with vapply(), then
# coerce that vector.
dosesN <- as.numeric(vapply(strsplit(doses,' '), `[[`, character(1), 1))
doses <- doses[order(dosesN)]
checkboxGroupInput('doses','Filter by Dose Level:',choices=doses,selected=doses)
})
# Checkbox group of all subject IDs appearing in any domain of interest,
# everything selected by default.
output$subjects <- renderUI({
Data <- loadData()
perDomain <- lapply(DOIs, function(domain) {
idCol <- Data[[domain]][['USUBJID']]
levels(idCol)[idCol]
})
subjects <- unique(unlist(perDomain))
checkboxGroupInput('subjects', 'Filter by Subject:', choices = subjects, selected = subjects)
})
# Checkbox group of all nominal study days (--NOMDY) appearing in any
# domain of interest, everything selected by default.
output$days <- renderUI({
Data <- loadData()
perDomain <- lapply(DOIs, function(domain) {
Data[[domain]][[paste0(toupper(domain), 'NOMDY')]]
})
days <- unique(unlist(perDomain))
checkboxGroupInput('days', 'Filter by Day:', choices = days, selected = days)
})
# Reactive: apply every sidebar filter (test, sex, dose group, subject,
# nominal day) to each domain of interest and return the filtered data list.
# A row is kept only if it satisfies all five filters (intersection).
filterData <- reactive({
Data <- loadData()
for (domain in DOIs) {
testIndex <- which(levels(Data[[domain]][[paste0(toupper(domain),'TEST')]])[Data[[domain]][[paste0(toupper(domain),'TEST')]]]
%in% input$tests)
sexIndex <- which(Data[[domain]][['SEX']] %in% input$sex)
doseIndex <- which(Data[[domain]][['ELEMENT']] %in% input$doses)
subjectIndex <- which(Data[[domain]][['USUBJID']] %in% input$subjects)
dayIndex <- which(Data[[domain]][[paste0(toupper(domain),'NOMDY')]] %in% input$days)
# Rows passing every filter.
index <- Reduce(intersect,list(testIndex,sexIndex,doseIndex,subjectIndex,dayIndex))
Data[[domain]] <- Data[[domain]][index,]
}
return(Data)
})
getIndividualTables <- reactive({
req(input$DOIs)
Data <- filterData()
individualTables <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]])
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
if (INTflag == T) {
testIndividualData <- dcast(testData,TEST+ELEMENT+SEX+NOMDY+USUBJID~EVLINT,value.var = 'STRESN')
} else {
testIndividualData <- dcast(testData,TEST+ELEMENT+SEX+NOMDY+USUBJID~ELTM,value.var = 'STRESN')
}
testIndividualData <- testIndividualData[order(testIndividualData$ELEMENT,testIndividualData$SEX,testIndividualData$NOMDY,testIndividualData$USUBJID,decreasing=F),]
individualTables[[paste(toupper(domain),testCD,sep='_')]] <- testIndividualData
}
}
return(individualTables)
})
getMeanTables <- reactive({
req(input$DOIs)
Data <- filterData()
meanTables <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
groupElement <- paste(testData$USUBJID,testData$ELEMENT,sep='_')
testData <- cbind(testData,groupElement)
if (INTflag == T) {
meanTestData <- dcast(testData,TEST+ELEMENT+SEX~EVLINT,value.var='STRESN',mean)
} else {
meanTestData <- dcast(testData,TEST+ELEMENT+SEX~ELTM,value.var='STRESN',mean)
}
meanTestData <- meanTestData[order(meanTestData$ELEMENT,meanTestData$SEX,decreasing=F),]
meanTables[[paste(toupper(domain),testCD,sep='_')]] <- meanTestData
}
}
return(meanTables)
})
observe({
req(input$tests)
values$nTests <- length(input$tests)
})
observe({
req(input$DOIs)
if (input$summary=='Individual Subjects') {
values$table <- getIndividualTables()
} else if (input$summary=='Group Means') {
values$table <- getMeanTables()
}
})
output$tables <- renderUI({
req(input$DOIs)
table_output_list <- lapply(seq(values$nTests), function(i) {
tableName <- paste0('table',i)
DT::dataTableOutput(tableName)
})
do.call(tagList, table_output_list)
})
observe({
lapply(seq(values$nTests),function(i) {
output[[paste0('table',i)]] <- DT::renderDataTable({
datatable({
Table <- values$table[[i]]
},options=list(autoWidth=T,scrollX=T,pageLength=100,paging=F,searching=F,#),
columnDefs=list(list(className='dt-center',width='100px',
targets=seq(0,(ncol(values$table[[i]])-1))))),
rownames=F)
})
})
})
getTestData <- reactive({
req(input$DOIs)
Data <- filterData()
testDataList <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
if (input$plotBy=='Subject') {
groupElement <- paste(testData$USUBJID,testData$ELEMENT,sep='_')
} else if (input$plotBy=='Day') {
groupElement <- paste(testData$NOMDY,testData$ELEMENT,sep='_')
}
testData <- cbind(testData,groupElement)
testDataList[[paste(toupper(domain),testCD,sep='_')]] <- testData
}
}
return(testDataList)
})
getMeanTestData <- reactive({
req(input$DOIs)
Data <- filterData()
meanTestDataList <- list()
for (domain in input$DOIs) {
if (length(grep('INT',colnames(Data[[domain]])))>=2) {
domainColumns <- domainColumnsINT
INTflag <- T
} else {
domainColumns <- domainColumnsNoINT
INTflag <- F
}
if (length(grep('CJUGSEND00',input$dataPath))>0) {
INTflag <- F
}
testDataColumns <- c('USUBJID',paste0(toupper(domain),domainColumns),'ELEMENT','SEX')
testDataColumns <- testDataColumns[testDataColumns %in% colnames(Data[[domain]])]
testCDs <- unique(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]) # this will be user-defined
for (testCD in testCDs) {
if (exists('testData')) rm(testData)
testData <- Data[[domain]][which(Data[[domain]][[paste0(toupper(domain),'TESTCD')]]==testCD),testDataColumns]
colnames(testData)[(seq(length(domainColumns))+1)] <- domainColumns
sexElement <- paste(testData$SEX,testData$ELEMENT,sep='_')
if (INTflag == T) {
meanTables <- dcast(testData,TEST+sexElement+ELEMENT+SEX~EVLINTN,value.var='STRESN',mean)
} else {
meanTables <- dcast(testData,TEST+sexElement+ELEMENT+SEX~ELTMN,value.var='STRESN',mean)
}
meanTestData <- melt(meanTables,id=c('TEST','sexElement','ELEMENT','SEX'))
meanTestDataList[[paste(toupper(domain),testCD,sep='_')]] <- meanTestData
}
}
return(meanTestDataList)
})
output$plots <- renderUI({
req(input$DOIs)
plot_output_list <- lapply(seq(values$nTests), function(i) {
plotName <- paste("plot", i, sep="")
plotOutput(plotName,height='600px')
})
do.call(tagList, plot_output_list)
})
observe({
lapply(seq(values$nTests),function(i) {
output[[paste0('plot',i)]] <- renderPlot({
pointSize <- 3
colorPalette <- c('black','blue','purple','red')
# if (i <= values$nTests) {
if (input$summary=='Individual Subjects') {
testDataList <- getTestData()
testData <- testDataList[[i]]
testData$NOMDY <- factor(as.character(testData$NOMDY),levels=(as.character(unique(testData$NOMDY)[order(unique(testData$NOMDY))])))
if (length(grep('INT',colnames(testData)))>=2) {
if (input$plotBy == 'Subject') {
p <- ggplot(testData,aes(x=EVLINTN,y=STRESN,group=groupElement,color=ELEMENT,shape=USUBJID)) + scale_shape_discrete('Subject')
} else if (input$plotBy == 'Day') {
p <- ggplot(testData,aes(x=EVLINTN,y=STRESN,group=groupElement,color=ELEMENT,shape=NOMDY)) + scale_shape_discrete('Day')
}
} else {
if (input$plotBy == 'Subject') {
p <- ggplot(testData,aes(x=ELTMN,y=STRESN,group=groupElement,color=ELEMENT,shape=USUBJID)) + scale_shape_discrete('Subject')
} else if (input$plotBy == 'Day') {
p <- ggplot(testData,aes(x=ELTMN,y=STRESN,group=groupElement,color=ELEMENT,shape=NOMDY)) + scale_shape_discrete('Day')
}
}
p <- p + geom_point(size=pointSize) + geom_line() + labs(title=testData$TEST[1],x='Time (h)',y=paste0(testData$TESTCD[1],' (',testData$STRESU[1],')')) +
scale_color_discrete(name = "Dose Group")# + scale_shape_discrete('Subject')
} else if (input$summary=='Group Means') {
testDataList <- getTestData()
testData <- testDataList[[i]]
meanTestDataList <- getMeanTestData()
meanTestData <- meanTestDataList[[i]]
p <- ggplot(meanTestData,aes(x=as.numeric(levels(variable)[variable]),y=value,group=sexElement,color=ELEMENT,shape=SEX)) +
geom_point(size=pointSize) + geom_line() + labs(title=meanTestData$TEST[1],x='Time (h)',y=paste0(testData$TESTCD[1],' (',testData$STRESU[1],')')) +
scale_color_discrete(name = "Dose Group") + scale_shape_discrete(name = 'Sex')
}
p <- p + theme_classic() +
theme(text=element_text(size=16),
axis.title.y=element_text(margin = margin(t=0,r=10,b=0,l=0)),
axis.title.x=element_text(margin = margin(t=10,r=,b=0,l=0)),
plot.title=element_text(hjust=0.5,margin=margin(t=0,r=0,b=10,l=0))) +
scale_color_manual(values=colorPalette)
p
# }
})
})
})
output$SENDdomains <- renderUI({
nTabs <- length(values$domainNames)
myTabs <- lapply(seq_len(nTabs),function(i) {
tabPanel(values$domainNames[i],
DT::dataTableOutput(paste0('datatable_',i))
)
})
do.call(tabsetPanel,myTabs)
})
observe({
lapply(seq(values$domainNames),function(i) {
output[[paste0('datatable_',i)]] <- DT::renderDataTable({
datatable({
Data <- loadData()
rawTable <- Data[[tolower(values$domainNames[i])]]
},options=list(autoWidth=T,scrollX=T,pageLength=10,paging=T,searching=T,
columnDefs=list(list(className='dt-center',targets='_all'))),
rownames=F)
})
})
})
output$define <- renderUI({
doc <- read_xml(paste(GitHubPath,input$dataPath,'define.xml',sep='/'))
style <- read_xml(paste(GitHubPath,input$dataPath,'define2-0-0.xsl',sep='/'))
html <- xml_xslt(doc,style)
cat(as.character(html),file='www/temp.html')
define <- tags$iframe(src='temp.html', height='700', width='100%')
define
})
}
ui <- shinyUI(
fluidPage(titlePanel(title='CDISC-SEND Safety Pharmacology Proof-of-Concept Pilot',
windowTitle = 'CDISC-SEND Safety Pharmacology Proof-of-Concept Pilot'),br(),
sidebarLayout(
sidebarPanel(width=3,
selectInput('dataPath','Select Dataset:',
choices = list(Lilly='data/send/8409511',
alsoLilly ='data/send/8409511')),
checkboxGroupInput('DOIs','Domains of Interest:',choiceNames=toupper(DOIs),choiceValues=DOIs,selected=DOIs),
uiOutput('tests'),
radioButtons('summary','Display:',c('Group Means','Individual Subjects')),
checkboxGroupInput('sex','Filter by Sex:',list(Male='M',Female='F'),selected=c('M','F')),
uiOutput('doses'),
uiOutput('subjects'),
uiOutput('days'),
conditionalPanel(condition='input.summary=="Individual Subjects"',
radioButtons('plotBy','Plot by:',c('Subject','Day'))
)
),
mainPanel(width=9,
tabsetPanel(
tabPanel('Tables',
withSpinner(uiOutput('tables'),type=5)
),
tabPanel('Figures',
withSpinner(uiOutput('plots'),type=5)
),
tabPanel('Source Data',
tabsetPanel(
tabPanel('SEND Domains',
withSpinner(uiOutput('SENDdomains'),type=5)
),
tabPanel('DEFINE',
withSpinner(htmlOutput('define'),type=5)
),
tabPanel('README',
column(11,
conditionalPanel(condition="input.dataPath=='data/send/8409511'",
includeMarkdown('https://github.com/bripaisley/phuse-scripts-Lilly/tree/master/data/send/CDISC-Safety-Pharmacology-POC/readme.txt')
#includeMarkdown('https://github.com/bripaisley/phuse-scripts-Lilly/tree/master/data/send/CDISC-Safety-Pharmacology-POC/readme.txt')
)
)
)
)
),
tabPanel('Algorithm Description',
column(11,
h4('A Latin square experimental design is used to control variation in an experiment across
two blocking factors, in this case: subject and time. In a traditional Latin square safety
pharmacology study, several findings e.g., heart rate, QT duration, temperature, are recorded
in each dog for a couple of hours predose and then for an extended period of time, e.g. 24
hours, following a single dose of the investigational therapy or vehicle. Doses are typically
followed by a one-week washout period. As shown in the example Latin square study design
below, each dog receives a single dose of each treatment level throughout the course of
the study, but no two dogs receive the same treatment on the same day:'),
br(),
HTML('<center><img src="Latin Square.png"></center>'),
br(),
br(),
h4('This type of study design was modeled in the proof-of-concept SEND dataset by describing the
treatment level in the ELEMENT field of the subject elements (SE) domain. As each ELEMENT
began at the time of dosing, treatment effects can be observed by matching, for each subject,
the start time of each ELEMENT (SESTDTC) with the reference dose time (--RFTDTC) of each finding
record. The following example of SQL code demonstrates the logic of this operation:'),
br(),
h4('SELECT * FROM CV, SE'),
h4('JOIN CV, SE'),
h4('ON CV.USUBJID = SE.USUBJID'),
h4('AND CV.CVRFTDTC = SE.SESTDTC'),
br(),
h4('Tables were created for each type of finding (--TEST) in each of the finding domains by
pivoting the table on the field encoding the duration of time elapsed between dosing and
each observation (--ELTM). If the finding of interest was collected over an interval of
time, then the start (--STINT) and end (ENINT) times of the recording interval for each
record were used to represent the recording interval in the place of the elapsed time point.
The order of time points/intervals was set chronologically using the --TPTNUM variable.
Mean values were calculated for each dosing group (ELEMENT) within each sex (SEX) for each
time point or interval of observation. Plots were generated using the same method, except
observations recorded over an interval of time were represented on the x-axis at the
mid-point of the interval.'),
br(),
h4('The proof-of-concept dataset used here was created by CDISC and is publicly available at:'),
tags$a(href='https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/data/send/CDISC-Safety-Pharmacology-POC/',
'https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/data/send/CDISC-Safety-Pharmacology-POC/'),
br(),br(),
h4('The source code for this R Shiny application is also publicly availalble at:'),
tags$a(href='https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Safety%20Pharmacology%20Pilot/Safety%20Pharmacology%20Analysis',
'https://github.com/bripaisley/phuse-scripts-Lilly/blob/master/contributed/Nonclinical/R/Safety%20Pharmacology%20Pilot/Safety%20Pharmacology%20Analysis'),
br(),br()
)
)
)
)
)
)
)
# Run Shiny App
shinyApp(ui = ui, server = server)
|
# Step 1. Create a connection
connection <- get_connection("fdb_destatis")
# Step 2. Login with your username and password
## Normal login
connection <- login(connection)
## Login with predefined username
connection <- login(connection, user = "max.knobloch@ingef.de")
# Step 3. Validate login and permission on dataset
validate_connection(connection)
# Change dataset of connection
connection <- change_dataset(connection, "fdb_demo")
| /man/examples/get_connection.R | no_license | ingef/cqapiR | R | false | false | 442 | r |
# Step 1. Create a connection
connection <- get_connection("fdb_destatis")
# Step 2. Login with your username and password
## Normal login
connection <- login(connection)
## Login with predefined username
connection <- login(connection, user = "max.knobloch@ingef.de")
# Step 3. Validate login and permission on dataset
validate_connection(connection)
# Change dataset of connection
connection <- change_dataset(connection, "fdb_demo")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_progress_branches.R
\name{tar_progress_branches}
\alias{tar_progress_branches}
\title{Read the target progress of the latest run of the pipeline.}
\usage{
tar_progress_branches(names = NULL, fields = NULL)
}
\arguments{
\item{names}{Optional, names of the targets. If supplied, \code{tar_progress()}
only returns progress information on these targets.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.}
\item{fields}{Optional, names of progress data columns to read.
Set to \code{NULL} to read all fields.}
}
\value{
A data frame with one row per target per progress status
and the following columns.
\itemize{
\item \code{name}: name of the pattern.
\item \code{progress}: progress status: \code{"running"}, \code{"built"}, \code{"cancelled"},
or \code{"errored"}.
\item \code{branches}: number of branches in the progress category.
\item \code{total}: total number of branches planned for the whole pattern.
Values within the same pattern should all be equal.
}
}
\description{
Read a project's target progress data for the most recent
run of \code{\link[=tar_make]{tar_make()}} or similar. Only the most recent record is shown.
}
\examples{
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
list(
tar_target(x, seq_len(2)),
tar_target(y, x, pattern = map(x)),
tar_target(z, stopifnot(y < 1.5), pattern = map(y))
)
}, ask = FALSE)
try(tar_make())
tar_progress_branches()
})
}
}
| /man/tar_progress_branches.Rd | permissive | russHyde/targets | R | false | true | 1,631 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tar_progress_branches.R
\name{tar_progress_branches}
\alias{tar_progress_branches}
\title{Read the target progress of the latest run of the pipeline.}
\usage{
tar_progress_branches(names = NULL, fields = NULL)
}
\arguments{
\item{names}{Optional, names of the targets. If supplied, \code{tar_progress()}
only returns progress information on these targets.
You can supply symbols, a character vector,
or \code{tidyselect} helpers like \code{\link[=starts_with]{starts_with()}}.}
\item{fields}{Optional, names of progress data columns to read.
Set to \code{NULL} to read all fields.}
}
\value{
A data frame with one row per target per progress status
and the following columns.
\itemize{
\item \code{name}: name of the pattern.
\item \code{progress}: progress status: \code{"running"}, \code{"built"}, \code{"cancelled"},
or \code{"errored"}.
\item \code{branches}: number of branches in the progress category.
\item \code{total}: total number of branches planned for the whole pattern.
Values within the same pattern should all be equal.
}
}
\description{
Read a project's target progress data for the most recent
run of \code{\link[=tar_make]{tar_make()}} or similar. Only the most recent record is shown.
}
\examples{
if (identical(Sys.getenv("TAR_LONG_EXAMPLES"), "true")) {
tar_dir({ # tar_dir() runs code from a temporary directory.
tar_script({
list(
tar_target(x, seq_len(2)),
tar_target(y, x, pattern = map(x)),
tar_target(z, stopifnot(y < 1.5), pattern = map(y))
)
}, ask = FALSE)
try(tar_make())
tar_progress_branches()
})
}
}
|
#' Convert a temperature from degrees Celsius to kelvin
#'
#' Adds the 273.15 offset; vectorized over the input.
#'
#' @param a Numeric vector of temperatures in degrees Celsius.
#'
#' @return A numeric vector the same length as `a`, giving the
#'   temperatures in kelvin.
#' @export
#'
#' @examples
#' celsius_to_kelvin(0)    # 273.15
#' celsius_to_kelvin(100)  # 373.15
celsius_to_kelvin <- function(a){
kelvin <- a+273.15
return(kelvin)
}
| /R/celsius_to_kelvin.R | permissive | hwallimann/convertR | R | false | false | 142 | r | #' Title
#' Convert Celsius to kelvin
#'
#' @param a Numeric vector of temperatures in degrees Celsius.
#'
#' @return A numeric vector of the same temperatures expressed in kelvin.
#' @export
#'
#' @examples
#' celsius_to_kelvin(100)
celsius_to_kelvin <- function(a){
return(a + 273.15)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/license_list.R
\name{license_list}
\alias{license_list}
\title{Return the list of licenses available for datasets on the site.}
\usage{
license_list(
id,
url = get_default_url(),
key = get_default_key(),
as = "list",
...
)
}
\arguments{
\item{id}{(character) Package identifier.}
\item{url}{Base url to use. Default: \url{http://data.techno-science.ca}. See
also \code{\link{ckanr_setup}} and \code{\link{get_default_url}}.}
\item{key}{A privileged CKAN API key, Default: your key set with \code{\link{ckanr_setup}}}
\item{as}{(character) One of list (default), table, or json. Parsing with table option
uses \code{jsonlite::fromJSON(..., simplifyDataFrame = TRUE)}, which attempts to parse
data to data.frame's when possible, so the result can vary from a vector, list or
data.frame. (required)}
\item{...}{Curl args passed on to \code{\link[crul]{verb-POST}} (optional)}
}
\description{
Return the list of licenses available for datasets on the site.
}
\examples{
\dontrun{
license_list()
license_list(as = "table")
license_list(as = "json")
}
}
| /man/license_list.Rd | permissive | AleKoure/ckanr | R | false | true | 1,140 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/license_list.R
\name{license_list}
\alias{license_list}
\title{Return the list of licenses available for datasets on the site.}
\usage{
license_list(
id,
url = get_default_url(),
key = get_default_key(),
as = "list",
...
)
}
\arguments{
\item{id}{(character) Package identifier.}
\item{url}{Base url to use. Default: \url{http://data.techno-science.ca}. See
also \code{\link{ckanr_setup}} and \code{\link{get_default_url}}.}
\item{key}{A privileged CKAN API key, Default: your key set with \code{\link{ckanr_setup}}}
\item{as}{(character) One of list (default), table, or json. Parsing with table option
uses \code{jsonlite::fromJSON(..., simplifyDataFrame = TRUE)}, which attempts to parse
data to data.frame's when possible, so the result can vary from a vector, list or
data.frame. (required)}
\item{...}{Curl args passed on to \code{\link[crul]{verb-POST}} (optional)}
}
\description{
Return the list of licenses available for datasets on the site.
}
\examples{
\dontrun{
license_list()
license_list(as = "table")
license_list(as = "json")
}
}
|
#############################################
## VolcanoPlot
#############################################
#' Volcano plots of significance-testing results
#'
#' Draws one volcano plot (log fold change vs. -log adjusted p-value) per
#' comparison in \code{data}. Proteins passing the \code{sig} cutoff (and,
#' optionally, the fold-change cutoff \code{FCcutoff}) are colored as up- or
#' down-regulated. Plots are printed to the active device, or written to a
#' multi-page pdf when \code{address} is given.
#'
#' @param data testing result with one row per protein and comparison; must
#'   contain columns \code{Protein}, \code{Label}, the probability column
#'   named by \code{prob}, and a fold-change column named \code{log2FC} or
#'   \code{log10FC}.
#' @param prob name of the adjusted p-value / q-value column.
#' @param prob.name y-axis label for that column.
#' @param sig significance cutoff applied to \code{prob}.
#' @param FCcutoff FALSE for no fold-change cutoff, or a positive number on
#'   the raw (un-logged) scale.
#' @param logBase.pvalue base (2 or 10) of the -log transform for the y axis.
#' @param which.Comparison "all", or a vector of comparison names / indices.
#' @param address file-name prefix for the output pdf; FALSE plots to the
#'   current device instead.
#' @export
#' @importFrom gplots heatmap.2
#' @importFrom stats hclust
#' @importFrom ggrepel geom_text_repel
#' @importFrom marray maPalette
VolcanoPlot <- function(data = data,
                        prob = "qvalue",       ## BUG FIX: missing ',' after default
                        prob.name = "Q-value", ## BUG FIX: missing ',' after default
                        sig = 0.05,
                        FCcutoff = FALSE,
                        logBase.pvalue = 10,
                        colors = c(non.sign = "gray65",
                                   sign.num = "red",
                                   sign.den = "blue"),
                        labels = c("No regulation",
                                   "Up-regulated",
                                   "Down-regulated"),
                        ylimUp = FALSE,
                        ylimDown = FALSE,
                        xlimUp = FALSE,
                        x.axis.size = 10,
                        y.axis.size = 10,
                        dot.size = 3,
                        text.size = 4,
                        legend.size = 13,
                        ProteinName = TRUE,
                        numProtein = 100,   ## kept for backward compatibility; unused here
                        clustering = "both",## kept for backward compatibility; unused here
                        width = 10,
                        height = 10,
                        which.Comparison = "all",
                        address="") {
    ## -- logging: append progress messages to the most recent msstats log ----
    allfiles <- list.files()
    filenaming <- "msstats"
    if (length(grep(filenaming, allfiles)) == 0) {
        finalfile <- "msstats.log"
        processout <- NULL
    } else {
        num <- 0
        finalfile <- "msstats.log"
        while(is.element(finalfile, allfiles)) {
            num <- num + 1
            lastfilename <- finalfile ## remember the newest existing log file
            finalfile <- paste0(paste(filenaming, num, sep="-"), ".log")
        }
        finalfile <- lastfilename
        processout <- as.matrix(read.table(finalfile, header = TRUE, sep = "\t"))
    }
    processout <- rbind(processout, as.matrix(c(" ", " ", "MSstats - VolcanoPlot function", " "), ncol = 1))
    type <- "VOLCANOPLOT"
    ## only -log2 or -log10 transformation of the p-values is supported
    if (logBase.pvalue != 2 & logBase.pvalue != 10) {
        processout <- rbind(processout, c("ERROR : (-) Logarithm transformation for adjusted p-values : log2 or log10 only - stop"))
        write.table(processout, file=finalfile, row.names=FALSE)
        stop("Only -log2 or -log10 for logarithm transformation for adjusted p-values are possible.\n")
    }
    ## restrict to the requested comparisons
    ## BUG FIX: identical() so that a *vector* of names does not break the scalar if()
    if (!identical(which.Comparison, "all")) {
        ## which.Comparison given as comparison names
        if (is.character(which.Comparison)) {
            temp.name <- which.Comparison
            if (length(setdiff(temp.name, unique(data$Label))) > 0) {
                processout <- rbind(processout, paste("Please check labels of comparisons. Result does not have this comparison. -", paste(temp.name, collapse = ", "), sep = " "))
                write.table(processout, file = finalfile, row.names = FALSE)
                stop(paste("Please check labels of comparisons. Result does not have this comparison. -", paste(temp.name, collapse = ", "), sep = " "))
            }
        }
        ## which.Comparison given as comparison indices
        if (is.numeric(which.Comparison)) {
            temp.name <- levels(data$Label)[which.Comparison]
            if (length(levels(data$Label)) < max(which.Comparison)) {
                stop(paste("Please check your selection of comparisons. There are ", length(levels(data$Label)), " comparisons in this result.", sep = " "))
            }
        }
        ## use only the selected comparisons
        data <- data[which(data$Label %in% temp.name), ]
        data$Protein <- factor(data$Protein)
        data$Label <- factor(data$Label)
    } else {
        data$Protein <- factor(data$Protein)
        data$Label <- factor(data$Label)
    }
    #######################
    ## VolcanoPlot
    #######################
    ## open one multi-page pdf; if a file with the same name exists, append a number
    if (address != FALSE) {
        allfiles <- list.files()
        num <- 0
        filenaming <- paste(address, "VolcanoPlot", sep="")
        finalfile <- paste(address, "VolcanoPlot.pdf", sep="")
        while(is.element(finalfile, allfiles)) {
            num <- num + 1
            finalfile <- paste0(paste(filenaming, num, sep = "-"), ".pdf")
        }
        pdf(finalfile, width = width, height = height)
    }
    ## default cap for -log(p); prevents Inf when p underflows
    if (logBase.pvalue == 2) {
        y.limUp <- 30
    } else if (logBase.pvalue == 10) {
        y.limUp <- 10
    }
    if (is.numeric(ylimUp)) y.limUp <- ylimUp
    ## drop rows with missing test results
    data <- data[!is.na(data[, prob]),]
    ## find the fold-change column ("log2FC" or "log10FC") and its log base
    ## BUG FIX: grep(value = TRUE); grepl() has no 'value' argument and returns
    ## a logical vector, which broke the strsplit() calls below
    fc.cutoff <- grep("log[12][0]?FC", colnames(data), value = TRUE)
    if (strsplit(fc.cutoff, split = "")[[1]][4] == 2) {  ## BUG FIX: missing ')'
        fc.cutoff.base <- 2
    } else if (strsplit(fc.cutoff, split = "")[[1]][4] == 1 &
               strsplit(fc.cutoff, split = "")[[1]][5] == 0) {
        ## BUG FIX: the 5th character of "log10FC" is "0", not "10";
        ## the old comparison ("0" == 10) could never be TRUE
        fc.cutoff.base <- 10
    } else {
        processout <- rbind(processout, "Please check labels of Fold Changes. FC should be expressed as log2FC or log10FC")
        write.table(processout, file = finalfile, row.names = FALSE)
        stop("Please check labels of Fold Changes. FC should be expressed as log2FC or log10FC")
    }
    ## color grouping: colors[1] = no regulation, colors[2] = up, colors[3] = down
    data$colgroup <- colors[1]
    if (is.numeric(FCcutoff) & FCcutoff > 0) {
        data[data[, prob] < sig & data[, fc.cutoff] > log(FCcutoff, base = fc.cutoff.base), "colgroup"] <- colors[2]
        data[data[, prob] < sig & data[, fc.cutoff] < -log(FCcutoff, base = fc.cutoff.base), "colgroup"] <- colors[3]
    } else {
        data[data[, prob] < sig & data[, fc.cutoff] > 0, "colgroup"] <- colors[2]
        data[data[, prob] < sig & data[, fc.cutoff] < 0, "colgroup"] <- colors[3]
    }
    data$colgroup <- factor(data$colgroup, levels = colors)
    ## one volcano plot per comparison
    ## BUG FIX: a stray '}' used to close this loop right after ptemp was built,
    ## so the axis labels, protein names, cutoff lines, theme and print() below
    ## ran outside the loop (and the braces no longer balanced). The loop now
    ## extends through print(pfinal) as the '## end-loop' comment intends.
    for(i in seq_along(levels(data$Label))) {
        sub <- data[data$Label == levels(data$Label)[i], ]
        ## cap extremely small p-values so -log(p) stays finite
        sub[, prob] [sub[, prob] < logBase.pvalue^(-y.limUp)] <- logBase.pvalue^(-y.limUp)
        sub <- as.data.frame(sub)
        ## y-axis upper limit (at least past the significance line)
        y.limup <- ceiling(max(-log(sub[!is.na(sub[, prob]), prob], base = logBase.pvalue)))
        if (y.limup < (-log(sig, base = logBase.pvalue))) {
            y.limup <- (-log(sig, base = logBase.pvalue) + 1) ## for too small y.lim
        }
        ## y-axis lower limit
        if (is.numeric(ylimDown)) {
            y.limdown <- ylimDown
        } else {
            y.limdown <- 0 ## default is zero
        }
        ## symmetric x-axis limit
        if (is.numeric(xlimUp)) {
            x.lim <- xlimUp
        } else {
            x.lim <- ceiling(max(abs(sub[!is.na(sub[, fc.cutoff]) & abs(sub[, fc.cutoff]) != Inf , fc.cutoff]))) ## log2FC or log10FC
            if (x.lim < 3) {
                x.lim <- 3
            }
        }
        ## rename the FC column to a fixed name for ggplot2
        subtemp <- sub
        ## BUG FIX: fc.cutoff is a column *name*; indexing colnames() with a
        ## character produced NA, so the rename silently failed
        colnames(subtemp)[colnames(subtemp) == fc.cutoff] <- "logFC"
        ## -log(prob)
        subtemp$log.adjp <- (-log(subtemp[, prob], base = logBase.pvalue))
        ## pull Inf / -Inf (one condition completely missing) just inside the x limits
        subtemp$newlogFC <- subtemp$logFC
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing" & subtemp$logFC == Inf, "newlogFC"] <- (x.lim - 0.2)
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing" & subtemp$logFC == (-Inf), "newlogFC"] <- (x.lim - 0.2) *(-1)
        ## mark those proteins with a leading "*"
        subtemp$Protein <- as.character(subtemp$Protein)
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing", "Protein"] <- paste("*", subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing", "Protein"], sep="")
        ## base scatter plot
        ptemp <- ggplot(aes_string(x='logFC', y='log.adjp', color='colgroup', label='Protein'), data=subtemp) +
            geom_point(size=dot.size)+
            scale_colour_manual(values = colors,
                                labels = labels) +
            scale_y_continuous(paste0('-Log', logBase.pvalue, ' (', prob.name, ')'),  ## BUG FIX: missing ','
                               limits = c(y.limdown, y.limup)) +
            labs(title = unique(sub$Label))
        ## x-axis labeling
        ptemp <- ptemp + scale_x_continuous(paste0('Log', fc.cutoff.base, ' fold change'), limits=c(-x.lim, x.lim))
        ## add protein names for significant proteins
        ## BUG FIX: colgroup holds values of 'colors' (default gray65/red/blue),
        ## never 'black'; compare against colors[1] (the non-significant color)
        if (ProteinName) {
            if(length(unique(subtemp$colgroup)) == 1 & any(unique(subtemp$colgroup) == colors[1])){
                message(paste("The volcano plot for ", unique(subtemp$Label), " does not show the protein names because none of them is significant.", sep=""))
            } else {
                ptemp <- ptemp + geom_text_repel(data=subtemp[subtemp$colgroup != colors[1], ], aes(label=Protein), size=text.size, col='black')
            }
        }
        ## For legend of linetype for cutoffs
        ## first assign line type
        ltypes <- c("type1"="twodash", "type2"="dotted")
        ## cutoff lines, FDR only
        if (!FCcutoff) {
            if (logBase.pvalue == 2) {
                ## BUG FIX: the significance line lives on the -log2(p) y axis,
                ## so it must use logBase.pvalue, not fc.cutoff.base
                sigcut <- data.frame(Protein='sigline', logFC=seq(-x.lim, x.lim, length.out=20), log.adjp=(-log2(sig)), line='twodash')
                pfinal <- ptemp + geom_line(data=sigcut, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                            colour="darkgrey",
                                            size=0.6,
                                            show.legend=TRUE)+
                    scale_linetype_manual(values=c('twodash'=6),
                                          labels=c(paste("Adj p-value cutoff (", sig, ")", sep="")))+
                    guides(colour=guide_legend(override.aes=list(linetype=0)),
                           linetype=guide_legend())
            }
            if (logBase.pvalue == 10) {
                sigcut <- data.frame(Protein='sigline', logFC=seq(-x.lim, x.lim, length.out=20), log10adjp=(-log10(sig)), line='twodash')
                pfinal <- ptemp + geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                            colour="darkgrey",
                                            size=0.6,
                                            show.legend=TRUE)+
                    scale_linetype_manual(values=c('twodash'=6),
                                          labels=c(paste("Adj p-value cutoff (", sig, ")", sep="")))+
                    guides(colour=guide_legend(override.aes=list(linetype=0)),
                           linetype=guide_legend())
            }
        }
        ## cutoff lines, FDR and Fold change cutoff
        if (is.numeric(FCcutoff)) {
            ## use the detected FC column name instead of assuming it is column 3
            if (fc.cutoff == "log2FC") {
                if (logBase.pvalue == 2) {
                    ## three different lines
                    ## BUG FIX: significance line uses the y-axis base (log2)
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log.adjp=(-log2(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log2(FCcutoff),
                                           log.adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log2(FCcutoff)),
                                           log.adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6, show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
                if (logBase.pvalue == 10) {
                    ## three different lines
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log10adjp=(-log10(sig)), line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log2(FCcutoff),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log2(FCcutoff)),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
            }
            if (fc.cutoff == "log10FC") {
                if (logBase.pvalue == 2) {
                    ## three different lines
                    ## BUG FIX: sigcut used column 'log.adjp' while the aes below
                    ## mapped y='log2adjp'; use 'log2adjp' consistently
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log2adjp=(-log2(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log10(FCcutoff),
                                           log2adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log10(FCcutoff)),
                                           log2adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
                if (logBase.pvalue == 10) {
                    ## three different lines
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log10adjp=(-log10(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log10(FCcutoff),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log10(FCcutoff)),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
            }
        }
        ## shared theme and output
        pfinal <- pfinal+theme(
            panel.background = element_rect(fill='white', colour="black"),
            panel.grid.minor = element_blank(),
            axis.text.x = element_text(size=x.axis.size, colour="black"),
            axis.text.y = element_text(size=y.axis.size, colour="black"),
            axis.ticks = element_line(colour="black"),
            axis.title.x = element_text(size=x.axis.size+5, vjust=-0.4),
            axis.title.y = element_text(size=y.axis.size+5, vjust=0.3),
            title = element_text(size=x.axis.size+8, vjust=1.5),
            legend.position="bottom",
            legend.key = element_rect(fill='white', colour='white'),
            legend.text = element_text(size=legend.size),
            legend.title = element_blank()
        )
        print(pfinal)
    } ## end-loop
    if (address!=FALSE) dev.off()
}
| /R/VolacanoPlot.R | no_license | IvanSilbern/MSstats | R | false | false | 20,386 | r | #############################################
## VolcanoPlot
#############################################
#' Volcano plots of significance-testing results
#'
#' Draws one volcano plot (log fold change vs. -log adjusted p-value) per
#' comparison in \code{data}. Proteins passing the \code{sig} cutoff (and,
#' optionally, the fold-change cutoff \code{FCcutoff}) are colored as up- or
#' down-regulated. Plots are printed to the active device, or written to a
#' multi-page pdf when \code{address} is given.
#'
#' @param data testing result with one row per protein and comparison; must
#'   contain columns \code{Protein}, \code{Label}, the probability column
#'   named by \code{prob}, and a fold-change column named \code{log2FC} or
#'   \code{log10FC}.
#' @param prob name of the adjusted p-value / q-value column.
#' @param prob.name y-axis label for that column.
#' @param sig significance cutoff applied to \code{prob}.
#' @param FCcutoff FALSE for no fold-change cutoff, or a positive number on
#'   the raw (un-logged) scale.
#' @param logBase.pvalue base (2 or 10) of the -log transform for the y axis.
#' @param which.Comparison "all", or a vector of comparison names / indices.
#' @param address file-name prefix for the output pdf; FALSE plots to the
#'   current device instead.
#' @export
#' @importFrom gplots heatmap.2
#' @importFrom stats hclust
#' @importFrom ggrepel geom_text_repel
#' @importFrom marray maPalette
VolcanoPlot <- function(data = data,
                        prob = "qvalue",       ## BUG FIX: missing ',' after default
                        prob.name = "Q-value", ## BUG FIX: missing ',' after default
                        sig = 0.05,
                        FCcutoff = FALSE,
                        logBase.pvalue = 10,
                        colors = c(non.sign = "gray65",
                                   sign.num = "red",
                                   sign.den = "blue"),
                        labels = c("No regulation",
                                   "Up-regulated",
                                   "Down-regulated"),
                        ylimUp = FALSE,
                        ylimDown = FALSE,
                        xlimUp = FALSE,
                        x.axis.size = 10,
                        y.axis.size = 10,
                        dot.size = 3,
                        text.size = 4,
                        legend.size = 13,
                        ProteinName = TRUE,
                        numProtein = 100,   ## kept for backward compatibility; unused here
                        clustering = "both",## kept for backward compatibility; unused here
                        width = 10,
                        height = 10,
                        which.Comparison = "all",
                        address="") {
    ## -- logging: append progress messages to the most recent msstats log ----
    allfiles <- list.files()
    filenaming <- "msstats"
    if (length(grep(filenaming, allfiles)) == 0) {
        finalfile <- "msstats.log"
        processout <- NULL
    } else {
        num <- 0
        finalfile <- "msstats.log"
        while(is.element(finalfile, allfiles)) {
            num <- num + 1
            lastfilename <- finalfile ## remember the newest existing log file
            finalfile <- paste0(paste(filenaming, num, sep="-"), ".log")
        }
        finalfile <- lastfilename
        processout <- as.matrix(read.table(finalfile, header = TRUE, sep = "\t"))
    }
    processout <- rbind(processout, as.matrix(c(" ", " ", "MSstats - VolcanoPlot function", " "), ncol = 1))
    type <- "VOLCANOPLOT"
    ## only -log2 or -log10 transformation of the p-values is supported
    if (logBase.pvalue != 2 & logBase.pvalue != 10) {
        processout <- rbind(processout, c("ERROR : (-) Logarithm transformation for adjusted p-values : log2 or log10 only - stop"))
        write.table(processout, file=finalfile, row.names=FALSE)
        stop("Only -log2 or -log10 for logarithm transformation for adjusted p-values are possible.\n")
    }
    ## restrict to the requested comparisons
    ## BUG FIX: identical() so that a *vector* of names does not break the scalar if()
    if (!identical(which.Comparison, "all")) {
        ## which.Comparison given as comparison names
        if (is.character(which.Comparison)) {
            temp.name <- which.Comparison
            if (length(setdiff(temp.name, unique(data$Label))) > 0) {
                processout <- rbind(processout, paste("Please check labels of comparisons. Result does not have this comparison. -", paste(temp.name, collapse = ", "), sep = " "))
                write.table(processout, file = finalfile, row.names = FALSE)
                stop(paste("Please check labels of comparisons. Result does not have this comparison. -", paste(temp.name, collapse = ", "), sep = " "))
            }
        }
        ## which.Comparison given as comparison indices
        if (is.numeric(which.Comparison)) {
            temp.name <- levels(data$Label)[which.Comparison]
            if (length(levels(data$Label)) < max(which.Comparison)) {
                stop(paste("Please check your selection of comparisons. There are ", length(levels(data$Label)), " comparisons in this result.", sep = " "))
            }
        }
        ## use only the selected comparisons
        data <- data[which(data$Label %in% temp.name), ]
        data$Protein <- factor(data$Protein)
        data$Label <- factor(data$Label)
    } else {
        data$Protein <- factor(data$Protein)
        data$Label <- factor(data$Label)
    }
    #######################
    ## VolcanoPlot
    #######################
    ## open one multi-page pdf; if a file with the same name exists, append a number
    if (address != FALSE) {
        allfiles <- list.files()
        num <- 0
        filenaming <- paste(address, "VolcanoPlot", sep="")
        finalfile <- paste(address, "VolcanoPlot.pdf", sep="")
        while(is.element(finalfile, allfiles)) {
            num <- num + 1
            finalfile <- paste0(paste(filenaming, num, sep = "-"), ".pdf")
        }
        pdf(finalfile, width = width, height = height)
    }
    ## default cap for -log(p); prevents Inf when p underflows
    if (logBase.pvalue == 2) {
        y.limUp <- 30
    } else if (logBase.pvalue == 10) {
        y.limUp <- 10
    }
    if (is.numeric(ylimUp)) y.limUp <- ylimUp
    ## drop rows with missing test results
    data <- data[!is.na(data[, prob]),]
    ## find the fold-change column ("log2FC" or "log10FC") and its log base
    ## BUG FIX: grep(value = TRUE); grepl() has no 'value' argument and returns
    ## a logical vector, which broke the strsplit() calls below
    fc.cutoff <- grep("log[12][0]?FC", colnames(data), value = TRUE)
    if (strsplit(fc.cutoff, split = "")[[1]][4] == 2) {  ## BUG FIX: missing ')'
        fc.cutoff.base <- 2
    } else if (strsplit(fc.cutoff, split = "")[[1]][4] == 1 &
               strsplit(fc.cutoff, split = "")[[1]][5] == 0) {
        ## BUG FIX: the 5th character of "log10FC" is "0", not "10";
        ## the old comparison ("0" == 10) could never be TRUE
        fc.cutoff.base <- 10
    } else {
        processout <- rbind(processout, "Please check labels of Fold Changes. FC should be expressed as log2FC or log10FC")
        write.table(processout, file = finalfile, row.names = FALSE)
        stop("Please check labels of Fold Changes. FC should be expressed as log2FC or log10FC")
    }
    ## color grouping: colors[1] = no regulation, colors[2] = up, colors[3] = down
    data$colgroup <- colors[1]
    if (is.numeric(FCcutoff) & FCcutoff > 0) {
        data[data[, prob] < sig & data[, fc.cutoff] > log(FCcutoff, base = fc.cutoff.base), "colgroup"] <- colors[2]
        data[data[, prob] < sig & data[, fc.cutoff] < -log(FCcutoff, base = fc.cutoff.base), "colgroup"] <- colors[3]
    } else {
        data[data[, prob] < sig & data[, fc.cutoff] > 0, "colgroup"] <- colors[2]
        data[data[, prob] < sig & data[, fc.cutoff] < 0, "colgroup"] <- colors[3]
    }
    data$colgroup <- factor(data$colgroup, levels = colors)
    ## one volcano plot per comparison
    ## BUG FIX: a stray '}' used to close this loop right after ptemp was built,
    ## so the axis labels, protein names, cutoff lines, theme and print() below
    ## ran outside the loop (and the braces no longer balanced). The loop now
    ## extends through print(pfinal) as the '## end-loop' comment intends.
    for(i in seq_along(levels(data$Label))) {
        sub <- data[data$Label == levels(data$Label)[i], ]
        ## cap extremely small p-values so -log(p) stays finite
        sub[, prob] [sub[, prob] < logBase.pvalue^(-y.limUp)] <- logBase.pvalue^(-y.limUp)
        sub <- as.data.frame(sub)
        ## y-axis upper limit (at least past the significance line)
        y.limup <- ceiling(max(-log(sub[!is.na(sub[, prob]), prob], base = logBase.pvalue)))
        if (y.limup < (-log(sig, base = logBase.pvalue))) {
            y.limup <- (-log(sig, base = logBase.pvalue) + 1) ## for too small y.lim
        }
        ## y-axis lower limit
        if (is.numeric(ylimDown)) {
            y.limdown <- ylimDown
        } else {
            y.limdown <- 0 ## default is zero
        }
        ## symmetric x-axis limit
        if (is.numeric(xlimUp)) {
            x.lim <- xlimUp
        } else {
            x.lim <- ceiling(max(abs(sub[!is.na(sub[, fc.cutoff]) & abs(sub[, fc.cutoff]) != Inf , fc.cutoff]))) ## log2FC or log10FC
            if (x.lim < 3) {
                x.lim <- 3
            }
        }
        ## rename the FC column to a fixed name for ggplot2
        subtemp <- sub
        ## BUG FIX: fc.cutoff is a column *name*; indexing colnames() with a
        ## character produced NA, so the rename silently failed
        colnames(subtemp)[colnames(subtemp) == fc.cutoff] <- "logFC"
        ## -log(prob)
        subtemp$log.adjp <- (-log(subtemp[, prob], base = logBase.pvalue))
        ## pull Inf / -Inf (one condition completely missing) just inside the x limits
        subtemp$newlogFC <- subtemp$logFC
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing" & subtemp$logFC == Inf, "newlogFC"] <- (x.lim - 0.2)
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing" & subtemp$logFC == (-Inf), "newlogFC"] <- (x.lim - 0.2) *(-1)
        ## mark those proteins with a leading "*"
        subtemp$Protein <- as.character(subtemp$Protein)
        subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing", "Protein"] <- paste("*", subtemp[!is.na(subtemp$issue) & subtemp$issue == "oneConditionMissing", "Protein"], sep="")
        ## base scatter plot
        ptemp <- ggplot(aes_string(x='logFC', y='log.adjp', color='colgroup', label='Protein'), data=subtemp) +
            geom_point(size=dot.size)+
            scale_colour_manual(values = colors,
                                labels = labels) +
            scale_y_continuous(paste0('-Log', logBase.pvalue, ' (', prob.name, ')'),  ## BUG FIX: missing ','
                               limits = c(y.limdown, y.limup)) +
            labs(title = unique(sub$Label))
        ## x-axis labeling
        ptemp <- ptemp + scale_x_continuous(paste0('Log', fc.cutoff.base, ' fold change'), limits=c(-x.lim, x.lim))
        ## add protein names for significant proteins
        ## BUG FIX: colgroup holds values of 'colors' (default gray65/red/blue),
        ## never 'black'; compare against colors[1] (the non-significant color)
        if (ProteinName) {
            if(length(unique(subtemp$colgroup)) == 1 & any(unique(subtemp$colgroup) == colors[1])){
                message(paste("The volcano plot for ", unique(subtemp$Label), " does not show the protein names because none of them is significant.", sep=""))
            } else {
                ptemp <- ptemp + geom_text_repel(data=subtemp[subtemp$colgroup != colors[1], ], aes(label=Protein), size=text.size, col='black')
            }
        }
        ## For legend of linetype for cutoffs
        ## first assign line type
        ltypes <- c("type1"="twodash", "type2"="dotted")
        ## cutoff lines, FDR only
        if (!FCcutoff) {
            if (logBase.pvalue == 2) {
                ## BUG FIX: the significance line lives on the -log2(p) y axis,
                ## so it must use logBase.pvalue, not fc.cutoff.base
                sigcut <- data.frame(Protein='sigline', logFC=seq(-x.lim, x.lim, length.out=20), log.adjp=(-log2(sig)), line='twodash')
                pfinal <- ptemp + geom_line(data=sigcut, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                            colour="darkgrey",
                                            size=0.6,
                                            show.legend=TRUE)+
                    scale_linetype_manual(values=c('twodash'=6),
                                          labels=c(paste("Adj p-value cutoff (", sig, ")", sep="")))+
                    guides(colour=guide_legend(override.aes=list(linetype=0)),
                           linetype=guide_legend())
            }
            if (logBase.pvalue == 10) {
                sigcut <- data.frame(Protein='sigline', logFC=seq(-x.lim, x.lim, length.out=20), log10adjp=(-log10(sig)), line='twodash')
                pfinal <- ptemp + geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                            colour="darkgrey",
                                            size=0.6,
                                            show.legend=TRUE)+
                    scale_linetype_manual(values=c('twodash'=6),
                                          labels=c(paste("Adj p-value cutoff (", sig, ")", sep="")))+
                    guides(colour=guide_legend(override.aes=list(linetype=0)),
                           linetype=guide_legend())
            }
        }
        ## cutoff lines, FDR and Fold change cutoff
        if (is.numeric(FCcutoff)) {
            ## use the detected FC column name instead of assuming it is column 3
            if (fc.cutoff == "log2FC") {
                if (logBase.pvalue == 2) {
                    ## three different lines
                    ## BUG FIX: significance line uses the y-axis base (log2)
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log.adjp=(-log2(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log2(FCcutoff),
                                           log.adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log2(FCcutoff)),
                                           log.adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6, show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log.adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
                if (logBase.pvalue == 10) {
                    ## three different lines
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log10adjp=(-log10(sig)), line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log2(FCcutoff),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log2(FCcutoff)),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
            }
            if (fc.cutoff == "log10FC") {
                if (logBase.pvalue == 2) {
                    ## three different lines
                    ## BUG FIX: sigcut used column 'log.adjp' while the aes below
                    ## mapped y='log2adjp'; use 'log2adjp' consistently
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log2adjp=(-log2(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log10(FCcutoff),
                                           log2adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log10(FCcutoff)),
                                           log2adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log2adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
                if (logBase.pvalue == 10) {
                    ## three different lines
                    sigcut <- data.frame(Protein='sigline',
                                         logFC=seq(-x.lim, x.lim, length.out=10),
                                         log10adjp=(-log10(sig)),
                                         line='twodash')
                    FCcutpos <- data.frame(Protein='sigline',
                                           logFC=log10(FCcutoff),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    FCcutneg <- data.frame(Protein='sigline',
                                           logFC=(-log10(FCcutoff)),
                                           log10adjp=seq(y.limdown, y.limup, length.out=10),
                                           line='dotted')
                    ## three lines, with order color first and then assign linetype manual
                    pfinal <- ptemp+geom_line(data=sigcut, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                              colour="darkgrey",
                                              size=0.6,
                                              show.legend=TRUE)+
                        geom_line(data=FCcutpos, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6,
                                  show.legend=TRUE)+
                        geom_line(data=FCcutneg, aes_string(x='logFC', y='log10adjp', linetype='line'),
                                  colour="darkgrey",
                                  size=0.6)+
                        scale_linetype_manual(values=c('dotted'=3, 'twodash'=6),
                                              labels=c(paste("Fold change cutoff (", FCcutoff, ")", sep=""), paste("Adj p-value cutoff (", sig, ")", sep="")))+
                        guides(colour=guide_legend(override.aes=list(linetype=0)),
                               linetype=guide_legend())
                }
            }
        }
        ## shared theme and output
        pfinal <- pfinal+theme(
            panel.background = element_rect(fill='white', colour="black"),
            panel.grid.minor = element_blank(),
            axis.text.x = element_text(size=x.axis.size, colour="black"),
            axis.text.y = element_text(size=y.axis.size, colour="black"),
            axis.ticks = element_line(colour="black"),
            axis.title.x = element_text(size=x.axis.size+5, vjust=-0.4),
            axis.title.y = element_text(size=y.axis.size+5, vjust=0.3),
            title = element_text(size=x.axis.size+8, vjust=1.5),
            legend.position="bottom",
            legend.key = element_rect(fill='white', colour='white'),
            legend.text = element_text(size=legend.size),
            legend.title = element_blank()
        )
        print(pfinal)
    } ## end-loop
    if (address!=FALSE) dev.off()
}
|
#' Get rank for a given taxonomic name.
#'
#' @export
#' @param x (character) Vector of one or more taxon names (character) or
#' IDs (character or numeric) to query. Or objects returned from `get_*()`
#' functions like [get_tsn()]
#' @param db (character) database to query. either `ncbi`, `itis`, `eol`, `col`,
#' `tropicos`, `gbif`,`nbn`, `worms`, `natserv`, `bold`. Note that each
#' taxonomic data source has their own identifiers, so that if you provide the
#' wrong `db` value for the identifier you may get a result, but it will
#' likely be wrong (not what you were expecting). If using ncbi or eol we
#' recommend getting an API key; see [taxize-authentication]
#' @param rows numeric; Any number from 1 to infinity. If the default NA,
#' all rows are considered. passed down to `get_*()` functions.
#' @param ... Additional arguments to [classification()]
#' @return A named list of character vectors with ranks (all lower-cased)
#' @note While [tax_name()] returns the name of a specified
#' rank, [tax_rank()] returns the actual rank of the taxon.
#' @seealso [classification()],[tax_name()]
#' @examples \dontrun{
#' tax_rank(x = "Helianthus annuus", db = "itis")
#' tax_rank(get_tsn("Helianthus annuus"))
#' tax_rank(c("Helianthus", "Pinus", "Poa"), db = "itis")
#'
#' tax_rank(get_boldid("Helianthus annuus"))
#' tax_rank("421377", db = "bold")
#' tax_rank(421377, db = "bold")
#'
#' tax_rank(c("Plantae", "Helianthus annuus",
#' "Puma", "Homo sapiens"), db = 'itis')
#' tax_rank(c("Helianthus annuus", "Quercus", "Fabaceae"), db = 'tropicos')
#'
#' tax_rank(names_list("species"), db = 'gbif')
#' tax_rank(names_list("family"), db = 'gbif')
#'
#' tax_rank(c("Gadus morhua", "Lichenopora neapolitana"),
#' db = "worms")
#' }
tax_rank <- function(x, db = NULL, rows = NA, ...) {
  # S3 generic: dispatches on the class of `x` (character names, numeric ids,
  # or an id object returned by a `get_*()` function).
  UseMethod("tax_rank")
}
#' @export
tax_rank.default <- function(x, db = NULL, rows = NA, ...) {
  # Fallback method: `x` is already a resolved id object (e.g. the result of
  # get_tsn()), so look the ranks up directly and name the output by the input.
  ranks <- tax_rank_(x, ...)
  stats::setNames(ranks, x)
}
#' @export
tax_rank.character <- function(x, db = NULL, rows = NA, ...) {
  # Character method: resolve names/ids against the requested data source,
  # then fetch ranks for the resolved ids.
  nstop(db)
  stopifnot(length(db) == 1)
  # One id-resolver per supported data source. A table lookup replaces the
  # previous 11 near-identical switch() branches; dispatch and the error for
  # unknown db values are unchanged.
  getters <- list(
    bold = get_boldid, col = get_colid, eol = get_eolid,
    gbif = get_gbifid, natserv = get_natservid, nbn = get_nbnid,
    tol = get_tolid, tropicos = get_tpsid, itis = get_tsn,
    ncbi = get_uid, worms = get_wormsid
  )
  if (!db %in% names(getters)) {
    stop("the provided db value was not recognised", call. = FALSE)
  }
  ids <- process_ids(x, db, getters[[db]], rows = rows)
  stats::setNames(tax_rank_(ids, ...), x)
}
#' @export
tax_rank.numeric <- function(x, db = NULL, rows = NA, ...) {
  # Numeric ids are handled exactly like their string representation.
  tax_rank(x = as.character(x), db = db, rows = rows, ...)
}
# ---------
tax_rank_ <- function(id, ...) {
  # Look up the rank of each id via classification(); the rank reported is the
  # one on the *last* row of the classification table (the queried taxon).
  lookup_one <- function(one_id, clz, ...) {
    cls <- classification(one_id, db = clz, ...)
    if (all(is.na(cls))) {
      return(NA_character_)
    }
    first <- cls[[1]]
    if (NROW(first) == 0) {
      return(NA_character_)
    }
    rank <- first[nrow(first), "rank"][[1]]
    if (length(rank) == 0) NA_character_ else tolower(rank)
  }
  # dbswap() maps the id-object class (e.g. "tsn") back to a db name.
  lapply(id, lookup_one, clz = dbswap(class(id)), ...)
}
| /R/tax_rank.R | permissive | muschellij2/taxize | R | false | false | 3,787 | r | #' Get rank for a given taxonomic name.
#'
#' @export
#' @param x (character) Vector of one or more taxon names (character) or
#' IDs (character or numeric) to query. Or objects returned from `get_*()`
#' functions like [get_tsn()]
#' @param db (character) database to query. either `ncbi`, `itis`, `eol`, `col`,
#' `tropicos`, `gbif`,`nbn`, `worms`, `natserv`, `bold`. Note that each
#' taxonomic data source has their own identifiers, so that if you provide the
#' wrong `db` value for the identifier you may get a result, but it will
#' likely be wrong (not what you were expecting). If using ncbi or eol we
#' recommend getting an API key; see [taxize-authentication]
#' @param rows numeric; Any number from 1 to infinity. If the default NA,
#' all rows are considered. passed down to `get_*()` functions.
#' @param ... Additional arguments to [classification()]
#' @return A named list of character vectors with ranks (all lower-cased)
#' @note While [tax_name()] returns the name of a specified
#' rank, [tax_rank()] returns the actual rank of the taxon.
#' @seealso [classification()],[tax_name()]
#' @examples \dontrun{
#' tax_rank(x = "Helianthus annuus", db = "itis")
#' tax_rank(get_tsn("Helianthus annuus"))
#' tax_rank(c("Helianthus", "Pinus", "Poa"), db = "itis")
#'
#' tax_rank(get_boldid("Helianthus annuus"))
#' tax_rank("421377", db = "bold")
#' tax_rank(421377, db = "bold")
#'
#' tax_rank(c("Plantae", "Helianthus annuus",
#' "Puma", "Homo sapiens"), db = 'itis')
#' tax_rank(c("Helianthus annuus", "Quercus", "Fabaceae"), db = 'tropicos')
#'
#' tax_rank(names_list("species"), db = 'gbif')
#' tax_rank(names_list("family"), db = 'gbif')
#'
#' tax_rank(c("Gadus morhua", "Lichenopora neapolitana"),
#' db = "worms")
#' }
tax_rank <- function(x, db = NULL, rows = NA, ...) {
  # S3 generic: dispatches on the class of `x` (character names, numeric ids,
  # or an id object returned by a `get_*()` function).
  UseMethod("tax_rank")
}
#' @export
tax_rank.default <- function(x, db = NULL, rows = NA, ...) {
  # Fallback method: `x` is already a resolved id object (e.g. the result of
  # get_tsn()), so look the ranks up directly and name the output by the input.
  ranks <- tax_rank_(x, ...)
  stats::setNames(ranks, x)
}
#' @export
tax_rank.character <- function(x, db = NULL, rows = NA, ...) {
  # Character method: resolve names/ids against the requested data source,
  # then fetch ranks for the resolved ids.
  nstop(db)
  stopifnot(length(db) == 1)
  # One id-resolver per supported data source. A table lookup replaces the
  # previous 11 near-identical switch() branches; dispatch and the error for
  # unknown db values are unchanged.
  getters <- list(
    bold = get_boldid, col = get_colid, eol = get_eolid,
    gbif = get_gbifid, natserv = get_natservid, nbn = get_nbnid,
    tol = get_tolid, tropicos = get_tpsid, itis = get_tsn,
    ncbi = get_uid, worms = get_wormsid
  )
  if (!db %in% names(getters)) {
    stop("the provided db value was not recognised", call. = FALSE)
  }
  ids <- process_ids(x, db, getters[[db]], rows = rows)
  stats::setNames(tax_rank_(ids, ...), x)
}
#' @export
tax_rank.numeric <- function(x, db = NULL, rows = NA, ...) {
  # Numeric ids are handled by coercing to character and re-dispatching.
  tax_rank(as.character(x), db = db, rows = rows, ...)
}
# ---------
tax_rank_ <- function(id, ...) {
  # Look up the rank of the lowest (last) classification level for one id.
  rank_of_one <- function(one_id, clz, ...) {
    res <- classification(one_id, db = clz, ...)
    if (all(is.na(res))) {
      return(NA_character_)
    }
    hier <- res[[1]]
    if (NROW(hier) == 0) {
      return(NA_character_)
    }
    last_rank <- hier[nrow(hier), "rank"][[1]]
    if (length(last_rank) == 0) NA_character_ else tolower(last_rank)
  }
  # dbswap() maps the id object's class (e.g. "tsn") back to its db name.
  lapply(id, rank_of_one, clz = dbswap(class(id)), ...)
}
|
#!/usr/bin/Rscript
##
## fig4_plotExpl.R
##
## EDF 5/12/2021
##
library(ggplot2)
library(dplyr)
library(tidyr)
setwd("~/projects/MANUSCRIPT/")
# Toy example: ref/alt allele read counts under two conditions.
temp_data <- data.frame(
  cond = factor(rep(c("Promoter", "Control"), each = 2),
                levels = c("Promoter", "Control")),
  allele = rep(c("ref", "alt"), times = 2),
  value = c(36, 64, 42, 36)
)
# Dodged bar chart: alleles side by side, condition encoded by transparency.
expl_plot <- ggplot(temp_data, aes(cond, value)) +
  geom_col(aes(alpha = cond, fill = allele),
           position = position_dodge(),
           width = .75) +
  scale_alpha_discrete(range = c(.5, 1)) +
  scale_fill_manual(values = c("deepskyblue2", "brown3")) +
  theme_classic() +
  ylab("Read Count") +
  xlab("Condition") +
  theme(legend.position = "none")
print(expl_plot)
ggsave("plots/fig4_plotExpl1.pdf", plot = expl_plot,
       height = 2, width = 2)
# temp_data %>%
# pivot_wider(names_from=allele,values_from=value) %>%
# mutate(tot_read = ref+alt,
# alt_af = alt/tot_read) %>%
# ggplot(aes(cond,alt_af)) +
# geom_point(aes(color=cond,size=tot_read)) +
# scale_color_manual(values=c('darkorchid2','darkorchid4')) +
# scale_size_continuous(limits=c(1,100)) +
# theme_classic() +
# ylab('ALT AF') +
# scale_y_continuous(breaks=c(0,.5,1),
# limits=c(0,1)) +
# xlab('Condition') +
# theme(legend.position='none')
# ggsave("plots/fig4_plotExpl2.pdf",
# height=2, width=2)
#
# mpileups_comb_fi_test_lowp %>%
# head(n=32) %>%
# ggplot(aes(1,value)) +
# geom_col(aes(alpha=cond, fill=allele),
# position=position_dodge(),
# width=.75) +
# geom_text(aes(1,p_height,
# label=p_lab)) +
# scale_alpha_discrete(range=c(.5,1)) +
# scale_fill_manual(values=c('deepskyblue2','brown3')) +
# geom_text(aes(1,p_height*1.1,
# label=NA)) +
# facet_wrap(~gene_p_lab,
# scales = 'free_y') +
# theme_classic() +
# ylab('Read Count') +
# xlab('Time Point')
| /plots/fig4_plotExpl.R | no_license | LappalainenLab/TF-eQTL_preprint | R | false | false | 1,853 | r | #!/usr/bin/Rscript
##
## fig4_plotExpl.R
##
## EDF 5/12/2021
##
library(ggplot2)
library(dplyr)
library(tidyr)
setwd("~/projects/MANUSCRIPT/")
temp_data = data.frame(
cond=factor(c(rep('Promoter',2),rep('Control',2)),levels=c('Promoter','Control')),
allele=rep(c('ref','alt'),2),
value=c(36,64,42,36)
)
temp_data %>%
ggplot(aes(cond,value)) +
geom_col(aes(alpha=cond,fill=allele),
position=position_dodge(),
width=.75) +
scale_alpha_discrete(range=c(.5,1)) +
scale_fill_manual(values=c('deepskyblue2','brown3')) +
theme_classic() +
ylab('Read Count') +
xlab('Condition') +
theme(legend.position='none')
ggsave("plots/fig4_plotExpl1.pdf",
height=2, width=2)
# temp_data %>%
# pivot_wider(names_from=allele,values_from=value) %>%
# mutate(tot_read = ref+alt,
# alt_af = alt/tot_read) %>%
# ggplot(aes(cond,alt_af)) +
# geom_point(aes(color=cond,size=tot_read)) +
# scale_color_manual(values=c('darkorchid2','darkorchid4')) +
# scale_size_continuous(limits=c(1,100)) +
# theme_classic() +
# ylab('ALT AF') +
# scale_y_continuous(breaks=c(0,.5,1),
# limits=c(0,1)) +
# xlab('Condition') +
# theme(legend.position='none')
# ggsave("plots/fig4_plotExpl2.pdf",
# height=2, width=2)
#
# mpileups_comb_fi_test_lowp %>%
# head(n=32) %>%
# ggplot(aes(1,value)) +
# geom_col(aes(alpha=cond, fill=allele),
# position=position_dodge(),
# width=.75) +
# geom_text(aes(1,p_height,
# label=p_lab)) +
# scale_alpha_discrete(range=c(.5,1)) +
# scale_fill_manual(values=c('deepskyblue2','brown3')) +
# geom_text(aes(1,p_height*1.1,
# label=NA)) +
# facet_wrap(~gene_p_lab,
# scales = 'free_y') +
# theme_classic() +
# ylab('Read Count') +
# xlab('Time Point')
|
# Load data and fit the full linear model; plot residual diagnostics.
dat <- read.table("D:/2013data.txt", header = TRUE)
reg <- lm(gc ~ pop + wage + stu + traffic + sales + hos + asp + sdh, data = dat)
# BUG FIX: the summary must be computed *before* sigma is extracted from it.
# Previously `sigma <- sum$sigma` ran while `sum` was still the base function,
# which errors ("object of type 'closure' is not subsettable").  The object is
# also renamed so it no longer shadows base::sum().
reg_summary <- summary(reg)
print(reg_summary)
res <- resid(reg)
sigma <- reg_summary$sigma   # residual standard error, used for +/- 2*sigma bands
qqnorm(res, main = "normal Q-Q plot of residuals")
qqline(res)
pred <- predict(reg)
plot(pred, res, xlab = "predicted value", ylab = "residual")
abline(h = 2 * sigma); abline(h = -2 * sigma); abline(h = 0)
# Rescale variables to convenient units; traffic, hos and sdh are
# log-transformed because of outliers (traffic and sdh are also divided
# by 1000 before the log).
dat$pop     <- dat$pop / 100
dat$wage    <- dat$wage / 1000
dat$traffic <- log(dat$traffic / 1000)
dat$sales   <- dat$sales / 100
dat$hos     <- log(dat$hos)
dat$asp     <- dat$asp / 1000
dat$sdh     <- log(dat$sdh / 1000)
dat$gc      <- dat$gc / 100
### plots: residuals against each (transformed) predictor, with +/- 2*sigma bands
resid_xlabs <- c(pop = "pop", wage = "wage", stu = "stu", traffic = "traffic",
                 sales = "sales", hos = "log(hos)", asp = "asp", sdh = "sdh")
for (v in names(resid_xlabs)) {
  plot(dat[[v]], res, xlab = resid_xlabs[[v]], ylab = "residual")
  abline(h = 2 * sigma); abline(h = -2 * sigma); abline(h = 0)
}
# All Variables (including the region indicator dummies)
reg1 <- lm(gc ~ pop + wage + stu + traffic + sales + hos + asp + sdh +
             iC + iE + iS + iNE + iSW + iNW, data = dat)
summary(reg1)
# Exhaustive best-subset search over all candidate variables.
# FIX: only install `leaps` when it is missing, instead of re-installing
# (and hitting the network) on every run of the script.
if (!requireNamespace("leaps", quietly = TRUE)) {
  install.packages("leaps")
}
library(leaps)
s1 <- regsubsets(gc ~ pop + wage + stu + traffic + sales + hos + asp + sdh +
                   iSW + iC + iE + iS + iNE + iNW,
                 data = dat, method = "exhaustive", nbest = 1, nvmax = 14)
ss1 <- summary(s1)
ss1
ss1$cp
ss1$adjr2
# Cross Validation — leave-one-out
# Leave-one-out cross-validated RMSE of a least-squares fit, computed from
# the PRESS residuals e_i / (1 - h_i) rather than refitting n times.
# FIX: removed a stray "> " console-prompt character that had been pasted in
# front of the function definition and made the script unparseable.
ls.cvrmse <- function(ls.out)
{
  res.cv <- ls.out$residuals / (1.0 - ls.diag(ls.out)$hat)
  # Drop undefined entries (leverage exactly 1 gives 0/0 = NaN).
  is.na.res <- is.na(res.cv)
  res.cv <- res.cv[!is.na.res]
  cvrmse <- sqrt(sum(res.cv^2) / length(res.cv))
  return(cvrmse)
}
# Fit the two candidate models, then compare their LOOCV RMSEs side by side.
model1 <- lm(gc ~ pop + stu + traffic + sales + asp + sdh + iE + iS + iSW,
             data = dat)
model2 <- lm(gc ~ pop + stu + traffic + sales + hos + asp + sdh +
               iE + iS + iNE + iSW + iNW, data = dat)
model1.cvrmse <- ls.cvrmse(model1)
model2.cvrmse <- ls.cvrmse(model2)
print(c(model1.cvrmse, model2.cvrmse))
# 5-fold cross validation (B repeated random 80/20 splits of size n/5)
n <- nrow(dat)
sn <- floor(n/5)
set.seed(306)
B <- 500
errMx <- matrix(NA, B, 2)
colnames(errMx) <- c("model1", "model2")
for (i in 1:B)
{
  testInd <- sample(1:n, sn, replace=FALSE)
  tTestDat <- dat[testInd, ] #Treat the sampled index as testing set
  tTrainDat <- dat[-testInd, ] #The rest is training set.
  tmodel1 <- lm(gc~pop+stu+traffic+sales+asp+sdh+iE+iS+iSW, data = tTrainDat)
  tmodel1.pred <- predict(tmodel1, tTestDat)
  errMx[i, 1] <- sqrt(sum((tTestDat$gc - tmodel1.pred)^2)/sn)
  tmodel2 <- lm(gc~pop+stu+traffic+sales+hos+asp+sdh+iE+iS+iNE+iSW+iNW, data = tTrainDat)
  tmodel2.pred <- predict(tmodel2, tTestDat)
  # BUG FIX: model2's test error was computed but never stored, leaving the
  # "model2" column of errMx entirely NA.
  errMx[i, 2] <- sqrt(sum((tTestDat$gc - tmodel2.pred)^2)/sn)
}
| /Rcode.R | no_license | Melauria/STAT306 | R | false | false | 3,167 | r | dat<-read.table("D:/2013data.txt",header = T)
reg <- lm(gc ~ pop + wage + stu + traffic + sales + hos + asp + sdh, data = dat)
# BUG FIX: compute the summary before reading sigma from it.  Previously
# `sigma <- sum$sigma` ran while `sum` was still the base function, which
# errors ("object of type 'closure' is not subsettable"); the object is also
# renamed so it no longer shadows base::sum().
reg_summary <- summary(reg)
print(reg_summary)
res <- resid(reg)
sigma <- reg_summary$sigma   # residual standard error, used for +/- 2*sigma bands
qqnorm(res, main = "normal Q-Q plot of residuals")
qqline(res)
pred <- predict(reg)
plot(pred, res, xlab = "predicted value", ylab = "residual")
abline(h = 2 * sigma); abline(h = -2 * sigma); abline(h = 0)
# traffic is transformed to log(traffic) due to the presence of outlier
# hos is transformed to log(hos) due to the presence of outlier.
# sdh is transformed to log(sdh) due to the presence of outliers.
dat$pop=dat$pop/100
dat$wage=dat$wage/1000
dat$traffic=log(dat$traffic/1000)
dat$sales=dat$sales/100
dat$hos=log(dat$hos)
dat$asp=dat$asp/1000
dat$sdh=log(dat$sdh/1000)
dat$gc=dat$gc/100
### plots
plot(dat$pop,res,xlab="pop",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$wage,res,xlab="wage",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$stu,res,xlab="stu",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$traffic,res,xlab="traffic",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$sales,res,xlab="sales",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$hos,res,xlab="log(hos)",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$asp,res,xlab="asp",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
plot(dat$sdh,res,xlab="sdh",ylab="residual")
abline(h=2*sigma); abline(h=-2*sigma); abline(h=0)
# All Variables
reg1<-lm(gc~pop+wage+stu+traffic+sales+hos+asp+sdh+iC+iE+iS+iNE+iSW+iNW,data = dat)
summary(reg1)
# Exhaustive of all variables
install.packages("leaps")
library(leaps)
s1 <- regsubsets(gc~pop+wage+stu+traffic+sales+hos+asp+sdh+iSW+iC+iE+iS+iNE+iNW, data=dat, method="exhaustive",nbest=1,nvmax=14)
ss1<-summary(s1)
ss1
ss1$cp
ss1$adjr2
# Cross Validation — leave-one-out
# Leave-one-out cross-validated RMSE of a least-squares fit, computed from
# the PRESS residuals e_i / (1 - h_i) rather than refitting n times.
# FIX: removed a stray "> " console-prompt character that had been pasted in
# front of the function definition and made the script unparseable.
ls.cvrmse <- function(ls.out)
{
  res.cv <- ls.out$residuals / (1.0 - ls.diag(ls.out)$hat)
  # Drop undefined entries (leverage exactly 1 gives 0/0 = NaN).
  is.na.res <- is.na(res.cv)
  res.cv <- res.cv[!is.na.res]
  cvrmse <- sqrt(sum(res.cv^2) / length(res.cv))
  return(cvrmse)
}
model1<-lm(gc~pop+stu+traffic+sales+asp+sdh+iE+iS+iSW,data = dat)
model1.cvrmse <- ls.cvrmse(model1)
model2<-lm(gc~pop+stu+traffic+sales+hos+asp+sdh+iE+iS+iNE+iSW+iNW,data = dat)
model2.cvrmse <- ls.cvrmse(model2)
print(c(model1.cvrmse, model2.cvrmse))
# 5-fold cross validation (B repeated random 80/20 splits of size n/5)
n <- nrow(dat)
sn <- floor(n/5)
set.seed(306)
B <- 500
errMx <- matrix(NA, B, 2)
colnames(errMx) <- c("model1", "model2")
for (i in 1:B)
{
  testInd <- sample(1:n, sn, replace=FALSE)
  tTestDat <- dat[testInd, ] #Treat the sampled index as testing set
  tTrainDat <- dat[-testInd, ] #The rest is training set.
  tmodel1 <- lm(gc~pop+stu+traffic+sales+asp+sdh+iE+iS+iSW, data = tTrainDat)
  tmodel1.pred <- predict(tmodel1, tTestDat)
  errMx[i, 1] <- sqrt(sum((tTestDat$gc - tmodel1.pred)^2)/sn)
  tmodel2 <- lm(gc~pop+stu+traffic+sales+hos+asp+sdh+iE+iS+iNE+iSW+iNW, data = tTrainDat)
  tmodel2.pred <- predict(tmodel2, tTestDat)
  # BUG FIX: model2's test error was computed but never stored, leaving the
  # "model2" column of errMx entirely NA.
  errMx[i, 2] <- sqrt(sum((tTestDat$gc - tmodel2.pred)^2)/sn)
}
|
# Dependencies.  FIX: use library() rather than require() for mandatory
# packages — require() only warns and returns FALSE when a package is
# missing, letting the script limp on and fail later with a confusing
# error; library() stops immediately at the point of failure.
library("Hmisc")
library("plyr")
library("ggplot2")
library("car")
library("compositions")
library("MatchIt")
library("cem")
library("Zelig")
library("Matching")
library("fastDummies")
options(digits=10)
options(scipen=10)
##############################################################
## Matching (Beta)
##############################################################
# NOTE(review): rm(list=ls()) wipes the whole workspace — fine for a
# standalone batch script, hostile when sourced interactively.
rm(list=ls())
load("prematch_data_all.RData")   # provides `data.prematch`
data <- data.prematch
# Set treatment groups
# Treated = top beta quintile (5), control = bottom quintile (1); all other
# quintiles stay NA and are removed by complete.cases() below.
data$TREATB <- NA
data[data$BETAQ==5,"TREATB"] <- 1
data[data$BETAQ==1,"TREATB"] <- 0
# Take log of cap and volume
data$LOGCAP <- log(data$CAP)
data$LOGVOL <- log(data$VOL)
# Make beta related data
# Keep only the matching-relevant columns, complete rows, and strictly
# positive cap/volume (so the logs above are finite).
data.beta <- data[,c("TREATB","PERIOD","RETURN","CAP","LOGCAP","VOL","LOGVOL","GROUP","INDUSTRY")]
data.beta <- data.beta[complete.cases(data.beta),]
data.beta <- data.beta[data.beta$CAP>0,]
data.beta <- data.beta[data.beta$VOL>0,]
# Coarsen the groups using SICC codes (https://www.naics.com/sic-codes-industry-drilldown/)
# Map a 2-digit SIC major-group code to one of 10 coarse divisions.
# Codes outside every range (e.g. 18-19, 68-69) stay 0 and are dropped below.
coarsen_sic_group <- function(g) {
  out <- rep(0, length(g))
  # each row: lower bound, upper bound (inclusive), coarse division id
  ranges <- rbind(
    c( 1,  9,  1),
    c(10, 14,  2),
    c(15, 17,  3),
    c(20, 39,  4),
    c(40, 49,  5),
    c(50, 51,  6),
    c(52, 59,  7),
    c(60, 67,  8),
    c(70, 89,  9),
    c(90, 99, 10)
  )
  for (r in seq_len(nrow(ranges))) {
    out[g >= ranges[r, 1] & g <= ranges[r, 2]] <- ranges[r, 3]
  }
  out
}
# FIX: replaced an element-by-element loop of 10 if-statements with a
# vectorized range lookup — identical result, O(#ranges) vector operations
# instead of O(nrow) scalar comparisons.
group_col <- data.beta$GROUP
group_coarse <- coarsen_sic_group(group_col)
# Remove all stocks with group == 0, add dummy variables for the rest
data.beta$GROUP_COARSE <- group_coarse
data.beta <- data.beta[data.beta$GROUP_COARSE != 0,]
# fastDummies appends one 0/1 indicator column per coarse group
# (GROUP_COARSE_2 ... GROUP_COARSE_10), used as matching covariates below.
data.beta <- dummy_cols(data.beta, select_columns = 'GROUP_COARSE')
# We chose to only replicate from period 452 to 552 for computational limitations
start <- 452
end <- 552
# Data frame to store data set.
data.prunedb <- data.frame()
for (i in start:end) {
  print(i)
  # Subset the data for period i
  data.curb <- data.beta[data.beta$PERIOD==i,]
  # Select the covariates, perform gen match and match
  # Covariates: log market cap, log volume, plus the coarse-industry dummies.
  Xb <-cbind(data.curb$LOGCAP, data.curb$LOGVOL, data.curb$GROUP_COARSE_5,
             data.curb$GROUP_COARSE_8, data.curb$GROUP_COARSE_2, data.curb$GROUP_COARSE_4,
             data.curb$GROUP_COARSE_9, data.curb$GROUP_COARSE_6, data.curb$GROUP_COARSE_7,
             data.curb$GROUP_COARSE_3, data.curb$GROUP_COARSE_10)
  # Genetic search for covariate weights, then 1:1 matching with a 0.1 caliper.
  genoutb <- GenMatch(Tr=data.curb$TREATB, X=Xb, M=1, pop.size=200,
                      max.generations=10, wait.generations=5, caliper = 0.1, print.level = 0)
  moutb <- Match(Tr=data.curb$TREATB, X=Xb, Weight.matrix=genoutb, caliper = 0.1)
  # Select matched data set, rbind to the bigger data set
  # NOTE(review): growing data.prunedb with rbind() inside the loop is O(n^2);
  # collecting each period's result in a list and binding once would be faster.
  matched_treated <- cbind(data.curb[moutb$index.treated,], weights = moutb$weights)
  matched_control <- cbind(data.curb[moutb$index.control,], weights = moutb$weights)
  matched_data <- rbind(matched_treated, matched_control)
  data.prunedb <- rbind(data.prunedb, matched_data)
  # We are going to use the L1 distance to check the balance here instead of MatchBalance
  # in order to be comparable to the original paper.
}
# We had to split the data in half and run gen match for each half in our own machine
#load("SHO_temp.RData")
#load("HUNG_temp.RData")
# Combine the data
#data.prunedb <- rbind(data.prunedb.Hung, data.prunedb_sho)
#save(data.beta, data.prunedb, file="genmatch_data.RData")
# This is the data after the aggregation, just load and use.
load("genmatch_data.RData")
# Process the data just like the paper did
# Matching weight scaled by market cap -> value-weighted mean return below.
data.prunedb$WEIGHT <- data.prunedb$CAP * data.prunedb$weights
ret.beta <- ddply(data.prunedb,.(PERIOD,TREATB), summarise, RETURN=weighted.mean(RETURN,WEIGHT))
# Cumulative returns
# Growth of $1 over the 101 matched periods (452-552), by treatment group.
cum.beta0 <- cumprod(ret.beta[ret.beta$TREATB==0,"RETURN"]+1)
cum.beta1 <- cumprod(ret.beta[ret.beta$TREATB==1,"RETURN"]+1)
cum.beta0[101]
cum.beta1[101]
##############################################################
## FIGURE 1
##############################################################
load("results_beta_all.RData")   # provides `beta`
load("genmatch_data.RData")
# Change the range of the original data
new_beta <- beta[which(beta$PERIOD >= 452),]
# Matched Beta plot
# Changed the range of the dates so they match our data
# Build the plotting frame: one row per month per series; YEAR converts the
# 1-based month index (starting at period 452) to a calendar year.
d <- data.frame(YEAR=1963+451/12+(1:101)/12,LINE=1,CUMRET=cum.beta0)
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=2,CUMRET=cum.beta1))
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=3,CUMRET=new_beta[new_beta$BETAQ==1,"CUMRET"]))
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=4,CUMRET=new_beta[new_beta$BETAQ==5,"CUMRET"]))
# Legend labels carry each series' terminal (year-2009) dollar value.
d[d$LINE==1,"Beta Quintiles"] <- paste("Matched Quintile 1 ($",format(d[d$LINE==1 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==2,"Beta Quintiles"] <- paste("Matched Quintile 5 ($",format(d[d$LINE==2 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==3,"Beta Quintiles"] <- paste("Original Quintile 1 ($",format(d[d$LINE==3 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==4,"Beta Quintiles"] <- paste("Original Quintile 5 ($",format(d[d$LINE==4 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
# Log-scale cumulative-return plot; matched series solid, originals dotted.
g <- ggplot(data=d,aes(x=YEAR,y=CUMRET,colour=`Beta Quintiles`,linetype=`Beta Quintiles`)) + geom_line(lwd=1)
g <- g + scale_y_log10("Cumulative Return",limits = c(.1,150),breaks=c(.1,1,10,100),labels=c("$0.10","$1","$10","$100"))
g <- g + scale_x_continuous("",limits = c(2000,2009),breaks=seq(2000,2008,1),seq(2000,2008,1),expand=c(0,0))
g <- g + ggtitle("All Stocks, Beta Quintiles\nCumulative Return of $1 invested in 1968\n")
g <- g + scale_color_manual(values=c("red","blue","red","blue"))
g <- g + scale_linetype_manual(values=c(1,1,3,3))
g <- g + theme(legend.position=c(.2,.8),panel.background=element_blank(),axis.line=element_blank(),panel.grid.minor=element_blank(),panel.grid.major=element_blank(),plot.background=element_rect(fill=NA,colour =NA))
g
#dev.copy(pdf,"matchplotbeta.pdf")
#dev.copy(png,"matchplotbeta.png")
#dev.off()
# Per-period covariate imbalance (cem's multivariate L1 distance), before
# matching (data.beta) and after matching (data.prunedb, weighted).
L1 <- data.frame(period=NA,prebeta=NA,postbeta=NA)
N <- data.frame(period=NA,prebeta=NA,postbeta=NA)
for(i in 452:552) {
  print(i)
  prebeta <- imbalance(group=data.beta[data.beta$PERIOD==i,"TREATB"],data=data.beta[data.beta$PERIOD==i,c("LOGCAP","LOGVOL","GROUP_COARSE_5", "GROUP_COARSE_8","GROUP_COARSE_2", "GROUP_COARSE_4", "GROUP_COARSE_9","GROUP_COARSE_6", "GROUP_COARSE_7", "GROUP_COARSE_3","GROUP_COARSE_10")])$L1$L1
  postbeta <- imbalance(group=data.prunedb[data.prunedb$PERIOD==i,"TREATB"],
                        data=data.prunedb[data.prunedb$PERIOD==i,c("LOGCAP","LOGVOL","GROUP_COARSE_5", "GROUP_COARSE_8","GROUP_COARSE_2", "GROUP_COARSE_4", "GROUP_COARSE_9","GROUP_COARSE_6", "GROUP_COARSE_7", "GROUP_COARSE_3","GROUP_COARSE_10")], weights=data.prunedb[data.prunedb$PERIOD==i,"weights"])$L1$L1
  L1 <- rbind(L1,c(i,prebeta,postbeta))
  N <- rbind(N,c(i, nrow(data.beta[data.beta$PERIOD==i,]),nrow(data.prunedb[data.prunedb$PERIOD==i,])))
}
# Drop the all-NA seed row used to initialise each data frame.
L1 <- L1[-1,]
N <- N[-1,]
# Mean and SD of the pre/post L1 statistics (the [-1] drops the period column).
apply(L1,2,mean)[-1]
apply(L1,2,sd)[-1]
load('new_l1.RData')   # NOTE(review): presumably replaces `L1` — TODO confirm contents
# Histogram of L1 statistic for Genetic Matching
hist(L1$postbeta, main = 'Histogram of L1 statistic for each period - GenMatch', xlab = 'L1 Statistic')
load('matching_imbalance.RData')
# Histogram of L1 statistic for original code
hist(L1$postbeta, main = 'Histogram of L1 statistic for each period - CEM', xlab = 'L1 Statistic')
| /replication_genmatch_new.R | no_license | hu-ng/cs112-fp | R | false | false | 7,957 | r | require("Hmisc")
require("plyr")
require("ggplot2")
require("car")
require("compositions")
require("MatchIt")
require("cem")
require("Zelig")
library("Matching")
library("fastDummies")
options(digits=10)
options(scipen=10)
##############################################################
## Matching (Beta)
##############################################################
rm(list=ls())
load("prematch_data_all.RData")
data <- data.prematch
# Set treatment groups
data$TREATB <- NA
data[data$BETAQ==5,"TREATB"] <- 1
data[data$BETAQ==1,"TREATB"] <- 0
# Take log of cap and volume
data$LOGCAP <- log(data$CAP)
data$LOGVOL <- log(data$VOL)
# Make beta related data
data.beta <- data[,c("TREATB","PERIOD","RETURN","CAP","LOGCAP","VOL","LOGVOL","GROUP","INDUSTRY")]
data.beta <- data.beta[complete.cases(data.beta),]
data.beta <- data.beta[data.beta$CAP>0,]
data.beta <- data.beta[data.beta$VOL>0,]
# Coarsen the groups using SICC codes (https://www.naics.com/sic-codes-industry-drilldown/)
group_coarse <- rep(0, nrow(data.beta))
group_col <- data.beta$GROUP
for (i in (1:length(group_col))) {
if (group_col[i] >= 1 & group_col[i] <= 9) {
group_coarse[i] <- 1
}
if (group_col[i] >= 10 & group_col[i] <= 14) {
group_coarse[i] <- 2
}
if (group_col[i] >= 15 & group_col[i] <= 17) {
group_coarse[i] <- 3
}
if (group_col[i] >= 20 & group_col[i] <= 39) {
group_coarse[i] <- 4
}
if (group_col[i] >= 40 & group_col[i] <= 49) {
group_coarse[i] <- 5
}
if (group_col[i] == 50 | group_col[i] == 51) {
group_coarse[i] <- 6
}
if (group_col[i] >= 52 & group_col[i] <= 59) {
group_coarse[i] <- 7
}
if (group_col[i] >= 60 & group_col[i] <= 67) {
group_coarse[i] <- 8
}
if (group_col[i] >= 70 & group_col[i] <= 89) {
group_coarse[i] <- 9
}
if (group_col[i] >= 90 & group_col[i] <= 99) {
group_coarse[i] <- 10
}
}
# Remove all stocks with group == 0, add dummy variables for the rest
data.beta$GROUP_COARSE <- group_coarse
data.beta <- data.beta[data.beta$GROUP_COARSE != 0,]
data.beta <- dummy_cols(data.beta, select_columns = 'GROUP_COARSE')
# We chose to only replicate from period 452 to 552 for computational limitations
start <- 452
end <- 552
# Data frame to store data set.
data.prunedb <- data.frame()
for (i in start:end) {
print(i)
# Subset the data for period i
data.curb <- data.beta[data.beta$PERIOD==i,]
# Select the covariates, perform gen match and match
Xb <-cbind(data.curb$LOGCAP, data.curb$LOGVOL, data.curb$GROUP_COARSE_5,
data.curb$GROUP_COARSE_8, data.curb$GROUP_COARSE_2, data.curb$GROUP_COARSE_4,
data.curb$GROUP_COARSE_9, data.curb$GROUP_COARSE_6, data.curb$GROUP_COARSE_7,
data.curb$GROUP_COARSE_3, data.curb$GROUP_COARSE_10)
genoutb <- GenMatch(Tr=data.curb$TREATB, X=Xb, M=1, pop.size=200,
max.generations=10, wait.generations=5, caliper = 0.1, print.level = 0)
moutb <- Match(Tr=data.curb$TREATB, X=Xb, Weight.matrix=genoutb, caliper = 0.1)
# Select matched data set, rbind to the bigger data set
matched_treated <- cbind(data.curb[moutb$index.treated,], weights = moutb$weights)
matched_control <- cbind(data.curb[moutb$index.control,], weights = moutb$weights)
matched_data <- rbind(matched_treated, matched_control)
data.prunedb <- rbind(data.prunedb, matched_data)
# We are going to use the L1 distance to check the balance here instead of MatchBalance
# in order to be comparable to the original paper.
}
# We had to split the data in half and run gen match for each half in our own machine
#load("SHO_temp.RData")
#load("HUNG_temp.RData")
# Combine the data
#data.prunedb <- rbind(data.prunedb.Hung, data.prunedb_sho)
#save(data.beta, data.prunedb, file="genmatch_data.RData")
# This is the data after the aggregation, just load and use.
load("genmatch_data.RData")
# Process the data just like the papaer did
data.prunedb$WEIGHT <- data.prunedb$CAP * data.prunedb$weights
ret.beta <- ddply(data.prunedb,.(PERIOD,TREATB), summarise, RETURN=weighted.mean(RETURN,WEIGHT))
# Cumulative returns
cum.beta0 <- cumprod(ret.beta[ret.beta$TREATB==0,"RETURN"]+1)
cum.beta1 <- cumprod(ret.beta[ret.beta$TREATB==1,"RETURN"]+1)
cum.beta0[101]
cum.beta1[101]
##############################################################
## FIGURE 1
##############################################################
load("results_beta_all.RData")
load("genmatch_data.RData")
# Change the range of the original data
new_beta <- beta[which(beta$PERIOD >= 452),]
# Matched Beta plot
# Changed the range of the dates so they match our data
d <- data.frame(YEAR=1963+451/12+(1:101)/12,LINE=1,CUMRET=cum.beta0)
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=2,CUMRET=cum.beta1))
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=3,CUMRET=new_beta[new_beta$BETAQ==1,"CUMRET"]))
d <- rbind(d,data.frame(YEAR=1963+451/12+(1:101)/12,LINE=4,CUMRET=new_beta[new_beta$BETAQ==5,"CUMRET"]))
d[d$LINE==1,"Beta Quintiles"] <- paste("Matched Quintile 1 ($",format(d[d$LINE==1 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==2,"Beta Quintiles"] <- paste("Matched Quintile 5 ($",format(d[d$LINE==2 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==3,"Beta Quintiles"] <- paste("Original Quintile 1 ($",format(d[d$LINE==3 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
d[d$LINE==4,"Beta Quintiles"] <- paste("Original Quintile 5 ($",format(d[d$LINE==4 & d$YEAR==2009,"CUMRET"],digits=2,nsmall=2),")",sep="")
g <- ggplot(data=d,aes(x=YEAR,y=CUMRET,colour=`Beta Quintiles`,linetype=`Beta Quintiles`)) + geom_line(lwd=1)
g <- g + scale_y_log10("Cumulative Return",limits = c(.1,150),breaks=c(.1,1,10,100),labels=c("$0.10","$1","$10","$100"))
g <- g + scale_x_continuous("",limits = c(2000,2009),breaks=seq(2000,2008,1),seq(2000,2008,1),expand=c(0,0))
g <- g + ggtitle("All Stocks, Beta Quintiles\nCumulative Return of $1 invested in 1968\n")
g <- g + scale_color_manual(values=c("red","blue","red","blue"))
g <- g + scale_linetype_manual(values=c(1,1,3,3))
g <- g + theme(legend.position=c(.2,.8),panel.background=element_blank(),axis.line=element_blank(),panel.grid.minor=element_blank(),panel.grid.major=element_blank(),plot.background=element_rect(fill=NA,colour =NA))
g
#dev.copy(pdf,"matchplotbeta.pdf")
#dev.copy(png,"matchplotbeta.png")
#dev.off()
L1 <- data.frame(period=NA,prebeta=NA,postbeta=NA)
N <- data.frame(period=NA,prebeta=NA,postbeta=NA)
for(i in 452:552) {
print(i)
prebeta <- imbalance(group=data.beta[data.beta$PERIOD==i,"TREATB"],data=data.beta[data.beta$PERIOD==i,c("LOGCAP","LOGVOL","GROUP_COARSE_5", "GROUP_COARSE_8","GROUP_COARSE_2", "GROUP_COARSE_4", "GROUP_COARSE_9","GROUP_COARSE_6", "GROUP_COARSE_7", "GROUP_COARSE_3","GROUP_COARSE_10")])$L1$L1
postbeta <- imbalance(group=data.prunedb[data.prunedb$PERIOD==i,"TREATB"],
data=data.prunedb[data.prunedb$PERIOD==i,c("LOGCAP","LOGVOL","GROUP_COARSE_5", "GROUP_COARSE_8","GROUP_COARSE_2", "GROUP_COARSE_4", "GROUP_COARSE_9","GROUP_COARSE_6", "GROUP_COARSE_7", "GROUP_COARSE_3","GROUP_COARSE_10")], weights=data.prunedb[data.prunedb$PERIOD==i,"weights"])$L1$L1
L1 <- rbind(L1,c(i,prebeta,postbeta))
N <- rbind(N,c(i, nrow(data.beta[data.beta$PERIOD==i,]),nrow(data.prunedb[data.prunedb$PERIOD==i,])))
}
L1 <- L1[-1,]
N <- N[-1,]
apply(L1,2,mean)[-1]
apply(L1,2,sd)[-1]
load('new_l1.RData')
# Histogram of L1 statistic for Genetic Matching
hist(L1$postbeta, main = 'Histogram of L1 statistic for each period - GenMatch', xlab = 'L1 Statistic')
load('matching_imbalance.RData')
# Histogram of L1 statistic for original code
hist(L1$postbeta, main = 'Histogram of L1 statistic for each period - CEM', xlab = 'L1 Statistic')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterEval.R
\name{topicEval}
\alias{topicEval}
\title{topicEval}
\usage{
topicEval(clust.res, threshold = NULL)
}
\arguments{
\item{clust.res}{A list returned by running clusterConcepts()}
}
\value{
A list containing:
\item{topicsOrdered}{ A data frame containing the mean of each feature per cluster}
\item{topicsMax}{ A data frame containing the standard deviation of each feature value per cluster}
\item{topicsKmeans}{ A data frame containing the fraction of each cluster with non-zero values for the feature}
}
\description{
This function simply calculates summaries of each topic and returns these
as a list.
}
\details{
This function only has one input, the clusterResults obtained by applying clusterConcepts
}
\keyword{OHDSI,}
\keyword{clustering}
| /man/topicEval.Rd | permissive | jreps/patientCluster | R | false | true | 836 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clusterEval.R
\name{topicEval}
\alias{topicEval}
\title{topicEval}
\usage{
topicEval(clust.res, threshold = NULL)
}
\arguments{
\item{clust.res}{A list returned by running clusterConcepts()}
}
\value{
A list containing:
\item{topicsOrdered}{ A data frame containing the mean of each feature per cluster}
\item{topicsMax}{ A data frame containing the standard deviation of each feature value per cluster}
\item{topicsKmeans}{ A data frame containing the fraction of each cluster with non-zero values for the feature}
}
\description{
This function simply calculates summaries of each topic and returns these
as a list.
}
\details{
This function only has one input, the clusterResults obtained by applying clusterConcepts
}
\keyword{OHDSI,}
\keyword{clustering}
|
# Standard testthat entry point: load the package under test and run all
# tests found in tests/testthat/.
library(testthat)
library(ClusterMultinom)
test_check("ClusterMultinom")
| /tests/testthat.R | no_license | jenniferthompson/ClusterMultinom | R | false | false | 74 | r | library(testthat)
library(ClusterMultinom)
test_check("ClusterMultinom")
|
/MacOSX10.4u.sdk/System/Library/Frameworks/CoreServices.framework/Versions/A/Frameworks/OSServices.framework/Versions/A/Headers/OpenTransport.r | no_license | alexey-lysiuk/macos-sdk | R | false | false | 2,495 | r | ||
# Load the e-mail corpus; keep text as character (not factor) for tm.
emails = read.csv("./data/emails.csv",stringsAsFactors = FALSE)
# Number of e-mails and how many are labelled spam (spam is 0/1).
nrow(emails)
sum(emails$spam)
# Character counts per e-mail; index of the longest / shortest message.
# NOTE(review): t_cha is a *list* (lapply); which.max() relies on R coercing
# it to numeric — nchar(emails$text) would give a plain vector. TODO confirm.
t_cha = lapply(emails$text,nchar)
nchar(emails$text[which.max(t_cha)])
which.min(nchar(emails$text))
library(tm)
# Standard text pre-processing: lower-case, strip punctuation and English
# stopwords, then stem each word.
corpus = Corpus(VectorSource(emails$text))
corpus = tm_map(corpus,content_transformer(tolower))
corpus = tm_map(corpus,removePunctuation)
corpus = tm_map(corpus,removeWords,stopwords("english"))
corpus = tm_map(corpus,stemDocument)
# Document-term matrix; then keep only terms present in >= 5% of documents.
dtm = DocumentTermMatrix(corpus)
ncol(dtm)
dtm= removeSparseTerms(dtm,0.95)
ncol(dtm)
# Convert the sparse term matrix to a data frame of word counts per e-mail.
emailsSparse = as.data.frame(as.matrix(dtm))
# Most frequent stem overall.
w_sum = lapply(emailsSparse,sum)
w_sum[which.max(w_sum)]
# Re-attach the outcome, then count frequent stems within ham vs spam.
emailsSparse$spam = emails$spam
hamEmails = subset(emailsSparse,emailsSparse$spam == 0)
sum(as.numeric(colSums(hamEmails) >= 5000))
spamEmails = subset(emailsSparse,emailsSparse$spam == 1)
# Ignore "spam" because it's dependent variable
sum(as.numeric(colSums(spamEmails) >= 1000))
# Convert the outcome to a factor for the classifiers below.
emailsSparse$spam = as.factor(emailsSparse$spam)
set.seed(123)
library(caTools)
# BUG FIX (two problems):
# 1. sample.split() needs the outcome *vector* (emailsSparse$spam), not the
#    whole data frame, to produce one TRUE/FALSE per row with balanced labels.
# 2. subset() needs a logical condition (spl == TRUE); the previous
#    `subset(emailsSparse, spl=TRUE)` passed `spl` as an unmatched *named
#    argument*, which subset() silently ignored, so train and test were both
#    the full data set.
spl <- sample.split(emailsSparse$spam, SplitRatio = 0.7)
train <- subset(emailsSparse, spl == TRUE)
test <- subset(emailsSparse, spl == FALSE)
library(rpart)
library(rpart.plot)
# Logistic Regression Model
spamLog = glm(spam ~ ., data=train, family="binomial")
# CART (classification tree) Model
spamCART = rpart(spam ~ ., data=train, method="class")
# Random Forest Model
library(randomForest)
spamRf = randomForest(spam ~.,data=train)
# Predictions
# NOTE(review): predict() has no `data` argument — these calls silently
# ignore it and predict on the training data, which appears to be the intent
# here; `newdata=` would be the explicit spelling. TODO confirm.
predictSpamLog = predict(spamLog,data=train, type="response")
predictSpamCART = predict(spamCART ,data=train)[,2]
predictSpamRf = predict(spamRf ,data=train, type="prob")
# How confident is the logistic model? Counts of near-0, near-1 and
# intermediate predicted probabilities.
sum(predictSpamLog < 0.00001)
sum(predictSpamLog > 0.99999)
sum((predictSpamLog <= 0.99999) & (predictSpamLog >= 0.00001))
summary(spamLog)
prp(spamCART)
table(train$spam, predictSpamLog >0.5) | /SEPARATING SPAM FROM HAM.R | no_license | lucaslrolim/MITx-15.071x | R | false | false | 1,778 | r | emails = read.csv("./data/emails.csv",stringsAsFactors = FALSE)
nrow(emails)
sum(emails$spam)
t_cha = lapply(emails$text,nchar)
nchar(emails$text[which.max(t_cha)])
which.min(nchar(emails$text))
library(tm)
corpus = Corpus(VectorSource(emails$text))
corpus = tm_map(corpus,content_transformer(tolower))
corpus = tm_map(corpus,removePunctuation)
corpus = tm_map(corpus,removeWords,stopwords("english"))
corpus = tm_map(corpus,stemDocument)
dtm = DocumentTermMatrix(corpus)
ncol(dtm)
dtm= removeSparseTerms(dtm,0.95)
ncol(dtm)
emailsSparse = as.data.frame(as.matrix(dtm))
w_sum = lapply(emailsSparse,sum)
w_sum[which.max(w_sum)]
emailsSparse$spam = emails$spam
hamEmails = subset(emailsSparse,emailsSparse$spam == 0)
sum(as.numeric(colSums(hamEmails) >= 5000))
spamEmails = subset(emailsSparse,emailsSparse$spam == 1)
# Ignore "spam" because it's dependent variable
sum(as.numeric(colSums(spamEmails) >= 1000))
emailsSparse$spam = as.factor(emailsSparse$spam)
set.seed(123)
library(caTools)
# BUG FIX (two problems):
# 1. sample.split() needs the outcome *vector* (emailsSparse$spam), not the
#    whole data frame, to produce one TRUE/FALSE per row with balanced labels.
# 2. subset() needs a logical condition (spl == TRUE); the previous
#    `subset(emailsSparse, spl=TRUE)` passed `spl` as an unmatched *named
#    argument*, which subset() silently ignored, so train and test were both
#    the full data set.
spl <- sample.split(emailsSparse$spam, SplitRatio = 0.7)
train <- subset(emailsSparse, spl == TRUE)
test <- subset(emailsSparse, spl == FALSE)
library(rpart)
library(rpart.plot)
# Logistic Regression Model
spamLog = glm(spam ~ ., data=train, family="binomial")
# Cross Validation Model
spamCART = rpart(spam ~ ., data=train, method="class")
# Random Forest Model
library(randomForest)
spamRf = randomForest(spam ~.,data=train)
# Predictions
predictSpamLog = predict(spamLog,data=train, type="response")
predictSpamCART = predict(spamCART ,data=train)[,2]
predictSpamRf = predict(spamRf ,data=train, type="prob")
sum(predictSpamLog < 0.00001)
sum(predictSpamLog > 0.99999)
sum((predictSpamLog <= 0.99999) & (predictSpamLog >= 0.00001))
summary(spamLog)
prp(spamCART)
table(train$spam, predictSpamLog >0.5) |
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/make_horse.R
\name{make_horse}
\alias{make_horse}
\title{Build a horse object}
\usage{
make_horse(tweets)
}
\arguments{
\item{tweets}{a character vector.}
}
\value{
a horse object ready to generate tweets from.
}
\description{
This function takes a set of tweets and builds a transition matrix from
them. This then gives a Markov chain that can be used to generate new
strings from.
}
\author{
David L. Miller
}
| /man/make_horse.Rd | no_license | dill/horse | R | false | false | 499 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/make_horse.R
\name{make_horse}
\alias{make_horse}
\title{Build a horse object}
\usage{
make_horse(tweets)
}
\arguments{
\item{tweets}{a character vector.}
}
\value{
a horse object ready to generate tweets from.
}
\description{
This function takes a set of tweets and builds a transition matrix from
them. This then gives a Markov chain that can be used to generate new
strings from.
}
\author{
David L. Miller
}
|
<<<<<<< HEAD
run.nucmer=function(str_name,ref_name="N315", cmd_path,out_path ){
=======
function(str_name,ref_name="N315", cmd_path,out_path ){
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
require(ape)
#### Check that the inputs are all DNA sequences
if (unique(names(table(as.character(read.FASTA(paste0(out_path, str_name,".fasta")))))!= c("a", "c", "g" ,"t"))) stop("Str not a DNA sequence")
if (unique(names(table(as.character(read.FASTA(paste0(out_path, ref_name,".fasta")))))!= c("a", "c", "g" ,"t"))) stop("Ref not a DNA sequence")
#### 1) run NUCmer w following options #####
# --mum Use anchor matches that are unique in both the reference and query;
return_filename=paste0("nucmer_",ref_name,"_",str_name)
system(paste0(cmd_path,"nucmer --mum --prefix=", return_filename," ",
out_path, ref_name, ".fasta ",
out_path, str_name,".fasta"))
# 1.1) filter NUCmer w following options------
# -q Query alignment using length*identity weighted LIS. For each query, leave only the alignments which form the longest consistent set for the query
# -r Reference alignment using length*identity weighted LIS. For each reference, leave only the alignments which form the longest consistent set for the reference.
# -u float Set the minimum alignment uniqueness, i.e. percent of the alignment matching to unique reference AND query sequence [0, 100], (default 0)
# -o float Set the maximum alignment overlap for -r and -q options as a percent of the alignment length [0, 100], (default 75)
system(paste0("/data1/home/rpetit/bin/MUMmer/delta-filter -q -r -o 0 ",
return_filename,".delta > ",
return_filename,"_f",".delta"))
ret_filename=paste0(return_filename,"_f")
return(paste0(ret_filename))
}
<<<<<<< HEAD
run.summary=function(ret_filename,cmd_path){
=======
function(ret_filename,cmd_path){
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
#### 2) generate summary of the alignments ######
# -g nly display alignments included in the Longest Ascending Subset, i.e. the global alignment. Recommened to be used in conjunction with the -r or -q options. Does not support circular sequences
# -l includes seq length
# -T tab-delimited
#-q sort by query length (since we are interested in the qry strain as teh starting block of ancestraal reconstruciton further down the pipeline)
system(paste0(cmd_path, "show-coords -g -H -l -T -q ",
ret_filename,".delta > ",ret_filename,".coords"))
# 2.1 )load aligns-----
aligns_df=read.table(paste0(ret_filename,".coords"),blank.lines.skip=TRUE)
names(aligns_df)= c("ref_pos","ref_end", "str_pos", "str_end", "ref_al_len", "str_al_len", "pct_match", "ref_len", "str_len" , "str_name","ref_name")
ref_fasta_name=levels(aligns_df$ref_name)[1]
str_fasta_name=levels(aligns_df$str_name)[1]
#### 3) generate summary of SNPs ######
#-q sort by query strain.
#-H drop header, make it tab-delimited (-T), include sequence length
system(paste0(cmd_path, "show-snps -H -T -q -l ",
ret_filename,".delta > ",ret_filename,".snps"))
# 3.1) load snps-----
snps_df=read.table(paste0(ret_filename,".snps"),blank.lines.skip=TRUE,)
snps_df=snps_df[,1:6]
names(snps_df)= c("ref_pos","ref_sub", "str_sub", "str_pos", "len", "dist")
#### 4) show aligns ######
system(paste0(cmd_path,"show-aligns ",
ret_filename,".delta > ",
ret_filename,".aligns '", str_fasta_name, "' '",ref_fasta_name,"'"))
return(list(
snps_df=snps_df,
aligns_df=aligns_df,
aligns_filename=paste0(ret_filename,".aligns")
))
}
<<<<<<< HEAD
build.mummat=function(snps_df, aligns_df){
###########
#5)create matrix of alignment where columns are the aligned positions of strain vs reference, and snp=0 for mum, 1 for subs, 2 for indel------
mum_snps=matrix(nrow=max(aligns_df$str_len)+1, ncol=3)
=======
function(snps_df, aligns_df){
###########
#5)create matrix of alignment where columns are the aligned positions of strain vs reference, and snp=0 for mum, 1 for subs, 2 for indel------
mum_snps=matrix(nrow=max(aligns_df$str_len), ncol=3)
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
colnames(mum_snps)=c("str_pos", "ref_pos", "snp")
mum_snps[,"snp"]="non-align"
###########
#6) Create matrix [mum_snps] aligning the qry to the reference strain, and indicating SNPs -------
#loop over the aligns_df list of alignments and the snps_df list of SNPs
#for each alignmnet in the aligns_df list (going down the qry strain):
for (a in 1:nrow(aligns_df)){
#get start and end of alignment
temp_align_s=aligns_df[a,]$str_pos
temp_align_e=aligns_df[a,]$str_end
#fill out the snp status variable to default to "mum"
mum_snps[temp_align_s:temp_align_e,"snp"]="mum"
#fill out the qry strain positions in matrix
mum_snps[temp_align_s:temp_align_e,"str_pos"]=temp_align_s:temp_align_e
# cut up the SNPs df
snps_df_temp = snps_df[which(snps_df$str_pos>=temp_align_s & snps_df$str_pos<= temp_align_e),]
if (nrow(snps_df_temp) ==0) {
warning(paste("Ambiguous alignment between qry strain pos",temp_align_s,temp_align_e,", no mapping produced" ))
<<<<<<< HEAD
} else {
print(paste("Aligning qry strain pos",temp_align_s,"-", temp_align_e ))
#make string variable describing type of SNP
snps_df_temp$snp = paste0(snps_df_temp$str_sub,">",snps_df_temp$ref_sub)
#fill positions up to first snp of that alignment:
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"str_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)
diff= temp_align_s-aligns_df[a,]$ref_pos
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"ref_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)-diff
mum_snps[!is.na(mum_snps)&mum_snps<=0]=NA
#for each SNP in the alignment:
for (i in 1:nrow(snps_df_temp) ) {
#if the SNP is a substitution:
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub!="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
#mark as snp
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="sub"
}
#if SNP is a deletion in qry string (insertion)
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub=="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
temp_s=snps_df_temp[i,]$str_pos+1
#if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e+1 else temp_e=snps_df_temp[i+1,]$str_pos+1
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
mum_snps[(temp_s-1),"snp"]="ins_s"
mum_snps[(temp_s),"snp"]="ins_e"
}
if (snps_df_temp[i,]$ref_sub=="." & snps_df_temp[i,]$str_sub!=".") {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[(temp_s):temp_e,"ref_pos"]=c( NA,(temp_s+1):(temp_e)-temp_diff )
#change the snp variable to mark as deletion in reference string
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="del"
}
=======
} else print(paste("Aligning qry strain pos",temp_align_s,"-", temp_align_e ))
#make string variable describing type of SNP
snps_df_temp$snp = paste0(snps_df_temp$str_sub,">",snps_df_temp$ref_sub)
#fill positions up to first snp of that alignment:
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"str_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)
diff= temp_align_s-aligns_df[a,]$ref_pos
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"ref_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)-diff
mum_snps[!is.na(mum_snps)&mum_snps<=0]=NA
#for each SNP in the alignment:
for (i in 1:nrow(snps_df_temp) ) {
#if the SNP is a substitution:
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub!="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
#mark as snp
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="sub"
}
#if SNP is a deletion in qry string (insertion)
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub=="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
temp_s=snps_df_temp[i,]$str_pos+1
#if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e+1 else temp_e=snps_df_temp[i+1,]$str_pos+1
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
mum_snps[(temp_s-1),"snp"]="ins_s"
mum_snps[(temp_s),"snp"]="ins_e"
}
if (snps_df_temp[i,]$ref_sub=="." & snps_df_temp[i,]$str_sub!=".") {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[(temp_s):temp_e,"ref_pos"]=c( NA,(temp_s+1):(temp_e)-temp_diff )
#change the snp variable to mark as deletion in reference string
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="del"
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
}
}
}
return(mum_snps)
}
| /mummer align functions.R | no_license | NBRAYKO/StaphRotation | R | false | false | 12,036 | r | <<<<<<< HEAD
run.nucmer=function(str_name,ref_name="N315", cmd_path,out_path ){
=======
function(str_name,ref_name="N315", cmd_path,out_path ){
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
require(ape)
#### Check that the inputs are all DNA sequences
if (unique(names(table(as.character(read.FASTA(paste0(out_path, str_name,".fasta")))))!= c("a", "c", "g" ,"t"))) stop("Str not a DNA sequence")
if (unique(names(table(as.character(read.FASTA(paste0(out_path, ref_name,".fasta")))))!= c("a", "c", "g" ,"t"))) stop("Ref not a DNA sequence")
#### 1) run NUCmer w following options #####
# --mum Use anchor matches that are unique in both the reference and query;
return_filename=paste0("nucmer_",ref_name,"_",str_name)
system(paste0(cmd_path,"nucmer --mum --prefix=", return_filename," ",
out_path, ref_name, ".fasta ",
out_path, str_name,".fasta"))
# 1.1) filter NUCmer w following options------
# -q Query alignment using length*identity weighted LIS. For each query, leave only the alignments which form the longest consistent set for the query
# -r Reference alignment using length*identity weighted LIS. For each reference, leave only the alignments which form the longest consistent set for the reference.
# -u float Set the minimum alignment uniqueness, i.e. percent of the alignment matching to unique reference AND query sequence [0, 100], (default 0)
# -o float Set the maximum alignment overlap for -r and -q options as a percent of the alignment length [0, 100], (default 75)
system(paste0("/data1/home/rpetit/bin/MUMmer/delta-filter -q -r -o 0 ",
return_filename,".delta > ",
return_filename,"_f",".delta"))
ret_filename=paste0(return_filename,"_f")
return(paste0(ret_filename))
}
<<<<<<< HEAD
run.summary=function(ret_filename,cmd_path){
=======
function(ret_filename,cmd_path){
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
#### 2) generate summary of the alignments ######
# -g nly display alignments included in the Longest Ascending Subset, i.e. the global alignment. Recommened to be used in conjunction with the -r or -q options. Does not support circular sequences
# -l includes seq length
# -T tab-delimited
#-q sort by query length (since we are interested in the qry strain as teh starting block of ancestraal reconstruciton further down the pipeline)
system(paste0(cmd_path, "show-coords -g -H -l -T -q ",
ret_filename,".delta > ",ret_filename,".coords"))
# 2.1 )load aligns-----
aligns_df=read.table(paste0(ret_filename,".coords"),blank.lines.skip=TRUE)
names(aligns_df)= c("ref_pos","ref_end", "str_pos", "str_end", "ref_al_len", "str_al_len", "pct_match", "ref_len", "str_len" , "str_name","ref_name")
ref_fasta_name=levels(aligns_df$ref_name)[1]
str_fasta_name=levels(aligns_df$str_name)[1]
#### 3) generate summary of SNPs ######
#-q sort by query strain.
#-H drop header, make it tab-delimited (-T), include sequence length
system(paste0(cmd_path, "show-snps -H -T -q -l ",
ret_filename,".delta > ",ret_filename,".snps"))
# 3.1) load snps-----
snps_df=read.table(paste0(ret_filename,".snps"),blank.lines.skip=TRUE,)
snps_df=snps_df[,1:6]
names(snps_df)= c("ref_pos","ref_sub", "str_sub", "str_pos", "len", "dist")
#### 4) show aligns ######
system(paste0(cmd_path,"show-aligns ",
ret_filename,".delta > ",
ret_filename,".aligns '", str_fasta_name, "' '",ref_fasta_name,"'"))
return(list(
snps_df=snps_df,
aligns_df=aligns_df,
aligns_filename=paste0(ret_filename,".aligns")
))
}
<<<<<<< HEAD
build.mummat=function(snps_df, aligns_df){
###########
#5)create matrix of alignment where columns are the aligned positions of strain vs reference, and snp=0 for mum, 1 for subs, 2 for indel------
mum_snps=matrix(nrow=max(aligns_df$str_len)+1, ncol=3)
=======
function(snps_df, aligns_df){
###########
#5)create matrix of alignment where columns are the aligned positions of strain vs reference, and snp=0 for mum, 1 for subs, 2 for indel------
mum_snps=matrix(nrow=max(aligns_df$str_len), ncol=3)
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
colnames(mum_snps)=c("str_pos", "ref_pos", "snp")
mum_snps[,"snp"]="non-align"
###########
#6) Create matrix [mum_snps] aligning the qry to the reference strain, and indicating SNPs -------
#loop over the aligns_df list of alignments and the snps_df list of SNPs
#for each alignmnet in the aligns_df list (going down the qry strain):
for (a in 1:nrow(aligns_df)){
#get start and end of alignment
temp_align_s=aligns_df[a,]$str_pos
temp_align_e=aligns_df[a,]$str_end
#fill out the snp status variable to default to "mum"
mum_snps[temp_align_s:temp_align_e,"snp"]="mum"
#fill out the qry strain positions in matrix
mum_snps[temp_align_s:temp_align_e,"str_pos"]=temp_align_s:temp_align_e
# cut up the SNPs df
snps_df_temp = snps_df[which(snps_df$str_pos>=temp_align_s & snps_df$str_pos<= temp_align_e),]
if (nrow(snps_df_temp) ==0) {
warning(paste("Ambiguous alignment between qry strain pos",temp_align_s,temp_align_e,", no mapping produced" ))
<<<<<<< HEAD
} else {
print(paste("Aligning qry strain pos",temp_align_s,"-", temp_align_e ))
#make string variable describing type of SNP
snps_df_temp$snp = paste0(snps_df_temp$str_sub,">",snps_df_temp$ref_sub)
#fill positions up to first snp of that alignment:
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"str_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)
diff= temp_align_s-aligns_df[a,]$ref_pos
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"ref_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)-diff
mum_snps[!is.na(mum_snps)&mum_snps<=0]=NA
#for each SNP in the alignment:
for (i in 1:nrow(snps_df_temp) ) {
#if the SNP is a substitution:
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub!="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
#mark as snp
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="sub"
}
#if SNP is a deletion in qry string (insertion)
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub=="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
temp_s=snps_df_temp[i,]$str_pos+1
#if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e+1 else temp_e=snps_df_temp[i+1,]$str_pos+1
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
mum_snps[(temp_s-1),"snp"]="ins_s"
mum_snps[(temp_s),"snp"]="ins_e"
}
if (snps_df_temp[i,]$ref_sub=="." & snps_df_temp[i,]$str_sub!=".") {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[(temp_s):temp_e,"ref_pos"]=c( NA,(temp_s+1):(temp_e)-temp_diff )
#change the snp variable to mark as deletion in reference string
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="del"
}
=======
} else print(paste("Aligning qry strain pos",temp_align_s,"-", temp_align_e ))
#make string variable describing type of SNP
snps_df_temp$snp = paste0(snps_df_temp$str_sub,">",snps_df_temp$ref_sub)
#fill positions up to first snp of that alignment:
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"str_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)
diff= temp_align_s-aligns_df[a,]$ref_pos
mum_snps[temp_align_s:(snps_df_temp[1,]$str_pos),"ref_pos"]= temp_align_s:(snps_df_temp[1,]$str_pos)-diff
mum_snps[!is.na(mum_snps)&mum_snps<=0]=NA
#for each SNP in the alignment:
for (i in 1:nrow(snps_df_temp) ) {
#if the SNP is a substitution:
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub!="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
#mark as snp
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="sub"
}
#if SNP is a deletion in qry string (insertion)
if (snps_df_temp[i,]$ref_sub!="." & snps_df_temp[i,]$str_sub=="." ) {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
temp_s=snps_df_temp[i,]$str_pos+1
#if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e+1 else temp_e=snps_df_temp[i+1,]$str_pos+1
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[temp_s:temp_e,"ref_pos"]=(temp_s:temp_e)-temp_diff
mum_snps[(temp_s-1),"snp"]="ins_s"
mum_snps[(temp_s),"snp"]="ins_e"
}
if (snps_df_temp[i,]$ref_sub=="." & snps_df_temp[i,]$str_sub!=".") {
#fill in positions for the qry strain for basepairs between current and previous sub/indel
temp_diff=snps_df_temp[i,]$str_pos - snps_df_temp[i,]$ref_pos
#get start position of mum gap for str
temp_s=snps_df_temp[i,]$str_pos
#get the end. if the end of alignment is reached, the temp_end of the mum is temp_align_e
if (i==nrow(snps_df_temp)) temp_e = temp_align_e else temp_e=snps_df_temp[i+1,]$str_pos
mum_snps[temp_s:temp_e,"str_pos"]=temp_s:temp_e
#the ref position is the query - the shift between the frames
mum_snps[(temp_s):temp_e,"ref_pos"]=c( NA,(temp_s+1):(temp_e)-temp_diff )
#change the snp variable to mark as deletion in reference string
#mum_snps[temp_s,"snp"]=snps_df_temp[i,"snp"]
mum_snps[temp_s,"snp"]="del"
>>>>>>> dd81f1bdf4e2b72bf0f1acd60b551cafeafc2eb6
}
}
}
return(mum_snps)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mice.impute.logreg.R
\name{mice.impute.logreg}
\alias{mice.impute.logreg}
\title{Imputation by logistic regression}
\usage{
mice.impute.logreg(y, ry, x, wy = NULL, ...)
}
\arguments{
\item{y}{Vector to be imputed}
\item{ry}{Logical vector of length \code{length(y)} indicating the
the subset \code{y[ry]} of elements in \code{y} to which the imputation
model is fitted. The \code{ry} generally distinguishes the observed
(\code{TRUE}) and missing values (\code{FALSE}) in \code{y}.}
\item{x}{Numeric design matrix with \code{length(y)} rows with predictors for
\code{y}. Matrix \code{x} may have no missing values.}
\item{wy}{Logical vector of length \code{length(y)}. A \code{TRUE} value
indicates locations in \code{y} for which imputations are created.}
\item{...}{Other named arguments.}
}
\value{
Vector with imputed data, same type as \code{y}, and of length
\code{sum(wy)}
}
\description{
Imputes univariate missing data using logistic regression.
}
\details{
Imputation for binary response variables by the Bayesian logistic regression
model (Rubin 1987, p. 169-170). The
Bayesian method consists of the following steps:
\enumerate{
\item Fit a logit, and find (bhat, V(bhat))
\item Draw BETA from N(bhat, V(bhat))
\item Compute predicted scores for m.d., i.e. logit-1(X BETA)
\item Compare the score to a random (0,1) deviate, and impute.
}
The method relies on the
standard \code{glm.fit} function. Warnings from \code{glm.fit} are
suppressed. Perfect prediction is handled by the data augmentation
method.
}
\references{
Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}:
Multivariate Imputation by Chained Equations in \code{R}. \emph{Journal of
Statistical Software}, \bold{45}(3), 1-67.
\url{https://www.jstatsoft.org/v45/i03/}
Brand, J.P.L. (1999). Development, Implementation and Evaluation of Multiple
Imputation Strategies for the Statistical Analysis of Incomplete Data Sets.
Ph.D. Thesis, TNO Prevention and Health/Erasmus University Rotterdam. ISBN
90-74479-08-1.
Venables, W.N. & Ripley, B.D. (1997). Modern applied statistics with S-Plus
(2nd ed). Springer, Berlin.
White, I., Daniel, R. and Royston, P (2010). Avoiding bias due to perfect
prediction in multiple imputation of incomplete categorical variables.
Computational Statistics and Data Analysis, 54:22672275.
}
\seealso{
\code{\link{mice}}, \code{\link{glm}}, \code{\link{glm.fit}}
Other univariate imputation functions: \code{\link{mice.impute.cart}},
\code{\link{mice.impute.lda}},
\code{\link{mice.impute.logreg.boot}},
\code{\link{mice.impute.mean}},
\code{\link{mice.impute.midastouch}},
\code{\link{mice.impute.norm.boot}},
\code{\link{mice.impute.norm.nob}},
\code{\link{mice.impute.norm.predict}},
\code{\link{mice.impute.norm}},
\code{\link{mice.impute.pmm}},
\code{\link{mice.impute.polr}},
\code{\link{mice.impute.polyreg}},
\code{\link{mice.impute.quadratic}},
\code{\link{mice.impute.rf}},
\code{\link{mice.impute.ri}}
}
\author{
Stef van Buuren, Karin Groothuis-Oudshoorn
}
\concept{univariate imputation functions}
\keyword{datagen}
| /man/mice.impute.logreg.Rd | no_license | mproeling/mice | R | false | true | 3,167 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mice.impute.logreg.R
\name{mice.impute.logreg}
\alias{mice.impute.logreg}
\title{Imputation by logistic regression}
\usage{
mice.impute.logreg(y, ry, x, wy = NULL, ...)
}
\arguments{
\item{y}{Vector to be imputed}
\item{ry}{Logical vector of length \code{length(y)} indicating the
the subset \code{y[ry]} of elements in \code{y} to which the imputation
model is fitted. The \code{ry} generally distinguishes the observed
(\code{TRUE}) and missing values (\code{FALSE}) in \code{y}.}
\item{x}{Numeric design matrix with \code{length(y)} rows with predictors for
\code{y}. Matrix \code{x} may have no missing values.}
\item{wy}{Logical vector of length \code{length(y)}. A \code{TRUE} value
indicates locations in \code{y} for which imputations are created.}
\item{...}{Other named arguments.}
}
\value{
Vector with imputed data, same type as \code{y}, and of length
\code{sum(wy)}
}
\description{
Imputes univariate missing data using logistic regression.
}
\details{
Imputation for binary response variables by the Bayesian logistic regression
model (Rubin 1987, p. 169-170). The
Bayesian method consists of the following steps:
\enumerate{
\item Fit a logit, and find (bhat, V(bhat))
\item Draw BETA from N(bhat, V(bhat))
\item Compute predicted scores for m.d., i.e. logit-1(X BETA)
\item Compare the score to a random (0,1) deviate, and impute.
}
The method relies on the
standard \code{glm.fit} function. Warnings from \code{glm.fit} are
suppressed. Perfect prediction is handled by the data augmentation
method.
}
\references{
Van Buuren, S., Groothuis-Oudshoorn, K. (2011). \code{mice}:
Multivariate Imputation by Chained Equations in \code{R}. \emph{Journal of
Statistical Software}, \bold{45}(3), 1-67.
\url{https://www.jstatsoft.org/v45/i03/}
Brand, J.P.L. (1999). Development, Implementation and Evaluation of Multiple
Imputation Strategies for the Statistical Analysis of Incomplete Data Sets.
Ph.D. Thesis, TNO Prevention and Health/Erasmus University Rotterdam. ISBN
90-74479-08-1.
Venables, W.N. & Ripley, B.D. (1997). Modern applied statistics with S-Plus
(2nd ed). Springer, Berlin.
White, I., Daniel, R. and Royston, P (2010). Avoiding bias due to perfect
prediction in multiple imputation of incomplete categorical variables.
Computational Statistics and Data Analysis, 54:22672275.
}
\seealso{
\code{\link{mice}}, \code{\link{glm}}, \code{\link{glm.fit}}
Other univariate imputation functions: \code{\link{mice.impute.cart}},
\code{\link{mice.impute.lda}},
\code{\link{mice.impute.logreg.boot}},
\code{\link{mice.impute.mean}},
\code{\link{mice.impute.midastouch}},
\code{\link{mice.impute.norm.boot}},
\code{\link{mice.impute.norm.nob}},
\code{\link{mice.impute.norm.predict}},
\code{\link{mice.impute.norm}},
\code{\link{mice.impute.pmm}},
\code{\link{mice.impute.polr}},
\code{\link{mice.impute.polyreg}},
\code{\link{mice.impute.quadratic}},
\code{\link{mice.impute.rf}},
\code{\link{mice.impute.ri}}
}
\author{
Stef van Buuren, Karin Groothuis-Oudshoorn
}
\concept{univariate imputation functions}
\keyword{datagen}
|
setwd("C:/Users/manan/Downloads")
x<- read.csv('new.csv',stringsAsFactors =TRUE,fileEncoding="latin1")
#Removing URL
df <- x[,-1]
#Replacing the missing (NA) days on market values with the median
df$DOM<- ifelse(is.na(df$DOM),median(df$DOM,na.rm=T),df$DOM)
library(dplyr)
#Removing id and cid
df2 <- data.frame(df %>% dplyr::select(-id, -Cid))
#Numeric to Nominal
df2$livingRoom <- as.numeric(df2$livingRoom)
df2$bathRoom <- as.numeric(df2$bathRoom)
df2$drawingRoom <- as.numeric(df2$drawingRoom)
df2$district <- as.factor(df2$district)
#Obtaining Building Types
makeBuildingType <- function(x){
if(!is.na(x)){
if(x==1){
return('Tower')
}
else if (x==2){
return('Bungalow')
}
else if (x==3){
return('Mix plate tower')
}
else if (x==4){
return('plate')
}
else return('wrong_coded')
}
else{return('missing')}
}
df2$buildingType <- sapply(df2$buildingType, makeBuildingType)
df2 <- data.frame(df2 %>% filter(buildingType != 'wrong_coded' & buildingType !='missing'))
#obtaining renovation condition
makeRenovationCondition <- function(x){
if(x==1){
return('Other')
}
else if (x==2){
return('Rough')
}
else if (x==3){
return('Simplicity')
}
else if (x==4){
return('Hardcover')
}
}
df2$renovationCondition <- sapply(df2$renovationCondition, makeRenovationCondition)
#Obtaining building structure
makeBuildingStructure <- function(x){
if(x==1){
return('Unknown')
}
else if (x==2){
return('Mix')
}
else if (x==3){
return('Brick_Wood')
}
else if (x==4){
return('Brick_Concrete')
}
else if (x==5){
return('Steel')
}
else if (x==6){
return('Steel_Concrete')
}
}
df2$buildingStructure <- sapply(df2$buildingStructure, makeBuildingStructure)
df2$elevator <- ifelse(df2$elevator==1,'has_elevator','no_elevator')
df2$constructionTime <-as.numeric(df2$constructionTime)
df2$district <-as.factor(df2$district)
df2$subway <- ifelse(df2$subway==1,'has_subway','no_subway')
df2$fiveYearsProperty <- ifelse(df2$fiveYearsProperty==1,'owner_less_5y','owner_more_5y')
df3 <- data.frame(df2 %>% na.omit())
df3$buildingType <- as.factor(df3$buildingType)
df3$buildingStructure <- as.factor(df3$buildingStructure)
df3$elevator <- as.factor(df3$elevator)
df3$fiveYearsProperty <- as.factor(df3$fiveYearsProperty)
df3$subway <- as.factor(df3$subway)
df3$district <- as.factor(df3$district)
df3$renovationCondition <- as.factor(df3$renovationCondition)
str(df3)
any(is.na(df3))
# Correlation Plot
install.packages("corrplot")
library(corrplot)
corrplot(cor(
df3 %>% select_if(is.numeric) %>% select(-Lng, -Lat) ,use = "pairwise.complete.obs",
method='pearson')
,method='ellipse',
tl.cex=1,
col = viridis::viridis(50),
tl.col='black')
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
makeFeatureCatEDA <- function(x, numFeatures){
if(numFeatures < 13){
mypalette <-'Paired'
mycols <- 2
mybox <- df3 %>% ggplot(aes_string(x,'price')) + geom_boxplot(aes_string(color=x)) +
scale_color_brewer(name='', palette=mypalette) + theme_minimal(12) +
theme(axis.title =element_blank(), legend.position='None') +
labs(title='average price of homes') + coord_flip()
}
else{
mypalette <- colorRampPalette(brewer.pal(12,'Paired'))(numFeatures)
mycols <- 3
mybox <- df3 %>% ggplot(aes_string(x,'price')) + geom_boxplot(aes_string(color=x)) +
scale_color_manual(name='',values=mypalette) + theme_minimal(12) +
theme(axis.title =element_blank(), legend.position='None') +
labs(title='average price of homes') + coord_flip()
}
grid.arrange(mybox)
}
makeFeatureCatEDA('buildingStructure', length(unique(df3$buildingStructure)))
makeFeatureCatEDA('buildingType', length(unique(df3$buildingType)))
makeFeatureCatEDA('renovationCondition', length(unique(df3$renovationCondition)))
makeFeatureCatEDA('elevator', length(unique(df3$elevator)))
makeFeatureCatEDA('subway', length(unique(df3$subway)))
makeFeatureCatEDA('fiveYearsProperty', length(unique(df3$fiveYearsProperty)))
makeFeatureCatEDA('district', length(unique(df3$district)))
#Association Rule Mining
str(df3)
df3 <- data.frame(df2 %>% dplyr::select(-floor))
a<- df3$price
quantile(a)
dum<-replicate(length(df3$price), "Medium")
dum[df3$price < 28050]<-"Low"
dum[df3$price > 53819]<-"High"
df3$price<- dum
dum<-replicate(length(df3$DOM), "Medium")
dum[df3$DOM < 420]<-"Low"
dum[df3$DOM > 1250]<-"High"
df3$DOM<- dum
a<- df3$livingRoom
quantile(a)
dum<-replicate(length(df3$livingRoom), "Less than 4")
dum[df3$livingRoom > 4]<-"High"
df3$livingRoom<- dum
a<- df3$followers
quantile(a)
dum<-replicate(length(df3$followers), "Medium")
dum[df3$followers < 285]<-"Low"
dum[df3$followers > 857]<-"High"
df3$followers<- dum
a<- df3$square
max(a)
dum<-replicate(length(df3$square), "Medium")
dum[df3$square < 230]<-"Low"
dum[df3$square > 691]<-"High"
df3$square<- dum
a<- df3$drawingRoom
max(a)
dum<-replicate(length(df3$drawingRoom), "Medium")
dum[df3$drawingRoom < 4]<-"Low"
dum[df3$drawingRoom > 12]<-"High"
df3$drawingRoom<- dum
a<- df3$kitchen
max(a)
dum<-replicate(length(df3$kitchen), "Medium")
dum[df3$kitchen < 1]<-"Low"
dum[df3$kitchen > 2]<-"High"
df3$kitchen<- dum
a<- df3$bathRoom
max(a)
dum<-replicate(length(df3$bathRoom), "Medium")
dum[df3$bathRoom < 4]<-"Low"
dum[df3$bathRoom > 12]<-"High"
df3$bathRoom<- dum
a<- df3$constructionTime
max(a)
dum<-replicate(length(df3$constructionTime), "Medium")
dum[df3$constructionTime < 18]<-"Low"
dum[df3$constructionTime > 55]<-"High"
df3$constructionTime<- dum
a<- df3$ladderRatio
max(a)
dum<-replicate(length(df3$ladderRatio), "Medium")
dum[df3$ladderRatio < 0.25]<-"Low"
dum[df3$ladderRatio > 0.75]<-"High"
df3$ladderRatio<- dum
##############################################################
# Apriori
x1 <- Sys.time()
library(arules)
arm <- data.frame(df3$DOM, df3$followers, df3$price, df3$square, df3$livingRoom, df3$drawingRoom, df3$kitchen, df3$bathRoom, df3$buildingType, df3$constructionTime, df3$renovationCondition, df3$buildingStructure, df3$ladderRatio, df3$elevator, df3$fiveYearsProperty,df3$subway, df3$district)
armx<- as(arm,"transactions")
rules<-apriori(armx,parameter = list(support=0.1,confidence=0.2,minlen=1),appearance = list(rhs=c("df3.price=High")))
inspect(rules)
goodrules<-rules[quality(rules)$lift>1.385]
inspect(goodrules)
library(arulesViz)
plot(goodrules,method="graph",engine="htmlwidget")
x2 <- Sys.time()
x3 <- x2-x1
x3
###############################################################
# Decision tree
x4 <- Sys.time()
library(dplyr)
library(class)
library(gmodels)
library(RWeka)
library(tidyverse)
library(kernlab)
library(randomForest)
library(tree)
df3.factor<-df3
df3.factor$DOM<-as.factor(df3.factor$DOM)
df3.factor$followers<-as.factor(df3.factor$followers)
df3.factor$price<-as.factor(df3.factor$price)
df3.factor$square<-as.factor(df3.factor$square)
df3.factor$livingRoom<-as.factor(df3.factor$livingRoom)
df3.factor$drawingRoom<-as.factor(df3.factor$drawingRoom)
df3.factor$kitchen<-as.factor(df3.factor$kitchen)
df3.factor$bathRoom<-as.factor(df3.factor$bathRoom)
df3.factor$buildingType<-as.factor(df3.factor$buildingType)
df3.factor$constructionTime<-as.factor(df3.factor$constructionTime)
df3.factor$renovationCondition<-as.factor(df3.factor$renovationCondition)
df3.factor$buildingStructure<-as.factor(df3.factor$buildingStructure)
df3.factor$ladderRatio<-as.factor(df3.factor$ladderRatio)
df3.factor$elevator<-as.factor(df3.factor$elevator)
df3.factor$fiveYearsProperty<-as.factor(df3.factor$fiveYearsProperty)
df3.factor$subway<-as.factor(df3.factor$subway)
str(df3.factor)
df3.factor <- subset(df3.factor, select=-c(Lng,Lat,tradeTime,totalPrice,communityAverage))
str(df3.factor)
test.tree <- tree(price~.,data = df3.factor)
summary(test.tree)
plot(test.tree)
text(test.tree, pretty = 0)
x5 <- Sys.time()
x6 <- x5-x4
x6
###############################################################################
# Naive Bayes
x7 <- Sys.time()
install.packages("RWeka")
library("RWeka")
Naive <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
bayesmodel=Naive(price~., data=df3.factor)
trainx <- sample(seq_len(nrow(df3)),size = floor(0.66*nrow(df3)))
train <- df3[trainx, ]
test <- df3[-trainx, ]
train[] <- lapply(train, factor)
test[] <- lapply(test, factor)
predbayes = predict (bayesmodel, newdata = df3.factor, type = c("class"))
predbayes
#Perform 10 fold cross validation
evalbayes <- evaluate_Weka_classifier(bayesmodel, numFolds = 10, seed = 1, class = TRUE)
evalbayes
#Perform 3 fold cross validation
#evalbayes3 <-evaluate_Weka_classifier(bayesmodel,numFolds = 3,seed = 1, class = TRUE)
#evalbayes3
#morebaths <- subset(df3, df3$bathRoom > 4)
x8 <- Sys.time()
x9 <- x8-x7
x9
##############################################################################
# SVM
svmSampleTrain <- sample_n(df3,30000)
View(df3)
svm.model <- ksvm(price~., data = svmSampleTrain, kernel = "rbfdot", kpar="automatic", C=75, cross=50,
prob.model=TRUE, type = "C-svc")
svm.model
svm.Pred <- predict(svm.model,testdata)
| /IST707Proj.R | no_license | Manandedhia/Beijing-House-Price-Prediction | R | false | false | 9,158 | r | setwd("C:/Users/manan/Downloads")
x<- read.csv('new.csv',stringsAsFactors =TRUE,fileEncoding="latin1")
#Removing URL
df <- x[,-1]
#Replacing the missing (NA) days on market values with the median
df$DOM<- ifelse(is.na(df$DOM),median(df$DOM,na.rm=T),df$DOM)
library(dplyr)
#Removing id and cid
df2 <- data.frame(df %>% dplyr::select(-id, -Cid))
#Numeric to Nominal
df2$livingRoom <- as.numeric(df2$livingRoom)
df2$bathRoom <- as.numeric(df2$bathRoom)
df2$drawingRoom <- as.numeric(df2$drawingRoom)
df2$district <- as.factor(df2$district)
#Obtaining Building Types
makeBuildingType <- function(x){
  # Map a numeric building-type code to its label.
  # NA -> 'missing'; 1..4 -> named type; anything else -> 'wrong_coded'.
  if (is.na(x)) {
    return('missing')
  }
  switch(as.character(x),
         '1' = 'Tower',
         '2' = 'Bungalow',
         '3' = 'Mix plate tower',
         '4' = 'plate',
         'wrong_coded')
}
df2$buildingType <- sapply(df2$buildingType, makeBuildingType)
df2 <- data.frame(df2 %>% filter(buildingType != 'wrong_coded' & buildingType !='missing'))
#obtaining renovation condition
makeRenovationCondition <- function(x){
  # Map a numeric renovation-condition code (1..4) to its label.
  # Robustness fix: the original had no fallback branch, so codes outside
  # 1..4 returned an invisible NULL and an NA input stopped with an error.
  # Mirror makeBuildingType() and return 'missing' / 'wrong_coded' instead.
  if (is.na(x)) {
    return('missing')
  }
  switch(as.character(x),
         '1' = 'Other',
         '2' = 'Rough',
         '3' = 'Simplicity',
         '4' = 'Hardcover',
         'wrong_coded')
}
df2$renovationCondition <- sapply(df2$renovationCondition, makeRenovationCondition)
#Obtaining building structure
makeBuildingStructure <- function(x){
  # Map a numeric building-structure code (1..6) to its label.
  # Robustness fix: the original had no fallback branch, so codes outside
  # 1..6 returned an invisible NULL and an NA input stopped with an error.
  # Mirror makeBuildingType() and return 'missing' / 'wrong_coded' instead.
  if (is.na(x)) {
    return('missing')
  }
  switch(as.character(x),
         '1' = 'Unknown',
         '2' = 'Mix',
         '3' = 'Brick_Wood',
         '4' = 'Brick_Concrete',
         '5' = 'Steel',
         '6' = 'Steel_Concrete',
         'wrong_coded')
}
df2$buildingStructure <- sapply(df2$buildingStructure, makeBuildingStructure)
df2$elevator <- ifelse(df2$elevator==1,'has_elevator','no_elevator')
df2$constructionTime <-as.numeric(df2$constructionTime)
df2$district <-as.factor(df2$district)
df2$subway <- ifelse(df2$subway==1,'has_subway','no_subway')
df2$fiveYearsProperty <- ifelse(df2$fiveYearsProperty==1,'owner_less_5y','owner_more_5y')
df3 <- data.frame(df2 %>% na.omit())
df3$buildingType <- as.factor(df3$buildingType)
df3$buildingStructure <- as.factor(df3$buildingStructure)
df3$elevator <- as.factor(df3$elevator)
df3$fiveYearsProperty <- as.factor(df3$fiveYearsProperty)
df3$subway <- as.factor(df3$subway)
df3$district <- as.factor(df3$district)
df3$renovationCondition <- as.factor(df3$renovationCondition)
str(df3)
any(is.na(df3))
# Correlation Plot
install.packages("corrplot")
library(corrplot)
corrplot(cor(
df3 %>% select_if(is.numeric) %>% select(-Lng, -Lat) ,use = "pairwise.complete.obs",
method='pearson')
,method='ellipse',
tl.cex=1,
col = viridis::viridis(50),
tl.col='black')
library(ggplot2)
library(gridExtra)
library(RColorBrewer)
makeFeatureCatEDA <- function(x, numFeatures){
  # Draw a boxplot of `price` in the global data frame `df3`, grouped by the
  # categorical column named by `x` (a string, used via aes_string()).
  # `numFeatures` is the number of levels of that column and only controls
  # the colour scale: the 'Paired' brewer palette holds at most 12 colours,
  # so larger factors get an interpolated palette via colorRampPalette().
  #
  # Fix: the two branches previously duplicated the whole plot expression
  # and assigned an unused local `mycols`; only the colour scale differs,
  # so build that first and share the rest of the plot.
  if (numFeatures < 13) {
    colour_scale <- scale_color_brewer(name='', palette='Paired')
  } else {
    mypalette <- colorRampPalette(brewer.pal(12,'Paired'))(numFeatures)
    colour_scale <- scale_color_manual(name='', values=mypalette)
  }
  mybox <- df3 %>% ggplot(aes_string(x,'price')) + geom_boxplot(aes_string(color=x)) +
    colour_scale + theme_minimal(12) +
    theme(axis.title =element_blank(), legend.position='None') +
    labs(title='average price of homes') + coord_flip()
  grid.arrange(mybox)
}
makeFeatureCatEDA('buildingStructure', length(unique(df3$buildingStructure)))
makeFeatureCatEDA('buildingType', length(unique(df3$buildingType)))
makeFeatureCatEDA('renovationCondition', length(unique(df3$renovationCondition)))
makeFeatureCatEDA('elevator', length(unique(df3$elevator)))
makeFeatureCatEDA('subway', length(unique(df3$subway)))
makeFeatureCatEDA('fiveYearsProperty', length(unique(df3$fiveYearsProperty)))
makeFeatureCatEDA('district', length(unique(df3$district)))
#Association Rule Mining
str(df3)
df3 <- data.frame(df2 %>% dplyr::select(-floor))
a<- df3$price
quantile(a)
dum<-replicate(length(df3$price), "Medium")
dum[df3$price < 28050]<-"Low"
dum[df3$price > 53819]<-"High"
df3$price<- dum
dum<-replicate(length(df3$DOM), "Medium")
dum[df3$DOM < 420]<-"Low"
dum[df3$DOM > 1250]<-"High"
df3$DOM<- dum
a<- df3$livingRoom
quantile(a)
dum<-replicate(length(df3$livingRoom), "Less than 4")
dum[df3$livingRoom > 4]<-"High"
df3$livingRoom<- dum
a<- df3$followers
quantile(a)
dum<-replicate(length(df3$followers), "Medium")
dum[df3$followers < 285]<-"Low"
dum[df3$followers > 857]<-"High"
df3$followers<- dum
a<- df3$square
max(a)
dum<-replicate(length(df3$square), "Medium")
dum[df3$square < 230]<-"Low"
dum[df3$square > 691]<-"High"
df3$square<- dum
a<- df3$drawingRoom
max(a)
dum<-replicate(length(df3$drawingRoom), "Medium")
dum[df3$drawingRoom < 4]<-"Low"
dum[df3$drawingRoom > 12]<-"High"
df3$drawingRoom<- dum
a<- df3$kitchen
max(a)
dum<-replicate(length(df3$kitchen), "Medium")
dum[df3$kitchen < 1]<-"Low"
dum[df3$kitchen > 2]<-"High"
df3$kitchen<- dum
a<- df3$bathRoom
max(a)
dum<-replicate(length(df3$bathRoom), "Medium")
dum[df3$bathRoom < 4]<-"Low"
dum[df3$bathRoom > 12]<-"High"
df3$bathRoom<- dum
a<- df3$constructionTime
max(a)
dum<-replicate(length(df3$constructionTime), "Medium")
dum[df3$constructionTime < 18]<-"Low"
dum[df3$constructionTime > 55]<-"High"
df3$constructionTime<- dum
a<- df3$ladderRatio
max(a)
dum<-replicate(length(df3$ladderRatio), "Medium")
dum[df3$ladderRatio < 0.25]<-"Low"
dum[df3$ladderRatio > 0.75]<-"High"
df3$ladderRatio<- dum
##############################################################
# Apriori
x1 <- Sys.time()
library(arules)
arm <- data.frame(df3$DOM, df3$followers, df3$price, df3$square, df3$livingRoom, df3$drawingRoom, df3$kitchen, df3$bathRoom, df3$buildingType, df3$constructionTime, df3$renovationCondition, df3$buildingStructure, df3$ladderRatio, df3$elevator, df3$fiveYearsProperty,df3$subway, df3$district)
armx<- as(arm,"transactions")
rules<-apriori(armx,parameter = list(support=0.1,confidence=0.2,minlen=1),appearance = list(rhs=c("df3.price=High")))
inspect(rules)
goodrules<-rules[quality(rules)$lift>1.385]
inspect(goodrules)
library(arulesViz)
plot(goodrules,method="graph",engine="htmlwidget")
x2 <- Sys.time()
x3 <- x2-x1
x3
###############################################################
# Decision tree
x4 <- Sys.time()
library(dplyr)
library(class)
library(gmodels)
library(RWeka)
library(tidyverse)
library(kernlab)
library(randomForest)
library(tree)
df3.factor<-df3
df3.factor$DOM<-as.factor(df3.factor$DOM)
df3.factor$followers<-as.factor(df3.factor$followers)
df3.factor$price<-as.factor(df3.factor$price)
df3.factor$square<-as.factor(df3.factor$square)
df3.factor$livingRoom<-as.factor(df3.factor$livingRoom)
df3.factor$drawingRoom<-as.factor(df3.factor$drawingRoom)
df3.factor$kitchen<-as.factor(df3.factor$kitchen)
df3.factor$bathRoom<-as.factor(df3.factor$bathRoom)
df3.factor$buildingType<-as.factor(df3.factor$buildingType)
df3.factor$constructionTime<-as.factor(df3.factor$constructionTime)
df3.factor$renovationCondition<-as.factor(df3.factor$renovationCondition)
df3.factor$buildingStructure<-as.factor(df3.factor$buildingStructure)
df3.factor$ladderRatio<-as.factor(df3.factor$ladderRatio)
df3.factor$elevator<-as.factor(df3.factor$elevator)
df3.factor$fiveYearsProperty<-as.factor(df3.factor$fiveYearsProperty)
df3.factor$subway<-as.factor(df3.factor$subway)
str(df3.factor)
df3.factor <- subset(df3.factor, select=-c(Lng,Lat,tradeTime,totalPrice,communityAverage))
str(df3.factor)
test.tree <- tree(price~.,data = df3.factor)
summary(test.tree)
plot(test.tree)
text(test.tree, pretty = 0)
x5 <- Sys.time()
x6 <- x5-x4
x6
###############################################################################
# Naive Bayes
x7 <- Sys.time()
install.packages("RWeka")
library("RWeka")
Naive <- make_Weka_classifier("weka/classifiers/bayes/NaiveBayes")
bayesmodel=Naive(price~., data=df3.factor)
trainx <- sample(seq_len(nrow(df3)),size = floor(0.66*nrow(df3)))
train <- df3[trainx, ]
test <- df3[-trainx, ]
train[] <- lapply(train, factor)
test[] <- lapply(test, factor)
predbayes = predict (bayesmodel, newdata = df3.factor, type = c("class"))
predbayes
#Perform 10 fold cross validation
evalbayes <- evaluate_Weka_classifier(bayesmodel, numFolds = 10, seed = 1, class = TRUE)
evalbayes
#Perform 3 fold cross validation
#evalbayes3 <-evaluate_Weka_classifier(bayesmodel,numFolds = 3,seed = 1, class = TRUE)
#evalbayes3
#morebaths <- subset(df3, df3$bathRoom > 4)
x8 <- Sys.time()
x9 <- x8-x7
x9
##############################################################################
# SVM
svmSampleTrain <- sample_n(df3,30000)
View(df3)
svm.model <- ksvm(price~., data = svmSampleTrain, kernel = "rbfdot", kpar="automatic", C=75, cross=50,
prob.model=TRUE, type = "C-svc")
svm.model
svm.Pred <- predict(svm.model,testdata)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_select_multiple_binary.R
\name{new_sm_binary}
\alias{new_sm_binary}
\title{Low level select multiple binary constructor}
\usage{
new_sm_binary(
x = logical(),
relevant = NA,
choice_name = NA,
choice_label = NA,
q_name = NA,
label = NA,
constraint = NA,
binary_sep = "/"
)
}
\description{
Low level select multiple binary constructor
}
| /man/new_sm_binary.Rd | no_license | zackarno/koborg | R | false | true | 435 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/class_select_multiple_binary.R
\name{new_sm_binary}
\alias{new_sm_binary}
\title{Low level select multiple binary constructor}
\usage{
new_sm_binary(
x = logical(),
relevant = NA,
choice_name = NA,
choice_label = NA,
q_name = NA,
label = NA,
constraint = NA,
binary_sep = "/"
)
}
\description{
Low level select multiple binary constructor
}
|
source("gearcalib.R") ## Get fitted model
obj <- fits[[1]]$obj
## Draw one Monte Carlo sample of the random effects from a fitted model.
## `obj` is expected to expose a TMB-style environment (`obj$env`) with
## MC(), last.par.best, random and parList() -- TODO confirm against the
## fit objects produced by gearcalib.R.
## If as.list=TRUE the sample is returned as a named parameter list with
## the sampled random effects spliced into the best parameter vector;
## otherwise the raw sample vector is returned.
getSample <- function(obj, as.list=TRUE){
    ## n=1: a single draw; keep=TRUE retains the draws as an attribute.
    tmp <- obj$env$MC(n=1, keep=TRUE, antithetic=FALSE)
    samp <- attr(tmp,"samples")
    if(as.list){
        ## Replace the random-effect entries of the best full parameter
        ## vector with the sampled values, then reshape into a list.
        par <- obj$env$last.par.best
        par[obj$env$random] <- samp
        samp <- obj$env$parList(par=par)
    }
    samp
}
pl <- getSample(obj)
| /Misc/validation.R | no_license | Uffe-H-Thygesen/Intercalibration | R | false | false | 364 | r | source("gearcalib.R") ## Get fitted model
obj <- fits[[1]]$obj
## Draw one Monte Carlo sample of the random effects from a fitted model.
## `obj` is expected to expose a TMB-style environment (`obj$env`) with
## MC(), last.par.best, random and parList() -- TODO confirm against the
## fit objects produced by gearcalib.R.
## If as.list=TRUE the sample is returned as a named parameter list with
## the sampled random effects spliced into the best parameter vector;
## otherwise the raw sample vector is returned.
getSample <- function(obj, as.list=TRUE){
    ## n=1: a single draw; keep=TRUE retains the draws as an attribute.
    tmp <- obj$env$MC(n=1, keep=TRUE, antithetic=FALSE)
    samp <- attr(tmp,"samples")
    if(as.list){
        ## Replace the random-effect entries of the best full parameter
        ## vector with the sampled values, then reshape into a list.
        par <- obj$env$last.par.best
        par[obj$env$random] <- samp
        samp <- obj$env$parList(par=par)
    }
    samp
}
pl <- getSample(obj)
|
## global.R ##
# 加载R包-----
options(warn=-1)
enableBookmarking(store = "url")
options(shiny.sanitize.errors = FALSE)
library(shiny);
library(shinydashboard);
library(tsda);
library(tsdo);
library(tsui);
library(nsgenpkg);
# 设置引入页-----
source('00_data.R',encoding = 'utf-8');
source('topbarMenu.R',encoding = 'utf-8');
source('sideBarSetting.R',encoding = 'utf-8');
source('01_row_body.R',encoding = 'utf-8');
source('02_column_body.R',encoding = 'utf-8');
source('03_book_body.R',encoding = 'utf-8');
source('04_series_body.R',encoding = 'utf-8');
source('05_majority_body.R',encoding = 'utf-8');
source('06_tutor_body.R',encoding = 'utf-8');
source('99_sysSetting_body.R',encoding = 'utf-8');
source('workAreaSetting.R',encoding = 'utf-8');
| /global.R | no_license | takewiki/nsgen | R | false | false | 767 | r | ## global.R ##
# 加载R包-----
options(warn=-1)
enableBookmarking(store = "url")
options(shiny.sanitize.errors = FALSE)
library(shiny);
library(shinydashboard);
library(tsda);
library(tsdo);
library(tsui);
library(nsgenpkg);
# 设置引入页-----
source('00_data.R',encoding = 'utf-8');
source('topbarMenu.R',encoding = 'utf-8');
source('sideBarSetting.R',encoding = 'utf-8');
source('01_row_body.R',encoding = 'utf-8');
source('02_column_body.R',encoding = 'utf-8');
source('03_book_body.R',encoding = 'utf-8');
source('04_series_body.R',encoding = 'utf-8');
source('05_majority_body.R',encoding = 'utf-8');
source('06_tutor_body.R',encoding = 'utf-8');
source('99_sysSetting_body.R',encoding = 'utf-8');
source('workAreaSetting.R',encoding = 'utf-8');
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeRBPObj.R
\name{makeRBPObj}
\alias{makeRBPObj}
\alias{RBPObj}
\title{Create data container for RBP curve.}
\usage{
makeRBPObj(pred, y, positive = NULL)
}
\arguments{
\item{pred}{[\code{numeric}]\cr
Predicted probabilities for each observation.}
\item{y}{[\code{numeric} | \code{factor}]\cr
Class labels of the target variable.
Either a numeric vector with values \code{0} or \code{1}, or a factor with two levels.}
\item{positive}{[\code{character(1)}]\cr
Set positive class label for target variable which is transformed as \code{1} to compute.
Only needed when \code{y} is a "factor".}
}
\value{
Object members:
\describe{
\item{\code{n} [\code{numeric(1)}]}{Number of observations.}
\item{\code{pred} [\code{numeric(n)}]}{Predicted probabilities.}
\item{\code{y} [\code{numeric(n)}]}{Target variable having the values 0 and 1.}
\item{\code{positive} [\code{character(1)}]}{Positive class label of target variable. Only present when \code{y} is a factor.}
\item{\code{e0} [\code{numeric(1)}]}{Average of the predicted probabilities conditional on \code{y=0}.}
\item{\code{e1} [\code{numeric(1)}]}{Average of the predicted probabilities conditional on \code{y=1}.}
\item{\code{pev} [\code{numeric(1)}]}{Proportion of explained variation measure. Computed as \code{e1-e0}.}
\item{\code{tpr} [\code{numeric(1)}]}{True positive rate.}
\item{\code{fpr} [\code{numeric(1)}]}{False positive rate.}
\item{\code{prev} [\code{numeric(1)}]}{Prevalence.}
\item{\code{one.min.prev} [\code{numeric(1)}]}{One minus the value of the prevalence.}
\item{\code{axis.x} [\code{numeric(n)}]}{Values for the X-Axis of the RBP curve.}
\item{\code{axis.y} [\code{numeric(n)}]}{Values for the Y-Axis of the RBP curve.}
}
}
\description{
Must be created for all subsequent plot function calls.
}
| /man/makeRBPObj.Rd | no_license | giuseppec/RBPcurve | R | false | true | 1,882 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeRBPObj.R
\name{makeRBPObj}
\alias{makeRBPObj}
\alias{RBPObj}
\title{Create data container for RBP curve.}
\usage{
makeRBPObj(pred, y, positive = NULL)
}
\arguments{
\item{pred}{[\code{numeric}]\cr
Predicted probabilities for each observation.}
\item{y}{[\code{numeric} | \code{factor}]\cr
Class labels of the target variable.
Either a numeric vector with values \code{0} or \code{1}, or a factor with two levels.}
\item{positive}{[\code{character(1)}]\cr
Set positive class label for target variable which is transformed as \code{1} to compute.
Only needed when \code{y} is a "factor".}
}
\value{
Object members:
\describe{
\item{\code{n} [\code{numeric(1)}]}{Number of observations.}
\item{\code{pred} [\code{numeric(n)}]}{Predicted probabilities.}
\item{\code{y} [\code{numeric(n)}]}{Target variable having the values 0 and 1.}
\item{\code{positive} [\code{character(1)}]}{Positive class label of target variable. Only present when \code{y} is a factor.}
\item{\code{e0} [\code{numeric(1)}]}{Average of the predicted probabilities conditional on \code{y=0}.}
\item{\code{e1} [\code{numeric(1)}]}{Average of the predicted probabilities conditional on \code{y=1}.}
\item{\code{pev} [\code{numeric(1)}]}{Proportion of explained variation measure. Computed as \code{e1-e0}.}
\item{\code{tpr} [\code{numeric(1)}]}{True positive rate.}
\item{\code{fpr} [\code{numeric(1)}]}{False positive rate.}
\item{\code{prev} [\code{numeric(1)}]}{Prevalence.}
\item{\code{one.min.prev} [\code{numeric(1)}]}{One minus the value of the prevalence.}
\item{\code{axis.x} [\code{numeric(n)}]}{Values for the X-Axis of the RBP curve.}
\item{\code{axis.y} [\code{numeric(n)}]}{Values for the Y-Axis of the RBP curve.}
}
}
\description{
Must be created for all subsequent plot function calls.
}
|
#################################################################
#### evaluating a solution ####
##' @title Evaluate the fitness of a population
##' @description Internal function of the genetic algorithm that evaluates the fitness (penalized log-likelihood) of a set (population) of permutations. It internally computes the best triangular matrix associated to each permutation of the population.
##' @param Pop Population of permutations from [1,p]: matrix with \code{pop.size} rows and p columns, each row corresponding to one permutation of the population.
##' @param X Design matrix, with samples (n) in rows and variables (p) in columns.
##' @param XtX (optional) Cross-product of X; computed if not provided.
##' @param lambda Parameter of penalization (>0).
##' @param grad.control A list containing the parameters for controlling the inner optimization, i.e. the gradient descent
##' \itemize{
##' \item{\code{tol.obj.inner}}{ tolerance (>0),}
##' \item{\code{max.ite.inner}}{ maximum number of iterations (>0).}
##' }
##' @param ncores Number of cores (>1, depending on your computer).
##' @return A list with the following elements:
##' \itemize{
##' \item{Tpop}{ Matrix with pxp columns, each column corresponding to the best triangular matrix (in a vector form) associated to each permutation of the population.}
##' \item{f}{ Fitness of the population.}
##' }
##' @rawNamespace export(evaluation)
##' @seealso \code{\link{GADAG}}, \code{\link{GADAG_Run}}, \code{\link{fitness}}.
##' @return A list with the following elements:
##' \itemize{
##' \item{\code{Tpop}}{ Matrix with p rows and pop.size columns, each column corresponding to the best triangular matrix (in a vector form) associated to each permutation of the population.}
##' \item{\code{f}}{ Fitness of the population.}
##' }
##' @author \packageAuthor{GADAG}
##' @examples
##' #############################################################
##' # Loading toy data
##' #############################################################
##' data(toy_data)
##' # toy_data is a list of two matrices corresponding to a "star"
##' # DAG (node 1 activates all other nodes):
##' # - toy_data$X is a 100x10 design matrix
##' # - toy_data$G is the 10x10 adjacency matrix (ground trough)
##'
##' ########################################################
##' # Creating a population of permutations
##' ########################################################
##' # first, define the parameters
##' p <- ncol(toy_data$G) # number of nodes
##' pop.size <- 10 # population size
##'
##' # then create your population of permutations
##' Pop <- matrix(data = 0, nrow = pop.size, ncol = p)
##' for(i in 1:pop.size){
##' Pop[i,] = sample(p)
##' }
##'
##' ########################################################
##' # Evaluating the fitness of the population
##' ########################################################
##' # evaluation of the whole population
##' Evaluation <- evaluation(Pop=Pop,X=toy_data$X,lambda=0.1)
##' print(Evaluation$f) # here is the fitness of the whole population
##'
##' # evaluation of one of the permutation
##' Perm <- Pop[1,]
##' Evaluation <- evaluation(Pop=Perm,toy_data$X,lambda=0.1)
##'
##' # optimal matrix T associated to Perm:
##' T <- matrix(Evaluation$Tpop,p,p)
evaluation <- function(Pop, X, XtX=NULL, lambda, grad.control = list(tol.obj=1e-6, max.ite=50), ncores=1){
  # Evaluate the fitness (penalized log-likelihood) of a population of
  # permutations. For each permutation, the best triangular matrix T is
  # found by gradient descent, then its fitness is computed.
  #
  # INPUTS
  #  Pop: population of permutations (pop.size x p), or a single permutation
  #       given as a vector
  #  X: observation matrix (n x p)
  #  XtX: t(X)%*%X, precomputed for speed (computed here if NULL)
  #  lambda: penalization term (>0)
  #  grad.control: list(tol.obj, max.ite) controlling the gradient descent
  #  ncores: number of cores for mclapply()
  #
  # OUTPUTS
  #  List of two with
  #  Tpop: optimal T values for the population (pop.size*p^2 matrix, one row
  #        for each individual, each T in vector form)
  #  f: fitness of the population

  # Fill in defaults for the inner-optimization controls.
  if (is.null(grad.control$tol.obj)){
    tol.obj <- 1e-6
  } else {
    tol.obj <- grad.control$tol.obj
  }
  if (is.null(grad.control$max.ite)){
    max.ite <- 50
  } else {
    max.ite <- grad.control$max.ite
  }
  if (is.null(XtX)) {
    XtX <- crossprod(X)
  }
  n <- dim(X)[1]
  p <- dim(X)[2]
  L <- (2/n) * norm(XtX,'f') # Lipschitz constant of the gradient
  if (max.ite < 0){
    stop("max.ite should be non-negative.")
  }
  # Accept a single permutation vector: promote it to a 1-row matrix.
  if (is.vector(Pop)){
    Pop <- t(as.matrix(Pop,ncol=length(Pop),nrow=1))
  }
  pop.size <- dim(Pop)[1]
  if (ncol(Pop)!=ncol(X)){
    stop("The number of variables of Pop does not correspond to the number of variables of X.")
  }
  # Inner optimization: best T for permutation i of the population.
  my.gradientdescent <- function(i){
    gradientdescent(P=chrom(Pop[i,]), n=n, XtX=XtX, L=L, lambda=lambda, maxite=max.ite, tolobj=tol.obj)
  }
  Tpop <- matrix(unlist(mclapply(X=seq_len(pop.size), FUN=my.gradientdescent, mc.cores = ncores, mc.preschedule=FALSE)), nrow=pop.size, byrow=TRUE)
  my.fitness <- function(i){
    fitness(P=chrom(Pop[i,]), X, matrix(Tpop[i,], p, p), lambda=lambda)
  }
  # Bug fix: this mclapply() previously omitted mc.cores, so the fitness
  # step ignored the user's `ncores` setting and ran with the default
  # core count; pass it explicitly so both parallel steps agree.
  f <- unlist(mclapply(X=seq_len(pop.size), FUN=my.fitness, mc.cores = ncores))
  return(list(Tpop = Tpop, f = f))
}
| /fuzzedpackages/GADAG/R/evaluation.R | no_license | akhikolla/testpackages | R | false | false | 5,121 | r | #################################################################
#### evaluating a solution ####
##' @title Evaluate the fitness of a population
##' @description Internal function of the genetic algorithm that evaluates the fitness (penalized log-likelihood) of a set (population) of permutations. It internally computes the best triangular matrix associated to each permutation of the population.
##' @param Pop Population of permutations from [1,p]: matrix with \code{pop.size} rows and p columns, each row corresponding to one permutation of the population.
##' @param X Design matrix, with samples (n) in rows and variables (p) in columns.
##' @param XtX (optional) Cross-product of X; computed if not provided.
##' @param lambda Parameter of penalization (>0).
##' @param grad.control A list containing the parameters for controlling the inner optimization, i.e. the gradient descent
##' \itemize{
##' \item{\code{tol.obj.inner}}{ tolerance (>0),}
##' \item{\code{max.ite.inner}}{ maximum number of iterations (>0).}
##' }
##' @param ncores Number of cores (>1, depending on your computer).
##' @return A list with the following elements:
##' \itemize{
##' \item{Tpop}{ Matrix with pxp columns, each column corresponding to the best triangular matrix (in a vector form) associated to each permutation of the population.}
##' \item{f}{ Fitness of the population.}
##' }
##' @rawNamespace export(evaluation)
##' @seealso \code{\link{GADAG}}, \code{\link{GADAG_Run}}, \code{\link{fitness}}.
##' @return A list with the following elements:
##' \itemize{
##' \item{\code{Tpop}}{ Matrix with p rows and pop.size columns, each column corresponding to the best triangular matrix (in a vector form) associated to each permutation of the population.}
##' \item{\code{f}}{ Fitness of the population.}
##' }
##' @author \packageAuthor{GADAG}
##' @examples
##' #############################################################
##' # Loading toy data
##' #############################################################
##' data(toy_data)
##' # toy_data is a list of two matrices corresponding to a "star"
##' # DAG (node 1 activates all other nodes):
##' # - toy_data$X is a 100x10 design matrix
##' # - toy_data$G is the 10x10 adjacency matrix (ground trough)
##'
##' ########################################################
##' # Creating a population of permutations
##' ########################################################
##' # first, define the parameters
##' p <- ncol(toy_data$G) # number of nodes
##' pop.size <- 10 # population size
##'
##' # then create your population of permutations
##' Pop <- matrix(data = 0, nrow = pop.size, ncol = p)
##' for(i in 1:pop.size){
##' Pop[i,] = sample(p)
##' }
##'
##' ########################################################
##' # Evaluating the fitness of the population
##' ########################################################
##' # evaluation of the whole population
##' Evaluation <- evaluation(Pop=Pop,X=toy_data$X,lambda=0.1)
##' print(Evaluation$f) # here is the fitness of the whole population
##'
##' # evaluation of one of the permutation
##' Perm <- Pop[1,]
##' Evaluation <- evaluation(Pop=Perm,toy_data$X,lambda=0.1)
##'
##' # optimal matrix T associated to Perm:
##' T <- matrix(Evaluation$Tpop,p,p)
evaluation <- function(Pop, X, XtX=NULL, lambda, grad.control = list(tol.obj=1e-6, max.ite=50), ncores=1){
  # Evaluate the fitness (penalized log-likelihood) of a population of
  # permutations. For each permutation, the best triangular matrix T is
  # found by gradient descent, then its fitness is computed.
  #
  # INPUTS
  #  Pop: population of permutations (pop.size x p), or a single permutation
  #       given as a vector
  #  X: observation matrix (n x p)
  #  XtX: t(X)%*%X, precomputed for speed (computed here if NULL)
  #  lambda: penalization term (>0)
  #  grad.control: list(tol.obj, max.ite) controlling the gradient descent
  #  ncores: number of cores for mclapply()
  #
  # OUTPUTS
  #  List of two with
  #  Tpop: optimal T values for the population (pop.size*p^2 matrix, one row
  #        for each individual, each T in vector form)
  #  f: fitness of the population

  # Fill in defaults for the inner-optimization controls.
  if (is.null(grad.control$tol.obj)){
    tol.obj <- 1e-6
  } else {
    tol.obj <- grad.control$tol.obj
  }
  if (is.null(grad.control$max.ite)){
    max.ite <- 50
  } else {
    max.ite <- grad.control$max.ite
  }
  if (is.null(XtX)) {
    XtX <- crossprod(X)
  }
  n <- dim(X)[1]
  p <- dim(X)[2]
  L <- (2/n) * norm(XtX,'f') # Lipschitz constant of the gradient
  if (max.ite < 0){
    stop("max.ite should be non-negative.")
  }
  # Accept a single permutation vector: promote it to a 1-row matrix.
  if (is.vector(Pop)){
    Pop <- t(as.matrix(Pop,ncol=length(Pop),nrow=1))
  }
  pop.size <- dim(Pop)[1]
  if (ncol(Pop)!=ncol(X)){
    stop("The number of variables of Pop does not correspond to the number of variables of X.")
  }
  # Inner optimization: best T for permutation i of the population.
  my.gradientdescent <- function(i){
    gradientdescent(P=chrom(Pop[i,]), n=n, XtX=XtX, L=L, lambda=lambda, maxite=max.ite, tolobj=tol.obj)
  }
  Tpop <- matrix(unlist(mclapply(X=seq_len(pop.size), FUN=my.gradientdescent, mc.cores = ncores, mc.preschedule=FALSE)), nrow=pop.size, byrow=TRUE)
  my.fitness <- function(i){
    fitness(P=chrom(Pop[i,]), X, matrix(Tpop[i,], p, p), lambda=lambda)
  }
  # Bug fix: this mclapply() previously omitted mc.cores, so the fitness
  # step ignored the user's `ncores` setting and ran with the default
  # core count; pass it explicitly so both parallel steps agree.
  f <- unlist(mclapply(X=seq_len(pop.size), FUN=my.fitness, mc.cores = ncores))
  return(list(Tpop = Tpop, f = f))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation-WL.R
\name{WL.randProj.test}
\alias{WL.randProj.test}
\title{Two-sample covariance test (Wu and Li 2015)}
\usage{
WL.randProj.test(X, Y, nproj = 100, useMC = FALSE, mc.cores = 1)
}
\arguments{
\item{X}{n1 by p matrix, observation of the first population, columns are features}
\item{Y}{n2 by p matrix, observation of the second population, columns are features}
\item{nproj}{number of random projections to use}
\item{useMC}{logical variable indicating whether to use multicore parallelization.
R packages \code{parallel} and \code{doParallel} are required if set to \code{TRUE}.}
\item{mc.cores}{decide the number of cores to use when \code{useMC} is set to \code{TRUE}.}
}
\value{
A list containing the following components:
\item{test.stat}{test statistic}
\item{pVal}{the p-value calculated using the limiting distribution
(max of independent standard normal)}
}
\description{
The two-sample covariance test using random projections
proposed in Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection".
}
\references{
Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection",
arXiv preprint arXiv:1511.01611.
}
\seealso{
\code{Cai.max.test()}, \code{Chang.maxBoot.test()}, \code{LC.U.test()},
\code{Schott.Frob.test()}.
}
| /man/WL.randProj.test.Rd | no_license | lingxuez/sLED | R | false | true | 1,418 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/simulation-WL.R
\name{WL.randProj.test}
\alias{WL.randProj.test}
\title{Two-sample covariance test (Wu and Li 2015)}
\usage{
WL.randProj.test(X, Y, nproj = 100, useMC = FALSE, mc.cores = 1)
}
\arguments{
\item{X}{n1 by p matrix, observation of the first population, columns are features}
\item{Y}{n2 by p matrix, observation of the second population, columns are features}
\item{nproj}{number of random projections to use}
\item{useMC}{logical variable indicating whether to use multicore parallelization.
R packages \code{parallel} and \code{doParallel} are required if set to \code{TRUE}.}
\item{mc.cores}{decide the number of cores to use when \code{useMC} is set to \code{TRUE}.}
}
\value{
A list containing the following components:
\item{test.stat}{test statistic}
\item{pVal}{the p-value calculated using the limiting distribution
(max of independent standard normal)}
}
\description{
The two-sample covariance test using random projections
proposed in Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection".
}
\references{
Wu and Li (2015)
"Tests for High-Dimensional Covariance Matrices Using Random Matrix Projection",
arXiv preprint arXiv:1511.01611.
}
\seealso{
\code{Cai.max.test()}, \code{Chang.maxBoot.test()}, \code{LC.U.test()},
\code{Schott.Frob.test()}.
}
|
library(readr)
library(tibble)
library(ggplot2)
# Build a labelled train/test data set that mixes the real samples with
# "phony" samples resampled from the observed per-protein value
# distributions, write both splits to csv, and save a 2-D PCA plot.
#
# Args:
#   data              - table with feature names in column 'idx'; the
#                       remaining columns are samples (features are rows).
#   train_file_name   - output path for the training-set csv.
#   test_file_name    - output path for the test-set csv.
#   plot_file_name    - output path for the PCA plot image.
#   seed              - RNG seed for reproducible resampling and splitting.
#   number_of_phonies - how many phony samples to generate; defaults to 50,
#                       the value previously hard-coded in the body. Must not
#                       exceed the number of real samples (sampling is
#                       without replacement).
#
# Side effects: writes two csv files and one image file.
make_fake_data <- function(data, train_file_name, test_file_name, plot_file_name,
                           seed = 0, number_of_phonies = 50) {
  set.seed(seed)
  cna <- data
  # treat missing measurements as zero
  cna[is.na(cna)] <- 0
  # feature (protein) names come from the 'idx' column
  names <- unlist(cna['idx'])
  # drop the name column, then transpose so rows = samples, cols = features
  cna <- cna[, 2:ncol(cna)]
  # as.tibble() is deprecated; as_tibble() is the supported spelling
  cna <- as_tibble(t(cna))
  colnames(cna) <- names
  # Resample n_samples values independently from each feature's observed
  # distribution. apply() over columns yields an n_samples x n_features
  # matrix whose column j holds draws from feature j.
  # BUG FIX: the previous implementation unlisted this matrix column-major
  # and re-chunked it contiguously (chunk2), which assigned resampled values
  # to the wrong features; converting the matrix directly keeps each draw
  # with its own feature, matching the stated intent ("n_samples from each
  # protein's distribution").
  create_phony_samples <- function(df, n_samples = 2) {
    phonies <- apply(df, 2, function(col) {
      sample(col, n_samples)
    })
    # via as.data.frame so the unnamed matrix converts cleanly; columns are
    # renamed by the caller anyway
    as_tibble(as.data.frame(phonies))
  }
  phonies <- create_phony_samples(cna, number_of_phonies)
  # classification labels: 'phony' for resampled rows, 'real' for originals
  phonies['labels'] <- replicate(number_of_phonies, 'phony')
  cna['labels'] <- replicate(nrow(cna), 'real')
  names <- c(names, 'labels')
  colnames(phonies) <- names
  colnames(cna) <- names
  # Randomly split a tibble into two halves; returns list(train, test).
  get_random_halves <- function(df) {
    indicies <- seq_len(nrow(df))
    train_indicies <- sample.int(nrow(df), nrow(df) / 2)
    test_indicies <- setdiff(indicies, train_indicies)
    list(df[train_indicies, ], df[test_indicies, ])
  }
  phony_splits <- get_random_halves(phonies)
  real_splits <- get_random_halves(cna)
  phony_train <- phony_splits[[1]]
  phony_test <- phony_splits[[2]]
  real_train <- real_splits[[1]]
  real_test <- real_splits[[2]]
  # combine real and phony halves into the final train / test sets
  train <- rbind(real_train, phony_train)
  test <- rbind(real_test, phony_test)
  colnames(train) <- names
  colnames(test) <- names
  write_csv(train, train_file_name)
  write_csv(test, test_file_name)
  # PCA of test + train combined (test rows first, as before), excluding the
  # trailing 'labels' column.
  # BUG FIX: the old index (1:ncol(combind)-1) parsed as 0:(ncol-1) and only
  # worked because index 0 is silently dropped; 1:(ncol(combind) - 1) states
  # the intent directly.
  combind <- rbind(test, train)
  pca <- prcomp(combind[, 1:(ncol(combind) - 1)])
  summary_pca <- summary(pca)
  pca_tibble <- tibble('pc1' = pca$x[, 1], 'pc2' = pca$x[, 2], 'label' = combind$labels)
  plot <- ggplot(data = pca_tibble, aes(x = pc1, y = pc2, colour = label)) +
    geom_point() +
    ggtitle('PCA Resampling Transcriptomics') +
    xlab(paste('PC1', summary_pca$importance[2, 1])) +
    ylab(paste('PC2', summary_pca$importance[2, 2]))
  plot
  ggsave(plot_file_name, plot)
}
# Driver: generate 100 resampled train/test splits (plus a PCA plot each)
# for the transcriptomics and CNA data sets. Paths below are relative to
# the project directory.
setwd('C:/Users/Michael/Documents/Holden/')
# --- Transcriptomics ---
inputdata <- read_tsv('Data/Data-Uncompressed-Original/transcriptomics.cct')
trans_dir <- 'Data/Distribution-Data-Set/Transcriptomics-100/'
for (i in 1:100) {
  train_name <- sprintf('%strain_transcriptomics_distribution%d.csv', trans_dir, i)
  test_name <- sprintf('%stest_transcriptomics_distribution%d.csv', trans_dir, i)
  plot_name <- sprintf('%splots/pca-resampling-transcriptomics%d.png', trans_dir, i)
  # the replicate index doubles as the RNG seed
  make_fake_data(inputdata, train_name, test_name, plot_name, i)
  print(i)
}
# --- Copy-number alterations (CNA) ---
inputdata <- read_tsv('Data/Data-Uncompressed-Original/CNA.cct')
cna_dir <- 'Data/Distribution-Data-Set/CNA-100/'
for (i in 1:100) {
  train_name <- sprintf('%strain_cna_distribution%d.csv', cna_dir, i)
  test_name <- sprintf('%stest_cna_distribution%d.csv', cna_dir, i)
  plot_name <- sprintf('%splots/pca-resampling-cna%d.png', cna_dir, i)
  make_fake_data(inputdata, train_name, test_name, plot_name, i)
  print(i)
}
inputdata <- read_tsv('Data/Data-Uncompressed-Original/proteomics.cct')
for(i in seq(1,100)){
train_name <- paste('Data/Distribution-Data-Set/Proteomics-100/train_proteomics_distribution',i,'.csv',sep='')
test_name <- paste('Data/Distribution-Data-Set/Proteomics-100/test_proteomics_distribution',i,'.csv',sep='')
plot_name <- paste('Data/Distribution-Data-Set/Proteomics-100/plots/pca-resampling-proteomics',i,'.png',sep='')
make_fake_data(inputdata,train_name,test_name,plot_name,i)
print(i)
} | /Phony-Scripts/distribution-data-100-all-types.R | no_license | MSBradshaw/FakeData | R | false | false | 4,801 | r | library(readr)
library(tibble)
library(ggplot2)
# Build a labelled train/test data set mixing real samples with "phony"
# samples resampled from per-protein value distributions; writes both
# splits to csv and saves a 2-D PCA plot.
# Args:
#   data            - table with feature names in column 'idx'; remaining
#                     columns are samples (features are rows on input)
#   train_file_name - output path for the training csv
#   test_file_name  - output path for the test csv
#   plot_file_name  - output path for the PCA image
#   seed            - RNG seed for reproducible resampling/splitting
# Side effects: writes two csv files and one image file.
make_fake_data <- function(data,train_file_name,test_file_name,plot_file_name,seed=0){
set.seed(seed)
cna <- data
# treat missing measurements as zero
cna[is.na(cna)] <- 0
#get the protein names
names <- unlist(cna['idx'])
#remove the row names
cna <- cna[,2:ncol(cna)]
#transpose the data
# NOTE(review): as.tibble() is deprecated in current tibble; as_tibble()
# is the supported spelling.
cna <- as.tibble(t(cna))
colnames(cna) <- names
#function take from: https://rdrr.io/cran/stackoverflow/src/R/chunk2.R
chunk2 <- function(x,n) split(x, cut(seq_along(x), n, labels = FALSE))
#creates phony samples by randomly sampling from the list of values found for each protein
create_phony_samples <- function(df,n_samples=2) {
#get n_samples from each protein's distribution
phonies <- apply(df,2,function(col){
sample(col,n_samples)
})
# NOTE(review): unlist() flattens the n_samples x n_proteins matrix
# column-major, but chunk2() re-splits it into contiguous chunks, so each
# phony "sample" mixes values drawn for different proteins -- confirm this
# scrambling is intended (it contradicts the comment above).
phony_list <- chunk2(as.numeric(unlist(phonies)),n_samples)
phony_tibble <- tibble(phony_list[[1]])
for(i in seq(2,n_samples)) {
phony_tibble[,i] <- phony_list[[i]]
}
return( as.tibble(t(phony_tibble)) )
}
#number of phonies samples to be created
number_of_phonies <- 50
#create the phonie samples
phonies <- create_phony_samples(cna,number_of_phonies)
#add classificiation labels to the data
phonies['labels'] <- replicate(number_of_phonies,'phony')
cna['labels'] <- replicate(nrow(cna),'real')
#add labels onto the list of column names
names <- c(names,'labels')
colnames(phonies) <- names
colnames(cna) <- names
#takes in a tibble
#makes two random 50% splits of the tibble
#returns a list containing the two newly created tibbles
get_random_halves<- function(df){
indicies <- seq(1,nrow(df))
train_indicies <- sample.int(nrow(df), (nrow(df)/2))
test_indicies <- setdiff(indicies, train_indicies)
return(list(df[train_indicies,],df[test_indicies,]))
}
#get random splits of the phony and real datasets
phony_splits <- get_random_halves(phonies)
real_splits <- get_random_halves(cna)
#extract the train and test sets from the lists return from get_random_halves()
phony_train <- phony_splits[[1]]
phony_test <- phony_splits[[2]]
real_train <- real_splits[[1]]
real_test <- real_splits[[2]]
#combind the real and fake data into the train and test sets
train <- rbind(real_train,phony_train)
test <- rbind(real_test,phony_test)
#give column names to the train and test
colnames(train) <- names
colnames(test) <- names
#write train and test sets as csv
write_csv(train,train_file_name)
write_csv(test,test_file_name)
#make a PCA plot of the combind data
combind <- test
combind[(nrow(combind)+1):(nrow(combind)+nrow(train)),] <- train
# NOTE(review): (1:ncol(combind)-1) evaluates as 0:(ncol-1); index 0 is
# silently dropped, so this happens to select columns 1..ncol-1 (all but
# 'labels').  1:(ncol(combind)-1) would state that intent directly.
pca <- prcomp(combind[,(1:ncol(combind)-1)])
summary_pca <- summary(pca)
pca_tibble <- tibble('pc1'=pca$x[,1],'pc2'=pca$x[,2],'label'=combind$labels)
plot <- ggplot(data = pca_tibble, aes(x=pc1,y=pc2,colour=label)) +
geom_point() +
ggtitle('PCA Resampling Transcriptomics') +
xlab(paste('PC1',summary_pca$importance[2,1]) ) +
ylab(paste('PC2',summary_pca$importance[2,2]) )
plot
ggsave(plot_file_name,plot)
}
# Driver: generate 100 resampled train/test splits per omics data set.
# The replicate index i doubles as the RNG seed passed to make_fake_data().
setwd('C:/Users/Michael/Documents/Holden/')
# --- Transcriptomics ---
inputdata <- read_tsv('Data/Data-Uncompressed-Original/transcriptomics.cct')
for(i in seq(1,100)){
train_name <- paste('Data/Distribution-Data-Set/Transcriptomics-100/train_transcriptomics_distribution',i,'.csv',sep='')
test_name <- paste('Data/Distribution-Data-Set/Transcriptomics-100/test_transcriptomics_distribution',i,'.csv',sep='')
plot_name <- paste('Data/Distribution-Data-Set/Transcriptomics-100/plots/pca-resampling-transcriptomics',i,'.png',sep='')
make_fake_data(inputdata,train_name,test_name,plot_name,i)
# progress indicator
print(i)
}
# --- Copy-number alterations (CNA) ---
inputdata <- read_tsv('Data/Data-Uncompressed-Original/CNA.cct')
for(i in seq(1,100)){
train_name <- paste('Data/Distribution-Data-Set/CNA-100/train_cna_distribution',i,'.csv',sep='')
test_name <- paste('Data/Distribution-Data-Set/CNA-100/test_cna_distribution',i,'.csv',sep='')
plot_name <- paste('Data/Distribution-Data-Set/CNA-100/plots/pca-resampling-cna',i,'.png',sep='')
make_fake_data(inputdata,train_name,test_name,plot_name,i)
print(i)
}
inputdata <- read_tsv('Data/Data-Uncompressed-Original/proteomics.cct')
for(i in seq(1,100)){
train_name <- paste('Data/Distribution-Data-Set/Proteomics-100/train_proteomics_distribution',i,'.csv',sep='')
test_name <- paste('Data/Distribution-Data-Set/Proteomics-100/test_proteomics_distribution',i,'.csv',sep='')
plot_name <- paste('Data/Distribution-Data-Set/Proteomics-100/plots/pca-resampling-proteomics',i,'.png',sep='')
make_fake_data(inputdata,train_name,test_name,plot_name,i)
print(i)
} |
##The Sentials
library(Rstem)
library(sentiment)
library(qdap)
library(plyr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
# remove retweet entities
sentinal = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", sentinal)
# remove at people
sentinal = gsub("@\\w+", "", sentinal)
# remove punctuation
sentinal = gsub("[[:punct:]]", "", sentinal)
# remove numbers
sentinal = gsub("[[:digit:]]", "", sentinal)
# remove html links
sentinal = gsub("http\\w+", "", sentinal)
# remove unnecessary spaces
sentinal = gsub("[ \t]{2,}", "", sentinal)
sentinal = gsub("^\\s+|\\s+$", "", sentinal)
# classify emotion
class_emo = classify_emotion(sentinal, algorithm="bayes", prior=1.0)
# get emotion best fit
emotion = class_emo[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
# classify polarity
class_pol = classify_polarity(sentinal, algorithm="bayes")
# get polarity best fit
polarity = class_pol[,4]
# data frame with results
sent_df = data.frame(text=sentinal, emotion=emotion,
polarity=polarity, stringsAsFactors=FALSE)
# sort data frame
sent_df = within(sent_df,
emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# plot distribution of emotions
png("Sentinals.png", width=1280,height=800)
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion)) +
scale_fill_brewer(palette="Dark2") +
ggtitle("Sentiment Analysis of Tweets:\n(classification by Emotion)") +
theme(legend.position="right") + ylab("Number of Tweets") + xlab("Emotion Categories")
dev.off()
# plot distribution of polarity
png("Polars.png", width=1280,height=800)
ggplot(sent_df, aes(x=polarity)) +
geom_bar(aes(y=..count.., fill=polarity)) +
scale_fill_brewer(palette="RdGy") +
ggtitle("Sentiment Analysis of Tweets:\n(classification by Polarity)") +
theme(legend.position="right") + ylab("Number of Tweets") + xlab("Polarity Categories")
| /Election analysis with twitter data using R/sentinals.r | no_license | aj399/Algorithms-and-mini-projects | R | false | false | 1,954 | r |
##The Sentials
# Sentiment analysis of tweets: clean the text, classify emotion and
# polarity with the `sentiment` package, and save two bar charts.
# NOTE(review): `sentinal` (the tweet text vector) is used below before it
# is ever defined in this file -- it must be created upstream; confirm.
library(Rstem)
library(sentiment)
library(qdap)
library(plyr)
library(ggplot2)
library(wordcloud)
library(RColorBrewer)
# remove retweet entities
sentinal = gsub("(RT|via)((?:\\b\\W*@\\w+)+)", "", sentinal)
# remove at people
sentinal = gsub("@\\w+", "", sentinal)
# remove punctuation
# NOTE(review): punctuation is stripped before the "http\\w+" pass below,
# which changes what that pattern can match -- confirm the ordering is
# intentional.
sentinal = gsub("[[:punct:]]", "", sentinal)
# remove numbers
sentinal = gsub("[[:digit:]]", "", sentinal)
# remove html links
sentinal = gsub("http\\w+", "", sentinal)
# remove unnecessary spaces
sentinal = gsub("[ \t]{2,}", "", sentinal)
sentinal = gsub("^\\s+|\\s+$", "", sentinal)
# classify emotion
class_emo = classify_emotion(sentinal, algorithm="bayes", prior=1.0)
# get emotion best fit (column 7 of the classifier output)
emotion = class_emo[,7]
# substitute NA's by "unknown"
emotion[is.na(emotion)] = "unknown"
# classify polarity
class_pol = classify_polarity(sentinal, algorithm="bayes")
# get polarity best fit (column 4 of the classifier output)
polarity = class_pol[,4]
# data frame with results
sent_df = data.frame(text=sentinal, emotion=emotion,
polarity=polarity, stringsAsFactors=FALSE)
# sort data frame: emotion levels ordered by descending frequency
sent_df = within(sent_df,
emotion <- factor(emotion, levels=names(sort(table(emotion), decreasing=TRUE))))
# plot distribution of emotions
# NOTE(review): when run via source(), top-level ggplot objects are not
# auto-printed, so these png files may come out empty -- wrap in print().
png("Sentinals.png", width=1280,height=800)
ggplot(sent_df, aes(x=emotion)) + geom_bar(aes(y=..count.., fill=emotion)) +
scale_fill_brewer(palette="Dark2") +
ggtitle("Sentiment Analysis of Tweets:\n(classification by Emotion)") +
theme(legend.position="right") + ylab("Number of Tweets") + xlab("Emotion Categories")
dev.off()
# plot distribution of polarity
# NOTE(review): this second device is never closed with dev.off(), so
# Polars.png may be left empty/truncated -- flagged for follow-up.
png("Polars.png", width=1280,height=800)
ggplot(sent_df, aes(x=polarity)) +
geom_bar(aes(y=..count.., fill=polarity)) +
scale_fill_brewer(palette="RdGy") +
ggtitle("Sentiment Analysis of Tweets:\n(classification by Polarity)") +
theme(legend.position="right") + ylab("Number of Tweets") + xlab("Polarity Categories")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlation_filtering_clustering.R
\name{num_cell_after_cor_filt_scExp}
\alias{num_cell_after_cor_filt_scExp}
\title{Number of cells before & after correlation filtering}
\usage{
num_cell_after_cor_filt_scExp(scExp, scExp_cf)
}
\arguments{
\item{scExp}{SingleCellExperiment object before correlation filtering.}
\item{scExp_cf}{SingleCellExperiment object after correlation filtering.}
}
\value{
A colored kable with the number of cells per sample before and after
filtering for display
}
\description{
Number of cells before & after correlation filtering
}
\examples{
data("scExp")
scExp_cf = correlation_and_hierarchical_clust_scExp(scExp)
scExp_cf = filter_correlated_cell_scExp(scExp_cf,
corr_threshold = 99, percent_correlation = 5)
num_cell_after_cor_filt_scExp(scExp,scExp_cf)
}
| /man/num_cell_after_cor_filt_scExp.Rd | no_license | hjames1/ChromSCape | R | false | true | 866 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correlation_filtering_clustering.R
\name{num_cell_after_cor_filt_scExp}
\alias{num_cell_after_cor_filt_scExp}
\title{Number of cells before & after correlation filtering}
\usage{
num_cell_after_cor_filt_scExp(scExp, scExp_cf)
}
\arguments{
\item{scExp}{SingleCellExperiment object before correlation filtering.}
\item{scExp_cf}{SingleCellExperiment object after correlation filtering.}
}
\value{
A colored kable with the number of cells per sample before and after
filtering for display
}
\description{
Number of cells before & after correlation filtering
}
\examples{
data("scExp")
scExp_cf = correlation_and_hierarchical_clust_scExp(scExp)
scExp_cf = filter_correlated_cell_scExp(scExp_cf,
corr_threshold = 99, percent_correlation = 5)
num_cell_after_cor_filt_scExp(scExp,scExp_cf)
}
|
# Build LaTeX tables (via xtable) summarising model performance in the
# simulation study: one point-estimate table and one table of pairwise
# bootstrap win proportions.
library(xtable)
### performance table for simulations
# results from pred_fam.ipynb
# Per-column rounding after reordering: (index, OE, AUC, BS, corr) get
# 1, 2, 2, 3, 2 decimal places respectively.
# NOTE(review): column meanings inferred from the c(1, 5, 2, 3, 4)
# reordering and the perf.table headers -- confirm against the csv schema.
digits = c(1, 2, 2, 3, 2)
res.fcnn = read.csv("../results/res_800000_30_0.csv")
res.fcnn = res.fcnn[, c(1, 5, 2, 3, 4)]
res.fcnn = sapply(1:5, function(x) round(res.fcnn[, x], digits[x]))
res.cnn = read.csv("../results/res_cnn_800000_15.csv")
res.cnn = res.cnn[, c(1, 5, 2, 3, 4)]
res.cnn = sapply(1:5, function(x) round(res.cnn[, x], digits[x]))
# BRCAPRO has no correlation column in its csv; it is defined to be 1.
res.brca = read.csv("../results/res_brca.csv")
res.brca$corr = 1
res.brca = res.brca[, c(1, 4, 2, 3, 5)]
res.brca = sapply(1:5, function(x) round(res.brca[, x], digits[x]))
res.lr = read.csv("../results/res_lr.csv")
res.lr = res.lr[, c(1, 5, 2, 3, 4)]
res.lr = sapply(1:5, function(x) round(res.lr[, x], digits[x]))
# One row per model; each cell is "estimate (lower, upper)" built from the
# first three rows of each metric column (presumably point estimate and
# interval bounds -- confirm).
perf.table = data.frame(OE=rep(NA, 4), AUC=NA, BS=NA, corr=NA)
perf.table[1, ] = apply(res.fcnn[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[2, ] = apply(res.cnn[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[3, ] = apply(res.brca[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[4, ] = apply(res.lr[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
rownames(perf.table) = c("FCNN", "CNN", "BRCAPRO", "LR")
# NOTE(review): xtable was already attached at the top of the script; this
# second library() call is redundant (harmless).
library(xtable)
xtable(perf.table)
### comparisons across bootstrap replicates
# After transposing, rows = bootstrap replicates and columns = models in
# the order FCNN, CNN, BRCAPRO, LR; [-1, ] drops the first row of the
# transpose (presumably the csv's index column -- confirm orientation).
oe = t(read.csv("../results/oe_boot.csv"))[-1, ]
auc = t(read.csv("../results/auc_boot.csv"))[-1, ]
brier = t(read.csv("../results/bs_boot.csv"))[-1, ]
corr = t(read.csv("../results/corr_boot.csv"))[-1, ]
# For each pairwise comparison, count replicates where the first model
# wins: higher AUC/correlation, lower Brier score, O/E closer to 1.
boot.table = data.frame(comparison=c("FCNN>CNN", "FCNN>BRCAPRO", "FCNN>LR", "CNN>BRCAPRO", "CNN>LR"), OE=NA, AUC=NA, BS=NA, cor=NA)
boot.table[, "AUC"] = c(sapply(2:4, function(x) length(which(auc[,1] > auc[,x]))), sapply(3:4, function(x) length(which(auc[,2] > auc[,x]))))
boot.table[, "BS"] = c(sapply(2:4, function(x) length(which(brier[,1] < brier[,x]))), sapply(3:4, function(x) length(which(brier[,2] < brier[,x]))))
boot.table[, "OE"] = c(sapply(2:4, function(x) length(which( abs(oe[,1]-1) < abs(oe[,x]-1) ))), sapply(3:4, function(x) length(which(abs(oe[,2]-1) < abs(oe[,x]-1) ))))
boot.table[, "cor"] = c(sapply(2:4, function(x) length(which(corr[,1] > corr[,x]))), sapply(3:4, function(x) length(which(corr[,2] > corr[,x]))))
# convert win counts to proportions of bootstrap replicates
boot.table[, 2:5] = boot.table[, 2:5]/nrow(oe)
print(xtable(boot.table, digits=c(0, 0, 3, 3, 3, 3)), include.rownames=F)
| /tables_figures/table_sim.R | no_license | zoeguan/nn_cancer_risk | R | false | false | 2,415 | r | library(xtable)
### performance table for simulations
# results from pred_fam.ipynb
# Builds two LaTeX tables via xtable: point estimates with intervals, and
# pairwise bootstrap win proportions.
# Per-column rounding after reordering: (index, OE, AUC, BS, corr) get
# 1, 2, 2, 3, 2 decimal places -- column meanings inferred from the
# reorderings and perf.table headers; confirm against the csv schema.
digits = c(1, 2, 2, 3, 2)
res.fcnn = read.csv("../results/res_800000_30_0.csv")
res.fcnn = res.fcnn[, c(1, 5, 2, 3, 4)]
res.fcnn = sapply(1:5, function(x) round(res.fcnn[, x], digits[x]))
res.cnn = read.csv("../results/res_cnn_800000_15.csv")
res.cnn = res.cnn[, c(1, 5, 2, 3, 4)]
res.cnn = sapply(1:5, function(x) round(res.cnn[, x], digits[x]))
# BRCAPRO has no correlation column in its csv; it is defined to be 1.
res.brca = read.csv("../results/res_brca.csv")
res.brca$corr = 1
res.brca = res.brca[, c(1, 4, 2, 3, 5)]
res.brca = sapply(1:5, function(x) round(res.brca[, x], digits[x]))
res.lr = read.csv("../results/res_lr.csv")
res.lr = res.lr[, c(1, 5, 2, 3, 4)]
res.lr = sapply(1:5, function(x) round(res.lr[, x], digits[x]))
# One row per model; each cell is "estimate (lower, upper)".
perf.table = data.frame(OE=rep(NA, 4), AUC=NA, BS=NA, corr=NA)
perf.table[1, ] = apply(res.fcnn[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[2, ] = apply(res.cnn[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[3, ] = apply(res.brca[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
perf.table[4, ] = apply(res.lr[, 2:5], 2, function(x) paste0(x[1], " (", x[2], ", ", x[3], ")"))
rownames(perf.table) = c("FCNN", "CNN", "BRCAPRO", "LR")
# NOTE(review): redundant -- xtable is already attached above.
library(xtable)
xtable(perf.table)
### comparisons across bootstrap replicates
# rows = bootstrap replicates, columns = models (FCNN, CNN, BRCAPRO, LR);
# [-1, ] drops the first row of the transpose (presumably an index column).
oe = t(read.csv("../results/oe_boot.csv"))[-1, ]
auc = t(read.csv("../results/auc_boot.csv"))[-1, ]
brier = t(read.csv("../results/bs_boot.csv"))[-1, ]
corr = t(read.csv("../results/corr_boot.csv"))[-1, ]
# Count replicates where the first model wins each metric: higher AUC /
# correlation, lower Brier score, O/E closer to 1.
boot.table = data.frame(comparison=c("FCNN>CNN", "FCNN>BRCAPRO", "FCNN>LR", "CNN>BRCAPRO", "CNN>LR"), OE=NA, AUC=NA, BS=NA, cor=NA)
boot.table[, "AUC"] = c(sapply(2:4, function(x) length(which(auc[,1] > auc[,x]))), sapply(3:4, function(x) length(which(auc[,2] > auc[,x]))))
boot.table[, "BS"] = c(sapply(2:4, function(x) length(which(brier[,1] < brier[,x]))), sapply(3:4, function(x) length(which(brier[,2] < brier[,x]))))
boot.table[, "OE"] = c(sapply(2:4, function(x) length(which( abs(oe[,1]-1) < abs(oe[,x]-1) ))), sapply(3:4, function(x) length(which(abs(oe[,2]-1) < abs(oe[,x]-1) ))))
boot.table[, "cor"] = c(sapply(2:4, function(x) length(which(corr[,1] > corr[,x]))), sapply(3:4, function(x) length(which(corr[,2] > corr[,x]))))
# convert win counts to proportions of bootstrap replicates
boot.table[, 2:5] = boot.table[, 2:5]/nrow(oe)
print(xtable(boot.table, digits=c(0, 0, 3, 3, 3, 3)), include.rownames=F)
|
# Statistical Analysis of Data from
#load data
library(readr)
# Mesocosm data; Herbivory, Label and Treatment are read as factors.
ch2 <- read_csv("Chapter2_complieddata.csv",
col_types = cols(Herbivory = col_factor(levels = c("0",
"1")), Label = col_factor(levels = c("0",
"1")), Treatment = col_factor(levels = c("1",
"2", "3", "4"))))
# percent-N and percent-C tables (perC is read but not used below)
perN <- read_csv("perN_balance.csv")
perC <- read_csv("perC_balance.csv")
# Calculate the decomposition of litter N
# Incubation length in days: 289-170 for Label==0 or Plot < 9, otherwise
# 290-170 -- presumably two collection dates; confirm.
Ldecomptime = ifelse(ch2$Label==0|ch2$Plot <9, 289-170, 290-170)
# Litter N remaining (g): remaining litter mass x litter %N.
# NOTE(review): this multiplication relies on row-for-row alignment between
# Chapter2_complieddata.csv and perN_balance.csv -- verify.
ch2["LitterN2"] = ch2$Litter*perN$Litter/100
# Net litter N loss per day; the 7 appears to be the initial litter mass in
# grams (confirm), and the 1000 converts g to mg.
ch2["LNDecomp"] = 1000*(7*(ch2$perN/100) - ch2$LitterN2)/Ldecomptime # mg-N day-1
#Subset out the control plots
ch2exp = subset(ch2, Plot <41)
head(ch2exp)
# create center data frame for calculating vif
library(car)
variablestocenter = c("Isopods","N_Sol_June", "SIR_June", "Worms","NE_June", "NS_June")
ch2exp_center = ch2exp
for(i in 1:length(variablestocenter)) {
ch2exp_center[variablestocenter[i]] = ch2exp_center[variablestocenter[i]] - colMeans(ch2exp_center[variablestocenter[i]], na.rm=T)
}
#*********************************************
# Plants
#*********************************************
# Collinearity screen on the centered data: all VIFs should be below 10.
vif(lm(Plant~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
# Backward elimination: start from the full model and drop one term per
# step (chosen by inspecting summary() p-values at each step).
P1 = lm(Plant~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(P1)
P2 = update(P1, .~. -Label:N_Sol_June); summary(P2)
P3 = update(P2, .~. -Label:Isopods); summary(P3)
P4 = update(P3, .~. -Herbivory:Label); summary(P4)
P5 = update(P4, .~. -NE_June); summary(P5)
P6 = update(P5, .~. -Herbivory:Isopods); summary(P6)
# NOTE(review): this step overwrites P6 instead of creating P7, so the
# intermediate model is lost -- inconsistent with the other chains; confirm.
P6 = update(P6, .~. -Label); summary(P6)
# check if removal of high isopods and worm mesocosms matters
# (sensitivity check: refit the final model excluding high-density plots)
summary(lm(formula(P6), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Litter Decomposition
#*********************************************
vif(lm(LNDecomp~(Herbivory+Label+Isopods)^2 +N_Sol_June+SIR_June+Worms + NE_June, data=ch2exp_center))<10
# Backward elimination for litter N decomposition (LNDecomp).
LND1 = lm(LNDecomp~(Herbivory+Label+Isopods)^2 +N_Sol_June+SIR_June+Worms + NE_June, data=ch2exp); summary(LND1)
LND2 = update(LND1, .~. -NE_June); summary(LND2)
LND3 = update(LND2, .~. -N_Sol_June); summary(LND3)
LND4 = update(LND3, .~. -Worms); summary(LND4)
LND5 = update(LND4, .~. -Herbivory:Isopods); summary(LND5)
LND6 = update(LND5, .~. -Herbivory:Label); summary(LND6)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(LND6), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Extractable Nitrogen
#*********************************************
# NOTE(review): unlike the other responses, no VIF screen is run here.
NE1 = lm(NE_Oct~(Herbivory+Label+Isopods+ NE_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp); summary(NE1)
NE2 = update(NE1, .~. -Herbivory:Isopods); summary(NE2)
NE3 = update(NE2, .~. -SIR_June); summary(NE3)
NE4 = update(NE3, .~. -Herbivory:Label); summary(NE4)
NE5 = update(NE4, .~. -Label:Isopods); summary(NE5)
NE6 = update(NE5, .~. -Herbivory:NE_June); summary(NE6)
NE7 = update(NE6, .~. -Isopods:NE_June); summary(NE7)
NE8 = update(NE7, .~. -Isopods); summary(NE8)
NE9 = update(NE8, .~. -Herbivory); summary(NE9)
NE10 = update(NE9, .~. -Label:NE_June); summary(NE10)
NE11 = update(NE10, .~. -Worms); summary(NE11)
NE12 = update(NE11, .~. -NE_June); summary(NE12)
# NOTE(review): Herbivory:SIR_June never appears in this model (SIR_June
# enters only as a main effect, removed at NE3), so this update should be
# a no-op and NE13 should equal NE12 -- verify.
NE13 = update(NE12, .~. -Herbivory:SIR_June); summary(NE13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(NE13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Ion exchange membrane Nitrogen
#*********************************************
# Collinearity screen on the centered data: all VIFs should be below 10.
vif(lm(NS_Oct~(Herbivory+Label+Isopods+ NS_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp_center))<10
# Backward elimination for October IEM nitrogen (NS_Oct).
NS1 = lm(NS_Oct~(Herbivory+Label+Isopods+ NS_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp); summary(NS1)
NS2 = update(NS1, .~. -Herbivory:Isopods); summary(NS2)
NS3 = update(NS2, .~. -Label:Isopods); summary(NS3)
NS4 = update(NS3, .~. -Herbivory:Label); summary(NS4)
NS5 = update(NS4, .~. -N_Sol_June); summary(NS5)
NS6 = update(NS5, .~. -SIR_June); summary(NS6)
NS7 = update(NS6, .~. -Herbivory:NS_June); summary(NS7)
NS8 = update(NS7, .~. -Label:NS_June); summary(NS8)
NS9 = update(NS8, .~. -Label); summary(NS9)
NS10 = update(NS9, .~. -Isopods:NS_June ); summary(NS10)
NS11 = update(NS10, .~. -NS_June); summary(NS11)
NS12 = update(NS11, .~. -Isopods); summary(NS12)
NS13 = update(NS12, .~. -Worms); summary(NS13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(NS13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Roots
#*********************************************
vif(lm(Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
# Backward elimination for root biomass.
R1 = lm(Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(R1)
R2 = update(R1, .~. -Herbivory:N_Sol_June); summary(R2)
R3 = update(R2, .~. -Isopods:N_Sol_June); summary(R3)
R4 = update(R3, .~. -Label:Isopods); summary(R4)
R5 = update(R4, .~. -Worms); summary(R5)
R6 = update(R5, .~. -SIR_June); summary(R6)
R7 = update(R6, .~. -NE_June); summary(R7)
R8 = update(R7, .~. -Herbivory:Label); summary(R8)
R9 = update(R8, .~. -Label:N_Sol_June); summary(R9)
R10 = update(R9, .~. -N_Sol_June); summary(R10)
R11 = update(R10, .~. -Isopods:Herbivory); summary(R11)
R12 = update(R11, .~. -Herbivory); summary(R12)
R13 = update(R12, .~. -Isopods); summary(R13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(R13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Total Plant Biomass
#*********************************************
# Response is shoot (Plant) plus root biomass; this chain mirrors the
# Roots chain step for step.
vif(lm(Plant + Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
TPR1 = lm(Plant + Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(TPR1)
TPR2 = update(TPR1, .~. -Herbivory:N_Sol_June); summary(TPR2)
TPR3 = update(TPR2, .~. -Isopods:N_Sol_June); summary(TPR3)
TPR4 = update(TPR3, .~. -Label:Isopods); summary(TPR4)
TPR5 = update(TPR4, .~. -Worms); summary(TPR5)
TPR6 = update(TPR5, .~. -SIR_June); summary(TPR6)
TPR7 = update(TPR6, .~. -NE_June); summary(TPR7)
TPR8 = update(TPR7, .~. -Herbivory:Label); summary(TPR8)
TPR9 = update(TPR8, .~. -Label:N_Sol_June); summary(TPR9)
TPR10 = update(TPR9, .~. -N_Sol_June); summary(TPR10)
TPR11 = update(TPR10, .~. -Isopods:Herbivory); summary(TPR11)
TPR12 = update(TPR11, .~. -Herbivory); summary(TPR12)
TPR13 = update(TPR12, .~. -Isopods); summary(TPR13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(TPR13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Forbs
#*********************************************
# Collinearity screen on the centered data: all VIFs should be below 10.
vif(lm(Forbs~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
# Backward elimination for forb biomass.
F1 = lm(Forbs~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(F1)
F2 = update(F1, .~. -Label:N_Sol_June); summary(F2)
F3 = update(F2, .~. -Herbivory:Label); summary(F3)
F4 = update(F3, .~. -NE_June); summary(F4)
F5 = update(F4, .~. -SIR_June); summary(F5)
F6 = update(F5, .~. -Label:Isopods); summary(F6)
F7 = update(F6, .~. -Herbivory:Isopods); summary(F7)
F8 = update(F7, .~. -Worms); summary(F8)
F9 = update(F8, .~. -Isopods:N_Sol_June); summary(F9)
F10 = update(F9, .~. -Isopods); summary(F10)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(F10), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Grass
#*********************************************
vif(lm(Grass~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
# Backward elimination for grass biomass.
G1 = lm(Grass~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(G1)
G2 = update(G1, .~. -Herbivory:Isopods); summary(G2)
G3 = update(G2, .~. -Herbivory:N_Sol_June); summary(G3)
G4 = update(G3, .~. -Label:N_Sol_June); summary(G4)
G5 = update(G4, .~. -Label:Isopods); summary(G5)
G6 = update(G5, .~. -Herbivory:Label); summary(G6)
G7 = update(G6, .~. -Isopods:N_Sol_June); summary(G7)
G8 = update(G7, .~. -Isopods); summary(G8)
G9 = update(G8, .~. -N_Sol_June); summary(G9)
G10 = update(G9, .~. -NE_June); summary(G10)
G11 = update(G10, .~. -Herbivory); summary(G11)
G12 = update(G11, .~. -Worms); summary(G12)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(G12), data=subset(ch2exp, Isopods <20 & Worms <20)))
# Plot Model Results ------------------------------------------------------
#*********************************************
# Create predictions
#*********************************************
# Build a prediction grid: for each Herbivory x Label treatment cell, an
# integer sequence of isopod densities spanning the observed range in that
# cell.  (Replaces the previous assign()/get-by-name pattern with a plain
# list, which avoids creating Treat00..Treat11 in the global environment;
# those objects were never used again.)
treat_grids <- list()
for (i in 0:1) {
  for (j in 0:1) {
    isorange <- range(subset(ch2exp, Herbivory == i & Label == j)$Isopods)
    isoseq <- seq(isorange[1], isorange[2], by = 1)
    treat_grids[[length(treat_grids) + 1]] <-
      cbind(isoseq, rep(i, length(isoseq)), rep(j, length(isoseq)))
  }
}
# Same stacking order as the old rbind(Treat00, Treat01, Treat10, Treat11).
newdata <- as.data.frame(do.call(rbind, treat_grids))
names(newdata) <- c("Isopods", "Herbivory", "Label")
n_grid <- nrow(newdata)
# Hold the remaining covariates at their sample means (N_Sol_June fixed at
# 2, matching the original script).
newdata["Worms"] <- rep(mean(ch2exp$Worms), times = n_grid)
newdata["SIR_June"] <- rep(mean(ch2exp$SIR_June, na.rm = TRUE), times = n_grid)
newdata["N_Sol_June"] <- rep(2, times = n_grid)
newdata["Herbivory"] <- as.factor(newdata$Herbivory)
newdata["Label"] <- as.factor(newdata$Label)
newdata["NE_June"] <- rep(mean(ch2exp$NE_June), times = n_grid)
newdata["NS_June"] <- rep(mean(ch2exp$NS_June), times = n_grid)
# Fitted values and standard errors from the final selected models:
# P6 = plant biomass, NE13 = extractable N, LND6 = litter N decomposition.
newdata["Npredict"] <- predict(P6, newdata)
newdata["Npredictse"] <- predict(P6, newdata, se.fit = TRUE)$se.fit
newdata["NpredictNE"] <- predict(NE13, newdata)
newdata["NpredictseNE"] <- predict(NE13, newdata, se.fit = TRUE)$se.fit
newdata["NpredictL"] <- predict(LND6, newdata)
newdata["NpredictseL"] <- predict(LND6, newdata, se.fit = TRUE)$se.fit
#*********************************************
# Plot Results
#*********************************************
# Figure 2: 2x2 panel figure.  In each panel, points are observed
# mesocosms, lines are model predictions from the selected models, and the
# shaded polygons are +/- 1 SE ribbons.  Colours come from the Label
# factor (levels 0/1 -> palette codes 1/2, i.e. black/red).
pdf("Figure2.pdf", width=8, height=6)
par(oma=c(3,0,0,0),mar=c(3,5,2,2),mfrow=c(2,2), cex.lab=1.2)
#Litter
# Panel a: litter N decomposition vs isopods, no-herbivory mesocosms
# (predictions from LND6).
plot(LNDecomp~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab=expression(Litter~Decomp~(mg[N]~day^-1)), main="No Herbivory", type="n",
ylim=c(1,2), xlim=c(0,20))
text(x=0, y=1.9, label="a", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$LNDecomp, pch=19,col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
# Panel b: litter N decomposition, herbivory mesocosms (dashed lines).
plot(LNDecomp~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", main="Herbivory", type="n",
ylim=c(1,2), xlim=c(0,20))
text(x=0, y=1.9, label="b", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$LNDecomp, pch=10,col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
#NE Oct
# Panel c: October extractable soil N vs isopods, no-herbivory mesocosms
# (predictions from NE13).
plot(NE_Oct~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab=expression(Soil~N~(mu*g[N]~g[DMES]^-1)), type="n",
ylim=c(0,5), xlim=c(0,20))
text(x=0, y=4.5, label="c", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$NE_Oct, pch=19, col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
# Panel d: October extractable soil N, herbivory mesocosms (dashed lines).
plot(NE_Oct~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",
ylim=c(0,5), xlim=c(0,20))
text(x=0, y=4.5, label="d", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$NE_Oct, pch=10, col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
# shared x-axis label in the outer margin, then close the pdf device
mtext(text="Isopods (#)",side=1,line=0,outer=TRUE)
dev.off()
pdf("Figure3.pdf", width=8, height=6)
par(oma=c(1,1,0,0),mar=c(3,5,2,2),mfrow=c(2,2), cex.lab=1.2)
#Plants
plot(Plant~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",main="No Herbivory",
ylim=c(0,14), xlim=c(0,20))
text(x=0, y=13.5, label="a", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$Plant, pch=19, col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
plot(Plant~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",main="Herbivory",
ylim=c(0,14), xlim=c(0,20))
text(x=0, y=13.5, label="b", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$Plant, pch=10, col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
Isopodrange = seq(0,12, by=1)
Wormmean = rep(mean(ch2exp$Worms), times=length(Isopodrange))
SIRmean = rep(mean(ch2exp$SIR_June, na.rm=T), times=length(Isopodrange))
N_Sol_June1 = rep(1, times=length(Isopodrange))
Label1 = rep(1, times=length(Isopodrange))
#Herbivory 0
newdatplant1 = as.data.frame(cbind(Isopodrange, Wormmean, SIRmean, rep(0, length(Isopodrange)), N_Sol_June1))
names(newdatplant1) = c("Isopods", "Worms", "SIR_June", "Herbivory", "N_Sol_June")
newdatplant1["Herbivory"] = as.factor(newdatplant1$Herbivory)
newplant1 = predict(P6, newdata=newdatplant1, se.fit=T)$fit
newdatplant2 = newdatplant1
newdatplant2["N_Sol_June"] = newdatplant1$N_Sol_June + 1
newplant2 = predict(P6, newdata=newdatplant2, se.fit=T)$fit
newdatplant3 = newdatplant1
newdatplant3["N_Sol_June"] = newdatplant1$N_Sol_June + 2
newplant3 = predict(P6, newdata=newdatplant3, se.fit=T)$fit
newdatplant4 = newdatplant1
newdatplant4["N_Sol_June"] = newdatplant1$N_Sol_June + 3
newplant4 = predict(P6, newdata=newdatplant4, se.fit=T)$fit
newdatplant5 = newdatplant1
newdatplant5["N_Sol_June"] = 0
newplant5 = predict(P6, newdata=newdatplant5, se.fit=T)$fit
plot(newplant1~Isopodrange, pch=19, type="b", ylim=c(0,14), col="brown",
ylab="", xlab="", xlim=c(0,20))
text(x=0, y=13.5, label="c", cex=2)
points(newplant5~Isopodrange, pch=19, type="b", col="black")
points(newplant2~Isopodrange, pch=19, type="b", col="orange")
points(newplant3~Isopodrange, pch=19, type="b", col="red")
points(newplant4~Isopodrange, pch=19, type="b", col="green")
# Legend for the Solidago stem-count prediction lines (panel c).
# Colours are matched 1:1 to the plotted series: 0 stems = black (newplant5),
# 1 = brown (newplant1), 2 = orange (newplant2), 3 = red (newplant3),
# 4 = green (newplant4). The original vector carried a sixth, unused
# "darkgreen" entry; dropped so labels and colours stay in lockstep.
legend("bottomright", legend = c(0, 1, 2, 3, 4),
       col = c("black", "brown", "orange", "red", "green"),
       pch = 19, title = expression(italic(Solidago)), bty = "n",
       x.intersp = 0.5, y.intersp = 1)
# Herbivory 1
newdatplant1 = as.data.frame(cbind(Isopodrange, Wormmean, SIRmean, Label1, N_Sol_June1))
names(newdatplant1) = c("Isopods", "Worms", "SIR_June", "Herbivory", "N_Sol_June")
newdatplant1["Herbivory"] = as.factor(newdatplant1$Herbivory)
newplant1 = predict(P6, newdata=newdatplant1, se.fit=T)$fit
newdatplant2 = newdatplant1
newdatplant2["N_Sol_June"] = newdatplant1$N_Sol_June + 1
newplant2 = predict(P6, newdata=newdatplant2, se.fit=T)$fit
newdatplant3 = newdatplant1
newdatplant3["N_Sol_June"] = newdatplant1$N_Sol_June + 2
newplant3 = predict(P6, newdata=newdatplant3, se.fit=T)$fit
newdatplant4 = newdatplant1
newdatplant4["N_Sol_June"] = newdatplant1$N_Sol_June + 3
newplant4 = predict(P6, newdata=newdatplant4, se.fit=T)$fit
newdatplant5 = newdatplant1
newdatplant5["N_Sol_June"] = 0
newplant5 = predict(P6, newdata=newdatplant5, se.fit=T)$fit
plot(newplant1~Isopodrange, pch=19, type="b", ylim=c(0,14), col="brown",
ylab="", xlab="", xlim=c(0,20))
text(x=0, y=13.5, label="d", cex=2)
points(newplant5~Isopodrange, pch=19, type="b", col="black")
points(newplant2~Isopodrange, pch=19, type="b", col="orange")
points(newplant3~Isopodrange, pch=19, type="b", col="red")
points(newplant4~Isopodrange, pch=19, type="b", col="green")
mtext(text="Aboveground Plant Biomass (g)",side=2,line=-1,outer=TRUE)
mtext(text="Isopods (#)",side=1,line=0,outer=TRUE)
dev.off()
| /statisticalanalysis_herbdetplant.R | no_license | robertwbuchkowski/animals_litterdecomp_plants | R | false | false | 19,838 | r | # Statistical Analysis of Data from
#load data
library(readr)
ch2 <- read_csv("Chapter2_complieddata.csv",
col_types = cols(Herbivory = col_factor(levels = c("0",
"1")), Label = col_factor(levels = c("0",
"1")), Treatment = col_factor(levels = c("1",
"2", "3", "4"))))
perN <- read_csv("perN_balance.csv")
perC <- read_csv("perC_balance.csv")
# Calculate the decomposition of litter N
Ldecomptime = ifelse(ch2$Label==0|ch2$Plot <9, 289-170, 290-170)
ch2["LitterN2"] = ch2$Litter*perN$Litter/100
ch2["LNDecomp"] = 1000*(7*(ch2$perN/100) - ch2$LitterN2)/Ldecomptime # mg-N day-1
#Subset out the control plots
ch2exp = subset(ch2, Plot <41)
head(ch2exp)
# Run model selection -----------------------------------------------------
# create center data frame for calculating vif
library(car)
variablestocenter = c("Isopods","N_Sol_June", "SIR_June", "Worms","NE_June", "NS_June")
ch2exp_center = ch2exp
for(i in 1:length(variablestocenter)) {
ch2exp_center[variablestocenter[i]] = ch2exp_center[variablestocenter[i]] - colMeans(ch2exp_center[variablestocenter[i]], na.rm=T)
}
#*********************************************
# Plants
#*********************************************
vif(lm(Plant~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
P1 = lm(Plant~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(P1)
P2 = update(P1, .~. -Label:N_Sol_June); summary(P2)
P3 = update(P2, .~. -Label:Isopods); summary(P3)
P4 = update(P3, .~. -Herbivory:Label); summary(P4)
P5 = update(P4, .~. -NE_June); summary(P5)
P6 = update(P5, .~. -Herbivory:Isopods); summary(P6)
P6 = update(P6, .~. -Label); summary(P6)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(P6), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Litter Decomposition
#*********************************************
vif(lm(LNDecomp~(Herbivory+Label+Isopods)^2 +N_Sol_June+SIR_June+Worms + NE_June, data=ch2exp_center))<10
LND1 = lm(LNDecomp~(Herbivory+Label+Isopods)^2 +N_Sol_June+SIR_June+Worms + NE_June, data=ch2exp); summary(LND1)
LND2 = update(LND1, .~. -NE_June); summary(LND2)
LND3 = update(LND2, .~. -N_Sol_June); summary(LND3)
LND4 = update(LND3, .~. -Worms); summary(LND4)
LND5 = update(LND4, .~. -Herbivory:Isopods); summary(LND5)
LND6 = update(LND5, .~. -Herbivory:Label); summary(LND6)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(LND6), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Extractable Nitrogen
#*********************************************
NE1 = lm(NE_Oct~(Herbivory+Label+Isopods+ NE_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp); summary(NE1)
NE2 = update(NE1, .~. -Herbivory:Isopods); summary(NE2)
NE3 = update(NE2, .~. -SIR_June); summary(NE3)
NE4 = update(NE3, .~. -Herbivory:Label); summary(NE4)
NE5 = update(NE4, .~. -Label:Isopods); summary(NE5)
NE6 = update(NE5, .~. -Herbivory:NE_June); summary(NE6)
NE7 = update(NE6, .~. -Isopods:NE_June); summary(NE7)
NE8 = update(NE7, .~. -Isopods); summary(NE8)
NE9 = update(NE8, .~. -Herbivory); summary(NE9)
NE10 = update(NE9, .~. -Label:NE_June); summary(NE10)
NE11 = update(NE10, .~. -Worms); summary(NE11)
NE12 = update(NE11, .~. -NE_June); summary(NE12)
NE13 = update(NE12, .~. -Herbivory:SIR_June); summary(NE13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(NE13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Ion exchange membrane Nitrogen
#*********************************************
vif(lm(NS_Oct~(Herbivory+Label+Isopods+ NS_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp_center))<10
NS1 = lm(NS_Oct~(Herbivory+Label+Isopods+ NS_June)^2+N_Sol_June+SIR_June+Worms, data=ch2exp); summary(NS1)
NS2 = update(NS1, .~. -Herbivory:Isopods); summary(NS2)
NS3 = update(NS2, .~. -Label:Isopods); summary(NS3)
NS4 = update(NS3, .~. -Herbivory:Label); summary(NS4)
NS5 = update(NS4, .~. -N_Sol_June); summary(NS5)
NS6 = update(NS5, .~. -SIR_June); summary(NS6)
NS7 = update(NS6, .~. -Herbivory:NS_June); summary(NS7)
NS8 = update(NS7, .~. -Label:NS_June); summary(NS8)
NS9 = update(NS8, .~. -Label); summary(NS9)
NS10 = update(NS9, .~. -Isopods:NS_June ); summary(NS10)
NS11 = update(NS10, .~. -NS_June); summary(NS11)
NS12 = update(NS11, .~. -Isopods); summary(NS12)
NS13 = update(NS12, .~. -Worms); summary(NS13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(NS13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Roots
#*********************************************
vif(lm(Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
R1 = lm(Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(R1)
R2 = update(R1, .~. -Herbivory:N_Sol_June); summary(R2)
R3 = update(R2, .~. -Isopods:N_Sol_June); summary(R3)
R4 = update(R3, .~. -Label:Isopods); summary(R4)
R5 = update(R4, .~. -Worms); summary(R5)
R6 = update(R5, .~. -SIR_June); summary(R6)
R7 = update(R6, .~. -NE_June); summary(R7)
R8 = update(R7, .~. -Herbivory:Label); summary(R8)
R9 = update(R8, .~. -Label:N_Sol_June); summary(R9)
R10 = update(R9, .~. -N_Sol_June); summary(R10)
R11 = update(R10, .~. -Isopods:Herbivory); summary(R11)
R12 = update(R11, .~. -Herbivory); summary(R12)
R13 = update(R12, .~. -Isopods); summary(R13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(R13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Total Plant Biomass
#*********************************************
vif(lm(Plant + Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
TPR1 = lm(Plant + Roots~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(TPR1)
TPR2 = update(TPR1, .~. -Herbivory:N_Sol_June); summary(TPR2)
TPR3 = update(TPR2, .~. -Isopods:N_Sol_June); summary(TPR3)
TPR4 = update(TPR3, .~. -Label:Isopods); summary(TPR4)
TPR5 = update(TPR4, .~. -Worms); summary(TPR5)
TPR6 = update(TPR5, .~. -SIR_June); summary(TPR6)
TPR7 = update(TPR6, .~. -NE_June); summary(TPR7)
TPR8 = update(TPR7, .~. -Herbivory:Label); summary(TPR8)
TPR9 = update(TPR8, .~. -Label:N_Sol_June); summary(TPR9)
TPR10 = update(TPR9, .~. -N_Sol_June); summary(TPR10)
TPR11 = update(TPR10, .~. -Isopods:Herbivory); summary(TPR11)
TPR12 = update(TPR11, .~. -Herbivory); summary(TPR12)
TPR13 = update(TPR12, .~. -Isopods); summary(TPR13)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(TPR13), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Forbs
#*********************************************
vif(lm(Forbs~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
F1 = lm(Forbs~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(F1)
F2 = update(F1, .~. -Label:N_Sol_June); summary(F2)
F3 = update(F2, .~. -Herbivory:Label); summary(F3)
F4 = update(F3, .~. -NE_June); summary(F4)
F5 = update(F4, .~. -SIR_June); summary(F5)
F6 = update(F5, .~. -Label:Isopods); summary(F6)
F7 = update(F6, .~. -Herbivory:Isopods); summary(F7)
F8 = update(F7, .~. -Worms); summary(F8)
F9 = update(F8, .~. -Isopods:N_Sol_June); summary(F9)
F10 = update(F9, .~. -Isopods); summary(F10)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(F10), data=subset(ch2exp, Isopods <20 & Worms <20)))
#*********************************************
# Grass
#*********************************************
vif(lm(Grass~(Herbivory+Label+Isopods+N_Sol_June)^2 +SIR_June+Worms + NE_June, data=ch2exp_center))<10
G1 = lm(Grass~(Herbivory+Label+Isopods+N_Sol_June)^2 + SIR_June + Worms + NE_June, data=ch2exp); summary(G1)
G2 = update(G1, .~. -Herbivory:Isopods); summary(G2)
G3 = update(G2, .~. -Herbivory:N_Sol_June); summary(G3)
G4 = update(G3, .~. -Label:N_Sol_June); summary(G4)
G5 = update(G4, .~. -Label:Isopods); summary(G5)
G6 = update(G5, .~. -Herbivory:Label); summary(G6)
G7 = update(G6, .~. -Isopods:N_Sol_June); summary(G7)
G8 = update(G7, .~. -Isopods); summary(G8)
G9 = update(G8, .~. -N_Sol_June); summary(G9)
G10 = update(G9, .~. -NE_June); summary(G10)
G11 = update(G10, .~. -Herbivory); summary(G11)
G12 = update(G11, .~. -Worms); summary(G12)
# check if removal of high isopods and worm mesocosms matters
summary(lm(formula(G12), data=subset(ch2exp, Isopods <20 & Worms <20)))
# Plot Model Results ------------------------------------------------------
#*********************************************
# Create predictions
#*********************************************
for(i in 0:1){
for(j in 0:1){
isorange = range(subset(ch2exp, Herbivory==i & Label==j)$Isopods)
isoseq = seq(isorange[1], isorange[2], by = 1)
assign(paste0("Treat",i,j),cbind(isoseq, rep(i, length(isoseq)), rep(j, length(isoseq))))
}
}
newdata = as.data.frame(rbind(Treat00, Treat01, Treat10, Treat11))
names(newdata) = c("Isopods", "Herbivory", "Label")
newdata["Worms"] = rep(mean(ch2exp$Worms), times=dim(newdata)[1])
newdata["SIR_June"] = rep(mean(ch2exp$SIR_June, na.rm=T), times=dim(newdata)[1])
newdata["N_Sol_June"] = rep(2, times=dim(newdata)[1])
newdata["Herbivory"] = as.factor(newdata$Herbivory)
newdata["Label"] = as.factor(newdata$Label)
newdata["NE_June"] = rep(mean(ch2exp$NE_June), times=dim(newdata)[1])
newdata["NS_June"] = rep(mean(ch2exp$NS_June), times=dim(newdata)[1])
#New Model
newdata["Npredict"] = predict(P6, newdata)
newdata["Npredictse"] = predict(P6, newdata, se.fit=T)$se.fit
newdata["NpredictNE"] = predict(NE13, newdata)
newdata["NpredictseNE"] = predict(NE13, newdata, se.fit=T)$se.fit
newdata["NpredictL"] = predict(LND6, newdata)
newdata["NpredictseL"] = predict(LND6, newdata, se.fit=T)$se.fit
#*********************************************
# Plot Results
#*********************************************
pdf("Figure2.pdf", width=8, height=6)
par(oma=c(3,0,0,0),mar=c(3,5,2,2),mfrow=c(2,2), cex.lab=1.2)
#Litter
plot(LNDecomp~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab=expression(Litter~Decomp~(mg[N]~day^-1)), main="No Herbivory", type="n",
ylim=c(1,2), xlim=c(0,20))
text(x=0, y=1.9, label="a", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$LNDecomp, pch=19,col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
plot(LNDecomp~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", main="Herbivory", type="n",
ylim=c(1,2), xlim=c(0,20))
text(x=0, y=1.9, label="b", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictL, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictL+subofdata$NpredictseL,rev(subofdata$NpredictL-subofdata$NpredictseL)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$LNDecomp, pch=10,col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
#NE Oct
plot(NE_Oct~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab=expression(Soil~N~(mu*g[N]~g[DMES]^-1)), type="n",
ylim=c(0,5), xlim=c(0,20))
text(x=0, y=4.5, label="c", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$NE_Oct, pch=19, col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
plot(NE_Oct~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",
ylim=c(0,5), xlim=c(0,20))
text(x=0, y=4.5, label="d", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$NpredictNE, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$NpredictNE+subofdata$NpredictseNE,rev(subofdata$NpredictNE-subofdata$NpredictseNE)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$NE_Oct, pch=10, col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
mtext(text="Isopods (#)",side=1,line=0,outer=TRUE)
dev.off()
pdf("Figure3.pdf", width=8, height=6)
par(oma=c(1,1,0,0),mar=c(3,5,2,2),mfrow=c(2,2), cex.lab=1.2)
#Plants
plot(Plant~Isopods, data=subset(ch2exp, Herbivory==0), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",main="No Herbivory",
ylim=c(0,14), xlim=c(0,20))
text(x=0, y=13.5, label="a", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=1)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==0)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=1, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==0)$Isopods, subset(ch2exp, Herbivory==0)$Plant, pch=19, col=subset(ch2exp, Herbivory==0)$Label, cex=1.5)
plot(Plant~Isopods, data=subset(ch2exp, Herbivory==1), pch=19, col=Label,
xlab="Isopods (#)", ylab="", type="n",main="Herbivory",
ylim=c(0,14), xlim=c(0,20))
text(x=0, y=13.5, label="b", cex=2)
subofdata = subset(newdata, Label==0 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=2)
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("grey",.5))
subofdata = subset(newdata, Label==1 & Herbivory==1)
lines(subofdata$Isopods,
subofdata$Npredict, cex=2, lty=2, col="red")
polygon(c(subofdata$Isopods,rev(subofdata$Isopods)),
c(subofdata$Npredict+subofdata$Npredictse,rev(subofdata$Npredict-subofdata$Npredictse)),
col=scales::alpha("red",.5))
points(subset(ch2exp, Herbivory==1)$Isopods, subset(ch2exp, Herbivory==1)$Plant, pch=10, col=subset(ch2exp, Herbivory==1)$Label, cex=1.5)
Isopodrange = seq(0,12, by=1)
Wormmean = rep(mean(ch2exp$Worms), times=length(Isopodrange))
SIRmean = rep(mean(ch2exp$SIR_June, na.rm=T), times=length(Isopodrange))
N_Sol_June1 = rep(1, times=length(Isopodrange))
Label1 = rep(1, times=length(Isopodrange))
#Herbivory 0
newdatplant1 = as.data.frame(cbind(Isopodrange, Wormmean, SIRmean, rep(0, length(Isopodrange)), N_Sol_June1))
names(newdatplant1) = c("Isopods", "Worms", "SIR_June", "Herbivory", "N_Sol_June")
newdatplant1["Herbivory"] = as.factor(newdatplant1$Herbivory)
newplant1 = predict(P6, newdata=newdatplant1, se.fit=T)$fit
newdatplant2 = newdatplant1
newdatplant2["N_Sol_June"] = newdatplant1$N_Sol_June + 1
newplant2 = predict(P6, newdata=newdatplant2, se.fit=T)$fit
newdatplant3 = newdatplant1
newdatplant3["N_Sol_June"] = newdatplant1$N_Sol_June + 2
newplant3 = predict(P6, newdata=newdatplant3, se.fit=T)$fit
newdatplant4 = newdatplant1
newdatplant4["N_Sol_June"] = newdatplant1$N_Sol_June + 3
newplant4 = predict(P6, newdata=newdatplant4, se.fit=T)$fit
newdatplant5 = newdatplant1
newdatplant5["N_Sol_June"] = 0
newplant5 = predict(P6, newdata=newdatplant5, se.fit=T)$fit
plot(newplant1~Isopodrange, pch=19, type="b", ylim=c(0,14), col="brown",
ylab="", xlab="", xlim=c(0,20))
text(x=0, y=13.5, label="c", cex=2)
points(newplant5~Isopodrange, pch=19, type="b", col="black")
points(newplant2~Isopodrange, pch=19, type="b", col="orange")
points(newplant3~Isopodrange, pch=19, type="b", col="red")
points(newplant4~Isopodrange, pch=19, type="b", col="green")
# Legend for the Solidago stem-count prediction lines.
# Colours are matched 1:1 to the plotted series: 0 stems = black (newplant5),
# 1 = brown (newplant1), 2 = orange (newplant2), 3 = red (newplant3),
# 4 = green (newplant4). The original vector carried a sixth, unused
# "darkgreen" entry; dropped so labels and colours stay in lockstep.
legend("bottomright", legend = c(0, 1, 2, 3, 4),
       col = c("black", "brown", "orange", "red", "green"),
       pch = 19, title = expression(italic(Solidago)), bty = "n",
       x.intersp = 0.5, y.intersp = 1)
# Herbivory 1
newdatplant1 = as.data.frame(cbind(Isopodrange, Wormmean, SIRmean, Label1, N_Sol_June1))
names(newdatplant1) = c("Isopods", "Worms", "SIR_June", "Herbivory", "N_Sol_June")
newdatplant1["Herbivory"] = as.factor(newdatplant1$Herbivory)
newplant1 = predict(P6, newdata=newdatplant1, se.fit=T)$fit
newdatplant2 = newdatplant1
newdatplant2["N_Sol_June"] = newdatplant1$N_Sol_June + 1
newplant2 = predict(P6, newdata=newdatplant2, se.fit=T)$fit
newdatplant3 = newdatplant1
newdatplant3["N_Sol_June"] = newdatplant1$N_Sol_June + 2
newplant3 = predict(P6, newdata=newdatplant3, se.fit=T)$fit
newdatplant4 = newdatplant1
newdatplant4["N_Sol_June"] = newdatplant1$N_Sol_June + 3
newplant4 = predict(P6, newdata=newdatplant4, se.fit=T)$fit
newdatplant5 = newdatplant1
newdatplant5["N_Sol_June"] = 0
newplant5 = predict(P6, newdata=newdatplant5, se.fit=T)$fit
plot(newplant1~Isopodrange, pch=19, type="b", ylim=c(0,14), col="brown",
ylab="", xlab="", xlim=c(0,20))
text(x=0, y=13.5, label="d", cex=2)
points(newplant5~Isopodrange, pch=19, type="b", col="black")
points(newplant2~Isopodrange, pch=19, type="b", col="orange")
points(newplant3~Isopodrange, pch=19, type="b", col="red")
points(newplant4~Isopodrange, pch=19, type="b", col="green")
mtext(text="Aboveground Plant Biomass (g)",side=2,line=-1,outer=TRUE)
mtext(text="Isopods (#)",side=1,line=0,outer=TRUE)
dev.off()
|
# functions for managing the names of the variables
# read_data.R saves two dataframe with the correspondence of codes and names for cnt and ind:
# cntNameCode,indNameCode
library(dplyr)
# the following functions receive in input a vector of names (codes) and give as an output
# the vector of correspondent codes (names)
# 01.1 name2codeCnt
# Map country names to their country codes.
# `names`: character vector of country names to look up.
# `cntNameCode`: lookup table with columns CountryName and CountryCode.
# Returns the CountryCode values of every row whose CountryName matches.
name2codeCnt <- function(names, cntNameCode) {
  matched <- cntNameCode[cntNameCode$CountryName %in% names, , drop = FALSE]
  matched$CountryCode
}
# 01.2 code2nameCnt
# Map country codes to their country names.
# `codes`: character vector of country codes to look up.
# `cntNameCode`: lookup table with columns CountryName and CountryCode.
# Returns the CountryName values of every row whose CountryCode matches.
code2nameCnt <- function(codes, cntNameCode) {
  matched <- cntNameCode[cntNameCode$CountryCode %in% codes, , drop = FALSE]
  matched$CountryName
}
# 02.1 name2codeInd
# Map indicator names to their indicator codes.
# `names`: character vector of indicator names to look up.
# `indNameCode`: lookup table with columns IndicatorName and IndicatorCode.
# Returns the IndicatorCode values of every row whose IndicatorName matches.
name2codeInd <- function(names, indNameCode) {
  matched <- indNameCode[indNameCode$IndicatorName %in% names, , drop = FALSE]
  matched$IndicatorCode
}
# 02.2 code2nameInd
# Map indicator codes to their indicator names.
# `codes`: character vector of indicator codes to look up.
# `indNameCode`: lookup table with columns IndicatorName and IndicatorCode.
# Returns the IndicatorName values of every row whose IndicatorCode matches.
code2nameInd <- function(codes, indNameCode) {
  matched <- indNameCode[indNameCode$IndicatorCode %in% codes, , drop = FALSE]
  matched$IndicatorName
}
# 03 indShortName [empty]
| /function_name.R | no_license | skiamu/StatApp_test | R | false | false | 981 | r | # functions for managing the names of the variables
# read_data.R saves two dataframe with the correspondence of codes and names for cnt and ind:
# cntNameCode,indNameCode
library(dplyr)
# the following functions receive in input a vector of names (codes) and give as an output
# the vector of correspondent codes (names)
# 01.1 name2codeCnt
# Map country names to their country codes.
# `names`: character vector of country names to look up.
# `cntNameCode`: lookup table with columns CountryName and CountryCode.
# Returns the CountryCode values of every row whose CountryName matches.
name2codeCnt <- function(names, cntNameCode) {
  matched <- cntNameCode[cntNameCode$CountryName %in% names, , drop = FALSE]
  matched$CountryCode
}
# 01.2 code2nameCnt
# Map country codes to their country names.
# `codes`: character vector of country codes to look up.
# `cntNameCode`: lookup table with columns CountryName and CountryCode.
# Returns the CountryName values of every row whose CountryCode matches.
code2nameCnt <- function(codes, cntNameCode) {
  matched <- cntNameCode[cntNameCode$CountryCode %in% codes, , drop = FALSE]
  matched$CountryName
}
# 02.1 name2codeInd
# Map indicator names to their indicator codes.
# `names`: character vector of indicator names to look up.
# `indNameCode`: lookup table with columns IndicatorName and IndicatorCode.
# Returns the IndicatorCode values of every row whose IndicatorName matches.
name2codeInd <- function(names, indNameCode) {
  matched <- indNameCode[indNameCode$IndicatorName %in% names, , drop = FALSE]
  matched$IndicatorCode
}
# 02.2 code2nameInd
# Map indicator codes to their indicator names.
# `codes`: character vector of indicator codes to look up.
# `indNameCode`: lookup table with columns IndicatorName and IndicatorCode.
# Returns the IndicatorName values of every row whose IndicatorCode matches.
code2nameInd <- function(codes, indNameCode) {
  matched <- indNameCode[indNameCode$IndicatorCode %in% codes, , drop = FALSE]
  matched$IndicatorName
}
# 03 indShortName [empty]
|
# Recode a genotype matrix from 1/2/3 coding to 0/2/1 coding.
#
# Args:
#   geno: matrix-like object of genotype codes; valid entries are 1, 2, 3.
#   marker.label: optional column (marker) names; defaults to "S1", "S2", ...
#   miss.val: value(s) in `geno` to be treated as missing (set to NA).
#
# Returns: a matrix with 1 -> 0, 3 -> 1, 2 unchanged, and every value
#   outside {1, 2, 3} (including `miss.val`) replaced by NA.
#
# Changes vs. previous version: removed the no-op
# `rownames(geno) <- rownames(geno)` and replaced `1:ncol(geno)` with
# `seq_len(ncol(geno))`, which is safe for zero-column input.
alleleRto1 <- function(geno, marker.label = NULL, miss.val = NA) {
  geno <- as.matrix(geno)
  # Mark user-supplied missing codes first (skipped when miss.val is just NA,
  # since NA entries are already missing).
  if (!all(is.na(miss.val))) {
    geno[geno %in% miss.val] <- NA
  }
  # Anything outside the valid genotype codes is treated as missing.
  geno[!(geno %in% c(1, 2, 3))] <- NA
  # Recode: 1 -> 0 and 3 -> 1; code 2 is kept as-is.
  geno[geno %in% 1] <- 0
  geno[geno %in% 3] <- 1
  if (is.null(marker.label)) {
    marker.label <- paste0("S", seq_len(ncol(geno)))
  }
  colnames(geno) <- marker.label
  geno
}
| /R/alleleRto1.R | no_license | cran/HapEstXXR | R | false | false | 465 | r | alleleRto1 <-
function ( geno , marker.label=NULL , miss.val=NA) {
geno <- as.matrix(geno)
if ( !all(is.na(miss.val)) ) { geno [ geno %in% miss.val ] <- NA }
geno [ !(geno %in% c(1,3,2)) ] <- NA
geno <- replace(geno,(geno %in% 1),0)
geno <- replace(geno,(geno %in% 3),1)
if ( is.null(marker.label) ) marker.label <- paste("S",1:ncol(geno),sep="")
colnames(geno) <- marker.label
rownames (geno) <- rownames(geno)
return ( geno )
}
|
#' @title Check if parameter values contain expressions.
#'
#' @description Checks if a parameter, parameter set or list of parameters
#' contain expressions.
#' @param obj ([Param()] | [ParamHelpers::ParamSet()] | `list`)\cr
#'   Parameter, parameter set or list of parameters.
#' @return `logical(1)`. `TRUE` if any component of `obj` holds an R
#'   `expression` object, `FALSE` otherwise.
#' @examples
#' ps1 = makeParamSet(
#'   makeNumericParam("x", lower = 1, upper = 2),
#'   makeNumericParam("y", lower = 1, upper = 10)
#' )
#'
#' ps2 = makeParamSet(
#'   makeNumericLearnerParam("x", lower = 1, upper = 2),
#'   makeNumericLearnerParam("y", lower = 1, upper = expression(p))
#' )
#'
#' hasExpression(ps1)
#' hasExpression(ps2)
#' @export
hasExpression = function(obj) {
  # S3 generic: dispatches on the class of `obj`.
  UseMethod("hasExpression")
}

# Param: TRUE if any component of the parameter object is an expression.
# (vlapply is a package-internal helper; presumably vapply with logical(1) --
# TODO confirm against the package's helper definitions.)
#' @export
hasExpression.Param = function(obj) {
  any(vlapply(obj, is.expression))
}

# LearnerParam: same component-wise check as for plain Param objects.
#' @export
hasExpression.LearnerParam = function(obj) {
  any(vlapply(obj, is.expression))
}

# ParamSet: recurse into each contained parameter.
#' @export
hasExpression.ParamSet = function(obj) {
  any(vlapply(obj$pars, hasExpression))
}

# LearnerParamSet: recurse into each contained parameter.
#' @export
hasExpression.LearnerParamSet = function(obj) {
  any(vlapply(obj$pars, hasExpression))
}

# list: TRUE if any element (itself a Param/ParamSet/list) contains
# an expression.
#' @export
hasExpression.list = function(obj) {
  any(vlapply(obj, hasExpression))
}
| /R/hasExpression.R | no_license | cran/ParamHelpers | R | false | false | 1,188 | r | #' @title Check if parameter values contain expressions.
#'
#' @description Checks if a parameter, parameter set or list of parameters
#' contain expressions.
#' @param obj ([Param()] | [ParamHelpers::ParamSet()] | `list`)\cr
#' Parameter, parameter set or list of parameters.
#' @return `logical(1)`.
#' @examples
#' ps1 = makeParamSet(
#' makeNumericParam("x", lower = 1, upper = 2),
#' makeNumericParam("y", lower = 1, upper = 10)
#' )
#'
#' ps2 = makeParamSet(
#' makeNumericLearnerParam("x", lower = 1, upper = 2),
#' makeNumericLearnerParam("y", lower = 1, upper = expression(p))
#' )
#'
#' hasExpression(ps1)
#' hasExpression(ps2)
#' @export
hasExpression = function(obj) {
UseMethod("hasExpression")
}
#' @export
hasExpression.Param = function(obj) {
any(vlapply(obj, is.expression))
}
#' @export
hasExpression.LearnerParam = function(obj) {
any(vlapply(obj, is.expression))
}
#' @export
hasExpression.ParamSet = function(obj) {
any(vlapply(obj$pars, hasExpression))
}
#' @export
hasExpression.LearnerParamSet = function(obj) {
any(vlapply(obj$pars, hasExpression))
}
#' @export
hasExpression.list = function(obj) {
any(vlapply(obj, hasExpression))
}
|
# Mehmet Gonen (mehmet.gonen@aalto.fi)
# Helsinki Institute for Information Technology HIIT
# Department of Information and Computer Science
# Aalto University School of Science
# Log-determinant of a symmetric positive-definite matrix, computed from
# its Cholesky factor: log|Sigma| = 2 * sum(log(diag(R))), Sigma = R'R.
# chol() errors on non-positive-definite input, as before.
logdet <- function(Sigma) {
  chol_diag <- diag(chol(Sigma))
  2 * sum(log(chol_diag))
}
# Tile matrix M in a `row` x `column` grid (MATLAB-style repmat),
# implemented as a Kronecker product with an all-ones matrix.
repmat <- function(M, row, column) {
  tiling <- matrix(1, nrow = row, ncol = column)
  kronecker(tiling, M)
}
| /code/kernelMTL/helper.R | no_license | jguinney/virtualIC50 | R | false | false | 326 | r | # Mehmet Gonen (mehmet.gonen@aalto.fi)
# Helsinki Institute for Information Technology HIIT
# Department of Information and Computer Science
# Aalto University School of Science
# Log-determinant of a symmetric positive-definite matrix, computed from
# its Cholesky factor: log|Sigma| = 2 * sum(log(diag(R))), Sigma = R'R.
# chol() errors on non-positive-definite input, as before.
logdet <- function(Sigma) {
  chol_diag <- diag(chol(Sigma))
  2 * sum(log(chol_diag))
}
# Tile matrix M in a `row` x `column` grid (MATLAB-style repmat),
# implemented as a Kronecker product with an all-ones matrix.
repmat <- function(M, row, column) {
  tiling <- matrix(1, nrow = row, ncol = column)
  kronecker(tiling, M)
}
|
library(kernDeepStackNet)
### Name: randomFourierTrans
### Title: Random Fourier transformation
### Aliases: randomFourierTrans
### Keywords: models & regression
### ** Examples
# Generate data matrix: five standard-normal columns plus one 5-level factor.
X <- data.frame(rnorm(100), rnorm(100), rnorm(100), rnorm(100), rnorm(100),
factor(sample(c("a", "b", "c", "d", "e"), 100, replace=TRUE)))
# model.matrix dummy-encodes the factor and adds an intercept column.
X <- model.matrix(object=~., data=X)
# Exclude intercept
X <- X[, -1]
# Apply a random Fourier transformation of lower dimension
# (Dim = 2 output features; seedW presumably fixes the RNG used to draw
# the random weight matrix -- see the package docs to confirm).
rft <- randomFourierTrans(X=X, Dim=2, sigma=1, seedW=0)
# Transformed data
rft$Z
# Used weight matrix
rft$rW
| /data/genthat_extracted_code/kernDeepStackNet/examples/randomFourierTrans.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 590 | r | library(kernDeepStackNet)
### Name: randomFourierTrans
### Title: Random Fourier transformation
### Aliases: randomFourierTrans
### Keywords: models & regression
### ** Examples
# Generate data matrix
X <- data.frame(rnorm(100), rnorm(100), rnorm(100), rnorm(100), rnorm(100),
factor(sample(c("a", "b", "c", "d", "e"), 100, replace=TRUE)))
X <- model.matrix(object=~., data=X)
# Exclude intercept
X <- X[, -1]
# Apply a random Fourier transformation of lower dimension
rft <- randomFourierTrans(X=X, Dim=2, sigma=1, seedW=0)
# Transformed data
rft$Z
# Used weight matrix
rft$rW
|
# Calcula el CCL de los últimos 90 o la cantidad de días que se determine en inicio
# calcula la fecha final del rango como la fecha de hoy ajustada si es hábil - 4 así tiene los úlitmos 5 días
library(tidyquant)
library(bizdays)
library(tidyverse)
#library(ggthemes)
# Computes the implied "contado con liquidación" (CCL) exchange rate over the
# last `lookBack` calendar days (both ends adjusted to Argentine business
# days), from the price ratio between the local (.BA) listing and the foreign
# listing of dual-listed Argentine stocks (restricted to GGAL, BMA, YPF, EDN).
#
# Args:
#   graba: if TRUE, writes the per-asset CCL table ('ccl.csv'), the daily
#          average ('cclProm.csv') and a plot of the average to disk.
#   lookBack: number of calendar days to look back from today.
#
# Returns: a tibble with columns `fecha` (date) and `CCL` (daily mean CCL).
returnCcl <- function(graba = FALSE, lookBack = 90)
{
  setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
  # Argentine business-day calendar: the range ends on the last business day
  # up to today and starts `lookBack` calendar days earlier (also adjusted).
  cal <- create.calendar("Argentina/ANBIMA", holidaysANBIMA, weekdays=c("saturday", "sunday"))
  final <- adjust.previous(Sys.Date(), cal)
  inicio <- adjust.previous(final - lookBack, cal)
  # Output file names tagged with the date range (only file_grafprom is
  # currently written; `file` and `file_prom` are kept for the commented-out
  # exports below).
  file = paste("../ccl/", "ccl", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".csv", sep='')
  file_prom = paste("../ccl/", "CCLProm", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".csv", sep='')
  file_grafprom = paste("../ccl/", "CCLPromGraf", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".jpg", sep='')
  # Load the Argentine ADRs and the cedears. Each row carries symbol,
  # symbol_local and the conversion ratio.
  adr_argentinos <- read_csv("~/Google Drive/analisis financieros/ccl/ADRs_Argentinos/adr_argentinos.csv",
                             col_types = cols(empresa = col_skip(),
                                              ratio = col_number()))
  cedears <- read_csv("~/Google Drive/analisis financieros/ccl/Cedear/cedears.csv",
                      col_types = cols(Nombre = col_skip(),
                                       Cod_Caja = col_skip(), ISIN_Cedear = col_skip(),
                                       ISIN_Suby = col_skip(), CUSIP = col_skip(),
                                       ratio = col_number()))
  # Build a lookup pairing each ticker with its counterpart on the other
  # exchange, in both directions, so a single tq_get call can fetch local and
  # foreign series together.
  activos <- bind_rows(adr_argentinos, cedears)
  lista_activos <- bind_rows(activos %>% transmute(symbol1 = symbol, symbol2 = symbol_local, ratio = ratio),
                             activos %>% transmute(symbol1 = symbol_local, symbol2 = symbol, ratio = ratio))
  colnames(lista_activos) <- c("symbol", "symbol2", "ratio")
  rm(activos)  # no longer needed
  # Restrict to the tickers actually used for the CCL (the full download is
  # slow on a limited connection).
  lista_activos <- lista_activos %>% filter(symbol == "GGAL.BA" | symbol == "BMA.BA" | symbol == "YPFD.BA" | symbol == "EDN.BA" | symbol == "GGAL" | symbol == "BMA" | symbol == "EDN" | symbol == "YPF")
  # Download OHLCVA prices for every ticker over the range.
  precios <- lista_activos$symbol %>%
    tq_get(get = "stock.prices",
           from = inicio,
           to = final + 1) %>%
    group_by(symbol)
  # Split into local listings (ticker contains a '.') and foreign listings.
  local <- precios %>% filter(str_detect(symbol, fixed(".")))
  afuera <- precios %>% filter(!str_detect(symbol, fixed(".")))
  rm(precios)
  # Attach each ticker's counterpart symbol from the lookup table.
  local <- left_join(local, lista_activos)
  afuera <- left_join(afuera, lista_activos)
  # Pair each local quote with the same-day foreign quote of its counterpart.
  df_ccl <- left_join(local, afuera, by = c("symbol2" = "symbol", "date" = "date"))
  # CCL = local close * ratio / foreign close; drop rows with zero local
  # volume (no actual trading that day).
  df_ccl <- df_ccl %>% mutate(
    ccl = close.x * ratio.x / close.y) %>%
    select(date, symbol, volume.x, close.x, adjusted.x, symbol2, ratio.x, volume.y, close.y, adjusted.y, ccl) %>%
    filter(volume.x != 0)
  # Daily average CCL across the selected tickers.
  ccl <- df_ccl %>%
    group_by(date) %>%
    summarise(CCL_prom = mean(ccl))
  colnames(ccl) <- c('fecha', 'CCL')
  # (Kept for reference) Reference CCL using only GGAL, BMA, YPF and EDN:
  # GBYE <- df_ccl %>% select(date, symbol, close.x, symbol2, ratio.x, close.y, ccl) %>%
  #   filter(symbol == "GGAL.BA" | symbol == "BMA.BA" | symbol == "YPFD.BA" | symbol == "EDN.BA") %>% drop_na()
  # write_csv(GBYE, file_prom, col_names = TRUE)
  grafprom <- ccl %>%
    ggplot(aes(x = fecha, y = CCL)) +
    geom_line() +
    # Qualified with ggthemes:: because library(ggthemes) is commented out at
    # the top of this script; the unqualified calls would fail at runtime.
    ggthemes::theme_economist() +
    scale_x_date(date_breaks="1 month", date_labels="%Y %m") +
    ggthemes::scale_color_economist() +
    labs(title = "CCL prom con GGAL BMA YPFD EDN",
         y = "CCL calculado con precios de Cierre", x = "")
  if (graba == TRUE){
    write_csv(df_ccl, 'ccl.csv', col_names = TRUE)
    write_csv(ccl, 'cclProm.csv', col_names = TRUE)
    ggsave(file_grafprom, grafprom, units = "mm", width = 150, height = 75)
  }
  ccl
}
| /ccl/ccl.R | no_license | jmtruffa/ccl | R | false | false | 4,609 | r | # Calcula el CCL de los últimos 90 o la cantidad de días que se determine en inicio
# calcula la fecha final del rango como la fecha de hoy ajustada si es hábil - 4 así tiene los úlitmos 5 días
library(tidyquant)
library(bizdays)
library(tidyverse)
#library(ggthemes)
# Computes the implied "contado con liquidación" (CCL) exchange rate over the
# last `lookBack` calendar days (both ends adjusted to Argentine business
# days), from the price ratio between the local (.BA) listing and the foreign
# listing of dual-listed Argentine stocks (restricted to GGAL, BMA, YPF, EDN).
#
# Args:
#   graba: if TRUE, writes the per-asset CCL table ('ccl.csv'), the daily
#          average ('cclProm.csv') and a plot of the average to disk.
#   lookBack: number of calendar days to look back from today.
#
# Returns: a tibble with columns `fecha` (date) and `CCL` (daily mean CCL).
returnCcl <- function(graba = FALSE, lookBack = 90)
{
  setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
  # Argentine business-day calendar: the range ends on the last business day
  # up to today and starts `lookBack` calendar days earlier (also adjusted).
  cal <- create.calendar("Argentina/ANBIMA", holidaysANBIMA, weekdays=c("saturday", "sunday"))
  final <- adjust.previous(Sys.Date(), cal)
  inicio <- adjust.previous(final - lookBack, cal)
  # Output file names tagged with the date range (only file_grafprom is
  # currently written; `file` and `file_prom` are kept for the commented-out
  # exports below).
  file = paste("../ccl/", "ccl", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".csv", sep='')
  file_prom = paste("../ccl/", "CCLProm", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".csv", sep='')
  file_grafprom = paste("../ccl/", "CCLPromGraf", str_remove_all(inicio, "-"), "-", str_remove_all(final, "-"), ".jpg", sep='')
  # Load the Argentine ADRs and the cedears. Each row carries symbol,
  # symbol_local and the conversion ratio.
  adr_argentinos <- read_csv("~/Google Drive/analisis financieros/ccl/ADRs_Argentinos/adr_argentinos.csv",
                             col_types = cols(empresa = col_skip(),
                                              ratio = col_number()))
  cedears <- read_csv("~/Google Drive/analisis financieros/ccl/Cedear/cedears.csv",
                      col_types = cols(Nombre = col_skip(),
                                       Cod_Caja = col_skip(), ISIN_Cedear = col_skip(),
                                       ISIN_Suby = col_skip(), CUSIP = col_skip(),
                                       ratio = col_number()))
  # Build a lookup pairing each ticker with its counterpart on the other
  # exchange, in both directions, so a single tq_get call can fetch local and
  # foreign series together.
  activos <- bind_rows(adr_argentinos, cedears)
  lista_activos <- bind_rows(activos %>% transmute(symbol1 = symbol, symbol2 = symbol_local, ratio = ratio),
                             activos %>% transmute(symbol1 = symbol_local, symbol2 = symbol, ratio = ratio))
  colnames(lista_activos) <- c("symbol", "symbol2", "ratio")
  rm(activos)  # no longer needed
  # Restrict to the tickers actually used for the CCL (the full download is
  # slow on a limited connection).
  lista_activos <- lista_activos %>% filter(symbol == "GGAL.BA" | symbol == "BMA.BA" | symbol == "YPFD.BA" | symbol == "EDN.BA" | symbol == "GGAL" | symbol == "BMA" | symbol == "EDN" | symbol == "YPF")
  # Download OHLCVA prices for every ticker over the range.
  precios <- lista_activos$symbol %>%
    tq_get(get = "stock.prices",
           from = inicio,
           to = final + 1) %>%
    group_by(symbol)
  # Split into local listings (ticker contains a '.') and foreign listings.
  local <- precios %>% filter(str_detect(symbol, fixed(".")))
  afuera <- precios %>% filter(!str_detect(symbol, fixed(".")))
  rm(precios)
  # Attach each ticker's counterpart symbol from the lookup table.
  local <- left_join(local, lista_activos)
  afuera <- left_join(afuera, lista_activos)
  # Pair each local quote with the same-day foreign quote of its counterpart.
  df_ccl <- left_join(local, afuera, by = c("symbol2" = "symbol", "date" = "date"))
  # CCL = local close * ratio / foreign close; drop rows with zero local
  # volume (no actual trading that day).
  df_ccl <- df_ccl %>% mutate(
    ccl = close.x * ratio.x / close.y) %>%
    select(date, symbol, volume.x, close.x, adjusted.x, symbol2, ratio.x, volume.y, close.y, adjusted.y, ccl) %>%
    filter(volume.x != 0)
  # Daily average CCL across the selected tickers.
  ccl <- df_ccl %>%
    group_by(date) %>%
    summarise(CCL_prom = mean(ccl))
  colnames(ccl) <- c('fecha', 'CCL')
  # (Kept for reference) Reference CCL using only GGAL, BMA, YPF and EDN:
  # GBYE <- df_ccl %>% select(date, symbol, close.x, symbol2, ratio.x, close.y, ccl) %>%
  #   filter(symbol == "GGAL.BA" | symbol == "BMA.BA" | symbol == "YPFD.BA" | symbol == "EDN.BA") %>% drop_na()
  # write_csv(GBYE, file_prom, col_names = TRUE)
  grafprom <- ccl %>%
    ggplot(aes(x = fecha, y = CCL)) +
    geom_line() +
    # Qualified with ggthemes:: because library(ggthemes) is commented out at
    # the top of this script; the unqualified calls would fail at runtime.
    ggthemes::theme_economist() +
    scale_x_date(date_breaks="1 month", date_labels="%Y %m") +
    ggthemes::scale_color_economist() +
    labs(title = "CCL prom con GGAL BMA YPFD EDN",
         y = "CCL calculado con precios de Cierre", x = "")
  if (graba == TRUE){
    write_csv(df_ccl, 'ccl.csv', col_names = TRUE)
    write_csv(ccl, 'cclProm.csv', col_names = TRUE)
    ggsave(file_grafprom, grafprom, units = "mm", width = 150, height = 75)
  }
  ccl
}
|
rm(list=ls())
library(data.table)
removeNABlank <- function(df) {
  # Drops columns where more than 30% of the entries are NA, then drops
  # columns where more than 30% of the remaining (non-NA) entries are empty
  # strings.
  #
  # Args:
  #   df: a data.frame (or data.table).
  # Returns: df restricted to the columns passing both filters.
  col_means <- colMeans(is.na(df))
  col_means <- col_means[col_means > 0.3]
  keep_names <- names(df)[! names(df) %in% names(col_means)]
  df <- subset(df, select = keep_names)
  # na.rm = TRUE is essential here: without it, a single NA in a column makes
  # its column mean NA, and that column silently escapes the blank-share
  # filter even when most of its values are "".
  col_means <- colMeans(df == "", na.rm = TRUE)
  col_means <- col_means[col_means > 0.3]
  keep_names <- names(df)[! names(df) %in% names(col_means)]
  return(subset(df, select = keep_names))
}
# Column names for each ZAsmt table come from Layout.xlsx, exported to the
# variable_names_ZAsmt.csv lookup read below.
folder <- 'C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/raw/36/ZAsmt'
files <- list.files(path = folder, pattern = '*', full.names = FALSE)
# Lookup table mapping each raw file name (lower-cased, ".txt" suffix) to its
# column names.
variable_names <- read.csv(file="C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/variable_names_ZAsmt.csv",stringsAsFactors = FALSE)
variable_names$file <- tolower(sapply(variable_names$file, function(x) paste(substr(x, 3, nchar(x)), ".txt", sep = "")))
options(warn = -1)  # silence parser warnings for the batch run
for (file in files) {
  print(file)
  tryCatch({
    # Read the pipe-delimited raw file, attach its column names from the
    # lookup, and persist it as an .rds next to the source file.
    temp <- fread(file = paste(folder, "/", file, sep = ""), sep = "|")
    names(temp) <- variable_names[variable_names$file == file, ]$columnname
    saveRDS(temp, file = paste(folder, "/", substr(file, 1, nchar(file) - 4), ".rds", sep = ""))
    rm(temp)
    gc()
  }, error = function(cond) {
    print(paste("Error", file))
  })
}
options(warn = 0)
#
# # Run following only for Historical files ---------------------------------
#
# folder = 'C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/raw/Historical/17'
# files = list.files(path = folder, pattern = '.rds',full.names = FALSE)
#
# for(file in files) {
# print(file)
# temp <-readRDS(file=paste(folder,"/",file,sep=""))
# temp[,c("BKFSPID","BatchID","SubEdition","PropertyAddressMatchcode","PropertyAddressCarrierRoute","PropertyAddressGeoCodeMatchCode",
# "PropertyAddressLatitude","PropertyAddressLongitude","PropertyAddressCensusTractAndBlock","LoadID","LegalSecTwnRngMer",
# "FIPS","State","County","ExtractDate","Edition","ZVendorStndCode","UnformattedAssessorParcelNumber","ParcelSequenceNumber",
# "PropertyHouseNumber","PropertyStreetName","PropertyStreetSuffix","PropertyFullStreetAddress",
# "PropertyCity","PropertyState","PropertyZip","PropertyZip4")] <- NULL
# temp <- removeNABlank(temp)
# saveRDS(temp,file=paste(folder,"/",file,sep=""))
# }
#
#
#
# files <- list.files(path=folder,pattern=".*rds",full.names = TRUE)
# propertyinfo <- lapply(files,function(x) readRDS(x))
# propertyinfo <- rbindlist(propertyinfo)
# saveRDS(propertyinfo,file=paste(folder,"/main.rds",sep="")) | /convert_to_rds_ZAsmt.R | no_license | dimuthu999/sunkcost_2019 | R | false | false | 2,959 | r | rm(list=ls())
library(data.table)
removeNABlank <- function(df) {
col_means <- colMeans(is.na(df))
col_means <- col_means[col_means>0.3]
keep_names <- names(df)[! names(df) %in% names(col_means)]
df <- subset(df,select=keep_names)
col_means <- colMeans(df=="")
col_means <- col_means[col_means>0.3]
keep_names <- names(df)[! names(df) %in% names(col_means)]
return(subset(df,select=keep_names))
}
# use Layout.xlsx for column names
folder = 'C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/raw/36/ZAsmt'
files = list.files(path = folder, pattern = '*',full.names = FALSE)
variable_names <- read.csv(file="C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/variable_names_ZAsmt.csv",stringsAsFactors = FALSE)
variable_names$file <- tolower(sapply(variable_names$file,function(x) paste(substr(x,3,nchar(x)),".txt",sep="")))
options(warn = -1)
for(file in files) {
print(file)
tryCatch(
{
# temp <- read.dta(file=paste(folder,"/",file,sep=""))
temp <- fread(file=paste(folder,"/",file,sep=""),sep="|")
names(temp) <- variable_names[variable_names$file==file,]$columnname
# names(temp) <- variable_names[variable_names$file==paste(substr(file,1,4),".txt",sep=""),]$columnname
# temp <- removeNABlank(removeNABlank)
saveRDS(temp,file=paste(folder,"/",substr(file,1,nchar(file)-4),".rds",sep=""))
# saveRDS(temp,file=paste(folder,"/",file,".rds",sep=""))
rm(temp)
gc()
},error=function(cond) {
print(paste("Error",file))
})
}
options(warn = 0)
#
# # Run following only for Historical files ---------------------------------
#
# folder = 'C:/Users/dnratnadiwakara/Documents/sunkcost_2019/ztraxdata/raw/Historical/17'
# files = list.files(path = folder, pattern = '.rds',full.names = FALSE)
#
# for(file in files) {
# print(file)
# temp <-readRDS(file=paste(folder,"/",file,sep=""))
# temp[,c("BKFSPID","BatchID","SubEdition","PropertyAddressMatchcode","PropertyAddressCarrierRoute","PropertyAddressGeoCodeMatchCode",
# "PropertyAddressLatitude","PropertyAddressLongitude","PropertyAddressCensusTractAndBlock","LoadID","LegalSecTwnRngMer",
# "FIPS","State","County","ExtractDate","Edition","ZVendorStndCode","UnformattedAssessorParcelNumber","ParcelSequenceNumber",
# "PropertyHouseNumber","PropertyStreetName","PropertyStreetSuffix","PropertyFullStreetAddress",
# "PropertyCity","PropertyState","PropertyZip","PropertyZip4")] <- NULL
# temp <- removeNABlank(temp)
# saveRDS(temp,file=paste(folder,"/",file,sep=""))
# }
#
#
#
# files <- list.files(path=folder,pattern=".*rds",full.names = TRUE)
# propertyinfo <- lapply(files,function(x) readRDS(x))
# propertyinfo <- rbindlist(propertyinfo)
# saveRDS(propertyinfo,file=paste(folder,"/main.rds",sep="")) |
#' Check Less Than or Equal To
#'
#' @description
#' Checks if all non-missing values are less than or equal to y using
#'
#' `all(x[!is.na(x)] <= value)`
#'
#' @inheritParams params
#' @return
#' The `chk_` function throws an informative error if the test fails.
#'
#' The `vld_` function returns a flag indicating whether the test was met.
#'
#' @family chk_ranges
#' @export
#'
#' @examples
#'
#' # chk_lte
#' chk_lte(0)
#' try(chk_lte(0.1))
chk_lte <- function(x, value = 0, x_name = NULL) {
  # Nothing to do when every non-missing value satisfies the bound.
  if (vld_lte(x, value)) {
    return(invisible())
  }
  if (is.null(x_name)) {
    x_name <- deparse_backtick_chk(substitute(x))
  }
  # Vector inputs get the plural message; scalars report the offending value.
  if (length(x) != 1L) {
    abort_chk(x_name, " must have values less than or equal to ", cc(value))
  }
  abort_chk(
    x_name, " must be less than or equal to ", cc(value), ", not ", cc(x),
    ""
  )
}
#' @describeIn chk_lte Validate Less Than or Equal To
#'
#' @export
#'
#' @examples
#'
#' # vld_lte
#' vld_lte(numeric(0))
#' vld_lte(0)
#' vld_lte(0.1)
#' vld_lte(c(-0.1, -0.2, NA))
#' vld_lte(c(-0.1, -0.2, NA), value = -1)
vld_lte <- function(x, value = 0) {
  # TRUE when every non-missing element of x is <= value (vacuously TRUE for
  # empty or all-NA input).
  present <- x[!is.na(x)]
  all(present <= value)
}
| /R/chk-lte.R | permissive | krlmlr/chk | R | false | false | 1,120 | r | #' Check Less Than or Equal To
#'
#' @description
#' Checks if all non-missing values are less than or equal to y using
#'
#' `all(x[!is.na(x)] <= value)`
#'
#' @inheritParams params
#' @return
#' The `chk_` function throws an informative error if the test fails.
#'
#' The `vld_` function returns a flag indicating whether the test was met.
#'
#' @family chk_ranges
#' @export
#'
#' @examples
#'
#' # chk_lte
#' chk_lte(0)
#' try(chk_lte(0.1))
chk_lte <- function(x, value = 0, x_name = NULL) {
  # Nothing to do when every non-missing value satisfies the bound.
  if (vld_lte(x, value)) {
    return(invisible())
  }
  if (is.null(x_name)) {
    x_name <- deparse_backtick_chk(substitute(x))
  }
  # Vector inputs get the plural message; scalars report the offending value.
  if (length(x) != 1L) {
    abort_chk(x_name, " must have values less than or equal to ", cc(value))
  }
  abort_chk(
    x_name, " must be less than or equal to ", cc(value), ", not ", cc(x),
    ""
  )
}
#' @describeIn chk_lte Validate Less Than or Equal To
#'
#' @export
#'
#' @examples
#'
#' # vld_lte
#' vld_lte(numeric(0))
#' vld_lte(0)
#' vld_lte(0.1)
#' vld_lte(c(-0.1, -0.2, NA))
#' vld_lte(c(-0.1, -0.2, NA), value = -1)
vld_lte <- function(x, value = 0) {
  # TRUE when every non-missing element of x is <= value (vacuously TRUE for
  # empty or all-NA input).
  present <- x[!is.na(x)]
  all(present <= value)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{simSummary.Kernelheaping}
\alias{simSummary.Kernelheaping}
\title{Simulation Summary}
\usage{
simSummary.Kernelheaping(sim, coverage = 0.9)
}
\arguments{
\item{sim}{Simulation object returned from sim.Kernelheaping}
\item{coverage}{probability for computing coverage intervals}
}
\value{
A list with summary statistics.
}
\description{
Simulation Summary
}
| /man/simSummary.Kernelheaping.Rd | no_license | cran/Kernelheaping | R | false | true | 470 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\name{simSummary.Kernelheaping}
\alias{simSummary.Kernelheaping}
\title{Simulation Summary}
\usage{
simSummary.Kernelheaping(sim, coverage = 0.9)
}
\arguments{
\item{sim}{Simulation object returned from sim.Kernelheaping}
\item{coverage}{probability for computing coverage intervals}
}
\value{
A list with summary statistics.
}
\description{
Simulation Summary
}
|
\name{plot_coh_pars}
\alias{plot_coh_pars}
\title{
Miscellaneous plotting functions for \code{lca.rh} type regression objects.
Plot of the cohort effects of the generalised Lee-Carter model
}
\description{
This function plots the age- and time-specific patterns of the cohort effects (only) obtained from the fitting of a generalised Lee-Carter model.
}
\usage{
plot_coh_pars(lca.obj)
}
\arguments{
\item{lca.obj}{an object of class \code{lca.rh} (containing a generalised LC model with a cohort effect)}
}
\value{
A plot with two graphical regions showing the age- and time-specific cohort parameters (i.e. \eqn{beta_x^{(0)}} and \eqn{iota_t}).
}
\references{
Renshaw, A. E. and Haberman, S. (2006), ``A cohort-based extension to the Lee-Carter model for mortality reduction factors", \emph{Insurance: Mathematics and Economics}, \bold{38}, 556-570.
R. D. Lee and L. Carter (1992) ``Modeling and forecasting U.S. mortality", Journal of the American Statistical Association, 87(419), 659-671.
}
\author{
Z. Butt and S. Haberman and H. L. Shang
}
\seealso{
\code{\link[ilc]{plot.per.pars}}, \code{\link[ilc]{lca.rh}}
}
\examples{
mod1 <- lca.rh(dd.cmi.pens, age=60:100, mod='m', interpolate=TRUE, res='dev', dec=1)
plot_coh_pars(mod1)
}
\keyword{plots}
| /man/plot_coh_pars.Rd | no_license | valentinamiot/ilc | R | false | false | 1,256 | rd | \name{plot_coh_pars}
\alias{plot_coh_pars}
\title{
Miscellaneous plotting functions for \code{lca.rh} type regression objects.
Plot of the cohort effects of the generalised Lee-Carter model
}
\description{
This function plots the age- and time-specific patterns of the cohort effects (only) obtained from the fitting of a generalised Lee-Carter model.
}
\usage{
plot_coh_pars(lca.obj)
}
\arguments{
\item{lca.obj}{an object of class \code{lca.rh} (containing a generalised LC model with a cohort effect)}
}
\value{
A plot with two graphical regions showing the age- and time-specific cohort parameters (i.e. \eqn{beta_x^{(0)}} and \eqn{iota_t}).
}
\references{
Renshaw, A. E. and Haberman, S. (2006), ``A cohort-based extension to the Lee-Carter model for mortality reduction factors", \emph{Insurance: Mathematics and Economics}, \bold{38}, 556-570.
R. D. Lee and L. Carter (1992) ``Modeling and forecasting U.S. mortality", Journal of the American Statistical Association, 87(419), 659-671.
}
\author{
Z. Butt and S. Haberman and H. L. Shang
}
\seealso{
\code{\link[ilc]{plot.per.pars}}, \code{\link[ilc]{lca.rh}}
}
\examples{
mod1 <- lca.rh(dd.cmi.pens, age=60:100, mod='m', interpolate=TRUE, res='dev', dec=1)
plot_coh_pars(mod1)
}
\keyword{plots}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mids2mplus.r
\name{mids2mplus}
\alias{mids2mplus}
\title{Export \code{mids} object to Mplus}
\usage{
mids2mplus(imp, file.prefix = "imp", path = getwd(), sep = "\\t",
dec = ".", silent = FALSE)
}
\arguments{
\item{imp}{The \code{imp} argument is an object of class \code{mids},
typically produced by the \code{mice()} function.}
\item{file.prefix}{A character string describing the prefix of the output
data files.}
\item{path}{A character string containing the path of the output file. By
default, files are written to the current \code{R} working directory.}
\item{sep}{The separator between the data fields.}
\item{dec}{The decimal separator for numerical data.}
\item{silent}{A logical flag stating whether the names of the files should be
printed.}
}
\value{
The return value is \code{NULL}.
}
\description{
Converts a \code{mids} object into a format recognized by Mplus, and writes
the data and the Mplus input files
}
\details{
This function automates most of the work needed to export a \code{mids}
object to \code{Mplus}. The function writes the multiple imputation datasets,
the file that contains the names of the multiple imputation data sets and an
\code{Mplus} input file. The \code{Mplus} input file has the proper file
names, so in principle it should run and read the data without alteration.
\code{Mplus} will recognize the data set as a multiply imputed data set, and
do automatic pooling in procedures where that is supported.
}
\author{
Gerko Vink, 2011.
}
\seealso{
\code{\link[=mids-class]{mids}}, \code{\link{mids2spss}}
}
\keyword{manip}
| /man/mids2mplus.Rd | no_license | andland/mice | R | false | false | 1,659 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mids2mplus.r
\name{mids2mplus}
\alias{mids2mplus}
\title{Export \code{mids} object to Mplus}
\usage{
mids2mplus(imp, file.prefix = "imp", path = getwd(), sep = "\\t",
dec = ".", silent = FALSE)
}
\arguments{
\item{imp}{The \code{imp} argument is an object of class \code{mids},
typically produced by the \code{mice()} function.}
\item{file.prefix}{A character string describing the prefix of the output
data files.}
\item{path}{A character string containing the path of the output file. By
default, files are written to the current \code{R} working directory.}
\item{sep}{The separator between the data fields.}
\item{dec}{The decimal separator for numerical data.}
\item{silent}{A logical flag stating whether the names of the files should be
printed.}
}
\value{
The return value is \code{NULL}.
}
\description{
Converts a \code{mids} object into a format recognized by Mplus, and writes
the data and the Mplus input files
}
\details{
This function automates most of the work needed to export a \code{mids}
object to \code{Mplus}. The function writes the multiple imputation datasets,
the file that contains the names of the multiple imputation data sets and an
\code{Mplus} input file. The \code{Mplus} input file has the proper file
names, so in principle it should run and read the data without alteration.
\code{Mplus} will recognize the data set as a multiply imputed data set, and
do automatic pooling in procedures where that is supported.
}
\author{
Gerko Vink, 2011.
}
\seealso{
\code{\link[=mids-class]{mids}}, \code{\link{mids2spss}}
}
\keyword{manip}
|
# Package load hook: registers the non-standard-evaluation names used in this
# package with utils::globalVariables() so R CMD check does not flag them as
# undefined globals. `libname` and `pkgname` are required by the hook
# signature but unused.
.onLoad <- function(libname, pkgname) {
  nse_names <- c(
    "name", "Score", "OrthoScore", "p.1.", "p.corr..1.", "X",
    "Raw.p", "%>%", "binomial", "element_text", "geom_segment",
    "heat.colors", "read.csv", "scale_color_manual", "symbols", ".",
    "colorRampPalette", "geom_text_repel", "labs", "read_csv",
    "scale_fill_manual", "t.test", "data", "fileNames", "geom_vline",
    "lm", "registerDoParallel", "sd", "theme", "add_column", "dev.off",
    "geom_hline", "ggplot", "median", "reorder", "stat_ellipse",
    "theme_bw", "aes", "element_blank", "geom_point", "ggsave",
    "p.adjust", "scale_color_brewer", "step", "wilcox.test", "annotate",
    "element_line", "glm", "par", "stopCluster", "write.csv",
    "as.ggplot", "grid", "plot", "xlab", "as.tibble", "png", "xlim",
    "axis", "predict", "ylim", "quantile"
  )
  # One vectorized registration instead of one call per name; globalVariables
  # unions names, so the effect is identical.
  utils::globalVariables(nse_names)
  invisible()
}
| /R/global.R | no_license | shineshen007/shine | R | false | false | 2,595 | r | .onLoad <- function (libname, pkgname)
{
# make data set names global to avoid CHECK notes
utils::globalVariables ("name")
utils::globalVariables ("Score")
utils::globalVariables ("OrthoScore")
utils::globalVariables ("p.1.")
utils::globalVariables ("p.corr..1.")
utils::globalVariables ("X")
#utils::globalVariables ("msDatabase_hilic0.0.1")
utils::globalVariables ("Raw.p")
utils::globalVariables ("%>%")
utils::globalVariables ("binomial")
utils::globalVariables ("element_text")
utils::globalVariables ("geom_segment")
utils::globalVariables ("heat.colors")
utils::globalVariables ("read.csv")
utils::globalVariables ("scale_color_manual")
utils::globalVariables ("symbols")
utils::globalVariables (".")
utils::globalVariables ("colorRampPalette")
#utils::globalVariables ("featureDefinitions")
utils::globalVariables ("geom_text_repel")
utils::globalVariables ("labs")
utils::globalVariables ("read_csv")
utils::globalVariables ("scale_fill_manual")
utils::globalVariables ("t.test")
#utils::globalVariables ("ObiwarpParam")
utils::globalVariables ("data")
utils::globalVariables ("fileNames")
utils::globalVariables ("geom_vline")
utils::globalVariables ("lm")
utils::globalVariables ("registerDoParallel")
utils::globalVariables ("sd")
utils::globalVariables ("theme")
utils::globalVariables ("add_column")
utils::globalVariables ("dev.off")
utils::globalVariables ("geom_hline")
utils::globalVariables ("ggplot")
utils::globalVariables ("median")
utils::globalVariables ("reorder")
utils::globalVariables ("stat_ellipse")
utils::globalVariables ("theme_bw")
utils::globalVariables ("aes")
utils::globalVariables ("element_blank")
utils::globalVariables ("geom_point")
utils::globalVariables ("ggsave")
utils::globalVariables ("p.adjust")
utils::globalVariables ("scale_color_brewer")
utils::globalVariables ("step")
utils::globalVariables ("wilcox.test")
utils::globalVariables ("annotate")
utils::globalVariables ("element_line")
utils::globalVariables ("glm")
utils::globalVariables ("par")
utils::globalVariables ("stopCluster")
utils::globalVariables ("write.csv")
utils::globalVariables ("as.ggplot")
utils::globalVariables ("grid")
utils::globalVariables ("plot")
utils::globalVariables ("xlab")
utils::globalVariables ("as.tibble")
utils::globalVariables ("png")
utils::globalVariables ("xlim")
utils::globalVariables ("axis")
utils::globalVariables ("predict")
utils::globalVariables ("ylim")
utils::globalVariables ("quantile")
invisible ()
}
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues = TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")

# Random discrete grid search over GBM hyper-parameters on the airlines
# sample data. The search is capped at 5 models, so the test asserts that
# exactly 5 models end up in the grid.
gbm.random.grid.test <- function() {
    # Upload the airlines data set and print a quick summary.
    airlines <- h2o.uploadFile(locate("smalldata/airlines/allyears2k_headers.zip"),
                               destination_frame = "air.hex")
    print(summary(airlines))

    predictors <- c("Year", "Month", "CRSDepTime", "UniqueCarrier", "Origin", "Dest")

    # Hyper-parameter space to sample from:
    #   - tree depth, row/column sampling rates (per tree / per split / per level)
    #   - minimum rows per leaf, bin counts for numeric and categorical splits
    #   - minimum relative split improvement
    #   - all histogram types (QuantilesGlobal / RoundRobin help with outliers)
    hyper_params <- list(
        max_depth                        = seq(1, 10, 1),
        sample_rate                      = seq(0.2, 1, 0.01),
        col_sample_rate                  = seq(0.2, 1, 0.01),
        col_sample_rate_per_tree         = seq(0.2, 1, 0.01),
        col_sample_rate_change_per_level = seq(0.9, 1.1, 0.01),
        min_rows                         = 2^seq(0, log2(nrow(airlines)) - 1, 1),
        nbins                            = 2^seq(4, 10, 1),
        nbins_cats                       = 2^seq(4, 12, 1),
        min_split_improvement            = c(0, 1e-8, 1e-6, 1e-4),
        histogram_type                   = c("UniformAdaptive", "QuantilesGlobal", "RoundRobin")
    )

    # Random search: at most 5 models / 10 minutes, reproducible via seed,
    # early-stopped once the AUC leaderboard converges to 0.1% relative change.
    search_criteria <- list(
        strategy           = "RandomDiscrete",
        max_runtime_secs   = 600,
        max_models         = 5,
        seed               = 1234,
        stopping_rounds    = 5,
        stopping_metric    = "AUC",
        stopping_tolerance = 1e-3
    )

    airlines_grid <- h2o.grid("gbm",
                              y = "IsDepDelayed",
                              x = predictors,
                              distribution = "bernoulli",
                              seed = 1234,
                              training_frame = airlines,
                              hyper_params = hyper_params,
                              search_criteria = search_criteria)
    print(airlines_grid)

    # max_models = 5 should be the binding constraint.
    expect_that(length(airlines_grid@model_ids) == 5, is_true())
}

doTest("GBM Grid Test: Airlines Smalldata", gbm.random.grid.test)
| /implementation/h2o-fakegame/h2o-r/tests/testdir_algos/gbm/runit_GBMRandomGrid_airlines_large.R | permissive | kordikp/AutoMLprediction | R | false | false | 3,161 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
gbm.random.grid.test <- function() {
air.hex <- h2o.uploadFile(locate("smalldata/airlines/allyears2k_headers.zip"), destination_frame="air.hex")
print(summary(air.hex))
myX <- c("Year","Month","CRSDepTime","UniqueCarrier","Origin","Dest")
hyper_params = list(
## restrict the search to the range of max_depth established above
max_depth = seq(1,10,1),
## search a large space of row sampling rates per tree
sample_rate = seq(0.2,1,0.01),
## search a large space of column sampling rates per split
col_sample_rate = seq(0.2,1,0.01),
## search a large space of column sampling rates per tree
col_sample_rate_per_tree = seq(0.2,1,0.01),
## search a large space of how column sampling per split should change as a function of the depth of the split
col_sample_rate_change_per_level = seq(0.9,1.1,0.01),
## search a large space of the number of min rows in a terminal node
min_rows = 2^seq(0,log2(nrow(air.hex))-1,1),
## search a large space of the number of bins for split-finding for continuous and integer columns
nbins = 2^seq(4,10,1),
## search a large space of the number of bins for split-finding for categorical columns
nbins_cats = 2^seq(4,12,1),
## search a few minimum required relative error improvement thresholds for a split to happen
min_split_improvement = c(0,1e-8,1e-6,1e-4),
## try all histogram types (QuantilesGlobal and RoundRobin are good for numeric columns with outliers)
histogram_type = c("UniformAdaptive","QuantilesGlobal","RoundRobin")
)
search_criteria = list(
## Random grid search
strategy = "RandomDiscrete",
## limit the runtime to 10 minutes
max_runtime_secs = 600,
## build no more than 5 models
max_models = 5,
## random number generator seed to make sampling of parameter combinations reproducible
seed = 1234,
## early stopping once the leaderboard of the top 5 models is converged to 0.1% relative difference
stopping_rounds = 5,
stopping_metric = "AUC",
stopping_tolerance = 1e-3
)
air.grid <- h2o.grid("gbm", y = "IsDepDelayed", x = myX,
distribution="bernoulli",
seed=1234,
training_frame = air.hex,
hyper_params = hyper_params,
search_criteria = search_criteria)
print(air.grid)
expect_that(length(air.grid@model_ids) == 5, is_true())
}
doTest("GBM Grid Test: Airlines Smalldata", gbm.random.grid.test)
|
# Copyright 2018 Battelle Memorial Institute
# ------------------------------------------------------------------------------
# Program Name: C.1.gridding_data_reformatting.R
# Author(s): Leyang Feng, Caleb Braun
# Date Last Updated: May 29, 2018
# Program Purpose: Reformat the downscaled IAM emissions for gridding. Speciate
# VOCS if requested.
# Input Files: B.IAM_HARM-STATUS_emissions_downscaled_for_gridding_RUNSUFFIX
# Output Files: C.IAM_HARM-STATUS_emissions_reformatted_RUNSUFFIX
# Notes:
# TODO: update reference emissions so there would be no NA in X2015
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# 0. Read in global settings and headers
# Must be run from the emissions_downscaling/input directory
if ( !endsWith( getwd(), '/input' ) ) setwd( 'input' )
PARAM_DIR <- "../code/parameters/"
# Call standard script header function to read in universal header files -
# provides logging, file support, and system functions - and start the script log.
headers <- c( 'module-A_functions.R', 'all_module_functions.R' )
log_msg <- "Reformat the downscaled IAM emissions for gridding"
script_name <- "C.1.gridding_data_reformatting.R"
source( paste0( PARAM_DIR, "header.R" ) )
initialize( script_name, log_msg, headers )
# ------------------------------------------------------------------------------
# 0.5 Define IAM variable
# command_args may already be set by a wrapper script; otherwise read the CLI.
# Positions 4-6 are skipped here — presumably consumed by other pipeline
# scripts that share the same argument vector (TODO confirm).
if ( !exists( 'command_args' ) ) command_args <- commandArgs( TRUE )
iam <- command_args[ 1 ]
harm_status <- command_args[ 2 ]
input_file <- command_args[ 3 ]
run_species <- command_args[ 7 ]
# Default IAM when the script is run without arguments (e.g. interactively).
if ( is.na( iam ) ) iam <- "GCAM4"
# ------------------------------------------------------------------------------
# 1. Read mapping files and extract iam info
# read in master config file
master_config <- readData( 'MAPPINGS', 'master_config', column_names = F )
# select iam configuration line from the mapping and read the iam information as a list
iam_info_list <- iamInfoExtract( master_config, iam )
# NOTE(review): iam_name (and gridding_sector_mapping below) appear to be
# defined as a side effect of iamInfoExtract()/header.R — confirm.
printLog( paste0( 'The IAM to be processed is: ', iam_name ) )
# -----------------------------------------------------------------------------
# 2. Read in the downscaled emissions and gridding mapping file
iam_data_fname <- paste0( 'B.', iam, '_', harm_status, '_emissions_downscaled_for_gridding_', RUNSUFFIX )
iam_data <- readData( domain = 'MED_OUT', file_name = iam_data_fname )
sector_mapping <- readData( domain = 'GRIDDING',
                            domain_extension = 'gridding-mappings/',
                            file_name = gridding_sector_mapping )
# -----------------------------------------------------------------------------
# 3. Disaggregate NMVOCs
# Take the anthropogenic NMVOC emissions and speciate them into the CEDS species
VOC_SPEC <- get_constant('voc_speciation')
if ( VOC_SPEC != 'none' ) {
  REF_EM_CSV <- get_constant( 'reference_emissions' )
  historical <- readData( 'REF_EM', REF_EM_CSV, domain_extension = ref_domain_extension )
  VOC_burn_ratios <- readData( 'GRIDDING', 'VOC_ratio_BurnSectors', domain_extension = "gridding-mappings/" )
  VOC_ratios <- readData( 'GRIDDING', 'VOC_ratio_AllSectors', domain_extension = "gridding-mappings/" )
  sect_maps <- readData( 'MAPPINGS', 'IAMC_CEDS16_CEDS9' )
  # create map from CEDS16 format (ex. Fossil Fuel Fires or FFFI) to CEDS9
  # format (ex. Energy Sector)
  CEDS16_to_CEDS9 <- sect_maps %>%
    dplyr::select( CEDS16, CEDS16_abr, CEDS9 ) %>%
    dplyr::distinct()
  # the TANK sector exists for the ratios but not in the data; average the
  # ratios for the TANK sector with the SHP sector
  TANK_RATIO <- 0.7511027754013855
  # NOTE(review): 'weights' is only referenced by the commented-out pipeline
  # below; it documents how shp_corrected was derived.
  weights <- c( 1 - TANK_RATIO, TANK_RATIO )
  # from support script calculate_voc_ratios.R
  shp_corrected <- c(0, 0.0698525581123288, 0.193784516053557, 0.204299954909177,
    0.109661005208602, 0.117076899357022, 0.0519144539149665, 0.0568823442417576,
    0.00149036709803732, 0.00546467935947016, 0.0238517927949798,
    0.0144187204004595, 0.0151875808550091, 0.00919059710456345,
    0.0570838109305053, 0, 0, 0, 0, 0, 0, 0, 0.0698407196595634)
  # Overwrite the SHP row's sub-VOC ratio columns with the precomputed
  # SHP/TANK weighted average. Columns 3:25 are assumed to be VOC01..VOC25
  # (TODO confirm against VOC_ratio_AllSectors.csv).
  VOC_ratios[ VOC_ratios$sector == 'SHP', 3:25 ] <- shp_corrected
  # VOC_ratios %>%
  #   dplyr::filter( sector %in% c( 'SHP', 'TANK' ) ) %>%
  #   dplyr::mutate( sector = 'SHP' ) %>%
  #   dplyr::group_by( iso, sector ) %>%
  #   dplyr::summarise_if( is.numeric, weighted.mean, weights )
  # expand VOC_ratios sector from CEDS16_abr to CEDS16
  VOC_ratios <- VOC_ratios %>%
    dplyr::rename( CEDS16_abr = sector ) %>%
    dplyr::filter( CEDS16_abr != 'TANK' ) %>%
    dplyr::left_join( CEDS16_to_CEDS9, by = 'CEDS16_abr' )
  # select non-burning VOCs from historical and map to proper sectors
  x_base_year <- paste0( 'X', base_year )
  historical <- historical %>%
    dplyr::filter( em == 'VOC', !grepl( 'Burning', sector ) ) %>%
    dplyr::rename( base_value = !!x_base_year, CEDS16 = sector ) %>%
    dplyr::select( iso, CEDS16, base_value ) %>%
    dplyr::left_join( CEDS16_to_CEDS9, by = 'CEDS16' )
  # find sectors missing from historical, but that we have ratios for
  missing_sectors <- VOC_ratios %>%
    dplyr::anti_join( historical, by = c( 'iso', 'CEDS16' ) ) %>%
    dplyr::select( iso, CEDS16, CEDS16_abr, CEDS9 ) %>%
    dplyr::mutate( base_value = 0 )
  # calculate iso sector ratios from CEDS16 to CEDS9
  # (a missing CEDS9 group, where all base values are 0, yields NaN shares;
  # those are replaced with an equal 1/n split)
  VOC_ratio_shares <- historical %>%
    dplyr::bind_rows( missing_sectors ) %>%
    dplyr::group_by( iso, CEDS9 ) %>%
    dplyr::mutate( share = base_value / sum( base_value ) ) %>%
    dplyr::mutate( share = if_else( is.nan( share ), 1 / n(), share ) )
  # map the sub-VOC shares of each sector to CEDS9 format
  VOC_ratios_CEDS9 <- VOC_ratios %>%
    tidyr::gather( sub_VOC, ratio, VOC01:VOC25 ) %>%
    dplyr::left_join( VOC_ratio_shares, by = c( 'iso', 'CEDS16', 'CEDS16_abr', 'CEDS9' ) ) %>%
    dplyr::mutate( share = if_else( is.na( share ), 0, share ) ) %>%
    dplyr::mutate( em = 'NMVOC', iso = gsub( 'global', 'World', iso ) ) %>%
    dplyr::group_by( iso, CEDS9, em, sub_VOC ) %>%
    dplyr::summarise( ratio = sum( ratio * share ) )
  # add on open burning ratios
  VOC_ratios_CEDS9 <- VOC_burn_ratios %>%
    tidyr::gather( sub_VOC, ratio, -iso, -sector ) %>%
    dplyr::left_join( CEDS16_to_CEDS9, by = c( 'sector' = 'CEDS16_abr' ) ) %>%
    dplyr::mutate( em = 'NMVOC', sub_VOC = gsub( '\\.', '-', sub_VOC ) ) %>%
    dplyr::select( iso, CEDS9, em, sub_VOC, ratio ) %>%
    dplyr::bind_rows( VOC_ratios_CEDS9 )
  # assert that after aggregating, ratios still sum to one for each sector
  # (we round to the 8th digit because the arithmetic is not exact);
  # any rows written to the diagnostic file indicate a broken ratio table
  ratio_sums <- VOC_ratios_CEDS9 %>%
    dplyr::group_by( iso, CEDS9, em ) %>%
    dplyr::summarise( ratio = sum( ratio ) ) %>%
    dplyr::filter( round( ratio, 8 ) != 1 )
  writeData( ratio_sums, 'DIAG_OUT', 'NMVOC_ratio_sums' )
  # Check if user requested a specific sub-VOC
  # (valid sub-VOC names are the column names of the two ratio tables)
  sub_nmvocs <- gsub( '\\.', '-', union( names( VOC_ratios ), names( VOC_burn_ratios ) ) )
  if ( !is.na( run_species ) && run_species %in% sub_nmvocs )
    em_filter <- run_species
  else
    em_filter <- sub_nmvocs
  # disaggregate VOCs into sub-VOCs, then multiply each sub-VOC by its
  # corresponding ratio
  # NOTE(review): funs() is deprecated (defunct in current dplyr); if the
  # pinned dplyr version is upgraded, replace funs( . * ratio ) with the
  # lambda form ~ . * ratio.
  iam_data_sub_vocs <- iam_data %>%
    dplyr::left_join( VOC_ratios_CEDS9, by = c( 'iso', 'em', 'sector' = 'CEDS9' ) ) %>%
    dplyr::mutate( ratio = if_else( is.na( ratio ), 1, ratio ),
                   em = if_else( is.na( sub_VOC ), em, sub_VOC ) ) %>%
    dplyr::filter( em %in% em_filter ) %>%
    dplyr::mutate_at( vars( num_range( 'X', ds_start_year:ds_end_year ) ), funs( . * ratio ) ) %>%
    dplyr::select( -sub_VOC, -ratio )
  # Remove non-sub-VOC emissions if specified, otherwise keep 'VOC' original
  if ( VOC_SPEC == 'all' ) {
    iam_data <- dplyr::bind_rows( iam_data, iam_data_sub_vocs )
  } else { # 'only'
    iam_data <- iam_data_sub_vocs
  }
}
# -----------------------------------------------------------------------------
# 4. Map each sector to its short name and remove the version number
#    (the final 4 characters) from the scenario name.
iam_em <- iam_data %>%
  dplyr::inner_join( sector_mapping, by = c( 'sector' = 'sector_name' ) ) %>%
  dplyr::mutate(
    sector   = sector_short,
    scenario = substr( scenario, 1, nchar( scenario ) - 4 )
  ) %>%
  dplyr::select( -sector_short )

# -----------------------------------------------------------------------------
# 5. Write out
# Save the reformatted emissions to the intermediate output folder and
# close the script log.
out_fname <- paste0( 'C.', iam, '_', harm_status, '_emissions_reformatted_', RUNSUFFIX )
writeData( iam_em, 'MED_OUT', out_fname, meta = F )
logStop()
| /code/module-C/C.1.gridding_data_reformatting.R | permissive | iiasa/emissions_downscaling | R | false | false | 8,726 | r | # Copyright 2018 Battelle Memorial Institute
# ------------------------------------------------------------------------------
# Program Name: C.1.gridding_data_reformatting.R
# Author(s): Leyang Feng, Caleb Braun
# Date Last Updated: May 29, 2018
# Program Purpose: Reformat the downscaled IAM emissions for gridding. Speciate
# VOCS if requested.
# Input Files: B.IAM_HARM-STATUS_emissions_downscaled_for_gridding_RUNSUFFIX
# Output Files: C.IAM_HARM-STATUS_emissions_reformatted_RUNSUFFIX
# Notes:
# TODO: update reference emissions so there would be no NA in X2015
# ------------------------------------------------------------------------------
# ------------------------------------------------------------------------------
# 0. Read in global settings and headers
# Must be run from the emissions_downscaling/input directory
if ( !endsWith( getwd(), '/input' ) ) setwd( 'input' )
PARAM_DIR <- "../code/parameters/"
# Call standard script header function to read in universal header files -
# provides logging, file support, and system functions - and start the script log.
headers <- c( 'module-A_functions.R', 'all_module_functions.R' )
log_msg <- "Reformat the downscaled IAM emissions for gridding"
script_name <- "C.1.gridding_data_reformatting.R"
source( paste0( PARAM_DIR, "header.R" ) )
initialize( script_name, log_msg, headers )
# ------------------------------------------------------------------------------
# 0.5 Define IAM variable
if ( !exists( 'command_args' ) ) command_args <- commandArgs( TRUE )
iam <- command_args[ 1 ]
harm_status <- command_args[ 2 ]
input_file <- command_args[ 3 ]
run_species <- command_args[ 7 ]
if ( is.na( iam ) ) iam <- "GCAM4"
# ------------------------------------------------------------------------------
# 1. Read mapping files and extract iam info
# read in master config file
master_config <- readData( 'MAPPINGS', 'master_config', column_names = F )
# select iam configuration line from the mapping and read the iam information as a list
iam_info_list <- iamInfoExtract( master_config, iam )
printLog( paste0( 'The IAM to be processed is: ', iam_name ) )
# -----------------------------------------------------------------------------
# 2. Read in the downscaled emissions and gridding mapping file
iam_data_fname <- paste0( 'B.', iam, '_', harm_status, '_emissions_downscaled_for_gridding_', RUNSUFFIX )
iam_data <- readData( domain = 'MED_OUT', file_name = iam_data_fname )
sector_mapping <- readData( domain = 'GRIDDING',
domain_extension = 'gridding-mappings/',
file_name = gridding_sector_mapping )
# -----------------------------------------------------------------------------
# 3. Disaggregate NMVOCs
# Take the anthropogenic NMVOC emissions and speciate them into the CEDS species
VOC_SPEC <- get_constant('voc_speciation')
if ( VOC_SPEC != 'none' ) {
REF_EM_CSV <- get_constant( 'reference_emissions' )
historical <- readData( 'REF_EM', REF_EM_CSV, domain_extension = ref_domain_extension )
VOC_burn_ratios <- readData( 'GRIDDING', 'VOC_ratio_BurnSectors', domain_extension = "gridding-mappings/" )
VOC_ratios <- readData( 'GRIDDING', 'VOC_ratio_AllSectors', domain_extension = "gridding-mappings/" )
sect_maps <- readData( 'MAPPINGS', 'IAMC_CEDS16_CEDS9' )
# create map from CEDS16 format (ex. Fossil Fuel Fires or FFFI) to CEDS9
# format (ex. Energy Sector)
CEDS16_to_CEDS9 <- sect_maps %>%
dplyr::select( CEDS16, CEDS16_abr, CEDS9 ) %>%
dplyr::distinct()
# the TANK sector exists for the ratios but not in the data; average the
# ratios for the TANK sector with the SHP sector
TANK_RATIO <- 0.7511027754013855
weights <- c( 1 - TANK_RATIO, TANK_RATIO )
# from support script calculate_voc_ratios.R
shp_corrected <- c(0, 0.0698525581123288, 0.193784516053557, 0.204299954909177,
0.109661005208602, 0.117076899357022, 0.0519144539149665, 0.0568823442417576,
0.00149036709803732, 0.00546467935947016, 0.0238517927949798,
0.0144187204004595, 0.0151875808550091, 0.00919059710456345,
0.0570838109305053, 0, 0, 0, 0, 0, 0, 0, 0.0698407196595634)
VOC_ratios[ VOC_ratios$sector == 'SHP', 3:25 ] <- shp_corrected
# VOC_ratios %>%
# dplyr::filter( sector %in% c( 'SHP', 'TANK' ) ) %>%
# dplyr::mutate( sector = 'SHP' ) %>%
# dplyr::group_by( iso, sector ) %>%
# dplyr::summarise_if( is.numeric, weighted.mean, weights )
# expand VOC_ratios sector from CEDS16_abr to CEDS16
VOC_ratios <- VOC_ratios %>%
dplyr::rename( CEDS16_abr = sector ) %>%
dplyr::filter( CEDS16_abr != 'TANK' ) %>%
dplyr::left_join( CEDS16_to_CEDS9, by = 'CEDS16_abr' )
# select non-burning VOCs from historical and map to proper sectors
x_base_year <- paste0( 'X', base_year )
historical <- historical %>%
dplyr::filter( em == 'VOC', !grepl( 'Burning', sector ) ) %>%
dplyr::rename( base_value = !!x_base_year, CEDS16 = sector ) %>%
dplyr::select( iso, CEDS16, base_value ) %>%
dplyr::left_join( CEDS16_to_CEDS9, by = 'CEDS16' )
# find sectors missing from historical, but that we have ratios for
missing_sectors <- VOC_ratios %>%
dplyr::anti_join( historical, by = c( 'iso', 'CEDS16' ) ) %>%
dplyr::select( iso, CEDS16, CEDS16_abr, CEDS9 ) %>%
dplyr::mutate( base_value = 0 )
# calculate iso sector ratios from CEDS16 to CEDS9
VOC_ratio_shares <- historical %>%
dplyr::bind_rows( missing_sectors ) %>%
dplyr::group_by( iso, CEDS9 ) %>%
dplyr::mutate( share = base_value / sum( base_value ) ) %>%
dplyr::mutate( share = if_else( is.nan( share ), 1 / n(), share ) )
# map the sub-VOC shares of each sector to CEDS9 format
VOC_ratios_CEDS9 <- VOC_ratios %>%
tidyr::gather( sub_VOC, ratio, VOC01:VOC25 ) %>%
dplyr::left_join( VOC_ratio_shares, by = c( 'iso', 'CEDS16', 'CEDS16_abr', 'CEDS9' ) ) %>%
dplyr::mutate( share = if_else( is.na( share ), 0, share ) ) %>%
dplyr::mutate( em = 'NMVOC', iso = gsub( 'global', 'World', iso ) ) %>%
dplyr::group_by( iso, CEDS9, em, sub_VOC ) %>%
dplyr::summarise( ratio = sum( ratio * share ) )
# add on open burning ratios
VOC_ratios_CEDS9 <- VOC_burn_ratios %>%
tidyr::gather( sub_VOC, ratio, -iso, -sector ) %>%
dplyr::left_join( CEDS16_to_CEDS9, by = c( 'sector' = 'CEDS16_abr' ) ) %>%
dplyr::mutate( em = 'NMVOC', sub_VOC = gsub( '\\.', '-', sub_VOC ) ) %>%
dplyr::select( iso, CEDS9, em, sub_VOC, ratio ) %>%
dplyr::bind_rows( VOC_ratios_CEDS9 )
# assert that after aggregating, ratios still sum to one for each sector
# (we round to the 12th digit because the arithmetic is not exact)
ratio_sums <- VOC_ratios_CEDS9 %>%
dplyr::group_by( iso, CEDS9, em ) %>%
dplyr::summarise( ratio = sum( ratio ) ) %>%
dplyr::filter( round( ratio, 8 ) != 1 )
writeData( ratio_sums, 'DIAG_OUT', 'NMVOC_ratio_sums' )
# Check if user requested a specific sub-VOC
sub_nmvocs <- gsub( '\\.', '-', union( names( VOC_ratios ), names( VOC_burn_ratios ) ) )
if ( !is.na( run_species ) && run_species %in% sub_nmvocs )
em_filter <- run_species
else
em_filter <- sub_nmvocs
# disaggregate VOCs into sub-VOCs, then multiply each sub-VOC by its
# corresponding ratio
iam_data_sub_vocs <- iam_data %>%
dplyr::left_join( VOC_ratios_CEDS9, by = c( 'iso', 'em', 'sector' = 'CEDS9' ) ) %>%
dplyr::mutate( ratio = if_else( is.na( ratio ), 1, ratio ),
em = if_else( is.na( sub_VOC ), em, sub_VOC ) ) %>%
dplyr::filter( em %in% em_filter ) %>%
dplyr::mutate_at( vars( num_range( 'X', ds_start_year:ds_end_year ) ), funs( . * ratio ) ) %>%
dplyr::select( -sub_VOC, -ratio )
# Remove non-sub-VOC emissions if specified, otherwise keep 'VOC' original
if ( VOC_SPEC == 'all' ) {
iam_data <- dplyr::bind_rows( iam_data, iam_data_sub_vocs )
} else { # 'only'
iam_data <- iam_data_sub_vocs
}
}
# -----------------------------------------------------------------------------
# 4. Map to sector short name and remove version number in scenario name.
iam_em <- iam_data %>%
dplyr::inner_join( sector_mapping, by = c( 'sector' = 'sector_name' ) ) %>%
dplyr::mutate( sector = sector_short,
scenario = substr( scenario, 1, nchar( scenario ) - 4) ) %>%
dplyr::select( -sector_short )
# -----------------------------------------------------------------------------
# 5. Write out
# write the interpolated iam_data into intermediate output folder
out_fname <- paste0( 'C.', iam, '_', harm_status, '_emissions_reformatted_', RUNSUFFIX )
writeData( iam_em, 'MED_OUT', out_fname, meta = F )
logStop()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distsumlpmin.R
\name{distsumlpmin}
\alias{distsumlpmin}
\alias{distsumlpmin,loca.p-method}
\title{distsumlpmin at orloca package}
\usage{
distsumlpmin(
o,
x = 0,
y = 0,
p = 2,
max.iter = 100,
eps = 0.001,
verbose = FALSE,
algorithm = "Weiszfeld",
...
)
}
\arguments{
\item{o}{An object of loca.p class.}
\item{x}{The x coordinate of the starting point. Its default value is 0.}
\item{y}{The y coordinate of the starting point. Its default value is 0.}
\item{p}{The exponent of the \eqn{l_p} norm. Only values \eqn{p \ge 1} are valid. Its default value is 2.}
\item{max.iter}{Maximum number of iterations allowed. Its default value is 100.}
\item{eps}{The module of the gradient in the stop rule. Its default value is 1e-3.}
\item{verbose}{If TRUE the function produces detailed output. Its default value is FALSE.}
\item{algorithm}{The method to be use. For this version of the package, the valid values are: "gradient" for a gradient based method, "search" for local search method (this option is deprecated), "ucminf" for optimization with ucminf from ucminf package, and "Weiszfeld" for the Weiszfeld method or any of the valid method for optim function, now "Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN". "Weiszfeld" is the default value.}
}
\value{
\code{distsumlpmin} returns an array with the coordinates of the solution point.
}
\description{
\code{distsumlpmin} is the \code{distsummin} function for the norm (\eqn{l_p}).
This function returns the solution of the minimization problem.
Mainly for internal use.
}
\details{
The algorithms Weiszfeld and gradient include and optimality test for demand points.
The Weiszfeld version of the algorithm also implements slow convergence test and accelerator procedure.
If \eqn{p < 1} thus \eqn{l_p} is not a norm, so, only \eqn{p \ge 1} are valid values.
Since \eqn{l_2} norm is the Euclidean norm, when \eqn{p=2} \code{distsumlpmin} are equal to \code{distsummin}.
But the computations involved are greater for the first form.
For the SANN algorithm, max.iter is the number of evaluations of the objective function, so this method usually requires large values of max.iter to reach the optimal value.
The function zsummin is deprecated and will be removed from new versions of the package.
}
\examples{
# A new unweighted loca.p object
loca <- loca.p(x = c(-1, 1, 1, -1), y = c(-1, -1, 1, 1))
# Compute the minimum
sol<-distsummin(loca)
# Show the result
sol
# Evaluation of the objective function at solution point
distsum(loca, sol[1], sol[2])
}
\seealso{
See also \code{\link{orloca-package}}, \code{\link{loca.p}} and \code{\link{distsum}}.
}
\keyword{classes}
\keyword{internal}
\keyword{optimize}
| /man/distsumlpmin.Rd | no_license | cran/orloca | R | false | true | 2,651 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/distsumlpmin.R
\name{distsumlpmin}
\alias{distsumlpmin}
\alias{distsumlpmin,loca.p-method}
\title{distsumlpmin at orloca package}
\usage{
distsumlpmin(
o,
x = 0,
y = 0,
p = 2,
max.iter = 100,
eps = 0.001,
verbose = FALSE,
algorithm = "Weiszfeld",
...
)
}
\arguments{
\item{o}{An object of loca.p class.}
\item{x}{The x coordinate of the starting point. It's default value is 0.}
\item{y}{The y coordinate of the starting point. It's default value is 0.}
\item{max.iter}{Maximum number of iterations allowed. It's default value is 100000.}
\item{eps}{The module of the gradient in the stop rule. It's default value is 1e-3.}
\item{verbose}{If TRUE the function produces detailed output. It's default value is FALSE.}
\item{algorithm}{The method to be use. For this version of the package, the valid values are: "gradient" for a gradient based method, "search" for local search method (this option is deprecated), "ucminf" for optimization with ucminf from ucminf package, and "Weiszfeld" for the Weiszfeld method or any of the valid method for optim function, now "Nelder-Mead", "BFGS", "CG", "L-BFGS-B", "SANN". "Weiszfeld" is the default value.}
}
\value{
\code{distsummin} returns an array with the coordinates of the solution point.
}
\description{
\code{distsumlpmin} is the \code{distsummin} function for the norm (\eqn{l_p}).
This function returns the solution of the minimization problem.
Mainly for internal use.
}
\details{
The algorithms Weiszfeld and gradient include and optimality test for demand points.
The Weiszfeld version of the algorithm also implements slow convergence test and accelerator procedure.
If \eqn{p < 1} thus \eqn{l_p} is not a norm, so, only \eqn{p \ge 1} are valid values.
Since \eqn{l_2} norm is the Euclidean norm, when \eqn{p=2} \code{distsumlpmin} are equal to \code{distsummin}.
But the computations involved are greater for the first form.
max.iter for SANN algorithm is the number of evaluation of objective function, so this methos usually requires large values of max.iter to reach optimal value
The function zsummin is deprecated and will be removed from new versions of the package.
}
\examples{
# A new unweighted loca.p object
loca <- loca.p(x = c(-1, 1, 1, -1), y = c(-1, -1, 1, 1))
# Compute the minimum
sol<-distsummin(loca)
# Show the result
sol
# Evaluation of the objective function at solution point
distsum(loca, sol[1], sol[2])
}
\seealso{
See also \code{\link{orloca-package}}, \code{\link{loca.p}} and \code{\link{distsum}}.
}
\keyword{classes}
\keyword{internal}
\keyword{optimize}
|
# Worked examples from the elo package vignette:
#https://cran.r-project.org/web/packages/elo/vignettes/elo.html
library(elo)
data(tournament)
# Win indicator: TRUE when the home team outscored the visitor.
tournament$wins.A <- tournament$points.Home > tournament$points.Visitor
# Elo ratings from a precomputed win/loss indicator, constant k-factor of 20.
ex1<-elo.run(wins.A ~ team.Home + team.Visitor, data = tournament, k = 20)
# Same ratings, letting score() derive win/loss/tie from the raw points.
ex2<-elo.run(score(points.Home, points.Visitor) ~ team.Home + team.Visitor, data = tournament, k = 20)
# Margin-of-victory k-factor: the update size grows with the point differential.
ex3<-elo.run(score(points.Home, points.Visitor) ~ team.Home + team.Visitor +
k(20*log(abs(points.Home - points.Visitor) + 1)), data = tournament) | /tmp/elo_tutorial.R | no_license | remedic/tbd_league | R | false | false | 504 | r | #https://cran.r-project.org/web/packages/elo/vignettes/elo.html
library(elo)
data(tournament)
tournament$wins.A <- tournament$points.Home > tournament$points.Visitor
ex1<-elo.run(wins.A ~ team.Home + team.Visitor, data = tournament, k = 20)
ex2<-elo.run(score(points.Home, points.Visitor) ~ team.Home + team.Visitor, data = tournament, k = 20)
ex3<-elo.run(score(points.Home, points.Visitor) ~ team.Home + team.Visitor +
k(20*log(abs(points.Home - points.Visitor) + 1)), data = tournament) |
# Load packages
library("tidyverse")
library("dplyr")
library("tidyr")

# Load the national sector dataset
sectors <- read.csv("https://raw.githubusercontent.com/darshils2001/info201-project/main/datasets/National_Sector_Dataset.csv")

# Lookup table: two-digit NAICS sector code -> human-readable sector name.
naics_sector_names <- c(
  "11" = "Agriculture",
  "21" = "Mining",
  "22" = "Utilities",
  "23" = "Construction",
  "31" = "Manufacturing",
  "42" = "Wholesale Trade",
  "44" = "Retail Trade",
  "48" = "Transportation",
  "51" = "Information",
  "52" = "Finance",
  "53" = "Real Estate",
  "54" = "Professional Services",
  "55" = "Management",
  "56" = "Waste Management",
  "61" = "Educational Services",
  "62" = "Health Care",
  "71" = "Arts",
  "72" = "Food Services",
  "81" = "Other Services"
)

# Replace each sector code with its name (same sequence of gsub() calls as
# applying them one by one, in lookup-table order).
for (code in names(naics_sector_names)) {
  sectors$NAICS_SECTOR <- gsub(code, naics_sector_names[[code]], sectors$NAICS_SECTOR)
}

# Strip "%" signs and commas from the estimate column and coerce it to numeric
sectors$ESTIMATE_PERCENTAGE <- as.numeric(
  gsub("[\\%,]", "", sectors$ESTIMATE_PERCENTAGE)
)

# Aggregate table: summary statistics of the estimate percentage per sector
aggregate_table_sectors <- sectors %>%
  group_by(NAICS_SECTOR) %>%
  summarize(
    AVERAGE_PERCENT = mean(ESTIMATE_PERCENTAGE),
    MEDIAN_PERCENT = median(ESTIMATE_PERCENTAGE),
    MIN_PERCENT = min(ESTIMATE_PERCENTAGE),
    MAX_PERCENT = max(ESTIMATE_PERCENTAGE)
  )
| /scripts/aggregate_table_sector.R | no_license | darshils2001/info201-project | R | false | false | 2,083 | r | #Load packages
# Load packages
library("tidyverse")
library("dplyr")
library("tidyr")
# Load dataset
sectors <- read.csv("https://raw.githubusercontent.com/darshils2001/info201-project/main/datasets/National_Sector_Dataset.csv")
# Lookup table from two-digit NAICS sector code to sector name.
# An exact-match recode (rather than 19 chained gsub calls with numeric
# patterns) avoids accidental substring replacement inside longer codes
# and leaves any unrecognized code untouched.
naics_labels <- c(
  "11" = "Agriculture",
  "21" = "Mining",
  "22" = "Utilities",
  "23" = "Construction",
  "31" = "Manufacturing",
  "42" = "Wholesale Trade",
  "44" = "Retail Trade",
  "48" = "Transportation",
  "51" = "Information",
  "52" = "Finance",
  "53" = "Real Estate",
  "54" = "Professional Services",
  "55" = "Management",
  "56" = "Waste Management",
  "61" = "Educational Services",
  "62" = "Health Care",
  "71" = "Arts",
  "72" = "Food Services",
  "81" = "Other Services"
)
matched <- match(as.character(sectors$NAICS_SECTOR), names(naics_labels))
sectors$NAICS_SECTOR <- ifelse(
  is.na(matched),
  as.character(sectors$NAICS_SECTOR),  # unknown codes pass through unchanged
  unname(naics_labels)[matched]
)
# Strip "%" signs (and commas) so the estimates parse as numbers.
sectors$ESTIMATE_PERCENTAGE <- as.numeric(
  gsub("[\\%,]", "", sectors$ESTIMATE_PERCENTAGE)
)
# Create an aggregate table of per-sector summary statistics
# using group_by, readable column names.
aggregate_table_sectors <- sectors %>%
  group_by(NAICS_SECTOR) %>%
  summarize(
    AVERAGE_PERCENT = mean(ESTIMATE_PERCENTAGE),
    MEDIAN_PERCENT = median(ESTIMATE_PERCENTAGE),
    MIN_PERCENT = min(ESTIMATE_PERCENTAGE),
    MAX_PERCENT = max(ESTIMATE_PERCENTAGE)
  )
# S3 generic: coerce `x` to its "best" representation (numeric where
# possible, character otherwise). Dispatches on class(x).
as.best <- function(x, ...) {
  UseMethod('as.best')
}
# Apply as.best() to every column of a data frame. If a column fails to
# convert, rethrow the error with the offending column name prepended.
as.best.data.frame <- function(x, ...) {
  for (nm in names(x)) {
    x[[nm]] <- tryCatch(
      as.best(x[[nm]], ...),
      error = function(e) stop('in column ', nm, ': ', e$message)
    )
  }
  x
}
# Coerce a vector to numeric when every non-missing value is numeric-like;
# otherwise keep it as character. Values matching `na.strings` (after
# whitespace trimming) become NA. When numeric and non-numeric values are
# mixed, the numeric ones are tagged with `prefix` (via glue) to flag the
# ambiguity; an empty prefix turns a mixed vector into an error instead.
as.best.default <-
function(x, prefix = '#', na.strings = c('.', 'NA', ''), ...){
  stopifnot(length(prefix) <= 1)
  txt <- as.character(x)
  # strip leading and trailing whitespace in one pass
  txt <- gsub('^\\s*|\\s*$', '', txt)
  txt[txt %in% na.strings] <- NA
  num <- suppressWarnings(as.numeric(txt))
  lost <- !is.na(txt) & is.na(num)            # values not coercible to numeric
  if(all(is.na(num))) return(txt)             # nothing converted to numeric
  if(!any(lost)) return(num)                  # lossless conversion to numeric
  if(!length(prefix)) stop('character values mixed with numeric, e.g. ', txt[lost][[1]])
  # Mixed vector: numeric values buried in a character vector are ambiguous,
  # so tag the coercible ones with the prefix and return character.
  coercible <- !is.na(num)
  txt[coercible] <- glue(prefix, txt[coercible])
  return(txt)
}
#as.best.comment <- function(x,...)as.character(x)
#as.best.timepoint <- function(x,...)as.character(x)
| /R/as.best.R | no_license | metrumresearchgroup/metrumrg | R | false | false | 1,049 | r | as.best <-
# S3 generic: coerce input to its "best" representation (numeric where
# possible); dispatches on class(x). The target name `as.best` is assigned
# on the preceding line.
function(x,...)UseMethod('as.best')
# Apply as.best() to every column of a data frame; on failure, rethrow the
# error with the offending column name prepended so it can be located.
as.best.data.frame <- function(x,...){
  for(col in names(x)){
    tryCatch(
      x[[col]] <- as.best(x[[col]],...),
      error = function(e) stop('in column ',col,': ',e$message)
    )
  }
  x
}
# Coerce a vector to numeric when every non-missing value is numeric-like;
# otherwise keep it as character. Values matching `na.strings` (after
# whitespace trimming) become NA. When numeric and non-numeric values are
# mixed, the coercible ones are tagged with `prefix` via glue() (glue is
# defined elsewhere in this package); an empty prefix makes a mixed vector
# an error instead.
as.best.default <-
function(x,prefix='#',na.strings=c('.','NA',''),...){
  stopifnot(length(prefix)<=1)
  x <- as.character(x)
  # trim leading, then trailing, whitespace
  x <- sub('^\\s*','',x)
  x <- sub('\\s*$','',x)
  x[x %in% na.strings] <- NA
  y <- suppressWarnings(as.numeric(x))
  newNA <- !is.na(x) & is.na(y)  # values that failed numeric coercion
  if(all(is.na(y)))return(x) # nothing converted to numeric
  if(!any(newNA))return(y) # no loss on conversion to numeric
  if(!length(prefix))stop('character values mixed with numeric, e.g. ', x[newNA][[1]])
  # If we reached here, x has some values coercible to numeric and some not, maybe some NA.
  # Numeric values buried in a character vector are ambiguous
  x[!is.na(y)] <- glue(prefix,x[!is.na(y)])
  return(x)
}
#as.best.comment <- function(x,...)as.character(x)
#as.best.timepoint <- function(x,...)as.character(x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getParams.R
\name{getParams}
\alias{getParams}
\title{getParams}
\usage{
getParams(logfile)
}
\arguments{
\item{logfile}{The name of the trait data file on which BayesTraits was run.}
}
\value{
A vector of the parameter names of the model the logfile results from.
}
\description{
This function returns the names of the estimated parameters from a BayesTraits
analysis logfile for input into plotting and other analysis functions.
Takes a logfile from a BayesTraits MCMC analysis and returns a vector
containing the names of all parameters estimated during the analysis. Useful
for defining parameters for other bayestraitr functions.
}
\examples{
getParams("cool-data.txt.log.txt")
}
| /man/getParams.Rd | no_license | hferg/bayestraitr | R | false | true | 760 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getParams.R
\name{getParams}
\alias{getParams}
\title{getParams}
\usage{
getParams(logfile)
}
\arguments{
\item{logfile}{The name of the trait data file on which BayesTraits was run.}
}
\value{
A vector of the parameter names of the model the logfile results from.
}
\description{
This function returns the names of the estimated parameters from a BayesTraits
analysis logfile for input into plotting and other analysis functions.
Takes a logfile from a BayesTraits MCMC analysis and returns a vector
containing the names of all parameters estimated during the analysis. Useful
for defining parameters for other bayestraitr functions.
}
\examples{
getParams("cool-data.txt.log.txt")
}
|
Validation.HOV.PanelCmd <- function(clim.var){
listOpenFiles <- openFile_ttkcomboList()
if(WindowsOS()){
largeur0 <- 36
largeur1 <- 38
largeur2 <- 38
largeur3 <- 20
largeur4 <- 17
largeur5 <- 4
largeur6 <- 34
largeur7 <- 7
largeur8 <- 8
largeur9 <- 18
}else{
largeur0 <- 32
largeur1 <- 33
largeur2 <- 36
largeur3 <- 18
largeur4 <- 16
largeur5 <- 3
largeur6 <- 36
largeur7 <- 7
largeur8 <- 8
largeur9 <- 17
}
###################
aggFun <- switch(clim.var, "RR" = "sum", "TT" = "mean")
trhesVal <- switch(clim.var, "RR" = 1, "TT" = 20)
graphMin <- switch(clim.var, "RR" = 0, "TT" = 5)
graphMax <- switch(clim.var, "RR" = 80, "TT" = 35)
date.range <- list(start.year = 1981, start.mon = 1, start.dek = 1,
start.pen = 1, start.day = 1,
start.hour = 0, start.min = 0,
end.year = 2018, end.mon = 12, end.dek = 3,
end.pen = 6, end.day = 31,
end.hour = 23, end.min = 55)
GeneralParameters <- list(Tstep = "dekadal", STN.file = "", Extract.Date = date.range,
ncdf.file = list(dir = "", sample = "", format = "rr_mrg_%s%s%s.nc"),
type.select = "all",
shp.file = list(shp = "", attr = ""),
date.range = list(start.year = 1981, start.month = 1, end.year = 2018, end.month = 12),
aggr.series = list(aggr.data = FALSE, aggr.fun = aggFun, opr.fun = ">=", opr.thres = 0,
min.frac = list(unique = TRUE, all = 0.95,
month = rep(0.95, 12))),
stat.data = "all",
dicho.fcst = list(fun = ">=", thres = trhesVal),
volume.stat = list(user = TRUE, one.thres = TRUE,
user.val = 80, user.file = '', from = 'obs', perc = 75,
period = list(all.years = TRUE, start.year = 1981,
end.year = 2010, min.year = 5)
),
add.to.plot = list(add.shp = FALSE, shp.file = "", add.dem = FALSE, dem.file = ""),
outdir = "", clim.var = clim.var, statsVar = 'CORR', type.graph = "Scatter"
)
pointSizeI <- 1.0
.cdtData$EnvData$statMapOp <- list(presetCol = list(color = 'tim.colors', reverse = FALSE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = FALSE, levels = NULL, equidist = FALSE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
scalebar = list(add = FALSE, pos = 'bottomleft'),
pointSize = pointSizeI
)
.cdtData$EnvData$GraphOp <- list(
scatter = list(
xlim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
ylim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
point = list(pch = 20, cex = 0.9, col = 'grey10'),
line = list(draw = TRUE, lwd = 2, col = 'red')
),
cdf = list(
xlim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
ylim = list(is.min = FALSE, min = 0.05, is.max = FALSE, max = 1),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
legend = list(add = TRUE, obs = 'Station', est = 'Estimate'),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(obs = list(type = 'line', line = "blue", points = "cyan", lwd = 2, pch = 21, cex = 1),
est = list(type = 'line', line = "red", points = "pink", lwd = 2, pch = 21, cex = 1))
),
line = list(
xlim = list(is.min = FALSE, min = "1981-01-01", is.max = FALSE, max = "2017-12-31"),
ylim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
legend = list(add = TRUE, obs = 'Station', est = 'Estimate'),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(obs = list(type = 'line', line = "blue", points = "cyan", lwd = 2, pch = 21, cex = 1),
est = list(type = 'line', line = "red", points = "pink", lwd = 2, pch = 21, cex = 1))
)
)
.cdtData$EnvData$SHPOp <- list(col = "black", lwd = 1.5)
.cdtData$EnvData$dem$Opt <- list(
user.colors = list(custom = FALSE, color = NULL),
user.levels = list(custom = FALSE, levels = NULL, equidist = FALSE),
preset.colors = list(color = 'gray.colors', reverse = FALSE),
add.hill = FALSE
)
MOIS <- format(ISOdate(2014, 1:12, 1), "%b")
###################
xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtValidation_HOV_leftCmd.xml")
lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
.cdtData$EnvData$message <- lang.dlg[['message']]
###################
.cdtData$EnvData$zoom$xx1 <- tclVar()
.cdtData$EnvData$zoom$xx2 <- tclVar()
.cdtData$EnvData$zoom$yy1 <- tclVar()
.cdtData$EnvData$zoom$yy2 <- tclVar()
.cdtData$EnvData$zoom$pressButP <- tclVar(0)
.cdtData$EnvData$zoom$pressButM <- tclVar(0)
.cdtData$EnvData$zoom$pressButRect <- tclVar(0)
.cdtData$EnvData$zoom$pressButDrag <- tclVar(0)
.cdtData$EnvData$pressGetCoords <- tclVar(0)
ZoomXYval0 <- NULL
###################
.cdtEnv$tcl$main$cmd.frame <- tkframe(.cdtEnv$tcl$main$panel.left)
tknote.cmd <- bwNoteBook(.cdtEnv$tcl$main$cmd.frame)
cmd.tab1 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['1']])
cmd.tab2 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['2']])
cmd.tab3 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['3']])
cmd.tab4 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['4']])
cmd.tab5 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['5']])
bwRaiseTab(tknote.cmd, cmd.tab1)
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab3, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab4, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab5, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab1, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab2, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab3, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab4, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab5, 0, weight = 1)
#######################################################################################################
#Tab1
subfr1 <- bwTabScrollableFrame(cmd.tab1)
##############################################
frInputData <- ttklabelframe(subfr1, text = lang.dlg[['label']][['1']], relief = 'groove')
file.period <- tclVar()
CbperiodVAL <- .cdtEnv$tcl$lang$global[['combobox']][['1']][3:6]
periodVAL <- c('daily', 'pentad', 'dekadal', 'monthly')
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% GeneralParameters$Tstep]
file.stnfl <- tclVar(GeneralParameters$STN.file)
dirNetCDF <- tclVar(GeneralParameters$ncdf.file$dir)
txt.tstep <- tklabel(frInputData, text = lang.dlg[['label']][['2']], anchor = 'e', justify = 'right')
cb.tstep <- ttkcombobox(frInputData, values = CbperiodVAL, textvariable = file.period)
txt.stnfl <- tklabel(frInputData, text = lang.dlg[['label']][['3']], anchor = 'w', justify = 'left')
cb.stnfl <- ttkcombobox(frInputData, values = unlist(listOpenFiles), textvariable = file.stnfl, width = largeur0)
bt.stnfl <- tkbutton(frInputData, text = "...")
txt.dir.ncdf <- tklabel(frInputData, text = lang.dlg[['label']][['4']], anchor = 'w', justify = 'left')
set.dir.ncdf <- ttkbutton(frInputData, text = .cdtEnv$tcl$lang$global[['button']][['5']])
en.dir.ncdf <- tkentry(frInputData, textvariable = dirNetCDF, width = largeur1)
bt.dir.ncdf <- tkbutton(frInputData, text = "...")
#######################
tkgrid(txt.tstep, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(cb.tstep, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(txt.stnfl, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stnfl, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnfl, row = 3, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.dir.ncdf, row = 4, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(set.dir.ncdf, row = 4, column = 3, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.dir.ncdf, row = 5, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.dir.ncdf, row = 5, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
helpWidget(cb.tstep, lang.dlg[['tooltip']][['1']], lang.dlg[['status']][['1']])
helpWidget(cb.stnfl, lang.dlg[['tooltip']][['2']], lang.dlg[['status']][['2']])
helpWidget(bt.stnfl, lang.dlg[['tooltip']][['3']], lang.dlg[['status']][['3']])
helpWidget(en.dir.ncdf, lang.dlg[['tooltip']][['4']], lang.dlg[['status']][['4']])
helpWidget(bt.dir.ncdf, lang.dlg[['tooltip']][['3a']], lang.dlg[['status']][['3a']])
helpWidget(set.dir.ncdf, lang.dlg[['tooltip']][['2a']], lang.dlg[['status']][['2a']])
######################
tkconfigure(bt.stnfl, command = function(){
dat.opfiles <- getOpenFiles(.cdtEnv$tcl$main$win)
if(!is.null(dat.opfiles)){
update.OpenFiles('ascii', dat.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- dat.opfiles[[1]]
tclvalue(file.stnfl) <- dat.opfiles[[1]]
lapply(list(cb.stnfl, cb.shpF, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
}
})
tkconfigure(set.dir.ncdf, command = function(){
GeneralParameters[["ncdf.file"]] <<- getInfoNetcdfData(.cdtEnv$tcl$main$win,
GeneralParameters[["ncdf.file"]],
str_trim(tclvalue(dirNetCDF)),
str_trim(tclvalue(file.period)))
})
tkconfigure(bt.dir.ncdf, command = function(){
dirnc <- tk_choose.dir(getwd(), "")
tclvalue(dirNetCDF) <- if(!is.na(dirnc)) dirnc else ""
})
##############################################
btDateRange <- ttkbutton(subfr1, text = lang.dlg[['button']][['0a']])
tkconfigure(btDateRange, command = function(){
tstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
GeneralParameters[["Extract.Date"]] <<- getInfoDateRange(.cdtEnv$tcl$main$win,
GeneralParameters[["Extract.Date"]],
tstep)
})
helpWidget(btDateRange, lang.dlg[['tooltip']][['4a']], lang.dlg[['status']][['4a']])
##############################################
frameDirSav <- ttklabelframe(subfr1, text = lang.dlg[['label']][['5']], relief = 'groove')
file.save1 <- tclVar(GeneralParameters$outdir)
en.dir.save <- tkentry(frameDirSav, textvariable = file.save1, width = largeur1)
bt.dir.save <- tkbutton(frameDirSav, text = "...")
tkgrid(en.dir.save, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dir.save, row = 0, column = 5, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(en.dir.save, lang.dlg[['tooltip']][['5']], lang.dlg[['status']][['5']])
helpWidget(bt.dir.save, lang.dlg[['tooltip']][['6']], lang.dlg[['status']][['6']])
#######################
tkconfigure(bt.dir.save, command = function() fileORdir2Save(file.save1, isFile = FALSE))
#############################
tkgrid(frInputData, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(btDateRange, row = 1, column = 0, sticky = 'we', padx = 1, pady = 3, ipadx = 1, ipady = 1)
tkgrid(frameDirSav, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
subfr2 <- bwTabScrollableFrame(cmd.tab2)
##############################################
frameSelect <- ttklabelframe(subfr2, text = lang.dlg[['label']][['19a']], relief = 'groove')
type.select <- tclVar()
SELECTALL <- lang.dlg[['combobox']][['4']]
TypeSelect <- c('all', 'rect', 'poly')
tclvalue(type.select) <- SELECTALL[TypeSelect %in% GeneralParameters$type.select]
txt.type.select <- tklabel(frameSelect, text = lang.dlg[['label']][['19b']], anchor = 'e', justify = 'right')
cb.type.select <- ttkcombobox(frameSelect, values = SELECTALL, textvariable = type.select)
tkgrid(txt.type.select, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(cb.type.select, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#######################
.cdtData$EnvData$type.select <- GeneralParameters$type.select
tkbind(cb.type.select, "<<ComboboxSelected>>", function(){
.cdtData$EnvData$selectedPolygon <- NULL
.cdtData$EnvData$type.select <- TypeSelect[SELECTALL %in% str_trim(tclvalue(type.select))]
if(.cdtData$EnvData$type.select == 'all'){
statelonlat <- 'disabled'
statepolygon <- 'disabled'
}
if(.cdtData$EnvData$type.select == 'rect'){
statelonlat <- 'normal'
statepolygon <- 'disabled'
}
if(.cdtData$EnvData$type.select == 'poly'){
statelonlat <- 'disabled'
statepolygon <- 'normal'
if(tclvalue(.cdtData$EnvData$namePoly) != ''){
shpfopen <- getShpOpenData(file.dispShp)
if(!is.null(shpfopen)){
shpf <- shpfopen[[2]]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
.cdtData$EnvData$selectedPolygon <- getBoundaries(shpf[shpf@data[, ids] == tclvalue(.cdtData$EnvData$namePoly), ])
}
}
}
tkconfigure(en.minlon, state = statelonlat)
tkconfigure(en.maxlon, state = statelonlat)
tkconfigure(en.minlat, state = statelonlat)
tkconfigure(en.maxlat, state = statelonlat)
tkconfigure(.cdtData$EnvData$cb.shpAttr, state = statepolygon)
tkconfigure(cb.Polygon, state = statepolygon)
##
tclvalue(.cdtData$EnvData$minlonRect) <- ''
tclvalue(.cdtData$EnvData$maxlonRect) <- ''
tclvalue(.cdtData$EnvData$minlatRect) <- ''
tclvalue(.cdtData$EnvData$maxlatRect) <- ''
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0)
{
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
tkdelete(tkwinfo('children', .cdtData$OpenTab$Data[[tabid]][[1]][[2]]), 'rect')
}
}
}
})
##############################################
frameShp <- ttklabelframe(subfr2, text = lang.dlg[['label']][['20']], relief = 'groove')
file.dispShp <- tclVar(GeneralParameters$shp.file$shp)
shpAttr <- tclVar(GeneralParameters$shp.file$attr)
.cdtData$EnvData$namePoly <- tclVar()
cb.shpF <- ttkcombobox(frameShp, values = unlist(listOpenFiles), textvariable = file.dispShp, width = largeur0)
bt.shpF <- tkbutton(frameShp, text = "...")
txt.attr.shpF <- tklabel(frameShp, text = lang.dlg[['label']][['21']], anchor = 'w', justify = 'left')
.cdtData$EnvData$cb.shpAttr <- ttkcombobox(frameShp, values='', textvariable = shpAttr, state = 'disabled')
cb.Polygon <- ttkcombobox(frameShp, values = '', textvariable = .cdtData$EnvData$namePoly, state = 'disabled')
tkgrid(cb.shpF, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.shpF, row = 0, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
tkgrid(txt.attr.shpF, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(.cdtData$EnvData$cb.shpAttr, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 2)
tkgrid(cb.Polygon, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 2)
#######################
tkconfigure(bt.shpF, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.dispShp) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
lapply(list(cb.stnfl, cb.shpF, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
###
shpf <- getShpOpenData(file.dispShp)
dat <- shpf[[2]]@data
AttrTable <- names(dat)
tclvalue(shpAttr) <- AttrTable[1]
adminN <- as.character(dat[, 1])
name.poly <- levels(as.factor(adminN))
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
tclvalue(.cdtData$EnvData$namePoly) <- name.poly[1]
tkconfigure(.cdtData$EnvData$cb.shpAttr, values = AttrTable)
tkconfigure(cb.Polygon, values = name.poly)
}
})
#######################
tkbind(cb.shpF, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(file.dispShp)
if(!is.null(shpf)){
dat <- shpf[[2]]@data
AttrTable <- names(dat)
tclvalue(shpAttr) <- AttrTable[1]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
adminN <- as.character(dat[, ids])
name.poly <- levels(as.factor(adminN))
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
}else{
AttrTable <- ''
tclvalue(shpAttr) <- ''
name.poly <- ''
tclvalue(.cdtData$EnvData$namePoly) <- ''
}
tkconfigure(.cdtData$EnvData$cb.shpAttr, values = AttrTable)
tkconfigure(cb.Polygon, values = name.poly)
})
########################
tkbind(.cdtData$EnvData$cb.shpAttr, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(file.dispShp)
if(!is.null(shpf)){
dat <- shpf[[2]]@data
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
adminN <- as.character(dat[, ids])
name.poly <- levels(as.factor(adminN))
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
}else{
name.poly <- ''
}
tclvalue(.cdtData$EnvData$namePoly) <- name.poly[1]
tkconfigure(cb.Polygon, values = name.poly)
})
########################
tkbind(cb.Polygon, "<<ComboboxSelected>>", function(){
.cdtData$EnvData$selectedPolygon <- NULL
if(tclvalue(.cdtData$EnvData$namePoly) != ''){
shpfopen <- getShpOpenData(file.dispShp)
if(!is.null(shpfopen)){
shpf <- shpfopen[[2]]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
spoly <- shpf@data[, ids] == tclvalue(.cdtData$EnvData$namePoly)
.cdtData$EnvData$selectedPolygon <- getBoundaries(shpf[spoly, ])
}
}
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0)
{
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
}
}
}
})
##############################################
bt.dispMap <- ttkbutton(subfr2, text = lang.dlg[['button']][['0b']])
#######################
.cdtData$EnvData$tab$MapSelect <- NULL
tkconfigure(bt.dispMap, command = function(){
donne <- getStnOpenData(file.stnfl)
shpofile <- getShpOpenData(file.dispShp)
if(!is.null(donne)){
.cdtData$EnvData$donne <- donne[1:3, -1]
lonStn <- as.numeric(.cdtData$EnvData$donne[2, ])
latStn <- as.numeric(.cdtData$EnvData$donne[3, ])
lo1 <- min(lonStn, na.rm = TRUE)
lo2 <- max(lonStn, na.rm = TRUE)
la1 <- min(latStn, na.rm = TRUE)
la2 <- max(latStn, na.rm = TRUE)
plotOK <- TRUE
shpf <- shpofile[[2]]
.cdtData$EnvData$ocrds <- getBoundaries(shpf)
.cdtData$EnvData$shpf <- shpf
}else{
plotOK <- FALSE
Insert.Messages.Out(lang.dlg[['message']][['0a']], TRUE, 'e')
}
########
if(.cdtData$EnvData$type.select == 'poly' & plotOK){
if(!is.null(shpofile)){
shpf <- shpofile[[2]]
.cdtData$EnvData$ocrds <- getBoundaries(shpf)
.cdtData$EnvData$shpf <- shpf
bbxshp <- round(bbox(shpf), 4)
lo1 <- min(lo1, bbxshp[1, 1])
lo2 <- max(lo2, bbxshp[1, 2])
la1 <- min(la1, bbxshp[2, 1])
la2 <- max(la2, bbxshp[2, 2])
plotOK <- TRUE
}else{
plotOK <- FALSE
Insert.Messages.Out(lang.dlg[['message']][['0b']], TRUE, 'e')
}
}
########
if(plotOK){
ZoomXYval0 <<- c(lo1, lo2, la1, la2)
tclvalue(.cdtData$EnvData$zoom$xx1) <- lo1
tclvalue(.cdtData$EnvData$zoom$xx2) <- lo2
tclvalue(.cdtData$EnvData$zoom$yy1) <- la1
tclvalue(.cdtData$EnvData$zoom$yy2) <- la2
.cdtData$EnvData$ZoomXYval <- ZoomXYval0
imgContainer <- displayMap4Validation(.cdtData$EnvData$tab$MapSelect)
.cdtData$EnvData$tab$MapSelect <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$MapSelect)
}
})
##############################################
frameIMgMan <- tkframe(subfr2)
#######################
frameZoom <- ttklabelframe(frameIMgMan, text = "ZOOM", relief = 'groove')
.cdtData$EnvData$zoom$btZoomP <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$plus, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btZoomM <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$moins, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btZoomRect <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$rect, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btPanImg <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$pan, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btRedraw <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$redraw, relief = 'raised', state = 'disabled')
.cdtData$EnvData$zoom$btReset <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$reset, relief = 'raised')
#######################
tkgrid(.cdtData$EnvData$zoom$btZoomP, row = 0, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btZoomM, row = 0, column = 1, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btZoomRect, row = 0, column = 2, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btReset, row = 1, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btRedraw, row = 1, column = 1, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btPanImg, row = 1, column = 2, sticky = 'nswe', rowspan = 1, columnspan = 1)
helpWidget(.cdtData$EnvData$zoom$btZoomP, lang.dlg[['tooltip']][['12']], lang.dlg[['status']][['12']])
helpWidget(.cdtData$EnvData$zoom$btZoomM, lang.dlg[['tooltip']][['13']], lang.dlg[['status']][['13']])
helpWidget(.cdtData$EnvData$zoom$btZoomRect, lang.dlg[['tooltip']][['14']], lang.dlg[['status']][['14']])
helpWidget(.cdtData$EnvData$zoom$btPanImg, lang.dlg[['tooltip']][['15']], lang.dlg[['status']][['15']])
helpWidget(.cdtData$EnvData$zoom$btRedraw, lang.dlg[['tooltip']][['16']], lang.dlg[['status']][['16']])
helpWidget(.cdtData$EnvData$zoom$btReset, lang.dlg[['tooltip']][['17']], lang.dlg[['status']][['17']])
##############################################
frameCoord <- tkframe(frameIMgMan, relief = 'groove', borderwidth = 2)
.cdtData$EnvData$minlonRect <- tclVar()
.cdtData$EnvData$maxlonRect <- tclVar()
.cdtData$EnvData$minlatRect <- tclVar()
.cdtData$EnvData$maxlatRect <- tclVar()
txt.minLab <- tklabel(frameCoord, text = lang.dlg[['label']][['22']])
txt.maxLab <- tklabel(frameCoord, text = lang.dlg[['label']][['23']])
txt.lonLab <- tklabel(frameCoord, text = lang.dlg[['label']][['24']], anchor = 'e', justify = 'right')
txt.latLab <- tklabel(frameCoord, text = lang.dlg[['label']][['25']], anchor = 'e', justify = 'right')
en.minlon <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$minlonRect, justify = "left", state = 'disabled')
en.maxlon <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$maxlonRect, justify = "left", state = 'disabled')
en.minlat <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$minlatRect, justify = "left", state = 'disabled')
en.maxlat <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$maxlatRect, justify = "left", state = 'disabled')
tkgrid(txt.minLab, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(txt.maxLab, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(txt.lonLab, row = 1, column = 0, sticky = 'e', rowspan = 1, columnspan = 1)
tkgrid(txt.latLab, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 1)
tkgrid(en.minlon, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.maxlon, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.minlat, row = 2, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.maxlat, row = 2, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
##############################################
.cdtData$EnvData$bt.select <- tkbutton(frameIMgMan, text = lang.dlg[['button']][['0c']], relief = 'raised', bg = 'lightblue')
##############################################
tkgrid(frameZoom, row = 0, column = 0, sticky = 'news', rowspan = 2, padx = 1, ipady = 5)
tkgrid(frameCoord, row = 0, column = 1, sticky = 'we', rowspan = 1)
tkgrid(.cdtData$EnvData$bt.select, row = 1, column = 1, sticky = 'we', rowspan = 1)
##############################################
bt.extract.station <- ttkbutton(subfr2, text = lang.dlg[['button']][['0d']])
tkconfigure(bt.extract.station, command = function(){
GeneralParameters$clim.var <- clim.var
GeneralParameters$Tstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
GeneralParameters$STN.file <- str_trim(tclvalue(file.stnfl))
GeneralParameters$ncdf.file$dir <- str_trim(tclvalue(dirNetCDF))
GeneralParameters$outdir <- str_trim(tclvalue(file.save1))
GeneralParameters$shp.file$shp <- str_trim(tclvalue(file.dispShp))
GeneralParameters$shp.file$attr <- str_trim(tclvalue(shpAttr))
GeneralParameters$type.select <- TypeSelect[SELECTALL %in% str_trim(tclvalue(type.select))]
GeneralParameters$Geom <- NULL
GeneralParameters$Geom$minlon <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$minlonRect)))
GeneralParameters$Geom$maxlon <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$maxlonRect)))
GeneralParameters$Geom$minlat <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$minlatRect)))
GeneralParameters$Geom$maxlat <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$maxlatRect)))
GeneralParameters$Geom$namePoly <- str_trim(tclvalue(.cdtData$EnvData$namePoly))
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
Insert.Messages.Out(lang.dlg[['message']][['0c']], TRUE, "i")
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
ret <- tryCatch(
{
HOV_DataExtraction(GeneralParameters)
},
warning = function(w) warningFun(w),
error = function(e) errorFun(e),
finally = {
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
}
)
if(!is.null(ret)){
if(ret == 0){
Insert.Messages.Out(lang.dlg[['message']][['0d']], TRUE, "s")
}else Insert.Messages.Out(lang.dlg[['message']][['0e']], TRUE, "e")
}else Insert.Messages.Out(lang.dlg[['message']][['0e']], TRUE, "e")
})
##############################################
# Stack the tab-2 frames: selection type, shapefile inputs, map display
# button, manual-extent frame, then the extraction button.
tkgrid(frameSelect, row = 0, column = 0, sticky = '')
tkgrid(frameShp, row = 1, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.dispMap, row = 2, column = 0, sticky = 'we', pady = 3)
tkgrid(frameIMgMan, row = 3, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.extract.station, row = 4, column = 0, sticky = 'we', pady = 3)
##############################################
# Zoom-reset: restore the initial map extent (ZoomXYval0) and redraw the
# map image if the currently selected notebook tab is the one displaying it.
tkconfigure(.cdtData$EnvData$zoom$btReset, command = function(){
.cdtData$EnvData$ZoomXYval <- ZoomXYval0
tclvalue(.cdtData$EnvData$zoom$xx1) <- ZoomXYval0[1]
tclvalue(.cdtData$EnvData$zoom$xx2) <- ZoomXYval0[2]
tclvalue(.cdtData$EnvData$zoom$yy1) <- ZoomXYval0[3]
tclvalue(.cdtData$EnvData$zoom$yy2) <- ZoomXYval0[4]
# Index of the active notebook tab (tkindex is 0-based, OpenTab lists are 1-based).
tabid <- as.numeric(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0){
# Only redraw when the active tab is an image tab showing the selection map.
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
}
}
}
})
##########################
# The five mouse-tool buttons (zoom in, zoom out, zoom rectangle, pan,
# pixel select) are mutually exclusive: pressing one must set its tcl press
# flag to 1, clear the other four, highlight the pressed button
# (red/disabled) and restore the others (lightblue/normal). The reset
# button clears every flag and restores every button.
# The original code repeated the same 10-line body six times; it is
# factored here into a single helper to keep the six handlers in sync.
#
# active: one of "P", "M", "Rect", "Drag", "Select", or NULL for reset.
setActivePointerTool.HOV <- function(active){
    zoom <- .cdtData$EnvData$zoom
    # tcl flag variables keyed by tool name; the pixel-select flag lives
    # outside $zoom (.cdtData$EnvData$pressGetCoords).
    pressVars <- list(P = zoom$pressButP, M = zoom$pressButM,
                      Rect = zoom$pressButRect, Drag = zoom$pressButDrag,
                      Select = .cdtData$EnvData$pressGetCoords)
    buttons <- list(P = zoom$btZoomP, M = zoom$btZoomM,
                    Rect = zoom$btZoomRect, Drag = zoom$btPanImg,
                    Select = .cdtData$EnvData$bt.select)
    for(nm in names(pressVars)){
        isActive <- !is.null(active) && nm == active
        tclvalue(pressVars[[nm]]) <- if(isActive) 1 else 0
        if(isActive)
            tkconfigure(buttons[[nm]], relief = 'raised', bg = 'red', state = 'disabled')
        else
            tkconfigure(buttons[[nm]], relief = 'raised', bg = 'lightblue', state = 'normal')
    }
}

tkbind(.cdtData$EnvData$zoom$btReset, "<Button-1>", function() setActivePointerTool.HOV(NULL))
tkbind(.cdtData$EnvData$zoom$btZoomP, "<Button-1>", function() setActivePointerTool.HOV("P"))
tkbind(.cdtData$EnvData$zoom$btZoomM, "<Button-1>", function() setActivePointerTool.HOV("M"))
tkbind(.cdtData$EnvData$zoom$btZoomRect, "<Button-1>", function() setActivePointerTool.HOV("Rect"))
tkbind(.cdtData$EnvData$zoom$btPanImg, "<Button-1>", function() setActivePointerTool.HOV("Drag"))
tkbind(.cdtData$EnvData$bt.select, "<Button-1>", function() setActivePointerTool.HOV("Select"))
#######################################################################################################
#Tab3
subfr3 <- bwTabScrollableFrame(cmd.tab3)
##############################################
# Frame for loading a previously saved validation output file (.rds).
# The checkbox toggles between computing fresh statistics and reloading
# an existing result; entry/button start disabled when the box is unchecked.
frameHOV <- ttklabelframe(subfr3, text = lang.dlg[['label']][['6']], relief = 'groove')
validExist <- tclVar(0)
file.hovd <- tclVar()
stateHOVd <- if(tclvalue(validExist) == "1") "normal" else "disabled"
chk.hovd <- tkcheckbutton(frameHOV, variable = validExist, text = lang.dlg[['checkbutton']][['1']], anchor = 'w', justify = 'left')
en.hovd <- tkentry(frameHOV, textvariable = file.hovd, width = largeur1 + 5, state = stateHOVd)
bt.hovd <- ttkbutton(frameHOV, text = .cdtEnv$tcl$lang$global[['button']][['6']], state = stateHOVd)
tkgrid(chk.hovd, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.hovd, row = 0, column = 4, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.hovd, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
###############
# Browse for a saved validation .rds file, load it into .cdtData$EnvData,
# then refresh the dependent widgets (station selectors, map/graph controls)
# so they reflect the loaded data. NOTE: this callback references widgets
# created later in this script (cb.stat.sel, cb.stn.graph, ...); that is
# safe because the command only runs after the whole panel is built.
tkconfigure(bt.hovd, command = function(){
path.hovd <- tclvalue(tkgetOpenFile(initialdir = getwd(), filetypes = .cdtEnv$tcl$data$filetypes6))
if(path.hovd == "") return(NULL)
tclvalue(file.hovd) <- path.hovd
if(file.exists(str_trim(tclvalue(file.hovd)))){
hovd.data <- try(readRDS(str_trim(tclvalue(file.hovd))), silent = TRUE)
if(inherits(hovd.data, "try-error")){
Insert.Messages.Out(lang.dlg[['message']][['4']], TRUE, 'e')
Insert.Messages.Out(gsub('[\r\n]', '', hovd.data[1]), TRUE, 'e')
return(NULL)
}
# Populate the environment from the loaded file.
.cdtData$EnvData$file.hovd <- str_trim(tclvalue(file.hovd))
.cdtData$EnvData$GeneralParameters <- hovd.data$GeneralParameters
.cdtData$EnvData$cdtData <- hovd.data$cdtData
.cdtData$EnvData$stnData <- hovd.data$stnData
.cdtData$EnvData$ncdfData <- hovd.data$ncdfData
# opDATA/Statistics are only present if statistics were already computed.
if(!is.null(hovd.data$opDATA)){
.cdtData$EnvData$opDATA <- hovd.data$opDATA
.cdtData$EnvData$Statistics <- hovd.data$Statistics
}
###
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% hovd.data$GeneralParameters$Tstep]
# Enable/disable station-specific widgets; mirrors the logic of the
# cb.stat.data <<ComboboxSelected>> handler further down.
if(!is.null(.cdtData$EnvData$opDATA$id)){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
stateDispSTN <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stat.sel, values = .cdtData$EnvData$opDATA$id, state = stateDispSTN)
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(bt.stat.prev, state = stateDispSTN)
tkconfigure(bt.stat.next, state = stateDispSTN)
stateMaps <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stats.maps, state = stateMaps)
tkconfigure(bt.stats.maps, state = stateMaps)
tkconfigure(cb.plot.type, state = stateMaps)
tkconfigure(bt.stats.Opt, state = stateMaps)
stateStnID <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stn.graph, values = .cdtData$EnvData$opDATA$id, state = stateStnID)
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(bt.stn.graph.prev, state = stateStnID)
tkconfigure(bt.stn.graph.next, state = stateStnID)
# 'all' (pooled) data cannot plot the per-station "Lines" graph type.
itype <- if(statsdata == 'all') 1:2 else 1:3
CbTypeGRAPH <- typeGraphCombo[itype]
if(statsdata == 'all'){
if(str_trim(tclvalue(type.graph)) == typeGraphCombo[3])
tclvalue(type.graph) <- typeGraphCombo[1]
}
tkconfigure(cb.stats.graph, values = CbTypeGRAPH)
}
}
})
###############
# <Button-1> fires BEFORE the checkbutton variable toggles, so the tests
# below are deliberately inverted: validExist == '1' means the box is about
# to become unchecked. Toggling also enables/disables the whole tab-1/tab-2
# pages (no need to set inputs when reloading a saved validation).
tkbind(chk.hovd, "<Button-1>", function(){
stateHOVd <- if(tclvalue(validExist) == '1') 'disabled' else 'normal'
tkconfigure(en.hovd, state = stateHOVd)
tkconfigure(bt.hovd, state = stateHOVd)
stateBTEx <- if(tclvalue(validExist) == '1') 'normal' else 'disabled'
tcl(tknote.cmd, 'itemconfigure', cmd.tab1$IDtab, state = stateBTEx)
tcl(tknote.cmd, 'itemconfigure', cmd.tab2$IDtab, state = stateBTEx)
})
##############################################
# Season/period frame: start-end years and start-end months used to subset
# the series before computing validation statistics. The tclVars defined
# here (start.year, end.year, start.mois, end.mois) are read back by the
# bt.calc.stat command below.
frameSeason <- ttklabelframe(subfr3, text = lang.dlg[['label']][['7']], relief = 'groove')
##############
# Year range sub-frame.
fr.year <- ttklabelframe(frameSeason, text = lang.dlg[['label']][['9']], relief = 'sunken', labelanchor = "n", borderwidth = 2)
start.year <- tclVar(GeneralParameters$date.range$start.year)
end.year <- tclVar(GeneralParameters$date.range$end.year)
txt.to1 <- tklabel(fr.year, text = paste0('-', lang.dlg[['label']][['10']], '-'))
en.years1 <- tkentry(fr.year, width = 5, textvariable = start.year, justify = 'right')
en.years2 <- tkentry(fr.year, width = 5, textvariable = end.year, justify = 'right')
tkgrid(en.years1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(txt.to1, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(en.years2, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
helpWidget(en.years1, lang.dlg[['tooltip']][['7']], lang.dlg[['status']][['7']])
helpWidget(en.years2, lang.dlg[['tooltip']][['8']], lang.dlg[['status']][['8']])
##############
# Month (season) range sub-frame; MOIS holds the month labels.
fr.seas <- ttklabelframe(frameSeason, text = lang.dlg[['label']][['8']], relief = 'sunken', labelanchor = "n", borderwidth = 2)
mon1 <- as.numeric(str_trim(GeneralParameters$date.range$start.month))
mon2 <- as.numeric(str_trim(GeneralParameters$date.range$end.month))
start.mois <- tclVar(MOIS[mon1])
end.mois <- tclVar(MOIS[mon2])
txt.to2 <- tklabel(fr.seas, text = paste0('-', lang.dlg[['label']][['10']], '-'))
cb.month1 <- ttkcombobox(fr.seas, values = MOIS, textvariable = start.mois, width = 5)
cb.month2 <- ttkcombobox(fr.seas, values = MOIS, textvariable = end.mois, width = 5)
tkgrid(cb.month1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(txt.to2, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(cb.month2, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
helpWidget(cb.month1, lang.dlg[['tooltip']][['9']], lang.dlg[['status']][['9']])
helpWidget(cb.month2, lang.dlg[['tooltip']][['10']], lang.dlg[['status']][['10']])
##############
# Spacer label keeps the two sub-frames apart.
sepSeason <- tklabel(frameSeason, text = "", width = largeur5)
tkgrid(fr.seas, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(sepSeason, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(fr.year, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
##############################################
# Series-aggregation frame: checkbox enables aggregating the series before
# validation; the button opens the aggregation-parameters dialog.
frameAggr <- ttklabelframe(subfr3, text = lang.dlg[['label']][['11']], relief = 'groove')
aggr.data <- tclVar(GeneralParameters$aggr.series$aggr.data)
stateAggr <- if(GeneralParameters$aggr.series$aggr.data) "normal" else "disabled"
chk.aggrdata <- tkcheckbutton(frameAggr, variable = aggr.data, text = lang.dlg[['checkbutton']][['2']], anchor = 'w', justify = 'left', width = largeur6)
bt.aggrPars <- ttkbutton(frameAggr, text = lang.dlg[['button']][['1']], state = stateAggr)
tkgrid(chk.aggrdata, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.aggrPars, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
########
# Dialog writes back into the enclosing GeneralParameters (hence <<-).
tkconfigure(bt.aggrPars, command = function(){
GeneralParameters[['aggr.series']] <<- getInfo_AggregateFun(.cdtEnv$tcl$main$win,
GeneralParameters[['aggr.series']])
})
# <Button-1> fires before the variable toggles, hence the inverted test.
tkbind(chk.aggrdata, "<Button-1>", function(){
stateAggr <- if(tclvalue(aggr.data) == '1') 'disabled' else 'normal'
tkconfigure(bt.aggrPars, state = stateAggr)
})
##############################################
# Statistics-target selector: compute statistics over all data pooled
# ('all'), the spatial average series ('avg'), or station by station ('stn').
# STATDATATYPE holds the localized labels, StatDataT the internal codes.
frameStatData <- tkframe(subfr3, relief = 'groove', borderwidth = 2)
STATDATATYPE <- lang.dlg[['combobox']][['1']]
StatDataT <- c('all', 'avg', 'stn')
stat.data <- tclVar()
tclvalue(stat.data) <- STATDATATYPE[StatDataT %in% GeneralParameters$stat.data]
txt.stat.data <- tklabel(frameStatData, text = lang.dlg[['label']][['12']], anchor = 'e', justify = 'right')
cb.stat.data <- ttkcombobox(frameStatData, values = STATDATATYPE, textvariable = stat.data, justify = 'center', width = largeur4)
tkgrid(txt.stat.data, row = 0, column = 0, sticky = 'e', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stat.data, row = 0, column = 1, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(cb.stat.data, lang.dlg[['tooltip']][['11']], lang.dlg[['status']][['11']])
#################
# When the statistics target changes: station-specific widgets (station
# selectors, maps, per-station graphs) are only meaningful for 'stn'; the
# 'Lines' graph type is unavailable for pooled ('all') data. This mirrors
# the widget-state logic in the bt.hovd load callback above.
tkbind(cb.stat.data, "<<ComboboxSelected>>", function(){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
stateDispSTN <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(bt.stat.prev, state = stateDispSTN)
tkconfigure(cb.stat.sel, state = stateDispSTN)
tkconfigure(bt.stat.next, state = stateDispSTN)
stateMaps <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stats.maps, state = stateMaps)
tkconfigure(bt.stats.maps, state = stateMaps)
tkconfigure(cb.plot.type, state = stateMaps)
tkconfigure(bt.stats.Opt, state = stateMaps)
stateStnID <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stn.graph, state = stateStnID)
tkconfigure(bt.stn.graph.prev, state = stateStnID)
tkconfigure(bt.stn.graph.next, state = stateStnID)
# Restrict graph types for pooled data and fall back to the first type
# if the now-unavailable third type ('Lines') was selected.
itype <- if(statsdata == 'all') 1:2 else 1:3
CbTypeGRAPH <- typeGraphCombo[itype]
if(statsdata == 'all'){
if(str_trim(tclvalue(type.graph)) == typeGraphCombo[3])
tclvalue(type.graph) <- typeGraphCombo[1]
}
tkconfigure(cb.stats.graph, values = CbTypeGRAPH)
})
##############################################
# Button opening the categorical-validation settings dialog (thresholds for
# dichotomous forecast statistics); writes back via <<-.
bt.categStats <- ttkbutton(subfr3, text = lang.dlg[['button']][['2']])
tkconfigure(bt.categStats, command = function(){
GeneralParameters[['dicho.fcst']] <<- getInfo_categoricalValid(.cdtEnv$tcl$main$win,
GeneralParameters[['dicho.fcst']])
})
##############################################
# Button opening the volumetric-statistics settings dialog (rainfall only;
# the button is only gridded when clim.var == 'RR', see the layout below).
bt.volumeStats <- ttkbutton(subfr3, text = lang.dlg[['button']][['3']])
tkconfigure(bt.volumeStats, command = function(){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
GeneralParameters[['volume.stat']] <<- getInfo_volumetricValid(.cdtEnv$tcl$main$win, statsdata,
GeneralParameters[['volume.stat']])
})
##############################################
# "Calculate statistics" button: snapshot the tab-3 widget values into
# GeneralParameters, run ValidationDataProcs, then refresh the station
# selectors when per-station statistics were computed.
bt.calc.stat <- ttkbutton(subfr3, text = lang.dlg[['button']][['4']])
tkconfigure(bt.calc.stat, command = function(){
# Month labels -> month numbers; years parsed from the entry widgets.
GeneralParameters$date.range$start.month <- which(MOIS %in% str_trim(tclvalue(start.mois)))
GeneralParameters$date.range$end.month <- which(MOIS %in% str_trim(tclvalue(end.mois)))
GeneralParameters$date.range$start.year <- as.numeric(str_trim(tclvalue(start.year)))
GeneralParameters$date.range$end.year <- as.numeric(str_trim(tclvalue(end.year)))
GeneralParameters$aggr.series$aggr.data <- switch(tclvalue(aggr.data), '0' = FALSE, '1' = TRUE)
GeneralParameters$stat.data <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
#####
# GeneralParameters$STN.file <- basename(str_trim(tclvalue(dirNetCDF)))
GeneralParameters$STN.file <- str_trim(tclvalue(file.stnfl))
GeneralParameters$outdir <- str_trim(tclvalue(file.save1))
GeneralParameters$validExist <- switch(tclvalue(validExist), '0' = FALSE, '1' = TRUE)
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
Insert.Messages.Out(lang.dlg[['message']][['1']], TRUE, "i")
# Busy cursor while the computation runs; restored in the finally clause.
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
ret <- tryCatch(
{
ValidationDataProcs(GeneralParameters)
},
warning = function(w) warningFun(w),
error = function(e) errorFun(e),
finally = {
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
}
)
# ret == 0 signals success; on per-station statistics, repopulate the
# station ID comboboxes of tab 4.
if(!is.null(ret)){
if(ret == 0){
Insert.Messages.Out(lang.dlg[['message']][['2']], TRUE, "s")
if(GeneralParameters$stat.data == 'stn'){
tkconfigure(cb.stat.sel, values = .cdtData$EnvData$opDATA$id)
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(cb.stn.graph, values = .cdtData$EnvData$opDATA$id, state = 'normal')
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[1]
}
}else Insert.Messages.Out(lang.dlg[['message']][['3']], TRUE, 'e')
}else Insert.Messages.Out(lang.dlg[['message']][['3']], TRUE, 'e')
})
##############################################
# Stack the tab-3 frames: saved-validation loader, season/year selection,
# series aggregation, statistics-target selector, then the action buttons.
tkgrid(frameHOV, row = 0, column = 0, sticky = 'we')
tkgrid(frameSeason, row = 1, column = 0, sticky = 'we', pady = 1)
tkgrid(frameAggr, row = 2, column = 0, sticky = 'we', pady = 1)
tkgrid(frameStatData, row = 3, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.categStats, row = 4, column = 0, sticky = 'we', pady = 3)
# Volumetric statistics only apply to rainfall, so the button is shown for
# precipitation ('RR') and left off the grid for temperature.
if(clim.var == 'RR'){
    tkgrid(bt.volumeStats, row = 5, column = 0, sticky = 'we', pady = 3)
}
tkgrid(bt.calc.stat, row = 6, column = 0, sticky = 'we', pady = 3)
#######################################################################################################
#Tab4
subfr4 <- bwTabScrollableFrame(cmd.tab4)
##############################################
# Statistics-table frame: station selector (prev / combobox / next) plus a
# display button. The selector is only active in per-station ('stn') mode;
# its values are filled after statistics are computed or loaded.
frameStatTab <- ttklabelframe(subfr4, text = lang.dlg[['label']][['13']], relief = 'groove')
STATIONIDS <- ''
stn.stat.tab <- tclVar()
stateDispSTN <- if(GeneralParameters$stat.data == 'stn') 'normal' else 'disabled'
bt.stat.prev <- ttkbutton(frameStatTab, text = "<<", state = stateDispSTN, width = largeur7)
bt.stat.next <- ttkbutton(frameStatTab, text = ">>", state = stateDispSTN, width = largeur7)
cb.stat.sel <- ttkcombobox(frameStatTab, values = STATIONIDS, textvariable = stn.stat.tab, width = largeur3, state = stateDispSTN, justify = 'center')
bt.stat.disp <- ttkbutton(frameStatTab, text = lang.dlg[['button']][['5']])
tkgrid(bt.stat.prev, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stat.sel, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stat.next, row = 0, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stat.disp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
################
# Notebook tab handle for the statistics table (reused when redisplaying).
.cdtData$EnvData$tab$validStat <- NULL

# Display the validation statistics in a table tab. The set shown depends on
# the selected target: all data pooled, the spatial-average series, or a
# single station (one column of the per-station statistics matrix).
tkconfigure(bt.stat.disp, command = function(){
    if(is.null(.cdtData$EnvData$Statistics)) return(NULL)
    statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
    if(statsdata == 'stn'){
        don <- .cdtData$EnvData$Statistics$STN
        # column of the statistics matrix matching the selected station ID
        istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(stn.stat.tab)))
        stat.values <- don$statistics[, istn]
        titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
    }else if(statsdata == 'avg'){
        don <- .cdtData$EnvData$Statistics$AVG
        stat.values <- don$statistics
        titleTab <- 'Spatial-Average Statistics'
    }else if(statsdata == 'all'){
        don <- .cdtData$EnvData$Statistics$ALL
        stat.values <- don$statistics
        titleTab <- 'All-Data Statistics'
    }
    # Assemble the four-column display table shared by all three cases.
    dat2disp <- data.frame(don$statNames, stat.values, don$description, don$perfect.score)
    names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
    rownames(dat2disp) <- NULL
    .cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
})
# Step backward through the station list (wrapping from the first station to
# the last) and redisplay that station's statistics table.
tkconfigure(bt.stat.prev, command = function(){
    if(is.null(.cdtData$EnvData$Statistics)) return(NULL)
    don <- .cdtData$EnvData$Statistics$STN
    stn.ids <- .cdtData$EnvData$opDATA$id
    istn <- which(stn.ids == str_trim(tclvalue(stn.stat.tab))) - 1
    if(istn < 1) istn <- length(stn.ids)
    tclvalue(stn.stat.tab) <- stn.ids[istn]
    dat2disp <- data.frame(don$statNames, don$statistics[, istn], don$description, don$perfect.score)
    names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
    rownames(dat2disp) <- NULL
    titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
    .cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
})

# Step forward through the station list (wrapping from the last station back
# to the first) and redisplay that station's statistics table.
tkconfigure(bt.stat.next, command = function(){
    if(is.null(.cdtData$EnvData$Statistics)) return(NULL)
    don <- .cdtData$EnvData$Statistics$STN
    stn.ids <- .cdtData$EnvData$opDATA$id
    istn <- which(stn.ids == str_trim(tclvalue(stn.stat.tab))) + 1
    if(istn > length(stn.ids)) istn <- 1
    tclvalue(stn.stat.tab) <- stn.ids[istn]
    dat2disp <- data.frame(don$statNames, don$statistics[, istn], don$description, don$perfect.score)
    names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
    rownames(dat2disp) <- NULL
    titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
    .cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
})
##############################################
# Statistics-map frame: choose which statistic to map over the stations,
# open map options, and choose the plot type (points or pixels).
frameMap <- ttklabelframe(subfr4, text = lang.dlg[['label']][['14']], relief = 'groove')
# Internal statistic codes: continuous, categorical, then volumetric.
statsCON <- c('CORR', 'BR2', 'BIAS', 'PBIAS', 'ME', 'MAE', 'RMSE', 'NSE', 'MNSE', 'RNSE', 'IOA', 'MIOA', 'RIOA')
statsCAT <- c('POD', 'POFD', 'FAR', 'FBS', 'CSI', 'HSS')
statsVOL <- c('MQB', 'MQE', 'VHI', 'QPOD', 'VFAR', 'QFAR', 'VMI', 'QMISS', 'VCSI', 'QCSI')
ValStatNAMES0 <- c(statsCON, statsCAT, statsVOL)
CbStatNAMES0 <- lang.dlg[['combobox']][['2']]
# Rainfall gets all 29 statistics (13 + 6 + 10); temperature only the first
# 19 (volumetric statistics excluded).
ivarL <- switch(clim.var, "RR" = 1:29, "TT" = 1:19)
statsVAR <- tclVar()
CbStatNAMES <- CbStatNAMES0[ivarL]
ValStatNAMES <- ValStatNAMES0[ivarL]
tclvalue(statsVAR) <- CbStatNAMES[ValStatNAMES %in% GeneralParameters$statsVar]
# Maps are only available in per-station mode.
stateMaps <- if(GeneralParameters$stat.data == 'stn') 'normal' else 'disabled'
cb.stats.maps <- ttkcombobox(frameMap, values = CbStatNAMES, textvariable = statsVAR, width = largeur2, state = stateMaps)
##########
frMapBt <- tkframe(frameMap)
bt.stats.maps <- ttkbutton(frMapBt, text = .cdtEnv$tcl$lang$global[['button']][['3']], state = stateMaps, width = largeur9)
bt.stats.Opt <- ttkbutton(frMapBt, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateMaps, width = largeur9)
tkgrid(bt.stats.Opt, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stats.maps, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
##########
# Plot-type sub-frame: render station values as points or as pixels.
frPlotT <- tkframe(frameMap)
typeMapPLOT <- c("Points", "Pixels")
.cdtData$EnvData$typeMap <- tclVar("Points")
txt.plot.type <- tklabel(frPlotT, text = lang.dlg[['label']][['15']], anchor = "e", justify = "right")
cb.plot.type <- ttkcombobox(frPlotT, values = typeMapPLOT, textvariable = .cdtData$EnvData$typeMap, width = largeur8, state = stateMaps)
tkgrid(txt.plot.type, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.plot.type, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
##########
tkgrid(cb.stats.maps, row = 0, column = 0, sticky = 'we')
tkgrid(frMapBt, row = 1, column = 0, sticky = '')
tkgrid(frPlotT, row = 2, column = 0, sticky = '')
##############
# Map-options button: pre-compute default color-scale levels from the
# selected statistic's station values (unless the user set custom levels),
# then open the map options dialog.
tkconfigure(bt.stats.Opt, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
mapstat <- ValStatNAMES[CbStatNAMES %in% str_trim(tclvalue(statsVAR))]
istat <- which(.cdtData$EnvData$Statistics$STN$statNames == mapstat)
don <- .cdtData$EnvData$Statistics$STN$statistics[istat, ]
atlevel <- pretty(don, n = 10, min.n = 7)
if(is.null(.cdtData$EnvData$statMapOp$userLvl$levels)){
.cdtData$EnvData$statMapOp$userLvl$levels <- atlevel
}else{
# Only overwrite levels the user has not customized.
if(!.cdtData$EnvData$statMapOp$userLvl$custom)
.cdtData$EnvData$statMapOp$userLvl$levels <- atlevel
}
}
.cdtData$EnvData$statMapOp <- MapGraph.MapOptions(.cdtData$EnvData$statMapOp)
# Keep the point size chosen in the dialog available to the enclosing
# scope (pointSizeI is defined outside this view, hence <<-).
if(str_trim(tclvalue(.cdtData$EnvData$typeMap)) == "Points")
pointSizeI <<- .cdtData$EnvData$statMapOp$pointSize
})
################
# Notebook tab handle for the statistics map (reused on redisplay).
.cdtData$EnvData$tab$Maps <- NULL
# Plot the selected statistic over the station coordinates.
tkconfigure(bt.stats.maps, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
.cdtData$EnvData$statVAR <- ValStatNAMES[CbStatNAMES %in% str_trim(tclvalue(statsVAR))]
# Provide the plotting routine with the station geometry.
.cdtData$EnvData$plot.maps$data.type <- "cdtstation"
.cdtData$EnvData$plot.maps$lon <- .cdtData$EnvData$opDATA$lon
.cdtData$EnvData$plot.maps$lat <- .cdtData$EnvData$opDATA$lat
.cdtData$EnvData$plot.maps$id <- .cdtData$EnvData$opDATA$id
Validation.DisplayStatMaps()
}
})
##############################################
# Graph frame: graph type selector (Scatter / CDF / Lines) with options and
# plot buttons, plus a per-station selector for per-station graphs.
frameGraph <- ttklabelframe(subfr4, text = lang.dlg[['label']][['16']], relief = 'groove')
############
frameGrP <- tkframe(frameGraph)
typeGraphCombo <- lang.dlg[['combobox']][['3']]
valGraphCombo <- c("Scatter", "CDF", "Lines")
# Pooled ('all') data cannot plot the per-station 'Lines' graph type.
itype <- if(GeneralParameters$stat.data == 'all') 1:2 else 1:3
type.graph <- tclVar()
CbTypeGRAPH <- typeGraphCombo[itype]
ValTypeGRAPH <- valGraphCombo[itype]
tclvalue(type.graph) <- CbTypeGRAPH[ValTypeGRAPH %in% GeneralParameters$type.graph]
cb.stats.graph <- ttkcombobox(frameGrP, values = CbTypeGRAPH, textvariable = type.graph, width = largeur2)
bt.stats.graph <- ttkbutton(frameGrP, text = .cdtEnv$tcl$lang$global[['button']][['3']], width = largeur9)
bt.Opt.graph <- ttkbutton(frameGrP, text = .cdtEnv$tcl$lang$global[['button']][['4']], width = largeur9)
tkgrid(cb.stats.graph, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.Opt.graph, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 2, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stats.graph, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 3, padx = 2, pady = 1, ipadx = 1, ipady = 1)
############
# Station selector for per-station graphs; starts disabled until station
# statistics are available (values filled by the compute/load callbacks).
frameGrS <- tkframe(frameGraph)
STNIDGRAPH <- ""
.cdtData$EnvData$stnIDGraph <- tclVar()
stateStnID <- "disabled"
cb.stn.graph <- ttkcombobox(frameGrS, values = STNIDGRAPH, textvariable = .cdtData$EnvData$stnIDGraph, width = largeur3, state = stateStnID, justify = 'center')
bt.stn.graph.prev <- ttkbutton(frameGrS, text = "<<", state = stateStnID, width = largeur7)
bt.stn.graph.next <- ttkbutton(frameGrS, text = ">>", state = stateStnID, width = largeur7)
tkgrid(bt.stn.graph.prev, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(cb.stn.graph, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(bt.stn.graph.next, row = 0, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 2, ipadx = 1, ipady = 1)
##############
tkgrid(frameGrP, row = 0, column = 0, sticky = 'we')
tkgrid(frameGrS, row = 1, column = 0, sticky = 'we')
##############
# Notebook tab handle for the validation graph (reused on redisplay).
.cdtData$EnvData$tab$Graph <- NULL
# Plot the selected graph type for the current station/series.
tkconfigure(bt.stats.graph, command = function(){
# Map the localized combobox label back to the internal graph-type code.
.cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
if(!is.null(.cdtData$EnvData$opDATA$stnStatData)){
imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
.cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
}
})
# Step backward through the station list (wrapping from the first station to
# the last) and replot the validation graph for the new station.
tkconfigure(bt.stn.graph.prev, command = function(){
    .cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
    if(is.null(.cdtData$EnvData$opDATA$stnStatData)) return(NULL)
    stn.ids <- .cdtData$EnvData$opDATA$id
    istn <- which(stn.ids == str_trim(tclvalue(.cdtData$EnvData$stnIDGraph))) - 1
    if(istn < 1) istn <- length(stn.ids)
    tclvalue(.cdtData$EnvData$stnIDGraph) <- stn.ids[istn]
    imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
    .cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
})

# Step forward through the station list (wrapping from the last station back
# to the first) and replot the validation graph for the new station.
tkconfigure(bt.stn.graph.next, command = function(){
    .cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
    if(is.null(.cdtData$EnvData$opDATA$stnStatData)) return(NULL)
    stn.ids <- .cdtData$EnvData$opDATA$id
    istn <- which(stn.ids == str_trim(tclvalue(.cdtData$EnvData$stnIDGraph))) + 1
    if(istn > length(stn.ids)) istn <- 1
    tclvalue(.cdtData$EnvData$stnIDGraph) <- stn.ids[istn]
    imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
    .cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
})
##############
# Graph-options button: dispatch to the graph-type-specific options dialog
# by name (e.g. Validation.GraphOptions.Scatter) via get().
tkconfigure(bt.Opt.graph, command = function(){
typeGraph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
plot.fun <- get(paste0("Validation.GraphOptions.", typeGraph), mode = "function")
.cdtData$EnvData$GraphOp <- plot.fun(.cdtData$EnvData$GraphOp)
})
#############################
# Stack the tab-4 frames: statistics table, statistics map, then graphs.
tkgrid(frameStatTab, row = 0, column = 0, sticky = 'we')
tkgrid(frameMap, row = 1, column = 0, sticky = 'we', pady = 3)
tkgrid(frameGraph, row = 2, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
#Tab5
subfr5 <- bwTabScrollableFrame(cmd.tab5)
##############################################
# Shapefile-overlay frame: checkbox to add administrative boundaries to the
# plots, a combobox of already-opened files, a browse button and an options
# button for the boundary line style.
frameSHP <- ttklabelframe(subfr5, text = lang.dlg[['label']][['17']], relief = 'groove')
.cdtData$EnvData$shp$add.shp <- tclVar(GeneralParameters$add.to.plot$add.shp)
file.plotShp <- tclVar(GeneralParameters$add.to.plot$shp.file)
stateSHP <- if(GeneralParameters$add.to.plot$add.shp) "normal" else "disabled"
chk.addshp <- tkcheckbutton(frameSHP, variable = .cdtData$EnvData$shp$add.shp, text = lang.dlg[['checkbutton']][['3']], anchor = 'w', justify = 'left')
bt.addshpOpt <- ttkbutton(frameSHP, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateSHP)
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = file.plotShp, width = largeur0, state = stateSHP)
bt.addshp <- tkbutton(frameSHP, text = "...", state = stateSHP)
tkgrid(chk.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.addshpOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#################
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.plotShp) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
listOpenFiles <- openFile_ttkcomboList()
lapply(list(cb.stnfl, cb.valid, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile))
.cdtData$EnvData$shp$ocrds <- NULL
else
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpofile[[2]])
}
})
tkconfigure(bt.addshpOpt, command = function(){
.cdtData$EnvData$SHPOp <- MapGraph.GraphOptions.LineSHP(.cdtData$EnvData$SHPOp)
})
#################
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile))
.cdtData$EnvData$shp$ocrds <- NULL
else
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpofile[[2]])
})
tkbind(chk.addshp, "<Button-1>", function(){
stateSHP <- if(tclvalue(.cdtData$EnvData$shp$add.shp) == "1") "disabled" else "normal"
tkconfigure(cb.addshp, state = stateSHP)
tkconfigure(bt.addshp, state = stateSHP)
tkconfigure(bt.addshpOpt, state = stateSHP)
})
##############################################
# DEM overlay frame: checkbox to enable, combobox of open NetCDF files,
# browse button, and a display-options button.
frameDEM <- ttklabelframe(subfr5, text = lang.dlg[['label']][['18']], relief = 'groove')
.cdtData$EnvData$dem$add.dem <- tclVar(GeneralParameters$add.to.plot$add.dem)
file.grddem <- tclVar(GeneralParameters$add.to.plot$dem.file)
stateDEM <- if(GeneralParameters$add.to.plot$add.dem) "normal" else "disabled"
chk.adddem <- tkcheckbutton(frameDEM, variable = .cdtData$EnvData$dem$add.dem, text = lang.dlg[['checkbutton']][['4']], anchor = 'w', justify = 'left')
bt.adddemOpt <- ttkbutton(frameDEM, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateDEM)
cb.adddem <- ttkcombobox(frameDEM, values = unlist(listOpenFiles), textvariable = file.grddem, width = largeur0, state = stateDEM)
bt.adddem <- tkbutton(frameDEM, text = "...", state = stateDEM)
tkgrid(chk.adddem, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.adddemOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.adddem, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.adddem, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#################
# Browse for a DEM NetCDF file; on success store the elevation grid and a
# precomputed hillshade (slope + aspect via raster::terrain) for plotting.
tkconfigure(bt.adddem, command = function(){
nc.opfiles <- getOpenNetcdf(.cdtEnv$tcl$main$win, initialdir = getwd())
if(!is.null(nc.opfiles)){
update.OpenFiles('netcdf', nc.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- nc.opfiles[[1]]
tclvalue(file.grddem) <- nc.opfiles[[1]]
listOpenFiles <- openFile_ttkcomboList()
lapply(list(cb.stnfl, cb.valid, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
demData <- getNCDFSampleData(str_trim(tclvalue(file.grddem)))
if(!is.null(demData)){
jfile <- getIndex.AllOpenFiles(str_trim(tclvalue(file.grddem)))
demData <- .cdtData$OpenFiles$Data[[jfile]][[2]]
.cdtData$EnvData$dem$elv <- demData[c('x', 'y', 'z')]
demr <- raster::raster(demData[c('x', 'y', 'z')])
slope <- raster::terrain(demr, opt = 'slope')
aspect <- raster::terrain(demr, opt = 'aspect')
hill <- raster::hillShade(slope, aspect, angle = 40, direction = 270)
# convert raster values to an x-by-y matrix and flip columns so the
# y axis increases upwards, matching image()-style plotting
hill <- matrix(hill@data@values, hill@ncols, hill@nrows)
hill <- hill[, rev(seq(ncol(hill)))]
.cdtData$EnvData$dem$hill <- list(x = demData$x, y = demData$y, z = hill)
rm(demData, demr, slope, aspect, hill)
}else{
Insert.Messages.Out(lang.dlg[['message']][['5']], TRUE, "e")
tclvalue(file.grddem) <- ""
.cdtData$EnvData$dem <- NULL
}
}
})
# DEM display options: seed the contour levels from the elevation data
# unless the user already customized them.
tkconfigure(bt.adddemOpt, command = function(){
if(!is.null(.cdtData$EnvData$dem$elv)){
atlevel <- pretty(.cdtData$EnvData$dem$elv$z, n = 10, min.n = 7)
if(is.null(.cdtData$EnvData$dem$Opt$user.levels$levels)){
.cdtData$EnvData$dem$Opt$user.levels$levels <- atlevel
}else{
if(!.cdtData$EnvData$dem$Opt$user.levels$custom)
.cdtData$EnvData$dem$Opt$user.levels$levels <- atlevel
}
}
.cdtData$EnvData$dem$Opt <- MapGraph.gridDataLayer(.cdtData$EnvData$dem$Opt)
})
#################
# Same elevation/hillshade loading when a DEM file is picked from the combobox.
tkbind(cb.adddem, "<<ComboboxSelected>>", function(){
demData <- getNCDFSampleData(str_trim(tclvalue(file.grddem)))
if(!is.null(demData)){
jfile <- getIndex.AllOpenFiles(str_trim(tclvalue(file.grddem)))
demData <- .cdtData$OpenFiles$Data[[jfile]][[2]]
.cdtData$EnvData$dem$elv <- demData[c('x', 'y', 'z')]
demr <- raster::raster(demData[c('x', 'y', 'z')])
slope <- raster::terrain(demr, opt = 'slope')
aspect <- raster::terrain(demr, opt = 'aspect')
hill <- raster::hillShade(slope, aspect, angle = 40, direction = 270)
hill <- matrix(hill@data@values, hill@ncols, hill@nrows)
hill <- hill[, rev(seq(ncol(hill)))]
.cdtData$EnvData$dem$hill <- list(x = demData$x, y = demData$y, z = hill)
rm(demData, demr, slope, aspect, hill)
}else{
Insert.Messages.Out(lang.dlg[['message']][['5']], TRUE, "e")
tclvalue(file.grddem) <- ""
.cdtData$EnvData$dem <- NULL
}
})
# Toggle widget states; inverted test because <Button-1> fires before the
# checkbutton variable is updated.
tkbind(chk.adddem, "<Button-1>", function(){
stateDEM <- if(tclvalue(.cdtData$EnvData$dem$add.dem) == "1") "disabled" else "normal"
tkconfigure(cb.adddem, state = stateDEM)
tkconfigure(bt.adddem, state = stateDEM)
tkconfigure(bt.adddemOpt, state = stateDEM)
})
#############################
# Stack the overlay frames in tab 5.
tkgrid(frameSHP, row = 0, column = 0, sticky = 'we', pady = 1)
tkgrid(frameDEM, row = 1, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
# Final assembly: place the notebook in the left command panel and make it
# stretch with the window.
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
tkgrid.rowconfigure(tknote.cmd, 0, weight = 1)
tcl('update')
tkgrid(.cdtEnv$tcl$main$cmd.frame, sticky = 'nwes', pady = 1)
tkgrid.columnconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
tkgrid.rowconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
# GUI builder: side effects only, no meaningful return value.
invisible()
}
| /R/cdtValidation_HOV_leftCmd.R | no_license | Benminoungou/CDT | R | false | false | 79,994 | r |
# Build the left command panel for the hold-out validation (HOV) tool.
# clim.var: climate variable, "RR" (rainfall) or "TT" (temperature); it
# selects the default aggregation function and graph/threshold defaults.
Validation.HOV.PanelCmd <- function(clim.var){
listOpenFiles <- openFile_ttkcomboList()
# widget widths tuned per operating system (fonts differ)
if(WindowsOS()){
largeur0 <- 36
largeur1 <- 38
largeur2 <- 38
largeur3 <- 20
largeur4 <- 17
largeur5 <- 4
largeur6 <- 34
largeur7 <- 7
largeur8 <- 8
largeur9 <- 18
}else{
largeur0 <- 32
largeur1 <- 33
largeur2 <- 36
largeur3 <- 18
largeur4 <- 16
largeur5 <- 3
largeur6 <- 36
largeur7 <- 7
largeur8 <- 8
largeur9 <- 17
}
###################
# Variable-dependent defaults: rainfall is summed with a 1 mm event
# threshold; temperature is averaged with a 20 (degC, presumably) threshold.
aggFun <- switch(clim.var, "RR" = "sum", "TT" = "mean")
trhesVal <- switch(clim.var, "RR" = 1, "TT" = 20)
graphMin <- switch(clim.var, "RR" = 0, "TT" = 5)
graphMax <- switch(clim.var, "RR" = 80, "TT" = 35)
# default extraction period covering 1981-2018 at all supported time steps
date.range <- list(start.year = 1981, start.mon = 1, start.dek = 1,
start.pen = 1, start.day = 1,
start.hour = 0, start.min = 0,
end.year = 2018, end.mon = 12, end.dek = 3,
end.pen = 6, end.day = 31,
end.hour = 23, end.min = 55)
# Master parameter list for the validation run; individual dialogs opened
# from the panel update the corresponding sub-lists.
GeneralParameters <- list(Tstep = "dekadal", STN.file = "", Extract.Date = date.range,
ncdf.file = list(dir = "", sample = "", format = "rr_mrg_%s%s%s.nc"),
type.select = "all",
shp.file = list(shp = "", attr = ""),
date.range = list(start.year = 1981, start.month = 1, end.year = 2018, end.month = 12),
aggr.series = list(aggr.data = FALSE, aggr.fun = aggFun, opr.fun = ">=", opr.thres = 0,
min.frac = list(unique = TRUE, all = 0.95,
month = rep(0.95, 12))),
stat.data = "all",
dicho.fcst = list(fun = ">=", thres = trhesVal),
volume.stat = list(user = TRUE, one.thres = TRUE,
user.val = 80, user.file = '', from = 'obs', perc = 75,
period = list(all.years = TRUE, start.year = 1981,
end.year = 2010, min.year = 5)
),
add.to.plot = list(add.shp = FALSE, shp.file = "", add.dem = FALSE, dem.file = ""),
outdir = "", clim.var = clim.var, statsVar = 'CORR', type.graph = "Scatter"
)
# Default display options for the statistics map.
pointSizeI <- 1.0
.cdtData$EnvData$statMapOp <- list(presetCol = list(color = 'tim.colors', reverse = FALSE),
userCol = list(custom = FALSE, color = NULL),
userLvl = list(custom = FALSE, levels = NULL, equidist = FALSE),
title = list(user = FALSE, title = ''),
colkeyLab = list(user = FALSE, label = ''),
scalebar = list(add = FALSE, pos = 'bottomleft'),
pointSize = pointSizeI
)
# Default graph options for the three graph types (scatter, CDF, time series),
# seeded with the variable-dependent axis limits computed above.
.cdtData$EnvData$GraphOp <- list(
scatter = list(
xlim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
ylim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
title = list(is.title = FALSE, title = '', position = 'top'),
point = list(pch = 20, cex = 0.9, col = 'grey10'),
line = list(draw = TRUE, lwd = 2, col = 'red')
),
cdf = list(
xlim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
ylim = list(is.min = FALSE, min = 0.05, is.max = FALSE, max = 1),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
legend = list(add = TRUE, obs = 'Station', est = 'Estimate'),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(obs = list(type = 'line', line = "blue", points = "cyan", lwd = 2, pch = 21, cex = 1),
est = list(type = 'line', line = "red", points = "pink", lwd = 2, pch = 21, cex = 1))
),
line = list(
xlim = list(is.min = FALSE, min = "1981-01-01", is.max = FALSE, max = "2017-12-31"),
ylim = list(is.min = FALSE, min = graphMin, is.max = FALSE, max = graphMax),
axislabs = list(is.xlab = FALSE, xlab = '', is.ylab = FALSE, ylab = ''),
legend = list(add = TRUE, obs = 'Station', est = 'Estimate'),
title = list(is.title = FALSE, title = '', position = 'top'),
plot = list(obs = list(type = 'line', line = "blue", points = "cyan", lwd = 2, pch = 21, cex = 1),
est = list(type = 'line', line = "red", points = "pink", lwd = 2, pch = 21, cex = 1))
)
)
# Defaults for the shapefile overlay and DEM layer.
.cdtData$EnvData$SHPOp <- list(col = "black", lwd = 1.5)
.cdtData$EnvData$dem$Opt <- list(
user.colors = list(custom = FALSE, color = NULL),
user.levels = list(custom = FALSE, levels = NULL, equidist = FALSE),
preset.colors = list(color = 'gray.colors', reverse = FALSE),
add.hill = FALSE
)
# abbreviated month names in the current locale
MOIS <- format(ISOdate(2014, 1:12, 1), "%b")
###################
# Load the panel's translated labels/messages from the language XML file.
xml.dlg <- file.path(.cdtDir$dirLocal, "languages", "cdtValidation_HOV_leftCmd.xml")
lang.dlg <- cdtLanguageParse(xml.dlg, .cdtData$Config$lang.iso)
.cdtData$EnvData$message <- lang.dlg[['message']]
###################
# Tcl variables shared with the zoom toolbar: current extent and the
# pressed-state flags of the zoom/pan buttons.
.cdtData$EnvData$zoom$xx1 <- tclVar()
.cdtData$EnvData$zoom$xx2 <- tclVar()
.cdtData$EnvData$zoom$yy1 <- tclVar()
.cdtData$EnvData$zoom$yy2 <- tclVar()
.cdtData$EnvData$zoom$pressButP <- tclVar(0)
.cdtData$EnvData$zoom$pressButM <- tclVar(0)
.cdtData$EnvData$zoom$pressButRect <- tclVar(0)
.cdtData$EnvData$zoom$pressButDrag <- tclVar(0)
.cdtData$EnvData$pressGetCoords <- tclVar(0)
# initial map extent, filled when the map is first displayed (see bt.dispMap)
ZoomXYval0 <- NULL
###################
# Create the five-tab notebook that hosts the whole command panel.
.cdtEnv$tcl$main$cmd.frame <- tkframe(.cdtEnv$tcl$main$panel.left)
tknote.cmd <- bwNoteBook(.cdtEnv$tcl$main$cmd.frame)
cmd.tab1 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['1']])
cmd.tab2 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['2']])
cmd.tab3 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['3']])
cmd.tab4 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['4']])
cmd.tab5 <- bwAddTab(tknote.cmd, text = lang.dlg[['tab_title']][['5']])
bwRaiseTab(tknote.cmd, cmd.tab1)
# let every tab expand with the window
tkgrid.columnconfigure(cmd.tab1, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab2, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab3, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab4, 0, weight = 1)
tkgrid.columnconfigure(cmd.tab5, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab1, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab2, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab3, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab4, 0, weight = 1)
tkgrid.rowconfigure(cmd.tab5, 0, weight = 1)
#######################################################################################################
#Tab1
# Tab 1: input data — time step, station file, NetCDF directory, extraction
# date range, and output directory.
subfr1 <- bwTabScrollableFrame(cmd.tab1)
##############################################
frInputData <- ttklabelframe(subfr1, text = lang.dlg[['label']][['1']], relief = 'groove')
file.period <- tclVar()
# displayed labels (translated) and the internal time-step codes they map to
CbperiodVAL <- .cdtEnv$tcl$lang$global[['combobox']][['1']][3:6]
periodVAL <- c('daily', 'pentad', 'dekadal', 'monthly')
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% GeneralParameters$Tstep]
file.stnfl <- tclVar(GeneralParameters$STN.file)
dirNetCDF <- tclVar(GeneralParameters$ncdf.file$dir)
txt.tstep <- tklabel(frInputData, text = lang.dlg[['label']][['2']], anchor = 'e', justify = 'right')
cb.tstep <- ttkcombobox(frInputData, values = CbperiodVAL, textvariable = file.period)
txt.stnfl <- tklabel(frInputData, text = lang.dlg[['label']][['3']], anchor = 'w', justify = 'left')
cb.stnfl <- ttkcombobox(frInputData, values = unlist(listOpenFiles), textvariable = file.stnfl, width = largeur0)
bt.stnfl <- tkbutton(frInputData, text = "...")
txt.dir.ncdf <- tklabel(frInputData, text = lang.dlg[['label']][['4']], anchor = 'w', justify = 'left')
set.dir.ncdf <- ttkbutton(frInputData, text = .cdtEnv$tcl$lang$global[['button']][['5']])
en.dir.ncdf <- tkentry(frInputData, textvariable = dirNetCDF, width = largeur1)
bt.dir.ncdf <- tkbutton(frInputData, text = "...")
#######################
tkgrid(txt.tstep, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(cb.tstep, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(txt.stnfl, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stnfl, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stnfl, row = 3, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 1, ipadx = 1, ipady = 1)
tkgrid(txt.dir.ncdf, row = 4, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(set.dir.ncdf, row = 4, column = 3, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 0, ipadx = 1, ipady = 1)
tkgrid(en.dir.ncdf, row = 5, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 0, pady = 0, ipadx = 1, ipady = 1)
tkgrid(bt.dir.ncdf, row = 5, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 0, pady = 0, ipadx = 1, ipady = 1)
# balloon help / status-bar text for each widget
helpWidget(cb.tstep, lang.dlg[['tooltip']][['1']], lang.dlg[['status']][['1']])
helpWidget(cb.stnfl, lang.dlg[['tooltip']][['2']], lang.dlg[['status']][['2']])
helpWidget(bt.stnfl, lang.dlg[['tooltip']][['3']], lang.dlg[['status']][['3']])
helpWidget(en.dir.ncdf, lang.dlg[['tooltip']][['4']], lang.dlg[['status']][['4']])
helpWidget(bt.dir.ncdf, lang.dlg[['tooltip']][['3a']], lang.dlg[['status']][['3a']])
helpWidget(set.dir.ncdf, lang.dlg[['tooltip']][['2a']], lang.dlg[['status']][['2a']])
######################
# Browse for the station data file and propagate the updated open-files
# list to every file combobox of the panel.
tkconfigure(bt.stnfl, command = function(){
dat.opfiles <- getOpenFiles(.cdtEnv$tcl$main$win)
if(!is.null(dat.opfiles)){
update.OpenFiles('ascii', dat.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- dat.opfiles[[1]]
tclvalue(file.stnfl) <- dat.opfiles[[1]]
lapply(list(cb.stnfl, cb.shpF, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
}
})
# Open the NetCDF filename-format dialog and store the result.
tkconfigure(set.dir.ncdf, command = function(){
GeneralParameters[["ncdf.file"]] <<- getInfoNetcdfData(.cdtEnv$tcl$main$win,
GeneralParameters[["ncdf.file"]],
str_trim(tclvalue(dirNetCDF)),
str_trim(tclvalue(file.period)))
})
# Choose the directory containing the NetCDF files.
tkconfigure(bt.dir.ncdf, command = function(){
dirnc <- tk_choose.dir(getwd(), "")
tclvalue(dirNetCDF) <- if(!is.na(dirnc)) dirnc else ""
})
##############################################
# Button opening the date-range dialog for the extraction period.
btDateRange <- ttkbutton(subfr1, text = lang.dlg[['button']][['0a']])
tkconfigure(btDateRange, command = function(){
tstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
GeneralParameters[["Extract.Date"]] <<- getInfoDateRange(.cdtEnv$tcl$main$win,
GeneralParameters[["Extract.Date"]],
tstep)
})
helpWidget(btDateRange, lang.dlg[['tooltip']][['4a']], lang.dlg[['status']][['4a']])
##############################################
# Output directory frame.
frameDirSav <- ttklabelframe(subfr1, text = lang.dlg[['label']][['5']], relief = 'groove')
file.save1 <- tclVar(GeneralParameters$outdir)
en.dir.save <- tkentry(frameDirSav, textvariable = file.save1, width = largeur1)
bt.dir.save <- tkbutton(frameDirSav, text = "...")
tkgrid(en.dir.save, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.dir.save, row = 0, column = 5, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(en.dir.save, lang.dlg[['tooltip']][['5']], lang.dlg[['status']][['5']])
helpWidget(bt.dir.save, lang.dlg[['tooltip']][['6']], lang.dlg[['status']][['6']])
#######################
tkconfigure(bt.dir.save, command = function() fileORdir2Save(file.save1, isFile = FALSE))
#############################
# Lay out tab 1.
tkgrid(frInputData, row = 0, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(btDateRange, row = 1, column = 0, sticky = 'we', padx = 1, pady = 3, ipadx = 1, ipady = 1)
tkgrid(frameDirSav, row = 2, column = 0, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
#######################################################################################################
#Tab2
# Tab 2: station selection — all stations, rectangle, or polygon.
subfr2 <- bwTabScrollableFrame(cmd.tab2)
##############################################
frameSelect <- ttklabelframe(subfr2, text = lang.dlg[['label']][['19a']], relief = 'groove')
type.select <- tclVar()
# displayed labels (translated) and the internal selection codes they map to
SELECTALL <- lang.dlg[['combobox']][['4']]
TypeSelect <- c('all', 'rect', 'poly')
tclvalue(type.select) <- SELECTALL[TypeSelect %in% GeneralParameters$type.select]
txt.type.select <- tklabel(frameSelect, text = lang.dlg[['label']][['19b']], anchor = 'e', justify = 'right')
cb.type.select <- ttkcombobox(frameSelect, values = SELECTALL, textvariable = type.select)
tkgrid(txt.type.select, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(cb.type.select, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#######################
.cdtData$EnvData$type.select <- GeneralParameters$type.select
# Selection-type change: enable/disable the rectangle coordinate entries
# and the polygon comboboxes, clear any previous selection, and refresh
# the map tab so the old selection rectangle disappears.
tkbind(cb.type.select, "<<ComboboxSelected>>", function(){
.cdtData$EnvData$selectedPolygon <- NULL
.cdtData$EnvData$type.select <- TypeSelect[SELECTALL %in% str_trim(tclvalue(type.select))]
if(.cdtData$EnvData$type.select == 'all'){
statelonlat <- 'disabled'
statepolygon <- 'disabled'
}
if(.cdtData$EnvData$type.select == 'rect'){
statelonlat <- 'normal'
statepolygon <- 'disabled'
}
if(.cdtData$EnvData$type.select == 'poly'){
statelonlat <- 'disabled'
statepolygon <- 'normal'
# restore the boundary of the currently chosen polygon, if any
if(tclvalue(.cdtData$EnvData$namePoly) != ''){
shpfopen <- getShpOpenData(file.dispShp)
if(!is.null(shpfopen)){
shpf <- shpfopen[[2]]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
.cdtData$EnvData$selectedPolygon <- getBoundaries(shpf[shpf@data[, ids] == tclvalue(.cdtData$EnvData$namePoly), ])
}
}
}
tkconfigure(en.minlon, state = statelonlat)
tkconfigure(en.maxlon, state = statelonlat)
tkconfigure(en.minlat, state = statelonlat)
tkconfigure(en.maxlat, state = statelonlat)
tkconfigure(.cdtData$EnvData$cb.shpAttr, state = statepolygon)
tkconfigure(cb.Polygon, state = statepolygon)
##
# reset the rectangle coordinates and re-arm the "select" button
tclvalue(.cdtData$EnvData$minlonRect) <- ''
tclvalue(.cdtData$EnvData$maxlonRect) <- ''
tclvalue(.cdtData$EnvData$minlatRect) <- ''
tclvalue(.cdtData$EnvData$maxlatRect) <- ''
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
# if the selection map tab is currently displayed, redraw it and drop
# any rectangle drawn on its canvas
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0)
{
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
tkdelete(tkwinfo('children', .cdtData$OpenTab$Data[[tabid]][[1]][[2]]), 'rect')
}
}
}
})
##############################################
# Shapefile frame: file combobox, browse button, attribute-field combobox
# and polygon-name combobox (used when selection type is 'poly').
frameShp <- ttklabelframe(subfr2, text = lang.dlg[['label']][['20']], relief = 'groove')
file.dispShp <- tclVar(GeneralParameters$shp.file$shp)
shpAttr <- tclVar(GeneralParameters$shp.file$attr)
.cdtData$EnvData$namePoly <- tclVar()
cb.shpF <- ttkcombobox(frameShp, values = unlist(listOpenFiles), textvariable = file.dispShp, width = largeur0)
bt.shpF <- tkbutton(frameShp, text = "...")
txt.attr.shpF <- tklabel(frameShp, text = lang.dlg[['label']][['21']], anchor = 'w', justify = 'left')
.cdtData$EnvData$cb.shpAttr <- ttkcombobox(frameShp, values='', textvariable = shpAttr, state = 'disabled')
cb.Polygon <- ttkcombobox(frameShp, values = '', textvariable = .cdtData$EnvData$namePoly, state = 'disabled')
tkgrid(cb.shpF, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.shpF, row = 0, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 0, pady = 1)
tkgrid(txt.attr.shpF, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 1)
tkgrid(.cdtData$EnvData$cb.shpAttr, row = 2, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 2)
tkgrid(cb.Polygon, row = 3, column = 0, sticky = 'we', rowspan = 1, columnspan = 8, padx = 1, pady = 2)
#######################
# Browse for a shapefile, then populate the attribute-field combobox with
# the layer's column names and the polygon combobox with the values of the
# first attribute column.
tkconfigure(bt.shpF, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.dispShp) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
lapply(list(cb.stnfl, cb.shpF, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
###
shpf <- getShpOpenData(file.dispShp)
dat <- shpf[[2]]@data
AttrTable <- names(dat)
tclvalue(shpAttr) <- AttrTable[1]
adminN <- as.character(dat[, 1])
name.poly <- levels(as.factor(adminN))
# ttkcombobox needs at least 2 values; pad with an empty string
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
tclvalue(.cdtData$EnvData$namePoly) <- name.poly[1]
tkconfigure(.cdtData$EnvData$cb.shpAttr, values = AttrTable)
tkconfigure(cb.Polygon, values = name.poly)
}
})
#######################
# Shapefile selected from the combobox: refresh the attribute and polygon
# lists; clear them if the file cannot be read.
tkbind(cb.shpF, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(file.dispShp)
if(!is.null(shpf)){
dat <- shpf[[2]]@data
AttrTable <- names(dat)
tclvalue(shpAttr) <- AttrTable[1]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
adminN <- as.character(dat[, ids])
name.poly <- levels(as.factor(adminN))
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
}else{
AttrTable <- ''
tclvalue(shpAttr) <- ''
name.poly <- ''
tclvalue(.cdtData$EnvData$namePoly) <- ''
}
tkconfigure(.cdtData$EnvData$cb.shpAttr, values = AttrTable)
tkconfigure(cb.Polygon, values = name.poly)
})
########################
# Attribute field changed: rebuild the polygon-name list from that column.
tkbind(.cdtData$EnvData$cb.shpAttr, "<<ComboboxSelected>>", function(){
shpf <- getShpOpenData(file.dispShp)
if(!is.null(shpf)){
dat <- shpf[[2]]@data
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
adminN <- as.character(dat[, ids])
name.poly <- levels(as.factor(adminN))
if(length(name.poly) < 2) name.poly <- c(name.poly, "")
}else{
name.poly <- ''
}
tclvalue(.cdtData$EnvData$namePoly) <- name.poly[1]
tkconfigure(cb.Polygon, values = name.poly)
})
########################
# Polygon chosen: cache its boundary for highlighting and, if the selection
# map tab is visible, redraw it.
tkbind(cb.Polygon, "<<ComboboxSelected>>", function(){
.cdtData$EnvData$selectedPolygon <- NULL
if(tclvalue(.cdtData$EnvData$namePoly) != ''){
shpfopen <- getShpOpenData(file.dispShp)
if(!is.null(shpfopen)){
shpf <- shpfopen[[2]]
ids <- as.integer(tclvalue(tcl(.cdtData$EnvData$cb.shpAttr, 'current'))) + 1
spoly <- shpf@data[, ids] == tclvalue(.cdtData$EnvData$namePoly)
.cdtData$EnvData$selectedPolygon <- getBoundaries(shpf[spoly, ])
}
}
tabid <- as.integer(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0)
{
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
}
}
}
})
##############################################
# Display-map button: reads the station file header (rows 1-3 = IDs,
# longitudes, latitudes) and the optional shapefile, computes the bounding
# box of everything to be drawn, initializes the zoom extent and renders
# the selection map via displayMap4Validation() in its own notebook tab.
bt.dispMap <- ttkbutton(subfr2, text = lang.dlg[['button']][['0b']])
#######################
.cdtData$EnvData$tab$MapSelect <- NULL
tkconfigure(bt.dispMap, command = function(){
    donne <- getStnOpenData(file.stnfl)
    shpofile <- getShpOpenData(file.dispShp)
    if(!is.null(donne)){
        # first 3 rows hold station IDs, longitudes, latitudes (column 1 = labels)
        .cdtData$EnvData$donne <- donne[1:3, -1]
        lonStn <- as.numeric(.cdtData$EnvData$donne[2, ])
        latStn <- as.numeric(.cdtData$EnvData$donne[3, ])
        lo1 <- min(lonStn, na.rm = TRUE)
        lo2 <- max(lonStn, na.rm = TRUE)
        la1 <- min(latStn, na.rm = TRUE)
        la2 <- max(latStn, na.rm = TRUE)
        plotOK <- TRUE
        # Bug fix: the original indexed shpofile[[2]] unconditionally here,
        # which errored when stations were loaded but no shapefile was open.
        if(!is.null(shpofile)){
            shpf <- shpofile[[2]]
            .cdtData$EnvData$ocrds <- getBoundaries(shpf)
            .cdtData$EnvData$shpf <- shpf
        }
    }else{
        plotOK <- FALSE
        Insert.Messages.Out(lang.dlg[['message']][['0a']], TRUE, 'e')
    }
    ########
    # Polygon selection needs a shapefile: extend the bounding box to cover
    # it, or abort with an error message when none is available.
    if(.cdtData$EnvData$type.select == 'poly' & plotOK){
        if(!is.null(shpofile)){
            shpf <- shpofile[[2]]
            .cdtData$EnvData$ocrds <- getBoundaries(shpf)
            .cdtData$EnvData$shpf <- shpf
            bbxshp <- round(bbox(shpf), 4)
            lo1 <- min(lo1, bbxshp[1, 1])
            lo2 <- max(lo2, bbxshp[1, 2])
            la1 <- min(la1, bbxshp[2, 1])
            la2 <- max(la2, bbxshp[2, 2])
            plotOK <- TRUE
        }else{
            plotOK <- FALSE
            Insert.Messages.Out(lang.dlg[['message']][['0b']], TRUE, 'e')
        }
    }
    ########
    if(plotOK){
        # remember the full extent so the zoom-reset button can restore it
        ZoomXYval0 <<- c(lo1, lo2, la1, la2)
        tclvalue(.cdtData$EnvData$zoom$xx1) <- lo1
        tclvalue(.cdtData$EnvData$zoom$xx2) <- lo2
        tclvalue(.cdtData$EnvData$zoom$yy1) <- la1
        tclvalue(.cdtData$EnvData$zoom$yy2) <- la2
        .cdtData$EnvData$ZoomXYval <- ZoomXYval0
        imgContainer <- displayMap4Validation(.cdtData$EnvData$tab$MapSelect)
        .cdtData$EnvData$tab$MapSelect <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$MapSelect)
    }
})
##############################################
# Map manipulation area: zoom toolbar, rectangle coordinate entries and
# the "select" button.
frameIMgMan <- tkframe(subfr2)
#######################
# Zoom toolbar: zoom in/out, rubber-band rectangle, pan, redraw, reset.
frameZoom <- ttklabelframe(frameIMgMan, text = "ZOOM", relief = 'groove')
.cdtData$EnvData$zoom$btZoomP <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$plus, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btZoomM <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$moins, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btZoomRect <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$rect, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btPanImg <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$pan, relief = 'raised', bg = 'lightblue', state = 'normal')
.cdtData$EnvData$zoom$btRedraw <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$redraw, relief = 'raised', state = 'disabled')
.cdtData$EnvData$zoom$btReset <- tkbutton(frameZoom, image = .cdtEnv$tcl$zoom$img$reset, relief = 'raised')
#######################
tkgrid(.cdtData$EnvData$zoom$btZoomP, row = 0, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btZoomM, row = 0, column = 1, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btZoomRect, row = 0, column = 2, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btReset, row = 1, column = 0, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btRedraw, row = 1, column = 1, sticky = 'nswe', rowspan = 1, columnspan = 1)
tkgrid(.cdtData$EnvData$zoom$btPanImg, row = 1, column = 2, sticky = 'nswe', rowspan = 1, columnspan = 1)
helpWidget(.cdtData$EnvData$zoom$btZoomP, lang.dlg[['tooltip']][['12']], lang.dlg[['status']][['12']])
helpWidget(.cdtData$EnvData$zoom$btZoomM, lang.dlg[['tooltip']][['13']], lang.dlg[['status']][['13']])
helpWidget(.cdtData$EnvData$zoom$btZoomRect, lang.dlg[['tooltip']][['14']], lang.dlg[['status']][['14']])
helpWidget(.cdtData$EnvData$zoom$btPanImg, lang.dlg[['tooltip']][['15']], lang.dlg[['status']][['15']])
helpWidget(.cdtData$EnvData$zoom$btRedraw, lang.dlg[['tooltip']][['16']], lang.dlg[['status']][['16']])
helpWidget(.cdtData$EnvData$zoom$btReset, lang.dlg[['tooltip']][['17']], lang.dlg[['status']][['17']])
##############################################
# Rectangle coordinate entries; disabled until selection type is 'rect'.
frameCoord <- tkframe(frameIMgMan, relief = 'groove', borderwidth = 2)
.cdtData$EnvData$minlonRect <- tclVar()
.cdtData$EnvData$maxlonRect <- tclVar()
.cdtData$EnvData$minlatRect <- tclVar()
.cdtData$EnvData$maxlatRect <- tclVar()
txt.minLab <- tklabel(frameCoord, text = lang.dlg[['label']][['22']])
txt.maxLab <- tklabel(frameCoord, text = lang.dlg[['label']][['23']])
txt.lonLab <- tklabel(frameCoord, text = lang.dlg[['label']][['24']], anchor = 'e', justify = 'right')
txt.latLab <- tklabel(frameCoord, text = lang.dlg[['label']][['25']], anchor = 'e', justify = 'right')
en.minlon <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$minlonRect, justify = "left", state = 'disabled')
en.maxlon <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$maxlonRect, justify = "left", state = 'disabled')
en.minlat <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$minlatRect, justify = "left", state = 'disabled')
en.maxlat <- tkentry(frameCoord, width = 7, textvariable = .cdtData$EnvData$maxlatRect, justify = "left", state = 'disabled')
tkgrid(txt.minLab, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(txt.maxLab, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(txt.lonLab, row = 1, column = 0, sticky = 'e', rowspan = 1, columnspan = 1)
tkgrid(txt.latLab, row = 2, column = 0, sticky = 'e', rowspan = 1, columnspan = 1)
tkgrid(en.minlon, row = 1, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.maxlon, row = 1, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.minlat, row = 2, column = 1, sticky = 'we', rowspan = 1, columnspan = 1)
tkgrid(en.maxlat, row = 2, column = 2, sticky = 'we', rowspan = 1, columnspan = 1)
##############################################
# "Select" button; its command is configured elsewhere in this function.
.cdtData$EnvData$bt.select <- tkbutton(frameIMgMan, text = lang.dlg[['button']][['0c']], relief = 'raised', bg = 'lightblue')
##############################################
tkgrid(frameZoom, row = 0, column = 0, sticky = 'news', rowspan = 2, padx = 1, ipady = 5)
tkgrid(frameCoord, row = 0, column = 1, sticky = 'we', rowspan = 1)
tkgrid(.cdtData$EnvData$bt.select, row = 1, column = 1, sticky = 'we', rowspan = 1)
##############################################
# --- "Extract data" action button (Tab 2) ---
# Collects every user choice from the GUI into GeneralParameters, then runs
# HOV_DataExtraction() with a busy cursor and error trapping.
bt.extract.station <- ttkbutton(subfr2, text = lang.dlg[['button']][['0d']])
tkconfigure(bt.extract.station, command = function(){
# Harvest widget values. The CbXXX/XXXVAL pairs map the displayed (localized)
# combobox label back to its internal code via positional %in% matching.
GeneralParameters$clim.var <- clim.var
GeneralParameters$Tstep <- periodVAL[CbperiodVAL %in% str_trim(tclvalue(file.period))]
GeneralParameters$STN.file <- str_trim(tclvalue(file.stnfl))
GeneralParameters$ncdf.file$dir <- str_trim(tclvalue(dirNetCDF))
GeneralParameters$outdir <- str_trim(tclvalue(file.save1))
GeneralParameters$shp.file$shp <- str_trim(tclvalue(file.dispShp))
GeneralParameters$shp.file$attr <- str_trim(tclvalue(shpAttr))
GeneralParameters$type.select <- TypeSelect[SELECTALL %in% str_trim(tclvalue(type.select))]
# Geometry of the selection: rebuilt from scratch each run (reset to NULL
# first so stale fields from a previous extraction cannot linger).
GeneralParameters$Geom <- NULL
GeneralParameters$Geom$minlon <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$minlonRect)))
GeneralParameters$Geom$maxlon <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$maxlonRect)))
GeneralParameters$Geom$maxlat <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$maxlatRect)))
GeneralParameters$Geom$minlat <- as.numeric(str_trim(tclvalue(.cdtData$EnvData$minlatRect)))
GeneralParameters$Geom$namePoly <- str_trim(tclvalue(.cdtData$EnvData$namePoly))
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
Insert.Messages.Out(lang.dlg[['message']][['0c']], TRUE, "i")
# Show a busy cursor for the (potentially long) extraction; 'update' forces
# Tk to repaint before the blocking call starts.
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
ret <- tryCatch(
{
HOV_DataExtraction(GeneralParameters)
},
warning = function(w) warningFun(w),
error = function(e) errorFun(e),
# Always restore the cursor, even on error.
finally = {
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
}
)
# Convention used throughout this file: ret == 0 means success; NULL or
# non-zero means the extraction failed (message '0e').
if(!is.null(ret)){
if(ret == 0){
Insert.Messages.Out(lang.dlg[['message']][['0d']], TRUE, "s")
}else Insert.Messages.Out(lang.dlg[['message']][['0e']], TRUE, "e")
}else Insert.Messages.Out(lang.dlg[['message']][['0e']], TRUE, "e")
})
##############################################
# Tab 2 vertical layout: selection frame, shapefile frame, map-display
# button, image-manipulation frame, then the extraction button.
tkgrid(frameSelect, row = 0, column = 0, sticky = '')
tkgrid(frameShp, row = 1, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.dispMap, row = 2, column = 0, sticky = 'we', pady = 3)
tkgrid(frameIMgMan, row = 3, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.extract.station, row = 4, column = 0, sticky = 'we', pady = 3)
##############################################
# Zoom "reset" command: restore the initial extent (ZoomXYval0, captured
# earlier outside this chunk) and redraw the map image if the currently
# selected notebook tab is the map image tab.
tkconfigure(.cdtData$EnvData$zoom$btReset, command = function(){
.cdtData$EnvData$ZoomXYval <- ZoomXYval0
tclvalue(.cdtData$EnvData$zoom$xx1) <- ZoomXYval0[1]
tclvalue(.cdtData$EnvData$zoom$xx2) <- ZoomXYval0[2]
tclvalue(.cdtData$EnvData$zoom$yy1) <- ZoomXYval0[3]
tclvalue(.cdtData$EnvData$zoom$yy2) <- ZoomXYval0[4]
# Tk notebook index is 0-based; R lists are 1-based.
tabid <- as.numeric(tclvalue(tkindex(.cdtEnv$tcl$main$tknotes, 'current'))) + 1
if(length(.cdtData$OpenTab$Type) > 0){
# Only redraw when the active tab is an image tab AND it is the map
# tab this panel controls (matched by stored tab ID).
if(.cdtData$OpenTab$Type[[tabid]] == "img" & !is.null(.cdtData$EnvData$tab$MapSelect))
{
if(.cdtData$OpenTab$Data[[tabid]][[1]][[1]]$ID == .cdtData$EnvData$tab$MapSelect[[2]])
{
# Redraw at the zoom scale currently selected in the toolbar spinboxes.
refreshPlot(W = .cdtData$OpenTab$Data[[tabid]][[2]][[1]],
img = .cdtData$OpenTab$Data[[tabid]][[2]][[2]],
hscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinH))),
vscale = as.numeric(tclvalue(tkget(.cdtEnv$tcl$toolbar$spinV))))
}
}
}
})
##########################
# --- Mutually exclusive mouse-mode selection ---
# Five Button-1 binds implement a radio-button-like behavior over the zoom
# tools and the rectangle-select button. Each handler:
#   1. sets exactly one of the five tcl mode flags to 1 (reset: all 0),
#   2. highlights its own button (bg = 'red', state = 'disabled' so it
#      cannot be re-pressed) and restores the other four to the idle look.
# NOTE(review): these fire on the raw Button-1 event, i.e. in addition to /
# before any 'command' configured on the same widget.
tkbind(.cdtData$EnvData$zoom$btReset, "<Button-1>", function(){
# Reset: no mode active; all buttons back to normal.
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 0
tclvalue(.cdtData$EnvData$pressGetCoords) <- 0
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
})
# Zoom-in mode (pressButP).
tkbind(.cdtData$EnvData$zoom$btZoomP, "<Button-1>", function(){
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 1
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 0
tclvalue(.cdtData$EnvData$pressGetCoords) <- 0
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'red', state = 'disabled')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
})
# Zoom-out mode (pressButM).
tkbind(.cdtData$EnvData$zoom$btZoomM, "<Button-1>", function(){
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 1
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 0
tclvalue(.cdtData$EnvData$pressGetCoords) <- 0
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'red', state = 'disabled')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
})
# Rectangle-zoom mode (pressButRect).
tkbind(.cdtData$EnvData$zoom$btZoomRect, "<Button-1>", function(){
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 1
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 0
tclvalue(.cdtData$EnvData$pressGetCoords) <- 0
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'red', state = 'disabled')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
})
# Pan/drag mode (pressButDrag).
tkbind(.cdtData$EnvData$zoom$btPanImg, "<Button-1>", function(){
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 1
tclvalue(.cdtData$EnvData$pressGetCoords) <- 0
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'red', state = 'disabled')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'lightblue', state = 'normal')
})
# Coordinate-pick / selection mode (pressGetCoords).
tkbind(.cdtData$EnvData$bt.select, "<Button-1>", function(){
tclvalue(.cdtData$EnvData$zoom$pressButP) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButM) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButRect) <- 0
tclvalue(.cdtData$EnvData$zoom$pressButDrag) <- 0
tclvalue(.cdtData$EnvData$pressGetCoords) <- 1
tkconfigure(.cdtData$EnvData$zoom$btZoomP, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomM, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btZoomRect, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$zoom$btPanImg, relief = 'raised', bg = 'lightblue', state = 'normal')
tkconfigure(.cdtData$EnvData$bt.select, relief = 'raised', bg = 'red', state = 'disabled')
})
#######################################################################################################
#Tab3
subfr3 <- bwTabScrollableFrame(cmd.tab3)
##############################################
# --- "Load existing validation data" frame ---
# Checkbox enables an entry + browse button to pick a previously saved
# validation RDS file instead of recomputing from scratch.
frameHOV <- ttklabelframe(subfr3, text = lang.dlg[['label']][['6']], relief = 'groove')
validExist <- tclVar(0)
file.hovd <- tclVar()
# Entry/browse start disabled because validExist defaults to 0.
stateHOVd <- if(tclvalue(validExist) == "1") "normal" else "disabled"
chk.hovd <- tkcheckbutton(frameHOV, variable = validExist, text = lang.dlg[['checkbutton']][['1']], anchor = 'w', justify = 'left')
en.hovd <- tkentry(frameHOV, textvariable = file.hovd, width = largeur1 + 5, state = stateHOVd)
bt.hovd <- ttkbutton(frameHOV, text = .cdtEnv$tcl$lang$global[['button']][['6']], state = stateHOVd)
tkgrid(chk.hovd, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 4, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.hovd, row = 0, column = 4, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(en.hovd, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
###############
# Browse callback: pick an RDS file, read it, and restore the saved
# validation session (parameters, data, and — if present — statistics),
# then re-sync the Tab4 widgets to the restored station list.
tkconfigure(bt.hovd, command = function(){
path.hovd <- tclvalue(tkgetOpenFile(initialdir = getwd(), filetypes = .cdtEnv$tcl$data$filetypes6))
if(path.hovd == "") return(NULL)
tclvalue(file.hovd) <- path.hovd
if(file.exists(str_trim(tclvalue(file.hovd)))){
hovd.data <- try(readRDS(str_trim(tclvalue(file.hovd))), silent = TRUE)
if(inherits(hovd.data, "try-error")){
Insert.Messages.Out(lang.dlg[['message']][['4']], TRUE, 'e')
# Strip newlines from the try() error text before displaying it.
Insert.Messages.Out(gsub('[\r\n]', '', hovd.data[1]), TRUE, 'e')
return(NULL)
}
# Restore the saved session into the shared environment.
.cdtData$EnvData$file.hovd <- str_trim(tclvalue(file.hovd))
.cdtData$EnvData$GeneralParameters <- hovd.data$GeneralParameters
.cdtData$EnvData$cdtData <- hovd.data$cdtData
.cdtData$EnvData$stnData <- hovd.data$stnData
.cdtData$EnvData$ncdfData <- hovd.data$ncdfData
# opDATA/Statistics only exist if statistics were already computed.
if(!is.null(hovd.data$opDATA)){
.cdtData$EnvData$opDATA <- hovd.data$opDATA
.cdtData$EnvData$Statistics <- hovd.data$Statistics
}
###
# Reflect the restored time step in the period combobox.
tclvalue(file.period) <- CbperiodVAL[periodVAL %in% hovd.data$GeneralParameters$Tstep]
if(!is.null(.cdtData$EnvData$opDATA$id)){
# Mirror of the cb.stat.data <<ComboboxSelected>> logic below:
# per-station widgets are only active when 'stn' statistics are chosen.
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
stateDispSTN <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stat.sel, values = .cdtData$EnvData$opDATA$id, state = stateDispSTN)
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(bt.stat.prev, state = stateDispSTN)
tkconfigure(bt.stat.next, state = stateDispSTN)
stateMaps <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stats.maps, state = stateMaps)
tkconfigure(bt.stats.maps, state = stateMaps)
tkconfigure(cb.plot.type, state = stateMaps)
tkconfigure(bt.stats.Opt, state = stateMaps)
stateStnID <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stn.graph, values = .cdtData$EnvData$opDATA$id, state = stateStnID)
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(bt.stn.graph.prev, state = stateStnID)
tkconfigure(bt.stn.graph.next, state = stateStnID)
# Graph types: 'all' data offers only the first two (no Lines);
# fall back to the first type if the now-invalid third was selected.
itype <- if(statsdata == 'all') 1:2 else 1:3
CbTypeGRAPH <- typeGraphCombo[itype]
if(statsdata == 'all'){
if(str_trim(tclvalue(type.graph)) == typeGraphCombo[3])
tclvalue(type.graph) <- typeGraphCombo[1]
}
tkconfigure(cb.stats.graph, values = CbTypeGRAPH)
}
}
})
###############
# Toggle handler for the "use existing data" checkbox.
# NOTE: Button-1 fires BEFORE Tk flips the checkbutton's variable, so the
# conditions read inverted — tclvalue(validExist) still holds the OLD value.
tkbind(chk.hovd, "<Button-1>", function(){
stateHOVd <- if(tclvalue(validExist) == '1') 'disabled' else 'normal'
tkconfigure(en.hovd, state = stateHOVd)
tkconfigure(bt.hovd, state = stateHOVd)
# When loading existing data, tabs 1 and 2 (input/extraction) are enabled;
# otherwise they are disabled — again using the pre-toggle value.
stateBTEx <- if(tclvalue(validExist) == '1') 'normal' else 'disabled'
tcl(tknote.cmd, 'itemconfigure', cmd.tab1$IDtab, state = stateBTEx)
tcl(tknote.cmd, 'itemconfigure', cmd.tab2$IDtab, state = stateBTEx)
})
##############################################
# --- Season / year-range frame ---
# Two sub-frames: start/end year entries and start/end month comboboxes,
# initialized from GeneralParameters$date.range.
frameSeason <- ttklabelframe(subfr3, text = lang.dlg[['label']][['7']], relief = 'groove')
##############
fr.year <- ttklabelframe(frameSeason, text = lang.dlg[['label']][['9']], relief = 'sunken', labelanchor = "n", borderwidth = 2)
start.year <- tclVar(GeneralParameters$date.range$start.year)
end.year <- tclVar(GeneralParameters$date.range$end.year)
# Separator label between the two entries, e.g. "-to-".
txt.to1 <- tklabel(fr.year, text = paste0('-', lang.dlg[['label']][['10']], '-'))
en.years1 <- tkentry(fr.year, width = 5, textvariable = start.year, justify = 'right')
en.years2 <- tkentry(fr.year, width = 5, textvariable = end.year, justify = 'right')
tkgrid(en.years1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(txt.to1, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(en.years2, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
helpWidget(en.years1, lang.dlg[['tooltip']][['7']], lang.dlg[['status']][['7']])
helpWidget(en.years2, lang.dlg[['tooltip']][['8']], lang.dlg[['status']][['8']])
##############
fr.seas <- ttklabelframe(frameSeason, text = lang.dlg[['label']][['8']], relief = 'sunken', labelanchor = "n", borderwidth = 2)
# Months are stored as numbers (1-12) in the parameters but shown as the
# localized month names from MOIS (defined outside this chunk).
mon1 <- as.numeric(str_trim(GeneralParameters$date.range$start.month))
mon2 <- as.numeric(str_trim(GeneralParameters$date.range$end.month))
start.mois <- tclVar(MOIS[mon1])
end.mois <- tclVar(MOIS[mon2])
txt.to2 <- tklabel(fr.seas, text = paste0('-', lang.dlg[['label']][['10']], '-'))
cb.month1 <- ttkcombobox(fr.seas, values = MOIS, textvariable = start.mois, width = 5)
cb.month2 <- ttkcombobox(fr.seas, values = MOIS, textvariable = end.mois, width = 5)
tkgrid(cb.month1, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(txt.to2, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(cb.month2, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
helpWidget(cb.month1, lang.dlg[['tooltip']][['9']], lang.dlg[['status']][['9']])
helpWidget(cb.month2, lang.dlg[['tooltip']][['10']], lang.dlg[['status']][['10']])
##############
# Empty label used purely as a horizontal spacer between the two sub-frames.
sepSeason <- tklabel(frameSeason, text = "", width = largeur5)
tkgrid(fr.seas, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(sepSeason, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
tkgrid(fr.year, row = 0, column = 2, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
##############################################
# --- Series-aggregation frame ---
# Checkbox enables a "settings" button that opens the aggregation dialog.
frameAggr <- ttklabelframe(subfr3, text = lang.dlg[['label']][['11']], relief = 'groove')
aggr.data <- tclVar(GeneralParameters$aggr.series$aggr.data)
stateAggr <- if(GeneralParameters$aggr.series$aggr.data) "normal" else "disabled"
chk.aggrdata <- tkcheckbutton(frameAggr, variable = aggr.data, text = lang.dlg[['checkbutton']][['2']], anchor = 'w', justify = 'left', width = largeur6)
bt.aggrPars <- ttkbutton(frameAggr, text = lang.dlg[['button']][['1']], state = stateAggr)
tkgrid(chk.aggrdata, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.aggrPars, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
########
# Open the aggregation-parameters dialog; `<<-` writes the edited settings
# back into GeneralParameters in the enclosing function's environment.
tkconfigure(bt.aggrPars, command = function(){
GeneralParameters[['aggr.series']] <<- getInfo_AggregateFun(.cdtEnv$tcl$main$win,
GeneralParameters[['aggr.series']])
})
# Button-1 fires before the checkbutton toggles, hence the inverted test:
# '1' (about to become unchecked) -> disable the settings button.
tkbind(chk.aggrdata, "<Button-1>", function(){
stateAggr <- if(tclvalue(aggr.data) == '1') 'disabled' else 'normal'
tkconfigure(bt.aggrPars, state = stateAggr)
})
##############################################
# --- Statistics-source frame ---
# Choose which dataset statistics are computed on: all data points ('all'),
# the spatial average ('avg'), or per-station ('stn'). The localized labels
# (STATDATATYPE) map positionally onto the internal codes (StatDataT).
frameStatData <- tkframe(subfr3, relief = 'groove', borderwidth = 2)
STATDATATYPE <- lang.dlg[['combobox']][['1']]
StatDataT <- c('all', 'avg', 'stn')
stat.data <- tclVar()
tclvalue(stat.data) <- STATDATATYPE[StatDataT %in% GeneralParameters$stat.data]
txt.stat.data <- tklabel(frameStatData, text = lang.dlg[['label']][['12']], anchor = 'e', justify = 'right')
cb.stat.data <- ttkcombobox(frameStatData, values = STATDATATYPE, textvariable = stat.data, justify = 'center', width = largeur4)
tkgrid(txt.stat.data, row = 0, column = 0, sticky = 'e', padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stat.data, row = 0, column = 1, sticky = 'we', padx = 1, pady = 1, ipadx = 1, ipady = 1)
helpWidget(cb.stat.data, lang.dlg[['tooltip']][['11']], lang.dlg[['status']][['11']])
#################
# On selection change, enable/disable every per-station widget in Tab4
# (table navigation, maps, graphs) — they only make sense for 'stn'.
tkbind(cb.stat.data, "<<ComboboxSelected>>", function(){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
stateDispSTN <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(bt.stat.prev, state = stateDispSTN)
tkconfigure(cb.stat.sel, state = stateDispSTN)
tkconfigure(bt.stat.next, state = stateDispSTN)
stateMaps <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stats.maps, state = stateMaps)
tkconfigure(bt.stats.maps, state = stateMaps)
tkconfigure(cb.plot.type, state = stateMaps)
tkconfigure(bt.stats.Opt, state = stateMaps)
stateStnID <- if(statsdata == 'stn') 'normal' else 'disabled'
tkconfigure(cb.stn.graph, state = stateStnID)
tkconfigure(bt.stn.graph.prev, state = stateStnID)
tkconfigure(bt.stn.graph.next, state = stateStnID)
# 'all' offers only the first two graph types; switch away from the third
# if it was selected (same logic as in the bt.hovd load callback).
itype <- if(statsdata == 'all') 1:2 else 1:3
CbTypeGRAPH <- typeGraphCombo[itype]
if(statsdata == 'all'){
if(str_trim(tclvalue(type.graph)) == typeGraphCombo[3])
tclvalue(type.graph) <- typeGraphCombo[1]
}
tkconfigure(cb.stats.graph, values = CbTypeGRAPH)
})
##############################################
# Dialog button: categorical-validation settings (dichotomous forecast
# thresholds); edited values are written back via `<<-`.
bt.categStats <- ttkbutton(subfr3, text = lang.dlg[['button']][['2']])
tkconfigure(bt.categStats, command = function(){
GeneralParameters[['dicho.fcst']] <<- getInfo_categoricalValid(.cdtEnv$tcl$main$win,
GeneralParameters[['dicho.fcst']])
})
##############################################
# Dialog button: volumetric-statistics settings. The current statistics
# source ('all'/'avg'/'stn') is passed so the dialog can adapt.
# Only gridded rainfall uses this (see the clim.var == 'RR' layout below).
bt.volumeStats <- ttkbutton(subfr3, text = lang.dlg[['button']][['3']])
tkconfigure(bt.volumeStats, command = function(){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
GeneralParameters[['volume.stat']] <<- getInfo_volumetricValid(.cdtEnv$tcl$main$win, statsdata,
GeneralParameters[['volume.stat']])
})
##############################################
# --- "Compute statistics" action button (Tab 3) ---
# Harvests the season/aggregation/source choices into GeneralParameters and
# runs ValidationDataProcs() with busy cursor + error trapping, then
# refreshes the per-station widgets on success.
bt.calc.stat <- ttkbutton(subfr3, text = lang.dlg[['button']][['4']])
tkconfigure(bt.calc.stat, command = function(){
# Month names from the comboboxes are converted back to 1-12 indices.
GeneralParameters$date.range$start.month <- which(MOIS %in% str_trim(tclvalue(start.mois)))
GeneralParameters$date.range$end.month <- which(MOIS %in% str_trim(tclvalue(end.mois)))
GeneralParameters$date.range$start.year <- as.numeric(str_trim(tclvalue(start.year)))
GeneralParameters$date.range$end.year <- as.numeric(str_trim(tclvalue(end.year)))
# Tk checkbutton variables hold "0"/"1" strings; convert to logical.
GeneralParameters$aggr.series$aggr.data <- switch(tclvalue(aggr.data), '0' = FALSE, '1' = TRUE)
GeneralParameters$stat.data <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
#####
# GeneralParameters$STN.file <- basename(str_trim(tclvalue(dirNetCDF)))
GeneralParameters$STN.file <- str_trim(tclvalue(file.stnfl))
GeneralParameters$outdir <- str_trim(tclvalue(file.save1))
GeneralParameters$validExist <- switch(tclvalue(validExist), '0' = FALSE, '1' = TRUE)
# assign('GeneralParameters', GeneralParameters, envir = .GlobalEnv)
Insert.Messages.Out(lang.dlg[['message']][['1']], TRUE, "i")
# Busy cursor while the (potentially long) computation runs.
tkconfigure(.cdtEnv$tcl$main$win, cursor = 'watch')
tcl('update')
ret <- tryCatch(
{
ValidationDataProcs(GeneralParameters)
},
warning = function(w) warningFun(w),
error = function(e) errorFun(e),
# Always restore the cursor, even on error.
finally = {
tkconfigure(.cdtEnv$tcl$main$win, cursor = '')
tcl('update')
}
)
# ret == 0 signals success (same convention as bt.extract.station above).
if(!is.null(ret)){
if(ret == 0){
Insert.Messages.Out(lang.dlg[['message']][['2']], TRUE, "s")
# For per-station statistics, repopulate the station selectors in
# Tab4 with the IDs produced by the computation.
if(GeneralParameters$stat.data == 'stn'){
tkconfigure(cb.stat.sel, values = .cdtData$EnvData$opDATA$id)
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[1]
tkconfigure(cb.stn.graph, values = .cdtData$EnvData$opDATA$id, state = 'normal')
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[1]
}
}else Insert.Messages.Out(lang.dlg[['message']][['3']], TRUE, 'e')
}else Insert.Messages.Out(lang.dlg[['message']][['3']], TRUE, 'e')
})
##############################################
# Tab 3 vertical layout; the volumetric-stats button is only shown for
# rainfall (clim.var == 'RR'), so row 5 stays empty for temperature.
tkgrid(frameHOV, row = 0, column = 0, sticky = 'we')
tkgrid(frameSeason, row = 1, column = 0, sticky = 'we', pady = 1)
tkgrid(frameAggr, row = 2, column = 0, sticky = 'we', pady = 1)
tkgrid(frameStatData, row = 3, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.categStats, row = 4, column = 0, sticky = 'we', pady = 3)
if(clim.var == 'RR')
tkgrid(bt.volumeStats, row = 5, column = 0, sticky = 'we', pady = 3)
tkgrid(bt.calc.stat, row = 6, column = 0, sticky = 'we', pady = 3)
#######################################################################################################
#Tab4
subfr4 <- bwTabScrollableFrame(cmd.tab4)
##############################################
# --- Statistics-table frame ---
# Station selector (combobox + prev/next) and a "display" button that opens
# the statistics table in a notebook tab. The selector is only enabled for
# per-station statistics; the list is filled after computation/loading.
frameStatTab <- ttklabelframe(subfr4, text = lang.dlg[['label']][['13']], relief = 'groove')
STATIONIDS <- ''
stn.stat.tab <- tclVar()
stateDispSTN <- if(GeneralParameters$stat.data == 'stn') 'normal' else 'disabled'
bt.stat.prev <- ttkbutton(frameStatTab, text = "<<", state = stateDispSTN, width = largeur7)
bt.stat.next <- ttkbutton(frameStatTab, text = ">>", state = stateDispSTN, width = largeur7)
cb.stat.sel <- ttkcombobox(frameStatTab, values = STATIONIDS, textvariable = stn.stat.tab, width = largeur3, state = stateDispSTN, justify = 'center')
bt.stat.disp <- ttkbutton(frameStatTab, text = lang.dlg[['button']][['5']])
tkgrid(bt.stat.prev, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.stat.sel, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 3, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stat.next, row = 0, column = 4, sticky = 'w', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stat.disp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 5, padx = 1, pady = 1, ipadx = 1, ipady = 1)
################
# Handle to the statistics-table notebook tab (reused so re-display
# replaces the existing tab instead of opening a new one).
.cdtData$EnvData$tab$validStat <- NULL
# "Display" button: build a 4-column data.frame (name, value, description,
# perfect score) for the currently selected statistics source and show it.
tkconfigure(bt.stat.disp, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
statsdata <- StatDataT[STATDATATYPE %in% str_trim(tclvalue(stat.data))]
if(statsdata == 'all'){
don <- .cdtData$EnvData$Statistics$ALL
dat2disp <- data.frame(don$statNames, don$statistics, don$description, don$perfect.score)
titleTab <- 'All-Data Statistics'
}
if(statsdata == 'avg'){
don <- .cdtData$EnvData$Statistics$AVG
dat2disp <- data.frame(don$statNames, don$statistics, don$description, don$perfect.score)
titleTab <- 'Spatial-Average Statistics'
}
if(statsdata == 'stn'){
# Per-station: $statistics is a matrix, one column per station;
# pick the column matching the selected station ID.
don <- .cdtData$EnvData$Statistics$STN
istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(stn.stat.tab)))
dat2disp <- data.frame(don$statNames, don$statistics[, istn], don$description, don$perfect.score)
titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
}
names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
rownames(dat2disp) <- NULL
.cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
}
})
# "<<" button: step to the previous station (wrapping to the last) and
# refresh the displayed table. Only meaningful in 'stn' mode, where this
# button is enabled.
tkconfigure(bt.stat.prev, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
don <- .cdtData$EnvData$Statistics$STN
istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(stn.stat.tab)))
istn <- istn - 1
if(istn < 1) istn <- length(.cdtData$EnvData$opDATA$id)
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[istn]
dat2disp <- data.frame(don$statNames, don$statistics[, istn], don$description, don$perfect.score)
names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
rownames(dat2disp) <- NULL
titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
.cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
}
})
# ">>" button: step to the next station (wrapping to the first), same logic.
tkconfigure(bt.stat.next, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
don <- .cdtData$EnvData$Statistics$STN
istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(stn.stat.tab)))
istn <- istn + 1
if(istn > length(.cdtData$EnvData$opDATA$id)) istn <- 1
tclvalue(stn.stat.tab) <- .cdtData$EnvData$opDATA$id[istn]
dat2disp <- data.frame(don$statNames, don$statistics[, istn], don$description, don$perfect.score)
names(dat2disp) <- c('Name', 'Statistics', 'Description', 'Perfect.Score')
rownames(dat2disp) <- NULL
titleTab <- paste(tclvalue(stn.stat.tab), 'Statistics')
.cdtData$EnvData$tab$validStat <- tableNotebookTab_unik(dat2disp, .cdtData$EnvData$tab$validStat, titleTab, 12)
}
})
##############################################
# --- Statistics-map frame ---
# Statistic selector + options/plot buttons + point/pixel plot-type chooser.
# Statistic codes are grouped: continuous (13), categorical (6), volumetric
# (10) = 29 total; temperature ('TT') excludes the volumetric group (1:19).
frameMap <- ttklabelframe(subfr4, text = lang.dlg[['label']][['14']], relief = 'groove')
statsCON <- c('CORR', 'BR2', 'BIAS', 'PBIAS', 'ME', 'MAE', 'RMSE', 'NSE', 'MNSE', 'RNSE', 'IOA', 'MIOA', 'RIOA')
statsCAT <- c('POD', 'POFD', 'FAR', 'FBS', 'CSI', 'HSS')
statsVOL <- c('MQB', 'MQE', 'VHI', 'QPOD', 'VFAR', 'QFAR', 'VMI', 'QMISS', 'VCSI', 'QCSI')
ValStatNAMES0 <- c(statsCON, statsCAT, statsVOL)
CbStatNAMES0 <- lang.dlg[['combobox']][['2']]
ivarL <- switch(clim.var, "RR" = 1:29, "TT" = 1:19)
statsVAR <- tclVar()
# Displayed (localized) names and internal codes, positionally aligned.
CbStatNAMES <- CbStatNAMES0[ivarL]
ValStatNAMES <- ValStatNAMES0[ivarL]
tclvalue(statsVAR) <- CbStatNAMES[ValStatNAMES %in% GeneralParameters$statsVar]
# Maps are only available for per-station statistics.
stateMaps <- if(GeneralParameters$stat.data == 'stn') 'normal' else 'disabled'
cb.stats.maps <- ttkcombobox(frameMap, values = CbStatNAMES, textvariable = statsVAR, width = largeur2, state = stateMaps)
##########
frMapBt <- tkframe(frameMap)
bt.stats.maps <- ttkbutton(frMapBt, text = .cdtEnv$tcl$lang$global[['button']][['3']], state = stateMaps, width = largeur9)
bt.stats.Opt <- ttkbutton(frMapBt, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateMaps, width = largeur9)
tkgrid(bt.stats.Opt, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stats.maps, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 2, pady = 1, ipadx = 1, ipady = 1)
##########
# Plot-type chooser: station values drawn as points or rasterized pixels.
frPlotT <- tkframe(frameMap)
typeMapPLOT <- c("Points", "Pixels")
.cdtData$EnvData$typeMap <- tclVar("Points")
txt.plot.type <- tklabel(frPlotT, text = lang.dlg[['label']][['15']], anchor = "e", justify = "right")
cb.plot.type <- ttkcombobox(frPlotT, values = typeMapPLOT, textvariable = .cdtData$EnvData$typeMap, width = largeur8, state = stateMaps)
tkgrid(txt.plot.type, row = 0, column = 0, sticky = 'e', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(cb.plot.type, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1, ipadx = 1, ipady = 1)
##########
tkgrid(cb.stats.maps, row = 0, column = 0, sticky = 'we')
tkgrid(frMapBt, row = 1, column = 0, sticky = '')
tkgrid(frPlotT, row = 2, column = 0, sticky = '')
##############
# Map-options button: pre-compute pretty color-scale levels from the values
# of the selected statistic so the options dialog opens with sensible
# defaults, unless the user has set custom levels.
tkconfigure(bt.stats.Opt, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
mapstat <- ValStatNAMES[CbStatNAMES %in% str_trim(tclvalue(statsVAR))]
istat <- which(.cdtData$EnvData$Statistics$STN$statNames == mapstat)
don <- .cdtData$EnvData$Statistics$STN$statistics[istat, ]
atlevel <- pretty(don, n = 10, min.n = 7)
if(is.null(.cdtData$EnvData$statMapOp$userLvl$levels)){
.cdtData$EnvData$statMapOp$userLvl$levels <- atlevel
}else{
# Only overwrite when the user has not customized the levels.
if(!.cdtData$EnvData$statMapOp$userLvl$custom)
.cdtData$EnvData$statMapOp$userLvl$levels <- atlevel
}
}
.cdtData$EnvData$statMapOp <- MapGraph.MapOptions(.cdtData$EnvData$statMapOp)
# For point maps, propagate the chosen point size to the enclosing
# function's pointSizeI (used elsewhere when rendering) via `<<-`.
if(str_trim(tclvalue(.cdtData$EnvData$typeMap)) == "Points")
pointSizeI <<- .cdtData$EnvData$statMapOp$pointSize
})
################
# Handle to the map notebook tab (reused across redraws).
.cdtData$EnvData$tab$Maps <- NULL
# Plot button: stash the selected statistic and station coordinates in the
# shared environment, then delegate drawing to Validation.DisplayStatMaps().
tkconfigure(bt.stats.maps, command = function(){
if(!is.null(.cdtData$EnvData$Statistics)){
.cdtData$EnvData$statVAR <- ValStatNAMES[CbStatNAMES %in% str_trim(tclvalue(statsVAR))]
.cdtData$EnvData$plot.maps$data.type <- "cdtstation"
.cdtData$EnvData$plot.maps$lon <- .cdtData$EnvData$opDATA$lon
.cdtData$EnvData$plot.maps$lat <- .cdtData$EnvData$opDATA$lat
.cdtData$EnvData$plot.maps$id <- .cdtData$EnvData$opDATA$id
Validation.DisplayStatMaps()
}
})
##############################################
# --- Graph frame ---
# Graph-type selector (Scatter/CDF/Lines) with options/plot buttons, plus a
# station selector (enabled only in per-station mode).
frameGraph <- ttklabelframe(subfr4, text = lang.dlg[['label']][['16']], relief = 'groove')
############
frameGrP <- tkframe(frameGraph)
# Localized labels (typeGraphCombo) map positionally onto internal codes
# (valGraphCombo). 'all' data supports only the first two types.
typeGraphCombo <- lang.dlg[['combobox']][['3']]
valGraphCombo <- c("Scatter", "CDF", "Lines")
itype <- if(GeneralParameters$stat.data == 'all') 1:2 else 1:3
type.graph <- tclVar()
CbTypeGRAPH <- typeGraphCombo[itype]
ValTypeGRAPH <- valGraphCombo[itype]
tclvalue(type.graph) <- CbTypeGRAPH[ValTypeGRAPH %in% GeneralParameters$type.graph]
cb.stats.graph <- ttkcombobox(frameGrP, values = CbTypeGRAPH, textvariable = type.graph, width = largeur2)
bt.stats.graph <- ttkbutton(frameGrP, text = .cdtEnv$tcl$lang$global[['button']][['3']], width = largeur9)
bt.Opt.graph <- ttkbutton(frameGrP, text = .cdtEnv$tcl$lang$global[['button']][['4']], width = largeur9)
tkgrid(cb.stats.graph, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.Opt.graph, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 3, padx = 2, pady = 1, ipadx = 1, ipady = 1)
tkgrid(bt.stats.graph, row = 1, column = 3, sticky = 'we', rowspan = 1, columnspan = 3, padx = 2, pady = 1, ipadx = 1, ipady = 1)
############
# Station selector for the graph; starts disabled and empty, populated
# after statistics are computed or loaded (see bt.calc.stat / bt.hovd).
frameGrS <- tkframe(frameGraph)
STNIDGRAPH <- ""
.cdtData$EnvData$stnIDGraph <- tclVar()
stateStnID <- "disabled"
cb.stn.graph <- ttkcombobox(frameGrS, values = STNIDGRAPH, textvariable = .cdtData$EnvData$stnIDGraph, width = largeur3, state = stateStnID, justify = 'center')
bt.stn.graph.prev <- ttkbutton(frameGrS, text = "<<", state = stateStnID, width = largeur7)
bt.stn.graph.next <- ttkbutton(frameGrS, text = ">>", state = stateStnID, width = largeur7)
tkgrid(bt.stn.graph.prev, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(cb.stn.graph, row = 0, column = 1, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 2, ipadx = 1, ipady = 1)
tkgrid(bt.stn.graph.next, row = 0, column = 3, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 2, ipadx = 1, ipady = 1)
##############
tkgrid(frameGrP, row = 0, column = 0, sticky = 'we')
tkgrid(frameGrS, row = 1, column = 0, sticky = 'we')
##############
# Handle to the graph notebook tab (reused across redraws).
.cdtData$EnvData$tab$Graph <- NULL
# Plot button: record the internal graph-type code, then render via
# CDT.Display.Graph/Validation.plotGraph into the (possibly existing) tab.
tkconfigure(bt.stats.graph, command = function(){
.cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
if(!is.null(.cdtData$EnvData$opDATA$stnStatData)){
imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
.cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
}
})
# "<<" button: previous station (wrapping to the last) and replot.
tkconfigure(bt.stn.graph.prev, command = function(){
.cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
if(!is.null(.cdtData$EnvData$opDATA$stnStatData)){
istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(.cdtData$EnvData$stnIDGraph)))
istn <- istn - 1
if(istn < 1) istn <- length(.cdtData$EnvData$opDATA$id)
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[istn]
imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
.cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
}
})
# ">>" button: next station (wrapping to the first) and replot.
tkconfigure(bt.stn.graph.next, command = function(){
.cdtData$EnvData$type.graph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
if(!is.null(.cdtData$EnvData$opDATA$stnStatData)){
istn <- which(.cdtData$EnvData$opDATA$id == str_trim(tclvalue(.cdtData$EnvData$stnIDGraph)))
istn <- istn + 1
if(istn > length(.cdtData$EnvData$opDATA$id)) istn <- 1
tclvalue(.cdtData$EnvData$stnIDGraph) <- .cdtData$EnvData$opDATA$id[istn]
imgContainer <- CDT.Display.Graph(Validation.plotGraph, .cdtData$EnvData$tab$Graph, 'Validation-Plot')
.cdtData$EnvData$tab$Graph <- imageNotebookTab_unik(imgContainer, .cdtData$EnvData$tab$Graph)
}
})
##############
# Options button: dispatch to the type-specific options dialog by name,
# e.g. Validation.GraphOptions.Scatter — resolved dynamically with get().
tkconfigure(bt.Opt.graph, command = function(){
typeGraph <- valGraphCombo[typeGraphCombo %in% str_trim(tclvalue(type.graph))]
plot.fun <- get(paste0("Validation.GraphOptions.", typeGraph), mode = "function")
.cdtData$EnvData$GraphOp <- plot.fun(.cdtData$EnvData$GraphOp)
})
#############################
# Tab 4 vertical layout: statistics table, maps, then graphs.
tkgrid(frameStatTab, row = 0, column = 0, sticky = 'we')
tkgrid(frameMap, row = 1, column = 0, sticky = 'we', pady = 3)
tkgrid(frameGraph, row = 2, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
#Tab5
subfr5 <- bwTabScrollableFrame(cmd.tab5)
##############################################
# --- Shapefile-overlay frame ---
# Checkbox to add administrative boundaries to plots, with an options
# button, a combobox of already-opened files, and a "..." browse button.
frameSHP <- ttklabelframe(subfr5, text = lang.dlg[['label']][['17']], relief = 'groove')
.cdtData$EnvData$shp$add.shp <- tclVar(GeneralParameters$add.to.plot$add.shp)
file.plotShp <- tclVar(GeneralParameters$add.to.plot$shp.file)
stateSHP <- if(GeneralParameters$add.to.plot$add.shp) "normal" else "disabled"
chk.addshp <- tkcheckbutton(frameSHP, variable = .cdtData$EnvData$shp$add.shp, text = lang.dlg[['checkbutton']][['3']], anchor = 'w', justify = 'left')
bt.addshpOpt <- ttkbutton(frameSHP, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateSHP)
cb.addshp <- ttkcombobox(frameSHP, values = unlist(listOpenFiles), textvariable = file.plotShp, width = largeur0, state = stateSHP)
bt.addshp <- tkbutton(frameSHP, text = "...", state = stateSHP)
tkgrid(chk.addshp, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.addshpOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.addshp, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.addshp, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#################
# Browse callback: open a shapefile, register it in the shared open-files
# list (`<<-` appends to the enclosing function's listOpenFiles), refresh
# every file combobox, and cache the boundary coordinates for overlaying.
tkconfigure(bt.addshp, command = function(){
shp.opfiles <- getOpenShp(.cdtEnv$tcl$main$win)
if(!is.null(shp.opfiles)){
update.OpenFiles('shp', shp.opfiles)
tclvalue(file.plotShp) <- shp.opfiles[[1]]
listOpenFiles[[length(listOpenFiles) + 1]] <<- shp.opfiles[[1]]
# NOTE(review): this second assignment is local (<-), shadowing the
# outer listOpenFiles for the rest of this callback only — presumably
# openFile_ttkcomboList() returns the refreshed display list; confirm.
listOpenFiles <- openFile_ttkcomboList()
lapply(list(cb.stnfl, cb.valid, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile))
.cdtData$EnvData$shp$ocrds <- NULL
else
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpofile[[2]])
}
})
# Options button: line style/color dialog for the shapefile overlay.
tkconfigure(bt.addshpOpt, command = function(){
.cdtData$EnvData$SHPOp <- MapGraph.GraphOptions.LineSHP(.cdtData$EnvData$SHPOp)
})
#################
tkbind(cb.addshp, "<<ComboboxSelected>>", function(){
shpofile <- getShpOpenData(file.plotShp)
if(is.null(shpofile))
.cdtData$EnvData$shp$ocrds <- NULL
else
.cdtData$EnvData$shp$ocrds <- getBoundaries(shpofile[[2]])
})
tkbind(chk.addshp, "<Button-1>", function(){
stateSHP <- if(tclvalue(.cdtData$EnvData$shp$add.shp) == "1") "disabled" else "normal"
tkconfigure(cb.addshp, state = stateSHP)
tkconfigure(bt.addshp, state = stateSHP)
tkconfigure(bt.addshpOpt, state = stateSHP)
})
##############################################
frameDEM <- ttklabelframe(subfr5, text = lang.dlg[['label']][['18']], relief = 'groove')
.cdtData$EnvData$dem$add.dem <- tclVar(GeneralParameters$add.to.plot$add.dem)
file.grddem <- tclVar(GeneralParameters$add.to.plot$dem.file)
stateDEM <- if(GeneralParameters$add.to.plot$add.dem) "normal" else "disabled"
chk.adddem <- tkcheckbutton(frameDEM, variable = .cdtData$EnvData$dem$add.dem, text = lang.dlg[['checkbutton']][['4']], anchor = 'w', justify = 'left')
bt.adddemOpt <- ttkbutton(frameDEM, text = .cdtEnv$tcl$lang$global[['button']][['4']], state = stateDEM)
cb.adddem <- ttkcombobox(frameDEM, values = unlist(listOpenFiles), textvariable = file.grddem, width = largeur0, state = stateDEM)
bt.adddem <- tkbutton(frameDEM, text = "...", state = stateDEM)
tkgrid(chk.adddem, row = 0, column = 0, sticky = 'we', rowspan = 1, columnspan = 6, padx = 1, pady = 1)
tkgrid(bt.adddemOpt, row = 0, column = 6, sticky = 'we', rowspan = 1, columnspan = 2, padx = 1, pady = 1)
tkgrid(cb.adddem, row = 1, column = 0, sticky = 'we', rowspan = 1, columnspan = 7, padx = 1, pady = 1)
tkgrid(bt.adddem, row = 1, column = 7, sticky = 'we', rowspan = 1, columnspan = 1, padx = 1, pady = 1)
#################
tkconfigure(bt.adddem, command = function(){
nc.opfiles <- getOpenNetcdf(.cdtEnv$tcl$main$win, initialdir = getwd())
if(!is.null(nc.opfiles)){
update.OpenFiles('netcdf', nc.opfiles)
listOpenFiles[[length(listOpenFiles) + 1]] <<- nc.opfiles[[1]]
tclvalue(file.grddem) <- nc.opfiles[[1]]
listOpenFiles <- openFile_ttkcomboList()
lapply(list(cb.stnfl, cb.valid, cb.adddem, cb.addshp), tkconfigure, values = unlist(listOpenFiles))
demData <- getNCDFSampleData(str_trim(tclvalue(file.grddem)))
if(!is.null(demData)){
jfile <- getIndex.AllOpenFiles(str_trim(tclvalue(file.grddem)))
demData <- .cdtData$OpenFiles$Data[[jfile]][[2]]
.cdtData$EnvData$dem$elv <- demData[c('x', 'y', 'z')]
demr <- raster::raster(demData[c('x', 'y', 'z')])
slope <- raster::terrain(demr, opt = 'slope')
aspect <- raster::terrain(demr, opt = 'aspect')
hill <- raster::hillShade(slope, aspect, angle = 40, direction = 270)
hill <- matrix(hill@data@values, hill@ncols, hill@nrows)
hill <- hill[, rev(seq(ncol(hill)))]
.cdtData$EnvData$dem$hill <- list(x = demData$x, y = demData$y, z = hill)
rm(demData, demr, slope, aspect, hill)
}else{
Insert.Messages.Out(lang.dlg[['message']][['5']], TRUE, "e")
tclvalue(file.grddem) <- ""
.cdtData$EnvData$dem <- NULL
}
}
})
tkconfigure(bt.adddemOpt, command = function(){
if(!is.null(.cdtData$EnvData$dem$elv)){
atlevel <- pretty(.cdtData$EnvData$dem$elv$z, n = 10, min.n = 7)
if(is.null(.cdtData$EnvData$dem$Opt$user.levels$levels)){
.cdtData$EnvData$dem$Opt$user.levels$levels <- atlevel
}else{
if(!.cdtData$EnvData$dem$Opt$user.levels$custom)
.cdtData$EnvData$dem$Opt$user.levels$levels <- atlevel
}
}
.cdtData$EnvData$dem$Opt <- MapGraph.gridDataLayer(.cdtData$EnvData$dem$Opt)
})
#################
tkbind(cb.adddem, "<<ComboboxSelected>>", function(){
demData <- getNCDFSampleData(str_trim(tclvalue(file.grddem)))
if(!is.null(demData)){
jfile <- getIndex.AllOpenFiles(str_trim(tclvalue(file.grddem)))
demData <- .cdtData$OpenFiles$Data[[jfile]][[2]]
.cdtData$EnvData$dem$elv <- demData[c('x', 'y', 'z')]
demr <- raster::raster(demData[c('x', 'y', 'z')])
slope <- raster::terrain(demr, opt = 'slope')
aspect <- raster::terrain(demr, opt = 'aspect')
hill <- raster::hillShade(slope, aspect, angle = 40, direction = 270)
hill <- matrix(hill@data@values, hill@ncols, hill@nrows)
hill <- hill[, rev(seq(ncol(hill)))]
.cdtData$EnvData$dem$hill <- list(x = demData$x, y = demData$y, z = hill)
rm(demData, demr, slope, aspect, hill)
}else{
Insert.Messages.Out(lang.dlg[['message']][['5']], TRUE, "e")
tclvalue(file.grddem) <- ""
.cdtData$EnvData$dem <- NULL
}
})
tkbind(chk.adddem, "<Button-1>", function(){
stateDEM <- if(tclvalue(.cdtData$EnvData$dem$add.dem) == "1") "disabled" else "normal"
tkconfigure(cb.adddem, state = stateDEM)
tkconfigure(bt.adddem, state = stateDEM)
tkconfigure(bt.adddemOpt, state = stateDEM)
})
#############################
tkgrid(frameSHP, row = 0, column = 0, sticky = 'we', pady = 1)
tkgrid(frameDEM, row = 1, column = 0, sticky = 'we', pady = 1)
#######################################################################################################
tkgrid(tknote.cmd, sticky = 'nwes')
tkgrid.columnconfigure(tknote.cmd, 0, weight = 1)
tkgrid.rowconfigure(tknote.cmd, 0, weight = 1)
tcl('update')
tkgrid(.cdtEnv$tcl$main$cmd.frame, sticky = 'nwes', pady = 1)
tkgrid.columnconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
tkgrid.rowconfigure(.cdtEnv$tcl$main$cmd.frame, 0, weight = 1)
invisible()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotmethods.R
\docType{methods}
\name{plot}
\alias{plot}
\title{generic plot method for mortfit package}
\usage{
plot(x, y, ...)
}
\arguments{
\item{x}{required as part of the generic definition of plot}
\item{y}{required as part of the generic definition of plot}
\item{...}{further arguments, used by some plot methods}
}
\description{
generic plot method for mortfit package
}
\seealso{
\code{\link{plot.mortalityData},
\link{plot.mortalityDataWithFit},
\link{plot.mortalityDataWithHazard},
\link{plot.mortalityHazard}}
}
| /man/plot.Rd | no_license | dfeehan/mortfit | R | false | true | 659 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotmethods.R
\docType{methods}
\name{plot}
\alias{plot}
\title{generic plot method for mortfit package}
\usage{
plot(x, y, ...)
}
\arguments{
\item{x}{required as part of the generic definition of plot}
\item{y}{required as part of the generic definition of plot}
\item{...}{further arguments, used by some plot methods}
}
\description{
generic plot method for mortfit package
}
\seealso{
\code{\link{plot.mortalityData},
\link{plot.mortalityDataWithFit},
\link{plot.mortalityDataWithHazard},
\link{plot.mortalityHazard}}
}
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define the UI: a sidebar collecting height, weight, waist size and gender,
# and a main panel with two tabs (the computed BMI result and static
# documentation). The textOutput ids below are presumably filled in by the
# matching server.R -- confirm against that file.
shinyUI(fluidPage(
# Application title
titlePanel("Risk of Associated Disease According to BMI and Waist Size"),
# Sidebar collecting the user's anthropometric inputs
sidebarLayout(
sidebarPanel(
helpText("Body Mass Index (BMI) and Waist size are common measure for health, this application helps you find it out"),
br(),
# Height / weight inputs; the trailing positional argument 0 is the initial
# value (the commented-out 170 / 70 were earlier defaults).
numericInput("int_height_cm",label = "What is your height (CM):",min = 50, max = 250,0), #,value = 170
numericInput("ing_weight_kg",label = "How much do you weight (KG):",min = 40, max = 300,0), #, value = 70
# NOTE(review): the label says inches (IN) but the documentation tab quotes
# waist thresholds in cm -- confirm the intended unit with the server logic.
sliderInput("waist_size", "What is your waist size (IN):", min=20, max=100, value=30),
radioButtons("gender", "Your gender:",
c("Male" = "Male",
"Female" = "Female")),
br(),
# Button the server presumably observes (input$FindBMI) before computing.
actionButton("FindBMI", label = "Calculate")
),
# Main panel: tabbed output area
mainPanel(
tabsetPanel(
# Tab 1: echoes the inputs and shows the computed BMI, category and risk.
tabPanel("BMI Result",
p(h4("Here are your current measures:")),
textOutput("current_height"),
textOutput("current_weight"),
textOutput("waist_size"),
textOutput("gender"),
br(),
p(h4("Your calculated BMI is:")),
textOutput("BMI_result"),
br(),
p(h4("Your BMI category is:")),
textOutput("status_indicator"),
br(),
p(h4("Risk of associated disease is:")),
textOutput("status_risk")
),
# Tab 2: static documentation about BMI and waist-circumference risk.
tabPanel(
"Documentation",
p(h3("The Application")),
helpText("This simple application calculates a person BMI based on current weight and height."),
p(h3("What is Body Mass Index (BMI)?")),
helpText("BMI can be said as a screening tool which serves the purpose of identifying weight problems that possibly happen to adults, people over 18 years old."),
helpText("It only helps to calculate the possibility of weight problems, nothing more nothing less."),
helpText("The index will show that you are underweight, normal weight, overweight, or obesity."),
p(h3("What is BMI Formula?")),
helpText("BMI is calculated by dividing weight by the square of height as follows:"),
helpText("BMI = Weight (kg)/Height (m)2"),
p(h3("Waist circumference and disease risk")),
helpText("Waist circumference thresholds which indicate increased risk of disease are below:"),
helpText("For women:
risk is increased at more than or equal to 80 cm
risk is high at more than or equal to 88 cm"),
helpText("For men:
risk is increased at more than or equal to 94 cm
risk is high at more than or equal to 102 cm for men")
)
)
)
)
))
| /ui.R | no_license | Ir-Nazrul/Developing_Data_Product | R | false | false | 3,901 | r | #
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define the UI: a sidebar collecting height, weight, waist size and gender,
# and a main panel with two tabs (the computed BMI result and static
# documentation). The textOutput ids below are presumably filled in by the
# matching server.R -- confirm against that file.
shinyUI(fluidPage(
# Application title
titlePanel("Risk of Associated Disease According to BMI and Waist Size"),
# Sidebar collecting the user's anthropometric inputs
sidebarLayout(
sidebarPanel(
helpText("Body Mass Index (BMI) and Waist size are common measure for health, this application helps you find it out"),
br(),
# Height / weight inputs; the trailing positional argument 0 is the initial
# value (the commented-out 170 / 70 were earlier defaults).
numericInput("int_height_cm",label = "What is your height (CM):",min = 50, max = 250,0), #,value = 170
numericInput("ing_weight_kg",label = "How much do you weight (KG):",min = 40, max = 300,0), #, value = 70
# NOTE(review): the label says inches (IN) but the documentation tab quotes
# waist thresholds in cm -- confirm the intended unit with the server logic.
sliderInput("waist_size", "What is your waist size (IN):", min=20, max=100, value=30),
radioButtons("gender", "Your gender:",
c("Male" = "Male",
"Female" = "Female")),
br(),
# Button the server presumably observes (input$FindBMI) before computing.
actionButton("FindBMI", label = "Calculate")
),
# Main panel: tabbed output area
mainPanel(
tabsetPanel(
# Tab 1: echoes the inputs and shows the computed BMI, category and risk.
tabPanel("BMI Result",
p(h4("Here are your current measures:")),
textOutput("current_height"),
textOutput("current_weight"),
textOutput("waist_size"),
textOutput("gender"),
br(),
p(h4("Your calculated BMI is:")),
textOutput("BMI_result"),
br(),
p(h4("Your BMI category is:")),
textOutput("status_indicator"),
br(),
p(h4("Risk of associated disease is:")),
textOutput("status_risk")
),
# Tab 2: static documentation about BMI and waist-circumference risk.
tabPanel(
"Documentation",
p(h3("The Application")),
helpText("This simple application calculates a person BMI based on current weight and height."),
p(h3("What is Body Mass Index (BMI)?")),
helpText("BMI can be said as a screening tool which serves the purpose of identifying weight problems that possibly happen to adults, people over 18 years old."),
helpText("It only helps to calculate the possibility of weight problems, nothing more nothing less."),
helpText("The index will show that you are underweight, normal weight, overweight, or obesity."),
p(h3("What is BMI Formula?")),
helpText("BMI is calculated by dividing weight by the square of height as follows:"),
helpText("BMI = Weight (kg)/Height (m)2"),
p(h3("Waist circumference and disease risk")),
helpText("Waist circumference thresholds which indicate increased risk of disease are below:"),
helpText("For women:
risk is increased at more than or equal to 80 cm
risk is high at more than or equal to 88 cm"),
helpText("For men:
risk is increased at more than or equal to 94 cm
risk is high at more than or equal to 102 cm for men")
)
)
)
)
))
|
plot.mfp <- function (x, var=NULL, ref.zero=TRUE, what="all", ask=TRUE, ...)
{
    ## Diagnostic plots for a fitted multivariable fractional polynomial
    ## ("mfp") model: the partial linear predictor with pointwise 95%
    ## confidence bands and partial residuals, plus (for Cox models)
    ## martingale residuals from a null model, each with a lowess smooth.
    ##
    ## Arguments:
    ##   x        fitted "mfp" object; must have been fitted with x = TRUE
    ##            so that the model matrix is stored
    ##   var      optional character vector of covariate names to plot;
    ##            default is every covariate retained in the final model
    ##   ref.zero, what  accepted for interface compatibility; currently
    ##            unused in this function
    ##   ask      if TRUE, prompt before drawing each new plot
    ##   ...      further graphical arguments passed to plot()
    if (!inherits(x, "mfp"))
        stop("This is not an mfp object")
    name <- dimnames(x$powers)[[1]]
    ## Covariates whose first power is NA were eliminated from the model;
    ## by default plot only the retained ones.
    if (is.null(var)) {
        w <- which(is.na(x$powers[, 1]))
        pick <- if (length(w) == 0) seq_along(name) else seq_along(name)[-w]
    } else {
        pick <- which(name %in% var)
    }
    ## Cox models carry no intercept in the coefficient vector.
    int <- as.numeric(x$family[["family"]] != "Cox")
    ## Helper: draw the fitted partial predictor with its confidence band,
    ## then overlay partial residuals and a lowess smooth of them.
    draw.fit <- function(tmpx, fx, lower, upper, resid.ord, namex, ...) {
        limits <- range(c(lower, upper))
        plot(tmpx, fx, xlab = namex, ylab = "Linear predictor",
             type = "l", ylim = limits, ...)
        lines(tmpx, lower, lty = 2)
        lines(tmpx, upper, lty = 2)
        pres <- resid.ord + fx
        points(tmpx, pres)
        ok <- is.finite(tmpx) & is.finite(pres)
        fl <- lowess(tmpx[ok], pres[ok], iter = 0)
        lines(fl$x, fl$y, lwd = 1, col = "red")
    }
    for (ip in pick) {
        namex <- name[ip]
        if (is.null(x$X))
            stop("you did not specify x=TRUE in the fit")
        ## Take the covariate values from the stored model matrix when
        ## available; otherwise look the name up in the calling environment.
        if (any(dimnames(x$X)[[2]] == namex, na.rm = TRUE)) {
            tmpx <- x$X[, namex]
        } else {
            tmpx <- eval(as.name(namex))
        }
        ord <- order(tmpx)
        tmpx <- tmpx[ord]
        ## Number of fractional polynomial powers fitted for this covariate.
        npwrsx <- sum(is.finite(x$powers[ip, ]))
        if (npwrsx > 0) {
            px <- predict(x, type = "link", se.fit = TRUE)
            fx <- px$fit
            conf.int <- 0.95
            ## t quantile when residual df are available, normal otherwise.
            zcrit <- if (length(idf <- x$df.residual))
                qt((1 + conf.int)/2, idf)
            else qnorm((1 + conf.int)/2)
            ## Pointwise confidence band (strictly valid only for linearly
            ## related covariates).  BUG FIX: the upper limit was previously
            ## computed as fx - zcrit*se, i.e. identical to the lower limit;
            ## it must be fx + zcrit*se.
            lower <- fx - zcrit * px$se.fit
            upper <- fx + zcrit * px$se.fit
        }
        if (ask) {
            op <- par(ask = TRUE)
            on.exit(par(op))
        }
        if (int) {
            ## Generalized linear model: fitted partial predictor only.
            if (npwrsx > 0)
                draw.fit(tmpx, fx, lower, upper, x$residuals[ord], namex, ...)
        } else {
            ## Cox proportional hazards model: first martingale residuals
            ## from a null model, then the fitted function.
            x0 <- coxph(x$y ~ 1)
            res0 <- resid(x0, type = "mart")[ord]
            plot(tmpx, res0, xlab = namex, ylab = "Martingale residuals",
                 type = "p", ...)
            ok <- is.finite(tmpx) & is.finite(res0)
            fl <- lowess(tmpx[ok], res0[ok], iter = 0)
            lines(fl$x, fl$y, lwd = 1, col = "red")
            if (npwrsx > 0)
                draw.fit(tmpx, fx, lower, upper, x$residuals[ord], namex, ...)
        }
    }
    invisible(NULL)
}
| /mfp/R/plot.mfp.R | no_license | ingted/R-Examples | R | false | false | 3,300 | r | plot.mfp <- function (x, var=NULL, ref.zero=TRUE, what="all", ask=TRUE, ...)
{
if (!inherits(x, "mfp"))
stop("This is not an mfp object")
name <- dimnames(x$powers)[[1]]
choices <- name
if(is.null(var)) {
w <- which(is.na(x$powers[,1]))
if(length(w)==0) { pick <- seq(name) }
else { pick <- seq(name)[-w] }
} else {
pick <- which(name %in% var)
}
int <- as.numeric(x$family[["family"]] != "Cox")
for(ip in pick) {
namex <- name[ip]
if (is.null(x$X))
stop("you did not specify x=TRUE in the fit")
if (any(dimnames(x$X)[[2]] == namex, na.rm = TRUE)) {
tmpx <- x$X[, namex]
ix <- which(dimnames(x$X)[[2]] == namex)
}
else {
tmpx <- eval(as.name(namex))
}
ord <- order(tmpx)
tmpx <- tmpx[ord]
#
npwrsx <- sum(is.finite(x$powers[ip, ]))
if (npwrsx > 0) {
if (ip > 1)
posx <- int + sum(is.finite(x$powers[seq(ip-1), ])) + seq(npwrsx)
else posx <- int + seq(npwrsx)
# xtmp <- t(matrix(tmpx,ncol=length(tmpx),nrow=npwrsx,byrow=TRUE)^x$powers[ip,1:npwrsx])
px <- predict(x, type="link", se.fit=TRUE)
fx <- px$fit
conf.int <- 0.95
zcrit <- if (length(idf <- x$df.residual))
qt((1 + conf.int)/2, idf)
else qnorm((1 + conf.int)/2)
# actually only for linearily related covariates
lower <- fx-zcrit*px$se.fit; upper <- fx-zcrit*px$se.fit
}
# Plots
if (ask) {
op <- par(ask = TRUE)
on.exit(par(op))
}
#
if (int) { # generalized linear model
if (npwrsx > 0) {
limits <- range(c(lower,upper))
plot(tmpx, fx, xlab = namex, ylab = paste("Linear predictor", sep = ""), type = "l", ylim=limits, ...)
lines(tmpx, lower, lty=2); lines(tmpx, upper, lty=2)
pres <- x$residuals[ord] + fx
points(tmpx, pres)
# plot(tmpx, pres, xlab = namex, ylab = "Partial residuals", ...)
ok <- is.finite(tmpx) & is.finite(pres)
fl <- lowess(tmpx[ok], pres[ok], iter = 0)
lines(fl$x, fl$y, lwd = 1, col = "red")
}
}
else { # Cox proportional hazards model
# Martingale residuals
x0 <- coxph(x$y ~ 1)
res0 <- resid(x0, type = "mart")[ord]
plot(tmpx, res0, xlab = namex, ylab = "Martingale residuals",
type = "p", ...)
ok <- is.finite(tmpx) & is.finite(res0)
fl <- lowess(tmpx[ok], res0[ok], iter = 0)
lines(fl$x, fl$y, lwd = 1, col = "red")
# Fitted function
if (npwrsx > 0) {
limits <- range(c(lower,upper))
plot(tmpx, fx, xlab = namex, ylab = "Linear predictor",
type = "l", ylim=limits, ...)
lines(tmpx, lower, lty=2); lines(tmpx, upper, lty=2)
pres <- x$residuals[ord] + fx
points(tmpx, pres)
# plot(tmpx, pres, xlab = namex, ylab = "Partial residuals", ...)
ok <- is.finite(tmpx) & is.finite(pres)
fl <- lowess(tmpx[ok], pres[ok], iter = 0)
lines(fl$x, fl$y, lwd = 1, col = "red")
}
}
}
}
|
# Example 2. Connecting to a database (DB) with R
# We start by installing the libraries needed to connect to and read the
# database in RStudio. If you had them installed previously, skip the
# installation -- remember you only need to do it once.
# install.packages("DBI")
# install.packages("RMySQL")
library(DBI)
library(RMySQL)
# Once the required libraries are available we proceed to read the Shiny
# database (you might need others; if R asks for them, install and load
# them). It is a demo database that lets us interact with this kind of
# object. dbConnect is the command that performs the connection; the other
# parameters are the credentials that grant access to the DB.
MyDataBase <- dbConnect(
drv = RMySQL::MySQL(),
dbname = "shinydemo",
host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
username = "guest",
password = "guest")
# If R did not raise any errors, let's explore the DB
dbListTables(MyDataBase)
# Now, to display the fields (variables) contained in the City table,
# we do the following
dbListFields(MyDataBase, 'City')
# To run a MySQL-style query on the selected table we do the following
DataDB <- dbGetQuery(MyDataBase, "select * from City")
# Note that the DataDB object is a data frame, hence already an R object,
# so we can apply the usual commands
class(DataDB)
head(DataDB)
pop.mean <- mean(DataDB$Population) # Mean of the population variable
pop.mean
pop.3 <- pop.mean *3 # Arithmetic operations
pop.3
# We can even use other search commands by applying the dplyr library
library(dplyr)
pop50.mex <- DataDB %>% filter(CountryCode == "MEX" , Population > 50000) # Cities in Mexico with more than 50,000 inhabitants
head(pop50.mex)
unique(DataDB$CountryCode) # Countries contained in the DB
| /Sesion-07/Ejemplo-02/Ejemplo_02.R | no_license | DillanAS/Programacion-R-Santander-2021 | R | false | false | 1,999 | r | # Ejemplo 2. Conexión a una BDD con R
# Comenzaremos instalando las librerias necesarias para realizar la conexión y
# lectura de la base de datos en RStudio, si previamente los tenías instalados
# omite la instalación, recuerda que solo necesitas realizarla una vez.
# install.packages("DBI")
# install.packages("RMySQL")
library(DBI)
library(RMySQL)
# Una vez que se tengan las librerias necesarias se procede a la lectura
# (podría ser que necesites otras, si te las solicita instalalas y cargalas),
# de la base de datos de Shiny la cual es un demo y nos permite interactuar con
# este tipo de objetos. El comando dbConnect es el indicado para realizar la
# lectura, los demás parametros son los que nos dan acceso a la BDD.
MyDataBase <- dbConnect(
drv = RMySQL::MySQL(),
dbname = "shinydemo",
host = "shiny-demo.csa7qlmguqrf.us-east-1.rds.amazonaws.com",
username = "guest",
password = "guest")
# Si no se arrojaron errores por parte de R, vamos a explorar la BDD
dbListTables(MyDataBase)
# Ahora si se quieren desplegar los campos o variables que contiene la tabla
# City se hará lo siguiente
dbListFields(MyDataBase, 'City')
# Para realizar una consulta tipo MySQL sobre la tabla seleccionada haremos lo
# siguiente
DataDB <- dbGetQuery(MyDataBase, "select * from City")
# Observemos que el objeto DataDB es un data frame, por lo tanto ya es un objeto
# de R y podemos aplicar los comandos usuales
class(DataDB)
head(DataDB)
pop.mean <- mean(DataDB$Population) # Media a la variable de población
pop.mean
pop.3 <- pop.mean *3 # Operaciones aritméticas
pop.3
# Incluso podemos hacer unos de otros comandos de busqueda aplicando la
# libreria dplyr
library(dplyr)
pop50.mex <- DataDB %>% filter(CountryCode == "MEX" , Population > 50000) # Ciudades del país de México con más de 50,000 habitantes
head(pop50.mex)
unique(DataDB$CountryCode) # Países que contiene la BDD
|
#' Computing age mixing measurements in transmission clusters in missing completly at random scenarios
#'
#' @param simpact.trans.net Transmission network and record produced by \code{\link{advanced.transmission.network.builder()}}
#' @param datalist.agemix Data list of simpact output produced by \code{\link{readthedata()}}
#' @param work.dir Working directory
#' @param dirfasttree Directory where fastTree soaftware is called from
#' @param sub.dir.rename Sub-directory where simpact output are stored
#' @param limitTransmEvents Consider a transmission network which counts individuals more than the value (numeric)
#' @param timewindow Time window in which the experience is carried out
#' @param seq.cov Sequence coverage
#' @param seq.gender.ratio Proportion of women in the selected population (women/(women + men))
#' @param age.group.15.25 Consider individuals with age greater than 15 and less than 25
#' @param age.group.25.40 Consider individuals with age greater than 25 and less than 40
#' @param age.group.40.50 Consider individuals with age greater than 40 and less than 50
#' @param cut.off Cut off value for constructing pairings based on tMRCA
#' @export
# The outputs of the function are
# (i) as observed in transmisison network constructed from transmission clusters
# - table of numbers of age structured pairings between female/male across different age groups from the transmission clusters
# - table of proportion of men in a give age group who are paired to women of a given age group
# - table of proportion of women in a give age group who are paired to men of a given age group
# - numbers of men, and women in the three age groups
# - mean, median, and standard deviation of age gap between individuals in different age groups
# (e.g.: women aged between 15 and 25 who are paired to men regardless age group of men)
# table.cl.age.str, table.cl.age.str.prop.men, table.cl.age.str.prop.women,
# numbers.individuals.age.groups.cl,
# mean.AD.age.groups.cl, med.AD.age.groups.cl,
# sd.AD.age.groups.cl
# (ii) true values of the above mentioned measurements as observed in transmission network record
# table.cl.true.age.str, table.cl.true.age.str.prop.men, table.cl.true.age.str.prop.women,
# numbers.individuals.age.groups.true.cl,
# mean.AD.age.groups.true.cl, med.AD.age.groups.true.cl,
# sd.AD.age.groups.true.cl
# (iii) true values of the above mentioned measurements as observed in transmission network record but for the entire phylogenetic tree
# note: not all leaves of a phylogenetic tree belong to transmission clusters!
# Directionality is counted with label "MtoW" for infection from man to woman and "WtoM" for vice versa
# - table of numbers of age structured pairings between female/male across different age groups
# - table of numbers of age structured pairings between female/male across different age groups with men to women infections
# - table of numbers of age structured pairings between female/male across different age groups with women to men infections
# - table of proportion of men in a given age group who are paired to women of a given age group
# - table of proportion of men in a given age group who are paired to women of a given age group with men to women infections
# - table of proportion of men in a given age group who are paired to women of a given age group with women to men infections
# - table of proportion of women in a given age group who are paired to men of a given age group
# - numbers of men, and women in the three age groups
# - mean, median, and standard deviation of age gap between individuals in different age groups
# (e.g.: women aged between 15 and 25 who are paired to men regardless age group of men)
# table.tree.tra.age.str, table.tree.tra.age.str.MtoW, table.tree.tra.age.str.WtoM,
# table.tree.trans.true.age.str.prop.men, table.tree.trans.true.age.str.MtoW.prop.men,
# table.tree.trans.true.age.str.WtoM.prop.men, table.tree.trans.true.age.str.prop.women,
# numbers.individuals.age.groups.net, mean.AD.age.groups.net, med.AD.age.groups.net, sd.AD.age.groups.net
# (iv) True age mixing patterns in relationships
# "T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male"
# "T.p.prev.6months.m", # "T.p.prev.6months.f",
# (iv) Transmission clusters
# - mean, median, and standard deviation of transmission cluster sizes
age.mixing.MAR.fun <- function(simpact.trans.net = simpact.trans.net.adv,
datalist.agemix = datalist.agemix,
work.dir = work.dir,
dirfasttree = dirfasttree,
sub.dir.rename = sub.dir.rename,
limitTransmEvents = 7,
timewindow = c(30,40),
seq.cov = 35,
seq.gender.ratio = 0.7,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50),
cut.off = 7){
# source("~/phylosimpact_simulation_studies_2018/stress_testing/needed.functions.RSimpactHelp.R")
# source("/home/niyukuri/Dropbox/25.10.2018.age.mix2/age_mixing_large_AD/needed.functions.RSimpactHelp.R")
source("/home/dniyukuri/lustre/age_mixing_large_AD/needed.functions.RSimpactHelp.R")
# sys.source("/home/dniyukuri/lustre/age_mixing_large_AD/needed.functions.RSimpactHelp.R", env = .GlobalEnv, keep.source = TRUE)
# Data list of infected individuals
# Select IDs in MCAR scenario
simpact.trans.net <- simpact.trans.net
limitTransmEvents <- limitTransmEvents
timewindow <- timewindow
seq.cov <- seq.cov
age.group.40.50 <- age.group.40.50
mAr.IDs <- IDs.Seq.Age.Groups(simpact.trans.net = simpact.trans.net,
limitTransmEvents = limitTransmEvents,
timewindow = timewindow,
seq.cov = seq.cov,
seq.gender.ratio = seq.gender.ratio,
age.group.15.25 = age.group.15.25,
age.group.25.40 = age.group.25.40,
age.group.40.50 = age.group.40.50)
if(length(mAr.IDs) >= 20){
simpact.trans.net.adv <- simpact.trans.net
# Transmission network table as from transmission networks for further steps
############################################################################
infectionTable <- vector("list", length(simpact.trans.net.adv))
for(j in 1:length(simpact.trans.net.adv)){
p <- j
trans.network.i <- as.data.frame(simpact.trans.net.adv[[p]])
# trans.network.i <- trans.network.i[-1,]
id.lab <- paste0(p,".",trans.network.i$id,".C")
trans.network.i$id.lab <- id.lab
trans.network.i$ageSampTimeRec <- trans.network.i$SampTime - trans.network.i$TOBRec
infectionTable[[p]] <- trans.network.i
}
infecttable <- rbindlist(infectionTable)
table.simpact.trans.net.adv <- infecttable # rbindlist(simpact.trans.net.adv)
Study.DataTable <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%mAr.IDs)
IDs.study <- Study.DataTable$RecId
transm.datalist.agemix <- datalist.agemix # assign full data set new age mix data set
# Transmission table of selected individuals
table.simpact.trans.net.cov <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%mAr.IDs)
# Person table of selected individuals
transm.datalist.agemix$ptable <- dplyr::filter(transm.datalist.agemix$ptable, transm.datalist.agemix$ptable$ID%in%IDs.study)
# (i) Age mixing in relationships
#
agemix.rels.transm.df <- agemix.df.maker(transm.datalist.agemix)
#
agemix.model <- pattern.modeller(dataframe = agemix.rels.transm.df,
agegroup = c(15, 50),
timepoint = 40, # transm.datalist.agemix$itable$population.simtime[1],
timewindow = 10)#1)#3)
#
# # men.lme <- tryCatch(agemixing.lme.fitter(data = dplyr::filter(agemix.model[[1]], Gender =="male")),
# # error = agemixing.lme.errFunction) # Returns an empty list if the lme model can't be fitted
#
# men.lmer <- ampmodel(data = dplyr::filter(agemix.model[[1]], Gender =="male"))
# Fit a linear mixed-effects model of age-mixing among male partners in the
# transmission data (random intercept per individual), then summarise: average
# age difference (AAD), its SD, the model slope, within-subject SD, between-
# subject SD, and the intercept shifted by the 15-year age floor.
# NOTE(review): assumes agemix.model[[1]] has columns Gender, ID, AgeGap,
# pagerelform, agerelform0 -- confirm against pattern.modeller() output.
data <- dplyr::filter(agemix.model[[1]], Gender =="male")
# The model is identifiable only when at least one man contributes repeated
# relationships and more than one man is present.
if (nrow(data) > length(unique(data$ID)) && length(unique(data$ID)) > 1) {
  men.lmer <- lmer(pagerelform ~ agerelform0 + (1 | ID),
                   data = data, # reuse the filtered data instead of re-filtering
                   REML = TRUE,
                   control = lmerControl(check.nobs.vs.nlev = "ignore",
                                         check.nobs.vs.rankZ = "ignore",
                                         check.nobs.vs.nRE = "ignore"))
  bignumber <- NA # fallback value when the model could not be fitted
  AAD.male <- ifelse(length(men.lmer) > 0, mean(data$AgeGap), bignumber)
  SDAD.male <- ifelse(length(men.lmer) > 0, sd(data$AgeGap), bignumber)
  slope.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$coefficients[2, 1], bignumber)
  WSD.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$sigma, bignumber) # residual (within-subject) SD
  BSD.male <- ifelse(length(men.lmer) > 0, bvar(men.lmer), bignumber) # between-subject SD (despite the helper's name)
  intercept.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$coefficients[1, 1] - 15, bignumber)
  ## AAD: average age difference across all relationships
  ## SDAD: standard deviation of age differences
  ## BSD: between-subject standard deviation of age differences
  mix.rels.transm.dat <- c(AAD.male, SDAD.male, slope.male, WSD.male, BSD.male, intercept.male)
  names(mix.rels.transm.dat) <- c("T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male")
} else {
  # Too little data to fit the model: report NA for all six summary statistics.
  mix.rels.transm.dat <- rep(NA, 6)
  names(mix.rels.transm.dat) <- c("T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male")
}
# age.scatter.df <- agemix.model[[1]]
# (ii) Point prevalence of concurrency in the adult population:
# Concurrency point prevalence 6 months before a survey, among men
# Concurrency point prevalence 6 months before the survey at t = 40; falls back
# to NA if the calculator errors (e.g. no eligible relationships in the window).
pp.cp.6months.male.transm <- tryCatch(concurr.pointprev.calculator(datalist = transm.datalist.agemix,
timepoint = 40 - 0.5),
error=function(e) return(rep(NA, 1)))
#
# pp.cp.6months.male.transm <- tryCatch(concurr.pointprev.calculator(datalist = transm.datalist.agemix,
# timepoint = 40 - 0.5) %>%
# dplyr::select(concurr.pointprev) %>%
# dplyr::slice(1) %>%
# as.numeric(),
# error=function(e) return(rep(NA, 1)))
#
# pp.cp.6months.female.transm <- concurr.pointprev.calculator(datalist = transm.datalist.agemix,
# timepoint = 40 - 0.5) %>%
# dplyr::select(concurr.pointprev) %>%
# dplyr::slice(2) %>%
# as.numeric()
#
# (iii) HIV point prevalence at simulation time t = 40, by age group and gender.
# prevalence.calculator() returns one row per gender; per the original variable
# naming, row 1 corresponds to men and row 2 to women.
# Helper: extract the point prevalence for one age group / gender row,
# replacing six copy-pasted pipe chains.
prevalence.t40 <- function(agegroup, gender.row) {
  prevalence.calculator(datalist = datalist.agemix,
                        agegroup = agegroup,
                        timepoint = 40) %>%
    dplyr::select(pointprevalence) %>%
    dplyr::slice(gender.row) %>%
    as.numeric()
}
hiv.prev.lt25.women <- prevalence.t40(c(15, 25), 2)
hiv.prev.lt25.men <- prevalence.t40(c(15, 25), 1)
hiv.prev.25.40.women <- prevalence.t40(c(25, 40), 2)
hiv.prev.25.40.men <- prevalence.t40(c(25, 40), 1)
hiv.prev.40.50.women <- prevalence.t40(c(40, 50), 2)
hiv.prev.40.50.men <- prevalence.t40(c(40, 50), 1)
# (iv) HIV incidence over the analysis time window, by age group and gender.
# incidence.calculator() returns one row per gender; per the original variable
# naming, row 1 corresponds to men and row 2 to women. `timewindow` is read
# from the enclosing environment, as in the original code.
# Helper: extract the incidence for one age group / gender row,
# replacing six copy-pasted pipe chains.
incidence.window <- function(agegroup, gender.row) {
  incidence.calculator(datalist = datalist.agemix,
                       agegroup = agegroup,
                       timewindow = timewindow,
                       only.active = "No") %>%
    dplyr::select(incidence) %>%
    dplyr::slice(gender.row) %>%
    as.numeric()
}
epi.transm.incidence.df.15.24.men <- incidence.window(c(15, 25), 1)
epi.transm.incidence.df.15.24.women <- incidence.window(c(15, 25), 2)
epi.transm.incidence.df.25.39.men <- incidence.window(c(25, 40), 1)
epi.transm.incidence.df.25.39.women <- incidence.window(c(25, 40), 2)
epi.transm.incidence.df.40.49.men <- incidence.window(c(40, 50), 1)
epi.transm.incidence.df.40.49.women <- incidence.window(c(40, 50), 2)
# Assemble the epidemic summary statistics derived from the transmission data:
# prevalence by age group and gender, age-mixing model summaries, concurrency
# point prevalence, and incidence by age group and gender.
summary.epidemic.transm.df <- c(hiv.prev.lt25.women, hiv.prev.lt25.men,
hiv.prev.25.40.women, hiv.prev.25.40.men,
hiv.prev.40.50.women, hiv.prev.40.50.men,
mix.rels.transm.dat,
pp.cp.6months.male.transm, # pp.cp.6months.female.transm,
epi.transm.incidence.df.15.24.men, epi.transm.incidence.df.15.24.women,
epi.transm.incidence.df.25.39.men, epi.transm.incidence.df.25.39.women,
epi.transm.incidence.df.40.49.men, epi.transm.incidence.df.40.49.women)
# "T." prefix marks statistics computed from the transmission (sequence) data.
names(summary.epidemic.transm.df) <- c("T.prev.15.25.w", "T.prev.15.25.m", "T.prev.25.40.w", "T.prev.25.40.m", "T.prev.40.50.w", "T.prev.40.50.m",
names(mix.rels.transm.dat),
"T.p.prev.6months.m", # "T.p.prev.6months.f",
"T.inc.15.25.m", "T.inc.15.25.w", "T.inc.25.40.m", "T.inc.25.40.w", "T.inc.40.50.m", "T.inc.40.50.w")
# Function to handle NAs: replace every NA in a (named) numeric vector with 0,
# preserving names.
# Vectorised replacement instead of growing a vector inside a loop; the
# original `for(i in 1:length(v))` also failed on zero-length input
# (1:0 yields c(1, 0)), which this version handles correctly.
NA.handle.fun <- function(input=input){
  v <- as.numeric(input)   # drop non-numeric storage, keep values
  v[is.na(v)] <- 0         # zero-fill missing entries in one pass
  names(v) <- names(input) # re-attach names (NULL-safe)
  return(v)
}
######################################
# Step 5: Building phylogenetic tree #
######################################
dirfasttree <- work.dir # directory expected to contain the FastTree binary
# Select sequences from the pool of alignment
##############################################
# Write the sampled individuals' sequences to a coverage-specific FASTA file.
choose.sequence.ind(pool.seq.file = paste0(sub.dir.rename,"/C.Epidemic.fas"),
select.vec = mAr.IDs,
name.file = paste0(sub.dir.rename,"/",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta")))
# Build and calibrate the phylogenetic tree
############################################
# FastTree tree, time-calibrated with the sampling dates; the epidemic starts
# in 1977 and the simulation runs for 40 years.
mAr.IDs.tree.calib <- phylogenetic.tree.fasttree.par(dir.tree = dirfasttree,
sub.dir.rename = sub.dir.rename,
fasttree.tool = "FastTreeMP",
calendar.dates = "samplingtimes.all.csv",
simseqfile = paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),
count.start = 1977,
endsim = 40,
clust = TRUE)
N <- node.age(mAr.IDs.tree.calib) # node ages of the calibrated tree
# Time to MRCA: internal nodes ages
int.node.age <- N$Ti
latest.samp <- N$timeToMRCA+N$timeOfMRCA # latest sampling date
mrca.v <- mrca(mAr.IDs.tree.calib, full = FALSE) # MRCA ids
sampling.dates <- read.csv(paste0(sub.dir.rename,"/samplingtimes.all.csv")) # sampling times
#
# tree.cal.cov.35.IDs <- read.tree(paste0(sub.dir.rename, paste0("/calibrated.tree.cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta.tree")))
#
# Compute transmission clusters
###############################
# run ClusterPicker
# Run ClusterPicker on the FASTA alignment and the Newick tree; the trailing
# arguments "0.9 0.9 0.045 2 gap" are ClusterPicker's thresholds (presumably
# support, support, genetic distance, minimum cluster size, gap handling --
# confirm against the ClusterPicker manual).
system(paste("java -jar ", paste(paste0(work.dir,"/ClusterPicker_1.2.3.jar"), paste0(sub.dir.rename,"/", paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta")), paste0(sub.dir.rename,"/",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta.nwk")), paste0("0.9 0.9 0.045 2 gap"))))
# Collect the per-cluster output files written by ClusterPicker.
dd <- list.files(path = paste0(sub.dir.rename), pattern = paste0(paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),"_",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),"_","clusterPicks_cluste"),
all.files = FALSE,
full.names = FALSE, recursive = FALSE)
# Transmission clusters.
d <- clust.names <- dd # file names of the picked transmission clusters
# One gender/age-structured data table of pairings per transmission cluster.
data.list.simpact.trans.net.adv <- vector("list", length(d))
# Transmission table of individuals in the transmission clusters
#################################################################
# Binding all data tables of clusters as these information are captured in transmission networks
clust.size <- numeric(length(d)) # preallocated: size of each cluster
transm.df <- table.simpact.trans.net.adv
for (i in seq_along(d)) { # seq_along() is safe when no cluster file was found
  # IDs of the i-th cluster, one sequence label per row.
  clus.read <- read.table(file = paste0(sub.dir.rename, "/", d[i]), header = FALSE)
  clust.size[i] <- nrow(clus.read)
  # Transmission records of this cluster's individuals, tagged with the cluster index.
  data.table.simpact.trans.net.i <- subset(transm.df, transm.df$id.lab%in%as.character(clus.read$V1))
  data.table.simpact.trans.net.i$clust.ID <- rep(i, nrow(data.table.simpact.trans.net.i))
  data.list.simpact.trans.net.adv[[i]] <- as.data.frame(data.table.simpact.trans.net.i)
}
# Bind the per-cluster tables; an individual may appear in more than one
# cluster, so duplicate (id, id.lab) pairs are removed afterwards.
data.table.simpact.trans.clusts.net.adv <- as.data.frame(do.call(rbind, data.list.simpact.trans.net.adv)) # data.table & data.frame
data.table.simpact.trans.clusts.net.adv <- data.table.simpact.trans.clusts.net.adv[!duplicated(data.table.simpact.trans.clusts.net.adv[c("id","id.lab")]),] # remove duplicate id.lab
# t may happen that one seq.ID appear in more than one cluster
# data.table.simpact.trans.clusts.net.adv <- data.table.simpact.trans.net.adv
## Aligning internal nodes IDs and their age: !they must get same length
ancestor <- Ancestors(mAr.IDs.tree.calib) # ancestors of each tips and internal node
# All ancestors output are internal nodes
# Flatten the per-node ancestor lists into one vector of internal node IDs.
ancestor.v <- vector()
for(i in 1:length(ancestor)){
k <- ancestor[[i]]
ancestor.v <- c(ancestor.v, unique(k))
}
sort.int.ancestor <- unique(sort(ancestor.v)) # internal node IDs, ascending
sort.int.node.age <- sort(int.node.age) # internal node ages, ascending
# NOTE(review): pairing sorted IDs with sorted ages assumes both vectors have
# the same length and that node age increases with node ID -- confirm for this tree.
tip.names <- names(mrca.v[1,])
dates.tree.df <- dplyr::filter(sampling.dates, sampling.dates$V1%in%tip.names) # dates of these tips
# rearrange dates in tips order as are displayed on the tree
tip.names.f <- vector()
dates.tree.dat <- vector()
# NOTE(review): the loop indexes tip.names by i (bounded by nrow(dates.tree.df))
# and dates.tree.df by j (bounded by length(tip.names)); this is only correct
# when the two lengths are equal -- verify upstream filtering guarantees that.
for(i in 1:nrow(dates.tree.df)){
for(j in 1:length(tip.names)){
if(tip.names[i] == dates.tree.df$V1[[j]]){
tip.names.f <- c(tip.names.f, tip.names[i])
# Convert "time since sampling" to a calendar year (1977 start + 40 y simulated).
dates.tree.dat <- c(dates.tree.dat, 1977+40-dates.tree.df$V2[[j]])
}
}
}
dates.tree.named <- dates.tree.dat
names(dates.tree.named) <- tip.names.f
# MRCA matrix
#############
# make mrca matrix diagonal 0 and other elements (internal nodes IDs) assign them the age of mrca
# Diagonal = 0; each off-diagonal entry replaces the internal-node ID of the
# MRCA of tips (i, j) with that node's age, matched via the sorted ID/age vectors.
mrca.v.age <- mrca.v
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i==j){
mrca.v.age[i,j] <- 0
}else{
if(mrca.v[i,j] %in% sort.int.ancestor){
p.index <- which(sort.int.ancestor == mrca.v[i,j])
mrca.v.age[i,j] <- sort.int.node.age[p.index]
}
}
}
}
# make mrca matrix elements: sampling date - age of mrca
# First contingency matrix: row-wise, sampling date of tip i minus MRCA ages.
mrca.v.age.samp <- mrca.v.age
mrca.v.age.samp.cont1 <- mrca.v.age.samp
# NOTE(review): the inner j loop is redundant -- the row assignment below does
# not depend on j and is idempotent, so it simply repeats n-1 times per row.
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i!=j){
i.dat <- tip.names.f[i]
v.index <- which(tip.names.f == i.dat)
samp.date.tip <- dates.tree.dat[v.index]
mrca.v.age.samp.cont1[i,] <- samp.date.tip - mrca.v.age.samp[i,]
}
}
}
# Second contingency matrix: column-wise, sampling date of tip i minus MRCA ages.
mrca.v.age.samp <- mrca.v.age
mrca.v.age.samp.cont2 <- mrca.v.age.samp
# NOTE(review): same redundancy as above, applied column-wise.
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i!=j){
i.dat <- tip.names.f[i]
v.index <- which(tip.names.f == i.dat)
samp.date.tip <- dates.tree.dat[v.index]
mrca.v.age.samp.cont2[,i] <- samp.date.tip - mrca.v.age.samp[,i]
}
}
}
# Diagonal zero for mrca.v.age.samp.cont1 and mrca.v.age.samp.cont2
# (the row/column assignments above also overwrote the diagonal entries).
for(i in 1:nrow(mrca.v.age.samp.cont1)){
for(j in 1:nrow(mrca.v.age.samp.cont1)){
if(i==j){
mrca.v.age.samp.cont1[i,j] <- 0
}
}
}
for(i in 1:nrow(mrca.v.age.samp.cont2)){
for(j in 1:nrow(mrca.v.age.samp.cont2)){
if(i==j){
mrca.v.age.samp.cont2[i,j] <- 0
}
}
}
# filter table.simpact.trans.net.adv and remain with table of tips names (individulas in the tree)
attributes.table.simpact.trans.net.adv <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%tip.names)
# Per-tip attribute vectors: gender, CD4 count, viral load and x/y location.
V.gender <- vector()
V.cd4 <- vector()
V.vl <- vector()
V.x <- vector()
V.y <- vector()
iD <- vector()
# Align the attributes with the tip order of the tree by matching sequence labels.
for(i in 1:length(tip.names)){
for(j in 1:nrow(attributes.table.simpact.trans.net.adv)){
if(tip.names[i] == attributes.table.simpact.trans.net.adv$id.lab[j]){
V.gender <- c(V.gender, attributes.table.simpact.trans.net.adv$GenderRec[j])
V.cd4 <- c(V.cd4, attributes.table.simpact.trans.net.adv$cd4[j])
V.vl <- c(V.vl, attributes.table.simpact.trans.net.adv$vl[j])
V.x <- c(V.x, attributes.table.simpact.trans.net.adv$location.x[j])
V.y <- c(V.y, attributes.table.simpact.trans.net.adv$location.y[j])
iD <- c(iD, tip.names[i])
}
}
}
Node.gender.cd4.vl.x.y <- data.table(V.gender,V.cd4, V.vl, V.x, V.y, iD)
# Add the cluster ID to the attributes table built above.
clust.ID.vec <- vector()
id.vec <- vector()
for(k in 1:nrow(Node.gender.cd4.vl.x.y)){ # attributes table for all tips on the tree: Node.gender.cd4.vl.x.y
id <- Node.gender.cd4.vl.x.y$iD[k]
if(id%in%data.table.simpact.trans.clusts.net.adv$id.lab){ # tree tip ID that belongs to a picked cluster
# Transmission table of individuals in the transmission clusters: data.table.simpact.trans.clusts.net.adv.
# NOTE(review): if id.lab matched more than one row here, clust.ID.vec.i would
# have length > 1 and silently lengthen clust.ID.vec -- the upstream
# deduplication on (id, id.lab) should prevent that; confirm.
id.index <- which(data.table.simpact.trans.clusts.net.adv$id.lab == id)
clust.ID.vec.i <- data.table.simpact.trans.clusts.net.adv$clust.ID[id.index]
}else{
clust.ID.vec.i <- 0 # tip ID which is not in any transmission cluster is assigned value 0
}
clust.ID.vec <- c(clust.ID.vec, clust.ID.vec.i)
id.vec <- c(id.vec, id)
}
Node.gender.cd4.vl.x.y$clust.ID <- clust.ID.vec
Node.gender.cd4.vl.x.y.clusID <- Node.gender.cd4.vl.x.y # attributes table with clusters' IDs
## Building transmission network
# 1. consider contigency matrix 2
# Symmetric matrix of |sampling date - tMRCA|, used as edge weights below.
mrca.times.final <- as.matrix(abs(mrca.v.age.samp.cont2))
net <- graph.adjacency(as.matrix(mrca.times.final), mode="undirected",weighted=T,diag=FALSE)
# E(net) # The edges of the "net" object
#
# V(net) # The vertices of the "net" object
# Attach the per-individual attributes to the vertices (same tip order as the matrix).
V(net)$gender <- Node.gender.cd4.vl.x.y$V.gender
V(net)$cd4 <- Node.gender.cd4.vl.x.y$V.cd4
V(net)$vl <- Node.gender.cd4.vl.x.y$V.vl
V(net)$loc.x <- Node.gender.cd4.vl.x.y$V.x
V(net)$loc.y <- Node.gender.cd4.vl.x.y$V.y
## Filtering the network by breaking some edges due to conditions from individuals attributes:
##############################################################################################
# 1. Gender, 2. cluster belonging, 3. geographical location, 4. CD4, and 5. Viral load
# Now considering 1 and 2
# Working copies consumed by the gender/cluster filtering loops further down.
names.attributes.ngaha <- Node.gender.cd4.vl.x.y
names.matrix.contigency <- names(mrca.times.final[1,])
gender.l <- names.attributes.ngaha$V.gender
clusters.zose <- Node.gender.cd4.vl.x.y$clust.ID
mrca.times.filter <- mrca.times.final
#
# for (i in 1:length(names(mrca.times.final[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final[1,]) == name.col.i)
#
# gender.i <- gender.l[index.i]
#
# cluster.i <- clusters.zose[index.i]
#
#
# for(j in 1:length(names(mrca.times.final[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final[1,]) == name.col.j)
#
# gender.j <- gender.l[index.j]
#
# cluster.j <- clusters.zose[index.j]
#
#
# if(gender.i == gender.j){ # if same gender break the link
#
# mrca.times.filter[i,j] <- 0
#
# }
#
# if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
#
# mrca.times.filter[i,j] <- 0
#
# }
#
#
# }
#
# }
#
# }
# i. Gender
############
# Break links between same-gender tips by zeroing their matrix entries.
# NOTE(review): the which() lookups assume tip names are unique, in which case
# index.i == i and index.j == j; duplicated names would yield multi-valued
# indices and break the scalar comparisons -- confirm uniqueness.
for (i in 1:length(names(mrca.times.final[1,]))) {
name.col.i <- names.matrix.contigency[i]
index.i <- which(names(mrca.times.final[1,]) == name.col.i)
gender.i <- gender.l[index.i]
for(j in 1:length(names(mrca.times.final[1,]))){
if(i != j){
name.col.j <- names.matrix.contigency[j]
index.j <- which(names(mrca.times.final[1,]) == name.col.j)
gender.j <- gender.l[index.j]
if(gender.i == gender.j){ # if same gender break the link
mrca.times.filter[i,j] <- 0
}
}
}
}
mrca.times.filter.gender <- mrca.times.filter
# ii. Cluster
#############
# Break links between tips that belong to two different transmission clusters;
# cluster ID 0 means "not in any cluster" and is left connected here.
mrca.times.filter.gender.clust <- mrca.times.filter.gender
for (i in 1:length(names(mrca.times.final[1,]))) {
name.col.i <- names.matrix.contigency[i]
index.i <- which(names(mrca.times.final[1,]) == name.col.i)
cluster.i <- clusters.zose[index.i]
for(j in 1:length(names(mrca.times.final[1,]))){
if(i != j){
name.col.j <- names.matrix.contigency[j]
index.j <- which(names(mrca.times.final[1,]) == name.col.j)
cluster.j <- clusters.zose[index.j]
if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
mrca.times.filter.gender.clust[i,j] <- 0
}
}
}
}
# iii. tMRCA
#############
# Weighted undirected graph from the gender- and cluster-filtered matrix;
# edge weight = |sampling date - tMRCA| between the two tips.
net.cont.1 <- graph.adjacency(as.matrix(mrca.times.filter.gender.clust),mode="undirected",weighted=T,diag=FALSE)
# Consider plausible transmissions and difference between sampling time and tMRCA
cut.off <- cut.off # years; value supplied by the enclosing environment (this self-assignment is a no-op)
# E(net.cont.1)$weight
net.cont.1 <- delete_edges(net.cont.1, E(net.cont.1)[weight>=cut.off]) # remove link greater to the cuttoff
# E(net.cont.1)$weight
# plot(net.cont.1, layout=layout_with_kk)
# Delete tips of the phylogenetic tree which are not part of transmission clusters: they have clust.ID==0 >> deletes vertices
###################################################################################
Non.ids.dat <- dplyr::filter(Node.gender.cd4.vl.x.y, Node.gender.cd4.vl.x.y$clust.ID==0)
Non.ids <- Non.ids.dat$iD
net.cleaned <- delete_vertices(net.cont.1, Non.ids)
#
# # 2. consider contigency matrix 1
#
# mrca.times.final.2 <- as.matrix(abs(mrca.v.age.samp.cont1))
#
#
# net.2 <- graph.adjacency(as.matrix(mrca.times.final.2), mode="undirected",weighted=T,diag=FALSE)
#
# E(net.2) # The edges of the "net.2" object
#
# V(net.2) # The vertices of the "net.2" object
#
# V(net.2)$gender <- Node.gender.cd4.vl.x.y$V.gender
# V(net.2)$cd4 <- Node.gender.cd4.vl.x.y$V.cd4
# V(net.2)$vl <- Node.gender.cd4.vl.x.y$V.vl
# V(net.2)$loc.x <- Node.gender.cd4.vl.x.y$V.x
# V(net.2)$loc.y <- Node.gender.cd4.vl.x.y$V.y
#
#
#
#
# ## Filtering the net.2work by breaking some edges due to conditions from individuals attributes:
#
# # 1. Gender, 2. cluster belonging, 3. geographical location, 4. CD4, and 5. Viral load
#
#
# names.attributes.ngaha <- Node.gender.cd4.vl.x.y
#
# names.matrix.contigency <- names(mrca.times.final.2[1,])
#
# gender.l <- names.attributes.ngaha$V.gender
#
#
# clusters.zose <- Node.gender.cd4.vl.x.y$clust.ID
#
#
# mrca.times.filter.2 <- mrca.times.final.2
#
#
# #
# # for (i in 1:length(names(mrca.times.final.2[1,]))) {
# #
# # name.col.i <- names.matrix.contigency[i]
# #
# # index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
# #
# # gender.i <- gender.l[index.i]
# #
# # cluster.i <- clusters.zose[index.i]
# #
# #
# # for(j in 1:length(names(mrca.times.final.2[1,]))){
# #
# # if(i != j){
# #
# # name.col.j <- names.matrix.contigency[j]
# #
# # index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
# #
# # gender.j <- gender.l[index.j]
# #
# # cluster.j <- clusters.zose[index.j]
# #
# #
# # if(gender.i == gender.j){ # if same gender break the link
# #
# # mrca.times.filter.2[i,j] <- 0
# #
# # }
# #
# # if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
# #
# # mrca.times.filter.2[i,j] <- 0
# #
# # }
# #
# #
# # }
# #
# # }
# #
# # }
#
# # i. Gender
#
# for (i in 1:length(names(mrca.times.final.2[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
#
# gender.i <- gender.l[index.i]
#
# for(j in 1:length(names(mrca.times.final.2[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
#
# gender.j <- gender.l[index.j]
#
# if(gender.i == gender.j){ # if same gender break the link
#
# mrca.times.filter.2[i,j] <- 0
#
# }
#
# }
#
# }
#
# }
#
# mrca.times.filter.2.gender <- mrca.times.filter.2
#
#
# # ii. Cluster
#
# mrca.times.filter.2.gender.clust <- mrca.times.filter.2.gender
#
# for (i in 1:length(names(mrca.times.final.2[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
#
# cluster.i <- clusters.zose[index.i]
#
#
# for(j in 1:length(names(mrca.times.final.2[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
#
# cluster.j <- clusters.zose[index.j]
#
#
# if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
#
# mrca.times.filter.2.gender.clust[i,j] <- 0
#
# }
#
#
# }
#
# }
#
# }
#
#
#
# net.2.cont.1 <- graph.adjacency(as.matrix(mrca.times.filter.2.gender.clust),mode="undirected",weighted=T,diag=FALSE)
#
#
# # Consider plausible transmissions and difference between sampling time and tMRCA
#
#
# cut.off <- 20
#
# E(net.2.cont.1)$weight
#
# net.2.cont.1 <- delete_edges(net.2.cont.1, E(net.2.cont.1)[weight>=cut.off]) # remove link greater to the cuttoff
#
# E(net.2.cont.1)$weight
#
# plot(net.2.cont.1, layout=layout_with_kk)
#
#
# # Delete tips which are not part of transmission clusters, they have clust.ID==0 >> deletes vertices
#
# Non.ids.dat <- dplyr::filter(Node.gender.cd4.vl.x.y, Node.gender.cd4.vl.x.y$clust.ID==0)
# Non.ids <- Non.ids.dat$iD
#
# net.2.cleaned <- delete_vertices(net.2.cont.1, Non.ids)
# r=graph.union(net.cleaned, net.2.cleaned)
# Age structure in the transmission network built from phylogenetic tree
#########################################################################
# produce age table
# Final transmission network inferred from the tree, and its edge list.
net.sp <- net.cleaned
transm.matrix <- as.data.table(get.edgelist(net.sp)) # matrix of links of the transmission network built from phylogenetic tree
# table.simpact.trans.net.adv
# reduced transmission table: table.simpact.trans.net.adv of ids in transmission clusters
# Restrict the cluster transmission table to individuals that kept at least one edge.
ids <- unique(c(transm.matrix$V1, transm.matrix$V2))
table.transm.clust.net.igraph <- dplyr::filter(data.table.simpact.trans.clusts.net.adv, data.table.simpact.trans.clusts.net.adv$id.lab%in%ids)
# 1.
# Age structure in transmission clusters as observed from phylogenetic tree #
##################################################################################
age.groups.filtered.trans.clust.network.fun <- function(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
transm.matrix = transm.matrix,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50)){
Age.groups.table <- NULL
v1.dat <- vector()
v2.dat <- vector()
age1.dat <- vector()
age2.dat <- vector()
gender1.dat <- vector()
gender2.dat <- vector()
age.diff <- vector()
for(i in 1:nrow(transm.matrix)){
v1 <- transm.matrix$V1[i]
v2 <- transm.matrix$V2[i]
index.v1 <- which(table.transm.clust.net.igraph$id.lab == v1)
index.v2 <- which(table.transm.clust.net.igraph$id.lab == v2)
age1 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v1]
age2 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v2]
gender1 <- table.transm.clust.net.igraph$GenderRec[index.v1]
gender2 <- table.transm.clust.net.igraph$GenderRec[index.v2]
v1.dat <- c(v1.dat, v1)
v2.dat <- c(v2.dat, v2)
age1.dat <- c(age1.dat, age1)
age2.dat <- c(age2.dat, age2)
ad <- abs(age1 - age2)
gender1.dat <- c(gender1.dat, gender1)
gender2.dat <- c(gender2.dat, gender2)
age.diff <- c(age.diff, ad)
}
age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat, age.diff)
# men
men.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==0)
men.age.15.25 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.15.25[1] & men.age.table.1$age1.dat < age.group.15.25[2])
men.age.25.40 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.25.40[1] & men.age.table.1$age1.dat < age.group.25.40[2])
men.age.40.50 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.40.50[1] & men.age.table.1$age1.dat < age.group.40.50[2])
# women
women.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==1)
women.age.15.25 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.15.25[1] & women.age.table.1$age1.dat < age.group.15.25[2])
women.age.25.40 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.25.40[1] & women.age.table.1$age1.dat < age.group.25.40[2])
women.age.40.50 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.40.50[1] & women.age.table.1$age1.dat < age.group.40.50[2])
numbers.indiv.women.15.25 <- nrow(women.age.15.25)
numbers.indiv.men.15.25 <- nrow(men.age.15.25)
numbers.indiv.women.25.40 <- nrow(women.age.25.40)
numbers.indiv.men.25.40 <- nrow(men.age.25.40)
numbers.indiv.women.40.50 <- nrow(women.age.40.50)
numbers.indiv.men.40.50 <- nrow(men.age.40.50)
numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
numbers.indiv.women.25.40, numbers.indiv.men.25.40,
numbers.indiv.women.40.50, numbers.indiv.men.40.50)
names(numbers.individuals.age.groups) <- c("num.women.cl.15.25", "num.men.cl.15.25",
"num.women.cl.25.40", "num.men.cl.25.40",
"num.women.cl.40.50", "num.men.cl.40.50")
# Age differences
AD.num.women.15.25 <- women.age.15.25$age.diff
AD.num.men.15.25 <- men.age.15.25$age.diff
AD.num.women.25.40 <- women.age.25.40$age.diff
AD.num.men.25.40 <- men.age.25.40$age.diff
AD.num.women.40.50 <- women.age.40.50$age.diff
AD.num.men.40.50 <- men.age.40.50$age.diff
mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
med.AD.num.women.15.25 <- median(AD.num.women.15.25)
sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
med.AD.num.men.15.25 <- median(AD.num.men.15.25)
sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
med.AD.num.women.25.40 <- median(AD.num.women.25.40)
sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
med.AD.num.men.25.40 <- median(AD.num.men.25.40)
sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
med.AD.num.women.40.50 <- median(AD.num.women.40.50)
sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
med.AD.num.men.40.50 <- median(AD.num.men.40.50)
sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
mean.AD.num.women.25.40, mean.AD.num.men.25.40,
mean.AD.num.women.40.50, mean.AD.num.men.40.50)
names(mean.AD.age.groups) <- c("mean.AD.num.women.cl.15.25", "mean.AD.num.men.cl.15.25",
"mean.AD.num.women.cl.25.40", "mean.AD.num.men.cl.25.40",
"mean.AD.num.women.cl.40.50", "mean.AD.num.men.cl.40.50")
med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
med.AD.num.women.25.40, med.AD.num.men.25.40,
med.AD.num.women.40.50, med.AD.num.men.40.50)
names(med.AD.age.groups) <- c("med.AD.num.women.cl.15.25", "med.AD.num.men.cl.15.25",
"med.AD.num.women.cl.25.40", "med.AD.num.men.cl.25.40",
"med.AD.num.women.cl.40.50", "med.AD.num.men.cl.40.50")
sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
sd.AD.num.women.25.40, sd.AD.num.men.25.40,
sd.AD.num.women.40.50, sd.AD.num.men.40.50)
names(sd.AD.age.groups) <- c("sd.AD.num.women.cl.15.25", "sd.AD.num.men.cl.15.25",
"sd.AD.num.women.cl.25.40", "sd.AD.num.men.cl.25.40",
"sd.AD.num.women.cl.40.50", "sd.AD.num.men.cl.40.50")
# Number os pairings
# men 15.25 and women
men.15.25.women.15.25.1 <- vector()
men.15.25.women.25.40.1 <- vector()
men.15.25.women.40.50.1 <- vector()
if(nrow(men.age.table.1)>1){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 15.25 and men
women.15.25.men.15.25.2 <- vector()
women.15.25.men.25.40.2 <- vector()
women.15.25.men.40.50.2 <- vector()
if(nrow(women.age.table.1)>1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 25.40 and women
men.25.40.women.15.25.1 <- vector()
men.25.40.women.25.40.1 <- vector()
men.25.40.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 25.40 and men
women.25.40.men.15.25.2 <- vector()
women.25.40.men.25.40.2 <- vector()
women.25.40.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 40.50 and women
men.40.50.women.15.25.1 <- vector()
men.40.50.women.25.40.1 <- vector()
men.40.50.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 40.50 and men
women.40.50.men.15.25.2 <- vector()
women.40.50.men.25.40.2 <- vector()
women.40.50.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# age structured pairings in both gender without directionality
men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
Age.groups.table <- as.table(Age.groups.table)
men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50)) # number of pairings of men between 15 and 24
men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50)) # number of pairings of men between 25 and 39
men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)) # number of pairings of men between 40 and 49
# proportion of men in different age groups
prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25)) # number of pairings of men between 15 and 24
women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40)) # number of pairings of men between 25 and 39
women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50)) # number of pairings of men between 40 and 49
prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
outputlist <- NULL
outputlist$Age.groups.table <- Age.groups.table
outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
outputlist$mean.AD.age.groups <- mean.AD.age.groups
outputlist$med.AD.age.groups <- med.AD.age.groups
outputlist$sd.AD.age.groups <- sd.AD.age.groups
return(outputlist)
}
# 1. Results: age structure table obtained from transmission network built from transmission clusters
# Calls the cluster-network age-structure function (defined earlier in this file) and unpacks
# its output list into flat, individually named scalars so they can be concatenated into the
# result vector `res1` at the end of this section.
# NOTE(review): `transm.matrix` and `table.transm.clust.net.igraph` are file-level objects
# created earlier in the script — confirm they are in scope at this point.
age.structure.transm.clust.List <- age.groups.filtered.trans.clust.network.fun(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
                                                                               transm.matrix = transm.matrix,
                                                                               age.group.15.25 = c(15,25),
                                                                               age.group.25.40 = c(25,40),
                                                                               age.group.40.50 = c(40,50))
# 3x3 count table of pairings: rows = male age group, columns = female age group.
age.structure.transm.clust <- age.structure.transm.clust.List$Age.groups.table
# Extract each cell as a (named) scalar; `[i,][j]` takes row i then element j,
# i.e. cell (i, j), and keeps the column name attached to the value.
cl.age.str.M.15.25.F.15.25 <- age.structure.transm.clust[1,][1]
cl.age.str.M.25.40.F.15.25 <- age.structure.transm.clust[2,][1]
cl.age.str.M.40.50.F.15.25 <- age.structure.transm.clust[3,][1]
cl.age.str.M.15.25.F.25.40 <- age.structure.transm.clust[1,][2]
cl.age.str.M.25.40.F.25.40 <- age.structure.transm.clust[2,][2]
cl.age.str.M.40.50.F.25.40 <- age.structure.transm.clust[3,][2]
cl.age.str.M.15.25.F.40.50 <- age.structure.transm.clust[1,][3]
cl.age.str.M.25.40.F.40.50 <- age.structure.transm.clust[2,][3]
cl.age.str.M.40.50.F.40.50 <- age.structure.transm.clust[3,][3]
# Flatten the table in column-major order (all female-15.25 cells first) and rename.
table.cl.age.str <- c(cl.age.str.M.15.25.F.15.25, cl.age.str.M.25.40.F.15.25, cl.age.str.M.40.50.F.15.25,
                      cl.age.str.M.15.25.F.25.40, cl.age.str.M.25.40.F.25.40, cl.age.str.M.40.50.F.25.40,
                      cl.age.str.M.15.25.F.40.50, cl.age.str.M.25.40.F.40.50, cl.age.str.M.40.50.F.40.50)
names(table.cl.age.str) <- c("cl.M.15.25.F.15.25", "cl.M.25.40.F.15.25", "cl.M.40.50.F.15.25",
                             "cl.M.15.25.F.25.40", "cl.M.25.40.F.25.40", "cl.M.40.50.F.25.40",
                             "cl.M.15.25.F.40.50", "cl.M.25.40.F.40.50", "cl.M.40.50.F.40.50")
# Men prop
# Same flattening for the row-wise proportions (each male age group's partners
# distributed over female age groups).
age.structure.transm.clust.prop.men <- age.structure.transm.clust.List$prop.men.age.groups.table
cl.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.clust.prop.men[1,][1]
cl.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.clust.prop.men[2,][1]
cl.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.clust.prop.men[3,][1]
cl.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.clust.prop.men[1,][2]
cl.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.clust.prop.men[2,][2]
cl.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.clust.prop.men[3,][2]
cl.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.clust.prop.men[1,][3]
cl.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.clust.prop.men[2,][3]
cl.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.clust.prop.men[3,][3]
table.cl.age.str.prop.men <- c(cl.age.str.prop.men.15.25.F.15.25, cl.age.str.prop.men.25.40.F.15.25, cl.age.str.prop.men.40.50.F.15.25,
                               cl.age.str.prop.men.15.25.F.25.40, cl.age.str.prop.men.25.40.F.25.40, cl.age.str.prop.men.40.50.F.25.40,
                               cl.age.str.prop.men.15.25.F.40.50, cl.age.str.prop.men.25.40.F.40.50, cl.age.str.prop.men.40.50.F.40.50)
names(table.cl.age.str.prop.men) <- c("cl.prop.men15.25.F.15.25", "cl.prop.men25.40.F.15.25", "cl.prop.men40.50.F.15.25",
                                      "cl.prop.men15.25.F.25.40", "cl.prop.men25.40.F.25.40", "cl.prop.men40.50.F.25.40",
                                      "cl.prop.men15.25.F.40.50", "cl.prop.men25.40.F.40.50", "cl.prop.men40.50.F.40.50")
# Proportions can be NaN when a male age group has no pairings (division by zero
# total upstream); NA.handle.fun (defined elsewhere in this file) cleans these up —
# presumably replacing non-finite values, verify against its definition.
table.cl.age.str.prop.men <- NA.handle.fun(input = table.cl.age.str.prop.men)
# Women prop
# Column-wise proportions: each female age group's partners over male age groups.
age.structure.transm.clust.prop.women <- age.structure.transm.clust.List$prop.women.age.groups.table
cl.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.prop.women[1,][1]
cl.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.prop.women[2,][1]
cl.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.prop.women[3,][1]
cl.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.prop.women[1,][2]
cl.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.prop.women[2,][2]
cl.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.prop.women[3,][2]
cl.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.prop.women[1,][3]
cl.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.prop.women[2,][3]
cl.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.prop.women[3,][3]
table.cl.age.str.prop.women <- c(cl.age.str.prop.women.15.25.M.15.25, cl.age.str.prop.women.25.40.M.15.25, cl.age.str.prop.women.40.50.M.15.25,
                                 cl.age.str.prop.women.15.25.M.25.40, cl.age.str.prop.women.25.40.M.25.40, cl.age.str.prop.women.40.50.M.25.40,
                                 cl.age.str.prop.women.15.25.M.40.50, cl.age.str.prop.women.25.40.M.40.50, cl.age.str.prop.women.40.50.M.40.50)
names(table.cl.age.str.prop.women) <- c("cl.prop.women15.25.M.15.25", "cl.prop.women25.40.M.15.25", "cl.prop.women40.50.M.15.25",
                                        "cl.prop.women15.25.M.25.40", "cl.prop.women25.40.M.25.40", "cl.prop.women40.50.M.25.40",
                                        "cl.prop.women15.25.M.40.50", "cl.prop.women25.40.M.40.50", "cl.prop.women40.50.M.40.50")
table.cl.age.str.prop.women <- NA.handle.fun(input = table.cl.age.str.prop.women)
#
# Per-age-group counts and age-difference summary statistics, carried through unchanged.
numbers.individuals.age.groups.cl <- age.structure.transm.clust.List$numbers.individuals.age.groups
mean.AD.age.groups.cl <- age.structure.transm.clust.List$mean.AD.age.groups
med.AD.age.groups.cl <- age.structure.transm.clust.List$med.AD.age.groups
sd.AD.age.groups.cl <- age.structure.transm.clust.List$sd.AD.age.groups
# res1: full named result vector for section 1 (cluster-derived age structure).
res1 <- c(table.cl.age.str, table.cl.age.str.prop.men, table.cl.age.str.prop.women,
          numbers.individuals.age.groups.cl,
          mean.AD.age.groups.cl, med.AD.age.groups.cl,
          sd.AD.age.groups.cl)
# 2.
# True age structure in transmission clusters as observed from transmission network #
#####################################################################################
# age.groups.filtered.transmission.clust.fun
#
# Computes the "true" (transmission-network-derived) age-mixing structure for the
# individuals appearing in transmission clusters.
#
# Arguments:
#   table.transm.clust.net.igraph : data frame of transmission pairs; the columns
#       used here are RecId, DonId, GenderRec, GenderDon, ageSampTimeRec, SampTime,
#       TOBDon. Gender coding: "1" is treated as female, "0" as male (per the
#       variable names below) — TODO confirm against the data dictionary.
#   age.group.15.25 / .25.40 / .40.50 : c(lower, upper) bounds; lower inclusive,
#       upper exclusive throughout.
#
# Returns a list with:
#   Age.groups.table            : 3x3 pairing counts (rows = male age group,
#                                 cols = female age group)
#   prop.men.age.groups.table   : row-wise proportions (per male age group)
#   prop.women.age.groups.table : column-wise proportions (per female age group)
#   numbers.individuals.age.groups : recipient counts per gender/age group
#   mean/med/sd.AD.age.groups   : age-difference summary stats per group
#
# NOTE(review): the pairing-classification loops below only run when the filtered
# table has MORE than one row (`nrow(...) > 1`); a table with exactly one pairing
# is silently skipped — confirm this is intentional and not an off-by-one.
age.groups.filtered.transmission.clust.fun <- function(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
                                                       age.group.15.25 = c(15,25),
                                                       age.group.25.40 = c(25,40),
                                                       age.group.40.50 = c(40,50)){
  # --- Recipient subsets by gender and age-at-sampling group (lower incl., upper excl.) ---
  num.women.15.25 <- dplyr::filter(table.transm.clust.net.igraph,
                                   table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.15.25[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.15.25[2])
  num.men.15.25 <- dplyr::filter(table.transm.clust.net.igraph,
                                 table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.15.25[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.15.25[2])
  num.women.25.40 <- dplyr::filter(table.transm.clust.net.igraph,
                                   table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.25.40[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.25.40[2])
  num.men.25.40 <- dplyr::filter(table.transm.clust.net.igraph,
                                 table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.25.40[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.25.40[2])
  num.women.40.50 <- dplyr::filter(table.transm.clust.net.igraph,
                                   table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.40.50[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.40.50[2])
  num.men.40.50 <- dplyr::filter(table.transm.clust.net.igraph,
                                 table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.40.50[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.40.50[2])
  # --- Recipient counts per gender/age group ---
  numbers.indiv.women.15.25 <- nrow(num.women.15.25)
  numbers.indiv.men.15.25 <- nrow(num.men.15.25)
  numbers.indiv.women.25.40 <- nrow(num.women.25.40)
  numbers.indiv.men.25.40 <- nrow(num.men.25.40)
  numbers.indiv.women.40.50 <- nrow(num.women.40.50)
  numbers.indiv.men.40.50 <- nrow(num.men.40.50)
  numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
                                      numbers.indiv.women.25.40, numbers.indiv.men.25.40,
                                      numbers.indiv.women.40.50, numbers.indiv.men.40.50)
  names(numbers.individuals.age.groups) <- c("num.women.true.cl.15.25", "num.men.true.cl.15.25",
                                             "num.women.true.cl.25.40", "num.men.true.cl.25.40",
                                             "num.women.true.cl.40.50", "num.men.true.cl.40.50")
  # --- Donor age at the recipient's sampling time, per subset ---
  num.women.15.25$ageSampTimeDon <- num.women.15.25$SampTime - num.women.15.25$TOBDon
  num.men.15.25$ageSampTimeDon <- num.men.15.25$SampTime - num.men.15.25$TOBDon
  num.women.25.40$ageSampTimeDon <- num.women.25.40$SampTime - num.women.25.40$TOBDon
  num.men.25.40$ageSampTimeDon <- num.men.25.40$SampTime - num.men.25.40$TOBDon
  num.women.40.50$ageSampTimeDon <- num.women.40.50$SampTime - num.women.40.50$TOBDon
  num.men.40.50$ageSampTimeDon <- num.men.40.50$SampTime - num.men.40.50$TOBDon
  # Age differences
  # Absolute donor-recipient age gap per pairing, then mean/median/sd per subset.
  # NOTE(review): mean()/sd() on an empty subset yield NaN/NA here.
  AD.num.women.15.25 <- abs(num.women.15.25$ageSampTimeDon - num.women.15.25$ageSampTimeRec)
  AD.num.men.15.25 <- abs(num.men.15.25$ageSampTimeDon - num.men.15.25$ageSampTimeRec)
  AD.num.women.25.40 <- abs(num.women.25.40$ageSampTimeDon - num.women.25.40$ageSampTimeRec)
  AD.num.men.25.40 <- abs(num.men.25.40$ageSampTimeDon - num.men.25.40$ageSampTimeRec)
  AD.num.women.40.50 <- abs(num.women.40.50$ageSampTimeDon - num.women.40.50$ageSampTimeRec)
  AD.num.men.40.50 <- abs(num.men.40.50$ageSampTimeDon - num.men.40.50$ageSampTimeRec)
  mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
  med.AD.num.women.15.25 <- median(AD.num.women.15.25)
  sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
  mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
  med.AD.num.men.15.25 <- median(AD.num.men.15.25)
  sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
  mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
  med.AD.num.women.25.40 <- median(AD.num.women.25.40)
  sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
  mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
  med.AD.num.men.25.40 <- median(AD.num.men.25.40)
  sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
  mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
  med.AD.num.women.40.50 <- median(AD.num.women.40.50)
  sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
  mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
  med.AD.num.men.40.50 <- median(AD.num.men.40.50)
  sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
  mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
                          mean.AD.num.women.25.40, mean.AD.num.men.25.40,
                          mean.AD.num.women.40.50, mean.AD.num.men.40.50)
  names(mean.AD.age.groups) <- c("mean.AD.num.women.true.cl.15.25", "mean.AD.num.men.true.cl.15.25",
                                 "mean.AD.num.women.true.cl.25.40", "mean.AD.num.men.true.cl.25.40",
                                 "mean.AD.num.women.true.cl.40.50", "mean.AD.num.men.true.cl.40.50")
  med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
                         med.AD.num.women.25.40, med.AD.num.men.25.40,
                         med.AD.num.women.40.50, med.AD.num.men.40.50)
  names(med.AD.age.groups) <- c("med.AD.num.women.true.cl.15.25", "med.AD.num.men.true.cl.15.25",
                                "med.AD.num.women.true.cl.25.40", "med.AD.num.men.true.cl.25.40",
                                "med.AD.num.women.true.cl.40.50", "med.AD.num.men.true.cl.40.50")
  sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
                        sd.AD.num.women.25.40, sd.AD.num.men.25.40,
                        sd.AD.num.women.40.50, sd.AD.num.men.40.50)
  names(sd.AD.age.groups) <- c("sd.AD.num.women.true.cl.15.25", "sd.AD.num.men.true.cl.15.25",
                               "sd.AD.num.women.true.cl.25.40", "sd.AD.num.men.true.cl.25.40",
                               "sd.AD.num.women.true.cl.40.50", "sd.AD.num.men.true.cl.40.50")
  # --- Build a flat pairing table (recipient = "1" side, donor = "2" side) ---
  table.transm.clust.net.igraph$ageSampTimeDon <- table.transm.clust.net.igraph$SampTime - table.transm.clust.net.igraph$TOBDon
  Age.groups.table <- NULL
  v1.dat <- vector()
  v2.dat <- vector()
  age1.dat <- vector()
  age2.dat <- vector()
  gender1.dat <- vector()
  gender2.dat <- vector()
  for(i in 1:nrow(table.transm.clust.net.igraph)){
    v1 <- table.transm.clust.net.igraph$RecId[i]
    v2 <- table.transm.clust.net.igraph$DonId[i]
    # NOTE(review): which() returns ALL rows matching this RecId; if RecId is not
    # unique, age1/age2/gender1/gender2 become vectors and the accumulated columns
    # get extra entries — confirm RecId uniqueness upstream.
    index.v1 <- which(table.transm.clust.net.igraph$RecId == v1)
    age1 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v1]
    age2 <- table.transm.clust.net.igraph$ageSampTimeDon[index.v1]
    gender1 <- table.transm.clust.net.igraph$GenderRec[index.v1]
    gender2 <- table.transm.clust.net.igraph$GenderDon[index.v1]
    v1.dat <- c(v1.dat, v1)
    v2.dat <- c(v2.dat, v2)
    age1.dat <- c(age1.dat, age1)
    age2.dat <- c(age2.dat, age2)
    gender1.dat <- c(gender1.dat, gender1)
    gender2.dat <- c(gender2.dat, gender2)
  }
  age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat)
  # men
  # Pairings whose recipient is male (gender1 == 0).
  men.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==0)
  # women
  # Pairings whose recipient is female (gender1 == 1).
  women.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==1)
  # men 15.25 and women
  # For male recipients aged 15-25, collect partner (donor) ages by partner age group.
  men.15.25.women.15.25.1 <- vector()
  men.15.25.women.25.40.1 <- vector()
  men.15.25.women.40.50.1 <- vector()
  if(nrow(men.age.table.1) >1 ){
    for (j in 1:nrow(men.age.table.1)) {
      if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
        if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
          men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
        }else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
          men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
        }else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
          men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # women 15.25 and men
  # Same classification for female recipients aged 15-25 (partner assumed male).
  women.15.25.men.15.25.2 <- vector()
  women.15.25.men.25.40.2 <- vector()
  women.15.25.men.40.50.2 <- vector()
  if(nrow(women.age.table.1) >1 ){
    for (j in 1:nrow(women.age.table.1)) {
      if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
        if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
          women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
        }else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
          women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
        }else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
          women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # men 25.40 and women
  men.25.40.women.15.25.1 <- vector()
  men.25.40.women.25.40.1 <- vector()
  men.25.40.women.40.50.1 <- vector()
  if(nrow(men.age.table.1) >1 ){
    for (j in 1:nrow(men.age.table.1)) {
      if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
        if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
          men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
        }else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
          men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
        }else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
          men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # women 25.40 and men
  women.25.40.men.15.25.2 <- vector()
  women.25.40.men.25.40.2 <- vector()
  women.25.40.men.40.50.2 <- vector()
  if(nrow(women.age.table.1) >1 ){
    for (j in 1:nrow(women.age.table.1)) {
      if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
        if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
          women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
        }else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
          women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
        }else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
          women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # men 40.50 and women
  men.40.50.women.15.25.1 <- vector()
  men.40.50.women.25.40.1 <- vector()
  men.40.50.women.40.50.1 <- vector()
  if(nrow(men.age.table.1) > 1 ){
    for (j in 1:nrow(men.age.table.1)) {
      if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
        if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
          men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
        }else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
          men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
        }else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
          men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # women 40.50 and men
  women.40.50.men.15.25.2 <- vector()
  women.40.50.men.25.40.2 <- vector()
  women.40.50.men.40.50.2 <- vector()
  if(nrow(women.age.table.1) >1 ){
    for (j in 1:nrow(women.age.table.1)) {
      if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
        if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
          women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
        }else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
          women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
        }else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
          women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
        }
      }
    }
  }
  # Merge the two directions (male-recipient and female-recipient views) so
  # pairings are counted without directionality.
  men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
  men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
  men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
  men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
  men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
  men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
  men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
  men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
  men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
  # 3x3 pairing-count table: rows = male age group, columns = female age group.
  Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
                               length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
                               length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
                             ncol = 3,
                             byrow = TRUE)
  colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
  rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
  Age.groups.table <- as.table(Age.groups.table)
  # Row totals: pairings per male age group (denominator for row-wise proportions;
  # a zero total yields NaN entries — handled downstream).
  men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50))
  men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50))
  men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50))
  prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
                                        length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
                                        length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
                                      ncol = 3,
                                      byrow = TRUE)
  colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
  rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
  # Column totals: pairings per female age group (denominator for column-wise proportions).
  women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25))
  women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40))
  women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50))
  prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
                                          length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
                                          length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
                                        ncol = 3,
                                        byrow = TRUE)
  colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
  rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
  # Assemble the output list (see header comment for the element descriptions).
  outputlist <- NULL
  outputlist$Age.groups.table <- Age.groups.table
  outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
  outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
  outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
  outputlist$mean.AD.age.groups <- mean.AD.age.groups
  outputlist$med.AD.age.groups <- med.AD.age.groups
  outputlist$sd.AD.age.groups <- sd.AD.age.groups
  return(outputlist)
}
# 2. Results: true age structure table from transmission network of these individuals in transmission clusters
# Same unpacking pattern as section 1, but for the "true" (transmission-network-derived)
# age structure of individuals in transmission clusters. Produces the named result
# vector `res2`.
age.structure.transm.clus.true.List <- age.groups.filtered.transmission.clust.fun(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
                                                                                  age.group.15.25 = c(15,25),
                                                                                  age.group.25.40 = c(25,40),
                                                                                  age.group.40.50 = c(40,50))
# 3x3 count table: rows = male age group, columns = female age group.
age.structure.transm.clust.true <- age.structure.transm.clus.true.List$Age.groups.table
# Extract each cell (i, j) as a scalar via row-then-element subsetting.
cl.true.age.str.M.15.25.F.15.25 <- age.structure.transm.clust.true[1,][1]
cl.true.age.str.M.25.40.F.15.25 <- age.structure.transm.clust.true[2,][1]
cl.true.age.str.M.40.50.F.15.25 <- age.structure.transm.clust.true[3,][1]
cl.true.age.str.M.15.25.F.25.40 <- age.structure.transm.clust.true[1,][2]
cl.true.age.str.M.25.40.F.25.40 <- age.structure.transm.clust.true[2,][2]
cl.true.age.str.M.40.50.F.25.40 <- age.structure.transm.clust.true[3,][2]
cl.true.age.str.M.15.25.F.40.50 <- age.structure.transm.clust.true[1,][3]
cl.true.age.str.M.25.40.F.40.50 <- age.structure.transm.clust.true[2,][3]
cl.true.age.str.M.40.50.F.40.50 <- age.structure.transm.clust.true[3,][3]
# Flatten in column-major order and attach canonical names.
table.cl.true.age.str <- c(cl.true.age.str.M.15.25.F.15.25, cl.true.age.str.M.25.40.F.15.25, cl.true.age.str.M.40.50.F.15.25,
                           cl.true.age.str.M.15.25.F.25.40, cl.true.age.str.M.25.40.F.25.40, cl.true.age.str.M.40.50.F.25.40,
                           cl.true.age.str.M.15.25.F.40.50, cl.true.age.str.M.25.40.F.40.50, cl.true.age.str.M.40.50.F.40.50)
names(table.cl.true.age.str) <- c("cl.true.M.15.25.F.15.25", "cl.true.M.25.40.F.15.25", "cl.true.M.40.50.F.15.25",
                                  "cl.true.M.15.25.F.25.40", "cl.true.M.25.40.F.25.40", "cl.true.M.40.50.F.25.40",
                                  "cl.true.M.15.25.F.40.50", "cl.true.M.25.40.F.40.50", "cl.true.M.40.50.F.40.50")
# Men prop
# Row-wise proportions for male age groups, taken from the "true" list.
age.structure.transm.clus.true.prop.men <- age.structure.transm.clus.true.List$prop.men.age.groups.table
cl.true.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.clus.true.prop.men[1,][1]
cl.true.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.clus.true.prop.men[2,][1]
cl.true.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.clus.true.prop.men[3,][1]
cl.true.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.clus.true.prop.men[1,][2]
cl.true.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.clus.true.prop.men[2,][2]
cl.true.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.clus.true.prop.men[3,][2]
cl.true.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.clus.true.prop.men[1,][3]
cl.true.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.clus.true.prop.men[2,][3]
cl.true.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.clus.true.prop.men[3,][3]
table.cl.true.age.str.prop.men <- c(cl.true.age.str.prop.men.15.25.F.15.25, cl.true.age.str.prop.men.25.40.F.15.25, cl.true.age.str.prop.men.40.50.F.15.25,
                                    cl.true.age.str.prop.men.15.25.F.25.40, cl.true.age.str.prop.men.25.40.F.25.40, cl.true.age.str.prop.men.40.50.F.25.40,
                                    cl.true.age.str.prop.men.15.25.F.40.50, cl.true.age.str.prop.men.25.40.F.40.50, cl.true.age.str.prop.men.40.50.F.40.50)
names(table.cl.true.age.str.prop.men) <- c("cl.true.prop.men15.25.F.15.25", "cl.true.prop.men25.40.F.15.25", "cl.true.prop.men40.50.F.15.25",
                                           "cl.true.prop.men15.25.F.25.40", "cl.true.prop.men25.40.F.25.40", "cl.true.prop.men40.50.F.25.40",
                                           "cl.true.prop.men15.25.F.40.50", "cl.true.prop.men25.40.F.40.50", "cl.true.prop.men40.50.F.40.50")
# Replace non-finite proportions (empty age groups) via the file-level helper.
table.cl.true.age.str.prop.men <- NA.handle.fun(input = table.cl.true.age.str.prop.men)
# Women prop
# BUGFIX: this previously read prop.women.age.groups.table from
# `age.structure.transm.clust.List` (the section-1 cluster-derived list), so the
# "true" women proportions silently duplicated the section-1 values. It must come
# from `age.structure.transm.clus.true.List`, mirroring the men-proportion block above.
age.structure.transm.clust.true.prop.women <- age.structure.transm.clus.true.List$prop.women.age.groups.table
cl.true.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.true.prop.women[1,][1]
cl.true.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.true.prop.women[2,][1]
cl.true.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.true.prop.women[3,][1]
cl.true.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.true.prop.women[1,][2]
cl.true.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.true.prop.women[2,][2]
cl.true.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.true.prop.women[3,][2]
cl.true.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.true.prop.women[1,][3]
cl.true.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.true.prop.women[2,][3]
cl.true.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.true.prop.women[3,][3]
table.cl.true.age.str.prop.women <- c(cl.true.age.str.prop.women.15.25.M.15.25, cl.true.age.str.prop.women.25.40.M.15.25, cl.true.age.str.prop.women.40.50.M.15.25,
                                      cl.true.age.str.prop.women.15.25.M.25.40, cl.true.age.str.prop.women.25.40.M.25.40, cl.true.age.str.prop.women.40.50.M.25.40,
                                      cl.true.age.str.prop.women.15.25.M.40.50, cl.true.age.str.prop.women.25.40.M.40.50, cl.true.age.str.prop.women.40.50.M.40.50)
names(table.cl.true.age.str.prop.women) <- c("cl.true.prop.women15.25.M.15.25", "cl.true.prop.women25.40.M.15.25", "cl.true.prop.women40.50.M.15.25",
                                             "cl.true.prop.women15.25.M.25.40", "cl.true.prop.women25.40.M.25.40", "cl.true.prop.women40.50.M.25.40",
                                             "cl.true.prop.women15.25.M.40.50", "cl.true.prop.women25.40.M.40.50", "cl.true.prop.women40.50.M.40.50")
table.cl.true.age.str.prop.women <- NA.handle.fun(input = table.cl.true.age.str.prop.women)
#
# Per-age-group counts and age-difference summary statistics, carried through unchanged.
numbers.individuals.age.groups.true.cl <- age.structure.transm.clus.true.List$numbers.individuals.age.groups
mean.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$mean.AD.age.groups
med.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$med.AD.age.groups
sd.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$sd.AD.age.groups
# res2: full named result vector for section 2 (true cluster age structure).
res2 <- c(table.cl.true.age.str, table.cl.true.age.str.prop.men, table.cl.true.age.str.prop.women,
          numbers.individuals.age.groups.true.cl,
          mean.AD.age.groups.true.cl, med.AD.age.groups.true.cl,
          sd.AD.age.groups.true.cl)
# 3.
# True age structure in transmission transmission network for selected individuals #
#####################################################################################
# Tabulate the age structure of a transmission network restricted to selected
# (sampled) individuals.
#
# Arguments:
#   table.transmission.net.cov: data frame of donor/recipient transmission
#     pairs; must contain columns RecId, DonId, GenderRec, GenderDon,
#     SampTime, TOBDon and ageSampTimeRec (gender coding "0" = male,
#     "1" = female, as used by the filters below).
#   age.group.15.25 / age.group.25.40 / age.group.40.50: numeric length-2
#     vectors giving the [lower, upper) bounds of the three age bands.
#
# Returns a list with: the 3x3 male-by-female pairing count table
# (aggregated, man-to-woman, and woman-to-man variants), row-wise and
# column-wise proportion tables, per-age-group counts of individuals, and
# mean/median/sd of donor-recipient age differences per age group.
age.groups.filtered.transmission.net.fun <- function(table.transmission.net.cov = table.simpact.trans.net.cov,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50)){
# Split recipients by gender and age band at sampling time; intervals are
# closed on the left and open on the right: [lower, upper).
num.women.15.25 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.15.25[1] & table.transmission.net.cov$ageSampTimeRec < age.group.15.25[2])
num.men.15.25 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.15.25[1] & table.transmission.net.cov$ageSampTimeRec < age.group.15.25[2])
num.women.25.40 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.25.40[1] & table.transmission.net.cov$ageSampTimeRec < age.group.25.40[2])
num.men.25.40 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.25.40[1] & table.transmission.net.cov$ageSampTimeRec < age.group.25.40[2])
num.women.40.50 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.40.50[1] & table.transmission.net.cov$ageSampTimeRec < age.group.40.50[2])
num.men.40.50 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.40.50[1] & table.transmission.net.cov$ageSampTimeRec < age.group.40.50[2])
# Counts of recipients per gender x age band.
numbers.indiv.women.15.25 <- nrow(num.women.15.25)
numbers.indiv.men.15.25 <- nrow(num.men.15.25)
numbers.indiv.women.25.40 <- nrow(num.women.25.40)
numbers.indiv.men.25.40 <- nrow(num.men.25.40)
numbers.indiv.women.40.50 <- nrow(num.women.40.50)
numbers.indiv.men.40.50 <- nrow(num.men.40.50)
numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
numbers.indiv.women.25.40, numbers.indiv.men.25.40,
numbers.indiv.women.40.50, numbers.indiv.men.40.50)
names(numbers.individuals.age.groups) <- c("num.women.true.net.15.25", "num.men.true.net.15.25",
"num.women.true.net.25.40", "num.men.true.net.25.40",
"num.women.true.net.40.50", "num.men.true.net.40.50")
# Donor's age at the recipient's sampling time = sampling time - donor's
# time of birth.
num.women.15.25$ageSampTimeDon <- num.women.15.25$SampTime - num.women.15.25$TOBDon
num.men.15.25$ageSampTimeDon <- num.men.15.25$SampTime - num.men.15.25$TOBDon
num.women.25.40$ageSampTimeDon <- num.women.25.40$SampTime - num.women.25.40$TOBDon
num.men.25.40$ageSampTimeDon <- num.men.25.40$SampTime - num.men.25.40$TOBDon
num.women.40.50$ageSampTimeDon <- num.women.40.50$SampTime - num.women.40.50$TOBDon
num.men.40.50$ageSampTimeDon <- num.men.40.50$SampTime - num.men.40.50$TOBDon
# Age differences (absolute donor-recipient age gap) per gender x age band.
AD.num.women.15.25 <- abs(num.women.15.25$ageSampTimeDon - num.women.15.25$ageSampTimeRec)
AD.num.men.15.25 <- abs(num.men.15.25$ageSampTimeDon - num.men.15.25$ageSampTimeRec)
AD.num.women.25.40 <- abs(num.women.25.40$ageSampTimeDon - num.women.25.40$ageSampTimeRec)
AD.num.men.25.40 <- abs(num.men.25.40$ageSampTimeDon - num.men.25.40$ageSampTimeRec)
AD.num.women.40.50 <- abs(num.women.40.50$ageSampTimeDon - num.women.40.50$ageSampTimeRec)
AD.num.men.40.50 <- abs(num.men.40.50$ageSampTimeDon - num.men.40.50$ageSampTimeRec)
# Mean/median/sd of the age gaps; these are NA/NaN for empty groups
# (mean of length-0, sd of length < 2) — callers pass results through
# NA.handle.fun downstream.
mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
med.AD.num.women.15.25 <- median(AD.num.women.15.25)
sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
med.AD.num.men.15.25 <- median(AD.num.men.15.25)
sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
med.AD.num.women.25.40 <- median(AD.num.women.25.40)
sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
med.AD.num.men.25.40 <- median(AD.num.men.25.40)
sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
med.AD.num.women.40.50 <- median(AD.num.women.40.50)
sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
med.AD.num.men.40.50 <- median(AD.num.men.40.50)
sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
mean.AD.num.women.25.40, mean.AD.num.men.25.40,
mean.AD.num.women.40.50, mean.AD.num.men.40.50)
names(mean.AD.age.groups) <- c("mean.AD.num.women.true.net.15.25", "mean.AD.num.men.true.net.15.25",
"mean.AD.num.women.true.net.25.40", "mean.AD.num.men.true.net.25.40",
"mean.AD.num.women.true.net.40.50", "mean.AD.num.men.true.net.40.50")
med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
med.AD.num.women.25.40, med.AD.num.men.25.40,
med.AD.num.women.40.50, med.AD.num.men.40.50)
names(med.AD.age.groups) <- c("med.AD.num.women.true.net.15.25", "med.AD.num.men.true.net.15.25",
"med.AD.num.women.true.net.25.40", "med.AD.num.men.true.net.25.40",
"med.AD.num.women.true.net.40.50", "med.AD.num.men.true.net.40.50")
sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
sd.AD.num.women.25.40, sd.AD.num.men.25.40,
sd.AD.num.women.40.50, sd.AD.num.men.40.50)
names(sd.AD.age.groups) <- c("sd.AD.num.women.true.net.15.25", "sd.AD.num.men.true.net.15.25",
"sd.AD.num.women.true.net.25.40", "sd.AD.num.men.true.net.25.40",
"sd.AD.num.women.true.net.40.50", "sd.AD.num.men.true.net.40.50")
# Add the donor age column to the full table, then split pairs by the
# DONOR's gender for the directional tabulations below.
table.transmission.net.cov$ageSampTimeDon <- table.transmission.net.cov$SampTime - table.transmission.net.cov$TOBDon
men.df <- dplyr::filter(table.transmission.net.cov, table.transmission.net.cov$GenderDon=="0")
women.df <- dplyr::filter(table.transmission.net.cov, table.transmission.net.cov$GenderDon=="1")
Age.groups.table <- NULL
# Helper: flatten a pair table into a data frame with one row per pair,
# carrying (recipient id/gender/age, donor id/gender/age).
# NOTE(review): index.v1 <- which(RecId == v1) returns ALL rows matching the
# recipient id; if a RecId appears more than once in table.dat.fr, age1/age2
# become length > 1 and the accumulated vectors go out of alignment with
# v1.dat/v2.dat. Assumes RecId is unique per table — TODO confirm.
filter.dat <- function(table.dat.fr = table.dat.fr){
v1.dat <- vector()
v2.dat <- vector()
age1.dat <- vector()
age2.dat <- vector()
gender1.dat <- vector()
gender2.dat <- vector()
for(i in 1:nrow(table.dat.fr)){
v1 <- table.dat.fr$RecId[i]
v2 <- table.dat.fr$DonId[i]
index.v1 <- which(table.dat.fr$RecId == v1)
age1 <- table.dat.fr$ageSampTimeRec[index.v1]
age2 <- table.dat.fr$ageSampTimeDon[index.v1]
gender1 <- table.dat.fr$GenderRec[index.v1]
gender2 <- table.dat.fr$GenderDon[index.v1]
v1.dat <- c(v1.dat, v1)
v2.dat <- c(v2.dat, v2)
age1.dat <- c(age1.dat, age1)
age2.dat <- c(age2.dat, age2)
gender1.dat <- c(gender1.dat, gender1)
gender2.dat <- c(gender2.dat, gender2)
}
age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat)
return(age.table)
}
age.table <- filter.dat(table.dat.fr = table.transmission.net.cov)
# men as donors
men.age.table.1 <- filter.dat(table.dat.fr = men.df) # dplyr::filter(age.table, age.table$gender1.dat==0)
# women as donors
women.age.table.1 <- filter.dat(table.dat.fr = women.df) # dplyr::filter(age.table, age.table$gender1.dat==1)
# In each of the six loops below, age1.dat is the RECIPIENT's age and
# age2.dat is the DONOR's age (see filter.dat). Pairs are binned by
# age1.dat's band and the donor age (age2.dat) is collected into the
# matching bucket; pairs outside all three bands are dropped.
# NOTE(review): in men.age.table.1 (male donors) age1.dat is the recipient's
# age, so a bucket named "men.15.25.*" is keyed on the RECIPIENT being
# 15-25 — confirm the intended row/column orientation of the tables built
# from these buckets.
# men 15.25 and women
men.15.25.women.15.25.1 <- vector()
men.15.25.women.25.40.1 <- vector()
men.15.25.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 15.25 and men
women.15.25.men.15.25.2 <- vector()
women.15.25.men.25.40.2 <- vector()
women.15.25.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 25.40 and women
men.25.40.women.15.25.1 <- vector()
men.25.40.women.25.40.1 <- vector()
men.25.40.women.40.50.1 <- vector()
if(nrow(men.age.table.1) > 1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 25.40 and men
women.25.40.men.15.25.2 <- vector()
women.25.40.men.25.40.2 <- vector()
women.25.40.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 40.50 and women
men.40.50.women.15.25.1 <- vector()
men.40.50.women.25.40.1 <- vector()
men.40.50.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 40.50 and men
women.40.50.men.15.25.2 <- vector()
women.40.50.men.25.40.2 <- vector()
women.40.50.men.40.50.2 <- vector()
if(nrow(women.age.table.1) > 1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# Aggregate: each male-by-female cell pools the male-donor bucket (.1) and
# the corresponding female-donor bucket (.2), i.e. both transmission
# directions for the same age pairing.
men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
# 3x3 count table: rows = male age band, columns = female age band.
Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
Age.groups.table <- as.table(Age.groups.table)
# Row-wise proportions (per male age band). A zero row total yields NaN
# cells; downstream code passes these through NA.handle.fun.
men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50))
men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50))
men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50))
prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
# Column-wise proportions (per female age band); note rows here are female
# bands and columns are male bands (transposed layout vs the table above).
women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25))
women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40))
women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50))
prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
# Directionality: man-to-woman (MtoW) tables use only the male-donor
# buckets (.1); woman-to-man (WtoM) tables below use only the female-donor
# buckets (.2).
men.15.25.women.15.25.MtoW <- c(men.15.25.women.15.25.1)
men.15.25.women.25.40.MtoW <- c(men.15.25.women.25.40.1)
men.15.25.women.40.50.MtoW <- c(men.15.25.women.40.50.1)
men.25.40.women.15.25.MtoW <- c(men.25.40.women.15.25.1)
men.25.40.women.25.40.MtoW <- c(men.25.40.women.25.40.1)
men.25.40.women.40.50.MtoW <- c(men.25.40.women.40.50.1)
men.40.50.women.15.25.MtoW <- c(men.40.50.women.15.25.1)
men.40.50.women.25.40.MtoW <- c(men.40.50.women.25.40.1)
men.40.50.women.40.50.MtoW <- c(men.40.50.women.40.50.1)
Age.groups.table.MtoW <- matrix(c(length(men.15.25.women.15.25.MtoW), length(men.15.25.women.25.40.MtoW), length(men.15.25.women.40.50.MtoW),
length(men.25.40.women.15.25.MtoW), length(men.25.40.women.25.40.MtoW), length(men.25.40.women.40.50.MtoW),
length(men.40.50.women.15.25.MtoW), length(men.40.50.women.25.40.MtoW), length(men.40.50.women.40.50.MtoW)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table.MtoW) <- c("Female.15.25.MtoW", "Female.25.40.MtoW", "Female.40.50.MtoW")
rownames(Age.groups.table.MtoW) <- c("Male.15.25.MtoW", "Male.25.40.MtoW", "Male.40.50.MtoW")
Age.groups.table.MtoW <- as.table(Age.groups.table.MtoW)
men.15.25.T.MtoW <- sum(length(men.15.25.women.15.25.MtoW), length(men.15.25.women.25.40.MtoW), length(men.15.25.women.40.50.MtoW))
men.25.40.T.MtoW <- sum(length(men.25.40.women.15.25.MtoW), length(men.25.40.women.25.40.MtoW), length(men.25.40.women.40.50.MtoW))
men.40.50.T.MtoW <- sum(length(men.40.50.women.15.25.MtoW), length(men.40.50.women.25.40.MtoW), length(men.40.50.women.40.50.MtoW))
prop.men.age.groups.table.MtoW <- matrix(c(length(men.15.25.women.15.25.MtoW)/men.15.25.T.MtoW, length(men.15.25.women.25.40.MtoW)/men.15.25.T.MtoW, length(men.15.25.women.40.50.MtoW)/men.15.25.T.MtoW,
length(men.25.40.women.15.25.MtoW)/men.25.40.T.MtoW, length(men.25.40.women.25.40.MtoW)/men.25.40.T.MtoW, length(men.25.40.women.40.50.MtoW)/men.25.40.T.MtoW,
length(men.40.50.women.15.25.MtoW)/men.40.50.T.MtoW, length(men.40.50.women.25.40.MtoW)/men.40.50.T.MtoW, length(men.40.50.women.40.50.MtoW)/men.40.50.T.MtoW),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table.MtoW) <- c("Female.15.25.MtoW", "Female.25.40.MtoW", "Female.40.50.MtoW")
rownames(prop.men.age.groups.table.MtoW) <- c("prop.Male.15.25.MtoW", "prop.Male.25.40.MtoW", "prop.Male.40.50.MtoW")
# Woman-to-man direction: rebuild the same 3x3 layout from the
# female-donor buckets (.2) only.
men.15.25.women.15.25.WtoM <- c(women.15.25.men.15.25.2)
men.15.25.women.25.40.WtoM <- c(women.25.40.men.15.25.2)
men.15.25.women.40.50.WtoM <- c(women.40.50.men.15.25.2)
men.25.40.women.15.25.WtoM <- c( women.15.25.men.25.40.2)
men.25.40.women.25.40.WtoM <- c(women.25.40.men.25.40.2)
men.25.40.women.40.50.WtoM <- c(women.40.50.men.25.40.2)
men.40.50.women.15.25.WtoM <- c(women.15.25.men.40.50.2)
men.40.50.women.25.40.WtoM <- c(women.25.40.men.40.50.2)
men.40.50.women.40.50.WtoM <- c(women.40.50.men.40.50.2)
Age.groups.table.WtoM <- matrix(c(length(men.15.25.women.15.25.WtoM), length(men.15.25.women.25.40.WtoM), length(men.15.25.women.40.50.WtoM),
length(men.25.40.women.15.25.WtoM), length(men.25.40.women.25.40.WtoM), length(men.25.40.women.40.50.WtoM),
length(men.40.50.women.15.25.WtoM), length(men.40.50.women.25.40.WtoM), length(men.40.50.women.40.50.WtoM)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table.WtoM) <- c("Female.15.25.WtoM", "Female.25.40.WtoM", "Female.40.50.WtoM")
rownames(Age.groups.table.WtoM) <- c("Male.15.25.WtoM", "Male.25.40.WtoM", "Male.40.50.WtoM")
Age.groups.table.WtoM <- as.table(Age.groups.table.WtoM)
men.15.25.T.WtoM <- sum(length(men.15.25.women.15.25.WtoM), length(men.15.25.women.25.40.WtoM), length(men.15.25.women.40.50.WtoM))
men.25.40.T.WtoM <- sum(length(men.25.40.women.15.25.WtoM), length(men.25.40.women.25.40.WtoM), length(men.25.40.women.40.50.WtoM))
men.40.50.T.WtoM <- sum(length(men.40.50.women.15.25.WtoM), length(men.40.50.women.25.40.WtoM), length(men.40.50.women.40.50.WtoM))
prop.men.age.groups.table.WtoM <- matrix(c(length(men.15.25.women.15.25.WtoM)/men.15.25.T.WtoM, length(men.15.25.women.25.40.WtoM)/men.15.25.T.WtoM, length(men.15.25.women.40.50.WtoM)/men.15.25.T.WtoM,
length(men.25.40.women.15.25.WtoM)/men.25.40.T.WtoM, length(men.25.40.women.25.40.WtoM)/men.25.40.T.WtoM, length(men.25.40.women.40.50.WtoM)/men.25.40.T.WtoM,
length(men.40.50.women.15.25.WtoM)/men.40.50.T.WtoM, length(men.40.50.women.25.40.WtoM)/men.40.50.T.WtoM, length(men.40.50.women.40.50.WtoM)/men.40.50.T.WtoM),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table.WtoM) <- c("Female.15.25.WtoM", "Female.25.40.WtoM", "Female.40.50.WtoM")
rownames(prop.men.age.groups.table.WtoM) <- c("prop.Male.15.25.WtoM", "prop.Male.25.40.WtoM", "prop.Male.40.50.WtoM")
# Bundle all results. Note: only the AGGREGATED female-side proportion
# table is returned; no women-side proportion tables are built for the
# MtoW/WtoM directions.
outputlist <- NULL
outputlist$Age.groups.table <- Age.groups.table
outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
outputlist$Age.groups.table.MtoW <- Age.groups.table.MtoW
outputlist$prop.men.age.groups.table.MtoW <- prop.men.age.groups.table.MtoW
outputlist$Age.groups.table.WtoM <- Age.groups.table.WtoM
outputlist$prop.men.age.groups.table.WtoM <- prop.men.age.groups.table.WtoM
outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
outputlist$mean.AD.age.groups <- mean.AD.age.groups
outputlist$med.AD.age.groups <- med.AD.age.groups
outputlist$sd.AD.age.groups <- sd.AD.age.groups
return(outputlist)
}
# 3. Results: true age structure table from transmission network of all selected individuals (people in the phylogenetic tree)
age.structure.transm.net.true.List <- age.groups.filtered.transmission.net.fun(table.transmission.net.cov = table.simpact.trans.net.cov,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50))
# (i) Aggregated tables of pairings
# Flatten the 3x3 table cell-by-cell into named scalars; x[i,][j] is
# equivalent to x[i,j] here (row i extracted, then element j).
age.structure.transm.net.true <- age.structure.transm.net.true.List$Age.groups.table
tree.tra.age.str.M.15.25.F.15.25 <- age.structure.transm.net.true[1,][1]
tree.tra.age.str.M.25.40.F.15.25 <- age.structure.transm.net.true[2,][1]
tree.tra.age.str.M.40.50.F.15.25 <- age.structure.transm.net.true[3,][1]
tree.tra.age.str.M.15.25.F.25.40 <- age.structure.transm.net.true[1,][2]
tree.tra.age.str.M.25.40.F.25.40 <- age.structure.transm.net.true[2,][2]
tree.tra.age.str.M.40.50.F.25.40 <- age.structure.transm.net.true[3,][2]
tree.tra.age.str.M.15.25.F.40.50 <- age.structure.transm.net.true[1,][3]
tree.tra.age.str.M.25.40.F.40.50 <- age.structure.transm.net.true[2,][3]
tree.tra.age.str.M.40.50.F.40.50 <- age.structure.transm.net.true[3,][3]
# Vector ordered column-wise: all male bands for F.15.25, then F.25.40,
# then F.40.50.
table.tree.tra.age.str <- c(tree.tra.age.str.M.15.25.F.15.25, tree.tra.age.str.M.25.40.F.15.25, tree.tra.age.str.M.40.50.F.15.25,
tree.tra.age.str.M.15.25.F.25.40, tree.tra.age.str.M.25.40.F.25.40, tree.tra.age.str.M.40.50.F.25.40,
tree.tra.age.str.M.15.25.F.40.50, tree.tra.age.str.M.25.40.F.40.50, tree.tra.age.str.M.40.50.F.40.50)
names(table.tree.tra.age.str) <- c("tree.tra.M.15.25.F.15.25", "tree.tra.M.25.40.F.15.25", "tree.tra.M.40.50.F.15.25",
"tree.tra.M.15.25.F.25.40", "tree.tra.M.25.40.F.25.40", "tree.tra.M.40.50.F.25.40",
"tree.tra.M.15.25.F.40.50", "tree.tra.M.25.40.F.40.50", "tree.tra.M.40.50.F.40.50")
# (ii) Pairings table with men to women infection: directionality
age.structure.transm.net.true.MtoW <- age.structure.transm.net.true.List$Age.groups.table.MtoW
tree.tra.age.str.MtoW.M.15.25.F.15.25 <- age.structure.transm.net.true.MtoW[1,][1]
tree.tra.age.str.MtoW.M.25.40.F.15.25 <- age.structure.transm.net.true.MtoW[2,][1]
tree.tra.age.str.MtoW.M.40.50.F.15.25 <- age.structure.transm.net.true.MtoW[3,][1]
tree.tra.age.str.MtoW.M.15.25.F.25.40 <- age.structure.transm.net.true.MtoW[1,][2]
tree.tra.age.str.MtoW.M.25.40.F.25.40 <- age.structure.transm.net.true.MtoW[2,][2]
tree.tra.age.str.MtoW.M.40.50.F.25.40 <- age.structure.transm.net.true.MtoW[3,][2]
tree.tra.age.str.MtoW.M.15.25.F.40.50 <- age.structure.transm.net.true.MtoW[1,][3]
tree.tra.age.str.MtoW.M.25.40.F.40.50 <- age.structure.transm.net.true.MtoW[2,][3]
tree.tra.age.str.MtoW.M.40.50.F.40.50 <- age.structure.transm.net.true.MtoW[3,][3]
table.tree.tra.age.str.MtoW <- c(tree.tra.age.str.MtoW.M.15.25.F.15.25, tree.tra.age.str.MtoW.M.25.40.F.15.25, tree.tra.age.str.MtoW.M.40.50.F.15.25,
tree.tra.age.str.MtoW.M.15.25.F.25.40, tree.tra.age.str.MtoW.M.25.40.F.25.40, tree.tra.age.str.MtoW.M.40.50.F.25.40,
tree.tra.age.str.MtoW.M.15.25.F.40.50, tree.tra.age.str.MtoW.M.25.40.F.40.50, tree.tra.age.str.MtoW.M.40.50.F.40.50)
names(table.tree.tra.age.str.MtoW) <- c("tree.tra.MtoW.M.15.25.F.15.25", "tree.tra.MtoW.M.25.40.F.15.25", "tree.tra.MtoW.M.40.50.F.15.25",
"tree.tra.MtoW.M.15.25.F.25.40", "tree.tra.MtoW.M.25.40.F.25.40", "tree.tra.MtoW.M.40.50.F.25.40",
"tree.tra.MtoW.M.15.25.F.40.50", "tree.tra.MtoW.M.25.40.F.40.50", "tree.tra.MtoW.M.40.50.F.40.50")
# (iii) Pairings table with women to men infection: directionality
age.structure.transm.net.true.WtoM <- age.structure.transm.net.true.List$Age.groups.table.WtoM
tree.tra.age.str.WtoM.M.15.25.F.15.25 <- age.structure.transm.net.true.WtoM[1,][1]
tree.tra.age.str.WtoM.M.25.40.F.15.25 <- age.structure.transm.net.true.WtoM[2,][1]
tree.tra.age.str.WtoM.M.40.50.F.15.25 <- age.structure.transm.net.true.WtoM[3,][1]
tree.tra.age.str.WtoM.M.15.25.F.25.40 <- age.structure.transm.net.true.WtoM[1,][2]
tree.tra.age.str.WtoM.M.25.40.F.25.40 <- age.structure.transm.net.true.WtoM[2,][2]
tree.tra.age.str.WtoM.M.40.50.F.25.40 <- age.structure.transm.net.true.WtoM[3,][2]
tree.tra.age.str.WtoM.M.15.25.F.40.50 <- age.structure.transm.net.true.WtoM[1,][3]
tree.tra.age.str.WtoM.M.25.40.F.40.50 <- age.structure.transm.net.true.WtoM[2,][3]
tree.tra.age.str.WtoM.M.40.50.F.40.50 <- age.structure.transm.net.true.WtoM[3,][3]
table.tree.tra.age.str.WtoM <- c(tree.tra.age.str.WtoM.M.15.25.F.15.25, tree.tra.age.str.WtoM.M.25.40.F.15.25, tree.tra.age.str.WtoM.M.40.50.F.15.25,
tree.tra.age.str.WtoM.M.15.25.F.25.40, tree.tra.age.str.WtoM.M.25.40.F.25.40, tree.tra.age.str.WtoM.M.40.50.F.25.40,
tree.tra.age.str.WtoM.M.15.25.F.40.50, tree.tra.age.str.WtoM.M.25.40.F.40.50, tree.tra.age.str.WtoM.M.40.50.F.40.50)
names(table.tree.tra.age.str.WtoM) <- c("tree.tra.WtoM.M.15.25.F.15.25", "tree.tra.WtoM.M.25.40.F.15.25", "tree.tra.WtoM.M.40.50.F.15.25",
"tree.tra.WtoM.M.15.25.F.25.40", "tree.tra.WtoM.M.25.40.F.25.40", "tree.tra.WtoM.M.40.50.F.25.40",
"tree.tra.WtoM.M.15.25.F.40.50", "tree.tra.WtoM.M.25.40.F.40.50", "tree.tra.WtoM.M.40.50.F.40.50")
# (iv) Men's pairings proportions in aggregated table
age.structure.transm.net.true.prop.men <- age.structure.transm.net.true.List$prop.men.age.groups.table
tree.trans.true.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men[1,][1]
tree.trans.true.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men[2,][1]
tree.trans.true.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men[3,][1]
tree.trans.true.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men[1,][2]
tree.trans.true.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men[2,][2]
tree.trans.true.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men[3,][2]
tree.trans.true.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men[1,][3]
tree.trans.true.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men[2,][3]
tree.trans.true.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men[3,][3]
table.tree.trans.true.age.str.prop.men <- c(tree.trans.true.age.str.prop.men.15.25.F.15.25, tree.trans.true.age.str.prop.men.25.40.F.15.25, tree.trans.true.age.str.prop.men.40.50.F.15.25,
tree.trans.true.age.str.prop.men.15.25.F.25.40, tree.trans.true.age.str.prop.men.25.40.F.25.40, tree.trans.true.age.str.prop.men.40.50.F.25.40,
tree.trans.true.age.str.prop.men.15.25.F.40.50, tree.trans.true.age.str.prop.men.25.40.F.40.50, tree.trans.true.age.str.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.prop.men) <- c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50")
# Replace NaN proportions (zero row totals) before downstream use.
table.tree.trans.true.age.str.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.prop.men)
# (v) Men's pairings proportions in men to women infection: directionality
age.structure.transm.net.true.prop.men.MtoW <- age.structure.transm.net.true.List$prop.men.age.groups.table.MtoW
tree.trans.true.age.str.MtoW.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[1,][1]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[2,][1]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[3,][1]
tree.trans.true.age.str.MtoW.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[1,][2]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[2,][2]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[3,][2]
tree.trans.true.age.str.MtoW.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[1,][3]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[2,][3]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[3,][3]
table.tree.trans.true.age.str.MtoW.prop.men <- c(tree.trans.true.age.str.MtoW.prop.men.15.25.F.15.25, tree.trans.true.age.str.MtoW.prop.men.25.40.F.15.25, tree.trans.true.age.str.MtoW.prop.men.40.50.F.15.25,
tree.trans.true.age.str.MtoW.prop.men.15.25.F.25.40, tree.trans.true.age.str.MtoW.prop.men.25.40.F.25.40, tree.trans.true.age.str.MtoW.prop.men.40.50.F.25.40,
tree.trans.true.age.str.MtoW.prop.men.15.25.F.40.50, tree.trans.true.age.str.MtoW.prop.men.25.40.F.40.50, tree.trans.true.age.str.MtoW.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.MtoW.prop.men) <- paste0("MtoW.", c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50"))
table.tree.trans.true.age.str.MtoW.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.MtoW.prop.men)
# (vi) Men's pairings proportions in women to men infection: directionality
age.structure.transm.net.true.prop.men.WtoM <- age.structure.transm.net.true.List$prop.men.age.groups.table.WtoM
tree.trans.true.age.str.WtoM.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[1,][1]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[2,][1]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[3,][1]
tree.trans.true.age.str.WtoM.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[1,][2]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[2,][2]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[3,][2]
tree.trans.true.age.str.WtoM.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[1,][3]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[2,][3]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[3,][3]
table.tree.trans.true.age.str.WtoM.prop.men <- c(tree.trans.true.age.str.WtoM.prop.men.15.25.F.15.25, tree.trans.true.age.str.WtoM.prop.men.25.40.F.15.25, tree.trans.true.age.str.WtoM.prop.men.40.50.F.15.25,
tree.trans.true.age.str.WtoM.prop.men.15.25.F.25.40, tree.trans.true.age.str.WtoM.prop.men.25.40.F.25.40, tree.trans.true.age.str.WtoM.prop.men.40.50.F.25.40,
tree.trans.true.age.str.WtoM.prop.men.15.25.F.40.50, tree.trans.true.age.str.WtoM.prop.men.25.40.F.40.50, tree.trans.true.age.str.WtoM.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.WtoM.prop.men) <- paste0("WtoM.", c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50"))
table.tree.trans.true.age.str.WtoM.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.WtoM.prop.men)
# (vii) Womens' pairings proportions in aggregated table
# Note: this table's rows are FEMALE bands and columns are MALE bands
# (transposed layout relative to the prop.men tables above).
age.structure.transm.clust.true.prop.women <- age.structure.transm.net.true.List$prop.women.age.groups.table
tree.trans.true.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.true.prop.women[1,][1]
tree.trans.true.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.true.prop.women[2,][1]
tree.trans.true.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.true.prop.women[3,][1]
tree.trans.true.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.true.prop.women[1,][2]
tree.trans.true.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.true.prop.women[2,][2]
tree.trans.true.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.true.prop.women[3,][2]
tree.trans.true.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.true.prop.women[1,][3]
tree.trans.true.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.true.prop.women[2,][3]
tree.trans.true.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.true.prop.women[3,][3]
table.tree.trans.true.age.str.prop.women <- c(tree.trans.true.age.str.prop.women.15.25.M.15.25, tree.trans.true.age.str.prop.women.25.40.M.15.25, tree.trans.true.age.str.prop.women.40.50.M.15.25,
tree.trans.true.age.str.prop.women.15.25.M.25.40, tree.trans.true.age.str.prop.women.25.40.M.25.40, tree.trans.true.age.str.prop.women.40.50.M.25.40,
tree.trans.true.age.str.prop.women.15.25.M.40.50, tree.trans.true.age.str.prop.women.25.40.M.40.50, tree.trans.true.age.str.prop.women.40.50.M.40.50)
names(table.tree.trans.true.age.str.prop.women) <- c("tree.trans.true.prop.women15.25.M.15.25", "tree.trans.true.prop.women25.40.M.15.25", "tree.trans.true.prop.women40.50.M.15.25",
"tree.trans.true.prop.women15.25.M.25.40", "tree.trans.true.prop.women25.40.M.25.40", "tree.trans.true.prop.women40.50.M.25.40",
"tree.trans.true.prop.women15.25.M.40.50", "tree.trans.true.prop.women25.40.M.40.50", "tree.trans.true.prop.women40.50.M.40.50")
table.tree.trans.true.age.str.prop.women <- NA.handle.fun(input = table.tree.trans.true.age.str.prop.women)
#
# Per-age-group counts and age-difference summaries for the true network.
numbers.individuals.age.groups.net <- age.structure.transm.net.true.List$numbers.individuals.age.groups
mean.AD.age.groups.net <- age.structure.transm.net.true.List$mean.AD.age.groups
med.AD.age.groups.net <- age.structure.transm.net.true.List$med.AD.age.groups
sd.AD.age.groups.net <- age.structure.transm.net.true.List$sd.AD.age.groups
names(numbers.individuals.age.groups.net) <- paste0("tree.trans.", names(numbers.individuals.age.groups.net))
names(mean.AD.age.groups.net) <- paste0("tree.trans.", names(mean.AD.age.groups.net))
names(med.AD.age.groups.net) <- paste0("tree.trans.", names(med.AD.age.groups.net))
names(sd.AD.age.groups.net) <- paste0("tree.trans.", names(sd.AD.age.groups.net))
res3 <- c(table.tree.tra.age.str, table.tree.tra.age.str.MtoW, table.tree.tra.age.str.WtoM,
table.tree.trans.true.age.str.prop.men, table.tree.trans.true.age.str.MtoW.prop.men,
table.tree.trans.true.age.str.WtoM.prop.men, table.tree.trans.true.age.str.prop.women,
numbers.individuals.age.groups.net, mean.AD.age.groups.net, med.AD.age.groups.net, sd.AD.age.groups.net)
# Clusters statistics
mean.clust.size <- mean(clust.size)
median.clust.size <- median(clust.size)
sd.clust.size <- sd(clust.size)
clust.size.stat <- c(mean.clust.size, median.clust.size, sd.clust.size)
names(clust.size.stat) <- c("mean.cl.size", "med.cl.size", "sd.cl.size")
# Binding everything together
output.num.vec <- as.numeric(c(res1, res2, res3, mix.rels.transm.dat, clust.size.stat))
names.output.vec <- names(c(res1, res2, res3, mix.rels.transm.dat, clust.size.stat))
names(output.num.vec) <- names.output.vec
}else{
output.num.vec <- rep(NA, 198)
}
return(output.num.vec)
}
| /age.mixing.MAR.fun_CD4_VL.R | no_license | niyukuri/age_mixing_AD_clusters | R | false | false | 139,171 | r | #' Computing age mixing measurements in transmission clusters in missing completly at random scenarios
#'
#' @param simpact.trans.net Transmission network and record produced by \code{\link{advanced.transmission.network.builder()}}
#' @param datalist.agemix Data list of simpact output produced by \code{\link{readthedata()}}
#' @param work.dir Working directory
#' @param dirfasttree Directory where fastTree soaftware is called from
#' @param sub.dir.rename Sub-directory where simpact output are stored
#' @param limitTransmEvents Consider a transmission network which counts individuals more than the value (numeric)
#' @param timewindow Time window in which the experience is carried out
#' @param seq.cov Sequence coverage
#' @param seq.gender.ratio Proportion of women in the selected population (women/(women + men))
#' @param age.group.15.25 Consider individuals with age greater than 15 and less than 25
#' @param age.group.25.40 Consider individuals with age greater than 25 and less than 40
#' @param age.group.40.50 Consider individuals with age greater than 40 and less than 50
#' @param cut.off Cut off value for constructing pairings based on tMRCA
#' @export
# The outputs of the function are
# (i) as observed in transmisison network constructed from transmission clusters
# - table of numbers of age structured pairings between female/male across different age groups from the transmission clusters
# - table of proportion of men in a give age group who are paired to women of a given age group
# - table of proportion of women in a give age group who are paired to men of a given age group
# - numbers of men, and women in the three age groups
# - mean, median, and standard deviation of age gap between individuals in different age groups
# (e.g.: women aged between 15 and 25 who are paired to men regardless age group of men)
# table.cl.age.str, table.cl.age.str.prop.men, table.cl.age.str.prop.women,
# numbers.individuals.age.groups.cl,
# mean.AD.age.groups.cl, med.AD.age.groups.cl,
# sd.AD.age.groups.cl
# (ii) true values of the above mentioned measurements as observed in transmission network record
# table.cl.true.age.str, table.cl.true.age.str.prop.men, table.cl.true.age.str.prop.women,
# numbers.individuals.age.groups.true.cl,
# mean.AD.age.groups.true.cl, med.AD.age.groups.true.cl,
# sd.AD.age.groups.true.cl
# (iii) true values of the above mentioned measurements as observed in transmission network record but for the entire phylogenetic tree
# note: not all leaves of a phylogenetic tree belong to transmisison clusters!
# Directionality is labelled "MtoW" for infection from man to woman and "WtoM" for vice versa
# - table of numbers of age structured pairings between female/male across different age groups
# - table of numbers of age structured pairings between female/male across different age groups with men to women infections
# - table of numbers of age structured pairings between female/male across different age groups with women to men infections
# - table of proportion of men in a give age group who are paired to women of a given age group
# - table of proportion of men in a give age group who are paired to women of a given age group with men to women infections
# - table of proportion of men in a give age group who are paired to women of a given age group with women to men infections
# - table of proportion of women in a give age group who are paired to men of a given age group
# - numbers of men, and women in the three age groups
# - mean, median, and standard deviation of age gap between individuals in different age groups
# (e.g.: women aged between 15 and 25 who are paired to men regardless age group of men)
# table.tree.tra.age.str, table.tree.tra.age.str.MtoW, table.tree.tra.age.str.WtoM,
# table.tree.trans.true.age.str.prop.men, table.tree.trans.true.age.str.MtoW.prop.men,
# table.tree.trans.true.age.str.WtoM.prop.men, table.tree.trans.true.age.str.prop.women,
# numbers.individuals.age.groups.net, mean.AD.age.groups.net, med.AD.age.groups.net, sd.AD.age.groups.net
# (iv) True age mixing patterns in relationships
# "T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male"
# "T.p.prev.6months.m", # "T.p.prev.6months.f",
# (iv) Transmission clusters
# - mean, median, and standard deviation of transmission cluster sizes
age.mixing.MAR.fun <- function(simpact.trans.net = simpact.trans.net.adv,
datalist.agemix = datalist.agemix,
work.dir = work.dir,
dirfasttree = dirfasttree,
sub.dir.rename = sub.dir.rename,
limitTransmEvents = 7,
timewindow = c(30,40),
seq.cov = 35,
seq.gender.ratio = 0.7,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50),
cut.off = 7){
# source("~/phylosimpact_simulation_studies_2018/stress_testing/needed.functions.RSimpactHelp.R")
# source("/home/niyukuri/Dropbox/25.10.2018.age.mix2/age_mixing_large_AD/needed.functions.RSimpactHelp.R")
source("/home/dniyukuri/lustre/age_mixing_large_AD/needed.functions.RSimpactHelp.R")
# sys.source("/home/dniyukuri/lustre/age_mixing_large_AD/needed.functions.RSimpactHelp.R", env = .GlobalEnv, keep.source = TRUE)
# Data list of infected individuals
# Select IDs in MCAR scenario
simpact.trans.net <- simpact.trans.net
limitTransmEvents <- limitTransmEvents
timewindow <- timewindow
seq.cov <- seq.cov
age.group.40.50 <- age.group.40.50
mAr.IDs <- IDs.Seq.Age.Groups(simpact.trans.net = simpact.trans.net,
limitTransmEvents = limitTransmEvents,
timewindow = timewindow,
seq.cov = seq.cov,
seq.gender.ratio = seq.gender.ratio,
age.group.15.25 = age.group.15.25,
age.group.25.40 = age.group.25.40,
age.group.40.50 = age.group.40.50)
if(length(mAr.IDs) >= 20){
simpact.trans.net.adv <- simpact.trans.net
# Transmission network table as from transmission networks for further steps
############################################################################
infectionTable <- vector("list", length(simpact.trans.net.adv))
for(j in 1:length(simpact.trans.net.adv)){
p <- j
trans.network.i <- as.data.frame(simpact.trans.net.adv[[p]])
# trans.network.i <- trans.network.i[-1,]
id.lab <- paste0(p,".",trans.network.i$id,".C")
trans.network.i$id.lab <- id.lab
trans.network.i$ageSampTimeRec <- trans.network.i$SampTime - trans.network.i$TOBRec
infectionTable[[p]] <- trans.network.i
}
infecttable <- rbindlist(infectionTable)
table.simpact.trans.net.adv <- infecttable # rbindlist(simpact.trans.net.adv)
Study.DataTable <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%mAr.IDs)
IDs.study <- Study.DataTable$RecId
transm.datalist.agemix <- datalist.agemix # assign full data set new age mix data set
# Transmission table of selected individuals
table.simpact.trans.net.cov <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%mAr.IDs)
# Person table of selected individuals
transm.datalist.agemix$ptable <- dplyr::filter(transm.datalist.agemix$ptable, transm.datalist.agemix$ptable$ID%in%IDs.study)
# (i) Age mixing in relationships
#
agemix.rels.transm.df <- agemix.df.maker(transm.datalist.agemix)
#
agemix.model <- pattern.modeller(dataframe = agemix.rels.transm.df,
agegroup = c(15, 50),
timepoint = 40, # transm.datalist.agemix$itable$population.simtime[1],
timewindow = 10)#1)#3)
#
# # men.lme <- tryCatch(agemixing.lme.fitter(data = dplyr::filter(agemix.model[[1]], Gender =="male")),
# # error = agemixing.lme.errFunction) # Returns an empty list if the lme model can't be fitted
#
# men.lmer <- ampmodel(data = dplyr::filter(agemix.model[[1]], Gender =="male"))
data = dplyr::filter(agemix.model[[1]], Gender =="male")
if( nrow(data) > length(unique(data$ID)) & length(unique(data$ID)) > 1 ){
men.lmer <- lmer(pagerelform ~ agerelform0 + (1 | ID),
data = dplyr::filter(agemix.model[[1]], Gender =="male"),
REML = TRUE,
control=lmerControl(check.nobs.vs.nlev = "ignore",
check.nobs.vs.rankZ = "ignore",
check.nobs.vs.nRE="ignore"))
bignumber <- NA # let's try if NA works (instead of 9999 for example)
AAD.male <- ifelse(length(men.lmer) > 0, mean(dplyr::filter(agemix.model[[1]], Gender =="male")$AgeGap), bignumber)
SDAD.male <- ifelse(length(men.lmer) > 0, sd(dplyr::filter(agemix.model[[1]], Gender =="male")$AgeGap), bignumber)
#powerm <- ifelse(length(men.lme) > 0, as.numeric(attributes(men.lme$apVar)$Pars["varStruct.power"]), bignumber)
slope.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$coefficients[2, 1], bignumber) #summary(men.lmer)$tTable[2, 1], bignumber)
WSD.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$sigma, bignumber) #WVAD.base <- ifelse(length(men.lme) > 0, men.lme$sigma^2, bignumber)
BSD.male <- ifelse(length(men.lmer) > 0, bvar(men.lmer), bignumber) # Bad name for the function because it actually extracts between subject standard deviation # BVAD <- ifelse(length(men.lmer) > 0, getVarCov(men.lme)[1,1], bignumber)
intercept.male <- ifelse(length(men.lmer) > 0, summary(men.lmer)$coefficients[1,1] - 15, bignumber)
# c(AAD.male, SDAD.male, slope.male, WSD.male, BSD.male, intercept.male)
## AAD: average age difference across all relationship
## VAD: variance of these age differences
## SDAD: standard deviation of age differences
## BSD: between-subject standard deviation of age differences
mix.rels.transm.dat <- c(AAD.male, SDAD.male, slope.male, WSD.male, BSD.male, intercept.male)
names(mix.rels.transm.dat) <- c("T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male")
}else{
mix.rels.transm.dat <- rep(NA, 6)
names(mix.rels.transm.dat) <- c("T.AAD.male", "T.SDAD.male", "T.slope.male", "T.WSD.male", "T.BSD.male", "T.intercept.male")
}
# age.scatter.df <- agemix.model[[1]]
# (ii) Point prevalence of concurrency in the adult population:
# Concurrency point prevalence 6 months before a survey, among men
pp.cp.6months.male.transm <- tryCatch(concurr.pointprev.calculator(datalist = transm.datalist.agemix,
timepoint = 40 - 0.5),
error=function(e) return(rep(NA, 1)))
#
# pp.cp.6months.male.transm <- tryCatch(concurr.pointprev.calculator(datalist = transm.datalist.agemix,
# timepoint = 40 - 0.5) %>%
# dplyr::select(concurr.pointprev) %>%
# dplyr::slice(1) %>%
# as.numeric(),
# error=function(e) return(rep(NA, 1)))
#
# pp.cp.6months.female.transm <- concurr.pointprev.calculator(datalist = transm.datalist.agemix,
# timepoint = 40 - 0.5) %>%
# dplyr::select(concurr.pointprev) %>%
# dplyr::slice(2) %>%
# as.numeric()
#
# (iii) Prevalence
hiv.prev.lt25.women <-prevalence.calculator(datalist = datalist.agemix,
agegroup = c(15, 25),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(2) %>%
as.numeric()
hiv.prev.lt25.men <- prevalence.calculator(datalist = datalist.agemix,
agegroup = c(15, 25),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(1) %>%
as.numeric()
hiv.prev.25.40.women <- prevalence.calculator(datalist = datalist.agemix,
agegroup = c(25, 40),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(2) %>%
as.numeric()
hiv.prev.25.40.men <- prevalence.calculator(datalist = datalist.agemix,
agegroup = c(25, 40),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(1) %>%
as.numeric()
hiv.prev.40.50.women <- prevalence.calculator(datalist = datalist.agemix,
agegroup = c(40, 50),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(2) %>%
as.numeric()
hiv.prev.40.50.men <- prevalence.calculator(datalist = datalist.agemix,
agegroup = c(40, 50),
timepoint = 40) %>%
dplyr::select(pointprevalence) %>%
dplyr::slice(1) %>%
as.numeric()
# (iv) Incidence
epi.transm.incidence.df.15.24.men <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(15, 25),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(1) %>%
as.numeric()
epi.transm.incidence.df.15.24.women <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(15, 25),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(2) %>%
as.numeric()
epi.transm.incidence.df.25.39.men <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(25, 40),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(1) %>%
as.numeric()
epi.transm.incidence.df.25.39.women <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(25, 40),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(2) %>%
as.numeric()
epi.transm.incidence.df.40.49.men <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(40, 50),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(1) %>%
as.numeric()
epi.transm.incidence.df.40.49.women <- incidence.calculator(datalist = datalist.agemix,
agegroup = c(40, 50),
timewindow = timewindow,
only.active = "No") %>%
dplyr::select(incidence) %>%
dplyr::slice(2) %>%
as.numeric()
summary.epidemic.transm.df <- c(hiv.prev.lt25.women, hiv.prev.lt25.men,
hiv.prev.25.40.women, hiv.prev.25.40.men,
hiv.prev.40.50.women, hiv.prev.40.50.men,
mix.rels.transm.dat,
pp.cp.6months.male.transm, # pp.cp.6months.female.transm,
epi.transm.incidence.df.15.24.men, epi.transm.incidence.df.15.24.women,
epi.transm.incidence.df.25.39.men, epi.transm.incidence.df.25.39.women,
epi.transm.incidence.df.40.49.men, epi.transm.incidence.df.40.49.women)
names(summary.epidemic.transm.df) <- c("T.prev.15.25.w", "T.prev.15.25.m", "T.prev.25.40.w", "T.prev.25.40.m", "T.prev.40.50.w", "T.prev.40.50.m",
names(mix.rels.transm.dat),
"T.p.prev.6months.m", # "T.p.prev.6months.f",
"T.inc.15.25.m", "T.inc.15.25.w", "T.inc.25.40.m", "T.inc.25.40.w", "T.inc.40.50.m", "T.inc.40.50.w")
# Function to handle NAs
NA.handle.fun <- function(input=input){
v.names <- names(input)
v <- as.numeric(input)
v.vec <- vector()
for(i in 1:length(v)){
v.i <- v[i]
if(is.na(v.i)==TRUE){
v.j <- 0
}else{
v.j <- v.i
}
v.vec <- c(v.vec, v.j)
}
names(v.vec) <- v.names
return(v.vec)
}
######################################
# Step 5: Building phylogenetic tree #
######################################
dirfasttree <- work.dir
# Select sequences from the pool of alignment
##############################################
choose.sequence.ind(pool.seq.file = paste0(sub.dir.rename,"/C.Epidemic.fas"),
select.vec = mAr.IDs,
name.file = paste0(sub.dir.rename,"/",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta")))
# Build and calibrate the phylogenetic tree
############################################
mAr.IDs.tree.calib <- phylogenetic.tree.fasttree.par(dir.tree = dirfasttree,
sub.dir.rename = sub.dir.rename,
fasttree.tool = "FastTreeMP",
calendar.dates = "samplingtimes.all.csv",
simseqfile = paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),
count.start = 1977,
endsim = 40,
clust = TRUE)
N <- node.age(mAr.IDs.tree.calib)
# Time to MRCA: internal nodes ages
int.node.age <- N$Ti
latest.samp <- N$timeToMRCA+N$timeOfMRCA # latest sampling date
mrca.v <- mrca(mAr.IDs.tree.calib, full = FALSE) # MRCA ids
sampling.dates <- read.csv(paste0(sub.dir.rename,"/samplingtimes.all.csv")) # sampling times
#
# tree.cal.cov.35.IDs <- read.tree(paste0(sub.dir.rename, paste0("/calibrated.tree.cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta.tree")))
#
# Compute transmission clusters
###############################
# run ClusterPicker
system(paste("java -jar ", paste(paste0(work.dir,"/ClusterPicker_1.2.3.jar"), paste0(sub.dir.rename,"/", paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta")), paste0(sub.dir.rename,"/",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta.nwk")), paste0("0.9 0.9 0.045 2 gap"))))
# Read clusters' files
dd <- list.files(path = paste0(sub.dir.rename), pattern = paste0(paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),"_",paste0("cov.",seq.cov, ".mAr.IDs.C.Epidemic.Fasta"),"_","clusterPicks_cluste"),
all.files = FALSE,
full.names = FALSE, recursive = FALSE)
# Transmission clusters.
d <- clust.names <- dd
data.list.simpact.trans.net.adv <- vector("list", length(d)) # list() # initialise gender and age-structured data table of pairings in each transission cluster
# Transmission table of individuals in the transmission clusters
#################################################################
# Binding all data tables of clusters as these information are captured in transmission networks
clust.size <- vector() # size of each cluster # table.simpact.trans.net.adv
transm.df <- table.simpact.trans.net.adv
for (i in 1:length(d)) {
transm.df.cl.dat <- NULL
clus.read <- read.table(file = paste0(paste0(sub.dir.rename,"/"),d[i]), header = FALSE) # Ids of each cluster
clust.size <- c(clust.size, nrow(clus.read))
data.table.simpact.trans.net.i <- subset(transm.df, transm.df$id.lab%in%as.character(clus.read$V1)) # transmission data table of IDs of that cluster
data.table.simpact.trans.net.i$clust.ID <- rep(i, nrow(data.table.simpact.trans.net.i))
data.list.simpact.trans.net.adv[[i]] <- as.data.frame(data.table.simpact.trans.net.i)
}
data.table.simpact.trans.clusts.net.adv <- as.data.frame(do.call(rbind, data.list.simpact.trans.net.adv)) # data.table & data.frame
data.table.simpact.trans.clusts.net.adv <- data.table.simpact.trans.clusts.net.adv[!duplicated(data.table.simpact.trans.clusts.net.adv[c("id","id.lab")]),] # remove duplicate id.lab
# It may happen that one seq.ID appears in more than one cluster
# data.table.simpact.trans.clusts.net.adv <- data.table.simpact.trans.net.adv
## Aligning internal node IDs with their ages: the two vectors must have the same length
ancestor <- Ancestors(mAr.IDs.tree.calib) # ancestors of each tips and internal node
# All ancestors output are internal nodes
ancestor.v <- vector()
for(i in 1:length(ancestor)){
k <- ancestor[[i]]
ancestor.v <- c(ancestor.v, unique(k))
}
sort.int.ancestor <- unique(sort(ancestor.v))
sort.int.node.age <- sort(int.node.age)
tip.names <- names(mrca.v[1,])
dates.tree.df <- dplyr::filter(sampling.dates, sampling.dates$V1%in%tip.names) # dates of these tips
# rearrange dates in tips order as are displayed on the tree
tip.names.f <- vector()
dates.tree.dat <- vector()
for(i in 1:nrow(dates.tree.df)){
for(j in 1:length(tip.names)){
if(tip.names[i] == dates.tree.df$V1[[j]]){
tip.names.f <- c(tip.names.f, tip.names[i])
dates.tree.dat <- c(dates.tree.dat, 1977+40-dates.tree.df$V2[[j]])
}
}
}
dates.tree.named <- dates.tree.dat
names(dates.tree.named) <- tip.names.f
# MRCA matrix
#############
# make mrca matrix diagonal 0 and other elements (internal nodes IDs) assign them the age of mrca
mrca.v.age <- mrca.v
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i==j){
mrca.v.age[i,j] <- 0
}else{
if(mrca.v[i,j] %in% sort.int.ancestor){
p.index <- which(sort.int.ancestor == mrca.v[i,j])
mrca.v.age[i,j] <- sort.int.node.age[p.index]
}
}
}
}
# make mrca matrix elements: sampling date - age of mrca
# Fist contingency matrix
mrca.v.age.samp <- mrca.v.age
mrca.v.age.samp.cont1 <- mrca.v.age.samp
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i!=j){
i.dat <- tip.names.f[i]
v.index <- which(tip.names.f == i.dat)
samp.date.tip <- dates.tree.dat[v.index]
mrca.v.age.samp.cont1[i,] <- samp.date.tip - mrca.v.age.samp[i,]
}
}
}
# Second contingency matrix
mrca.v.age.samp <- mrca.v.age
mrca.v.age.samp.cont2 <- mrca.v.age.samp
for(i in 1:nrow(mrca.v.age)){
for(j in 1:nrow(mrca.v.age)){
if(i!=j){
i.dat <- tip.names.f[i]
v.index <- which(tip.names.f == i.dat)
samp.date.tip <- dates.tree.dat[v.index]
mrca.v.age.samp.cont2[,i] <- samp.date.tip - mrca.v.age.samp[,i]
}
}
}
# Diagonal zero for mrca.v.age.samp.cont1 and mrca.v.age.samp.cont2
for(i in 1:nrow(mrca.v.age.samp.cont1)){
for(j in 1:nrow(mrca.v.age.samp.cont1)){
if(i==j){
mrca.v.age.samp.cont1[i,j] <- 0
}
}
}
for(i in 1:nrow(mrca.v.age.samp.cont2)){
for(j in 1:nrow(mrca.v.age.samp.cont2)){
if(i==j){
mrca.v.age.samp.cont2[i,j] <- 0
}
}
}
# filter table.simpact.trans.net.adv and remain with table of tips names (individulas in the tree)
attributes.table.simpact.trans.net.adv <- dplyr::filter(table.simpact.trans.net.adv, table.simpact.trans.net.adv$id.lab%in%tip.names)
V.gender <- vector()
V.cd4 <- vector()
V.vl <- vector()
V.x <- vector()
V.y <- vector()
iD <- vector()
for(i in 1:length(tip.names)){
for(j in 1:nrow(attributes.table.simpact.trans.net.adv)){
if(tip.names[i] == attributes.table.simpact.trans.net.adv$id.lab[j]){
V.gender <- c(V.gender, attributes.table.simpact.trans.net.adv$GenderRec[j])
V.cd4 <- c(V.cd4, attributes.table.simpact.trans.net.adv$cd4[j])
V.vl <- c(V.vl, attributes.table.simpact.trans.net.adv$vl[j])
V.x <- c(V.x, attributes.table.simpact.trans.net.adv$location.x[j])
V.y <- c(V.y, attributes.table.simpact.trans.net.adv$location.y[j])
iD <- c(iD, tip.names[i])
}
}
}
Node.gender.cd4.vl.x.y <- data.table(V.gender,V.cd4, V.vl, V.x, V.y, iD)
# Adding clusters ID on the previous attributes table from attributes.table.simpact.trans.net.adv
clust.ID.vec <- vector()
id.vec <- vector()
for(k in 1:nrow(Node.gender.cd4.vl.x.y)){ # attributes table ofr all tips on the tree: Node.gender.cd4.vl.x.y
id <- Node.gender.cd4.vl.x.y$iD[k]
if(id%in%data.table.simpact.trans.clusts.net.adv$id.lab){ # ID of tree which belongs to IDs of clusters
# transmission table of individuls in the transmission clusters: data.table.simpact.trans.net.adv
id.index <- which(data.table.simpact.trans.clusts.net.adv$id.lab == id)
clust.ID.vec.i <- data.table.simpact.trans.clusts.net.adv$clust.ID[id.index]
}else{
clust.ID.vec.i <- 0 # tip ID which is not in any transmission cluster is assigned value 0
}
clust.ID.vec <- c(clust.ID.vec, clust.ID.vec.i)
id.vec <- c(id.vec, id)
}
Node.gender.cd4.vl.x.y$clust.ID <- clust.ID.vec
Node.gender.cd4.vl.x.y.clusID <- Node.gender.cd4.vl.x.y # attributes table with clusters' IDs
## Building transmission network
# 1. consider contigency matrix 2
mrca.times.final <- as.matrix(abs(mrca.v.age.samp.cont2))
net <- graph.adjacency(as.matrix(mrca.times.final), mode="undirected",weighted=T,diag=FALSE)
# E(net) # The edges of the "net" object
#
# V(net) # The vertices of the "net" object
V(net)$gender <- Node.gender.cd4.vl.x.y$V.gender
V(net)$cd4 <- Node.gender.cd4.vl.x.y$V.cd4
V(net)$vl <- Node.gender.cd4.vl.x.y$V.vl
V(net)$loc.x <- Node.gender.cd4.vl.x.y$V.x
V(net)$loc.y <- Node.gender.cd4.vl.x.y$V.y
## Filtering the network by breaking some edges due to conditions from individuals attributes:
##############################################################################################
# 1. Gender, 2. cluster belonging, 3. geographical location, 4. CD4, and 5. Viral load
# Now considering 1 and 2
names.attributes.ngaha <- Node.gender.cd4.vl.x.y
names.matrix.contigency <- names(mrca.times.final[1,])
gender.l <- names.attributes.ngaha$V.gender
clusters.zose <- Node.gender.cd4.vl.x.y$clust.ID
mrca.times.filter <- mrca.times.final
#
# for (i in 1:length(names(mrca.times.final[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final[1,]) == name.col.i)
#
# gender.i <- gender.l[index.i]
#
# cluster.i <- clusters.zose[index.i]
#
#
# for(j in 1:length(names(mrca.times.final[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final[1,]) == name.col.j)
#
# gender.j <- gender.l[index.j]
#
# cluster.j <- clusters.zose[index.j]
#
#
# if(gender.i == gender.j){ # if same gender break the link
#
# mrca.times.filter[i,j] <- 0
#
# }
#
# if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
#
# mrca.times.filter[i,j] <- 0
#
# }
#
#
# }
#
# }
#
# }
# i. Gender
############
for (i in 1:length(names(mrca.times.final[1,]))) {
name.col.i <- names.matrix.contigency[i]
index.i <- which(names(mrca.times.final[1,]) == name.col.i)
gender.i <- gender.l[index.i]
for(j in 1:length(names(mrca.times.final[1,]))){
if(i != j){
name.col.j <- names.matrix.contigency[j]
index.j <- which(names(mrca.times.final[1,]) == name.col.j)
gender.j <- gender.l[index.j]
if(gender.i == gender.j){ # if same gender break the link
mrca.times.filter[i,j] <- 0
}
}
}
}
mrca.times.filter.gender <- mrca.times.filter
# ii. Cluster
#############
mrca.times.filter.gender.clust <- mrca.times.filter.gender
for (i in 1:length(names(mrca.times.final[1,]))) {
name.col.i <- names.matrix.contigency[i]
index.i <- which(names(mrca.times.final[1,]) == name.col.i)
cluster.i <- clusters.zose[index.i]
for(j in 1:length(names(mrca.times.final[1,]))){
if(i != j){
name.col.j <- names.matrix.contigency[j]
index.j <- which(names(mrca.times.final[1,]) == name.col.j)
cluster.j <- clusters.zose[index.j]
if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
mrca.times.filter.gender.clust[i,j] <- 0
}
}
}
}
# iii. tMRCA
#############
net.cont.1 <- graph.adjacency(as.matrix(mrca.times.filter.gender.clust),mode="undirected",weighted=T,diag=FALSE)
# Consider plausible transmissions and difference between sampling time and tMRCA
cut.off <- cut.off # years
# E(net.cont.1)$weight
net.cont.1 <- delete_edges(net.cont.1, E(net.cont.1)[weight>=cut.off]) # remove link greater to the cuttoff
# E(net.cont.1)$weight
# plot(net.cont.1, layout=layout_with_kk)
# Delete tips of the phylogenetic tree which are not part of transmission clusters: they have clust.ID==0 >> deletes vertices
###################################################################################
Non.ids.dat <- dplyr::filter(Node.gender.cd4.vl.x.y, Node.gender.cd4.vl.x.y$clust.ID==0)
Non.ids <- Non.ids.dat$iD
net.cleaned <- delete_vertices(net.cont.1, Non.ids)
#
# # 2. consider contigency matrix 1
#
# mrca.times.final.2 <- as.matrix(abs(mrca.v.age.samp.cont1))
#
#
# net.2 <- graph.adjacency(as.matrix(mrca.times.final.2), mode="undirected",weighted=T,diag=FALSE)
#
# E(net.2) # The edges of the "net.2" object
#
# V(net.2) # The vertices of the "net.2" object
#
# V(net.2)$gender <- Node.gender.cd4.vl.x.y$V.gender
# V(net.2)$cd4 <- Node.gender.cd4.vl.x.y$V.cd4
# V(net.2)$vl <- Node.gender.cd4.vl.x.y$V.vl
# V(net.2)$loc.x <- Node.gender.cd4.vl.x.y$V.x
# V(net.2)$loc.y <- Node.gender.cd4.vl.x.y$V.y
#
#
#
#
# ## Filtering the net.2work by breaking some edges due to conditions from individuals attributes:
#
# # 1. Gender, 2. cluster belonging, 3. geographical location, 4. CD4, and 5. Viral load
#
#
# names.attributes.ngaha <- Node.gender.cd4.vl.x.y
#
# names.matrix.contigency <- names(mrca.times.final.2[1,])
#
# gender.l <- names.attributes.ngaha$V.gender
#
#
# clusters.zose <- Node.gender.cd4.vl.x.y$clust.ID
#
#
# mrca.times.filter.2 <- mrca.times.final.2
#
#
# #
# # for (i in 1:length(names(mrca.times.final.2[1,]))) {
# #
# # name.col.i <- names.matrix.contigency[i]
# #
# # index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
# #
# # gender.i <- gender.l[index.i]
# #
# # cluster.i <- clusters.zose[index.i]
# #
# #
# # for(j in 1:length(names(mrca.times.final.2[1,]))){
# #
# # if(i != j){
# #
# # name.col.j <- names.matrix.contigency[j]
# #
# # index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
# #
# # gender.j <- gender.l[index.j]
# #
# # cluster.j <- clusters.zose[index.j]
# #
# #
# # if(gender.i == gender.j){ # if same gender break the link
# #
# # mrca.times.filter.2[i,j] <- 0
# #
# # }
# #
# # if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
# #
# # mrca.times.filter.2[i,j] <- 0
# #
# # }
# #
# #
# # }
# #
# # }
# #
# # }
#
# # i. Gender
#
# for (i in 1:length(names(mrca.times.final.2[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
#
# gender.i <- gender.l[index.i]
#
# for(j in 1:length(names(mrca.times.final.2[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
#
# gender.j <- gender.l[index.j]
#
# if(gender.i == gender.j){ # if same gender break the link
#
# mrca.times.filter.2[i,j] <- 0
#
# }
#
# }
#
# }
#
# }
#
# mrca.times.filter.2.gender <- mrca.times.filter.2
#
#
# # ii. Cluster
#
# mrca.times.filter.2.gender.clust <- mrca.times.filter.2.gender
#
# for (i in 1:length(names(mrca.times.final.2[1,]))) {
#
# name.col.i <- names.matrix.contigency[i]
#
# index.i <- which(names(mrca.times.final.2[1,]) == name.col.i)
#
# cluster.i <- clusters.zose[index.i]
#
#
# for(j in 1:length(names(mrca.times.final.2[1,]))){
#
# if(i != j){
#
# name.col.j <- names.matrix.contigency[j]
#
# index.j <- which(names(mrca.times.final.2[1,]) == name.col.j)
#
# cluster.j <- clusters.zose[index.j]
#
#
# if(cluster.i != 0 & cluster.j != 0 & cluster.i != cluster.j){
#
# mrca.times.filter.2.gender.clust[i,j] <- 0
#
# }
#
#
# }
#
# }
#
# }
#
#
#
# net.2.cont.1 <- graph.adjacency(as.matrix(mrca.times.filter.2.gender.clust),mode="undirected",weighted=T,diag=FALSE)
#
#
# # Consider plausible transmissions and difference between sampling time and tMRCA
#
#
# cut.off <- 20
#
# E(net.2.cont.1)$weight
#
# net.2.cont.1 <- delete_edges(net.2.cont.1, E(net.2.cont.1)[weight>=cut.off]) # remove links with weight greater than or equal to the cutoff
#
# E(net.2.cont.1)$weight
#
# plot(net.2.cont.1, layout=layout_with_kk)
#
#
# # Delete tips which are not part of transmission clusters, they have clust.ID==0 >> deletes vertices
#
# Non.ids.dat <- dplyr::filter(Node.gender.cd4.vl.x.y, Node.gender.cd4.vl.x.y$clust.ID==0)
# Non.ids <- Non.ids.dat$iD
#
# net.2.cleaned <- delete_vertices(net.2.cont.1, Non.ids)
# r=graph.union(net.cleaned, net.2.cleaned)
# Age structure in the transmission network built from the phylogenetic tree
#########################################################################
# Produce the age table
net.sp <- net.cleaned
transm.matrix <- as.data.table(get.edgelist(net.sp)) # matrix of links of the transmission network built from phylogenetic tree
# table.simpact.trans.net.adv
# reduced transmission table: table.simpact.trans.net.adv of ids in transmission clusters
ids <- unique(c(transm.matrix$V1, transm.matrix$V2))
table.transm.clust.net.igraph <- dplyr::filter(data.table.simpact.trans.clusts.net.adv, data.table.simpact.trans.clusts.net.adv$id.lab%in%ids)
# 1.
# Age structure in transmission clusters as observed from phylogenetic tree #
##################################################################################
age.groups.filtered.trans.clust.network.fun <- function(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
transm.matrix = transm.matrix,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50)){
Age.groups.table <- NULL
v1.dat <- vector()
v2.dat <- vector()
age1.dat <- vector()
age2.dat <- vector()
gender1.dat <- vector()
gender2.dat <- vector()
age.diff <- vector()
for(i in 1:nrow(transm.matrix)){
v1 <- transm.matrix$V1[i]
v2 <- transm.matrix$V2[i]
index.v1 <- which(table.transm.clust.net.igraph$id.lab == v1)
index.v2 <- which(table.transm.clust.net.igraph$id.lab == v2)
age1 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v1]
age2 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v2]
gender1 <- table.transm.clust.net.igraph$GenderRec[index.v1]
gender2 <- table.transm.clust.net.igraph$GenderRec[index.v2]
v1.dat <- c(v1.dat, v1)
v2.dat <- c(v2.dat, v2)
age1.dat <- c(age1.dat, age1)
age2.dat <- c(age2.dat, age2)
ad <- abs(age1 - age2)
gender1.dat <- c(gender1.dat, gender1)
gender2.dat <- c(gender2.dat, gender2)
age.diff <- c(age.diff, ad)
}
age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat, age.diff)
# men
men.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==0)
men.age.15.25 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.15.25[1] & men.age.table.1$age1.dat < age.group.15.25[2])
men.age.25.40 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.25.40[1] & men.age.table.1$age1.dat < age.group.25.40[2])
men.age.40.50 <- dplyr::filter(men.age.table.1, men.age.table.1$age1.dat >= age.group.40.50[1] & men.age.table.1$age1.dat < age.group.40.50[2])
# women
women.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==1)
women.age.15.25 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.15.25[1] & women.age.table.1$age1.dat < age.group.15.25[2])
women.age.25.40 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.25.40[1] & women.age.table.1$age1.dat < age.group.25.40[2])
women.age.40.50 <- dplyr::filter(women.age.table.1, women.age.table.1$age1.dat >= age.group.40.50[1] & women.age.table.1$age1.dat < age.group.40.50[2])
numbers.indiv.women.15.25 <- nrow(women.age.15.25)
numbers.indiv.men.15.25 <- nrow(men.age.15.25)
numbers.indiv.women.25.40 <- nrow(women.age.25.40)
numbers.indiv.men.25.40 <- nrow(men.age.25.40)
numbers.indiv.women.40.50 <- nrow(women.age.40.50)
numbers.indiv.men.40.50 <- nrow(men.age.40.50)
numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
numbers.indiv.women.25.40, numbers.indiv.men.25.40,
numbers.indiv.women.40.50, numbers.indiv.men.40.50)
names(numbers.individuals.age.groups) <- c("num.women.cl.15.25", "num.men.cl.15.25",
"num.women.cl.25.40", "num.men.cl.25.40",
"num.women.cl.40.50", "num.men.cl.40.50")
# Age differences
AD.num.women.15.25 <- women.age.15.25$age.diff
AD.num.men.15.25 <- men.age.15.25$age.diff
AD.num.women.25.40 <- women.age.25.40$age.diff
AD.num.men.25.40 <- men.age.25.40$age.diff
AD.num.women.40.50 <- women.age.40.50$age.diff
AD.num.men.40.50 <- men.age.40.50$age.diff
mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
med.AD.num.women.15.25 <- median(AD.num.women.15.25)
sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
med.AD.num.men.15.25 <- median(AD.num.men.15.25)
sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
med.AD.num.women.25.40 <- median(AD.num.women.25.40)
sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
med.AD.num.men.25.40 <- median(AD.num.men.25.40)
sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
med.AD.num.women.40.50 <- median(AD.num.women.40.50)
sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
med.AD.num.men.40.50 <- median(AD.num.men.40.50)
sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
mean.AD.num.women.25.40, mean.AD.num.men.25.40,
mean.AD.num.women.40.50, mean.AD.num.men.40.50)
names(mean.AD.age.groups) <- c("mean.AD.num.women.cl.15.25", "mean.AD.num.men.cl.15.25",
"mean.AD.num.women.cl.25.40", "mean.AD.num.men.cl.25.40",
"mean.AD.num.women.cl.40.50", "mean.AD.num.men.cl.40.50")
med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
med.AD.num.women.25.40, med.AD.num.men.25.40,
med.AD.num.women.40.50, med.AD.num.men.40.50)
names(med.AD.age.groups) <- c("med.AD.num.women.cl.15.25", "med.AD.num.men.cl.15.25",
"med.AD.num.women.cl.25.40", "med.AD.num.men.cl.25.40",
"med.AD.num.women.cl.40.50", "med.AD.num.men.cl.40.50")
sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
sd.AD.num.women.25.40, sd.AD.num.men.25.40,
sd.AD.num.women.40.50, sd.AD.num.men.40.50)
names(sd.AD.age.groups) <- c("sd.AD.num.women.cl.15.25", "sd.AD.num.men.cl.15.25",
"sd.AD.num.women.cl.25.40", "sd.AD.num.men.cl.25.40",
"sd.AD.num.women.cl.40.50", "sd.AD.num.men.cl.40.50")
# Number os pairings
# men 15.25 and women
men.15.25.women.15.25.1 <- vector()
men.15.25.women.25.40.1 <- vector()
men.15.25.women.40.50.1 <- vector()
if(nrow(men.age.table.1)>1){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 15.25 and men
women.15.25.men.15.25.2 <- vector()
women.15.25.men.25.40.2 <- vector()
women.15.25.men.40.50.2 <- vector()
if(nrow(women.age.table.1)>1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 25.40 and women
men.25.40.women.15.25.1 <- vector()
men.25.40.women.25.40.1 <- vector()
men.25.40.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 25.40 and men
women.25.40.men.15.25.2 <- vector()
women.25.40.men.25.40.2 <- vector()
women.25.40.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 40.50 and women
men.40.50.women.15.25.1 <- vector()
men.40.50.women.25.40.1 <- vector()
men.40.50.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 40.50 and men
women.40.50.men.15.25.2 <- vector()
women.40.50.men.25.40.2 <- vector()
women.40.50.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# age structured pairings in both gender without directionality
men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
Age.groups.table <- as.table(Age.groups.table)
men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50)) # number of pairings of men between 15 and 24
men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50)) # number of pairings of men between 25 and 39
men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)) # number of pairings of men between 40 and 49
# proportion of men in different age groups
prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25)) # number of pairings of men between 15 and 24
women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40)) # number of pairings of men between 25 and 39
women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50)) # number of pairings of men between 40 and 49
prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
outputlist <- NULL
outputlist$Age.groups.table <- Age.groups.table
outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
outputlist$mean.AD.age.groups <- mean.AD.age.groups
outputlist$med.AD.age.groups <- med.AD.age.groups
outputlist$sd.AD.age.groups <- sd.AD.age.groups
return(outputlist)
}
# 1. Results: age structure table obtained from transmission network built from transmission clusters
age.structure.transm.clust.List <- age.groups.filtered.trans.clust.network.fun(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
transm.matrix = transm.matrix,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50))
age.structure.transm.clust <- age.structure.transm.clust.List$Age.groups.table
cl.age.str.M.15.25.F.15.25 <- age.structure.transm.clust[1,][1]
cl.age.str.M.25.40.F.15.25 <- age.structure.transm.clust[2,][1]
cl.age.str.M.40.50.F.15.25 <- age.structure.transm.clust[3,][1]
cl.age.str.M.15.25.F.25.40 <- age.structure.transm.clust[1,][2]
cl.age.str.M.25.40.F.25.40 <- age.structure.transm.clust[2,][2]
cl.age.str.M.40.50.F.25.40 <- age.structure.transm.clust[3,][2]
cl.age.str.M.15.25.F.40.50 <- age.structure.transm.clust[1,][3]
cl.age.str.M.25.40.F.40.50 <- age.structure.transm.clust[2,][3]
cl.age.str.M.40.50.F.40.50 <- age.structure.transm.clust[3,][3]
table.cl.age.str <- c(cl.age.str.M.15.25.F.15.25, cl.age.str.M.25.40.F.15.25, cl.age.str.M.40.50.F.15.25,
cl.age.str.M.15.25.F.25.40, cl.age.str.M.25.40.F.25.40, cl.age.str.M.40.50.F.25.40,
cl.age.str.M.15.25.F.40.50, cl.age.str.M.25.40.F.40.50, cl.age.str.M.40.50.F.40.50)
names(table.cl.age.str) <- c("cl.M.15.25.F.15.25", "cl.M.25.40.F.15.25", "cl.M.40.50.F.15.25",
"cl.M.15.25.F.25.40", "cl.M.25.40.F.25.40", "cl.M.40.50.F.25.40",
"cl.M.15.25.F.40.50", "cl.M.25.40.F.40.50", "cl.M.40.50.F.40.50")
# Men prop
age.structure.transm.clust.prop.men <- age.structure.transm.clust.List$prop.men.age.groups.table
cl.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.clust.prop.men[1,][1]
cl.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.clust.prop.men[2,][1]
cl.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.clust.prop.men[3,][1]
cl.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.clust.prop.men[1,][2]
cl.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.clust.prop.men[2,][2]
cl.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.clust.prop.men[3,][2]
cl.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.clust.prop.men[1,][3]
cl.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.clust.prop.men[2,][3]
cl.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.clust.prop.men[3,][3]
table.cl.age.str.prop.men <- c(cl.age.str.prop.men.15.25.F.15.25, cl.age.str.prop.men.25.40.F.15.25, cl.age.str.prop.men.40.50.F.15.25,
cl.age.str.prop.men.15.25.F.25.40, cl.age.str.prop.men.25.40.F.25.40, cl.age.str.prop.men.40.50.F.25.40,
cl.age.str.prop.men.15.25.F.40.50, cl.age.str.prop.men.25.40.F.40.50, cl.age.str.prop.men.40.50.F.40.50)
names(table.cl.age.str.prop.men) <- c("cl.prop.men15.25.F.15.25", "cl.prop.men25.40.F.15.25", "cl.prop.men40.50.F.15.25",
"cl.prop.men15.25.F.25.40", "cl.prop.men25.40.F.25.40", "cl.prop.men40.50.F.25.40",
"cl.prop.men15.25.F.40.50", "cl.prop.men25.40.F.40.50", "cl.prop.men40.50.F.40.50")
table.cl.age.str.prop.men <- NA.handle.fun(input = table.cl.age.str.prop.men)
# Women prop
age.structure.transm.clust.prop.women <- age.structure.transm.clust.List$prop.women.age.groups.table
cl.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.prop.women[1,][1]
cl.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.prop.women[2,][1]
cl.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.prop.women[3,][1]
cl.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.prop.women[1,][2]
cl.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.prop.women[2,][2]
cl.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.prop.women[3,][2]
cl.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.prop.women[1,][3]
cl.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.prop.women[2,][3]
cl.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.prop.women[3,][3]
table.cl.age.str.prop.women <- c(cl.age.str.prop.women.15.25.M.15.25, cl.age.str.prop.women.25.40.M.15.25, cl.age.str.prop.women.40.50.M.15.25,
cl.age.str.prop.women.15.25.M.25.40, cl.age.str.prop.women.25.40.M.25.40, cl.age.str.prop.women.40.50.M.25.40,
cl.age.str.prop.women.15.25.M.40.50, cl.age.str.prop.women.25.40.M.40.50, cl.age.str.prop.women.40.50.M.40.50)
names(table.cl.age.str.prop.women) <- c("cl.prop.women15.25.M.15.25", "cl.prop.women25.40.M.15.25", "cl.prop.women40.50.M.15.25",
"cl.prop.women15.25.M.25.40", "cl.prop.women25.40.M.25.40", "cl.prop.women40.50.M.25.40",
"cl.prop.women15.25.M.40.50", "cl.prop.women25.40.M.40.50", "cl.prop.women40.50.M.40.50")
table.cl.age.str.prop.women <- NA.handle.fun(input = table.cl.age.str.prop.women)
#
numbers.individuals.age.groups.cl <- age.structure.transm.clust.List$numbers.individuals.age.groups
mean.AD.age.groups.cl <- age.structure.transm.clust.List$mean.AD.age.groups
med.AD.age.groups.cl <- age.structure.transm.clust.List$med.AD.age.groups
sd.AD.age.groups.cl <- age.structure.transm.clust.List$sd.AD.age.groups
res1 <- c(table.cl.age.str, table.cl.age.str.prop.men, table.cl.age.str.prop.women,
numbers.individuals.age.groups.cl,
mean.AD.age.groups.cl, med.AD.age.groups.cl,
sd.AD.age.groups.cl)
# 2.
# True age structure in transmission clusters as observed from transmission network #
#####################################################################################
age.groups.filtered.transmission.clust.fun <- function(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50)){
num.women.15.25 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.15.25[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.15.25[2])
num.men.15.25 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.15.25[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.15.25[2])
num.women.25.40 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.25.40[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.25.40[2])
num.men.25.40 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.25.40[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.25.40[2])
num.women.40.50 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="1" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.40.50[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.40.50[2])
num.men.40.50 <- dplyr::filter(table.transm.clust.net.igraph,
table.transm.clust.net.igraph$GenderRec=="0" & table.transm.clust.net.igraph$ageSampTimeRec >= age.group.40.50[1] & table.transm.clust.net.igraph$ageSampTimeRec < age.group.40.50[2])
numbers.indiv.women.15.25 <- nrow(num.women.15.25)
numbers.indiv.men.15.25 <- nrow(num.men.15.25)
numbers.indiv.women.25.40 <- nrow(num.women.25.40)
numbers.indiv.men.25.40 <- nrow(num.men.25.40)
numbers.indiv.women.40.50 <- nrow(num.women.40.50)
numbers.indiv.men.40.50 <- nrow(num.men.40.50)
numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
numbers.indiv.women.25.40, numbers.indiv.men.25.40,
numbers.indiv.women.40.50, numbers.indiv.men.40.50)
names(numbers.individuals.age.groups) <- c("num.women.true.cl.15.25", "num.men.true.cl.15.25",
"num.women.true.cl.25.40", "num.men.true.cl.25.40",
"num.women.true.cl.40.50", "num.men.true.cl.40.50")
num.women.15.25$ageSampTimeDon <- num.women.15.25$SampTime - num.women.15.25$TOBDon
num.men.15.25$ageSampTimeDon <- num.men.15.25$SampTime - num.men.15.25$TOBDon
num.women.25.40$ageSampTimeDon <- num.women.25.40$SampTime - num.women.25.40$TOBDon
num.men.25.40$ageSampTimeDon <- num.men.25.40$SampTime - num.men.25.40$TOBDon
num.women.40.50$ageSampTimeDon <- num.women.40.50$SampTime - num.women.40.50$TOBDon
num.men.40.50$ageSampTimeDon <- num.men.40.50$SampTime - num.men.40.50$TOBDon
# Age differences
AD.num.women.15.25 <- abs(num.women.15.25$ageSampTimeDon - num.women.15.25$ageSampTimeRec)
AD.num.men.15.25 <- abs(num.men.15.25$ageSampTimeDon - num.men.15.25$ageSampTimeRec)
AD.num.women.25.40 <- abs(num.women.25.40$ageSampTimeDon - num.women.25.40$ageSampTimeRec)
AD.num.men.25.40 <- abs(num.men.25.40$ageSampTimeDon - num.men.25.40$ageSampTimeRec)
AD.num.women.40.50 <- abs(num.women.40.50$ageSampTimeDon - num.women.40.50$ageSampTimeRec)
AD.num.men.40.50 <- abs(num.men.40.50$ageSampTimeDon - num.men.40.50$ageSampTimeRec)
mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
med.AD.num.women.15.25 <- median(AD.num.women.15.25)
sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
med.AD.num.men.15.25 <- median(AD.num.men.15.25)
sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
med.AD.num.women.25.40 <- median(AD.num.women.25.40)
sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
med.AD.num.men.25.40 <- median(AD.num.men.25.40)
sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
med.AD.num.women.40.50 <- median(AD.num.women.40.50)
sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
med.AD.num.men.40.50 <- median(AD.num.men.40.50)
sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
mean.AD.num.women.25.40, mean.AD.num.men.25.40,
mean.AD.num.women.40.50, mean.AD.num.men.40.50)
names(mean.AD.age.groups) <- c("mean.AD.num.women.true.cl.15.25", "mean.AD.num.men.true.cl.15.25",
"mean.AD.num.women.true.cl.25.40", "mean.AD.num.men.true.cl.25.40",
"mean.AD.num.women.true.cl.40.50", "mean.AD.num.men.true.cl.40.50")
med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
med.AD.num.women.25.40, med.AD.num.men.25.40,
med.AD.num.women.40.50, med.AD.num.men.40.50)
names(med.AD.age.groups) <- c("med.AD.num.women.true.cl.15.25", "med.AD.num.men.true.cl.15.25",
"med.AD.num.women.true.cl.25.40", "med.AD.num.men.true.cl.25.40",
"med.AD.num.women.true.cl.40.50", "med.AD.num.men.true.cl.40.50")
sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
sd.AD.num.women.25.40, sd.AD.num.men.25.40,
sd.AD.num.women.40.50, sd.AD.num.men.40.50)
names(sd.AD.age.groups) <- c("sd.AD.num.women.true.cl.15.25", "sd.AD.num.men.true.cl.15.25",
"sd.AD.num.women.true.cl.25.40", "sd.AD.num.men.true.cl.25.40",
"sd.AD.num.women.true.cl.40.50", "sd.AD.num.men.true.cl.40.50")
table.transm.clust.net.igraph$ageSampTimeDon <- table.transm.clust.net.igraph$SampTime - table.transm.clust.net.igraph$TOBDon
Age.groups.table <- NULL
v1.dat <- vector()
v2.dat <- vector()
age1.dat <- vector()
age2.dat <- vector()
gender1.dat <- vector()
gender2.dat <- vector()
for(i in 1:nrow(table.transm.clust.net.igraph)){
v1 <- table.transm.clust.net.igraph$RecId[i]
v2 <- table.transm.clust.net.igraph$DonId[i]
index.v1 <- which(table.transm.clust.net.igraph$RecId == v1)
age1 <- table.transm.clust.net.igraph$ageSampTimeRec[index.v1]
age2 <- table.transm.clust.net.igraph$ageSampTimeDon[index.v1]
gender1 <- table.transm.clust.net.igraph$GenderRec[index.v1]
gender2 <- table.transm.clust.net.igraph$GenderDon[index.v1]
v1.dat <- c(v1.dat, v1)
v2.dat <- c(v2.dat, v2)
age1.dat <- c(age1.dat, age1)
age2.dat <- c(age2.dat, age2)
gender1.dat <- c(gender1.dat, gender1)
gender2.dat <- c(gender2.dat, gender2)
}
age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat)
# men
men.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==0)
# women
women.age.table.1 <- dplyr::filter(age.table, age.table$gender1.dat==1)
# men 15.25 and women
men.15.25.women.15.25.1 <- vector()
men.15.25.women.25.40.1 <- vector()
men.15.25.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 15.25 and men
women.15.25.men.15.25.2 <- vector()
women.15.25.men.25.40.2 <- vector()
women.15.25.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 25.40 and women
men.25.40.women.15.25.1 <- vector()
men.25.40.women.25.40.1 <- vector()
men.25.40.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 25.40 and men
women.25.40.men.15.25.2 <- vector()
women.25.40.men.25.40.2 <- vector()
women.25.40.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 40.50 and women
men.40.50.women.15.25.1 <- vector()
men.40.50.women.25.40.1 <- vector()
men.40.50.women.40.50.1 <- vector()
if(nrow(men.age.table.1) > 1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 40.50 and men
women.40.50.men.15.25.2 <- vector()
women.40.50.men.25.40.2 <- vector()
women.40.50.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
Age.groups.table <- as.table(Age.groups.table)
men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50))
men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50))
men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50))
prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25))
women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40))
women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50))
prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
outputlist <- NULL
outputlist$Age.groups.table <- Age.groups.table
outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
outputlist$mean.AD.age.groups <- mean.AD.age.groups
outputlist$med.AD.age.groups <- med.AD.age.groups
outputlist$sd.AD.age.groups <- sd.AD.age.groups
return(outputlist)
}
# 2. Results: true age structure table from transmission network of these individuals in transmission clusters
#
# Computes the "true" (transmission-network-derived) male-by-female age-group
# pairing structure for individuals in transmission clusters, then flattens the
# 3x3 tables into named vectors for the results row `res2`.
age.structure.transm.clus.true.List <- age.groups.filtered.transmission.clust.fun(table.transm.clust.net.igraph = table.transm.clust.net.igraph,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50))
# (i) Aggregated 3x3 counts: rows = male age group, cols = female age group.
age.structure.transm.clust.true <- age.structure.transm.clus.true.List$Age.groups.table
cl.true.age.str.M.15.25.F.15.25 <- age.structure.transm.clust.true[1,][1]
cl.true.age.str.M.25.40.F.15.25 <- age.structure.transm.clust.true[2,][1]
cl.true.age.str.M.40.50.F.15.25 <- age.structure.transm.clust.true[3,][1]
cl.true.age.str.M.15.25.F.25.40 <- age.structure.transm.clust.true[1,][2]
cl.true.age.str.M.25.40.F.25.40 <- age.structure.transm.clust.true[2,][2]
cl.true.age.str.M.40.50.F.25.40 <- age.structure.transm.clust.true[3,][2]
cl.true.age.str.M.15.25.F.40.50 <- age.structure.transm.clust.true[1,][3]
cl.true.age.str.M.25.40.F.40.50 <- age.structure.transm.clust.true[2,][3]
cl.true.age.str.M.40.50.F.40.50 <- age.structure.transm.clust.true[3,][3]
# Flatten column-by-column (female group varies slowest) into a named vector.
table.cl.true.age.str <- c(cl.true.age.str.M.15.25.F.15.25, cl.true.age.str.M.25.40.F.15.25, cl.true.age.str.M.40.50.F.15.25,
cl.true.age.str.M.15.25.F.25.40, cl.true.age.str.M.25.40.F.25.40, cl.true.age.str.M.40.50.F.25.40,
cl.true.age.str.M.15.25.F.40.50, cl.true.age.str.M.25.40.F.40.50, cl.true.age.str.M.40.50.F.40.50)
names(table.cl.true.age.str) <- c("cl.true.M.15.25.F.15.25", "cl.true.M.25.40.F.15.25", "cl.true.M.40.50.F.15.25",
"cl.true.M.15.25.F.25.40", "cl.true.M.25.40.F.25.40", "cl.true.M.40.50.F.25.40",
"cl.true.M.15.25.F.40.50", "cl.true.M.25.40.F.40.50", "cl.true.M.40.50.F.40.50")
# Men prop: row-wise proportions (each male age group sums to 1 across female groups).
age.structure.transm.clus.true.prop.men <- age.structure.transm.clus.true.List$prop.men.age.groups.table
cl.true.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.clus.true.prop.men[1,][1]
cl.true.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.clus.true.prop.men[2,][1]
cl.true.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.clus.true.prop.men[3,][1]
cl.true.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.clus.true.prop.men[1,][2]
cl.true.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.clus.true.prop.men[2,][2]
cl.true.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.clus.true.prop.men[3,][2]
cl.true.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.clus.true.prop.men[1,][3]
cl.true.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.clus.true.prop.men[2,][3]
cl.true.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.clus.true.prop.men[3,][3]
table.cl.true.age.str.prop.men <- c(cl.true.age.str.prop.men.15.25.F.15.25, cl.true.age.str.prop.men.25.40.F.15.25, cl.true.age.str.prop.men.40.50.F.15.25,
cl.true.age.str.prop.men.15.25.F.25.40, cl.true.age.str.prop.men.25.40.F.25.40, cl.true.age.str.prop.men.40.50.F.25.40,
cl.true.age.str.prop.men.15.25.F.40.50, cl.true.age.str.prop.men.25.40.F.40.50, cl.true.age.str.prop.men.40.50.F.40.50)
names(table.cl.true.age.str.prop.men) <- c("cl.true.prop.men15.25.F.15.25", "cl.true.prop.men25.40.F.15.25", "cl.true.prop.men40.50.F.15.25",
"cl.true.prop.men15.25.F.25.40", "cl.true.prop.men25.40.F.25.40", "cl.true.prop.men40.50.F.25.40",
"cl.true.prop.men15.25.F.40.50", "cl.true.prop.men25.40.F.40.50", "cl.true.prop.men40.50.F.40.50")
table.cl.true.age.str.prop.men <- NA.handle.fun(input = table.cl.true.age.str.prop.men)
# Women prop: proportions per female age group.
# BUGFIX: this previously read from `age.structure.transm.clust.List`, a different
# object than the one computed above; it must come from the same "true" cluster
# list used for the counts and men-proportions sections.
age.structure.transm.clust.true.prop.women <- age.structure.transm.clus.true.List$prop.women.age.groups.table
cl.true.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.true.prop.women[1,][1]
cl.true.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.true.prop.women[2,][1]
cl.true.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.true.prop.women[3,][1]
cl.true.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.true.prop.women[1,][2]
cl.true.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.true.prop.women[2,][2]
cl.true.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.true.prop.women[3,][2]
cl.true.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.true.prop.women[1,][3]
cl.true.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.true.prop.women[2,][3]
cl.true.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.true.prop.women[3,][3]
table.cl.true.age.str.prop.women <- c(cl.true.age.str.prop.women.15.25.M.15.25, cl.true.age.str.prop.women.25.40.M.15.25, cl.true.age.str.prop.women.40.50.M.15.25,
cl.true.age.str.prop.women.15.25.M.25.40, cl.true.age.str.prop.women.25.40.M.25.40, cl.true.age.str.prop.women.40.50.M.25.40,
cl.true.age.str.prop.women.15.25.M.40.50, cl.true.age.str.prop.women.25.40.M.40.50, cl.true.age.str.prop.women.40.50.M.40.50)
names(table.cl.true.age.str.prop.women) <- c("cl.true.prop.women15.25.M.15.25", "cl.true.prop.women25.40.M.15.25", "cl.true.prop.women40.50.M.15.25",
"cl.true.prop.women15.25.M.25.40", "cl.true.prop.women25.40.M.25.40", "cl.true.prop.women40.50.M.25.40",
"cl.true.prop.women15.25.M.40.50", "cl.true.prop.women25.40.M.40.50", "cl.true.prop.women40.50.M.40.50")
table.cl.true.age.str.prop.women <- NA.handle.fun(input = table.cl.true.age.str.prop.women)
# Per-age-group counts and absolute age-difference summaries from the same list.
numbers.individuals.age.groups.true.cl <- age.structure.transm.clus.true.List$numbers.individuals.age.groups
mean.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$mean.AD.age.groups
med.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$med.AD.age.groups
sd.AD.age.groups.true.cl <- age.structure.transm.clus.true.List$sd.AD.age.groups
# Combined results vector for this section.
res2 <- c(table.cl.true.age.str, table.cl.true.age.str.prop.men, table.cl.true.age.str.prop.women,
numbers.individuals.age.groups.true.cl,
mean.AD.age.groups.true.cl, med.AD.age.groups.true.cl,
sd.AD.age.groups.true.cl)
# 3.
# True age structure in transmission transmission network for selected individuals #
#####################################################################################
# age.groups.filtered.transmission.net.fun:
# Builds the "true" age-group mixing structure from a transmission-network
# covariate table (one row per transmission event, with recipient/donor ids,
# genders, birth times and sampling-time ages).
#
# Arguments:
#   table.transmission.net.cov - data frame with (at least) the columns used
#     below: RecId, DonId, GenderRec, GenderDon, TOBDon, SampTime,
#     ageSampTimeRec. Gender coding "0"/"1" is used as men/women throughout
#     this function (see the dplyr::filter calls).
#   age.group.15.25 / .25.40 / .40.50 - c(lower, upper) age bounds; lower
#     bound inclusive, upper bound exclusive in every comparison below.
#
# Returns a list with: 3x3 pairing-count table (Age.groups.table), row-wise
# male proportions, column-wise female proportions, the same tables split by
# direction of transmission (MtoW / WtoM), per-group individual counts, and
# mean/median/sd of absolute age differences per group.
#
# NOTE(review): dividing by a zero group total in the proportion tables yields
# NaN; callers appear to clean this downstream (NA.handle.fun) — confirm.
age.groups.filtered.transmission.net.fun <- function(table.transmission.net.cov = table.simpact.trans.net.cov,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50)){
# Subset recipients by gender and recipient age-at-sampling for each age band.
num.women.15.25 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.15.25[1] & table.transmission.net.cov$ageSampTimeRec < age.group.15.25[2])
num.men.15.25 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.15.25[1] & table.transmission.net.cov$ageSampTimeRec < age.group.15.25[2])
num.women.25.40 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.25.40[1] & table.transmission.net.cov$ageSampTimeRec < age.group.25.40[2])
num.men.25.40 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.25.40[1] & table.transmission.net.cov$ageSampTimeRec < age.group.25.40[2])
num.women.40.50 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="1" & table.transmission.net.cov$ageSampTimeRec >= age.group.40.50[1] & table.transmission.net.cov$ageSampTimeRec < age.group.40.50[2])
num.men.40.50 <- dplyr::filter(table.transmission.net.cov,
table.transmission.net.cov$GenderRec=="0" & table.transmission.net.cov$ageSampTimeRec >= age.group.40.50[1] & table.transmission.net.cov$ageSampTimeRec < age.group.40.50[2])
# Counts of recipients per gender and age group.
numbers.indiv.women.15.25 <- nrow(num.women.15.25)
numbers.indiv.men.15.25 <- nrow(num.men.15.25)
numbers.indiv.women.25.40 <- nrow(num.women.25.40)
numbers.indiv.men.25.40 <- nrow(num.men.25.40)
numbers.indiv.women.40.50 <- nrow(num.women.40.50)
numbers.indiv.men.40.50 <- nrow(num.men.40.50)
numbers.individuals.age.groups <- c(numbers.indiv.women.15.25, numbers.indiv.men.15.25,
numbers.indiv.women.25.40, numbers.indiv.men.25.40,
numbers.indiv.women.40.50, numbers.indiv.men.40.50)
names(numbers.individuals.age.groups) <- c("num.women.true.net.15.25", "num.men.true.net.15.25",
"num.women.true.net.25.40", "num.men.true.net.25.40",
"num.women.true.net.40.50", "num.men.true.net.40.50")
# Donor age at sampling time = sampling time minus donor time of birth.
num.women.15.25$ageSampTimeDon <- num.women.15.25$SampTime - num.women.15.25$TOBDon
num.men.15.25$ageSampTimeDon <- num.men.15.25$SampTime - num.men.15.25$TOBDon
num.women.25.40$ageSampTimeDon <- num.women.25.40$SampTime - num.women.25.40$TOBDon
num.men.25.40$ageSampTimeDon <- num.men.25.40$SampTime - num.men.25.40$TOBDon
num.women.40.50$ageSampTimeDon <- num.women.40.50$SampTime - num.women.40.50$TOBDon
num.men.40.50$ageSampTimeDon <- num.men.40.50$SampTime - num.men.40.50$TOBDon
# Age differences (absolute donor-recipient age gap per transmission pair).
AD.num.women.15.25 <- abs(num.women.15.25$ageSampTimeDon - num.women.15.25$ageSampTimeRec)
AD.num.men.15.25 <- abs(num.men.15.25$ageSampTimeDon - num.men.15.25$ageSampTimeRec)
AD.num.women.25.40 <- abs(num.women.25.40$ageSampTimeDon - num.women.25.40$ageSampTimeRec)
AD.num.men.25.40 <- abs(num.men.25.40$ageSampTimeDon - num.men.25.40$ageSampTimeRec)
AD.num.women.40.50 <- abs(num.women.40.50$ageSampTimeDon - num.women.40.50$ageSampTimeRec)
AD.num.men.40.50 <- abs(num.men.40.50$ageSampTimeDon - num.men.40.50$ageSampTimeRec)
# Summary statistics of the age differences per gender/age group
# (mean/median/sd are NA/NaN for empty groups — no na.rm is applied here).
mean.AD.num.women.15.25 <- mean(AD.num.women.15.25)
med.AD.num.women.15.25 <- median(AD.num.women.15.25)
sd.AD.num.women.15.25 <- sd(AD.num.women.15.25)
mean.AD.num.men.15.25 <- mean(AD.num.men.15.25)
med.AD.num.men.15.25 <- median(AD.num.men.15.25)
sd.AD.num.men.15.25 <- sd(AD.num.men.15.25)
mean.AD.num.women.25.40 <- mean(AD.num.women.25.40)
med.AD.num.women.25.40 <- median(AD.num.women.25.40)
sd.AD.num.women.25.40 <- sd(AD.num.women.25.40)
mean.AD.num.men.25.40 <- mean(AD.num.men.25.40)
med.AD.num.men.25.40 <- median(AD.num.men.25.40)
sd.AD.num.men.25.40 <- sd(AD.num.men.25.40)
mean.AD.num.women.40.50 <- mean(AD.num.women.40.50)
med.AD.num.women.40.50 <- median(AD.num.women.40.50)
sd.AD.num.women.40.50 <- sd(AD.num.women.40.50)
mean.AD.num.men.40.50 <- mean(AD.num.men.40.50)
med.AD.num.men.40.50 <- median(AD.num.men.40.50)
sd.AD.num.men.40.50 <- sd(AD.num.men.40.50)
mean.AD.age.groups <- c(mean.AD.num.women.15.25, mean.AD.num.men.15.25,
mean.AD.num.women.25.40, mean.AD.num.men.25.40,
mean.AD.num.women.40.50, mean.AD.num.men.40.50)
names(mean.AD.age.groups) <- c("mean.AD.num.women.true.net.15.25", "mean.AD.num.men.true.net.15.25",
"mean.AD.num.women.true.net.25.40", "mean.AD.num.men.true.net.25.40",
"mean.AD.num.women.true.net.40.50", "mean.AD.num.men.true.net.40.50")
med.AD.age.groups <- c(med.AD.num.women.15.25, med.AD.num.men.15.25,
med.AD.num.women.25.40, med.AD.num.men.25.40,
med.AD.num.women.40.50, med.AD.num.men.40.50)
names(med.AD.age.groups) <- c("med.AD.num.women.true.net.15.25", "med.AD.num.men.true.net.15.25",
"med.AD.num.women.true.net.25.40", "med.AD.num.men.true.net.25.40",
"med.AD.num.women.true.net.40.50", "med.AD.num.men.true.net.40.50")
sd.AD.age.groups <- c(sd.AD.num.women.15.25, sd.AD.num.men.15.25,
sd.AD.num.women.25.40, sd.AD.num.men.25.40,
sd.AD.num.women.40.50, sd.AD.num.men.40.50)
names(sd.AD.age.groups) <- c("sd.AD.num.women.true.net.15.25", "sd.AD.num.men.true.net.15.25",
"sd.AD.num.women.true.net.25.40", "sd.AD.num.men.true.net.25.40",
"sd.AD.num.women.true.net.40.50", "sd.AD.num.men.true.net.40.50")
# Compute donor age on the full table and split by DONOR gender.
table.transmission.net.cov$ageSampTimeDon <- table.transmission.net.cov$SampTime - table.transmission.net.cov$TOBDon
men.df <- dplyr::filter(table.transmission.net.cov, table.transmission.net.cov$GenderDon=="0")
women.df <- dplyr::filter(table.transmission.net.cov, table.transmission.net.cov$GenderDon=="1")
# Dead initialisation: Age.groups.table is unconditionally reassigned below.
Age.groups.table <- NULL
# filter.dat: per row, collect (recipient id/gender/age, donor id/gender/age).
# NOTE(review): age1.dat = ageSampTimeRec (RECIPIENT age) and age2.dat =
# ageSampTimeDon (DONOR age), while the "men.*"/"women.*" bin names below refer
# to the donor-gender subset — confirm the intended pairing direction.
# NOTE(review): which(RecId == v1) can match multiple rows when a RecId is
# duplicated, appending several entries per loop iteration — verify RecId is
# unique in the input, otherwise rows get over-counted/misaligned.
filter.dat <- function(table.dat.fr = table.dat.fr){
v1.dat <- vector()
v2.dat <- vector()
age1.dat <- vector()
age2.dat <- vector()
gender1.dat <- vector()
gender2.dat <- vector()
for(i in 1:nrow(table.dat.fr)){
v1 <- table.dat.fr$RecId[i]
v2 <- table.dat.fr$DonId[i]
index.v1 <- which(table.dat.fr$RecId == v1)
age1 <- table.dat.fr$ageSampTimeRec[index.v1]
age2 <- table.dat.fr$ageSampTimeDon[index.v1]
gender1 <- table.dat.fr$GenderRec[index.v1]
gender2 <- table.dat.fr$GenderDon[index.v1]
v1.dat <- c(v1.dat, v1)
v2.dat <- c(v2.dat, v2)
age1.dat <- c(age1.dat, age1)
age2.dat <- c(age2.dat, age2)
gender1.dat <- c(gender1.dat, gender1)
gender2.dat <- c(gender2.dat, gender2)
}
age.table <- data.frame(v1.dat, gender1.dat, age1.dat, v2.dat, gender2.dat, age2.dat)
return(age.table)
}
age.table <- filter.dat(table.dat.fr = table.transmission.net.cov)
# men as donors
men.age.table.1 <- filter.dat(table.dat.fr = men.df) # dplyr::filter(age.table, age.table$gender1.dat==0)
# women as donors
women.age.table.1 <- filter.dat(table.dat.fr = women.df) # dplyr::filter(age.table, age.table$gender1.dat==1)
# The nine loops below cross-tabulate pairs: the outer condition bins age1.dat
# into one group, the inner chain bins age2.dat; matched age2.dat values are
# accumulated so the vector LENGTHS give the pairing counts.
# men 15.25 and women
men.15.25.women.15.25.1 <- vector()
men.15.25.women.25.40.1 <- vector()
men.15.25.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.15.25[1] & men.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.15.25.women.15.25.1 <- c(men.15.25.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.15.25.women.25.40.1 <- c(men.15.25.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.15.25.women.40.50.1 <- c(men.15.25.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 15.25 and men
women.15.25.men.15.25.2 <- vector()
women.15.25.men.25.40.2 <- vector()
women.15.25.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.15.25[1] & women.age.table.1$age1.dat[j] < age.group.15.25[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.15.25.men.15.25.2 <- c(women.15.25.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.15.25.men.25.40.2 <- c(women.15.25.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.15.25.men.40.50.2 <- c(women.15.25.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 25.40 and women
men.25.40.women.15.25.1 <- vector()
men.25.40.women.25.40.1 <- vector()
men.25.40.women.40.50.1 <- vector()
if(nrow(men.age.table.1) > 1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.25.40[1] & men.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.25.40.women.15.25.1 <- c(men.25.40.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.25.40.women.25.40.1 <- c(men.25.40.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.25.40.women.40.50.1 <- c(men.25.40.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 25.40 and men
women.25.40.men.15.25.2 <- vector()
women.25.40.men.25.40.2 <- vector()
women.25.40.men.40.50.2 <- vector()
if(nrow(women.age.table.1) >1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.25.40[1] & women.age.table.1$age1.dat[j] < age.group.25.40[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.25.40.men.15.25.2 <- c(women.25.40.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.25.40.men.25.40.2 <- c(women.25.40.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.25.40.men.40.50.2 <- c(women.25.40.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# men 40.50 and women
men.40.50.women.15.25.1 <- vector()
men.40.50.women.25.40.1 <- vector()
men.40.50.women.40.50.1 <- vector()
if(nrow(men.age.table.1) >1 ){
for (j in 1:nrow(men.age.table.1)) {
if(men.age.table.1$age1.dat[j] >= age.group.40.50[1] & men.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(men.age.table.1$age2.dat[j] >= age.group.15.25[1] & men.age.table.1$age2.dat[j] < age.group.15.25[2]){
men.40.50.women.15.25.1 <- c(men.40.50.women.15.25.1, men.age.table.1$age2.dat[j])
}else if(men.age.table.1$age2.dat[j] >= age.group.25.40[1] & men.age.table.1$age2.dat[j] < age.group.25.40[2]){
men.40.50.women.25.40.1 <- c(men.40.50.women.25.40.1, men.age.table.1$age2.dat[j])
}else if (men.age.table.1$age2.dat[j] >= age.group.40.50[1] & men.age.table.1$age2.dat[j] < age.group.40.50[2]){
men.40.50.women.40.50.1 <- c(men.40.50.women.40.50.1, men.age.table.1$age2.dat[j])
}
}
}
}
# women 40.50 and men
women.40.50.men.15.25.2 <- vector()
women.40.50.men.25.40.2 <- vector()
women.40.50.men.40.50.2 <- vector()
if(nrow(women.age.table.1) > 1 ){
for (j in 1:nrow(women.age.table.1)) {
if(women.age.table.1$age1.dat[j] >= age.group.40.50[1] & women.age.table.1$age1.dat[j] < age.group.40.50[2]){
if(women.age.table.1$age2.dat[j] >= age.group.15.25[1] & women.age.table.1$age2.dat[j] < age.group.15.25[2]){
women.40.50.men.15.25.2 <- c(women.40.50.men.15.25.2, women.age.table.1$age2.dat[j])
}else if(women.age.table.1$age2.dat[j] >= age.group.25.40[1] & women.age.table.1$age2.dat[j] < age.group.25.40[2]){
women.40.50.men.25.40.2 <- c(women.40.50.men.25.40.2, women.age.table.1$age2.dat[j])
}else if (women.age.table.1$age2.dat[j] >= age.group.40.50[1] & women.age.table.1$age2.dat[j] < age.group.40.50[2]){
women.40.50.men.40.50.2 <- c(women.40.50.men.40.50.2, women.age.table.1$age2.dat[j])
}
}
}
}
# Merge the two directions (male-donor and female-donor subsets) per cell.
men.15.25.women.15.25 <- c(men.15.25.women.15.25.1, women.15.25.men.15.25.2)
men.15.25.women.25.40 <- c(men.15.25.women.25.40.1, women.25.40.men.15.25.2)
men.15.25.women.40.50 <- c(men.15.25.women.40.50.1, women.40.50.men.15.25.2)
men.25.40.women.15.25 <- c(men.25.40.women.15.25.1, women.15.25.men.25.40.2)
men.25.40.women.25.40 <- c(men.25.40.women.25.40.1, women.25.40.men.25.40.2)
men.25.40.women.40.50 <- c(men.25.40.women.40.50.1, women.40.50.men.25.40.2)
men.40.50.women.15.25 <- c(men.40.50.women.15.25.1, women.15.25.men.40.50.2)
men.40.50.women.25.40 <- c(men.40.50.women.25.40.1, women.25.40.men.40.50.2)
men.40.50.women.40.50 <- c(men.40.50.women.40.50.1, women.40.50.men.40.50.2)
# Overall 3x3 count table: rows = male age group, cols = female age group.
Age.groups.table <- matrix(c(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50),
length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50),
length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(Age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
Age.groups.table <- as.table(Age.groups.table)
# Row-wise proportions (per male age group).
men.15.25.T <- sum(length(men.15.25.women.15.25), length(men.15.25.women.25.40), length(men.15.25.women.40.50))
men.25.40.T <- sum(length(men.25.40.women.15.25), length(men.25.40.women.25.40), length(men.25.40.women.40.50))
men.40.50.T <- sum(length(men.40.50.women.15.25), length(men.40.50.women.25.40), length(men.40.50.women.40.50))
prop.men.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/men.15.25.T, length(men.15.25.women.25.40)/men.15.25.T, length(men.15.25.women.40.50)/men.15.25.T,
length(men.25.40.women.15.25)/men.25.40.T, length(men.25.40.women.25.40)/men.25.40.T, length(men.25.40.women.40.50)/men.25.40.T,
length(men.40.50.women.15.25)/men.40.50.T, length(men.40.50.women.25.40)/men.40.50.T, length(men.40.50.women.40.50)/men.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table) <- c("Female.15.25", "Female.25.40", "Female.40.50")
rownames(prop.men.age.groups.table) <- c("prop.Male.15.25", "prop.Male.25.40", "prop.Male.40.50")
# Column-wise proportions (per female age group).
women.15.25.T <- sum(length(men.15.25.women.15.25), length(men.25.40.women.15.25), length(men.40.50.women.15.25))
women.25.40.T <- sum(length(men.15.25.women.25.40), length(men.25.40.women.25.40), length(men.40.50.women.25.40))
women.40.50.T <- sum(length(men.15.25.women.40.50), length(men.25.40.women.40.50), length(men.40.50.women.40.50))
prop.women.age.groups.table <- matrix(c(length(men.15.25.women.15.25)/women.15.25.T, length(men.25.40.women.15.25)/women.15.25.T, length(men.40.50.women.15.25)/women.15.25.T,
length(men.15.25.women.25.40)/women.25.40.T, length(men.25.40.women.25.40)/women.25.40.T, length(men.40.50.women.25.40)/women.25.40.T,
length(men.15.25.women.40.50)/women.40.50.T, length(men.25.40.women.40.50)/women.40.50.T, length(men.40.50.women.40.50)/women.40.50.T),
ncol = 3,
byrow = TRUE)
colnames(prop.women.age.groups.table) <- c("Male.15.25", "Male.25.40", "Male.40.50")
rownames(prop.women.age.groups.table) <- c("prop.Female.15.25", "prop.Female.25.40", "prop.Female.40.50")
# Directionality
# MtoW: only pairs from the male-donor subset (the ".1" vectors above).
men.15.25.women.15.25.MtoW <- c(men.15.25.women.15.25.1)
men.15.25.women.25.40.MtoW <- c(men.15.25.women.25.40.1)
men.15.25.women.40.50.MtoW <- c(men.15.25.women.40.50.1)
men.25.40.women.15.25.MtoW <- c(men.25.40.women.15.25.1)
men.25.40.women.25.40.MtoW <- c(men.25.40.women.25.40.1)
men.25.40.women.40.50.MtoW <- c(men.25.40.women.40.50.1)
men.40.50.women.15.25.MtoW <- c(men.40.50.women.15.25.1)
men.40.50.women.25.40.MtoW <- c(men.40.50.women.25.40.1)
men.40.50.women.40.50.MtoW <- c(men.40.50.women.40.50.1)
Age.groups.table.MtoW <- matrix(c(length(men.15.25.women.15.25.MtoW), length(men.15.25.women.25.40.MtoW), length(men.15.25.women.40.50.MtoW),
length(men.25.40.women.15.25.MtoW), length(men.25.40.women.25.40.MtoW), length(men.25.40.women.40.50.MtoW),
length(men.40.50.women.15.25.MtoW), length(men.40.50.women.25.40.MtoW), length(men.40.50.women.40.50.MtoW)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table.MtoW) <- c("Female.15.25.MtoW", "Female.25.40.MtoW", "Female.40.50.MtoW")
rownames(Age.groups.table.MtoW) <- c("Male.15.25.MtoW", "Male.25.40.MtoW", "Male.40.50.MtoW")
Age.groups.table.MtoW <- as.table(Age.groups.table.MtoW)
men.15.25.T.MtoW <- sum(length(men.15.25.women.15.25.MtoW), length(men.15.25.women.25.40.MtoW), length(men.15.25.women.40.50.MtoW))
men.25.40.T.MtoW <- sum(length(men.25.40.women.15.25.MtoW), length(men.25.40.women.25.40.MtoW), length(men.25.40.women.40.50.MtoW))
men.40.50.T.MtoW <- sum(length(men.40.50.women.15.25.MtoW), length(men.40.50.women.25.40.MtoW), length(men.40.50.women.40.50.MtoW))
prop.men.age.groups.table.MtoW <- matrix(c(length(men.15.25.women.15.25.MtoW)/men.15.25.T.MtoW, length(men.15.25.women.25.40.MtoW)/men.15.25.T.MtoW, length(men.15.25.women.40.50.MtoW)/men.15.25.T.MtoW,
length(men.25.40.women.15.25.MtoW)/men.25.40.T.MtoW, length(men.25.40.women.25.40.MtoW)/men.25.40.T.MtoW, length(men.25.40.women.40.50.MtoW)/men.25.40.T.MtoW,
length(men.40.50.women.15.25.MtoW)/men.40.50.T.MtoW, length(men.40.50.women.25.40.MtoW)/men.40.50.T.MtoW, length(men.40.50.women.40.50.MtoW)/men.40.50.T.MtoW),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table.MtoW) <- c("Female.15.25.MtoW", "Female.25.40.MtoW", "Female.40.50.MtoW")
rownames(prop.men.age.groups.table.MtoW) <- c("prop.Male.15.25.MtoW", "prop.Male.25.40.MtoW", "prop.Male.40.50.MtoW")
# WtoM: only pairs from the female-donor subset (the ".2" vectors above).
men.15.25.women.15.25.WtoM <- c(women.15.25.men.15.25.2)
men.15.25.women.25.40.WtoM <- c(women.25.40.men.15.25.2)
men.15.25.women.40.50.WtoM <- c(women.40.50.men.15.25.2)
men.25.40.women.15.25.WtoM <- c( women.15.25.men.25.40.2)
men.25.40.women.25.40.WtoM <- c(women.25.40.men.25.40.2)
men.25.40.women.40.50.WtoM <- c(women.40.50.men.25.40.2)
men.40.50.women.15.25.WtoM <- c(women.15.25.men.40.50.2)
men.40.50.women.25.40.WtoM <- c(women.25.40.men.40.50.2)
men.40.50.women.40.50.WtoM <- c(women.40.50.men.40.50.2)
Age.groups.table.WtoM <- matrix(c(length(men.15.25.women.15.25.WtoM), length(men.15.25.women.25.40.WtoM), length(men.15.25.women.40.50.WtoM),
length(men.25.40.women.15.25.WtoM), length(men.25.40.women.25.40.WtoM), length(men.25.40.women.40.50.WtoM),
length(men.40.50.women.15.25.WtoM), length(men.40.50.women.25.40.WtoM), length(men.40.50.women.40.50.WtoM)),
ncol = 3,
byrow = TRUE)
colnames(Age.groups.table.WtoM) <- c("Female.15.25.WtoM", "Female.25.40.WtoM", "Female.40.50.WtoM")
rownames(Age.groups.table.WtoM) <- c("Male.15.25.WtoM", "Male.25.40.WtoM", "Male.40.50.WtoM")
Age.groups.table.WtoM <- as.table(Age.groups.table.WtoM)
men.15.25.T.WtoM <- sum(length(men.15.25.women.15.25.WtoM), length(men.15.25.women.25.40.WtoM), length(men.15.25.women.40.50.WtoM))
men.25.40.T.WtoM <- sum(length(men.25.40.women.15.25.WtoM), length(men.25.40.women.25.40.WtoM), length(men.25.40.women.40.50.WtoM))
men.40.50.T.WtoM <- sum(length(men.40.50.women.15.25.WtoM), length(men.40.50.women.25.40.WtoM), length(men.40.50.women.40.50.WtoM))
prop.men.age.groups.table.WtoM <- matrix(c(length(men.15.25.women.15.25.WtoM)/men.15.25.T.WtoM, length(men.15.25.women.25.40.WtoM)/men.15.25.T.WtoM, length(men.15.25.women.40.50.WtoM)/men.15.25.T.WtoM,
length(men.25.40.women.15.25.WtoM)/men.25.40.T.WtoM, length(men.25.40.women.25.40.WtoM)/men.25.40.T.WtoM, length(men.25.40.women.40.50.WtoM)/men.25.40.T.WtoM,
length(men.40.50.women.15.25.WtoM)/men.40.50.T.WtoM, length(men.40.50.women.25.40.WtoM)/men.40.50.T.WtoM, length(men.40.50.women.40.50.WtoM)/men.40.50.T.WtoM),
ncol = 3,
byrow = TRUE)
colnames(prop.men.age.groups.table.WtoM) <- c("Female.15.25.WtoM", "Female.25.40.WtoM", "Female.40.50.WtoM")
rownames(prop.men.age.groups.table.WtoM) <- c("prop.Male.15.25.WtoM", "prop.Male.25.40.WtoM", "prop.Male.40.50.WtoM")
# Assemble the output list (built up field by field on a NULL base).
outputlist <- NULL
outputlist$Age.groups.table <- Age.groups.table
outputlist$prop.men.age.groups.table <- prop.men.age.groups.table
outputlist$prop.women.age.groups.table <- prop.women.age.groups.table
outputlist$Age.groups.table.MtoW <- Age.groups.table.MtoW
outputlist$prop.men.age.groups.table.MtoW <- prop.men.age.groups.table.MtoW
outputlist$Age.groups.table.WtoM <- Age.groups.table.WtoM
outputlist$prop.men.age.groups.table.WtoM <- prop.men.age.groups.table.WtoM
outputlist$numbers.individuals.age.groups <- numbers.individuals.age.groups
outputlist$mean.AD.age.groups <- mean.AD.age.groups
outputlist$med.AD.age.groups <- med.AD.age.groups
outputlist$sd.AD.age.groups <- sd.AD.age.groups
return(outputlist)
}
# 3. Results: true age structure table from transmission network of all selected individuals (people in the phylogenetic tree)
age.structure.transm.net.true.List <- age.groups.filtered.transmission.net.fun(table.transmission.net.cov = table.simpact.trans.net.cov,
age.group.15.25 = c(15,25),
age.group.25.40 = c(25,40),
age.group.40.50 = c(40,50))
# (i) Aggregated tables of pairings
age.structure.transm.net.true <- age.structure.transm.net.true.List$Age.groups.table
tree.tra.age.str.M.15.25.F.15.25 <- age.structure.transm.net.true[1,][1]
tree.tra.age.str.M.25.40.F.15.25 <- age.structure.transm.net.true[2,][1]
tree.tra.age.str.M.40.50.F.15.25 <- age.structure.transm.net.true[3,][1]
tree.tra.age.str.M.15.25.F.25.40 <- age.structure.transm.net.true[1,][2]
tree.tra.age.str.M.25.40.F.25.40 <- age.structure.transm.net.true[2,][2]
tree.tra.age.str.M.40.50.F.25.40 <- age.structure.transm.net.true[3,][2]
tree.tra.age.str.M.15.25.F.40.50 <- age.structure.transm.net.true[1,][3]
tree.tra.age.str.M.25.40.F.40.50 <- age.structure.transm.net.true[2,][3]
tree.tra.age.str.M.40.50.F.40.50 <- age.structure.transm.net.true[3,][3]
table.tree.tra.age.str <- c(tree.tra.age.str.M.15.25.F.15.25, tree.tra.age.str.M.25.40.F.15.25, tree.tra.age.str.M.40.50.F.15.25,
tree.tra.age.str.M.15.25.F.25.40, tree.tra.age.str.M.25.40.F.25.40, tree.tra.age.str.M.40.50.F.25.40,
tree.tra.age.str.M.15.25.F.40.50, tree.tra.age.str.M.25.40.F.40.50, tree.tra.age.str.M.40.50.F.40.50)
names(table.tree.tra.age.str) <- c("tree.tra.M.15.25.F.15.25", "tree.tra.M.25.40.F.15.25", "tree.tra.M.40.50.F.15.25",
"tree.tra.M.15.25.F.25.40", "tree.tra.M.25.40.F.25.40", "tree.tra.M.40.50.F.25.40",
"tree.tra.M.15.25.F.40.50", "tree.tra.M.25.40.F.40.50", "tree.tra.M.40.50.F.40.50")
# (ii) Pairings table with men to women infection: directionality
age.structure.transm.net.true.MtoW <- age.structure.transm.net.true.List$Age.groups.table.MtoW
tree.tra.age.str.MtoW.M.15.25.F.15.25 <- age.structure.transm.net.true.MtoW[1,][1]
tree.tra.age.str.MtoW.M.25.40.F.15.25 <- age.structure.transm.net.true.MtoW[2,][1]
tree.tra.age.str.MtoW.M.40.50.F.15.25 <- age.structure.transm.net.true.MtoW[3,][1]
tree.tra.age.str.MtoW.M.15.25.F.25.40 <- age.structure.transm.net.true.MtoW[1,][2]
tree.tra.age.str.MtoW.M.25.40.F.25.40 <- age.structure.transm.net.true.MtoW[2,][2]
tree.tra.age.str.MtoW.M.40.50.F.25.40 <- age.structure.transm.net.true.MtoW[3,][2]
tree.tra.age.str.MtoW.M.15.25.F.40.50 <- age.structure.transm.net.true.MtoW[1,][3]
tree.tra.age.str.MtoW.M.25.40.F.40.50 <- age.structure.transm.net.true.MtoW[2,][3]
tree.tra.age.str.MtoW.M.40.50.F.40.50 <- age.structure.transm.net.true.MtoW[3,][3]
table.tree.tra.age.str.MtoW <- c(tree.tra.age.str.MtoW.M.15.25.F.15.25, tree.tra.age.str.MtoW.M.25.40.F.15.25, tree.tra.age.str.MtoW.M.40.50.F.15.25,
tree.tra.age.str.MtoW.M.15.25.F.25.40, tree.tra.age.str.MtoW.M.25.40.F.25.40, tree.tra.age.str.MtoW.M.40.50.F.25.40,
tree.tra.age.str.MtoW.M.15.25.F.40.50, tree.tra.age.str.MtoW.M.25.40.F.40.50, tree.tra.age.str.MtoW.M.40.50.F.40.50)
names(table.tree.tra.age.str.MtoW) <- c("tree.tra.MtoW.M.15.25.F.15.25", "tree.tra.MtoW.M.25.40.F.15.25", "tree.tra.MtoW.M.40.50.F.15.25",
"tree.tra.MtoW.M.15.25.F.25.40", "tree.tra.MtoW.M.25.40.F.25.40", "tree.tra.MtoW.M.40.50.F.25.40",
"tree.tra.MtoW.M.15.25.F.40.50", "tree.tra.MtoW.M.25.40.F.40.50", "tree.tra.MtoW.M.40.50.F.40.50")
# (iii) Pairings table with women to men infection: directionality
age.structure.transm.net.true.WtoM <- age.structure.transm.net.true.List$Age.groups.table.WtoM
tree.tra.age.str.WtoM.M.15.25.F.15.25 <- age.structure.transm.net.true.WtoM[1,][1]
tree.tra.age.str.WtoM.M.25.40.F.15.25 <- age.structure.transm.net.true.WtoM[2,][1]
tree.tra.age.str.WtoM.M.40.50.F.15.25 <- age.structure.transm.net.true.WtoM[3,][1]
tree.tra.age.str.WtoM.M.15.25.F.25.40 <- age.structure.transm.net.true.WtoM[1,][2]
tree.tra.age.str.WtoM.M.25.40.F.25.40 <- age.structure.transm.net.true.WtoM[2,][2]
tree.tra.age.str.WtoM.M.40.50.F.25.40 <- age.structure.transm.net.true.WtoM[3,][2]
tree.tra.age.str.WtoM.M.15.25.F.40.50 <- age.structure.transm.net.true.WtoM[1,][3]
tree.tra.age.str.WtoM.M.25.40.F.40.50 <- age.structure.transm.net.true.WtoM[2,][3]
tree.tra.age.str.WtoM.M.40.50.F.40.50 <- age.structure.transm.net.true.WtoM[3,][3]
table.tree.tra.age.str.WtoM <- c(tree.tra.age.str.WtoM.M.15.25.F.15.25, tree.tra.age.str.WtoM.M.25.40.F.15.25, tree.tra.age.str.WtoM.M.40.50.F.15.25,
tree.tra.age.str.WtoM.M.15.25.F.25.40, tree.tra.age.str.WtoM.M.25.40.F.25.40, tree.tra.age.str.WtoM.M.40.50.F.25.40,
tree.tra.age.str.WtoM.M.15.25.F.40.50, tree.tra.age.str.WtoM.M.25.40.F.40.50, tree.tra.age.str.WtoM.M.40.50.F.40.50)
names(table.tree.tra.age.str.WtoM) <- c("tree.tra.WtoM.M.15.25.F.15.25", "tree.tra.WtoM.M.25.40.F.15.25", "tree.tra.WtoM.M.40.50.F.15.25",
"tree.tra.WtoM.M.15.25.F.25.40", "tree.tra.WtoM.M.25.40.F.25.40", "tree.tra.WtoM.M.40.50.F.25.40",
"tree.tra.WtoM.M.15.25.F.40.50", "tree.tra.WtoM.M.25.40.F.40.50", "tree.tra.WtoM.M.40.50.F.40.50")
# (iv) Men's pairings proportions in aggregated table
age.structure.transm.net.true.prop.men <- age.structure.transm.net.true.List$prop.men.age.groups.table
tree.trans.true.age.str.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men[1,][1]
tree.trans.true.age.str.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men[2,][1]
tree.trans.true.age.str.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men[3,][1]
tree.trans.true.age.str.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men[1,][2]
tree.trans.true.age.str.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men[2,][2]
tree.trans.true.age.str.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men[3,][2]
tree.trans.true.age.str.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men[1,][3]
tree.trans.true.age.str.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men[2,][3]
tree.trans.true.age.str.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men[3,][3]
table.tree.trans.true.age.str.prop.men <- c(tree.trans.true.age.str.prop.men.15.25.F.15.25, tree.trans.true.age.str.prop.men.25.40.F.15.25, tree.trans.true.age.str.prop.men.40.50.F.15.25,
tree.trans.true.age.str.prop.men.15.25.F.25.40, tree.trans.true.age.str.prop.men.25.40.F.25.40, tree.trans.true.age.str.prop.men.40.50.F.25.40,
tree.trans.true.age.str.prop.men.15.25.F.40.50, tree.trans.true.age.str.prop.men.25.40.F.40.50, tree.trans.true.age.str.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.prop.men) <- c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50")
table.tree.trans.true.age.str.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.prop.men)
# (v) Men's pairings proportions in men to women infection: directionality
age.structure.transm.net.true.prop.men.MtoW <- age.structure.transm.net.true.List$prop.men.age.groups.table.MtoW
tree.trans.true.age.str.MtoW.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[1,][1]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[2,][1]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men.MtoW[3,][1]
tree.trans.true.age.str.MtoW.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[1,][2]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[2,][2]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men.MtoW[3,][2]
tree.trans.true.age.str.MtoW.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[1,][3]
tree.trans.true.age.str.MtoW.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[2,][3]
tree.trans.true.age.str.MtoW.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men.MtoW[3,][3]
table.tree.trans.true.age.str.MtoW.prop.men <- c(tree.trans.true.age.str.MtoW.prop.men.15.25.F.15.25, tree.trans.true.age.str.MtoW.prop.men.25.40.F.15.25, tree.trans.true.age.str.MtoW.prop.men.40.50.F.15.25,
tree.trans.true.age.str.MtoW.prop.men.15.25.F.25.40, tree.trans.true.age.str.MtoW.prop.men.25.40.F.25.40, tree.trans.true.age.str.MtoW.prop.men.40.50.F.25.40,
tree.trans.true.age.str.MtoW.prop.men.15.25.F.40.50, tree.trans.true.age.str.MtoW.prop.men.25.40.F.40.50, tree.trans.true.age.str.MtoW.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.MtoW.prop.men) <- paste0("MtoW.", c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50"))
table.tree.trans.true.age.str.MtoW.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.MtoW.prop.men)
# (vi) Men's pairings proportions in women to men infection: directionality
age.structure.transm.net.true.prop.men.WtoM <- age.structure.transm.net.true.List$prop.men.age.groups.table.WtoM
tree.trans.true.age.str.WtoM.prop.men.15.25.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[1,][1]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[2,][1]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.15.25 <- age.structure.transm.net.true.prop.men.WtoM[3,][1]
tree.trans.true.age.str.WtoM.prop.men.15.25.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[1,][2]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[2,][2]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.25.40 <- age.structure.transm.net.true.prop.men.WtoM[3,][2]
tree.trans.true.age.str.WtoM.prop.men.15.25.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[1,][3]
tree.trans.true.age.str.WtoM.prop.men.25.40.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[2,][3]
tree.trans.true.age.str.WtoM.prop.men.40.50.F.40.50 <- age.structure.transm.net.true.prop.men.WtoM[3,][3]
table.tree.trans.true.age.str.WtoM.prop.men <- c(tree.trans.true.age.str.WtoM.prop.men.15.25.F.15.25, tree.trans.true.age.str.WtoM.prop.men.25.40.F.15.25, tree.trans.true.age.str.WtoM.prop.men.40.50.F.15.25,
tree.trans.true.age.str.WtoM.prop.men.15.25.F.25.40, tree.trans.true.age.str.WtoM.prop.men.25.40.F.25.40, tree.trans.true.age.str.WtoM.prop.men.40.50.F.25.40,
tree.trans.true.age.str.WtoM.prop.men.15.25.F.40.50, tree.trans.true.age.str.WtoM.prop.men.25.40.F.40.50, tree.trans.true.age.str.WtoM.prop.men.40.50.F.40.50)
names(table.tree.trans.true.age.str.WtoM.prop.men) <- paste0("WtoM.", c("tree.trans.true.prop.men15.25.F.15.25", "tree.trans.true.prop.men25.40.F.15.25", "tree.trans.true.prop.men40.50.F.15.25",
"tree.trans.true.prop.men15.25.F.25.40", "tree.trans.true.prop.men25.40.F.25.40", "tree.trans.true.prop.men40.50.F.25.40",
"tree.trans.true.prop.men15.25.F.40.50", "tree.trans.true.prop.men25.40.F.40.50", "tree.trans.true.prop.men40.50.F.40.50"))
table.tree.trans.true.age.str.WtoM.prop.men <- NA.handle.fun(input = table.tree.trans.true.age.str.WtoM.prop.men)
# (vii) Womens' pairings proportions in aggregated table
age.structure.transm.clust.true.prop.women <- age.structure.transm.net.true.List$prop.women.age.groups.table
tree.trans.true.age.str.prop.women.15.25.M.15.25 <- age.structure.transm.clust.true.prop.women[1,][1]
tree.trans.true.age.str.prop.women.25.40.M.15.25 <- age.structure.transm.clust.true.prop.women[2,][1]
tree.trans.true.age.str.prop.women.40.50.M.15.25 <- age.structure.transm.clust.true.prop.women[3,][1]
tree.trans.true.age.str.prop.women.15.25.M.25.40 <- age.structure.transm.clust.true.prop.women[1,][2]
tree.trans.true.age.str.prop.women.25.40.M.25.40 <- age.structure.transm.clust.true.prop.women[2,][2]
tree.trans.true.age.str.prop.women.40.50.M.25.40 <- age.structure.transm.clust.true.prop.women[3,][2]
tree.trans.true.age.str.prop.women.15.25.M.40.50 <- age.structure.transm.clust.true.prop.women[1,][3]
tree.trans.true.age.str.prop.women.25.40.M.40.50 <- age.structure.transm.clust.true.prop.women[2,][3]
tree.trans.true.age.str.prop.women.40.50.M.40.50 <- age.structure.transm.clust.true.prop.women[3,][3]
table.tree.trans.true.age.str.prop.women <- c(tree.trans.true.age.str.prop.women.15.25.M.15.25, tree.trans.true.age.str.prop.women.25.40.M.15.25, tree.trans.true.age.str.prop.women.40.50.M.15.25,
tree.trans.true.age.str.prop.women.15.25.M.25.40, tree.trans.true.age.str.prop.women.25.40.M.25.40, tree.trans.true.age.str.prop.women.40.50.M.25.40,
tree.trans.true.age.str.prop.women.15.25.M.40.50, tree.trans.true.age.str.prop.women.25.40.M.40.50, tree.trans.true.age.str.prop.women.40.50.M.40.50)
names(table.tree.trans.true.age.str.prop.women) <- c("tree.trans.true.prop.women15.25.M.15.25", "tree.trans.true.prop.women25.40.M.15.25", "tree.trans.true.prop.women40.50.M.15.25",
"tree.trans.true.prop.women15.25.M.25.40", "tree.trans.true.prop.women25.40.M.25.40", "tree.trans.true.prop.women40.50.M.25.40",
"tree.trans.true.prop.women15.25.M.40.50", "tree.trans.true.prop.women25.40.M.40.50", "tree.trans.true.prop.women40.50.M.40.50")
table.tree.trans.true.age.str.prop.women <- NA.handle.fun(input = table.tree.trans.true.age.str.prop.women)
#
numbers.individuals.age.groups.net <- age.structure.transm.net.true.List$numbers.individuals.age.groups
mean.AD.age.groups.net <- age.structure.transm.net.true.List$mean.AD.age.groups
med.AD.age.groups.net <- age.structure.transm.net.true.List$med.AD.age.groups
sd.AD.age.groups.net <- age.structure.transm.net.true.List$sd.AD.age.groups
names(numbers.individuals.age.groups.net) <- paste0("tree.trans.", names(numbers.individuals.age.groups.net))
names(mean.AD.age.groups.net) <- paste0("tree.trans.", names(mean.AD.age.groups.net))
names(med.AD.age.groups.net) <- paste0("tree.trans.", names(med.AD.age.groups.net))
names(sd.AD.age.groups.net) <- paste0("tree.trans.", names(sd.AD.age.groups.net))
res3 <- c(table.tree.tra.age.str, table.tree.tra.age.str.MtoW, table.tree.tra.age.str.WtoM,
table.tree.trans.true.age.str.prop.men, table.tree.trans.true.age.str.MtoW.prop.men,
table.tree.trans.true.age.str.WtoM.prop.men, table.tree.trans.true.age.str.prop.women,
numbers.individuals.age.groups.net, mean.AD.age.groups.net, med.AD.age.groups.net, sd.AD.age.groups.net)
# Clusters statistics
mean.clust.size <- mean(clust.size)
median.clust.size <- median(clust.size)
sd.clust.size <- sd(clust.size)
clust.size.stat <- c(mean.clust.size, median.clust.size, sd.clust.size)
names(clust.size.stat) <- c("mean.cl.size", "med.cl.size", "sd.cl.size")
# Binding everuthing together
output.num.vec <- as.numeric(c(res1, res2, res3, mix.rels.transm.dat, clust.size.stat))
names.output.vec <- names(c(res1, res2, res3, mix.rels.transm.dat, clust.size.stat))
names(output.num.vec) <- names.output.vec
}else{
output.num.vec <- rep(NA, 198)
}
return(output.num.vec)
}
|
##' Plot a choropleth map
##'
##' Bins the values of \code{var} into discrete classes, colours each
##' country polygon by its class, and returns a ggplot2 map. Countries
##' present in the borders but absent from \code{data} are filled with
##' \code{missCol} and labelled \code{missLabel}.
##'
##' @param shpFile shapefile providing the country borders; if missing,
##'   the package's \code{GAULspatialPolygon} borders are used.
##' @param var character name of the column in \code{data} to plot.
##' @param data data frame containing \code{countryCode} and \code{var}.
##' @param countryCode name of the country-identifier column shared by
##'   the shapefile attributes and \code{data}.
##' @param n number of classes (ignored when \code{manualBreaks} given).
##' @param style classification style passed on to \code{map_breaks},
##'   e.g. "jenks".
##' @param manualBreaks optional numeric vector of class breaks.
##' @param col fill colours for the classes, from low to high.
##' @param missCol fill colour for countries with no data.
##' @param countryCodeTransp country codes to render fully transparent.
##' @param missLabel legend label for the missing-data class.
##' @param subset expression used to subset \code{data}; evaluated with
##'   non-standard evaluation, as in \code{subset()}.
##' @param scale multiplier applied to \code{var} before classification.
##' @param shpProj proj4 string describing the shapefile's projection.
##' @param outProj proj4 string of the projection used for display.
##' @return A ggplot object containing the map (returned visibly).
##' @export
plot_map = function (shpFile, var, data, countryCode = "FAOST_CODE",
                     n = 5, style = "jenks", manualBreaks,
                     col = c("#F5F5F5", "#C8E2DE", "#9CCFC7", "#70BCB0", "#44AA99"),
                     missCol = "#8B8878", countryCodeTransp = NULL,
                     missLabel = "No data available", subset = TRUE,
                     scale = 1, shpProj = "+proj=robin +ellps=WGS84",
                     outProj = "+proj=robin"){
    if(!missing(shpFile)){
        ## Projection and read shapefile: read the user-supplied borders in
        ## shpProj, then reproject them to outProj for display.
        llCRS = CRS(projargs = shpProj)
        projCRS = CRS(outProj)
        raw.sp = readShapePoly(shpFile, proj4string = llCRS)
        transformed.sp = spTransform(raw.sp, CRSobj = projCRS)
        ## fortify() flattens the polygons to a data frame; region =
        ## countryCode puts the country identifier in the "id" column.
        transformed.df = fortify(transformed.sp, region = countryCode)
        transformed.df$id = as.numeric(transformed.df$id)
    } else {
        ## No shapefile supplied: fall back to the bundled GAUL borders.
        transformed.sp = spTransform(GAULspatialPolygon,
            CRSobj = CRS(proj4string(GAULspatialPolygon)))
        transformed.df = fortify(transformed.sp, region = countryCode)
        transformed.df$id = as.numeric(transformed.df$id)
        cat("\nNOTE: GAUL border used as default\n")
    }
    ## Re-sequence the polygon drawing order; restored after the merge below.
    transformed.df$order = 1:NROW(transformed.df$order)
    ## Subset and scale data (NSE: capture the caller's subset expression
    ## unevaluated, then evaluate it inside subset.data.frame).
    subset = substitute(subset)
    sub_data = subset.data.frame(data, subset = eval(subset),
        select = c(countryCode, var))
    sub_data[, var] = sub_data[, var] * scale
    sub_data = unique(sub_data)
    ## determine the breaks of the legend and color
    if(missing(manualBreaks)){
        brks = map_breaks(sub_data[, var], n = n, style = style)
    } else {
        brks = manualBreaks
    }
    ## Class index as character; findInterval() returns 0 for values below
    ## the first break, and "0" is also used for missing data further down.
    sub_data$fillColor = as.character(findInterval(sub_data[, var],
        brks, rightmost.closed = TRUE))
    ## all = TRUE keeps countries without data so they can be shaded as missing.
    final.df = merge(sub_data, transformed.df, by.x = countryCode,
        by.y = "id", all = TRUE)
    final.df = arrange(final.df, order)
    final.df[is.na(final.df[, var]) & !final.df[, countryCode] %in% countryCodeTransp, "fillColor"] = "0"
    ## Match the colors and create the legend
    if(any(is.na(final.df[, var]) & !final.df[, countryCode] %in% countryCodeTransp)){
        ## Some countries lack data: prepend the missing-data colour/label.
        uVal = c(sort(unique(final.df$fillColor)))
        uCol = c(missCol, col[sort(as.numeric(unique(final.df$fillColor)))])
        uBrks = c(missLabel,
            formatC(brks[sort(as.numeric(unique(final.df$fillColor))) + 1],
                format = "fg", big.mark = " "))
        nBrks = length(uBrks)
        ## NOTE(review): endMar is only referenced by the commented-out
        ## legend code below, and uBrks is character here so the ">= 10"
        ## comparison is lexicographic -- confirm before reviving that code.
        endMar = rep(0, nBrks)
        endMar[3:(nBrks - 1)] = ifelse(uBrks[3:(nBrks - 1)] >= 10, 1, 0.01)
        legendLab = paste(c(uBrks[-nBrks]), c("", rep(" ~ < ", nBrks - 3), " ~ "),
            c("", uBrks[3:nBrks]), sep = "")
        ## ## Format the legend labels
        ## brkNames = c(missLabel, formatC(as.numeric(uBrks[-1]), format = "fg"))
        ## endVal = formatC(c(0, as.numeric(uBrks[-1]) - endMar[-1]),
        ##     format = "fg")
        ## legendLab = paste(c("", brkNames[-c(1, nBrks, nBrks + 1)], ""),
        ##     c(" < ", rep(" - ", nBrks - 2), " > "),
        ##     c(endVal[-c(1, nBrks + 1)], endVal[nBrks]), " (",
        ##     table(sub_data$fillColor), ")", sep = "")
    } else {
        ## Every country has data: legend holds only the value classes.
        uVal = sort(unique(final.df$fillColor))
        uCol = col[sort(as.numeric(unique(final.df$fillColor)))]
        uBrks = formatC(brks[c(sort(as.numeric(unique(final.df$fillColor))),
            length(brks))], format = "fg", big.mark = " ")
        nBrks = length(uBrks)
        endMar = rep(0, nBrks)
        endMar[3:(nBrks - 1)] = ifelse(uBrks[3:(nBrks - 1)] >= 10, 1, 0.01)
        legendLab = paste(c(uBrks[-nBrks]), c(rep(" ~ < ", nBrks - 2), " ~ "),
            c(uBrks[2:nBrks]), sep = "")
        # legendLab = paste(c(uBrks[-nBrks]), c("", rep(" ~ < ", nBrks - 3), " ~ "),
        #     c("", uBrks[3:nBrks]), sep = "")
        ## ## Format the legend labels
        ## brkNames = formatC(uBrks, format = "fg")
        ## endVal = formatC(uBrks - endMar, format = "fg")
        ## legendLab = paste(c("", brkNames[-c(1, nBrks, nBrks + 1)], ""),
        ##     c(" < ", rep(" - ", nBrks - 2), " > "),
        ##     c(endVal[-c(1, nBrks + 1)], endVal[nBrks]), " (",
        ##     table(sub_data$fillColor), ")", sep = "")
    }
    ## Countries listed in countryCodeTransp are drawn transparent,
    ## regardless of whether they carry data.
    if (!is.null(countryCodeTransp)) {
        final.df[final.df[, countryCode] %in% countryCodeTransp, "fillColor"] =
            "transparent"
    }
    ## Plot the map
    map = ggplot(data = final.df, aes(x = long, y = lat, group = group)) +
        geom_polygon(aes(fill = fillColor)) +
        geom_path(color = "grey50") +
        coord_equal() +
        theme(legend.position = "top", legend.direction = "horizontal",
              panel.background = element_blank(),
              plot.background = element_blank(),
              axis.text = element_blank(),
              axis.ticks = element_blank(),
              legend.title = element_blank(),
              plot.margin = unit(c(0, 0, 0, 0), "lines")) +
        xlab(NULL) + ylab(NULL)
    ## The transparent class needs its own (empty-labelled) legend entry.
    if (!is.null(countryCodeTransp)) {
        map = map + scale_fill_manual(labels = c(legendLab, ""),
                                      values = c(uCol, "transparent"),
                                      breaks = c(uVal, "transparent"))
    } else {
        map = map + scale_fill_manual(labels = legendLab,
                                      values = uCol,
                                      breaks = uVal)
    }
    map
}
## Declare symbols used via ggplot2 NSE / lazy data so R CMD check
## does not flag them as undefined globals.
utils::globalVariables(names = c("GAULspatialPolygon", "long", "lat", "group",
                                 "fillColor"))
| /Codes/plot_map.R | no_license | mkao006/FAOSYBpackage | R | false | false | 5,633 | r | ##'A function for plotting choropleth map
##' (Plot a choropleth map.)
##'
##' Bins the values of \code{var} into discrete classes, colours each
##' country polygon by its class, and returns a ggplot2 map. Countries
##' present in the borders but absent from \code{data} are filled with
##' \code{missCol} and labelled \code{missLabel}.
##'
##' @param shpFile shapefile providing the country borders; if missing,
##'   the package's \code{GAULspatialPolygon} borders are used.
##' @param var character name of the column in \code{data} to plot.
##' @param data data frame containing \code{countryCode} and \code{var}.
##' @param countryCode name of the country-identifier column shared by
##'   the shapefile attributes and \code{data}.
##' @param n number of classes (ignored when \code{manualBreaks} given).
##' @param style classification style passed on to \code{map_breaks},
##'   e.g. "jenks".
##' @param manualBreaks optional numeric vector of class breaks.
##' @param col fill colours for the classes, from low to high.
##' @param missCol fill colour for countries with no data.
##' @param countryCodeTransp country codes to render fully transparent.
##' @param missLabel legend label for the missing-data class.
##' @param subset expression used to subset \code{data}; evaluated with
##'   non-standard evaluation, as in \code{subset()}.
##' @param scale multiplier applied to \code{var} before classification.
##' @param shpProj proj4 string describing the shapefile's projection.
##' @param outProj proj4 string of the projection used for display.
##' @return A ggplot object containing the map (returned visibly).
##' @export
plot_map = function (shpFile, var, data, countryCode = "FAOST_CODE",
                     n = 5, style = "jenks", manualBreaks,
                     col = c("#F5F5F5", "#C8E2DE", "#9CCFC7", "#70BCB0", "#44AA99"),
                     missCol = "#8B8878", countryCodeTransp = NULL,
                     missLabel = "No data available", subset = TRUE,
                     scale = 1, shpProj = "+proj=robin +ellps=WGS84",
                     outProj = "+proj=robin"){
    if(!missing(shpFile)){
        ## Projection and read shapefile: read the user-supplied borders in
        ## shpProj, then reproject them to outProj for display.
        llCRS = CRS(projargs = shpProj)
        projCRS = CRS(outProj)
        raw.sp = readShapePoly(shpFile, proj4string = llCRS)
        transformed.sp = spTransform(raw.sp, CRSobj = projCRS)
        ## fortify() flattens the polygons to a data frame; region =
        ## countryCode puts the country identifier in the "id" column.
        transformed.df = fortify(transformed.sp, region = countryCode)
        transformed.df$id = as.numeric(transformed.df$id)
    } else {
        ## No shapefile supplied: fall back to the bundled GAUL borders.
        transformed.sp = spTransform(GAULspatialPolygon,
            CRSobj = CRS(proj4string(GAULspatialPolygon)))
        transformed.df = fortify(transformed.sp, region = countryCode)
        transformed.df$id = as.numeric(transformed.df$id)
        cat("\nNOTE: GAUL border used as default\n")
    }
    ## Re-sequence the polygon drawing order; restored after the merge below.
    transformed.df$order = 1:NROW(transformed.df$order)
    ## Subset and scale data (NSE: capture the caller's subset expression
    ## unevaluated, then evaluate it inside subset.data.frame).
    subset = substitute(subset)
    sub_data = subset.data.frame(data, subset = eval(subset),
        select = c(countryCode, var))
    sub_data[, var] = sub_data[, var] * scale
    sub_data = unique(sub_data)
    ## determine the breaks of the legend and color
    if(missing(manualBreaks)){
        brks = map_breaks(sub_data[, var], n = n, style = style)
    } else {
        brks = manualBreaks
    }
    ## Class index as character; findInterval() returns 0 for values below
    ## the first break, and "0" is also used for missing data further down.
    sub_data$fillColor = as.character(findInterval(sub_data[, var],
        brks, rightmost.closed = TRUE))
    ## all = TRUE keeps countries without data so they can be shaded as missing.
    final.df = merge(sub_data, transformed.df, by.x = countryCode,
        by.y = "id", all = TRUE)
    final.df = arrange(final.df, order)
    final.df[is.na(final.df[, var]) & !final.df[, countryCode] %in% countryCodeTransp, "fillColor"] = "0"
    ## Match the colors and create the legend
    if(any(is.na(final.df[, var]) & !final.df[, countryCode] %in% countryCodeTransp)){
        ## Some countries lack data: prepend the missing-data colour/label.
        uVal = c(sort(unique(final.df$fillColor)))
        uCol = c(missCol, col[sort(as.numeric(unique(final.df$fillColor)))])
        uBrks = c(missLabel,
            formatC(brks[sort(as.numeric(unique(final.df$fillColor))) + 1],
                format = "fg", big.mark = " "))
        nBrks = length(uBrks)
        ## NOTE(review): endMar is only referenced by the commented-out
        ## legend code below, and uBrks is character here so the ">= 10"
        ## comparison is lexicographic -- confirm before reviving that code.
        endMar = rep(0, nBrks)
        endMar[3:(nBrks - 1)] = ifelse(uBrks[3:(nBrks - 1)] >= 10, 1, 0.01)
        legendLab = paste(c(uBrks[-nBrks]), c("", rep(" ~ < ", nBrks - 3), " ~ "),
            c("", uBrks[3:nBrks]), sep = "")
        ## ## Format the legend labels
        ## brkNames = c(missLabel, formatC(as.numeric(uBrks[-1]), format = "fg"))
        ## endVal = formatC(c(0, as.numeric(uBrks[-1]) - endMar[-1]),
        ##     format = "fg")
        ## legendLab = paste(c("", brkNames[-c(1, nBrks, nBrks + 1)], ""),
        ##     c(" < ", rep(" - ", nBrks - 2), " > "),
        ##     c(endVal[-c(1, nBrks + 1)], endVal[nBrks]), " (",
        ##     table(sub_data$fillColor), ")", sep = "")
    } else {
        ## Every country has data: legend holds only the value classes.
        uVal = sort(unique(final.df$fillColor))
        uCol = col[sort(as.numeric(unique(final.df$fillColor)))]
        uBrks = formatC(brks[c(sort(as.numeric(unique(final.df$fillColor))),
            length(brks))], format = "fg", big.mark = " ")
        nBrks = length(uBrks)
        endMar = rep(0, nBrks)
        endMar[3:(nBrks - 1)] = ifelse(uBrks[3:(nBrks - 1)] >= 10, 1, 0.01)
        legendLab = paste(c(uBrks[-nBrks]), c(rep(" ~ < ", nBrks - 2), " ~ "),
            c(uBrks[2:nBrks]), sep = "")
        # legendLab = paste(c(uBrks[-nBrks]), c("", rep(" ~ < ", nBrks - 3), " ~ "),
        #     c("", uBrks[3:nBrks]), sep = "")
        ## ## Format the legend labels
        ## brkNames = formatC(uBrks, format = "fg")
        ## endVal = formatC(uBrks - endMar, format = "fg")
        ## legendLab = paste(c("", brkNames[-c(1, nBrks, nBrks + 1)], ""),
        ##     c(" < ", rep(" - ", nBrks - 2), " > "),
        ##     c(endVal[-c(1, nBrks + 1)], endVal[nBrks]), " (",
        ##     table(sub_data$fillColor), ")", sep = "")
    }
    ## Countries listed in countryCodeTransp are drawn transparent,
    ## regardless of whether they carry data.
    if (!is.null(countryCodeTransp)) {
        final.df[final.df[, countryCode] %in% countryCodeTransp, "fillColor"] =
            "transparent"
    }
    ## Plot the map
    map = ggplot(data = final.df, aes(x = long, y = lat, group = group)) +
        geom_polygon(aes(fill = fillColor)) +
        geom_path(color = "grey50") +
        coord_equal() +
        theme(legend.position = "top", legend.direction = "horizontal",
              panel.background = element_blank(),
              plot.background = element_blank(),
              axis.text = element_blank(),
              axis.ticks = element_blank(),
              legend.title = element_blank(),
              plot.margin = unit(c(0, 0, 0, 0), "lines")) +
        xlab(NULL) + ylab(NULL)
    ## The transparent class needs its own (empty-labelled) legend entry.
    if (!is.null(countryCodeTransp)) {
        map = map + scale_fill_manual(labels = c(legendLab, ""),
                                      values = c(uCol, "transparent"),
                                      breaks = c(uVal, "transparent"))
    } else {
        map = map + scale_fill_manual(labels = legendLab,
                                      values = uCol,
                                      breaks = uVal)
    }
    map
}
## Declare symbols used via ggplot2 NSE / lazy data so R CMD check
## does not flag them as undefined globals.
utils::globalVariables(names = c("GAULspatialPolygon", "long", "lat", "group",
                                 "fillColor"))
|
#' Evaluate input and return all details of evaluation.
#'
#' Unlike \code{\link{eval}}, \code{evaluate} captures everything needed to
#' recreate the output as if the code had been typed at an R console: printed
#' output, messages, warnings and errors, correctly interleaved in the order
#' in which they occurred. It also records each expression's value, whether
#' that value was visible, and the contents of the current graphics device.
#'
#' @export
#' @param input input object to be parsed an evaluated. Maybe a string,
#'   file connection or function.
#' @param envir environment in which to evaluate expressions
#' @param enclos when \code{envir} is a list or data frame, this is treated
#'   as the parent environment to \code{envir}.
#' @param debug if \code{TRUE}, displays information useful for debugging,
#'   including all output that evaluate captures
#' @param stop_on_error if \code{2}, evaluation will stop on first error and you
#'   will get no results back. If \code{1}, evaluation will stop on first error,
#'   but you will get back all results up to that point. If \code{0} will
#'   continue running all code, just as if you'd pasted the code into the
#'   command line.
#' @param keep_warning,keep_message whether to record warnings and messages
#' @param new_device if \code{TRUE}, will open a new graphics device and
#'   automatically close it after completion. This prevents evaluation from
#'   interfering with your existing graphics environment.
#' @param output_handler an instance of \code{\link{output_handler}}
#'   that processes the output from the evaluation. The default simply
#'   prints the visible return values.
#' @import stringr
evaluate <- function(input, envir = parent.frame(), enclos = NULL, debug = FALSE,
                     stop_on_error = 0L, keep_warning = TRUE, keep_message = TRUE,
                     new_device = TRUE, output_handler = new_output_handler()) {
  # Split the input into top-level expressions paired with their source text.
  chunks <- parse_all(input)

  stop_on_error <- as.integer(stop_on_error)
  stopifnot(length(stop_on_error) == 1)

  # A list/pairlist environment cannot carry its own enclosure, so default
  # to the caller's frame; otherwise base environment semantics apply.
  if (is.null(enclos)) {
    if (is.list(envir) || is.pairlist(envir)) {
      enclos <- parent.frame()
    } else {
      enclos <- baseenv()
    }
  }

  if (new_device) {
    # Draw on a throwaway device so the caller's graphics state is left
    # untouched; the device is closed again when evaluate() returns.
    dev.new()
    dev.control(displaylist = "enable")
    device_id <- dev.cur()
    on.exit(dev.off(device_id))
  }

  n_chunks <- nrow(chunks)
  results <- vector("list", n_chunks)
  for (idx in seq_len(n_chunks)) {
    ast <- chunks$expr[[idx]]
    if (!is.null(ast)) {
      ast <- as.expression(ast)
    }
    # use_try = FALSE when stop_on_error == 2 lets errors propagate
    # immediately, discarding all results.
    results[[idx]] <- evaluate_call(
      ast, chunks$src[[idx]],
      envir = envir, enclos = enclos, debug = debug,
      last = idx == n_chunks,
      use_try = stop_on_error != 2L,
      keep_warning = keep_warning, keep_message = keep_message,
      output_handler = output_handler)

    # stop_on_error == 1: keep everything captured so far, but go no further
    # once any piece of this chunk's output is an error.
    if (stop_on_error > 0L) {
      saw_error <- vapply(results[[idx]], is.error, logical(1))
      if (any(saw_error) && stop_on_error == 1L) break
    }
  }

  unlist(results, recursive = FALSE, use.names = FALSE)
}
# TRUE unless `x` carries the sentinel class "___no_output___", which the
# output handler machinery uses to flag "nothing to record".
has_output <- function(x) {
  suppressed <- inherits(x, "___no_output___")
  !suppressed
}
# Evaluate one parsed expression, capturing everything it emits.
#
# Args:
#   call: the expression to evaluate, or NULL for a source-only chunk.
#   src: the source text corresponding to `call`.
#   envir, enclos: evaluation environment and its enclosure.
#   debug: if TRUE, echo the source via message() before evaluating.
#   last: TRUE for the final chunk; forces capture of an incomplete plot.
#   use_try: wrap evaluation in try() so errors are recorded, not thrown.
#   keep_warning, keep_message: whether to record those conditions.
#   output_handler: callbacks invoked as each piece of output is observed.
#
# Returns: a list interleaving the source, text output, graphics,
# conditions, and (when visible) the handled value, in the order produced.
evaluate_call <- function(call, src = NULL,
                          envir = parent.frame(), enclos = NULL,
                          debug = FALSE, last = FALSE, use_try = FALSE,
                          keep_warning = TRUE, keep_message = TRUE,
                          output_handler = new_output_handler()) {
  if (debug) message(src)

  # A NULL call means the chunk had no evaluable expression (e.g. only
  # comments): record just the source.
  if (is.null(call)) {
    return(list(new_source(src)))
  }
  stopifnot(is.call(call) || is.language(call) || is.atomic(call))

  # Capture output; w watches the text/graphics streams until closed on exit.
  w <- watchout(debug)
  on.exit(w$close())

  source <- new_source(src)
  output_handler$source(source)
  output <- list(source)

  # Drain any new text/graphics from the watcher into `output`, notifying
  # the handler callbacks as each piece arrives. Uses <<- because this
  # closure mutates the enclosing `output` accumulator.
  handle_output <- function(plot = FALSE, incomplete_plots = FALSE) {
    out <- w$get_new(plot, incomplete_plots)
    if (!is.null(out$text))
      output_handler$text(out$text)
    if (!is.null(out$graphics))
      output_handler$graphics(out$graphics)
    output <<- c(output, out)
  }

  # Hooks to capture plot creation: snapshot the current plot just before a
  # new one would replace it.
  capture_plot <- function() {
    handle_output(TRUE)
  }
  old_hooks <- set_hooks(list(
    persp = capture_plot,
    before.plot.new = capture_plot,
    before.grid.newpage = capture_plot))
  # Restore the caller's hooks even if evaluation fails.
  on.exit(set_hooks(old_hooks, "replace"), add = TRUE)

  # Record a condition (warning/error/message), flushing pending output
  # first so ordering is preserved.
  handle_condition <- function(cond) {
    handle_output()
    output <<- c(output, list(cond))
  }

  # Pass a visible value through the user's value handler; errors raised by
  # the handler itself are re-signalled with context rather than swallowed.
  handle_value <- function(val)
  {
    hval <- tryCatch(output_handler$value(val), error = function(e) e)
    if(inherits(hval, "error"))
      stop("Error in value handler within evaluate call:", hval$message)
    #catch any errors, warnings, or graphics generated during the call
    #to the value handler
    handle_output(TRUE)
    if(has_output(hval))
      output <<- c(output, list(hval))
  }

  # Handlers for warnings, errors and messages; `identity` disables capture
  # so the condition propagates as usual.
  wHandler <- if (keep_warning) function(wn) {
    handle_condition(wn)
    output_handler$warning(wn)
    invokeRestart("muffleWarning")
  } else identity
  eHandler <- if (use_try) function(e) {
    handle_condition(e)
    output_handler$error(e)
  } else identity
  mHandler <- if (keep_message) function(m) {
    handle_condition(m)
    output_handler$message(m)
    invokeRestart("muffleMessage")
  } else identity

  ev <- list(value = NULL, visible = FALSE)

  # With use_try, evaluation errors are contained (recorded by eHandler
  # above); otherwise they propagate to the caller.
  if (use_try) {
    handle <- function(f) try(f, silent = TRUE)
  } else {
    handle <- force
  }

  handle(ev <- withCallingHandlers(
    withVisible(eval(call, envir, enclos)),
    warning = wHandler, error = eHandler, message = mHandler))
  handle_output(TRUE)

  # If visible, process the value itself as output via value_handler
  if (ev$visible) {
    handle(withCallingHandlers(handle_value(ev$value),
      warning = wHandler, error = eHandler, message = mHandler))
  }

  # Always capture last plot, even if incomplete
  if (last) {
    handle_output(TRUE, TRUE)
  }

  output
}
| /R/eval.r | no_license | gmbecker/evaluate | R | false | false | 5,891 | r | #' Evaluate input and return all details of evaluation.
#'
#' Unlike \code{\link{eval}}, \code{evaluate} captures everything needed to
#' recreate the output as if the code had been typed at an R console: printed
#' output, messages, warnings and errors, correctly interleaved in the order
#' in which they occurred. It also records each expression's value, whether
#' that value was visible, and the contents of the current graphics device.
#'
#' @export
#' @param input input object to be parsed an evaluated. Maybe a string,
#'   file connection or function.
#' @param envir environment in which to evaluate expressions
#' @param enclos when \code{envir} is a list or data frame, this is treated
#'   as the parent environment to \code{envir}.
#' @param debug if \code{TRUE}, displays information useful for debugging,
#'   including all output that evaluate captures
#' @param stop_on_error if \code{2}, evaluation will stop on first error and you
#'   will get no results back. If \code{1}, evaluation will stop on first error,
#'   but you will get back all results up to that point. If \code{0} will
#'   continue running all code, just as if you'd pasted the code into the
#'   command line.
#' @param keep_warning,keep_message whether to record warnings and messages
#' @param new_device if \code{TRUE}, will open a new graphics device and
#'   automatically close it after completion. This prevents evaluation from
#'   interfering with your existing graphics environment.
#' @param output_handler an instance of \code{\link{output_handler}}
#'   that processes the output from the evaluation. The default simply
#'   prints the visible return values.
#' @import stringr
evaluate <- function(input, envir = parent.frame(), enclos = NULL, debug = FALSE,
                     stop_on_error = 0L, keep_warning = TRUE, keep_message = TRUE,
                     new_device = TRUE, output_handler = new_output_handler()) {
  # Split the input into top-level expressions paired with their source text.
  chunks <- parse_all(input)

  stop_on_error <- as.integer(stop_on_error)
  stopifnot(length(stop_on_error) == 1)

  # A list/pairlist environment cannot carry its own enclosure, so default
  # to the caller's frame; otherwise base environment semantics apply.
  if (is.null(enclos)) {
    if (is.list(envir) || is.pairlist(envir)) {
      enclos <- parent.frame()
    } else {
      enclos <- baseenv()
    }
  }

  if (new_device) {
    # Draw on a throwaway device so the caller's graphics state is left
    # untouched; the device is closed again when evaluate() returns.
    dev.new()
    dev.control(displaylist = "enable")
    device_id <- dev.cur()
    on.exit(dev.off(device_id))
  }

  n_chunks <- nrow(chunks)
  results <- vector("list", n_chunks)
  for (idx in seq_len(n_chunks)) {
    ast <- chunks$expr[[idx]]
    if (!is.null(ast)) {
      ast <- as.expression(ast)
    }
    # use_try = FALSE when stop_on_error == 2 lets errors propagate
    # immediately, discarding all results.
    results[[idx]] <- evaluate_call(
      ast, chunks$src[[idx]],
      envir = envir, enclos = enclos, debug = debug,
      last = idx == n_chunks,
      use_try = stop_on_error != 2L,
      keep_warning = keep_warning, keep_message = keep_message,
      output_handler = output_handler)

    # stop_on_error == 1: keep everything captured so far, but go no further
    # once any piece of this chunk's output is an error.
    if (stop_on_error > 0L) {
      saw_error <- vapply(results[[idx]], is.error, logical(1))
      if (any(saw_error) && stop_on_error == 1L) break
    }
  }

  unlist(results, recursive = FALSE, use.names = FALSE)
}
# TRUE unless `x` carries the sentinel class "___no_output___", which the
# output handler machinery uses to flag "nothing to record".
has_output <- function(x) {
  suppressed <- inherits(x, "___no_output___")
  !suppressed
}
# Evaluate one parsed expression, capturing everything it emits.
#
# Args:
#   call: the expression to evaluate, or NULL for a source-only chunk.
#   src: the source text corresponding to `call`.
#   envir, enclos: evaluation environment and its enclosure.
#   debug: if TRUE, echo the source via message() before evaluating.
#   last: TRUE for the final chunk; forces capture of an incomplete plot.
#   use_try: wrap evaluation in try() so errors are recorded, not thrown.
#   keep_warning, keep_message: whether to record those conditions.
#   output_handler: callbacks invoked as each piece of output is observed.
#
# Returns: a list interleaving the source, text output, graphics,
# conditions, and (when visible) the handled value, in the order produced.
evaluate_call <- function(call, src = NULL,
                          envir = parent.frame(), enclos = NULL,
                          debug = FALSE, last = FALSE, use_try = FALSE,
                          keep_warning = TRUE, keep_message = TRUE,
                          output_handler = new_output_handler()) {
  if (debug) message(src)

  # A NULL call means the chunk had no evaluable expression (e.g. only
  # comments): record just the source.
  if (is.null(call)) {
    return(list(new_source(src)))
  }
  stopifnot(is.call(call) || is.language(call) || is.atomic(call))

  # Capture output; w watches the text/graphics streams until closed on exit.
  w <- watchout(debug)
  on.exit(w$close())

  source <- new_source(src)
  output_handler$source(source)
  output <- list(source)

  # Drain any new text/graphics from the watcher into `output`, notifying
  # the handler callbacks as each piece arrives. Uses <<- because this
  # closure mutates the enclosing `output` accumulator.
  handle_output <- function(plot = FALSE, incomplete_plots = FALSE) {
    out <- w$get_new(plot, incomplete_plots)
    if (!is.null(out$text))
      output_handler$text(out$text)
    if (!is.null(out$graphics))
      output_handler$graphics(out$graphics)
    output <<- c(output, out)
  }

  # Hooks to capture plot creation: snapshot the current plot just before a
  # new one would replace it.
  capture_plot <- function() {
    handle_output(TRUE)
  }
  old_hooks <- set_hooks(list(
    persp = capture_plot,
    before.plot.new = capture_plot,
    before.grid.newpage = capture_plot))
  # Restore the caller's hooks even if evaluation fails.
  on.exit(set_hooks(old_hooks, "replace"), add = TRUE)

  # Record a condition (warning/error/message), flushing pending output
  # first so ordering is preserved.
  handle_condition <- function(cond) {
    handle_output()
    output <<- c(output, list(cond))
  }

  # Pass a visible value through the user's value handler; errors raised by
  # the handler itself are re-signalled with context rather than swallowed.
  handle_value <- function(val)
  {
    hval <- tryCatch(output_handler$value(val), error = function(e) e)
    if(inherits(hval, "error"))
      stop("Error in value handler within evaluate call:", hval$message)
    #catch any errors, warnings, or graphics generated during the call
    #to the value handler
    handle_output(TRUE)
    if(has_output(hval))
      output <<- c(output, list(hval))
  }

  # Handlers for warnings, errors and messages; `identity` disables capture
  # so the condition propagates as usual.
  wHandler <- if (keep_warning) function(wn) {
    handle_condition(wn)
    output_handler$warning(wn)
    invokeRestart("muffleWarning")
  } else identity
  eHandler <- if (use_try) function(e) {
    handle_condition(e)
    output_handler$error(e)
  } else identity
  mHandler <- if (keep_message) function(m) {
    handle_condition(m)
    output_handler$message(m)
    invokeRestart("muffleMessage")
  } else identity

  ev <- list(value = NULL, visible = FALSE)

  # With use_try, evaluation errors are contained (recorded by eHandler
  # above); otherwise they propagate to the caller.
  if (use_try) {
    handle <- function(f) try(f, silent = TRUE)
  } else {
    handle <- force
  }

  handle(ev <- withCallingHandlers(
    withVisible(eval(call, envir, enclos)),
    warning = wHandler, error = eHandler, message = mHandler))
  handle_output(TRUE)

  # If visible, process the value itself as output via value_handler
  if (ev$visible) {
    handle(withCallingHandlers(handle_value(ev$value),
      warning = wHandler, error = eHandler, message = mHandler))
  }

  # Always capture last plot, even if incomplete
  if (last) {
    handle_output(TRUE, TRUE)
  }

  output
}
|
\name{sdfFGN}
\alias{sdfFGN}
\title{
Spectral density function for FGN
}
\description{
Computes the spectral density function for the FGN model with parameter H
at the Fourier frequencies, 2*Pi*j/n, j=1,...,[n/2],
where n is the length of the time series.
The evaluation is very fast since bivariate interpolation is used.
}
\usage{
sdfFGN(H, n)
}
\arguments{
\item{H}{
FGN parameter
}
\item{n}{
length of time series
}
}
\details{
The details of the implementation are discussed in the accompanying vignette.
}
\value{
a vector of length [n/2] of the spectral density values.
}
\author{
A. I. McLeod and J. Veenstra
}
\seealso{
\code{\link{sdfFD}}
}
\examples{
sdfFGN(0.7, 100)
}
\keyword{ ts }
| /man/sdfFGN.Rd | no_license | cran/FGN | R | false | false | 752 | rd | \name{sdfFGN}
\alias{sdfFGN}
\title{
Spectral density function for FGN
}
\description{
Computes the spectral density function for the FGN model with parameter H
at the Fourier frequencies, 2*Pi*j/n, j=1,...,[n/2],
where n is the length of the time series.
The evaluation is very fast since bivariate interpolation is used.
}
\usage{
sdfFGN(H, n)
}
\arguments{
\item{H}{
FGN parameter
}
\item{n}{
length of time series
}
}
\details{
The details of the implementation are discussed in the accompanying vignette.
}
\value{
a vector of length [n/2] of the spectral density values.
}
\author{
A. I. McLeod and J. Veenstra
}
\seealso{
\code{\link{sdfFD}}
}
\examples{
sdfFGN(0.7, 100)
}
\keyword{ ts }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mnist-data.R
\docType{data}
\name{mnist.test.x}
\alias{mnist.test.x}
\title{MNIST handwritten digit data (test set features)}
\format{Test-set feature data (the "x" part) of the MNIST handwritten digit database.}
\usage{
data(mnist.test.x)
}
\description{
Test-set portion of the MNIST database of handwritten digits. The
companion datasets \code{mnist.test.y}, \code{mnist.train.x} and
\code{mnist.train.y} are loaded alongside it in the examples.
}
\examples{
data(mnist.test.x)
data(mnist.test.y)
data(mnist.train.x)
data(mnist.train.y)
}
\references{
Yann LeCun, Corinna Cortes, Christopher J.C. Burges,
THE MNIST DATABASE of handwritten digits
([MNIST](http://yann.lecun.com/exdb/mnist/))
}
\keyword{datasets}
| /man/mnist.test.x.Rd | no_license | chansigit/scFly | R | false | true | 771 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mnist-data.R
\docType{data}
\name{mnist.test.x}
\alias{mnist.test.x}
\title{MNIST handwritten digit data (test set features)}
\format{Test-set feature data (the "x" part) of the MNIST handwritten digit database.}
\usage{
data(mnist.test.x)
}
\description{
Test-set portion of the MNIST database of handwritten digits. The
companion datasets \code{mnist.test.y}, \code{mnist.train.x} and
\code{mnist.train.y} are loaded alongside it in the examples.
}
\examples{
data(mnist.test.x)
data(mnist.test.y)
data(mnist.train.x)
data(mnist.train.y)
}
\references{
Yann LeCun, Corinna Cortes, Christopher J.C. Burges,
THE MNIST DATABASE of handwritten digits
([MNIST](http://yann.lecun.com/exdb/mnist/))
}
\keyword{datasets}
|
# Exploratory plot 3: PM2.5 emissions for Baltimore City (fips "24510"),
# 1999-2008, faceted by source type. Writes plot3.png; the device is
# closed by the dev.off() call that follows this block.
# Load the NEI & SCC data frames.
NEI <- readRDS("summarySCC_PM25.rds")
# SCC is loaded for completeness; it is not referenced below.
SCC <- readRDS("Source_Classification_Code.rds")
# Subset NEI data by Baltimore's fips code.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Yearly emission totals for Baltimore.
# NOTE(review): aggTotalsBaltimore is computed but never used in this script.
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
# Open the PNG device before building the plot.
png("plot3.png",width=480,height=480,units="px",bg="transparent")
library(ggplot2)
# One bar per year, one facet per source type.
# NOTE(review): guides(fill=FALSE) is deprecated in recent ggplot2; the
# modern spelling is guides(fill = "none") — confirm against the ggplot2
# version in use.
ggp <- ggplot(baltimoreNEI,aes(factor(year),Emissions,fill=type)) +
  geom_bar(stat="identity") +
  theme_bw() + guides(fill=FALSE)+
  facet_grid(.~type,scales = "free",space="free") +
  labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
  labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(ggp)
dev.off() | /plot3.R | no_license | dhrubajyoti-mishra/Exploratory-Data-Analysis-Assignment | R | false | false | 807 | r |
# Load the NEI & SCC data frames.
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Subset NEI data by Baltimore's fip.
baltimoreNEI <- NEI[NEI$fips=="24510",]
# Aggregate using sum the Baltimore emissions data by year
aggTotalsBaltimore <- aggregate(Emissions ~ year, baltimoreNEI,sum)
png("plot3.png",width=480,height=480,units="px",bg="transparent")
library(ggplot2)
ggp <- ggplot(baltimoreNEI,aes(factor(year),Emissions,fill=type)) +
geom_bar(stat="identity") +
theme_bw() + guides(fill=FALSE)+
facet_grid(.~type,scales = "free",space="free") +
labs(x="year", y=expression("Total PM"[2.5]*" Emission (Tons)")) +
labs(title=expression("PM"[2.5]*" Emissions, Baltimore City 1999-2008 by Source Type"))
print(ggp)
dev.off() |
testlist <- list(b = c(-2848394305499268608, -7.87284991918862e+274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Invoke the internal metacoder centroid routine on the auto-generated
# (fuzzed) argument list built above; the result is inspected with str().
result <- do.call(metacoder:::centroid,testlist)
str(result) | /metacoder/inst/testfiles/centroid/AFL_centroid/centroid_valgrind_files/1615765853-test.R | permissive | akhikolla/updatedatatype-list3 | R | false | false | 399 | r | testlist <- list(b = c(-2848394305499268608, -7.87284991918862e+274, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
result <- do.call(metacoder:::centroid,testlist)
str(result) |
#' @title Read NIfTI file reoriented to RPI
#'
#' @description Forces the image into RPI orientation with
#' \code{\link{rpi_orient_file}} and then reads the reoriented file with
#' \code{\link{readnii}}.
#' @param file file name of the NIfTI file.
#' @param ... Arguments passed on to \code{\link{readnii}}. Supplying
#' \code{fname} here is an error, because it is set internally.
#' @param verbose print diagnostics, passed to \code{\link{rpi_orient_file}}
#' @export
#' @examples
#' if (have.fsl()){
#'   print(fsl_version())
#'   in_ci <- function() {
#'     nzchar(Sys.getenv("CI"))
#'   }
#'   if (in_ci()) {
#'     destfile = tempfile(fileext = ".nii.gz")
#'     url = paste0("https://ndownloader.figshare.com/",
#'                  "files/18068546")
#'     old_url = paste0("https://github.com/muschellij2/",
#'                      "Neurohacking/files/3454385/113-01-MPRAGE2.nii.gz")
#'     dl = tryCatch(download.file(url,
#'                                 destfile = destfile))
#'     if (inherits(dl, "try-error") || dl != 0) {
#'       dl = download.file(old_url, destfile = destfile)
#'     }
#'     res = readrpi(destfile)
#'   }
#' }
readrpi <- function(file, ..., verbose = TRUE) {
  # fname is derived from the reoriented file, so it must not come in
  # through the dots.
  dots <- list(...)
  if ("fname" %in% names(dots)) {
    stop("fname cannot be specified in readrpi!")
  }
  reoriented <- rpi_orient_file(file = file, verbose = verbose)
  readnii(fname = reoriented$img, ...)
}
#'
#' @description This function calls the \code{\link{readnii}} function after
#' calling \code{\link{rpi_orient_file}} to force RPI orientation.
#' @param file file name of the NIfTI file.
#' @param ... Arguments to pass to \code{\link{readnii}}
#' @param verbose print diagnostics, passed to \code{\link{rpi_orient_file}}
#' @export
#' @examples
#' if (have.fsl()){
#' print(fsl_version())
#' in_ci <- function() {
#' nzchar(Sys.getenv("CI"))
#' }
#' if (in_ci()) {
#' destfile = tempfile(fileext = ".nii.gz")
#' url = paste0("https://ndownloader.figshare.com/",
#' "files/18068546")
#' old_url = paste0("https://github.com/muschellij2/",
#' "Neurohacking/files/3454385/113-01-MPRAGE2.nii.gz")
#' dl = tryCatch(download.file(url,
#' destfile = destfile))
#' if (inherits(dl, "try-error") || dl != 0) {
#' dl = download.file(old_url, destfile = destfile)
#' }
#' res = readrpi(destfile)
#' }
#' }
readrpi <- function(file, ..., verbose = TRUE) {
  # fname is derived from the reoriented file, so it must not come in
  # through the dots.
  dots <- list(...)
  if ("fname" %in% names(dots)) {
    stop("fname cannot be specified in readrpi!")
  }
  reoriented <- rpi_orient_file(file = file, verbose = verbose)
  readnii(fname = reoriented$img, ...)
}
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/calpost_get_concentrations_from_time_series_file.R
\name{calpost_get_concentrations_from_time_series_file}
\alias{calpost_get_concentrations_from_time_series_file}
\title{Create a data frame of discrete receptor concentrations}
\usage{
calpost_get_concentrations_from_time_series_file(time_series_file = NULL,
location_name, source_id, pollutant_id, create_hourly_CSV = TRUE,
create_hourly_rda = TRUE, return_large_df = FALSE,
resume_from_set_hour = NULL, autoresume_processing = TRUE,
autoresume_year = NULL)
}
\arguments{
\item{time_series_file}{a path to a binary time series data file that was generated by CALPOST with time series options set.}
\item{location_name}{the name of the location in which the receptors reside.}
\item{source_id}{the ID value for the source emissions.}
\item{pollutant_id}{the ID value for the emitted pollutant.}
\item{create_hourly_CSV}{an option to create hourly CSV files describing pollutant concentrations at every receptor.}
\item{create_hourly_rda}{an option to create hourly R data (.rda) files describing pollutant concentrations at every receptor.}
\item{return_large_df}{an option to invisibly return a large data frame object.}
\item{resume_from_set_hour}{an option to resume processing of concentrations from a set hour of the year.}
\item{autoresume_processing}{}
\item{autoresume_year}{}
}
\description{
Create a data frame of discrete receptor concentrations using a CALPOST time series output file.
}
| /man/calpost_get_concentrations_from_time_series_file.Rd | permissive | yosukefk/PuffR | R | false | false | 1,556 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/calpost_get_concentrations_from_time_series_file.R
\name{calpost_get_concentrations_from_time_series_file}
\alias{calpost_get_concentrations_from_time_series_file}
\title{Create a data frame of discrete receptor concentrations}
\usage{
calpost_get_concentrations_from_time_series_file(time_series_file = NULL,
location_name, source_id, pollutant_id, create_hourly_CSV = TRUE,
create_hourly_rda = TRUE, return_large_df = FALSE,
resume_from_set_hour = NULL, autoresume_processing = TRUE,
autoresume_year = NULL)
}
\arguments{
\item{time_series_file}{a path to a binary time series data file that was generated by CALPOST with time series options set.}
\item{location_name}{the name of the location in which the receptors reside.}
\item{source_id}{the ID value for the source emissions.}
\item{pollutant_id}{the ID value for the emitted pollutant.}
\item{create_hourly_CSV}{an option to create hourly CSV files describing pollutant concentrations at every receptor.}
\item{create_hourly_rda}{an option to create hourly R data (.rda) files describing pollutant concentrations at every receptor.}
\item{return_large_df}{an option to invisibly return a large data frame object.}
\item{resume_from_set_hour}{an option to resume processing of concentrations from a set hour of the year.}
\item{autoresume_processing}{}
\item{autoresume_year}{}
}
\description{
Create a data frame of discrete receptor concentrations using a CALPOST time series output file.
}
|
# Assignment script: suicide statistics (master.csv) explored with ggplot2.
# Each "Figure" section aggregates the data and writes one PNG file.
setwd ("C:/Users/szabo/OneDrive/Dokumentumok/Károli 2020-21-2/R/RStudo/Beadandó")
data <- read.csv ("master.csv")
data
# NOTE(review): "ď.ż" looks like a mangled UTF-8 BOM prefix on the first
# column name (commonly read in as "ï..country") — verify against
# names(data) after read.csv.
data$country <- data$ď.żcountry
library(ggplot2)
# Figure 1: mean suicides per 100k, by country and year.
# NOTE(review): `idosor` is computed but never used below.
idosor <- aggregate (suicides.100k.pop ~ country.year,data=data, mean, na.rm=T)
idosor_alt <- aggregate(suicides.100k.pop ~ country+year, data=data, mean, na.rm=T)
idosor_alt
png(file="idosor.png", width = 2048, height = 768)
ggplot(idosor_alt, aes(x = year, y = suicides.100k.pop, colour = country)) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Figure 2: yearly time series for men, by country.
idosor_nemek <- aggregate ( suicides.100k.pop ~ country+year+sex, data = data, mean, na.rm=T)
idosor_nemek
png (file="idosor_ffi.png", width = 2048, height = 768)
ggplot(idosor_nemek[idosor_nemek$sex == "male",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor férfiak") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Figure 3: yearly time series for women, by country.
png (file="idosor_no.png", width = 2048, height = 768)
ggplot(idosor_nemek[idosor_nemek$sex == "female",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor nők") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Figure 4: yearly time series, by generation.
idosor_gen <- aggregate(suicides.100k.pop ~ generation+year, data=data, mean, na.rm=T)
png(file = "idosor_gen.png", width = 2048, height = 768)
ggplot(idosor_gen, aes(x = year, y= suicides.100k.pop, colour = generation)) + geom_line() + scale_color_discrete(name="generációk") + ggtitle("Idősor generációk") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Figure 5: the six countries with the highest mean suicide rate, by year.
idosor_orszag_ongyilk <- aggregate(suicides.100k.pop ~ country, data = data, mean, na.rm=T)
idosor_orszag_ongyilk
orsz_sorb <- idosor_orszag_ongyilk[order(-idosor_orszag_ongyilk$suicides.100k.pop),]
orsz_sorb
hatorszag <- orsz_sorb$country[1:6]
hatorszag
hat_teljes <- idosor_alt[idosor_alt$country %in% hatorszag,]
hat_teljes
png(file="6orszag_evek.png", width=2048, height = 768)
ggplot(hat_teljes, aes(fill = factor(year), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge") + scale_fill_discrete(name = "év") + ggtitle("6 ország év") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Figure 6: the same six countries, broken down by generation.
idosor_gen_orszag <- aggregate(suicides.100k.pop ~ country+generation, data=data, mean, na.rm=T)
hat_teljes2 <- idosor_gen_orszag[idosor_gen_orszag$country %in% hatorszag,]
hat_teljes2
png(file="6orszag_gen.png", width = 2048, height = 768)
ggplot(hat_teljes2, aes(fill = factor(generation), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge")+ scale_fill_discrete(name="generáció") + ggtitle("6 ország generáció") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő")
dev.off()
# Combined figure: re-create the six plots without legends and arrange
# them in a 2x3 grid.
idosor_ffi_plot <- ggplot(idosor_nemek[idosor_nemek$sex == "male",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor férfiak") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_gen_plot <- ggplot(idosor_gen, aes(x = year, y= suicides.100k.pop, colour = generation)) + geom_line() + scale_color_discrete(name="generációk") + ggtitle("Idősor generációk") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_no_plot <- ggplot(idosor_nemek[idosor_nemek$sex == "female",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor nők") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
hatorszag_evek_plot <- ggplot(hat_teljes, aes(fill = factor(year), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge") + scale_fill_discrete(name = "év") + ggtitle("6 ország év") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
hatorszag_gen_plot <- ggplot(hat_teljes2, aes(fill = factor(generation), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge")+ scale_fill_discrete(name="generáció") + ggtitle("6 ország generáció") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_plot <- ggplot(idosor_alt, aes(x = year, y = suicides.100k.pop, colour = country)) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
# NOTE(review): install.packages() should not run inside an analysis
# script — install gridExtra once, interactively, instead.
install.packages("gridExtra")
library("gridExtra")
png(file = "osszes.png", width = 2048, height = 768)
grid.arrange(idosor_ffi_plot,idosor_gen_plot,idosor_no_plot,idosor_plot,hatorszag_evek_plot,hatorszag_gen_plot, ncol=2, nrow=3, top = "Beadanó összes")
dev.off()
# Interpretation:
## By generation, the youngest cohorts appear least prone to suicide, and
## the rate is clearly highest among those over 75 (G.I. Generation).
## For the six countries with the highest suicide rates, a roughly
## declining trend in the number of suicides is visible since about 2000.
## In Hungary, the suicide rate of the over-75 generation stands out
## strongly compared with the other generations.
| /megoldas.R | no_license | szabonelly/R_ongyi | R | false | false | 5,772 | r | setwd ("C:/Users/szabo/OneDrive/Dokumentumok/Károli 2020-21-2/R/RStudo/Beadandó")
data <- read.csv ("master.csv")
data
data$country <- data$ď.żcountry
library(ggplot2)
#1.abra
idosor <- aggregate (suicides.100k.pop ~ country.year,data=data, mean, na.rm=T)
idosor_alt <- aggregate(suicides.100k.pop ~ country+year, data=data, mean, na.rm=T)
idosor_alt
png(file="idosor.png", width = 2048, height = 768)
ggplot(idosor_alt, aes(x = year, y = suicides.100k.pop, colour = country)) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#2.abra
idosor_nemek <- aggregate ( suicides.100k.pop ~ country+year+sex, data = data, mean, na.rm=T)
idosor_nemek
png (file="idosor_ffi.png", width = 2048, height = 768)
ggplot(idosor_nemek[idosor_nemek$sex == "male",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor férfiak") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#3.abra
png (file="idosor_no.png", width = 2048, height = 768)
ggplot(idosor_nemek[idosor_nemek$sex == "female",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor nők") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#4.abra
idosor_gen <- aggregate(suicides.100k.pop ~ generation+year, data=data, mean, na.rm=T)
png(file = "idosor_gen.png", width = 2048, height = 768)
ggplot(idosor_gen, aes(x = year, y= suicides.100k.pop, colour = generation)) + geom_line() + scale_color_discrete(name="generációk") + ggtitle("Idősor generációk") + xlab ("év") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#5.abra
idosor_orszag_ongyilk <- aggregate(suicides.100k.pop ~ country, data = data, mean, na.rm=T)
idosor_orszag_ongyilk
orsz_sorb <- idosor_orszag_ongyilk[order(-idosor_orszag_ongyilk$suicides.100k.pop),]
orsz_sorb
hatorszag <- orsz_sorb$country[1:6]
hatorszag
hat_teljes <- idosor_alt[idosor_alt$country %in% hatorszag,]
hat_teljes
png(file="6orszag_evek.png", width=2048, height = 768)
ggplot(hat_teljes, aes(fill = factor(year), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge") + scale_fill_discrete(name = "év") + ggtitle("6 ország év") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#6.abra
idosor_gen_orszag <- aggregate(suicides.100k.pop ~ country+generation, data=data, mean, na.rm=T)
hat_teljes2 <- idosor_gen_orszag[idosor_gen_orszag$country %in% hatorszag,]
hat_teljes2
png(file="6orszag_gen.png", width = 2048, height = 768)
ggplot(hat_teljes2, aes(fill = factor(generation), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge")+ scale_fill_discrete(name="generáció") + ggtitle("6 ország generáció") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő")
dev.off()
#összerakás
idosor_ffi_plot <- ggplot(idosor_nemek[idosor_nemek$sex == "male",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor férfiak") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_gen_plot <- ggplot(idosor_gen, aes(x = year, y= suicides.100k.pop, colour = generation)) + geom_line() + scale_color_discrete(name="generációk") + ggtitle("Idősor generációk") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_no_plot <- ggplot(idosor_nemek[idosor_nemek$sex == "female",], aes(x = year, y = suicides.100k.pop, colour = country )) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor nők") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
hatorszag_evek_plot <- ggplot(hat_teljes, aes(fill = factor(year), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge") + scale_fill_discrete(name = "év") + ggtitle("6 ország év") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
hatorszag_gen_plot <- ggplot(hat_teljes2, aes(fill = factor(generation), x= country, y=suicides.100k.pop )) + geom_col(position = "dodge")+ scale_fill_discrete(name="generáció") + ggtitle("6 ország generáció") + xlab ("ország") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
idosor_plot <- ggplot(idosor_alt, aes(x = year, y = suicides.100k.pop, colour = country)) + geom_line() + scale_color_discrete(name="ország") + ggtitle("Idősor") + xlab ("év") + ylab ("öngyilkosság/100.000 fő") + theme(legend.position = "none")
install.packages("gridExtra")
library("gridExtra")
png(file = "osszes.png", width = 2048, height = 768)
grid.arrange(idosor_ffi_plot,idosor_gen_plot,idosor_no_plot,idosor_plot,hatorszag_evek_plot,hatorszag_gen_plot, ncol=2, nrow=3, top = "Beadanó összes")
dev.off()
#Értelmezés:
## Generációk alapján megállítható, hogy a legfiatalabb korosztályok kevésbé hajlamosak az öngyilkosságra, mint az idősebb generációk, és jól látszik, hogy 75 év felettieknél a legmagasabb az öngyilkosságok aránya. (G.I.Generation)
## A 6 legnagyobb arányú öngyilkossági ráátával rendelkező országokról mind elmondható, hogy nagyjából a 2000 évek óta csökkenő tendencia mutatkozik az öngyilkosságok számában.
## Magyarországon összesítve nagyon kiemelkedik, hogy a 75 év felettiek öngyilkossági aránya a többi generációhoz képest.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AGCCombine.R
\name{AGCCombine}
\alias{AGCCombine}
\title{Inference using accelerated g-computation}
\usage{
AGCCombine(results, level = 0.95)
}
\arguments{
\item{results}{A data.frame with columns Iteration giving the iteration of
the sampled parameter value, CopyID giving the copy number, Param giving the
estimate of the desired causal parameter, and ParamName giving the name of
the parameter.}
\item{level}{The confidence level to be used for constructing confidence
intervals and performing hypothesis tests.}
}
\value{
A data.frame object with the following columns
\itemize{
\item ParamName - The name of the parameter
\item estimate - The aggregated estimate of the parameter
\item standard_error - The estimated standard error of the estimate
\item between_var - The "between" component of the variance
\item within_var - The "within" component of the variance
\item var_est - The estimated variance (square of standard_error)
\item dof - The degrees-of-freedom of the Sattherthwaite approximation
\item Z - Test statistic for testing if the parameter is zero
\item p_value - The (two-sided) p-value for the test that the parameter is zero.
}
}
\description{
Combines the results of analysis on multiple simulated datasets. Associated
to each simulated dataset is an estimate of the parameters of interest.
Datasets are indexed by (i) the iteration of the bootstrap/MCMC scheme used
to generate the simulated data and (ii) an id indicating which of K
simulations the dataset corresponds to for a particular iteration.
}
| /man/AGCCombine.Rd | permissive | theodds/AGC | R | false | true | 1,631 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AGCCombine.R
\name{AGCCombine}
\alias{AGCCombine}
\title{Inference using accelerated g-computation}
\usage{
AGCCombine(results, level = 0.95)
}
\arguments{
\item{results}{A data.frame with columns Iteration giving the iteration of
the sampled parameter value, CopyID giving the copy number, Param giving the
estimate of the desired causal parameter, and ParamName giving the name of
the parameter.}
\item{level}{The confidence level to be used for constructing confidence
intervals and performing hypothesis tests.}
}
\value{
A data.frame object with the following columns
\itemize{
\item ParamName - The name of the parameter
\item estimate - The aggregated estimate of the parameter
\item standard_error - The estimated standard error of the estimate
\item between_var - The "between" component of the variance
\item within_var - The "within" component of the variance
\item var_est - The estimated variance (square of standard_error)
\item dof - The degrees-of-freedom of the Sattherthwaite approximation
\item Z - Test statistic for testing if the parameter is zero
\item p_value - The (two-sided) p-value for the test that the parameter is zero.
}
}
\description{
Combines the results of analysis on multiple simulated datasets. Associated
to each simulated dataset is an estimate of the parameters of interest.
Datasets are indexed by (i) the iteration of the bootstrap/MCMC scheme used
to generate the simulated data and (ii) an id indicating which of K
simulations the dataset corresponds to for a particular iteration.
}
|
#' @title split_index
#' @description Partitions the indices 1..total_len into consecutive,
#' nearly equal blocks.
#' @param total_len total number of indices to split
#' @param block_len target length of one block; only used to derive the
#'   default number of splits
#' @param nb_split number of splits, defaulting to
#'   \code{ceiling(total_len / block_len)}
#' @keywords internal
#' @return A matrix with one row per block and columns \code{lower},
#'   \code{upper} (inclusive index bounds) and \code{size} (block length).
split_index <- function(total_len, block_len,
                        nb_split = ceiling(total_len / block_len)) {
  # Clamp the number of blocks: never more than total_len, never zero
  # (block_len = Inf makes the default nb_split equal to 0).
  if (nb_split > total_len) {
    nb_split <- total_len
  } else if (nb_split == 0) {
    nb_split <- 1
  }
  step <- total_len / nb_split
  upper <- round(seq_len(nb_split) * step)
  lower <- c(1, head(upper, -1) + 1)
  size <- diff(c(0, upper))
  cbind(lower, upper, size)
}
#' @title split_foreach
#' @description Runs \code{FUN} over blocks of the index vector using a
#' block-wise foreach loop executed on a parallel cluster.
#' @param FUN function executed for each block; it is called as
#'   \code{FUN(ind = <block>, ...)}
#' @param ind index vector that is split across the workers
#' @param ... further arguments forwarded to \code{FUN}
#' @param .combine rule used to combine the per-block results; if
#'   \code{NULL}, a list of block results is returned
#' @param ncores number of cluster workers
#' @param nb_split number of blocks the index vector is split into
#'   (defaults to \code{ncores})
#' @return The (optionally combined) results of the \code{FUN} calls
#' @importFrom foreach %dopar% foreach
#' @keywords internal
#' @export
split_foreach <- function(FUN, ind, ...,
                          .combine = NULL,
                          ncores = 4,
                          nb_split = ncores) {
  # FORK clusters are cheaper but only exist on Unix-alikes; Windows and
  # SunOS fall back to PSOCK.
  switch (Sys.info()['sysname'],
          'Linux' = {
            cluster_Type = 'FORK'
          },
          'Windows' = {
            cluster_Type = 'PSOCK'
          },
          "Darwin" = {
            cluster_Type = 'FORK'
          },
          "SunOS" = {
            cluster_Type = 'PSOCK'
          },
          stop(paste("Package is not compatible with", Sys.info()['sysname']))
  )
  cl <- parallel::makeCluster(ncores, type = cluster_Type)
  doParallel::registerDoParallel(cl)
  # Ensure the cluster is torn down even if FUN fails.
  on.exit(parallel::stopCluster(cl))
  # BUG FIX: this previously passed `nb_split = ncores`, silently ignoring
  # a caller-supplied nb_split. Its default is ncores, so the default
  # behaviour is unchanged.
  intervals <- split_index(length(ind), nb_split = nb_split)
  ic <- NULL  # silences "no visible binding" notes from R CMD check
  res <- foreach(ic = bigstatsr::rows_along(intervals)) %dopar% {
    ind.part <- ind[seq(intervals[ic, "lower"], intervals[ic, "upper"])]
    FUN(ind = ind.part, ...)
  }
  `if`(is.null(.combine), res, do.call(.combine, res))
}
#' @title big_parallelize
#' @description Applies a function to the columns of a file-backed matrix,
#' distributing the column indices over several cores.
#' @param X a file-backed data matrix
#' @param p.FUN the function to apply; it receives a block of column
#'   indices (as \code{ind}) and the matrix
#' @param p.combine rule used to combine the per-core results
#' @param ind vector of column indices on which the computations are
#'   performed
#' @param ncores number of cores
#' @param ... additional arguments forwarded to \code{p.FUN}
#' @return The combined result of the parallel computations
#' @keywords internal
#' @export
big_parallelize <- function(X, p.FUN,
                            p.combine = NULL,
                            ind = bigstatsr::cols_along(X),
                            ncores = 4,
                            ...) {
  # Thin wrapper: X is forwarded as an extra positional argument so that
  # p.FUN is called as p.FUN(ind = <block>, X, ...).
  split_foreach(p.FUN, ind, X, ...,
                ncores = ncores,
                .combine = p.combine)
}
#' @title Sketch
#'
#' @description The data sketch computation.
#' @param Data A Filebacked Big Matrix n x N. Data signals are stored in the matrix columns.
#'   A plain \code{matrix} (columns = signals) or a single \code{numeric}/\code{integer} signal is also accepted.
#' @param W A frequency matrix m x n. The frequency vectors are stored in the matrix rows.
#' @param ind.col Column indeces for which the data sketch is computed. By default all matrix columns.
#' @param ncores Number of used cores. By default 1. If \code{parallel} = FALSE, ncores defines a number of data splits
#' on which the sketch is computed separately.
#' @param parallel logical parameter that indicates whether computations are performed on several cores in parallel or not.
#' @return The data sketch vector.
#' @details The sketch of the given data collection \eqn{x_1, \dots, x_N} is a vector of the length 2m.
#' First m components of the data sketch vector correspond to its real part, \emph{i.e.} \eqn{\frac{1}{N} \sum_{i=1}^N \cos(W x_i)}.
#' Last m components are its imaginary part, \emph{i.e.} \eqn{\frac{1}{N} \sum_{i=1}^N \sin(W x_i)}.
#' @examples
#' X = matrix(rnorm(1000), ncol=100, nrow = 10)
#' X_FBM = bigstatsr::FBM(init = X, ncol=100, nrow = 10)
#' W = GenerateFrequencies(Data = X_FBM, m = 20, N0 = 100, TypeDist = "AR")$W
#' SK1 = Sketch(X_FBM, W)
#' SK2 = Sketch(X_FBM, W, parallel = TRUE, ncores = 2)
#' all.equal(SK1, SK2)
#' @references \insertRef{DBLP:journals/corr/KerivenBGP16}{chickn}.
#' @export
Sketch<-function(Data,W,ind.col = 1:ncol(Data), ncores = 1, parallel = FALSE){
  # class() may return several strings (e.g. c("matrix", "array") since
  # R 4.0) while switch() needs a length-one EXPR, so dispatch on the
  # first class only. N (= length(ind.col)) is computed inside the
  # branches that need it: forcing the default ind.col for a plain
  # vector would error, because ncol(<vector>) is NULL.
  switch(class(Data)[1],
         'FBM' = {
           N = length(ind.col)
           if(!parallel){
             weight = rep(1/N, ncol(Data))
             SK = bigstatsr::big_apply(X = Data, a.FUN = function(X, ind, W, weight){
               Z <- W %*% X[, ind]
               return(c(cos(Z) %*% weight[ind], sin(Z) %*% weight[ind]))
             }, a.combine = 'plus', ind = ind.col, weight = weight, W = W, ncores = ncores)
           }else{
             SK = big_parallelize(X = Data, p.FUN = function(X, ind, W, N){
               Z <- W %*% X[, ind]
               return(c(rowSums(cos(Z))/N, rowSums(sin(Z))/N))
             }, p.combine = 'cbind', ind = ind.col, W = W, N = N, ncores = ncores)
             SK = rowSums(SK[])
           }
         },
         "matrix" = {
           N = length(ind.col)
           weight = rep(1/N, N)
           # Restrict to the requested columns; previously ind.col was
           # silently ignored in this branch.
           SK = exp(1i*W %*% Data[, ind.col, drop = FALSE]) %*% weight
         },
         "numeric" = {
           # Single signal: no averaging, ind.col is irrelevant here.
           SK = exp(1i*W %*% Data)
         },
         "integer" = {
           SK = exp(1i*W %*% Data)
         },
         stop("Unknown class of Data"))
  # NOTE(review): the FBM branch returns a real vector of length 2m
  # (cos part then sin part) while the matrix/vector branches return a
  # complex m-vector; callers must be aware of this asymmetry.
  return(SK)
}
| /chickn/R/Sketch.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 6,033 | r | #' @title split_index
#' @description splits the indeces into several blocks
#' @param total_len is a total length of the index vector
#' @param block_len is a length of the block
#' @param nb_split is a number of the splits
#' @keywords internal
#' @return A list of the splits that contains lower, upper block indeces and block size
# Partitions the indices 1..total_len into consecutive, nearly equal
# blocks; returns a matrix with columns lower, upper and size.
split_index <- function(total_len, block_len,
                        nb_split = ceiling(total_len / block_len)) {
  # Clamp the number of blocks: never more than total_len, never zero
  # (block_len = Inf makes the default nb_split equal to 0).
  if (nb_split > total_len) {
    nb_split <- total_len
  } else if (nb_split == 0) {
    nb_split <- 1
  }
  step <- total_len / nb_split
  upper <- round(seq_len(nb_split) * step)
  lower <- c(1, head(upper, -1) + 1)
  size <- diff(c(0, upper))
  cbind(lower, upper, size)
}
#'@title split_foreach
#' @description implements block foreach loop
#' @param FUN is a function that is executed in the loop
#' @param ind is a index vector
#' @param ... are parameters of the function FUN
#' @param .combine is a rule to combine the result
#' @param ncores is a number of cores
#' @param nb_split is a number of splits
#' @return The result of the FUN function
#' @importFrom foreach %dopar% foreach
#' @keywords internal
#' @export
split_foreach <- function(FUN, ind, ...,
                          .combine = NULL,
                          ncores =4,
                          nb_split = ncores) {
  ## Choose the cluster type from the OS: FORK (cheap, shared memory) on
  ## unix-alikes, PSOCK on platforms without fork().
  switch (Sys.info()['sysname'],
          'Linux' = {
            cluster_Type = 'FORK'
          },
          'Windows' = {
            cluster_Type = 'PSOCK'
          },
          "Darwin" = {
            cluster_Type = 'FORK'
          },
          "SunOS" = {
            cluster_Type = 'PSOCK'
          },
          stop(paste("Package is not compatible with", Sys.info()['sysname']))
  )
  cl <- parallel::makeCluster(ncores, type = cluster_Type)
  doParallel::registerDoParallel(cl)
  ## cluster is always released, even if FUN errors on a worker
  on.exit(parallel::stopCluster(cl))
  ## NOTE(review): the `nb_split` argument is accepted but never used -- the
  ## index vector is always split into `ncores` blocks; confirm intent.
  intervals <- split_index(length(ind), nb_split = ncores)
  ic <- NULL  # predeclared to silence "no visible binding" check notes
  res <- foreach(ic = bigstatsr::rows_along(intervals)) %dopar% {
    ## each worker processes one contiguous block of indices
    ind.part <- ind[seq(intervals[ic, "lower"], intervals[ic, "upper"])]
    FUN(ind = ind.part, ...)
  }
  ## return the list of per-block results, or combine them if requested
  `if`(is.null(.combine), res, do.call(.combine, res))
}
#'@title big_parallelize
#' @description parallel call a function on file-backed matrix
#'
#' @param X is a file-backed data matrix
#' @param p.FUN is a function to apply
#' @param p.combine is a rule to combine the results from each core
#' @param ind is a vector of column indeces on which the computations are performed
#' @param ncores is a number of cores
#' @param ... are additional parameters
#' @return The result of the parallel computations
#' @keywords internal
#' @export
big_parallelize <- function(X, p.FUN,
                            p.combine = NULL,
                            ind = bigstatsr::cols_along(X),
                            ncores = 4,
                            ...) {
  ## Thin adapter around split_foreach(): forwards the file-backed matrix `X`
  ## through split_foreach's `...`, so it becomes the first positional
  ## argument of p.FUN on each worker (p.FUN is expected to have signature
  ## function(X, ind, ...)).
  split_foreach(
    p.FUN, ind, X, ...,
    .combine = p.combine,
    ncores = ncores)
}
#' @title Sketch
#'
#' @description The data sketch computation.
#' @param Data A Filebacked Big Matrix n x N. Data signals are stored in the matrix columns.
#' @param W A frequency matrix m x n. The frequency vectors are stored in the matrix rows.
#' @param ind.col Column indeces for which the data sketch is computed. By default all matrix columns.
#' @param ncores Number of used cores. By default 1. If \code{parallel} = FALSE, ncores defines a number of data splits
#' on which the sketch is computed separatelly.
#' @param parallel logical parameter that indicates whether computations are performed on several cores in parallel or not.
#' @return The data sketch vector.
#' @details The sketch of the given data collection \eqn{x_1, \dots, x_N} is a vector of the length 2m.
#' First m components of the data sketch vector correspond to its real part, \emph{i.e.} \eqn{\frac{1}{N} \sum_{i=1}^N \cos(W x_i)}.
#' Last m components are its imaginary part, \emph{i.e.} \eqn{\frac{1}{N} \sum_{i=1}^N \sin(W x_i)}.
#' @examples
#' X = matrix(rnorm(1000), ncol=100, nrow = 10)
#' X_FBM = bigstatsr::FBM(init = X, ncol=100, nrow = 10)
#' W = GenerateFrequencies(Data = X_FBM, m = 20, N0 = 100, TypeDist = "AR")$W
#' SK1 = Sketch(X_FBM, W)
#' SK2 = Sketch(X_FBM, W, parallel = TRUE, ncores = 2)
#' all.equal(SK1, SK2)
#' @references \insertRef{DBLP:journals/corr/KerivenBGP16}{chickn}.
#' @export
Sketch <- function(Data, W, ind.col = 1:ncol(Data), ncores = 1, parallel = FALSE){
  ## Compute the sketch of columns `ind.col` of `Data` for the frequency
  ## matrix `W` (m x n, one frequency vector per row).  For FBM input the
  ## result is a real vector of length 2*m: first m entries = mean of
  ## cos(W x_i) (real part), last m entries = mean of sin(W x_i).
  m <- nrow(W)          # number of frequency vectors
  N <- length(ind.col)  # number of signals entering the average
  ## class() may return several strings (c("matrix", "array") since R 4.0.0,
  ## c("FBM", "R6") for bigstatsr matrices), but switch() requires a
  ## length-1 EXPR -- dispatch on the first class only.
  switch(class(Data)[1],
         'FBM' = {
           if(!parallel){
             ## Equal weight 1/N per selected column; big_apply processes the
             ## matrix block-wise and sums ('plus') the partial sketches.
             weight <- rep(1/N, ncol(Data))
             SK <- bigstatsr::big_apply(X = Data, a.FUN = function(X, ind, W, weight){
               Z <- W %*% X[, ind]
               c(cos(Z) %*% weight[ind], sin(Z) %*% weight[ind])
             }, a.combine = 'plus', ind = ind.col, weight = weight, W = W, ncores = ncores)
           }else{
             ## Each worker returns its partial sketch as one column; summing
             ## over columns yields the full sketch.
             SK <- big_parallelize(X = Data, p.FUN = function(X, ind, W, N){
               Z <- W %*% X[, ind]
               c(rowSums(cos(Z))/N, rowSums(sin(Z))/N)
             }, p.combine = 'cbind', ind = ind.col, W = W, N = N, ncores = ncores)
             SK <- rowSums(SK)
           }
         },
         "matrix" = {
           ## NOTE: this branch returns the complex sketch exp(1i * W %*% x)
           ## averaged over columns (same information as the real c(cos, sin)
           ## form used for FBM input).
           weight <- rep(1/N, N)
           ## Use only the requested columns; previously the whole matrix was
           ## multiplied, which failed whenever length(ind.col) != ncol(Data).
           SK <- exp(1i * W %*% Data[, ind.col, drop = FALSE]) %*% weight
         },
         "numeric" = {
           ## Single signal, no averaging.  NOTE(review): the default
           ## `ind.col = 1:ncol(Data)` errors for plain vectors, so callers
           ## must pass `ind.col` explicitly here -- confirm intended usage.
           SK <- exp(1i * W %*% Data[])
         },
         "integer" = {
           SK <- exp(1i * W %*% Data[])
         },
         stop("Unknown class of Data"))  # typo fixed (was "Unkown")
  return(SK)  # normalisation by sqrt(m) deliberately omitted, as in original
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patch_shift.R
\name{sample_patch_shift}
\alias{sample_patch_shift}
\title{Randomly sample from a distribution of shift patches}
\usage{
sample_patch_shift(df, rdist = stats::rnorm, relative_shift = NULL,
exclude_cols = integer(0), seed, ...)
}
\arguments{
\item{df}{A data frame}
\item{rdist}{A function for randomly generating the shift parameter. Must
contain an argument \code{n} for the number of samples. Defaults to the
standard Normal distribution function \code{rnorm}.}
\item{relative_shift}{(Optional) A number specifying a relative shift size. If non-\code{NULL}, the
\code{rdist} argument is ignored and the shift is sampled from the Normal
distribution with mean (and standard deviation, respectively) given by
\eqn{relative_shift} multiplied by the median (respectively standard deviation)
of the values in the selected column of \code{df} (ignoring NAs). The
\code{relative_shift} argument defaults to \code{NULL} to produce a shift
which is absolute, not relative, and is sampled using the \code{rdist}
function.}
\item{exclude_cols}{An integer vector of column indices to be excluded from the set of possible
target columns for the returned patch.}
\item{seed}{A random seed.}
\item{...}{Additional arguments passed to the \code{rdist} function.}
}
\value{
A shift patch with randomly sampled parameters.
}
\description{
Randomly sample from a distribution of shift patches
}
\examples{
# By default, the shift parameter is sampled from a standard Normal distribution:
sample_patch_shift(mtcars)
sample_patch_shift(mtcars, mean = 2)
}
| /man/sample_patch_shift.Rd | permissive | tpetricek/datadiff | R | false | true | 1,638 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/patch_shift.R
\name{sample_patch_shift}
\alias{sample_patch_shift}
\title{Randomly sample from a distribution of shift patches}
\usage{
sample_patch_shift(df, rdist = stats::rnorm, relative_shift = NULL,
exclude_cols = integer(0), seed, ...)
}
\arguments{
\item{df}{A data frame}
\item{rdist}{A function for randomly generating the shift parameter. Must
contain an argument \code{n} for the number of samples. Defaults to the
standard Normal distribution function \code{rnorm}.}
\item{relative_shift}{(Optional) A number specifying a relative shift size. If non-\code{NULL}, the
\code{rdist} argument is ignored and the shift is sampled from the Normal
distribution with mean (and standard deviation, respectively) given by
\eqn{relative_shift} multiplied by the median (respectively standard deviation)
of the values in the selected column of \code{df} (ignoring NAs). The
\code{relative_shift} argument defaults to \code{NULL} to produce a shift
which is absolute, not relative, and is sampled using the \code{rdist}
function.}
\item{exclude_cols}{An integer vector of column indices to be excluded from the set of possible
target columns for the returned patch.}
\item{seed}{A random seed.}
\item{...}{Additional arguments passed to the \code{rdist} function.}
}
\value{
A shift patch with randomly sampled parameters.
}
\description{
Randomly sample from a distribution of shift patches
}
\examples{
# By default, the shift parameter is sampled from a standard Normal distribution:
sample_patch_shift(mtcars)
sample_patch_shift(mtcars, mean = 2)
}
|
#####################################################################################
# Author: Houston F. Lester
# Description: This is the plot that was created for the finite population correction
#####################################################################################
library(ggplot2)
library(dplyr)
library(tidyr)
# Finite population correction factor 1 - k/K, vectorised over k
# (k = number sampled, K = population size).
FPC <- function(k, K) {
  1 - k / K
}
# assuming residual variance is one and the corresponding diagonal element of
# solve(t(X)%*%X) is also 1, the squared SE of a coefficient is just the FPC factor
mult_factor <- FPC(1:99, 100)
# NOTE(review): t_stat = 1/mult_factor treats mult_factor as the (unsquared)
# standard error; if mult_factor is the *squared* SE (as the y-axis label of
# the top panel says), the t statistic would be 1/sqrt(mult_factor) -- confirm.
FPC_plot <- data.frame(mult_factor, sampled = 1:99, t_stat = 1/mult_factor)

# Top panel: squared standard error vs. percentage of the population sampled.
ggplot(data = FPC_plot, aes(x = sampled, y = mult_factor)) +
  geom_line() +
  geom_point() +
  theme_bw() +
  ylab("Squared Standard Error") +
  xlab("Percentage of the Population Sampled") +
  geom_vline(xintercept = 49, linetype = "dashed") +  # fixed: closing quote was missing
  ggtitle("Top Panel")

# Bottom panel: implied t/z statistic vs. percentage of the population sampled.
ggplot(data = FPC_plot, aes(x = sampled, y = t_stat)) +
  geom_line() +
  geom_point() +
  theme_bw() +
  ylab("t/z Statistic") +
  xlab("Percentage of the Population Sampled") +
  geom_vline(xintercept = 49, linetype = "dashed") +  # fixed: closing quote was missing
  ggtitle("Bottom Panel")
| /ScriptsClean/FPC_plots.R | no_license | HoustonFLester/PowerSIOP2019 | R | false | false | 1,244 | r | #####################################################################################
# Author: Houston F. Lester
# Description: This is the plot that was created for the finite population correction
#####################################################################################
library(ggplot2)
library(dplyr)
library(tidyr)
# Finite population correction factor 1 - k/K, vectorised over k
# (k = number sampled, K = population size).
FPC <- function(k, K) {
  1 - k / K
}
# assuming residual variance is one and the corresponding diagonal element of
# solve(t(X)%*%X) is also 1, the squared SE of a coefficient is just the FPC factor
mult_factor <- FPC(1:99, 100)
# NOTE(review): t_stat = 1/mult_factor treats mult_factor as the (unsquared)
# standard error; if mult_factor is the *squared* SE (as the y-axis label of
# the top panel says), the t statistic would be 1/sqrt(mult_factor) -- confirm.
FPC_plot <- data.frame(mult_factor, sampled = 1:99, t_stat = 1/mult_factor)

# Top panel: squared standard error vs. percentage of the population sampled.
ggplot(data = FPC_plot, aes(x = sampled, y = mult_factor)) +
  geom_line() +
  geom_point() +
  theme_bw() +
  ylab("Squared Standard Error") +
  xlab("Percentage of the Population Sampled") +
  geom_vline(xintercept = 49, linetype = "dashed") +  # fixed: closing quote was missing
  ggtitle("Top Panel")

# Bottom panel: implied t/z statistic vs. percentage of the population sampled.
ggplot(data = FPC_plot, aes(x = sampled, y = t_stat)) +
  geom_line() +
  geom_point() +
  theme_bw() +
  ylab("t/z Statistic") +
  xlab("Percentage of the Population Sampled") +
  geom_vline(xintercept = 49, linetype = "dashed") +  # fixed: closing quote was missing
  ggtitle("Bottom Panel")
|
## Binary verification and three ensemble members with FTE histogram
library(RandomFields)
library(fields)
library(tidyverse)
library(gridExtra)
library(RColorBrewer)
# Setup -------------------------------------------------------------------
## Functions to numerically determine rho value that produces desired xi
## Objective for the root search in rhored_search(): cross-covariance at lag 0
## of the bivariate Whittle-Matern model, minus the target value xi.
rho_root <- function(rho, xi, smooth, rng, var) {
  biwm <- RMbiwm(nu = smooth, s = rng, cdiag = var, rhored = rho)
  RFcov(biwm, x = 0)[1, 1, 2] - xi
}
rhored_search <- function(xi, smooth, rng, var) {
  # xi (float): desired weight ratio between ensemble mean and perturbation
  # NOTE: other model parameters passed to RMbiwm() are assumed to be set and constant
  # Equal marginal ranges: rhored equals the target cross-correlation directly.
  if (rng[1] == rng[3]) {
    return(xi)
  }
  # Otherwise solve rho_root() numerically for the rhored in (xi, 1) that
  # produces cross-correlation xi at lag 0.
  uniroot(rho_root, c(xi, 1), xi = xi, smooth = smooth, rng = rng, var = var)$root
}
demo_ens_sim <- function(a1, a2) {
  ## Simulate one verification field plus a 3-member ensemble on a square
  ## grid.  a1 / a2 are the range parameters of the verification and the
  ## ensemble fields; the cross range is their geometric mean.
  ## grid
  x <- y <- seq(-20, 20, 0.2)
  ## model params
  ## xi weighs the shared "ensemble mean" signal against independent
  ## perturbations (variance-preserving combination below)
  xi <- 0.8; smooth <- c(1.5, 1.5, 1.5); var <- c(1, 1)
  rng <- c(a1, sqrt(a1*a2), a2)
  rho <- rhored_search(xi, smooth, rng, var)
  # model
  set.seed(0)  # fixed seed: the figure is fully reproducible
  model_biwm <- RMbiwm(nu=smooth, s=rng, cdiag=var, rhored=rho)
  sim <- RFsimulate(model_biwm, x, y)
  ## ensemble perturbation
  model_whittle <- RMwhittle(nu=smooth[3], notinvnu=TRUE,
                             scale=rng[3], var=var[2])
  omega <- RFsimulate(model_whittle, x, y, n=3)
  omega <- as.matrix(data.frame(omega))
  ## members = xi * common signal + sqrt(1 - xi^2) * independent noise,
  ## so each member keeps unit variance
  ensemble_mean <- replicate(3, sim$variable2)
  ensemble <- xi*ensemble_mean + sqrt(1-xi^2)*omega
  ## column 1 = verification field, columns 2:4 = ensemble members
  fields <- data.frame(sim$variable1, ensemble)
  return(fields)
}
# Jitter a (discrete) rank by uniform noise of half-width 1/24, disaggregating
# tied rank values before the beta fit.
disagg_rank <- function(r) {
  runif(1, min = r - 1/24, max = r + 1/24)
}
## data for histogram
## NOTE(review): read.table() on a ".RData" path is unusual -- this only works
## if the file is actually a plain-text table; confirm the file format.
rank_tab <- read.table('../data/rank_tab.RData')
## map the 12 discrete ranks onto bin midpoints in (0, 1)
rank_tab <- rank_tab %>% mutate(rank = (rank-0.5)/12)
## range parameters and grid
a1 <- 2            # fixed range of the verification field
a2 <- c(1, 2, 3)   # ensemble range: one value per figure row
tau <- 0           # binarisation threshold
x <- y <- seq(-20, 20, 0.2)
# Make figure -------------------------------------------------------------
## pl collects the 15 panels (3 rows x 5 columns, filled row-wise);
## i = 1, 6, 11 indexes the first panel of each row.
pl <- list()
for (i in seq(1,11,5)){
  fields <- demo_ens_sim(a1, a2[round(i/5)+1])
  dat <- expand.grid(x = x, y = y)
  dat["z"] <- fields[,1]
  ## binary fields
  ## local() gives each iteration its own copy of j (so the lazily evaluated
  ## ggplot objects do not all see the final loop value); `<<-` writes the
  ## panel back into pl in the enclosing scope.
  for (j in 1:ncol(fields))
    local({
      j <- j
      # update data being plotted
      dat$z <- fields[,j]
      p <- ggplot(dat, aes(x, y)) +
        geom_raster(aes(fill = z > tau)) +
        scale_fill_manual(values = c("TRUE" = "#08306B", "FALSE" = "#F7FBFF")) +
        theme_void() +
        theme(plot.title = element_blank(),
              aspect.ratio = 1/1,
              legend.position="none") +
        labs(x=NULL, y=NULL)
      pl[[i+j-1]] <<- p
    })
  ## fte ranks for given range pair
  j <- round(i/5)+1
  ## NOTE(review): `tau==0` matches a rank_tab column named tau if one exists,
  ## otherwise the global tau (always TRUE) -- confirm which is intended.
  df <- rank_tab %>% filter(s1==a1, s2==a2[j], tau==0)
  ## fit a beta distribution to the disaggregated ranks and format the two
  ## estimates as the annotation string "a, b"
  ## NOTE(review): fitdist() comes from the fitdistrplus package, which is not
  ## attached above -- confirm it is loaded elsewhere.
  params <- df %>%
    mutate(rank = sapply(rank, disagg_rank)) %>%
    summarise(params=paste(fitdist(rank,'beta')$estimate, collapse=" ")) %>%
    separate(params, c('a', 'b'), sep=" ") %>%
    mutate(a=round(as.numeric(a), 3), b=round(as.numeric(b),3)) %>%
    unite(params, a:b, sep = ", ")
  ## FTE rank histogram panel (rightmost column of the row)
  p <- ggplot(df, aes(rank)) +
    geom_hline(yintercept=1, linetype=3, size=0.5, color="grey") +
    geom_histogram(aes(y=..density..), bins=12, fill="black", color="white") +
    theme_void() +
    theme(plot.title = element_blank(),
          aspect.ratio = 1/1) +
    labs(x=NULL, y=NULL)
  ## per-row y-axis limits and annotation placement
  if (j == 1) {
    p <- p + annotate("text", x=0.48, y=3, size=3.5, label=params$params)
  } else if (j == 2) {
    p <- p + ylim(0, 1.25) +
      annotate("text", x=0.48, y=1.2, size=3.5, label=params$params)
  } else {
    p <- p + ylim(0, 1.45) +
      annotate("text", x=0.48, y=1.4, size=3.5, label=params$params)
  }
  pl[[i+4]] <- p
}
## create row labels
row_labs <- c("A", "B", "C")
col_labs <- c("", "Verification", "Member 1", "Member 2", "Member 3", "FTE Histogram")
## figure matrix
tt <- ttheme_minimal(core = list(fg_params=list(fontface="bold")))
grd <- rbind(tableGrob(t(col_labs), theme = tt),
             cbind(tableGrob(row_labs, theme = tt),
                   arrangeGrob(grobs = pl, ncol=5), size = "last"), size = "last")
## NOTE(review): grid.draw() is exported by the grid package, which is not
## attached above -- confirm library(grid) is loaded elsewhere.
png('ver_ens_fte_1.png', units='in', width=8, height=5, res=400, pointsize=9)
grid.draw(grd)
dev.off()
| /figs/ver_ens_fte_1.R | no_license | joshhjacobson/masters-thesis | R | false | false | 4,194 | r |
## Binary verification and three ensemble members with FTE histogram
library(RandomFields)
library(fields)
library(tidyverse)
library(gridExtra)
library(RColorBrewer)
# Setup -------------------------------------------------------------------
## Functions to numerically determine rho value that produces desired xi
## Objective for the root search in rhored_search(): cross-covariance at lag 0
## of the bivariate Whittle-Matern model, minus the target value xi.
rho_root <- function(rho, xi, smooth, rng, var) {
  biwm <- RMbiwm(nu = smooth, s = rng, cdiag = var, rhored = rho)
  RFcov(biwm, x = 0)[1, 1, 2] - xi
}
rhored_search <- function(xi, smooth, rng, var) {
  # xi (float): desired weight ratio between ensemble mean and perturbation
  # NOTE: other model parameters passed to RMbiwm() are assumed to be set and constant
  # Equal marginal ranges: rhored equals the target cross-correlation directly.
  if (rng[1] == rng[3]) {
    return(xi)
  }
  # Otherwise solve rho_root() numerically for the rhored in (xi, 1) that
  # produces cross-correlation xi at lag 0.
  uniroot(rho_root, c(xi, 1), xi = xi, smooth = smooth, rng = rng, var = var)$root
}
demo_ens_sim <- function(a1, a2) {
  ## Simulate one verification field plus a 3-member ensemble on a square
  ## grid.  a1 / a2 are the range parameters of the verification and the
  ## ensemble fields; the cross range is their geometric mean.
  ## grid
  x <- y <- seq(-20, 20, 0.2)
  ## model params
  ## xi weighs the shared "ensemble mean" signal against independent
  ## perturbations (variance-preserving combination below)
  xi <- 0.8; smooth <- c(1.5, 1.5, 1.5); var <- c(1, 1)
  rng <- c(a1, sqrt(a1*a2), a2)
  rho <- rhored_search(xi, smooth, rng, var)
  # model
  set.seed(0)  # fixed seed: the figure is fully reproducible
  model_biwm <- RMbiwm(nu=smooth, s=rng, cdiag=var, rhored=rho)
  sim <- RFsimulate(model_biwm, x, y)
  ## ensemble perturbation
  model_whittle <- RMwhittle(nu=smooth[3], notinvnu=TRUE,
                             scale=rng[3], var=var[2])
  omega <- RFsimulate(model_whittle, x, y, n=3)
  omega <- as.matrix(data.frame(omega))
  ## members = xi * common signal + sqrt(1 - xi^2) * independent noise,
  ## so each member keeps unit variance
  ensemble_mean <- replicate(3, sim$variable2)
  ensemble <- xi*ensemble_mean + sqrt(1-xi^2)*omega
  ## column 1 = verification field, columns 2:4 = ensemble members
  fields <- data.frame(sim$variable1, ensemble)
  return(fields)
}
# Jitter a (discrete) rank by uniform noise of half-width 1/24, disaggregating
# tied rank values before the beta fit.
disagg_rank <- function(r) {
  runif(1, min = r - 1/24, max = r + 1/24)
}
## data for histogram
## NOTE(review): read.table() on a ".RData" path is unusual -- this only works
## if the file is actually a plain-text table; confirm the file format.
rank_tab <- read.table('../data/rank_tab.RData')
## map the 12 discrete ranks onto bin midpoints in (0, 1)
rank_tab <- rank_tab %>% mutate(rank = (rank-0.5)/12)
## range parameters and grid
a1 <- 2            # fixed range of the verification field
a2 <- c(1, 2, 3)   # ensemble range: one value per figure row
tau <- 0           # binarisation threshold
x <- y <- seq(-20, 20, 0.2)
# Make figure -------------------------------------------------------------
## pl collects the 15 panels (3 rows x 5 columns, filled row-wise);
## i = 1, 6, 11 indexes the first panel of each row.
pl <- list()
for (i in seq(1,11,5)){
  fields <- demo_ens_sim(a1, a2[round(i/5)+1])
  dat <- expand.grid(x = x, y = y)
  dat["z"] <- fields[,1]
  ## binary fields
  ## local() gives each iteration its own copy of j (so the lazily evaluated
  ## ggplot objects do not all see the final loop value); `<<-` writes the
  ## panel back into pl in the enclosing scope.
  for (j in 1:ncol(fields))
    local({
      j <- j
      # update data being plotted
      dat$z <- fields[,j]
      p <- ggplot(dat, aes(x, y)) +
        geom_raster(aes(fill = z > tau)) +
        scale_fill_manual(values = c("TRUE" = "#08306B", "FALSE" = "#F7FBFF")) +
        theme_void() +
        theme(plot.title = element_blank(),
              aspect.ratio = 1/1,
              legend.position="none") +
        labs(x=NULL, y=NULL)
      pl[[i+j-1]] <<- p
    })
  ## fte ranks for given range pair
  j <- round(i/5)+1
  ## NOTE(review): `tau==0` matches a rank_tab column named tau if one exists,
  ## otherwise the global tau (always TRUE) -- confirm which is intended.
  df <- rank_tab %>% filter(s1==a1, s2==a2[j], tau==0)
  ## fit a beta distribution to the disaggregated ranks and format the two
  ## estimates as the annotation string "a, b"
  ## NOTE(review): fitdist() comes from the fitdistrplus package, which is not
  ## attached above -- confirm it is loaded elsewhere.
  params <- df %>%
    mutate(rank = sapply(rank, disagg_rank)) %>%
    summarise(params=paste(fitdist(rank,'beta')$estimate, collapse=" ")) %>%
    separate(params, c('a', 'b'), sep=" ") %>%
    mutate(a=round(as.numeric(a), 3), b=round(as.numeric(b),3)) %>%
    unite(params, a:b, sep = ", ")
  ## FTE rank histogram panel (rightmost column of the row)
  p <- ggplot(df, aes(rank)) +
    geom_hline(yintercept=1, linetype=3, size=0.5, color="grey") +
    geom_histogram(aes(y=..density..), bins=12, fill="black", color="white") +
    theme_void() +
    theme(plot.title = element_blank(),
          aspect.ratio = 1/1) +
    labs(x=NULL, y=NULL)
  ## per-row y-axis limits and annotation placement
  if (j == 1) {
    p <- p + annotate("text", x=0.48, y=3, size=3.5, label=params$params)
  } else if (j == 2) {
    p <- p + ylim(0, 1.25) +
      annotate("text", x=0.48, y=1.2, size=3.5, label=params$params)
  } else {
    p <- p + ylim(0, 1.45) +
      annotate("text", x=0.48, y=1.4, size=3.5, label=params$params)
  }
  pl[[i+4]] <- p
}
## create row labels
row_labs <- c("A", "B", "C")
col_labs <- c("", "Verification", "Member 1", "Member 2", "Member 3", "FTE Histogram")
## figure matrix
tt <- ttheme_minimal(core = list(fg_params=list(fontface="bold")))
grd <- rbind(tableGrob(t(col_labs), theme = tt),
             cbind(tableGrob(row_labs, theme = tt),
                   arrangeGrob(grobs = pl, ncol=5), size = "last"), size = "last")
## NOTE(review): grid.draw() is exported by the grid package, which is not
## attached above -- confirm library(grid) is loaded elsewhere.
png('ver_ens_fte_1.png', units='in', width=8, height=5, res=400, pointsize=9)
grid.draw(grd)
dev.off()
|
library(data.table)
library(dplyr)
library(lubridate)
library(reshape2)
library(vars)
library(caret)
library(iml)
library(DALEX)
library(breakDown)
library(iBreakDown)
library(ingredients)
library(drifter)
library(ggplot2)
library(plotly)
library(ggridges)
library(viridis)
library(formula.tools)
library(xgboost)
library(DT)
library(gridExtra)
library(kableExtra)
col_pal <- magma(5)
source("train_xgb.R")
source("create_Z_varest.R")
# Daimler colors
# Daimler corporate identity colours as hex strings, converted from the
# official 0-255 RGB triples.  Order: blue grey, blue green 1, blue green 2,
# green, ice blue, red, black.
dci_palette <- function() {
  c(
    rgb(52, 64, 77, maxColorValue = 255),     # blue grey
    rgb(113, 190, 196, maxColorValue = 255),  # blue green 1
    rgb(94, 126, 144, maxColorValue = 255),   # blue green 2
    rgb(80, 188, 138, maxColorValue = 255),   # green
    rgb(161, 179, 192, maxColorValue = 255),  # ice blue
    rgb(239, 95, 91, maxColorValue = 255),    # red
    rgb(0, 0, 0, maxColorValue = 255)         # black
  )
}
# custom function to plot lime objects
plot_lime <- function(lime_object, model_type){
  # Reuse the title that lime's own plot method would produce.
  title_string <- plot(lime_object)$labels$title
  # Horizontal bar chart of feature attributions, ordered by effect size.
  base_plot <- ggplot(lime_object$results) +
    geom_bar(aes(x = reorder(feature.value, effect), y = effect),
             stat = "identity",
             fill = "#5E7E90",
             alpha = 0.95)
  base_plot +
    coord_flip() +
    theme_minimal() +
    labs(x = "",
         y = "Feature Attribution",
         title = title_string,
         subtitle = model_type)
}
# custom function to plot breakdwon plots
plot_waterfall <- function(explain_object, title_string){
  ## Waterfall (break-down) plot: one horizontal bar per variable showing how
  ## its contribution moves the model output from the intercept towards the
  ## final prediction.
  df_explained <- as.data.frame(explain_object)
  ## baseline (intercept) and final prediction values, used as anchors below
  intercept <- df_explained %>%
    dplyr::filter(variable == "intercept") %>%
    dplyr::pull(contribution)
  prediction <- df_explained %>%
    dplyr::filter(variable == "prediction") %>%
    dplyr::pull(contribution)
  ## build segment coordinates: each bar runs from the previous cumulative
  ## value (`start`, via lag) to its own cumulative value (`end`);
  ## "cummulative" is the column name produced by the breakDown output
  df_explained <- df_explained %>%
    dplyr::select(-label) %>%
    dplyr::select(variable, contribution, end = cummulative, value = variable_value, sign, position) %>%
    dplyr::mutate(start = dplyr::lag(end),
                  sign_new = case_when(sign == "1" ~ "+",
                                       sign == "0" ~ "",
                                       sign == "-1" ~ "-",
                                       TRUE ~ "+"),
                  value = as.numeric(as.character(value)),
                  ## bar label "variable = rounded value"; the intercept and
                  ## prediction rows keep their plain names
                  label = paste(gsub(pattern = " =.*", replacement = "", x = variable),
                                "=",
                                round(value)),
                  label = ifelse(variable == "prediction", "prediction", label),
                  label = ifelse(variable == "intercept", "intercept", label)) %>%
    dplyr::mutate(start = ifelse(variable == "intercept", intercept, start)) %>%
    dplyr::arrange(position)
  df_explained %>%
    ggplot(., aes(x = reorder(label, position)),
           alpha = 0.9) +
    ## one segment per variable, coloured by contribution sign
    geom_segment(aes(xend = label,
                     y = ifelse(label == "intercept", end, start),
                     yend = end,
                     colour = sign_new),
                 size = 7) +
    ## the final "prediction" bar spans from the intercept to the prediction
    geom_segment(data = dplyr::filter(df_explained, variable == "prediction"),
                 aes(xend = label,
                     y = end,
                     yend = intercept),
                 size = 7,
                 color = "#34404D") +
    ## numeric contribution printed next to each bar (bold for prediction row)
    geom_text(aes(x = label,
                  y = ifelse(sign_new == "-", start, end),
                  label = round(contribution, 3),
                  fontface = ifelse(variable == "prediction", 2, 1)),
              size = 18,
              nudge_y = mean(abs(df_explained$contribution))/8) +
    ## dashed reference line at the intercept
    geom_hline(aes(yintercept = intercept),
               lty = "dashed",
               alpha = 0.5) +
    scale_color_manual("",
                       values = c("-" = "firebrick4",
                                  "+" = "#50BC8A",
                                  "0" = "black"),
                       labels = c("-" = "Decreasing Prediction",
                                  "+" = "Increasing Prediction",
                                  "0" = "Neutral")) +
    coord_flip() +
    theme_minimal() +
    theme(legend.position = "none") +
    guides(color = guide_legend(override.aes = list(size = 5))) +
    labs(x = "", y = "", subtitle = title_string)
}
| /setup.R | no_license | fabianmax/Forecasting-XAI | R | false | false | 4,402 | r | library(data.table)
library(dplyr)
library(lubridate)
library(reshape2)
library(vars)
library(caret)
library(iml)
library(DALEX)
library(breakDown)
library(iBreakDown)
library(ingredients)
library(drifter)
library(ggplot2)
library(plotly)
library(ggridges)
library(viridis)
library(formula.tools)
library(xgboost)
library(DT)
library(gridExtra)
library(kableExtra)
col_pal <- magma(5)
source("train_xgb.R")
source("create_Z_varest.R")
# Daimler colors
# Daimler corporate identity colours as hex strings, converted from the
# official 0-255 RGB triples.  Order: blue grey, blue green 1, blue green 2,
# green, ice blue, red, black.
dci_palette <- function() {
  c(
    rgb(52, 64, 77, maxColorValue = 255),     # blue grey
    rgb(113, 190, 196, maxColorValue = 255),  # blue green 1
    rgb(94, 126, 144, maxColorValue = 255),   # blue green 2
    rgb(80, 188, 138, maxColorValue = 255),   # green
    rgb(161, 179, 192, maxColorValue = 255),  # ice blue
    rgb(239, 95, 91, maxColorValue = 255),    # red
    rgb(0, 0, 0, maxColorValue = 255)         # black
  )
}
# custom function to plot lime objects
plot_lime <- function(lime_object, model_type){
  # Reuse the title that lime's own plot method would produce.
  title_string <- plot(lime_object)$labels$title
  # Horizontal bar chart of feature attributions, ordered by effect size.
  base_plot <- ggplot(lime_object$results) +
    geom_bar(aes(x = reorder(feature.value, effect), y = effect),
             stat = "identity",
             fill = "#5E7E90",
             alpha = 0.95)
  base_plot +
    coord_flip() +
    theme_minimal() +
    labs(x = "",
         y = "Feature Attribution",
         title = title_string,
         subtitle = model_type)
}
# custom function to plot breakdwon plots
plot_waterfall <- function(explain_object, title_string){
  ## Waterfall (break-down) plot: one horizontal bar per variable showing how
  ## its contribution moves the model output from the intercept towards the
  ## final prediction.
  df_explained <- as.data.frame(explain_object)
  ## baseline (intercept) and final prediction values, used as anchors below
  intercept <- df_explained %>%
    dplyr::filter(variable == "intercept") %>%
    dplyr::pull(contribution)
  prediction <- df_explained %>%
    dplyr::filter(variable == "prediction") %>%
    dplyr::pull(contribution)
  ## build segment coordinates: each bar runs from the previous cumulative
  ## value (`start`, via lag) to its own cumulative value (`end`);
  ## "cummulative" is the column name produced by the breakDown output
  df_explained <- df_explained %>%
    dplyr::select(-label) %>%
    dplyr::select(variable, contribution, end = cummulative, value = variable_value, sign, position) %>%
    dplyr::mutate(start = dplyr::lag(end),
                  sign_new = case_when(sign == "1" ~ "+",
                                       sign == "0" ~ "",
                                       sign == "-1" ~ "-",
                                       TRUE ~ "+"),
                  value = as.numeric(as.character(value)),
                  ## bar label "variable = rounded value"; the intercept and
                  ## prediction rows keep their plain names
                  label = paste(gsub(pattern = " =.*", replacement = "", x = variable),
                                "=",
                                round(value)),
                  label = ifelse(variable == "prediction", "prediction", label),
                  label = ifelse(variable == "intercept", "intercept", label)) %>%
    dplyr::mutate(start = ifelse(variable == "intercept", intercept, start)) %>%
    dplyr::arrange(position)
  df_explained %>%
    ggplot(., aes(x = reorder(label, position)),
           alpha = 0.9) +
    ## one segment per variable, coloured by contribution sign
    geom_segment(aes(xend = label,
                     y = ifelse(label == "intercept", end, start),
                     yend = end,
                     colour = sign_new),
                 size = 7) +
    ## the final "prediction" bar spans from the intercept to the prediction
    geom_segment(data = dplyr::filter(df_explained, variable == "prediction"),
                 aes(xend = label,
                     y = end,
                     yend = intercept),
                 size = 7,
                 color = "#34404D") +
    ## numeric contribution printed next to each bar (bold for prediction row)
    geom_text(aes(x = label,
                  y = ifelse(sign_new == "-", start, end),
                  label = round(contribution, 3),
                  fontface = ifelse(variable == "prediction", 2, 1)),
              size = 18,
              nudge_y = mean(abs(df_explained$contribution))/8) +
    ## dashed reference line at the intercept
    geom_hline(aes(yintercept = intercept),
               lty = "dashed",
               alpha = 0.5) +
    scale_color_manual("",
                       values = c("-" = "firebrick4",
                                  "+" = "#50BC8A",
                                  "0" = "black"),
                       labels = c("-" = "Decreasing Prediction",
                                  "+" = "Increasing Prediction",
                                  "0" = "Neutral")) +
    coord_flip() +
    theme_minimal() +
    theme(legend.position = "none") +
    guides(color = guide_legend(override.aes = list(size = 5))) +
    labs(x = "", y = "", subtitle = title_string)
}
|
#' (Modified) TWINSPAN in R
#'
#' Calculates TWINSPAN (TWo-way INdicator SPecies ANalaysis, Hill 1979) and its modified version according to Rolecek et al. (2009)
#' @author Mark O. Hill wrote the original Fortran code, which has been compiled by Stephan M. Hennekens into \emph{twinspan.exe} to be used within his application MEGATAB (which was, in the past, part of Turboveg for Windows; Hennekens & Schaminee 2001). This version of \emph{twinspan.exe} was later used also in JUICE program (Tichy 2002) and fixed by Petr Smilauer for issues related to order instability. The \code{twinspanR} package was written by David Zeleny (zeleny.david@@gmail.com); it is basically an R wrapper around \emph{twinspan.exe} program maintaining the communication between \emph{twinspan.exe} and R, with some added functionality (e.g. implementing the algorithm of modified TWINSPAN by Rolecek et al. 2009).
#' @name twinspan
#' @param com Community data (\code{data.frame} or \code{matrix}).
#' @param modif Should the modified TWINSPAN algorithm be used? (logical, value, default = FALSE, i.e. standard TWINSPAN)
#' @param cut.levels Pseudospecies cut levels (default = \code{c(0,2,5,10,20)}). Should not exceed 9 cut levels.
#' @param min.group.size Minimum size of the group, which should not be further divided (default = 5).
#' @param levels Number of hierarchical levels of divisions (default = 6, should be between 0 and 15). Applies only for standard TWINSPAN (\code{modif = FALSE}).
#' @param clusters Number of clusters generated by modified TWINSPAN (default = 5). Applies only for modified TWINSPAN (\code{modif = TRUE}).
#' @param diss Dissimilarity (default = 'bray') used to calculate cluster heterogeneity for modified TWINSPAN (\code{modif = TRUE}). Available options are: \code{'total.inertia'} for total inertia measured by correspondence analysis; \code{'whittaker'} for Whittaker's multiplicative measure of beta diversity; \code{'bray'}, \code{'jaccard'} and some other (see Details) for pairwise measure of betadiversity; \code{'multi.jaccard'} and \code{'multi.sorensen'} for multi-site measures of beta diversity (sensu Baselga et al. 2007). Details for more information. Applies only for modified TWINSPAN (\code{modif = TRUE}).
#' @param min.diss Minimum dissimilarity under which the cluster will not be divided further (default = NULL, which means that the stopping rule is based on number of clusters (parameter \code{clusters})). Currently not implemented.
#' @param mean.median Should be the average dissimilarity of cluster calculated as mean or median of all between sample dissimilarities within the cluster? (default = \code{'mean'}, alternative is \code{'median'})
#' @param show.output.on.console Logical; should the function communicating with \code{twinspan.exe} show the output (rather long) of TWINSPAN program on console? Default = \code{FALSE}. Argument passed via function \code{shell} into \code{system}.
#' @param quiet Logical; should the function reading TWINSPAN output files (tw.TWI and tw.PUN) be quiet and not report on console number of items it has read? Default = \code{TRUE}, means the function is quiet. Argument passed into function \code{scan}.
#' @param object Object of the class \code{'tw'}.
#' @param ... Other (rarely used) TWINSPAN parameters passed into function \code{\link{create.tw.dat}} (see the help file of this function for complete list of modifiable arguments).
#' @details The function \code{twinspan} calculates TWINSPAN classification algorithm introduced by Hill (1979) and alternatively also modified TWINSPAN algorithm introduced by Rolecek et al. (2009). It generates object of the class \code{tw}, with generic \code{print} function printing results, \code{summary} for overview of parameters and \code{cut} defining which sample should be classified into which cluster.
#'
#' Default values for arguments used in \code{twinspan} function (e.g. definition of pseudospecies cut levels, number of hierarchical divisions etc.) are the same as the default values of the original TWINSPAN program (Hill 1979) and also WinTWINS (Hill & Smilauer 2005).
#'
#' When calculating modified TWINSPAN (\code{modif = TRUE}), one needs to choose number of target clusters (argument \code{cluster}) instead of hierarchical levels of divisions as in standard TWINSPAN (argument \code{levels}), and also the measure of dissimilarity (\code{diss}) to calculate heterogeneity of clusters at each step of division (the most heterogeneous one is divided in the next step). Several groups of beta diversity measures are currently implemented:
#' \itemize{
#' \item\code{'total.inertia'} - total inertia, calculated by correspondence analysis (\code{cca} function from \code{vegan}) and applied on original quantitative species data (abundances);
#' \item\code{'whittaker'} - Whittaker's beta diversity, calculated as gamma/mean(alpha)-1 (Whittaker 1960), applied on species data transformed into presences-absences;
#' \item\code{'manhattan'}, \code{'euclidean'}, \code{'canberra'}, \code{'bray'}, \code{'kulczynski'}, \code{'jaccard'}, \code{'gower'}, \code{'altGower'}, \code{'morisita'}, \code{'horn'}, \code{'mountford'}, \code{'raup'}, \code{'binomial'}, \code{'chao'}, \code{'cao'} or \code{'mahalanobis'} - mean of beta diversities calculated among pairs of samples - argument is passed into argument \code{diss} in \code{\link{vegdist}} function of \code{vegan}, applied on original quantitative species data (abundances);
#' \item\code{'multi.sorensen'} or \code{'multi.jaccard'} - multi-site beta diversity, calculated from group of sites according to Baselga et al. (2007) and using function \code{beta.multi} from library \code{betapart}.
#' }
#'
#' If the row names in community matrix (\code{com}) contain spaces, these names will be transformed into syntactically valid names using function \code{make.names} (syntactically valid name consists of letters, numbers and the dot or underline characters and starts with a letter or the dot not followed by a number).
#'
#' Arguments \code{show.output.on.console} and \code{quiet} regulates how "verbal" will be \code{twinspan} function while running. Default setting (\code{show.output.on.console = FALSE, quiet = TRUE}) supress all the output. Setting \code{quiet = FALSE} has only minor effect - it reports how many items have been read in each step of analysis from the output files (tw.TWI and tw.PUN) using function \code{scan} (the argument \code{quiet} is directly passed into this function). In contrary setting \code{show.output.on.console = TRUE} prints complete output generated by \code{twinspan.exe} program on console. Argument \code{show.output.on.console} has similar behavior as the argument of the same name in function \code{\link{system}}, but the value is not directly passed to this function. Output could be captured using function \code{capture.output} from package \code{utils} - see examples below.
#'
#'
#' @return \code{twinspan} returns object of the class \code{'tw'}, which is a list with the following items:
#' \itemize{
#' \item \code{classif} data frame with three columns: \code{order} - sequential number of plot, \code{plot.no} - original number of plot (\code{row.names} in community matrix \code{com}), \code{class} - binomial code with hieararchical result of TWINSPAN classification.
#' \item \code{twi} vector (if \code{modif = FALSE}) or list (if \code{modif = TRUE}) of complete machine-readable output of TWINSPAN algorithm read from *.TWI file. In case of modified TWINSPAN (\code{modif = TRUE}) it is a list with number of items equals to number of clusters.
#' \item \code{spnames} data frame with two columns: \code{full.name} - full scientific species name (\code{names (com)}), \code{abbrev.name} - eight-digits abbreviation created by \code{make.cepnames} function from \code{vegan}.
#' \item \code{modif} logical; was the result calculated using standard TWINSPAN (\code{modif = FALSE}) or its modified version (\code{modif = TRUE})?
#' }
#' @references \itemize{
#' \item Baselga A., Jimenez-Valverde A. & Niccolini G. (2007): A multiple-site similarity measure independent of richness. \emph{Biology Letters}, 3: 642-645.
#' \item Hennekens S.M. & Schaminee J.H.J. (2001): TURBOVEG, a comprehensive data base management system for vegetation data. \emph{Journal of Vegetation Science}, 12: 589-591.
#' \item Hill M.O. (1979): \emph{TWINSPAN - A FORTRAN program for arranging multivariate data in an ordered two-way table by classification of the individuals and attributes}. Section of Ecology and Systematics, Cornell University, Ithaca, New York.
#' \item Hill M.O. & Smilauer P. (2005): \emph{TWINSPAN for Windows version 2.3}. Centre for Ecology and Hydrology & University of South Bohemia, Huntingdon & Ceske Budejovice.
#' \item Rolecek J., Tichy L., Zeleny D. & Chytry M. (2009): Modified TWINSPAN classification in which the hierarchy respects cluster heterogeneity. \emph{Journal of Vegetation Science}, 20: 596-602.
#' \item Tichy L. (2002): JUICE, software for vegetation classification. \emph{Journal of Vegetation Science}, 13: 451-453.
#' \item Whittaker R.H. (1960): Vegetation of the Siskiyou mountains, Oregon and California. \emph{Ecological Monographs}, 30:279-338.
#' }
#' @examples
#' ## Modified TWINSPAN on traditional Ellenberg's Danube meadow dataset, projected on DCA
#' ## and compared with original classification into three vegetation types made by tabular sorting:
#' library (twinspanR)
#' library (vegan)
#' data (danube)
#' res <- twinspan (danube$spe, modif = TRUE, clusters = 4)
#' k <- cut (res)
#' dca <- decorana (danube$spe)
#' par (mfrow = c(1,2))
#' ordiplot (dca, type = 'n', display = 'si', main = 'Modified TWINSPAN')
#' points (dca, col = k)
#' for (i in c(1,2,4)) ordihull (dca, groups = k, show.group = i, col = i,
#' draw = 'polygon', label = TRUE)
#' ordiplot (dca, type = 'n', display = 'si', main = 'Original assignment\n (Ellenberg 1954)')
#' points (dca, col = danube$env$veg.type)
#' for (i in c(1:3)) ordihull (dca, groups = danube$env$veg.type,
#' show.group = unique (danube$env$veg.type)[i], col = i,
#' draw = 'polygon', label = TRUE)
#'
#' ## To capture the console output of twinspan.exe into R object, use the following:
#' \dontrun{
#' out <- capture.output (tw <- twinspan (danube$spe, show.output.on.console = T))
#' summary (tw) # returns summary of twinspan algorithm
#' cat (out, sep = '\n') # prints the captured output
#' write.table (out, file = 'out.txt', quot = F, row.names = F) # writes output to 'out.txt' file
#' }
#' @seealso \code{\link{create.tw.dat}}, \code{\link{cut.tw}}, \code{\link{print.tw}}.
#' @importFrom riojaExtra write.CEP
#' @importFrom stats median
#' @importFrom vegan make.cepnames cca vegdist
#' @importFrom betapart beta.multi
#' @export
twinspan <- function (com, modif = FALSE, cut.levels = c(0, 2, 5, 10, 20), min.group.size = 5, levels = 6, clusters = 5, diss = 'bray', min.diss = NULL, mean.median = 'mean', show.output.on.console = FALSE, quiet = TRUE, ...)
{
  # DEFINITION OF FUNCTIONS
  # Heterogeneity of each cluster defined by 'tw.class.level', returned in the
  # order of sort (unique (tw.class.level)). Depending on 'diss' this is total
  # inertia (CA), Whittaker's multiplicative beta, a multi-site Baselga
  # measure, or the mean/median of pairwise dissimilarities within the cluster.
  cluster.heter.fun <- function (com, tw.class.level, diss, mean.median)
    unlist (lapply (sort (unique (tw.class.level)), FUN = function (x)
    {
      if (diss == 'total.inertia') vegan::cca (com[tw.class.level == x, ])$tot.chi else
      if (diss == 'whittaker') {com.t <- vegan::decostand (com[tw.class.level == x, ], 'pa'); gamma <- sum (colSums (com.t) > 0); alpha <- mean (rowSums (com.t)); (gamma/alpha)-1} else
      # FIX: split 'diss' itself, not the literal 'multi.sorensen' - previously
      # diss = 'multi.jaccard' was silently evaluated with the Sorensen family.
      if (diss == 'multi.jaccard' || diss == 'multi.sorensen') {com.t <- vegan::decostand (com[tw.class.level == x, ], 'pa'); betapart::beta.multi (com.t, index.family = unlist (strsplit (diss, split = '.', fixed = TRUE))[2])[[3]]} else
      if (mean.median == 'mean') mean (vegan::vegdist (com[tw.class.level == x, ], method = diss)) else median (vegan::vegdist (com[tw.class.level == x, ], method = diss))
    }))
  # PREPARATION OF DATA
  DISS <- c("manhattan", "euclidean", "canberra", "bray",
            "kulczynski", "gower", "morisita", "horn", "mountford",
            "jaccard", "raup", "binomial", "chao", "altGower", "cao",
            "mahalanobis", "total.inertia", "whittaker", "multi.jaccard", "multi.sorensen")
  diss <- pmatch (diss, DISS)
  if (is.na (diss)) stop ('invalid distance method')
  if (diss == -1) stop ('ambiguous distance method')  # NOTE(review): pmatch returns NA for ambiguous matches, so this branch is likely unreachable
  diss <- DISS[diss]
  if (modif && clusters < 2) stop ('modified TWINSPAN needs at least 2 clusters')  # prevents an infinite while-loop below
  com <- as.data.frame (com)
  species.names <- names (com)
  tw.heter <- list ()
  names (com) <- vegan::make.cepnames (species.names)
  # cut.tw cannot handle white space in sample names, therefore make them syntactically valid
  rownames (com) <- make.names (rownames (com))
  # STANDARD TWINSPAN
  if (!modif) tw <- twinspan0 (com, cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, show.output.on.console = show.output.on.console, quiet = quiet, ...)
  # MODIFIED TWINSPAN - repeatedly divide the most heterogeneous cluster by a
  # one-level TWINSPAN until the requested number of clusters is reached
  if (modif)
  {
    groups01 <- matrix (ncol = clusters - 1, nrow = nrow (com))  # one 0/1 column per division step
    tw.temp <- list ()
    tw <- list ()
    tw.temp[[1]] <- twinspan0 (com, cut.levels = cut.levels, min.group.size = min.group.size, levels = 1, show.output.on.console = show.output.on.console, quiet = quiet, ...)
    tw.heter[[1]] <- list (tw.class.level = 1, cluster.heter = cluster.heter.fun (com = com, tw.class.level = rep (1, nrow (com)), diss = diss, mean.median = mean.median), no.samples.per.group = nrow (com), which.most.heter = 1)
    groups01[, 1] <- cut (tw.temp[[1]], level = 1) - 1
    clusters.temp <- 2
    while (clusters.temp < clusters)
    {
      # FIX: seq_len (clusters.temp - 1) - the original '1:clusters.temp-1' parses
      # as (1:clusters.temp) - 1 and only worked because index 0 is silently dropped.
      tw.class.level <- as.numeric (as.factor (apply (groups01[, seq_len (clusters.temp - 1), drop = FALSE], 1, paste, collapse = '')))
      cluster.heter <- cluster.heter.fun (com = com, tw.class.level = tw.class.level, diss = diss, mean.median = mean.median)
      sort.by.heter <- sort (unique (tw.class.level))[order (cluster.heter, decreasing = TRUE)]
      no.samples.per.group <- unlist (lapply (sort.by.heter, FUN = function (no) sum (tw.class.level == no)))
      # the most heterogeneous cluster which is still large enough to be divided
      which.most.heter <- sort.by.heter[no.samples.per.group >= min.group.size][1]
      tw.heter[[clusters.temp]] <- list (tw.class.level = sort (unique (tw.class.level)), cluster.heter = cluster.heter, no.samples.per.group = no.samples.per.group[order (sort.by.heter)], which.most.heter = which.most.heter)
      tw.temp[[clusters.temp]] <- twinspan0 (com[tw.class.level == which.most.heter, ], cut.levels = cut.levels, min.group.size = min.group.size, levels = 1, show.output.on.console = show.output.on.console, quiet = quiet, ...)
      groups01[, clusters.temp] <- groups01[, clusters.temp - 1]
      groups01[tw.class.level == which.most.heter, clusters.temp] <- cut (tw.temp[[clusters.temp]], level = 1) - 1
      clusters.temp <- clusters.temp + 1
    }
    # for the last group (which is not going to be divided further)
    tw.class.level <- as.numeric (as.factor (apply (groups01[, seq_len (clusters.temp - 1), drop = FALSE], 1, paste, collapse = '')))
    cluster.heter <- cluster.heter.fun (com = com, tw.class.level = tw.class.level, diss = diss, mean.median = mean.median)
    sort.by.heter <- sort (unique (tw.class.level))[order (cluster.heter, decreasing = TRUE)]
    no.samples.per.group <- unlist (lapply (sort.by.heter, FUN = function (no) sum (tw.class.level == no)))
    which.most.heter <- sort.by.heter[no.samples.per.group >= min.group.size][1]
    tw.heter[[clusters.temp]] <- list (tw.class.level = sort (unique (tw.class.level)), cluster.heter = cluster.heter, no.samples.per.group = no.samples.per.group[order (sort.by.heter)], which.most.heter = which.most.heter)
    res <- list (order = tw.temp[[1]]$classif$order, plot.no = tw.temp[[1]]$classif$plot.no, class = apply (groups01, 1, FUN = function (x) paste ('*', paste (x, collapse = ''), sep = '')))
    tw$classif <- as.data.frame (res)
    class (tw) <- c('tw')
    tw$twi <- lapply (tw.temp, FUN = function (x) x$twi)
  }
  tw$spnames <- as.data.frame (cbind (full.name = species.names, abbrev.name = vegan::make.cepnames (species.names)))
  tw$modif <- modif
  tw$summary <- list (modif = modif, cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, clusters = clusters, diss = diss, min.diss = min.diss, mean.median = mean.median, heter = tw.heter)
  class (tw) <- 'tw'
  return (tw)
}
# Run a single pass of the external twinspan.exe on 'com' and parse its
# output files. Internal helper for twinspan(); returns an object of class
# 'tw0' (classif data frame + raw *.TWI content).
twinspan0 <- function (com, cut.levels, min.group.size, levels, show.output.on.console, quiet, ...)
{
  # All work happens inside the 'exec' folder of the installed package, where
  # twinspan.exe and tw.bat live. FIX: restore the working directory via
  # on.exit, so it is restored even when twinspan.exe or file parsing fails.
  actual.wd <- getwd ()
  on.exit (setwd (actual.wd), add = TRUE)
  setwd (file.path (find.package ('twinspanR'), 'exec'))
  # write.CEP in riojaExtra cannot handle integer columns; FIX: convert them
  # column-wise so the data frame (and its row names) stays intact instead of
  # collapsing to an unnamed matrix via sapply
  if (is.integer (com[1, 1])) com[] <- lapply (com, as.numeric)
  com <- com[, colSums (com) > 0]   # drop empty species
  riojaExtra::write.CEP (com, fName = 'tw.cc!')
  create.tw.dat (cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, ...)
  output <- shell ('tw.bat', intern = TRUE)  # Windows-only: runs twinspan.exe
  if (show.output.on.console)
  {
    # replace form-feed control characters from the Fortran output by newlines
    output [output %in% ' \f'] <- '\n'
    output [output %in% '\f Input parameters:'] <- ' Input parameters:'
    output [output %in% '\f Name of inputfile? '] <- '\n Name of inputfile? '
    cat (output, sep = '\n')
  }
  scanned.PUN <- scan (file = 'tw.PUN', what = 'raw', quiet = quiet)
  scanned.TWI <- scan (file = 'tw.TWI', what = 'raw', sep = '\n', quiet = quiet)
  # the classification table sits between token 10 and the 'SPECIES' marker
  if (length (scanned.PUN) == 0) res <- 0 else res <- scanned.PUN[10:(which (scanned.PUN == 'SPECIES') - 1)]
  tw0 <- list ()
  tw0$classif <- as.data.frame (matrix (res, ncol = 4, byrow = TRUE))[, -3]
  names (tw0$classif) <- c('order', 'plot.no', 'class')
  tw0$twi <- scanned.TWI
  class (tw0) <- c('tw0')
  return (tw0)
}
#' @name twinspan
#' @export
# S3 summary method for class 'tw': prints the settings stored in
# object$summary and, for modified TWINSPAN, one heterogeneity table per
# division step. Output goes to the console via cat()/print.default().
summary.tw <- function (object, ...)
{
if (!object$modif)
{
# NOTE(review): 'spe' is a typo for 'sep'. cat() has no argument 'spe', so
# the string falls into '...' and is printed, which accidentally produces
# the intended blank lines anyway.
cat ('Standard TWINSPAN (Hill 1979)', spe = '\n\n')
cat ('Basic setting:\n')
cat (c('Pseudospecies cut levels: ', object$summary$cut.levels, '\n'))
cat (c('Minimum group size: ', object$summary$min.group.size, '\n'))
cat (c('Number of hierarchical levels: ', object$summary$levels, '\n'))
}
if (object$modif)
{
# same 'spe' typo as above - harmless for the same reason
cat ('TWINSPAN (Hill 1979) modified according to Rolecek et al. (2009)', spe = '\n\n')
cat ('Basic setting:\n')
cat (c('Pseudospecies cut levels: ', object$summary$cut.levels, '\n'))
cat (c('Minimum group size: ', object$summary$min.group.size, '\n'))
cat (c('Required number of clusters: ', object$summary$clusters, '\n'))
cat (c('Dissimilarity measure: ', object$summary$diss, '\n'))
cat (c('Mean or median of dissimilarity calculated? ', object$summary$mean.median, '\n'))
cat (c('\nResults of modified TWINSPAN algorithm:\n'))
# one table per division step: heterogeneity and size of every cluster,
# plus the cluster selected as candidate for the next division
no_print <- lapply (object$summary$heter, FUN = function (het)
{
for_print <- rbind ('Cluster heterogeneity' = formatC(het$cluster.heter, format = 'f', digits = 3), 'No of samples' = formatC(het$no.samples.per.group))
colnames (for_print) <- paste ('CLUST', het$tw.class.level, sep = ' ')
print.default (for_print, quote = F, print.gap = 2, justify = 'right')
cat (c('Candidate cluster for next division: ', het$which.most.heter, '\n\n'))
})
}
} | /R/twinspan.r | no_license | Tharaka18/twinspanR | R | false | false | 19,128 | r | #' (Modified) TWINSPAN in R
#'
#' Calculates TWINSPAN (TWo-way INdicator SPecies ANalysis, Hill 1979) and its modified version according to Rolecek et al. (2009)
#' @author Mark O. Hill wrote the original Fortran code, which has been compiled by Stephan M. Hennekens into \emph{twinspan.exe} to be used within his application MEGATAB (which was, in the past, part of Turboveg for Windows; Hennekens & Schaminee 2001). This version of \emph{twinspan.exe} was later used also in JUICE program (Tichy 2002) and fixed by Petr Smilauer for issues related to order instability. The \code{twinspanR} package was written by David Zeleny (zeleny.david@@gmail.com); it is basically an R wrapper around \emph{twinspan.exe} program maintaining the communication between \emph{twinspan.exe} and R, with some added functionality (e.g. implementing the algorithm of modified TWINSPAN by Rolecek et al. 2009).
#' @name twinspan
#' @param com Community data (\code{data.frame} or \code{matrix}).
#' @param modif Should the modified TWINSPAN algorithm be used? (logical, value, default = FALSE, i.e. standard TWINSPAN)
#' @param cut.levels Pseudospecies cut levels (default = \code{c(0,2,5,10,20)}). Should not exceed 9 cut levels.
#' @param min.group.size Minimum size of the group, which should not be further divided (default = 5).
#' @param levels Number of hierarchical levels of divisions (default = 6, should be between 0 and 15). Applies only for standard TWINSPAN (\code{modif = FALSE}).
#' @param clusters Number of clusters generated by modified TWINSPAN (default = 5). Applies only for modified TWINSPAN (\code{modif = TRUE}).
#' @param diss Dissimilarity (default = 'bray') used to calculate cluster heterogeneity for modified TWINSPAN (\code{modif = TRUE}). Available options are: \code{'total.inertia'} for total inertia measured by correspondence analysis; \code{'whittaker'} for Whittaker's multiplicative measure of beta diversity; \code{'bray'}, \code{'jaccard'} and some other (see Details) for pairwise measure of betadiversity; \code{'multi.jaccard'} and \code{'multi.sorensen'} for multi-site measures of beta diversity (sensu Baselga et al. 2007). Details for more information. Applies only for modified TWINSPAN (\code{modif = TRUE}).
#' @param min.diss Minimum dissimilarity under which the cluster will not be divided further (default = NULL, which means that the stopping rule is based on number of clusters (parameter \code{clusters})). Currently not implemented.
#' @param mean.median Should be the average dissimilarity of cluster calculated as mean or median of all between sample dissimilarities within the cluster? (default = \code{'mean'}, alternative is \code{'median'})
#' @param show.output.on.console Logical; should the function communicating with \code{twinspan.exe} show the output (rather long) of TWINSPAN program on console? Default = \code{FALSE}. Argument passsed via function \code{shell} into \code{system}.
#' @param quiet Logical; should the function reading TWINSPAN output files (tw.TWI and tw.PUN) be quiet and not report on console number of items it has read? Default = \code{TRUE}, means the function is quiet. Argument passed into function \code{scan}.
#' @param object Object of the class \code{'tw'}.
#' @param ... Other (rarely used) TWINSPAN parameters passed into function \code{\link{create.tw.dat}} (see the help file of this function for complete list of modifiable arguments).
#' @details The function \code{twinspan} calculates TWINSPAN classification algorithm introduced by Hill (1979) and alternatively also modified TWINSPAN algorithm introduced by Rolecek et al. (2009). It generates object of the class \code{tw}, with generic \code{print} function printing results, \code{summary} for overview of parameters and \code{cut} defining which sample should be classified into which cluster.
#'
#' Default values for arguments used in \code{twinspan} function (e.g. definition of pseudospecies cut levels, number of hierarchical divisions etc.) are the same as the default values of the original TWINSPAN program (Hill 1979) and also WinTWINS (Hill & Smilauer 2005).
#'
#' When calculating modified TWINSPAN (\code{modif = TRUE}), one needs to choose number of target clusters (argument \code{cluster}) instead of hierarchical levels of divisions as in standard TWINSPAN (argument \code{levels}), and also the measure of dissimilarity (\code{diss}) to calculate heterogeneity of clusters at each step of division (the most heterogeneous one is divided in the next step). Several groups of beta diversity measures are currently implemented:
#' \itemize{
#' \item\code{'total.inertia'} - total inertia, calculated by correspondence analysis (\code{cca} function from \code{vegan}) and applied on original quantitative species data (abundances);
#' \item\code{'whittaker'} - Whittaker's beta diversity, calculated as gamma/mean(alpha)-1 (Whittaker 1960), applied on species data transformed into presences-absences;
#' \item\code{'manhattan'}, \code{'euclidean'}, \code{'canberra'}, \code{'bray'}, \code{'kulczynski'}, \code{'jaccard'}, \code{'gower'}, \code{'altGower'}, \code{'morisita'}, \code{'horn'}, \code{'mountford'}, \code{'raup'}, \code{'binomial'}, \code{'chao'}, \code{'cao'} or \code{'mahalanobis'} - mean of beta diversities calculated among pairs of samples - argument is passed into argument \code{diss} in \code{\link{vegdist}} function of \code{vegan}, applied on original quantitative species data (abundances);
#' \item\code{'multi.sorensen'} or \code{'multi.jaccard'} - multi-site beta diversity, calculated from group of sites according to Baselga et al. (2007) and using function \code{beta.multi} from library \code{betapart}.
#' }
#'
#' If the row names in community matrix (\code{com}) contain spaces, these names will be transformed into syntactically valid names using function \code{make.names} (syntactically valid name consists of letters, numbers and the dot or underline characters and starts with a letter or the dot not followed by a number).
#'
#' Arguments \code{show.output.on.console} and \code{quiet} regulates how "verbal" will be \code{twinspan} function while running. Default setting (\code{show.output.on.console = FALSE, quiet = TRUE}) supress all the output. Setting \code{quiet = FALSE} has only minor effect - it reports how many items have been read in each step of analysis from the output files (tw.TWI and tw.PUN) using function \code{scan} (the argument \code{quiet} is directly passed into this function). In contrary setting \code{show.output.on.console = TRUE} prints complete output generated by \code{twinspan.exe} program on console. Argument \code{show.output.on.console} has similar behavior as the argument of the same name in function \code{\link{system}}, but the value is not directly passed to this function. Output could be captured using function \code{capture.output} from package \code{utils} - see examples below.
#'
#'
#' @return \code{twinspan} returns object of the class \code{'tw'}, which is a list with the following items:
#' \itemize{
#' \item \code{classif} data frame with three columns: \code{order} - sequential number of plot, \code{plot.no} - original number of plot (\code{row.names} in community matrix \code{com}), \code{class} - binomial code with hierarchical result of TWINSPAN classification.
#' \item \code{twi} vector (if \code{modif = FALSE}) or list (if \code{modif = TRUE}) of complete machine-readable output of TWINSPAN algorithm read from *.TWI file. In case of modified TWINSPAN (\code{modif = TRUE}) it is a list with number of items equals to number of clusters.
#' \item \code{spnames} data frame with two columns: \code{full.name} - full scientific species name (\code{names (com)}), \code{abbrev.name} - eight-digits abbreviation created by \code{make.cepnames} function from \code{vegan}.
#' \item \code{modif} logical; was the result calculated using standard TWINSPAN (\code{modif = FALSE}) or its modified version (\code{modif = TRUE})?
#' }
#' @references \itemize{
#' \item Baselga A., Jimenez-Valverde A. & Niccolini G. (2007): A multiple-site similarity measure independent of richness. \emph{Biology Letters}, 3: 642-645.
#' \item Hennekens S.M. & Schaminee J.H.J. (2001): TURBOVEG, a comprehensive data base management system for vegetation data. \emph{Journal of Vegetation Science}, 12: 589-591.
#' \item Hill M.O. (1979): \emph{TWINSPAN - A FORTRAN program for arranging multivariate data in an ordered two-way table by classification of the individuals and attributes}. Section of Ecology and Systematics, Cornell University, Ithaca, New York.
#' \item Hill M.O. & Smilauer P. (2005): \emph{TWINSPAN for Windows version 2.3}. Centre for Ecology and Hydrology & University of South Bohemia, Huntingdon & Ceske Budejovice.
#' \item Rolecek J., Tichy L., Zeleny D. & Chytry M. (2009): Modified TWINSPAN classification in which the hierarchy respects cluster heterogeneity. \emph{Journal of Vegetation Science}, 20: 596-602.
#' \item Tichy L. (2002): JUICE, software for vegetation classification. \emph{Journal of Vegetation Science}, 13: 451-453.
#' \item Whittaker R.H. (1960): Vegetation of the Siskiyou mountains, Oregon and California. \emph{Ecological Monographs}, 30:279-338.
#' }
#' @examples
#' ## Modified TWINSPAN on traditional Ellenberg's Danube meadow dataset, projected on DCA
#' ## and compared with original classification into three vegetation types made by tabular sorting:
#' library (twinspanR)
#' library (vegan)
#' data (danube)
#' res <- twinspan (danube$spe, modif = TRUE, clusters = 4)
#' k <- cut (res)
#' dca <- decorana (danube$spe)
#' par (mfrow = c(1,2))
#' ordiplot (dca, type = 'n', display = 'si', main = 'Modified TWINSPAN')
#' points (dca, col = k)
#' for (i in c(1,2,4)) ordihull (dca, groups = k, show.group = i, col = i,
#' draw = 'polygon', label = TRUE)
#' ordiplot (dca, type = 'n', display = 'si', main = 'Original assignment\n (Ellenberg 1954)')
#' points (dca, col = danube$env$veg.type)
#' for (i in c(1:3)) ordihull (dca, groups = danube$env$veg.type,
#' show.group = unique (danube$env$veg.type)[i], col = i,
#' draw = 'polygon', label = TRUE)
#'
#' ## To capture the console output of twinspan.exe into R object, use the following:
#' \dontrun{
#' out <- capture.output (tw <- twinspan (danube$spe, show.output.on.console = T))
#' summary (tw) # returns summary of twinspan algorithm
#' cat (out, sep = '\n') # prints the captured output
#' write.table (out, file = 'out.txt', quot = F, row.names = F) # writes output to 'out.txt' file
#' }
#' @seealso \code{\link{create.tw.dat}}, \code{\link{cut.tw}}, \code{\link{print.tw}}.
#' @importFrom riojaExtra write.CEP
#' @importFrom stats median
#' @importFrom vegan make.cepnames cca vegdist
#' @importFrom betapart beta.multi
#' @export
twinspan <- function (com, modif = FALSE, cut.levels = c(0, 2, 5, 10, 20), min.group.size = 5, levels = 6, clusters = 5, diss = 'bray', min.diss = NULL, mean.median = 'mean', show.output.on.console = FALSE, quiet = TRUE, ...)
{
  # DEFINITION OF FUNCTIONS
  # Heterogeneity of each cluster defined by 'tw.class.level', returned in the
  # order of sort (unique (tw.class.level)). Depending on 'diss' this is total
  # inertia (CA), Whittaker's multiplicative beta, a multi-site Baselga
  # measure, or the mean/median of pairwise dissimilarities within the cluster.
  cluster.heter.fun <- function (com, tw.class.level, diss, mean.median)
    unlist (lapply (sort (unique (tw.class.level)), FUN = function (x)
    {
      if (diss == 'total.inertia') vegan::cca (com[tw.class.level == x, ])$tot.chi else
      if (diss == 'whittaker') {com.t <- vegan::decostand (com[tw.class.level == x, ], 'pa'); gamma <- sum (colSums (com.t) > 0); alpha <- mean (rowSums (com.t)); (gamma/alpha)-1} else
      # FIX: split 'diss' itself, not the literal 'multi.sorensen' - previously
      # diss = 'multi.jaccard' was silently evaluated with the Sorensen family.
      if (diss == 'multi.jaccard' || diss == 'multi.sorensen') {com.t <- vegan::decostand (com[tw.class.level == x, ], 'pa'); betapart::beta.multi (com.t, index.family = unlist (strsplit (diss, split = '.', fixed = TRUE))[2])[[3]]} else
      if (mean.median == 'mean') mean (vegan::vegdist (com[tw.class.level == x, ], method = diss)) else median (vegan::vegdist (com[tw.class.level == x, ], method = diss))
    }))
  # PREPARATION OF DATA
  DISS <- c("manhattan", "euclidean", "canberra", "bray",
            "kulczynski", "gower", "morisita", "horn", "mountford",
            "jaccard", "raup", "binomial", "chao", "altGower", "cao",
            "mahalanobis", "total.inertia", "whittaker", "multi.jaccard", "multi.sorensen")
  diss <- pmatch (diss, DISS)
  if (is.na (diss)) stop ('invalid distance method')
  if (diss == -1) stop ('ambiguous distance method')  # NOTE(review): pmatch returns NA for ambiguous matches, so this branch is likely unreachable
  diss <- DISS[diss]
  if (modif && clusters < 2) stop ('modified TWINSPAN needs at least 2 clusters')  # prevents an infinite while-loop below
  com <- as.data.frame (com)
  species.names <- names (com)
  tw.heter <- list ()
  names (com) <- vegan::make.cepnames (species.names)
  # cut.tw cannot handle white space in sample names, therefore make them syntactically valid
  rownames (com) <- make.names (rownames (com))
  # STANDARD TWINSPAN
  if (!modif) tw <- twinspan0 (com, cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, show.output.on.console = show.output.on.console, quiet = quiet, ...)
  # MODIFIED TWINSPAN - repeatedly divide the most heterogeneous cluster by a
  # one-level TWINSPAN until the requested number of clusters is reached
  if (modif)
  {
    groups01 <- matrix (ncol = clusters - 1, nrow = nrow (com))  # one 0/1 column per division step
    tw.temp <- list ()
    tw <- list ()
    tw.temp[[1]] <- twinspan0 (com, cut.levels = cut.levels, min.group.size = min.group.size, levels = 1, show.output.on.console = show.output.on.console, quiet = quiet, ...)
    tw.heter[[1]] <- list (tw.class.level = 1, cluster.heter = cluster.heter.fun (com = com, tw.class.level = rep (1, nrow (com)), diss = diss, mean.median = mean.median), no.samples.per.group = nrow (com), which.most.heter = 1)
    groups01[, 1] <- cut (tw.temp[[1]], level = 1) - 1
    clusters.temp <- 2
    while (clusters.temp < clusters)
    {
      # FIX: seq_len (clusters.temp - 1) - the original '1:clusters.temp-1' parses
      # as (1:clusters.temp) - 1 and only worked because index 0 is silently dropped.
      tw.class.level <- as.numeric (as.factor (apply (groups01[, seq_len (clusters.temp - 1), drop = FALSE], 1, paste, collapse = '')))
      cluster.heter <- cluster.heter.fun (com = com, tw.class.level = tw.class.level, diss = diss, mean.median = mean.median)
      sort.by.heter <- sort (unique (tw.class.level))[order (cluster.heter, decreasing = TRUE)]
      no.samples.per.group <- unlist (lapply (sort.by.heter, FUN = function (no) sum (tw.class.level == no)))
      # the most heterogeneous cluster which is still large enough to be divided
      which.most.heter <- sort.by.heter[no.samples.per.group >= min.group.size][1]
      tw.heter[[clusters.temp]] <- list (tw.class.level = sort (unique (tw.class.level)), cluster.heter = cluster.heter, no.samples.per.group = no.samples.per.group[order (sort.by.heter)], which.most.heter = which.most.heter)
      tw.temp[[clusters.temp]] <- twinspan0 (com[tw.class.level == which.most.heter, ], cut.levels = cut.levels, min.group.size = min.group.size, levels = 1, show.output.on.console = show.output.on.console, quiet = quiet, ...)
      groups01[, clusters.temp] <- groups01[, clusters.temp - 1]
      groups01[tw.class.level == which.most.heter, clusters.temp] <- cut (tw.temp[[clusters.temp]], level = 1) - 1
      clusters.temp <- clusters.temp + 1
    }
    # for the last group (which is not going to be divided further)
    tw.class.level <- as.numeric (as.factor (apply (groups01[, seq_len (clusters.temp - 1), drop = FALSE], 1, paste, collapse = '')))
    cluster.heter <- cluster.heter.fun (com = com, tw.class.level = tw.class.level, diss = diss, mean.median = mean.median)
    sort.by.heter <- sort (unique (tw.class.level))[order (cluster.heter, decreasing = TRUE)]
    no.samples.per.group <- unlist (lapply (sort.by.heter, FUN = function (no) sum (tw.class.level == no)))
    which.most.heter <- sort.by.heter[no.samples.per.group >= min.group.size][1]
    tw.heter[[clusters.temp]] <- list (tw.class.level = sort (unique (tw.class.level)), cluster.heter = cluster.heter, no.samples.per.group = no.samples.per.group[order (sort.by.heter)], which.most.heter = which.most.heter)
    res <- list (order = tw.temp[[1]]$classif$order, plot.no = tw.temp[[1]]$classif$plot.no, class = apply (groups01, 1, FUN = function (x) paste ('*', paste (x, collapse = ''), sep = '')))
    tw$classif <- as.data.frame (res)
    class (tw) <- c('tw')
    tw$twi <- lapply (tw.temp, FUN = function (x) x$twi)
  }
  tw$spnames <- as.data.frame (cbind (full.name = species.names, abbrev.name = vegan::make.cepnames (species.names)))
  tw$modif <- modif
  tw$summary <- list (modif = modif, cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, clusters = clusters, diss = diss, min.diss = min.diss, mean.median = mean.median, heter = tw.heter)
  class (tw) <- 'tw'
  return (tw)
}
# Run a single pass of the external twinspan.exe on 'com' and parse its
# output files. Internal helper for twinspan(); returns an object of class
# 'tw0' (classif data frame + raw *.TWI content).
twinspan0 <- function (com, cut.levels, min.group.size, levels, show.output.on.console, quiet, ...)
{
  # All work happens inside the 'exec' folder of the installed package, where
  # twinspan.exe and tw.bat live. FIX: restore the working directory via
  # on.exit, so it is restored even when twinspan.exe or file parsing fails.
  actual.wd <- getwd ()
  on.exit (setwd (actual.wd), add = TRUE)
  setwd (file.path (find.package ('twinspanR'), 'exec'))
  # write.CEP in riojaExtra cannot handle integer columns; FIX: convert them
  # column-wise so the data frame (and its row names) stays intact instead of
  # collapsing to an unnamed matrix via sapply
  if (is.integer (com[1, 1])) com[] <- lapply (com, as.numeric)
  com <- com[, colSums (com) > 0]   # drop empty species
  riojaExtra::write.CEP (com, fName = 'tw.cc!')
  create.tw.dat (cut.levels = cut.levels, min.group.size = min.group.size, levels = levels, ...)
  output <- shell ('tw.bat', intern = TRUE)  # Windows-only: runs twinspan.exe
  if (show.output.on.console)
  {
    # replace form-feed control characters from the Fortran output by newlines
    output [output %in% ' \f'] <- '\n'
    output [output %in% '\f Input parameters:'] <- ' Input parameters:'
    output [output %in% '\f Name of inputfile? '] <- '\n Name of inputfile? '
    cat (output, sep = '\n')
  }
  scanned.PUN <- scan (file = 'tw.PUN', what = 'raw', quiet = quiet)
  scanned.TWI <- scan (file = 'tw.TWI', what = 'raw', sep = '\n', quiet = quiet)
  # the classification table sits between token 10 and the 'SPECIES' marker
  if (length (scanned.PUN) == 0) res <- 0 else res <- scanned.PUN[10:(which (scanned.PUN == 'SPECIES') - 1)]
  tw0 <- list ()
  tw0$classif <- as.data.frame (matrix (res, ncol = 4, byrow = TRUE))[, -3]
  names (tw0$classif) <- c('order', 'plot.no', 'class')
  tw0$twi <- scanned.TWI
  class (tw0) <- c('tw0')
  return (tw0)
}
#' @name twinspan
#' @export
summary.tw <- function (object, ...)
{
  # Print a human-readable summary of a TWINSPAN result ('tw' object).
  # object$modif selects between standard TWINSPAN (Hill 1979) and the
  # modified algorithm of Rolecek et al. (2009); all settings are read from
  # object$summary.  Returns NULL invisibly (called for its printed output).
  if (!object$modif)
  {
    # FIX: the original used cat(..., spe = '\n\n'); 'spe' partially matched
    # cat's 'sep' argument, which is a no-op for a single string, so the
    # title ran straight into the next line.  Print the newlines explicitly.
    cat ('Standard TWINSPAN (Hill 1979)\n\n')
    cat ('Basic setting:\n')
    cat (c('Pseudospecies cut levels: ', object$summary$cut.levels, '\n'))
    cat (c('Minimum group size: ', object$summary$min.group.size, '\n'))
    cat (c('Number of hierarchical levels: ', object$summary$levels, '\n'))
  }
  if (object$modif)
  {
    cat ('TWINSPAN (Hill 1979) modified according to Rolecek et al. (2009)\n\n')
    cat ('Basic setting:\n')
    cat (c('Pseudospecies cut levels: ', object$summary$cut.levels, '\n'))
    cat (c('Minimum group size: ', object$summary$min.group.size, '\n'))
    cat (c('Required number of clusters: ', object$summary$clusters, '\n'))
    cat (c('Dissimilarity measure: ', object$summary$diss, '\n'))
    cat (c('Mean or median of dissimilarity calculated? ', object$summary$mean.median, '\n'))
    cat (c('\nResults of modified TWINSPAN algorithm:\n'))
    # one heterogeneity table per division step, plus the candidate cluster
    no_print <- lapply (object$summary$heter, FUN = function (het)
    {
      for_print <- rbind ('Cluster heterogeneity' = formatC(het$cluster.heter, format = 'f', digits = 3), 'No of samples' = formatC(het$no.samples.per.group))
      colnames (for_print) <- paste ('CLUST', het$tw.class.level, sep = ' ')
      print.default (for_print, quote = FALSE, print.gap = 2, justify = 'right')
      cat (c('Candidate cluster for next division: ', het$which.most.heter, '\n\n'))
    })
  }
}
# Fuzzer-generated regression input: one integer argument for setBitNumber.
testlist <- list(n = 201326591L)
# Invoke the internal breakfast:::setBitNumber with the stored argument list.
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609960923-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 97 | r | testlist <- list(n = 201326591L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampelatorFunctions.R
\name{getStratified}
\alias{getStratified}
\title{Calculate design statistics and sample allocation for stratified sampling}
\usage{
getStratified(purpose = c("estimation", "testing"), sampleSize = NA,
desiredDifference = NA, power = NA, typeIerror,
allocation = c("proportional", "neyman"), stratumVariances,
stratumProportions, populationSize = NA, adjustFinitePopulation = FALSE,
inflationFactor = NA, exactSum = TRUE, roundEndResult = TRUE)
}
\arguments{
\item{purpose}{character string, the purpose of the study is one of
"estimation" or "testing"}
\item{sampleSize}{integer, the total sample size; default is NA}
\item{desiredDifference}{numeric, for "estimation" the desired margin of error,
i.e. half width of the desired confidence interval or for "testing" the
true difference in means that is tested; default is NA}
\item{power}{numeric, statistical power to detect the predefined difference
"desiredDifference" when purpose is "testing"; default is NA}
\item{typeIerror}{numeric, the type I error}
\item{allocation}{method to allocate the total sample size over the strata;
one of "proportional" or "neyman"}
\item{stratumVariances}{numeric vector, estimated variance within each stratum}
\item{stratumProportions}{numeric vector, the expected proportion of the
population in each stratum}
\item{populationSize}{numeric, the population size; default is NA}
\item{adjustFinitePopulation}{boolean, adjust for finite population?;
default is FALSE}
\item{inflationFactor}{numeric, the inflation factor with which the uncorrected
sample size should be multiplied to account e.g. for missingness;
default is NA}
\item{exactSum}{boolean, if TRUE the total sampleSize is exactly the sum of
all stratumSizes; if FALSE the sampleSize may be smaller than the the sum of
all stratumSizes; default is TRUE}
\item{roundEndResult}{boolean, if FALSE raw calculation numbers are given as
output; default is TRUE}
}
\value{
a list with two data frames: one with sampleAllocation info
(stratumVariances, stratumProportions, availableSamples, stratumSize);
one with design statistics (sampleSize, desiredDifference, power)
}
\description{
Calculate design statistics and sample allocation for stratified sampling
}
| /sampelator/man/getStratified.Rd | permissive | openefsa/sampelator | R | false | true | 2,336 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sampelatorFunctions.R
\name{getStratified}
\alias{getStratified}
\title{Calculate design statistics and sample allocation for stratified sampling}
\usage{
getStratified(purpose = c("estimation", "testing"), sampleSize = NA,
desiredDifference = NA, power = NA, typeIerror,
allocation = c("proportional", "neyman"), stratumVariances,
stratumProportions, populationSize = NA, adjustFinitePopulation = FALSE,
inflationFactor = NA, exactSum = TRUE, roundEndResult = TRUE)
}
\arguments{
\item{purpose}{character string, the purpose of the study is one of
"estimation" or "testing"}
\item{sampleSize}{integer, the total sample size; default is NA}
\item{desiredDifference}{numeric, for "estimation" the desired margin of error,
i.e. half width of the desired confidence interval or for "testing" the
true difference in means that is tested; default is NA}
\item{power}{numeric, statistical power to detect the predefined difference
"desiredDifference" when purpose is "testing"; default is NA}
\item{typeIerror}{numeric, the type I error}
\item{allocation}{method to allocate the total sample size over the strata;
one of "proportional" or "neyman"}
\item{stratumVariances}{numeric vector, estimated variance within each stratum}
\item{stratumProportions}{numeric vector, the expected proportion of the
population in each stratum}
\item{populationSize}{numeric, the population size; default is NA}
\item{adjustFinitePopulation}{boolean, adjust for finite population?;
default is FALSE}
\item{inflationFactor}{numeric, the inflation factor with which the uncorrected
sample size should be multiplied to account e.g. for missingness;
default is NA}
\item{exactSum}{boolean, if TRUE the total sampleSize is exactly the sum of
all stratumSizes; if FALSE the sampleSize may be smaller than the the sum of
all stratumSizes; default is TRUE}
\item{roundEndResult}{boolean, if FALSE raw calculation numbers are given as
output; default is TRUE}
}
\value{
a list with two dataframes: one with sampleAllocation info
(stratumVariances, stratumProportions, availableSamples, stratumSize);
one with design statistics (sampleSize, desiredDifference, power)
}
\description{
Calculate design statistics and sample allocation for stratified sampling
}
|
#!/usr/bin/Rscript
# MinionQC version 1.0
# Copyright (C) 2017 Robert Lanfear
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# suppress warnings globally: several merge/plot steps below emit benign warnings
options(warn=-1)
library(ggplot2)
suppressPackageStartupMessages(library(viridis))
library(plyr)
library(reshape2)
library(readr)
library(yaml)
library(scales)
library(parallel)
library(futile.logger)
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(optparse))
# option parsing #
# Defines the command-line interface: -i/--input, -o/--outputdirectory,
# -q/--qscore_cutoff (default 7) and -p/--processors (default 1).
parser <- OptionParser()
parser <- add_option(parser, 
                     opt_str = c("-i", "--input"), 
                     type = "character",
                     dest = 'input.file',
                     help="Input file or directory (required). Either a full path to a sequence_summary.txt file, or a full path to a directory containing one or more such files. In the latter case the directory is searched recursively."
                     )
parser <- add_option(parser,
                     opt_str = c("-o", "--outputdirectory"),
                     type = "character",
                     dest = 'output.dir',
                     help="Output directory (required). If a single sequencing_summary.txt file is passed as input, then the output directory will contain just the plots associated with that file. If a directory containing more than one sequencing_summary.txt files is passed as input, then the plots will be put into sub-directories that have the same names as the parent directories of each sequencing_summary.txt file"
                     )
parser <- add_option(parser,
                     opt_str = c("-q", "--qscore_cutoff"),
                     type="double",
                     default=7.0,
                     dest = 'q',
                     help="The cutoff value for the mean Q score of a read (default 7). Used to create separate plots for reads above and below this threshold"
                     )
# FIX: corrected "anlaysis" -> "analysis" in the user-facing help text
parser <- add_option(parser,
                     opt_str = c("-p", "--processors"),
                     type="integer",
                     default=1,
                     dest = 'cores',
                     help="Number of processors to use for the analysis (default 1). Only helps when you are analysing more than one sequencing_summary.txt file at a time"
                     )
opt = parse_args(parser)
input.file = opt$input.file
output.dir = opt$output.dir
q = opt$q
cores = opt$cores
# this is how we label the reads at least as good as q
q_title = paste("Q>=", q, sep="")
# build the map for R9.5
# Each p/q block covers 32 channels (4 rows x 8 columns).  The p blocks fill
# columns 1-8 and the q blocks fill columns 16 down to 9, producing a
# 32-row x 16-column grid of all 512 channels -- presumably mirroring the
# physical channel layout of the R9.5 flowcell (used for the per-channel
# facets in the overview plot); verify against ONT's channel map if changed.
p1 = data.frame(channel=33:64, row=rep(1:4, each=8), col=rep(1:8, 4))
p2 = data.frame(channel=481:512, row=rep(5:8, each=8), col=rep(1:8, 4))
p3 = data.frame(channel=417:448, row=rep(9:12, each=8), col=rep(1:8, 4))
p4 = data.frame(channel=353:384, row=rep(13:16, each=8), col=rep(1:8, 4))
p5 = data.frame(channel=289:320, row=rep(17:20, each=8), col=rep(1:8, 4))
p6 = data.frame(channel=225:256, row=rep(21:24, each=8), col=rep(1:8, 4))
p7 = data.frame(channel=161:192, row=rep(25:28, each=8), col=rep(1:8, 4))
p8 = data.frame(channel=97:128, row=rep(29:32, each=8), col=rep(1:8, 4))
q1 = data.frame(channel=1:32, row=rep(1:4, each=8), col=rep(16:9, 4))
q2 = data.frame(channel=449:480, row=rep(5:8, each=8), col=rep(16:9, 4))
q3 = data.frame(channel=385:416, row=rep(9:12, each=8), col=rep(16:9, 4))
q4 = data.frame(channel=321:352, row=rep(13:16, each=8), col=rep(16:9, 4))
q5 = data.frame(channel=257:288, row=rep(17:20, each=8), col=rep(16:9, 4))
q6 = data.frame(channel=193:224, row=rep(21:24, each=8), col=rep(16:9, 4))
q7 = data.frame(channel=129:160, row=rep(25:28, each=8), col=rep(16:9, 4))
q8 = data.frame(channel=65:96, row=rep(29:32, each=8), col=rep(16:9, 4))
map = rbind(p1, p2, p3, p4, p5, p6, p7, p8, q1, q2, q3, q4, q5, q6, q7, q8)
add_cols <- function(d, min.q){
    # Filter a sequencing summary (d) to reads with mean Q >= min.q, then
    # add derived columns: flowcell row/col (from the global `map`),
    # cumulative.bases (accumulated over reads sorted longest-first),
    # hour (of the run) and reads_per_hour.
    filtered = d[which(d$mean_qscore_template >= min.q), ]
    if(nrow(filtered) == 0){
        flog.error(paste("There are no reads with a mean Q score higher than your cutoff of ", min.q, ". Please choose a lower cutoff and try again.", sep = ""))
        quit()
    }
    # attach the physical position of each channel
    filtered = merge(filtered, map, by="channel")
    # longest reads first, then accumulate bases in that order
    filtered = filtered[order(-filtered$sequence_length_template), ]
    filtered$cumulative.bases = cumsum(as.numeric(filtered$sequence_length_template))
    filtered$hour = filtered$start_time %/% 3600
    # count reads generated in each hour and join the counts back on
    hourly = as.data.frame(table(filtered$hour))
    names(hourly) = c("hour", "reads_per_hour")
    hourly$hour = as.numeric(as.character(hourly$hour))
    merge(filtered, hourly, by = c("hour"))
}
load_summary <- function(filepath, min.q){
    # load a sequencing summary and add some info
    # min.q is a vector of length 2 defining 2 levels of min.q to have
    # by default the lowest value is -Inf, i.e. includes all reads. The 
    # other value in min.q is set by the user at the command line
    # Returns a data frame in which every read appears twice: once labelled
    # Q_cutoff = "All reads" and once labelled with the global q_title.
    d = read_tsv(filepath, col_types = cols_only(channel = 'i', 
                                                 num_events_template = 'i', 
                                                 sequence_length_template = 'i', 
                                                 mean_qscore_template = 'n', 
                                                 sequence_length_2d = 'i',
                                                 mean_qscore_2d = 'n',
                                                 start_time = 'n'))
    if("sequence_length_2d" %in% names(d)){
        # it's a 1D2 or 2D run: use the 2d length/quality columns and blank
        # out num_events_template (no per-base event counts available)
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_2d))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_2d))
        d$num_events_template = NA
        d$start_time = as.numeric(as.character(d$start_time))
    }else{
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_template))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_template))
        d$num_events_template = as.numeric(as.character(d$num_events_template))
        d$start_time = as.numeric(as.character(d$start_time))
    }
    d$events_per_base = d$num_events_template/d$sequence_length_template
    # flowcell name is taken from the parent directory of the summary file
    flowcell = basename(dirname(filepath))
    # add columns for all the reads
    d1 = add_cols(d, min.q[1])
    d1$Q_cutoff = "All reads"
    # add columns for just the reads that pass the user Q threshold
    d2 = add_cols(d, min.q[2])
    d2$Q_cutoff = q_title
    # bind those two together into one data frame
    d = as.data.frame(rbindlist(list(d1, d2)))
    # name the flowcell (useful for analyses with >1 flowcell)
    d$flowcell = flowcell
    # make sure this is a factor
    d$Q_cutoff = as.factor(d$Q_cutoff)
    # keep only the columns used downstream
    keep = c("hour","start_time", "channel", "sequence_length_template", "mean_qscore_template", "row", "col", "cumulative.bases", "reads_per_hour", "Q_cutoff", "flowcell", "events_per_base")
    d = d[keep]
    return(d)
}
reads.gt <- function(d, len){
    # Count reads in d whose length is at least `len` (NA lengths ignored).
    sum(d$sequence_length_template >= len, na.rm = TRUE)
}
bases.gt <- function(d, len){
    # Total number of bases contained in reads of d that are at least
    # `len` long (0 if no read qualifies).
    long.enough = d$sequence_length_template[which(d$sequence_length_template >= len)]
    sum(as.numeric(long.enough))
}
log10_minor_break = function (...){
    # Returns a breaks function for ggplot2 log10 scales: it emits minor
    # tick positions at 1..9 within every decade spanned by the data
    # (padded by one decade on each side).
    # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot
    function(x) {
        lo = floor(min(log10(x), na.rm = TRUE)) - 1
        hi = ceiling(max(log10(x), na.rm = TRUE)) + 1
        decades = seq(lo, hi, by = 1)
        offsets = log10(seq_len(9))
        # one column per decade, 9 offsets each; column-major order matches
        # the decade-by-decade sequence of break positions
        10^as.vector(outer(offsets, decades, `+`))
    }
}
binSearch <- function(min, max, df, t = 100000) {
    # binary search algorithm, thanks to https://stackoverflow.com/questions/46292438/optimising-a-calculation-on-every-cumulative-subset-of-a-vector-in-r/46303384#46303384
    # the aim is to return the number of reads in a dataset (df)
    # that comprise the largest subset of reads with an N50 of t
    # we use this to calculte the number of 'ultra long' reads
    # which are defined as those with N50 > 100KB
    #
    # Precondition (satisfied by the caller, summary.stats): df is sorted by
    # descending sequence_length_template and has cumulative.bases filled in,
    # so the N50 of the first k reads is the length at the first row whose
    # cumulative.bases exceeds half of cumulative.bases[k].
    # min/max are row indices bracketing the answer; recursion narrows them.
    mid = floor(mean(c(min, max)))
    if (mid == min) {
        # interval has collapsed: decide between the two remaining bounds
        if (df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[min]/2))] < t) {
            return(min - 1)
        } else {
            return(max - 1)
        }
    }
    # N50 of the first `mid` reads
    n = df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[mid]/2))]
    if (n >= t) {
        # first `mid` reads still have N50 >= t: answer lies in [mid, max]
        return(binSearch(mid, max, df))
    } else {
        return(binSearch(min, mid, df))
    }
}
summary.stats <- function(d, Q_cutoff="All reads"){
    # Calculate summary stats for a single value of min.q
    # d: read table from load_summary (must contain Q_cutoff,
    #    sequence_length_template, cumulative.bases, mean_qscore_template)
    # Q_cutoff: which Q_cutoff label to summarise ("All reads" or q_title)
    # Returns a named list: yield, read counts, N50, mean/median length and
    # quality, plus reads/gigabases above a ladder of length thresholds.
    rows = which(as.character(d$Q_cutoff)==Q_cutoff)
    d = d[rows,]
    d = d[with(d, order(-sequence_length_template)), ] # sort by read length, just in case
    total.bases = sum(as.numeric(d$sequence_length_template))
    total.reads = nrow(d)
    # N50: length of the read at which cumulative yield passes half the total
    N50.length = d$sequence_length_template[min(which(d$cumulative.bases > (total.bases/2)))]
    mean.length = round(mean(as.numeric(d$sequence_length_template)), digits = 1)
    median.length = round(median(as.numeric(d$sequence_length_template)), digits = 1)
    max.length = max(as.numeric(d$sequence_length_template))
    mean.q = round(mean(d$mean_qscore_template), digits = 1)
    median.q = round(median(d$mean_qscore_template), digits = 1)
    # calculate ultra-long reads and bases (max amount of data with N50>100KB)
    ultra.reads = binSearch(1, nrow(d), d, t = 100000)
    if(ultra.reads>=1){
        ultra.gigabases = sum(as.numeric(d$sequence_length_template[1:ultra.reads]))/1000000000
    }else{
        ultra.gigabases = 0
    }
    # reads and gigabases above each length threshold (was a block of eight
    # near-duplicate calls; now driven by one threshold vector)
    thresholds = c(10000, 20000, 50000, 100000, 200000, 500000, 1000000)
    labels = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong")
    reads = c(lapply(thresholds, function(len) reads.gt(d, len)), list(ultra.reads))
    names(reads) = labels
    bases = c(lapply(thresholds, function(len) bases.gt(d, len)/1000000000), list(ultra.gigabases))
    names(bases) = labels
    return(list('total.gigabases' = total.bases/1000000000,
                'total.reads' = total.reads, 
                'N50.length' = N50.length, 
                'mean.length' = mean.length, 
                'median.length' = median.length, 
                'max.length' = max.length, 
                'mean.q' = mean.q, 
                'median.q' = median.q,
                'reads' = reads,
                'gigabases' = bases
                ))
}
channel.summary <- function(d){
    # calculate summaries of what happened in each of the channels
    # of a flowcell; returns a long-format data frame (channel, variable,
    # value) suitable for faceted plotting.
    # FIX: total.reads previously used sum(which(...)), which sums the ROW
    # INDICES of matching reads rather than counting them; use length() to
    # count the reads per channel.
    a = ddply(d, .(channel), 
              summarize, 
              total.bases = sum(sequence_length_template), 
              total.reads = length(which(sequence_length_template >= 0)), 
              mean.read.length = mean(sequence_length_template), 
              median.read.length = median(sequence_length_template))
    b = melt(a, id.vars = c("channel"))
    return(b)
}
single.flowcell <- function(input.file, output.dir, q=8){
    # wrapper function to analyse data from a single flowcell
    # input.file is a sequencing_summary.txt file from a 1D run
    # output.dir is the output directory into which to write results
    # q is the cutoff used for Q values, set by the user
    # NOTE(review): the default q=8 here differs from the CLI default (7);
    # in this script q is always passed explicitly, so the default is unused.
    # Writes summary.yaml plus a set of PNG plots into output.dir and
    # returns the loaded read table (used later for combined analyses).
    flog.info(paste("Loading input file:", input.file))
    d = load_summary(input.file, min.q=c(-Inf, q))
    flowcell = unique(d$flowcell)
    flog.info(paste(sep = "", flowcell, ": creating output directory:", output.dir))
    dir.create(output.dir)
    out.txt = file.path(output.dir, "summary.yaml")
    flog.info(paste(sep = "", flowcell, ": summarising input file for flowcell"))
    all.reads.summary = summary.stats(d, Q_cutoff = "All reads")
    q10.reads.summary = summary.stats(d, Q_cutoff = q_title)
    summary = list("input file" = input.file,
                   "All reads" = all.reads.summary,
                   cutoff = q10.reads.summary,
                   "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB')
    # rename the placeholder 'cutoff' entry to the user-visible Q label
    names(summary)[3] = q_title
    write(as.yaml(summary), out.txt)
    # vertical guide lines every 8 hours -- presumably mux scan times; confirm
    muxes = seq(from = 0, to = max(d$hour), by = 8)
    # make plots
    flog.info(paste(sep = "", flowcell, ": plotting length histogram"))
    p1 = ggplot(d, aes(x = sequence_length_template)) + 
        geom_histogram(bins = 300) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "length_histogram.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info(paste(sep = "", flowcell, ": plotting mean Q score histogram"))
    p2 = ggplot(d, aes(x = mean_qscore_template)) + 
        geom_histogram(bins = 300) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "q_histogram.png"), width = 960/75, height = 960/75, plot = p2)
    # per-channel scatter of length vs time, laid out on the physical grid
    flog.info(paste(sep = "", flowcell, ": plotting flowcell overview"))
    p5 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x=start_time/3600, y=sequence_length_template, colour = mean_qscore_template)) + 
        geom_point(size=1.5, alpha=0.35) + 
        scale_colour_viridis() + 
        labs(colour='Q') + 
        scale_y_log10() + 
        facet_grid(row~col) +
        theme(panel.spacing = unit(0.5, "lines")) +
        xlab("Hours into run") +
        ylab("Read length") +
        theme(text = element_text(size = 40), axis.text.x = element_text(size=12), axis.text.y = element_text(size=12), legend.text=element_text(size=12))
    ggsave(filename = file.path(output.dir, "flowcell_overview.png"), width = 2500/75, height = 2400/75, plot = p5)
    flog.info(paste(sep = "", flowcell, ": plotting flowcell yield summary"))
    p6 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases, colour = Q_cutoff)) + 
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        scale_colour_discrete(guide = guide_legend(title = "Reads")) +
        theme(text = element_text(size = 15))
    # clip the x axis where the remaining yield drops below 1% of the total
    xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))])
    p6 = p6 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "yield_summary.png"), width = 960/75, height = 960/75, plot = p6)
    flog.info(paste(sep = "", flowcell, ": plotting sequence length over time"))
    # relabel each read as above/below the Q threshold for grouped smoothing
    e = subset(d, Q_cutoff=="All reads")
    e$Q = paste(">=", q, sep="")
    e$Q[which(e$mean_qscore_template<q)] = paste("<", q, sep="")
    p7 = ggplot(e, aes(x=start_time/3600, y=sequence_length_template, colour = Q, group = Q)) + 
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() + 
        xlab("Hours into run") + 
        ylab("Mean read length") + 
        ylim(0, NA)
    ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = 960/75, height = 480/75, plot = p7)
    flog.info(paste(sep = "", flowcell, ": plotting Q score over time"))
    p8 = ggplot(e, aes(x=start_time/3600, y=mean_qscore_template, colour = Q, group = Q)) + 
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() + 
        xlab("Hours into run") + 
        ylab("Mean Q score") + 
        ylim(0, NA)
    ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = 960/75, height = 480/75, plot = p8)
    flog.info(paste(sep = "", flowcell, ": plotting reads per hour"))
    f = d[c("hour", "reads_per_hour", "Q_cutoff")]
    f = f[!duplicated(f),]
    g = subset(f, Q_cutoff=="All reads")
    h = subset(f, Q_cutoff==q_title)
    max = max(f$hour)
    # all of this is just to fill in hours with no reads recorded
    all = 0:max
    add.g = all[which(all %in% g$hour == FALSE)]
    if(length(add.g)>0){
        add.g = data.frame(hour = add.g, reads_per_hour = 0, Q_cutoff = "All reads")
        g = rbind(g, add.g)
    }
    add.h = all[which(all %in% h$hour == FALSE)]
    if(length(add.h)>0){
        add.h = data.frame(hour = add.h, reads_per_hour = 0, Q_cutoff = q_title)
        h = rbind(h, add.h)
    }
    i = rbind(g, h)
    i$Q_cutoff = as.character(i$Q_cutoff)
    i$Q_cutoff[which(i$Q_cutoff==q_title)] = paste("Q>=", q, sep="")
    p9 = ggplot(i, aes(x=hour, y=reads_per_hour, colour = Q_cutoff, group = Q_cutoff)) + 
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_point() +
        geom_line() + 
        xlab("Hours into run") + 
        ylab("Number of reads per hour") + 
        ylim(0, NA) + 
        scale_color_discrete(guide = guide_legend(title = "Reads"))
    ggsave(filename = file.path(output.dir, "reads_per_hour.png"), width = 960/75, height = 480/75, plot = p9)
    flog.info(paste(sep = "", flowcell, ": plotting read length vs. q score scatterplot"))
    p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) + 
        geom_point(alpha=0.05, size = 0.4) + 
        scale_x_log10(minor_breaks=log10_minor_break()) + 
        labs(colour='Events per base\n(log scale)\n')  + 
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Mean Q score of read")
    if(max(d$events_per_base, na.rm=T)>0){
        # a catch for 1D2 runs which don't have events per base
        p10 = p10 + scale_colour_viridis(trans = "log", labels = scientific, option = 'inferno')
    }
    ggsave(filename = file.path(output.dir, "length_vs_q.png"), width = 960/75, height = 960/75, plot = p10)
    flog.info(paste(sep = "", flowcell, ": plotting flowcell channels summary histograms"))
    c = channel.summary(subset(d, Q_cutoff=="All reads"))
    c10 = channel.summary(subset(d, Q_cutoff==q_title))
    c$Q_cutoff = "All reads"
    c10$Q_cutoff = q_title
    cc = rbind(c, c10)
    # human-readable facet labels for the channel summary variables
    cc$variable = as.character(cc$variable)
    cc$variable[which(cc$variable=="total.bases")] = "Number of bases per channel"
    cc$variable[which(cc$variable=="total.reads")] = "Number of reads per channel"
    cc$variable[which(cc$variable=="mean.read.length")] = "Mean read length per channel"
    cc$variable[which(cc$variable=="median.read.length")] = "Median read length per channel"
    p11 = ggplot(cc, aes(x = value)) + geom_histogram(bins = 30) + 
        facet_grid(Q_cutoff~variable, scales = "free_x") + 
        theme(text = element_text(size = 20))
    ggsave(filename = file.path(output.dir, "channel_summary.png"), width = 2400/75, height = 960/75, plot = p11)
    return(d)
}
combined.flowcell <- function(d, output.dir, q=8){
    # function to analyse combined data from multiple flowcells
    # useful for getting an overall impression of the combined data
    # d: row-bound read tables from all flowcells; writes summary.yaml and
    # combined plots into output.dir.
    flog.info("Creating output directory")
    out.txt = file.path(output.dir, "summary.yaml")
    # write summaries
    flog.info(paste("Summarising combined data from all flowcells, saving to:", out.txt))
    # tidy up and remove added stuff
    # NOTE(review): the column is named "reads_per_hour", so "reads.per.hour"
    # here never matches and that column is not actually dropped -- verify intent.
    drops = c("cumulative.bases", "hour", "reads.per.hour")
    d = d[ , !(names(d) %in% drops)]
    # recompute cumulative.bases across ALL flowcells, per Q_cutoff level
    d1 = subset(d, Q_cutoff == "All reads")
    d1 = d1[with(d1, order(-sequence_length_template)), ] # sort by read length
    d1$cumulative.bases = cumsum(as.numeric(d1$sequence_length_template))
    d2 = subset(d, Q_cutoff == q_title)
    d2 = d2[with(d2, order(-sequence_length_template)), ] # sort by read length
    d2$cumulative.bases = cumsum(as.numeric(d2$sequence_length_template))
    d1$Q_cutoff = as.factor(d1$Q_cutoff)
    d2$Q_cutoff = as.factor(d2$Q_cutoff)
    all.reads.summary = summary.stats(d1, Q_cutoff = "All reads")
    q10.reads.summary = summary.stats(d2, Q_cutoff = q_title)
    # NOTE(review): "input file" records the global `input.file` (the top-level
    # input directory in the multi-flowcell case), not a per-flowcell path.
    summary = list("input file" = input.file,
                   "All reads" = all.reads.summary,
                   cutoff = q10.reads.summary,
                   "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB')
    names(summary)[3] = q_title
    write(as.yaml(summary), out.txt)
    d = rbind(d1, d2)
    d$Q_cutoff = as.factor(d$Q_cutoff)
    # release the temporary copies
    d1 = 0
    d2 = 0
    # make plots
    flog.info("Plotting combined length histogram")
    p1 = ggplot(d, aes(x = sequence_length_template)) + 
        geom_histogram(bins = 300) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "combined_length_histogram.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info("Plotting combined mean Q score histogram")
    p2 = ggplot(d, aes(x = mean_qscore_template)) + 
        geom_histogram(bins = 300) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "combined_q_histogram.png"), width = 960/75, height = 960/75, plot = p2)
    flog.info("Plotting combined flowcell yield summary")
    p4 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases, colour = Q_cutoff)) + 
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        scale_colour_discrete(guide = guide_legend(title = "Reads")) +
        theme(text = element_text(size = 15))
    # clip the x axis where the remaining yield drops below 1% of the total
    xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))])
    p4 = p4 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "combined_yield_summary.png"), width = 960/75, height = 960/75, plot = p4)
}
multi.flowcell = function(input.file, output.base, q){
    # Per-flowcell wrapper used when several sequencing_summary.txt files
    # are analysed in one run (allows parallelisation via mclapply).
    # Results for each flowcell go to <output.base>/<flowcell>/minionQC.
    dir.create(output.base)
    fc.name = basename(dirname(input.file))
    dir.create(file.path(output.base, fc.name))
    single.flowcell(input.file, file.path(output.base, fc.name, "minionQC"), q)
}
multi.plots = function(dm, output.dir){
    # function to plot data from multiple flowcells, 
    # where the data is not combined (as in combined.flowcell() )
    # but instead just uses multiple lines on each plot.
    # dm: row-bound read tables from all flowcells (one colour per flowcell).
    # vertical guide lines every 8 hours -- presumably mux scan times; confirm
    muxes = seq(from = 0, to = max(dm$hour), by = 8)
    # make plots
    flog.info("Plotting length distributions")
    p1 = ggplot(dm, aes(x = sequence_length_template)) + 
        geom_line(stat="density", aes(colour = flowcell)) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Density")
    ggsave(filename = file.path(output.dir, "length_distributions.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info("Plotting mean Q score distributions")
    p2 = ggplot(dm, aes(x = mean_qscore_template)) + 
        geom_line(stat="density", aes(colour = flowcell)) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") + 
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Density")
    ggsave(filename = file.path(output.dir, "q_distributions.png"), width = 960/75, height = 960/75, plot = p2)
    flog.info("Plotting flowcell yield summary")
    p6 = ggplot(dm, aes(x=sequence_length_template, y=cumulative.bases, colour = flowcell)) + 
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        theme(text = element_text(size = 15)) + 
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y")
    # clip the x axis where the remaining yield drops below 1% of the total
    xmax = max(dm$sequence_length_template[which(dm$cumulative.bases > 0.01 * max(dm$cumulative.bases))])
    p6 = p6 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "yield_summary.png"), width = 960/75, height = 960/75, plot = p6)
    flog.info("Plotting sequence length over time")
    # NOTE(review): relabelling below reads the global `q` (the CLI cutoff),
    # not a function argument.
    e = subset(dm, Q_cutoff=="All reads")
    e$Q = paste("Q>=", q, sep="")
    e$Q[which(e$mean_qscore_template<q)] = paste("Q<", q, sep="")
    p7 = ggplot(e, aes(x=start_time/3600, y=sequence_length_template, colour = flowcell)) + 
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() + 
        xlab("Hours into run") + 
        ylab("Mean read length") + 
        ylim(0, NA) + 
        facet_wrap(~Q, ncol = 1, scales = "free_y")
    ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = 960/75, height = 480/75, plot = p7)
    flog.info("Plotting Q score over time")
    p8 = ggplot(e, aes(x=start_time/3600, y=mean_qscore_template, colour = flowcell)) + 
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() + 
        xlab("Hours into run") + 
        ylab("Mean Q score") + 
        facet_wrap(~Q, ncol = 1, scales = "free_y")
    ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = 960/75, height = 480/75, plot = p8)
}
# Choose how to act depending on whether we have a single input file or multiple input files
if(file_test("-f", input.file)==TRUE){
    # if it's an existing file (not a folder) just run one analysis
    d = single.flowcell(input.file, output.dir, q)
}else if(file_test("-d", input.file)==TRUE){
    # it's a directory, recursively analyse all sequencing_summary.txt files
    # get a list of all sequencing_summary.txt files, recursively
    summaries = list.files(path = input.file, pattern = "sequencing_summary.txt", recursive = TRUE, full.names = TRUE)
    flog.info("")
    flog.info("**** Analysing the following files ****")
    flog.info(summaries)
    # if the user passes a directory with only one sequencing_summary.txt file...
    if(length(summaries) == 1){
        d = single.flowcell(summaries[1], output.dir, q)
        flog.info('**** Analysis complete ****')
    }else{
        # analyse each one and keep the returns in a list
        # (output.dir and q are forwarded positionally to multi.flowcell)
        results = mclapply(summaries, multi.flowcell, output.dir, q, mc.cores = cores)
        # rbind that list
        flog.info('**** Analysing data from all flowcells combined ****')
        dm = as.data.frame(rbindlist(results))
        # now do the single plot on ALL the output
        combined.output = file.path(output.dir, "combinedQC")
        flog.info(paste("Plots from the combined output will be saved in", combined.output))
        dir.create(combined.output)
        combined.flowcell(dm, combined.output, q)
        multi.plots(dm, combined.output)
        flog.info('**** Analysis complete ****')
    }
}else{
    # input is neither an existing file nor a directory
    # FIX: message previously read "Could find ..."; corrected to "Could not find"
    flog.warn(paste("Could not find a sequencing summary file in your input which was: ", 
                    input.file, 
                    "\nThe input must be either a sequencing_summary.txt file, or a directory containing one or more such files"))
}
| /home_bin/MinionQC.R | no_license | ambuechlein/work_scripts | R | false | false | 29,264 | r | #!/usr/bin/Rscript
# MinionQC version 1.0
# Copyright (C) 2017 Robert Lanfear
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details. You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# supress warnings
options(warn=-1)
library(ggplot2)
suppressPackageStartupMessages(library(viridis))
library(plyr)
library(reshape2)
library(readr)
library(yaml)
library(scales)
library(parallel)
library(futile.logger)
suppressPackageStartupMessages(library(data.table))
suppressPackageStartupMessages(library(optparse))
# option parsing #
parser <- OptionParser()
parser <- add_option(parser,
opt_str = c("-i", "--input"),
type = "character",
dest = 'input.file',
help="Input file or directory (required). Either a full path to a sequence_summary.txt file, or a full path to a directory containing one or more such files. In the latter case the directory is searched recursively."
)
parser <- add_option(parser,
opt_str = c("-o", "--outputdirectory"),
type = "character",
dest = 'output.dir',
help="Output directory (required). If a single sequencing_summary.txt file is passed as input, then the output directory will contain just the plots associated with that file. If a directory containing more than one sequencing_summary.txt files is passed as input, then the plots will be put into sub-directories that have the same names as the parent directories of each sequencing_summary.txt file"
)
parser <- add_option(parser,
opt_str = c("-q", "--qscore_cutoff"),
type="double",
default=7.0,
dest = 'q',
help="The cutoff value for the mean Q score of a read (default 7). Used to create separate plots for reads above and below this threshold"
)
parser <- add_option(parser,
opt_str = c("-p", "--processors"),
type="integer",
default=1,
dest = 'cores',
help="Number of processors to use for the anlaysis (default 1). Only helps when you are analysing more than one sequencing_summary.txt file at a time"
)
opt = parse_args(parser)
input.file = opt$input.file
output.dir = opt$output.dir
q = opt$q
cores = opt$cores
# this is how we label the reads at least as good as q
q_title = paste("Q>=", q, sep="")
# Build the map for the R9.5 flowcell: a lookup table giving the physical
# (row, col) position of each of the 512 channels. The layout is assembled
# from sixteen 4x8 blocks: the "p" blocks occupy columns 1-8 and the "q"
# blocks occupy columns 16 down to 9 (mirrored). Used later to facet plots
# by physical position on the flowcell.
p1 = data.frame(channel=33:64, row=rep(1:4, each=8), col=rep(1:8, 4))
p2 = data.frame(channel=481:512, row=rep(5:8, each=8), col=rep(1:8, 4))
p3 = data.frame(channel=417:448, row=rep(9:12, each=8), col=rep(1:8, 4))
p4 = data.frame(channel=353:384, row=rep(13:16, each=8), col=rep(1:8, 4))
p5 = data.frame(channel=289:320, row=rep(17:20, each=8), col=rep(1:8, 4))
p6 = data.frame(channel=225:256, row=rep(21:24, each=8), col=rep(1:8, 4))
p7 = data.frame(channel=161:192, row=rep(25:28, each=8), col=rep(1:8, 4))
p8 = data.frame(channel=97:128, row=rep(29:32, each=8), col=rep(1:8, 4))
q1 = data.frame(channel=1:32, row=rep(1:4, each=8), col=rep(16:9, 4))
q2 = data.frame(channel=449:480, row=rep(5:8, each=8), col=rep(16:9, 4))
q3 = data.frame(channel=385:416, row=rep(9:12, each=8), col=rep(16:9, 4))
q4 = data.frame(channel=321:352, row=rep(13:16, each=8), col=rep(16:9, 4))
q5 = data.frame(channel=257:288, row=rep(17:20, each=8), col=rep(16:9, 4))
q6 = data.frame(channel=193:224, row=rep(21:24, each=8), col=rep(16:9, 4))
q7 = data.frame(channel=129:160, row=rep(25:28, each=8), col=rep(16:9, 4))
q8 = data.frame(channel=65:96, row=rep(29:32, each=8), col=rep(16:9, 4))
# one row per channel: channel, row, col
map = rbind(p1, p2, p3, p4, p5, p6, p7, p8, q1, q2, q3, q4, q5, q6, q7, q8)
add_cols <- function(d, min.q){
    # Take a sequencing summary data frame (d) and a minimum mean Q score (min.q),
    # keep only reads with mean_qscore_template >= min.q, and return the data
    # frame with these columns added:
    #   row, col         : physical flowcell position (merged in from the global `map`)
    #   cumulative.bases : running base total, with reads sorted longest-first
    #   hour             : integer hour of the run in which each read started
    #   reads_per_hour   : number of surviving reads that started in that hour
    d = subset(d, mean_qscore_template >= min.q)
    if(nrow(d)==0){
        # no reads survive the filter: report and abort the whole program
        flog.error(paste("There are no reads with a mean Q score higher than your cutoff of ", min.q, ". Please choose a lower cutoff and try again.", sep = ""))
        quit()
    }
    d = merge(d, map, by="channel")
    d = d[with(d, order(-sequence_length_template)), ] # sort by read length, longest first
    d$cumulative.bases = cumsum(as.numeric(d$sequence_length_template))
    # start_time is in seconds; integer-divide into whole hours
    d$hour = d$start_time %/% 3600
    # add the reads generated for each hour
    reads.per.hour = as.data.frame(table(d$hour))
    names(reads.per.hour) = c("hour", "reads_per_hour")
    reads.per.hour$hour = as.numeric(as.character(reads.per.hour$hour))
    d = merge(d, reads.per.hour, by = c("hour"))
    return(d)
}
load_summary <- function(filepath, min.q){
    # Load a sequencing_summary.txt file and return one long data frame holding
    # two stacked copies of the reads, distinguished by the Q_cutoff column:
    # one copy filtered at min.q[1] and one at min.q[2].
    # min.q is a vector of length 2 defining the two Q-score filter levels;
    # by default the lowest value is -Inf, i.e. includes all reads. The
    # other value in min.q is set by the user at the command line.
    d = read_tsv(filepath, col_types = cols_only(channel = 'i',
                                                 num_events_template = 'i',
                                                 sequence_length_template = 'i',
                                                 mean_qscore_template = 'n',
                                                 sequence_length_2d = 'i',
                                                 mean_qscore_2d = 'n',
                                                 start_time = 'n'))
    if("sequence_length_2d" %in% names(d)){
        # it's a 1D2 or 2D run: use the 2d length/Q columns in place of the
        # template ones; 2D summaries carry no event counts
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_2d))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_2d))
        d$num_events_template = NA
        d$start_time = as.numeric(as.character(d$start_time))
    }else{
        # plain 1D run: just coerce everything to numeric
        d$sequence_length_template = as.numeric(as.character(d$sequence_length_template))
        d$mean_qscore_template = as.numeric(as.character(d$mean_qscore_template))
        d$num_events_template = as.numeric(as.character(d$num_events_template))
        d$start_time = as.numeric(as.character(d$start_time))
    }
    d$events_per_base = d$num_events_template/d$sequence_length_template
    # the flowcell name is taken from the parent directory of the summary file
    flowcell = basename(dirname(filepath))
    # add columns for all the reads
    d1 = add_cols(d, min.q[1])
    d1$Q_cutoff = "All reads"
    # add columns for just the reads that pass the user Q threshold
    d2 = add_cols(d, min.q[2])
    d2$Q_cutoff = q_title
    # bind those two together into one data frame
    d = as.data.frame(rbindlist(list(d1, d2)))
    # name the flowcell (useful for analyses with >1 flowcell)
    d$flowcell = flowcell
    # make sure this is a factor
    d$Q_cutoff = as.factor(d$Q_cutoff)
    # keep only the columns the downstream plots and summaries use
    keep = c("hour","start_time", "channel", "sequence_length_template", "mean_qscore_template", "row", "col", "cumulative.bases", "reads_per_hour", "Q_cutoff", "flowcell", "events_per_base")
    d = d[keep]
    return(d)
}
reads.gt <- function(d, len){
    # Count the reads in data frame d whose template length is at least len.
    long.enough = which(d$sequence_length_template >= len)
    length(long.enough)
}
bases.gt <- function(d, len){
    # Total number of bases contained in the reads from data frame d that are
    # at least len bases long.
    lens = as.numeric(d$sequence_length_template)
    sum(lens[which(lens >= len)])
}
log10_minor_break = function (...){
    # Return a breaks function suitable for ggplot2's minor_breaks argument on a
    # log10 axis: for the data range x it produces the positions 1..9 * 10^k for
    # every decade k spanning the data, padded by one decade on either side.
    # hat-tip: https://stackoverflow.com/questions/30179442/plotting-minor-breaks-on-a-log-scale-with-ggplot
    function(x) {
        lo = floor(min(log10(x), na.rm = TRUE)) - 1
        hi = ceiling(max(log10(x), na.rm = TRUE)) + 1
        decades = seq(lo, hi, by = 1)
        offsets = log10(seq(1, 9, by = 1))
        # one set of 1..9 offsets per decade, decades varying slowest
        10^(as.vector(outer(offsets, decades, "+")))
    }
}
binSearch <- function(min, max, df, t = 100000) {
    # binary search algorithm, thanks to https://stackoverflow.com/questions/46292438/optimising-a-calculation-on-every-cumulative-subset-of-a-vector-in-r/46303384#46303384
    # The aim is to return the number of reads in a dataset (df) that comprise
    # the largest subset of reads with an N50 of at least t. We use this to
    # calculate the number of 'ultra long' reads, defined as those with
    # N50 > 100KB.
    # df must be sorted by sequence_length_template, longest first, and must
    # carry a cumulative.bases column (as produced by add_cols()).
    # BUG FIX: the recursive calls previously dropped `t`, so any non-default
    # threshold silently reverted to 100000 after the first recursion.
    mid = floor(mean(c(min, max)))
    if (mid == min) {
        # the search interval has collapsed: decide between the two candidates
        if (df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[min]/2))] < t) {
            return(min - 1)
        } else {
            return(max - 1)
        }
    }
    # N50 of the subset made of the `mid` longest reads
    n = df$sequence_length_template[min(which(df$cumulative.bases>df$cumulative.bases[mid]/2))]
    if (n >= t) {
        return(binSearch(mid, max, df, t))
    } else {
        return(binSearch(min, mid, df, t))
    }
}
summary.stats <- function(d, Q_cutoff="All reads"){
    # Calculate summary statistics (yield, N50, length and Q distributions,
    # and long-read counts) for the subset of d whose Q_cutoff column matches
    # the given Q_cutoff label. Returns a named list suitable for YAML output.
    rows = which(as.character(d$Q_cutoff)==Q_cutoff)
    d = d[rows,]
    d = d[with(d, order(-sequence_length_template)), ] # sort by read length, just in case
    total.bases = sum(as.numeric(d$sequence_length_template))
    total.reads = nrow(d)
    # N50: length of the read at which the cumulative total passes half the yield
    N50.length = d$sequence_length_template[min(which(d$cumulative.bases > (total.bases/2)))]
    mean.length = round(mean(as.numeric(d$sequence_length_template)), digits = 1)
    median.length = round(median(as.numeric(d$sequence_length_template)), digits = 1)
    max.length = max(as.numeric(d$sequence_length_template))
    mean.q = round(mean(d$mean_qscore_template), digits = 1)
    median.q = round(median(d$mean_qscore_template), digits = 1)
    # calculate ultra-long reads and bases (max amount of data with N50>100KB)
    ultra.reads = binSearch(1, nrow(d), d, t = 100000)
    if(ultra.reads>=1){
        ultra.gigabases = sum(as.numeric(d$sequence_length_template[1:ultra.reads]))/1000000000
    }else{
        ultra.gigabases = 0
    }
    # read counts at a ladder of minimum-length thresholds
    reads = list(
        reads.gt(d, 10000),
        reads.gt(d, 20000),
        reads.gt(d, 50000),
        reads.gt(d, 100000),
        reads.gt(d, 200000),
        reads.gt(d, 500000),
        reads.gt(d, 1000000),
        ultra.reads)
    names(reads) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong")
    # corresponding base counts, converted to gigabases
    bases = list(
        bases.gt(d, 10000)/1000000000,
        bases.gt(d, 20000)/1000000000,
        bases.gt(d, 50000)/1000000000,
        bases.gt(d, 100000)/1000000000,
        bases.gt(d, 200000)/1000000000,
        bases.gt(d, 500000)/1000000000,
        bases.gt(d, 1000000)/1000000000,
        ultra.gigabases)
    names(bases) = c(">10kb", ">20kb", ">50kb", ">100kb", ">200kb", ">500kb", ">1m", "ultralong")
    return(list('total.gigabases' = total.bases/1000000000,
                'total.reads' = total.reads,
                'N50.length' = N50.length,
                'mean.length' = mean.length,
                'median.length' = median.length,
                'max.length' = max.length,
                'mean.q' = mean.q,
                'median.q' = median.q,
                'reads' = reads,
                'gigabases' = bases
    ))
}
channel.summary <- function(d){
    # Calculate per-channel summaries of what happened in each of the channels
    # of a flowcell: total bases, total reads, and mean/median read length.
    # Returns the results in long (melted) format: channel, variable, value.
    a = ddply(d, .(channel),
              summarize,
              total.bases = sum(sequence_length_template),
              # BUG FIX: the original used sum(which(...)), which adds up the
              # row *indices* of the reads (e.g. 3 reads -> 1+2+3 = 6) rather
              # than counting them.
              total.reads = length(which(sequence_length_template>=0)),
              mean.read.length = mean(sequence_length_template),
              median.read.length = median(sequence_length_template))
    b = melt(a, id.vars = c("channel"))
    return(b)
}
single.flowcell <- function(input.file, output.dir, q=8){
    # Wrapper function to analyse data from a single flowcell.
    # input.file is a sequencing_summary.txt file from a 1D run
    # output.dir is the output directory into which to write results
    # q is the cutoff used for Q values, set by the user
    # Side effects: writes summary.yaml and a set of .png plots into output.dir.
    # Returns the per-read data frame produced by load_summary().
    flog.info(paste("Loading input file:", input.file))
    d = load_summary(input.file, min.q=c(-Inf, q))
    flowcell = unique(d$flowcell)
    flog.info(paste(sep = "", flowcell, ": creating output directory:", output.dir))
    dir.create(output.dir)
    out.txt = file.path(output.dir, "summary.yaml")
    flog.info(paste(sep = "", flowcell, ": summarising input file for flowcell"))
    all.reads.summary = summary.stats(d, Q_cutoff = "All reads")
    q10.reads.summary = summary.stats(d, Q_cutoff = q_title)
    summary = list("input file" = input.file,
                   "All reads" = all.reads.summary,
                   cutoff = q10.reads.summary,
                   "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB')
    names(summary)[3] = q_title
    write(as.yaml(summary), out.txt)
    # positions (every 8 hours) of the dashed red guide lines on the time plots
    # -- presumably mux scan times; TODO confirm
    muxes = seq(from = 0, to = max(d$hour), by = 8)
    # make plots
    flog.info(paste(sep = "", flowcell, ": plotting length histogram"))
    p1 = ggplot(d, aes(x = sequence_length_template)) +
        geom_histogram(bins = 300) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "length_histogram.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info(paste(sep = "", flowcell, ": plotting mean Q score histogram"))
    p2 = ggplot(d, aes(x = mean_qscore_template)) +
        geom_histogram(bins = 300) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "q_histogram.png"), width = 960/75, height = 960/75, plot = p2)
    # one panel per physical channel position on the flowcell (row x col grid)
    flog.info(paste(sep = "", flowcell, ": plotting flowcell overview"))
    p5 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x=start_time/3600, y=sequence_length_template, colour = mean_qscore_template)) +
        geom_point(size=1.5, alpha=0.35) +
        scale_colour_viridis() +
        labs(colour='Q') +
        scale_y_log10() +
        facet_grid(row~col) +
        theme(panel.spacing = unit(0.5, "lines")) +
        xlab("Hours into run") +
        ylab("Read length") +
        theme(text = element_text(size = 40), axis.text.x = element_text(size=12), axis.text.y = element_text(size=12), legend.text=element_text(size=12))
    ggsave(filename = file.path(output.dir, "flowcell_overview.png"), width = 2500/75, height = 2400/75, plot = p5)
    flog.info(paste(sep = "", flowcell, ": plotting flowcell yield summary"))
    p6 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases, colour = Q_cutoff)) +
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        scale_colour_discrete(guide = guide_legend(title = "Reads")) +
        theme(text = element_text(size = 15))
    # truncate the x axis where the remaining yield drops below 1% of the total
    xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))])
    p6 = p6 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "yield_summary.png"), width = 960/75, height = 960/75, plot = p6)
    flog.info(paste(sep = "", flowcell, ": plotting sequence length over time"))
    # label each read as above/below the user's Q cutoff
    e = subset(d, Q_cutoff=="All reads")
    e$Q = paste(">=", q, sep="")
    e$Q[which(e$mean_qscore_template<q)] = paste("<", q, sep="")
    p7 = ggplot(e, aes(x=start_time/3600, y=sequence_length_template, colour = Q, group = Q)) +
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() +
        xlab("Hours into run") +
        ylab("Mean read length") +
        ylim(0, NA)
    ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = 960/75, height = 480/75, plot = p7)
    flog.info(paste(sep = "", flowcell, ": plotting Q score over time"))
    p8 = ggplot(e, aes(x=start_time/3600, y=mean_qscore_template, colour = Q, group = Q)) +
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() +
        xlab("Hours into run") +
        ylab("Mean Q score") +
        ylim(0, NA)
    ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = 960/75, height = 480/75, plot = p8)
    flog.info(paste(sep = "", flowcell, ": plotting reads per hour"))
    f = d[c("hour", "reads_per_hour", "Q_cutoff")]
    f = f[!duplicated(f),]
    g = subset(f, Q_cutoff=="All reads")
    h = subset(f, Q_cutoff==q_title)
    max = max(f$hour)
    # all of this is just to fill in hours with no reads recorded
    all = 0:max
    add.g = all[which(all %in% g$hour == FALSE)]
    if(length(add.g)>0){
        add.g = data.frame(hour = add.g, reads_per_hour = 0, Q_cutoff = "All reads")
        g = rbind(g, add.g)
    }
    add.h = all[which(all %in% h$hour == FALSE)]
    if(length(add.h)>0){
        add.h = data.frame(hour = add.h, reads_per_hour = 0, Q_cutoff = q_title)
        h = rbind(h, add.h)
    }
    i = rbind(g, h)
    i$Q_cutoff = as.character(i$Q_cutoff)
    i$Q_cutoff[which(i$Q_cutoff==q_title)] = paste("Q>=", q, sep="")
    p9 = ggplot(i, aes(x=hour, y=reads_per_hour, colour = Q_cutoff, group = Q_cutoff)) +
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_point() +
        geom_line() +
        xlab("Hours into run") +
        ylab("Number of reads per hour") +
        ylim(0, NA) +
        scale_color_discrete(guide = guide_legend(title = "Reads"))
    ggsave(filename = file.path(output.dir, "reads_per_hour.png"), width = 960/75, height = 480/75, plot = p9)
    flog.info(paste(sep = "", flowcell, ": plotting read length vs. q score scatterplot"))
    p10 = ggplot(subset(d, Q_cutoff=="All reads"), aes(x = sequence_length_template, y = mean_qscore_template, colour = events_per_base)) +
        geom_point(alpha=0.05, size = 0.4) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        labs(colour='Events per base\n(log scale)\n') +
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Mean Q score of read")
    if(max(d$events_per_base, na.rm=T)>0){
        # a catch for 1D2 runs which don't have events per base
        p10 = p10 + scale_colour_viridis(trans = "log", labels = scientific, option = 'inferno')
    }
    ggsave(filename = file.path(output.dir, "length_vs_q.png"), width = 960/75, height = 960/75, plot = p10)
    flog.info(paste(sep = "", flowcell, ": plotting flowcell channels summary histograms"))
    c = channel.summary(subset(d, Q_cutoff=="All reads"))
    c10 = channel.summary(subset(d, Q_cutoff==q_title))
    c$Q_cutoff = "All reads"
    c10$Q_cutoff = q_title
    cc = rbind(c, c10)
    # give the melted variable names human-readable labels for the facet strips
    cc$variable = as.character(cc$variable)
    cc$variable[which(cc$variable=="total.bases")] = "Number of bases per channel"
    cc$variable[which(cc$variable=="total.reads")] = "Number of reads per channel"
    cc$variable[which(cc$variable=="mean.read.length")] = "Mean read length per channel"
    cc$variable[which(cc$variable=="median.read.length")] = "Median read length per channel"
    p11 = ggplot(cc, aes(x = value)) + geom_histogram(bins = 30) +
        facet_grid(Q_cutoff~variable, scales = "free_x") +
        theme(text = element_text(size = 20))
    ggsave(filename = file.path(output.dir, "channel_summary.png"), width = 2400/75, height = 960/75, plot = p11)
    return(d)
}
combined.flowcell <- function(d, output.dir, q=8){
    # Function to analyse combined data from multiple flowcells, pooled into a
    # single dataset; useful for getting an overall impression of the combined
    # data. Writes summary.yaml and combined histogram/yield plots to output.dir.
    flog.info("Creating output directory")
    out.txt = file.path(output.dir, "summary.yaml")
    # write summaries
    flog.info(paste("Summarising combined data from all flowcells, saving to:", out.txt))
    # tidy up and remove per-flowcell derived columns; cumulative.bases must be
    # recomputed over the pooled reads
    drops = c("cumulative.bases", "hour", "reads.per.hour")
    d = d[ , !(names(d) %in% drops)]
    d1 = subset(d, Q_cutoff == "All reads")
    d1 = d1[with(d1, order(-sequence_length_template)), ] # sort by read length
    d1$cumulative.bases = cumsum(as.numeric(d1$sequence_length_template))
    d2 = subset(d, Q_cutoff == q_title)
    d2 = d2[with(d2, order(-sequence_length_template)), ] # sort by read length
    d2$cumulative.bases = cumsum(as.numeric(d2$sequence_length_template))
    d1$Q_cutoff = as.factor(d1$Q_cutoff)
    d2$Q_cutoff = as.factor(d2$Q_cutoff)
    all.reads.summary = summary.stats(d1, Q_cutoff = "All reads")
    q10.reads.summary = summary.stats(d2, Q_cutoff = q_title)
    summary = list("input file" = input.file,
                   "All reads" = all.reads.summary,
                   cutoff = q10.reads.summary,
                   "notes" = 'ultralong reads refers to the largest set of reads with N50>100KB')
    names(summary)[3] = q_title
    write(as.yaml(summary), out.txt)
    d = rbind(d1, d2)
    d$Q_cutoff = as.factor(d$Q_cutoff)
    # release the intermediate copies
    d1 = 0
    d2 = 0
    # make plots
    flog.info("Plotting combined length histogram")
    p1 = ggplot(d, aes(x = sequence_length_template)) +
        geom_histogram(bins = 300) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "combined_length_histogram.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info("Plotting combined mean Q score histogram")
    p2 = ggplot(d, aes(x = mean_qscore_template)) +
        geom_histogram(bins = 300) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Number of reads")
    ggsave(filename = file.path(output.dir, "combined_q_histogram.png"), width = 960/75, height = 960/75, plot = p2)
    flog.info("Plotting combined flowcell yield summary")
    p4 = ggplot(d, aes(x=sequence_length_template, y=cumulative.bases, colour = Q_cutoff)) +
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        scale_colour_discrete(guide = guide_legend(title = "Reads")) +
        theme(text = element_text(size = 15))
    # truncate the x axis where the remaining yield drops below 1% of the total
    xmax = max(d$sequence_length_template[which(d$cumulative.bases > 0.01 * max(d$cumulative.bases))])
    p4 = p4 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "combined_yield_summary.png"), width = 960/75, height = 960/75, plot = p4)
}
multi.flowcell = function(input.file, output.base, q){
    # Wrapper around single.flowcell() that allows parallelisation when more
    # than one flowcell is analysed in one run: results for each summary file
    # go into <output.base>/<flowcell name>/minionQC, where the flowcell name
    # is the parent directory of the summary file. Returns the per-read data.
    dir.create(output.base)
    flowcell.name = basename(dirname(input.file))
    flowcell.root = file.path(output.base, flowcell.name)
    dir.create(flowcell.root)
    qc.dir = file.path(flowcell.root, "minionQC")
    single.flowcell(input.file, qc.dir, q)
}
multi.plots = function(dm, output.dir){
    # Function to plot data from multiple flowcells, where the data is not
    # combined (as in combined.flowcell() ) but instead just uses multiple
    # lines on each plot, one per flowcell.
    # positions (every 8 hours) of the dashed red guide lines on the time plots
    muxes = seq(from = 0, to = max(dm$hour), by = 8)
    # make plots
    flog.info("Plotting length distributions")
    p1 = ggplot(dm, aes(x = sequence_length_template)) +
        geom_line(stat="density", aes(colour = flowcell)) +
        scale_x_log10(minor_breaks=log10_minor_break()) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Read length") +
        ylab("Density")
    ggsave(filename = file.path(output.dir, "length_distributions.png"), width = 960/75, height = 960/75, plot = p1)
    flog.info("Plotting mean Q score distributions")
    p2 = ggplot(dm, aes(x = mean_qscore_template)) +
        geom_line(stat="density", aes(colour = flowcell)) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y") +
        theme(text = element_text(size = 15)) +
        xlab("Mean Q score of read") +
        ylab("Density")
    ggsave(filename = file.path(output.dir, "q_distributions.png"), width = 960/75, height = 960/75, plot = p2)
    flog.info("Plotting flowcell yield summary")
    p6 = ggplot(dm, aes(x=sequence_length_template, y=cumulative.bases, colour = flowcell)) +
        geom_line(size = 1) +
        xlab("Minimum read length") +
        ylab("Total yield in bases") +
        theme(text = element_text(size = 15)) +
        facet_wrap(~Q_cutoff, ncol = 1, scales = "free_y")
    # truncate the x axis where the remaining yield drops below 1% of the total
    xmax = max(dm$sequence_length_template[which(dm$cumulative.bases > 0.01 * max(dm$cumulative.bases))])
    p6 = p6 + scale_x_continuous(limits = c(0, xmax))
    ggsave(filename = file.path(output.dir, "yield_summary.png"), width = 960/75, height = 960/75, plot = p6)
    flog.info("Plotting sequence length over time")
    # label each read as above/below the user's Q cutoff
    e = subset(dm, Q_cutoff=="All reads")
    e$Q = paste("Q>=", q, sep="")
    e$Q[which(e$mean_qscore_template<q)] = paste("Q<", q, sep="")
    p7 = ggplot(e, aes(x=start_time/3600, y=sequence_length_template, colour = flowcell)) +
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() +
        xlab("Hours into run") +
        ylab("Mean read length") +
        ylim(0, NA) +
        facet_wrap(~Q, ncol = 1, scales = "free_y")
    ggsave(filename = file.path(output.dir, "length_by_hour.png"), width = 960/75, height = 480/75, plot = p7)
    flog.info("Plotting Q score over time")
    p8 = ggplot(e, aes(x=start_time/3600, y=mean_qscore_template, colour = flowcell)) +
        geom_vline(xintercept = muxes, colour = 'red', linetype = 'dashed', alpha = 0.5) +
        geom_smooth() +
        xlab("Hours into run") +
        ylab("Mean Q score") +
        facet_wrap(~Q, ncol = 1, scales = "free_y")
    ggsave(filename = file.path(output.dir, "q_by_hour.png"), width = 960/75, height = 480/75, plot = p8)
}
# Choose how to act depending on whether we have a single input file or multiple input files.
# input.file may be a sequencing_summary.txt file, or a directory that contains
# one or more such files (searched recursively).
if(file_test("-f", input.file)){
    # it's an existing file (not a folder): just run one analysis
    d = single.flowcell(input.file, output.dir, q)
}else if(file_test("-d", input.file)){
    # it's a directory: recursively find all sequencing_summary.txt files below it
    summaries = list.files(path = input.file, pattern = "sequencing_summary.txt", recursive = TRUE, full.names = TRUE)
    flog.info("")
    flog.info("**** Analysing the following files ****")
    flog.info(summaries)
    if(length(summaries) == 0){
        # the directory exists but contains no summary files: warn and do nothing
        flog.warn(paste("Could not find a sequencing summary file in your input which was: ",
                        input.file,
                        "\nThe input must be either a sequencing_summary.txt file, or a directory containing one or more such files"))
    }else if(length(summaries) == 1){
        # a directory holding a single sequencing_summary.txt is treated like a single-file input
        d = single.flowcell(summaries[1], output.dir, q)
        flog.info('**** Analysis complete ****')
    }else{
        # analyse each flowcell (in parallel when cores > 1) and keep the returns in a list
        results = mclapply(summaries, multi.flowcell, output.dir, q, mc.cores = cores)
        # rbind that list into one data frame covering every flowcell
        flog.info('**** Analysing data from all flowcells combined ****')
        dm = as.data.frame(rbindlist(results))
        # now produce combined plots and summaries across ALL the output
        combined.output = file.path(output.dir, "combinedQC")
        flog.info(paste("Plots from the combined output will be saved in", combined.output))
        dir.create(combined.output)
        combined.flowcell(dm, combined.output, q)
        multi.plots(dm, combined.output)
        flog.info('**** Analysis complete ****')
    }
}else{
    # the input path is neither an existing file nor a directory
    flog.warn(paste("Could not find a sequencing summary file in your input which was: ",
                    input.file,
                    "\nThe input must be either a sequencing_summary.txt file, or a directory containing one or more such files"))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers-merge.R
\name{layer_maximum}
\alias{layer_maximum}
\title{Layer that computes the maximum (element-wise) a list of inputs.}
\usage{
layer_maximum(inputs)
}
\arguments{
\item{inputs}{A list of input tensors (at least 2).}
}
\value{
A tensor, the element-wise maximum of the inputs.
}
\description{
It takes as input a list of tensors, all of the same shape, and returns a
single tensor (also of the same shape).
}
\seealso{
Other merge layers: \code{\link{layer_add}},
\code{\link{layer_average}},
\code{\link{layer_concatenate}}, \code{\link{layer_dot}},
\code{\link{layer_multiply}}
}
| /man/layer_maximum.Rd | no_license | cinneesol/keras-1 | R | false | true | 677 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers-merge.R
\name{layer_maximum}
\alias{layer_maximum}
\title{Layer that computes the maximum (element-wise) a list of inputs.}
\usage{
layer_maximum(inputs)
}
\arguments{
\item{inputs}{A list of input tensors (at least 2).}
}
\value{
A tensor, the element-wise maximum of the inputs.
}
\description{
It takes as input a list of tensors, all of the same shape, and returns a
single tensor (also of the same shape).
}
\seealso{
Other merge layers: \code{\link{layer_add}},
\code{\link{layer_average}},
\code{\link{layer_concatenate}}, \code{\link{layer_dot}},
\code{\link{layer_multiply}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/library.R
\name{save_tracks_to_library}
\alias{save_tracks_to_library}
\title{Save Tracks for Current User}
\usage{
save_tracks_to_library(
ids,
authorization = get_spotify_authorization_code(),
echo = FALSE
)
}
\arguments{
\item{ids}{Required. \cr
A comma-separated list of the \href{https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids}{Spotify IDs} for the albums. Maximum: 50 IDs.}
\item{authorization}{Required. \cr
A valid access token from the Spotify Accounts service. See the \href{https://developer.spotify.com/documentation/general/guides/authorization-guide/}{Web API authorization Guide} for more details. Defaults to \code{spotifyr::get_spotify_authorization_code()}. The access token must have been issued on behalf of the current user.}
\item{echo}{Optional.\cr
Boolean indicating whether to return the response or work silently.}
}
\value{
Returns a response status. See \url{https://developer.spotify.com/documentation/web-api/#response-status-codes} for more information.
}
\description{
Save Tracks for User: save one or more tracks to the current user’s ‘Your Music’ library.
}
| /man/save_tracks_to_library.Rd | no_license | womeimingzi11/spotifyr | R | false | true | 1,211 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/library.R
\name{save_tracks_to_library}
\alias{save_tracks_to_library}
\title{Save Tracks for Current User}
\usage{
save_tracks_to_library(
ids,
authorization = get_spotify_authorization_code(),
echo = FALSE
)
}
\arguments{
\item{ids}{Required. \cr
A comma-separated list of the \href{https://developer.spotify.com/documentation/web-api/#spotify-uris-and-ids}{Spotify IDs} for the albums. Maximum: 50 IDs.}
\item{authorization}{Required. \cr
A valid access token from the Spotify Accounts service. See the \href{https://developer.spotify.com/documentation/general/guides/authorization-guide/}{Web API authorization Guide} for more details. Defaults to \code{spotifyr::get_spotify_authorization_code()}. The access token must have been issued on behalf of the current user.}
\item{echo}{Optional.\cr
Boolean indicating whether to return the response or work silently.}
}
\value{
Returns a response status. See \url{https://developer.spotify.com/documentation/web-api/#response-status-codes} for more information.
}
\description{
Save Tracks for User: save one or more tracks to the current user’s ‘Your Music’ library.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_graph.R
\name{import_graph}
\alias{import_graph}
\title{Import a graph from various graph formats}
\usage{
import_graph(graph_file, file_type = NULL, edges_extra_attr_names = NULL,
edges_extra_attr_coltypes = NULL)
}
\arguments{
\item{graph_file}{a connection to a graph file.
When provided as a path to a file, it will read the
file from disk. Files starting with \code{http://},
\code{https://}, \code{ftp://}, or \code{ftps://}
will be automatically downloaded.}
\item{file_type}{the type of file to be imported.
Options are: \code{gml} (GML), \code{sif} (SIF),
\code{edges} (a .edges file), and \code{mtx}
(MatrixMarket format). If not supplied, the type
of graph file will be inferred by its file extension.}
\item{edges_extra_attr_names}{for \code{edges} files,
a vector of attribute names beyond the \code{from}
and \code{to} data columns can be provided in the
order they appear in the input data file.}
\item{edges_extra_attr_coltypes}{for \code{edges}
files, this is a string of column types for any
attribute columns provided for
\code{edges_extra_attr_names}. This string
representation is where each character represents
each of the extra columns of data and the mappings
are: \code{c} -> character, \code{i} -> integer,
\code{n} -> number, \code{d} -> double,
\code{l} -> logical, \code{D} -> date, \code{T} ->
date time, \code{t} -> time, \code{?} -> guess,
or \code{_/-}, which skips the column.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
Import a variety of graphs from
different graph formats and create a graph object.
}
\examples{
\dontrun{
# Import a GML graph file
gml_graph <-
import_graph(
system.file(
"extdata/karate.gml",
package = "DiagrammeR"))
# Get a count of the graph's nodes
gml_graph \%>\%
count_nodes()
# Get a count of the graph's edges
gml_graph \%>\%
count_edges()
}
}
| /man/import_graph.Rd | permissive | akkalbist55/DiagrammeR | R | false | true | 1,948 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/import_graph.R
\name{import_graph}
\alias{import_graph}
\title{Import a graph from various graph formats}
\usage{
import_graph(graph_file, file_type = NULL, edges_extra_attr_names = NULL,
edges_extra_attr_coltypes = NULL)
}
\arguments{
\item{graph_file}{a connection to a graph file.
When provided as a path to a file, it will read the
file from disk. Files starting with \code{http://},
\code{https://}, \code{ftp://}, or \code{ftps://}
will be automatically downloaded.}
\item{file_type}{the type of file to be imported.
Options are: \code{gml} (GML), \code{sif} (SIF),
\code{edges} (a .edges file), and \code{mtx}
(MatrixMarket format). If not supplied, the type
of graph file will be inferred by its file extension.}
\item{edges_extra_attr_names}{for \code{edges} files,
a vector of attribute names beyond the \code{from}
and \code{to} data columns can be provided in the
order they appear in the input data file.}
\item{edges_extra_attr_coltypes}{for \code{edges}
files, this is a string of column types for any
attribute columns provided for
\code{edges_extra_attr_names}. This string
representation is where each character represents
each of the extra columns of data and the mappings
are: \code{c} -> character, \code{i} -> integer,
\code{n} -> number, \code{d} -> double,
\code{l} -> logical, \code{D} -> date, \code{T} ->
date time, \code{t} -> time, \code{?} -> guess,
or \code{_/-}, which skips the column.}
}
\value{
a graph object of class \code{dgr_graph}.
}
\description{
Import a variety of graphs from
different graph formats and create a graph object.
}
\examples{
\dontrun{
# Import a GML graph file
gml_graph <-
import_graph(
system.file(
"extdata/karate.gml",
package = "DiagrammeR"))
# Get a count of the graph's nodes
gml_graph \%>\%
count_nodes()
# Get a count of the graph's edges
gml_graph \%>\%
count_edges()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unscale.R
\name{unscale}
\alias{unscale}
\title{Reverse a scale}
\usage{
unscale(z, center = attr(z, "scaled:center"), scale = attr(z,
"scaled:scale"))
}
\arguments{
\item{z}{a numeric matrix(like) object}
\item{center}{either NULL or a numeric vector of length
equal to the number of columns of z}
\item{scale}{either NULL or a numeric vector of length
equal to the number of columns of z}
}
\description{
Computes x = sz+c, which is the inverse of z = (x - c)/s
provided by the \code{scale} function.
}
\examples{
mtcs <- scale(mtcars)
all.equal(
unscale(mtcs),
as.matrix(mtcars),
check.attributes=FALSE
)
oldSeed <- .Random.seed
z <- unscale(rnorm(10), 2, .5)
.Random.seed <- oldSeed
x <- rnorm(10, 2, .5)
all.equal(z, x, check.attributes=FALSE)
}
\references{
\url{https://stackoverflow.com/questions/10287545/backtransform-scale-for-plotting/46840073}
}
\seealso{
\code{\link{scale}}
}
\author{
Neal Fultz
}
| /man/unscale.Rd | no_license | cran/stackoverflow | R | false | true | 1,023 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unscale.R
\name{unscale}
\alias{unscale}
\title{Reverse a scale}
\usage{
unscale(z, center = attr(z, "scaled:center"), scale = attr(z,
"scaled:scale"))
}
\arguments{
\item{z}{a numeric matrix(like) object}
\item{center}{either NULL or a numeric vector of length
equal to the number of columns of z}
\item{scale}{either NULL or a numeric vector of length
equal to the number of columns of z}
}
\description{
Computes x = sz+c, which is the inverse of z = (x - c)/s
provided by the \code{scale} function.
}
\examples{
mtcs <- scale(mtcars)
all.equal(
unscale(mtcs),
as.matrix(mtcars),
check.attributes=FALSE
)
oldSeed <- .Random.seed
z <- unscale(rnorm(10), 2, .5)
.Random.seed <- oldSeed
x <- rnorm(10, 2, .5)
all.equal(z, x, check.attributes=FALSE)
}
\references{
\url{https://stackoverflow.com/questions/10287545/backtransform-scale-for-plotting/46840073}
}
\seealso{
\code{\link{scale}}
}
\author{
Neal Fultz
}
|
# developersBox -----------------------------------------------------------

# Builds the renderUI output for a single developer profile card.
# All six cards below share the same widgetUserBox layout; only the
# displayed name, the photo path, and the key into `lenguage_outputs`
# (the project's reactive, language-aware bio text; object name kept as
# spelled elsewhere in the project) differ.
#
# @param title Display name shown on the card.
# @param src Path to the profile photo, relative to the app's www/ dir.
# @param profile_key Key into `lenguage_outputs` for the bio text reactive.
# @return A shiny renderUI object.
make_developer_box <- function(title, src, profile_key) {
  renderUI({
    widgetUserBox(
      title = title,
      subtitle = "Team 80 - One Correlation",
      type = NULL,
      width = 12,
      src = src,
      color = "red",
      closable = FALSE,
      footer = p(lenguage_outputs[[profile_key]]())
    )
  })
}

output[["userBox_Mathias"]] <- make_developer_box(
  title = "Mathías Verano",
  src = "img/profile_photo/mathias_profile.jpg",
  profile_key = "bodyText_mathias.profile"
)

output[["userBox_Monica"]] <- make_developer_box(
  title = "Mónica Rodríguez",
  src = "img/profile_photo/monica_profile.jpg",
  profile_key = "bodyText_monica.profile"
)

output[["userBox_Ricardo"]] <- make_developer_box(
  title = "Ricardo Bonilla",
  src = "img/profile_photo/ricardo_profile.png",
  profile_key = "bodyText_ricardo.profile"
)

output[["userBox_Camila"]] <- make_developer_box(
  title = "Camila Lozano",
  src = "img/profile_photo/camila_profile.jpg",
  profile_key = "bodyText_camila.profile"
)

output[["userBox_Jesus"]] <- make_developer_box(
  title = "Jesús Parra",
  src = "img/profile_photo/jesus_profile.jpg",
  profile_key = "bodyText_jesus.profile"
)

output[["userBox_Julian"]] <- make_developer_box(
  title = "Julián Gutierrez",
  src = "img/profile_photo/julian_profile.jpg",
  profile_key = "bodyText_julian.profile"
)
}) | /R_scripts/src/App/ui_elements/body/DEVELOPERS_components.R | no_license | monicarodguev/ds4a_team80 | R | false | false | 2,238 | r | # developersBox -----------------------------------------------------------
output[['userBox_Mathias']] = renderUI({
widgetUserBox(
title = "Mathías Verano",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
# src = "img/profile_photo/mathias_profile.jpg",
src = "img/profile_photo/mathias_profile.jpg",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_mathias.profile']]() )
)
})
output[['userBox_Monica']] = renderUI({
widgetUserBox(
title = "Mónica Rodríguez",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
src = "img/profile_photo/monica_profile.jpg",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_monica.profile']]() )
)
})
output[['userBox_Ricardo']] = renderUI({
widgetUserBox(
title = "Ricardo Bonilla",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
src = "img/profile_photo/ricardo_profile.png",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_ricardo.profile']]() )
)
})
output[['userBox_Camila']] = renderUI({
widgetUserBox(
title = "Camila Lozano",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
src = "img/profile_photo/camila_profile.jpg",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_camila.profile']]() )
)
})
output[['userBox_Jesus']] = renderUI({
widgetUserBox(
title = "Jesús Parra",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
src = "img/profile_photo/jesus_profile.jpg",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_jesus.profile']]() )
)
})
output[['userBox_Julian']] = renderUI({
widgetUserBox(
title = "Julián Gutierrez",
subtitle = "Team 80 - One Correlation",
type = NULL,
width = 12,
src = "img/profile_photo/julian_profile.jpg",
color = "red",
closable = FALSE,
#"add text here",
footer = p( lenguage_outputs[['bodyText_julian.profile']]() )
)
}) |
| pc = 0xc002 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc004 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0010] = 0xff |
| pc = 0xc006 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110101 |
| pc = 0xc008 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 | MEM[0x0010] = 0x7f |
| pc = 0xc00a | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc00c | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc00e | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc010 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc012 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc014 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc016 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 | MEM[0x0010] = 0x01 |
| /res/lsr-2.r | no_license | HeitorBRaymundo/861 | R | false | false | 1,031 | r | | pc = 0xc002 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 |
| pc = 0xc004 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110100 | MEM[0x0010] = 0xff |
| pc = 0xc006 | a = 0xff | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 10110101 |
| pc = 0xc008 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 | MEM[0x0010] = 0x7f |
| pc = 0xc00a | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc00c | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc00e | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc010 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc012 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc014 | a = 0x7f | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 |
| pc = 0xc016 | a = 0x01 | x = 0x00 | y = 0x00 | sp = 0x01fd | p[NV-BDIZC] = 00110101 | MEM[0x0010] = 0x01 |
|
\name{pCMax}
\alias{pCMax}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ pCMax}
\description{ Cumulative copula Frechet's bound, pCMax}
\usage{
pCMax(theta, delta, s, t)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta}{ is missing }
\item{delta}{ is missing }
\item{s}{ real vector }
\item{t}{ real vector }
}
\value{
returns the values from bidimensional cumulative for (s,t) sample.
}
\references{ Harry Joe. \sQuote{Multivariate Models and Dependence Concepts} Monogra. Stat. & Appl. Probab. 73. Chapman and Hall (1997) }
\author{ Veronica A. Gonzalez-Lopez }
\seealso{ \code{\link{pCMin}}}
\examples{#a<-pCMax(s=matrix(c(0.9,0.2,0.4,0.5),nrow=4),t=matrix(c(0.2,0.33,0.5,0.2),nrow=4))
}
\keyword{multivariate}
| /man/pCMax.Rd | no_license | cran/fgac | R | false | false | 792 | rd | \name{pCMax}
\alias{pCMax}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{ pCMax}
\description{ Cumulative copula Frechet's bound, pCMax}
\usage{
pCMax(theta, delta, s, t)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{theta}{ is missing }
\item{delta}{ is missing }
\item{s}{ real vector }
\item{t}{ real vector }
}
\value{
returns the values from bidimensional cumulative for (s,t) sample.
}
\references{ Harry Joe. \sQuote{Multivariate Models and Dependence Concepts} Monogra. Stat. & Appl. Probab. 73. Chapman and Hall (1997) }
\author{ Veronica A. Gonzalez-Lopez }
\seealso{ \code{\link{pCMin}}}
\examples{#a<-pCMax(s=matrix(c(0.9,0.2,0.4,0.5),nrow=4),t=matrix(c(0.2,0.33,0.5,0.2),nrow=4))
}
\keyword{multivariate}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.