content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Modelling stack: C50 for boosted trees, gmodels for CrossTable(),
# dummies/fastDummies for one-hot encoding of the team names.
library("caret")
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
# BUG FIX: the original read library(plDTr), which is not a CRAN package.
# It is "plyr" corrupted by a stray y -> DT find/replace (the same
# replacement that garbled "Dummy" into "DummDT" in the original comments).
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)
################################## Load Files #############################################
x <-
  read.csv(
    "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\LeagueELO\\BPL\\BPLELO14-15.csv",
    stringsAsFactors = FALSE
  )
################################# Clean Data ##############################################
# Bookmaker odds columns: home/draw/away (H/D/A) prices from seven bookmakers.
# Coerce them all to numeric in one pass instead of 21 copy-pasted lines.
odds_cols <- paste0(rep(c("B365", "BW", "IW", "LB", "PS", "WH", "VC"), each = 3),
                    c("H", "D", "A"))
x[odds_cols] <- lapply(x[odds_cols], as.numeric)
# Drop rows with any missing value (failed coercions become NA above).
x <- na.exclude(x)
################################## Rename Columns #########################################
# The first column of the export is the season identifier.
colnames(x)[1] <- "Season"
################################ Create Dummy Vars ########################################
# One-hot encode home and away team names via dummies::dummy().
# NOTE(review): "DummDT" in the original headers was a stray y -> DT
# find/replace of "Dummy"; also, the dummies package is archived on CRAN --
# fastDummies (already loaded above) is the maintained alternative.
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))
########################### Remove Cols After Dummy Vars ##################################
# Drop identifier columns that must not be used as predictors.
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
x$date <- NULL
##################################### All Bookies #########################################
# Fit a boosted C5.0 tree on a 60/40 train/test split of `dat` (response in
# column 1, "FTR") and print a cross-tabulation of predicted vs. actual
# full-time results on the held-out set.
#
# BUG FIX: the original eight copy-pasted sections all passed `trails = 100`.
# C5.0() has no `trails` argument, so it was silently swallowed by `...` and
# NO boosting was ever performed; the correct argument is `trials`.
run_c50_evaluation <- function(dat, trials = 100) {
  set.seed(123)                       # identical split for every feature subset
  dat$FTR <- as.factor(dat$FTR)
  n <- nrow(dat)
  train_idx <- sample(n, n * 0.6)
  train <- dat[train_idx, ]
  test <- dat[-train_idx, ]
  model <- C5.0(train[, -1], train$FTR, trials = trials)
  plot(model)
  print(summary(model))               # summary() no longer auto-prints inside a function
  pred <- predict(model, test[, -1])
  CrossTable(test$FTR, pred, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE)
  invisible(list(model = model, predictions = pred))
}

# Feature subsets of x (column indices refer to the post-cleaning layout):
# DT1 keeps everything, DT2-DT7 drop successive blocks of odds columns,
# DT8 keeps only columns outside 2:21.
subsets <- list(
  DT1 = x,
  DT2 = x[-c(7:24)],
  DT3 = x[-c(4:6, 10:24)],
  DT4 = x[-c(4:9, 13:24)],
  DT5 = x[-c(4:12, 16:24)],
  DT6 = x[-c(4:15, 19:24)],
  DT7 = x[-c(4:18, 22:24)],
  DT8 = x[-c(2:21)]
)
results <- lapply(subsets, run_c50_evaluation)
########################################################################################### | /0-Implementation/R Files/Decision Trees/League - ELO/BPL/14-15.R | no_license | Chanter08/Thesis | R | false | false | 6,128 | r | library("caret")
# Modelling stack: C50 for boosted trees, gmodels for CrossTable(),
# dummies/fastDummies for one-hot encoding of the team names.
library(corrplot)
library(C50)
library(dummies)
library(gmodels)
library(Metrics)
library(neuralnet)
# BUG FIX: the original read library(plDTr), which is not a CRAN package.
# It is "plyr" corrupted by a stray y -> DT find/replace (the same
# replacement that garbled "Dummy" into "DummDT" in the original comments).
library(plyr)
library(rpart)
library(tree)
library(e1071)
library(rpart.plot)
library(fastDummies)
################################## Load Files #############################################
x <-
  read.csv(
    "C:\\Users\\User\\Documents\\Thesis\\Data\\Models\\LeagueELO\\BPL\\BPLELO14-15.csv",
    stringsAsFactors = FALSE
  )
################################# Clean Data ##############################################
# Bookmaker odds columns: home/draw/away (H/D/A) prices from seven bookmakers.
# Coerce them all to numeric in one pass instead of 21 copy-pasted lines.
odds_cols <- paste0(rep(c("B365", "BW", "IW", "LB", "PS", "WH", "VC"), each = 3),
                    c("H", "D", "A"))
x[odds_cols] <- lapply(x[odds_cols], as.numeric)
# Drop rows with any missing value (failed coercions become NA above).
x <- na.exclude(x)
################################## Rename Columns #########################################
# The first column of the export is the season identifier.
colnames(x)[1] <- "Season"
################################ Create Dummy Vars ########################################
# One-hot encode home and away team names via dummies::dummy().
# NOTE(review): "DummDT" in the original headers was a stray y -> DT
# find/replace of "Dummy"; also, the dummies package is archived on CRAN --
# fastDummies (already loaded above) is the maintained alternative.
x <- cbind.data.frame(x, dummy(x$Home))
x <- cbind.data.frame(x, dummy(x$Away))
########################### Remove Cols After Dummy Vars ##################################
# Drop identifier columns that must not be used as predictors.
x$Home <- NULL
x$Away <- NULL
x$Season <- NULL
x$date <- NULL
##################################### All Bookies #########################################
# Fit a boosted C5.0 tree on a 60/40 train/test split of `dat` (response in
# column 1, "FTR") and print a cross-tabulation of predicted vs. actual
# full-time results on the held-out set.
#
# BUG FIX: the original eight copy-pasted sections all passed `trails = 100`.
# C5.0() has no `trails` argument, so it was silently swallowed by `...` and
# NO boosting was ever performed; the correct argument is `trials`.
run_c50_evaluation <- function(dat, trials = 100) {
  set.seed(123)                       # identical split for every feature subset
  dat$FTR <- as.factor(dat$FTR)
  n <- nrow(dat)
  train_idx <- sample(n, n * 0.6)
  train <- dat[train_idx, ]
  test <- dat[-train_idx, ]
  model <- C5.0(train[, -1], train$FTR, trials = trials)
  plot(model)
  print(summary(model))               # summary() no longer auto-prints inside a function
  pred <- predict(model, test[, -1])
  CrossTable(test$FTR, pred, prop.c = FALSE, prop.r = FALSE, prop.chisq = FALSE)
  invisible(list(model = model, predictions = pred))
}

# Feature subsets of x (column indices refer to the post-cleaning layout):
# DT1 keeps everything, DT2-DT7 drop successive blocks of odds columns,
# DT8 keeps only columns outside 2:21.
subsets <- list(
  DT1 = x,
  DT2 = x[-c(7:24)],
  DT3 = x[-c(4:6, 10:24)],
  DT4 = x[-c(4:9, 13:24)],
  DT5 = x[-c(4:12, 16:24)],
  DT6 = x[-c(4:15, 19:24)],
  DT7 = x[-c(4:18, 22:24)],
  DT8 = x[-c(2:21)]
)
results <- lapply(subsets, run_c50_evaluation)
########################################################################################### |
##' @name metric.timeseries.plot
##' @title metric.timeseries.plot
##' @export
##' @param dat data frame with columns \code{time}, \code{model} and \code{obvs}
##' @param var character; variable name, used as the plot title
##' @param filename optional path of a PDF (10 x 6 in) to write the plot to;
##'   \code{NA} (the default) skips writing
##' @param draw.plot logical; also draw the plot on the current device?
##' @return the ggplot object, invisibly
##'
##' @author Betsy Cowdery
metric.timeseries.plot <- function(dat, var, filename = NA, draw.plot = FALSE){
  library(ggplot2)
  dat$time <- as.Date(dat$time)
  # Model vs. observed time series, coloured by source (legend via aes colour).
  p <- ggplot(data = dat, aes(x = time)) +
    labs(title = var, y = "") +
    geom_path(aes(y = model, colour = "Model"), size = 2) +
    geom_point(aes(y = model, colour = "Model"), size = 4) +
    geom_path(aes(y = obvs, colour = "Observed"), size = 2) +
    geom_point(aes(y = obvs, colour = "Observed"), size = 4)
  if (!is.na(filename)) {
    pdf(filename, width = 10, height = 6)
    plot(p)
    dev.off()
  }
  if (draw.plot) {
    plot(p)
  }
  invisible(p)
}
##' @title metric.timeseries.plot
##' @export
##' @param dat data frame with columns \code{time}, \code{model} and \code{obvs}
##' @param var character; variable name, used as the plot title
##' @param filename optional path of a PDF (10 x 6 in) to write the plot to;
##'   \code{NA} (the default) skips writing
##' @param draw.plot logical; also draw the plot on the current device?
##' @return the ggplot object, invisibly
##'
##' @author Betsy Cowdery
metric.timeseries.plot <- function(dat, var, filename = NA, draw.plot = FALSE){
  library(ggplot2)
  dat$time <- as.Date(dat$time)
  # Model vs. observed time series, coloured by source (legend via aes colour).
  p <- ggplot(data = dat, aes(x = time)) +
    labs(title = var, y = "") +
    geom_path(aes(y = model, colour = "Model"), size = 2) +
    geom_point(aes(y = model, colour = "Model"), size = 4) +
    geom_path(aes(y = obvs, colour = "Observed"), size = 2) +
    geom_point(aes(y = obvs, colour = "Observed"), size = 4)
  if (!is.na(filename)) {
    pdf(filename, width = 10, height = 6)
    plot(p)
    dev.off()
  }
  if (draw.plot) {
    plot(p)
  }
  invisible(p)
}
library(tidyverse)
library(trac)

# Fitted trac models and data for the two marine data sets
# ("small" -> labelled Free living, "large" -> Particle associated below).
small <- new.env()
large <- new.env()
load("../../Marine/marine_leucine_small_trac_fixed_level_multi_splits.Rdata", envir = small)
load("../../Marine/marine_leucine_large_trac_fixed_level_multi_splits.Rdata", envir = large)
small$dat <- readRDS("../../Marine/marine_leucine_small.RDS")
large$dat <- readRDS("../../Marine/marine_leucine_large.RDS")

# Attach fitted values (at the 1-SE lambda) to the sample metadata, placing
# train and test predictions back in their original row positions.
# Environments have reference semantics, so mutating `env` updates the caller.
add_predictions <- function(env) {
  # Sanity check: metadata rows must align with the response vector.
  stopifnot(env$dat$sample_data$leucine == env$dat$y)
  i1se <- env$cvfit[[1]]$OTU$cv[[1]]$i1se
  env$dat$sample_data$yhat <- NA
  env$dat$sample_data$yhat[env$tr[[1]]] <- env$yhat_tr[[1]]$OTU[, i1se]
  env$dat$sample_data$yhat[-env$tr[[1]]] <- env$yhat_te[[1]]$OTU[, i1se]
  invisible(env)
}
add_predictions(small)
add_predictions(large)

both <- list(small$dat$sample_data,
             large$dat$sample_data) %>%
  set_names(c("Free living", "Particle associated")) %>%
  bind_rows(.id = "Type")

p <- both %>%
  ggplot(aes(x = yhat, y = leucine, color = Region)) +
  geom_point(size = 3) +
  geom_abline(slope = 1, intercept = 0) +
  labs(x = "Predicted Leucine", y = "Actual Leucine", title = "All Samples") +
  facet_wrap(~ Type) +
  theme(strip.text = element_text(size = 8)) +
  coord_fixed()
p
# BUG FIX: ggsave() used to be chained onto the plot with `+`, which is not
# supported by current ggplot2; call it separately and pass the plot object.
ggsave("marine.pdf", p, width = 7.5, height = 3)

# Out-of-sample correlation between predicted and observed leucine.
cor(small$yhat_te[[1]]$OTU[, small$cvfit[[1]]$OTU$cv[[1]]$i1se],
    small$dat$y[-small$tr[[1]]])
cor(large$yhat_te[[1]]$OTU[, large$cvfit[[1]]$OTU$cv[[1]]$i1se],
    large$dat$y[-large$tr[[1]]])
| /plots_and_tables/y_vs_yhat/marine.R | no_license | jacobbien/trac-reproducible | R | false | false | 1,977 | r | library(tidyverse)
library(trac)

# Fitted trac models and data for the two marine data sets
# ("small" -> labelled Free living, "large" -> Particle associated below).
small <- new.env()
large <- new.env()
load("../../Marine/marine_leucine_small_trac_fixed_level_multi_splits.Rdata", envir = small)
load("../../Marine/marine_leucine_large_trac_fixed_level_multi_splits.Rdata", envir = large)
small$dat <- readRDS("../../Marine/marine_leucine_small.RDS")
large$dat <- readRDS("../../Marine/marine_leucine_large.RDS")

# Attach fitted values (at the 1-SE lambda) to the sample metadata, placing
# train and test predictions back in their original row positions.
# Environments have reference semantics, so mutating `env` updates the caller.
add_predictions <- function(env) {
  # Sanity check: metadata rows must align with the response vector.
  stopifnot(env$dat$sample_data$leucine == env$dat$y)
  i1se <- env$cvfit[[1]]$OTU$cv[[1]]$i1se
  env$dat$sample_data$yhat <- NA
  env$dat$sample_data$yhat[env$tr[[1]]] <- env$yhat_tr[[1]]$OTU[, i1se]
  env$dat$sample_data$yhat[-env$tr[[1]]] <- env$yhat_te[[1]]$OTU[, i1se]
  invisible(env)
}
add_predictions(small)
add_predictions(large)

both <- list(small$dat$sample_data,
             large$dat$sample_data) %>%
  set_names(c("Free living", "Particle associated")) %>%
  bind_rows(.id = "Type")

p <- both %>%
  ggplot(aes(x = yhat, y = leucine, color = Region)) +
  geom_point(size = 3) +
  geom_abline(slope = 1, intercept = 0) +
  labs(x = "Predicted Leucine", y = "Actual Leucine", title = "All Samples") +
  facet_wrap(~ Type) +
  theme(strip.text = element_text(size = 8)) +
  coord_fixed()
p
# BUG FIX: ggsave() used to be chained onto the plot with `+`, which is not
# supported by current ggplot2; call it separately and pass the plot object.
ggsave("marine.pdf", p, width = 7.5, height = 3)

# Out-of-sample correlation between predicted and observed leucine.
cor(small$yhat_te[[1]]$OTU[, small$cvfit[[1]]$OTU$cv[[1]]$i1se],
    small$dat$y[-small$tr[[1]]])
cor(large$yhat_te[[1]]$OTU[, large$cvfit[[1]]$OTU$cv[[1]]$i1se],
    large$dat$y[-large$tr[[1]]])
|
getQualityScore.default <- function(x, sds,
    w, type, iter = 10000, threshold = 0.1,
    ...) {
    # Quality score of a CNV mixture with component means `x`, standard
    # deviations `sds` and mixing weights `w`.  `type` selects the score:
    #   "class"    - Monte Carlo estimate (iter draws per component) of the
    #                probability that an observation is assigned to the
    #                component that generated it, weighted by w.
    #   "CNVtools" - weighted mean separation of adjacent component means,
    #                in units of their weight-averaged standard deviation.
    #   "CANARY"   - integrated mixture density over the region where the
    #                second-largest component contribution is within
    #                `threshold` of the largest (i.e. components overlap).
    mu <- x
    J <- length(mu)
    qs.type <- charmatch(tolower(type), tolower(c("class",
        "CNVtools", "CANARY")))
    if (is.na(qs.type))
        stop("argument 'type' must be either 'class', 'CNVtools' or 'CANARY'",
            call. = FALSE)
    if (qs.type == 1) {
        # vapply instead of growing p with c() inside the loop; the RNG
        # draws happen in the same order, so results are unchanged.
        p <- vapply(seq_len(J), function(j) {
            X <- rnorm(iter, mu[j], sds[j])
            Y <- vapply(seq(1, J), function(s) w[s] *
                dnorm(X, mu[s], sds[s]), rep(0, iter))
            mean(apply(Y, 1, which.max) == j)
        }, 0)
        out <- sum(p * w)
    }
    if (qs.type == 2) {
        # Sort components by mean so "adjacent" is well defined.
        sds <- sds[order(mu)]
        w <- w[order(mu)]
        mu <- sort(mu)
        dmu <- abs(mu[seq_len(J - 1)] - mu[seq(2, J)])
        av.sds <- (w[seq_len(J - 1)] * sds[seq_len(J - 1)] +
            w[seq(2, J)] * sds[seq(2, J)])/(w[seq_len(J - 1)] + w[seq(2, J)])
        weights <- w[seq_len(J - 1)] * w[seq(2, J)]
        out <- sum(weights * dmu/av.sds)/sum(weights)
    }
    if (qs.type == 3) {
        # Mixture density at scalar x, zeroed where one component dominates.
        f <- function(x) {
            Y <- vapply(seq(1, J), function(s) w[s] *
                dnorm(x, mu[s], sds[s]), rep(0, length(x)))
            index <- which.max(Y)
            max1 <- Y[index]
            max2 <- max(Y[-index])
            ratio <- max2/max1
            sum(Y) * as.integer(ratio > threshold)
        }
        # Integrate over +/- 3 sd beyond the extreme component means.
        minim <- which.min(mu)
        maxim <- which.max(mu)
        limits <- c(mu[minim] - 3 * sds[minim],
            mu[maxim] + 3 * sds[maxim])
        fapply <- function(x) vapply(x, f, 0)
        out <- integrate(fapply, limits[1],
            limits[2])$value
        attr(out, "threshold") <- threshold
    }
    attr(out, "type") <- qs.type
    return(out)
}
getQualityScore.default <- function(x, sds,
    w, type, iter = 10000, threshold = 0.1,
    ...) {
    # Quality score of a CNV mixture with component means `x`, standard
    # deviations `sds` and mixing weights `w`.  `type` selects the score:
    #   "class"    - Monte Carlo estimate (iter draws per component) of the
    #                probability that an observation is assigned to the
    #                component that generated it, weighted by w.
    #   "CNVtools" - weighted mean separation of adjacent component means,
    #                in units of their weight-averaged standard deviation.
    #   "CANARY"   - integrated mixture density over the region where the
    #                second-largest component contribution is within
    #                `threshold` of the largest (i.e. components overlap).
    mu <- x
    J <- length(mu)
    qs.type <- charmatch(tolower(type), tolower(c("class",
        "CNVtools", "CANARY")))
    if (is.na(qs.type))
        stop("argument 'type' must be either 'class', 'CNVtools' or 'CANARY'",
            call. = FALSE)
    if (qs.type == 1) {
        # vapply instead of growing p with c() inside the loop; the RNG
        # draws happen in the same order, so results are unchanged.
        p <- vapply(seq_len(J), function(j) {
            X <- rnorm(iter, mu[j], sds[j])
            Y <- vapply(seq(1, J), function(s) w[s] *
                dnorm(X, mu[s], sds[s]), rep(0, iter))
            mean(apply(Y, 1, which.max) == j)
        }, 0)
        out <- sum(p * w)
    }
    if (qs.type == 2) {
        # Sort components by mean so "adjacent" is well defined.
        sds <- sds[order(mu)]
        w <- w[order(mu)]
        mu <- sort(mu)
        dmu <- abs(mu[seq_len(J - 1)] - mu[seq(2, J)])
        av.sds <- (w[seq_len(J - 1)] * sds[seq_len(J - 1)] +
            w[seq(2, J)] * sds[seq(2, J)])/(w[seq_len(J - 1)] + w[seq(2, J)])
        weights <- w[seq_len(J - 1)] * w[seq(2, J)]
        out <- sum(weights * dmu/av.sds)/sum(weights)
    }
    if (qs.type == 3) {
        # Mixture density at scalar x, zeroed where one component dominates.
        f <- function(x) {
            Y <- vapply(seq(1, J), function(s) w[s] *
                dnorm(x, mu[s], sds[s]), rep(0, length(x)))
            index <- which.max(Y)
            max1 <- Y[index]
            max2 <- max(Y[-index])
            ratio <- max2/max1
            sum(Y) * as.integer(ratio > threshold)
        }
        # Integrate over +/- 3 sd beyond the extreme component means.
        minim <- which.min(mu)
        maxim <- which.max(mu)
        limits <- c(mu[minim] - 3 * sds[minim],
            mu[maxim] + 3 * sds[maxim])
        fapply <- function(x) vapply(x, f, 0)
        out <- integrate(fapply, limits[1],
            limits[2])$value
        attr(out, "threshold") <- threshold
    }
    attr(out, "type") <- qs.type
    return(out)
}
|
\name{pSDCFlig}
\alias{pSDCFlig}
\title{
Dwass, Steel, Critchlow, Fligner
}
\description{
Computes the P-value corresponding to the observed Dwass-Steel-Critchlow-Fligner W statistic.
}
\usage{
pSDCFlig(x,g=NA,method=NA,n.mc=10000)
}
\arguments{
\item{x}{Either a list or a vector containing the data.}
\item{g}{If x is a vector, g is a required vector of group labels. Otherwise, not used.}
\item{method}{Either "Exact", "Monte Carlo", or "Asymptotic", indicating the desired distribution. When method=NA, "Exact" will be used if the number of permutations is 10,000 or less. Otherwise, "Monte Carlo" will be used.
}
\item{n.mc}{
If method="Monte Carlo", the number of Monte Carlo samples used to estimate the distribution. Otherwise, not used.
}
}
\details{
The data entry is intended to be flexible, so that the groups of data can be entered in either of two ways. For data a=1,2 and b=3,4,5 the following are equivalent:
\code{pSDCFlig(x=list(c(1,2),c(3,4,5)))}
\code{pSDCFlig(x=c(1,2,3,4,5),g=c(1,1,2,2,2))}
}
\value{
Returns a list with "NSM3Ch6MCp" class containing the following components:
\item{n}{a vector containing the number of observations in each of the k data groups}
\item{obs.stat}{the observed W statistic for each of the k*(k-1)/2 comparisons}
\item{p.val}{upper tail P-value corresponding to each W statistic}
}
\author{
Grant Schneider
}
\examples{
gizzards<-list(site.I=c(46,28,46,37,32,41,42,45,38,44),
site.II=c(42,60,32,42,45,58,27,51,42,52),
site.III=c(38,33,26,25,28,28,26,27,27,27),
site.IV=c(31,30,27,29,30,25,25,24,27,30))
##Takes a little while
#pSDCFlig(gizzards,method="Monte Carlo")
##Shorter version for demonstration
pSDCFlig(gizzards[1:2],method="Asymptotic")
}
\keyword{Dwass}
\keyword{Steel}
\keyword{Critchlow-Fligner} | /man/pSDCFlig.Rd | no_license | cran/NSM3 | R | false | false | 1,861 | rd | \name{pSDCFlig}
\alias{pSDCFlig}
\title{
Dwass, Steel, Critchlow, Fligner
}
\description{
Computes the P-value corresponding to the observed Dwass-Steel-Critchlow-Fligner W statistic.
}
\usage{
pSDCFlig(x,g=NA,method=NA,n.mc=10000)
}
\arguments{
\item{x}{Either a list or a vector containing the data.}
\item{g}{If x is a vector, g is a required vector of group labels. Otherwise, not used.}
\item{method}{Either "Exact", "Monte Carlo", or "Asymptotic", indicating the desired distribution. When method=NA, "Exact" will be used if the number of permutations is 10,000 or less. Otherwise, "Monte Carlo" will be used.
}
\item{n.mc}{
If method="Monte Carlo", the number of Monte Carlo samples used to estimate the distribution. Otherwise, not used.
}
}
\details{
The data entry is intended to be flexible, so that the groups of data can be entered in either of two ways. For data a=1,2 and b=3,4,5 the following are equivalent:
\code{pSDCFlig(x=list(c(1,2),c(3,4,5)))}
\code{pSDCFlig(x=c(1,2,3,4,5),g=c(1,1,2,2,2))}
}
\value{
Returns a list with "NSM3Ch6MCp" class containing the following components:
\item{n}{a vector containing the number of observations in each of the k data groups}
\item{obs.stat}{the observed W statistic for each of the k*(k-1)/2 comparisons}
\item{p.val}{upper tail P-value corresponding to each W statistic}
}
\author{
Grant Schneider
}
\examples{
gizzards<-list(site.I=c(46,28,46,37,32,41,42,45,38,44),
site.II=c(42,60,32,42,45,58,27,51,42,52),
site.III=c(38,33,26,25,28,28,26,27,27,27),
site.IV=c(31,30,27,29,30,25,25,24,27,30))
##Takes a little while
#pSDCFlig(gizzards,method="Monte Carlo")
##Shorter version for demonstration
pSDCFlig(gizzards[1:2],method="Asymptotic")
}
\keyword{Dwass}
\keyword{Steel}
\keyword{Critchlow-Fligner} |
# Load packages ----
library("qiime2R")
library("phyloseq")
library("ggplot2")
library("vegan")
library("tidyverse")
library("ape")
library("RColorBrewer")
library("viridis")
library("ggsci")
# Import bacteriome data (feature table + taxonomy) and attach sample metadata.
# Style fix: use <- for assignment throughout instead of =.
Physeq <- qza_to_phyloseq(features = "C:/Users/Tanweer/Documents/FilesForR/table_deblur.qza",
                          taxonomy = "C:/Users/Tanweer/Documents/FilesForR/taxonomy_1.qza")
Meta1 <- read.delim(file = "C:/Users/Tanweer/Documents/FilesForR/sample_metadata_Bac_vir_3.txt",
                    header = TRUE, sep = "\t", row.names = 1)
Meta2 <- sample_data(Meta1)
Bacseq <- merge_phyloseq(Physeq, Meta2)

# Alpha diversity (Chao1, Simpson) boxplots by disease state.
plot_richness(Bacseq, x = "DiseaseState", color = "DiseaseState",
              measures = c("Chao1", "Simpson")) +
  geom_boxplot() +
  scale_color_brewer(palette = "Paired") +
  theme(legend.position = "right", legend.title = element_blank(),
        legend.text = element_text(size = 10, face = "bold.italic"),
        axis.text.x = element_text(size = 10, face = "bold", angle = 360),
        axis.text.y = element_text(size = 10, face = "bold.italic"),
        axis.title = element_text(size = 12, face = "bold"),
        strip.text.x = element_text(size = 14, color = "black", face = "bold"))

# Same measures with per-participant points overlaid on the boxplots.
p <- plot_richness(Bacseq, "DiseaseState", "Participant",
                   measures = c("Chao1", "Simpson"))
(p <- p + geom_boxplot(data = p$data,
                       aes(x = DiseaseState, y = value, color = NULL),
                       alpha = 0.1)) +
  theme(legend.position = "right", legend.title = element_blank(),
        legend.text = element_text(size = 10, face = "bold.italic"),
        axis.text.x = element_text(size = 10, face = "bold", angle = 360),
        axis.text.y = element_text(size = 10, face = "bold.italic"),
        axis.title = element_text(size = 12, face = "bold"),
        strip.text.x = element_text(size = 14, color = "black", face = "bold"))

# Summary statistics: estimate richness once per group and reuse it, instead
# of recomputing estimate_richness() for each of the 12 statistics below.
richness <- estimate_richness(Bacseq)
Bacseq_Stable <- subset_samples(Bacseq, DiseaseState == "Stable")
Bacseq_Ex <- subset_samples(Bacseq, DiseaseState == "Exacerbation")
rich_stable <- estimate_richness(Bacseq_Stable, measures = c("Chao1", "Simpson"))
rich_ex <- estimate_richness(Bacseq_Ex, measures = c("Chao1", "Simpson"))
mean(rich_stable$Chao1)
mean(rich_ex$Chao1)
mean(rich_stable$Simpson)
mean(rich_ex$Simpson)
median(rich_stable$Chao1)
median(rich_ex$Chao1)
median(rich_stable$Simpson)
median(rich_ex$Simpson)
IQR(rich_stable$Chao1)
IQR(rich_ex$Chao1)
IQR(rich_stable$Simpson)
IQR(rich_ex$Simpson)
| /Microbiome_AlphaDiv_Fig.R | no_license | tgmahomed/COPDMicrobiome | R | false | false | 2,656 | r | #Load packages
# Load packages: QIIME2 artifact import, phyloseq ecology tools, and plotting helpers
library("qiime2R")
library("phyloseq")
library("ggplot2")
library("vegan")
library("tidyverse")
library("ape")
library("RColorBrewer")
library("viridis")
library("ggsci")
#import bacteriome data (deblur feature table + taxonomy from QIIME2 .qza artifacts)
Physeq=qza_to_phyloseq(features="C:/Users/Tanweer/Documents/FilesForR/table_deblur.qza", taxonomy ="C:/Users/Tanweer/Documents/FilesForR/taxonomy_1.qza")
# Sample metadata: tab-delimited, first column used as sample IDs (row names)
Meta1=read.delim(file ="C:/Users/Tanweer/Documents/FilesForR/sample_metadata_Bac_vir_3.txt" , header = TRUE, sep = "\t", row.names = 1)
Meta2=sample_data(Meta1)
# Combine the feature/taxonomy object with the sample metadata into one phyloseq object
Bacseq=merge_phyloseq(Physeq, Meta2)
plot_richness(Bacseq, x= "DiseaseState", color = "DiseaseState", measures = c("Chao1", "Simpson")) + geom_boxplot() + scale_color_brewer(palette = "Paired") + theme(legend.position="right", legend.title = element_blank(), legend.text = element_text(size = 10, face = "bold.italic"), axis.text.x = element_text (size=10, face="bold", angle = 360), axis.text.y = element_text (size=10, face="bold.italic"), axis.title = element_text(size = 12, face="bold"), strip.text.x = element_text(size = 14, color = "black", face = "bold"))
p <- plot_richness(Bacseq, "DiseaseState", "Participant", measures = c("Chao1", "Simpson"))
(p <- p + geom_boxplot(data = p$data, aes(x = DiseaseState, y = value, color = NULL), alpha = 0.1)) + theme(legend.position="right", legend.title = element_blank(), legend.text = element_text(size = 10, face = "bold.italic"), axis.text.x = element_text (size=10, face="bold", angle = 360), axis.text.y = element_text (size=10, face="bold.italic"), axis.title = element_text(size = 12, face="bold"), strip.text.x = element_text(size = 14, color = "black", face = "bold"))
# Full alpha-diversity table for all samples (not referenced again in this script)
richness=estimate_richness(Bacseq)
# Split samples by disease state for group-wise summaries
Bacseq_Stable=subset_samples(Bacseq, DiseaseState=="Stable")
Bacseq_Ex=subset_samples(Bacseq, DiseaseState=="Exacerbation")
# Mean Chao1 richness and Simpson diversity for each group
mean(estimate_richness(Bacseq_Stable, measures = c("Chao1"))[,1])
mean(estimate_richness(Bacseq_Ex, measures = c("Chao1"))[,1])
mean(estimate_richness(Bacseq_Stable, measures = c("Simpson"))[,1])
mean(estimate_richness(Bacseq_Ex, measures = c("Simpson"))[,1])
# Medians of the same measures
median(estimate_richness(Bacseq_Stable, measures = c("Chao1"))[,1])
median(estimate_richness(Bacseq_Ex, measures = c("Chao1"))[,1])
median(estimate_richness(Bacseq_Stable, measures = c("Simpson"))[,1])
median(estimate_richness(Bacseq_Ex, measures = c("Simpson"))[,1])
# Interquartile ranges of the same measures
IQR(estimate_richness(Bacseq_Stable, measures = c("Chao1"))[,1])
IQR(estimate_richness(Bacseq_Ex, measures = c("Chao1"))[,1])
IQR(estimate_richness(Bacseq_Stable, measures = c("Simpson"))[,1])
IQR(estimate_richness(Bacseq_Ex, measures = c("Simpson"))[,1])
|
## Plot 3 of the Electric Power Consumption exploratory analysis:
## three energy sub-metering channels over a two-day window, saved as a PNG.
## Originally ran on 10.6.8 Mac and R version 3.1.0
## Download data, unzip the file and read it into a table
fileURL<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL,destfile="./Electric.zip",method="curl")
unzip("./Electric.zip")
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
## Convert "Date" to Date class, then subset the two target days
## (comparing the Date column to a "y/m/d" string coerces the string via as.Date)
data$Date<-as.Date(data$Date,format="%d/%m/%Y")
powersub<-subset(data,data$Date=="2007/02/01")
powersub2<-subset(data,data$Date=="2007/02/02")
powerset<-rbind(powersub,powersub2)
## Convert "Global_active_power" into numeric and build a combined DateTime column
powerset$Global_active_power<-as.numeric(powerset$Global_active_power)
require("lubridate")
powerset$DateTime <- ymd_hms(paste(powerset$Date,powerset$Time,sep="_"))
## Plot and save
png("plot3.png",width=480,height=480)
## Creates work space
## NOTE(review): this blank plot is immediately superseded by the plot() call
## below, which redraws the axes itself; consider removing this line
plot(powerset$DateTime,powerset$Global_active_power,type="n",ylab="Energy sub metering",xlab="")
## Plots first black line
plot(powerset$DateTime, powerset$Sub_metering_1,col="black", type="l",ylab="Energy sub metering",xlab="")
## Adds red line
lines(powerset$DateTime,powerset$Sub_metering_2, type="l",col="red")
## Adds blue line
lines(powerset$DateTime,powerset$Sub_metering_3, type="l",col="blue")
## Adds legend
legend(x="topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),lwd=c(1.5,1.5,1.5),col=c("black","red","blue"),pt.cex=2,cex=1)
dev.off() | /plot3.R | no_license | wingloklam/ExData_Plotting1 | R | false | false | 1,498 | r | ##Originally ran on 10.6.8 Mac and R version 3.1.0
##Download data, unzip the file and read it into a table
fileURL<-"https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileURL,destfile="./Electric.zip",method="curl")
unzip("./Electric.zip")
data<-read.table("household_power_consumption.txt",sep=";",header=TRUE,stringsAsFactors=FALSE)
##Convert "Date" into date character and subset
data$Date<-as.Date(data$Date,format="%d/%m/%Y")
powersub<-subset(data,data$Date=="2007/02/01")
powersub2<-subset(data,data$Date=="2007/02/02")
powerset<-rbind(powersub,powersub2)
##Convert "Global_active_power" into numeric and create DateTime
powerset$Global_active_power<-as.numeric(powerset$Global_active_power)
require("lubridate")
powerset$DateTime <- ymd_hms(paste(powerset$Date,powerset$Time,sep="_"))
##Plot and save
png("plot3.png",width=480,height=480)
##Creates work space
plot(powerset$DateTime,powerset$Global_active_power,type="n",ylab="Energy sub metering",xlab="")
##Plots first black line
plot(powerset$DateTime, powerset$Sub_metering_1,col="black", type="l",ylab="Energy sub metering",xlab="")
##Adds red line
lines(powerset$DateTime,powerset$Sub_metering_2, type="l",col="red")
##Addds blue line
lines(powerset$DateTime,powerset$Sub_metering_3, type="l",col="blue")
##Adds legend
legend(x="topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),lwd=c(1.5,1.5,1.5),col=c("black","red","blue"),pt.cex=2,cex=1)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VSS.R
\name{VSS}
\alias{VSS}
\title{Volume Distribution at Steady-State (observed)}
\usage{
VSS(MRT, CL, Safe = TRUE)
}
\arguments{
\item{MRT}{Single numeric value of time}
\item{CL}{Single numeric value of clearance}
\item{Safe}{Single logical value declaring whether to perform redundant data checks (default is TRUE).}
}
\value{
Single numeric value
}
\description{
Calculate Volume of Distribution at Steady-State.
}
\details{
This function simply calculates VSS from MRT and CL when they have already been calculated.
\deqn{ VSS = MRT * CL }
}
\examples{
VSS(MRT = 20, CL = 0.1)
}
\author{
Mango Solutions
}
\keyword{math}
| /mangoNCA/man/VSS.Rd | no_license | isabella232/mangoNCA | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/VSS.R
\name{VSS}
\alias{VSS}
\title{Volume Distribution at Steady-State (observed)}
\usage{
VSS(MRT, CL, Safe = TRUE)
}
\arguments{
\item{MRT}{Single numeric value of time}
\item{CL}{Single numeric value of clearance}
\item{Safe}{Single logical value declaring whether to perform redundant data checks (default is TRUE).}
}
\value{
Single numeric value
}
\description{
Calculate Volume of Distribution at Steady-State.
}
\details{
This function simply calculates VSS from MRT and CL when they have already been calculated.
\deqn{ VSS = MRT * CL }
}
\examples{
VSS(MRT = 20, CL = 0.1)
}
\author{
Mango Solutions
}
\keyword{math}
|
#' Recursively modify AST nodes that satisfy a predicate
#'
#' Generic dispatched on `.ast`. Methods first walk the node's children
#' (when `.recurse` is `TRUE`), then apply `.f` to the node itself whenever
#' `isTRUE(.p(node))`. Extra arguments in `...` are forwarded through the
#' recursion and to `.f`.
modify_ast_if <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  UseMethod("modify_ast_if", .ast)
}
#' Default method for non-AST leaf values
#'
#' Leaves have no children, so there is nothing to recurse into: the value
#' is transformed by `.f` when `.p` holds, and returned untouched otherwise.
#' @export
modify_ast_if.default <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  # Guard clause: pass non-matching values straight through.
  if (!isTRUE(.p(.ast))) {
    return(.ast)
  }
  .f(.ast, ...)
}
#' Method for `ast` nodes
#'
#' Children in `$args` are rewritten first (bottom-up), then the node
#' itself is transformed when `.p(.ast)` holds.
#' @export
modify_ast_if.ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  # Do a deep search. Presumably, .p(.ast, ...) will not be true for
  # child elements, if the parent was intended to be changed. This
  # design will make the logic more straightforward.
  if (isTRUE(.recurse)) {
    # Recurse into each child expression before considering this node.
    .ast$args <- lapply(.ast$args, modify_ast_if, .p, .f, ..., .recurse = .recurse)
  }
  if (isTRUE(.p(.ast))) {
    .ast <- .f(.ast, ...)
  }
  .ast
}
#' Method for `function_ast` nodes
#'
#' Recurses into the body in `$args` and into the formal arguments in
#' `$fargs`. Only the formals selected by `mutable_fargs()` (defined
#' elsewhere in the package) are rewritten -- NOTE(review): presumably
#' formals with modifiable defaults; confirm against `mutable_fargs`.
#' @export
modify_ast_if.function_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  # Do a deep search. Presumably, .p(.ast, ...) will not be true for
  # child elements, if the parent was intended to be changed. This
  # design will make the logic more straightforward.
  if (isTRUE(.recurse)) {
    # $args is handed to the generic directly (dispatch on its class),
    # whereas the selected formals are mapped over individually.
    .ast$args <- modify_ast_if(.ast$args, .p, .f, ..., .recurse = .recurse)
    .ast$fargs[mutable_fargs(.ast)] <-
      lapply(.ast$fargs[mutable_fargs(.ast)], modify_ast_if, .p, .f, ..., .recurse = .recurse)
  }
  if (isTRUE(.p(.ast))) {
    .ast <- .f(.ast, ...)
  }
  .ast
}
#' Method for `qualified_ast` nodes
#'
#' Recurses into the children in `$args` and into the qualifier head
#' (`$qual_head`) before testing the node itself.
#' @export
modify_ast_if.qualified_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  # Do a deep search. Presumably, .p(.ast, ...) will not be true for
  # child elements, if the parent was intended to be changed. This
  # design will make the logic more straightforward.
  if (isTRUE(.recurse)) {
    .ast$args <- lapply(.ast$args, modify_ast_if, .p, .f, ..., .recurse = .recurse)
    .ast$qual_head <- modify_ast_if(.ast$qual_head, .p, .f, ..., .recurse = .recurse)
  }
  if (isTRUE(.p(.ast))) {
    .ast <- .f(.ast, ...)
  }
  .ast
}
#' Method for `formula_ast` nodes
#'
#' `$args` is handed to the generic directly (dispatching on its class)
#' rather than mapped over with `lapply()`; the node is then transformed
#' when `.p(.ast)` holds.
#' @export
modify_ast_if.formula_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
  # Do a deep search. Presumably, .p(.ast, ...) will not be true for
  # child elements, if the parent was intended to be changed. This
  # design will make the logic more straightforward.
  if (isTRUE(.recurse)) {
    .ast$args <- modify_ast_if(.ast$args, .p, .f, ..., .recurse = .recurse)
  }
  if (isTRUE(.p(.ast))) {
    .ast <- .f(.ast, ...)
  }
  .ast
}
| /R/ast-modify_ast_if.R | permissive | nyuglobalties/blueprintr | R | false | false | 2,306 | r | modify_ast_if <- function(.ast, .p, .f, ..., .recurse = TRUE) {
UseMethod("modify_ast_if", .ast)
}
#' @export
modify_ast_if.default <- function(.ast, .p, .f, ..., .recurse = TRUE) {
if (isTRUE(.p(.ast))) {
return(.f(.ast, ...))
} else {
return(.ast)
}
}
#' @export
modify_ast_if.ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
# Do a deep search. Presumably, .p(.ast, ...) will not be true for
# child elements, if the parent was intended to be changed. This
# design will make the logic more straightforward.
if (isTRUE(.recurse)) {
.ast$args <- lapply(.ast$args, modify_ast_if, .p, .f, ..., .recurse = .recurse)
}
if (isTRUE(.p(.ast))) {
.ast <- .f(.ast, ...)
}
.ast
}
#' @export
modify_ast_if.function_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
# Do a deep search. Presumably, .p(.ast, ...) will not be true for
# child elements, if the parent was intended to be changed. This
# design will make the logic more straightforward.
if (isTRUE(.recurse)) {
.ast$args <- modify_ast_if(.ast$args, .p, .f, ..., .recurse = .recurse)
.ast$fargs[mutable_fargs(.ast)] <-
lapply(.ast$fargs[mutable_fargs(.ast)], modify_ast_if, .p, .f, ..., .recurse = .recurse)
}
if (isTRUE(.p(.ast))) {
.ast <- .f(.ast, ...)
}
.ast
}
#' @export
modify_ast_if.qualified_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
# Do a deep search. Presumably, .p(.ast, ...) will not be true for
# child elements, if the parent was intended to be changed. This
# design will make the logic more straightforward.
if (isTRUE(.recurse)) {
.ast$args <- lapply(.ast$args, modify_ast_if, .p, .f, ..., .recurse = .recurse)
.ast$qual_head <- modify_ast_if(.ast$qual_head, .p, .f, ..., .recurse = .recurse)
}
if (isTRUE(.p(.ast))) {
.ast <- .f(.ast, ...)
}
.ast
}
#' @export
modify_ast_if.formula_ast <- function(.ast, .p, .f, ..., .recurse = TRUE) {
# Do a deep search. Presumably, .p(.ast, ...) will not be true for
# child elements, if the parent was intended to be changed. This
# design will make the logic more straightforward.
if (isTRUE(.recurse)) {
.ast$args <- modify_ast_if(.ast$args, .p, .f, ..., .recurse = .recurse)
}
if (isTRUE(.p(.ast))) {
.ast <- .f(.ast, ...)
}
.ast
}
|
#####################################################
# This code perform simulations for the profile
# likelihood estimation procedure, following the idea
# in Ma et al.(2017)
#####################################################
# Read the command line arguments
command_args <- as.numeric(commandArgs(trailingOnly = TRUE))
# Define command line arguments as a variable
dataset <- command_args[1] #seed for dataset
bootstrap <- command_args[2] #seed for bootstrap
# Source code
source("/users/ecui/riskscore/code/application_PL_update_parameters.R")
## Load packages
library(rlang)
library(mgcv)
library(HRW)
##########################################################################
# Organize real data
##########################################################################
## Load NHANES data
data_analysis <- read.csv("/users/ecui/riskscore/data/NHANES_data.csv")
nhanes_data <- data_analysis
## Windsorize PA vairables by upper 95th percentile and 5th percentile
nhanes_data[which(data_analysis$MVPA > quantile(data_analysis$MVPA, 0.95)), "MVPA"] <- quantile(data_analysis$MVPA, 0.95)
nhanes_data[which(data_analysis$ASTP > quantile(data_analysis$ASTP, 0.95)), "ASTP"] <- quantile(data_analysis$ASTP, 0.95)
nhanes_data[which(data_analysis$MVPA < quantile(data_analysis$MVPA, 0.05)), "MVPA"] <- quantile(data_analysis$MVPA, 0.05)
nhanes_data[which(data_analysis$ASTP < quantile(data_analysis$ASTP, 0.05)), "ASTP"] <- quantile(data_analysis$ASTP, 0.05)
## Scale PA variables
nhanes_data[,c("MVPA", "ASTP")] <- apply(nhanes_data[,c("MVPA", "ASTP")] , 2, scale)
## Surival outcome
dep_vars <- "yr9_mort"
## remove people who are censored before interested survival time -- 0 subject
nhanes_data <- nhanes_data[which(!is.na(nhanes_data[,dep_vars])),]
## Reorganize the covariates
# Divide age by 100 for numeric stability
nhanes_data$Age <- nhanes_data$Age / 100
# Create indicator for smoker
nhanes_data$Smoker <- ifelse(nhanes_data$SmokeCigs == "Current", 1, 0)
## Separate data by gender
dataM <- nhanes_data[which(nhanes_data$Gender == 'Male'), ]
dataW <- nhanes_data[which(nhanes_data$Gender == 'Female'), ]
## Derive components of single index model
### Response
yM <- as.matrix(dataM[,dep_vars], ncol = 1)
yW <- as.matrix(dataW[,dep_vars], ncol = 1)
colnames(yM) <- colnames(yW) <- "All-Cause Mortality"
### Non-accelerometry covariates
zM <- as.matrix(cbind(dataM$Age, dataM$Smoker), ncol = 2)
zW <- as.matrix(cbind(dataW$Age, dataW$Smoker), ncol = 2)
colnames(zM) <- colnames(zW) <- c("Age", "Smoker")
### Accelerometry variables
var <- c("MVPA", "ASTP")
xM <- as.matrix(dataM[,var], ncol = length(var))
xW <- as.matrix(dataW[,var], ncol = length(var))
colnames(xM) <- colnames(xW) <- var
## remove unnecessary variables
rm(dep_vars, data_analysis, nhanes_data, dataM, dataW)
# Copies of original matrices
yMO <- yM; yWO <- yW
xMO <- xM; xWO <- xW
zMO <- zM; zWO <- zW
# Define spline type and knots
spline <- "os"
nknots <- 8
## Specify true values of parameters
## True simulation curves for the two PA variables. Downstream code (the
## eta.m / eta.w linear predictors below) calls these as s.MVPA()/s.ASTP(),
## but they were originally defined only as sMVPA/sASTP, leaving s.MVPA and
## s.ASTP undefined at the call sites. Define the dotted names and keep the
## original names as aliases for backward compatibility.
s.MVPA <- function(x){ -(-0.2*(x-0.8)^3-0.4) }
s.ASTP <- function(x){ -(0.3*exp(x)-1.25) }
sMVPA <- s.MVPA
sASTP <- s.ASTP
beta1.m <- -1 ## identifiability constraint
theta.age.m <- 8
theta.age.w <- 9
theta.smoking.m <- 0.6
theta.smoking.w <- 0.7
beta1.w <- 0.8
beta0.m <- -6
beta0.w <- -7
##########################################################################
# Generate simulated dataset
##########################################################################
set.seed(dataset)
n.sample = 1000
## Sample X and Z from n.sample men and n.sample women without replacement
sample.ind.m <- sample(1:nrow(xMO), n.sample, replace = FALSE)
sample.ind.w <- sample(1:nrow(xWO), n.sample, replace = FALSE)
xM.sample <- xMO[sample.ind.m,]
zM.sample <- zMO[sample.ind.m,]
xW.sample <- xWO[sample.ind.w,]
zW.sample <- zWO[sample.ind.w,]
Xall.sample <- rbind(xM.sample, xW.sample)
## Generate binary responses
eta.m <- beta0.m + beta1.m*(s.MVPA(xM.sample[,1]) + s.ASTP(xM.sample[,2])) +
theta.age.m * zM.sample[,1] + theta.smoking.m * zM.sample[,2]
p.m <- plogis(eta.m)
yM.sample <- matrix(rbinom(length(eta.m), size = 1, prob = p.m), ncol = 1)
eta.w <- beta0.w + beta1.w*(s.MVPA(xW.sample[,1]) + s.ASTP(xW.sample[,2])) +
theta.age.w * zW.sample[,1] + theta.smoking.w * zW.sample[,2]
p.w <- plogis(eta.w)
yW.sample <- matrix(rbinom(length(eta.w), size = 1, prob = p.w), ncol = 1)
## remove unnecessary variables
rm(eta.m, eta.w, p.m, p.w)
##########################################################################
# Fit the model with bootstrap sample on dataset
##########################################################################
set.seed(bootstrap)
# Get number of men and women for sampling
nrM <- nrow(yM.sample)
nrW <- nrow(yW.sample)
# Obtain indices of bootstrap samples
nM <- sample(1:nrM, nrM, replace = TRUE)
nW <- sample(1:nrW, nrW, replace = TRUE)
# Create new data with indices
yM <- yM.sample[nM, ]; yW <- yW.sample[nW, ]
xM <- xM.sample[nM, ]; xW <- xW.sample[nW, ]
zM <- zM.sample[nM, ]; zW <- zW.sample[nW, ]
## Organize iteration-invariant variables to fit single index model
betaM <- -1
betaW <- 0.5
Y <- matrix(c(yM, yW), ncol = 1) ## response
X.all = rbind(xM, xW) ## PA variables
X01 <- matrix(c(rep(1, length(yM)), rep(0, length(yW))), ncol = 1) ## design matrix of intercept term
X02 <- matrix(c(rep(0, length(yM)), rep(1, length(yW))), ncol = 1)
Z1 <- rbind(zM, matrix(0, nrow = length(yW), ncol = ncol(zM))) ## design matrix of offset
Z2 <- rbind(matrix(0, nrow = length(yM), ncol = ncol(zW)), zW)
dummyID <- factor(rep(1, length(Y)))
# Get spline basis matrix
B <- c()
for(i in 1:ncol(X.all)){
x <- matrix(X.all[,i], ncol = 1)
if(spline == "os"){
# O'Sullivan splines
# Set range of X values and interior knots
a <- 1.01*min(x) - 0.01*max(x)
b <- 1.01*max(x) - 0.01*min(x)
intKnots <- quantile(unique(x), seq(0, 1, length = (nknots + 2))[-c(1,(nknots + 2))])
# Get O'Sullivan spline basis functions
Bg <- ZOSull(x, range.x = c(a,b), intKnots = intKnots)
B <- cbind(B, Bg)
}else{
# Spline from mgcv
fit <- gam(Y ~ -1 + s(x, k = nknots, fx = TRUE, bs = spline), family = "gaussian") ## fit a fake model
B <- cbind(B, predict(fit, type = "lpmatrix"))
rm(fit)
}
}
rm(x)
# Profile-likelihood estimation loop: alternate parameter updates until the
# relative change in the Bernoulli log-likelihood falls below the tolerance
# (or 1000 iterations are reached).
threshold <- 1e-6 ## convergence tolerance: relative change in log-likelihood
loglike_old <- 1 ## initial value of log likelihood
nc <- 0 ## iteration counter (hard cap at 1000)
epsilon <- 1 ## current relative change; start above threshold
while(nc < 1000 & epsilon > threshold){
# Reweight B and X by beta
B.new <- rbind(betaM * B[1:nrow(xM),], betaW * B[(nrow(xM)+1):nrow(X.all),])
X.all.new <- rbind(betaM * matrix(X.all[1:nrow(xM),], ncol = ncol(xM)),
                     betaW * matrix(X.all[(nrow(xM)+1):nrow(X.all),], ncol = ncol(xW)) )
# Obtain updated estimates.
# NOTE(review): update_parameters() (sourced at the top of the script) takes
# no arguments -- presumably it reads Y, B.new, X.all.new, etc. from the
# global environment; confirm against the sourced file.
updated_estimates = update_parameters()
alpha <- updated_estimates$alphahat
alpha_int <- updated_estimates$alphahat_int
beta01 <- updated_estimates$beta01hat
beta02 <- updated_estimates$beta02hat
thetaM <- updated_estimates$thetaMhat
thetaW <- updated_estimates$thetaWhat
betaM <- updated_estimates$betaMhat
betaW <- updated_estimates$betaWhat
eta <- updated_estimates$eta
score <- updated_estimates$score
# Split the score vector back into the men's and women's portions
scoreM <- score[1:nrow(xM)]
scoreW <- score[(nrow(xM)+1):(nrow(xM) + nrow(xW))]
curve = updated_estimates$curve
sigma_a = updated_estimates$sigma_a
# Update log likelihood and the relative-change convergence criterion
loglike <- sum(Y*log(plogis(eta)) + (1-Y)*log(1-plogis(eta))) ## log likelihood
epsilon <- abs((loglike-loglike_old)/loglike_old)
loglike_old <- loglike
nc <- nc + 1
}
# Get results from simulation
result <- list()
for(pa in var){
X.samp <- X.all[, pa]
X <- Xall.sample[, pa]
result[[pa]] <- data.frame(x = X[order(X)], s.x = curve[[pa]]$s.x[order(X.samp)])
}
result$param.est <- c(beta01, beta02, betaW, thetaM, thetaW)
# Create job-specific output so we don't overwrite things!
filename <- sprintf("/users/ecui/riskscore/sim_output/dataset_%s_simulation_%s.RData", dataset, bootstrap)
save("result", file = filename) | /codePL/simulation_PL.R | no_license | christithomp/semiparametric-risk-score | R | false | false | 8,163 | r | #####################################################
# This code perform simulations for the profile
# likelihood estimation procedure, following the idea
# in Ma et al.(2017)
#####################################################
# Read the command line arguments
command_args <- as.numeric(commandArgs(trailingOnly = TRUE))
# Define command line arguments as a variable
dataset <- command_args[1] #seed for dataset
bootstrap <- command_args[2] #seed for bootstrap
# Source code
source("/users/ecui/riskscore/code/application_PL_update_parameters.R")
## Load packages
library(rlang)
library(mgcv)
library(HRW)
##########################################################################
# Organize real data
##########################################################################
## Load NHANES data
data_analysis <- read.csv("/users/ecui/riskscore/data/NHANES_data.csv")
nhanes_data <- data_analysis
## Windsorize PA vairables by upper 95th percentile and 5th percentile
nhanes_data[which(data_analysis$MVPA > quantile(data_analysis$MVPA, 0.95)), "MVPA"] <- quantile(data_analysis$MVPA, 0.95)
nhanes_data[which(data_analysis$ASTP > quantile(data_analysis$ASTP, 0.95)), "ASTP"] <- quantile(data_analysis$ASTP, 0.95)
nhanes_data[which(data_analysis$MVPA < quantile(data_analysis$MVPA, 0.05)), "MVPA"] <- quantile(data_analysis$MVPA, 0.05)
nhanes_data[which(data_analysis$ASTP < quantile(data_analysis$ASTP, 0.05)), "ASTP"] <- quantile(data_analysis$ASTP, 0.05)
## Scale PA variables
nhanes_data[,c("MVPA", "ASTP")] <- apply(nhanes_data[,c("MVPA", "ASTP")] , 2, scale)
## Surival outcome
dep_vars <- "yr9_mort"
## remove people who are censored before interested survival time -- 0 subject
nhanes_data <- nhanes_data[which(!is.na(nhanes_data[,dep_vars])),]
## Reorganize the covariates
# Divide age by 100 for numeric stability
nhanes_data$Age <- nhanes_data$Age / 100
# Create indicator for smoker
nhanes_data$Smoker <- ifelse(nhanes_data$SmokeCigs == "Current", 1, 0)
## Separate data by gender
dataM <- nhanes_data[which(nhanes_data$Gender == 'Male'), ]
dataW <- nhanes_data[which(nhanes_data$Gender == 'Female'), ]
## Derive components of single index model
### Response
yM <- as.matrix(dataM[,dep_vars], ncol = 1)
yW <- as.matrix(dataW[,dep_vars], ncol = 1)
colnames(yM) <- colnames(yW) <- "All-Cause Mortality"
### Non-accelerometry covariates
zM <- as.matrix(cbind(dataM$Age, dataM$Smoker), ncol = 2)
zW <- as.matrix(cbind(dataW$Age, dataW$Smoker), ncol = 2)
colnames(zM) <- colnames(zW) <- c("Age", "Smoker")
### Accelerometry variables
var <- c("MVPA", "ASTP")
xM <- as.matrix(dataM[,var], ncol = length(var))
xW <- as.matrix(dataW[,var], ncol = length(var))
colnames(xM) <- colnames(xW) <- var
## remove unnecessary variables
rm(dep_vars, data_analysis, nhanes_data, dataM, dataW)
# Copies of original matrices
yMO <- yM; yWO <- yW
xMO <- xM; xWO <- xW
zMO <- zM; zWO <- zW
# Define spline type and knots
spline <- "os"
nknots <- 8
## Specify true values of parameters
## True simulation curves for the two PA variables. Downstream code (the
## eta.m / eta.w linear predictors below) calls these as s.MVPA()/s.ASTP(),
## but they were originally defined only as sMVPA/sASTP, leaving s.MVPA and
## s.ASTP undefined at the call sites. Define the dotted names and keep the
## original names as aliases for backward compatibility.
s.MVPA <- function(x){ -(-0.2*(x-0.8)^3-0.4) }
s.ASTP <- function(x){ -(0.3*exp(x)-1.25) }
sMVPA <- s.MVPA
sASTP <- s.ASTP
beta1.m <- -1 ## identifiability constraint
theta.age.m <- 8
theta.age.w <- 9
theta.smoking.m <- 0.6
theta.smoking.w <- 0.7
beta1.w <- 0.8
beta0.m <- -6
beta0.w <- -7
##########################################################################
# Generate simulated dataset
##########################################################################
set.seed(dataset)
n.sample = 1000
## Sample X and Z from n.sample men and n.sample women without replacement
sample.ind.m <- sample(1:nrow(xMO), n.sample, replace = FALSE)
sample.ind.w <- sample(1:nrow(xWO), n.sample, replace = FALSE)
xM.sample <- xMO[sample.ind.m,]
zM.sample <- zMO[sample.ind.m,]
xW.sample <- xWO[sample.ind.w,]
zW.sample <- zWO[sample.ind.w,]
Xall.sample <- rbind(xM.sample, xW.sample)
## Generate binary responses
eta.m <- beta0.m + beta1.m*(s.MVPA(xM.sample[,1]) + s.ASTP(xM.sample[,2])) +
theta.age.m * zM.sample[,1] + theta.smoking.m * zM.sample[,2]
p.m <- plogis(eta.m)
yM.sample <- matrix(rbinom(length(eta.m), size = 1, prob = p.m), ncol = 1)
eta.w <- beta0.w + beta1.w*(s.MVPA(xW.sample[,1]) + s.ASTP(xW.sample[,2])) +
theta.age.w * zW.sample[,1] + theta.smoking.w * zW.sample[,2]
p.w <- plogis(eta.w)
yW.sample <- matrix(rbinom(length(eta.w), size = 1, prob = p.w), ncol = 1)
## remove unnecessary variables
rm(eta.m, eta.w, p.m, p.w)
##########################################################################
# Fit the model with bootstrap sample on dataset
##########################################################################
set.seed(bootstrap)
# Get number of men and women for sampling
nrM <- nrow(yM.sample)
nrW <- nrow(yW.sample)
# Obtain indices of bootstrap samples
nM <- sample(1:nrM, nrM, replace = TRUE)
nW <- sample(1:nrW, nrW, replace = TRUE)
# Create new data with indices
yM <- yM.sample[nM, ]; yW <- yW.sample[nW, ]
xM <- xM.sample[nM, ]; xW <- xW.sample[nW, ]
zM <- zM.sample[nM, ]; zW <- zW.sample[nW, ]
## Organize iteration-invariant variables to fit single index model
betaM <- -1
betaW <- 0.5
Y <- matrix(c(yM, yW), ncol = 1) ## response
X.all = rbind(xM, xW) ## PA variables
X01 <- matrix(c(rep(1, length(yM)), rep(0, length(yW))), ncol = 1) ## design matrix of intercept term
X02 <- matrix(c(rep(0, length(yM)), rep(1, length(yW))), ncol = 1)
Z1 <- rbind(zM, matrix(0, nrow = length(yW), ncol = ncol(zM))) ## design matrix of offset
Z2 <- rbind(matrix(0, nrow = length(yM), ncol = ncol(zW)), zW)
dummyID <- factor(rep(1, length(Y)))
# Get spline basis matrix
B <- c()
for(i in 1:ncol(X.all)){
x <- matrix(X.all[,i], ncol = 1)
if(spline == "os"){
# O'Sullivan splines
# Set range of X values and interior knots
a <- 1.01*min(x) - 0.01*max(x)
b <- 1.01*max(x) - 0.01*min(x)
intKnots <- quantile(unique(x), seq(0, 1, length = (nknots + 2))[-c(1,(nknots + 2))])
# Get O'Sullivan spline basis functions
Bg <- ZOSull(x, range.x = c(a,b), intKnots = intKnots)
B <- cbind(B, Bg)
}else{
# Spline from mgcv
fit <- gam(Y ~ -1 + s(x, k = nknots, fx = TRUE, bs = spline), family = "gaussian") ## fit a fake model
B <- cbind(B, predict(fit, type = "lpmatrix"))
rm(fit)
}
}
rm(x)
# Do iteration
threshold <- 1e-6 ## change in log-likelihood
loglike_old <- 1 ## initial value of log likelihood
nc <- 0
epsilon <- 1
while(nc < 1000 & epsilon > threshold){
# Reweight B and X by beta
B.new <- rbind(betaM * B[1:nrow(xM),], betaW * B[(nrow(xM)+1):nrow(X.all),])
X.all.new <- rbind(betaM * matrix(X.all[1:nrow(xM),], ncol = ncol(xM)),
betaW * matrix(X.all[(nrow(xM)+1):nrow(X.all),], ncol = ncol(xW)) )
# Obtain updated estimates
updated_estimates = update_parameters()
alpha <- updated_estimates$alphahat
alpha_int <- updated_estimates$alphahat_int
beta01 <- updated_estimates$beta01hat
beta02 <- updated_estimates$beta02hat
thetaM <- updated_estimates$thetaMhat
thetaW <- updated_estimates$thetaWhat
betaM <- updated_estimates$betaMhat
betaW <- updated_estimates$betaWhat
eta <- updated_estimates$eta
score <- updated_estimates$score
scoreM <- score[1:nrow(xM)]
scoreW <- score[(nrow(xM)+1):(nrow(xM) + nrow(xW))]
curve = updated_estimates$curve
sigma_a = updated_estimates$sigma_a
# Update log likelihood
loglike <- sum(Y*log(plogis(eta)) + (1-Y)*log(1-plogis(eta))) ## log likelihood
epsilon <- abs((loglike-loglike_old)/loglike_old)
loglike_old <- loglike
nc <- nc + 1
}
# Get results from simulation
result <- list()
for(pa in var){
X.samp <- X.all[, pa]
X <- Xall.sample[, pa]
result[[pa]] <- data.frame(x = X[order(X)], s.x = curve[[pa]]$s.x[order(X.samp)])
}
result$param.est <- c(beta01, beta02, betaW, thetaM, thetaW)
# Create job-specific output so we don't overwrite things!
filename <- sprintf("/users/ecui/riskscore/sim_output/dataset_%s_simulation_%s.RData", dataset, bootstrap)
save("result", file = filename) |
# Survey-design examples: NHANES demographics import, plus simple-random and
# stratified sample designs built from the survey package's bundled `api` data.
library(foreign)
# Read the SAS transport (XPORT) NHANES demographics file
demo <- read.xport("/home/wangcc-me/Downloads/NHANES/demo_c.xpt")
names(demo)
library(survey)
data(api)
# Simple random sample design with a finite population correction (fpc)
srs_design <- svydesign(id = ~1, fpc = ~fpc, data = apisrs)
srs_design
svytotal(~enroll, srs_design)
svymean(~enroll, srs_design)
# summ(apisrs$enroll)
# sqrt(sd(apisrs$enroll))
# Same sample analysed with sampling weights only (no fpc)
nofpc <- svydesign(id = ~1, weights = ~pw, data = apisrs)
nofpc
svytotal(~enroll, nofpc)
svymean(~enroll, nofpc)
svytotal(~stype, srs_design)
# Stratified design: strata defined by school type, per-stratum fpc
strat_design <- svydesign(id = ~1, strata = ~stype, fpc = ~fpc, data = apistrat)
strat_design
| /results/survey.R | no_license | winterwang/LSHTMproject | R | false | false | 545 | r | library(foreign)
demo <- read.xport("/home/wangcc-me/Downloads/NHANES/demo_c.xpt")
names(demo)
library(survey)
data(api)
srs_design <- svydesign(id = ~1, fpc = ~fpc, data = apisrs)
srs_design
svytotal(~enroll, srs_design)
svymean(~enroll, srs_design)
# summ(apisrs$enroll)
# sqrt(sd(apisrs$enroll))
nofpc <- svydesign(id = ~1, weights = ~pw, data = apisrs)
nofpc
svytotal(~enroll, nofpc)
svymean(~enroll, nofpc)
svytotal(~stype, srs_design)
strat_design <- svydesign(id = ~1, strata = ~stype, fpc = ~fpc, data = apistrat)
strat_design
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{seff_init}
\alias{seff_init}
\title{seff_init}
\usage{
seff_init(B, X, Y, bw, ncore)
}
\description{
Initial value function for the semiparametric efficient estimation method.
}
\keyword{internal}
| /orthoDr/man/seff_init.Rd | no_license | vincentskywalkers/orthoDr | R | false | true | 287 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{seff_init}
\alias{seff_init}
\title{seff_init}
\usage{
seff_init(B, X, Y, bw, ncore)
}
\description{
semiparametric efficient method initial value function
}
\keyword{internal}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/summary.R
\name{biomass_con_1}
\alias{biomass_con_1}
\title{Equilibrium biomass for consumer
Type I Functional Response}
\usage{
biomass_con_1(m, a, e, r, K)
}
\description{
Equilibrium biomass for consumer
Type I Functional Response
}
| /man/biomass_con_1.Rd | permissive | mwpennell/tsr | R | false | false | 323 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/summary.R
\name{biomass_con_1}
\alias{biomass_con_1}
\title{Equilibrium biomass for consumer
Type I Functional Response}
\usage{
biomass_con_1(m, a, e, r, K)
}
\description{
Equilibrium biomass for consumer
Type I Functional Response
}
|
# One-time environment setup for this reproducible-compendium package, which
# uses Python from R via reticulate.
# Ensure all R packages listed as development dependencies are installed
devtools::install_dev_deps()
# Load this package's functions from source
devtools::load_all(".")
# Install any Python packages not provided by reticulate's default environment
reticulate::py_install("seaborn")
| /analysis/00_setup.R | permissive | OJWatson/art_resistance_consensus_modelling | R | false | false | 292 | r | # Setup For correctly using python in this R reproducible compendium package
# Ensure all R packages are installed that are needed
devtools::install_dev_deps()
devtools::load_all(".")
# Installing any python packages not available as default by reticulate
reticulate::py_install("seaborn")
|
# Create the 5 SST figures for the Bering Sea ESR.
library(tidyverse)
library(doParallel)
library(heatwaveR)
library(lubridate)
library(viridis)
library(cowplot)
# 508-compliant NOAA branding palette used throughout the figures (hex codes)
OceansBlue1 <- "#0093D0"
OceansBlue2 <- "#0055A4" # dark blue; used for strip backgrounds and bars
CoralRed1 <- "#FF4438"
Crustacean1 <- "#FF8300"
SeagrassGreen1 <- "#93D500"
SeagrassGreen4 <- "#D0D0D0" # grey, despite the name
UrchinPurple1 <- "#7F7FFF"
WavesTeal1 <- "#1ECAD3"
# Shared ggplot2 theme for the ESR figures: bold white facet labels on a
# NOAA-blue strip, sans-serif black axis text, black panel border, and a
# transparent legend drawn inside the panel at (0.6, 0.7).
mytheme <- theme(strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans",color="black"),
axis.text.y = element_text(size=10,family="sans",color="black"),
axis.text.x = element_text(size=9,family="sans",color="black",hjust=0.75),
panel.border=element_rect(colour="black",fill=NA,size=0.5),
panel.background = element_blank(),
plot.margin=unit(c(0.65,0,0.65,0),"cm"),
legend.position=c(0.6,0.7),
legend.background = element_blank(),
legend.key.size = unit(1,"line"))
# Daily mean SST for the two Bering Sea ecosystem subregions, pulled from the
# AKFIN web service (CRW average SST, 1985-01-01 through 2021-12-31).
# newdate collapses each date onto a dummy Sept-Aug year (Sept-Dec -> 1999,
# Jan-Aug -> 2000) so days of the season line up across years; year2 assigns
# Sept-Dec observations to the following calendar year (a Sept-Aug "ESR year").
newdat <- httr::content(httr::GET('https://apex.psmfc.org/akfin/data_marts/akmp/ecosystem_sub_crw_avg_sst?ecosystem_sub=Southeastern%20Bering%20Sea,Northern%20Bering%20Sea&start_date=19850101&end_date=20211231'), type = "application/json") %>%
bind_rows %>%
mutate(date=as_date(READ_DATE)) %>%
data.frame %>%
dplyr::select(date,meansst=MEANSST,Ecosystem_sub=ECOSYSTEM_SUB) %>%
mutate(doy=yday(date),
year=year(date),
month=month(date),
day=day(date),
newdate=as.Date(ifelse(month>=9,as.character(as.Date(paste("1999",month,day,sep="-"),format="%Y-%m-%d")),
as.character(as.Date(paste("2000",month,day,sep="-"),format="%Y-%m-%d"))),format("%Y-%m-%d")),
year2=ifelse(month>=9,year+1,year)) %>%
arrange(date)
# Figure 1; Anomaly of cumulative SST for years that go from Sept - Aug
# Cumulative annual SST per "heat year" (Sept-Aug, indexed by year2), expressed
# as an anomaly relative to each subregion's 1986-2015 baseline mean.
mymean <- newdat %>%
filter(!year2%in%c(1985)) %>% # 1985 is presumably a partial Sept-Aug year, so drop it
group_by(year2,Ecosystem_sub) %>%
arrange(newdate) %>%
summarise(cumheat=sum(meansst)) %>% # total of daily mean SSTs per heat year and subregion
group_by(Ecosystem_sub) %>%
mutate(meanheat=mean(cumheat[between(year2,1986,2015)]), # 30-yr baseline mean
sdheat=sd(cumheat[between(year2,1986,2015)]), # baseline SD, drawn as +/- 1 SD reference lines
anomaly=cumheat-meanheat)
png("SST_ESR/2021/EBS/Watson_Fig1.png",width=6,height=3.375,units="in",res=300)
mymean %>%
ggplot(aes(year2,anomaly)) +
geom_bar(stat="identity",fill=OceansBlue2) +
geom_hline(aes(yintercept=0),linetype=2) +
geom_hline(aes(yintercept=sdheat),linetype=2) + # +1 SD of the baseline period
geom_hline(aes(yintercept=-sdheat),linetype=2) + # -1 SD of the baseline period
facet_wrap(~Ecosystem_sub) +
mytheme +
scale_x_continuous(expand=c(0.01,0.75)) +
xlab("") +
ylab("Cumulative Annual SST Anomaly (°C)") +
theme(plot.margin=unit(c(0.15,0.25,0.05,0),"cm"))
dev.off()
# Create Figure 2. Total cumulative sea surface temperature (sum of daily temperatures) for each year, apportioned
# by season: summer (Jun–Aug), fall (Sept–Nov), winter (Dec–Feb), spring (Mar–May). Negative
# values are the result of sea surface temperatures below zero
png("SST_ESR/2021/EBS/Watson_Fig2.png",width=6,height=4,units="in",res=300)
newdat %>%
filter(year2>1985) %>%
mutate(Season=case_when(
month%in%c(9,10,11)~"Fall",
month%in%c(12,1,2)~"Winter",
month%in%c(3,4,5)~"Spring",
month%in%c(6,7,8)~"Summer")) %>%
data.frame %>%
mutate(Season=factor(Season),
Season=fct_relevel(Season,"Fall","Winter","Spring","Summer")) %>%
group_by(year2,Ecosystem_sub,Season) %>%
summarise(cumheat=sum(meansst)) %>%
data.frame %>%
# Releveled a second time so the stack/legend order puts Summer first.
mutate(Season=fct_relevel(Season,"Summer","Fall","Winter","Spring")) %>%
ggplot(aes(year2,cumheat,fill=Season)) +
geom_bar(stat="identity") +
#geom_hline(data=mymean,aes(yintercept=meanheat),linetype=2) +
scale_fill_manual(name="",labels=c("Summer","Fall","Winter","Spring"),values=c(OceansBlue2,Crustacean1,UrchinPurple1,WavesTeal1)) +
facet_wrap(~Ecosystem_sub) +
mytheme +
scale_x_continuous(expand=c(0.01,0.75)) +
xlab("") +
ylab("Total Annual Cumulative Sea Surface Temperature (°C)") +
theme(plot.margin=unit(c(0.15,0.25,0.05,0),"cm"),
legend.position=c(0.1,0.9))
dev.off()
# Figure 3. Marine heatwaves in the southeastern and northern Bering Sea since September 2018.
# Uses {heatwaveR}: ts2clm() builds a daily climatology/threshold over the
# 1985-09-01 to 2015-08-31 baseline, and detect_event()$clim returns the daily
# climatology table used for plotting; the two subregions are stacked.
mhw <- (detect_event(ts2clm(newdat %>%
filter(Ecosystem_sub=="Southeastern Bering Sea") %>%
rename(t=date,temp=meansst) %>%
arrange(t), climatologyPeriod = c("1985-09-01", "2015-08-31"))))$clim %>%
mutate(region="Southeastern Bering Sea") %>%
bind_rows((detect_event(ts2clm(newdat %>%
filter(Ecosystem_sub=="Northern Bering Sea") %>%
rename(t=date,temp=meansst) %>%
arrange(t), climatologyPeriod = c("1985-09-01", "2015-08-31"))))$clim %>%
mutate(region="Northern Bering Sea"))
# Escalating intensity thresholds at 2x/3x/4x the climatology-to-threshold gap,
# i.e. the Moderate/Strong/Severe/Extreme heatwave-category convention.
clim_cat <- mhw %>%
#mutate(region=fct_relevel(region,"Western Gulf of Alaska")) %>%
group_by(region) %>%
dplyr::mutate(diff = thresh - seas,
thresh_2x = thresh + diff,
thresh_3x = thresh_2x + diff,
thresh_4x = thresh_3x + diff,
year=year(t))
# Set line colours
lineColCat <- c(
"Temperature" = "black",
"Climatology" = "gray20",
"Moderate" = "gray60",
"Strong" = "gray60",
"Severe" = "gray60",
"Extreme" = "gray60"
)
# Flame fill colours, one per severity category.
fillColCat <- c(
"Moderate" = "#ffc866",
"Strong" = "#ff6900",
"Severe" = "#9e0000",
"Extreme" = "#2d0000"
)
#png("SST_ESR/2020/EBS/Watson_Fig5_010421.png",width=7,height=5,units="in",res=300)
png("SST_ESR/2021/EBS/Watson_Fig3.png",width=7,height=5,units="in",res=300)
ggplot(data = clim_cat %>% filter(t>=as.Date("2018-09-01")), aes(x = t, y = temp)) +
geom_flame(aes(y2 = thresh, fill = "Moderate")) +
geom_flame(aes(y2 = thresh_2x, fill = "Strong")) +
geom_flame(aes(y2 = thresh_3x, fill = "Severe")) +
geom_flame(aes(y2 = thresh_4x, fill = "Extreme")) +
geom_line(aes(y = thresh_2x, col = "Strong"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = thresh_3x, col = "Severe"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = thresh_4x, col = "Extreme"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = seas, col = "Climatology"), size = 0.5) +
geom_line(aes(y = thresh, col = "Moderate"), size = 0.5,linetype= "dotted") +
geom_line(aes(y = temp, col = "Temperature"), size = 0.5) +
scale_colour_manual(name = NULL, values = lineColCat,
breaks = c("Temperature", "Climatology", "Moderate",
"Strong", "Severe", "Extreme")) +
# FIX: guide = FALSE was deprecated in ggplot2 3.3.4; guide = "none" is the
# supported (and long-accepted) way to suppress the fill legend.
scale_fill_manual(name = NULL, values = fillColCat, guide = "none") +
scale_x_date(date_labels = "%b %Y",expand=c(0.01,0)) +
guides(colour = guide_legend(override.aes = list(linetype = c("solid", "solid", "dotted",
"dotted", "dotted", "dotted"),
size = c(0.6, 0.7, 0.7, 0.7, 0.7, 0.7)),
ncol=6)) +
labs(y = "Sea Surface Temperature (°C)", x = NULL) +
# NOTE(review): legend.position="none" here is overridden by mytheme below
# (which sets legend.position=c(0.6,0.7)), so the colour legend still draws.
theme(legend.position="none") +
facet_wrap(~region,ncol=1,scales="free_y") +
mytheme +
theme(#legend.position="top",
legend.key=element_blank(),
legend.text = element_text(size=10),
axis.title.x=element_blank(),
legend.margin=margin(l=-2.75,t = -8.5, unit='cm'),
plot.margin=unit(c(0.65,0,0.0,0),"cm"))
dev.off()
#-------------------------------------------------------------------------------------
# Figure 4. Mean SST for the northern (left) and southeastern (right) Bering Sea shelves.
# (No plotting function is actually defined below; this section builds the plot inline.)
# Load 508 compliant NOAA colors
OceansBlue1='#0093D0'
OceansBlue2='#0055A4' # dark blue
Crustacean1='#FF8300'
SeagrassGreen4='#D0D0D0' # This is just grey
# Assign colors to different time series.
current.year.color <- "black"
last.year.color <- OceansBlue1
mean.color <- UrchinPurple1
# Set default plot theme
theme_set(theme_cowplot())
# Specify legend position coordinates
mylegx <- 0.625
mylegy <- 0.865
current.year <- max(newdat$year)
last.year <- current.year-1
mean.years <- 1985:2014 # 30-yr climatology window for the mean line
mean.lab <- "Mean 1985-2014"
png("SST_ESR/2021/EBS/Watson_Fig4.png",width=7,height=5,units="in",res=300)
# Each geom_line maps a literal key (e.g. 'mygrey') to colour; the actual
# colours and legend labels are resolved in scale_color_manual below.
ggplot() +
geom_line(data=newdat %>% filter(year2<last.year),
aes(newdate,meansst,group=factor(year2),col='mygrey'),size=0.3) +
geom_line(data=newdat %>% filter(year2==last.year),
aes(newdate,meansst,color='last.year.color'),size=0.75) +
geom_line(data=newdat %>%
filter(year%in%mean.years) %>%
group_by(Ecosystem_sub ,newdate) %>%
summarise(meantemp=mean(meansst,na.rm=TRUE)),
aes(newdate,meantemp,col='mean.color'),size=0.5) +
geom_line(data=newdat %>% filter(year2==current.year),
aes(newdate,meansst,color='current.year.color'),size=0.95) +
facet_wrap(~Ecosystem_sub ,ncol=2) +
scale_color_manual(name="",
breaks=c('current.year.color','last.year.color','mygrey','mean.color'),
values=c('current.year.color'=current.year.color,'last.year.color'=last.year.color,'mygrey'=SeagrassGreen4,'mean.color'=mean.color),
labels=c(current.year,last.year,paste0('1985-',last.year-1),mean.lab)) +
ylab("Mean Sea Surface Temperature (C)") +
xlab("") +
scale_x_date(date_breaks="1 month",
date_labels = "%b",
expand = c(0.025,0.025)) +
theme(legend.position=c(mylegx,mylegy),
legend.text = element_text(size=8,family="sans"),
legend.background = element_blank(),
legend.title = element_blank(),
strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans"),
axis.text = element_text(size=10,family="sans"),
panel.border=element_rect(colour="black",size=0.75),
# NA colours hide 2 of every 3 monthly tick labels to reduce crowding.
axis.text.x=element_text(color=c("black",NA,NA,"black",NA,NA,"black",NA,NA,"black",NA,NA,NA)),
legend.key.size = unit(0.35,"cm"),
plot.margin=unit(c(0.65,0,0.65,0),"cm"))
dev.off()
# Figure 5. Time series decomposition
#devtools::install_github("brisneve/ggplottimeseries")
library(ggplottimeseries)
# The following could all be combined but I have left it separated out to be more transparent.
df1 <- newdat %>%
filter(Ecosystem_sub=="Southeastern Bering Sea")
# Decompose the southeastern Bering Sea series, setting the frequency as 365.25 because we have daily data with leap years.
# (Original comment said "EGOA" -- a leftover from the Gulf of Alaska version of this script.)
df1 <- dts1(df1$date,df1$meansst,365.25, type = "additive") %>%
mutate(Ecosystem_sub="Southeastern Bering Sea",
year=year(date))
# Repeat for the northern Bering Sea (original comment said "wgoa").
df2 <- newdat %>%
filter(Ecosystem_sub=="Northern Bering Sea")
df2 <- dts1(df2$date,df2$meansst,365.25, type = "additive") %>%
mutate(Ecosystem_sub="Northern Bering Sea",
year=year(date))
# Combine the time series decompositions for each area and reorder the factors.
df <- df1 %>%
bind_rows(df2)
# Create the horizontal mean and sd lines for the 30 year baseline period.
dfmean <- df %>%
group_by(Ecosystem_sub) %>%
summarise(meantrend=mean(trend[between(year,1985,2014)],na.rm=TRUE),
sdtrend=sd(trend[between(year,1985,2014)],na.rm=TRUE))
png("SST_ESR/2021/EBS/Watson_Fig5.png",width=7,height=5,units="in",res=300)
df %>%
ggplot(aes(x = date, y = trend)) +
geom_line() +
geom_hline(data=dfmean,aes(yintercept=meantrend),linetype=2) +
geom_hline(data=dfmean,aes(yintercept=meantrend+sdtrend),linetype=2,color="red") +
geom_hline(data=dfmean,aes(yintercept=meantrend-sdtrend),linetype=2,color="red") +
facet_wrap(~Ecosystem_sub) +
theme(strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans"),
axis.text = element_text(size=10,family="sans"),
panel.border=element_rect(colour="black",size=0.5),
plot.margin=unit(c(0.65,0,0.65,0),"cm")) +
ylab("Sea surface temperature (C)") +
xlab("Date")
dev.off()
| /SST_ESR/2021/EBS/MHW_SST_Bering_2021.R | no_license | jordanwatson/EcosystemStatusReports | R | false | false | 12,507 | r | # Create the 5 SST figures for the Bering Sea ESR.
# Setup for the Bering Sea ESR SST figures: packages, 508-compliant NOAA
# branding colors, a shared ggplot theme, and the daily mean-SST series
# pulled from the AKFIN/PSMFC web service (requires network access).
library(tidyverse)
library(doParallel)
library(heatwaveR)
library(lubridate)
library(viridis)
library(cowplot)
# Load 508 compliant NOAA colors
OceansBlue1='#0093D0'
OceansBlue2='#0055A4' # rebecca dark blue
CoralRed1='#FF4438'
Crustacean1='#FF8300'
SeagrassGreen1='#93D500'
SeagrassGreen4='#D0D0D0' # This is just grey
UrchinPurple1='#7F7FFF'
WavesTeal1='#1ECAD3'
# Shared theme used by most figures below. Note it sets legend.position to
# c(0.6,0.7), which overrides any earlier theme() call in the same plot.
mytheme <- theme(strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans",color="black"),
axis.text.y = element_text(size=10,family="sans",color="black"),
axis.text.x = element_text(size=9,family="sans",color="black",hjust=0.75),
panel.border=element_rect(colour="black",fill=NA,size=0.5),
panel.background = element_blank(),
plot.margin=unit(c(0.65,0,0.65,0),"cm"),
legend.position=c(0.6,0.7),
legend.background = element_blank(),
legend.key.size = unit(1,"line"))
# Daily mean SST (1985-01-01 to 2021-12-31) for the two EBS subregions.
# newdate maps each observation onto a dummy Sept-Aug year (1999-2000) so all
# "heat years" share a common axis; year2 assigns Sept-Dec dates to the
# following calendar year. NOTE(review): format("%Y-%m-%d") evaluates to the
# string "%Y-%m-%d" passed positionally to as.Date -- it works, but
# format="%Y-%m-%d" would be clearer.
newdat <- httr::content(httr::GET('https://apex.psmfc.org/akfin/data_marts/akmp/ecosystem_sub_crw_avg_sst?ecosystem_sub=Southeastern%20Bering%20Sea,Northern%20Bering%20Sea&start_date=19850101&end_date=20211231'), type = "application/json") %>%
bind_rows %>%
mutate(date=as_date(READ_DATE)) %>%
data.frame %>%
dplyr::select(date,meansst=MEANSST,Ecosystem_sub=ECOSYSTEM_SUB) %>%
mutate(doy=yday(date),
year=year(date),
month=month(date),
day=day(date),
newdate=as.Date(ifelse(month>=9,as.character(as.Date(paste("1999",month,day,sep="-"),format="%Y-%m-%d")),
as.character(as.Date(paste("2000",month,day,sep="-"),format="%Y-%m-%d"))),format("%Y-%m-%d")),
year2=ifelse(month>=9,year+1,year)) %>%
arrange(date)
# Figure 1; Anomaly of cumulative SST for years that go from Sept - Aug
# Cumulative annual SST per "heat year" (Sept-Aug, indexed by year2), expressed
# as an anomaly relative to each subregion's 1986-2015 baseline mean.
mymean <- newdat %>%
filter(!year2%in%c(1985)) %>% # 1985 is presumably a partial Sept-Aug year, so drop it
group_by(year2,Ecosystem_sub) %>%
arrange(newdate) %>%
summarise(cumheat=sum(meansst)) %>% # total of daily mean SSTs per heat year and subregion
group_by(Ecosystem_sub) %>%
mutate(meanheat=mean(cumheat[between(year2,1986,2015)]), # 30-yr baseline mean
sdheat=sd(cumheat[between(year2,1986,2015)]), # baseline SD, drawn as +/- 1 SD reference lines
anomaly=cumheat-meanheat)
png("SST_ESR/2021/EBS/Watson_Fig1.png",width=6,height=3.375,units="in",res=300)
mymean %>%
ggplot(aes(year2,anomaly)) +
geom_bar(stat="identity",fill=OceansBlue2) +
geom_hline(aes(yintercept=0),linetype=2) +
geom_hline(aes(yintercept=sdheat),linetype=2) + # +1 SD of the baseline period
geom_hline(aes(yintercept=-sdheat),linetype=2) + # -1 SD of the baseline period
facet_wrap(~Ecosystem_sub) +
mytheme +
scale_x_continuous(expand=c(0.01,0.75)) +
xlab("") +
ylab("Cumulative Annual SST Anomaly (°C)") +
theme(plot.margin=unit(c(0.15,0.25,0.05,0),"cm"))
dev.off()
# Create Figure 2. Total cumulative sea surface temperature (sum of daily temperatures) for each year, apportioned
# by season: summer (Jun–Aug), fall (Sept–Nov), winter (Dec–Feb), spring (Mar–May). Negative
# values are the result of sea surface temperatures below zero
png("SST_ESR/2021/EBS/Watson_Fig2.png",width=6,height=4,units="in",res=300)
newdat %>%
filter(year2>1985) %>%
mutate(Season=case_when(
month%in%c(9,10,11)~"Fall",
month%in%c(12,1,2)~"Winter",
month%in%c(3,4,5)~"Spring",
month%in%c(6,7,8)~"Summer")) %>%
data.frame %>%
mutate(Season=factor(Season),
Season=fct_relevel(Season,"Fall","Winter","Spring","Summer")) %>%
group_by(year2,Ecosystem_sub,Season) %>%
summarise(cumheat=sum(meansst)) %>%
data.frame %>%
# Releveled a second time so the stack/legend order puts Summer first.
mutate(Season=fct_relevel(Season,"Summer","Fall","Winter","Spring")) %>%
ggplot(aes(year2,cumheat,fill=Season)) +
geom_bar(stat="identity") +
#geom_hline(data=mymean,aes(yintercept=meanheat),linetype=2) +
scale_fill_manual(name="",labels=c("Summer","Fall","Winter","Spring"),values=c(OceansBlue2,Crustacean1,UrchinPurple1,WavesTeal1)) +
facet_wrap(~Ecosystem_sub) +
mytheme +
scale_x_continuous(expand=c(0.01,0.75)) +
xlab("") +
ylab("Total Annual Cumulative Sea Surface Temperature (°C)") +
theme(plot.margin=unit(c(0.15,0.25,0.05,0),"cm"),
legend.position=c(0.1,0.9))
dev.off()
# Figure 3. Marine heatwaves in the southeastern and northern Bering Sea since September 2018.
# Uses {heatwaveR}: ts2clm() builds a daily climatology/threshold over the
# 1985-09-01 to 2015-08-31 baseline, and detect_event()$clim returns the daily
# climatology table used for plotting; the two subregions are stacked.
mhw <- (detect_event(ts2clm(newdat %>%
filter(Ecosystem_sub=="Southeastern Bering Sea") %>%
rename(t=date,temp=meansst) %>%
arrange(t), climatologyPeriod = c("1985-09-01", "2015-08-31"))))$clim %>%
mutate(region="Southeastern Bering Sea") %>%
bind_rows((detect_event(ts2clm(newdat %>%
filter(Ecosystem_sub=="Northern Bering Sea") %>%
rename(t=date,temp=meansst) %>%
arrange(t), climatologyPeriod = c("1985-09-01", "2015-08-31"))))$clim %>%
mutate(region="Northern Bering Sea"))
# Escalating intensity thresholds at 2x/3x/4x the climatology-to-threshold gap,
# i.e. the Moderate/Strong/Severe/Extreme heatwave-category convention.
clim_cat <- mhw %>%
#mutate(region=fct_relevel(region,"Western Gulf of Alaska")) %>%
group_by(region) %>%
dplyr::mutate(diff = thresh - seas,
thresh_2x = thresh + diff,
thresh_3x = thresh_2x + diff,
thresh_4x = thresh_3x + diff,
year=year(t))
# Set line colours
lineColCat <- c(
"Temperature" = "black",
"Climatology" = "gray20",
"Moderate" = "gray60",
"Strong" = "gray60",
"Severe" = "gray60",
"Extreme" = "gray60"
)
# Flame fill colours, one per severity category.
fillColCat <- c(
"Moderate" = "#ffc866",
"Strong" = "#ff6900",
"Severe" = "#9e0000",
"Extreme" = "#2d0000"
)
#png("SST_ESR/2020/EBS/Watson_Fig5_010421.png",width=7,height=5,units="in",res=300)
png("SST_ESR/2021/EBS/Watson_Fig3.png",width=7,height=5,units="in",res=300)
ggplot(data = clim_cat %>% filter(t>=as.Date("2018-09-01")), aes(x = t, y = temp)) +
geom_flame(aes(y2 = thresh, fill = "Moderate")) +
geom_flame(aes(y2 = thresh_2x, fill = "Strong")) +
geom_flame(aes(y2 = thresh_3x, fill = "Severe")) +
geom_flame(aes(y2 = thresh_4x, fill = "Extreme")) +
geom_line(aes(y = thresh_2x, col = "Strong"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = thresh_3x, col = "Severe"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = thresh_4x, col = "Extreme"), size = 0.5, linetype = "dotted") +
geom_line(aes(y = seas, col = "Climatology"), size = 0.5) +
geom_line(aes(y = thresh, col = "Moderate"), size = 0.5,linetype= "dotted") +
geom_line(aes(y = temp, col = "Temperature"), size = 0.5) +
scale_colour_manual(name = NULL, values = lineColCat,
breaks = c("Temperature", "Climatology", "Moderate",
"Strong", "Severe", "Extreme")) +
# FIX: guide = FALSE was deprecated in ggplot2 3.3.4; guide = "none" is the
# supported (and long-accepted) way to suppress the fill legend.
scale_fill_manual(name = NULL, values = fillColCat, guide = "none") +
scale_x_date(date_labels = "%b %Y",expand=c(0.01,0)) +
guides(colour = guide_legend(override.aes = list(linetype = c("solid", "solid", "dotted",
"dotted", "dotted", "dotted"),
size = c(0.6, 0.7, 0.7, 0.7, 0.7, 0.7)),
ncol=6)) +
labs(y = "Sea Surface Temperature (°C)", x = NULL) +
# NOTE(review): legend.position="none" here is overridden by mytheme below
# (which sets legend.position=c(0.6,0.7)), so the colour legend still draws.
theme(legend.position="none") +
facet_wrap(~region,ncol=1,scales="free_y") +
mytheme +
theme(#legend.position="top",
legend.key=element_blank(),
legend.text = element_text(size=10),
axis.title.x=element_blank(),
legend.margin=margin(l=-2.75,t = -8.5, unit='cm'),
plot.margin=unit(c(0.65,0,0.0,0),"cm"))
dev.off()
#-------------------------------------------------------------------------------------
# Figure 4. Mean SST for the northern (left) and southeastern (right) Bering Sea shelves.
# (No plotting function is actually defined below; this section builds the plot inline.)
# Load 508 compliant NOAA colors
OceansBlue1='#0093D0'
OceansBlue2='#0055A4' # dark blue
Crustacean1='#FF8300'
SeagrassGreen4='#D0D0D0' # This is just grey
# Assign colors to different time series.
current.year.color <- "black"
last.year.color <- OceansBlue1
mean.color <- UrchinPurple1
# Set default plot theme
theme_set(theme_cowplot())
# Specify legend position coordinates
mylegx <- 0.625
mylegy <- 0.865
current.year <- max(newdat$year)
last.year <- current.year-1
mean.years <- 1985:2014 # 30-yr climatology window for the mean line
mean.lab <- "Mean 1985-2014"
png("SST_ESR/2021/EBS/Watson_Fig4.png",width=7,height=5,units="in",res=300)
# Each geom_line maps a literal key (e.g. 'mygrey') to colour; the actual
# colours and legend labels are resolved in scale_color_manual below.
ggplot() +
geom_line(data=newdat %>% filter(year2<last.year),
aes(newdate,meansst,group=factor(year2),col='mygrey'),size=0.3) +
geom_line(data=newdat %>% filter(year2==last.year),
aes(newdate,meansst,color='last.year.color'),size=0.75) +
geom_line(data=newdat %>%
filter(year%in%mean.years) %>%
group_by(Ecosystem_sub ,newdate) %>%
summarise(meantemp=mean(meansst,na.rm=TRUE)),
aes(newdate,meantemp,col='mean.color'),size=0.5) +
geom_line(data=newdat %>% filter(year2==current.year),
aes(newdate,meansst,color='current.year.color'),size=0.95) +
facet_wrap(~Ecosystem_sub ,ncol=2) +
scale_color_manual(name="",
breaks=c('current.year.color','last.year.color','mygrey','mean.color'),
values=c('current.year.color'=current.year.color,'last.year.color'=last.year.color,'mygrey'=SeagrassGreen4,'mean.color'=mean.color),
labels=c(current.year,last.year,paste0('1985-',last.year-1),mean.lab)) +
ylab("Mean Sea Surface Temperature (C)") +
xlab("") +
scale_x_date(date_breaks="1 month",
date_labels = "%b",
expand = c(0.025,0.025)) +
theme(legend.position=c(mylegx,mylegy),
legend.text = element_text(size=8,family="sans"),
legend.background = element_blank(),
legend.title = element_blank(),
strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans"),
axis.text = element_text(size=10,family="sans"),
panel.border=element_rect(colour="black",size=0.75),
# NA colours hide 2 of every 3 monthly tick labels to reduce crowding.
axis.text.x=element_text(color=c("black",NA,NA,"black",NA,NA,"black",NA,NA,"black",NA,NA,NA)),
legend.key.size = unit(0.35,"cm"),
plot.margin=unit(c(0.65,0,0.65,0),"cm"))
dev.off()
# Figure 5. Time series decomposition
#devtools::install_github("brisneve/ggplottimeseries")
library(ggplottimeseries)
# The following could all be combined but I have left it separated out to be more transparent.
df1 <- newdat %>%
filter(Ecosystem_sub=="Southeastern Bering Sea")
# Decompose the southeastern Bering Sea series, setting the frequency as 365.25 because we have daily data with leap years.
# (Original comment said "EGOA" -- a leftover from the Gulf of Alaska version of this script.)
df1 <- dts1(df1$date,df1$meansst,365.25, type = "additive") %>%
mutate(Ecosystem_sub="Southeastern Bering Sea",
year=year(date))
# Repeat for the northern Bering Sea (original comment said "wgoa").
df2 <- newdat %>%
filter(Ecosystem_sub=="Northern Bering Sea")
df2 <- dts1(df2$date,df2$meansst,365.25, type = "additive") %>%
mutate(Ecosystem_sub="Northern Bering Sea",
year=year(date))
# Combine the time series decompositions for each area and reorder the factors.
df <- df1 %>%
bind_rows(df2)
# Create the horizontal mean and sd lines for the 30 year baseline period.
dfmean <- df %>%
group_by(Ecosystem_sub) %>%
summarise(meantrend=mean(trend[between(year,1985,2014)],na.rm=TRUE),
sdtrend=sd(trend[between(year,1985,2014)],na.rm=TRUE))
png("SST_ESR/2021/EBS/Watson_Fig5.png",width=7,height=5,units="in",res=300)
df %>%
ggplot(aes(x = date, y = trend)) +
geom_line() +
geom_hline(data=dfmean,aes(yintercept=meantrend),linetype=2) +
geom_hline(data=dfmean,aes(yintercept=meantrend+sdtrend),linetype=2,color="red") +
geom_hline(data=dfmean,aes(yintercept=meantrend-sdtrend),linetype=2,color="red") +
facet_wrap(~Ecosystem_sub) +
theme(strip.text = element_text(size=10,color="white",family="sans",face="bold"),
strip.background = element_rect(fill=OceansBlue2),
axis.title = element_text(size=10,family="sans"),
axis.text = element_text(size=10,family="sans"),
panel.border=element_rect(colour="black",size=0.5),
plot.margin=unit(c(0.65,0,0.65,0),"cm")) +
ylab("Sea surface temperature (C)") +
xlab("Date")
dev.off()
|
# Fit an elastic-net (alpha = 0.4) gaussian regression to the endometrium
# training set with 10-fold cross-validation, and append the fitted glmnet
# path to a text log.
library(glmnet)
# Column 1 = response; columns 4..ncol = predictors (columns 2-3 are skipped;
# presumably identifiers -- confirm against the CSV header).
mydata = read.table("../../../../TrainingSet/FullSet/Lasso/endometrium.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123) # reproducible CV fold assignment
# NOTE(review): alpha=0.4 but the output file is named *_050.txt -- verify the
# intended elastic-net mixing parameter.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.4,family="gaussian",standardize=FALSE)
sink('./endometrium_050.txt',append=TRUE) # redirect console output to the log file
print(glm$glmnet.fit)
sink() # restore normal console output
| /Model/EN/Lasso/endometrium/endometrium_050.R | no_license | esbgkannan/QSMART | R | false | false | 354 | r | library(glmnet)
# Cross-validated elastic-net fit (alpha = 0.4, gaussian, MSE loss) on the
# endometrium training set; the fitted coefficient path is appended to a log.
endo <- read.table(
  "../../../../TrainingSet/FullSet/Lasso/endometrium.csv",
  header = TRUE,
  sep = ","
)
# Column 1 is the response; predictors start at column 4.
predictors <- as.matrix(endo[, 4:ncol(endo)])
response <- as.matrix(endo[, 1])
set.seed(123)  # reproducible fold assignment
cv_fit <- cv.glmnet(
  predictors, response,
  nfolds = 10,
  type.measure = "mse",
  alpha = 0.4,
  family = "gaussian",
  standardize = FALSE
)
# Append the glmnet path summary to the log file, then restore console output.
sink('./endometrium_050.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
# aRt_maps
# Build an interactive map of two Yosemite reference points, optionally sketch
# a polygon with {mapedit}, and download a DEM tile of the area with {elevatr}.
library(mapview)
library(mapedit)
library(sf)
library(tidyverse)
# Make Some Points or Draw a Polygon --------------------------------------
# here we make points manually
yose <- tibble("site"=c("yose_valley_head", "clouds_rest"),
"lon"= c(-119.71047, -119.46836),
"lat" = c(37.65552, 37.80483))
# make spatial with sf (lon/lat coordinates, EPSG:4326)
yose <- st_as_sf(yose, coords = c("lon","lat"), crs=4326)
# and take a look using mapview
(m1 <- mapview(yose))
# Use MAPEDIT HERE!
# here we can draw a polygon around the area we are interested in with mapedit
# this opens a map and we draw some points or a polygon for what we want and click "Done!"
# NOTE: drawFeatures() is interactive, so this script must be run by hand.
yose_poly <- drawFeatures(m1) # click Done!
class(yose_poly) # now have an sf feature
# look at plot
m1 + yose_poly
# Elevatr -----------------------------------------------------------------
library(elevatr)
# get raster of YOSEMITE VALLEY
# z = 12 is the tile zoom (resolution) level; the result is clipped to the
# bounding box of the two points. Requires network access.
ynp_elev <- elevatr::get_elev_raster(yose, z = 12,
clip = "bbox",
src="aws")
# tried z=14 and it took forever for rayshader stuff...this works better and still gives good resolution
# take a look at the extent using mapview
# do this first to make sure tile in right place
viewExtent(ynp_elev) + mapview(yose)
# then can view actual raster
mapview(ynp_elev)
# take look using cubeview (best with raster stack/time)
cubeview::cubeview(ynp_elev)
# Using {stars} package for ggplot ----------------------------------------
# Render the DEM with ggplot via geom_stars, with labeled landmarks and
# (optionally) embedded photos; gg2 is a photo-free version for rayshader.
library(stars)
ynp_elev_df <- st_as_stars(ynp_elev) # make "stars" raster
# make some points for labels
sites <- c("El Capitan", "Half Dome", "Clouds Rest")
lats <- c(37.7339, 37.7459, 37.7677)
lons <- c(-119.6377, -119.5332, -119.4894)
yose_sites <- tibble(site=sites, lat=lats, lon=lons)
# ggplot
library(viridis)
gg1 <- ggplot() +
geom_stars(data = ynp_elev_df) +
# add the corner points
geom_sf(data=yose, color="orange", pch=16, size=5)+
# site labels + points (nudged so labels clear the markers)
ggrepel::geom_label_repel(data=yose_sites, aes(x=lon, y=lat, label=site), nudge_y = c(-.02, -.02, .02), min.segment.length = .2)+
#geom_point(data=yose_sites, aes(x=lon, y=lat), color="maroon", pch=16, size=3, alpha=0.5)+
#ADD PICTURESSSSSSSSS BECAUSE WE CAN!
# Wikimedia thumbnails drawn at each landmark; requires network access.
ggimage::geom_image(data = yose_sites[1,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d0/El_Capitan_Yosemite_72.jpg/345px-El_Capitan_Yosemite_72.jpg", size=0.1)+
ggimage::geom_image(data = yose_sites[2,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/2/25/Half_dome_yosemite_national_park.jpg/640px-Half_dome_yosemite_national_park.jpg", size=.12)+
ggimage::geom_image(data = yose_sites[3,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Yosemite_Valley%2C_as_seen_from_Cloud%27s_Rest.jpg/640px-Yosemite_Valley%2C_as_seen_from_Cloud%27s_Rest.jpg", size=.11)+
#coord_equal() +
coord_sf() +
theme_void() +
theme(legend.position = "bottom", legend.direction = "horizontal")+
scale_fill_viridis("Elevation (m)")
# print it!
gg1 # This is cool
# Try 3d GGplot With Rayshader --------------------------------------------
# try with rayshader:
library(rayshader)
# need to drop photos (plot_gg cannot extrude the embedded images)
gg2 <- ggplot() +
geom_stars(data = ynp_elev_df) +
# add the corner points
geom_sf(data=yose, color="orange", pch=16, size=5)+
# site labels + points
#ggrepel::geom_label_repel(data=yose_sites, aes(x=lon, y=lat, label=site), nudge_y = c(-.02, -.02, .02), min.segment.length = .2)+
geom_point(data=yose_sites, aes(x=lon, y=lat), color="maroon", pch=16, size=3, alpha=0.9)+
coord_sf() +
theme_void() +
theme(legend.position = "bottom", legend.direction = "horizontal")+
scale_fill_viridis("Elevation (m)")
gg2 # get a warning but works
# now make it 3d! (opens an rgl window)
plot_gg(gg2, multicore=TRUE,width=6,height=5,scale=250,
windowsize = c(1000, 800))
# Rayshader ---------------------------------------------------------------
# Shade the raw elevation matrix directly with {rayshader}: a flat hillshade,
# a custom color texture, then an interactive 3D rgl scene.
# https://wcmbishop.github.io/rayshader-demo/
library(rayshader)
ynp_elev_rs <- raster_to_matrix(ynp_elev) # rayshader wants a plain matrix
# flat rayshade
ynp_elev_rs %>%
sphere_shade(sunangle = 45, colorintensity = 1.1,
texture="imhof4") %>%
plot_map()
# Custom Colors -----------------------------------------------------------
# custom colors!
coltexture1 <- create_texture(
lightcolor = "gray90",
shadowcolor = "gray30",
rightcolor = "palegreen4",
leftcolor = "seashell4",
centercolor = "darkslateblue")
# replot w colors and more color intensity
ynp_elev_rs %>%
sphere_shade(sunangle = 45, colorintensity = 2,
texture = coltexture1) %>%
plot_map()
# 3D RayShade -------------------------------------------------------------
# make a 3d option (opens an rgl window; zscale exaggerates relief)
ynp_elev_rs %>%
sphere_shade(texture = "imhof2",
colorintensity = 1.1) %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
plot_3d(ynp_elev_rs, zscale = 8,
fov = 0, theta = 300,
zoom = 0.85, phi = 45,
windowsize = c(1000, 800))
Sys.sleep(0.2) # give the rgl window time to render before snapshotting
render_snapshot(clear=TRUE)
# Hi Res Option -----------------------------------------------------------
# takes forever
# ynp_elev_rs %>%
# sphere_shade(texture = "imhof2", colorintensity = 1.1) %>%
# add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# plot_3d(ynp_elev_rs, zscale = 8,
# fov = 0, theta = 300,
# zoom = 0.85, phi = 45,
# windowsize = c(1000, 800))
# Sys.sleep(0.2)
#render_highquality() # don't do this unless you have lots of time, it made my computer really unhappy
# Adding Labels -----------------------------------------------------------
# 3D scene with ray-traced shadows, floating text labels for landmarks,
# snapshot/movie export, and depth-of-field experiments.
ynp_elev_rs %>%
sphere_shade(texture = coltexture1) %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# tracing takes a few moments
add_shadow(ray_shade(ynp_elev_rs,
multicore = TRUE, # much faster
zscale = 3), 0.5) %>%
#add_shadow(ambient_shade(ynp_elev_rs), 0) %>%
plot_3d(ynp_elev_rs, zscale = 10, fov = 0,
theta = 300, zoom = 0.75, phi = 45,
windowsize = c(1000, 800))
Sys.sleep(0.2) # let the rgl window finish drawing first
# labels (x/y are matrix indices into ynp_elev_rs; z is label height)
render_label(ynp_elev_rs, x = 460, y = 570, z = 20000, zscale = 50, text = "El Cap", textsize = 2,
textcolor = "orange",
linecolor = "gray15",
linewidth = 4, clear_previous = TRUE)
render_label(ynp_elev_rs, x = 1030, y = 660, z=25000,
zscale = 50, text = "Half Dome",
textcolor = "orange",
linecolor = "gray20",
dashed = FALSE,
textsize = 2, linewidth = 4,
clear_previous = FALSE)
Sys.sleep(0.2)
# to save current view as static version
render_snapshot(clear=FALSE)
Sys.sleep(0.2)
# make a movie! (default orbit, then a customized camera path)
render_movie(filename = "yosemite_default.mp4",
frames = 60,
title_text = "Yosemite Valley")
render_movie(filename = "yosemite_custom.mp4",
frames = 180, phi = 33,
zoom = .2,
theta = 300, fov = 30,
title_text = "Yosemite Valley")
# Weird Effects -----------------------------------------------------------
# Same scene but with a close, wide-angle camera for depth-of-field renders.
ynp_elev_rs %>%
sphere_shade(texture = "imhof2") %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# tracing takes a few moments
add_shadow(ray_shade(ynp_elev_rs,
multicore = TRUE, # much faster
zscale = 3), 0.5) %>%
#add_shadow(ambient_shade(ynp_elev_rs), 0) %>%
plot_3d(ynp_elev_rs, zscale = 10, fov = 30,
theta = 300, zoom = 0.3, phi = 25,
windowsize = c(1000, 800))
Sys.sleep(0.2)
# labels
render_label(ynp_elev_rs, x = 460, y = 570, z = 13000, zscale = 50, text = "El Cap", textsize = 2,
textcolor = "gray15",
linecolor = "gray15",
linewidth = 4, clear_previous = TRUE)
render_label(ynp_elev_rs, x = 1030, y = 660, z=15000,
zscale = 50, text = "Half Dome",
textcolor = "gray10",
linecolor = "gray10",
dashed = FALSE,
textsize = 2, linewidth = 4,
clear_previous = FALSE)
# check field of view focus (preview shows the focal plane without rendering)
render_depth(preview_focus = TRUE,
focus = 0.53, focallength = 100)
# on half dome
render_depth(preview_focus = FALSE, focus = 0.73,
focallength = 300, clear = FALSE)
# on el cap
render_depth(preview_focus = FALSE, focus = 0.53,
focallength = 100, clear = FALSE)
#render_snapshot(clear=TRUE)
# close out (shut the rgl device)
rgl::rgl.close()
| /code/aRt_maps.R | no_license | ryanpeek/ayso_player_tracker | R | false | false | 8,662 | r | # aRt_maps
# Build an interactive map of two Yosemite reference points, optionally sketch
# a polygon with {mapedit}, and download a DEM tile of the area with {elevatr}.
library(mapview)
library(mapedit)
library(sf)
library(tidyverse)
# Make Some Points or Draw a Polygon --------------------------------------
# here we make points manually
yose <- tibble("site"=c("yose_valley_head", "clouds_rest"),
"lon"= c(-119.71047, -119.46836),
"lat" = c(37.65552, 37.80483))
# make spatial with sf (lon/lat coordinates, EPSG:4326)
yose <- st_as_sf(yose, coords = c("lon","lat"), crs=4326)
# and take a look using mapview
(m1 <- mapview(yose))
# Use MAPEDIT HERE!
# here we can draw a polygon around the area we are interested in with mapedit
# this opens a map and we draw some points or a polygon for what we want and click "Done!"
# NOTE: drawFeatures() is interactive, so this script must be run by hand.
yose_poly <- drawFeatures(m1) # click Done!
class(yose_poly) # now have an sf feature
# look at plot
m1 + yose_poly
# Elevatr -----------------------------------------------------------------
library(elevatr)
# get raster of YOSEMITE VALLEY
# z = 12 is the tile zoom (resolution) level; the result is clipped to the
# bounding box of the two points. Requires network access.
ynp_elev <- elevatr::get_elev_raster(yose, z = 12,
clip = "bbox",
src="aws")
# tried z=14 and it took forever for rayshader stuff...this works better and still gives good resolution
# take a look at the extent using mapview
# do this first to make sure tile in right place
viewExtent(ynp_elev) + mapview(yose)
# then can view actual raster
mapview(ynp_elev)
# take look using cubeview (best with raster stack/time)
cubeview::cubeview(ynp_elev)
# Using {stars} package for ggplot ----------------------------------------
# Render the DEM with ggplot via geom_stars, with labeled landmarks and
# (optionally) embedded photos; gg2 is a photo-free version for rayshader.
library(stars)
ynp_elev_df <- st_as_stars(ynp_elev) # make "stars" raster
# make some points for labels
sites <- c("El Capitan", "Half Dome", "Clouds Rest")
lats <- c(37.7339, 37.7459, 37.7677)
lons <- c(-119.6377, -119.5332, -119.4894)
yose_sites <- tibble(site=sites, lat=lats, lon=lons)
# ggplot
library(viridis)
gg1 <- ggplot() +
geom_stars(data = ynp_elev_df) +
# add the corner points
geom_sf(data=yose, color="orange", pch=16, size=5)+
# site labels + points (nudged so labels clear the markers)
ggrepel::geom_label_repel(data=yose_sites, aes(x=lon, y=lat, label=site), nudge_y = c(-.02, -.02, .02), min.segment.length = .2)+
#geom_point(data=yose_sites, aes(x=lon, y=lat), color="maroon", pch=16, size=3, alpha=0.5)+
#ADD PICTURESSSSSSSSS BECAUSE WE CAN!
# Wikimedia thumbnails drawn at each landmark; requires network access.
ggimage::geom_image(data = yose_sites[1,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/d/d0/El_Capitan_Yosemite_72.jpg/345px-El_Capitan_Yosemite_72.jpg", size=0.1)+
ggimage::geom_image(data = yose_sites[2,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/2/25/Half_dome_yosemite_national_park.jpg/640px-Half_dome_yosemite_national_park.jpg", size=.12)+
ggimage::geom_image(data = yose_sites[3,], aes(x=lon, y=lat), image="https://upload.wikimedia.org/wikipedia/commons/thumb/5/5b/Yosemite_Valley%2C_as_seen_from_Cloud%27s_Rest.jpg/640px-Yosemite_Valley%2C_as_seen_from_Cloud%27s_Rest.jpg", size=.11)+
#coord_equal() +
coord_sf() +
theme_void() +
theme(legend.position = "bottom", legend.direction = "horizontal")+
scale_fill_viridis("Elevation (m)")
# print it!
gg1 # This is cool
# Try 3d GGplot With Rayshader --------------------------------------------
# try with rayshader:
library(rayshader)
# need to drop photos (plot_gg cannot extrude the embedded images)
gg2 <- ggplot() +
geom_stars(data = ynp_elev_df) +
# add the corner points
geom_sf(data=yose, color="orange", pch=16, size=5)+
# site labels + points
#ggrepel::geom_label_repel(data=yose_sites, aes(x=lon, y=lat, label=site), nudge_y = c(-.02, -.02, .02), min.segment.length = .2)+
geom_point(data=yose_sites, aes(x=lon, y=lat), color="maroon", pch=16, size=3, alpha=0.9)+
coord_sf() +
theme_void() +
theme(legend.position = "bottom", legend.direction = "horizontal")+
scale_fill_viridis("Elevation (m)")
gg2 # get a warning but works
# now make it 3d! (opens an rgl window)
plot_gg(gg2, multicore=TRUE,width=6,height=5,scale=250,
windowsize = c(1000, 800))
# Rayshader ---------------------------------------------------------------
# Shade the raw elevation matrix directly with {rayshader}: a flat hillshade,
# a custom color texture, then an interactive 3D rgl scene.
# https://wcmbishop.github.io/rayshader-demo/
library(rayshader)
ynp_elev_rs <- raster_to_matrix(ynp_elev) # rayshader wants a plain matrix
# flat rayshade
ynp_elev_rs %>%
sphere_shade(sunangle = 45, colorintensity = 1.1,
texture="imhof4") %>%
plot_map()
# Custom Colors -----------------------------------------------------------
# custom colors!
coltexture1 <- create_texture(
lightcolor = "gray90",
shadowcolor = "gray30",
rightcolor = "palegreen4",
leftcolor = "seashell4",
centercolor = "darkslateblue")
# replot w colors and more color intensity
ynp_elev_rs %>%
sphere_shade(sunangle = 45, colorintensity = 2,
texture = coltexture1) %>%
plot_map()
# 3D RayShade -------------------------------------------------------------
# make a 3d option (opens an rgl window; zscale exaggerates relief)
ynp_elev_rs %>%
sphere_shade(texture = "imhof2",
colorintensity = 1.1) %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
plot_3d(ynp_elev_rs, zscale = 8,
fov = 0, theta = 300,
zoom = 0.85, phi = 45,
windowsize = c(1000, 800))
Sys.sleep(0.2) # give the rgl window time to render before snapshotting
render_snapshot(clear=TRUE)
# Hi Res Option -----------------------------------------------------------
# takes forever
# ynp_elev_rs %>%
# sphere_shade(texture = "imhof2", colorintensity = 1.1) %>%
# add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# plot_3d(ynp_elev_rs, zscale = 8,
# fov = 0, theta = 300,
# zoom = 0.85, phi = 45,
# windowsize = c(1000, 800))
# Sys.sleep(0.2)
#render_highquality() # don't do this unless you have lots of time, it made my computer really unhappy
# Adding Labels -----------------------------------------------------------
ynp_elev_rs %>%
sphere_shade(texture = coltexture1) %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# tracing takes a few moments
add_shadow(ray_shade(ynp_elev_rs,
multicore = TRUE, # much faster
zscale = 3), 0.5) %>%
#add_shadow(ambient_shade(ynp_elev_rs), 0) %>%
plot_3d(ynp_elev_rs, zscale = 10, fov = 0,
theta = 300, zoom = 0.75, phi = 45,
windowsize = c(1000, 800))
Sys.sleep(0.2)
# labels
render_label(ynp_elev_rs, x = 460, y = 570, z = 20000, zscale = 50, text = "El Cap", textsize = 2,
textcolor = "orange",
linecolor = "gray15",
linewidth = 4, clear_previous = TRUE)
render_label(ynp_elev_rs, x = 1030, y = 660, z=25000,
zscale = 50, text = "Half Dome",
textcolor = "orange",
linecolor = "gray20",
dashed = FALSE,
textsize = 2, linewidth = 4,
clear_previous = FALSE)
Sys.sleep(0.2)
# to save current view as static version
render_snapshot(clear=FALSE)
Sys.sleep(0.2)
# make a movie!
render_movie(filename = "yosemite_default.mp4",
frames = 60,
title_text = "Yosemite Valley")
render_movie(filename = "yosemite_custom.mp4",
frames = 180, phi = 33,
zoom = .2,
theta = 300, fov = 30,
title_text = "Yosemite Valley")
# Weird Effects -----------------------------------------------------------
# Same scene with a wide field of view (fov = 30) and a tight zoom, used
# below to demonstrate depth-of-field rendering with render_depth().
ynp_elev_rs %>%
sphere_shade(texture = "imhof2") %>%
add_water(detect_water(ynp_elev_rs), color = "imhof2") %>%
# tracing takes a few moments
add_shadow(ray_shade(ynp_elev_rs,
multicore = TRUE, # much faster
zscale = 3), 0.5) %>%
#add_shadow(ambient_shade(ynp_elev_rs), 0) %>%
plot_3d(ynp_elev_rs, zscale = 10, fov = 30,
theta = 300, zoom = 0.3, phi = 25,
windowsize = c(1000, 800))
Sys.sleep(0.2)
# labels
render_label(ynp_elev_rs, x = 460, y = 570, z = 13000, zscale = 50, text = "El Cap", textsize = 2,
textcolor = "gray15",
linecolor = "gray15",
linewidth = 4, clear_previous = TRUE)
render_label(ynp_elev_rs, x = 1030, y = 660, z=15000,
zscale = 50, text = "Half Dome",
textcolor = "gray10",
linecolor = "gray10",
dashed = FALSE,
textsize = 2, linewidth = 4,
clear_previous = FALSE)
# check field of view focus
# preview_focus = TRUE only draws the focal plane, so the focus distance
# can be positioned before committing to a full depth-of-field render.
render_depth(preview_focus = TRUE,
focus = 0.53, focallength = 100)
# on half dome
render_depth(preview_focus = FALSE, focus = 0.73,
focallength = 300, clear = FALSE)
# on el cap
render_depth(preview_focus = FALSE, focus = 0.53,
focallength = 100, clear = FALSE)
#render_snapshot(clear=TRUE)
# close out
# Close the rgl device so later plots start from a fresh window.
rgl::rgl.close()
|
# Build the combined 2003-2011 accident-fatalities table, attach county and
# state names via the 5-digit FIPS code, and write the result to acc_full.csv.
require(gdata)  # for rename.vars() below
setwd("data")
# Read the nine yearly files in a loop instead of one copy-pasted
# read.csv() call per year, then stack them into a single data frame.
acc_years <- lapply(2003:2011, function(yr) {
  read.csv(file.path("Fatalities", paste0("acc_", yr, ".csv")), header = TRUE)
})
acc_full <- do.call(rbind, acc_years)
# Drop incomplete rows, then rows with LONGITUDE >= 200 (presumably
# missing-coordinate sentinel codes -- confirm against the FARS codebook).
acc_full <- na.omit(acc_full)
acc_full <- acc_full[which(acc_full$LONGITUDE < 200), ]
# Build the 5-digit FIPS code: state code followed by zero-padded county code.
acc_full$COUNTY <- sprintf("%03d", acc_full$COUNTY)
acc_full$FIPS.C.5 <- as.numeric(paste0(acc_full$STATE, acc_full$COUNTY))
# Look up county/state names and merge them in by FIPS code.
county <- read.csv("Distance/county.csv")
county <- subset(county, select = c("NAME.C.32", "FIPS.C.5", "STATE_NAME.C.35"))
acc_full <- merge(acc_full, county, by = "FIPS.C.5")
acc_full <- rename.vars(acc_full, from = c("STATE_NAME.C.35", "NAME.C.32"),
                        to = c("State", "County"))
# Append the word "County" to each name and store as a factor.
acc_full$County <- as.factor(paste(acc_full$County, "County"))
write.csv(acc_full, file = "acc_full.csv")
| /R_codes/acc_full.R | no_license | dtmlinh/Car-Crash-Fatalities-Exploration-Tool | R | false | false | 1,251 | r | require(gdata)
# Combine the 2003-2011 fatality files, attach county/state names via the
# FIPS code, and write the merged table to acc_full.csv.
# NOTE(review): rename.vars() below comes from the gdata package, which must
# already be attached when this runs.
setwd("data")
# Read each yearly accidents file (header row present).
acc_2003=read.csv("Fatalities/acc_2003.csv",header=T)
acc_2004=read.csv("Fatalities/acc_2004.csv",header=T)
acc_2005=read.csv("Fatalities/acc_2005.csv",header=T)
acc_2006=read.csv("Fatalities/acc_2006.csv",header=T)
acc_2007=read.csv("Fatalities/acc_2007.csv",header=T)
acc_2008=read.csv("Fatalities/acc_2008.csv",header=T)
acc_2009=read.csv("Fatalities/acc_2009.csv",header=T)
acc_2010=read.csv("Fatalities/acc_2010.csv",header=T)
acc_2011=read.csv("Fatalities/acc_2011.csv",header=T)
# Stack all years into one data frame.
acc_full=rbind(acc_2003,acc_2004,acc_2005,acc_2006,acc_2007,acc_2008,acc_2009,acc_2010,acc_2011)
# Drop incomplete rows; LONGITUDE >= 200 is filtered out (presumably a
# missing-coordinate sentinel code -- confirm against the FARS codebook).
acc_full=na.omit(acc_full)
acc_full=acc_full[which(acc_full$LONGITUDE<200),]
# Build the 5-digit FIPS code: state code + zero-padded 3-digit county code.
acc_full$COUNTY=sprintf("%03d", acc_full$COUNTY)
acc_full$FIPS.C.5=paste(acc_full$STATE,acc_full$COUNTY, sep="")
acc_full$FIPS.C.5=as.numeric(acc_full$FIPS.C.5)
# Merge in county and state names from the lookup table.
county=read.csv("Distance/county.csv")
county=subset(county, select=c("NAME.C.32","FIPS.C.5","STATE_NAME.C.35"))
acc_full=merge(acc_full, county, by="FIPS.C.5")
acc_full=rename.vars(acc_full, from=c("STATE_NAME.C.35","NAME.C.32"), to=c("State","County"))
# Append the word "County" to each name and store as a factor.
acc_full$County = paste(acc_full$County, "County")
acc_full$County = as.factor(acc_full$County)
write.csv(acc_full,file="acc_full.csv")
|
# Image segmentation by clustering pixel intensities: first a small flower
# image (hierarchical clustering), then brain MRI scans (k-means).
flower=read.csv("https://storage.googleapis.com/dimensionless/Analytics/flower.csv",header=FALSE)
flowerMatrix=as.matrix(flower)
str(flowerMatrix)
# Flatten the image so each pixel intensity is one observation.
flowerVector=as.vector(flowerMatrix)
class(flowerVector)
# Hierarchical clustering on pairwise intensity distances (Ward linkage).
distance<-dist(flowerVector,method = "euclidean")
str(distance)
clusterIntensity<-hclust(distance,method="ward.D")
plot(clusterIntensity)
rect.hclust(clusterIntensity,3)
flowerClusters<-cutree(clusterIntensity,k=3)
str(flowerClusters)
# Mean intensity per cluster.
tapply(flowerVector,flowerClusters,mean)
# Reshape the cluster assignments back into the 50x50 image grid.
dim(flowerClusters)= c(50,50)
flowerClusters
class(flowerClusters)
image(flowerClusters,axes=FALSE)
image(flowerMatrix,axes=FALSE,col = grey(seq(0,1,length=256)))
# MRI Image segmentation
# Let's try this with an MRI image of the brain
healthy = read.csv("healthy.csv", header=FALSE)
healthyMatrix = as.matrix(healthy)
str(healthyMatrix)
# Hierarchical clustering
healthyVector = as.vector(healthyMatrix)
# NOTE(review): this dist() call is expected to fail on the MRI image --
# the full pairwise distance matrix is too large to allocate.
distance = dist(healthyVector, method = "euclidean")
# We have an error - why?
str(healthyVector)
# Plot image
image(healthyMatrix,axes=FALSE,col=grey(seq(0,1,length=256)))
# K means clustering (scales to large images, unlike hclust above)
# Specify number of clusters
k = 5
# Run k-means (seed fixed for reproducible cluster labels)
set.seed(1)
KMC = kmeans(healthyVector, centers = k, iter.max = 1000)
str(KMC)
healthyClusters<-KMC$cluster
# Extract clusters
healthyClusters = KMC$cluster
KMC$centers[2]
# Plot the image with the clusters
dim(healthyClusters) = c(nrow(healthyMatrix), ncol(healthyMatrix))
#Image
image(healthyClusters,axes=FALSE,col = rainbow(k))
# Scree Plots
# Total within-cluster sum of squares for k = 2..10, to pick k visually.
SumWithinss = sapply(2:10, function(x) sum(kmeans(healthyVector, centers=x, iter.max=1000)$withinss))
NumClusters = seq(2,10,1)
plot(NumClusters,SumWithinss,type="b")
# Tumor
tumor<-read.csv("https://storage.googleapis.com/dimensionless/Analytics/tumor.csv",header=FALSE)
str(tumor)
tumorMatrix<-as.matrix(tumor)
tumorVector<-as.vector(tumorMatrix)
image(tumorMatrix,axes=FALSE,col=grey(seq(0,1,length=256)))
# NOTE(review): as.kcca() comes from the flexclust package, which is never
# attached in this script -- confirm library(flexclust) is loaded elsewhere.
KMC.kcca = as.kcca(KMC, healthyVector)
KMC.kcca
summary(KMC.kcca)
# Classify the tumor scan's pixels using the clusters fit on the healthy scan.
tumorClusters<-predict(KMC.kcca,newdata=tumorVector)
summary(tumorClusters)
str(tumorClusters)
table(tumorClusters)
# Segmented Image
dim(tumorClusters)<-c(nrow(tumorMatrix),ncol(tumorMatrix))
dim(tumorMatrix)
image(tumorClusters,axes=FALSE,col=rainbow(k))
| /Machine Learning Unit 6 Image SegmentationFlower_segment.R | no_license | Deva-Vid/R-github | R | false | false | 2,319 | r | flower=read.csv("https://storage.googleapis.com/dimensionless/Analytics/flower.csv",header=FALSE)
# Image segmentation by clustering pixel intensities (flower image, then
# brain MRI). `flower` must already hold the 50x50 intensity grid read
# from the remote CSV.
flowerMatrix=as.matrix(flower)
str(flowerMatrix)
# Flatten so each pixel intensity is one observation.
flowerVector=as.vector(flowerMatrix)
class(flowerVector)
distance<-dist(flowerVector,method = "euclidean")
str(distance)
clusterIntensity<-hclust(distance,method="ward.D")
plot(clusterIntensity)
rect.hclust(clusterIntensity,3)
flowerClusters<-cutree(clusterIntensity,k=3)
str(flowerClusters)
tapply(flowerVector,flowerClusters,mean)
# Reshape cluster assignments back into the 50x50 image grid.
dim(flowerClusters)= c(50,50)
flowerClusters
class(flowerClusters)
image(flowerClusters,axes=FALSE)
image(flowerMatrix,axes=FALSE,col = grey(seq(0,1,length=256)))
# MRI Image segmentation
# Let's try this with an MRI image of the brain
healthy = read.csv("healthy.csv", header=FALSE)
healthyMatrix = as.matrix(healthy)
str(healthyMatrix)
# Hierarchical clustering
healthyVector = as.vector(healthyMatrix)
# NOTE(review): expected to fail here -- the pairwise distance matrix for
# the full MRI is too large to allocate.
distance = dist(healthyVector, method = "euclidean")
# We have an error - why?
str(healthyVector)
# Plot image
image(healthyMatrix,axes=FALSE,col=grey(seq(0,1,length=256)))
# K means clustering
# Specify number of clusters
k = 5
# Run k-means
set.seed(1)
KMC = kmeans(healthyVector, centers = k, iter.max = 1000)
str(KMC)
healthyClusters<-KMC$cluster
# Extract clusters
healthyClusters = KMC$cluster
KMC$centers[2]
# Plot the image with the clusters
dim(healthyClusters) = c(nrow(healthyMatrix), ncol(healthyMatrix))
#Image
image(healthyClusters,axes=FALSE,col = rainbow(k))
# Scree Plots
SumWithinss = sapply(2:10, function(x) sum(kmeans(healthyVector, centers=x, iter.max=1000)$withinss))
NumClusters = seq(2,10,1)
plot(NumClusters,SumWithinss,type="b")
# Tumor
tumor<-read.csv("https://storage.googleapis.com/dimensionless/Analytics/tumor.csv",header=FALSE)
str(tumor)
tumorMatrix<-as.matrix(tumor)
tumorVector<-as.vector(tumorMatrix)
image(tumorMatrix,axes=FALSE,col=grey(seq(0,1,length=256)))
# NOTE(review): as.kcca() is from the flexclust package, which is never
# attached in this script.
KMC.kcca = as.kcca(KMC, healthyVector)
KMC.kcca
summary(KMC.kcca)
# Classify tumor pixels with the clusters fit on the healthy scan.
tumorClusters<-predict(KMC.kcca,newdata=tumorVector)
summary(tumorClusters)
str(tumorClusters)
table(tumorClusters)
# Segmented Image
dim(tumorClusters)<-c(nrow(tumorMatrix),ncol(tumorMatrix))
dim(tumorMatrix)
image(tumorClusters,axes=FALSE,col=rainbow(k))
|
#R PACKAGES
###scraped the html page
# Fetch Walk Score and Transit Score for each property in housing.csv via
# the Walk Score web APIs, recording per-row pass/fail status, then save.
library(rvest)
library(xml2)
library(RCurl)
library(XML)
library(httr)
library(jsonlite)
library(tidyverse)
# NOTE(review): the API key is hard-coded in source; prefer reading it from
# an environment variable (Sys.getenv) so it is not committed to the repo.
API_Key <- "719e7a58daf25647275ff58942226b56"
Housing <- read.csv("/Users/reinachau/Documents/Spark Project/housing.csv", stringsAsFactors=FALSE)
#Create a dataframe to store the Zmarket values
# One row per property; score/status columns start as NA and are filled in
# by the two API loops below.
Housing_WalkScore_TransitScore <- data.frame(
index = Housing$index,
mapc_id = Housing$mapc_id,
owner_city = Housing$owner_city,
owner_state = Housing$owner_stat,
latitude = Housing$latitude,
longitude = Housing$longitude,
walkscore=NA,
walkscore_status=NA,
transitscore=NA,
transitscore_status=NA,
transit_score_description=NA,
transit_score_summary=NA
)
##################################################################################################################################################
#
#
# CALL WALK SCORE API
#
##################################################################################################################################################
# Rows that still need a walk score and have non-missing coordinates.
listpos_1 <- which(Housing_WalkScore_TransitScore$walkscore %in% c(NA, "") & !Housing_WalkScore_TransitScore$latitude %in% c(NA, "") & !Housing_WalkScore_TransitScore$longitude %in% c(NA, "", " "))
for(k in listpos_1){
#k=900;
print(k)
latitude = Housing_WalkScore_TransitScore$latitude[k]
longitude = Housing_WalkScore_TransitScore$longitude[k]
walkscore_URL <- paste0("http://api.walkscore.com/score?format=json&lat=", latitude, "&lon=", longitude, "&wsapikey=", API_Key)
walkscore_res <- GET(
url = walkscore_URL,
accept_json()
)
#extract the url from Zillow
# Mark the row "pass"/"fail" depending on whether the HTTP status errored.
test_request_1 <- tryCatch({
stop_for_status(walkscore_res)
"pass"
}, error = function(e) {
"fail"
})
Housing_WalkScore_TransitScore$walkscore_status[k] <- test_request_1
if(test_request_1 == "pass"){
# Parse the JSON body and keep the score when the field is present.
request_1 <- fromJSON(rawToChar(walkscore_res$content))
if(length(request_1[["walkscore"]]) > 0){
Housing_WalkScore_TransitScore$walkscore[k] <- request_1[["walkscore"]]
}
}
}
##################################################################################################################################################
#
#
# CALL TRANSIT SCORE API
#
##################################################################################################################################################
# Rows still missing a transit score with non-missing city/state/coords.
listpos_2 <- which(Housing_WalkScore_TransitScore$transitscore %in% c(NA, "") & !Housing_WalkScore_TransitScore$owner_city %in% c(NA, "") & !Housing_WalkScore_TransitScore$owner_state %in% c(NA, "") & !Housing_WalkScore_TransitScore$latitude %in% c(NA, "") & !Housing_WalkScore_TransitScore$longitude %in% c(NA, "", " "))
for(k in listpos_2){
#k=1;
# NOTE(review): `latitute` is misspelled but defined and used consistently
# within this loop, so behavior is unaffected.
latitute = Housing_WalkScore_TransitScore$latitude[k]
longitude = Housing_WalkScore_TransitScore$longitude[k]
# Crude URL encoding: spaces -> %20, commas stripped. Consider
# utils::URLencode() for full coverage of special characters.
city = gsub(" ", "%20", Housing_WalkScore_TransitScore$owner_city[k]) %>% gsub(",", "", .)
state = gsub(" ", "%20", Housing_WalkScore_TransitScore$owner_state[k])
transitscore_URL <- paste0("https://transit.walkscore.com/transit/score/?lat=", latitute, "&lon=", longitude, "&city=", city, "&state=", state, "&wsapikey=", API_Key)
transitscore_res <- GET(url=transitscore_URL, accept_json())
#extract the url from Zillow
test_request_2 <- tryCatch({
stop_for_status(transitscore_res)
"pass"
}, error = function(e) {
"fail"
})
Housing_WalkScore_TransitScore$transitscore_status[k] <- test_request_2
if(test_request_2 == "pass"){
print(k)
request_2 <- fromJSON(rawToChar(transitscore_res$content))
Housing_WalkScore_TransitScore$transitscore[k] <- request_2[["transit_score"]]
Housing_WalkScore_TransitScore$transit_score_description[k] <- request_2[["description"]]
Housing_WalkScore_TransitScore$transit_score_summary[k] <- request_2[["summary"]]
}
}
write.csv(Housing_WalkScore_TransitScore, "/Users/reinachau/Documents/Spark Project/Housing_WalkScore_TransitScore_1.csv", row.names=FALSE)
| /affordable_housing/Deliverables/Final Project Deliverable/code/housing_walk_score_API.R | no_license | HongXin123456/CS506-Fall2020-Projects | R | false | false | 4,087 | r |
#R PACKAGES
###scraped the html page
# Fetch Walk Score and Transit Score for each property via the Walk Score
# web APIs, recording per-row pass/fail status, then save to CSV.
library(rvest)
library(xml2)
library(RCurl)
library(XML)
library(httr)
library(jsonlite)
library(tidyverse)
# NOTE(review): API key hard-coded in source; prefer Sys.getenv().
API_Key <- "719e7a58daf25647275ff58942226b56"
Housing <- read.csv("/Users/reinachau/Documents/Spark Project/housing.csv", stringsAsFactors=FALSE)
#Create a dataframe to store the Zmarket values
Housing_WalkScore_TransitScore <- data.frame(
index = Housing$index,
mapc_id = Housing$mapc_id,
owner_city = Housing$owner_city,
owner_state = Housing$owner_stat,
latitude = Housing$latitude,
longitude = Housing$longitude,
walkscore=NA,
walkscore_status=NA,
transitscore=NA,
transitscore_status=NA,
transit_score_description=NA,
transit_score_summary=NA
)
##################################################################################################################################################
#
#
# CALL WALK SCORE API
#
##################################################################################################################################################
# Rows still needing a walk score with non-missing coordinates.
listpos_1 <- which(Housing_WalkScore_TransitScore$walkscore %in% c(NA, "") & !Housing_WalkScore_TransitScore$latitude %in% c(NA, "") & !Housing_WalkScore_TransitScore$longitude %in% c(NA, "", " "))
for(k in listpos_1){
#k=900;
print(k)
latitude = Housing_WalkScore_TransitScore$latitude[k]
longitude = Housing_WalkScore_TransitScore$longitude[k]
walkscore_URL <- paste0("http://api.walkscore.com/score?format=json&lat=", latitude, "&lon=", longitude, "&wsapikey=", API_Key)
walkscore_res <- GET(
url = walkscore_URL,
accept_json()
)
#extract the url from Zillow
# "pass"/"fail" depending on whether the HTTP status errored.
test_request_1 <- tryCatch({
stop_for_status(walkscore_res)
"pass"
}, error = function(e) {
"fail"
})
Housing_WalkScore_TransitScore$walkscore_status[k] <- test_request_1
if(test_request_1 == "pass"){
request_1 <- fromJSON(rawToChar(walkscore_res$content))
if(length(request_1[["walkscore"]]) > 0){
Housing_WalkScore_TransitScore$walkscore[k] <- request_1[["walkscore"]]
}
}
}
##################################################################################################################################################
#
#
# CALL TRANSIT SCORE API
#
##################################################################################################################################################
listpos_2 <- which(Housing_WalkScore_TransitScore$transitscore %in% c(NA, "") & !Housing_WalkScore_TransitScore$owner_city %in% c(NA, "") & !Housing_WalkScore_TransitScore$owner_state %in% c(NA, "") & !Housing_WalkScore_TransitScore$latitude %in% c(NA, "") & !Housing_WalkScore_TransitScore$longitude %in% c(NA, "", " "))
for(k in listpos_2){
#k=1;
# NOTE(review): `latitute` is misspelled but used consistently below.
latitute = Housing_WalkScore_TransitScore$latitude[k]
longitude = Housing_WalkScore_TransitScore$longitude[k]
city = gsub(" ", "%20", Housing_WalkScore_TransitScore$owner_city[k]) %>% gsub(",", "", .)
state = gsub(" ", "%20", Housing_WalkScore_TransitScore$owner_state[k])
transitscore_URL <- paste0("https://transit.walkscore.com/transit/score/?lat=", latitute, "&lon=", longitude, "&city=", city, "&state=", state, "&wsapikey=", API_Key)
transitscore_res <- GET(url=transitscore_URL, accept_json())
#extract the url from Zillow
test_request_2 <- tryCatch({
stop_for_status(transitscore_res)
"pass"
}, error = function(e) {
"fail"
})
Housing_WalkScore_TransitScore$transitscore_status[k] <- test_request_2
if(test_request_2 == "pass"){
print(k)
request_2 <- fromJSON(rawToChar(transitscore_res$content))
Housing_WalkScore_TransitScore$transitscore[k] <- request_2[["transit_score"]]
Housing_WalkScore_TransitScore$transit_score_description[k] <- request_2[["description"]]
Housing_WalkScore_TransitScore$transit_score_summary[k] <- request_2[["summary"]]
}
}
write.csv(Housing_WalkScore_TransitScore, "/Users/reinachau/Documents/Spark Project/Housing_WalkScore_TransitScore_1.csv", row.names=FALSE)
|
# Collect the optimal dose combination (x, y) from each design run in `df`
# and tag every row with the toxicity and efficacy scenario labels.
#
# Args:
#   df:           list of results; each element contains an
#                 `optimal_dose_combination` entry holding `x` and `y`.
#   DLT_scenario: label for the dose-limiting-toxicity scenario.
#   EFF_scenario: label for the efficacy scenario.
# Returns: data.frame with columns x, y, DLT_scenario, EFF_scenario.
#
# Rewritten with base-R equivalents of purrr::map(df, "optimal_dose_combination")
# and dplyr::select()/mutate(), so the helper no longer fails when the
# tidyverse is not attached.
get_optimal_dose_combination = function(df, DLT_scenario, EFF_scenario){
  combos <- lapply(df, function(run) run[["optimal_dose_combination"]])
  out <- as.data.frame(do.call(rbind, combos))
  out <- out[, c("x", "y"), drop = FALSE]
  out$DLT_scenario <- DLT_scenario
  out$EFF_scenario <- EFF_scenario
  return(out)
}
\name{get.gamma.par}
\alias{get.gamma.par}
\title{Fitting parameters of a gamma distribution from two or more quantiles}
\usage{
get.gamma.par(p=c(0.025,0.5,0.975), q, show.output=TRUE,
plot=TRUE, tol=0.001,
fit.weights=rep(1,length(p)),scaleX=c(0.1,0.9),...)
}
\arguments{
\item{p}{numeric, single value or vector of
probabilities.}
\item{q}{numeric, single value or vector of quantiles
corresponding to p.}
\item{show.output}{logical, if \code{TRUE} the
\code{optim} result will be printed (default value is
\code{TRUE}).}
\item{plot}{logical, if \code{TRUE} the graphical
diagnostics will be plotted (default value is
\code{TRUE}).}
\item{tol}{numeric, single positive value giving the
absolute convergence tolerance for reaching zero (default
value is \code{0.001}).}
\item{fit.weights}{numerical vector of the same length as
a probabilities vector \code{p} containing positive
values for weighting quantiles. By default all quantiles
will be weighted by 1.}
\item{scaleX}{numerical vector of the length 2 containing
values (from the open interval (0,1)) for scaling the
quantile-axis (relevant only if \code{plot=TRUE}). The
smaller the left value, the further the graph is
extrapolated within the lower percentile, the greater the
right value, the further it goes within the upper
percentile.}
\item{...}{further arguments passed to the functions
\code{plot} and \code{points} (relevant only if
\code{plot=TRUE}).}
}
\value{
Returns fitted parameters of a gamma distribution or
missing values (\code{NA}'s) if the distribution cannot
fit the specified quantiles.
}
\description{
\code{get.gamma.par} returns the parameters of a gamma
distribution where the \code{p}th percentiles match with
the quantiles \code{q}.
}
\details{
The number of probabilities, the number of quantiles and
the number of weightings must be identical and should be
at least two. Using the default \code{p}, the three
corresponding quantiles are the 2.5th percentile, the
median and the 97.5th percentile, respectively.
\code{get.gamma.par} uses the R function \code{optim}
with the method \code{L-BFGS-B}. If this method fails the
optimization method \code{BFGS} will be invoked. \cr \cr
If \code{show.output=TRUE} the output of the function
\code{optim} will be shown. The item \code{convergence}
equal to 0 means the successful completion of the
optimization procedure, otherwise it indicates a
convergence error. The item \code{value} displays the
achieved minimal value of the functions that were
minimized. \cr \cr The estimated distribution parameters
returned by the function \code{optim} are accepted if the
achieved value of the minimized function (output
component \code{value} of \code{optim}) is smaller than
the argument \code{tol}. \cr \cr The items of the
probability vector \code{p} should lie between 0 and 1.
\cr \cr The items of the weighting vector
\code{fit.weights} should be positive values. \cr \cr The
function which will be minimized is defined as a sum of
squared differences between the given probabilities and
the theoretical probabilities of the specified
distribution evaluated at the given quantile points
(least squares estimation).
}
\note{
it should be noted that there might be deviations between
the estimated and the theoretical distribution parameters
in certain circumstances. This is because the estimation
of the parameters is based on a numerical optimization
method and depends strongly on the initial values. In
addition, one must always keep in mind that a
distribution for different combinations of parameters may
look very similar. Therefore, the optimization method
cannot always find the "right" distribution, but a
"similar" one. \cr \cr If the function terminates with
the error message "convergence error occurred or specified
tolerance not achieved", one may try to set the
convergence tolerance to a higher value. It is yet to be
noted, that good till very good fits of parameters could
only be obtained for tolerance values that are smaller
than 0.001.
}
\examples{
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=10,rate=10)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,scaleX=c(0.00001,0.9999))
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=0.1,rate=0.1)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=1,rate=1)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
# example with only two quantiles
\donttest{q<-qgamma(p=c(0.025,0.975),shape=10,rate=10)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(p=c(0.025,0.975),q=q)
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(100,1))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(1,100))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(10,1))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(1,10))}
}
\author{
Matthias Greiner \email{matthias.greiner@bfr.bund.de}
(BfR), \cr Katharina Schueller
\email{schueller@stat-up.de} (\acronym{STAT-UP}
Statistical Consulting), \cr Natalia Belgorodski
\email{belgorodski@stat-up.de} (\acronym{STAT-UP}
Statistical Consulting)
}
\seealso{
See \code{pgamma} for distribution implementation
details.
}
\keyword{fitpercentiles}
| /man/get.gamma.par.Rd | no_license | l-goehring/rriskDistributions | R | false | false | 5,854 | rd | \name{get.gamma.par}
\alias{get.gamma.par}
\title{Fitting parameters of a gamma distribution from two or more quantiles}
\usage{
get.gamma.par(p=c(0.025,0.5,0.975), q, show.output=TRUE,
plot=TRUE, tol=0.001,
fit.weights=rep(1,length(p)),scaleX=c(0.1,0.9),...)
}
\arguments{
\item{p}{numeric, single value or vector of
probabilities.}
\item{q}{numeric, single value or vector of quantiles
corresponding to p.}
\item{show.output}{logical, if \code{TRUE} the
\code{optim} result will be printed (default value is
\code{TRUE}).}
\item{plot}{logical, if \code{TRUE} the graphical
diagnostics will be plotted (default value is
\code{TRUE}).}
\item{tol}{numeric, single positive value giving the
absolute convergence tolerance for reaching zero (default
value is \code{0.001}).}
\item{fit.weights}{numerical vector of the same length as
a probabilities vector \code{p} containing positive
values for weighting quantiles. By default all quantiles
will be weighted by 1.}
\item{scaleX}{numerical vector of the length 2 containing
values (from the open interval (0,1)) for scaling the
quantile-axis (relevant only if \code{plot=TRUE}). The
smaller the left value, the further the graph is
extrapolated within the lower percentile, the greater the
right value, the further it goes within the upper
percentile.}
\item{...}{further arguments passed to the functions
\code{plot} and \code{points} (relevant only if
\code{plot=TRUE}).}
}
\value{
Returns fitted parameters of a gamma distribution or
missing values (\code{NA}'s) if the distribution cannot
fit the specified quantiles.
}
\description{
\code{get.gamma.par} returns the parameters of a gamma
distribution where the \code{p}th percentiles match with
the quantiles \code{q}.
}
\details{
The number of probabilities, the number of quantiles and
the number of weightings must be identical and should be
at least two. Using the default \code{p}, the three
corresponding quantiles are the 2.5th percentile, the
median and the 97.5th percentile, respectively.
\code{get.gamma.par} uses the R function \code{optim}
with the method \code{L-BFGS-B}. If this method fails the
optimization method \code{BFGS} will be invoked. \cr \cr
If \code{show.output=TRUE} the output of the function
\code{optim} will be shown. The item \code{convergence}
equal to 0 means the successful completion of the
optimization procedure, otherwise it indicates a
convergence error. The item \code{value} displays the
achieved minimal value of the functions that were
minimized. \cr \cr The estimated distribution parameters
returned by the function \code{optim} are accepted if the
achieved value of the minimized function (output
component \code{value} of \code{optim}) is smaller than
the argument \code{tol}. \cr \cr The items of the
probability vector \code{p} should lie between 0 and 1.
\cr \cr The items of the weighting vector
\code{fit.weights} should be positive values. \cr \cr The
function which will be minimized is defined as a sum of
squared differences between the given probabilities and
the theoretical probabilities of the specified
distribution evaluated at the given quantile points
(least squares estimation).
}
\note{
it should be noted that there might be deviations between
the estimated and the theoretical distribution parameters
in certain circumstances. This is because the estimation
of the parameters is based on a numerical optimization
method and depends strongly on the initial values. In
addition, one must always keep in mind that a
distribution for different combinations of parameters may
look very similar. Therefore, the optimization method
cannot always find the "right" distribution, but a
"similar" one. \cr \cr If the function terminates with
the error message "convergence error occurred or specified
tolerance not achieved", one may try to set the
convergence tolerance to a higher value. It is yet to be
noted, that good till very good fits of parameters could
only be obtained for tolerance values that are smaller
than 0.001.
}
\examples{
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=10,rate=10)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,scaleX=c(0.00001,0.9999))
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=0.1,rate=0.1)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
\donttest{q<-qgamma(p=c(0.025,0.5,0.975),shape=1,rate=1)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(q=q)
get.gamma.par(q=q,fit.weights=c(100,1,100))
get.gamma.par(q=q,fit.weights=c(10,1,10))
get.gamma.par(q=q,fit.weights=c(1,100,1))
get.gamma.par(q=q,fit.weights=c(1,10,1))}
# example with only two quantiles
\donttest{q<-qgamma(p=c(0.025,0.975),shape=10,rate=10)
X11(width=9,height=6)
par(mfrow=c(2,3))
get.gamma.par(p=c(0.025,0.975),q=q)
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(100,1))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(1,100))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(10,1))
get.gamma.par(p=c(0.025,0.975),q=q,fit.weights=c(1,10))}
}
\author{
Matthias Greiner \email{matthias.greiner@bfr.bund.de}
(BfR), \cr Katharina Schueller
\email{schueller@stat-up.de} (\acronym{STAT-UP}
Statistical Consulting), \cr Natalia Belgorodski
\email{belgorodski@stat-up.de} (\acronym{STAT-UP}
Statistical Consulting)
}
\seealso{
See \code{pgamma} for distribution implementation
details.
}
\keyword{fitpercentiles}
|
\name{MEnvelope}
\alias{MEnvelope}
\title{
Estimation of the confidence envelope of the M function under its null hypothesis
}
\description{
Simulates point patterns according to the null hypothesis and returns the envelope of \emph{M} according to the confidence level.
}
\usage{
MEnvelope(X, r = NULL, NumberOfSimulations = 100, Alpha = 0.05,
ReferenceType, NeighborType = ReferenceType,
CaseControl = FALSE, SimulationType = "RandomLocation", Global = FALSE)
}
\arguments{
\item{X}{
A point pattern (\code{\link{wmppp.object}}) or a \code{\link{Dtable}} object.
}
\item{r}{
A vector of distances. If \code{NULL}, a default value is set: 32 unequally spaced values are used up to half the maximum distance between points \eqn{d_m}. The first value is 0, first steps are small (\eqn{d_m/200}) then increase progressively up to \eqn{d_m/20}.
}
\item{NumberOfSimulations}{
The number of simulations to run, 100 by default.
}
\item{Alpha}{
The risk level, 5\% by default.
}
\item{ReferenceType}{
One of the point types.
}
\item{NeighborType}{
One of the point types, equal to the reference type by default to calculate univariate M.
}
\item{CaseControl}{
Logical; if \code{TRUE}, the case-control version of \emph{M} is computed. \emph{ReferenceType} points are cases, \emph{NeighborType} points are controls.
}
\item{SimulationType}{
A string describing the null hypothesis to simulate. The null hypothesis may be
"\emph{RandomLocation}": points are redistributed on the actual locations (default);
"\emph{RandomLabeling}": randomizes point types, keeping locations and weights unchanged;
"\emph{PopulationIndependence}": keeps reference points unchanged, randomizes other point locations.
}
\item{Global}{
Logical; if \code{TRUE}, a global envelope sensu Duranton and Overman (2005) is calculated.
}
}
\details{
This envelope is local by default, that is to say it is computed separately at each distance. See Loosmore and Ford (2006) for a discussion.
The global envelope is calculated by iteration: the simulations reaching one of the upper or lower values at any distance are eliminated at each step. The process is repeated until \emph{Alpha / Number of simulations} simulations are dropped. The remaining upper and lower bounds at all distances constitute the global envelope. Interpolation is used if the exact ratio cannot be reached.
}
\value{
An envelope object (\code{\link{envelope}}). There are methods for print and plot for this class.
The \code{fv} contains the observed value of the function, its average simulated value and the confidence envelope.
}
\references{
Duranton, G. and Overman, H. G. (2005). Testing for Localisation Using Micro-Geographic Data. \emph{Review of Economic Studies} 72(4): 1077-1106.
Kenkel, N. C. (1988). Pattern of Self-Thinning in Jack Pine: Testing the Random Mortality Hypothesis. \emph{Ecology} 69(4): 1017-1024.
Loosmore, N. B. and Ford, E. D. (2006). Statistical inference using the G or K point pattern spatial statistics. \emph{Ecology} 87(8): 1925-1931.
Marcon, E. and F. Puech (2017). A typology of distance-based measures of spatial concentration. \emph{Regional Science and Urban Economics}. 62:56-67.
}
\author{
Eric Marcon <Eric.Marcon@ecofog.gf>
}
\seealso{
\code{\link{Mhat}}
}
\examples{
data(paracou16)
# Keep only 50\% of points to run this example
X <- as.wmppp(rthin(paracou16, 0.5))
plot(X)
# Calculate confidence envelope (should be 1000 simulations, reduced to 4 to save time)
NumberOfSimulations <- 4
Alpha <- .10
plot(MEnvelope(X, , NumberOfSimulations, Alpha,
"V. Americana", "Q. Rosea", FALSE, "RandomLabeling"))
}
| /dbmss/man/MEnvelope.Rd | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 3,801 | rd | \name{MEnvelope}
\alias{MEnvelope}
\title{
Estimation of the confidence envelope of the M function under its null hypothesis
}
\description{
Simulates point patterns according to the null hypothesis and returns the envelope of \emph{M} according to the confidence level.
}
\usage{
MEnvelope(X, r = NULL, NumberOfSimulations = 100, Alpha = 0.05,
ReferenceType, NeighborType = ReferenceType,
CaseControl = FALSE, SimulationType = "RandomLocation", Global = FALSE)
}
\arguments{
\item{X}{
A point pattern (\code{\link{wmppp.object}}) or a \code{\link{Dtable}} object.
}
\item{r}{
A vector of distances. If \code{NULL}, a default value is set: 32 unequally spaced values are used up to half the maximum distance between points \eqn{d_m}. The first value is 0; the first steps are small (\eqn{d_m/200}) and then increase progressively up to \eqn{d_m/20}.
}
\item{NumberOfSimulations}{
The number of simulations to run, 100 by default.
}
\item{Alpha}{
The risk level, 5\% by default.
}
\item{ReferenceType}{
One of the point types.
}
\item{NeighborType}{
One of the point types, equal to the reference type by default to calculate univariate M.
}
\item{CaseControl}{
Logical; if \code{TRUE}, the case-control version of \emph{M} is computed. \emph{ReferenceType} points are cases, \emph{NeighborType} points are controls.
}
\item{SimulationType}{
A string describing the null hypothesis to simulate. The null hypothesis may be
"\emph{RandomLocation}": points are redistributed on the actual locations (default);
"\emph{RandomLabeling}": randomizes point types, keeping locations and weights unchanged;
"\emph{PopulationIndependence}": keeps reference points unchanged, randomizes other point locations.
}
\item{Global}{
Logical; if \code{TRUE}, a global envelope sensu Duranton and Overman (2005) is calculated.
}
}
\details{
This envelope is local by default, that is to say it is computed separately at each distance. See Loosmore and Ford (2006) for a discussion.
The global envelope is calculated by iteration: the simulations reaching one of the upper or lower values at any distance are eliminated at each step. The process is repeated until \emph{Alpha / Number of simulations} simulations are dropped. The remaining upper and lower bounds at all distances constitute the global envelope. Interpolation is used if the exact ratio cannot be reached.
}
\value{
An envelope object (\code{\link{envelope}}). There are methods for print and plot for this class.
The \code{fv} contains the observed value of the function, its average simulated value and the confidence envelope.
}
\references{
Duranton, G. and Overman, H. G. (2005). Testing for Localisation Using Micro-Geographic Data. \emph{Review of Economic Studies} 72(4): 1077-1106.
Kenkel, N. C. (1988). Pattern of Self-Thinning in Jack Pine: Testing the Random Mortality Hypothesis. \emph{Ecology} 69(4): 1017-1024.
Loosmore, N. B. and Ford, E. D. (2006). Statistical inference using the G or K point pattern spatial statistics. \emph{Ecology} 87(8): 1925-1931.
Marcon, E. and F. Puech (2017). A typology of distance-based measures of spatial concentration. \emph{Regional Science and Urban Economics}. 62:56-67.
}
\author{
Eric Marcon <Eric.Marcon@ecofog.gf>
}
\seealso{
\code{\link{Mhat}}
}
\examples{
data(paracou16)
# Keep only 50\% of points to run this example
X <- as.wmppp(rthin(paracou16, 0.5))
plot(X)
# Calculate confidence envelope (should be 1000 simulations, reduced to 4 to save time)
NumberOfSimulations <- 4
Alpha <- .10
plot(MEnvelope(X, , NumberOfSimulations, Alpha,
"V. Americana", "Q. Rosea", FALSE, "RandomLabeling"))
}
|
# Combine the stage-one SMS results with the SAP contact data.
# The combined table currently has multiple records per employee; a separate
# script produces the follow-up list for the response team.
library(tidyverse)

stage_one <- read_csv("data/stage_one_sms.csv")
sap_data <- read_csv("data/SAP_CONTACTINFO.CSV")
stage_two <- read_csv("data/stage_two_phone_calls.csv")

# Keep only the useful columns from the stage-one export.
stage_one_cut <- stage_one %>%
  select("AdditionalTeamName", "FirstName", "LastName",
         "Last Updated Time", "Created Time", "Message Label", "Message Subject",
         "Message Sent Time", "Response Channel", "SMS Sent Time",
         "SMS Received Time", "SMS Undeliverable Time",
         "Voice Sent Time", "Voice Received Time", "Voice Acknowledged Time",
         "Voice Undeliverable Time", "Web Sent Time", "Response")

# Incorporate SAP data into a joined data set with contact details.
combined_data <- full_join(sap_data, stage_one_cut, by = c("Ident" = "AdditionalTeamName"))
remove_duplicate <- combined_data[!duplicated(combined_data$Ident), ]
missing_from_whisper <- filter(remove_duplicate, is.na(FirstName))
missing_from_SAP <- filter(remove_duplicate, is.na(Surname))

# Build the file for the HR response team: everyone in SAP with no response
# recorded in either stage.
filter_response_stage_2 <- filter(stage_two, !is.na(Response)) %>%
  mutate(responded = "stage 2")
filter_response_stage_1 <- filter(stage_one, !is.na(Response)) %>%
  mutate(responded = "stage 1") %>%
  mutate(Response = as.numeric(Response))
people_who_responded <- bind_rows(filter_response_stage_1, filter_response_stage_2) %>%
  mutate(Response_recieved = "yes") %>%
  select("AdditionalTeamName", "responded", "Response_recieved")
remove_dup_test <- people_who_responded[!duplicated(people_who_responded$AdditionalTeamName), ]
test_join <- left_join(sap_data, remove_dup_test, by = c("Ident" = "AdditionalTeamName")) %>%
  filter(is.na(Response_recieved))
results_from_whispir_both_stages <- left_join(stage_one_cut, remove_dup_test, by = c("AdditionalTeamName"))
hr_response_team_file <- left_join(sap_data, results_from_whispir_both_stages, by = c("Ident" = "AdditionalTeamName")) %>%
  filter(is.na(Response_recieved))
# BUG FIX: the original referenced an undefined object `cleaned`; the
# de-duplication is evidently meant to run on `hr_response_team_file`
# (the non-responder table built just above) -- confirm against intent.
remove_duplicates_no_response <- hr_response_team_file[!duplicated(hr_response_team_file$AdditionalTeamName), ]
data_for_HR <- remove_duplicates_no_response %>%
  rename(EmployeeIdent = AdditionalTeamName) %>%
  mutate(Row_ID = 1:n()) %>%
  select(Row_ID, everything()) %>%
  mutate(comments = " ")
# Reorder columns: row id, ident, stage-one columns, then SAP contact columns.
sort_hr_data <- data_for_HR[c(1, 2, 12:43, 3:11)]
write_csv(sort_hr_data, path = "processed_data/Final_Data_for_response_Team.csv")
write_csv(remove_duplicate, path = "processed_data/Final_data_output_all_data.csv")
write_csv(missing_from_whisper, path = "processed_data/Final_data_missing_from_whispir.csv")
write_csv(missing_from_SAP, path = "processed_data/Final_data_missing_from_sap.csv")
#******************************************
#playing with Summary generation
whisper_stage_One_Summary <-
| /Scripts/old_FINAL_SCRIPT_4_OUTPUTS_REPORTS.R | no_license | elisebehn/csiro_whipsir_alert | R | false | false | 3,061 | r | #script is combining the data from stage one and SAP
# Combine the stage-one SMS results with the SAP contact data.
# The combined table currently has multiple records per employee; a separate
# script produces the follow-up list for the response team.
library(tidyverse)

stage_one <- read_csv("data/stage_one_sms.csv")
sap_data <- read_csv("data/SAP_CONTACTINFO.CSV")
stage_two <- read_csv("data/stage_two_phone_calls.csv")

# Keep only the useful columns from the stage-one export.
stage_one_cut <- stage_one %>%
  select("AdditionalTeamName", "FirstName", "LastName",
         "Last Updated Time", "Created Time", "Message Label", "Message Subject",
         "Message Sent Time", "Response Channel", "SMS Sent Time",
         "SMS Received Time", "SMS Undeliverable Time",
         "Voice Sent Time", "Voice Received Time", "Voice Acknowledged Time",
         "Voice Undeliverable Time", "Web Sent Time", "Response")

# Incorporate SAP data into a joined data set with contact details.
combined_data <- full_join(sap_data, stage_one_cut, by = c("Ident" = "AdditionalTeamName"))
remove_duplicate <- combined_data[!duplicated(combined_data$Ident), ]
missing_from_whisper <- filter(remove_duplicate, is.na(FirstName))
missing_from_SAP <- filter(remove_duplicate, is.na(Surname))

# Build the file for the HR response team: everyone in SAP with no response
# recorded in either stage.
filter_response_stage_2 <- filter(stage_two, !is.na(Response)) %>%
  mutate(responded = "stage 2")
filter_response_stage_1 <- filter(stage_one, !is.na(Response)) %>%
  mutate(responded = "stage 1") %>%
  mutate(Response = as.numeric(Response))
people_who_responded <- bind_rows(filter_response_stage_1, filter_response_stage_2) %>%
  mutate(Response_recieved = "yes") %>%
  select("AdditionalTeamName", "responded", "Response_recieved")
remove_dup_test <- people_who_responded[!duplicated(people_who_responded$AdditionalTeamName), ]
test_join <- left_join(sap_data, remove_dup_test, by = c("Ident" = "AdditionalTeamName")) %>%
  filter(is.na(Response_recieved))
results_from_whispir_both_stages <- left_join(stage_one_cut, remove_dup_test, by = c("AdditionalTeamName"))
hr_response_team_file <- left_join(sap_data, results_from_whispir_both_stages, by = c("Ident" = "AdditionalTeamName")) %>%
  filter(is.na(Response_recieved))
# BUG FIX: the original referenced an undefined object `cleaned`; the
# de-duplication is evidently meant to run on `hr_response_team_file`
# (the non-responder table built just above) -- confirm against intent.
remove_duplicates_no_response <- hr_response_team_file[!duplicated(hr_response_team_file$AdditionalTeamName), ]
data_for_HR <- remove_duplicates_no_response %>%
  rename(EmployeeIdent = AdditionalTeamName) %>%
  mutate(Row_ID = 1:n()) %>%
  select(Row_ID, everything()) %>%
  mutate(comments = " ")
# Reorder columns: row id, ident, stage-one columns, then SAP contact columns.
sort_hr_data <- data_for_HR[c(1, 2, 12:43, 3:11)]
write_csv(sort_hr_data, path = "processed_data/Final_Data_for_response_Team.csv")
write_csv(remove_duplicate, path = "processed_data/Final_data_output_all_data.csv")
write_csv(missing_from_whisper, path = "processed_data/Final_data_missing_from_whispir.csv")
write_csv(missing_from_SAP, path = "processed_data/Final_data_missing_from_sap.csv")
#******************************************
#playing with Summary generation
whisper_stage_One_Summary <-
|
# #####################################################################################
# R package stochvol by
# Gregor Kastner Copyright (C) 2013-2018
# Gregor Kastner and Darjus Hosszejni Copyright (C) 2019-
#
# This file is part of the R package stochvol: Efficient Bayesian
# Inference for Stochastic Volatility Models.
#
# The R package stochvol is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 or
# any later version of the License.
#
# The R package stochvol is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the R package stochvol. If that is not the case, please
# refer to <http://www.gnu.org/licenses/>.
# #####################################################################################
# Startup hook: announce the attached package's version and a citation hint,
# but only in interactive sessions or when the "verbose" option is set.
.onAttach <- function(lib, pkg) {
  if (interactive() || getOption("verbose")) {
    pkg_version <- utils::packageDescription(pkg)$Version
    startup_msg <- sprintf(
      "Package %s %s attached. To cite, see citation(\"%s\").",
      pkg, pkg_version, pkg
    )
    packageStartupMessage(startup_msg)
  }
}
# Unload hook: release the compiled code registered for the package.
.onUnload <- function(libpath) {
  library.dynam.unload("stochvol", libpath)
}
| /R/zzz.R | no_license | gregorkastner/stochvol | R | false | false | 1,428 | r | # #####################################################################################
# R package stochvol by
# Gregor Kastner Copyright (C) 2013-2018
# Gregor Kastner and Darjus Hosszejni Copyright (C) 2019-
#
# This file is part of the R package stochvol: Efficient Bayesian
# Inference for Stochastic Volatility Models.
#
# The R package stochvol is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 2 or
# any later version of the License.
#
# The R package stochvol is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty
# of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the R package stochvol. If that is not the case, please
# refer to <http://www.gnu.org/licenses/>.
# #####################################################################################
.onAttach <- function(lib, pkg) {
if(interactive() || getOption("verbose")){
packageStartupMessage(sprintf("Package %s %s attached. To cite, see citation(\"%s\").", pkg, utils::packageDescription(pkg)$Version, pkg))
}
}
.onUnload <- function (libpath) {
library.dynam.unload("stochvol", libpath)
}
|
# Schaum's Outline of Statistics, page 334: solve the normal equations for the
# least-squares regression of X1 on X2, X3 and X4, given means, standard
# deviations and pairwise correlations.
t <- 200    # common scale factor from the problem statement
x1 <- 75    # means
s1 <- 10    # standard deviations
x2 <- 24
s2 <- 5
x3 <- 15
s3 <- 3
x4 <- 36
s4 <- 6
r12 <- 0.9  # pairwise correlations r_ij between X_i and X_j
r13 <- 0.75
r14 <- 0.8
r23 <- 0.7
r24 <- 0.7
r34 <- 0.85
# Entries of the normal-equation system: n1..n3 are t*s_j^2 terms,
# n4..n9 are t*s_i*s_j*r_ij cross terms.
n1 <- t * s2^2
n2 <- t * s3^2
n3 <- t * s4^2
n4 <- t * s2 * s1 * r12
n5 <- t * s1 * s3 * r13
n6 <- t * s1 * s4 * r14
# BUG FIX: the original computed t*s1*s3*r23 and then halved it; the correct
# cross term is t*s2*s3*r23. The value is unchanged here only because
# s1 == 2*s2 in this particular problem.
n7 <- t * s2 * s3 * r23
n8 <- t * s2 * s4 * r24
n9 <- t * s4 * s3 * r34
# Symmetric coefficient matrix of the normal equations, and right-hand side.
y <- matrix(c(n1, n7, n8, n7, n2, n9, n8, n9, n3), ncol = 3, nrow = 3)
y
b <- matrix(c(n4, n5, n6), nrow = 3, ncol = 1)
e <- solve(y, b)   # regression coefficients for X2, X3, X4
e1 <- e[1]
e1 <- round(e1, digits = 4)
e2 <- e[2]
e2 <- round(e2, digits = 2)   # comes out as 0, so the X3 term drops out below
e3 <- e[3]
e3 <- round(e3, digits = 4)
# Intercept (the X3 term is omitted because e2 == 0 for this data set).
c <- x1 - x2 * e1 - e3 * x4
c <- round(c, digits = 0)
cat('X1 =', c, '+', e1, 'X2', e3, 'X4')
| /Schaum'S_Outline_Series_-_Theory_And_Problems_Of_Statistics_by_Murray_R._Spiegel/CH15/EX15.15.20/Ex15_15_20.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 575 | r | #PAGE=334
# Schaum's Outline of Statistics, page 334: solve the normal equations for the
# least-squares regression of X1 on X2, X3 and X4, given means, standard
# deviations and pairwise correlations.
t <- 200    # common scale factor from the problem statement
x1 <- 75    # means
s1 <- 10    # standard deviations
x2 <- 24
s2 <- 5
x3 <- 15
s3 <- 3
x4 <- 36
s4 <- 6
r12 <- 0.9  # pairwise correlations r_ij between X_i and X_j
r13 <- 0.75
r14 <- 0.8
r23 <- 0.7
r24 <- 0.7
r34 <- 0.85
# Entries of the normal-equation system: n1..n3 are t*s_j^2 terms,
# n4..n9 are t*s_i*s_j*r_ij cross terms.
n1 <- t * s2^2
n2 <- t * s3^2
n3 <- t * s4^2
n4 <- t * s2 * s1 * r12
n5 <- t * s1 * s3 * r13
n6 <- t * s1 * s4 * r14
# BUG FIX: the original computed t*s1*s3*r23 and then halved it; the correct
# cross term is t*s2*s3*r23. The value is unchanged here only because
# s1 == 2*s2 in this particular problem.
n7 <- t * s2 * s3 * r23
n8 <- t * s2 * s4 * r24
n9 <- t * s4 * s3 * r34
# Symmetric coefficient matrix of the normal equations, and right-hand side.
y <- matrix(c(n1, n7, n8, n7, n2, n9, n8, n9, n3), ncol = 3, nrow = 3)
y
b <- matrix(c(n4, n5, n6), nrow = 3, ncol = 1)
e <- solve(y, b)   # regression coefficients for X2, X3, X4
e1 <- e[1]
e1 <- round(e1, digits = 4)
e2 <- e[2]
e2 <- round(e2, digits = 2)   # comes out as 0, so the X3 term drops out below
e3 <- e[3]
e3 <- round(e3, digits = 4)
# Intercept (the X3 term is omitted because e2 == 0 for this data set).
c <- x1 - x2 * e1 - e3 * x4
c <- round(c, digits = 0)
cat('X1 =', c, '+', e1, 'X2', e3, 'X4')
|
# Fuzz-test fixture: replays a libFuzzer-found input against the internal
# breakfast:::setBitNumber() helper (package-internal, hence the `:::`).
testlist <- list(n = 177930240L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) | /breakfast/inst/testfiles/setBitNumber/libFuzzer_setBitNumber/setBitNumber_valgrind_files/1609961646-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 97 | r | testlist <- list(n = 177930240L)
result <- do.call(breakfast:::setBitNumber,testlist)
str(result) |
print("Hello World")

install.packages("httr")
# BUG FIX: the package name was misspelled "jsnolite".
install.packages("jsonlite")
library(httr)
library(jsonlite)
installed.packages()

# Fetch the current weather for Lublin from the OpenWeatherMap REST API and
# flatten the JSON response into a data frame.
endpoint <- "api.openweathermap.org/data/2.5/weather?q=Lublin&units=metric&appid=ccd2c7f8b414cadf0c4383ce0a541dc2"
getWeather <- GET(endpoint)
wetherText <- content(getWeather, "text"); wetherText
weatherJson <- fromJSON(wetherText, flatten = TRUE)
weatheerDF <- as.data.frame(weatherJson)
View(weatheerDF)

# Basic vector experiments: creation, coercion to logical, recycling.
x <- "to jest zmienna"
x <- 123
?vector
mojVector <- vector("numeric", 10)
mojVector
mojVector <- c(1, 2, 3, 4, FALSE)
mojVector
mojVector <- as.logical(mojVector); mojVector
mojVector2 <- c(-1, 0, 2, 3)
mojVector2 <- as.logical(mojVector2); mojVector2
a <- c(1, 2, 3, 4, 5)
b <- c(6, 7)
c <- a + b   # lengths 5 and 2: recycling with a warning
print(c)

# Factors.
plec <- c("mezczyzna", "kobieta", "mezczyzna", "kobieta", "kobieta")
# BUG FIX: `levels = ...` was accidentally placed inside c(), which made the
# level specification part of the data vector; it belongs to factor().
plec <- factor(c("mezczyzna", "kobieta", "mezczyzna", "kobieta", "kobieta"),
               levels = c("mezczyzna", "kobieta")); plec
class(plec)
plecf <- as.factor(plec)
unclass(plec)
plecf[3:5] <- NA
is.na(plecf)
plecf[is.na(plecf)]
complete.cases(plecf)
df <- data.frame(index = c(1, 2, 3), imie = c("jan", "marek", "Sonia"),
                 plec = factor(c("mezczyzna", "mezczyzna", "kobieta")))
df
getwd()

### Reading data from CSV
dfn <- read.csv("dane.csv", sep = ";"); View(dfn)
dfn2 <- read.csv2("dane.csv"); View(dfn)
dfn2$waga
len <- length(dfn2$wzrost)
for (i in 1:len) {
  print(dfn$wzrost[i])
  print(dfn2$waga[i] / (dfn2$wzrost[i] / 100)^2)
}
i <- 1
while (i <= len) {
  dfn2$bmi[i] <- (dfn2$waga[i] / (dfn2$wzrost[i] / 100)^2)
  i <- i + 1
}
# Vectorized BMI computation (no loop needed).
dfn2$bmi2 <- (dfn2$waga / (dfn2$wzrost / 100)^2)
# Print every element of `x` with the suffix "RSTUDIO" appended.
hello <- function(x = "hello") {
  greeting <- paste(x, "RSTUDIO", sep = " ")
  print(greeting)
}
hello(c("Witam", "pPrivaet"))
# Compute BMI = mass [kg] / (height [m])^2.
# Returns the string "dzielenie przez zero" ("division by zero") when any
# height is 0; otherwise a numeric vector of BMI values.
liczBMI <- function(masa, wzrost) {
  # BUG FIX: the original tested `masa` for zero, but the division is by
  # `wzrost` -- a zero height is what actually causes division by zero.
  k <- match(0, wzrost)
  if (!is.na(k)) {
    bmi <- "dzielenie przez zero"
  } else {
    bmi <- masa / (wzrost / 100)^2
  }
  bmi
}
liczBMI(masa = c(100, 200), wzrost = c(200, 200))
# Interactively prompt for "mass,height" (comma separated) and return the BMI.
liczBMI2 <- function() {
  prompt <- "podaj mase i wzrost oddzielone przecinkiem:"
  values <- as.numeric(strsplit(readline(prompt), ",")[[1]])
  bmi <- values[1] / (values[2] / 100)^2
  bmi
}
liczBMI2() | /rpierwszy.R | no_license | Viaceslav/pjwstk | R | false | false | 2,073 | r | print("Hello World")
install.packages("httr")
# BUG FIX: the package name was misspelled "jsnolite".
install.packages("jsonlite")
library(httr)
library(jsonlite)
installed.packages()

# Fetch the current weather for Lublin from the OpenWeatherMap REST API and
# flatten the JSON response into a data frame.
endpoint <- "api.openweathermap.org/data/2.5/weather?q=Lublin&units=metric&appid=ccd2c7f8b414cadf0c4383ce0a541dc2"
getWeather <- GET(endpoint)
wetherText <- content(getWeather, "text"); wetherText
weatherJson <- fromJSON(wetherText, flatten = TRUE)
weatheerDF <- as.data.frame(weatherJson)
View(weatheerDF)

# Basic vector experiments: creation, coercion to logical, recycling.
x <- "to jest zmienna"
x <- 123
?vector
mojVector <- vector("numeric", 10)
mojVector
mojVector <- c(1, 2, 3, 4, FALSE)
mojVector
mojVector <- as.logical(mojVector); mojVector
mojVector2 <- c(-1, 0, 2, 3)
mojVector2 <- as.logical(mojVector2); mojVector2
a <- c(1, 2, 3, 4, 5)
b <- c(6, 7)
c <- a + b   # lengths 5 and 2: recycling with a warning
print(c)

# Factors.
plec <- c("mezczyzna", "kobieta", "mezczyzna", "kobieta", "kobieta")
# BUG FIX: `levels = ...` was accidentally placed inside c(), which made the
# level specification part of the data vector; it belongs to factor().
plec <- factor(c("mezczyzna", "kobieta", "mezczyzna", "kobieta", "kobieta"),
               levels = c("mezczyzna", "kobieta")); plec
class(plec)
plecf <- as.factor(plec)
unclass(plec)
plecf[3:5] <- NA
is.na(plecf)
plecf[is.na(plecf)]
complete.cases(plecf)
df <- data.frame(index = c(1, 2, 3), imie = c("jan", "marek", "Sonia"),
                 plec = factor(c("mezczyzna", "mezczyzna", "kobieta")))
df
getwd()

### Reading data from CSV
dfn <- read.csv("dane.csv", sep = ";"); View(dfn)
dfn2 <- read.csv2("dane.csv"); View(dfn)
dfn2$waga
len <- length(dfn2$wzrost)
for (i in 1:len) {
  print(dfn$wzrost[i])
  print(dfn2$waga[i] / (dfn2$wzrost[i] / 100)^2)
}
i <- 1
while (i <= len) {
  dfn2$bmi[i] <- (dfn2$waga[i] / (dfn2$wzrost[i] / 100)^2)
  i <- i + 1
}
# Vectorized BMI computation (no loop needed).
dfn2$bmi2 <- (dfn2$waga / (dfn2$wzrost / 100)^2)
# Print every element of `x` with the suffix "RSTUDIO" appended.
hello <- function(x = "hello") {
  greeting <- paste(x, "RSTUDIO", sep = " ")
  print(greeting)
}
hello(c("Witam", "pPrivaet"))
# Compute BMI = mass [kg] / (height [m])^2.
# Returns the string "dzielenie przez zero" ("division by zero") when any
# height is 0; otherwise a numeric vector of BMI values.
liczBMI <- function(masa, wzrost) {
  # BUG FIX: the original tested `masa` for zero, but the division is by
  # `wzrost` -- a zero height is what actually causes division by zero.
  k <- match(0, wzrost)
  if (!is.na(k)) {
    bmi <- "dzielenie przez zero"
  } else {
    bmi <- masa / (wzrost / 100)^2
  }
  bmi
}
liczBMI(masa = c(100, 200), wzrost = c(200, 200))
# Interactively prompt for "mass,height" (comma separated) and return the BMI.
liczBMI2 <- function() {
  prompt <- "podaj mase i wzrost oddzielone przecinkiem:"
  values <- as.numeric(strsplit(readline(prompt), ",")[[1]])
  bmi <- values[1] / (values[2] / 100)^2
  bmi
}
liczBMI2()
# Tornado plot, zoomed in: one panel per alternative showing the ranking
# range (Low-High bars) for the criteria with Order > 13.
g1 <- ggplot(data = df, aes(x = Mid, y = Order))
a <- subset(df, df$Alternative == "RWH" & df$Order > 13)
b <- subset(df, df$Alternative == "Pond" & df$Order > 13)
c <- subset(df, df$Alternative == "PSF" & df$Order > 13)
d <- subset(df, df$Alternative == "MAR" & df$Order > 13)
e <- subset(df, df$Alternative == "TW" & df$Order > 13)

# Build one zoomed tornado panel. The five original panels differed only in
# their data subset, title, and x-axis label, so the shared layers live here.
make_tornado_panel <- function(panel_data, title, xlab = "") {
  g1 +
    geom_errorbarh(data = panel_data, aes(xmax = High, xmin = Low),
                   color = panel_data$Color, size = 3, height = 0.0) +
    geom_line(data = panel_data, aes(y = Order), color = "grey", size = 1.2) +
    labs(title = title, x = xlab, y = "") +
    theme_classic() +
    theme_bw() +
    theme(axis.text = element_text(color = "black", size = 10)) +
    theme(text = element_text(size = 10, family = "Garamond", color = "black")) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    scale_y_continuous(limits = c(13, 19), name = "",
                       breaks = panel_data$Order, labels = panel_data$Criteria) +
    scale_x_continuous(limits = c(-1.0, 1.0), breaks = c(-0.8, 0.8),
                       labels = c("Low", "High")) +
    theme(plot.title = element_text(margin = margin(b = -12))) +
    guides(colour = FALSE)
}

g2 <- make_tornado_panel(a, "RWH")
g3 <- make_tornado_panel(b, "Pond")
g4 <- make_tornado_panel(c, "PSF")
g5 <- make_tornado_panel(d, "MAR")
g6 <- make_tornado_panel(e, "TW", xlab = "Ranking")   # only bottom panel gets the x label

grid.arrange(g2, g3, g4, g5, g6, nrow = 5, ncol = 1)
| /TornadoPlotZoom.R | no_license | chelspeters7/BanglaMCDA | R | false | false | 5,148 | r | # Tornado Plot Zoomed IN
# Tornado plot, zoomed in to the criteria ranked above 13.
# Builds one panel per water-supply alternative (RWH, Pond, PSF, MAR, TW)
# from the data frame `df` (uses columns: Alternative, Criteria, Order,
# Low, Mid, High, Color) and stacks the panels in a single column.
#
# NOTE(review): requires ggplot2 and gridExtra to be attached, and `df`
# to exist in the calling environment.

#### Shared base layer: x = midpoint ranking, y = display order.
g1 <- ggplot(data = df, aes(x = Mid, y = Order))

#### One subset per alternative, restricted to the zoomed-in orders (> 13).
#### Names a-e are kept for backward compatibility with any downstream code;
#### note that `c` shadows the base::c data name (legal in R, since function
#### lookup skips non-function bindings, but easy to misread).
a <- subset(df, df$Alternative == "RWH" & df$Order > 13)
b <- subset(df, df$Alternative == "Pond" & df$Order > 13)
c <- subset(df, df$Alternative == "PSF" & df$Order > 13)
d <- subset(df, df$Alternative == "MAR" & df$Order > 13)
e <- subset(df, df$Alternative == "TW" & df$Order > 13)
#======================================================================================
#### Build one tornado panel on top of the shared base layer.
####   base  - ggplot object supplying the default data/aesthetics (g1).
####   dat   - subset of df for a single alternative.
####   title - panel title (the alternative's name).
####   xlab  - x-axis label; "" for every panel except the bottom one.
#### Replaces five copy-pasted, near-identical layer chains (which had
#### already drifted: one used `colour =`, the rest `color =`).
tornado_panel <- function(base, dat, title, xlab = "") {
  base +
    geom_errorbarh(data = dat, aes(xmax = High, xmin = Low),
                   color = dat$Color, size = 3, height = 0.0) +
    geom_line(data = dat, aes(y = Order), color = "grey", size = 1.2) +
    labs(title = title, x = xlab, y = "") +
    theme_classic() +
    theme_bw() +  # theme_bw() overrides theme_classic(); kept to match the original output
    theme(axis.text = element_text(color = "black", size = 10)) +
    theme(text = element_text(size = 10, family = "Garamond", color = "black")) +
    theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank()) +
    scale_y_continuous(limits = c(13, 19), name = "",
                       breaks = dat$Order, labels = dat$Criteria) +
    scale_x_continuous(limits = c(-1.0, 1.0), breaks = c(-0.8, 0.8),
                       labels = c("Low", "High")) +
    theme(plot.title = element_text(margin = margin(b = -12))) +
    guides(colour = FALSE)  # NOTE(review): logical guide values are deprecated in newer ggplot2; use "none" there
}
#======================================================================================
g2 <- tornado_panel(g1, a, "RWH")
g3 <- tornado_panel(g1, b, "Pond")
g4 <- tornado_panel(g1, c, "PSF")
g5 <- tornado_panel(g1, d, "MAR")
g6 <- tornado_panel(g1, e, "TW", xlab = "Ranking")
#======================================================================================
#### Stack the five panels vertically in one column.
grid.arrange(g2, g3, g4, g5, g6, nrow = 5, ncol = 1)
|
#### Fit, by MCMC, a Gaussian spatio-temporal model with spatially varying
#### linear time trends: each areal unit k receives a CAR-smoothed random
#### intercept phi_k and slope delta_k around an overall slope alpha, i.e.
####     Y_kt ~ N(x_kt' beta + offset_kt + phi_k + (alpha + delta_k) * t_c, nu2)
#### where t_c is a centred/scaled time index (see `time` below), and phi and
#### delta follow Leroux CAR priors with dependence parameters rho.int and
#### rho.slo respectively.  Returns an object of class "CARBayesST".
####
#### Arguments:
####   formula, data     - model formula and optional data frame (parsed by common.frame).
####   W                 - spatial neighbourhood matrix (checked/formatted by
####                       common.Wcheckformat.leroux; K areas, N time points).
####   burnin, n.sample, thin - MCMC control; n.keep = floor((n.sample - burnin)/thin).
####   prior.mean.*, prior.var.* - Gaussian prior moments for beta and alpha.
####   prior.nu2, prior.tau2     - c(shape, scale) pairs for the inverse-gamma
####                       priors on nu2 and on both tau2 parameters.
####   rho.slo, rho.int  - optional fixed values in [0, 1]; if NULL each is
####                       initialised at random and updated by Metropolis-Hastings.
####   verbose           - print a progress bar and timing information?
gaussian.CARlinear <- function(formula, data=NULL, W, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
#### `a` also records the start time used for the elapsed-time report at the end.
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
#### Working copy of the response for data augmentation: missing Y values
#### (which.miss == 0) are imputed from the current fitted model each iteration.
Y.DA <- Y
#### Check on the rho arguments
#### If a rho is NULL it is treated as unknown: initialised uniformly at random
#### and sampled by Metropolis-Hastings; otherwise it is held fixed at the
#### user-supplied value.
if(is.null(rho.int))
{
rho <- runif(1)
fix.rho.int <- FALSE
}else
{
rho <- rho.int
fix.rho.int <- TRUE
}
if(!is.numeric(rho)) stop("rho.int is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.slo))
{
lambda <- runif(1)
fix.rho.slo <- FALSE
}else
{
lambda <- rho.slo
fix.rho.slo <- TRUE
}
if(!is.numeric(lambda)) stop("rho.slo is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
#### CAR quantities
#### W is converted to triplet (sparse) form for the C-level CAR updates.
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
#### N = number of time periods; assumes N.all is an exact multiple of K.
N <- N.all / K
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Priors
#### Defaults: diffuse N(0, 100000) for beta and alpha; IG(1, 0.01) for nu2
#### and both tau2 parameters.
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
if(is.null(prior.mean.alpha)) prior.mean.alpha <- rep(0, 1)
if(is.null(prior.var.alpha)) prior.var.alpha <- rep(100000, 1)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
if(length(prior.mean.alpha)!=1) stop("the prior mean for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.alpha)) stop("the prior mean for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.alpha))!=0) stop("the prior mean for alpha has missing values.", call.=FALSE)
if(length(prior.var.alpha)!=1) stop("the prior variance for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.alpha)) stop("the prior variance for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.alpha))!=0) stop("the prior variance for alpha has missing values.", call.=FALSE)
if(min(prior.var.alpha) <=0) stop("the prior variance for alpha has elements less than zero", call.=FALSE)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
#############################
#### Initial parameter values
#############################
#### Centred and scaled time index: mean zero, so the intercepts and slopes
#### are (approximately) orthogonal.
time <-(1:N - mean(1:N))/N
#### Replicate time for every area (areas vary fastest within a time period).
time.all <- kronecker(time, rep(1,K))
#### Initialise beta and alpha by perturbing the GLM (least squares) estimates.
mod.glm <- glm(Y~X.standardised-1 + time.all, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
temp <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
beta <- temp[1:p]
alpha <- temp[(p+1)]
#### Initialise the random effects with small noise scaled to the residuals.
res.temp <- Y - as.numeric(X.standardised %*% beta) - time.all * alpha - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=K, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
nu2 <- runif(1, 0, res.sd)
#### Specify matrix quantities
#### All K x N matrices with areas in rows and time periods in columns.
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", delta)
#### Precision contribution of alpha's full conditional (sum over all t_kt^2).
alpha.offset1 <- sum(time.mat^2)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.alpha <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, K))
if(!fix.rho.int) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.slo) samples.lambda <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.int", "tau2.slo")
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
#### accept holds (rho accepted, rho proposed, lambda accepted, lambda proposed)
#### for the current 100-iteration tuning window; accept.all accumulates totals.
accept.all <- rep(0,4)
accept <- accept.all
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
#### Posterior shape parameters for the inverse-gamma full conditionals
#### (fixed across iterations; only the scale changes).
nu2.shape <- prior.nu2[1] + N*K/2
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + K/2
##############################
#### Specify spatial quantites
##############################
#### Create the determinant
#### Eigenvalues of the intrinsic CAR precision (diag(row sums) - W) give the
#### log-determinant of the Leroux precision as a cheap function of rho/lambda.
if(!fix.rho.int | !fix.rho.slo)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
}else
{}
if(!fix.rho.int) det.Q.rho <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
if(!fix.rho.slo) det.Q.lambda <- 0.5 * sum(log((lambda * Wstar.val + (1-lambda))))
#### Check for islands
#### When rho (or lambda) equals 1 the intrinsic CAR precision is rank
#### deficient, losing one degree of freedom per disconnected component, so
#### the corresponding tau2 shape parameter is reduced accordingly.
W.list<- mat2listw(W)
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Beta update quantities
#### X'X and the prior precision are constant, so precompute them for the
#### Gaussian full conditional of beta.
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
###########################
#### Run the Bayesian model
###########################
#### Start timer
if(verbose)
{
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
## Impute each missing observation from its Gaussian predictive distribution
## given the current parameter values.
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
## Conjugate inverse-gamma update from the squared residuals.
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat - alpha * time.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
## Conjugate multivariate-Gaussian update via the Cholesky factor of the
## full-conditional covariance.
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat - delta.time.mat - alpha * time.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from alpha
####################
## Conjugate univariate-Gaussian update for the overall time-trend slope.
fc.var <- 1 / (1 / prior.var.alpha + alpha.offset1 / nu2)
alpha.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat) * time.mat
alpha.offset2 <- sum(alpha.offset, na.rm=TRUE) / nu2
fc.mean <- fc.var * (alpha.offset2 + prior.mean.alpha / prior.var.alpha)
alpha <- rnorm(n=1, mean=fc.mean, sd=sqrt(fc.var))
####################
## Sample from phi
####################
## Block Gibbs update of the CAR random intercepts (C-level helper), then
## centre for identifiability with the overall intercept in beta.
phi.offset <- Y.DA.mat - offset.mat - regression.mat - delta.time.mat - alpha * time.mat
phi.offset2 <- apply(phi.offset,1, sum, na.rm=TRUE)
temp1 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, nu2, phi.offset2, rho, N)
phi <- temp1
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
## NOTE(review): under rho==1 only the component with islands==1 is mean
## centred here; confirm against the package's intended island handling.
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
####################
## Sample from delta
####################
## Same CAR update for the slopes; the final argument sum(time^2) replaces N
## as the per-area data weight because delta multiplies the time index.
delta.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - alpha * time.mat) * time.mat
delta.offset2 <- apply(delta.offset,1, sum, na.rm=TRUE)
temp2 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, delta, tau2.delta, nu2, delta.offset2, lambda, sum(time^2))
delta <- temp2
if(lambda <1)
{
delta <- delta - mean(delta)
}else
{
delta[which(islands==1)] <- delta[which(islands==1)] - mean(delta[which(islands==1)])
}
delta.time.mat <- apply(time.mat, 2, "*", delta)
#######################
## Sample from tau2.phi
#######################
## Conjugate inverse-gamma update using the CAR quadratic form phi' Q(rho) phi.
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
## Metropolis-Hastings with a truncated-normal random walk on [0, 1]; the
## hastings term corrects for the asymmetry of the truncated proposal.
if(!fix.rho.int)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.rho - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.rho <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#####################
## Sample from lambda
#####################
## Same truncated-normal Metropolis-Hastings scheme for the slope surface.
if(!fix.rho.slo)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Wstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.lambda - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.lambda <- det.Q.proposal
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
## Loglike is NA for missing observations (Y is NA there); the fitted values
## and loglike are only stored for kept (post-burnin, thinned) iterations.
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
samples.alpha[ele, ] <- alpha
if(!fix.rho.int) samples.rho[ele, ] <- rho
if(!fix.rho.slo) samples.lambda[ele, ] <- lambda
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilties
########################################
## Every 100 iterations, rescale the random-walk proposal sds towards a
## 40-50% acceptance rate, then reset the window counters.
k <- j/100
if(ceiling(k)==floor(k))
{
if(!fix.rho.int) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.slo) proposal.sd.lambda <- common.accceptrates2(accept[3:4], proposal.sd.lambda, 40, 50, 0.5)
accept.all <- accept.all + accept
accept <- rep(0,4)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
#### Compute the acceptance rates
#### Percentage acceptance over all iterations; NA if the parameter was fixed.
if(!fix.rho.int)
{
accept.rho <- 100 * accept.all[1] / accept.all[2]
}else
{
accept.rho <- NA
}
if(!fix.rho.slo)
{
accept.lambda <- 100 * accept.all[3] / accept.all[4]
}else
{
accept.lambda <- NA
}
#### beta, alpha, phi, delta are Gibbs updates, hence 100% acceptance.
accept.final <- c(rep(100,4), accept.rho, accept.lambda)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
#### Compute the fitted deviance
#### Deviance evaluated at the posterior means of all parameters; used by
#### common.modelfit for DIC-style model-fit criteria.
mean.phi <- apply(samples.phi, 2, mean)
mean.delta <- apply(samples.delta, 2, mean)
mean.alpha <- mean(samples.alpha)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.beta <- apply(samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
LP <- offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat
fitted.mean <- as.numeric(LP)
nu2.mean <- mean(samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### transform the parameters back to the origianl covariate scale.
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
#### Create a summary object
#### Rows: posterior median, 95% credible interval, sample size, acceptance
#### rate, effective sample size, and Geweke convergence diagnostic.
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(100,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(6, 7))
summary.hyper[1,1:3] <- quantile(samples.alpha, c(0.5, 0.025, 0.975))
summary.hyper[2,1:3] <- quantile(samples.tau2[ ,1], c(0.5, 0.025, 0.975))
summary.hyper[3,1:3] <- quantile(samples.tau2[ ,2], c(0.5, 0.025, 0.975))
summary.hyper[4,1:3] <- quantile(samples.nu2, c(0.5, 0.025, 0.975))
rownames(summary.hyper) <- c("alpha", "tau2.int", "tau2.slo", "nu2", "rho.int", "rho.slo")
summary.hyper[1, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.alpha)), geweke.diag(mcmc(samples.alpha))$z)
summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2[ ,1])), geweke.diag(mcmc(samples.tau2[ ,1]))$z)
summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2[ ,2])), geweke.diag(mcmc(samples.tau2[ ,2]))$z)
summary.hyper[4, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.nu2)), geweke.diag(mcmc(samples.nu2))$z)
#### Fixed rho/lambda are reported as degenerate (point-mass) summaries.
if(!fix.rho.int)
{
summary.hyper[5, 1:3] <- quantile(samples.rho, c(0.5, 0.025, 0.975))
summary.hyper[5, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples.rho), geweke.diag(samples.rho)$z)
}else
{
summary.hyper[5, 1:3] <- c(rho, rho, rho)
summary.hyper[5, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
summary.hyper[6, 1:3] <- quantile(samples.lambda, c(0.5, 0.025, 0.975))
summary.hyper[6, 4:7] <- c(n.keep, accept.lambda, effectiveSize(samples.lambda), geweke.diag(samples.lambda)$z)
}else
{
summary.hyper[6, 1:3] <- c(lambda, lambda, lambda)
summary.hyper[6, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
#### Compile and return the results
#### Harmonise samples in case of them not being generated
#### samples.rhoext collects whichever of rho/lambda were actually sampled,
#### so the returned `rho` element always exists.
if(fix.rho.int & fix.rho.slo)
{
samples.rhoext <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
samples.rhoext <- samples.lambda
names(samples.rhoext) <- "rho.slo"
}else if(!fix.rho.int & fix.rho.slo)
{
samples.rhoext <- samples.rho
names(samples.rhoext) <- "rho.int"
}else
{
samples.rhoext <- cbind(samples.rho, samples.lambda)
colnames(samples.rhoext) <- c("rho.int", "rho.slo")
}
if(n.miss==0) samples.Y = NA
samples <- list(beta=mcmc(samples.beta.orig), alpha=mcmc(samples.alpha), phi=mcmc(samples.phi), delta=mcmc(samples.delta), tau2=mcmc(samples.tau2), nu2=mcmc(samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(samples.fitted), Y=mcmc(samples.Y))
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - Spatially autocorrelated linear time trends\n")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
#### Finish by stating the time taken
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
| /CARBayesST/R/gaussian.CARlinear.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 20,117 | r | gaussian.CARlinear <- function(formula, data=NULL, W, burnin, n.sample, thin=1, prior.mean.beta=NULL, prior.var.beta=NULL, prior.mean.alpha=NULL, prior.var.alpha=NULL, prior.nu2=NULL, prior.tau2=NULL, rho.slo=NULL, rho.int=NULL, verbose=TRUE)
{
##############################################
#### Format the arguments and check for errors
##############################################
#### Verbose
a <- common.verbose(verbose)
#### Frame object
frame.results <- common.frame(formula, data, "gaussian")
N.all <- frame.results$n
p <- frame.results$p
X <- frame.results$X
X.standardised <- frame.results$X.standardised
X.sd <- frame.results$X.sd
X.mean <- frame.results$X.mean
X.indicator <- frame.results$X.indicator
offset <- frame.results$offset
Y <- frame.results$Y
which.miss <- frame.results$which.miss
n.miss <- frame.results$n.miss
Y.DA <- Y
#### Check on the rho arguments
if(is.null(rho.int))
{
rho <- runif(1)
fix.rho.int <- FALSE
}else
{
rho <- rho.int
fix.rho.int <- TRUE
}
if(!is.numeric(rho)) stop("rho.int is fixed but is not numeric.", call.=FALSE)
if(rho<0 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(rho>1 ) stop("rho.int is outside the range [0, 1].", call.=FALSE)
if(is.null(rho.slo))
{
lambda <- runif(1)
fix.rho.slo <- FALSE
}else
{
lambda <- rho.slo
fix.rho.slo <- TRUE
}
if(!is.numeric(lambda)) stop("rho.slo is fixed but is not numeric.", call.=FALSE)
if(lambda<0 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
if(lambda>1 ) stop("rho.slo is outside the range [0, 1].", call.=FALSE)
#### CAR quantities
W.quants <- common.Wcheckformat.leroux(W)
K <- W.quants$n
N <- N.all / K
W <- W.quants$W
W.triplet <- W.quants$W.triplet
W.n.triplet <- W.quants$n.triplet
W.triplet.sum <- W.quants$W.triplet.sum
n.neighbours <- W.quants$n.neighbours
W.begfin <- W.quants$W.begfin
#### Priors
if(is.null(prior.mean.beta)) prior.mean.beta <- rep(0, p)
if(is.null(prior.var.beta)) prior.var.beta <- rep(100000, p)
if(is.null(prior.tau2)) prior.tau2 <- c(1, 0.01)
if(is.null(prior.nu2)) prior.nu2 <- c(1, 0.01)
if(is.null(prior.mean.alpha)) prior.mean.alpha <- rep(0, 1)
if(is.null(prior.var.alpha)) prior.var.alpha <- rep(100000, 1)
prior.beta.check(prior.mean.beta, prior.var.beta, p)
prior.var.check(prior.tau2)
prior.var.check(prior.nu2)
if(length(prior.mean.alpha)!=1) stop("the prior mean for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.mean.alpha)) stop("the prior mean for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.mean.alpha))!=0) stop("the prior mean for alpha has missing values.", call.=FALSE)
if(length(prior.var.alpha)!=1) stop("the prior variance for alpha is the wrong length.", call.=FALSE)
if(!is.numeric(prior.var.alpha)) stop("the prior variance for alpha is not numeric.", call.=FALSE)
if(sum(is.na(prior.var.alpha))!=0) stop("the prior variance for alpha has missing values.", call.=FALSE)
if(min(prior.var.alpha) <=0) stop("the prior variance for alpha has elements less than zero", call.=FALSE)
#### MCMC quantities - burnin, n.sample, thin
common.burnin.nsample.thin.check(burnin, n.sample, thin)
#############################
#### Initial parameter values
#############################
time <-(1:N - mean(1:N))/N
time.all <- kronecker(time, rep(1,K))
mod.glm <- glm(Y~X.standardised-1 + time.all, offset=offset)
beta.mean <- mod.glm$coefficients
beta.sd <- sqrt(diag(summary(mod.glm)$cov.scaled))
temp <- rnorm(n=length(beta.mean), mean=beta.mean, sd=beta.sd)
beta <- temp[1:p]
alpha <- temp[(p+1)]
res.temp <- Y - as.numeric(X.standardised %*% beta) - time.all * alpha - offset
res.sd <- sd(res.temp, na.rm=TRUE)/5
phi <- rnorm(n=K, mean=0, sd = res.sd)
delta <- rnorm(n=K, mean=0, sd = res.sd)
tau2.phi <- var(phi)/10
tau2.delta <- var(delta)/10
nu2 <- runif(1, 0, res.sd)
#### Specify matrix quantities
offset.mat <- matrix(offset, nrow=K, ncol=N, byrow=FALSE)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
time.mat <- matrix(rep(time, K), byrow=TRUE, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", delta)
alpha.offset1 <- sum(time.mat^2)
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
###############################
#### Set up the MCMC quantities
###############################
#### Matrices to store samples
n.keep <- floor((n.sample - burnin)/thin)
samples.beta <- array(NA, c(n.keep, p))
samples.alpha <- array(NA, c(n.keep, 1))
samples.phi <- array(NA, c(n.keep, K))
samples.delta <- array(NA, c(n.keep, K))
if(!fix.rho.int) samples.rho <- array(NA, c(n.keep, 1))
if(!fix.rho.slo) samples.lambda <- array(NA, c(n.keep, 1))
samples.nu2 <- array(NA, c(n.keep, 1))
samples.tau2 <- array(NA, c(n.keep, 2))
colnames(samples.tau2) <- c("tau2.int", "tau2.slo")
samples.fitted <- array(NA, c(n.keep, N.all))
samples.loglike <- array(NA, c(n.keep, N.all))
if(n.miss>0) samples.Y <- array(NA, c(n.keep, n.miss))
#### Specify the Metropolis quantities
accept.all <- rep(0,4)
accept <- accept.all
proposal.sd.rho <- 0.02
proposal.sd.lambda <- 0.02
nu2.shape <- prior.nu2[1] + N*K/2
tau2.phi.shape <- prior.tau2[1] + K/2
tau2.delta.shape <- prior.tau2[1] + K/2
##############################
#### Specify spatial quantites
##############################
#### Create the determinant
if(!fix.rho.int | !fix.rho.slo)
{
Wstar <- diag(apply(W,1,sum)) - W
Wstar.eigen <- eigen(Wstar)
Wstar.val <- Wstar.eigen$values
}else
{}
if(!fix.rho.int) det.Q.rho <- 0.5 * sum(log((rho * Wstar.val + (1-rho))))
if(!fix.rho.slo) det.Q.lambda <- 0.5 * sum(log((lambda * Wstar.val + (1-lambda))))
#### Check for islands
W.list<- mat2listw(W)
W.nb <- W.list$neighbours
W.islands <- n.comp.nb(W.nb)
islands <- W.islands$comp.id
n.islands <- max(W.islands$nc)
if(rho==1) tau2.phi.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
if(lambda==1) tau2.delta.shape <- prior.tau2[1] + 0.5 * (K-n.islands)
#### Beta update quantities
data.precision.beta <- t(X.standardised) %*% X.standardised
if(length(prior.var.beta)==1)
{
prior.precision.beta <- 1 / prior.var.beta
}else
{
prior.precision.beta <- solve(diag(prior.var.beta))
}
###########################
#### Run the Bayesian model
###########################
#### Start timer
if(verbose)
{
cat("Generating", n.keep, "post burnin and thinned (if requested) samples.\n", sep = " ")
progressBar <- txtProgressBar(style = 3)
percentage.points<-round((1:100/100)*n.sample)
}else
{
percentage.points<-round((1:100/100)*n.sample)
}
#### Create the MCMC samples
for(j in 1:n.sample)
{
####################################
## Sample from Y - data augmentation
####################################
if(n.miss>0)
{
Y.DA[which.miss==0] <- rnorm(n=n.miss, mean=fitted[which.miss==0], sd=sqrt(nu2))
}else
{}
Y.DA.mat <- matrix(Y.DA, nrow=K, ncol=N, byrow=FALSE)
##################
## Sample from nu2
##################
nu2.offset <- as.numeric(Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat - alpha * time.mat)
nu2.scale <- prior.nu2[2] + sum(nu2.offset^2)/2
nu2 <- 1 / rgamma(1, nu2.shape, scale=(1/nu2.scale))
####################
## Sample from beta
####################
fc.precision <- prior.precision.beta + data.precision.beta / nu2
fc.var <- solve(fc.precision)
beta.offset <- as.numeric(Y.DA.mat - offset.mat - phi.mat - delta.time.mat - alpha * time.mat)
beta.offset2 <- t(X.standardised) %*% beta.offset / nu2 + prior.precision.beta %*% prior.mean.beta
fc.mean <- fc.var %*% beta.offset2
chol.var <- t(chol(fc.var))
beta <- fc.mean + chol.var %*% rnorm(p)
regression.mat <- matrix(X.standardised %*% beta, nrow=K, ncol=N, byrow=FALSE)
####################
## Sample from alpha
####################
fc.var <- 1 / (1 / prior.var.alpha + alpha.offset1 / nu2)
alpha.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - delta.time.mat) * time.mat
alpha.offset2 <- sum(alpha.offset, na.rm=TRUE) / nu2
fc.mean <- fc.var * (alpha.offset2 + prior.mean.alpha / prior.var.alpha)
alpha <- rnorm(n=1, mean=fc.mean, sd=sqrt(fc.var))
####################
## Sample from phi
####################
phi.offset <- Y.DA.mat - offset.mat - regression.mat - delta.time.mat - alpha * time.mat
phi.offset2 <- apply(phi.offset,1, sum, na.rm=TRUE)
temp1 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, phi, tau2.phi, nu2, phi.offset2, rho, N)
phi <- temp1
if(rho<1)
{
phi <- phi - mean(phi)
}else
{
phi[which(islands==1)] <- phi[which(islands==1)] - mean(phi[which(islands==1)])
}
phi.mat <- matrix(rep(phi, N), byrow=F, nrow=K)
####################
## Sample from delta
####################
delta.offset <- (Y.DA.mat - offset.mat - regression.mat - phi.mat - alpha * time.mat) * time.mat
delta.offset2 <- apply(delta.offset,1, sum, na.rm=TRUE)
temp2 <- gaussiancarupdate(W.triplet, W.begfin, W.triplet.sum, K, delta, tau2.delta, nu2, delta.offset2, lambda, sum(time^2))
delta <- temp2
if(lambda <1)
{
delta <- delta - mean(delta)
}else
{
delta[which(islands==1)] <- delta[which(islands==1)] - mean(delta[which(islands==1)])
}
delta.time.mat <- apply(time.mat, 2, "*", delta)
#######################
## Sample from tau2.phi
#######################
temp2.phi <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, rho)
tau2.phi.scale <- temp2.phi + prior.tau2[2]
tau2.phi <- 1 / rgamma(1, tau2.phi.shape, scale=(1/tau2.phi.scale))
#########################
## Sample from tau2.delta
#########################
temp2.delta <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, lambda)
tau2.delta.scale <- temp2.delta + prior.tau2[2]
tau2.delta <- 1 / rgamma(1, tau2.delta.shape, scale=(1/tau2.delta.scale))
##################
## Sample from rho
##################
if(!fix.rho.int)
{
proposal.rho <- rtruncnorm(n=1, a=0, b=1, mean=rho, sd=proposal.sd.rho)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, phi, phi, proposal.rho)
det.Q.proposal <- 0.5 * sum(log((proposal.rho * Wstar.val + (1-proposal.rho))))
logprob.current <- det.Q.rho - temp2.phi / tau2.phi
logprob.proposal <- det.Q.proposal - temp3 / tau2.phi
hastings <- log(dtruncnorm(x=rho, a=0, b=1, mean=proposal.rho, sd=proposal.sd.rho)) - log(dtruncnorm(x=proposal.rho, a=0, b=1, mean=rho, sd=proposal.sd.rho))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
rho <- proposal.rho
det.Q.rho <- det.Q.proposal
accept[1] <- accept[1] + 1
}else
{}
accept[2] <- accept[2] + 1
}else
{}
#####################
## Sample from lambda
#####################
if(!fix.rho.slo)
{
proposal.lambda <- rtruncnorm(n=1, a=0, b=1, mean=lambda, sd=proposal.sd.lambda)
temp3 <- quadform(W.triplet, W.triplet.sum, W.n.triplet, K, delta, delta, proposal.lambda)
det.Q.proposal <- 0.5 * sum(log((proposal.lambda * Wstar.val + (1-proposal.lambda))))
logprob.current <- det.Q.lambda - temp2.delta / tau2.delta
logprob.proposal <- det.Q.proposal - temp3 / tau2.delta
hastings <- log(dtruncnorm(x=lambda, a=0, b=1, mean=proposal.lambda, sd=proposal.sd.lambda)) - log(dtruncnorm(x=proposal.lambda, a=0, b=1, mean=lambda, sd=proposal.sd.lambda))
prob <- exp(logprob.proposal - logprob.current + hastings)
#### Accept or reject the proposal
if(prob > runif(1))
{
lambda <- proposal.lambda
det.Q.lambda <- det.Q.proposal
accept[3] <- accept[3] + 1
}else
{}
accept[4] <- accept[4] + 1
}else
{}
#########################
## Calculate the deviance
#########################
fitted <- as.numeric(offset.mat + regression.mat + phi.mat + delta.time.mat + alpha * time.mat)
loglike <- dnorm(Y, mean = fitted, sd = rep(sqrt(nu2),N.all), log=TRUE)
###################
## Save the results
###################
if(j > burnin & (j-burnin)%%thin==0)
{
ele <- (j - burnin) / thin
samples.beta[ele, ] <- beta
samples.phi[ele, ] <- phi
samples.delta[ele, ] <- delta
samples.alpha[ele, ] <- alpha
if(!fix.rho.int) samples.rho[ele, ] <- rho
if(!fix.rho.slo) samples.lambda[ele, ] <- lambda
samples.nu2[ele, ] <- nu2
samples.tau2[ele, ] <- c(tau2.phi, tau2.delta)
samples.fitted[ele, ] <- fitted
samples.loglike[ele, ] <- loglike
if(n.miss>0) samples.Y[ele, ] <- Y.DA[which.miss==0]
}else
{}
########################################
## Self tune the acceptance probabilties
########################################
k <- j/100
if(ceiling(k)==floor(k))
{
if(!fix.rho.int) proposal.sd.rho <- common.accceptrates2(accept[1:2], proposal.sd.rho, 40, 50, 0.5)
if(!fix.rho.slo) proposal.sd.lambda <- common.accceptrates2(accept[3:4], proposal.sd.lambda, 40, 50, 0.5)
accept.all <- accept.all + accept
accept <- rep(0,4)
}else
{}
################################
## print progress to the console
################################
if(j %in% percentage.points & verbose)
{
setTxtProgressBar(progressBar, j/n.sample)
}
}
#### end timer
if(verbose)
{
cat("\nSummarising results.")
close(progressBar)
}else
{}
###################################
#### Summarise and save the results
###################################
#### Compute the acceptance rates
if(!fix.rho.int)
{
accept.rho <- 100 * accept.all[1] / accept.all[2]
}else
{
accept.rho <- NA
}
if(!fix.rho.slo)
{
accept.lambda <- 100 * accept.all[3] / accept.all[4]
}else
{
accept.lambda <- NA
}
accept.final <- c(rep(100,4), accept.rho, accept.lambda)
names(accept.final) <- c("beta", "alpha", "phi", "delta", "rho.int", "rho.slo")
#### Compute the fitted deviance
mean.phi <- apply(samples.phi, 2, mean)
mean.delta <- apply(samples.delta, 2, mean)
mean.alpha <- mean(samples.alpha)
mean.phi.mat <- matrix(rep(mean.phi, N), byrow=F, nrow=K)
delta.time.mat <- apply(time.mat, 2, "*", mean.delta)
mean.beta <- apply(samples.beta,2,mean)
regression.mat <- matrix(X.standardised %*% mean.beta, nrow=K, ncol=N, byrow=FALSE)
LP <- offset.mat + regression.mat + mean.phi.mat + delta.time.mat + mean.alpha * time.mat
fitted.mean <- as.numeric(LP)
nu2.mean <- mean(samples.nu2)
deviance.fitted <- -2 * sum(dnorm(Y, mean = fitted.mean, sd = rep(sqrt(nu2.mean),N.all), log = TRUE), na.rm=TRUE)
#### Model fit criteria
modelfit <- common.modelfit(samples.loglike, deviance.fitted)
#### Create the fitted values and residuals
fitted.values <- apply(samples.fitted, 2, mean)
response.residuals <- as.numeric(Y) - fitted.values
pearson.residuals <- response.residuals /sqrt(nu2.mean)
residuals <- data.frame(response=response.residuals, pearson=pearson.residuals)
#### transform the parameters back to the origianl covariate scale.
samples.beta.orig <- common.betatransform(samples.beta, X.indicator, X.mean, X.sd, p, FALSE)
#### Create a summary object
samples.beta.orig <- mcmc(samples.beta.orig)
summary.beta <- t(apply(samples.beta.orig, 2, quantile, c(0.5, 0.025, 0.975)))
summary.beta <- cbind(summary.beta, rep(n.keep, p), rep(100,p), effectiveSize(samples.beta.orig), geweke.diag(samples.beta.orig)$z)
rownames(summary.beta) <- colnames(X)
colnames(summary.beta) <- c("Median", "2.5%", "97.5%", "n.sample", "% accept", "n.effective", "Geweke.diag")
summary.hyper <- array(NA, c(6, 7))
summary.hyper[1,1:3] <- quantile(samples.alpha, c(0.5, 0.025, 0.975))
summary.hyper[2,1:3] <- quantile(samples.tau2[ ,1], c(0.5, 0.025, 0.975))
summary.hyper[3,1:3] <- quantile(samples.tau2[ ,2], c(0.5, 0.025, 0.975))
summary.hyper[4,1:3] <- quantile(samples.nu2, c(0.5, 0.025, 0.975))
rownames(summary.hyper) <- c("alpha", "tau2.int", "tau2.slo", "nu2", "rho.int", "rho.slo")
summary.hyper[1, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.alpha)), geweke.diag(mcmc(samples.alpha))$z)
summary.hyper[2, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2[ ,1])), geweke.diag(mcmc(samples.tau2[ ,1]))$z)
summary.hyper[3, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.tau2[ ,2])), geweke.diag(mcmc(samples.tau2[ ,2]))$z)
summary.hyper[4, 4:7] <- c(n.keep, 100, effectiveSize(mcmc(samples.nu2)), geweke.diag(mcmc(samples.nu2))$z)
if(!fix.rho.int)
{
summary.hyper[5, 1:3] <- quantile(samples.rho, c(0.5, 0.025, 0.975))
summary.hyper[5, 4:7] <- c(n.keep, accept.rho, effectiveSize(samples.rho), geweke.diag(samples.rho)$z)
}else
{
summary.hyper[5, 1:3] <- c(rho, rho, rho)
summary.hyper[5, 4:7] <- rep(NA, 4)
}
if(!fix.rho.slo)
{
summary.hyper[6, 1:3] <- quantile(samples.lambda, c(0.5, 0.025, 0.975))
summary.hyper[6, 4:7] <- c(n.keep, accept.lambda, effectiveSize(samples.lambda), geweke.diag(samples.lambda)$z)
}else
{
summary.hyper[6, 1:3] <- c(lambda, lambda, lambda)
summary.hyper[6, 4:7] <- rep(NA, 4)
}
summary.results <- rbind(summary.beta, summary.hyper)
summary.results[ , 1:3] <- round(summary.results[ , 1:3], 4)
summary.results[ , 4:7] <- round(summary.results[ , 4:7], 1)
#### Compile and return the results
#### Harmonise samples in case of them not being generated
if(fix.rho.int & fix.rho.slo)
{
samples.rhoext <- NA
}else if(fix.rho.int & !fix.rho.slo)
{
samples.rhoext <- samples.lambda
names(samples.rhoext) <- "rho.slo"
}else if(!fix.rho.int & fix.rho.slo)
{
samples.rhoext <- samples.rho
names(samples.rhoext) <- "rho.int"
}else
{
samples.rhoext <- cbind(samples.rho, samples.lambda)
colnames(samples.rhoext) <- c("rho.int", "rho.slo")
}
if(n.miss==0) samples.Y = NA
samples <- list(beta=mcmc(samples.beta.orig), alpha=mcmc(samples.alpha), phi=mcmc(samples.phi), delta=mcmc(samples.delta), tau2=mcmc(samples.tau2), nu2=mcmc(samples.nu2), rho=mcmc(samples.rhoext), fitted=mcmc(samples.fitted), Y=mcmc(samples.Y))
model.string <- c("Likelihood model - Gaussian (identity link function)", "\nLatent structure model - Spatially autocorrelated linear time trends\n")
results <- list(summary.results=summary.results, samples=samples, fitted.values=fitted.values, residuals=residuals, modelfit=modelfit, accept=accept.final, localised.structure=NULL, formula=formula, model=model.string, X=X)
class(results) <- "CARBayesST"
#### Finish by stating the time taken
if(verbose)
{
b<-proc.time()
cat("Finished in ", round(b[3]-a[3], 1), "seconds.\n")
}else
{}
return(results)
}
|
library(igraph)
#' builds the graph (flat edge list: 1-2, 1-3, 2-4, 3-4, 3-5, 4-6, 5-7, 6-7)
# NOTE(review): prefer TRUE over T -- T is an ordinary variable and can be reassigned
G <- make_graph(c(1,2, 1,3, 2,4, 3,4, 3,5, 4,6, 5,7, 6,7), directed = T)
plot(G)
print("aaa")  # NOTE(review): leftover debug output -- consider removing
# duration of each vertex (index = vertex id); length 7 matches the 7 vertices
d <- c(1,4,5,7,2,1,1)
p<- all_simple_paths(G, from = 1, to = 7)  # all simple paths from vertex 1 to vertex 7
# take the first path returned by all_simple_paths
print(p[[1]])
# subset the duration vector with the vertices of the first path
print(d[p[[1]]])
# sum the durations along that path
print(sum(d[p[[1]]])) | /grupo-1/projeto-lista-7/exemplos-grafo.R | no_license | afcosta-ibm/analise-risco | R | false | false | 395 | r | library(igraph)
#' monta o grafo
G <- make_graph(c(1,2, 1,3, 2,4, 3,4, 3,5, 4,6, 5,7, 6,7), directed = T)
plot(G)
print("aaa")
d <- c(1,4,5,7,2,1,1)
p<- all_simple_paths(G, from = 1, to = 7)
# pega o primeiro caminho do all_simple_path
print(p[[1]])
# pega o subset do vetor de duracao do primeiro caminho do all_simple_path
print(d[p[[1]]])
# faz a soma da duracao
print(sum(d[p[[1]]])) |
# Smoke test: run lmQCM with default parameters on the Biobase sample
# ExpressionSet shipped with the package.
library(lmQCM)
library(Biobase)
data(sample.ExpressionSet)
# raw expression matrix extracted from the ExpressionSet
# (assumes features x samples layout, the Biobase default -- TODO confirm lmQCM expects this)
data = assayData(sample.ExpressionSet)$exprs
lmQCM(data)  # result is auto-printed at top level, not stored
| /tests/run_test_lmQCM.R | no_license | huangzhii/lmQCM | R | false | false | 116 | r | library(lmQCM)
library(Biobase)
data(sample.ExpressionSet)
data = assayData(sample.ExpressionSet)$exprs
lmQCM(data)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Publications.R
\name{deNewman2009Long-Term}
\alias{deNewman2009Long-Term}
\title{Long-Term Function in an Older Cohort-The Cardiovascular Health Study All Stars Study (2009) \emph{JOURNAL OF THE AMERICAN GERIATRICS SOCIETY}}
\description{
Newman, AB; Arnold, AM; Sachs, MC; Ives, DG; Cushman, M; Strotmeyer, ES; Ding, JZ; Kritchevsky, SB; Chaves, PHM; Fried, LP; Robbins, J
}
\details{
JOURNAL OF THE AMERICAN GERIATRICS SOCIETY (2009). 432-40:57. \url{http://www.ncbi.nlm.nih.gov/pubmed/19187412}
}
\concept{applied}
| /man/deNewman2009Long-Term.Rd | no_license | sachsmc/sachsmc.github.io | R | false | true | 596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Publications.R
\name{deNewman2009Long-Term}
\alias{deNewman2009Long-Term}
\title{Long-Term Function in an Older Cohort-The Cardiovascular Health Study All Stars Study (2009) \emph{JOURNAL OF THE AMERICAN GERIATRICS SOCIETY}}
\description{
Newman, AB; Arnold, AM; Sachs, MC; Ives, DG; Cushman, M; Strotmeyer, ES; Ding, JZ; Kritchevsky, SB; Chaves, PHM; Fried, LP; Robbins, J
}
\details{
JOURNAL OF THE AMERICAN GERIATRICS SOCIETY (2009). 432-40:57. \url{http://www.ncbi.nlm.nih.gov/pubmed/19187412}
}
\concept{applied}
|
# Ad-hoc debugging script: investigate why overtime (OT) plays are not being
# scraped and explore the structure of the raw NFL JSON API response.
library(testthat)
library(nflscrapR)
# Testing why OT is not being scraped
OTgame <- game_play_by_play(2017092403)    # 2017 game (ID presumably encodes the date 2017-09-24 -- verify)
OTgame09 <- game_play_by_play(2009100401)  # 2009 game for comparison
games2017 <- season_games(2017)
bal_rost_17 <- season_rosters(2017, "BAL", "RUNNING_BACK")
# Dump each play-by-play column's name and class for offline inspection.
# NOTE(review): hard-coded absolute path -- only works on the original author's machine.
data.frame(colnames(OTgame09),
           sapply(OTgame09, class)) %>%
  write.csv(file = "/Users/maksimhorowitz/Documents/PBP_Columns.csv")
# NOTE(review): GameID is not defined anywhere above -- assign it before running this line.
urlstring <- proper_jsonurl_formatting(GameID)
nfl_api_raw <- RJSONIO::fromJSON(RCurl::getURL(urlstring))  # fetch and parse the raw game JSON
names(nfl_api_raw[[1]]$home$stats)
# Flatten the first drive's plays into a data.frame for interactive viewing.
data.frame(do.call(rbind,nfl_api_raw[[1]]$drives[[1]]$plays)) %>% View
# Drill into nested play/player records by position (indices chosen by trial).
data.frame(do.call(rbind,nfl_api_raw[[1]][[3]][[1]][["plays"]][[3]][["players"]][[2]])) %>% View
nfl_api_raw[[1]][[3]][[1]][["plays"]][[4]][["players"]] %>% names
| /tests/testthat.R | no_license | ryurko/nflscrapR | R | false | false | 779 | r | library(testthat)
library(nflscrapR)
# Testing why OT is not being scraped
OTgame <- game_play_by_play(2017092403)
OTgame09 <- game_play_by_play(2009100401)
games2017 <- season_games(2017)
bal_rost_17 <- season_rosters(2017, "BAL", "RUNNING_BACK")
data.frame(colnames(OTgame09),
sapply(OTgame09, class)) %>%
write.csv(file = "/Users/maksimhorowitz/Documents/PBP_Columns.csv")
urlstring <- proper_jsonurl_formatting(GameID)
nfl_api_raw <- RJSONIO::fromJSON(RCurl::getURL(urlstring))
names(nfl_api_raw[[1]]$home$stats)
data.frame(do.call(rbind,nfl_api_raw[[1]]$drives[[1]]$plays)) %>% View
data.frame(do.call(rbind,nfl_api_raw[[1]][[3]][[1]][["plays"]][[3]][["players"]][[2]])) %>% View
nfl_api_raw[[1]][[3]][[1]][["plays"]][[4]][["players"]] %>% names
|
test_that("geos_unnest() works", {
expect_identical(
geos_unnest(NA_character_),
structure(list(NULL), class = "geos_geometry", lengths = 1L)
)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION(MULTIPOINT (30 10, 10 10), LINESTRING (0 0, 1 1), GEOMETRYCOLLECTION EMPTY)",
keep_multi = FALSE, keep_empty = FALSE, max_depth = 2
)
expect_identical(
geos_write_wkt(unnested),
c("POINT (30 10)", "POINT (10 10)", "LINESTRING (0 0, 1 1)")
)
expect_identical(attr(unnested, "lengths"), 3L)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION(MULTIPOINT (30 10, 10 10), LINESTRING (0 0, 1 1), GEOMETRYCOLLECTION EMPTY)",
keep_multi = FALSE, keep_empty = TRUE, max_depth = 2
)
expect_identical(
geos_write_wkt(unnested),
c("POINT (30 10)", "POINT (10 10)", "LINESTRING (0 0, 1 1)", "GEOMETRYCOLLECTION EMPTY")
)
expect_identical(attr(unnested, "lengths"), 4L)
})
test_that("geos_unnest() propagates CRS", {
expect_identical(
wk::wk_crs(geos_unnest(as_geos_geometry("POINT (1 2)", crs = 784))),
784
)
expect_identical(
wk::wk_crs(geos_unnest(as_geos_geometry("GEOMETRYCOLLECTION(POINT (1 2))", crs = 784))),
784
)
})
test_that("wk*_unnest(max_depth) is respected", {
unnested <- geos_unnest(
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
max_depth = 0
)
expect_identical(
geos_write_wkt(unnested),
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))"
)
expect_identical(attr(unnested, "lengths"), 1L)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
max_depth = 1
)
expect_identical(
geos_write_wkt(unnested),
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1)))"
)
expect_identical(attr(unnested, "lengths"), 1L)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
max_depth = 2
)
expect_identical(
geos_write_wkt(unnested),
"GEOMETRYCOLLECTION (POINT (0 1))"
)
expect_identical(attr(unnested, "lengths"), 1L)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
max_depth = 3
)
expect_identical(
geos_write_wkt(unnested),
"POINT (0 1)"
)
expect_identical(attr(unnested, "lengths"), 1L)
unnested <- geos_unnest(
"GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
max_depth = 4
)
expect_identical(
geos_write_wkt(unnested),
"POINT (0 1)"
)
expect_identical(attr(unnested, "lengths"), 1L)
})
| /tests/testthat/test-geos-unnest.R | permissive | morandiaye/geos | R | false | false | 2,641 | r |
test_that("geos_unnest() works", {
  # A missing geometry passes through as a single NULL element.
  expect_identical(
    geos_unnest(NA_character_),
    structure(list(NULL), class = "geos_geometry", lengths = 1L)
  )

  collection <- "GEOMETRYCOLLECTION(MULTIPOINT (30 10, 10 10), LINESTRING (0 0, 1 1), GEOMETRYCOLLECTION EMPTY)"

  # Dropping multi-parts and empties keeps only the atomic geometries.
  result <- geos_unnest(collection, keep_multi = FALSE, keep_empty = FALSE, max_depth = 2)
  expect_identical(
    geos_write_wkt(result),
    c("POINT (30 10)", "POINT (10 10)", "LINESTRING (0 0, 1 1)")
  )
  expect_identical(attr(result, "lengths"), 3L)

  # Keeping empties retains the empty collection as a fourth feature.
  result <- geos_unnest(collection, keep_multi = FALSE, keep_empty = TRUE, max_depth = 2)
  expect_identical(
    geos_write_wkt(result),
    c("POINT (30 10)", "POINT (10 10)", "LINESTRING (0 0, 1 1)", "GEOMETRYCOLLECTION EMPTY")
  )
  expect_identical(attr(result, "lengths"), 4L)
})
test_that("geos_unnest() propagates CRS", {
  # CRS must survive unnesting for both atomic and collection inputs.
  for (wkt in c("POINT (1 2)", "GEOMETRYCOLLECTION(POINT (1 2))")) {
    geom <- as_geos_geometry(wkt, crs = 784)
    expect_identical(wk::wk_crs(geos_unnest(geom)), 784)
  }
})
test_that("wk*_unnest(max_depth) is respected", {
  # geos_unnest() peels one collection level per depth step. With three nested
  # collections, depths 0-3 progressively unwrap, and a depth beyond the
  # nesting level (4) saturates at the fully unwrapped geometry.
  nested <- "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))"
  expected <- c(
    "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1))))",
    "GEOMETRYCOLLECTION (GEOMETRYCOLLECTION (POINT (0 1)))",
    "GEOMETRYCOLLECTION (POINT (0 1))",
    "POINT (0 1)",
    "POINT (0 1)"
  )
  for (depth in 0:4) {
    unnested <- geos_unnest(nested, max_depth = depth)
    expect_identical(geos_write_wkt(unnested), expected[depth + 1])
    # Exactly one output feature at every depth.
    expect_identical(attr(unnested, "lengths"), 1L)
  }
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_generation_functions.R
\name{sampleruleS}
\alias{sampleruleS}
\title{Return atomic rotation group 1 for seed i (1000 possible values)}
\usage{
sampleruleS(i0)
}
\arguments{
\item{i0}{An integer between 1 and 1000}
}
\value{
The atomic rotation group 1 for seed i
}
\description{
Return atomic rotation group 1 for seed i (1000 possible values)
}
\examples{
sampleruleS(1,1:160,1)
identical(sampleruleS(i0=1),samplerule(i=1,j=1:100,m=1))
identical(sampleruleS(i0=16),samplerule(i=1,1:100,m=16))
(checkrule<-function(){
i=sample(1:1000,1);m=sample(1:85,1);h=sample(1:8,1);
return(list(i=i,m=m,h=h,check=identical(sampleruleS(i0=i+(m-1)+c(0:3,12:15)[9-h]),samplerule(i,(8-h)*100+(1:100),m))))})()
identical((function(i,m){c(sapply((m+i-2)+c(1:4,13:16),sampleruleS))})(1,1),samplerule(1,1:800,1))
}
| /man/sampleruleS.Rd | no_license | DanielBonnery/pubBonneryChengLahiri2016 | R | false | true | 879 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/table_generation_functions.R
\name{sampleruleS}
\alias{sampleruleS}
\title{Return atomic rotation group 1 for seed i (1000 possible values)}
\usage{
sampleruleS(i0)
}
\arguments{
\item{i}{An integer between 1 and 1000}
}
\value{
The atomic rotation group 1 for seed i
}
\description{
Return atomic rotation group 1 for seed i (1000 possible values)
}
\examples{
sampleruleS(1,1:160,1)
identical(sampleruleS(i0=1),samplerule(i=1,j=1:100,m=1))
identical(sampleruleS(i0=16),samplerule(i=1,1:100,m=16))
(checkrule<-function(){
i=sample(1:1000,1);m=sample(1:85,1);h=sample(1:8,1);
return(list(i=i,m=m,h=h,check=identical(sampleruleS(i0=i+(m-1)+c(0:3,12:15)[9-h]),samplerule(i,(8-h)*100+(1:100),m))))})()
identical((function(i,m){c(sapply((m+i-2)+c(1:4,13:16),sampleruleS))})(1,1),samplerule(1,1:800,1))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RiskFactor_plot.linear.R
\name{RiskFactor_plot.linear}
\alias{RiskFactor_plot.linear}
\title{To plot Risk Factor plot for linear regression}
\usage{
RiskFactor_plot.linear(data, y, patient.names = "name",
scatter.margin = c(low = 0, left = 4, top = 3, right = 8),
scatter.color = c(Prediction = "red", Reality = "black"),
scatter.location = c(legend.x = 10, legend.y = 20),
scatter.size = c(axis = 1.5, lab = 1.5, points = 1.5, legend = 1.6,
cutoff = 1.9), heatmap.cellheight = 20, heatmap.cellwidth = 10,
cluster_cols = FALSE, cluster_rows = FALSE,
heatmap.color = c("green", "black", "red"), heatmap.size = c(row =
15, column = 15, legend = 10))
}
\arguments{
\item{data}{the data you want to plot}
\item{y}{the outcome (response) variable to be modelled}
\item{patient.names}{patient id}
\item{scatter.margin}{scatter.margin}
\item{scatter.color}{scatter.color}
\item{scatter.location}{scatter.location}
\item{scatter.size}{scatter.size}
\item{heatmap.cellheight}{heatmap.cellheight}
\item{heatmap.cellwidth}{heatmap.cellwidth}
\item{cluster_cols}{cluster_cols}
\item{cluster_rows}{cluster_rows}
\item{heatmap.color}{heatmap.color}
\item{heatmap.size}{heatmap.size}
}
\value{
Four plots drawn to the current graphics device (prediction-versus-reality scatter plots and risk-factor heatmaps)
}
\description{
To plot Risk Factor plot for linear regression
}
\examples{
RiskFactor_plot.linear(
data=linearData,
y="y2",
patient.names="name",
scatter.margin = c(low=0,left=4,top=3,right=8),
scatter.color=c(Prediction="red",Reality="black"),
scatter.location=c(legend.x=10,legend.y=20),
scatter.size=c(axis=1.5,lab=1.5,
points=1.5,legend=1.6,
cutoff=1.9),
heatmap.cellheight=20,
heatmap.cellwidth=10,
cluster_cols = FALSE,
cluster_rows = FALSE,
heatmap.color=c("green", "black", "red"),
heatmap.size=c(row=15,column=15,legend=10)
)
}
| /man/RiskFactor_plot.linear.Rd | no_license | yikeshu0611/onetree | R | false | true | 2,082 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RiskFactor_plot.linear.R
\name{RiskFactor_plot.linear}
\alias{RiskFactor_plot.linear}
\title{To plot Risk Factor plot for linear regression}
\usage{
RiskFactor_plot.linear(data, y, patient.names = "name",
scatter.margin = c(low = 0, left = 4, top = 3, right = 8),
scatter.color = c(Prediction = "red", Reality = "black"),
scatter.location = c(legend.x = 10, legend.y = 20),
scatter.size = c(axis = 1.5, lab = 1.5, points = 1.5, legend = 1.6,
cutoff = 1.9), heatmap.cellheight = 20, heatmap.cellwidth = 10,
cluster_cols = FALSE, cluster_rows = FALSE,
heatmap.color = c("green", "black", "red"), heatmap.size = c(row =
15, column = 15, legend = 10))
}
\arguments{
\item{data}{the data you want to plot}
\item{y}{the purpose var}
\item{patient.names}{patient id}
\item{scatter.margin}{scatter.margin}
\item{scatter.color}{scatter.color}
\item{scatter.location}{scatter.location}
\item{scatter.size}{scatter.size}
\item{heatmap.cellheight}{heatmap.cellheight}
\item{heatmap.cellwidth}{heatmap.cellwidth}
\item{cluster_cols}{cluster_cols}
\item{cluster_rows}{cluster_rows}
\item{heatmap.color}{heatmap.color}
\item{heatmap.size}{heatmap.size}
}
\value{
Four plots drawn to the current graphics device (prediction-versus-reality scatter plots and risk-factor heatmaps)
}
\description{
To plot Risk Factor plot for linear regression
}
\examples{
RiskFactor_plot.linear(
data=linearData,
y="y2",
patient.names="name",
scatter.margin = c(low=0,left=4,top=3,right=8),
scatter.color=c(Prediction="red",Reality="black"),
scatter.location=c(legend.x=10,legend.y=20),
scatter.size=c(axis=1.5,lab=1.5,
points=1.5,legend=1.6,
cutoff=1.9),
heatmap.cellheight=20,
heatmap.cellwidth=10,
cluster_cols = FALSE,
cluster_rows = FALSE,
heatmap.color=c("green", "black", "red"),
heatmap.size=c(row=15,column=15,legend=10)
)
}
|
library('bnlearn')
library('rhandsontable')
library('shiny')
library('shinydashboard')
library('dplyr')
library('visNetwork')
library('shinyWidgets')
library('tools')
library('shinyalert')
library('shinycssloaders')
library('rintrojs')
library('arules')
library('psych')
library("DT")
library("linkcomm")
library('igraph')
library("shinyBS")
library("HydeNet")
library("leaflet")
source('error.bar.R')
source('graph.custom.R')
source('custom.Modules.R')
source('dashboardthemes.R')
nm<-read.csv("name.txt")
th<-read.csv("theme.txt")
# Fork of shinydashboard::dashboardHeader() with the sidebar-toggle control
# removed, so the sidebar cannot be collapsed from the header. Builds the
# static top navbar with an optional title and right-aligned dropdown items.
#
# Args mirror dashboardHeader(): `...`/.list supply dropdown <li> items,
# `title` fills the logo area, `titleWidth` widens logo + offsets the navbar,
# `disable` hides the whole header.
myDashboardHeader <- function(..., title = NULL, titleWidth = NULL, disable = FALSE,
                              .list = NULL) {
  items <- c(list(...), .list)
  titleWidth <- validateCssUnit(titleWidth)

  # When a title width is supplied, emit CSS that widens the logo area and
  # pushes the navbar over by the same amount (desktop widths only).
  width_css <- NULL
  if (!is.null(titleWidth)) {
    css_template <- "\n @media (min-width: 768px) {\n .main-header > .navbar {\n margin-left: _WIDTH_;text-align: left;\n }\n .main-header .logo {\n width: _WIDTH_;\n }\n }\n "
    width_css <- tags$head(tags$style(HTML(
      gsub("_WIDTH_", titleWidth, css_template, fixed = TRUE)
    )))
  }

  # `if (disable)` with no else yields NULL, so the style attribute is
  # dropped entirely when the header is enabled.
  header_style <- if (disable) "display: none;"
  tags$header(
    class = "main-header", width_css, style = header_style,
    span(class = "logo", title),
    tags$nav(
      class = "navbar navbar-static-top", role = "navigation",
      span(shiny::icon("bars"), style = "display:none;"),
      div(class = "navbar-custom-menu",
          tags$ul(class = "nav navbar-nav", items))
    )
  )
}
dashboardPage(skin = "blue",
myDashboardHeader(title = nm$x,
titleWidth = "400"
#,tags$li(class = "dropdown", bsButton("homeIntro", label = NULL, icon = icon("question-circle", lib="font-awesome"), style = "primary", size = "large"))
),
dashboardSidebar(width = 50,
sidebarMenu(id = "sidebarMenu",
menuItem(text = "",
icon = shiny::icon("globe"),
tabName = "Structure"
)
)
),
dashboardBody(id ="dashboardBody",
# Include shinyalert Ui
useShinyalert(),
shinyDashboardThemes(
theme = th$x
),
# Include introjs UI
rintrojs::introjsUI(),
#shinythemes::themeSelector(),
#theme = shinytheme("united"),
tags$script(HTML("$('body').addClass('fixed');")),
shinydashboard::tabItems(
shinydashboard::tabItem(tabName = "Structure",
tabBox(id = "visula_tabs",
width = 12,
tabPanel("Bayesian Network",
fluidPage(
shiny::fluidRow(
shiny::column(2, dropdownButton(
fluidRow(column(6,actionButton("exactInference","Learn Exact Inference",class="butt")),column(6,materialSwitch(inputId = "exact", label = "Enable Exact Inferences", status = "primary", right = TRUE))),
hr(),
h4("Select evidence to add to the model"),
shiny::fluidRow(shiny::column(6,actionButton('insertBtn', 'Insert', class = "butt")),
shiny::column(6,actionButton('removeBtn', 'Remove', class = "butt"))
),
shiny::fluidRow(shiny::column(6,tags$div(id = 'placeholder1')),
shiny::column(6,tags$div(id = 'placeholder2'))
),
hr(),
h4("Select an event of interest"),
shiny::h5("Event Node:"),
shiny::selectInput("event",
label = NULL,
""),
shiny::h4("Display inference plot"),
shiny::fluidRow(shiny::column(5,actionButton('plotBtn', 'without error bars', class = "butt")),shiny::column(4,actionButton('plotStrengthBtn', 'with error bars', class = "butt"))),
hr(),
shiny::h4("No. of resampling iterations for error bars"),
textInput("numInterval", label = NULL,placeholder = 25),
selectInput('plotFont',label = "axis label font size",choices = c(0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10),selected = 1.5),
selectInput('valueFont',label = "plot value font size",choices = c(0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10),selected = 1.5),
label = "Inference Learning",circle = F, status = "primary", icon = icon("bar-chart-o"), width = "500px",tooltip = tooltipOptions(title = "Learn Inferences")
)),
shiny::column(9,shinyWidgets::radioGroupButtons(inputId = "bayesianOption",
choices = c("Bayesian Network","Fitted Local Distributions", "Infer Decisions"),
selected = "Bayesian Network",
justified = FALSE
))
),
shiny::conditionalPanel(
"input.bayesianOption=='Bayesian Network'",
shiny::column(11,
shiny::fluidRow(
shiny::column(2,
div(
h5("Nth Neigbors(of selection)"))),
shiny::column(2,style="padding-right:0px",
shiny::selectInput("neighbornodes",label = NULL,choices = "")),
shiny::column(1,
div(style = "position:absolute;right:0em;",
h5("Modules:"))),
shiny::column(1,style="padding-right:0px",
shiny::selectInput("moduleSelection",label = NULL,"graph")),
shiny::column(2,style="margin-right:20px",dropdownButton(
shiny::fluidRow(shiny::column(6,selectInput('moduleAlgo',label = NULL,choices = c("ward.D","ward.D2", "single", "complete", "average", "mcquitty", "median","centroid"))),shiny::column(1,bsButton("Bcommunities","Build Modules", style="primary"))),
label = "Module Detection",circle = F, status = "primary", width = "300px",tooltip = tooltipOptions(title = "Build modules in the graph")
)),
shiny::column(2,style = "margin-right:8px",
dropdownButton(
div(id="Bgraph",
h4('Group of variables:'),
shiny::fluidRow(shiny::column(6,selectizeInput('varselect',label = "Variables","",multiple = T)),
shiny::column(3,selectInput('varshape',label = "Shape","")),
shiny::column(3, actionButton('group','Group', style="margin-top:25px;"))
),
hr(),
h4('Vector of index:'),
shiny::fluidRow(shiny::column(6,textInput('varselectvector',label = "Variables")),
shiny::column(3,selectInput('varshape2',label = "Shape","")),
shiny::column(3, actionButton('group2','Group', style="margin-top:25px;"))
),
shiny::fluidRow(shiny::column(6,selectInput('modGroup',label = "modules",choices = "")),
shiny::column(3,selectInput('varshape3',label = "Shape","")),
shiny::column(3, actionButton('group3','Group', style="margin-top:25px;"))
),
shiny::fluidRow(shiny::column(6,h4('Visible Neighbors'),div(id = "graphChain",
sliderInput("degree", label = NULL,
min = 1, max = 10,
value = 2
))),
shiny::column(6,h4('Nth Neighbors'), div(id = "NChain",
sliderInput("degreeN", label = NULL,
min = 1, max = 10,
value = 2
)))
),
hr(),
div(id="graphLayout",
h4("Select Graph Layout"),
shiny::selectInput('graph_layout',label = NULL,"layout_nicely")),
selectInput('bayesFont',label = "Node Font",choices = c(1:100),selected = 20)
),
label = "Visual Settings",circle = F, status = "primary", icon = icon("gear"), width = "400px",tooltip = tooltipOptions(title = "graph settings")
)
),
shiny::column(1, bsButton('graphBtn', 'Refresh', icon = icon("refresh"),style = "primary"))),
withSpinner(visNetworkOutput("netPlot",height = "480px"), color= "#2E86C1")
)
),
shiny::conditionalPanel(
"input.bayesianOption=='Infer Decisions'",
dropdownButton(
sliderInput("NumBar", label = "No. of bars",min = 0, max = 1,value = 1,step=1),
actionButton("sortPlot","Sort X-axis"),
label = "Plot",circle = F, status = "primary", icon = icon("gear"), width = "400px",tooltip = tooltipOptions(title = "plot settings")
),
withSpinner(plotOutput("distPlot",height = "450px"), color="#2E86C1")
),
shiny::conditionalPanel(
"input.bayesianOption=='Fitted Local Distributions'",
selectInput("paramSelect",label = "Variable",""),
withSpinner(plotOutput("parameterPlot",height = "450px"),color="#2E86C1")
)
)
),
tabPanel("Decision Networks",
shinyWidgets::radioGroupButtons(inputId = "decisionOption",
choices = c("Decision Network","Policy Table"),
selected = "Decision Network",
justified = FALSE
),
conditionalPanel(
"input.decisionOption=='Decision Network'",
shiny::fluidRow(
shiny::column(2,dropdownButton(
shiny::fluidRow(shiny::column(6,selectInput("parents",selected = "", label = "Create payoff Node For:",choices = "",multiple = F))),
shiny::fluidRow(shiny::column(10,rHandsontableOutput("payoff"))),
br(),
shiny::fluidRow(shiny::column(6,actionButton("buildDecisionNet2",'build decision net', class = "butt"))),
h5("Set Decision Node"),
shiny::fluidRow(shiny::column(6,selectInput("decisionNode",label = NULL,choices = c())),shiny::column(6,actionButton("set_decision","Set Node",class = "butt"))),
br(),
shiny::fluidRow(shiny::column(6,actionButton("set_policy","Best Policy",class="butt"))),
br(),
label = "Build Network",circle = F, status = "primary", icon = icon("gear"), width = "500px",tooltip = tooltipOptions(title = "Build Network")
))),
shinycssloaders::withSpinner(visNetworkOutput("decisionPlot",height = "450px"),color="#2E86C1")
),
conditionalPanel(
"input.decisionOption=='Policy Table'",
shinycssloaders::withSpinner(DT::dataTableOutput("policyPlot",height = "150px"),color="#2E86C1")
)
),
tabPanel("States", fluidPage(shiny::fluidRow(shiny::column(12, leafletOutput("myPlot2", height = 1000)))))
)
)
)
)
)
| /inst/cd/ui.R | no_license | SAFE-ICU/Longevity_Gap_Action | R | false | false | 23,486 | r | library('bnlearn')
library('rhandsontable')
library('shiny')
library('shinydashboard')
library('dplyr')
library('visNetwork')
library('shinyWidgets')
library('tools')
library('shinyalert')
library('shinycssloaders')
library('rintrojs')
library('arules')
library('psych')
library("DT")
library("linkcomm")
library('igraph')
library("shinyBS")
library("HydeNet")
library("leaflet")
source('error.bar.R')
source('graph.custom.R')
source('custom.Modules.R')
source('dashboardthemes.R')
nm<-read.csv("name.txt")
th<-read.csv("theme.txt")
myDashboardHeader <- function (..., title = NULL, titleWidth = NULL, disable = FALSE,
.list = NULL) {
items <- c(list(...), .list)
# lapply(items, tagAssert, type = "li", class = "dropdown")
titleWidth <- validateCssUnit(titleWidth)
custom_css <- NULL
if (!is.null(titleWidth)) {
custom_css <- tags$head(tags$style(HTML(gsub("_WIDTH_",
titleWidth, fixed = TRUE, "\n @media (min-width: 768px) {\n .main-header > .navbar {\n margin-left: _WIDTH_;text-align: left;\n }\n .main-header .logo {\n width: _WIDTH_;\n }\n }\n "))))
}
tags$header(class = "main-header", custom_css, style = if (disable)
"display: none;", span(class = "logo", title), tags$nav(class = "navbar navbar-static-top",
role = "navigation", span(shiny::icon("bars"), style = "display:none;"),
# a(href = "#", class = "sidebar-toggle", `data-toggle` = "offcanvas",
# role = "button", span(class = "sr-only", "Toggle navigation")),
div(class = "navbar-custom-menu", tags$ul(class = "nav navbar-nav",
items))))
}
# ---- Top-level UI ---------------------------------------------------------
# shinydashboard page: custom header (title from name.txt), a 50px icon-only
# sidebar with a single "Structure" tab, and a body holding a tabBox with
# three panels: "Bayesian Network", "Decision Networks" and "States".
dashboardPage(skin = "blue",
myDashboardHeader(title = nm$x,
titleWidth = "400"
#,tags$li(class = "dropdown", bsButton("homeIntro", label = NULL, icon = icon("question-circle", lib="font-awesome"), style = "primary", size = "large"))
),
# Sidebar is collapsed to 50px so only the globe icon is visible.
dashboardSidebar(width = 50,
sidebarMenu(id = "sidebarMenu",
menuItem(text = "",
icon = shiny::icon("globe"),
tabName = "Structure"
)
)
),
dashboardBody(id ="dashboardBody",
# Include shinyalert Ui
# NOTE(review): useShinyalert() is deprecated in shinyalert >= 2.0 and can be
# dropped once the installed package version is confirmed.
useShinyalert(),
# Dashboard theme name is read from theme.txt at app start-up.
shinyDashboardThemes(
theme = th$x
),
# Include introjs UI
rintrojs::introjsUI(),
#shinythemes::themeSelector(),
#theme = shinytheme("united"),
# Keep header/sidebar fixed while the body scrolls.
tags$script(HTML("$('body').addClass('fixed');")),
shinydashboard::tabItems(
shinydashboard::tabItem(tabName = "Structure",
tabBox(id = "visula_tabs",
width = 12,
# ---- Tab 1: Bayesian network --------------------------------------------
tabPanel("Bayesian Network",
fluidPage(
shiny::fluidRow(
# Dropdown holding all inference controls; evidence selector rows are
# inserted/removed dynamically into placeholder1/placeholder2 by the server.
shiny::column(2, dropdownButton(
fluidRow(column(6,actionButton("exactInference","Learn Exact Inference",class="butt")),column(6,materialSwitch(inputId = "exact", label = "Enable Exact Inferences", status = "primary", right = TRUE))),
hr(),
h4("Select evidence to add to the model"),
shiny::fluidRow(shiny::column(6,actionButton('insertBtn', 'Insert', class = "butt")),
shiny::column(6,actionButton('removeBtn', 'Remove', class = "butt"))
),
shiny::fluidRow(shiny::column(6,tags$div(id = 'placeholder1')),
shiny::column(6,tags$div(id = 'placeholder2'))
),
hr(),
h4("Select an event of interest"),
shiny::h5("Event Node:"),
shiny::selectInput("event",
label = NULL,
""),
shiny::h4("Display inference plot"),
shiny::fluidRow(shiny::column(5,actionButton('plotBtn', 'without error bars', class = "butt")),shiny::column(4,actionButton('plotStrengthBtn', 'with error bars', class = "butt"))),
hr(),
shiny::h4("No. of resampling iterations for error bars"),
textInput("numInterval", label = NULL,placeholder = 25),
selectInput('plotFont',label = "axis label font size",choices = c(0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10),selected = 1.5),
selectInput('valueFont',label = "plot value font size",choices = c(0.5,1,1.5,2,2.5,3,3.5,4,4.5,5,5.5,6,6.5,7,7.5,8,8.5,9,9.5,10),selected = 1.5),
label = "Inference Learning",circle = F, status = "primary", icon = icon("bar-chart-o"), width = "500px",tooltip = tooltipOptions(title = "Learn Inferences")
)),
# This radio group selects which of the three conditionalPanels below shows.
shiny::column(9,shinyWidgets::radioGroupButtons(inputId = "bayesianOption",
choices = c("Bayesian Network","Fitted Local Distributions", "Infer Decisions"),
selected = "Bayesian Network",
justified = FALSE
))
),
# Panel A: interactive visNetwork graph plus visual/module settings.
shiny::conditionalPanel(
"input.bayesianOption=='Bayesian Network'",
shiny::column(11,
shiny::fluidRow(
shiny::column(2,
div(
h5("Nth Neigbors(of selection)"))),
shiny::column(2,style="padding-right:0px",
shiny::selectInput("neighbornodes",label = NULL,choices = "")),
shiny::column(1,
div(style = "position:absolute;right:0em;",
h5("Modules:"))),
shiny::column(1,style="padding-right:0px",
shiny::selectInput("moduleSelection",label = NULL,"graph")),
# Module detection: pick a hclust agglomeration method and build modules.
shiny::column(2,style="margin-right:20px",dropdownButton(
shiny::fluidRow(shiny::column(6,selectInput('moduleAlgo',label = NULL,choices = c("ward.D","ward.D2", "single", "complete", "average", "mcquitty", "median","centroid"))),shiny::column(1,bsButton("Bcommunities","Build Modules", style="primary"))),
label = "Module Detection",circle = F, status = "primary", width = "300px",tooltip = tooltipOptions(title = "Build modules in the graph")
)),
# Visual settings: node grouping (by name, index vector, or module),
# neighbor-depth sliders, graph layout and node font size.
shiny::column(2,style = "margin-right:8px",
dropdownButton(
div(id="Bgraph",
h4('Group of variables:'),
shiny::fluidRow(shiny::column(6,selectizeInput('varselect',label = "Variables","",multiple = T)),
shiny::column(3,selectInput('varshape',label = "Shape","")),
shiny::column(3, actionButton('group','Group', style="margin-top:25px;"))
),
hr(),
h4('Vector of index:'),
shiny::fluidRow(shiny::column(6,textInput('varselectvector',label = "Variables")),
shiny::column(3,selectInput('varshape2',label = "Shape","")),
shiny::column(3, actionButton('group2','Group', style="margin-top:25px;"))
),
shiny::fluidRow(shiny::column(6,selectInput('modGroup',label = "modules",choices = "")),
shiny::column(3,selectInput('varshape3',label = "Shape","")),
shiny::column(3, actionButton('group3','Group', style="margin-top:25px;"))
),
shiny::fluidRow(shiny::column(6,h4('Visible Neighbors'),div(id = "graphChain",
sliderInput("degree", label = NULL,
min = 1, max = 10,
value = 2
))),
shiny::column(6,h4('Nth Neighbors'), div(id = "NChain",
sliderInput("degreeN", label = NULL,
min = 1, max = 10,
value = 2
)))
),
hr(),
div(id="graphLayout",
h4("Select Graph Layout"),
shiny::selectInput('graph_layout',label = NULL,"layout_nicely")),
selectInput('bayesFont',label = "Node Font",choices = c(1:100),selected = 20)
),
label = "Visual Settings",circle = F, status = "primary", icon = icon("gear"), width = "400px",tooltip = tooltipOptions(title = "graph settings")
)
),
shiny::column(1, bsButton('graphBtn', 'Refresh', icon = icon("refresh"),style = "primary"))),
withSpinner(visNetworkOutput("netPlot",height = "480px"), color= "#2E86C1")
)
),
# Panel B: inferred-decision bar plot with bar-count/sorting controls.
shiny::conditionalPanel(
"input.bayesianOption=='Infer Decisions'",
dropdownButton(
sliderInput("NumBar", label = "No. of bars",min = 0, max = 1,value = 1,step=1),
actionButton("sortPlot","Sort X-axis"),
label = "Plot",circle = F, status = "primary", icon = icon("gear"), width = "400px",tooltip = tooltipOptions(title = "plot settings")
),
withSpinner(plotOutput("distPlot",height = "450px"), color="#2E86C1")
),
# Panel C: per-variable fitted local distribution plot.
shiny::conditionalPanel(
"input.bayesianOption=='Fitted Local Distributions'",
selectInput("paramSelect",label = "Variable",""),
withSpinner(plotOutput("parameterPlot",height = "450px"),color="#2E86C1")
)
)
),
# ---- Tab 2: decision networks -------------------------------------------
tabPanel("Decision Networks",
shinyWidgets::radioGroupButtons(inputId = "decisionOption",
choices = c("Decision Network","Policy Table"),
selected = "Decision Network",
justified = FALSE
),
conditionalPanel(
"input.decisionOption=='Decision Network'",
shiny::fluidRow(
# Build controls: payoff-node editor (rhandsontable), decision-node
# selection, and best-policy computation.
shiny::column(2,dropdownButton(
shiny::fluidRow(shiny::column(6,selectInput("parents",selected = "", label = "Create payoff Node For:",choices = "",multiple = F))),
shiny::fluidRow(shiny::column(10,rHandsontableOutput("payoff"))),
br(),
shiny::fluidRow(shiny::column(6,actionButton("buildDecisionNet2",'build decision net', class = "butt"))),
h5("Set Decision Node"),
shiny::fluidRow(shiny::column(6,selectInput("decisionNode",label = NULL,choices = c())),shiny::column(6,actionButton("set_decision","Set Node",class = "butt"))),
br(),
shiny::fluidRow(shiny::column(6,actionButton("set_policy","Best Policy",class="butt"))),
br(),
label = "Build Network",circle = F, status = "primary", icon = icon("gear"), width = "500px",tooltip = tooltipOptions(title = "Build Network")
))),
shinycssloaders::withSpinner(visNetworkOutput("decisionPlot",height = "450px"),color="#2E86C1")
),
conditionalPanel(
"input.decisionOption=='Policy Table'",
shinycssloaders::withSpinner(DT::dataTableOutput("policyPlot",height = "150px"),color="#2E86C1")
)
),
# ---- Tab 3: leaflet map of states ---------------------------------------
tabPanel("States", fluidPage(shiny::fluidRow(shiny::column(12, leafletOutput("myPlot2", height = 1000)))))
)
)
)
)
)
|
library(rgdal)
library(rgeos)
library(raster)
library(sp)
library(spdep) # for calculating gabriel graph
library(leastcostpath) # for calculating LCPs
library(rgrass7) # for calculating viewsheds
# use_sp() ensures that rgrass7 uses sp rather than stars library
use_sp()
library(xml2) # for creating cairnfield SpatialPoints from ADS records
library(tmap) # for producing maps
#### READ DDV FUNCTION AND MAKE AVAILABLE TO CURRENT SCRIPT ####
source("./R/Direction Dependent Visibility.R")
#### LOAD AND MODIFY NECESSARY FILES ####
# Study area: the "Cumbria High Fells" National Character Area polygon.
NCA <- readOGR("./Data/National_Character_Areas_England/National_Character_Areas_England.shp")
high_fells <- NCA[NCA$JCANAME == "Cumbria High Fells",]
# OS Terrain 50 elevation tiles. Fix: escape the dot so only real ".asc"
# files match — list.files(pattern=) is a regex, and the original ".asc$"
# would also match names like "dataXasc".
elev_files <- list.files(path = "./Data/OS50/", pattern = "\\.asc$", full.names = TRUE, recursive = TRUE)
elev_list <- lapply(elev_files, raster::raster)
elev_osgb <- do.call(raster::merge, elev_list)
# Crop the merged DEM to the study area plus a 1 km buffer.
elev_osgb <- raster::crop(elev_osgb, rgeos::gBuffer(as(raster::extent(high_fells), "SpatialPolygons"), width = 1000))
# Stamps CRS metadata only (no reprojection) so downstream overlays agree;
# assumes both layers are already on the OSGB grid — TODO confirm.
crs(high_fells) <- crs(elev_osgb)
waterbodies <- readOGR("./Data/Waterbodies/WFD_Lake_Water_Bodies_Cycle_2.shp")
waterbodies <- raster::crop(waterbodies, elev_osgb)
crs(waterbodies) <- crs(elev_osgb)
#### CREATE SPATIALPOINTS OF BRONZE AGE CAIRNS ####
# Parse cairn coordinates out of the ADS XML records, one SpatialPoints
# object per file, then merge and de-duplicate.
# Fix: escape the dot and anchor the pattern so only *.xml files match.
cairns_files <- list.files(path = "./Data/Bronze Age Cairnfields/", pattern = "\\.xml$", full.names = TRUE)
# Preallocate the list and iterate with seq_along() so an empty directory
# yields zero iterations (the original 1:length(...) would produce 1:0).
cairns <- vector("list", length(cairns_files))
for (i in seq_along(cairns_files)) {
xml_doc <- xml2::read_xml(cairns_files[i])
x <- as.numeric(xml2::xml_text(xml2::xml_find_all(xml_doc, "//ns:x")))
y <- as.numeric(xml2::xml_text(xml2::xml_find_all(xml_doc, "//ns:y")))
cairns[[i]] <- SpatialPoints(coords = cbind(x, y))
}
cairns <- do.call(rbind, cairns)
# remove cairns with duplicate coordinates
cairns <- cairns[!duplicated(cairns@coords),]
# remove cairns that are in the same raster cell - this is to ensure that LCPs aren't calculated from two cairns within the same raster cell
cairns <- cairns[!duplicated(cellFromXY(elev_osgb, cairns)),]
cairns <- raster::crop(x = cairns, high_fells)
# Adding an ID column promotes cairns to a SpatialPointsDataFrame, which
# writeOGR() requires; seq_len() is safe if no cairns remain.
cairns$ID <- seq_len(length(cairns))
writeOGR(obj = cairns, dsn = "./Data/Bronze Age Cairnfields", layer = "cairns", driver = "ESRI Shapefile")
#### CALCULATE VIEWSHEDS FROM BRONZE AGE CAIRNS ####
# GRASS GIS (driven via rgrass7) computes each binary viewshed; the paths
# below are machine-specific.
GRASS_loc <- "D:/GRASS GIS 7.6.0"
temp_loc <- "C:/"
initGRASS(gisBase = GRASS_loc,
gisDbase = temp_loc, location = "visibility",
mapset = "PERMANENT", override = TRUE)
# set coordinate system as OSGB
execGRASS("g.proj", flags = c("c"), proj4 = "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +towgs84=446.448,-125.157,542.06,0.15,0.247,0.842,-20.489 +units=m +no_defs")
# write dem to GRASS
writeRAST(as(elev_osgb, "SpatialGridDataFrame"), "dem", overwrite = TRUE)
execGRASS("g.region", raster = "dem", flags = "p")
# Accumulator raster: after the loop each cell holds the number of cairns
# from which it is visible.
viewshed <- elev_osgb
viewshed[] <- 0
observer_height <- 1.65
distance <- 6000
locations <- cairns@coords
# Fix: seq_len() instead of 1:length() so the loop body never runs when
# there are no cairns.
for (i in seq_len(length(cairns))) {
print(paste0("Iteration Number: ", i))
# "b" flag -> binary (visible / not visible) viewshed for this cairn.
execGRASS("r.viewshed", flags = c("overwrite","b"), parameters = list(input = "dem", output = "viewshed", coordinates = unlist(c(locations[i,])), observer_elevation = observer_height, max_distance = distance))
single.viewshed <- readRAST("viewshed")
single.viewshed <- raster(single.viewshed, layer=1, values=TRUE)
viewshed <- viewshed + single.viewshed
}
# calculate how much of the study area is visible
# (visible cell count * cell area) / total area * 100 — printed, not stored.
(sum(viewshed[] > 0) * res(elev_osgb)[1]^2) / (ncell(elev_osgb) * res(elev_osgb)[1]^2) * 100
# Hide never-seen cells so they render transparent on the maps below.
viewshed[viewshed == 0] <- NA
#writeRaster(x = viewshed, filename = "./Outputs/viewsheds/cumulative_viewshed.tif", overwrite = TRUE)
#### CALCULATE DELAUNEY TRIANGLE FROM CAIRN LOCATIONS ####
# NOTE(review): despite the header this builds a Gabriel graph (a subgraph
# of the Delaunay triangulation), per spdep::gabrielneigh().
neighbour_pts <- spdep::gabrielneigh(cairns)
# Two-column from/to matrix of cairn indices to connect with LCPs.
locs_matrix <- base::cbind(neighbour_pts$from, neighbour_pts$to)
#### CREATE SLOPE AND WATERBODIES COST SURFACES ####
# Walking-cost surface (modified Tobler, 16 neighbours) multiplied by a
# barrier surface with zero conductance over lakes, so paths route around
# water.
slope_cs <- leastcostpath::create_slope_cs(dem = elev_osgb, cost_function = "modified tobler", neighbours = 16)
waterbodies_cs <- leastcostpath::create_barrier_cs(raster = elev_osgb, barrier = waterbodies, neighbours = 16, field = 0, background = 1)
final_cs <- slope_cs * waterbodies_cs
#### CALCULATE LEAST COST PATHS BETWEEN DELAUNEY-DERIVED CAIRN LOCATIONS
lcps <- leastcostpath::create_lcp_network(cost_surface = final_cs, locations = cairns, nb_matrix = locs_matrix, cost_distance = FALSE, parallel = TRUE)
#writeOGR(obj = lcps, dsn = "./Outputs/lcps", layer = "lcps", driver = "ESRI Shapefile", overwrite_layer = TRUE)
#### CALCULATE DIRECTION DEPENDENT VISIBILITY ALONG EACH LEAST COST PATH
# Per-route DDV rasters for each walking direction (A->B and B->A).
AtoB <- list()
BtoA <- list()
# length(lcps)
# NOTE(review): only the first 5 LCPs are processed; the "length(lcps)"
# comment above suggests a full run should iterate seq_len(length(lcps)) —
# confirm whether 5 is a deliberate sample or a leftover test limit.
for (i in 1:5) {
print(paste0("Iteration Number: ", i))
AtoB[[i]] <- DDV(route = lcps[i,], dem = elev_osgb, max_dist = distance, horizontal_angle = 62, locs_futher_along_route = 1, observer_elev = observer_height, reverse = FALSE, binary = FALSE)
BtoA[[i]] <- DDV(route = lcps[i,], dem = elev_osgb, max_dist = distance, horizontal_angle = 62, locs_futher_along_route = 1, observer_elev = observer_height, reverse = TRUE, binary = FALSE)
}
# Cell-wise sums across routes, one cumulative raster per direction.
DDV_AtoB <- Reduce(`+`, AtoB)
DDV_BtoA <- Reduce(`+`, BtoA)
#writeRaster(x = DDV_AtoB, filename = "./Outputs/viewsheds/DDV_AtoB.tif", overwrite = TRUE)
#writeRaster(x = DDV_BtoA, filename = "./Outputs/viewsheds/DDV_BtoA.tif", overwrite = TRUE)
# Mean of the two directions.
final_DDV <- (DDV_AtoB + DDV_BtoA) / 2
# calculate how much of the study area is visible
(sum(final_DDV[] > 0) * res(elev_osgb)[1]^2) / (ncell(elev_osgb) * res(elev_osgb)[1]^2) * 100
# Transparent background for mapping, as with the cumulative viewshed.
final_DDV[final_DDV == 0] <- NA
#writeRaster(x = final_DDV, filename = "./Outputs/viewsheds/DDV_mean.tif", overwrite = TRUE)
# Mask below-zero elevation cells so they render as nodata on the maps.
elev_osgb[elev_osgb < 0] <- NA
# Study-area outline, drawn as a dashed black line on every map.
high_fells_line <- as(high_fells, "SpatialLines")
crs(high_fells_line) <- crs(elev_osgb)
# Map A: cumulative cairn viewshed (plasma) over a dimmed elevation base
# (cividis, alpha 0.6), with lakes and the High Fells outline.
viewshed_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE, alpha = 0.6) +
tm_shape(viewshed, raster.downsample = FALSE) +
tm_raster(palette = viridis::plasma(n = 10, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Cumulative Visibility") +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "black") +
tm_layout(
main.title = "A",
main.title.position = "left")
# Map B: least-cost-path network (red) and cairnfield locations (black dots)
# over the full-opacity elevation base.
lcp_routes_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE) +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_shape(lcps) +
tm_lines(col = "red", lwd = 4) +
tm_shape(cairns) +
tm_dots(col = "black", size = 0.2, legend.show = TRUE) +
tm_add_legend(type = "line", labels = "Least Cost Path", col = "red", lwd = 4) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "symbol", labels = "Cairnfields", col = "black", border.col = "black") +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
# White scale-bar text here (black elsewhere) — presumably for contrast
# against the darker base map; confirm intent.
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "white") +
tm_layout(
main.title = "B",
main.title.position = "left")
# Map C: mean direction-dependent visibility along the LCPs, styled like
# Map A.
lcp_viewshed_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE, alpha = 0.6) +
tm_shape(final_DDV, raster.downsample = FALSE) +
tm_raster(palette = viridis::plasma(n = 10, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Cumulative Visibility") +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "black") +
tm_layout(
main.title = "C",
main.title.position = "left")
# tmap::tmap_save(lcp_routes_map, "./outputs/plots/lpc_routes.png")
# tmap::tmap_save(viewshed_map, "./outputs/plots/viewshed.png")
# tmap::tmap_save(lcp_viewshed_map, "./outputs/plots/lcp_viewshed.png")
# Quartiles of visibility values at the cairn locations themselves, for the
# cumulative viewshed and the mean DDV raster (printed to console).
quantile(extract(viewshed, cairns), seq(0, 1, 0.25))
quantile(extract(final_DDV, cairns), seq(0, 1, 0.25))
| /R/main.R | permissive | josephlewis/Seeing-While-Moving-Direction-Dependent-Visibility | R | false | false | 9,746 | r | library(rgdal)
library(rgeos)
library(raster)
library(sp)
library(spdep) # for calculating gabriel graph
library(leastcostpath) # for calculating LCPs
library(rgrass7) # for calculating viewsheds
# use_sp() ensures that rgrass7 uses sp rather than stars library
use_sp()
library(xml2) # for creating cairnfield SpatialPoints from ADS records
library(tmap) # for producing maps
#### READ DDV FUNCTION AND MAKE AVAILABLE TO CURRENT SCRIPT ####
source("./R/Direction Dependent Visibility.R")
#### LOAD AND MODIFY NECESSARY FILES ####
NCA <- readOGR("./Data/National_Character_Areas_England/National_Character_Areas_England.shp")
high_fells <- NCA[NCA$JCANAME == "Cumbria High Fells",]
elev_files <- list.files(path = "./Data/OS50/", pattern = ".asc$", full.names = TRUE, recursive = TRUE)
elev_list <- lapply(elev_files, raster::raster)
elev_osgb <- do.call(raster::merge, elev_list)
elev_osgb <- raster::crop(elev_osgb, rgeos::gBuffer(as(raster::extent(high_fells), "SpatialPolygons"), width = 1000))
crs(high_fells) <- crs(elev_osgb)
waterbodies <- readOGR("./Data/Waterbodies/WFD_Lake_Water_Bodies_Cycle_2.shp")
waterbodies <- raster::crop(waterbodies, elev_osgb)
crs(waterbodies) <- crs(elev_osgb)
#### CREATE SPATIALPOINTS OF BRONZE AGE CAIRNS ####
cairns_files <- list.files(path = "./Data/Bronze Age Cairnfields/", pattern = ".xml", full.names = TRUE)
cairns = list()
for (i in 1:length(cairns_files)) {
xml_doc <- xml2::read_xml(cairns_files[i])
x <- as.numeric(xml2::xml_text(xml2::xml_find_all(xml_doc, "//ns:x")))
y <- as.numeric(xml2::xml_text(xml2::xml_find_all(xml_doc, "//ns:y")))
cairns[[i]] <- SpatialPoints(coords = cbind(x,y))
}
cairns <- do.call(rbind, cairns)
# remove cairns with duplicate coordinates
cairns <- cairns[!duplicated(cairns@coords),]
# remove cairns that are in the same raster cell - this is to ensure that LCPs aren't calculated from two cairns within the same raster cell
cairns <- cairns[!duplicated(cellFromXY(elev_osgb, cairns)),]
cairns <- raster::crop(x = cairns, high_fells)
cairns$ID <- 1:length(cairns)
writeOGR(obj = cairns, dsn = "./Data/Bronze Age Cairnfields", layer = "cairns", driver = "ESRI Shapefile")
#### CALCULATE VIEWSHEDS FROM BRONZE AGE CAIRNS ####
GRASS_loc <- "D:/GRASS GIS 7.6.0"
temp_loc <- "C:/"
initGRASS(gisBase = GRASS_loc,
gisDbase = temp_loc, location = "visibility",
mapset = "PERMANENT", override = TRUE)
# set coordinate system as OSGB
execGRASS("g.proj", flags = c("c"), proj4 = "+proj=tmerc +lat_0=49 +lon_0=-2 +k=0.9996012717 +x_0=400000 +y_0=-100000 +ellps=airy +towgs84=446.448,-125.157,542.06,0.15,0.247,0.842,-20.489 +units=m +no_defs")
# write dem to GRASS
writeRAST(as(elev_osgb, "SpatialGridDataFrame"), "dem", overwrite = TRUE)
execGRASS("g.region", raster = "dem", flags = "p")
viewshed <- elev_osgb
viewshed[] <- 0
observer_height <- 1.65
distance <- 6000
locations <- cairns@coords
for (i in 1:length(cairns)) {
print(paste0("Iteration Number: ", i))
execGRASS("r.viewshed", flags = c("overwrite","b"), parameters = list(input = "dem", output = "viewshed", coordinates = unlist(c(locations[i,])), observer_elevation = observer_height, max_distance = distance))
single.viewshed <- readRAST("viewshed")
single.viewshed <- raster(single.viewshed, layer=1, values=TRUE)
viewshed <- viewshed + single.viewshed
}
# calculate how much of the study area is visible
(sum(viewshed[] > 0) * res(elev_osgb)[1]^2) / (ncell(elev_osgb) * res(elev_osgb)[1]^2) * 100
viewshed[viewshed == 0] <- NA
#writeRaster(x = viewshed, filename = "./Outputs/viewsheds/cumulative_viewshed.tif", overwrite = TRUE)
#### CALCULATE DELAUNEY TRIANGLE FROM CAIRN LOCATIONS ####
neighbour_pts <- spdep::gabrielneigh(cairns)
locs_matrix <- base::cbind(neighbour_pts$from, neighbour_pts$to)
#### CREATE SLOPE AND WATERBODIES COST SURFACES ####
slope_cs <- leastcostpath::create_slope_cs(dem = elev_osgb, cost_function = "modified tobler", neighbours = 16)
waterbodies_cs <- leastcostpath::create_barrier_cs(raster = elev_osgb, barrier = waterbodies, neighbours = 16, field = 0, background = 1)
final_cs <- slope_cs * waterbodies_cs
#### CALCULATE LEAST COST PATHS BETWEEN DELAUNEY-DERIVED CAIRN LOCATIONS
lcps <- leastcostpath::create_lcp_network(cost_surface = final_cs, locations = cairns, nb_matrix = locs_matrix, cost_distance = FALSE, parallel = TRUE)
#writeOGR(obj = lcps, dsn = "./Outputs/lcps", layer = "lcps", driver = "ESRI Shapefile", overwrite_layer = TRUE)
#### CALCULATE DIRECTION DEPENDENT VISIBILITY ALONG EACH LEAST COST PATH
AtoB <- list()
BtoA <- list()
# length(lcps)
for (i in 1:5) {
print(paste0("Iteration Number: ", i))
AtoB[[i]] <- DDV(route = lcps[i,], dem = elev_osgb, max_dist = distance, horizontal_angle = 62, locs_futher_along_route = 1, observer_elev = observer_height, reverse = FALSE, binary = FALSE)
BtoA[[i]] <- DDV(route = lcps[i,], dem = elev_osgb, max_dist = distance, horizontal_angle = 62, locs_futher_along_route = 1, observer_elev = observer_height, reverse = TRUE, binary = FALSE)
}
DDV_AtoB <- Reduce(`+`, AtoB)
DDV_BtoA <- Reduce(`+`, BtoA)
#writeRaster(x = DDV_AtoB, filename = "./Outputs/viewsheds/DDV_AtoB.tif", overwrite = TRUE)
#writeRaster(x = DDV_BtoA, filename = "./Outputs/viewsheds/DDV_BtoA.tif", overwrite = TRUE)
final_DDV <- (DDV_AtoB + DDV_BtoA) / 2
# calculate how much of the study area is visible
(sum(final_DDV[] > 0) * res(elev_osgb)[1]^2) / (ncell(elev_osgb) * res(elev_osgb)[1]^2) * 100
final_DDV[final_DDV == 0] <- NA
#writeRaster(x = final_DDV, filename = "./Outputs/viewsheds/DDV_mean.tif", overwrite = TRUE)
elev_osgb[elev_osgb < 0] <- NA
high_fells_line <- as(high_fells, "SpatialLines")
crs(high_fells_line) <- crs(elev_osgb)
viewshed_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE, alpha = 0.6) +
tm_shape(viewshed, raster.downsample = FALSE) +
tm_raster(palette = viridis::plasma(n = 10, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Cumulative Visibility") +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "black") +
tm_layout(
main.title = "A",
main.title.position = "left")
lcp_routes_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE) +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_shape(lcps) +
tm_lines(col = "red", lwd = 4) +
tm_shape(cairns) +
tm_dots(col = "black", size = 0.2, legend.show = TRUE) +
tm_add_legend(type = "line", labels = "Least Cost Path", col = "red", lwd = 4) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "symbol", labels = "Cairnfields", col = "black", border.col = "black") +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "white") +
tm_layout(
main.title = "B",
main.title.position = "left")
lcp_viewshed_map <- tm_shape(elev_osgb, raster.downsample = FALSE) +
tm_raster(palette = viridis::cividis(n = 100, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Elevation (m)", colorNA = "#9ECAE1", showNA = FALSE, alpha = 0.6) +
tm_shape(final_DDV, raster.downsample = FALSE) +
tm_raster(palette = viridis::plasma(n = 10, begin = 0, end = 1), n = 10, legend.show = TRUE, legend.reverse = TRUE, title = "Cumulative Visibility") +
tm_shape(waterbodies) +
tm_polygons(col = "#9ECAE1", border.col = "#9ECAE1", legend.show = TRUE) +
tm_shape(high_fells_line) +
tm_lines(col = "black", lwd = 2, lty = 2, legend.show = TRUE) +
tm_legend(show = TRUE, outside = TRUE, legend.position = c("right", "bottom")) +
tm_add_legend(type = "fill", labels = "Water", col = "#9ECAE1", border.col = "#9ECAE1") +
tm_add_legend(type = "line", labels = "High Fells", col = "black", lty = 2, lwd = 2) +
tm_scale_bar(position = c("right", "bottom"),breaks = c(0, 5, 10), text.color = "black") +
tm_layout(
main.title = "C",
main.title.position = "left")
# tmap::tmap_save(lcp_routes_map, "./outputs/plots/lpc_routes.png")
# tmap::tmap_save(viewshed_map, "./outputs/plots/viewshed.png")
# tmap::tmap_save(lcp_viewshed_map, "./outputs/plots/lcp_viewshed.png")
quantile(extract(viewshed, cairns), seq(0, 1, 0.25))
quantile(extract(final_DDV, cairns), seq(0, 1, 0.25))
|
# libFuzzer/valgrind reproduction case: replays a generated input against the
# internal palm:::pbc_distances. `lims` is a 4x7 double matrix of extreme /
# denormal values; `points` is a 1x9 matrix containing NaN, NA and -Inf.
testlist <- list(lims = structure(c(6.05706623128535e-309, 4.94065645841247e-324, 0, 0, 2.12198061623303e-314, 2.09674943521173e-231, 7.29112712698821e-304, 1.35264507619519e-309, 7.29144885433481e-304, 2.54556605715115e-313, 5.78790420946554e-275, 9.08440531110103e+133, 8.21491790095636e-227, 5.95750278984877e+228, 5.9575027961836e+228, 7.07219232619538e-304, 4.10490640372769e+204, 4.14103566795568e+204, 9.10545742538589e-259, 1.42706986115138e+48, 8.10541286676906e+228, 5.71229768251201e+151, 1.39137526939423e+93, 1.2136247081529e+132, 3.64031741345465e+133, 2.35569228911976e+251, 8.43594713548578e+252, 0), .Dim = c(4L, 7L)), points = structure(c(NaN, 4.94065645841247e-324, 4.94065645841247e-324, NaN, NA, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, -Inf), .Dim = c(1L, 9L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) | /palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987905-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 883 | r | testlist <- list(lims = structure(c(6.05706623128535e-309, 4.94065645841247e-324, 0, 0, 2.12198061623303e-314, 2.09674943521173e-231, 7.29112712698821e-304, 1.35264507619519e-309, 7.29144885433481e-304, 2.54556605715115e-313, 5.78790420946554e-275, 9.08440531110103e+133, 8.21491790095636e-227, 5.95750278984877e+228, 5.9575027961836e+228, 7.07219232619538e-304, 4.10490640372769e+204, 4.14103566795568e+204, 9.10545742538589e-259, 1.42706986115138e+48, 8.10541286676906e+228, 5.71229768251201e+151, 1.39137526939423e+93, 1.2136247081529e+132, 3.64031741345465e+133, 2.35569228911976e+251, 8.43594713548578e+252, 0), .Dim = c(4L, 7L)), points = structure(c(NaN, 4.94065645841247e-324, 4.94065645841247e-324, NaN, NA, 4.94065645841247e-324, 4.94065645841247e-324, 4.94065645841247e-324, -Inf), .Dim = c(1L, 9L)))
result <- do.call(palm:::pbc_distances,testlist)
str(result) |
library(ggplot2)
library(reshape2)
## Positional command-line arguments:
##   1: RT=5000 table   2: RT=3000 table   3: RT=1000 table   4: output prefix
cli_args <- commandArgs(trailingOnly = TRUE)
dat5k <- read.delim(cli_args[1], header = TRUE)
dat3k <- read.delim(cli_args[2], header = TRUE)
dat1k <- read.delim(cli_args[3], header = TRUE)
outfilebase <- cli_args[4]
# head(dat)
## Gap score and transcription ("% off") plots over time, one PDF per
## (recruit time, metric) combination. The six original copy-pasted stanzas
## are factored into one helper; every output file, y-label and title string
## is preserved byte-for-byte.

# Write one time-course plot to `outfile`.
#   dat     - data frame with Timestep, RecruitTime, FValue and `yvar` columns
#   rt      - recruit time; rows are filtered to RecruitTime == rt, and the
#             recruitment window [30000, 30000 + rt] is marked with dashed
#             red vertical lines
#   yvar    - name of the column plotted on the y axis
#   ylab    - y-axis label
#   title   - plot title
#   outfile - path of the PDF to create
save_timecourse_plot <- function(dat, rt, yvar, ylab, title, outfile) {
  recruit.df <- data.frame(recruit = c(30000, 30000 + rt))
  pdf(outfile)
  # print() is required: ggplot objects do not auto-print inside a function.
  print(
    ggplot(dat[dat$RecruitTime == rt, ], aes(Timestep, .data[[yvar]], col = factor(FValue))) +
      geom_line() +
      geom_vline(aes(xintercept = recruit), data = recruit.df, col = "red", linetype = "dashed") +
      geom_hline(aes(yintercept = 0), linetype = "dashed") +
      ylab(ylab) +
      ggtitle(title)
  )
  dev.off()
}
## Gap score plot over time
save_timecourse_plot(dat1k, 1000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)", "Gap Scores Over Time: RT = 1000 ( 1 cell cycle )", paste0(outfilebase, "rt_1000_gapscore.pdf"))
save_timecourse_plot(dat3k, 3000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)", "Gap Scores Over Time: RT = 3000 ( 3 cell cycles )", paste0(outfilebase, "rt_3000_gapscore.pdf"))
save_timecourse_plot(dat5k, 5000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)", "Gap Scores Over Time: RT = 5000 ( 5 cell cycles )", paste0(outfilebase, "rt_5000_gapscore.pdf"))
## Transcription score plot over time
save_timecourse_plot(dat1k, 1000, "PercOff", "% cells off (>=60% M)", "% Off Over Time: RT = 1000 ( 1 cell cycle )", paste0(outfilebase, "rt_1000_transcriptionscore.pdf"))
save_timecourse_plot(dat3k, 3000, "PercOff", "% cells off (>=60% M)", "% Off Over Time: RT = 3000 ( 3 cell cycles )", paste0(outfilebase, "rt_3000_transcriptionscore.pdf"))
save_timecourse_plot(dat5k, 5000, "PercOff", "% cells off (>= 60% M)", "% Off Over Time: RT = 5000 ( 5 cell cycles )", paste0(outfilebase, "rt_5000_transcriptionscore.pdf"))
| /model_src/code/plot.R | no_license | aparna-arr/HistoneModSpreadingModel | R | false | false | 3,131 | r | library(ggplot2)
library(reshape2)
library(ggplot2)

## Command-line arguments: three summary tables (recruit times 5000, 3000
## and 1000 timesteps) and an output-file prefix.
files <- commandArgs(trailingOnly = TRUE)
dat5k <- read.delim(files[1], header = TRUE)
dat3k <- read.delim(files[2], header = TRUE)
dat1k <- read.delim(files[3], header = TRUE)
outfilebase <- files[4]

## Draw one metric-over-time panel and write it to a PDF.
##   dat    - summary table for this recruit time
##   rt     - recruitment time in timesteps (also selects the rows plotted)
##   metric - name of the column plotted on the y axis
##   ylab   - y-axis label
##   title  - plot title
##   suffix - file-name suffix ("gapscore" / "transcriptionscore")
## Recruitment runs from t = 30000 to t = 30000 + rt; both boundaries are
## drawn as dashed red vertical lines. Replaces six copy-pasted stanzas
## that differed only in data set, column and labels.
plot_metric <- function(dat, rt, metric, ylab, title, suffix) {
  recruit.df <- data.frame(recruit = c(30000, 30000 + rt))
  pdf(paste0(outfilebase, "rt_", rt, "_", suffix, ".pdf"))
  p <- ggplot(dat[dat$RecruitTime == rt, ],
              aes(Timestep, .data[[metric]], col = factor(FValue))) +
    geom_line() +
    geom_vline(aes(xintercept = recruit), data = recruit.df,
               col = "red", linetype = "dashed") +
    geom_hline(aes(yintercept = 0), linetype = "dashed") +
    ylab(ylab) +
    ggtitle(title)
  print(p)  # ggplot objects are not auto-printed inside a function
  dev.off()
}

## Gap score plots over time.
plot_metric(dat1k, 1000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)",
            "Gap Scores Over Time: RT = 1000 ( 1 cell cycle )", "gapscore")
plot_metric(dat3k, 3000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)",
            "Gap Scores Over Time: RT = 3000 ( 3 cell cycles )", "gapscore")
plot_metric(dat5k, 5000, "AvgGapScore", "AvgGapScore (M - A) / (M + A)",
            "Gap Scores Over Time: RT = 5000 ( 5 cell cycles )", "gapscore")
## Transcription score (% cells off) plots over time.
plot_metric(dat1k, 1000, "PercOff", "% cells off (>=60% M)",
            "% Off Over Time: RT = 1000 ( 1 cell cycle )", "transcriptionscore")
plot_metric(dat3k, 3000, "PercOff", "% cells off (>=60% M)",
            "% Off Over Time: RT = 3000 ( 3 cell cycles )", "transcriptionscore")
plot_metric(dat5k, 5000, "PercOff", "% cells off (>= 60% M)",
            "% Off Over Time: RT = 5000 ( 5 cell cycles )", "transcriptionscore")
|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
# Report a mismatch between an R-computed value and the H2O-computed value.
# Logs both values via Log.info when they differ beyond all.equal's numeric
# tolerance; does nothing when they agree.
print_diff <- function(r, h2o) {
  if (isTRUE(all.equal(r, h2o))) {
    return(invisible(NULL))
  }
  Log.info(paste("R :", r))
  Log.info(paste("H2O :", h2o))
}
#
# This test creates some dates on the server, copies the results
# locally. It then changes the timezone reloads the original
# data, changes them to dates, and gets a local copy.
# The two local copies are then checked for the correct time
# offset.
#
# Golden test for h2o.setTimezone: dates parsed on the H2O cluster must
# shift by the expected offset when the cluster timezone changes, and the
# extracted date components must be interpreted in the active timezone.
test.rdoc_settimezone.golden <- function() {
# Remember the cluster's timezone so it can be restored at the end.
origTZ = h2o.getTimezone()
#test 1: the same strings parsed under UTC vs America/Los_Angeles must
# differ by exactly 8 hours = 28800000 ms (UTC minus PST).
h2o.setTimezone("Etc/UTC")
rdf = data.frame(c("Fri Jan 10 00:00:00 1969", "Tue Jan 10 04:00:00 2068", "Mon Dec 30 01:00:00 2002", "Wed Jan 1 12:00:00 2003"))
colnames(rdf) <- c("c1")
hdf = as.h2o(rdf, "hdf")
# Parse with the locale-style "%c" format on the H2O side.
hdf$c1 <- as.Date(hdf$c1, "%c")
ldfUTC <- as.data.frame(hdf)
h2o.rm(hdf)
h2o.setTimezone("America/Los_Angeles")
hdf = as.h2o(rdf, "hdf")
hdf$c1 <- as.Date(hdf$c1, "%c")
ldfPST <- as.data.frame(hdf)
# Millisecond difference between the two parses; -28800000 ms = -8 h.
diff = ldfUTC - ldfPST
act = rep(-28800000, 4)
print_diff(act, diff[,1])
expect_that(act, equals(diff[,1]))
#test 2 - make sure returned years/months have the same timezone as interpretation
h2o.setTimezone("Etc/UTC")
rdf <- data.frame(dates = c("2014-01-07", "2014-01-30", "2014-01-31", "2014-02-01", "2014-02-02", "2014-10-31", "2014-11-01"), stringsAsFactors = FALSE)
hdf <- as.h2o(rdf, "hdf")
hdf$dates <- as.Date(hdf$dates,"%Y-%m-%d")
hdf$year <- year(hdf$dates)
hdf$month <- month(hdf$dates)
hdf$day <- day(hdf$dates)
hdf$hour <- hour(hdf$dates)
ldf <- as.data.frame(hdf)
# Expected components: per the fixture, 2014 maps to year 114 (offset from
# 1900) and months are 1-based; hours are midnight since no time was given.
edf <- data.frame(year = c(114, 114, 114, 114, 114, 114, 114),
month = c(1, 1, 1, 2, 2, 10, 11),
day = c(7, 30, 31, 1, 2, 31, 1),
hour = c(0, 0, 0, 0, 0, 0, 0))
print_diff(edf$year, ldf$year)
expect_that(edf$year, equals(ldf$year))
print_diff(edf$month, ldf$month)
expect_that(edf$month, equals(ldf$month))
print_diff(edf$day, ldf$day)
expect_that(edf$day, equals(ldf$day))
print_diff(edf$hour, ldf$hour)
expect_that(edf$hour, equals(ldf$hour))
# erase side effect of test: restore the original cluster timezone and
# remove the frame created on the server.
h2o.setTimezone(origTZ)
h2o.rm(hdf)
}
# Register and run the test via the h2o-r test harness.
doTest("R Doc setTimezone", test.rdoc_settimezone.golden)
| /h2o-r/tests/testdir_docexamples/runit_Rdoc_set_time_zone.R | permissive | voltek62/h2o-3 | R | false | false | 2,295 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../scripts/h2o-r-test-setup.R")
# Log both the R-side and H2O-side values when they disagree beyond
# all.equal's numeric tolerance; silent when they match.
print_diff <- function(r, h2o) {
if (!isTRUE(all.equal(r,h2o))) {
Log.info (paste("R :", r))
Log.info (paste("H2O :" , h2o))
}
}
#
# This test creates some dates on the server, copies the results
# locally. It then changes the timezone reloads the original
# data, changes them to dates, and gets a local copy.
# The two local copies are then checked for the correct time
# offset.
#
# Golden test for h2o.setTimezone: dates parsed on the H2O cluster must
# shift by the expected offset when the cluster timezone changes, and the
# extracted date components must be interpreted in the active timezone.
test.rdoc_settimezone.golden <- function() {
# Remember the cluster's timezone so it can be restored at the end.
origTZ = h2o.getTimezone()
#test 1: the same strings parsed under UTC vs America/Los_Angeles must
# differ by exactly 8 hours = 28800000 ms (UTC minus PST).
h2o.setTimezone("Etc/UTC")
rdf = data.frame(c("Fri Jan 10 00:00:00 1969", "Tue Jan 10 04:00:00 2068", "Mon Dec 30 01:00:00 2002", "Wed Jan 1 12:00:00 2003"))
colnames(rdf) <- c("c1")
hdf = as.h2o(rdf, "hdf")
# Parse with the locale-style "%c" format on the H2O side.
hdf$c1 <- as.Date(hdf$c1, "%c")
ldfUTC <- as.data.frame(hdf)
h2o.rm(hdf)
h2o.setTimezone("America/Los_Angeles")
hdf = as.h2o(rdf, "hdf")
hdf$c1 <- as.Date(hdf$c1, "%c")
ldfPST <- as.data.frame(hdf)
# Millisecond difference between the two parses; -28800000 ms = -8 h.
diff = ldfUTC - ldfPST
act = rep(-28800000, 4)
print_diff(act, diff[,1])
expect_that(act, equals(diff[,1]))
#test 2 - make sure returned years/months have the same timezone as interpretation
h2o.setTimezone("Etc/UTC")
rdf <- data.frame(dates = c("2014-01-07", "2014-01-30", "2014-01-31", "2014-02-01", "2014-02-02", "2014-10-31", "2014-11-01"), stringsAsFactors = FALSE)
hdf <- as.h2o(rdf, "hdf")
hdf$dates <- as.Date(hdf$dates,"%Y-%m-%d")
hdf$year <- year(hdf$dates)
hdf$month <- month(hdf$dates)
hdf$day <- day(hdf$dates)
hdf$hour <- hour(hdf$dates)
ldf <- as.data.frame(hdf)
# Expected components: per the fixture, 2014 maps to year 114 (offset from
# 1900) and months are 1-based; hours are midnight since no time was given.
edf <- data.frame(year = c(114, 114, 114, 114, 114, 114, 114),
month = c(1, 1, 1, 2, 2, 10, 11),
day = c(7, 30, 31, 1, 2, 31, 1),
hour = c(0, 0, 0, 0, 0, 0, 0))
print_diff(edf$year, ldf$year)
expect_that(edf$year, equals(ldf$year))
print_diff(edf$month, ldf$month)
expect_that(edf$month, equals(ldf$month))
print_diff(edf$day, ldf$day)
expect_that(edf$day, equals(ldf$day))
print_diff(edf$hour, ldf$hour)
expect_that(edf$hour, equals(ldf$hour))
# erase side effect of test: restore the original cluster timezone and
# remove the frame created on the server.
h2o.setTimezone(origTZ)
h2o.rm(hdf)
}
# Register and run the test via the h2o-r test harness.
doTest("R Doc setTimezone", test.rdoc_settimezone.golden)
|
# Install any missing packages, then attach them all. NOTE(review):
# `require` returns FALSE instead of erroring when a package is missing;
# failures here are silent.
packages<-c('dplyr','mice','vtreat','Amelia','caret','doParallel','foreach',
'ggplot2','rms','parallel','pec','matrixStats','prodlim','qs','ranger','survival','timeROC')
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
install.packages(setdiff(packages, rownames(installed.packages())))
}
lapply(packages, require, character.only = TRUE)
####Prepare and clean dataset
# NOTE(review): `survdata` must already exist in the workspace; it is not
# created anywhere in this script.
survdata
####Impute for missing data, 10 imputations, 10 iterations
# NOTE(review): `cluster.seed` is an argument of parlmice/futuremice, not
# mice(); here it is swallowed by `...` and the run may not actually be
# seeded -- confirm, or use `seed = 500`.
mice<- mice(survdata,m=10,maxit=10,cluster.seed=500)
# Persist the mids object with qs for fast reload in later sections.
qsave(mice,'mice.q')
#### Train a random survival forest on the full dataset and tune
#### hyperparameters over `tgrid`, producing one tuned model per imputed
#### dataset (saved as a list in 'rsfintlist.q').
{
mice<-qread('mice.q')
# Hyperparameter grid searched for every imputation.
tgrid<-expand.grid(
num.trees=c(200,250,300),
.mtry=5:10,
.splitrule=c("logrank"),
.min.node.size=c(20,25,30,35,40)
)
# FIX: pre-create the result list; it was previously never initialised
# before being indexed into.
rsfintlist<-vector("list", mice$m)
oob<-1
# FIX: iterate over the number of imputations (`mice$m`); `length(mice)`
# on a mids object counts its components, not the imputed datasets.
for (i in seq_len(mice$m)){
###For each imputed dataset train a random forest model per grid row
data<-mice::complete(mice,i)
cl <- makeForkCluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms)))
system.time(list<-foreach(j=1:nrow(tgrid)) %dopar%{
# FIX: index `.splitrule[j]`; the original passed the whole column.
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid$num.trees[j],mtry=tgrid$.mtry[j],min.node.size=tgrid$.min.node.size[j],splitrule=tgrid$.splitrule[j])
###Out-of-bag error (equivalent to 1 - C-index on out-of-bag samples)
###for this combination of tuning parameters
oob[j]<-rsf$prediction.error
rm(rsf)
return(list(oob))
})
stopCluster(cl)
registerDoSEQ()
# FIX: inner index renamed to `k`; the original reused `i`, clobbering the
# imputation index so every tuned model was written to the same list slot.
for (k in 1:nrow(tgrid)){
oob[k]<-(list[[k]][[1]][[k]])
}
####Select the hyperparameter combination with the lowest out-of-bag
####prediction error and train the final model for this imputation.
rsf<-ranger(Surv(monthssurv,ons_death)~.,data,num.trees=tgrid[which.min(oob),]$num.trees,mtry=tgrid[which.min(oob),]$.mtry,min.node.size=tgrid[which.min(oob),]$.min.node.size,splitrule='logrank',importance='permutation')
rsfintlist[i]<-list(rsf)
}
qsave(rsfintlist,'rsfintlist.q')
}
####Variable selection by bootstrap: estimate permutation variable
####importance (VIMP) and its bootstrap standard error, pool across
####imputations, and keep variables whose 99% lower CI excludes zero.
{
mice<-qread('survivalmicetrain.q')
rsflist<-qread('rsfintlist.q')
###Calculate raw VIMP on the full data (one column per imputation)
{
folds<-1:nrow(mice::complete(mice,1))
finalvimp<-1
cl <- makeForkCluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia)))
# foreach over a single iteration: run once on the workers but keep the
# same list-of-lists result shape as the bootstrap loop below.
system.time(list<-foreach(t=1:1)%dopar%{
# One VIMP column per imputation; ncol-2 drops the two outcome columns.
vimp<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (n in 1:mice$m){
datax<-mice::complete(mice,n)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
# Round survival times up to whole months.
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax
# Refit with the imputation's tuned hyperparameters, permutation VIMP on.
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
vimp<-as.data.frame(list[[1]])
rownames(vimp)<-colnames(mice::complete(mice,1)[,1:(ncol(mice::complete(mice,1))-2)])
qsave(vimp,'mivimp.q')
}
####Create bootstrap resampling (1000 resamples with replacement)
{
data<-mice::complete(mice,1)
folds<-caret::createResample(1:nrow(mice::complete(mice,1)),times=1000)
qsave(folds,'folds.q')
folds<-qread('folds.q')
}
####Calculate bootstrap VIMP. Uses hyperparameters from full sample for computational reasons
{
finalvimp<-1
cl <- makeForkCluster(3)
registerDoParallel(cl)
# For each of the 1000 bootstrap resamples, recompute VIMP per imputation.
system.time(list<-foreach(t=1:1000)%dopar%{
vimp<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (n in 1:mice$m){
datax<-mice::complete(mice,n)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
# Train on the bootstrap resample rows only.
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
df<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
rownames(df)<-colnames(mice::complete(mice,1)[,1:(ncol(mice::complete(mice,1))-2)])
# 3-D array: variables x imputations x bootstrap resamples.
vimpdf<-array(unlist(df),dim=c((ncol(mice::complete(mice,1))-2),mice$m,length(folds)))
# NOTE(review): bare `list` / `vimpdf` lines look like leftover
# interactive debugging; they only print at top level.
list
for (i in 1:1000){
for (j in 1:mice$m){
vimpdf[,j,i]<-(list[[i]][[1]][[i]][,j])
}
}
vimpdf
qsave(vimpdf,'vimpdf.q')
}
###Calculate standard error of VIMP across the bootstrap samples
{
df<-vimpdf
stderr<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (j in 1:mice$m){
for (k in 1:(ncol(mice::complete(mice,1))-2)){
stderr[k,j]<-std.error(as.matrix(df[k,j,]))
}
}
# NOTE(review): hard-coded column indices c(1:40,43) -- verify they still
# match the predictor columns if the dataset changes.
rownames(stderr)<-colnames(mice::complete(mice,1)[,c(1:40,43)])
stderr
}
###Combine across imputation datasets using Rubin's rules (Amelia::mi.meld)
{
mivimp<-qread('mivimp.q')
rownames(mivimp)<-rownames(stderr)
vimp<-mi.meld(q=mivimp,se=stderr,byrow=FALSE)
vimpa<-as.data.frame(t(vimp$q.mi))
vimpse<-as.data.frame(t(vimp$se.mi))
vimpf<-cbind2(vimpa,vimpse)
colnames(vimpf)<-c('VIMP','SE')
}
####Select only variables whose 99% lower CI is >0 (2.576 = z for 99%)
vimpf$UCI<-(vimpf$VIMP+(2.576*vimpf$SE))
vimpf$LCI<-(vimpf$VIMP-(2.576*vimpf$SE))
vimpf2<-vimpf[order(-vimpf$VIMP),,drop=FALSE]
vimpf3<-subset(vimpf2,vimpf2$LCI>0)
vimpf3
finalvar<-rownames(vimpf3)
qsave(finalvar,'finalvar.q')
}
####Create dummy variables for the selected predictors
{
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvar.q')
####Create dummy coding scheme (vtreat treatment plan) with the mortality
####indicator 'ons_death' as the modelled outcome.
OSct<-mkCrossFrameNExperiment(mice::complete(mice,1),finalvar,'ons_death')
dummies<-OSct$treatments
# Persist only the treatment plan; it is re-read wherever frames need to
# be prepared with vtreat::prepare.
qsave(dummies,'dummiesOS.q')
rm(OSct,dummies)
}
###Train full Random Forest models on the dummy-coded selected variables,
###one tuned model per imputation (saved as a list in 'rsflist.q').
{
mice<-qread('survivalmicetrain.q')
# NOTE(review): `finalvar` is loaded but unused here; the variable subset
# is applied implicitly through the vtreat treatment plan.
finalvar<-qread('finalvar.q')
dummies<-qread('dummiesOS.q')
# Hyperparameter grid searched for every imputation.
tgrid<-expand.grid(
num.trees=c(200,300,400),
.mtry=10:20,
.splitrule=c("logrank"),
.min.node.size=c(10,20,30,40)
)
# FIX: pre-create the result list; it was previously never initialised.
rsflist<-vector("list", mice$m)
####For each mice imputation, prepare the dummy-coded frame, tune the
####hyperparameters, and store the best model in the list.
# FIX: iterate over `mice$m` imputations; `length(mice)` on a mids object
# counts its components, not the imputed datasets.
for (i in seq_len(mice$m)){
data<-vtreat::prepare(dummies,mice::complete(mice,i),pruneSig=c())
data$monthssurv<-mice::complete(mice,i)$monthssurv
data$monthssurv<-ceiling(data$monthssurv)
oob<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms)))
system.time(list<-foreach(j=1:nrow(tgrid)) %dopar%{
# FIX: index `.splitrule[j]`; the original passed the whole column.
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid$num.trees[j],mtry=tgrid$.mtry[j],min.node.size=tgrid$.min.node.size[j],splitrule=tgrid$.splitrule[j])
# Out-of-bag prediction error for this grid row.
oob[j]<-rsf$prediction.error
rm(rsf)
return(list(oob))
})
stopCluster(cl)
registerDoSEQ()
# FIX: inner index renamed to `k`; the original reused `i`, clobbering the
# imputation index so every tuned model was written to the same list slot.
for (k in 1:nrow(tgrid)){
oob[k]<-(list[[k]][[1]][[k]])
}
# (removed dead `oob1<-oob`; it was never read)
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid[which.min(oob),]$num.trees,mtry=tgrid[which.min(oob),]$.mtry,min.node.size=tgrid[which.min(oob),]$.min.node.size,splitrule='logrank',importance='none')
rsflist[i]<-list(rsf)
}
qsave(rsflist,'rsflist.q')
}
####Variable importance in final models by bootstrap (similar to original VIMP calculation):
####raw VIMP on the selected variables, bootstrap SEs, pooled via Rubin's rules.
{
mice<-qread('survivalmicetrain.q')
rsflist<-qread('rsflist.q')
finalvar<-qread('finalvaros.q')
finalvimp<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(dplyr)))
n<-1
# Single-iteration foreach: compute raw VIMP per imputation on the workers.
system.time(list<-foreach(t=1:1)%dopar%{
vimp<-as.data.frame(1:(ncol(dplyr::select(mice::complete(mice,n),finalvar))))
for (n in 1:mice$m){
# Restrict to the selected variables plus the mortality indicator.
datax<-dplyr::select(mice::complete(mice,n),finalvar,ons_death)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax
# mtry is capped at 14 because only 14 selected variables remain.
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=ifelse(rsflist[[n]]$mtry<15,rsflist[[n]]$mtry,14),min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
vimp<-as.data.frame(list[[1]])
rownames(vimp)<-finalvar
# NOTE(review): bare `vimp` / `list` / `i<-100` / indexing lines below look
# like leftover interactive debugging.
vimp
list
qsave(vimp,'mivimp2.q')
data<-mice::complete(mice,1)
folds<-qread('folds.q')
n<-1
t<-1
###Bootstrap VIMP over 1000 resamples of the selected-variable models
{
finalvimp<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(dplyr)))
system.time(list<-foreach(t=1:1000)%dopar%{
vimp<-as.data.frame(1:(ncol(dplyr::select(mice::complete(mice,n),finalvar))))
for (n in 1:mice$m){
datax<-dplyr::select(mice::complete(mice,n),finalvar,ons_death)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
# Train on the bootstrap resample rows only.
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=ifelse(rsflist[[n]]$mtry<15,rsflist[[n]]$mtry,14),min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
i<-100
list[[i]][[1]][[i]][,j]
df<-as.data.frame(1:14)
rownames(df)<-finalvar
# 3-D array: 14 variables x imputations x resamples.
# NOTE(review): only 200 of the 1000 bootstrap results are used here --
# confirm whether this truncation is intentional.
dfa<-array(unlist(df),dim=c(14,mice$m,200))
for (i in 1:200){
for (j in 1:mice$m){
dfa[,j,i]<-(list[[i]][[1]][[i]][,j])
}
}
dfa
}
# Bootstrap standard error per variable and imputation.
stderr<-as.data.frame(1:14)
for (j in 1:mice$m){
for (k in 1:14){
stderr[k,j]<-std.error(as.matrix(dfa[k,j,]))
}
}
rownames(stderr)<-finalvar
stderr
# Pool point estimates and SEs across imputations via Rubin's rules,
# then attach 95% CIs (1.96 = z for 95%) and sort by VIMP.
mivimp<-qread('mivimp2.q')
vimp<-mi.meld(q=mivimp,se=stderr,byrow=FALSE)
vimpa<-as.data.frame(t(vimp$q.mi))
vimpse<-as.data.frame(t(vimp$se.mi))
vimpf<-cbind2(vimpa,vimpse)
colnames(vimpf)<-c('VIMP','SE')
vimpf$UCI<-(vimpf$VIMP+(1.96*vimpf$SE))
vimpf$LCI<-(vimpf$VIMP-(1.96*vimpf$SE))
vimpf2<-vimpf[order(-vimpf$VIMP),,drop=FALSE]
vimpf2
qsave(vimpf2,'fullvimpfin.q')
}
####Calculate censoring times: build a patient x death-time status matrix
####(1 = event occurred by that time, 0 = still at risk, NA = censored).
{
rsflist<-qread('rsflist.q')
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvarOS.q')
dummies<-qread('dummiesOS.q')
data<-vtreat::prepare(dummies,mice::complete(mice,1),pruneSig=c())
data$monthssurv<-mice::complete(mice,1)$monthssurv
# NOTE(review): `rsf1` is assigned but never used in this block.
rsf1<-rsflist[1]
# NOTE(review): `[[3]]` presumably extracts unique.death.times from the
# first fitted ranger model -- confirm against the model object layout.
deathtimes<-rsflist[[1]][[3]]
censortimes<-as.data.frame(1)
# Nested ifelse used purely for its assignment side effects: event before
# time j -> 1; censored before time j -> NA; otherwise -> 0.
system.time(for (j in 1:length(deathtimes)){
for (i in 1:nrow(data)){
ifelse((data[i,"ons_death"]==1 & data[i,'monthssurv']<=deathtimes[j]), censortimes[i,j]<-1 , (ifelse(data[i,'monthssurv']<=deathtimes[j],censortimes[i,j]<-NA,censortimes[i,j]<-0)))
}
})
qsave(censortimes,'censortimesfin.q')
}
####Internal validation - Generate bootstrap predictions: for each resample,
####fit models on the resample, predict log-survival (with per-tree SEs) on
####both testing (out-of-resample) and training rows, pool across
####imputations via Rubin's rules, and save testing/training predictions.
{
# SECURITY NOTE(review): hard-coded credential committed to source --
# move this key out of the script (env var / keyring) and rotate it.
pw<-'Adalimumab3!'
modellist<-decrypt_object(qread('modellistEON.q'),key=pw)
rsflist<-modellist$rsflist
mice<-modellist$mice
finalvar<-modellist$Finalvars
dummies<-modellist$dummies
censortimes<-modellist$censortimes
# NOTE(review): the qread calls below immediately overwrite everything
# extracted from the decrypted modellist above -- one of the two load
# paths is redundant; confirm which is intended.
rsflist<-qread('rsflist.q')
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvarOS.q')
dummies<-qread('dummiesOS.q')
censortimes<-qread('censortimesfin.q')
id<-as.data.frame(1:nrow(mice::complete(mice,1)))
folds<-caret::createResample(1:nrow(mice::complete(mice,1)),times=1000)
t<-1
cl <- makePSOCKcluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(CORElearn),library(miceadds),library(splines),library(matrixStats)))
# NOTE(review): foreach(t=1:1) runs only the first resample here --
# presumably raised to 1:1000 for the full validation run.
system.time(list<-foreach(t=1:1)%dopar%{
estimates<-1
stander<-1
estimates2<-1
stander2<-1
# NOTE(review): `1:length(mice)` on a mids object over-counts the
# imputations; `1:mice$m` is presumably intended (as used elsewhere).
for (n in 1:length(mice)){
datax<-vtreat::prepare(dummies,mice::complete(mice,n),pruneSig=c())
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
# Fit on the bootstrap resample rows.
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='none')
deathtimes<-predict(rsf1,datax[-folds[[t]],],type='response')$unique.death.times
###Generate log predictions and standard errors on out of training samples
estimates[n]<-list(log(predict(rsf1,datax[-folds[[t]],],type='response')$survival))
survfull<-log(predict(rsf1,datax[-folds[[t]],],type='response',predict.all=TRUE)$survival)
standerx<-as.data.frame(1:nrow(datax[-folds[[t]],]))
# SE of the log-survival prediction = SD across individual trees.
for (p in 1:length(deathtimes)){
for (q in 1:nrow(datax[-folds[[t]],])){
standerx[q,p]<-sd(survfull[q,p,c(1:rsf1$num.trees)])
}
}
stander[n]<-list(standerx)
deathtimes2<-predict(rsf1,datax[folds[[t]],],type='response')$unique.death.times
###Generate log predictions and standard errors on training samples
estimates2[n]<-list(log(predict(rsf1,datax[folds[[t]],],type='response')$survival))
survfull2<-log(predict(rsf1,datax[folds[[t]],],type='response',predict.all=TRUE)$survival)
# NOTE(review): this frame is sized with `-folds` (testing rows) but
# filled over the training rows below -- likely a copy-paste bug; confirm.
standerx2<-as.data.frame(1:nrow(datax[-folds[[t]],]))
for (p in 1:length(deathtimes2)){
for (q in 1:nrow(datax[folds[[t]],])){
standerx2[q,p]<-sd(survfull2[q,p,c(1:rsf1$num.trees)])
}
}
stander2[n]<-list(standerx2)
}
####Combine predictions from imputed datasets for final out of training and training predictions
# NOTE(review): imputation 4 (estimates[[4]]/stander[[4]]) is omitted from
# the pooling below -- confirm whether that exclusion is deliberate.
mipreds<-as.data.frame(1:nrow(datax[-folds[[t]],]))
for (m in 1:(length(deathtimes)-2)){
mipreds[,m]<-as.data.frame(t(as.data.frame(mi.meld(q=cbind(estimates[[1]][,m],estimates[[2]][,m],estimates[[3]][,m],estimates[[5]][,m],
estimates[[6]][,m],estimates[[7]][,m],estimates[[8]][,m],estimates[[9]][,m],estimates[[10]][,m]),
se=cbind(stander[[1]][,m],stander[[2]][,m],stander[[3]][,m],stander[[5]][,m],
stander[[6]][,m],stander[[7]][,m],stander[[8]][,m],stander[[9]][,m],stander[[10]][,m]),byrow=FALSE)[1])))
}
mipredsa<-as.data.frame(1:nrow(datax[folds[[t]],]))
for (m in 1:(length(deathtimes2)-2)){
mipredsa[,m]<-as.data.frame(t(as.data.frame(mi.meld(q=cbind(estimates2[[1]][,m],estimates2[[2]][,m],estimates2[[3]][,m],estimates2[[5]][,m],
estimates2[[6]][,m],estimates2[[7]][,m],estimates2[[8]][,m],estimates2[[9]][,m],estimates2[[10]][,m]),
se=cbind(stander2[[1]][,m],stander2[[2]][,m],stander2[[3]][,m],stander2[[5]][,m],
stander2[[6]][,m],stander2[[7]][,m],stander2[[8]][,m],stander2[[9]][,m],stander2[[10]][,m]),byrow=FALSE)[1])))
}
# Back-transform the pooled log-survival estimates to survival scale.
testingpreds<-exp(mipreds)
rm(mipreds)
trainingpreds<-exp(mipredsa)
rm(mipredsa)
# Attach row identifiers so predictions can be merged with outcomes later.
testingpreds$id<-id[-folds[[t]],]
trainingpreds$id<-id[folds[[t]],]
output<-list(testingpreds,trainingpreds)
return(list(output))
})
stopCluster(cl)
registerDoSEQ()
qsave(list,'finallist.q')
}
####Internal validation - evaluation metrics
{
###Create id/survival dataframe
ids<-select(survdata,monthssurv,onsdeath)
ids$id<-1:nrow(ids)
simplebootstrap<-1
boot.632<-1
calibplot<-1
calibplot632<-1
fullbt<-1
full632<-1
###Separate out dataframes of predictions
{
df<-1
for (i in 1:length(finlist)){
df[i]<-list(finlist[[i]][[1]][[1]])
}
for (i in 1:length(df)){
df[i]<-list(merge(df[[i]],ids,by='id'))
}
df2<-1
for (i in 1:length(finlist)){
df2[i]<-list(dplyr::distinct(finlist[[i]][[1]][[2]]))
}
for (i in 1:length(df2)){
df2[i]<-list(merge(df2[[i]],ids,by='id'))
}
###Recreate dataframe of predictions on original dataset for each bootstrap resample
{
df3<-1
for (i in 1:length(df)){
x<-df[[i]][,-2]
y<-df2[[i]][,-2]
x1<-rbind(x,y)
df3[i]<-list(dplyr::distinct(x1))
}
}
###Simple Bootstrap validation
{
###Calculate tROC, c-index and ibrier for bootstrap sample
troc3<-1
cid<-1
ibrier<-1
for (i in 1:length(df3)){
###set to 59 as all patients censored at 60
troc3[i]<-timeROC(df3[[i]]$monthssurv,df3[[i]]$ons_death,1-df3[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df3[[i]],Surv(df3[[i]]$monthssurv,df3[[i]]$ons_death))
cid[i]<-rcorr.cens(x=df3[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df3[[i]])
####may need to edit column numbers in dat[,c(2:62)]
ibrier[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
###Generate calibration chart for each bootstrap resample
plots<-1
for (k in 1:length(df3)){
{
calibrsf<-df3[[k]]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df3[[k]]$ons_death)
censordata$monthssurv<-df3[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,59],
breaks = quantile(calibrsf[,59],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='0-20')
dec1a<-select(dec1,-decile)
estimatesdec1<-as.data.frame(1:63)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
OSKMa <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2a<-as.data.frame(cbind(OSKMa$time,OSKMa$surv))
names(OSKM2a)<-c('time','surv')
if (nrow(OSKM2a)!=60) OSKM2a<-merge(times,OSKM2a,by='time',all=TRUE)
if (is.na(OSKM2a[1,2])) OSKM2a[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2a[i,2]))
OSKM2a[i,2]<-OSKM2a[i-1,2]
}
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='20-40')
dec1a<-select(dec1,-decile)
estimatesdec1b<-as.data.frame(1:63)
estimatesdec1b$Time<-estimatesdec1b[,1]
estimatesdec1b$survival<-colMeans(dec1a)
OSKMb <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2b<-as.data.frame(cbind(OSKMb$time,OSKMb$surv))
names(OSKM2b)<-c('time','surv')
if (nrow(OSKM2b)!=60) OSKM2b<-merge(times,OSKM2b,by='time',all=TRUE)
if (is.na(OSKM2b[1,2])) OSKM2b[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2b[i,2]))
OSKM2b[i,2]<-OSKM2b[i-1,2]
}
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='40-60')
dec1a<-select(dec1,-decile)
estimatesdec1c<-as.data.frame(1:63)
estimatesdec1c$Time<-estimatesdec1c[,1]
estimatesdec1c$survival<-colMeans(dec1a)
OSKMc <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2c<-as.data.frame(cbind(OSKMc$time,OSKMc$surv))
names(OSKM2c)<-c('time','surv')
if (nrow(OSKM2c)!=60) OSKM2c<-merge(times,OSKM2c,by='time',all=TRUE)
if (is.na(OSKM2c[1,2])) OSKM2c[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2c[i,2]))
OSKM2c[i,2]<-OSKM2c[i-1,2]
}
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='60-80')
dec1a<-select(dec1,-decile)
estimatesdec1d<-as.data.frame(1:63)
estimatesdec1d$Time<-estimatesdec1d[,1]
estimatesdec1d$survival<-colMeans(dec1a)
OSKMd <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2d<-as.data.frame(cbind(OSKMd$time,OSKMd$surv))
names(OSKM2d)<-c('time','surv')
if (nrow(OSKM2d)!=60) OSKM2d<-merge(times,OSKM2d,by='time',all=TRUE)
if (is.na(OSKM2d[1,2])) OSKM2d[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2d[i,2]))
OSKM2d[i,2]<-OSKM2d[i-1,2]
}
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='80-100')
dec1a<-select(dec1,-decile)
estimatesdec1e<-as.data.frame(1:63)
estimatesdec1e$Time<-estimatesdec1e[,1]
estimatesdec1e$survival<-colMeans(dec1a)
OSKMe <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2e<-as.data.frame(cbind(OSKMe$time,OSKMe$surv))
names(OSKM2e)<-c('time','surv')
if (nrow(OSKM2e)!=60) OSKM2e<-merge(times,OSKM2e,by='time',all=TRUE)
if (is.na(OSKM2e[1,2])) OSKM2e[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2e[i,2]))
OSKM2e[i,2]<-OSKM2e[i-1,2]
}
estimatesdec1$col='Predicted'
estimatesdec1b$col='Predicted'
estimatesdec1c$col='Predicted'
estimatesdec1d$col='Predicted'
estimatesdec1e$col='Predicted'
estimatesdec1$lt='1'
estimatesdec1b$lt='2'
estimatesdec1c$lt='3'
estimatesdec1d$lt='4'
estimatesdec1e$lt='5'
estimatesdec1<-select(estimatesdec1,-1)
estimatesdec1b<-select(estimatesdec1b,-1)
estimatesdec1c<-select(estimatesdec1c,-1)
estimatesdec1d<-select(estimatesdec1d,-1)
estimatesdec1e<-select(estimatesdec1e,-1)
colnames(OSKM2a)<-c('Time','survival')
colnames(OSKM2b)<-c('Time','survival')
colnames(OSKM2c)<-c('Time','survival')
colnames(OSKM2d)<-c('Time','survival')
colnames(OSKM2e)<-c('Time','survival')
OSKM2a$col='Observed'
OSKM2b$col='Observed'
OSKM2c$col='Observed'
OSKM2d$col='Observed'
OSKM2e$col='Observed'
OSKM2a$lt='1'
OSKM2b$lt='2'
OSKM2c$lt='3'
OSKM2d$lt='4'
OSKM2e$lt='5'
colnames(estimatesdec1)
colnames(OSKM2a)
full<-rbind(estimatesdec1,estimatesdec1b,estimatesdec1c,estimatesdec1d,estimatesdec1e,
OSKM2a,OSKM2b,OSKM2c,OSKM2d,OSKM2e)
full
plot<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
}
}
plots[k]<-list(plot)
}
###Average calibration chart for final simple bootstrap calibration chart
surv<-as.data.frame(1:615)
for (i in 1:length(plots)){
surv[,i]<-plots[[i]]$data$survival
}
survs<-rowMeans(surv)
full$survival<-survs
fullx<-full[full$Time==1,]
fullx$Time<-0
fullx$survival<-1
full2<-rbind(full,fullx)
plot2<-ggplot(full2,aes(Time,survival))+
geom_line(data=full2,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
bootstrapcalibration<-plot2
###Combine validation metrics across bootstrap resamples
out<-as.data.frame(t(as.data.frame(c(mean(troc3),quantile(troc3,probs=c(0.025,0.975))))))
out<-rbind(out,c(mean(cid),quantile(cid,probs=c(0.025,0.975))))
out<-rbind(out,c(mean(ibrier),quantile(ibrier,probs=c(0.025,0.975))))
colnames(out)<-c('mean','2.5%','97.5%')
rownames(out)<-c('tROC','CiD','iBrier')
simplebootstrap<-list(out)
rm(out)
}
simplebootstrap
bootstrapcalibration
###0.632 bootstrap validation
{
###Calculate tROC, c-index and ibrier for Testing Samples
{
trocTE<-1
cidTE<-1
ibrierTE<-1
for (i in 1:length(df)){
trocTE[i]<-timeROC(df[[i]]$monthssurv,df[[i]]$ons_death,1-df[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df[[i]],Surv(df[[i]]$monthssurv,df[[i]]$ons_death))
cidTE[i]<-rcorr.cens(x=df[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df[[i]])
ibrierTE[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
}
###Calculate tROC, c-index and ibrier for Training Samples
{
trocTR<-1
cidTR<-1
ibrierTR<-1
for (i in 1:length(df2)){
trocTR[i]<-timeROC(df2[[i]]$monthssurv,df2[[i]]$ons_death,1-df2[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df2[[i]],Surv(df2[[i]]$monthssurv,df2[[i]]$ons_death))
cidTR[i]<-rcorr.cens(x=df2[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df2[[i]])
ibrierTR[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
}
###Combine testing and training in 0.632/0.368 ratio
{
troc632<-((trocTR*0.368)+(trocTE*0.632))
cid632<-((cidTR*0.368)+(cidTE*0.632))
ibrier632<-((ibrierTR*0.368)+(ibrierTE*0.632))
out1<-as.data.frame(t(as.data.frame(c(mean(troc632),quantile(troc632,probs=c(0.025,0.975))))))
out1<-rbind(out1,c(mean(cid632),quantile(cid632,probs=c(0.025,0.975))))
out1<-rbind(out1,c(mean(ibrier632),quantile(ibrier632,probs=c(0.025,0.975))))
colnames(out1)<-c('mean','2.5%','97.5%')
rownames(out1)<-c('tROC','CiD','iBrier')
boot.632<-list(out1)
rm(out1)
}
###Quintile calibration plots
plots<-1
{
for (k in 1:length(df)){
{
###Generate calibration plots for testing cases
# Calibration for resample k's TESTING (out-of-bag) cases: within quintiles of
# the predicted survival probability, compare the mean predicted survival
# curve against the observed Kaplan-Meier curve.
# NOTE(review): column 59 of df[[k]] is used as the grouping probability here,
# whereas the training block below uses column 37 of a trimmed frame - confirm
# both index the same prediction time point.
calibrsf<-df[[k]]
calibrsf
calib<-calibrsf
calib$id<-1:nrow(calib)
# Work on a copy of the outcome columns so they can be administratively
# censored at 60 months without altering df[[k]].
censordata<-as.data.frame(df[[k]]$ons_death)
censordata$monthssurv<-df[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
# Events occurring after 60 months are recoded as censored...
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
# ...and follow-up time is capped at 60 months.
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
# Assign each case to a quintile of the predicted survival probability
# (despite the name 'decile', seq(0,1,by=0.2) yields five groups).
calib$decile<-with(calibrsf,cut(calibrsf[,59],
breaks = quantile(calibrsf[,59],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
# One stanza per quintile (objects suffixed a-e):
#  * 'Predicted' curve (estimatesdec1*): column-wise mean of the individual
#    predicted survival curves in the quintile.
#  * 'Observed' curve (OSKM2*): Kaplan-Meier estimate for the same cases,
#    expanded to a monthly 1-60 grid; missing months are filled by carrying
#    the previous month's value forward (and S(1)=1 if the first is missing).
# NOTE(review): dec1a still contains the death/months columns, so the last
# two colMeans entries (Time 62-63) are not survival probabilities; they fall
# outside xlim(0,60) and are never drawn.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='0-20')
dec1a<-select(dec1,-decile)
estimatesdec1<-as.data.frame(1:63)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
OSKMa <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2a<-as.data.frame(cbind(OSKMa$time,OSKMa$surv))
names(OSKM2a)<-c('time','surv')
if (nrow(OSKM2a)!=60) OSKM2a<-merge(times,OSKM2a,by='time',all=TRUE)
if (is.na(OSKM2a[1,2])) OSKM2a[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2a[i,2]))
OSKM2a[i,2]<-OSKM2a[i-1,2]
}
# Quintile 20-40.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='20-40')
dec1a<-select(dec1,-decile)
estimatesdec1b<-as.data.frame(1:63)
estimatesdec1b$Time<-estimatesdec1b[,1]
estimatesdec1b$survival<-colMeans(dec1a)
OSKMb <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2b<-as.data.frame(cbind(OSKMb$time,OSKMb$surv))
names(OSKM2b)<-c('time','surv')
if (nrow(OSKM2b)!=60) OSKM2b<-merge(times,OSKM2b,by='time',all=TRUE)
if (is.na(OSKM2b[1,2])) OSKM2b[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2b[i,2]))
OSKM2b[i,2]<-OSKM2b[i-1,2]
}
# Quintile 40-60.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='40-60')
dec1a<-select(dec1,-decile)
estimatesdec1c<-as.data.frame(1:63)
estimatesdec1c$Time<-estimatesdec1c[,1]
estimatesdec1c$survival<-colMeans(dec1a)
OSKMc <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2c<-as.data.frame(cbind(OSKMc$time,OSKMc$surv))
names(OSKM2c)<-c('time','surv')
if (nrow(OSKM2c)!=60) OSKM2c<-merge(times,OSKM2c,by='time',all=TRUE)
if (is.na(OSKM2c[1,2])) OSKM2c[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2c[i,2]))
OSKM2c[i,2]<-OSKM2c[i-1,2]
}
# Quintile 60-80.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='60-80')
dec1a<-select(dec1,-decile)
estimatesdec1d<-as.data.frame(1:63)
estimatesdec1d$Time<-estimatesdec1d[,1]
estimatesdec1d$survival<-colMeans(dec1a)
OSKMd <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2d<-as.data.frame(cbind(OSKMd$time,OSKMd$surv))
names(OSKM2d)<-c('time','surv')
if (nrow(OSKM2d)!=60) OSKM2d<-merge(times,OSKM2d,by='time',all=TRUE)
if (is.na(OSKM2d[1,2])) OSKM2d[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2d[i,2]))
OSKM2d[i,2]<-OSKM2d[i-1,2]
}
# Quintile 80-100.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='80-100')
dec1a<-select(dec1,-decile)
estimatesdec1e<-as.data.frame(1:63)
estimatesdec1e$Time<-estimatesdec1e[,1]
estimatesdec1e$survival<-colMeans(dec1a)
OSKMe <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2e<-as.data.frame(cbind(OSKMe$time,OSKMe$surv))
names(OSKM2e)<-c('time','surv')
if (nrow(OSKM2e)!=60) OSKM2e<-merge(times,OSKM2e,by='time',all=TRUE)
if (is.na(OSKM2e[1,2])) OSKM2e[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2e[i,2]))
OSKM2e[i,2]<-OSKM2e[i-1,2]
}
# Tag curves for plotting: colour distinguishes Predicted vs Observed,
# linetype (lt) distinguishes the five quintiles.
estimatesdec1$col='Predicted'
estimatesdec1b$col='Predicted'
estimatesdec1c$col='Predicted'
estimatesdec1d$col='Predicted'
estimatesdec1e$col='Predicted'
estimatesdec1$lt='1'
estimatesdec1b$lt='2'
estimatesdec1c$lt='3'
estimatesdec1d$lt='4'
estimatesdec1e$lt='5'
# Drop the helper index column so every frame shares (Time, survival, col, lt).
estimatesdec1<-select(estimatesdec1,-1)
estimatesdec1b<-select(estimatesdec1b,-1)
estimatesdec1c<-select(estimatesdec1c,-1)
estimatesdec1d<-select(estimatesdec1d,-1)
estimatesdec1e<-select(estimatesdec1e,-1)
colnames(OSKM2a)<-c('Time','survival')
colnames(OSKM2b)<-c('Time','survival')
colnames(OSKM2c)<-c('Time','survival')
colnames(OSKM2d)<-c('Time','survival')
colnames(OSKM2e)<-c('Time','survival')
OSKM2a$col='Observed'
OSKM2b$col='Observed'
OSKM2c$col='Observed'
OSKM2d$col='Observed'
OSKM2e$col='Observed'
OSKM2a$lt='1'
OSKM2b$lt='2'
OSKM2c$lt='3'
OSKM2d$lt='4'
OSKM2e$lt='5'
colnames(estimatesdec1)
colnames(OSKM2a)
# Stack all ten curves (5 predicted x 63 rows + 5 observed x 60 rows) and
# draw the quintile calibration plot for the testing cases. 'full' and 'plot'
# are reused by the 0.632 combination and averaging steps further down.
full<-rbind(estimatesdec1,estimatesdec1b,estimatesdec1c,estimatesdec1d,estimatesdec1e,
OSKM2a,OSKM2b,OSKM2c,OSKM2d,OSKM2e)
full
plot<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
}
}
####0.632 Calibration plots in each resample
{
{###Generate calibration plots for training cases
# Same quintile-calibration construction as above, but for resample k's
# TRAINING cases (df2). The first two and last two columns are dropped and
# the frame truncated to 61 columns so that only the predicted survival
# curves remain.
# NOTE(review): the grouping probability here is column 37 of the trimmed
# frame (vs column 59 in the testing block) - verify both point at the same
# prediction time point.
calibrsf<-df2[[k]]
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calibrsf
calib<-calibrsf
calib$id<-1:nrow(calib)
# Administrative censoring at 60 months, as in the testing block.
censordata<-as.data.frame(df2[[k]]$ons_death)
censordata$monthssurv<-df2[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
# Quintiles of the predicted survival probability (name 'decile' is historic).
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
# Per-quintile stanzas (a-e): mean predicted curve vs LOCF-filled monthly
# Kaplan-Meier curve, exactly as in the testing block.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='0-20')
dec1a<-select(dec1,-decile)
estimatesdec1<-as.data.frame(1:63)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
OSKMa <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2a<-as.data.frame(cbind(OSKMa$time,OSKMa$surv))
names(OSKM2a)<-c('time','surv')
if (nrow(OSKM2a)!=60) OSKM2a<-merge(times,OSKM2a,by='time',all=TRUE)
if (is.na(OSKM2a[1,2])) OSKM2a[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2a[i,2]))
OSKM2a[i,2]<-OSKM2a[i-1,2]
}
# Quintile 20-40.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='20-40')
dec1a<-select(dec1,-decile)
estimatesdec1b<-as.data.frame(1:63)
estimatesdec1b$Time<-estimatesdec1b[,1]
estimatesdec1b$survival<-colMeans(dec1a)
OSKMb <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2b<-as.data.frame(cbind(OSKMb$time,OSKMb$surv))
names(OSKM2b)<-c('time','surv')
if (nrow(OSKM2b)!=60) OSKM2b<-merge(times,OSKM2b,by='time',all=TRUE)
if (is.na(OSKM2b[1,2])) OSKM2b[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2b[i,2]))
OSKM2b[i,2]<-OSKM2b[i-1,2]
}
# Quintile 40-60.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='40-60')
dec1a<-select(dec1,-decile)
estimatesdec1c<-as.data.frame(1:63)
estimatesdec1c$Time<-estimatesdec1c[,1]
estimatesdec1c$survival<-colMeans(dec1a)
OSKMc <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2c<-as.data.frame(cbind(OSKMc$time,OSKMc$surv))
names(OSKM2c)<-c('time','surv')
if (nrow(OSKM2c)!=60) OSKM2c<-merge(times,OSKM2c,by='time',all=TRUE)
if (is.na(OSKM2c[1,2])) OSKM2c[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2c[i,2]))
OSKM2c[i,2]<-OSKM2c[i-1,2]
}
# Quintile 60-80.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='60-80')
dec1a<-select(dec1,-decile)
estimatesdec1d<-as.data.frame(1:63)
estimatesdec1d$Time<-estimatesdec1d[,1]
estimatesdec1d$survival<-colMeans(dec1a)
OSKMd <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2d<-as.data.frame(cbind(OSKMd$time,OSKMd$surv))
names(OSKM2d)<-c('time','surv')
if (nrow(OSKM2d)!=60) OSKM2d<-merge(times,OSKM2d,by='time',all=TRUE)
if (is.na(OSKM2d[1,2])) OSKM2d[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2d[i,2]))
OSKM2d[i,2]<-OSKM2d[i-1,2]
}
# Quintile 80-100.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='80-100')
dec1a<-select(dec1,-decile)
estimatesdec1e<-as.data.frame(1:63)
estimatesdec1e$Time<-estimatesdec1e[,1]
estimatesdec1e$survival<-colMeans(dec1a)
OSKMe <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2e<-as.data.frame(cbind(OSKMe$time,OSKMe$surv))
names(OSKM2e)<-c('time','surv')
if (nrow(OSKM2e)!=60) OSKM2e<-merge(times,OSKM2e,by='time',all=TRUE)
if (is.na(OSKM2e[1,2])) OSKM2e[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2e[i,2]))
OSKM2e[i,2]<-OSKM2e[i-1,2]
}
# Label curves (colour = Predicted/Observed, lt = quintile) and drop the
# helper index column so all frames share (Time, survival, col, lt).
estimatesdec1$col='Predicted'
estimatesdec1b$col='Predicted'
estimatesdec1c$col='Predicted'
estimatesdec1d$col='Predicted'
estimatesdec1e$col='Predicted'
estimatesdec1$lt='1'
estimatesdec1b$lt='2'
estimatesdec1c$lt='3'
estimatesdec1d$lt='4'
estimatesdec1e$lt='5'
estimatesdec1<-select(estimatesdec1,-1)
estimatesdec1b<-select(estimatesdec1b,-1)
estimatesdec1c<-select(estimatesdec1c,-1)
estimatesdec1d<-select(estimatesdec1d,-1)
estimatesdec1e<-select(estimatesdec1e,-1)
colnames(OSKM2a)<-c('Time','survival')
colnames(OSKM2b)<-c('Time','survival')
colnames(OSKM2c)<-c('Time','survival')
colnames(OSKM2d)<-c('Time','survival')
colnames(OSKM2e)<-c('Time','survival')
OSKM2a$col='Observed'
OSKM2b$col='Observed'
OSKM2c$col='Observed'
OSKM2d$col='Observed'
OSKM2e$col='Observed'
OSKM2a$lt='1'
OSKM2b$lt='2'
OSKM2c$lt='3'
OSKM2d$lt='4'
OSKM2e$lt='5'
colnames(estimatesdec1)
colnames(OSKM2a)
# Stack the ten curves and build the training-case calibration plot.
full<-rbind(estimatesdec1,estimatesdec1b,estimatesdec1c,estimatesdec1d,estimatesdec1e,
OSKM2a,OSKM2b,OSKM2c,OSKM2d,OSKM2e)
plot1<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
}
}
####0.632 combination of testing and training cases
# Weighted 0.632/0.368 combination of the testing ('plot') and training
# ('plot1') calibration curves for this resample, stored in plots[k].
plot3<-plot
plot3$data$survival<-(0.632*(plot$data$survival)+0.368*(plot1$data$survival))
plots[k]<-list(plot3)
}
}
####Average calibration plots across all bootstrap resamples
{
# Average the combined (0.632) calibration curves across all bootstrap
# resamples: every plots[[i]]$data holds one survival value per
# (curve, time) row on the same layout, so a row-wise mean over resamples
# gives the average curves, drawn on the layout of the last 'full' frame.
# Row count is taken from the data itself rather than hard-coding 615
# (5 predicted curves x 63 rows + 5 observed curves x 60 rows).
surv<-as.data.frame(matrix(nrow=nrow(plots[[1]]$data),ncol=length(plots)))
for (i in seq_along(plots)){
surv[,i]<-plots[[i]]$data$survival
}
survs<-rowMeans(surv)
full$survival<-survs
plot3<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
calibplot632<-list(plot3)
}
}
}
# Print the 0.632 performance summary table and the averaged calibration plot.
boot.632
calibplot632
###decile plots RSF/oCPH 12s for 10reps
system.time({
decdata632<-1
oskmdata632<-1
modelcalprob<-1
modelkmprob<-1
for (m in c(2,4)){
z<-m*2
df<-1
for (i in 1:length(finlist)){
df[i]<-list(finlist[[i]][[1]][[z]])
}
for (i in 1:length(df)){
df[i]<-list(merge(df[[i]],ids,by='id'))
}
df2<-1
for (i in 1:length(finlist)){
df2[i]<-list(dplyr::distinct(finlist[[i]][[1]][[z+1]]))
}
for (i in 1:length(df2)){
df2[i]<-list(merge(df2[[i]],ids,by='id'))
}
for (k in 1:length(finlist)){
{
calibrsf<-df[[k]]
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df[[k]]$ons_death)
censordata$monthssurv<-df[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.1)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-10','10-20','20-30','30-40','40-50','50-60','60-70','70-80','80-90','90-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
group<-'0-10'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
a<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
aa<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
ab<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'10-20'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
b<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ba<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
bb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'20-30'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
c<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ca<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
cb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'30-40'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
d<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
da<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
db<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'40-50'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
e<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ea<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
eb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'50-60'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
f<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
fa<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
fb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'60-70'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
g<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ga<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
gb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'70-80'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
h<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ha<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
hb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'80-90'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
ii<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ia<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
ib<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'90-100'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
j<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ja<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
jb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
}
}
{
calibrsf<-df2[[k]]
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df2[[k]]$ons_death)
censordata$monthssurv<-df2[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.1)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-10','10-20','20-30','30-40','40-50','50-60','60-70','70-80','80-90','90-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
group<-'0-10'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
a2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
a2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
a2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'10-20'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
b2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
b2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
b2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'20-30'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
c2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
c2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
c2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'30-40'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
d2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
d2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
d2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'40-50'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
e2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
e2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
e2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'50-60'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
f2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
f2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
f2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'60-70'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
g2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
g2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
g2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'70-80'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
h2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
h2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
h2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'80-90'
# Same calibration recipe as the preceding group blocks, for the 80-90
# predicted-risk group. Outputs h2 / h2a / h2b.
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
h2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
h2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
h2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'90-100'
# Same calibration recipe for the 90-100 predicted-risk group.
# Outputs i2 / i2a / i2b (black = predicted mean, red = observed KM,
# green = second estimate set).
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
j2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
j2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
j2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
}
}
# Collect the per-decile ggplot objects built above: 'te' = test (out-of-bag)
# curves, 'tr' = training curves; black = predicted, red = observed KM.
blackplotte<-list(a,b,c,d,e,f,g,h,ii,j)
blackplottr<-list(a2,b2,c2,d2,e2,f2,g2,h2,i2,j2)
redplotte<-list(aa,ba,ca,da,ea,fa,ga,ha,ia,ja)
redplottr<-list(a2a,b2a,c2a,d2a,e2a,f2a,g2a,h2a,i2a,j2a)
# 0.632 bootstrap blend: interpolate each curve onto 100 points and combine
# test (weight 0.632) with training (weight 0.368).
blackplot632<-1
for (qq in 1:10){
# NOTE(review): the y-blend drops the first column ($data[,-1]) but the
# x-blend uses the full $data, and the red loop below uses $data for both —
# confirm this asymmetry is intentional.
newdf<-as.data.frame((0.632*approx(blackplotte[[qq]]$data[,-1],n=100)$y)+(0.368*approx(blackplottr[[qq]]$data[,-1],n=100)$y))
newdf$time<-((0.632*approx(blackplotte[[qq]]$data,n=100)$x)+(0.368*approx(blackplottr[[qq]]$data,n=100)$x))
colnames(newdf)<-c('surv','time')
blackplot632[qq]<-list(newdf)
}
redplot632<-1
for (qqq in 1:10){
newdf<-as.data.frame((0.632*approx(redplotte[[qqq]]$data,n=100)$y)+(0.368*approx(redplottr[[qqq]]$data,n=100)$y))
newdf$time<-((0.632*approx(redplotte[[qqq]]$data,n=100)$x)+(0.368*approx(redplottr[[qqq]]$data,n=100)$x))
colnames(newdf)<-c('surv','time')
redplot632[qqq]<-list(newdf)
}
# Store the blended decile curves for this outer iteration k.
decdata632[k]<-list(blackplot632)
oskmdata632[k]<-list(redplot632)
}
# Average the blended 0.632 curves over all elements of finlist, per decile t:
# calprobdec = predicted (black) curves, kmprobdec = observed KM (red) curves.
calprobdec<-1
probdec<-1
for (t in 1:10){
# Use the first element as a template, then overwrite surv with the mean.
probdec<-decdata632[[1]][[t]]
surv<-as.data.frame(1:100)
for (i in 1:length(finlist)){
surv[,i]<-decdata632[[i]][[t]]$surv
}
probdec$surv<-rowMeans(surv)
calprobdec[t]<-list(probdec)
}
kmprobdec<-1
probdec<-1
for (t in 1:10){
probdec<-oskmdata632[[1]][[t]]
surv<-as.data.frame(1:100)
for (i in 1:length(finlist)){
surv[,i]<-oskmdata632[[i]][[t]]$surv
}
probdec$surv<-rowMeans(surv)
kmprobdec[t]<-list(probdec)
}
# Store per-model results at outer index m.
modelcalprob[m]<-list(calprobdec)
modelkmprob[m]<-list(kmprobdec)
}
# Per-decile calibration overlays: black = averaged predicted curve,
# red = averaged observed KM curve. Model index 2 and 4 are compared;
# plots3 overlays model 4's predictions (green) on model 2's chart.
plots<-1
for (i in 1:10){
plots[i]<-list(ggplot(modelcalprob[[2]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[2]][[i]],aes(time,surv),colour='red')
)
}
plots2<-1
for (i in 1:10){
plots2[i]<-list(ggplot(modelcalprob[[4]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[4]][[i]],aes(time,surv),colour='red'))
}
plots3<-1
for (i in 1:10){
plots3[i]<-list(ggplot(modelcalprob[[2]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[2]][[i]],aes(time,surv),colour='red')+
geom_line(data=modelcalprob[[4]][[i]],aes(time,surv),colour='green'))
}
})
# Persist all results to disk with qs (fast serialized R objects).
qsave(modelcalprob,'modelcalprob.q')
qsave(modelkmprob,'modelkmprob.q')
qsave(plots,'deccalplotrsf.q')
qsave(plots2,'deccalplotocph.q')
qsave(plots3,'deccalplotorsfcph.q')
qsave(simplebootstrap,'simplebootstrap.q')
qsave(boot.632,'boot.632.q')
qsave(calibplot,'calibplot1.q')
qsave(calibplot632,'calibplot632.q')
qsave(fullbt,'fullbt.q')
qsave(full632,'full632.q')
qsave(trocplots,'trocplots.q')
}
} | /RSFtraining.R | no_license | LihaoDuan/AUGIS-Surv | R | false | false | 81,834 | r |
# Packages required across the whole pipeline (imputation, RSF fitting,
# parallelism, validation metrics, plotting, fast serialization).
packages<-c('dplyr','mice','vtreat','Amelia','caret','doParallel','foreach',
'ggplot2','rms','parallel','pec','matrixStats','prodlim','qs','ranger','survival','timeROC')
# Compute the missing set once (the original called setdiff() twice).
missing_pkgs <- setdiff(packages, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
install.packages(missing_pkgs)
}
# library() fails loudly if a package cannot be attached, unlike require()
# which only returns FALSE; invisible() suppresses the printed TRUE list.
invisible(lapply(packages, library, character.only = TRUE))
####Prepare and clean dataset
# 'survdata' is expected in the workspace at this point (prepared elsewhere);
# the bare expression just prints it for inspection.
survdata
####Impute for missing data, 10 imputations, 10 iterations
mice<- mice(survdata,m=10,maxit=10,cluster.seed=500)
qsave(mice,'mice.q')
####Train model on full dataset and tune hyperparameters in tgrid
# For each imputed dataset: grid-search ranger hyperparameters in parallel
# using the OOB prediction error (1 - OOB C-index for survival forests),
# then refit the best combination with permutation importance.
{
mice<-qread('mice.q')
tgrid<-expand.grid(
num.trees=c(200,250,300),
.mtry=5:10,
.splitrule=c("logrank"),
.min.node.size=c(20,25,30,35,40)
)
oob<-1
# Guard so a pre-existing rsfintlist (if defined earlier) is not clobbered.
if (!exists("rsfintlist")) rsfintlist <- list()
# NOTE(review): length(mice) on a mids object counts its list components,
# not the number of imputations; mice$m (as used elsewhere) is likely the
# intended bound — confirm.
for (i in 1:length(mice)){
data<-mice::complete(mice,i)
cl <- makeForkCluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms)))
system.time(list<-foreach(j=1:nrow(tgrid)) %dopar%{
# splitrule must be a single string, not the whole grid column.
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid$num.trees[j],mtry=tgrid$.mtry[j],min.node.size=tgrid$.min.node.size[j],splitrule=as.character(tgrid$.splitrule[j]))
###Calculate out of bag error (equivalent to 1-cindex in out of bag samples as measure of performance) for each combination of tuning parameters
oob[j]<-rsf$prediction.error
rm(rsf)
return(list(oob))
})
stopCluster(cl)
registerDoSEQ()
# BUGFIX: the original reused 'i' here, clobbering the outer imputation
# index so rsfintlist[i] always wrote to position nrow(tgrid).
for (jj in 1:nrow(tgrid)){
oob[jj]<-(list[[jj]][[1]][[jj]])
}
####select combination of hyperparameters that gives the lowest prediction error and train final model
rsf<-ranger(Surv(monthssurv,ons_death)~.,data,num.trees=tgrid[which.min(oob),]$num.trees,mtry=tgrid[which.min(oob),]$.mtry,min.node.size=tgrid[which.min(oob),]$.min.node.size,splitrule='logrank',importance='permutation')
rsfintlist[i]<-list(rsf)
}
qsave(rsfintlist,'rsfintlist.q')
}
####Variable selection by bootstrap
{
mice<-qread('survivalmicetrain.q')
rsflist<-qread('rsfintlist.q')
###Calculate Raw VIMP for 1st imputation dataset
# Fits one permutation-importance forest per imputed dataset (reusing the
# tuned hyperparameters) and collects a VIMP column per imputation.
# The foreach over t=1:1 exists to mirror the bootstrap loop below.
{
folds<-1:nrow(mice::complete(mice,1))
finalvimp<-1
cl <- makeForkCluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia)))
system.time(list<-foreach(t=1:1)%dopar%{
# One column of variable importances per imputation (ncol - 2 predictors,
# excluding the outcome pair).
vimp<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (n in 1:mice$m){
datax<-mice::complete(mice,n)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
# Round survival up to whole months.
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
vimp<-as.data.frame(list[[1]])
rownames(vimp)<-colnames(mice::complete(mice,1)[,1:(ncol(mice::complete(mice,1))-2)])
qsave(vimp,'mivimp.q')
}
####Create bootstrap resampling
{
data<-mice::complete(mice,1)
# 1000 bootstrap resamples of row indices; persisted so the same resamples
# can be reused across runs.
folds<-caret::createResample(1:nrow(mice::complete(mice,1)),times=1000)
qsave(folds,'folds.q')
folds<-qread('folds.q')
}
####Calculate boostrap vimp. Uses hyperparameters from full sample for computational reasons
# For each of the 1000 bootstrap resamples and each imputation, refit the
# forest on the resampled rows and record permutation VIMP.
{
finalvimp<-1
cl <- makeForkCluster(3)
registerDoParallel(cl)
system.time(list<-foreach(t=1:1000)%dopar%{
vimp<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (n in 1:mice$m){
datax<-mice::complete(mice,n)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
# Pack results into a 3-D array: predictors x imputations x resamples.
df<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
rownames(df)<-colnames(mice::complete(mice,1)[,1:(ncol(mice::complete(mice,1))-2)])
vimpdf<-array(unlist(df),dim=c((ncol(mice::complete(mice,1))-2),mice$m,length(folds)))
# Bare 'list' just prints the foreach results for inspection.
list
for (i in 1:1000){
for (j in 1:mice$m){
vimpdf[,j,i]<-(list[[i]][[1]][[i]][,j])
}
}
vimpdf
qsave(vimpdf,'vimpdf.q')
}
###Calculate standard error of vimp from bootstrap samples
{
df<-vimpdf
stderr<-as.data.frame(1:(ncol(mice::complete(mice,1))-2))
for (j in 1:mice$m){
for (k in 1:(ncol(mice::complete(mice,1))-2)){
# SE of each variable's importance across the bootstrap resamples.
stderr[k,j]<-std.error(as.matrix(df[k,j,]))
}
}
# NOTE(review): rownames here use columns c(1:40,43), whereas the VIMP
# rownames above used 1:(ncol-2) — confirm these select the same variables.
rownames(stderr)<-colnames(mice::complete(mice,1)[,c(1:40,43)])
stderr
}
###Combine across imputation datasets using Ruben's rules
# mi.meld pools the per-imputation VIMP point estimates and SEs (Rubin's rules).
{
mivimp<-qread('mivimp.q')
rownames(mivimp)<-rownames(stderr)
vimp<-mi.meld(q=mivimp,se=stderr,byrow=FALSE)
vimpa<-as.data.frame(t(vimp$q.mi))
vimpse<-as.data.frame(t(vimp$se.mi))
vimpf<-cbind2(vimpa,vimpse)
colnames(vimpf)<-c('VIMP','SE')
}
####Select only variables with 99% LCI of >0
# 2.576 = z-value for a two-sided 99% confidence interval.
vimpf$UCI<-(vimpf$VIMP+(2.576*vimpf$SE))
vimpf$LCI<-(vimpf$VIMP-(2.576*vimpf$SE))
vimpf2<-vimpf[order(-vimpf$VIMP),,drop=FALSE]
vimpf3<-subset(vimpf2,vimpf2$LCI>0)
vimpf3
finalvar<-rownames(vimpf3)
qsave(finalvar,'finalvar.q')
}
####create dummy variables
# Builds a vtreat cross-frame treatment plan over the selected variables;
# only the treatment plan ('dummies') is kept and persisted.
{
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvar.q')
####Create dummy coding scheme with mortality indicator specified
OSct<-mkCrossFrameNExperiment(mice::complete(mice,1),finalvar,'ons_death')
dummies<-OSct$treatments
qsave(dummies,'dummiesOS.q')
rm(OSct,dummies)
}
###Train full Random Forest models
# Retrains forests on the vtreat-prepared (dummy-coded) selected variables:
# per imputation, grid-search hyperparameters via OOB error, refit the best.
{
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvar.q')
dummies<-qread('dummiesOS.q')
tgrid<-expand.grid(
num.trees=c(200,300,400),
.mtry=10:20,
.splitrule=c("logrank"),
.min.node.size=c(10,20,30,40)
)
# Guard so a pre-existing rsflist (if defined earlier) is not clobbered.
if (!exists("rsflist")) rsflist <- list()
####For each mice imputation, train model, train hyperparameters and put into list
for (i in 1:length(mice)){
data<-vtreat::prepare(dummies,mice::complete(mice,i),pruneSig=c())
data$monthssurv<-mice::complete(mice,i)$monthssurv
data$monthssurv<-ceiling(data$monthssurv)
oob<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms)))
system.time(list<-foreach(j=1:nrow(tgrid)) %dopar%{
# splitrule must be a single string, not the whole grid column.
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid$num.trees[j],mtry=tgrid$.mtry[j],min.node.size=tgrid$.min.node.size[j],splitrule=as.character(tgrid$.splitrule[j]))
oob[j]<-rsf$prediction.error
rm(rsf)
return(list(oob))
})
stopCluster(cl)
registerDoSEQ()
# BUGFIX: the original reused 'i' here, clobbering the outer imputation
# index so rsflist[i] always wrote to position nrow(tgrid).
for (jj in 1:nrow(tgrid)){
oob[jj]<-(list[[jj]][[1]][[jj]])
}
rsf<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=tgrid[which.min(oob),]$num.trees,mtry=tgrid[which.min(oob),]$.mtry,min.node.size=tgrid[which.min(oob),]$.min.node.size,splitrule='logrank',importance='none')
rsflist[i]<-list(rsf)
}
qsave(rsflist,'rsflist.q')
}
####Variable importance in final models by bootstrap (similar to original VIMP calculation)
{
mice<-qread('survivalmicetrain.q')
rsflist<-qread('rsflist.q')
finalvar<-qread('finalvaros.q')
finalvimp<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(dplyr)))
n<-1
# Point-estimate VIMP on the full (non-resampled) data, per imputation.
system.time(list<-foreach(t=1:1)%dopar%{
vimp<-as.data.frame(1:(ncol(dplyr::select(mice::complete(mice,n),finalvar))))
for (n in 1:mice$m){
datax<-dplyr::select(mice::complete(mice,n),finalvar,ons_death)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax
# mtry is capped at 14 because only 14 selected variables are available.
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=ifelse(rsflist[[n]]$mtry<15,rsflist[[n]]$mtry,14),min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
vimp<-as.data.frame(list[[1]])
rownames(vimp)<-finalvar
vimp
list
qsave(vimp,'mivimp2.q')
data<-mice::complete(mice,1)
folds<-qread('folds.q')
n<-1
t<-1
# Bootstrap VIMP over the stored resamples (SE estimation).
{
finalvimp<-1
cl <- makePSOCKcluster(8)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(dplyr)))
system.time(list<-foreach(t=1:1000)%dopar%{
vimp<-as.data.frame(1:(ncol(dplyr::select(mice::complete(mice,n),finalvar))))
for (n in 1:mice$m){
datax<-dplyr::select(mice::complete(mice,n),finalvar,ons_death)
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=ifelse(rsflist[[n]]$mtry<15,rsflist[[n]]$mtry,14),min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='permutation')
vimp[,n]<-rsf1$variable.importance
}
finalvimp[t]<-list(vimp)
return(list(finalvimp))
})
stopCluster(cl)
registerDoSEQ()
# NOTE(review): the two lines below look like leftover debugging
# (i<-100 and a bare indexing expression); they have no lasting effect
# beyond setting i, which is immediately reassigned.
i<-100
list[[i]][[1]][[i]][,j]
df<-as.data.frame(1:14)
rownames(df)<-finalvar
# NOTE(review): 1000 bootstraps were run above but the array is dimensioned
# and looped over only 200 — confirm whether the remaining 800 are meant
# to be discarded.
dfa<-array(unlist(df),dim=c(14,mice$m,200))
for (i in 1:200){
for (j in 1:mice$m){
dfa[,j,i]<-(list[[i]][[1]][[i]][,j])
}
}
dfa
}
# SE per variable/imputation across resamples, then Rubin pooling.
stderr<-as.data.frame(1:14)
for (j in 1:mice$m){
for (k in 1:14){
stderr[k,j]<-std.error(as.matrix(dfa[k,j,]))
}
}
rownames(stderr)<-finalvar
stderr
mivimp<-qread('mivimp2.q')
vimp<-mi.meld(q=mivimp,se=stderr,byrow=FALSE)
vimpa<-as.data.frame(t(vimp$q.mi))
vimpse<-as.data.frame(t(vimp$se.mi))
vimpf<-cbind2(vimpa,vimpse)
colnames(vimpf)<-c('VIMP','SE')
# 95% CI here (1.96), unlike the 99% CI used for initial selection.
vimpf$UCI<-(vimpf$VIMP+(1.96*vimpf$SE))
vimpf$LCI<-(vimpf$VIMP-(1.96*vimpf$SE))
vimpf2<-vimpf[order(-vimpf$VIMP),,drop=FALSE]
vimpf2
qsave(vimpf2,'fullvimpfin.q')
}
####Calculate censoring times
# Builds a subjects x death-times status matrix:
#   1  = subject died on or before the death time,
#   NA = subject was censored on or before the death time,
#   0  = subject still under follow-up at the death time.
{
rsflist<-qread('rsflist.q')
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvarOS.q')
dummies<-qread('dummiesOS.q')
data<-vtreat::prepare(dummies,mice::complete(mice,1),pruneSig=c())
data$monthssurv<-mice::complete(mice,1)$monthssurv
# NOTE(review): [[3]] is assumed to be the forest's unique.death.times slot
# — confirm against the ranger object layout in use.
deathtimes<-rsflist[[1]][[3]]
# Vectorized replacement of the original per-cell side-effecting ifelse()
# loops; one column per death time, same values as before.
censor_matrix <- function(status, months, deathtimes) {
cols <- lapply(deathtimes, function(dt) {
ifelse(status == 1 & months <= dt, 1, ifelse(months <= dt, NA, 0))
})
out <- as.data.frame(cols, col.names = paste0("V", seq_along(deathtimes)))
out
}
# Column names differ from the original incremental build; downstream code
# indexes this object by position, not by name.
censortimes <- censor_matrix(data$ons_death, data$monthssurv, deathtimes)
qsave(censortimes,'censortimesfin.q')
}
####Internal validation - Generate bootstrap predictions
{
# NOTE(review): the encrypted modellist load below is immediately overridden
# by the plain qread() calls that follow — one of the two load paths is dead.
pw<-'Adalimumab3!'
modellist<-decrypt_object(qread('modellistEON.q'),key=pw)
rsflist<-modellist$rsflist
mice<-modellist$mice
finalvar<-modellist$Finalvars
dummies<-modellist$dummies
censortimes<-modellist$censortimes
rsflist<-qread('rsflist.q')
mice<-qread('survivalmicetrain.q')
finalvar<-qread('finalvarOS.q')
dummies<-qread('dummiesOS.q')
censortimes<-qread('censortimesfin.q')
id<-as.data.frame(1:nrow(mice::complete(mice,1)))
folds<-caret::createResample(1:nrow(mice::complete(mice,1)),times=1000)
t<-1
cl <- makePSOCKcluster(3)
registerDoParallel(cl)
clusterEvalQ(cl,c(library(ranger),library(rms),library(vtreat),library(plotrix),library(mice),library(Amelia),library(CORElearn),library(miceadds),library(splines),library(matrixStats)))
system.time(list<-foreach(t=1:1)%dopar%{
estimates<-1
stander<-1
estimates2<-1
stander2<-1
for (n in 1:length(mice)){
datax<-vtreat::prepare(dummies,mice::complete(mice,n),pruneSig=c())
datax$monthssurv<-mice::complete(mice,n)$monthssurv
datax$monthssurv<-ceiling(datax$monthssurv)
# Train on the bootstrap rows, predict on the out-of-bootstrap rows.
data<-datax[folds[[t]],]
rsf1<-ranger(Surv(data$monthssurv,data$ons_death)~.,data,num.trees=rsflist[[n]]$num.trees,mtry=rsflist[[n]]$mtry,min.node.size=rsflist[[n]]$min.node.size,splitrule='logrank',importance='none')
deathtimes<-predict(rsf1,datax[-folds[[t]],],type='response')$unique.death.times
###Generate log predictions and standard errors on out of training samples
estimates[n]<-list(log(predict(rsf1,datax[-folds[[t]],],type='response')$survival))
survfull<-log(predict(rsf1,datax[-folds[[t]],],type='response',predict.all=TRUE)$survival)
# Per-subject, per-time SD across trees (used as SE for Rubin pooling).
standerx<-as.data.frame(1:nrow(datax[-folds[[t]],]))
for (p in 1:length(deathtimes)){
for (q in 1:nrow(datax[-folds[[t]],])){
standerx[q,p]<-sd(survfull[q,p,c(1:rsf1$num.trees)])
}
}
stander[n]<-list(standerx)
deathtimes2<-predict(rsf1,datax[folds[[t]],],type='response')$unique.death.times
###Generate log predictions and standard errors on training samples
estimates2[n]<-list(log(predict(rsf1,datax[folds[[t]],],type='response')$survival))
survfull2<-log(predict(rsf1,datax[folds[[t]],],type='response',predict.all=TRUE)$survival)
# NOTE(review): this frame is sized with the OUT-of-bootstrap rows
# (-folds) although it is filled over the in-bootstrap rows below —
# likely a copy-paste slip; confirm intended dimensions.
standerx2<-as.data.frame(1:nrow(datax[-folds[[t]],]))
for (p in 1:length(deathtimes2)){
for (q in 1:nrow(datax[folds[[t]],])){
standerx2[q,p]<-sd(survfull2[q,p,c(1:rsf1$num.trees)])
}
}
stander2[n]<-list(standerx2)
}
####Combine predictions from imputed datasets for final out of training and training predictions
# NOTE(review): imputation 4 (estimates[[4]]/stander[[4]]) is omitted from
# both mi.meld calls — confirm whether that exclusion is deliberate.
mipreds<-as.data.frame(1:nrow(datax[-folds[[t]],]))
for (m in 1:(length(deathtimes)-2)){
mipreds[,m]<-as.data.frame(t(as.data.frame(mi.meld(q=cbind(estimates[[1]][,m],estimates[[2]][,m],estimates[[3]][,m],estimates[[5]][,m],
estimates[[6]][,m],estimates[[7]][,m],estimates[[8]][,m],estimates[[9]][,m],estimates[[10]][,m]),
se=cbind(stander[[1]][,m],stander[[2]][,m],stander[[3]][,m],stander[[5]][,m],
stander[[6]][,m],stander[[7]][,m],stander[[8]][,m],stander[[9]][,m],stander[[10]][,m]),byrow=FALSE)[1])))
}
mipredsa<-as.data.frame(1:nrow(datax[folds[[t]],]))
for (m in 1:(length(deathtimes2)-2)){
mipredsa[,m]<-as.data.frame(t(as.data.frame(mi.meld(q=cbind(estimates2[[1]][,m],estimates2[[2]][,m],estimates2[[3]][,m],estimates2[[5]][,m],
estimates2[[6]][,m],estimates2[[7]][,m],estimates2[[8]][,m],estimates2[[9]][,m],estimates2[[10]][,m]),
se=cbind(stander2[[1]][,m],stander2[[2]][,m],stander2[[3]][,m],stander2[[5]][,m],
stander2[[6]][,m],stander2[[7]][,m],stander2[[8]][,m],stander2[[9]][,m],stander2[[10]][,m]),byrow=FALSE)[1])))
}
# Back-transform pooled log-survival to the probability scale.
testingpreds<-exp(mipreds)
rm(mipreds)
trainingpreds<-exp(mipredsa)
rm(mipredsa)
testingpreds$id<-id[-folds[[t]],]
trainingpreds$id<-id[folds[[t]],]
output<-list(testingpreds,trainingpreds)
return(list(output))
})
stopCluster(cl)
registerDoSEQ()
qsave(list,'finallist.q')
}
####Internal validation - evaluation metrics
{
###Create id/survival dataframe
# id + outcome lookup table, merged onto the bootstrap predictions below.
# BUGFIX: the column is named 'ons_death' throughout this file (downstream
# code reads df3[[i]]$ons_death after merging on ids); the original
# select(..., onsdeath) referenced a non-existent column. Base subsetting
# also avoids the dplyr dependency for this line.
ids<-survdata[,c('monthssurv','ons_death')]
ids$id<-1:nrow(ids)
# Placeholders populated by the validation sections that follow.
simplebootstrap<-1
boot.632<-1
calibplot<-1
calibplot632<-1
fullbt<-1
full632<-1
###Separate out dataframes of predictions
{
# df  = out-of-bootstrap (testing) predictions per resample,
# df2 = in-bootstrap (training) predictions per resample,
# each merged with the id/outcome lookup.
df<-1
for (i in 1:length(finlist)){
df[i]<-list(finlist[[i]][[1]][[1]])
}
for (i in 1:length(df)){
df[i]<-list(merge(df[[i]],ids,by='id'))
}
df2<-1
for (i in 1:length(finlist)){
df2[i]<-list(dplyr::distinct(finlist[[i]][[1]][[2]]))
}
for (i in 1:length(df2)){
df2[i]<-list(merge(df2[[i]],ids,by='id'))
}
###Recreate dataframe of predictions on original dataset for each bootstrap resample
# df3 = union of testing and training rows (column 2 dropped before rbind),
# de-duplicated so each subject appears once per resample.
{
df3<-1
for (i in 1:length(df)){
x<-df[[i]][,-2]
y<-df2[[i]][,-2]
x1<-rbind(x,y)
df3[i]<-list(dplyr::distinct(x1))
}
}
###Simple Bootstrap validation
{
###Calculate tROC, c-index and ibrier for bootstrap sample
# Per resample: time-dependent ROC AUC at month 59, Harrell-type C via
# rcorr.cens, and the integrated Brier score via pec/crps.
troc3<-1
cid<-1
ibrier<-1
for (i in 1:length(df3)){
###set to 59 as all patients censored at 60
troc3[i]<-timeROC(df3[[i]]$monthssurv,df3[[i]]$ons_death,1-df3[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df3[[i]],Surv(df3[[i]]$monthssurv,df3[[i]]$ons_death))
cid[i]<-rcorr.cens(x=df3[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df3[[i]])
####may need to edit column numbers in dat[,c(2:62)]
ibrier[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
###Generate calibration chart for each bootstrap resample
# For every bootstrap resample: censor follow-up at 60 months, split subjects
# into quintiles of the predicted month-59 survival probability, and overlay
# the mean predicted curve ('Predicted') on the subgroup Kaplan-Meier curve
# ('Observed'). The loop below replaces five copy-pasted per-quintile blocks;
# the outputs consumed downstream (`plots`, and the final-iteration `full`
# data frame used by the averaging step that follows) are unchanged.
plots<-1
for (k in 1:length(df3)){
calibrsf<-df3[[k]]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df3[[k]]$ons_death)
censordata$monthssurv<-df3[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
# Administrative censoring at 60 months (vectorized form of the original
# element-wise ifelse() loops).
censordata$ons_death[censordata$monthssurv>60]<-0
censordata$monthssurv[censordata$monthssurv>60]<-60
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
# Quintiles of predicted survival at month 59 (prediction column 59).
calib$decile<-with(calibrsf,cut(calibrsf[,59],
breaks = quantile(calibrsf[,59],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
# These three columns are constant across quintiles, so set them once.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
quintiles<-c('0-20','20-40','40-60','60-80','80-100')
pred_curves<-vector("list",length(quintiles))
obs_curves<-vector("list",length(quintiles))
for (qn in seq_along(quintiles)){
dec1<-subset(predscalib2,predscalib2$decile==quintiles[qn])
dec1a<-select(dec1,-decile)
# Mean predicted survival over the quintile (63 columns expected).
est<-as.data.frame(1:63)
est$Time<-est[,1]
est$survival<-colMeans(dec1a)
# Observed KM curve, padded onto a monthly grid 1..60 with
# last-observation-carried-forward for months without events.
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
if (nrow(OSKM2)!=60) OSKM2<-merge(times,OSKM2,by='time',all=TRUE)
if (is.na(OSKM2[1,2])) OSKM2[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2[i,2]))
OSKM2[i,2]<-OSKM2[i-1,2]
}
est$col<-'Predicted'
est$lt<-as.character(qn)
est<-select(est,-1)
colnames(OSKM2)<-c('Time','survival')
OSKM2$col<-'Observed'
OSKM2$lt<-as.character(qn)
pred_curves[[qn]]<-est
obs_curves[[qn]]<-OSKM2
}
# Predicted curves first, then observed — same row order as the original
# explicit rbind of the five quintile blocks.
full<-rbind(do.call(rbind,pred_curves),do.call(rbind,obs_curves))
plot<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
plots[k]<-list(plot)
}
###Average calibration chart for final simple bootstrap calibration chart
# 615 rows = 5 quintiles x (63 predicted + 60 observed time points);
# survival is averaged across all resample plots, reusing the last 'full'
# data frame as the chart skeleton.
surv<-as.data.frame(1:615)
for (i in 1:length(plots)){
surv[,i]<-plots[[i]]$data$survival
}
survs<-rowMeans(surv)
full$survival<-survs
# Anchor every curve at (Time=0, survival=1).
fullx<-full[full$Time==1,]
fullx$Time<-0
fullx$survival<-1
full2<-rbind(full,fullx)
plot2<-ggplot(full2,aes(Time,survival))+
geom_line(data=full2,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
bootstrapcalibration<-plot2
###Combine validation metrics across bootstrap resamples
# Mean plus 2.5%/97.5% percentile interval over the resamples.
out<-as.data.frame(t(as.data.frame(c(mean(troc3),quantile(troc3,probs=c(0.025,0.975))))))
out<-rbind(out,c(mean(cid),quantile(cid,probs=c(0.025,0.975))))
out<-rbind(out,c(mean(ibrier),quantile(ibrier,probs=c(0.025,0.975))))
colnames(out)<-c('mean','2.5%','97.5%')
rownames(out)<-c('tROC','CiD','iBrier')
simplebootstrap<-list(out)
rm(out)
}
simplebootstrap
bootstrapcalibration
###0.632 bootstrap validation
{
###Calculate tROC, c-index and ibrier for Testing Samples
# Same three metrics as the simple bootstrap, evaluated on the
# out-of-bootstrap (testing) predictions only.
{
trocTE<-1
cidTE<-1
ibrierTE<-1
for (i in 1:length(df)){
trocTE[i]<-timeROC(df[[i]]$monthssurv,df[[i]]$ons_death,1-df[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df[[i]],Surv(df[[i]]$monthssurv,df[[i]]$ons_death))
cidTE[i]<-rcorr.cens(x=df[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df[[i]])
ibrierTE[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
}
###Calculate tROC, c-index and ibrier for Training Samples
# Same three metrics on the in-bootstrap (training) predictions.
{
trocTR<-1
cidTR<-1
ibrierTR<-1
for (i in 1:length(df2)){
trocTR[i]<-timeROC(df2[[i]]$monthssurv,df2[[i]]$ons_death,1-df2[[i]][,59],times=59,cause=1,weighting='marginal',iid=FALSE)$AUC[2]
surv.obj<-with(df2[[i]],Surv(df2[[i]]$monthssurv,df2[[i]]$ons_death))
cidTR[i]<-rcorr.cens(x=df2[[i]][,59],S=surv.obj)[[1]]
dat<-na.omit(df2[[i]])
ibrierTR[i]<-crps(pec(list(calib=as.matrix(dat[,c(2:62)])),Hist(monthssurv,ons_death)~1,data=dat))[2]
}
}
###Combine testing and training in 0.632/0.368 ratio
# Efron's 0.632 bootstrap estimate: 0.632 x test + 0.368 x training,
# summarized as mean with 2.5%/97.5% percentile interval.
{
troc632<-((trocTR*0.368)+(trocTE*0.632))
cid632<-((cidTR*0.368)+(cidTE*0.632))
ibrier632<-((ibrierTR*0.368)+(ibrierTE*0.632))
out1<-as.data.frame(t(as.data.frame(c(mean(troc632),quantile(troc632,probs=c(0.025,0.975))))))
out1<-rbind(out1,c(mean(cid632),quantile(cid632,probs=c(0.025,0.975))))
out1<-rbind(out1,c(mean(ibrier632),quantile(ibrier632,probs=c(0.025,0.975))))
colnames(out1)<-c('mean','2.5%','97.5%')
rownames(out1)<-c('tROC','CiD','iBrier')
boot.632<-list(out1)
rm(out1)
}
###Quintile calibration plots
plots<-1
{
for (k in 1:length(df)){
{
###Generate calibration plots for testing cases
calibrsf<-df[[k]]
calibrsf
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df[[k]]$ons_death)
censordata$monthssurv<-df[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,59],
breaks = quantile(calibrsf[,59],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='0-20')
dec1a<-select(dec1,-decile)
estimatesdec1<-as.data.frame(1:63)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
OSKMa <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2a<-as.data.frame(cbind(OSKMa$time,OSKMa$surv))
names(OSKM2a)<-c('time','surv')
if (nrow(OSKM2a)!=60) OSKM2a<-merge(times,OSKM2a,by='time',all=TRUE)
if (is.na(OSKM2a[1,2])) OSKM2a[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2a[i,2]))
OSKM2a[i,2]<-OSKM2a[i-1,2]
}
### Quintile calibration, bootstrap TEST cases (continues from the '0-20'
### quintile handled just above this span). For each predicted-risk quintile:
###  - "Predicted": column means of the per-patient predicted survival curves
###  - "Observed" : Kaplan-Meier estimate on a 1-60 month grid; months with no
###                 event are filled by carrying the previous estimate forward
### The five Predicted and five Observed curves are stacked into `full` and
### overlaid in `plot` (used later for the 0.632 combination).
### NOTE(review): colMeans(dec1a) here spans 63 columns, so the last two
### entries of `survival` are the means of the appended `death` and `months`
### columns, not survival probabilities -- confirm this is intentional (the
### decile section further down drops those columns and uses 61 time points;
### the hard-coded 615-row average later depends on this 63+60 layout).
### Quintile 20-40
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='20-40')
dec1a<-select(dec1,-decile)
estimatesdec1b<-as.data.frame(1:63)
estimatesdec1b$Time<-estimatesdec1b[,1]
estimatesdec1b$survival<-colMeans(dec1a)
OSKMb <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2b<-as.data.frame(cbind(OSKMb$time,OSKMb$surv))
names(OSKM2b)<-c('time','surv')
# Pad to the full 1-60 grid, anchor month 1 at survival 1, then LOCF-fill.
if (nrow(OSKM2b)!=60) OSKM2b<-merge(times,OSKM2b,by='time',all=TRUE)
if (is.na(OSKM2b[1,2])) OSKM2b[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2b[i,2]))
OSKM2b[i,2]<-OSKM2b[i-1,2]
}
### Quintile 40-60 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='40-60')
dec1a<-select(dec1,-decile)
estimatesdec1c<-as.data.frame(1:63)
estimatesdec1c$Time<-estimatesdec1c[,1]
estimatesdec1c$survival<-colMeans(dec1a)
OSKMc <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2c<-as.data.frame(cbind(OSKMc$time,OSKMc$surv))
names(OSKM2c)<-c('time','surv')
if (nrow(OSKM2c)!=60) OSKM2c<-merge(times,OSKM2c,by='time',all=TRUE)
if (is.na(OSKM2c[1,2])) OSKM2c[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2c[i,2]))
OSKM2c[i,2]<-OSKM2c[i-1,2]
}
### Quintile 60-80 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='60-80')
dec1a<-select(dec1,-decile)
estimatesdec1d<-as.data.frame(1:63)
estimatesdec1d$Time<-estimatesdec1d[,1]
estimatesdec1d$survival<-colMeans(dec1a)
OSKMd <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2d<-as.data.frame(cbind(OSKMd$time,OSKMd$surv))
names(OSKM2d)<-c('time','surv')
if (nrow(OSKM2d)!=60) OSKM2d<-merge(times,OSKM2d,by='time',all=TRUE)
if (is.na(OSKM2d[1,2])) OSKM2d[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2d[i,2]))
OSKM2d[i,2]<-OSKM2d[i-1,2]
}
### Quintile 80-100 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='80-100')
dec1a<-select(dec1,-decile)
estimatesdec1e<-as.data.frame(1:63)
estimatesdec1e$Time<-estimatesdec1e[,1]
estimatesdec1e$survival<-colMeans(dec1a)
OSKMe <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2e<-as.data.frame(cbind(OSKMe$time,OSKMe$surv))
names(OSKM2e)<-c('time','surv')
if (nrow(OSKM2e)!=60) OSKM2e<-merge(times,OSKM2e,by='time',all=TRUE)
if (is.na(OSKM2e[1,2])) OSKM2e[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2e[i,2]))
OSKM2e[i,2]<-OSKM2e[i-1,2]
}
### Tag each curve with its plot colour group (Predicted/Observed) and a
### linetype code ('1'..'5' = the five quintiles), then stack for plotting.
estimatesdec1$col='Predicted'
estimatesdec1b$col='Predicted'
estimatesdec1c$col='Predicted'
estimatesdec1d$col='Predicted'
estimatesdec1e$col='Predicted'
estimatesdec1$lt='1'
estimatesdec1b$lt='2'
estimatesdec1c$lt='3'
estimatesdec1d$lt='4'
estimatesdec1e$lt='5'
# Drop the leading `1:63` index column so all frames share Time/survival/col/lt.
estimatesdec1<-select(estimatesdec1,-1)
estimatesdec1b<-select(estimatesdec1b,-1)
estimatesdec1c<-select(estimatesdec1c,-1)
estimatesdec1d<-select(estimatesdec1d,-1)
estimatesdec1e<-select(estimatesdec1e,-1)
colnames(OSKM2a)<-c('Time','survival')
colnames(OSKM2b)<-c('Time','survival')
colnames(OSKM2c)<-c('Time','survival')
colnames(OSKM2d)<-c('Time','survival')
colnames(OSKM2e)<-c('Time','survival')
OSKM2a$col='Observed'
OSKM2b$col='Observed'
OSKM2c$col='Observed'
OSKM2d$col='Observed'
OSKM2e$col='Observed'
OSKM2a$lt='1'
OSKM2b$lt='2'
OSKM2c$lt='3'
OSKM2d$lt='4'
OSKM2e$lt='5'
colnames(estimatesdec1)
colnames(OSKM2a)
full<-rbind(estimatesdec1,estimatesdec1b,estimatesdec1c,estimatesdec1d,estimatesdec1e,
OSKM2a,OSKM2b,OSKM2c,OSKM2d,OSKM2e)
full
# Test-case calibration plot; its $data is reused for the 0.632 blend below.
plot<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
}
}
####0.632 Calibration plots in each resample
### Repeats the quintile calibration above, but on the TRAINING cases of
### resample k (df2[[k]]), producing `plot1`. The test (`plot`) and training
### (`plot1`) survival columns are then combined with the 0.632 bootstrap
### weights (0.632*test + 0.368*train) into `plot3`, stored in `plots[[k]]`.
{
{###Generate calibration plots for training cases
calibrsf<-df2[[k]]
# Drop id/outcome columns, keep the 61 monthly survival-prediction columns.
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calibrsf
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df2[[k]]$ons_death)
censordata$monthssurv<-df2[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
# Administratively censor follow-up at 60 months (event indicator, then time).
# NOTE(review): ifelse is used here purely for its side effect; the branch
# assignments do the work, the ifelse return value is discarded.
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
# Quintiles of the predicted survival at month 37 (hard-coded column).
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.2)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-20','20-40','40-60','60-80','80-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
### Quintile 0-20: mean predicted curve vs LOCF-filled Kaplan-Meier curve.
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='0-20')
dec1a<-select(dec1,-decile)
estimatesdec1<-as.data.frame(1:63)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
OSKMa <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2a<-as.data.frame(cbind(OSKMa$time,OSKMa$surv))
names(OSKM2a)<-c('time','surv')
if (nrow(OSKM2a)!=60) OSKM2a<-merge(times,OSKM2a,by='time',all=TRUE)
if (is.na(OSKM2a[1,2])) OSKM2a[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2a[i,2]))
OSKM2a[i,2]<-OSKM2a[i-1,2]
}
### Quintile 20-40 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='20-40')
dec1a<-select(dec1,-decile)
estimatesdec1b<-as.data.frame(1:63)
estimatesdec1b$Time<-estimatesdec1b[,1]
estimatesdec1b$survival<-colMeans(dec1a)
OSKMb <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2b<-as.data.frame(cbind(OSKMb$time,OSKMb$surv))
names(OSKM2b)<-c('time','surv')
if (nrow(OSKM2b)!=60) OSKM2b<-merge(times,OSKM2b,by='time',all=TRUE)
if (is.na(OSKM2b[1,2])) OSKM2b[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2b[i,2]))
OSKM2b[i,2]<-OSKM2b[i-1,2]
}
### Quintile 40-60 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='40-60')
dec1a<-select(dec1,-decile)
estimatesdec1c<-as.data.frame(1:63)
estimatesdec1c$Time<-estimatesdec1c[,1]
estimatesdec1c$survival<-colMeans(dec1a)
OSKMc <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2c<-as.data.frame(cbind(OSKMc$time,OSKMc$surv))
names(OSKM2c)<-c('time','surv')
if (nrow(OSKM2c)!=60) OSKM2c<-merge(times,OSKM2c,by='time',all=TRUE)
if (is.na(OSKM2c[1,2])) OSKM2c[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2c[i,2]))
OSKM2c[i,2]<-OSKM2c[i-1,2]
}
### Quintile 60-80 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='60-80')
dec1a<-select(dec1,-decile)
estimatesdec1d<-as.data.frame(1:63)
estimatesdec1d$Time<-estimatesdec1d[,1]
estimatesdec1d$survival<-colMeans(dec1a)
OSKMd <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2d<-as.data.frame(cbind(OSKMd$time,OSKMd$surv))
names(OSKM2d)<-c('time','surv')
if (nrow(OSKM2d)!=60) OSKM2d<-merge(times,OSKM2d,by='time',all=TRUE)
if (is.na(OSKM2d[1,2])) OSKM2d[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2d[i,2]))
OSKM2d[i,2]<-OSKM2d[i-1,2]
}
### Quintile 80-100 (same procedure)
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile=='80-100')
dec1a<-select(dec1,-decile)
estimatesdec1e<-as.data.frame(1:63)
estimatesdec1e$Time<-estimatesdec1e[,1]
estimatesdec1e$survival<-colMeans(dec1a)
OSKMe <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
times<-as.data.frame(1:60)
colnames(times)<-'time'
OSKM2e<-as.data.frame(cbind(OSKMe$time,OSKMe$surv))
names(OSKM2e)<-c('time','surv')
if (nrow(OSKM2e)!=60) OSKM2e<-merge(times,OSKM2e,by='time',all=TRUE)
if (is.na(OSKM2e[1,2])) OSKM2e[1,2]<-1
for (i in 1:60){
if (is.na(OSKM2e[i,2]))
OSKM2e[i,2]<-OSKM2e[i-1,2]
}
### Tag, drop index columns, and stack exactly as in the test-case section.
estimatesdec1$col='Predicted'
estimatesdec1b$col='Predicted'
estimatesdec1c$col='Predicted'
estimatesdec1d$col='Predicted'
estimatesdec1e$col='Predicted'
estimatesdec1$lt='1'
estimatesdec1b$lt='2'
estimatesdec1c$lt='3'
estimatesdec1d$lt='4'
estimatesdec1e$lt='5'
estimatesdec1<-select(estimatesdec1,-1)
estimatesdec1b<-select(estimatesdec1b,-1)
estimatesdec1c<-select(estimatesdec1c,-1)
estimatesdec1d<-select(estimatesdec1d,-1)
estimatesdec1e<-select(estimatesdec1e,-1)
colnames(OSKM2a)<-c('Time','survival')
colnames(OSKM2b)<-c('Time','survival')
colnames(OSKM2c)<-c('Time','survival')
colnames(OSKM2d)<-c('Time','survival')
colnames(OSKM2e)<-c('Time','survival')
OSKM2a$col='Observed'
OSKM2b$col='Observed'
OSKM2c$col='Observed'
OSKM2d$col='Observed'
OSKM2e$col='Observed'
OSKM2a$lt='1'
OSKM2b$lt='2'
OSKM2c$lt='3'
OSKM2d$lt='4'
OSKM2e$lt='5'
colnames(estimatesdec1)
colnames(OSKM2a)
full<-rbind(estimatesdec1,estimatesdec1b,estimatesdec1c,estimatesdec1d,estimatesdec1e,
OSKM2a,OSKM2b,OSKM2c,OSKM2d,OSKM2e)
plot1<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
}
}
####0.632 combination of testing and training cases
# Weighted 0.632 blend of test (`plot`) and training (`plot1`) survival values,
# stored per resample in `plots` (assumes both plots have row-aligned $data).
plot3<-plot
plot3$data$survival<-(0.632*(plot$data$survival)+0.368*(plot1$data$survival))
plots[k]<-list(plot3)
}
}
####Average calibration plots across all bootstrap resamples
### Averages the 0.632-blended survival column across all resamples in `plots`
### and redraws one summary calibration plot, kept in `calibplot632`.
{
# 615 rows = 5 Predicted curves x 63 points + 5 Observed curves x 60 points;
# this relies on every plots[[i]]$data sharing that exact layout.
surv<-as.data.frame(1:615)
for (i in 1:length(plots)){
surv[,i]<-plots[[i]]$data$survival
}
survs<-rowMeans(surv)
# `full` still holds the last resample's stacked curves; only its survival
# column is replaced by the across-resample mean before plotting.
full$survival<-survs
plot3<-ggplot(full,aes(Time,survival))+
geom_line(data=full,mapping=aes(colour=col,linetype=lt))+
xlim(0,60)+ylim(0,1)+
scale_color_discrete(name='')+
scale_linetype_discrete(name='Probability Quintile',
labels=c('0-20','20-40','40-60','60-80','80-100'))+
theme_bw()
calibplot632<-list(plot3)
}
}
}
# Auto-print the headline results when run interactively.
boot.632
calibplot632
###decile plots RSF/oCPH 12s for 10reps
### Decile (by=0.1) calibration for two models (m=2 and m=4 -- presumably RSF
### and optimised CPH; confirm against how `finlist` was built). For each
### resample k: test predictions come from finlist[[i]][[1]][[z]] (z = 2*m),
### training predictions from [[z+1]], both merged onto `ids`.
system.time({
# Placeholder-initialised accumulators (grown by [k]/[m] assignment below).
decdata632<-1
oskmdata632<-1
modelcalprob<-1
modelkmprob<-1
for (m in c(2,4)){
z<-m*2
df<-1
for (i in 1:length(finlist)){
df[i]<-list(finlist[[i]][[1]][[z]])
}
for (i in 1:length(df)){
df[i]<-list(merge(df[[i]],ids,by='id'))
}
df2<-1
for (i in 1:length(finlist)){
df2[i]<-list(dplyr::distinct(finlist[[i]][[1]][[z+1]]))
}
for (i in 1:length(df2)){
df2[i]<-list(merge(df2[[i]],ids,by='id'))
}
for (k in 1:length(finlist)){
{
### Test-case setup for resample k: 61 monthly prediction columns,
### 60-month administrative censoring, deciles of month-37 prediction.
calibrsf<-df[[k]]
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df[[k]]$ons_death)
censordata$monthssurv<-df[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.1)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-10','10-20','20-30','30-40','40-50','50-60','60-70','70-80','80-90','90-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
### Per-decile curves for the TEST cases. Each group builds, for one decile:
###   <x>  : mean predicted curve over the merged test predictions (black)
###   <x>a : observed Kaplan-Meier curve (red)
###   <x>b : mean predicted curve from `calib` (green)
### Letters a..h, ii, j map to deciles 0-10 .. 90-100 ('ii' avoids clobbering
### the loop index 'i'; note single-letter names also shadow base::c etc.).
group<-'0-10'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
# Unlike the quintile section, outcome columns are dropped: 61 time points.
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
a<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
aa<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
ab<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
### Remaining deciles repeat the identical procedure.
group<-'10-20'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
b<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ba<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
bb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'20-30'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
c<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ca<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
cb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'30-40'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
d<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
da<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
db<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'40-50'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
e<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ea<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
eb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'50-60'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
f<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
fa<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
fb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'60-70'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
g<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ga<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
gb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'70-80'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
h<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ha<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
hb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'80-90'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
ii<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ia<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
ib<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'90-100'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
j<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
ja<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
jb<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
}
}
{
### TRAINING-case counterpart of the decile section above, using df2[[k]].
### Outputs are suffixed '2': a2/a2a/a2b .. j2/j2a/j2b for deciles
### 0-10 .. 90-100 (black = mean predicted, red = KM, green = calib-based).
calibrsf<-df2[[k]]
calibrsf<-calibrsf[,-c(1,2,81,82)]
calibrsf<-calibrsf[,1:61]
calib<-calibrsf
calib$id<-1:nrow(calib)
censordata<-as.data.frame(df2[[k]]$ons_death)
censordata$monthssurv<-df2[[k]]$monthssurv
colnames(censordata)<-c('ons_death','monthssurv')
# Administrative censoring at 60 months, as in the test-case setup.
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$ons_death[i]<-0,censordata$ons_death[i]<-censordata$ons_death[i])
}
for (i in 1:nrow(censordata)){
ifelse(censordata$monthssurv[i]>60, censordata$monthssurv[i]<-60,censordata$monthssurv[i]<-censordata$monthssurv[i])
}
calib$death<-censordata$ons_death
calib$months<-censordata$monthssurv
colnames(calib)
calib$decile<-with(calibrsf,cut(calibrsf[,37],
breaks = quantile(calibrsf[,37],probs=seq(0,1,by=0.1)),
include.lowest = TRUE))
levels(calib$decile)<-c('0-10','10-20','20-30','30-40','40-50','50-60','60-70','70-80','80-90','90-100')
###calib$decile<-with(calib,cut(V1.50,breaks = quantile(V1.50,probs=seq(0,1,by=0.25)),include.lowest = TRUE))
###levels(calib$decile)<-c('0-10','10-20','20-30','30-40')
mts<-as.data.frame(cbind(censordata$monthssurv,censordata$ons_death))
names(mts)<-c('monthssurv','ons_death')
predscalib2<-calibrsf
calib1<-calibrsf
calib1$ons_death<-censordata$ons_death
calib1$monthssurv<-censordata$monthssurv
{
group<-'0-10'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
a2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
a2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
a2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
### Remaining deciles repeat the identical procedure.
group<-'10-20'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
b2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
b2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
b2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'20-30'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
c2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
c2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
c2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'30-40'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
d2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
d2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
d2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'40-50'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
e2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
e2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
e2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'50-60'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
f2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
f2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
f2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'60-70'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
g2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
g2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
g2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'70-80'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
h2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
h2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
h2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'80-90'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
i2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
i2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
i2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
group<-'90-100'
{
predscalib2$decile<-calib$decile
predscalib2$death<-calib1$ons_death
predscalib2$months<-calib1$monthssurv
dec1<-subset(predscalib2,predscalib2$decile==group)
dec1a<-select(dec1,-decile,-months,-death)
dec2<-subset(calib,calib$decile==group)
dec2a<-select(dec2,-decile,-months,-death,-id)
estimatesdec1<-as.data.frame(1:61)
estimatesdec1$Time<-estimatesdec1[,1]
estimatesdec1$survival<-colMeans(dec1a)
estimatesdec2<-as.data.frame(1:61)
estimatesdec2$Time<-estimatesdec2[,1]
estimatesdec2$survival<-colMeans(dec2a)
OSKM <- survfit(Surv(dec1$months, dec1$death)~1, data=dec1)
OSKM2<-as.data.frame(cbind(OSKM$time,OSKM$surv))
names(OSKM2)<-c('time','surv')
j2<-ggplot(estimatesdec1,aes(Time,survival))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)
j2a<-ggplot(OSKM2,aes(time,surv))+geom_line(colour='red')+xlim(0,60)+ylim(0,1)
j2b<-ggplot(estimatesdec2,aes(Time,survival))+geom_line(colour='green')+xlim(0,60)+ylim(0,1)
}
}
}
### Collect the per-decile plots: 'te' = test cases, 'tr' = training cases;
### black = mean predicted curves, red = observed KM curves.
blackplotte<-list(a,b,c,d,e,f,g,h,ii,j)
blackplottr<-list(a2,b2,c2,d2,e2,f2,g2,h2,i2,j2)
redplotte<-list(aa,ba,ca,da,ea,fa,ga,ha,ia,ja)
redplottr<-list(a2a,b2a,c2a,d2a,e2a,f2a,g2a,h2a,i2a,j2a)
### 0.632 blend per decile: interpolate each curve to 100 points with approx()
### and weight test/training as 0.632/0.368.
### NOTE(review): the black branch passes $data[,-1] (drops the leading index
### column) for y but the full $data for x, while the red branch passes $data
### for both -- confirm this asymmetry is intentional and that approx()
### receives the intended (x, y) columns in each case.
blackplot632<-1
for (qq in 1:10){
newdf<-as.data.frame((0.632*approx(blackplotte[[qq]]$data[,-1],n=100)$y)+(0.368*approx(blackplottr[[qq]]$data[,-1],n=100)$y))
newdf$time<-((0.632*approx(blackplotte[[qq]]$data,n=100)$x)+(0.368*approx(blackplottr[[qq]]$data,n=100)$x))
colnames(newdf)<-c('surv','time')
blackplot632[qq]<-list(newdf)
}
redplot632<-1
for (qqq in 1:10){
newdf<-as.data.frame((0.632*approx(redplotte[[qqq]]$data,n=100)$y)+(0.368*approx(redplottr[[qqq]]$data,n=100)$y))
newdf$time<-((0.632*approx(redplotte[[qqq]]$data,n=100)$x)+(0.368*approx(redplottr[[qqq]]$data,n=100)$x))
colnames(newdf)<-c('surv','time')
redplot632[qqq]<-list(newdf)
}
# Store this resample's blended predicted (black) and KM (red) curves.
decdata632[k]<-list(blackplot632)
oskmdata632[k]<-list(redplot632)
}
### Average the blended curves across resamples, per decile t.
calprobdec<-1
probdec<-1
for (t in 1:10){
probdec<-decdata632[[1]][[t]]
surv<-as.data.frame(1:100)
for (i in 1:length(finlist)){
surv[,i]<-decdata632[[i]][[t]]$surv
}
probdec$surv<-rowMeans(surv)
calprobdec[t]<-list(probdec)
}
kmprobdec<-1
probdec<-1
for (t in 1:10){
probdec<-oskmdata632[[1]][[t]]
surv<-as.data.frame(1:100)
for (i in 1:length(finlist)){
surv[,i]<-oskmdata632[[i]][[t]]$surv
}
probdec$surv<-rowMeans(surv)
kmprobdec[t]<-list(probdec)
}
# Indexed by model m (2 and 4); positions 1 and 3 remain placeholders.
modelcalprob[m]<-list(calprobdec)
modelkmprob[m]<-list(kmprobdec)
}
### Final per-decile overlays: model 2 alone, model 4 alone, and both models.
plots<-1
for (i in 1:10){
plots[i]<-list(ggplot(modelcalprob[[2]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[2]][[i]],aes(time,surv),colour='red')
)
}
plots2<-1
for (i in 1:10){
plots2[i]<-list(ggplot(modelcalprob[[4]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[4]][[i]],aes(time,surv),colour='red'))
}
plots3<-1
for (i in 1:10){
plots3[i]<-list(ggplot(modelcalprob[[2]][[i]],aes(time,surv))+geom_line(colour='black')+xlim(0,60)+ylim(0,1)+
geom_line(data=modelkmprob[[2]][[i]],aes(time,surv),colour='red')+
geom_line(data=modelcalprob[[4]][[i]],aes(time,surv),colour='green'))
}
})
### Persist all results with qs::qsave (fast serialised .q files).
qsave(modelcalprob,'modelcalprob.q')
qsave(modelkmprob,'modelkmprob.q')
qsave(plots,'deccalplotrsf.q')
qsave(plots2,'deccalplotocph.q')
qsave(plots3,'deccalplotorsfcph.q')
qsave(simplebootstrap,'simplebootstrap.q')
qsave(boot.632,'boot.632.q')
qsave(calibplot,'calibplot1.q')
qsave(calibplot632,'calibplot632.q')
qsave(fullbt,'fullbt.q')
qsave(trocplots,'trocplots.q')
}
}
# Package load hook: initialise the package's mutable state.
# `required` flags whether RequiredFunction() has populated `actualData`;
# both are written with `<<-` into the enclosing (global) environment.
# NOTE(review): creating globals from .onAttach is a CRAN-unfriendly pattern;
# a package-local environment would be the conventional fix.
.onAttach <- function(libname,pkgname)
{
required <<- 0
actualData <<- NULL
}
#Required function for every methods
#' Data extrapolation
#'
#' \code{RequiredFunction} builds the data set actualData: it reads the
#' Williams College bulletin (downloading the PDF when pdftotext is
#' available, otherwise using the bundled text copy), extracts BA/AB/BS
#' graduation years, and stores the derived faculty-age matrix globally.
#'
#' @return Returns \code{actualData} (also assigned via \code{<<-} as a side effect).
#'
#' @export
RequiredFunction <- function() {
#' @import utils
# devtools::use_package("utils")
# convert from pdf to txt -----------------
#' @import tm
# devtools::use_package("tm")
# bundled plain-text bulletin, used as a fallback when pdftotext is missing
uri2 <- system.file("extdata", "bulletin2013_14.txt", package = "AverageAge")
# two cases for having pdftotext and not having pdftotext
if (all(file.exists(Sys.which(c("pdfinfo", "pdftotext"))))) {
#download the PDF bulletin from the registrar
url <- "http://web.williams.edu/admin/registrar/catalog/bulletin2013_14.pdf"
dest <- tempfile(fileext = ".pdf")
download.file(url, dest, mode = "wb")
#temp file uri
uri1 <- sprintf("file://%s", dest)
#convert the PDF to text via tm's pdftotext wrapper
pdf <- (tm::readPDF(control = list(text = "-layout")))(elem = list(uri = uri1),
language = "en", id = "id1")
txt <- toString(pdf[1])
} else {
#no pdftotext available: read the bundled text version instead
txt <- paste(readLines(uri2), sep="\n", collapse="\n")
}
# string manipulation and data extraction --------
#' @import stringr
# devtools::use_package("stringr")
# match "YYYY, BA/AB/BS" degree entries in the bulletin text
BaTxt <- stringr::str_match_all(txt, "[:digit:]{4}, (BA|AB|BS)")
# second pass keeps just the four-digit year from each match
# NOTE(review): this calls str_match_all on a list (coerced via as.character) — verify
BaTxt <- stringr::str_match_all(BaTxt, "[:digit:]{4}")
# converting to numeric
BaData <- lapply(BaTxt, as.numeric)
# converting to matrix
BaMat <- as.matrix(data.frame(BaData))
# age = 2015 - graduation year + 22 (assumes graduation at age 22; 2015 hard-coded)
actualData <<- 2015 - BaMat + 22
required <<- 1
}
# Compute and print the mean faculty age.
#' Average of faculty ages
#'
#' \code{Average} prints (and returns) the mean of the data set actualData,
#' building the data set first if needed.
#'
#' @return The average of \code{actualData}, printed as a side effect.
#'
#' @export
Average <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(mean(actualData))
}
# Compute and print the spread (max - min) of faculty ages.
#' Range of faculty ages
#'
#' \code{Range} prints (and returns) the range of the data set actualData,
#' building the data set first if needed.
#'
#' @return The range (max minus min) of \code{actualData}.
#'
#' @export
Range <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(max(actualData) - min(actualData))
}
# Compute and print the oldest faculty age.
#' Maximum value of faculty ages
#'
#' \code{Max} prints (and returns) the maximum of the data set actualData,
#' building the data set first if needed.
#'
#' @return The maximum of \code{actualData}.
#'
#' @export
Max <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(max(actualData))
}
# Compute and print the youngest faculty age.
#' Minimum value of faculty ages
#'
#' \code{Min} prints (and returns) the minimum of the data set actualData,
#' building the data set first if needed.
#'
#' @return The minimum of \code{actualData}.
#'
#' @export
Min <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(min(actualData))
}
#plots the histogram of faculty ages
#' Histogram of faculty ages
#'
#' \code{PlotHist} draws a histogram of the data set actualData, building
#' the data set first if needed.  (The roxygen block previously referred to
#' the non-existent \code{AgeHist}.)
#'
#' @return Invisibly, the object produced by \code{\link[graphics]{hist}}.
#'
#' @export
PlotHist <- function(){
if(required!=1){RequiredFunction()}
# hist() draws as a side effect; return its result invisibly instead of
# binding it to a dead local variable (the old AgeHist was never used).
invisible(hist(actualData,main="Distribution of Age",xlab="Age"))
}
#prints all the statistics of faculty ages
#' Information of faculty ages
#'
#' \code{PrintAll} prints all of the above statistics of actualData
#' (average, range, maximum, minimum) and draws the histogram.
#' (The roxygen block previously referred to \code{AgeHist}.)
#'
#' @return Invisibly, the last plotted object; output is printed as a side effect.
#'
#' @export
PrintAll <- function(){
# make sure the data set has been built before printing anything
if(required!=1){RequiredFunction()}
print('Average:')
Average()
print('Range:')
Range()
print('Maximum:')
Max()
print('Minimum:')
Min()
# finish with the histogram plot
PlotHist()
}
| /R/AverageAge.R | no_license | lubyrex/AverageAge | R | false | false | 3,847 | r |
.onAttach <- function(libname, pkgname) {
  # Reset package-level state: actualData is filled lazily by
  # RequiredFunction(); required flags whether that has happened.
  # NOTE(review): global `<<-` state; a package environment would be cleaner.
  required <<- 0
  actualData <<- NULL
}
#Required function for every methods
#' Data extrapolation
#'
#' \code{RequiredFunction} builds the data set actualData: it reads the
#' Williams College bulletin (downloading the PDF when pdftotext is
#' available, otherwise using the bundled text copy), extracts BA/AB/BS
#' graduation years, and stores the derived faculty-age matrix globally.
#'
#' @return Returns \code{actualData} (also assigned via \code{<<-}).
#'
#' @export
RequiredFunction <- function() {
  #' @import utils
  #' @import tm
  # bundled plain-text bulletin, used as a fallback when pdftotext is missing
  uri2 <- system.file("extdata", "bulletin2013_14.txt", package = "AverageAge")
  # two cases: having pdftotext and not having pdftotext
  if (all(file.exists(Sys.which(c("pdfinfo", "pdftotext"))))) {
    # download the PDF bulletin and convert it to text via tm
    url <- "http://web.williams.edu/admin/registrar/catalog/bulletin2013_14.pdf"
    dest <- tempfile(fileext = ".pdf")
    download.file(url, dest, mode = "wb")
    uri1 <- sprintf("file://%s", dest)
    pdf <- (tm::readPDF(control = list(text = "-layout")))(elem = list(uri = uri1),
    language = "en", id = "id1")
    txt <- toString(pdf[1])
  } else {
    # no pdftotext: read the bundled text version instead
    txt <- paste(readLines(uri2), sep="\n", collapse="\n")
  }
  #' @import stringr
  # match "YYYY, BA/AB/BS" degree entries, then keep the bare years
  # NOTE(review): second call runs str_match_all on a list (coerced) — verify
  BaTxt <- stringr::str_match_all(txt, "[:digit:]{4}, (BA|AB|BS)")
  BaTxt <- stringr::str_match_all(BaTxt, "[:digit:]{4}")
  BaData <- lapply(BaTxt, as.numeric)
  BaMat <- as.matrix(data.frame(BaData))
  # age = 2015 - graduation year + 22 (assumes graduation at 22; 2015 hard-coded)
  actualData <<- 2015 - BaMat + 22
  required <<- 1
}
#calculating the average age
#' Average of faculty ages
#'
#' \code{Average} prints (and returns) the mean of the data set actualData.
#'
#' @return The average of \code{actualData}.
#'
#' @export
Average <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(mean(actualData))
}
#calculates the range of faculty ages
#' Range of faculty ages
#'
#' \code{Range} prints (and returns) the range of the data set actualData.
#'
#' @return The range (max minus min) of \code{actualData}.
#'
#' @export
Range <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(max(actualData) - min(actualData))
}
#calculates the maximum value of faculty ages
#' Maximum value of faculty ages
#'
#' \code{Max} prints (and returns) the maximum of the data set actualData.
#'
#' @return The maximum of \code{actualData}.
#'
#' @export
Max <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(max(actualData))
}
#calculates the minimum value of faculty ages
#' Minimum value of faculty ages
#'
#' \code{Min} prints (and returns) the minimum of the data set actualData.
#'
#' @return The minimum of \code{actualData}.
#'
#' @export
Min <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print(min(actualData))
}
#plots the histogram of faculty ages
#' Histogram of faculty ages
#'
#' \code{PlotHist} draws a histogram of the data set actualData.
#' (Roxygen previously referred to the non-existent \code{AgeHist}.)
#'
#' @return Invisibly, the object produced by \code{\link[graphics]{hist}}.
#'
#' @export
PlotHist <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  # hist() draws as a side effect; return its result invisibly instead of
  # binding it to a dead local variable as before.
  invisible(hist(actualData, main = "Distribution of Age", xlab = "Age"))
}
#prints all the statistics of faculty ages
#' Information of faculty ages
#'
#' \code{PrintAll} prints all of the above statistics of actualData and
#' draws the histogram.  (Roxygen previously referred to \code{AgeHist}.)
#'
#' @return Invisibly, the last plotted object; output is printed as a side effect.
#'
#' @export
PrintAll <- function() {
  if (required != 1) {
    RequiredFunction()
  }
  print('Average:')
  Average()
  print('Range:')
  Range()
  print('Maximum:')
  Max()
  print('Minimum:')
  Min()
  PlotHist()
}
|
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of accessors: set/get for the matrix itself,
## setinv/getinv for the cached inverse (NULL until computed).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      # replacing the matrix invalidates any previously cached inverse
      x <<- y
      cached_inverse <<- NULL
    },
    get = function() x,
    setinv = function(inv) cached_inverse <<- inv,
    getinv = function() cached_inverse
  )
}
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it only on the first call and serving the cached copy afterwards.
## x   : object returned by makeCacheMatrix (list with get/getinv/setinv)
## ... : extra arguments forwarded to solve() (e.g. tol)
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Retrieve the cache; if it is not empty, return it directly
  m <- x$getinv()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  # Get the value of the input matrix
  data <- x$get()
  # BUG FIX: forward ... to solve() — the signature advertises extra
  # arguments but they were previously ignored (solve(data) only).
  m <- solve(data, ...)
  # Cache the inverse for subsequent calls
  x$setinv(m)
  # Return the inverse
  m
}
# Smoke test: invert a random 3x3 matrix through the caching wrapper.
test_matrix <- matrix(rnorm(9), nrow = 3, ncol = 3)
matrix_cache <- makeCacheMatrix(x = test_matrix)
cacheSolve(x = matrix_cache)
| /cachematrix.R | no_license | diptarshis/MatrixCache_project | R | false | false | 1,290 | r | ##This function here will create the cache every time there is a
##function call: build a matrix object that memoises its inverse.
## Accessors: set/get for the matrix, setinv/getinv for the cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  # cache starts empty
  m <- NULL
  set <- function(y) {
    # replacing the matrix invalidates any cached inverse
    x <<- y
    m <<- NULL
  }
  get <- function() x
  setinv <- function(inv) m <<- inv
  getinv <- function() m
  list(set = set, get = get,
       setinv = setinv,
       getinv = getinv)
}
## Return the inverse of the special "matrix" created by makeCacheMatrix,
## computing it only once and serving the cached copy afterwards.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  m <- x$getinv()
  # if cache is not empty, return the cache
  if (!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  # BUG FIX: forward ... to solve() — the signature advertises extra
  # arguments but they were previously ignored.
  m <- solve(data, ...)
  x$setinv(m)
  m
}
#Testing the functions
#Creating the square matrix
a <- matrix(rnorm(9),nrow = 3,ncol = 3)
#Creating the cache
ghy <- makeCacheMatrix(x = a)
#Running the cacheSolve function
cacheSolve(x = ghy)
|
# Naive Bayes classifier on the electronics-purchase data set.
# NOTE(review): the csv path is relative; run the script from its own folder.
library(e1071)
library(caret)
ep <- read.csv("../data/tema3/electronics-purchase.csv")
# fix the RNG so the train/test partition is reproducible
set.seed(2018)
# stratified split on the target: ~67% train (list = F returns an index matrix)
# NOTE(review): prefer FALSE over F (F can be reassigned)
t.ids <- createDataPartition(ep$Purchase, p = 0.67, list = F)
# fit Naive Bayes predicting Purchase from all other columns, training rows only
mod <- naiveBayes(Purchase ~ ., data = ep[t.ids,])
# print the fitted model (priors and conditional probability tables)
mod
# predict on the held-out rows
pred <- predict(mod, ep[-t.ids,])
# confusion table, actual vs predicted ("Predicha" = Spanish for "predicted")
tab <- table(ep[-t.ids,]$Purchase, pred, dnn = c("Actual", "Predicha"))
# accuracy, sensitivity, specificity, etc.
confusionMatrix(tab)
| /scripts/tema3/07-naive-bayes.R | permissive | dabamascodes/r-course | R | false | false | 349 | r | library(e1071)
# (continuation of the duplicated naive-Bayes script; library(e1071) above)
library(caret)
ep <- read.csv("../data/tema3/electronics-purchase.csv")
# fix the RNG so the train/test partition is reproducible
set.seed(2018)
# stratified split on the target: ~67% train (list = F returns an index matrix)
t.ids <- createDataPartition(ep$Purchase, p = 0.67, list = F)
# fit Naive Bayes predicting Purchase from all other columns, training rows only
mod <- naiveBayes(Purchase ~ ., data = ep[t.ids,])
# print the fitted model (priors and conditional probability tables)
mod
# predict on the held-out rows
pred <- predict(mod, ep[-t.ids,])
# confusion table, actual vs predicted ("Predicha" = Spanish for "predicted")
tab <- table(ep[-t.ids,]$Purchase, pred, dnn = c("Actual", "Predicha"))
# accuracy, sensitivity, specificity, etc.
confusionMatrix(tab)
|
# Smoke tests for C++ function pointers exposed through Rcpp.
# NOTE(review): setwd() inside a script is fragile; prefer running from the
# project directory.
setwd("~/fmsc/ChooseInstruments")
library(Rcpp)
library(RcppArmadillo)
# compiles and loads foo, bar, baz1..baz3 and PassThrough
sourceCpp("function_pointers.cpp")
#Simple example with scalars
foo(1, 1) #Passes Plus to ResultPlusOne -> a + b + 1
bar(1, 1) #Passes Minus to ResultPlusOne -> a - b + 1
#More complicated example with matrix inner products
k <- 4
b <- 1:k / 10
V <- matrix(rnorm(k^2), k, k)
# each difference below should be numerically zero if the C++ side matches
baz1(b, V) - V[1,1]
baz2(b, V) - V[2,2]
baz3(b, V) - (t(b^2) %*% V %*% b^2)
#Passing a function pointer *through* one function and *into* another
PassThrough(1, 1) #Should give same result as foo
foo(1, 1)
| /SimulationChooseIVs/test/function_pointers.R | no_license | fditraglia/fmsc | R | false | false | 557 | r | setwd("~/fmsc/ChooseInstruments")
# (continuation of the duplicated function-pointer test script; setwd above)
library(Rcpp)
library(RcppArmadillo)
# compiles and loads foo, bar, baz1..baz3 and PassThrough
sourceCpp("function_pointers.cpp")
#Simple example with scalars
foo(1, 1) #Passes Plus to ResultPlusOne -> a + b + 1
bar(1, 1) #Passes Minus to ResultPlusOne -> a - b + 1
#More complicated example with matrix inner products
k <- 4
b <- 1:k / 10
V <- matrix(rnorm(k^2), k, k)
# each difference below should be numerically zero if the C++ side matches
baz1(b, V) - V[1,1]
baz2(b, V) - V[2,2]
baz3(b, V) - (t(b^2) %*% V %*% b^2)
#Passing a function pointer *through* one function and *into* another
PassThrough(1, 1) #Should give same result as foo
foo(1, 1)
|
# Plot 4 of the Exploratory Data Analysis project: a 2x2 panel of
# power-consumption time series for 2007-02-01 and 2007-02-02.
pow <- read.table("household_power_consumption.txt", header=TRUE, sep=";")
str(pow)
# combined POSIXct date/time column for the x axes
pow$newdate <- with(pow, as.POSIXct(paste(Date, Time), format="%d/%m/%Y %H:%M:%S"))
head(pow$newdate)
class(pow$newdate)
# convert the Date column (dd/mm/yyyy text) to Date class
pow$Date <- strptime(as.character(pow$Date), "%d/%m/%Y")
pow$Date <- format(pow$Date, "%Y-%m-%d")
pow$Date <- as.Date(pow$Date)
# convert the measurement columns (read in as text, "?" becomes NA) to
# numeric in one pass instead of seven copy-pasted lines
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3")
pow[num_cols] <- lapply(pow[num_cols], function(col) as.numeric(as.character(col)))
# subset data for only 2007-02-01 and 2007-02-02
powdat <- subset(pow, Date=="2007-02-01" | Date=="2007-02-02")
par(mfrow=c(2,2)) # 2 rows, 2 columns
# 1st panel: global active power
plot(powdat$newdate, powdat$Global_active_power, type="l",
     ylab="Global Active Power (kilowatts)", xlab="",
     cex.lab=0.75)
# 2nd panel: voltage
plot(powdat$newdate, powdat$Voltage, type="l", xlab="datetime",
     ylab="Voltage (volts)", cex.lab=0.75)
# 3rd panel: the three sub-metering series
plot(powdat$newdate, powdat$Sub_metering_1, type="n",
     ylab="Energy sub metering", xlab="", cex.lab=0.75)
lines(powdat$newdate, powdat$Sub_metering_1)
lines(powdat$newdate, powdat$Sub_metering_2, col="red")
lines(powdat$newdate, powdat$Sub_metering_3, col="blue")
# BUG FIX: the legend colours were c("black","blue","red"), labelling
# Sub_metering_2 blue and Sub_metering_3 red — the opposite of the lines
# drawn above.  The legend now matches the plotted colours.
legend(1170400000, 39, c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"), cex=0.5, lty=c(1,1,1), bty="n")
# 4th panel: global reactive power
plot(powdat$newdate, powdat$Global_reactive_power, type="l",
     ylab="Global Reactive Power (kilowatts)",
     xlab="datetime", cex.lab=0.75)
# copy to a 480x480 png (dev.off() follows on the next line)
dev.copy(png, "plot4.png", height=480, width=480)
dev.off() | /plot4.R | no_license | jlapple/ExData_Plotting1 | R | false | false | 1,977 | r | pow<-read.table("household_power_consumption.txt", header=TRUE, sep=";")
# (duplicated plot4 script; pow was read by read.table on the line above)
str(pow)
# combined POSIXct date/time column for the x axes
pow$newdate <- with(pow, as.POSIXct(paste(Date, Time), format="%d/%m/%Y %H:%M:%S"))
head(pow$newdate)
class(pow$newdate)
# convert the Date column (dd/mm/yyyy text) to Date class
pow$Date <- strptime(as.character(pow$Date), "%d/%m/%Y")
pow$Date <- format(pow$Date, "%Y-%m-%d")
pow$Date <- as.Date(pow$Date)
# convert the measurement columns (read in as text, "?" becomes NA) to
# numeric in one pass instead of seven copy-pasted lines
num_cols <- c("Global_active_power", "Global_reactive_power", "Voltage",
              "Global_intensity", "Sub_metering_1", "Sub_metering_2",
              "Sub_metering_3")
pow[num_cols] <- lapply(pow[num_cols], function(col) as.numeric(as.character(col)))
# subset data for only 2007-02-01 and 2007-02-02
powdat <- subset(pow, Date=="2007-02-01" | Date=="2007-02-02")
par(mfrow=c(2,2)) # 2 rows, 2 columns
# 1st panel: global active power
plot(powdat$newdate, powdat$Global_active_power, type="l",
     ylab="Global Active Power (kilowatts)", xlab="",
     cex.lab=0.75)
# 2nd panel: voltage
plot(powdat$newdate, powdat$Voltage, type="l", xlab="datetime",
     ylab="Voltage (volts)", cex.lab=0.75)
# 3rd panel: the three sub-metering series
plot(powdat$newdate, powdat$Sub_metering_1, type="n",
     ylab="Energy sub metering", xlab="", cex.lab=0.75)
lines(powdat$newdate, powdat$Sub_metering_1)
lines(powdat$newdate, powdat$Sub_metering_2, col="red")
lines(powdat$newdate, powdat$Sub_metering_3, col="blue")
# BUG FIX: the legend colours were c("black","blue","red"), labelling
# Sub_metering_2 blue and Sub_metering_3 red — the opposite of the lines
# drawn above.  The legend now matches the plotted colours.
legend(1170400000, 39, c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
       col=c("black","red","blue"), cex=0.5, lty=c(1,1,1), bty="n")
# 4th panel: global reactive power
plot(powdat$newdate, powdat$Global_reactive_power, type="l",
     ylab="Global Reactive Power (kilowatts)",
     xlab="datetime", cex.lab=0.75)
# copy to a 480x480 png (dev.off() follows on the next line)
dev.copy(png, "plot4.png", height=480, width=480)
dev.off() |
library(tidyverse)
library(glue)
library(R.utils)
# Build the Census FTP URL for a 1-year ACS PUMS archive.
#
# state: two-letter state abbreviation (any case)
# year:  survey year, 1996 through the current calendar year
# level: "population" or "housing" (any case)
# Returns the full download URL as a glue string.
create_http_address <- function(state, year, level) {
  # validate the state abbreviation before anything else
  if (nchar(state) != 2) {
    stop("`state` parameter must be two letter state abbreviation.")
  }
  state <- str_to_lower(state)

  # PUMS data is only published from 1996 through the current year
  current_year <- as.integer(format(Sys.Date(), "%Y"))
  if (!between(year, 1996, current_year)) {
    stop(glue("Year must be an integer between 1996 and {current_year}."))
  }

  # the record type is encoded as a single letter in the archive file name
  level <- str_to_lower(level)
  if (level == 'population') {
    record_code <- "p"
  } else if (level == 'housing') {
    record_code <- "h"
  } else {
    # level must either be population or housing
    stop("Level must either be 'population' or 'housing'")
  }
  glue("https://www2.census.gov/programs-surveys/acs/data/pums/{year}/1-Year/csv_{record_code}{state}.zip")
}
# download file and delete extra files in downloaded zip file
# Downloads the PUMS zip from `http_address` to `destination_file_path`,
# extracts it into `download_folder`, then removes the documentation PDF
# and the zip itself, leaving only the csv payload on disk.
# NOTE(review): method = "curl" requires curl on PATH — confirm, or rely on
# the default download method.
download_delete <- function(http_address, destination_file_path, download_folder) {
# download file
download.file(http_address, destfile = destination_file_path, method="curl")
# unzip file so we can remove documentation
unzip(destination_file_path, exdir = download_folder)
# we want to delete the zip file and PDF documentation
# first get the file name of the documentation PDF (files named "ACS*.pdf")
delete_files <- list.files(download_folder, pattern = "ACS.*[.]pdf", full.names = TRUE)
# delete documentation PDF and zip file
file.remove(delete_files, destination_file_path)
}
# Gzip every csv extracted from a PUMS zip archive.
#
# download_folder:       directory the archive was extracted into
# destination_file_path: full path of the downloaded .zip file (as passed by
#                        download_pums_files)
# Some states and the US files ship more than one csv per archive, so each
# csv gets an "-a", "-b", ... suffix in its gzipped output name.
gzip_pums <- function(download_folder, destination_file_path) {
  # all csv files extracted from the archive
  zip_file <- list.files(download_folder, pattern = "[.]csv$", full.names = TRUE)
  # number of csv files in the zip file
  num_files <- length(zip_file)
  # letter suffixes to disambiguate multiple csv files
  num_files_seq <- letters[seq_len(num_files)]
  # BUG FIX: `destination_file_path` is already a full path inside
  # `download_folder` (the caller passes download_zip_full_path), so the old
  # code's extra str_c(download_folder, "/", ...) prefix produced paths like
  # "folder/folder/file-a.csv.gz".  Also anchor the ".zip" pattern so only a
  # literal trailing extension is stripped (unanchored "." matched any char).
  gz_base <- str_remove(destination_file_path, "[.]zip$")
  gz_output <- str_c(gz_base, "-", num_files_seq, ".csv.gz")
  # gzip each csv, removing the uncompressed original
  walk2(zip_file, gz_output, gzip, remove = TRUE)
}
# function to download file and unzip if needed state, year, level
# Orchestrates a single PUMS download:
#   state                 - two-letter state abbreviation
#   year                  - survey year (1996..current)
#   level                 - "population" or "housing"
#   destination_file_path - zip file name, relative to download_folder
#   download_folder       - directory to create/use for the download
# Side effects only: writes gzipped csv files into download_folder and
# prints a completion message.
download_pums_files <- function(state, year, level, destination_file_path, download_folder) {
# create directory if it does not already exist
if (!dir.exists(download_folder)){
dir.create(download_folder)
}
# combine directory and downloaded zip file name into one object to create complete file path
download_zip_full_path <- str_c(download_folder, destination_file_path, sep = '/')
# create http address
http_address <- create_http_address(state, year, level)
# download PUMS file and delete unneeded PDF file
download_delete(http_address, download_zip_full_path, download_folder)
# gzip remaining csv file (note: receives the FULL zip path)
gzip_pums(download_folder, download_zip_full_path)
# print statement showing operations are complete
print(glue("Downloaded {level} PUMS data for {state} in {year}."))
}
| /functions.R | permissive | shanejorr/download-PUMS-data | R | false | false | 3,366 | r | library(tidyverse)
library(glue)
library(R.utils)
# Build the Census FTP URL for a 1-year ACS PUMS archive.
# state: two-letter abbreviation; year: 1996..current; level: "population"/"housing".
create_http_address <- function(state, year, level) {
  # perform checks to ensure parameters were properly input
  if (nchar(state) != 2) {
    stop("`state` parameter must be two letter state abbreviation.")
  } else {
    state <- str_to_lower(state)
  }
  # get current year
  current_year <- as.integer(format(Sys.Date(), "%Y"))
  # ensure year is between 1996 (first year with data) and the current year
  if (!between(year, 1996, current_year)) {
    stop(glue("Year must be an integer between 1996 and {current_year}."))
  }
  level <- str_to_lower(level)
  # create http address; the record type is a single letter in the file name
  if (level == 'population') {
    http_address <- glue("https://www2.census.gov/programs-surveys/acs/data/pums/{year}/1-Year/csv_p{state}.zip")
  } else if (level == 'housing') {
    http_address <- glue("https://www2.census.gov/programs-surveys/acs/data/pums/{year}/1-Year/csv_h{state}.zip")
  } else {
    # level must either be population or housing
    stop("Level must either be 'population' or 'housing'")
  }
  return(http_address)
}
# Download the PUMS zip, extract it, and delete the documentation PDF plus
# the zip itself, leaving only the csv payload.
# NOTE(review): method = "curl" requires curl on PATH — confirm.
download_delete <- function(http_address, destination_file_path, download_folder) {
  # download file
  download.file(http_address, destfile = destination_file_path, method="curl")
  # unzip file so we can remove documentation
  unzip(destination_file_path, exdir = download_folder)
  # delete the documentation PDF ("ACS*.pdf") and the zip file
  delete_files <- list.files(download_folder, pattern = "ACS.*[.]pdf", full.names = TRUE)
  file.remove(delete_files, destination_file_path)
}
# Gzip every csv extracted from a PUMS zip archive; multiple csv files get
# "-a", "-b", ... suffixes.  destination_file_path is the FULL zip path.
gzip_pums <- function(download_folder, destination_file_path) {
  # all csv files extracted from the archive
  zip_file <- list.files(download_folder, pattern = "[.]csv$", full.names = TRUE)
  num_files <- length(zip_file)
  # letter suffixes to disambiguate multiple csv files
  num_files_seq <- letters[seq_len(num_files)]
  # BUG FIX: destination_file_path is already a full path inside
  # download_folder; the old code prefixed the folder a second time,
  # producing "folder/folder/file-a.csv.gz".  The ".zip" pattern is also
  # anchored so only a literal trailing extension is stripped.
  gz_base <- str_remove(destination_file_path, "[.]zip$")
  gz_output <- str_c(gz_base, "-", num_files_seq, ".csv.gz")
  # gzip each csv, removing the uncompressed original
  walk2(zip_file, gz_output, gzip, remove = TRUE)
}
# Orchestrate a single PUMS download: create the folder, build the URL,
# download/clean the archive, and gzip the resulting csv files.
download_pums_files <- function(state, year, level, destination_file_path, download_folder) {
  # create directory if it does not already exist
  if (!dir.exists(download_folder)){
    dir.create(download_folder)
  }
  # complete path of the zip file inside the download folder
  download_zip_full_path <- str_c(download_folder, destination_file_path, sep = '/')
  # create http address
  http_address <- create_http_address(state, year, level)
  # download PUMS file and delete unneeded PDF file
  download_delete(http_address, download_zip_full_path, download_folder)
  # gzip remaining csv file (receives the FULL zip path)
  gzip_pums(download_folder, download_zip_full_path)
  # print statement showing operations are complete
  print(glue("Downloaded {level} PUMS data for {state} in {year}."))
}
|
splithalfT3 <-
function(X,n,m,p,r1,r2,r3,centopt,normopt,renormmode,wa_rel,wb_rel,wc_rel,addanal,conv,laba,labb,labc){
cat("This procedure performs a SPLIT-HALF analysis on X",fill=TRUE)
cat("NOTE:",fill=TRUE)
cat("In SPLIT-HALF analysis, the A-mode is taken as 'replication mode'",fill=TRUE)
cat("(which means that A-mode entities are considered a random sample",fill=TRUE)
cat("if this does not make sense, you should rearrange your data so that the A-mode is a replication mode)",fill=TRUE)
cat("The splitting into two halves can be done randomly (default), or into odd vs. even sequence numbers",fill=TRUE)
narg=nargs()
if (narg<16){
laba=paste("a",1:n,sep="")
labb=paste("b",1:m,sep="")
labc=paste("c",1:p,sep="")
}
X=as.matrix(X)
cat("If you prefer odd/even split, specify '1':",fill=TRUE)
nsplit=scan("",n=1)
if (length(nsplit)==0){
nsplit=0
}
if (nsplit==1){ #wi=[1:2:n 2:2:n]
check=0
wi=c(seq(1,n,2), seq(2,n,2))
cat("Splitting has been done into odd vs. even sequence numbers",fill=TRUE)
} else{
# create random splits
w=matrix(runif(n*1,0,1),n,1)
wi=ord(w)$a
cat("Splitting has been done randomly",fill=TRUE)
}
n1=ceiling(n/2)
n2=n-n1
X1=X[wi[1:n1],]
X2=X[wi[(n1+1):n],]
cat("You will now enter the split half procedure with the options specified so far",fill=TRUE)
cat("However, you may want to modify certain choices here (rather than rerunning the full Tucker3)",fill=TRUE)
cat("Do you want to modify certain choices? If so, specify '1':",fill=TRUE)
ccc=scan("",n=1)
if (length(ccc)==0){
ccc=0
}
if (ccc==1){
cat("Centering:",fill=TRUE)
cat(" 0 = none (default)", fill=TRUE)
cat(" 1= across A-mode",fill=TRUE)
cat(" 2= across B-mode",fill=TRUE)
cat(" 3= across C-mode",fill=TRUE)
cat(" 12= across A-mode and across B-mode",fill=TRUE)
cat(" 13= across A-mode and across C-mode",fill=TRUE)
cat(" 23= across B-mode and across C-mode",fill=TRUE)
cat(paste("Your centering choice was",centopt),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
while(check==0){
cat("Specify centering option:",fill=TRUE)
centopt=scan("",n=1)
if (length(centopt)==0){
centopt=0
}
if((centopt!=0) & ((centopt!=1) & (centopt!=2) & (centopt!=3) & (centopt!=12) & (centopt!=13) & (centopt!=23))){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for centering the data")
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat("Normalizing:",fill=TRUE)
cat(" 0= none (default)", fill=TRUE)
cat(" 1= within A-mode", fill=TRUE)
cat(" 2= within B-mode",fill=TRUE)
cat(" 3= within C-mode",fill=TRUE)
cat(paste("Your normalizing choice was",normopt),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
while(check==0){
cat("Specify normalizing option",fill=TRUE)
normopt=scan("",n=1)
if (length(normopt)==0){
normopt=0
}
if ((normopt!=0) & ((normopt!=1) & (normopt!=2) & (normopt!=3))){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for normalizing the data")
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
if (renormmode==1){
cat("Your choice was to renormalize A-mode",fill=TRUE)
}
if (renormmode==2){
cat("Your choice was to renormalize B-mode",fill=TRUE)
}
if (renormmode==3){
cat("Your choice was to renormalize C-mode",fill=TRUE)
} else{
if ((renormmode!=1) & (renormmode!=2) & (renormmode!=3)){
cat("Your choice was not to renormalize solution",fill=TRUE)
}
}
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check2=0
while (check2==0){
cat("Which mode do you want to renormalize? Enter 1 (=A), 2 (=B) or 3 (=C):", fill=TRUE)
renormmode=scan("",n=1)
if (length(renormmode)==0){
renormmode=0
}
if ((renormmode!=1) &(renormmode!=2) & (renormmode!=3)){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for normalizing the data")
cat(" ",fill=TRUE)
} else{
check2=1
}
}
}
cat(paste("Numbers of components for A, B and C were: ",r1,", ",r2,", ",r3,sep=""),fill=TRUE)
cat("If you want to change them, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("How many A-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r1=scan("",n=1)
if (length(r1)==0){
r1=0
}
if ((r1==0) | ((floor(r1)-r1)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many A-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
cat("How many B-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r2=scan("",n=1)
if (length(r2)==0){
r2=0
}
if ((r2==0) | ((floor(r2)-r2)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many B-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
cat("How many C-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r3=scan("",n=1)
if (length(r3)==0){
r3=0
}
if ((r3==0) | ((floor(r3)-r3)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many C-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat(paste("Your choice was to use a convergence criterion equal to",conv),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("Specify convergence criterion (default=1e-6)", fill=TRUE)
conv=scan("",n=1)
if (length(conv)==0){
conv=1e-6
}
}
cat(paste("Your choice was to use",addanal,"additional random starts in the analysis"),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
cat("How many additional runs do you want to use?",fill=TRUE)
while (check==0){
addanal=scan("",n=1)
if (length(addanal)==0){
addanal=0
check=1
}
if ((floor(addanal)-addanal)!=0){
cat(" ",fill=TRUE)
cat("Error! How many additional runs do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat(paste("Relative simplicity rotation weights for A, B and C are: ",wa_rel,", ",wb_rel,", ",wc_rel,sep=""),fill=TRUE)
cat("If you want to change them, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("Specify relative weight for simplicity of A-mode (default=0):",fill=TRUE)
wa_rel=scan("",n=1)
cat("Specify relative weight for simplicity of B-mode (default=0):",fill=TRUE)
wb_rel=scan("",n=1)
cat("Specify relative weight for simplicity of C-mode (default=0):",fill=TRUE)
wc_rel=scan("",n=1)
if (length(wa_rel)==0){
wa_rel=0
}
if (length(wb_rel)==0){
wb_rel=0
}
if (length(wc_rel)==0){
wc_rel=0
}
}
}
cat("Analysis of FULL DATA",fill=TRUE)
# Full data analysis (reference)
# Full data Preprocessing
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
} else{
cat("X has not been centered",fill=TRUE)
}
cc=normopt
if (cc==1){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
# Full data analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
print(func)
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
RNsol=renormsolT3(A,B,C,H,renormmode)
# Full data Rotation
VARM=varimcoco(RNsol$A,RNsol$B,RNsol$C,RNsol$H,wa_rel,wb_rel,wc_rel)
AS=VARM$AS
BT=VARM$BT
CU=VARM$CU
K=VARM$K
cat("Backnormalize A,B,C, and H",fill=TRUE)
if (renormmode==1){
Ds=diag(SUM(AS)$col^.5,nrow=r1)
AS=AS%*%solve(Ds)
K=Ds%*%K
}
if (renormmode==2){
Ds=diag(SUM(BT)$col^.5,nrow=r2)
BT=BT%*%solve(Ds)
K=K%*%kronecker(diag(r3),Ds)
}
if (renormmode==3){
Ds=diag(SUM(CU)$col^.5,nrow=r3)
CU=CU%*%solve(Ds)
K=K%*%kronecker(Ds,diag(r2))
}
Afull=AS
Bfull=BT
Cfull=CU
Kfull=K
# Split 1 data analysis (reference)
# Split 1 Preprocessing
cat("Analysis of SPLIT 1",fill=TRUE)
n_orig=n
n=n1
X_orig=X
X=X1
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
}
cc=normopt
if (cc==1 ){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
Xs1=X
# Split 1 analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
cat("No Postprocessing used in splits, taken care off by reference rotation!",fill=TRUE)
As1=A
Bs1=B
Cs1=C
Ks1=H
# Split 2 data analysis (reference)
# Split 2 Preprocessing
cat("Analysis of SPLIT 2",fill=TRUE)
n=n2
X=X2
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
}
cc=normopt
if (cc==1){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
Xs2=X
# Split 2 analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
cat("No Postprocessing used in splits, taken care off by reference rotation!",fill=TRUE)
As2=A
Bs2=B
Cs2=C
Ks2=H
n=n_orig
X=X_orig
# Compute stability indices:
Ss1=solve(t(As1)%*%As1)%*%t(As1)%*%Afull[wi[1:n1],]
Ss2=solve(t(As2)%*%As2)%*%t(As2)%*%Afull[wi[(n1+1):n],]
Ts1=solve(t(Bs1)%*%Bs1)%*%t(Bs1)%*%Bfull
Ts2=solve(t(Bs2)%*%Bs2)%*%t(Bs2)%*%Bfull
Us1=solve(t(Cs1)%*%Cs1)%*%t(Cs1)%*%Cfull
Us2=solve(t(Cs2)%*%Cs2)%*%t(Cs2)%*%Cfull
As1=As1%*%Ss1
As2=As2%*%Ss2
Bs1=Bs1%*%Ts1
Bs2=Bs2%*%Ts2
Cs1=Cs1%*%Us1
Cs2=Cs2%*%Us2
Ks1=solve(Ss1)%*%Ks1%*%solve(kronecker(t(Us1),t(Ts1)))
Ks2=solve(Ss2)%*%Ks2%*%solve(kronecker(t(Us2),t(Ts2)))
labCompA=paste("A",1:r1,sep="")
labCompB=paste("B",1:r2,sep="")
labCompC=paste("C",1:r3,sep="")
# make labels for columns of core
str=noquote(vector(mode="character",length=r3*r2))
i=1
for (k in 1:r3){
for (j in 1:r2){
str[i]=noquote(paste(" B",as.character(j),"xC",as.character(k),sep=""))
i=i+1
}
}
corelabelstr=str
cat("RESULTS stability analysis",fill=TRUE)
cat("Both splits are analyzed and rotated towards solution for full data",fill=TRUE)
cat("One can compare complete outcomes for different splits",fill=TRUE)
cat("or inspect correlation/congruence coefficients between corresponding columns of component matrices",fill=TRUE)
cat("Split-half solutions for B",fill=TRUE)
cat("B's next to each other, separated by column of 0's",fill=TRUE)
X=round(cbind(Bs1,0,Bs2),digits=2)
rownames(X)=labb
colnames(X)=c(labCompB,"-",labCompB)
print(X)
cat("Split-half solutions for C",fill=TRUE)
cat("C's next to each other, separated by column of 0's",fill=TRUE)
X=round(cbind(Cs1,0,Cs2),digits=2)
rownames(X)=labc
colnames(X)=c(labCompC,"-",labCompC)
print(X)
cat("Congruences for A in splits and in appropriate part of Afull",fill=TRUE)
X=round(cbind(diag(phi(Afull[wi[1:n1],],As1)),diag(phi(Afull[wi[(n1+1):n],],As2))),digits=2)
rownames(X)=labCompA
colnames(X)=c("SPL1","SPL2")
print(X)
cat("Correlations for A in splits and in appropriate part of Afull",fill=TRUE)
X=round(cbind(diag(phi(Cc(Afull[wi[1:n1],]),Cc(As1))),diag(phi(Cc(Afull[wi[(n1+1):n],]),Cc(As2)))),digits=2)
rownames(X)=labCompA
colnames(X)=c("SPL1","SPL2")
print(X)
cat("Congruence values for B-mode component matrix",fill=TRUE)
X=round(diag(phi(Bs1,Bs2)),digits=2)
names(X)=c(labCompB)
print(X)
cat("Congruence values for C-mode component matrix",fill=TRUE)
X=round(diag(phi(Cs1,Cs2)),digits=2)
names(X)=c(labCompC)
print(X)
cat("Relatively strong stability check of Core:",fill=TRUE)
cat("(simply comparing core matrices for two splits)",fill=TRUE)
cat("Core for split1",fill=TRUE)
Ks1=as.matrix(Ks1,digits=2)
rownames(Ks1)=labCompA
colnames(Ks1)=corelabelstr
print(round(Ks1,digits=2))
cat("Core for split2",fill=TRUE)
Ks2=as.matrix(Ks2,digits=2)
rownames(Ks2)=labCompA
colnames(Ks2)=corelabelstr
print(round(Ks2,digits=2))
cat("Weaker but Sufficient stability check of Core:",fill=TRUE)
cat("(computing cores in two splits, using full data solutions for A,B and C)",fill=TRUE)
A1=Afull[wi[1:n1],]
A2=Afull[wi[(n1+1):n],]
Hss1=solve(t(A1)%*%A1)%*%t(A1)%*%Xs1
Hss1=solve(t(Bfull)%*%Bfull)%*%t(Bfull)%*%permnew(Hss1,r1,m,p)
Hss1=solve(t(Cfull)%*%Cfull)%*%t(Cfull)%*%permnew(Hss1,r2,p,r1)
Hss1=permnew(Hss1,r3,r1,r2)
Hss2=solve(t(A2)%*%A2)%*%t(A2)%*%Xs2
Hss2=solve(t(Bfull)%*%Bfull)%*%t(Bfull)%*%permnew(Hss2,r1,m,p)
Hss2=solve(t(Cfull)%*%Cfull)%*%t(Cfull)%*%permnew(Hss2,r2,p,r1)
Hss2=permnew(Hss2,r3,r1,r2)
cat("Core for split1",fill=TRUE)
Hss1=as.matrix(Hss1)
rownames(Hss1)=labCompA
colnames(Hss1)=corelabelstr
print(round(Hss1,digits=2))
cat("Core for split2",fill=TRUE)
Hss2=as.matrix(Hss2)
rownames(Hss2)=labCompA
colnames(Hss2)=corelabelstr
print(round(Hss2,digits=2))
cat("Core for full data",fill=TRUE)
Kfull=as.matrix(Kfull)
rownames(Kfull)=labCompA
colnames(Kfull)=corelabelstr
print(round(Kfull,digits=2))
colnames(As1)=labCompA
colnames(As2)=labCompA
colnames(Afull)=labCompA
colnames(Bs1)=labCompB
colnames(Bs2)=labCompB
colnames(Bfull)=labCompB
colnames(Cs1)=labCompC
colnames(Cs2)=labCompC
colnames(Cfull)=labCompC
rownames(As1)=wi[1:n1]
rownames(As2)=wi[(n1+1):n]
rownames(Afull)=laba
rownames(Bs1)=labb
rownames(Bs2)=labb
rownames(Bfull)=labb
rownames(Cs1)=labc
rownames(Cs2)=labc
rownames(Cfull)=labc
out=list()
out$Afull=Afull
out$As1=As1
out$As2=As2
out$Bfull=Bfull
out$Bs1=Bs1
out$Bs2=Bs2
out$Cfull=Cfull
out$Cs1=Cs1
out$Cs2=Cs2
out$Kfull=Kfull
out$Ks1=Ks1
out$Ks2=Ks2
out$Kss1=Hss1
out$Kss2=Hss2
return(out)
}
| /ThreeWay/R/splithalfT3.R | no_license | ingted/R-Examples | R | false | false | 17,501 | r | splithalfT3 <-
function(X,n,m,p,r1,r2,r3,centopt,normopt,renormmode,wa_rel,wb_rel,wc_rel,addanal,conv,laba,labb,labc){
cat("This procedure performs a SPLIT-HALF analysis on X",fill=TRUE)
cat("NOTE:",fill=TRUE)
cat("In SPLIT-HALF analysis, the A-mode is taken as 'replication mode'",fill=TRUE)
cat("(which means that A-mode entities are considered a random sample",fill=TRUE)
cat("if this does not make sense, you should rearrange your data so that the A-mode is a replication mode)",fill=TRUE)
cat("The splitting into two halves can be done randomly (default), or into odd vs. even sequence numbers",fill=TRUE)
narg=nargs()
if (narg<16){
laba=paste("a",1:n,sep="")
labb=paste("b",1:m,sep="")
labc=paste("c",1:p,sep="")
}
X=as.matrix(X)
cat("If you prefer odd/even split, specify '1':",fill=TRUE)
nsplit=scan("",n=1)
if (length(nsplit)==0){
nsplit=0
}
if (nsplit==1){ #wi=[1:2:n 2:2:n]
check=0
wi=c(seq(1,n,2), seq(2,n,2))
cat("Splitting has been done into odd vs. even sequence numbers",fill=TRUE)
} else{
# create random splits
w=matrix(runif(n*1,0,1),n,1)
wi=ord(w)$a
cat("Splitting has been done randomly",fill=TRUE)
}
n1=ceiling(n/2)
n2=n-n1
X1=X[wi[1:n1],]
X2=X[wi[(n1+1):n],]
cat("You will now enter the split half procedure with the options specified so far",fill=TRUE)
cat("However, you may want to modify certain choices here (rather than rerunning the full Tucker3)",fill=TRUE)
cat("Do you want to modify certain choices? If so, specify '1':",fill=TRUE)
ccc=scan("",n=1)
if (length(ccc)==0){
ccc=0
}
if (ccc==1){
cat("Centering:",fill=TRUE)
cat(" 0 = none (default)", fill=TRUE)
cat(" 1= across A-mode",fill=TRUE)
cat(" 2= across B-mode",fill=TRUE)
cat(" 3= across C-mode",fill=TRUE)
cat(" 12= across A-mode and across B-mode",fill=TRUE)
cat(" 13= across A-mode and across C-mode",fill=TRUE)
cat(" 23= across B-mode and across C-mode",fill=TRUE)
cat(paste("Your centering choice was",centopt),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
while(check==0){
cat("Specify centering option:",fill=TRUE)
centopt=scan("",n=1)
if (length(centopt)==0){
centopt=0
}
if((centopt!=0) & ((centopt!=1) & (centopt!=2) & (centopt!=3) & (centopt!=12) & (centopt!=13) & (centopt!=23))){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for centering the data")
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat("Normalizing:",fill=TRUE)
cat(" 0= none (default)", fill=TRUE)
cat(" 1= within A-mode", fill=TRUE)
cat(" 2= within B-mode",fill=TRUE)
cat(" 3= within C-mode",fill=TRUE)
cat(paste("Your normalizing choice was",normopt),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
while(check==0){
cat("Specify normalizing option",fill=TRUE)
normopt=scan("",n=1)
if (length(normopt)==0){
normopt=0
}
if ((normopt!=0) & ((normopt!=1) & (normopt!=2) & (normopt!=3))){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for normalizing the data")
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
if (renormmode==1){
cat("Your choice was to renormalize A-mode",fill=TRUE)
}
if (renormmode==2){
cat("Your choice was to renormalize B-mode",fill=TRUE)
}
if (renormmode==3){
cat("Your choice was to renormalize C-mode",fill=TRUE)
} else{
if ((renormmode!=1) & (renormmode!=2) & (renormmode!=3)){
cat("Your choice was not to renormalize solution",fill=TRUE)
}
}
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check2=0
while (check2==0){
cat("Which mode do you want to renormalize? Enter 1 (=A), 2 (=B) or 3 (=C):", fill=TRUE)
renormmode=scan("",n=1)
if (length(renormmode)==0){
renormmode=0
}
if ((renormmode!=1) &(renormmode!=2) & (renormmode!=3)){
cat(" ",fill=TRUE)
cat("Error! Make a proper choice for normalizing the data")
cat(" ",fill=TRUE)
} else{
check2=1
}
}
}
cat(paste("Numbers of components for A, B and C were: ",r1,", ",r2,", ",r3,sep=""),fill=TRUE)
cat("If you want to change them, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("How many A-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r1=scan("",n=1)
if (length(r1)==0){
r1=0
}
if ((r1==0) | ((floor(r1)-r1)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many A-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
cat("How many B-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r2=scan("",n=1)
if (length(r2)==0){
r2=0
}
if ((r2==0) | ((floor(r2)-r2)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many B-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
cat("How many C-mode components do you want to use?",fill=TRUE)
check=0
while (check==0){
r3=scan("",n=1)
if (length(r3)==0){
r3=0
}
if ((r3==0) | ((floor(r3)-r3)!=0)){
cat(" ",fill=TRUE)
cat("Error! How many C-mode components do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat(paste("Your choice was to use a convergence criterion equal to",conv),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("Specify convergence criterion (default=1e-6)", fill=TRUE)
conv=scan("",n=1)
if (length(conv)==0){
conv=1e-6
}
}
cat(paste("Your choice was to use",addanal,"additional random starts in the analysis"),fill=TRUE)
cat("If you want to change it, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
check=0
cat("How many additional runs do you want to use?",fill=TRUE)
while (check==0){
addanal=scan("",n=1)
if (length(addanal)==0){
addanal=0
check=1
}
if ((floor(addanal)-addanal)!=0){
cat(" ",fill=TRUE)
cat("Error! How many additional runs do you want to use?",fill=TRUE)
cat(" ",fill=TRUE)
} else{
check=1
}
}
}
cat(paste("Relative simplicity rotation weights for A, B and C are: ",wa_rel,", ",wb_rel,", ",wc_rel,sep=""),fill=TRUE)
cat("If you want to change them, specify '1':",fill=TRUE)
cc=scan("",n=1)
if (length(cc)==0){
cc=0
}
if (cc==1){
cat("Specify relative weight for simplicity of A-mode (default=0):",fill=TRUE)
wa_rel=scan("",n=1)
cat("Specify relative weight for simplicity of B-mode (default=0):",fill=TRUE)
wb_rel=scan("",n=1)
cat("Specify relative weight for simplicity of C-mode (default=0):",fill=TRUE)
wc_rel=scan("",n=1)
if (length(wa_rel)==0){
wa_rel=0
}
if (length(wb_rel)==0){
wb_rel=0
}
if (length(wc_rel)==0){
wc_rel=0
}
}
}
cat("Analysis of FULL DATA",fill=TRUE)
# Full data analysis (reference)
# Full data Preprocessing
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
} else{
cat("X has not been centered",fill=TRUE)
}
cc=normopt
if (cc==1){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
# Full data analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
print(func)
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
RNsol=renormsolT3(A,B,C,H,renormmode)
# Full data Rotation
VARM=varimcoco(RNsol$A,RNsol$B,RNsol$C,RNsol$H,wa_rel,wb_rel,wc_rel)
AS=VARM$AS
BT=VARM$BT
CU=VARM$CU
K=VARM$K
cat("Backnormalize A,B,C, and H",fill=TRUE)
if (renormmode==1){
Ds=diag(SUM(AS)$col^.5,nrow=r1)
AS=AS%*%solve(Ds)
K=Ds%*%K
}
if (renormmode==2){
Ds=diag(SUM(BT)$col^.5,nrow=r2)
BT=BT%*%solve(Ds)
K=K%*%kronecker(diag(r3),Ds)
}
if (renormmode==3){
Ds=diag(SUM(CU)$col^.5,nrow=r3)
CU=CU%*%solve(Ds)
K=K%*%kronecker(Ds,diag(r2))
}
Afull=AS
Bfull=BT
Cfull=CU
Kfull=K
# Split 1 data analysis (reference)
# Split 1 Preprocessing
cat("Analysis of SPLIT 1",fill=TRUE)
n_orig=n
n=n1
X_orig=X
X=X1
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
}
cc=normopt
if (cc==1 ){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
Xs1=X
# Split 1 analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
cat("No Postprocessing used in splits, taken care off by reference rotation!",fill=TRUE)
As1=A
Bs1=B
Cs1=C
Ks1=H
# Split 2 data analysis (reference)
# Split 2 Preprocessing
cat("Analysis of SPLIT 2",fill=TRUE)
n=n2
X=X2
cc=centopt
if ((cc==1) | (cc==12) | (cc==13)){
X=cent3(X,n,m,p,1)
cat("X has been centered across A-mode",fill=TRUE)
}
if ((cc==2) | (cc==12) | (cc==23)){
X=cent3(X,n,m,p,2)
cat("X has been centered across B-mode",fill=TRUE)
}
if ((cc==3) | (cc==13) | (cc==23)){
X=cent3(X,n,m,p,3)
cat("X has been centered across C-mode",fill=TRUE)
}
cc=normopt
if (cc==1){
X=norm3(X,n,m,p,1)
cat("X has been normalized within A-mode",fill=TRUE)
}
if (cc==2){
X=norm3(X,n,m,p,2)
cat("X has been normalized within B-mode",fill=TRUE)
}
if (cc==3){
X=norm3(X,n,m,p,3)
cat("X has been normalized within C-mode",fill=TRUE)
}
if ((cc!=1) & (cc!=2) & (cc!=3)){
cat("X has not been normalized",fill=TRUE)
}
Xs2=X
# Split 2 analysis
Tuck3=T3func(X,n,m,p,r1,r2,r3,0,1e-6)
A=Tuck3$A
B=Tuck3$B
C=Tuck3$C
H=Tuck3$H
f=Tuck3$f
iter=Tuck3$iter
fp=Tuck3$fp
La=Tuck3$La
Lb=Tuck3$Lb
Lc=Tuck3$Lc
func=vector("numeric",length=1+addanal)
names(func)=paste("Start n.",1:(1+addanal),sep="")
func=Tuck3$fp
for (run in 1:addanal){
cat(paste("Run no.",run+1,sep=" "),fill=TRUE)
Tuck3b=T3func(X,n,m,p,r1,r2,r3,1,1e-6)
func[run+1]=Tuck3b$fp
if (Tuck3b$fp>1.0001*Tuck3$fp){ # if fit more than .01% better is found, replace solution
A=Tuck3b$A
B=Tuck3b$B
C=Tuck3b$C
H=Tuck3b$H
f=Tuck3b$f
iter=Tuck3b$iter
fp=Tuck3b$fp
La=Tuck3b$La
Lb=Tuck3b$Lb
Lc=Tuck3b$Lc
}
}
if (addanal>=1){
cat("Fit % values from all runs:",fill=TRUE)
print(round(t(func), digits=2))
}
cat("No Postprocessing used in splits, taken care off by reference rotation!",fill=TRUE)
As2=A
Bs2=B
Cs2=C
Ks2=H
n=n_orig
X=X_orig
# Compute stability indices:
Ss1=solve(t(As1)%*%As1)%*%t(As1)%*%Afull[wi[1:n1],]
Ss2=solve(t(As2)%*%As2)%*%t(As2)%*%Afull[wi[(n1+1):n],]
Ts1=solve(t(Bs1)%*%Bs1)%*%t(Bs1)%*%Bfull
Ts2=solve(t(Bs2)%*%Bs2)%*%t(Bs2)%*%Bfull
Us1=solve(t(Cs1)%*%Cs1)%*%t(Cs1)%*%Cfull
Us2=solve(t(Cs2)%*%Cs2)%*%t(Cs2)%*%Cfull
As1=As1%*%Ss1
As2=As2%*%Ss2
Bs1=Bs1%*%Ts1
Bs2=Bs2%*%Ts2
Cs1=Cs1%*%Us1
Cs2=Cs2%*%Us2
Ks1=solve(Ss1)%*%Ks1%*%solve(kronecker(t(Us1),t(Ts1)))
Ks2=solve(Ss2)%*%Ks2%*%solve(kronecker(t(Us2),t(Ts2)))
labCompA=paste("A",1:r1,sep="")
labCompB=paste("B",1:r2,sep="")
labCompC=paste("C",1:r3,sep="")
# make labels for columns of core
str=noquote(vector(mode="character",length=r3*r2))
i=1
for (k in 1:r3){
for (j in 1:r2){
str[i]=noquote(paste(" B",as.character(j),"xC",as.character(k),sep=""))
i=i+1
}
}
corelabelstr=str
cat("RESULTS stability analysis",fill=TRUE)
cat("Both splits are analyzed and rotated towards solution for full data",fill=TRUE)
cat("One can compare complete outcomes for different splits",fill=TRUE)
cat("or inspect correlation/congruence coefficients between corresponding columns of component matrices",fill=TRUE)
cat("Split-half solutions for B",fill=TRUE)
cat("B's next to each other, separated by column of 0's",fill=TRUE)
X=round(cbind(Bs1,0,Bs2),digits=2)
rownames(X)=labb
colnames(X)=c(labCompB,"-",labCompB)
print(X)
cat("Split-half solutions for C",fill=TRUE)
cat("C's next to each other, separated by column of 0's",fill=TRUE)
X=round(cbind(Cs1,0,Cs2),digits=2)
rownames(X)=labc
colnames(X)=c(labCompC,"-",labCompC)
print(X)
cat("Congruences for A in splits and in appropriate part of Afull",fill=TRUE)
X=round(cbind(diag(phi(Afull[wi[1:n1],],As1)),diag(phi(Afull[wi[(n1+1):n],],As2))),digits=2)
rownames(X)=labCompA
colnames(X)=c("SPL1","SPL2")
print(X)
cat("Correlations for A in splits and in appropriate part of Afull",fill=TRUE)
X=round(cbind(diag(phi(Cc(Afull[wi[1:n1],]),Cc(As1))),diag(phi(Cc(Afull[wi[(n1+1):n],]),Cc(As2)))),digits=2)
rownames(X)=labCompA
colnames(X)=c("SPL1","SPL2")
print(X)
cat("Congruence values for B-mode component matrix",fill=TRUE)
X=round(diag(phi(Bs1,Bs2)),digits=2)
names(X)=c(labCompB)
print(X)
cat("Congruence values for C-mode component matrix",fill=TRUE)
X=round(diag(phi(Cs1,Cs2)),digits=2)
names(X)=c(labCompC)
print(X)
cat("Relatively strong stability check of Core:",fill=TRUE)
cat("(simply comparing core matrices for two splits)",fill=TRUE)
cat("Core for split1",fill=TRUE)
Ks1=as.matrix(Ks1,digits=2)
rownames(Ks1)=labCompA
colnames(Ks1)=corelabelstr
print(round(Ks1,digits=2))
cat("Core for split2",fill=TRUE)
Ks2=as.matrix(Ks2,digits=2)
rownames(Ks2)=labCompA
colnames(Ks2)=corelabelstr
print(round(Ks2,digits=2))
cat("Weaker but Sufficient stability check of Core:",fill=TRUE)
cat("(computing cores in two splits, using full data solutions for A,B and C)",fill=TRUE)
A1=Afull[wi[1:n1],]
A2=Afull[wi[(n1+1):n],]
Hss1=solve(t(A1)%*%A1)%*%t(A1)%*%Xs1
Hss1=solve(t(Bfull)%*%Bfull)%*%t(Bfull)%*%permnew(Hss1,r1,m,p)
Hss1=solve(t(Cfull)%*%Cfull)%*%t(Cfull)%*%permnew(Hss1,r2,p,r1)
Hss1=permnew(Hss1,r3,r1,r2)
Hss2=solve(t(A2)%*%A2)%*%t(A2)%*%Xs2
Hss2=solve(t(Bfull)%*%Bfull)%*%t(Bfull)%*%permnew(Hss2,r1,m,p)
Hss2=solve(t(Cfull)%*%Cfull)%*%t(Cfull)%*%permnew(Hss2,r2,p,r1)
Hss2=permnew(Hss2,r3,r1,r2)
cat("Core for split1",fill=TRUE)
Hss1=as.matrix(Hss1)
rownames(Hss1)=labCompA
colnames(Hss1)=corelabelstr
print(round(Hss1,digits=2))
cat("Core for split2",fill=TRUE)
Hss2=as.matrix(Hss2)
rownames(Hss2)=labCompA
colnames(Hss2)=corelabelstr
print(round(Hss2,digits=2))
cat("Core for full data",fill=TRUE)
Kfull=as.matrix(Kfull)
rownames(Kfull)=labCompA
colnames(Kfull)=corelabelstr
print(round(Kfull,digits=2))
colnames(As1)=labCompA
colnames(As2)=labCompA
colnames(Afull)=labCompA
colnames(Bs1)=labCompB
colnames(Bs2)=labCompB
colnames(Bfull)=labCompB
colnames(Cs1)=labCompC
colnames(Cs2)=labCompC
colnames(Cfull)=labCompC
rownames(As1)=wi[1:n1]
rownames(As2)=wi[(n1+1):n]
rownames(Afull)=laba
rownames(Bs1)=labb
rownames(Bs2)=labb
rownames(Bfull)=labb
rownames(Cs1)=labc
rownames(Cs2)=labc
rownames(Cfull)=labc
out=list()
out$Afull=Afull
out$As1=As1
out$As2=As2
out$Bfull=Bfull
out$Bs1=Bs1
out$Bs2=Bs2
out$Cfull=Cfull
out$Cs1=Cs1
out$Cs2=Cs2
out$Kfull=Kfull
out$Ks1=Ks1
out$Ks2=Ks2
out$Kss1=Hss1
out$Kss2=Hss2
return(out)
}
|
\name{fabMix-package}
\alias{fabMix-package}
\docType{package}
\title{
\packageTitle{fabMix}
}
\description{
\packageDescription{fabMix}
The main function of the package is \code{\link{fabMix}}.
}
\author{
\packageAuthor{fabMix}
Maintainer: \packageMaintainer{fabMix}
}
\references{
Fokoue, E. and Titterington, D.M. (2003). Mixtures of Factor Analysers: Bayesian Estimation and Inference by Stochastic Simulation. Machine Learning, 50(1): 73-94.
McNicholas, P.D. and Murphy, T.B. Statistics and Computing (2008) 18: 285. https://doi.org/10.1007/s11222-008-9056-0.
Papastamoulis P. and Iliopoulos G. (2010). An artificial allocations based solution to the label switching problem in Bayesian analysis of mixtures of distributions. Journal of Computational and Graphical Statistics, 19: 313-331.
Rousseau, J. and Mengersen, K. (2011). Asymptotic behaviour of the posterior distribution in overfitted mixture models. Journal of the Royal Statistical Society, Series B (methodological), 73(5): 689-710.
van Havre, Z., White, N., Rousseau, J. and Mengersen, K. (2015). Overfitting Bayesian Mixture Models with an Unknown Number of Components. PLOS ONE, 10(7): 1-27.
Papastamoulis, P. (2016). \code{label.switching}: An R Package for Dealing with the Label Switching Problem in MCMC Outputs. Journal of Statistical Software, 69(1), 1-24.
Papastamoulis, P. (2018). Overfitting Bayesian mixtures of factor analyzers with an unknown number of components. Computational Statistics and Data Analysis, 124: 220-234. DOI: 10.1016/j.csda.2018.03.007.
}
\keyword{ package }
\seealso{
\code{\link{fabMix}}, \code{\link{plot.fabMix.object}}
}
\examples{
# TOY EXAMPLE (very small numbers...)
library('fabMix')
n = 8 # sample size
p = 5 # number of variables
q = 2 # number of factors
K = 2 # number of clusters
sINV_diag = 1/((1:p)) # diagonal of inverse variance of errors
set.seed(100)
syntheticDataset <- simData(sameLambda=TRUE,K.true = K, n = n, q = q, p = p,
sINV_values = sINV_diag)
colnames(syntheticDataset$data) <- paste0("x_",1:p)
qRange <- 1:2 # range of values for the number of factors
Kmax <- 20 # number of components for the overfitted mixture model
nChains <- 2 # number of parallel heated chains
# Run `fabMix` for a _small_ number of iterations for the
# `UUU` (maximal model) and `CCC` (minimal model) parameterizations,
# using the default prior parallel heating parameters `dirPriorAlphas`.
# NOTE: `dirPriorAlphas` may require some tuning in general.
set.seed(3)
fm <- fabMix( model = c("UUU", "CCC"), nChains = 2,
rawData = syntheticDataset$data, outDir = "toyExample",
Kmax = Kmax, mCycles = 4, burnCycles = 1, q = qRange,
g = 0.5, h = 0.5, alpha_sigma = 0.5, beta_sigma = 0.5,
warm_up_overfitting = 2, warm_up = 3)
# WARNING: the following parameters:
# nChains, mCycles, burnCycles, warm_up_overfitting, warm_up
# should take (much) _larger_ values. E.g. a typical implementation consists of:
# nChains = 8, mCycles = 1100, burnCycles = 100,
# warm_up_overfitting = 500, warm_up = 5000.
# Now print a run summary and produce some plots.
print(fm)
plot(fm, what = "BIC")
plot(fm, what = "classification_pairs")
}
| /fabMixPackage/version_4.2/fabMix.Rcheck/00_pkg_src/fabMix/man/fabMix-package.Rd | no_license | mqbssppe/overfittingFABMix | R | false | false | 3,252 | rd | \name{fabMix-package}
\alias{fabMix-package}
\docType{package}
\title{
\packageTitle{fabMix}
}
\description{
\packageDescription{fabMix}
The main function of the package is \code{\link{fabMix}}.
}
\author{
\packageAuthor{fabMix}
Maintainer: \packageMaintainer{fabMix}
}
\references{
Fokoue, E. and Titterington, D.M. (2003). Mixtures of Factor Analysers: Bayesian Estimation and Inference by Stochastic Simulation. Machine Learning, 50(1): 73-94.
McNicholas, P.D. and Murphy, T.B. Statistics and Computing (2008) 18: 285. https://doi.org/10.1007/s11222-008-9056-0.
Papastamoulis P. and Iliopoulos G. (2010). An artificial allocations based solution to the label switching problem in Bayesian analysis of mixtures of distributions. Journal of Computational and Graphical Statistics, 19: 313-331.
Rousseau, J. and Mengersen, K. (2011). Asymptotic behaviour of the posterior distribution in overfitted mixture models. Journal of the Royal Statistical Society, Series B (methodological), 73(5): 689-710.
van Havre, Z., White, N., Rousseau, J. and Mengersen, K. (2015). Overfitting Bayesian Mixture Models with an Unknown Number of Components. PLOS ONE, 10(7): 1-27.
Papastamoulis, P. (2016). \code{label.switching}: An R Package for Dealing with the Label Switching Problem in MCMC Outputs. Journal of Statistical Software, 69(1), 1-24.
Papastamoulis, P. (2018). Overfitting Bayesian mixtures of factor analyzers with an unknown number of components. Computational Statistics and Data Analysis, 124: 220-234. DOI: 10.1016/j.csda.2018.03.007.
}
\keyword{ package }
\seealso{
\code{\link{fabMix}}, \code{\link{plot.fabMix.object}}
}
\examples{
# TOY EXAMPLE (very small numbers...)
library('fabMix')
n = 8 # sample size
p = 5 # number of variables
q = 2 # number of factors
K = 2 # number of clusters
sINV_diag = 1/((1:p)) # diagonal of inverse variance of errors
set.seed(100)
syntheticDataset <- simData(sameLambda=TRUE,K.true = K, n = n, q = q, p = p,
sINV_values = sINV_diag)
colnames(syntheticDataset$data) <- paste0("x_",1:p)
qRange <- 1:2 # range of values for the number of factors
Kmax <- 20 # number of components for the overfitted mixture model
nChains <- 2 # number of parallel heated chains
# Run `fabMix` for a _small_ number of iterations for the
# `UUU` (maximal model) and `CCC` (minimal model) parameterizations,
# using the default prior parallel heating parameters `dirPriorAlphas`.
# NOTE: `dirPriorAlphas` may require some tuning in general.
set.seed(3)
fm <- fabMix( model = c("UUU", "CCC"), nChains = 2,
rawData = syntheticDataset$data, outDir = "toyExample",
Kmax = Kmax, mCycles = 4, burnCycles = 1, q = qRange,
g = 0.5, h = 0.5, alpha_sigma = 0.5, beta_sigma = 0.5,
warm_up_overfitting = 2, warm_up = 3)
# WARNING: the following parameters:
# nChains, mCycles, burnCycles, warm_up_overfitting, warm_up
# should take (much) _larger_ values. E.g. a typical implementation consists of:
# nChains = 8, mCycles = 1100, burnCycles = 100,
# warm_up_overfitting = 500, warm_up = 5000.
# Now print a run summary and produce some plots.
print(fm)
plot(fm, what = "BIC")
plot(fm, what = "classification_pairs")
}
|
# Project-local .Rprofile: chain-load the user's personal profile first,
# if one exists.
if (file.exists("~/.Rprofile"))
  source("~/.Rprofile")
# Install/load packages from a library local to this project checkout.
.libPaths("./Rpackages")
# If R_DEVPKG names an in-development package, attach it at startup so the
# session always has the current build; reinstall() rebuilds it (see below).
if (nzchar(Sys.getenv('R_DEVPKG'))) {
  cat("Loading", Sys.getenv('R_DEVPKG'), "library, run reinstall() to reinstall\n")
  library(Sys.getenv('R_DEVPKG'), character.only = TRUE)
}
# Reinstall the in-development package (named by R_DEVPKG by default) from
# its local source directory into the project library, then reload it.
# Any already-attached copy is detached first so the fresh build is used.
reinstall <- function (name = Sys.getenv('R_DEVPKG')) {
  attached <- paste0("package:", name)
  # Detach and unload a previously loaded copy, if present on the search path.
  if (any(search() == attached)) {
    detach(name = attached, unload = TRUE, character.only = TRUE)
  }
  # repos = NULL installs from the local source directory called `name`.
  utils::install.packages(name, repos = NULL, lib = "./Rpackages")
  library(name, character.only = TRUE)
}
# Send DEBUG-and-above log records from the 'mfdb' logger to the console.
library(logging)
addHandler(writeToConsole, logger='mfdb', level='DEBUG')
# Generate `count` random survey sample rows and import them into the MFDB
# database handle `mdb`, tagged with the given `data_source` label.
#
# mdb:         an open mfdb database connection
# data_source: label recorded against the imported rows
# count:       number of sample rows to generate
import_generated_survey <- function (mdb, data_source, count) {
    # Pick `n` random entries from the "name" column of an mfdb taxonomy
    # table. Fix: the original used length(t), which on a data.frame counts
    # columns rather than rows, so only the first few entries of each table
    # could ever be chosen; nrow(t) samples over all available entries.
    choose <- function(t, n) {
        t[sample.int(nrow(t), n, replace = TRUE), "name"]
    }
    mfdb_import_survey(mdb,
        data.frame(
            year = sample(1990:1999, 1, replace = TRUE), # NB: All in the same year
            month = sample(1:12, count, replace = TRUE),
            areacell = sample(c('a','b','c','d','e','f'), count, replace = TRUE),
            # Two species, recycled across the `count` rows by data.frame()
            species = choose(mfdb::species, 2),
            age = sample(10:100, count, replace = TRUE),
            sex = choose(mfdb::sex, count),
            length = sample(100:1000, count, replace = TRUE),
            weight = sample(100:1000, count, replace = TRUE),
            count = 1),
        institute = choose(mfdb::institute, 1),
        gear = choose(mfdb::gear, 1),
        vessel = choose(mfdb::vessel, 1),
        sampling_type = choose(mfdb::sampling_type, 1),
        data_source = data_source)
}
| /.Rprofile | no_license | mareframe/mfdb-workspace | R | false | false | 1,596 | rprofile | if (file.exists("~/.Rprofile"))
source("~/.Rprofile")
# Use a project-local package library.
.libPaths("./Rpackages")
# Attach the in-development package named by R_DEVPKG, if set.
if (nzchar(Sys.getenv('R_DEVPKG'))) {
  cat("Loading", Sys.getenv('R_DEVPKG'), "library, run reinstall() to reinstall\n")
  library(Sys.getenv('R_DEVPKG'), character.only = TRUE)
}
# Reinstall the in-development package (named by R_DEVPKG by default) from
# its local source directory into the project library, then reload it.
# Any already-attached copy is detached first so the fresh build is used.
reinstall <- function (name = Sys.getenv('R_DEVPKG')) {
  attached <- paste0("package:", name)
  # Detach and unload a previously loaded copy, if present on the search path.
  if (any(search() == attached)) {
    detach(name = attached, unload = TRUE, character.only = TRUE)
  }
  # repos = NULL installs from the local source directory called `name`.
  utils::install.packages(name, repos = NULL, lib = "./Rpackages")
  library(name, character.only = TRUE)
}
# Send DEBUG-and-above log records from the 'mfdb' logger to the console.
library(logging)
addHandler(writeToConsole, logger='mfdb', level='DEBUG')
# Generate `count` random survey sample rows and import them into the MFDB
# database handle `mdb`, tagged with the given `data_source` label.
#
# mdb:         an open mfdb database connection
# data_source: label recorded against the imported rows
# count:       number of sample rows to generate
import_generated_survey <- function (mdb, data_source, count) {
    # Pick `n` random entries from the "name" column of an mfdb taxonomy
    # table. Fix: the original used length(t), which on a data.frame counts
    # columns rather than rows, so only the first few entries of each table
    # could ever be chosen; nrow(t) samples over all available entries.
    choose <- function(t, n) {
        t[sample.int(nrow(t), n, replace = TRUE), "name"]
    }
    mfdb_import_survey(mdb,
        data.frame(
            year = sample(1990:1999, 1, replace = TRUE), # NB: All in the same year
            month = sample(1:12, count, replace = TRUE),
            areacell = sample(c('a','b','c','d','e','f'), count, replace = TRUE),
            # Two species, recycled across the `count` rows by data.frame()
            species = choose(mfdb::species, 2),
            age = sample(10:100, count, replace = TRUE),
            sex = choose(mfdb::sex, count),
            length = sample(100:1000, count, replace = TRUE),
            weight = sample(100:1000, count, replace = TRUE),
            count = 1),
        institute = choose(mfdb::institute, 1),
        gear = choose(mfdb::gear, 1),
        vessel = choose(mfdb::vessel, 1),
        sampling_type = choose(mfdb::sampling_type, 1),
        data_source = data_source)
}
|
# Save selected objects from the current session to an .Rdata file.
save(data_fics, genres, file='train_save.Rdata')
# Restore a previously saved workspace (directory name is in Russian).
load('хакатон/.RData')
save(dat, dat_melt, file = "Desktop/andan/pepepe.Rdata")
# NOTE(review): save.image() writes the entire workspace to the same file,
# overwriting the targeted save() just above — confirm this is intended.
save.image(file = "Desktop/andan/pepepe.Rdata")
| /конспекты/repeat.R | no_license | yulqui/andan_2019 | R | false | false | 192 | r | save(data_fics, genres, file='train_save.Rdata')
# Restore a previously saved workspace (directory name is in Russian).
load('хакатон/.RData')
save(dat, dat_melt, file = "Desktop/andan/pepepe.Rdata")
# NOTE(review): save.image() writes the entire workspace to the same file,
# overwriting the targeted save() just above — confirm this is intended.
save.image(file = "Desktop/andan/pepepe.Rdata")
|
# penguins-fwf
# Export penguins_df as a fixed-width text file with no field separator and
# no header row. The width vector gives the character width of each of the
# eight columns, in column order.
# NOTE(review): gdata::write.fwf spells the argument "colnames"; "colname"
# works only via partial argument matching -- confirm.
gdata::write.fwf(penguins_df, file = "data-fwf/penguins_fwf.txt",
width = c(
9, # species
9, # island
4, # bill_length_mm
4, # bill_depth_mm
3, # flipper_length_mm
4, # body_mass_g
6, # sex
4 # year
),
sep = "",
colname = FALSE)
| /data-fwf/data-fwf.R | permissive | MonkmanMH/palmerpenguins | R | false | false | 484 | r | # penguins-fwf
# Export penguins_df as a fixed-width text file with no field separator and
# no header row. The width vector gives the character width of each of the
# eight columns, in column order.
# NOTE(review): gdata::write.fwf spells the argument "colnames"; "colname"
# works only via partial argument matching -- confirm.
gdata::write.fwf(penguins_df, file = "data-fwf/penguins_fwf.txt",
width = c(
9, # species
9, # island
4, # bill_length_mm
4, # bill_depth_mm
3, # flipper_length_mm
4, # body_mass_g
6, # sex
4 # year
),
sep = "",
colname = FALSE)
|
## Getting full dataset
## missing values are ?
complete_data <- read.csv("./data/household_power_consumption.txt",
header=T,
na.strings="?",
sep=";",
colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
complete_data$Date <- as.Date(complete_data$Date, format="%d/%m/%Y")
## Filtering data
## Keep only the two days of interest: 2007-02-01 and 2007-02-02.
data <- subset(complete_data, subset=(Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2")))
## Free the full dataset (large) now that the subset is extracted.
remove(complete_data)
## Converting date and time to datetime
dateTime <- paste(as.Date(data$Date), data$Time)
data$DateTime <- as.POSIXct(dateTime)
## Type l for lines
plot(data$Global_active_power~data$DateTime,
type="l",
ylab="Global Active Power (kilowatts)",
xlab="")
## NOTE(review): dev.copy() re-renders the on-screen plot into the png device;
## plotting directly to png() first can give slightly different output.
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() | /plot2.R | no_license | tprimini/ExData_Plotting1 | R | false | false | 909 | r | ## Getting full dataset
## missing values are ?
complete_data <- read.csv("./data/household_power_consumption.txt",
header=T,
na.strings="?",
sep=";",
colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
complete_data$Date <- as.Date(complete_data$Date, format="%d/%m/%Y")
## Filtering data
## Keep only the two days of interest: 2007-02-01 and 2007-02-02.
data <- subset(complete_data, subset=(Date >= as.Date("2007-2-1") & Date <= as.Date("2007-2-2")))
## Free the full dataset (large) now that the subset is extracted.
remove(complete_data)
## Converting date and time to datetime
dateTime <- paste(as.Date(data$Date), data$Time)
data$DateTime <- as.POSIXct(dateTime)
## Type l for lines
plot(data$Global_active_power~data$DateTime,
type="l",
ylab="Global Active Power (kilowatts)",
xlab="")
## NOTE(review): dev.copy() re-renders the on-screen plot into the png device;
## plotting directly to png() first can give slightly different output.
dev.copy(png, file="plot2.png", height=480, width=480)
dev.off() |
require(lme4)
require(plyr)
require(reshape2)
require(lmerTest)
require(languageR)
require(lattice)
# READ DATA FROM GITHUB ---------------------------------------------------
library(RCurl)
# Copy RAW url from GitHub repository
# Download Study 1 and Study 2 trial-level WIT data from the public GitHub repo.
s1.WIT <- read.csv(textConnection(getURL("https://raw.githubusercontent.com/eplebel/intra-individual-MS/master/data/wit_agg_detailed_s1.csv")))
s2.WIT <- read.csv(textConnection(getURL("https://raw.githubusercontent.com/eplebel/intra-individual-MS/master/data/wit_agg_detailed_s2.csv")))
# Get the experimental trials, order them by Subject and Timestamp
s1.TOT <- subset(s1.WIT,subset = s1.WIT$Block==5)
# NOTE(review): order() is given one-column data frames (s1.TOT['Subj']);
# modern R errors on this -- s1.TOT$Subj / s1.TOT$Started. would be safer. Confirm.
s1.TOT <- s1.TOT[ order(s1.TOT['Subj'],s1.TOT['Started.']), ]
# Log transform RT and make False responses NA
s1.TOT$RT.log <- log(s1.TOT$RT)
s1.TOT$RT.log.c <- s1.TOT$RT.log
s1.TOT$RT.log.c[s1.TOT$Correct=="False"] <- NA
# Create and re-level some factors
# Tool and Race
# NOTE(review): relevel() requires factors; since R 4.0 read.csv no longer
# auto-converts strings to factors, so these columns may need factor() first.
s1.TOT$tool <- relevel(s1.TOT$tool, ref="tool")
s1.TOT$race <- relevel(s1.TOT$race, ref="white")
# Factor Subj
s1.TOT$Subj <- factor(s1.TOT$Subj)
# Factor Condition (Between subjects)
s1.TOT$Condition <- factor(s1.TOT$File,labels=c("Avoid Race","Control","Use Race"))
s1.TOT$Condition <- relevel(s1.TOT$Condition, ref="Control")
# Factor assigning unique number to Prime:Target combinations
s1.TOT$PrimeTarget <- with(s1.TOT, interaction(Stim.2,Stim.3,drop=T))
# NOTE(review): the pattern ".bmp" is a regex where "." matches any character;
# fixed=TRUE would make the intent (strip the literal extension) explicit.
levels(s1.TOT$PrimeTarget) <- gsub(".bmp","",levels(s1.TOT$PrimeTarget))
# Factor for 0-based 'time' vector: Trialnumber
# Per subject, number that subject's trials 0..(k-1); assumes rows are sorted by Subj.
s1.TOT$TrialNum <- unlist(llply(unique(s1.TOT$Subj),function(s) return(0:(length(which(s==s1.TOT$Subj))-1))))
# Rescale trial number to [0, 1] so slopes are comparable across subjects.
s1.TOT$TrialTime <- s1.TOT$TrialNum/max(s1.TOT$TrialNum)
# Factor for unique tool:race combinations
s1.TOT$Prime <- with(s1.TOT, interaction(race,tool,drop=TRUE))
# CHECK NESTED STRUCTURE --------------------------------------------------
# Check nesting of Prime:Target within Subjects...
with(s1.TOT, isNested(PrimeTarget,Subj))
# Prime:Target combinations are NOT nested within Subjects
xtabs(~Subj+PrimeTarget,s1.TOT,drop=T,sparse=T)
with(s1.TOT, isNested(Prime,Subj)) # Not nested, 25 duplicates for each Subject
xtabs(~tool+race,s1.TOT,drop=T,sparse=T)
with(s1.TOT, isNested(PrimeTarget,Prime)) # Prime:Target is nested within Prime
xtabs(~PrimeTarget+Prime,s1.TOT,drop=T,sparse=T)
# This will be the random effect for different stimulus combinations
s1.TOT$Stims <- with(s1.TOT, interaction(Prime,PrimeTarget,drop=TRUE))
# PLOT SLOPES -------------------------------------------------------------
# Lattice panel plot: per-subject OLS lines of centered log(RT) over time,
# one colour per Prime, for 16 subjects (107, 100, plus 14 random).
xyplot(scale(RT.log.c,scale=F) ~ TrialTime | Subj, data=s1.TOT[which(s1.TOT$Subj%in%c(107,100,sample(unique(s1.TOT$Subj),14))), ], layout=c(4,4), groups=Prime, xlab = "Time (a.u.)", ylab = "Grand mean centered log(RT) of correct trials", main="WIT: 16 Random Participants\nRandom Slope Model?",
prepanel = function(x, y, groups) prepanel.lmline(x, y),
panel=function(x,y,groups,subscripts){
panel.xyplot(x,y,groups=groups,subscripts = subscripts)
#panel.superpose(x,y,panel.groups=panel.loess, groups=groups, subscripts = subscripts, lwd=2,alpha=.6)
panel.superpose(x,y,panel.groups=panel.lmline, groups=groups, subscripts = subscripts, lwd=3)},
ylim=c(-1.5,1.5),as.table=T,auto.key = list(points = FALSE, lines = TRUE, columns = 2))
# FULL MODEL --------------------------------------------------------------
#rndID <- which(s1.TOT$Subj%in%sample(unique(s1.TOT$Subj),50)) # Random sample to save time
rndID <- 1:nrow(s1.TOT) # Total sample
# s1.M01 is the "empty" model to compare against when using the Multilevel model for change
# PrimeTarget combinations were administered randomly between Subjects and TrialTime indicates the passage of time
# Growth-model ladder: M00 intercepts only; M01 adds a fixed time trend;
# M02 adds per-subject random time slopes; M03 also adds random slopes by stimulus.
s1.M00 <- lmer(RT.log.c ~ 1 + (1|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M01 <- lmer(RT.log.c ~ 1 + TrialTime + (1|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M02 <- lmer(RT.log.c ~ 1 + TrialTime + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M03 <- lmer(RT.log.c ~ 1 + TrialTime + (TrialTime|Subj) + (TrialTime|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M00,s1.M01,s1.M02,s1.M03)
# Use M02, refit with REML
s1.M02m <- lmer(RT.log.c ~ TrialTime + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ])
(s1.M02m.sum <- summary(s1.M02m))
s1.M10 <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M11 <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (tool+race|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
#s1.M12 <- lmer(RT.log.c ~ TrialTime * Prime + (TrialTime|Subj) + (Prime|Subj) + (1|Stims) + (Prime|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M02,s1.M10,s1.M11)
# Use M11
s1.M11m <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (tool+race|Subj) + (1|Stims), data=s1.TOT[rndID, ])
#,control=lmerControl(optimizer="Nelder_Mead"))
(s1.M11m.sum <- summary(s1.M11m))
# NOTE(review): interaction.plot() expects (x.factor, trace.factor, response)
# vectors; here it is handed the fitted model object and bare names that are
# not in scope -- this call likely errors. Confirm intent.
interaction.plot(TrialTime, Subj, s1.M11m, xlab="Time", ylab="log(RT)")
s1.M20 <- lmer(RT.log.c ~ TrialTime + Prime + Condition + (TrialTime|Subj) + (Prime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M21 <- lmer(RT.log.c ~ TrialTime + Prime * Condition + (TrialTime|Subj) + (Prime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M11,s1.M20,s1.M21)
## --- Random-effect diagnostics for the selected model s1.M11m ---
str(rr1 <- ranef(s1.M11m))
dotplot(rr1,scales = list(x = list(relation = 'free')))[["Subj"]]
# Index of the subject with the smallest random intercept.
which(rr1$Subj==min(rr1$Subj[[1]]))
# Fix: a stray ")" after if(FALSE) was a parse error; the guard now simply
# skips the (slow) scaled-residual plot by default, as apparently intended.
if(FALSE) ## default
plot(resid(s1.M11m, scaled=T))
## --- Profile-likelihood diagnostics ---
## NOTE(review): s1.M1 is never fit anywhere in this script; this presumably
## should be s1.M11m (or another fitted model) -- confirm before running.
pr.M1 <- profile(s1.M1, which="beta_")
xyplot(pr.M1, absVal=TRUE)
fitted <- s1.M1@frame
dotplot(ranef(s1.M1, condVar = TRUE))
print(confint(pr.M1))
xyplot(pr.M1)
densityplot(pr.M1)
splom(pr.M1)
## Fix: an exact duplicate of the seven diagnostic calls above, preceded by a
## dangling "min(" that broke parsing, was removed here.
## Alternative model on a tools-only, correct-trials subset.
## NOTE(review): neither `s1.WIT.tools.correct` nor `WIT.cond.f` is defined
## anywhere in this script -- this section cannot run as-is; confirm where
## those objects are supposed to come from.
s1.M2 = lmer(RT.log ~ TrialNum + race + WIT.cond.f + (race|Subj) + (race|PrimeTarget), data=s1.WIT.tools.correct[rndID, ], REML=FALSE)
summary(s1.M2)
# Profile only the fixed effects ("beta_") using the Nelder-Mead optimizer.
pr.M2 <- profile(s1.M2, optimizer="Nelder_Mead", which="beta_")
xyplot(pr.M2)
densityplot(pr.M2)
splom(pr.M2)
| /analyses/WIT_individualdifferences_fred.R | no_license | eplebel/intra-individual-WIT-MS | R | false | false | 6,124 | r | require(lme4)
require(plyr)
require(reshape2)
require(lmerTest)
require(languageR)
require(lattice)
# READ DATA FROM GITHUB ---------------------------------------------------
library(RCurl)
# Copy RAW url from GitHub repository
# Download Study 1 and Study 2 trial-level WIT data from the public GitHub repo.
s1.WIT <- read.csv(textConnection(getURL("https://raw.githubusercontent.com/eplebel/intra-individual-MS/master/data/wit_agg_detailed_s1.csv")))
s2.WIT <- read.csv(textConnection(getURL("https://raw.githubusercontent.com/eplebel/intra-individual-MS/master/data/wit_agg_detailed_s2.csv")))
# Get the experimental trials, order them by Subject and Timestamp
s1.TOT <- subset(s1.WIT,subset = s1.WIT$Block==5)
# NOTE(review): order() is given one-column data frames (s1.TOT['Subj']);
# modern R errors on this -- s1.TOT$Subj / s1.TOT$Started. would be safer. Confirm.
s1.TOT <- s1.TOT[ order(s1.TOT['Subj'],s1.TOT['Started.']), ]
# Log transform RT and make False responses NA
s1.TOT$RT.log <- log(s1.TOT$RT)
s1.TOT$RT.log.c <- s1.TOT$RT.log
s1.TOT$RT.log.c[s1.TOT$Correct=="False"] <- NA
# Create and re-level some factors
# Tool and Race
# NOTE(review): relevel() requires factors; since R 4.0 read.csv no longer
# auto-converts strings to factors, so these columns may need factor() first.
s1.TOT$tool <- relevel(s1.TOT$tool, ref="tool")
s1.TOT$race <- relevel(s1.TOT$race, ref="white")
# Factor Subj
s1.TOT$Subj <- factor(s1.TOT$Subj)
# Factor Condition (Between subjects)
s1.TOT$Condition <- factor(s1.TOT$File,labels=c("Avoid Race","Control","Use Race"))
s1.TOT$Condition <- relevel(s1.TOT$Condition, ref="Control")
# Factor assigning unique number to Prime:Target combinations
s1.TOT$PrimeTarget <- with(s1.TOT, interaction(Stim.2,Stim.3,drop=T))
# NOTE(review): the pattern ".bmp" is a regex where "." matches any character;
# fixed=TRUE would make the intent (strip the literal extension) explicit.
levels(s1.TOT$PrimeTarget) <- gsub(".bmp","",levels(s1.TOT$PrimeTarget))
# Factor for 0-based 'time' vector: Trialnumber
# Per subject, number that subject's trials 0..(k-1); assumes rows are sorted by Subj.
s1.TOT$TrialNum <- unlist(llply(unique(s1.TOT$Subj),function(s) return(0:(length(which(s==s1.TOT$Subj))-1))))
# Rescale trial number to [0, 1] so slopes are comparable across subjects.
s1.TOT$TrialTime <- s1.TOT$TrialNum/max(s1.TOT$TrialNum)
# Factor for unique tool:race combinations
s1.TOT$Prime <- with(s1.TOT, interaction(race,tool,drop=TRUE))
# CHECK NESTED STRUCTURE --------------------------------------------------
# Check nesting of Prime:Target within Subjects...
with(s1.TOT, isNested(PrimeTarget,Subj))
# Prime:Target combinations are NOT nested within Subjects
xtabs(~Subj+PrimeTarget,s1.TOT,drop=T,sparse=T)
with(s1.TOT, isNested(Prime,Subj)) # Not nested, 25 duplicates for each Subject
xtabs(~tool+race,s1.TOT,drop=T,sparse=T)
with(s1.TOT, isNested(PrimeTarget,Prime)) # Prime:Target is nested within Prime
xtabs(~PrimeTarget+Prime,s1.TOT,drop=T,sparse=T)
# This will be the random effect for different stimulus combinations
s1.TOT$Stims <- with(s1.TOT, interaction(Prime,PrimeTarget,drop=TRUE))
# PLOT SLOPES -------------------------------------------------------------
# Lattice panel plot: per-subject OLS lines of centered log(RT) over time,
# one colour per Prime, for 16 subjects (107, 100, plus 14 random).
xyplot(scale(RT.log.c,scale=F) ~ TrialTime | Subj, data=s1.TOT[which(s1.TOT$Subj%in%c(107,100,sample(unique(s1.TOT$Subj),14))), ], layout=c(4,4), groups=Prime, xlab = "Time (a.u.)", ylab = "Grand mean centered log(RT) of correct trials", main="WIT: 16 Random Participants\nRandom Slope Model?",
prepanel = function(x, y, groups) prepanel.lmline(x, y),
panel=function(x,y,groups,subscripts){
panel.xyplot(x,y,groups=groups,subscripts = subscripts)
#panel.superpose(x,y,panel.groups=panel.loess, groups=groups, subscripts = subscripts, lwd=2,alpha=.6)
panel.superpose(x,y,panel.groups=panel.lmline, groups=groups, subscripts = subscripts, lwd=3)},
ylim=c(-1.5,1.5),as.table=T,auto.key = list(points = FALSE, lines = TRUE, columns = 2))
# FULL MODEL --------------------------------------------------------------
#rndID <- which(s1.TOT$Subj%in%sample(unique(s1.TOT$Subj),50)) # Random sample to save time
rndID <- 1:nrow(s1.TOT) # Total sample
# s1.M01 is the "empty" model to compare against when using the Multilevel model for change
# PrimeTarget combinations were administered randomly between Subjects and TrialTime indicates the passage of time
# Growth-model ladder: M00 intercepts only; M01 adds a fixed time trend;
# M02 adds per-subject random time slopes; M03 also adds random slopes by stimulus.
s1.M00 <- lmer(RT.log.c ~ 1 + (1|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M01 <- lmer(RT.log.c ~ 1 + TrialTime + (1|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M02 <- lmer(RT.log.c ~ 1 + TrialTime + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M03 <- lmer(RT.log.c ~ 1 + TrialTime + (TrialTime|Subj) + (TrialTime|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M00,s1.M01,s1.M02,s1.M03)
# Use M02, refit with REML
s1.M02m <- lmer(RT.log.c ~ TrialTime + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ])
(s1.M02m.sum <- summary(s1.M02m))
s1.M10 <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M11 <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (tool+race|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
#s1.M12 <- lmer(RT.log.c ~ TrialTime * Prime + (TrialTime|Subj) + (Prime|Subj) + (1|Stims) + (Prime|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M02,s1.M10,s1.M11)
# Use M11
s1.M11m <- lmer(RT.log.c ~ TrialTime * tool * race + (TrialTime|Subj) + (tool+race|Subj) + (1|Stims), data=s1.TOT[rndID, ])
#,control=lmerControl(optimizer="Nelder_Mead"))
(s1.M11m.sum <- summary(s1.M11m))
# NOTE(review): interaction.plot() expects (x.factor, trace.factor, response)
# vectors; here it is handed the fitted model object and bare names that are
# not in scope -- this call likely errors. Confirm intent.
interaction.plot(TrialTime, Subj, s1.M11m, xlab="Time", ylab="log(RT)")
s1.M20 <- lmer(RT.log.c ~ TrialTime + Prime + Condition + (TrialTime|Subj) + (Prime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
s1.M21 <- lmer(RT.log.c ~ TrialTime + Prime * Condition + (TrialTime|Subj) + (Prime|Subj) + (1|Stims), data=s1.TOT[rndID, ], REML=F)
anova(s1.M11,s1.M20,s1.M21)
## --- Random-effect diagnostics for the selected model s1.M11m ---
str(rr1 <- ranef(s1.M11m))
dotplot(rr1,scales = list(x = list(relation = 'free')))[["Subj"]]
# Index of the subject with the smallest random intercept.
which(rr1$Subj==min(rr1$Subj[[1]]))
# Fix: a stray ")" after if(FALSE) was a parse error; the guard now simply
# skips the (slow) scaled-residual plot by default, as apparently intended.
if(FALSE) ## default
plot(resid(s1.M11m, scaled=T))
## --- Profile-likelihood diagnostics ---
## NOTE(review): s1.M1 is never fit anywhere in this script; this presumably
## should be s1.M11m (or another fitted model) -- confirm before running.
pr.M1 <- profile(s1.M1, which="beta_")
xyplot(pr.M1, absVal=TRUE)
fitted <- s1.M1@frame
dotplot(ranef(s1.M1, condVar = TRUE))
print(confint(pr.M1))
xyplot(pr.M1)
densityplot(pr.M1)
splom(pr.M1)
## Fix: an exact duplicate of the seven diagnostic calls above, preceded by a
## dangling "min(" that broke parsing, was removed here.
## Alternative model on a tools-only, correct-trials subset.
## NOTE(review): neither `s1.WIT.tools.correct` nor `WIT.cond.f` is defined
## anywhere in this script -- this section cannot run as-is; confirm where
## those objects are supposed to come from.
s1.M2 = lmer(RT.log ~ TrialNum + race + WIT.cond.f + (race|Subj) + (race|PrimeTarget), data=s1.WIT.tools.correct[rndID, ], REML=FALSE)
summary(s1.M2)
# Profile only the fixed effects ("beta_") using the Nelder-Mead optimizer.
pr.M2 <- profile(s1.M2, optimizer="Nelder_Mead", which="beta_")
xyplot(pr.M2)
densityplot(pr.M2)
splom(pr.M2)
|
## This program reads the frequencies files produced by the do_lda_analysis
## extracts top topics and calculates their corresponding F for X=5, X=20 and X=50
## forms an input file to plot those values
##
##
##
## Author. Jorge Lopez May 2016
library(plyr) ## to use arrange f
readFile <- function(fName) {
###########################
  ## Read a tab-separated .topic_frequency file and return it sorted
  ## numerically by topicCount.
  ##
  ## fName: path of the file to read.
  ## Returns: a data frame with the shifted dummy column dropped.
  ##
  ## Fix: the function previously ignored its `fName` argument and read the
  ## global `readFromfileName` instead; it now uses the argument (all existing
  ## call sites pass readFromfileName, so behavior is unchanged for them).
  dat <- read.delim(file = fName, header = TRUE, fill = TRUE, sep = "\t",
                    row.names = NULL)
  ## The raw files have one header field too few, so every header is shifted
  ## one column left; shift the names back and drop the resulting dummy column.
  colnames(dat) <- c(colnames(dat)[-1], "x")
  dat$x <- NULL
  ## Sort numerically: topicCount is read as character, so strtoi() is needed
  ## to avoid lexicographic ordering ("10" < "2").
  dat <- arrange(dat, strtoi(topicCount))
  return(dat)
}
## Scan `dat` (sorted by topicCount) group-by-group and, for each distinct
## topicCount value K reached at or after the first row with topicCount >= X,
## append "K<TAB>F" to the global `outFileFN`, where F is the share of the
## group's total frequency held by its X largest topic frequencies.
## Returns 1 on success, 0 when X exceeds the number of rows.
##
## NOTE(review): `subCountVec` is never reset between groups -- after a group
## is flushed only the index `j` is zeroed, so stale tail entries from a
## larger earlier group can leak into the F of a smaller later group. Confirm
## whether this is intended.
## NOTE(review): if a group has fewer than X entries, subCountVec[1:X] indexes
## past the end, yielding NA and making F NA.
## NOTE(review): `F` shadows the built-in FALSE alias; a different local name
## would be safer.
calcGroups <- function(dat, X) {
###########################
## X must be <= nrow(dat)
subCountVec <- c()
j <- 0 ## initialize j index of subCountVec
i <- 1
#$# cat("*** X is ", X, "\n ")
if(X > nrow(dat)) { return(0) } ##nothing to do here
while (strtoi(dat$topicCount[i]) < X) {
i <- i + 1;
#$#cat("***dat$topicCount[", i, "]=", dat$topicCount[i], "\n")
#$#cat("***X=", X, "\n")
#$#cat("***i=", i, "\n")
} ## find element >= X
for (i in i:nrow(dat)) {
#$#cat("i =", i, "\n")
if (!is.na(dat$topicCount[i+1])) {
## Same topicCount on the next row: keep accumulating this group's frequencies.
if (dat$topicCount[i] == dat$topicCount[i+1]) {
#$#cat ("topic.Count[", i, "] and topic.Count[", i+1, "] are the same\n")
j<-j+1
k<-dat$topicCount[i]
subCountVec[j]<-dat$topic.frequency[i]
subCountVec[j+1]<-dat$topic.frequency[i+1]
} else {
## topicCount changes at the next row: flush the current group.
k<-dat$topicCount[i]
#$#cat("**k is:", k, "\n")
j <- j+1
#$#cat("j is", j, "\n")
subCountVec[j]<-dat$topic.frequency[i]
#$#cat("Change of TopicCount to ", dat$topicCount[i], "\n")
j<-0
#$#cat("subCountVec Sorted is:\n")
subCountVec <- sort(subCountVec, decreasing=T)
#$#cat(subCountVec)
## calculate F
F <- sum(subCountVec[1:X])/sum(subCountVec)
#$#cat(">>>>>>>>>>>>>>>>>>> K, F is", k, F, "\n")
cat(k, F, "\n", sep="\t", append = T, file = outFileFN) # to file
## readline(prompt="press any key to continue ")
} ## else
} else {
## Last row of the data: flush the final group.
#$#cat ("EOF reached \n")
k<-dat$topicCount[i]
#$#cat("subCountVec Sorted is:\n")
subCountVec <- sort(subCountVec, decreasing=T)
#$#cat(subCountVec)
## calculate F
F <- sum(subCountVec[1:X])/sum(subCountVec)
cat(k, F, "\n", sep="\t", append = T, file = outFileFN) # to file
#$#cat("**k:", k, " and F:", F, "\n")
}
} ##for
return(1)
} ## function
################################################ main #############################################
## For every *.topic_frequency file in the working directory, compute F for the
## X = 5, 20 and 50 top topics and write one tab-separated K/F table per X.
## The three per-X sections of the original script were identical apart from
## the X value and the "05"/"20"/"50" file-name tag, so they are now one loop;
## seq_along() also fixes the 1:length(listf) bug when no files are found.
## NOTE: calcGroups() writes to the global `outFileFN`, and readFile()
## historically read the global `readFromfileName`, so both are still set here.
listf <- list.files(pattern = "\\.topic_frequency$")
for (i in seq_along(listf)) {
  readFromfileName <- listf[i]
  cat ("READ FILE =", readFromfileName, "\n")
  ## Read once per input file; the parsed data frame is reused for every X.
  dat <- readFile(readFromfileName)
  for (X in c(5, 20, 50)) {
    outFileFN <- paste("Z:\\Thesis\\plotFreqs\\dba-m\\plots-dba-m\\FileFN-", sprintf("%02d", X), "-", readFromfileName, sep="")
    ## UX outFileFN <- paste("~/thesis/plotFreqs/dba-m/plots-dba-m/FileFN-", sprintf("%02d", X), "-", readFromfileName, sep="")
    cat (sprintf("OUT FILE %02d= ", X), outFileFN, "\n")
    if (file.exists(outFileFN)) file.remove(outFileFN) ## remove the output file if already exists
    cat("K", "F", "\n", sep="\t", append = TRUE, file = outFileFN) # header row
    pg <- calcGroups(dat, X)
  }
}
| /tools/data_preparation/CalcFK_toPlot_Auto.R | no_license | RUMCS/Thesis | R | false | false | 4,623 | r | ## This program reads the frequencies files produced by the do_lda_analysis
## extracts top topics and calculates their corresponding F for X=5, X=20 and X=50
## forms an input file to plot those values
##
##
##
## Author. Jorge Lopez May 2016
library(plyr) ## to use arrange f
readFile <- function(fName) {
###########################
  ## Read a tab-separated .topic_frequency file and return it sorted
  ## numerically by topicCount.
  ##
  ## fName: path of the file to read.
  ## Returns: a data frame with the shifted dummy column dropped.
  ##
  ## Fix: the function previously ignored its `fName` argument and read the
  ## global `readFromfileName` instead; it now uses the argument (all existing
  ## call sites pass readFromfileName, so behavior is unchanged for them).
  dat <- read.delim(file = fName, header = TRUE, fill = TRUE, sep = "\t",
                    row.names = NULL)
  ## The raw files have one header field too few, so every header is shifted
  ## one column left; shift the names back and drop the resulting dummy column.
  colnames(dat) <- c(colnames(dat)[-1], "x")
  dat$x <- NULL
  ## Sort numerically: topicCount is read as character, so strtoi() is needed
  ## to avoid lexicographic ordering ("10" < "2").
  dat <- arrange(dat, strtoi(topicCount))
  return(dat)
}
## Scan `dat` (sorted by topicCount) group-by-group and, for each distinct
## topicCount value K reached at or after the first row with topicCount >= X,
## append "K<TAB>F" to the global `outFileFN`, where F is the share of the
## group's total frequency held by its X largest topic frequencies.
## Returns 1 on success, 0 when X exceeds the number of rows.
##
## NOTE(review): `subCountVec` is never reset between groups -- after a group
## is flushed only the index `j` is zeroed, so stale tail entries from a
## larger earlier group can leak into the F of a smaller later group. Confirm
## whether this is intended.
## NOTE(review): if a group has fewer than X entries, subCountVec[1:X] indexes
## past the end, yielding NA and making F NA.
## NOTE(review): `F` shadows the built-in FALSE alias; a different local name
## would be safer.
calcGroups <- function(dat, X) {
###########################
## X must be <= nrow(dat)
subCountVec <- c()
j <- 0 ## initialize j index of subCountVec
i <- 1
#$# cat("*** X is ", X, "\n ")
if(X > nrow(dat)) { return(0) } ##nothing to do here
while (strtoi(dat$topicCount[i]) < X) {
i <- i + 1;
#$#cat("***dat$topicCount[", i, "]=", dat$topicCount[i], "\n")
#$#cat("***X=", X, "\n")
#$#cat("***i=", i, "\n")
} ## find element >= X
for (i in i:nrow(dat)) {
#$#cat("i =", i, "\n")
if (!is.na(dat$topicCount[i+1])) {
## Same topicCount on the next row: keep accumulating this group's frequencies.
if (dat$topicCount[i] == dat$topicCount[i+1]) {
#$#cat ("topic.Count[", i, "] and topic.Count[", i+1, "] are the same\n")
j<-j+1
k<-dat$topicCount[i]
subCountVec[j]<-dat$topic.frequency[i]
subCountVec[j+1]<-dat$topic.frequency[i+1]
} else {
## topicCount changes at the next row: flush the current group.
k<-dat$topicCount[i]
#$#cat("**k is:", k, "\n")
j <- j+1
#$#cat("j is", j, "\n")
subCountVec[j]<-dat$topic.frequency[i]
#$#cat("Change of TopicCount to ", dat$topicCount[i], "\n")
j<-0
#$#cat("subCountVec Sorted is:\n")
subCountVec <- sort(subCountVec, decreasing=T)
#$#cat(subCountVec)
## calculate F
F <- sum(subCountVec[1:X])/sum(subCountVec)
#$#cat(">>>>>>>>>>>>>>>>>>> K, F is", k, F, "\n")
cat(k, F, "\n", sep="\t", append = T, file = outFileFN) # to file
## readline(prompt="press any key to continue ")
} ## else
} else {
## Last row of the data: flush the final group.
#$#cat ("EOF reached \n")
k<-dat$topicCount[i]
#$#cat("subCountVec Sorted is:\n")
subCountVec <- sort(subCountVec, decreasing=T)
#$#cat(subCountVec)
## calculate F
F <- sum(subCountVec[1:X])/sum(subCountVec)
cat(k, F, "\n", sep="\t", append = T, file = outFileFN) # to file
#$#cat("**k:", k, " and F:", F, "\n")
}
} ##for
return(1)
} ## function
################################################ main #############################################
## For every *.topic_frequency file in the working directory, compute F for the
## X = 5, 20 and 50 top topics and write one tab-separated K/F table per X.
## The three per-X sections of the original script were identical apart from
## the X value and the "05"/"20"/"50" file-name tag, so they are now one loop;
## seq_along() also fixes the 1:length(listf) bug when no files are found.
## NOTE: calcGroups() writes to the global `outFileFN`, and readFile()
## historically read the global `readFromfileName`, so both are still set here.
listf <- list.files(pattern = "\\.topic_frequency$")
for (i in seq_along(listf)) {
  readFromfileName <- listf[i]
  cat ("READ FILE =", readFromfileName, "\n")
  ## Read once per input file; the parsed data frame is reused for every X.
  dat <- readFile(readFromfileName)
  for (X in c(5, 20, 50)) {
    outFileFN <- paste("Z:\\Thesis\\plotFreqs\\dba-m\\plots-dba-m\\FileFN-", sprintf("%02d", X), "-", readFromfileName, sep="")
    ## UX outFileFN <- paste("~/thesis/plotFreqs/dba-m/plots-dba-m/FileFN-", sprintf("%02d", X), "-", readFromfileName, sep="")
    cat (sprintf("OUT FILE %02d= ", X), outFileFN, "\n")
    if (file.exists(outFileFN)) file.remove(outFileFN) ## remove the output file if already exists
    cat("K", "F", "\n", sep="\t", append = TRUE, file = outFileFN) # header row
    pg <- calcGroups(dat, X)
  }
}
|
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(1.1988711311556e-153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, -3.2670875268094e+126, 3.46013224973186e-111, NA, -Inf, NaN, -2.68464342817217e+45, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, 0), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 2497.54084888141, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232, 2732080554.93861, -1.31213880308799e-29, -4.14807475583356e-246, -5.38280100691954e-169, -4.51575581905204e+118, -3.18836769669394e+228), u = numeric(0))
# Fuzz-test smoke check: call meteor's (unexported) Penman evaporation routine
# on the generated argument list above; ::: reaches into the private namespace.
result <- do.call(meteor:::E_Penman,testlist)
str(result) | /meteor/inst/testfiles/E_Penman/AFL_E_Penman/E_Penman_valgrind_files/1615918582-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,229 | r | testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = c(1.1988711311556e-153, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(NA, -3.2670875268094e+126, 3.46013224973186e-111, NA, -Inf, NaN, -2.68464342817217e+45, 4.44398299790022e+96, 4.98717333739482e-156, 4.22817184106748e-307, -4.59220199702648e-303, 0), temp = c(1.06099789548264e-311, 1.60455557047237e+82, -4.51367941637774e-141, -56857994149.4251, 2497.54084888141, 4.94594336083901e-277, 6.98556032546697e-100, -1.15874942296725e-140, 1.66802644272667e-153, 2.197831277967e+109, 2.39828050885494e-124, -4.39547783740517e+225, 2.23714316185293e+183, 2.72877695990519e+48, -2.99453656397724e+232, 2732080554.93861, -1.31213880308799e-29, -4.14807475583356e-246, -5.38280100691954e-169, -4.51575581905204e+118, -3.18836769669394e+228), u = numeric(0))
# Fuzz-test smoke check: call meteor's (unexported) Penman evaporation routine
# on the generated argument list above; ::: reaches into the private namespace.
result <- do.call(meteor:::E_Penman,testlist)
# Print the structure of the returned value.
str(result)
# Documentation for data
#' @title Country Dictionary for \code{ClimActor}
#' @description A list of commonly found country names and their standardized equivalent
#' for use in the cleaning of non-state actor names. Other information on these
#' countries provided include ISO2 and ISO3, land area, population, and region.
# NOTE(review): @format says 9 variables but only 7 are described below -- confirm.
#' @format country_dict is a data frame with 456 commonly found country names (rows) and 9 variables
#' (columns)
#' @format \describe{
#' \item{\code{wrong}}{char Commonly found country names across different datasets.
#' One row of each country consists of the standardized version of the name}
#' \item{\code{right}}{char Standardized version of the country name}
#' \item{\code{iso}}{char 3 letter ISO codes for the country}
#' \item{\code{region}}{char Region in which the country is located}
#' \item{\code{Landarea}}{double Land area of the country}
#' \item{\code{iso2}}{char 2 letter ISO codes for the country}
#' \item{\code{Population}}{double Population for the country}
#' }
"country_dict"
#' @title Key Dictionary for non-state actors
#' @description The key dictionary contains the commonly found names for non-state climate
#' actors found across different climate databases for use in the cleaning
#' of actor names. The dictionary also includes the different phonetic codes
#' to be used in the phonetic string matching.
#' @format key_dict is a dataframe with 29215 commonly found climate actor names (rows) and
#' 22 variables (columns)
# NOTE(review): the right/wrong descriptions were swapped relative to
# country_dict; they are corrected below -- confirm against the data.
#' @format \describe{
#' \item{\code{right}}{char Standardized version of the actor name}
#' \item{\code{wrong}}{char Commonly found climate actor names across different datasets.
#' One row of each actor consists of the standardized version of the name}
#' \item{\code{iso}}{char 3 letter ISO codes for the country}
#' \item{\code{entity_type}}{char The entity type of the actor (City, Business, etc.)}
#' \item{\code{allcaps}}{char The all capital version of the standardized name}
#' \item{\code{caverphone - statcan}}{char Different phonetic codes of the actor names
#' based on different phonetic algorithms}
#' }
"key_dict"
#' @title Contextuals Database for subnational actors
#' @description The contextuals database contains important contextual data for the
#' subnational climate actors found across the different climate databases. See below for
#' details on what data is included
#' @format contextuals is a data frame with contextual information for 10462 unique actors
#' (rows) and 15 variables (columns)
#' @format \describe{
#' \item{\code{name}}{char Name of the subnational actor}
#' \item{\code{iso}}{char 3 letter ISO codes for the country of actor}
#' \item{\code{country}}{char Country in which actor resides in}
#' \item{\code{entity_type}}{char The entity type of the actor (City, Region, etc.)}
#' \item{\code{region}}{char The broader region which the country of the
#' subnational actor belongs to}
#' \item{\code{area}}{double Total unit area of the actor}
#' \item{\code{area_units}}{char Units which the area of the actor are expressed
#' in}
#' \item{\code{initiatives_committed}}{char Climate initiatives to which the actor pledged
#' commitments to}
#' \item{\code{num_commit}}{int Number of initiatives to which actor pledged commitments
#' to}
#' \item{\code{lat}}{double Latitude of the actor}
#' \item{\code{lng}}{double Longitude of the actor}
#' \item{\code{population}}{double Total population of the actor}
#' \item{\code{population_year}}{int Year of recorded population}
#' \item{\code{state}}{char State in which the actor is situated in}
#' }
"contextuals"
| /R/data.R | no_license | elenabagnera/ClimActor | R | false | false | 3,692 | r | # Documentation for data
#' @title Country Dictionary for \code{ClimActor}
#' @description A list of commonly found country names and their standardized equivalent
#' for use in the cleaning of non-state actor names. Other information on these
#' countries provided include ISO2 and ISO3, land area, population, and region.
# NOTE(review): @format says 9 variables but only 7 are described below -- confirm.
#' @format country_dict is a data frame with 456 commonly found country names (rows) and 9 variables
#' (columns)
#' @format \describe{
#' \item{\code{wrong}}{char Commonly found country names across different datasets.
#' One row of each country consists of the standardized version of the name}
#' \item{\code{right}}{char Standardized version of the country name}
#' \item{\code{iso}}{char 3 letter ISO codes for the country}
#' \item{\code{region}}{char Region in which the country is located}
#' \item{\code{Landarea}}{double Land area of the country}
#' \item{\code{iso2}}{char 2 letter ISO codes for the country}
#' \item{\code{Population}}{double Population for the country}
#' }
"country_dict"
#' @title Key Dictionary for non-state actors
#' @description The key dictionary contains the commonly found names for non-state climate
#' actors found across different climate databases for use in the cleaning
#' of actor names. The dictionary also includes the different phonetic codes
#' to be used in the phonetic string matching.
#' @format key_dict is a dataframe with 29215 commonly found climate actor names (rows) and
#' 22 variables (columns)
# NOTE(review): the right/wrong descriptions were swapped relative to
# country_dict; they are corrected below -- confirm against the data.
#' @format \describe{
#' \item{\code{right}}{char Standardized version of the actor name}
#' \item{\code{wrong}}{char Commonly found climate actor names across different datasets.
#' One row of each actor consists of the standardized version of the name}
#' \item{\code{iso}}{char 3 letter ISO codes for the country}
#' \item{\code{entity_type}}{char The entity type of the actor (City, Business, etc.)}
#' \item{\code{allcaps}}{char The all capital version of the standardized name}
#' \item{\code{caverphone - statcan}}{char Different phonetic codes of the actor names
#' based on different phonetic algorithms}
#' }
"key_dict"
#' @title Contextuals Database for subnational actors
#' @description The contextuals database contains important contextual data for the
#' subnational climate actors found across the different climate databases. See below for
#' details on what data is included
#' @format contextuals is a data frame with contextual information for 10462 unique actors
#' (rows) and 15 variables (columns)
#' @format \describe{
#' \item{\code{name}}{char Name of the subnational actor}
#' \item{\code{iso}}{char 3 letter ISO codes for the country of actor}
#' \item{\code{country}}{char Country in which actor resides in}
#' \item{\code{entity_type}}{char The entity type of the actor (City, Region, etc.)}
#' \item{\code{region}}{char The broader region which the country of the
#' subnational actor belongs to}
#' \item{\code{area}}{double Total unit area of the actor}
#' \item{\code{area_units}}{char Units which the area of the actor are expressed
#' in}
#' \item{\code{initiatives_committed}}{char Climate initiatives to which the actor pledged
#' commitments to}
#' \item{\code{num_commit}}{int Number of initiatives to which actor pledged commitments
#' to}
#' \item{\code{lat}}{double Latitude of the actor}
#' \item{\code{lng}}{double Longitude of the actor}
#' \item{\code{population}}{double Total population of the actor}
#' \item{\code{population_year}}{int Year of recorded population}
#' \item{\code{state}}{char State in which the actor is situated in}
#' }
"contextuals"
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files (one per monitor, named "001.csv"
  ## ... "332.csv")
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant for which we will calculate the mean;
  ## either "sulfate" or "nitrate"
  ## 'id' is an integer vector indicating the monitor ID numbers
  ## to be used
  ## Return the mean of the pollutant across all monitors listed
  ## in the 'id' vector (ignoring NA values)
  ##
  ## Fixes:
  ##  * sprintf("%03s", i) space-pads rather than zero-pads strings on most
  ##    platforms (producing "  1.csv"); "%03d" zero-pads the integer id
  ##    correctly ("001.csv").
  ##  * read all files first and rbind once, instead of growing the data
  ##    frame inside the loop (avoids quadratic copying).
  files <- file.path(directory, sprintf("%03d.csv", id))
  monitordata <- do.call(rbind, lapply(files, read.csv))
  mean(monitordata[[pollutant]], na.rm = TRUE)
}
| /pollutantmean.R | no_license | oanaradulescu/ProgrammingAssignment1 | R | false | false | 776 | r |
pollutantmean <- function(directory, pollutant, id = 1:332) {
        ## 'directory': character scalar, folder holding the monitor CSV
        ##   files, named with zero-padded IDs ("001.csv", ..., "332.csv").
        ## 'pollutant': character scalar naming the column to average,
        ##   either "sulfate" or "nitrate".
        ## 'id': integer vector of monitor IDs to read.
        ## Returns the mean of 'pollutant' across all requested monitors,
        ## ignoring NA values.
        ##
        ## BUG FIX: the original built file names with sprintf("%03s", i);
        ## zero-padding of %s is platform-dependent and typically produces
        ## "  1.csv" instead of "001.csv", so no file is found.  %03d pads
        ## integers reliably (and is vectorized over 'id').
        files <- file.path(directory, sprintf("%03d.csv", id))
        ## Read every monitor, then stack once (avoids growing a data
        ## frame with rbind inside a loop).
        monitordata <- do.call(rbind, lapply(files, read.csv))
        mean(monitordata[[pollutant]], na.rm = TRUE)
}
|
# posterior.R
# draw posterior + mean + HDI
# Plots a normal posterior density N(106, 10) over an IQ grid, marking the
# posterior mean and the central 95% interval.
# NOTE(review): ggplot() is used below but library(ggplot2) is never called
# in this file -- presumably attached by the caller; confirm.
x <- seq(60,140,1)            # evaluation grid (IQ values 60..140)
y <- dnorm(x, 106 ,10)        # posterior density at each grid point
q1 <- qnorm(0.025, 106,10)    # 2.5% quantile (lower 95% bound)
q2 <- qnorm(0.975, 106,10)    # 97.5% quantile (upper 95% bound)
y2 <- ifelse(x>q1 & x<q2,y,0) # density truncated to the 95% interval
                              # (kept in df below; its plot layer is commented out)
# Reproduce ggplot2's default discrete color scale: n hues evenly spaced
# around the HCL wheel (15..375 degrees) at luminance 65 and chroma 100.
gg_color_hue <- function(n) {
  hue_angles <- seq(15, 375, length.out = n + 1)
  palette <- hcl(h = hue_angles, l = 65, c = 100)
  palette[1:n]
}
cols = gg_color_hue(3)   # 3-color ggplot-default palette; cols[2] fills the area
df <- data.frame(x,y,y2)
# Density area, dashed vertical line at the mean (106), and a horizontal
# error bar near the baseline spanning the 95% interval (q1..q2).
gp <- ggplot(df, aes(x=x,y=y))+geom_area(fill=cols[2])+
# geom_area(alpha=.2)+
# geom_area(aes(y=y2))+
geom_vline(xintercept = 106,lwd=1.2,lty=2)+
geom_errorbar(aes(xmin=q1,xmax=q2,y=0.006,width=0.005))+
xlab("Mittlerer IQ")+
# theme(axis.title.x=element_text(size=rel(2)))
theme(text=element_text(size=25))+ylab("")
NULL  # stray no-op left in the source; has no effect
plot(gp) | /bayessche-statistik/R/posterior.R | no_license | brandmaier/teaching-stats | R | false | false | 677 | r | # posterior.R
# draw posterior + mean + HDI
# Plots a normal posterior density N(106, 10) over an IQ grid, marking the
# posterior mean and the central 95% interval.
# NOTE(review): ggplot() is used below but library(ggplot2) is never called
# in this file -- presumably attached by the caller; confirm.
x <- seq(60,140,1)            # evaluation grid (IQ values 60..140)
y <- dnorm(x, 106 ,10)        # posterior density at each grid point
q1 <- qnorm(0.025, 106,10)    # 2.5% quantile (lower 95% bound)
q2 <- qnorm(0.975, 106,10)    # 97.5% quantile (upper 95% bound)
y2 <- ifelse(x>q1 & x<q2,y,0) # density truncated to the 95% interval
                              # (kept in df below; its plot layer is commented out)
# Reproduce ggplot2's default discrete color scale: n hues evenly spaced
# around the HCL wheel (15..375 degrees) at luminance 65 and chroma 100.
gg_color_hue <- function(n) {
  hue_angles <- seq(15, 375, length.out = n + 1)
  palette <- hcl(h = hue_angles, l = 65, c = 100)
  palette[1:n]
}
cols = gg_color_hue(3)   # 3-color ggplot-default palette; cols[2] fills the area
df <- data.frame(x,y,y2)
# Density area, dashed vertical line at the mean (106), and a horizontal
# error bar near the baseline spanning the 95% interval (q1..q2).
gp <- ggplot(df, aes(x=x,y=y))+geom_area(fill=cols[2])+
# geom_area(alpha=.2)+
# geom_area(aes(y=y2))+
geom_vline(xintercept = 106,lwd=1.2,lty=2)+
geom_errorbar(aes(xmin=q1,xmax=q2,y=0.006,width=0.005))+
xlab("Mittlerer IQ")+
# theme(axis.title.x=element_text(size=rel(2)))
theme(text=element_text(size=25))+ylab("")
NULL  # stray no-op left in the source; has no effect
plot(gp) |
synthetic_nearsamples_WGS_calratio <-
  function(tumor, counts,
           bin.size = 100000,
           rm.centromere = TRUE, centromereBins = NULL,
           K = 5,
           reads.threshold = 50, chrX = FALSE,
           verbose = FALSE,
           prefix = NULL, result.dir = ".")
  {
    ## Build a synthetic matched normal for a WGS tumor sample from the K
    ## "nearest" normals in 'counts' (the K columns whose tumor/normal
    ## log-ratio is smoothest), then return the per-bin coverage ratio.
    ##
    ## tumor:   data frame; cols 1:3 are chr/start/end, plus a "reads" column.
    ## counts:  matrix/data frame of normal read counts, one column per
    ##          normal sample, rows aligned with 'tumor'.
    ## bin.size: bin width (bp); selects the pre-computed centromere bins.
    ## rm.centromere/centromereBins: drop centromeric bins, either from the
    ##          package's CentromereAnnotations or a user-supplied file.
    ## K:       number of nearest normals pooled into the synthetic normal.
    ## reads.threshold: bins with fewer reads are zeroed (treated as missing).
    ## chrX:    if FALSE, drop the sex chromosomes from the returned ratios.
    ## prefix/result.dir: naming/location of the "<prefix>_Ratio.bed" output.
    ##          BUG FIX: these were referenced as undefined free globals in
    ##          the original; they are now parameters with safe defaults.
    ## Returns an object of class "WGSRatio".
    options(scipen = 50)
    tumor[, 1] <- gsub("^chr", "", tumor[, 1])
    ## Recode X/Y as numeric chromosome labels (numchrom, numchrom + 1).
    tumor[, 1] <- gsub("^X$", toString(TargetAnnotations$numchrom), tumor[, 1])
    tumor[, 1] <- gsub("^Y$", toString(TargetAnnotations$numchrom + 1), tumor[, 1])
    if (nrow(counts) != nrow(tumor)) {
      stop("Input data and \"counts\" size don't match!")
    }
    ## Zero low-coverage tumor bins once (the original re-applied this
    ## idempotent step on every loop iteration).
    tumor[tumor[, "reads"] < reads.threshold, "reads"] <- 0
    ## Score each candidate normal by the smoothness of the tumor/normal
    ## log-ratio: mean squared first difference, trimmed above the 90th
    ## percentile.  Smaller = closer to the tumor's coverage profile.
    all.value <- vapply(seq_len(ncol(counts)), function(i) {
      normal <- counts[, i]
      normal[normal < reads.threshold] <- 0
      ratio <- tumor[, "reads"] / normal
      ratio <- ratio[is.finite(ratio) & ratio != 0]
      ratio <- ratio / median(ratio, na.rm = TRUE)
      log.ratio <- log2(ratio[!is.na(ratio)] + 0.001)
      diff.sumsquare <- abs(diff(log.ratio))^2
      mean(diff.sumsquare[diff.sumsquare < quantile(diff.sumsquare, 0.9)])
    }, numeric(1))
    minIs <- order(all.value)[seq_len(min(K, ncol(counts)))]
    ## Synthetic normal = per-bin (row-wise) median across the K nearest
    ## normals.  BUG FIX: the original used apply(..., 2, median), which
    ## yields one value per *sample* (length K) and was silently recycled
    ## against the per-bin tumor read vector.
    normal <- apply(counts[, minIs, drop = FALSE], 1, median)
    normal[normal < reads.threshold] <- 0
    ratio <- tumor[, "reads"] / normal
    ratio <-
      ratio / median(ratio[is.finite(ratio) & ratio != 0], na.rm = TRUE)
    ratio[is.infinite(ratio) | is.nan(ratio)] <- NA
    ratio.res <- data.frame(tumor[, c(1:3)], ratio)
    if (rm.centromere == TRUE) {
      if (is.null(centromereBins)) {
        if (!bin.size %in% c(10000, 25000, 50000, 100000)) {
          stop(
            paste0(
              "SynthEx doesn't have centromere bins pre-calculated for bin size of ",
              bin.size,
              ";\n Please use createCentromereBins() to generate the required file or use another bin size."
            )
          )
        } else {
          ## Direct list lookup instead of eval(parse(text = ...)).
          centromere <- CentromereAnnotations[[paste0("bin", bin.size)]]
        }
      } else {
        centromere <- read.delim(centromereBins, header = FALSE, stringsAsFactors = FALSE)
      }
      centromere.IDs <- paste0(centromere[, 1], ":", centromere[, 2])
      ratio.IDs <- paste0(ratio.res[, "chr"], ":", ratio.res[, "start"])
      ratio.res <- ratio.res[!ratio.IDs %in% centromere.IDs, ]
    }
    ## Emit a 0-based BED version of the ratios.
    ratio.bed <- ratio.res
    ratio.bed[, 2] <- ratio.bed[, 2] - 1
    out.file <- if (!is.null(prefix)) paste0(prefix, "_Ratio.bed") else "Ratio.bed"
    write.table(ratio.bed, file.path(result.dir, out.file),
                sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE)
    if (chrX == FALSE) {
      ## Drop the (numerically recoded) X and Y chromosomes.
      ratio.res <-
        ratio.res[ratio.res[, "chr"] != TargetAnnotations$numchrom &
                    ratio.res[, "chr"] != (TargetAnnotations$numchrom + 1), ]
    }
    res <- list(ratio.res, TRUE)
    names(res) <- c("Ratio", "WGS")
    class(res) <- "WGSRatio"
    return(res)
  }
| /R/synthetic_nearsamples_WGS_calratio.R | no_license | thesushantpatil/SynthEx | R | false | false | 3,193 | r | synthetic_nearsamples_WGS_calratio <-
function(tumor, counts,
bin.size = 100000,
rm.centromere = TRUE, centromereBins = NULL,
K = 5,
reads.threshold = 50, chrX = FALSE,
verbose = FALSE)
{
options(scipen = 50)
tumor[, 1] <- gsub("^chr", "", tumor[, 1])
tumor[, 1] <- gsub("^X$", toString(TargetAnnotations$numchrom), tumor[, 1])
tumor[, 1] <- gsub("^Y$", toString(TargetAnnotations$numchrom + 1), tumor[, 1])
if (nrow(counts) != nrow(tumor)) {
stop("Input data and \"counts\" size don't match!")
}
all.value <- NULL
for (i in 1:ncol(counts)) {
normal <- counts[, i]
tumor[tumor[, "reads"] < reads.threshold, "reads"] <-
0
normal[normal < reads.threshold] <- 0
ratio <- tumor[, "reads"] / normal
ratio <- ratio[is.finite(ratio) & ratio != 0]
ratio <- ratio / median(ratio, na.rm = T)
log.ratio <- log2(ratio[!is.na(ratio)] + 0.001)
diff.sumsquare <- abs(diff(log.ratio)) ^ 2
variance <-
mean(diff.sumsquare[diff.sumsquare < quantile(diff.sumsquare, 0.9)])
all.value <- c(all.value, variance)
}
minIs <- order(all.value)[1:K]
normal <- apply(counts[, minIs], 2, median)
normal[normal < reads.threshold] <- 0
ratio <- tumor[, "reads"] / normal
ratio <-
ratio / median(ratio[is.finite(ratio) & ratio != 0], na.rm = T)
ratio[is.infinite(ratio) | is.nan(ratio)] <- NA
ratio.res <- data.frame(tumor[, c(1:3)], ratio)
if (rm.centromere == TRUE) {
if (is.null(centromereBins)) {
if (!bin.size %in% c(10000, 25000, 50000, 100000)) {
stop(
paste0(
"SynthEx doesn't have centromere bins pre-calculated for bin size of ",
bin.size,
";\n Please use createCentromereBins() to generate the required file or use another bin size."
)
)
} else {
# data(CentromereAnnotations)
ss <-
paste0("centromere <- CentromereAnnotations$bin", bin.size)
eval(parse(text = ss))
}
} else {
centromere <- read.delim(centromereBins, header = F, stringsAsFactors = F)
}
centromere.IDs <- paste0(centromere[, 1], ":", centromere[, 2])
ratio.IDs <-
paste0(ratio.res[, "chr"], ":", ratio.res[, "start"])
ratio.res <- ratio.res[!ratio.IDs %in% centromere.IDs,]
}
ratio.bed <- ratio.res
ratio.bed[,2] <- ratio.bed[, 2] -1
if (!is.null(prefix)) {
write.table(ratio.bed, file.path (result.dir, paste0(prefix, "_Ratio.bed")),
sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE
)
} else {
write.table(ratio.bed, file.path (result.dir, "Ratio.bed"),
sep = "\t", quote = FALSE, col.names = TRUE, row.names = FALSE
)
}
if (chrX == FALSE) {
ratio.res <-
ratio.res[ratio.res[, "chr"] != TargetAnnotations$numchrom &
ratio.res[, "chr"] != (TargetAnnotations$numchrom + 1),]
}
res <- list(ratio.res, TRUE)
names(res) <- c("Ratio", "WGS")
class(res) <- "WGSRatio"
return(res)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_list_receipt_rule_sets}
\alias{ses_list_receipt_rule_sets}
\title{Lists the receipt rule sets that exist under your AWS account in the
current AWS Region}
\usage{
ses_list_receipt_rule_sets(NextToken)
}
\arguments{
\item{NextToken}{A token returned from a previous call to \code{ListReceiptRuleSets} to
indicate the position in the receipt rule set list.}
}
\description{
Lists the receipt rule sets that exist under your AWS account in the
current AWS Region. If there are additional receipt rule sets to be
retrieved, you will receive a \code{NextToken} that you can provide to the
next call to \code{ListReceiptRuleSets} to retrieve the additional entries.
}
\details{
For information about managing receipt rule sets, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html}{Amazon SES Developer Guide}.
You can execute this operation no more than once per second.
}
\section{Request syntax}{
\preformatted{svc$list_receipt_rule_sets(
NextToken = "string"
)
}
}
\examples{
\dontrun{
# The following example lists the receipt rule sets that exist under an
# AWS account:
svc$list_receipt_rule_sets(
NextToken = ""
)
}
}
\keyword{internal}
| /cran/paws.customer.engagement/man/ses_list_receipt_rule_sets.Rd | permissive | johnnytommy/paws | R | false | true | 1,314 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ses_operations.R
\name{ses_list_receipt_rule_sets}
\alias{ses_list_receipt_rule_sets}
\title{Lists the receipt rule sets that exist under your AWS account in the
current AWS Region}
\usage{
ses_list_receipt_rule_sets(NextToken)
}
\arguments{
\item{NextToken}{A token returned from a previous call to \code{ListReceiptRuleSets} to
indicate the position in the receipt rule set list.}
}
\description{
Lists the receipt rule sets that exist under your AWS account in the
current AWS Region. If there are additional receipt rule sets to be
retrieved, you will receive a \code{NextToken} that you can provide to the
next call to \code{ListReceiptRuleSets} to retrieve the additional entries.
}
\details{
For information about managing receipt rule sets, see the \href{https://docs.aws.amazon.com/ses/latest/DeveloperGuide/receiving-email-managing-receipt-rule-sets.html}{Amazon SES Developer Guide}.
You can execute this operation no more than once per second.
}
\section{Request syntax}{
\preformatted{svc$list_receipt_rule_sets(
NextToken = "string"
)
}
}
\examples{
\dontrun{
# The following example lists the receipt rule sets that exist under an
# AWS account:
svc$list_receipt_rule_sets(
NextToken = ""
)
}
}
\keyword{internal}
|
# ATAC-seq PCA and differential-accessibility pipeline:
#   1. voom-normalize filtered peak counts (design: Day + Age),
#   2. estimate surrogate variables (sva) capturing unwanted variation,
#   3. regress the surrogate variables out of the normalized matrix,
#   4. correlation heatmaps + PCA on the corrected matrix,
#   5. limma contrasts: old vs young per day, and adjacent days per age.
# NOTE(review): the original script began with rm(list = ls()); removed,
# since clearing the caller's workspace from a script is an anti-pattern.
library(ggplot2)
library(sva)
library(data.table)
library(limma)

# 25 visually distinct colors; indexed by the Day factor in the PCA plots.
c25 <- c("dodgerblue2", "#E31A1C", # red
         "green4",
         "#6A3D9A", # purple
         "#FF7F00", # orange
         "black", "gold1",
         "skyblue2", "#FB9A99", # lt pink
         "palegreen2",
         "#CAB2D6", # lt purple
         "#FDBF6F", # lt orange
         "gray70", "khaki2",
         "maroon", "orchid1", "deeppink1", "blue1", "steelblue4",
         "darkturquoise", "green1", "yellow4", "yellow3",
         "darkorange4", "brown")

# Peak count matrix: rows = peaks (keyed chrom_start_end), cols = replicates.
data <- data.frame(read.table("../age.atac.counts.txt.filtered", header = TRUE, sep = "\t"))
rownames(data) <- paste(data$Chrom, data$Start, data$End, sep = "_")
data$Chrom <- NULL
data$Start <- NULL
data$End <- NULL

# Sample sheet: one row per replicate with Day/Age/Sample/Batch covariates.
batches <- data.frame(read.table("../batches.atac.txt.filtered", header = TRUE, sep = "\t"))
rownames(batches) <- batches$Replicate
batches$Day <- factor(batches$Day)
batches$Age <- factor(batches$Age)
batches$Sample <- factor(batches$Sample)
batches$Batch <- factor(batches$Batch)

# SVA: null model (intercept only) vs full model (Day + Age).
mod0 <- model.matrix(~1, data = batches)
mod1 <- model.matrix(~Day + Age, data = batches)
v <- voom(counts = data, design = mod1, normalize.method = "quantile")
sva.obj <- sva(v$E, mod1, mod0, method = "irw")
sur_var <- data.frame(sva.obj$sv)
# Diagnostic only: association of surrogate variables with known covariates.
summary(lm(sva.obj$sv ~ batches$Age + batches$Day))

full.design.sv <- cbind(mod1, sur_var)
v <- voom(counts = data, design = full.design.sv)
fit <- lmFit(v)
# Subtract the surrogate-variable component from the normalized matrix.
# The SV coefficients occupy the columns after the biological design
# columns (the original hard-coded these as 7:16).
sv.cols <- (ncol(mod1) + 1):ncol(full.design.sv)
sv_contribs <- coefficients(fit)[, sv.cols] %*% t(fit$design[, sv.cols])
filtered <- v$E - sv_contribs
write.table(filtered, "filtered_cpm_peaks.txt", quote = FALSE, sep = "\t")

# Sample-sample correlations on the corrected matrix.
spearman_cor <- cor(filtered, method = "spearman")
write.csv(spearman_cor, "atac.spearman_cor.csv")
heatmap(spearman_cor, Rowv = NA, Colv = NA)
pearson_cor <- cor(filtered, method = "pearson")
heatmap(pearson_cor, Rowv = NA, Colv = NA)
write.csv(pearson_cor, "atac.pearson_cor.csv")

# PCA on samples.  Deliberately neither scaled nor centered, matching the
# original analysis -- confirm this is intended before reuse.
data.pca <- prcomp(t(filtered), scale. = FALSE, center = FALSE)
var_explained <- as.character(round(100 * data.pca$sdev^2 / sum(data.pca$sdev^2), 2))
png("atac.varexplained.png", height = 10, width = 10, units = "in", res = 300)
barplot(100 * data.pca$sdev^2 / sum(data.pca$sdev^2), las = 2,
        ylab = "% Variance Explained", xlab = "Principal Component", ylim = c(0, 100))
# Label every bar (the original hard-coded 1:12 label positions).
text(seq_along(data.pca$sdev), 100 * data.pca$sdev^2 / sum(data.pca$sdev^2),
     labels = var_explained, pos = 3, cex = 1)
dev.off()

day_labels <- batches[rownames(data.pca$x), ]$Day
age_labels <- batches[rownames(data.pca$x), ]$Age
# Pairwise PC scatter plots, points colored by Day.
png(filename = "age.atac.pca.1vs2.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(1, 2)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(1, 2)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC1 vs PC2")
dev.off()
png(filename = "age.atac.pca.2vs3.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(2, 3)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(2, 3)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC2 vs PC3")
dev.off()
png(filename = "age.atac.pca.1vs3.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(1, 3)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(1, 3)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC1 vs PC3")
dev.off()

# Differential peaks: old vs young at each timepoint, and adjacent
# timepoints within each age group.
mod2 <- model.matrix(~0 + Sample, data = batches)
full.design.sv <- cbind(mod2, sur_var)
# Spell out normalize.method (the original relied on partial matching of
# "normalize="); TRUE instead of T.
v <- voom(counts = data, design = full.design.sv, normalize.method = "quantile", plot = TRUE)
fit <- lmFit(v)
cont.matrix <- makeContrasts(d0_Y_vs_d0_O = "Sampled0_Y - Sampled0_Old",
                             d1_Y_vs_d1_O = "Sampled1_Y - Sampled1_Old",
                             d3_Y_vs_d3_O = "Sampled3_Y - Sampled3_Old",
                             d5_Y_vs_d5_O = "Sampled5_Y - Sampled5_Old",
                             d7_Y_vs_d7_O = "Sampled7_Y - Sampled7_Old",
                             d1_Y_vs_d0_Y = "Sampled1_Y - Sampled0_Y",
                             d3_Y_vs_d1_Y = "Sampled3_Y - Sampled1_Y",
                             d5_Y_vs_d3_Y = "Sampled5_Y - Sampled3_Y",
                             d7_Y_vs_d5_Y = "Sampled7_Y - Sampled5_Y",
                             d1_O_vs_d0_O = "Sampled1_Old - Sampled0_Old",
                             d3_O_vs_d1_O = "Sampled3_Old - Sampled1_Old",
                             d5_O_vs_d3_O = "Sampled5_Old - Sampled3_Old",
                             d7_O_vs_d5_O = "Sampled7_Old - Sampled5_Old",
                             levels = full.design.sv)
fit2 <- contrasts.fit(fit, cont.matrix)
e <- eBayes(fit2)
comparisons <- colnames(cont.matrix)
for (i in seq_along(comparisons)) {
  # Peaks with |logFC| >= 1 at adjusted p < 0.01 for this contrast.
  tab <- topTable(e, number = nrow(e), coef = i, lfc = 1, p.value = 0.01)
  names(tab)[1] <- comparisons[i]
  tab$Gene <- rownames(tab)
  write.table(tab, file = paste0("differential_peaks_", comparisons[i], ".tsv"),
              quote = TRUE, sep = "\t", row.names = FALSE)
  png(paste0("volcano_peaks", comparisons[i], ".png"))
  volcanoplot(e, coef = i, highlight = 0, names = rownames(tab), main = comparisons[i])
  dev.off()
}
| /age/pca/pca_atac.R | no_license | annashcherbina/nobel_lab_projects | R | false | false | 4,696 | r | rm(list=ls())
# ATAC-seq PCA and differential-accessibility pipeline:
#   1. voom-normalize filtered peak counts (design: Day + Age),
#   2. estimate surrogate variables (sva) capturing unwanted variation,
#   3. regress the surrogate variables out of the normalized matrix,
#   4. correlation heatmaps + PCA on the corrected matrix,
#   5. limma contrasts: old vs young per day, and adjacent days per age.
library(ggplot2)
library(sva)
library(data.table)
library(limma)

# 25 visually distinct colors; indexed by the Day factor in the PCA plots.
c25 <- c("dodgerblue2", "#E31A1C", # red
         "green4",
         "#6A3D9A", # purple
         "#FF7F00", # orange
         "black", "gold1",
         "skyblue2", "#FB9A99", # lt pink
         "palegreen2",
         "#CAB2D6", # lt purple
         "#FDBF6F", # lt orange
         "gray70", "khaki2",
         "maroon", "orchid1", "deeppink1", "blue1", "steelblue4",
         "darkturquoise", "green1", "yellow4", "yellow3",
         "darkorange4", "brown")

# Peak count matrix: rows = peaks (keyed chrom_start_end), cols = replicates.
data <- data.frame(read.table("../age.atac.counts.txt.filtered", header = TRUE, sep = "\t"))
rownames(data) <- paste(data$Chrom, data$Start, data$End, sep = "_")
data$Chrom <- NULL
data$Start <- NULL
data$End <- NULL

# Sample sheet: one row per replicate with Day/Age/Sample/Batch covariates.
batches <- data.frame(read.table("../batches.atac.txt.filtered", header = TRUE, sep = "\t"))
rownames(batches) <- batches$Replicate
batches$Day <- factor(batches$Day)
batches$Age <- factor(batches$Age)
batches$Sample <- factor(batches$Sample)
batches$Batch <- factor(batches$Batch)

# SVA: null model (intercept only) vs full model (Day + Age).
mod0 <- model.matrix(~1, data = batches)
mod1 <- model.matrix(~Day + Age, data = batches)
v <- voom(counts = data, design = mod1, normalize.method = "quantile")
sva.obj <- sva(v$E, mod1, mod0, method = "irw")
sur_var <- data.frame(sva.obj$sv)
# Diagnostic only: association of surrogate variables with known covariates.
summary(lm(sva.obj$sv ~ batches$Age + batches$Day))

full.design.sv <- cbind(mod1, sur_var)
v <- voom(counts = data, design = full.design.sv)
fit <- lmFit(v)
# Subtract the surrogate-variable component from the normalized matrix.
# The SV coefficients occupy the columns after the biological design
# columns (the original hard-coded these as 7:16).
sv.cols <- (ncol(mod1) + 1):ncol(full.design.sv)
sv_contribs <- coefficients(fit)[, sv.cols] %*% t(fit$design[, sv.cols])
filtered <- v$E - sv_contribs
write.table(filtered, "filtered_cpm_peaks.txt", quote = FALSE, sep = "\t")

# Sample-sample correlations on the corrected matrix.
spearman_cor <- cor(filtered, method = "spearman")
write.csv(spearman_cor, "atac.spearman_cor.csv")
heatmap(spearman_cor, Rowv = NA, Colv = NA)
pearson_cor <- cor(filtered, method = "pearson")
heatmap(pearson_cor, Rowv = NA, Colv = NA)
write.csv(pearson_cor, "atac.pearson_cor.csv")

# PCA on samples.  Deliberately neither scaled nor centered, matching the
# original analysis -- confirm this is intended before reuse.
data.pca <- prcomp(t(filtered), scale. = FALSE, center = FALSE)
var_explained <- as.character(round(100 * data.pca$sdev^2 / sum(data.pca$sdev^2), 2))
png("atac.varexplained.png", height = 10, width = 10, units = "in", res = 300)
barplot(100 * data.pca$sdev^2 / sum(data.pca$sdev^2), las = 2,
        ylab = "% Variance Explained", xlab = "Principal Component", ylim = c(0, 100))
# Label every bar (the original hard-coded 1:12 label positions).
text(seq_along(data.pca$sdev), 100 * data.pca$sdev^2 / sum(data.pca$sdev^2),
     labels = var_explained, pos = 3, cex = 1)
dev.off()

day_labels <- batches[rownames(data.pca$x), ]$Day
age_labels <- batches[rownames(data.pca$x), ]$Age
# Pairwise PC scatter plots, points colored by Day.
png(filename = "age.atac.pca.1vs2.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(1, 2)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(1, 2)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC1 vs PC2")
dev.off()
png(filename = "age.atac.pca.2vs3.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(2, 3)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(2, 3)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC2 vs PC3")
dev.off()
png(filename = "age.atac.pca.1vs3.png", height = 10, width = 10, units = "in", res = 300)
plot(data.pca$x[, c(1, 3)], col = c25[day_labels], pch = 16)
text(data.pca$x[, c(1, 3)], labels = rownames(data.pca$x), pos = 3, cex = 0.5)
title("PC1 vs PC3")
dev.off()

# Differential peaks: old vs young at each timepoint, and adjacent
# timepoints within each age group.
mod2 <- model.matrix(~0 + Sample, data = batches)
full.design.sv <- cbind(mod2, sur_var)
# Spell out normalize.method (the original relied on partial matching of
# "normalize="); TRUE instead of T.
v <- voom(counts = data, design = full.design.sv, normalize.method = "quantile", plot = TRUE)
fit <- lmFit(v)
cont.matrix <- makeContrasts(d0_Y_vs_d0_O = "Sampled0_Y - Sampled0_Old",
                             d1_Y_vs_d1_O = "Sampled1_Y - Sampled1_Old",
                             d3_Y_vs_d3_O = "Sampled3_Y - Sampled3_Old",
                             d5_Y_vs_d5_O = "Sampled5_Y - Sampled5_Old",
                             d7_Y_vs_d7_O = "Sampled7_Y - Sampled7_Old",
                             d1_Y_vs_d0_Y = "Sampled1_Y - Sampled0_Y",
                             d3_Y_vs_d1_Y = "Sampled3_Y - Sampled1_Y",
                             d5_Y_vs_d3_Y = "Sampled5_Y - Sampled3_Y",
                             d7_Y_vs_d5_Y = "Sampled7_Y - Sampled5_Y",
                             d1_O_vs_d0_O = "Sampled1_Old - Sampled0_Old",
                             d3_O_vs_d1_O = "Sampled3_Old - Sampled1_Old",
                             d5_O_vs_d3_O = "Sampled5_Old - Sampled3_Old",
                             d7_O_vs_d5_O = "Sampled7_Old - Sampled5_Old",
                             levels = full.design.sv)
fit2 <- contrasts.fit(fit, cont.matrix)
e <- eBayes(fit2)
comparisons <- colnames(cont.matrix)
for (i in seq_along(comparisons)) {
  # Peaks with |logFC| >= 1 at adjusted p < 0.01 for this contrast.
  tab <- topTable(e, number = nrow(e), coef = i, lfc = 1, p.value = 0.01)
  names(tab)[1] <- comparisons[i]
  tab$Gene <- rownames(tab)
  write.table(tab, file = paste0("differential_peaks_", comparisons[i], ".tsv"),
              quote = TRUE, sep = "\t", row.names = FALSE)
  png(paste0("volcano_peaks", comparisons[i], ".png"))
  volcanoplot(e, coef = i, highlight = 0, names = rownames(tab), main = comparisons[i])
  dev.off()
}
|
library(readr)
library(dplyr)
library(readxl)
library(stringr)

# Clean the SERVEL income/expense Excel exports: normalize the headers,
# repair stray latin-1 bytes in the cell values, and write one CSV per
# input workbook.
in_dir  <- "~/datacampfire/FCI-Challenge/servel_data/income_expenses"
out_dir <- "~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean"
try(dir.create(out_dir))

# Normalize raw column names: strip punctuation, snake_case, lower-case,
# transliterate accents to ASCII, and replace embedded line breaks
# (headers wrapped across lines in Excel) with underscores.
clean_names <- function(nm) {
  nm <- str_replace_all(nm, "[[:punct:]]", "")
  nm <- gsub(" ", "_", nm)
  nm <- str_to_lower(nm)
  nm <- iconv(nm, from = "", to = "ASCII//TRANSLIT", sub = "")
  nm <- gsub("\\~", "", nm)
  nm <- gsub("\\'", "", nm)
  nm <- gsub("[\n\r]", "_", nm)
  nm <- gsub("__", "_", nm)
  nm
}

# Repair latin-1 bytes (accented vowels, n-tilde, degree sign) left in the
# cell values by the export, then write the cleaned CSV to out_dir.
fix_encoding_and_write <- function(df, fname) {
  df %>%
    mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>%  # a-acute
    mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%  # e-acute
    mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%  # i-acute
    mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%  # o-acute
    mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%  # u-acute
    mutate_all(funs(gsub("\xf1", "\u00f1", .))) %>%  # n-tilde
    mutate_all(funs(gsub("\xb0", "\u00b0", .))) %>%  # degree sign
    write_csv(file.path(out_dir, paste0(fname, ".csv")))
}

# "presidencla" below reproduces a typo in the source file's name.
datasets <- c(
  "gastos_elecciones_generales_2013",
  "gastos_elecciones_municipales_2016",
  "ingresos_elecciones_generales_2013",
  "ingresos_elecciones_municipales_2016",
  "ingresos_segunda_vuelta_presidencla_2013",
  "gastos_segunda_vuelta_presidencial_2013"
)

for (fname in datasets) {
  df <- read_excel(file.path(in_dir, paste0(fname, ".xlsx")))
  # BUG FIX: the original cleaned ingresos_elecciones_municipales_2016 with
  # names(gastos_elecciones_municipales_2016), assigning another table's
  # column names to it.  Each file now gets its own cleaned names.  The
  # line-break cleanup is applied uniformly (a no-op for headers that
  # contain no breaks).
  names(df) <- clean_names(names(df))
  if (fname == "gastos_elecciones_generales_2013") {
    df <- select(df, -x1)  # drop the spurious index column in this file
  }
  assign(fname, df)  # keep the per-file data frame in the workspace, as before
  fix_encoding_and_write(df, fname)
}
| /scripts/6_servel_data_expenses.R | no_license | datacampfire/FCI-Challenge | R | false | false | 9,087 | r | library(readr)
library(dplyr)
library(readxl)
library(stringr)
try(dir.create("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean"))
gastos_elecciones_generales_2013 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/gastos_elecciones_generales_2013.xlsx")
names(gastos_elecciones_generales_2013) = str_replace_all(names(gastos_elecciones_generales_2013), "[[:punct:]]", "")
names(gastos_elecciones_generales_2013) = gsub(" ","_",names(gastos_elecciones_generales_2013))
names(gastos_elecciones_generales_2013) = str_to_lower(names(gastos_elecciones_generales_2013))
names(gastos_elecciones_generales_2013) = iconv(names(gastos_elecciones_generales_2013), from="", to="ASCII//TRANSLIT", sub = "")
names(gastos_elecciones_generales_2013) = gsub("\\~","",names(gastos_elecciones_generales_2013))
names(gastos_elecciones_generales_2013) = gsub("\\'","",names(gastos_elecciones_generales_2013))
gastos_elecciones_generales_2013 %>%
select(-x1) %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/gastos_elecciones_generales_2013.csv")
#######
gastos_elecciones_municipales_2016 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/gastos_elecciones_municipales_2016.xlsx")
names(gastos_elecciones_municipales_2016) = str_replace_all(names(gastos_elecciones_municipales_2016), "[[:punct:]]", "")
names(gastos_elecciones_municipales_2016) = gsub(" ","_",names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = str_to_lower(names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = iconv(names(gastos_elecciones_municipales_2016), from="", to="ASCII//TRANSLIT", sub = "")
names(gastos_elecciones_municipales_2016) = gsub("\\~","",names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = gsub("\\'","",names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = gsub("\n","_",names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = gsub("\r","_",names(gastos_elecciones_municipales_2016))
names(gastos_elecciones_municipales_2016) = gsub("__","_",names(gastos_elecciones_municipales_2016))
gastos_elecciones_municipales_2016 %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/gastos_elecciones_municipales_2016.csv")
#######
# Clean the 2013 general-election income sheet: normalise column names,
# repair latin1-encoded accented characters, then save as CSV.
# NOTE(review): unlike the 2016 municipal sections, this one does not collapse
# newlines ("\n", "\r") or doubled underscores in the names -- confirm whether
# this sheet's headers simply never contain them.
ingresos_elecciones_generales_2013 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/ingresos_elecciones_generales_2013.xlsx")
# Header clean-up: strip punctuation, space -> underscore, lower-case,
# transliterate accents to ASCII, then drop leftover tildes and apostrophes.
names(ingresos_elecciones_generales_2013) = str_replace_all(names(ingresos_elecciones_generales_2013), "[[:punct:]]", "")
names(ingresos_elecciones_generales_2013) = gsub(" ","_",names(ingresos_elecciones_generales_2013))
names(ingresos_elecciones_generales_2013) = str_to_lower(names(ingresos_elecciones_generales_2013))
names(ingresos_elecciones_generales_2013) = iconv(names(ingresos_elecciones_generales_2013), from="", to="ASCII//TRANSLIT", sub = "")
names(ingresos_elecciones_generales_2013) = gsub("\\~","",names(ingresos_elecciones_generales_2013))
names(ingresos_elecciones_generales_2013) = gsub("\\'","",names(ingresos_elecciones_generales_2013))
# Replace latin1 bytes with their UTF-8 equivalents in every column and write
# the cleaned table out (the pipeline's result is consumed by write_csv).
ingresos_elecciones_generales_2013 %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/ingresos_elecciones_generales_2013.csv")
#######
# Clean the 2016 municipal-election income sheet: normalise column names,
# repair latin1-encoded accented characters, then save as CSV.
ingresos_elecciones_municipales_2016 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/ingresos_elecciones_municipales_2016.xlsx")
# Header clean-up: strip punctuation, space -> underscore, lower-case,
# transliterate accents to ASCII, then drop leftover tildes and apostrophes.
names(ingresos_elecciones_municipales_2016) = str_replace_all(names(ingresos_elecciones_municipales_2016), "[[:punct:]]", "")
names(ingresos_elecciones_municipales_2016) = gsub(" ","_",names(ingresos_elecciones_municipales_2016))
names(ingresos_elecciones_municipales_2016) = str_to_lower(names(ingresos_elecciones_municipales_2016))
names(ingresos_elecciones_municipales_2016) = iconv(names(ingresos_elecciones_municipales_2016), from="", to="ASCII//TRANSLIT", sub = "")
names(ingresos_elecciones_municipales_2016) = gsub("\\~","",names(ingresos_elecciones_municipales_2016))
names(ingresos_elecciones_municipales_2016) = gsub("\\'","",names(ingresos_elecciones_municipales_2016))
# BUG FIX: the next three steps previously read
# names(gastos_elecciones_municipales_2016) -- a copy-paste slip from the
# expenses section above -- which overwrote this table's headers with the
# expenses table's headers. They now operate on the ingresos table itself.
names(ingresos_elecciones_municipales_2016) = gsub("\n","_",names(ingresos_elecciones_municipales_2016))
names(ingresos_elecciones_municipales_2016) = gsub("\r","_",names(ingresos_elecciones_municipales_2016))
names(ingresos_elecciones_municipales_2016) = gsub("__","_",names(ingresos_elecciones_municipales_2016))
# Replace latin1 bytes with their UTF-8 equivalents in every column and write
# the cleaned table out.
ingresos_elecciones_municipales_2016 %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/ingresos_elecciones_municipales_2016.csv")
#######
# Clean the 2013 presidential run-off ("segunda vuelta") income sheet.
# NOTE(review): "presidencla" (sic) matches the actual source file name --
# do not "fix" the spelling here without also renaming the file on disk.
ingresos_segunda_vuelta_presidencla_2013 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/ingresos_segunda_vuelta_presidencla_2013.xlsx")
# Header clean-up: strip punctuation, space -> underscore, lower-case,
# transliterate accents to ASCII, then drop leftover tildes and apostrophes.
names(ingresos_segunda_vuelta_presidencla_2013) = str_replace_all(names(ingresos_segunda_vuelta_presidencla_2013), "[[:punct:]]", "")
names(ingresos_segunda_vuelta_presidencla_2013) = gsub(" ","_",names(ingresos_segunda_vuelta_presidencla_2013))
names(ingresos_segunda_vuelta_presidencla_2013) = str_to_lower(names(ingresos_segunda_vuelta_presidencla_2013))
names(ingresos_segunda_vuelta_presidencla_2013) = iconv(names(ingresos_segunda_vuelta_presidencla_2013), from="", to="ASCII//TRANSLIT", sub = "")
names(ingresos_segunda_vuelta_presidencla_2013) = gsub("\\~","",names(ingresos_segunda_vuelta_presidencla_2013))
names(ingresos_segunda_vuelta_presidencla_2013) = gsub("\\'","",names(ingresos_segunda_vuelta_presidencla_2013))
# Replace latin1 bytes with their UTF-8 equivalents in every column and write
# the cleaned table out.
ingresos_segunda_vuelta_presidencla_2013 %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/ingresos_segunda_vuelta_presidencla_2013.csv")
########
# Clean the 2013 presidential run-off ("segunda vuelta") expenses sheet.
gastos_segunda_vuelta_presidencial_2013 = read_excel("~/datacampfire/FCI-Challenge/servel_data/income_expenses/gastos_segunda_vuelta_presidencial_2013.xlsx")
# Header clean-up: strip punctuation, space -> underscore, lower-case,
# transliterate accents to ASCII, then drop leftover tildes and apostrophes.
names(gastos_segunda_vuelta_presidencial_2013) = str_replace_all(names(gastos_segunda_vuelta_presidencial_2013), "[[:punct:]]", "")
names(gastos_segunda_vuelta_presidencial_2013) = gsub(" ","_",names(gastos_segunda_vuelta_presidencial_2013))
names(gastos_segunda_vuelta_presidencial_2013) = str_to_lower(names(gastos_segunda_vuelta_presidencial_2013))
names(gastos_segunda_vuelta_presidencial_2013) = iconv(names(gastos_segunda_vuelta_presidencial_2013), from="", to="ASCII//TRANSLIT", sub = "")
names(gastos_segunda_vuelta_presidencial_2013) = gsub("\\~","",names(gastos_segunda_vuelta_presidencial_2013))
names(gastos_segunda_vuelta_presidencial_2013) = gsub("\\'","",names(gastos_segunda_vuelta_presidencial_2013))
# Replace latin1 bytes with their UTF-8 equivalents in every column and write
# the cleaned table out.
gastos_segunda_vuelta_presidencial_2013 %>%
mutate_all(funs(gsub("\xe1", "\u00e1", .))) %>% #fix special characters
mutate_all(funs(gsub("\xe9", "\u00e9", .))) %>%
mutate_all(funs(gsub("\xed", "\u00ed", .))) %>%
mutate_all(funs(gsub("\xf3", "\u00f3", .))) %>%
mutate_all(funs(gsub("\xfa", "\u00fa", .))) %>%
mutate_all(funs(gsub("\xf1" , "\u00f1", .))) %>%
mutate_all(funs(gsub("\xb0" , "\u00b0", .))) %>%
write_csv("~/datacampfire/FCI-Challenge/servel_data/income_expenses_clean/gastos_segunda_vuelta_presidencial_2013.csv")
|
# npbmseSFH: nonparametric bootstrap MSE estimation for the spatial
# Fay-Herriot (SFH) small area model.
#
# The model is first fitted to the sample data by REML via eblupSFH(). Model
# residuals are then standardised and resampled (nonparametric bootstrap,
# B replicates) to produce, for each small area, a naive bootstrap MSE
# estimate and a bias-corrected one that combines the analytical g1/g2 terms
# with their bootstrap counterparts and the bootstrap g3 term.
#
# Arguments:
#   formula   - fixed-part formula (direct estimator ~ auxiliary covariates)
#   vardir    - known sampling variances of the direct estimators
#   proxmat   - square proximity (spatial weights) matrix, one row per area
#   B         - number of bootstrap replicates (default 100)
#   method    - fitting method; only "REML" is accepted
#   MAXITER   - maximum fitting iterations, passed through to eblupSFH
#   PRECISION - convergence tolerance, passed through to eblupSFH
#   data      - optional data frame where formula and vardir are evaluated
#
# Value: list with components
#   est - the eblupSFH() fit on the original data
#   mse - data.frame with columns mse (naive bootstrap MSE) and msebc
#         (bias-corrected bootstrap MSE); NA if the initial fit failed
npbmseSFH <-
function(formula,vardir,proxmat,B=100,method="REML",MAXITER=100,PRECISION=0.0001,data)
{
   result <- list(est=NA, mse=NA)
   if (method!="REML")
      stop(" method=\"",method, "\" must be \"REML\".")
   namevar <- deparse(substitute(vardir))
   if (!missing(data))
   {
      formuladata <- model.frame(formula,na.action = na.omit,data)
      X <- model.matrix(formula,data)
      vardir <- data[,namevar]
   } else
   {
      formuladata <- model.frame(formula,na.action = na.omit)
      X <- model.matrix(formula)
   }
   y <- formuladata[,1]
   # Reconstruct a printable version of the formula for error messages.
   if (attr(attributes(formuladata)$terms,"response")==1)
      textformula <- paste(formula[2],formula[1],formula[3])
   else
      textformula <- paste(formula[1],formula[2])
   # Input validation: no NA values anywhere, conformable sizes, square proxmat.
   if (length(na.action(formuladata))>0)
      stop("Argument formula=",textformula," contains NA values.")
   if (any(is.na(vardir)))
      stop("Argument vardir=",namevar," contains NA values.")
   proxmatname <- deparse(substitute(proxmat))
   if (any(is.na(proxmat)))
      stop("Argument proxmat=",proxmatname," contains NA values.")
   if (!is.matrix(proxmat))
      proxmat <- as.matrix(proxmat)
   nformula <- nrow(X)
   nvardir <- length(vardir)
   nproxmat <- nrow(proxmat)
   if (nformula!=nvardir | nformula!=nproxmat)
      stop(" formula=",textformula," [rows=",nformula,"],\n",
           " vardir=",namevar," [rows=",nvardir,"] and \n",
           " proxmat=",proxmatname," [rows=",nproxmat,"]\n",
           " must be the same length.")
   if (nproxmat!=ncol(proxmat))
      stop("Argument proxmat=",proxmatname," is not a square matrix [rows=",nproxmat,",columns=",ncol(proxmat),"].")
   m<-dim(X)[1]  # Sample size or number of areas
   p<-dim(X)[2]  # Num. of auxiliary variables (including intercept)
   # Fit the model to initial sample data using the given method.
   result$est<-try(eblupSFH(y~X-1,vardir,proxmat,method,MAXITER,PRECISION))
   # BUG FIX: guard against try() returning a "try-error" object. Previously
   # result$est$fit$convergence was read unconditionally, so a failed fit died
   # with "argument is of length zero" instead of returning cleanly.
   if (inherits(result$est, "try-error"))
   {
      warning("The fitting method failed.\n")
      return (result);
   }
   if (result$est$fit$convergence==FALSE)
   {
      warning("The fitting method does not converge.\n")
      return (result);
   }
   # Initial estimators of model coefficients, variance and spatial
   # correlation which will act as true values in the bootstrap procedure.
   Bstim.boot <-result$est$fit$estcoef$beta
   rho.boot   <-result$est$fit$spatialcorr
   sigma2.boot<-result$est$fit$refvar
   # Auxiliary matrices reused throughout; Vr = Gr + diag(vardir) is the
   # covariance of the direct estimators under the fitted model.
   I<-diag(1,m)
   proxmatt<-t(proxmat)
   Xt<-t(X)
   Irhoproxmat<-I-rho.boot*proxmat
   Irhoproxmatt<-t(Irhoproxmat)
   Ar<-solve(Irhoproxmatt%*%Irhoproxmat)
   Gr<-sigma2.boot*Ar
   Vr<-Gr+I*vardir
   Vri<-solve(Vr)
   # Analytical estimators of g1 and g2, used for the bias-corrected
   # PB MSE estimator. (The original computed Qr twice; once is enough.)
   g1sp<-rep(0,m)
   g2sp<-rep(0,m)
   XtVri<-Xt%*%Vri
   Qr<-solve(XtVri%*%X)
   # Calculate g1 and g2 area by area.
   Ga<-Gr-Gr%*%Vri%*%Gr
   Gb<-Gr%*%t(XtVri)
   Xa<-matrix(0,1,p)
   for (i in 1:m) {
      g1sp[i]<-Ga[i,i]
      Xa[1,]<-X[i,]-Gb[i,]
      g2sp[i]<-Xa%*%Qr%*%t(Xa)
   }
   # Residual vectors: marginal residuals and predicted random effects.
   res<-y-X%*%Bstim.boot
   vstim<-Gr%*%Vri%*%res
   # Covariance matrices of the residual vectors.
   VG<-Vr-Gr
   P<-Vri-Vri%*%X%*%Qr%*%Xt%*%Vri
   Ve<-VG%*%P%*%VG
   Vu<-Irhoproxmat%*%Gr%*%P%*%Gr%*%Irhoproxmatt
   # Inverse square roots of the covariance matrices via their spectral
   # decompositions, keeping the first m-p eigenvalues. Each eigen() is now
   # computed once instead of twice (vectors and values were fetched from two
   # separate eigen() calls in the original).
   eigVe<-eigen(Ve)
   VecVe<-eigVe$vectors[,1:(m-p)]
   Valve<-diag(sqrt(1/eigVe$values[1:(m-p)]))
   Vei05<-VecVe%*%Valve%*%t(VecVe)
   eigVu<-eigen(Vu)
   VecVu<-eigVu$vectors[,1:(m-p)]
   ValVu<-diag(sqrt((1/eigVu$values)[1:(m-p)]))
   Vui05<-VecVu%*%ValVu%*%t(VecVu)
   # Standardize the residual vectors (zero mean, target spread).
   ustim<-as.vector(Vui05%*%((Irhoproxmat)%*%vstim))
   estim<-as.vector(Vei05%*%(res-vstim))
   sdu<-sqrt(sigma2.boot)
   u.std<-rep(0,m)
   e.std<-rep(0,m)
   for (i in 1:m){
      u.std[i]<-(sdu*(ustim[i]-mean(ustim)))/
                sqrt(mean((ustim-mean(ustim))^2))
      e.std[i]<-(estim[i]-mean(estim))/
                sqrt(mean((estim-mean(estim))^2))
   }
   # Bootstrap accumulators.
   difmse.npb<-matrix(0,m,1)
   difg3Spat.npb<-matrix(0,m,1)
   g1sp.aux<-matrix(0,m,1)
   g2sp.aux<-matrix(0,m,1)
   difg1sp.npb<-matrix(0,m,1)
   difg2sp.npb<-matrix(0,m,1)
   cat("\nBootstrap procedure with B =",B,"iterations starts.\n")
   boot <- 1
   while (boot<=B)
   {
      # Generate bootstrap data by resampling the standardized residuals.
      u.boot     <-sample(u.std,m,replace=TRUE)
      e.samp     <-sample(e.std,m,replace=TRUE)
      e.boot     <-sqrt(vardir)*e.samp
      v.boot     <-solve(Irhoproxmat)%*%u.boot
      theta.boot <-X%*%Bstim.boot+v.boot
      direct.boot<-theta.boot+e.boot
      # Fit the model to the bootstrap data.
      results.SpFH.boot<-eblupSFH(direct.boot[,1]~X-1,vardir,proxmat,method,MAXITER,PRECISION)
      # Draw a new sample if the estimators are not admissible
      # (non-convergence, negative variance or |rho| > 1).
      if (results.SpFH.boot$fit$convergence==FALSE | results.SpFH.boot$fit$refvar<0 |
          results.SpFH.boot$fit$spatialcorr<(-1) | results.SpFH.boot$fit$spatialcorr>1)
         next
      cat("b =",boot,"\n")
      Bstim.ML.boot<-results.SpFH.boot$fit$estcoef[,1]
      rho.ML.boot<-results.SpFH.boot$fit$spatialcorr
      sigma2.ML.boot<-results.SpFH.boot$fit$refvar
      thetaEB.SpFH.boot<-results.SpFH.boot$eblup
      # Nonparametric bootstrap estimator of g3: squared difference between
      # the bootstrap EBLUP and the "SBLUP" built from the initial-fit
      # variance components.
      Bstim.sblup<-Qr%*%XtVri%*%direct.boot[,1]
      thetaEB.SpFH.sblup.boot<-X%*%Bstim.sblup+Gr%*%Vri%*%
                               (direct.boot[,1]-X%*%Bstim.sblup)
      difg3Spat.npb[,1]<-difg3Spat.npb[,1]+
                         (thetaEB.SpFH.boot-thetaEB.SpFH.sblup.boot)^2
      # Naive nonparametric bootstrap MSE contribution.
      difmse.npb[,1]<-difmse.npb[,1]+(thetaEB.SpFH.boot[,1]-theta.boot)^2
      # g1 and g2 for this bootstrap sample (same algebra as above, using the
      # bootstrap-fit variance components).
      A<-solve((I-rho.ML.boot*proxmatt)%*%(I-rho.ML.boot*proxmat))
      G<-sigma2.ML.boot*A
      V<-G+I*vardir
      Vi<-solve(V)
      XtVi<-Xt%*%Vi
      Q<-solve(XtVi%*%X)
      Ga<-G-G%*%Vi%*%G
      Gb<-G%*%Vi%*%X
      Xa<-matrix(0,1,p)
      for (i in 1:m){
         g1sp.aux[i]<-Ga[i,i]
         Xa[1,]<-X[i,]-Gb[i,]
         g2sp.aux[i]<-Xa%*%Q%*%t(Xa)
      }
      difg1sp.npb<-difg1sp.npb+g1sp.aux
      difg2sp.npb<-difg2sp.npb+g2sp.aux
      boot <- boot+1
   } # End of bootstrap cycle
   # Final naive nonparametric bootstrap MSE estimator.
   mse.npb<-difmse.npb[,1]/B
   # Final bias-corrected nonparametric bootstrap MSE estimator. The averaged
   # bootstrap terms are computed once and reused (the original recomputed
   # difg*.npb/B inline and left these variables unused).
   g3Spat.npb<-difg3Spat.npb/B
   g1sp.npb<-difg1sp.npb/B
   g2sp.npb<-difg2sp.npb/B
   mse.npb2<-2*(g1sp+g2sp)-g1sp.npb[,1]-g2sp.npb[,1]+g3Spat.npb[,1]
   result$mse <- data.frame(mse=mse.npb, msebc=mse.npb2)
   return(result)
}
| /R/npbmseSFH.R | no_license | cran/sae | R | false | false | 7,003 | r | npbmseSFH <-
function(formula,vardir,proxmat,B=100,method="REML",MAXITER=100,PRECISION=0.0001,data)
{
result <- list(est=NA, mse=NA)
if (method!="REML")
stop(" method=\"",method, "\" must be \"REML\".")
namevar <- deparse(substitute(vardir))
if (!missing(data))
{
formuladata <- model.frame(formula,na.action = na.omit,data)
X <- model.matrix(formula,data)
vardir <- data[,namevar]
} else
{
formuladata <- model.frame(formula,na.action = na.omit)
X <- model.matrix(formula)
}
y <- formuladata[,1]
if (attr(attributes(formuladata)$terms,"response")==1)
textformula <- paste(formula[2],formula[1],formula[3])
else
textformula <- paste(formula[1],formula[2])
if (length(na.action(formuladata))>0)
stop("Argument formula=",textformula," contains NA values.")
if (any(is.na(vardir)))
stop("Argument vardir=",namevar," contains NA values.")
proxmatname <- deparse(substitute(proxmat))
if (any(is.na(proxmat)))
stop("Argument proxmat=",proxmatname," contains NA values.")
if (!is.matrix(proxmat))
proxmat <- as.matrix(proxmat)
nformula <- nrow(X)
nvardir <- length(vardir)
nproxmat <- nrow(proxmat)
if (nformula!=nvardir | nformula!=nproxmat)
stop(" formula=",textformula," [rows=",nformula,"],\n",
" vardir=",namevar," [rows=",nvardir,"] and \n",
" proxmat=",proxmatname," [rows=",nproxmat,"]\n",
" must be the same length.")
if (nproxmat!=ncol(proxmat))
stop("Argument proxmat=",proxmatname," is not a square matrix [rows=",nproxmat,",columns=",ncol(proxmat),"].")
m<-dim(X)[1] # Sample size or number of areas
p<-dim(X)[2] # Num. of auxiliary variables (including intercept)
# Fit the model to initial sample data using the given method
result$est<-try(eblupSFH(y~X-1,vardir,proxmat,method,MAXITER,PRECISION))
if (result$est$fit$convergence==FALSE)
{
warning("The fitting method does not converge.\n")
return (result);
}
# Initial estimators of model coefficients, variance and spatial
# correlation which will act as true values in the bootstrap
# procedure.
Bstim.boot <-result$est$fit$estcoef$beta
rho.boot <-result$est$fit$spatialcorr
sigma2.boot<-result$est$fit$refvar
# Auxiliary calculations
I<-diag(1,m)
proxmatt<-t(proxmat)
Xt<-t(X)
Irhoproxmat<-I-rho.boot*proxmat
Irhoproxmatt<-t(Irhoproxmat)
Ar<-solve(Irhoproxmatt%*%Irhoproxmat)
Gr<-sigma2.boot*Ar
Vr<-Gr+I*vardir
Vri<-solve(Vr)
Qr<-solve(Xt%*%Vri%*%X)
# Analytical estimators of g1 and g2, used for the bias-corrected
# PB MSE estimator.
g1sp<-rep(0,m)
g2sp<-rep(0,m)
XtVri<-Xt%*%Vri
Qr<-solve(XtVri%*%X)
# Calculate g1 and g2
Ga<-Gr-Gr%*%Vri%*%Gr
Gb<-Gr%*%t(XtVri)
Xa<-matrix(0,1,p)
for (i in 1:m) {
g1sp[i]<-Ga[i,i]
Xa[1,]<-X[i,]-Gb[i,]
g2sp[i]<-Xa%*%Qr%*%t(Xa)
}
# Residual vectors
res<-y-X%*%Bstim.boot
vstim<-Gr%*%Vri%*%res
# Calculate covariance matrices of residual vectors
VG<-Vr-Gr
P<-Vri-Vri%*%X%*%Qr%*%Xt%*%Vri
Ve<-VG%*%P%*%VG
Vu<-Irhoproxmat%*%Gr%*%P%*%Gr%*%Irhoproxmatt
# Square roots of covariance matrices
VecVe0<-eigen(Ve)$vectors
VecVe<-VecVe0[,1:(m-p)]
ValVe0<-eigen(Ve)$values
Valve<-diag(sqrt(1/ValVe0[1:(m-p)]))
Vei05<-VecVe%*%Valve%*%t(VecVe)
VecVu0<-eigen(Vu)$vectors
VecVu<-VecVu0[,1:(m-p)]
ValVu0<-1/(eigen(Vu)$values)
ValVu<-diag(sqrt(ValVu0[1:(m-p)]))
Vui05<-VecVu%*%ValVu%*%t(VecVu)
# Standardize residual vectors
ustim<-as.vector(Vui05%*%((Irhoproxmat)%*%vstim))
estim<-as.vector(Vei05%*%(res-vstim))
sdu<-sqrt(sigma2.boot)
u.std<-rep(0,m)
e.std<-rep(0,m)
for (i in 1:m){
u.std[i]<-(sdu*(ustim[i]-mean(ustim)))/
sqrt(mean((ustim-mean(ustim))^2))
e.std[i]<-(estim[i]-mean(estim))/
sqrt(mean((estim-mean(estim))^2))
}
# Bootstrap algorithm starts
difmse.npb<-matrix(0,m,1)
difg3Spat.npb<-matrix(0,m,1)
g1sp.aux<-matrix(0,m,1)
g2sp.aux<-matrix(0,m,1)
difg1sp.npb<-matrix(0,m,1)
difg2sp.npb<-matrix(0,m,1)
cat("\nBootstrap procedure with B =",B,"iterations starts.\n")
boot <- 1
while (boot<=B)
{
# Generate boostrap data
u.boot <-sample(u.std,m,replace=TRUE)
e.samp <-sample(e.std,m,replace=TRUE)
e.boot <-sqrt(vardir)*e.samp
v.boot <-solve(Irhoproxmat)%*%u.boot
theta.boot <-X%*%Bstim.boot+v.boot
direct.boot<-theta.boot+e.boot
# Fit the model to bootstrap data
results.SpFH.boot<-eblupSFH(direct.boot[,1]~X-1,vardir,proxmat,method,MAXITER,PRECISION)
# Generate a new sample if estimators are not satisfactory
if (results.SpFH.boot$fit$convergence==FALSE | results.SpFH.boot$fit$refvar<0 |
results.SpFH.boot$fit$spatialcorr<(-1) | results.SpFH.boot$fit$spatialcorr>1)
next
cat("b =",boot,"\n")
Bstim.ML.boot<-results.SpFH.boot$fit$estcoef[,1]
rho.ML.boot<-results.SpFH.boot$fit$spatialcorr
sigma2.ML.boot<-results.SpFH.boot$fit$refvar
thetaEB.SpFH.boot<-results.SpFH.boot$eblup
# Nonparametric bootstrap estimator of g3
Bstim.sblup<-Qr%*%XtVri%*%direct.boot[,1]
thetaEB.SpFH.sblup.boot<-X%*%Bstim.sblup+Gr%*%Vri%*%
(direct.boot[,1]-X%*%Bstim.sblup)
difg3Spat.npb[,1]<-difg3Spat.npb[,1]+
(thetaEB.SpFH.boot-thetaEB.SpFH.sblup.boot)^2
# Naive nonparametric bootstrap MSE
difmse.npb[,1]<-difmse.npb[,1]+(thetaEB.SpFH.boot[,1]-theta.boot)^2
# g1 and g2 for each bootstrap sample
A<-solve((I-rho.ML.boot*proxmatt)%*%(I-rho.ML.boot*proxmat))
G<-sigma2.ML.boot*A
V<-G+I*vardir
Vi<-solve(V)
XtVi<-Xt%*%Vi
Q<-solve(XtVi%*%X)
Ga<-G-G%*%Vi%*%G
Gb<-G%*%Vi%*%X
Xa<-matrix(0,1,p)
for (i in 1:m){
g1sp.aux[i]<-Ga[i,i]
Xa[1,]<-X[i,]-Gb[i,]
g2sp.aux[i]<-Xa%*%Q%*%t(Xa)
}
difg1sp.npb<-difg1sp.npb+g1sp.aux
difg2sp.npb<-difg2sp.npb+g2sp.aux
boot <- boot+1
} # End of bootstrap cycle
# Final naive nonparametric bootstrap MSE estimator
mse.npb<-difmse.npb[,1]/B
# Final bias-corrected nonparametric bootstrap MSE estimator
g3Spat.npb<-difg3Spat.npb/B
g1sp.npb<-difg1sp.npb/B
g2sp.npb<-difg2sp.npb/B
mse.npb2<-2*(g1sp+g2sp)-difg1sp.npb[,1]/B-difg2sp.npb[,1]/B+
difg3Spat.npb[,1]/B
result$mse <- data.frame(mse=mse.npb, msebc=mse.npb2)
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{loadNormativeDataSet}
\alias{loadNormativeDataSet}
\title{loadNormativeDataSet}
\usage{
loadNormativeDataSet(fullXlsFile, sheet)
}
\arguments{
\item{fullXlsFile}{[String] full filename (path + filename) of the selected dataset}

\item{sheet}{sheet of the selected workbook to load}
}
\value{
[]
}
\description{
load normative dataset
}
\examples{
}
| /man/loadNormativeDataSet.Rd | no_license | pyCGM2/rCGM2 | R | false | true | 384 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{loadNormativeDataSet}
\alias{loadNormativeDataSet}
\title{loadNormativeDataSet}
\usage{
loadNormativeDataSet(fullXlsFile, sheet)
}
\arguments{
\item{fullXlsFile}{[String] full filename (path+fileame) of the selected dataset}
}
\value{
[]
}
\description{
load normative dataset
}
\examples{
}
|
## Programmer: Jacques du Plessis
## Date: 24 September 2017
## Course: Exploratory Data Analysis - Week 1
## Assignment Notes:
## 1. We will only be using data from the dates 2007-02-01 and 2007-02-02.
## 2. Read the data from just those dates rather than reading in the entire dataset
## 3. Convert the Date and Time variables to Date/Time classes using strptime
## 4. Note that in this dataset missing values are coded as ?
## 5. Create a separate R code file per plot
## 6. Your code should include code for reading the data so that the plot can be fully reproduced
## 7. This file re-creates plot 4

## The unzipped data file is expected under ./data relative to the current
## working directory. (The machine-specific hard-coded setwd() was removed so
## the script is reproducible on any machine; the data path was already
## relative.)
sourcefilename <- "./data/household_power_consumption.txt"
## read the file's lines once and reuse them for both row searches
## (the original called readLines() on the whole large file twice)
filelines <- readLines(sourcefilename)
## first row matching 1/2/2007; minus 1 to account for the header line
startrow <- grep("1/2/2007", filelines)[1]-1
## first row matching 3/2/2007 marks the end of the two-day window
endrow <- grep("3/2/2007", filelines)[1]-1
## read the header separately (the subset read below skips straight to data)
myheader <-read.table(sourcefilename,header = FALSE, sep = ";",nrows= 1,stringsAsFactors = FALSE)
## read the subset of data using skip & nrows. Set na.strings = "?"
subsetofdata <- read.table(sourcefilename,header = FALSE, sep = ";",na.strings = "?",skip=startrow,nrows= (endrow - startrow))
## Remove NA rows with complete.cases function
subsetofdata <- subsetofdata[which(complete.cases(subsetofdata)),]
## add the header as colnames for the dataframe
subsetofdata <- setNames(subsetofdata, unlist(myheader))
## create a new column by converting the concatenated Date & Time to datetime
subsetofdata$mydatetime <- strptime(paste(subsetofdata$Date,subsetofdata$Time),"%d/%m/%Y %H:%M:%S")
## Plot 4 graphs to plot no 4 png
png(filename = "plot4.png",width = 480, height = 480, units = "px", pointsize = 12)
## create 4 plots from left to right and top to bottom
par(mfrow=c(2,2))
## first plot - Global Active Power
plot(subsetofdata$mydatetime,subsetofdata$Global_active_power,type="l",lty = 1,xlab="",ylab="Global Active Power")
## second plot - Voltage
plot(subsetofdata$mydatetime,subsetofdata$Voltage,type="l",lty = 1,xlab="datetime",ylab="Voltage")
## third plot - Energy sub metering (three series overlaid, box-less legend)
plot(subsetofdata$mydatetime,subsetofdata$Sub_metering_1,type="l",lty = 1,xlab="",ylab="Energy sub metering")
lines(subsetofdata$mydatetime,subsetofdata$Sub_metering_2,type="l",lty = 1,col="red")
lines(subsetofdata$mydatetime,subsetofdata$Sub_metering_3,type="l",lty = 1,col="blue")
legend("topright",lty=1,col=c("black","red","blue"),bty="n",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## fourth plot - Global_reactive_power
plot(subsetofdata$mydatetime,subsetofdata$Global_reactive_power,type="l",lty = 1,xlab="datetime",ylab="Global_reactive_power")
## close the graphical device
dev.off()
| /plot4.R | no_license | J-DP/ExData_Plotting1 | R | false | false | 2,862 | r |
## Programmer: Jacques du Plessis
## Date: 24 September 2017
## Course: Exploratory Data Analysis - Week 1
## Assignment Notes:
## 1. We will only be using data from the dates 2007-02-01 and 2007-02-02.
## 2. Read the data from just those dates rather than reading in the entire dataset
## 3. Convert the Date and Time variables to Date/Time classes using strptime
## 4. Note that in this dataset missing values are coded as ?
## 5. Create a separate R code file per plot
## 6. Your code should include code for reading the data so that the plot can be fully reproduced
## 7. This file re-create plot 4
## set the working directory
setwd("c:\\Development\\R\\Coursera\\04 - Exploratory data analysis\\Week 1")
## specify the source file name and location
sourcefilename <- "./data/household_power_consumption.txt"
## search for the specific row in the dataset
startrow <- grep("1/2/2007", readLines(sourcefilename))[1]-1
## search for the last row of the subset
endrow <- grep("3/2/2007", readLines(sourcefilename))[1]-1
## read the header seperately
myheader <-read.table(sourcefilename,header = FALSE, sep = ";",nrows= 1,stringsAsFactors = FALSE)
## read the subset of data using skip & nrows. Set na.strings = "?"
subsetofdata <- read.table(sourcefilename,header = FALSE, sep = ";",na.strings = "?",skip=startrow,nrows= (endrow - startrow))
## Remove NA rows with complete.cases function
subsetofdata <- subsetofdata[which(complete.cases(subsetofdata)),]
## add the header as colnames for the dataframe
colnames(subsetofdata) <- unlist(myheader)
## create a new column by converting the concatenated V1 & V2 to datetime
subsetofdata$mydatetime <- strptime(paste(subsetofdata$Date,subsetofdata$Time),"%d/%m/%Y %H:%M:%S")
## Plot 4 graphs to plot no 4 png
png(filename = "plot4.png",width = 480, height = 480, units = "px", pointsize = 12)
## create 4 plots from left to right and top to bottom
par(mfrow=c(2,2))
## first plot - Global Active Power
plot(subsetofdata$mydatetime,subsetofdata$Global_active_power,type="l",lty = 1,xlab="",ylab="Global Active Power")
## second plot - Voltage
plot(subsetofdata$mydatetime,subsetofdata$Voltage,type="l",lty = 1,xlab="datetime",ylab="Voltage")
## third plot - Energy sub metering
plot(subsetofdata$mydatetime,subsetofdata$Sub_metering_1,type="l",lty = 1,xlab="",ylab="Energy sub metering")
lines(subsetofdata$mydatetime,subsetofdata$Sub_metering_2,type="l",lty = 1,col="red")
lines(subsetofdata$mydatetime,subsetofdata$Sub_metering_3,type="l",lty = 1,col="blue")
legend("topright",lty=1,col=c("black","red","blue"),bty="n",legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
## fourth plot - Global_reactive_power
plot(subsetofdata$mydatetime,subsetofdata$Global_reactive_power,type="l",lty = 1,xlab="datetime",ylab="Global_reactive_power")
## close the graphical device
dev.off()
|
# You're about to write your first function! Just like you would assign a value
# to a variable with the assignment operator, you assign functions in the following
# way:
#
# function_name <- function(arg1, arg2){
# # Manipulate arguments in some way
# # Return a value
# }
#
# The "variable name" you assign will become the name of your function. arg1 and
# arg2 represent the arguments of your function. You can manipulate the arguments
# you specify within the function. After sourcing the function, you can use the
# function by typing:
#
# function_name(value1, value2)
#
# Below we will create a function called boring_function. This function takes
# the argument `x` as input, and returns the value of x without modifying it.
# Delete the pound sign in front of the x to make the function work! Be sure to
# save this script and type submit() in the console after you make your changes.
# boring_function: the identity function -- hands back whatever value it
# receives, completely unmodified.
boring_function <- function(x) x
submit()                              # hand this script to swirl for checking (swirl-provided helper)
boring_function("My first function!") # demo call: returns the string unchanged
| /week_2/boring_function.R | no_license | gusahu/rprogramming_coursera | R | false | false | 987 | r | # You're about to write your first function! Just like you would assign a value
# to a variable with the assignment operator, you assign functions in the following
# way:
#
# function_name <- function(arg1, arg2){
# # Manipulate arguments in some way
# # Return a value
# }
#
# The "variable name" you assign will become the name of your function. arg1 and
# arg2 represent the arguments of your function. You can manipulate the arguments
# you specify within the function. After sourcing the function, you can use the
# function by typing:
#
# function_name(value1, value2)
#
# Below we will create a function called boring_function. This function takes
# the argument `x` as input, and returns the value of x without modifying it.
# Delete the pound sign in front of the x to make the function work! Be sure to
# save this script and type submit() in the console after you make your changes.
boring_function <- function(x) {
x
}
submit()
boring_function("My first function!")
|
# Plot Bay Area climate with CA climate as background to show change
# in climate over time

# NOTE: the former rm(list=ls()) was removed -- wiping the caller's global
# environment from inside a script is an anti-pattern; start a fresh R
# session instead if a clean workspace is needed.

# Machine switch: selects the directory layout below. The "EOS" branch was
# never filled in and is kept only as a placeholder.
Computer <- "HP"
#-----------------#
# Set directories #
#-----------------#
if(Computer == "EOS") {
#wdir <- 'bien/Naia/'
#cdir <- paste(wdir, 'BCM/CA_2014/Summary/', sep='')
} else if (Computer == "HP") {
wdir <- 'C:/Users/morueta/Documents/Documents_share/Projects/101_TBC3_modelling/Lead-trail_R-project/'
sdir <- paste(wdir, 'Scripts_2/', sep='')            # project helper scripts
hdir <- 'E:/BCM/CA_2014/Summary/HST/Normals_30years/' # historical normals
fdir <- 'E:/BCM/CA_2014/Summary/Futures/Normals_30years/' # future normals
cdir <- 'E:/BCM/CA_2014/Summary/Futures/'
bgdir <- 'C:/Users/morueta/Documents/Documents_share/Projects/100_Postdoc/Data/Background_layers/PROCESSED/'
figdir <- 'E:/Lead-trail/Projections/Figs_Climate-Change-Space/' # output figures
}
#----------------#
# Load libraries #
#----------------#
# library() rather than require(): a missing package now fails immediately
# instead of returning FALSE and letting the script die later.
library(raster)
# Project helper functions; expected to define plotClim() used below.
source(paste(sdir,"00_Functions_trailing-edge.r",sep=""))
#------------#
# Parameters #
#------------#
# All climate scenarios: "HST" (historical) plus GCM/emission-scenario futures.
allScenarios <- c("HST", "GFDL_B1","GFDL_A2","PCM_A2","CNRM_rcp85","CCSM4_rcp85","MIROC_rcp85",
                  "PCM_B1","MIROC3_2_A2","csiro_A1B","GISS_AOM_A1B","MIROC5_rcp26","MIROC_rcp45",
                  "MIROC_rcp60","GISS_rcp26","MRI_rcp26","MPI_rcp45","IPSL_rcp85","Fgoals_rcp85")
# Indices of the future scenarios actually used (skips HST and positions 9-10).
myrange=c(2:8,11:17)
myFutures=allScenarios[myrange]
#sort by increasing MAT
# Load averages of future climates to order plots
fc <- readRDS(paste(cdir, "Futures_mean_climates.rdata",sep=""))
# Order by MAT: historical first, then futures sorted by increasing mean
# annual temperature, so the figure sequence is ordered by warming.
myScenarios <- c("HST", myFutures[match(fc$mod[order(fc$MAT)],myFutures)])
climnames <- sort(c("cwd","djf","jja","ppt")) #use sort to ensure correct names used for predictors
#-------------------#
# Load climate data #
#-------------------#
# Historical 30-year normals: keep the .Rdata layers whose file names carry a
# climate variable code in characters 9-11.
env.files <- list.files(path=hdir, pattern='.Rdata', full.names=FALSE)
files <- env.files[which(substr(env.files,9,11)%in%climnames)]
predictors <- stack(lapply(files,function(x) readRDS(paste(hdir,x,sep=""))))
names(predictors) = climnames
clim <- getValues(predictors)   # full-California climate matrix (cells x variables)
# Bay Area climate
sr <- readRDS(paste(bgdir, "GADM_BayArea_proj.rdata",sep=""))  # Bay Area boundary
# Variables plotted on the y-axes (one panel each) against cwd on the x-axis.
yvars <- c("djf", "jja", "ppt")
xvar <- "cwd"
#----------#
# Plotting #
#----------#
# One jpeg per scenario: each figure stacks length(yvars) panels plotting the
# Bay Area climate subset over the full-California climate background.
for(j in 1:length(myScenarios)) {
mod <- myScenarios[j]
if(mod=="HST") {
preds=predictors   # historical normals were already loaded above
} else {
# Future 30-year normals for the 2070-2099 window of this scenario.
period <- "2070-2099"
files <- paste("BCM2014_",climnames,period,"_wy_ave_",mod,".Rdata", sep='')
preds <- stack(lapply(files,function(x) readRDS(paste(fdir,x,sep=""))))
names(preds) <- climnames
}
# Clip the scenario's rasters to the Bay Area polygon and extract cell values.
srclim <- getValues(mask(crop(preds,sr),sr))
# The numeric prefix j keeps the files' alphabetical order equal to the
# plotting order for the gif-assembly step at the bottom of the script.
jpeg(paste(figdir, "Clim_space_",j, "_", mod, ".jpg", sep=""),width=700,height=2100,quality=100,res=500,pointsize=8)
par(mfrow=c(length(yvars),1),mar=c(2,2,0,0), oma=c(2,2,3,1))
for(y in 1:length(yvars)) {
# plotClim() comes from the sourced helper script; rs is presumably a
# background sample size -- TODO confirm in 00_Functions_trailing-edge.r.
plotClim(C=clim,Csub=srclim,fac=c(xvar,yvars[y]),rs=1e4)
axis(2,ylab=yvars[y])
mtext(yvars[y], side = 2, line= 2.5,las = 3)   # y-axis variable label
if(y==length(yvars)) {
mtext(xvar, side = 1, line= 2.5, cex=0.8)      # shared x-axis label, bottom panel only
}
if(y==1) {
mtext(paste(mod), side = 3, line=1.5, cex=0.8) # scenario title, top panel only
}
}
dev.off()
}
# Assemble the per-scenario jpegs into an animated gif with ImageMagick.
setwd(figdir)
# BUG FIX: the command string previously ended in ...example_1.gif"' -- a
# stray double quote after the gif name left the cmd-line quoting unbalanced.
system('"C:\\Program Files\\ImageMagick-6.9.1-Q16\\convert.exe" -delay 80 *.jpg example_1.gif')
# Same command via shell(); runs unconditionally, presumably as a retry in
# case the absolute path above fails -- TODO confirm or drop the duplicate.
shell("convert -delay 80 *.jpg example_1.gif")
| /02_Plotting/07c_Plot_clim_change-climspace.R | no_license | naiamh/Lead-trail_project | R | false | false | 3,257 | r | # Plot Bay Area climate with CA climate as background to show change
# in climate over time
# Clear workspace
rm(list=ls())
Computer <- "HP"
#-----------------#
# Set directories #
#-----------------#
if(Computer == "EOS") {
#wdir <- 'bien/Naia/'
#cdir <- paste(wdir, 'BCM/CA_2014/Summary/', sep='')
} else if (Computer == "HP") {
wdir <- 'C:/Users/morueta/Documents/Documents_share/Projects/101_TBC3_modelling/Lead-trail_R-project/'
sdir <- paste(wdir, 'Scripts_2/', sep='')
hdir <- 'E:/BCM/CA_2014/Summary/HST/Normals_30years/'
fdir <- 'E:/BCM/CA_2014/Summary/Futures/Normals_30years/'
cdir <- 'E:/BCM/CA_2014/Summary/Futures/'
bgdir <- 'C:/Users/morueta/Documents/Documents_share/Projects/100_Postdoc/Data/Background_layers/PROCESSED/'
figdir <- 'E:/Lead-trail/Projections/Figs_Climate-Change-Space/'
}
#----------------#
# Load libraries #
#----------------#
require(raster)
source(paste(sdir,"00_Functions_trailing-edge.r",sep=""))
#------------#
# Parameters #
#------------#
allScenarios <- c("HST", "GFDL_B1","GFDL_A2","PCM_A2","CNRM_rcp85","CCSM4_rcp85","MIROC_rcp85",
"PCM_B1","MIROC3_2_A2","csiro_A1B","GISS_AOM_A1B","MIROC5_rcp26","MIROC_rcp45",
"MIROC_rcp60","GISS_rcp26","MRI_rcp26","MPI_rcp45","IPSL_rcp85","Fgoals_rcp85")
myrange=c(2:8,11:17)
myFutures=allScenarios[myrange]
#sort by increasing MAT
# Load averages of future climates to order plots
fc <- readRDS(paste(cdir, "Futures_mean_climates.rdata",sep=""))
# Order by MAT
myScenarios <- c("HST", myFutures[match(fc$mod[order(fc$MAT)],myFutures)])
climnames <- sort(c("cwd","djf","jja","ppt")) #use sort to ensure correct names used for predictors
#-------------------#
# Load climate data #
#-------------------#
env.files <- list.files(path=hdir, pattern='.Rdata', full.names=FALSE)
files <- env.files[which(substr(env.files,9,11)%in%climnames)]
predictors <- stack(lapply(files,function(x) readRDS(paste(hdir,x,sep=""))))
names(predictors) = climnames
clim <- getValues(predictors)
# Bay Area climate
sr <- readRDS(paste(bgdir, "GADM_BayArea_proj.rdata",sep=""))
yvars <- c("djf", "jja", "ppt")
xvar <- "cwd"
#----------#
# Plotting #
#----------#
for(j in 1:length(myScenarios)) {
mod <- myScenarios[j]
if(mod=="HST") {
preds=predictors
} else {
period <- "2070-2099"
files <- paste("BCM2014_",climnames,period,"_wy_ave_",mod,".Rdata", sep='')
preds <- stack(lapply(files,function(x) readRDS(paste(fdir,x,sep=""))))
names(preds) <- climnames
}
srclim <- getValues(mask(crop(preds,sr),sr))
jpeg(paste(figdir, "Clim_space_",j, "_", mod, ".jpg", sep=""),width=700,height=2100,quality=100,res=500,pointsize=8)
par(mfrow=c(length(yvars),1),mar=c(2,2,0,0), oma=c(2,2,3,1))
for(y in 1:length(yvars)) {
plotClim(C=clim,Csub=srclim,fac=c(xvar,yvars[y]),rs=1e4)
axis(2,ylab=yvars[y])
mtext(yvars[y], side = 2, line= 2.5,las = 3)
if(y==length(yvars)) {
mtext(xvar, side = 1, line= 2.5, cex=0.8)
}
if(y==1) {
mtext(paste(mod), side = 3, line=1.5, cex=0.8)
}
}
dev.off()
}
setwd(figdir)
system('"C:\\Program Files\\ImageMagick-6.9.1-Q16\\convert.exe" -delay 80 *.jpg example_1.gif"')
shell("convert -delay 80 *.jpg example_1.gif")
|
#Function to create Plot4 as defined in Exploratory Data Analysis - Week 1 project
#Uses a dataset which must be downloaded and extracted from the following location
#https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#Data must be downloaded and unzipped in working directory prior to executing
#Uses libraries: Lubridate,
plot4 <- function() {
#Produces plot4.png (480x480): a 2x2 grid of household power measurements for
#2007-02-01 and 2007-02-02. Assumes lubridate is loaded (year/month/day) and
#the raw data file is in the working directory.
#read file
power.data <- read.csv("household_power_consumption.txt",sep=";",header=TRUE)
#subset data to 2007-02-01 and 2007-02-02 (dates are stored as d/m/Y strings)
power.data.sub <- subset(power.data,Date=="1/2/2007" | Date=="2/2/2007")
#convert to numeric. It's a factor so must be converted to character first
#expect warning "NAs introduced by coercion" due to "?" in the data set
#could be handled more elegantly, but sufficient for this exercise
power.data.sub$Global_active_power <- as.numeric(as.character(power.data.sub$Global_active_power))
power.data.sub$Global_reactive_power <- as.numeric(as.character(power.data.sub$Global_reactive_power))
power.data.sub$Sub_metering_1 <- as.numeric(as.character(power.data.sub$Sub_metering_1))
power.data.sub$Sub_metering_2 <- as.numeric(as.character(power.data.sub$Sub_metering_2))
power.data.sub$Sub_metering_3 <- as.numeric(as.character(power.data.sub$Sub_metering_3))
power.data.sub$Voltage <- as.numeric(as.character(power.data.sub$Voltage))
#convert Date to POSIXlt
power.data.sub$Date <- as.POSIXlt(as.character(power.data.sub$Date),format="%d/%m/%Y")
#parse Time as POSIXlt as well -- list access to $hour/$min is used below
#(the original comment said POSIXct, which was inaccurate)
power.data.sub$Time <- as.POSIXlt(as.character(power.data.sub$Time),format = "%H:%M:%S")
#combine date and clock time into a single timestamp column DateTime,
#used as the x axis for all four panels
power.data.sub$DateTime <- ISOdatetime(
year(power.data.sub$Date)
, month(power.data.sub$Date)
, day(power.data.sub$Date)
, power.data.sub$Time$hour
,power.data.sub$Time$min
,0)
#open PNG device and create working file
png(file="plot4.png",width=480,height=480)
#create 2,2 layout (panels fill row by row)
par(mfrow=c(2,2))
#top-left panel: global active power over time
plot(power.data.sub$DateTime
,power.data.sub$Global_active_power
#set type to line
,type="l"
#set Y-axis label
,ylab="Global Active Power"
#turn off X-axis label
,xlab=NA)
#top-right panel: voltage over time
plot(power.data.sub$DateTime
,power.data.sub$Voltage
#set type to line
,type="l"
#set Y-axis label
,ylab="Voltage"
#set the X-axis label
,xlab="datetime")
#bottom-left panel: the three sub-metering series
#base layer drawn from Sub_metering_1
plot(power.data.sub$DateTime
,power.data.sub$Sub_metering_1
#set type to line
,type="l"
#set Y-axis label
,ylab="Energy sub metering"
#turn off X-axis label
,xlab=NA
,col="black")
#add red line for Sub_metering_2
lines(power.data.sub$DateTime
,power.data.sub$Sub_metering_2
,col="red")
#add blue line for Sub_metering_3
lines(power.data.sub$DateTime
,power.data.sub$Sub_metering_3
,col="blue")
#add legend matching the three series
legend(
#set location of legend
"topright"
#set labels for legend
,c("Sub_metering_1","Sub_metering_2","Sub_metering_3")
#set types of lines as solid
,lty = c(1,1,1)
#set width of lines
,lwd=c(2,2,2)
#set color of lines to match plot
,col=c("black","red","blue"))
#bottom-right panel: global reactive power over time
plot(power.data.sub$DateTime
,power.data.sub$Global_reactive_power
#set type to line
,type="l"
#set the Y-axis label
,ylab="Global_reactive_power"
#set the X-axis label
,xlab="datetime")
#close PNG device (writes the file)
dev.off()
}
| /plot4.R | no_license | MrBrianCronin/datasciencecoursera | R | false | false | 4,378 | r | #Function to create Plot3 as defined in Exploratory Data Analysis - Week 1 project
#Uses a dataset which must be downloaded and extracted from the following location
#https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip
#Data must be downloaded and unzipped in working directory prior to executing
#Uses libraries: Lubridate,
plot4 <- function() {
  # Recreate "plot4.png": a 2x2 grid of power readings for 1-2 Feb 2007.
  # Assumes the raw data file sits in the working directory and that
  # lubridate (year/month/day) is loaded by the caller.
  # Load the semicolon-delimited file and keep only the two target days
  # (dates are stored in d/m/Y form).
  raw <- read.csv("household_power_consumption.txt", sep = ";", header = TRUE)
  two_days <- subset(raw, Date == "1/2/2007" | Date == "2/2/2007")
  # The measurement columns arrive as factors; route them through character
  # on the way to numeric ("?" markers coerce to NA with a warning).
  measure_cols <- c("Global_active_power", "Global_reactive_power",
                    "Sub_metering_1", "Sub_metering_2", "Sub_metering_3",
                    "Voltage")
  for (col in measure_cols) {
    two_days[[col]] <- as.numeric(as.character(two_days[[col]]))
  }
  # Parse date and clock time as POSIXlt, then fuse them into one timestamp
  # column that serves as the x axis in every panel.
  two_days$Date <- as.POSIXlt(as.character(two_days$Date), format = "%d/%m/%Y")
  two_days$Time <- as.POSIXlt(as.character(two_days$Time), format = "%H:%M:%S")
  two_days$DateTime <- ISOdatetime(year(two_days$Date),
                                   month(two_days$Date),
                                   day(two_days$Date),
                                   two_days$Time$hour,
                                   two_days$Time$min,
                                   0)
  # Render all four panels into a 480x480 PNG, row by row.
  png(file = "plot4.png", width = 480, height = 480)
  par(mfrow = c(2, 2))
  # Top-left: global active power over time (no x label).
  plot(two_days$DateTime, two_days$Global_active_power,
       type = "l", ylab = "Global Active Power", xlab = NA)
  # Top-right: voltage over time.
  plot(two_days$DateTime, two_days$Voltage,
       type = "l", ylab = "Voltage", xlab = "datetime")
  # Bottom-left: the three sub-metering series plus a legend.
  plot(two_days$DateTime, two_days$Sub_metering_1,
       type = "l", ylab = "Energy sub metering", xlab = NA, col = "black")
  lines(two_days$DateTime, two_days$Sub_metering_2, col = "red")
  lines(two_days$DateTime, two_days$Sub_metering_3, col = "blue")
  legend("topright",
         c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         lty = c(1, 1, 1),
         lwd = c(2, 2, 2),
         col = c("black", "red", "blue"))
  # Bottom-right: global reactive power over time.
  plot(two_days$DateTime, two_days$Global_reactive_power,
       type = "l", ylab = "Global_reactive_power", xlab = "datetime")
  # Flush the device, writing the file to disk.
  dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startSpiderSeqR.R
\name{.findFiles}
\alias{.findFiles}
\title{Find files (a wrapper around list.files)}
\usage{
.findFiles(path, pattern)
}
\arguments{
\item{path}{A path to be searched}
\item{pattern}{Regular expression pattern to search for (passed to dir())}
}
\value{
A full path with the matching file(s)
}
\description{
Find files (a wrapper around list.files)
}
\examples{
#.findFiles(getwd(), "\\.sqlite$")
}
\keyword{internal}
| /man/dot-findFiles.Rd | no_license | ss-lab-cancerunit/SpiderSeqR | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startSpiderSeqR.R
\name{.findFiles}
\alias{.findFiles}
\title{Find files (a wrapper around list.files)}
\usage{
.findFiles(path, pattern)
}
\arguments{
\item{path}{A path to be searched}
\item{pattern}{Regular expression pattern to search for (passed to dir())}
}
\value{
A full path with the matching file(s)
}
\description{
Find files (a wrapper around list.files)
}
\examples{
#.findFiles(getwd(), "\\.sqlite$")
}
\keyword{internal}
|
# Auto-extracted example stub for GlobalOptions::print.GlobalOptionsFun;
# the help page ships no runnable example, hence the bare NULL below.
library(GlobalOptions)
### Name: print.GlobalOptionsFun
### Title: Print the GlobalOptionsFun object
### Aliases: print.GlobalOptionsFun
### ** Examples
# There is no example
NULL
| /data/genthat_extracted_code/GlobalOptions/examples/print.GlobalOptionsFun.rd.R | no_license | surayaaramli/typeRrh | R | false | false | 188 | r | library(GlobalOptions)
### Name: print.GlobalOptionsFun
### Title: Print the GlobalOptionsFun object
### Aliases: print.GlobalOptionsFun
### ** Examples
# There is no example
# Placeholder value: the help page ships no runnable example code.
NULL
|
#' Find sequences with similar bendability profiles
#'
#' Calculates bendability profile of query sequence. Finds all other sequences
#' with same or similar bendability profile.
#'
#' @param query DNAString or a character string.
#' @param scale One from "con", "conrigid", "dnase", "dnaserigid", "nuc", "nucrigid".
#' @param k Number of consecutive trinucleotides for which to calculate the average
#' bendability coefficient.
#' @param tolerance Determines size of interval for matching. All k-mers whose
#' average bendability coefficient falls into interval <query k-mer bendability
#' +/- tolerance> are matched with query.
#' @param wsize Window size for calculations. Should be (n*k)+2. See Details.
#' @param output.list Applicable if wsize is set. Whether to output a list of
#' data.tables where each contains matches for one window, or a single
#' data.table with full-length sequences. Defaults to FALSE.
#' @param random.out If NULL (default): process and output all matches. If set to
#' value <0, 1>: matches in each window are grouped according to prefix-suffix
#' combinations and a fraction of each group is kept at random. Number of
#' retained matches in a group is determined as (size of group * random.out),
#' but never less than 1.
#' @param lookup Optional. Output of function LookupTable. If not supplied, the
#' function makes lookup table automatically for each run.
#'
#' @return Single three-column data.table, or a list of three-column data.tables:
#' \itemize{
#' \item{sequence} : {sequence which matches bendability profile of query/query window}
#' \item{deltabend} : {cummulative absolute difference between bendability coefficients of match and query/query window}
#' \item{strdist} : {generalized Levenshtein distance between match and query/query window}
#' }
#' @details Higher tolerance values are not recommended in combination with higher k.
#'
#' If parameter \strong{wsize} is set, query sequence will be split into chunks for processing.
#' That can speed up the execution and enable exporting results as a list, which is
#' useful in case a run with default parameters failed or took too long to finish.
#' Chunks are of length wsize, with a two-nucleotide overlap beetween consecutive
#' ones. Chosen value must satisfy the conditions \emph{wsize=(n*k)+2} and
#' \emph{length(query)>=2*wsize}. The downside is that last <wsize nucleotides of
#' query sequence will not be processed.
#'
#' Setting \strong{output.list} to FALSE increases the runtime and memory consumption,
#' especially for longer sequences and higher tolerance values respectively . If the
#' execution failed with default parameters, try setting output.list to TRUE, or processing
#' your sequence in chunks.
#'
#' Setting \strong{random.out} will increase speed in case output.list=FALSE. Recommended
#' if a random subset of matches is enough for your application or a run with random.out=NULL
#' already failed.
#'
#' \strong{Lookup} table depends only on parameters scale and k. If you plan to apply the
#' function on multiple sequences without changing those two parameters, providing
#' lookup as parameter speeds up the process because the table isn't created anew
#' for each run.
#' @export
#'
#' @examples
#' MatchBendability("TGATTCCTAAAGTCA", "con", k=1, tolerance=0.5)
#' MatchBendability("TGATTCCTAAAGTCA", "con", k=3, tolerance=0.1)
MatchBendability <- function(query, scale, k, tolerance, wsize=NULL, output.list=FALSE, random.out=NULL, lookup=NULL) {
  # Build the lookup table unless the caller supplied one; it depends only on
  # `scale` and `k`, so reuse across calls is cheaper.
  if(is.null(lookup)) lookup <- LookupTable(scale, k, sequence.out=FALSE)
  # Duplicate the bendability column under the name expected by the
  # data.table merges below.
  lookup <- lookup[, bend:=Lbend]
  # Thin matches at random: group by prefix/suffix combination and keep a
  # fraction `random.out` of each group (never fewer than one row per group).
  PickRandomly <- function(dt, random.out) {
    dt <- dt[, .SD[sample(.N, max(1, round(.N*random.out)))], by=list(pref, suff)][, c(3:5, 1:2)]
    return(dt)
  }
  if(is.null(wsize)){
    # No windowing: process the query sequence as a whole.
    out <- Bendability(query, scale, k, tolerance, lookup)
    if(!is.null(random.out)) out <- PickRandomly(out, random.out)
    out <- out[, c(1,2,3)]
  } else {
    # Split the query into windows of length wsize with a two-base overlap and
    # run Bendability on each window.
    querywindows <- as.data.table(stringr::str_sub(query, seq(1, stringr::str_length(query)-wsize+1, wsize-2), seq(wsize, stringr::str_length(query), wsize-2)))
    l <- apply(querywindows, 1, Bendability, scale, k, tolerance, lookup)
    # Keep only entries with a successor in the next window and a predecessor
    # in the previous one. lapply/seq_len/seq_along replace the original
    # sapply(simplify=F) and 1:n/seq(n) calls, which misbehave on empty input.
    filtfw <- lapply(seq_len(length(l)-1), function(x) which(l[[x]]$suff %in% l[[x+1]]$pref))
    filtfw[[length(l)]] <- seq_len(nrow(l[[length(l)]]))
    filtrev <- lapply(seq(length(l), 2), function(x) which(l[[x]]$pref %in% l[[x-1]]$suff))
    filtrev[[length(l)]] <- seq_len(nrow(l[[1]]))
    filtrev <- rev(filtrev)
    # Map() instead of mapply(): mapply() silently simplifies to a matrix when
    # all windows keep the same number of rows, which breaks filtall[[x]].
    filtall <- Map(intersect, filtfw, filtrev)
    filtered <- lapply(seq_along(l), function(x) l[[x]][c(filtall[[x]]),])
    if(!is.null(random.out)) filtered <- lapply(filtered, PickRandomly, random.out)
    # Either return the per-window match tables, or iteratively merge
    # overlapping windows into full-length sequences.
    if(isTRUE(output.list)) { out <- lapply(filtered, function(x) x[, c(1:3)])
    } else {
      x <- rbindlist(filtered, idcol=TRUE)
      setnames(x, ".id", "pos")
      iter <- floor(log2(max(x$pos)))
      # Merge neighbouring windows pairwise (the suffix of one joins the prefix
      # of the next), halving the number of groups on every round.
      for(i in seq_len(iter)) {
        # Split matches by odd/even group position; even groups adopt the
        # position of their odd partner.
        xo <- x[pos%%2==1]
        xe <- x[pos%%2==0][, pos:=(pos-1)]
        # With an odd number of groups, fold the trailing odd group into the
        # last even one first so every odd group has a partner.
        if(max(xo$pos)>max(xe$pos)) {
          lasteven <- xe[pos==max(pos), ][xo[pos==max(pos), ], on=list(suff=pref), allow.cartesian=TRUE, nomatch=0
          ][, ':='(sequence=paste0(sequence, stringr::str_sub(i.sequence,start=3)), deltabend=(deltabend+i.deltabend), strdist=(strdist+i.strdist), pref=pref, suff=i.suff)
          ][, list(pos, sequence, deltabend, strdist, pref, suff)]
          xo <- xo[!(pos==max(pos)), ]
          xe <- rbind(xe[!(pos==max(pos)), ], lasteven)
        }
        # Join each odd group with its even partner, concatenate sequences
        # (dropping the two-base overlap) and sum both distance metrics.
        x <- xo[xe, on=list(pos=pos, suff=pref), allow.cartesian=TRUE, nomatch=0
        ][, ':='(sequence=paste0(sequence, stringr::str_sub(i.sequence,start=3)), deltabend=(deltabend+i.deltabend), strdist=(strdist+i.strdist), pref=pref, suff=i.suff)
        ][, pos:=.GRP, by="pos"
        ][, list(pos, sequence, deltabend, strdist, pref, suff)]
      }
      out <- x[, list(sequence, deltabend, strdist)]
    }
  }
  return(out)
}
utils::globalVariables(c("pref", "i.sequence", "deltabend", "i.deltabend", "strdist", "i.strdist", "suff", "i.suff"))
| /bendDNA_0.4.0.tar/bendDNA_0.4.0/bendDNA/R/MatchBendability.R | no_license | Michielc123/BIT11-Traineeship | R | false | false | 7,739 | r | #' Find sequences with similar bendability profiles
#'
#' Calculates bendability profile of query sequence. Finds all other sequences
#' with same or similar bendability profile.
#'
#' @param query DNAString or a character string.
#' @param scale One from "con", "conrigid", "dnase", "dnaserigid", "nuc", "nucrigid".
#' @param k Number of consecutive trinucleotides for which to calculate the average
#' bendability coefficient.
#' @param tolerance Determines size of interval for matching. All k-mers whose
#' average bendability coefficient falls into interval <query k-mer bendability
#' +/- tolerance> are matched with query.
#' @param wsize Window size for calculations. Should be (n*k)+2. See Details.
#' @param output.list Applicable if wsize is set. Whether to output a list of
#' data.tables where each contains matches for one window, or a single
#' data.table with full-length sequences. Defaults to FALSE.
#' @param random.out If NULL (default): process and output all matches. If set to
#' value <0, 1>: matches in each window are grouped according to prefix-suffix
#' combinations and a fraction of each group is kept at random. Number of
#' retained matches in a group is determined as (size of group * random.out),
#' but never less than 1.
#' @param lookup Optional. Output of function LookupTable. If not supplied, the
#' function makes lookup table automatically for each run.
#'
#' @return Single three-column data.table, or a list of three-column data.tables:
#' \itemize{
#' \item{sequence} : {sequence which matches bendability profile of query/query window}
#' \item{deltabend} : {cummulative absolute difference between bendability coefficients of match and query/query window}
#' \item{strdist} : {generalized Levenshtein distance between match and query/query window}
#' }
#' @details Higher tolerance values are not recommended in combination with higher k.
#'
#' If parameter \strong{wsize} is set, query sequence will be split into chunks for processing.
#' That can speed up the execution and enable exporting results as a list, which is
#' useful in case a run with default parameters failed or took too long to finish.
#' Chunks are of length wsize, with a two-nucleotide overlap beetween consecutive
#' ones. Chosen value must satisfy the conditions \emph{wsize=(n*k)+2} and
#' \emph{length(query)>=2*wsize}. The downside is that last <wsize nucleotides of
#' query sequence will not be processed.
#'
#' Setting \strong{output.list} to FALSE increases the runtime and memory consumption,
#' especially for longer sequences and higher tolerance values respectively . If the
#' execution failed with default parameters, try setting output.list to TRUE, or processing
#' your sequence in chunks.
#'
#' Setting \strong{random.out} will increase speed in case output.list=FALSE. Recommended
#' if a random subset of matches is enough for your application or a run with random.out=NULL
#' already failed.
#'
#' \strong{Lookup} table depends only on parameters scale and k. If you plan to apply the
#' function on multiple sequences without changing those two parameters, providing
#' lookup as parameter speeds up the process because the table isn't created anew
#' for each run.
#' @export
#'
#' @examples
#' MatchBendability("TGATTCCTAAAGTCA", "con", k=1, tolerance=0.5)
#' MatchBendability("TGATTCCTAAAGTCA", "con", k=3, tolerance=0.1)
# NOTE(review): uses F/T literals and `output.list==T`; prefer FALSE/TRUE and
# isTRUE() since T/F are reassignable bindings.
MatchBendability <- function(query, scale, k, tolerance, wsize=NULL, output.list=F, random.out=NULL, lookup=NULL) {
# get lookup table, if it isn't already provided:
if(is.null(lookup)) lookup <- LookupTable(scale, k, sequence.out=F)
# additional column with bendabilities (a sad artefact needed for data.table merging, which I will try to fix):
lookup <- lookup[, bend:=Lbend]
# if a window has more than size.cutoff matches, group them by prefix-suffix combination and keep a fraction
# of each group at random (size of fraction is determined by parameter frac). recommended if a random subset
# of matches is enough for your application, and/or a very large number of matches is expected (happens with
# long sequences, big k, big tolerance):
PickRandomly <- function(dt, random.out) {
dt <- dt[, .SD[sample(.N, max(1, round(.N*random.out)))], by=list(pref, suff)][, c(3:5, 1:2)]
return(dt)
}
if(is.null(wsize)){
# if wsize is not defined, query sequence is processed as a whole:
out <- Bendability(query, scale, k, tolerance, lookup)
if(!is.null(random.out)) out <- PickRandomly(out, random.out)
out <- out[, c(1,2,3)]
} else {
# if wsize is defined, query is split into windows of size wsize (two-base
# overlap between consecutive windows). function Bendability is applied to each window:
querywindows <- as.data.table(stringr::str_sub(query, seq(1, stringr::str_length(query)-wsize+1, wsize-2), seq(wsize, stringr::str_length(query), wsize-2)))
l <- apply(querywindows, 1, Bendability, scale, k, tolerance, lookup)
# resulting list is filtered to leave only entries which have a predecessor and a successor in the preceding and
# next windows, respectively:
# NOTE(review): seq(1, length(l)-1) and seq(nrow(...)) misbehave when their
# endpoint is 0; seq_len() would be the safe form. Also note that
# mapply(intersect, ...) silently simplifies to a matrix when all windows keep
# the same number of rows, which would break filtall[[x]] below -- confirm.
filtfw <- sapply(seq(1, length(l)-1), function(x) which(l[[x]]$suff %in% l[[x+1]]$pref), simplify=F)
filtfw[[length(l)]] <- seq(nrow(l[[length(l)]]))
filtrev <- sapply(seq(length(l), 2), function(x) which(l[[x]]$pref %in% l[[x-1]]$suff), simplify=F)
filtrev[[length(l)]] <- seq(nrow(l[[1]]))
filtrev <- rev(filtrev)
filtall <- mapply(intersect, filtfw, filtrev)
filtered <- sapply(seq(length(l)), function(x) l[[x]][c(filtall[[x]]),], simplify = F)
if(!is.null(random.out)) filtered <- lapply(filtered, PickRandomly, random.out)
# output can be a list of data.tables, each data.table representing all matches for one window. if full sequences
# are required, the thing has to be iteratively merged, which will increase the runtime:
if(output.list==T) { out <- lapply(filtered, function(x) x[, c(1:3)])
} else {
x <- rbindlist(filtered, idcol=T)
setnames(x, ".id", "pos")
iter <- floor(log2(max(x$pos)))
# consecutive windows which overlap (suffix of one matches the prefix of next) are merged into full sequences.
# it's done iteratively by merging pairs of neighbouring sequences, until the whole thing is reconstructed:
for(i in 1:iter) {
# first, split matches according to odd or even position:
xo <- x[pos%%2==1]
xe <- x[pos%%2==0][, pos:=(pos-1)]
# (what to do if there's an odd number of groups: fold the trailing odd
# group into the last even one so every odd group has a partner):
if(max(xo$pos)>max(xe$pos)) {
lasteven <- xe[pos==max(pos), ][xo[pos==max(pos), ], on=list(suff=pref), allow.cartesian=T, nomatch=0
][, ':='(sequence=paste0(sequence, stringr::str_sub(i.sequence,start=3)), deltabend=(deltabend+i.deltabend), strdist=(strdist+i.strdist), pref=pref, suff=i.suff)
][, list(pos, sequence, deltabend, strdist, pref, suff)]
xo <- xo[!(pos==max(pos)), ]
xe <- rbind(xe[!(pos==max(pos)), ], lasteven)
}
# now merge pairs (first group with second, third with fourth etc.);
# sequences are concatenated minus the two-base overlap, metrics are summed:
x <- xo[xe, on=list(pos=pos, suff=pref), allow.cartesian=T, nomatch=0
][, ':='(sequence=paste0(sequence, stringr::str_sub(i.sequence,start=3)), deltabend=(deltabend+i.deltabend), strdist=(strdist+i.strdist), pref=pref, suff=i.suff)
][, pos:=.GRP, by="pos"
][, list(pos, sequence, deltabend, strdist, pref, suff)]
}
out <- x[, list(sequence, deltabend, strdist)]
}
}
return(out)
}
utils::globalVariables(c("pref", "i.sequence", "deltabend", "i.deltabend", "strdist", "i.strdist", "suff", "i.suff"))
|
### Statistical Learning Final Project
library(tidyverse)
#Goal 1: Use unsupervised learning to explore the data.
# e.g. Dendrogram, K-means, Hierarchical clustering
# ---- Load and clean the data ----
clinical_spectrum <- read_csv('../clinical_spectrum/clinical-spectrum.csv')
head(clinical_spectrum)
# Keep the first 39 columns, then drop columns 21 and 28
# (position-based selection; fragile if the CSV layout changes).
clinical_spectrum_filtered <- clinical_spectrum[,c(1:39)]
clinical_spectrum_filtered <- clinical_spectrum_filtered[,-c(21, 28)]
# Proportion of non-missing influenza A tests that were positive.
clinical_spectrum_filtered[,c('influenza_a')] %>%
  na.omit() %>%
  mutate(status = ifelse(influenza_a == 'not_detected', 0, 1)) %>%
  summarize(prop = mean(status))
# Proportion of all rows with a positive SARS-CoV-2 result.
clinical_spectrum_filtered %>%
  filter(sars_cov_2_exam_result == 'positive') %>%
  summarize(prop_positive= n() / nrow(clinical_spectrum_filtered))
# Complete cases only; columns 7-20 hold the numeric blood-work measures.
clinical_spectrum_clean <- clinical_spectrum_filtered %>%
  na.omit()
clinical_spectrum_numbers <- clinical_spectrum_clean[,c(7:20)]
clinical_spectrum_clean %>%
  filter(sars_cov_2_exam_result == 'positive')
#scale the clinical spectrum values; this data frame only includes the common objective reported stats
cs_numbers_scaled <- scale(clinical_spectrum_numbers)
# ---- Hierarchical clustering ----
#calculate a distance matrix
cs_dist <- dist(cs_numbers_scaled)
hc1 <- hclust(cs_dist)
plot(hc1, cex = .5)
#let's try to clean up the output
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; better done once interactively.
install.packages('dendextend')
install.packages('circlize')
library(dendextend)
library(circlize)
#display dendrogram with the positive test results underneath the clusters
hc1 %>%
  as.dendrogram() %>%
  place_labels(clinical_spectrum_clean$sars_cov_2_exam_result) %>%
  set('labels_cex', .3) %>%
  color_branches(k = 4) %>%
  color_unique_labels() %>%
  plot()
# ---- PCA ----
#ok let's try to reduce our variables using PCA
pca1 <- prcomp(clinical_spectrum_numbers,
               scale. = TRUE)
#Let's make a plot predicting test result from the other values using PCA
install.packages('pls')
library(pls)
validationplot(pca1)
#first append the test results back onto the numbers data frame
cs_numbers_results <- cbind(clinical_spectrum_numbers, clinical_spectrum_clean$sars_cov_2_exam_result)
# Encode the test result as 0/1, then drop the original character column (15).
cs_for_pca <- cs_numbers_results %>%
  mutate(test_result = ifelse(`clinical_spectrum_clean$sars_cov_2_exam_result` == 'negative', 0, 1))
cs_for_pca <- cs_for_pca[,-c(15)]
pca2 <- pcr(test_result ~ .,
            data = cs_for_pca,
            scale = TRUE)
validationplot(pca2)
biplot(pca1)
# ---- K-means ----
# NOTE(review): kmeans() and sample() below are stochastic and no set.seed()
# is called, so results differ between runs.
km1 <- kmeans(cs_numbers_scaled,
              3)
view(cs_numbers_scaled)
cs_numbers_scaled %>%
  as.data.frame() %>%
  mutate(cluster = km1$cluster) %>%
  ggplot(aes(x = platelets,
             y = hemoglobin)) +
  geom_point(aes(color = factor(cluster)))
km2 <- kmeans(cs_for_pca,
              3)
# The second geom_point layer overplots the first with the test-result colour.
cs_for_pca %>%
  as.data.frame() %>%
  mutate(cluster = km2$cluster) %>%
  ggplot(aes(x = platelets,
             y = hemoglobin)) +
  geom_point(aes(color = factor(cluster))) +
  geom_point(aes(color = factor(test_result)))
# Per-cluster summary of positivity rate and two blood measures.
cs_for_pca %>%
  mutate(cluster = km2$cluster) %>%
  group_by(cluster) %>%
  summarize(proportion_positive = mean(test_result),
            cluster_size = n(),
            mean_hematocrit = mean(hematocrit),
            mean_hemoglobin = mean(hemoglobin))
# Elbow-style scan: average within-cluster sum of squares for k = 1..50.
tot <- NULL
for(i in 1:50){
  km <- kmeans(cs_for_pca,
               i)
  tot[i] <- km$tot.withinss/i
}
plot(tot)
km3 <- kmeans(cs_for_pca,
              30)
# Summarise the 30 clusters, flag clusters with >= 25% positives as
# "likely positive", then average the blood measures within each flag group.
likely_positive_comparison <- cs_for_pca %>%
  mutate(cluster = km3$cluster) %>%
  group_by(cluster) %>%
  summarize(proportion_positive = mean(test_result),
            cluster_size = n(),
            mean_hematocrit = mean(hematocrit),
            mean_hemoglobin = mean(hemoglobin),
            mean_platelets = mean(platelets),
            mean_platelet_volume = mean(mean_platelet_volume),
            mean_rbc = mean(red_blood_cells),
            mean_lymphocytes = mean(lymphocytes),
            mean_corpuscular_hemoglobin_concentration_mchc = mean(mean_corpuscular_hemoglobin_concentration_mchc),
            mean_leukocytes = mean(leukocytes),
            mean_basophils = mean(basophils),
            mean_corpuscular_hemoglobin_mch = mean(mean_corpuscular_hemoglobin_mch),
            mean_eosinophils = mean(eosinophils),
            mean_mcv = mean(mean_corpuscular_volume_mcv),
            mean_monocytes = mean(monocytes),
            mean_rdw = mean(red_blood_cell_distribution_width_rdw)) %>%
  arrange(-proportion_positive) %>%
  mutate(likely_positive = ifelse(proportion_positive>= 0.25, 1, 0)) %>%
  group_by(likely_positive) %>%
  select(-c(cluster, cluster_size)) %>%
  summarize_all(mean)
view(likely_positive_comparison)
# Reshape to long form for plotting.
# NOTE(review): gather() is superseded by pivot_longer(); the key column also
# reuses the name 'likely_positive', which shadows the grouping flag.
gathered_likely_positive_comparison <- likely_positive_comparison %>%
  gather(key = 'likely_positive',
         value = 'blood_comparison',
         -likely_positive) %>%
  mutate(category = ifelse(row_number()%%2 == 0, 1, 0))
gathered_likely_positive_comparison %>%
  ggplot(aes(x = likely_positive,
             y = factor(blood_comparison))) +
  geom_bar(stat = 'identity',
           position = 'dodge',
           aes(fill = category))
gathered_likely_positive_comparison %>%
  ggplot(aes(x = likely_positive,
             y = blood_comparison,
             fill = factor(category))) +
  geom_bar(stat = 'identity',
           position = 'dodge') +
  coord_flip()
# ---- Supervised models: random forest ----
## Let's try some different models to try to predict positive test results
#first a caret random forest
library(caret)
library(randomForest)
library(ipred)
#cleaning data before implementing random forest
clinical_spectrum_clean_rf <- clinical_spectrum_clean[,-c(1,4:6)]
# NOTE(review): the column is referenced as 'exam_result' here, but earlier
# code uses 'sars_cov_2_exam_result' -- confirm column names in the CSV.
clinical_spectrum_clean_rf <- clinical_spectrum_clean_rf %>%
  mutate(exam_result = ifelse(exam_result == 'negative', 0, 1))
clinical_spectrum_clean_rf <- clinical_spectrum_clean_rf[,-c(2)]
#test train split (70/30, unseeded)
rf_indexes <- sample(nrow(clinical_spectrum_clean_rf),
                     .7*nrow(clinical_spectrum_clean_rf),
                     replace = FALSE)
rf_indexes
rf_train = clinical_spectrum_clean_rf[rf_indexes,]
rf_test = clinical_spectrum_clean_rf[-rf_indexes,]
rf1 <- randomForest(factor(exam_result) ~ .,
                    data = rf_train)
bag1 <- bagging(exam_result ~ .,
                data = clinical_spectrum_clean_rf)
varImpPlot(rf1)
varImpPlot
?train
# Hyper-parameter grid for the ranger engine (fit via caret below).
tune.grid = expand.grid(mtry = c(9, 12, 15),
                        splitrule = c('gini','extratrees'),
                        min.node.size = 1)
ranger1 <- train(factor(exam_result) ~ .,
                 data = rf_train,
                 method = "ranger",
                 tuneGrid = tune.grid)
ranger1
#create a confusion matrix of predicted vs observed results on the test set
predict(ranger1, rf_test)
table(predict(ranger1, rf_test), rf_test$exam_result)
?table
plot(ranger1)
| /StatisticalLearning/COVID/clinical_spectrum.R | no_license | wconrad9/r | R | false | false | 6,582 | r | ### Statistical Learning Final Project
library(tidyverse)
#Goal 1: Use unsupervised learning to explore the data.
# e.g. Dendrogram, K-means, Hierarchical clustering
# Load the raw CSV and trim it to the columns of interest (positional).
clinical_spectrum <- read_csv('../clinical_spectrum/clinical-spectrum.csv')
head(clinical_spectrum)
clinical_spectrum_filtered <- clinical_spectrum[,c(1:39)]
clinical_spectrum_filtered <- clinical_spectrum_filtered[,-c(21, 28)]
# Share of non-missing influenza A tests that were positive.
clinical_spectrum_filtered[,c('influenza_a')] %>%
  na.omit() %>%
  mutate(status = ifelse(influenza_a == 'not_detected', 0, 1)) %>%
  summarize(prop = mean(status))
# Share of all rows with a positive SARS-CoV-2 result.
clinical_spectrum_filtered %>%
  filter(sars_cov_2_exam_result == 'positive') %>%
  summarize(prop_positive= n() / nrow(clinical_spectrum_filtered))
# Keep complete cases; columns 7-20 hold the numeric blood-work measures.
clinical_spectrum_clean <- clinical_spectrum_filtered %>%
  na.omit()
clinical_spectrum_numbers <- clinical_spectrum_clean[,c(7:20)]
clinical_spectrum_clean %>%
  filter(sars_cov_2_exam_result == 'positive')
#scale the clinical spectrum values; this data frame only includes the common objective reported stats
cs_numbers_scaled <- scale(clinical_spectrum_numbers)
# Hierarchical clustering on the scaled measures.
cs_dist <- dist(cs_numbers_scaled)
hc1 <- hclust(cs_dist)
plot(hc1, cex = .5)
#let's try to clean up the output
# NOTE(review): installing packages inside a script re-installs on every run.
install.packages('dendextend')
install.packages('circlize')
library(dendextend)
library(circlize)
#display dendrogram with the positive test results underneath the clusters
hc1 %>%
  as.dendrogram() %>%
  place_labels(clinical_spectrum_clean$sars_cov_2_exam_result) %>%
  set('labels_cex', .3) %>%
  color_branches(k = 4) %>%
  color_unique_labels() %>%
  plot()
# Dimensionality reduction with PCA, then a PCR fit on the 0/1 test result.
pca1 <- prcomp(clinical_spectrum_numbers,
               scale. = TRUE)
install.packages('pls')
library(pls)
validationplot(pca1)
#first append the test results back onto the numbers data frame
cs_numbers_results <- cbind(clinical_spectrum_numbers, clinical_spectrum_clean$sars_cov_2_exam_result)
cs_for_pca <- cs_numbers_results %>%
  mutate(test_result = ifelse(`clinical_spectrum_clean$sars_cov_2_exam_result` == 'negative', 0, 1))
cs_for_pca <- cs_for_pca[,-c(15)]
pca2 <- pcr(test_result ~ .,
            data = cs_for_pca,
            scale = TRUE)
validationplot(pca2)
biplot(pca1)
# K-means exploration. NOTE(review): stochastic, no set.seed() -- results
# vary between runs.
km1 <- kmeans(cs_numbers_scaled,
              3)
view(cs_numbers_scaled)
cs_numbers_scaled %>%
  as.data.frame() %>%
  mutate(cluster = km1$cluster) %>%
  ggplot(aes(x = platelets,
             y = hemoglobin)) +
  geom_point(aes(color = factor(cluster)))
km2 <- kmeans(cs_for_pca,
              3)
# Second geom_point layer overplots the first using the test-result colour.
cs_for_pca %>%
  as.data.frame() %>%
  mutate(cluster = km2$cluster) %>%
  ggplot(aes(x = platelets,
             y = hemoglobin)) +
  geom_point(aes(color = factor(cluster))) +
  geom_point(aes(color = factor(test_result)))
# Per-cluster positivity summary.
cs_for_pca %>%
  mutate(cluster = km2$cluster) %>%
  group_by(cluster) %>%
  summarize(proportion_positive = mean(test_result),
            cluster_size = n(),
            mean_hematocrit = mean(hematocrit),
            mean_hemoglobin = mean(hemoglobin))
# Elbow-style scan of average within-cluster SS over k = 1..50.
tot <- NULL
for(i in 1:50){
  km <- kmeans(cs_for_pca,
               i)
  tot[i] <- km$tot.withinss/i
}
plot(tot)
km3 <- kmeans(cs_for_pca,
              30)
# Summarise the 30 clusters, flag those with >= 25% positives, then average
# the blood measures within the likely-positive and likely-negative groups.
likely_positive_comparison <- cs_for_pca %>%
  mutate(cluster = km3$cluster) %>%
  group_by(cluster) %>%
  summarize(proportion_positive = mean(test_result),
            cluster_size = n(),
            mean_hematocrit = mean(hematocrit),
            mean_hemoglobin = mean(hemoglobin),
            mean_platelets = mean(platelets),
            mean_platelet_volume = mean(mean_platelet_volume),
            mean_rbc = mean(red_blood_cells),
            mean_lymphocytes = mean(lymphocytes),
            mean_corpuscular_hemoglobin_concentration_mchc = mean(mean_corpuscular_hemoglobin_concentration_mchc),
            mean_leukocytes = mean(leukocytes),
            mean_basophils = mean(basophils),
            mean_corpuscular_hemoglobin_mch = mean(mean_corpuscular_hemoglobin_mch),
            mean_eosinophils = mean(eosinophils),
            mean_mcv = mean(mean_corpuscular_volume_mcv),
            mean_monocytes = mean(monocytes),
            mean_rdw = mean(red_blood_cell_distribution_width_rdw)) %>%
  arrange(-proportion_positive) %>%
  mutate(likely_positive = ifelse(proportion_positive>= 0.25, 1, 0)) %>%
  group_by(likely_positive) %>%
  select(-c(cluster, cluster_size)) %>%
  summarize_all(mean)
view(likely_positive_comparison)
# Long format for plotting. NOTE(review): gather() is superseded by
# pivot_longer(); the key column also reuses the name 'likely_positive'.
gathered_likely_positive_comparison <- likely_positive_comparison %>%
  gather(key = 'likely_positive',
         value = 'blood_comparison',
         -likely_positive) %>%
  mutate(category = ifelse(row_number()%%2 == 0, 1, 0))
gathered_likely_positive_comparison %>%
  ggplot(aes(x = likely_positive,
             y = factor(blood_comparison))) +
  geom_bar(stat = 'identity',
           position = 'dodge',
           aes(fill = category))
gathered_likely_positive_comparison %>%
  ggplot(aes(x = likely_positive,
             y = blood_comparison,
             fill = factor(category))) +
  geom_bar(stat = 'identity',
           position = 'dodge') +
  coord_flip()
## Supervised models to predict positive test results.
#first a caret random forest
library(caret)
library(randomForest)
library(ipred)
#cleaning data before implementing random forest
clinical_spectrum_clean_rf <- clinical_spectrum_clean[,-c(1,4:6)]
# NOTE(review): 'exam_result' vs earlier 'sars_cov_2_exam_result' -- confirm
# the actual column names in the CSV.
clinical_spectrum_clean_rf <- clinical_spectrum_clean_rf %>%
  mutate(exam_result = ifelse(exam_result == 'negative', 0, 1))
clinical_spectrum_clean_rf <- clinical_spectrum_clean_rf[,-c(2)]
#test train split (70/30, unseeded random sample)
rf_indexes <- sample(nrow(clinical_spectrum_clean_rf),
                     .7*nrow(clinical_spectrum_clean_rf),
                     replace = FALSE)
rf_indexes
rf_train = clinical_spectrum_clean_rf[rf_indexes,]
rf_test = clinical_spectrum_clean_rf[-rf_indexes,]
rf1 <- randomForest(factor(exam_result) ~ .,
                    data = rf_train)
bag1 <- bagging(exam_result ~ .,
                data = clinical_spectrum_clean_rf)
varImpPlot(rf1)
varImpPlot
?train
# Hyper-parameter grid for caret's ranger method.
tune.grid = expand.grid(mtry = c(9, 12, 15),
                        splitrule = c('gini','extratrees'),
                        min.node.size = 1)
ranger1 <- train(factor(exam_result) ~ .,
                 data = rf_train,
                 method = "ranger",
                 tuneGrid = tune.grid)
ranger1
#confusion matrix: predictions vs observed labels on the held-out test set
predict(ranger1, rf_test)
table(predict(ranger1, rf_test), rf_test$exam_result)
?table
plot(ranger1)
|
library(tidyverse)
# Plot the function as an interactive plot with pretty labels
# n_min = lowest sample size
# n_max = maximum sample size
# prop = proposed sample size
# ci_p = confidence interval precision (defaults to 95%)
# Plot the standard error of a correlation as a function of sample size,
# as an interactive plotly chart with custom hover labels.
#
# data   : simulated data with columns n, se_r and hover text text_se
# r      : the correlation coefficient the curve is drawn for
# n_min  : lowest sample size (currently unused in the function body)
# n_max  : maximum sample size; anchors the annotation x-positions
# prop   : proposed sample size, marked with a dotted vertical line
# ci_p   : confidence interval precision (default 95%; currently unused)
# opt    : if TRUE, highlight the point at sample size `thresh`
# thresh : sample size to highlight when opt is TRUE
#
# Depends on an se_r(r, n) helper defined elsewhere in this project.
plot_se_r <- function(data, r, n_min, n_max, prop, ci_p = .95, opt, thresh){
  # (dead code removed: crit_p <- 1 - ((1 - ci_p)/2) was computed here but
  # never used; restore it if confidence bands are ever added to the plot)
  # Using the simulated data, plot the standard error curve with labels
  p <- data %>%
    ggplot(aes(x = n, y = se_r, text = text_se, group = 1)) +
    geom_line() +
    labs(x = "Number of Observations",
         y = "SE of Correlation",
         title = paste("Standard Error of the Correlation when r = ", r, "\n")) +
    geom_vline(xintercept = prop, lty = 3) +
    # annotations anchored at 80% of the x-range; y-positions are scaled
    # from se_r(r, 3) -- presumably near the top of the curve (confirm)
    geom_text(label = paste0("Proposed Sample Size = ", prop),
              x = .8 * n_max,
              y = se_r(r, 3) * .9,
              hjust = "left") +
    geom_text(label = paste0("Standard Error: ", round(se_r(r, prop), 2)),
              x = .8 * n_max,
              y = se_r(r, 3) * .85,
              hjust = "left") +
    theme(panel.grid = element_blank(),
          panel.background = element_blank(),
          axis.line.x = element_line(),
          axis.line.y = element_line())
  # Optionally highlight the observation at the threshold sample size
  if (opt) {
    p <- p +
      geom_point(data = NULL, aes(x = thresh,
                                  y = se_r(r, thresh),
                                  text = data$text_se[data$n == thresh]))
  }
  # Make the plot interactive, showing only the custom hover text
  plotly::ggplotly(p, tooltip = c("text"))
}
| /plot_se.R | no_license | jimmyrigby94/cor_se | R | false | false | 1,586 | r | library(tidyverse)
# Plot the function as an interactive plot with pretty labels
# n_min = lowest sample size
# n_max = maximum sample size
# prop = proposed sample size
# ci_p = confidence interval precision (defaults to 95%)
# Plot the standard error of a correlation as a function of sample size,
# as an interactive plotly chart with custom hover labels.
#
# data   : simulated data with columns n, se_r and hover text text_se
# r      : the correlation coefficient the curve is drawn for
# n_min  : lowest sample size (currently unused in the function body)
# n_max  : maximum sample size; anchors the annotation x-positions
# prop   : proposed sample size, marked with a dotted vertical line
# ci_p   : confidence interval precision (default 95%; currently unused)
# opt    : if TRUE, highlight the point at sample size `thresh`
# thresh : sample size to highlight when opt is TRUE
#
# Depends on an se_r(r, n) helper defined elsewhere in this project.
plot_se_r <- function(data, r, n_min, n_max, prop, ci_p = .95, opt, thresh){
  # (dead code removed: crit_p <- 1 - ((1 - ci_p)/2) was computed here but
  # never used; restore it if confidence bands are ever added to the plot)
  # Using the simulated data, plot the standard error curve with labels
  p <- data %>%
    ggplot(aes(x = n, y = se_r, text = text_se, group = 1)) +
    geom_line() +
    labs(x = "Number of Observations",
         y = "SE of Correlation",
         title = paste("Standard Error of the Correlation when r = ", r, "\n")) +
    geom_vline(xintercept = prop, lty = 3) +
    # annotations anchored at 80% of the x-range; y-positions are scaled
    # from se_r(r, 3) -- presumably near the top of the curve (confirm)
    geom_text(label = paste0("Proposed Sample Size = ", prop),
              x = .8 * n_max,
              y = se_r(r, 3) * .9,
              hjust = "left") +
    geom_text(label = paste0("Standard Error: ", round(se_r(r, prop), 2)),
              x = .8 * n_max,
              y = se_r(r, 3) * .85,
              hjust = "left") +
    theme(panel.grid = element_blank(),
          panel.background = element_blank(),
          axis.line.x = element_line(),
          axis.line.y = element_line())
  # Optionally highlight the observation at the threshold sample size
  if (opt) {
    p <- p +
      geom_point(data = NULL, aes(x = thresh,
                                  y = se_r(r, thresh),
                                  text = data$text_se[data$n == thresh]))
  }
  # Make the plot interactive, showing only the custom hover text
  plotly::ggplotly(p, tooltip = c("text"))
}
|
# following are analyses on samples for which either IgA-seq was performed and both IgA-bound and IgA-unbound bacterial fractions were profiled, or for samples in which autologous paired serum and stool were used and IgG-bound and IgG-unbound bacterial fractions were profiled.
library(labdsv);library(ggplot2);library(lme4);library(lmerTest);library(plyr);library(stringr);library(RColorBrewer);library(Hmisc);library(viridis);library(vegan);library(phyloseq);library(superheat)
library(ggbeeswarm);library(ape);library(reshape2);library(splitstackshape)
# read in metadata mapping file, with rows as sample IDs:
map <- read.table(file="~/Dropbox/post-doc/HIV/bacFACS/170124 seq of BM6-9_Trinch/170216_HIVBM1-9_map_forR.txt", sep="\t", header=TRUE, row.names=1)
# read in dada2-processed, rarefied ASV table, with columns as sample IDs:
tab1 <- read.table("~/Dropbox/post-doc/HIV/bacFACS/validation_experiments/180712_MBM1-4_16s_data/seqtab_mc005p_rar23k.txt", sep="\t", header=TRUE, row.names=1)
# filter out ASVs with read abundance <0.01% of total:
tab2 <- tab1[rowSums(tab1) > sum(tab1) * 0.0001, ]
# filter out samples in mapping file not present in ASV table:
map2 <- map[rownames(map) %in% colnames(tab2), ]
# filter out samples missing either a negative fraction (Ig-unbound) or a
# positive (Ig-bound) fraction; 'expmouse' is the experimental subject ID,
# 'negpos' says whether a sample is the negative or positive fraction:
sampsw2sum1 <- table(map2$negpos, map2$expmouse)
sampsw2sum <- colSums(sampsw2sum1[c("neg", "pos"), ])
sampsw2 <- names(sampsw2sum[sampsw2sum > 1])
map3 <- map2[map2$expmouse %in% sampsw2, ]
# add pseudocount of 1 so the log-ratio below is defined:
tab <- (tab2 + 1)
# define which immunoglobulin class to query, if both IgA-seq and IgG-seq
# were performed; the 'Ig' column holds this, here selecting IgG samples:
map4igg <- subset(map3, Ig == "IgG")
# create Ig score matrix: rows = ASVs, columns = experimental subjects.
# BUG FIX: ncol must be the NUMBER of unique subjects; the original passed
# the character vector of subject IDs itself to matrix(), which is invalid.
mice <- unique(map4igg$expmouse)
ICmat <- matrix(nrow = nrow(tab2), ncol = length(mice))
colnames(ICmat) <- mice
rownames(ICmat) <- rownames(tab2)
for (i in seq_along(mice)) {
  subjID <- mice[i]
  mapsub <- subset(map4igg, expmouse == subjID)
  # assumes exactly one positive and one negative sample per subject --
  # TODO confirm; note 'negpospre' differs from the 'negpos' column above
  possamp <- rownames(subset(mapsub, negpospre == "pos"))
  negsamp <- rownames(subset(mapsub, negpospre == "neg"))
  # remove ASVs not detected in either pos or neg fraction by zeroing both
  # counts, forcing NaN at the log-ratio calculation below:
  zeroesinpos <- (tab[, possamp] == 1)
  zeroesinneg <- (tab[, negsamp] == 1)
  bothzeroes <- zeroesinpos + zeroesinneg
  bothzeropositions <- (bothzeroes == 2)
  tab[bothzeropositions, possamp] <- 0
  tab[bothzeropositions, negsamp] <- 0
  # log10 ratio (pos/neg) across all taxa for this subject's sample pair:
  ICmat[, i] <- log(tab[, possamp], 10) - log(tab[, negsamp], 10)
}
| /IgA or IgG-seq on paired samples.R | no_license | ivanvujkc/IgG-seq_translocators | R | false | false | 2,773 | r | # following are analyses on samples for which either IgA-seq was performed and both IgA-bound and IgA-unbound bacterial fractions were profiled, or for samples in which autologous paired serum and stool were used and IgG-bound and IgG-unbound bacterial fractions were profiled.
# IgA/IgG-seq analysis: compute per-subject log10 Ig-binding scores
# (Ig-bound vs Ig-unbound bacterial fractions) for each ASV.
library(labdsv);library(ggplot2);library(lme4);library(lmerTest);library(plyr);library(stringr);library(RColorBrewer);library(Hmisc);library(viridis);library(vegan);library(phyloseq);library(superheat)
library(ggbeeswarm);library(ape);library(reshape2);library(splitstackshape)
# read in metadata mapping file, with rows as sample IDs:
map <- read.table(file="~/Dropbox/post-doc/HIV/bacFACS/170124 seq of BM6-9_Trinch/170216_HIVBM1-9_map_forR.txt", sep="\t", header=TRUE, row.names=1)
# read in dada2-processed, rarefied ASV table, with columns as sample IDs:
tab1 <- read.table("~/Dropbox/post-doc/HIV/bacFACS/validation_experiments/180712_MBM1-4_16s_data/seqtab_mc005p_rar23k.txt", sep="\t", header=TRUE, row.names=1)
# filter out ASVs with read abundance <0.01% of total:
tab2 <- tab1[rowSums(tab1) > sum(tab1) * 0.0001, ]
# filter out samples in mapping file not present in ASV table:
map2 <- map[rownames(map) %in% colnames(tab2), ]
# filter out samples missing either a negative fraction (Ig-unbound) or a
# positive (Ig-bound) fraction; 'expmouse' is the experimental subject ID,
# 'negpos' says whether a sample is the negative or positive fraction:
sampsw2sum1 <- table(map2$negpos, map2$expmouse)
sampsw2sum <- colSums(sampsw2sum1[c("neg", "pos"), ])
sampsw2 <- names(sampsw2sum[sampsw2sum > 1])
map3 <- map2[map2$expmouse %in% sampsw2, ]
# add pseudocount of 1 so the log-ratio below is defined:
tab <- (tab2 + 1)
# define which immunoglobulin class to query, if both IgA-seq and IgG-seq
# were performed; the 'Ig' column holds this, here selecting IgG samples:
map4igg <- subset(map3, Ig == "IgG")
# create Ig score matrix: rows = ASVs, columns = experimental subjects.
# BUG FIX: ncol must be the NUMBER of unique subjects; the original passed
# the character vector of subject IDs itself to matrix(), which is invalid.
mice <- unique(map4igg$expmouse)
ICmat <- matrix(nrow = nrow(tab2), ncol = length(mice))
colnames(ICmat) <- mice
rownames(ICmat) <- rownames(tab2)
for (i in seq_along(mice)) {
  subjID <- mice[i]
  mapsub <- subset(map4igg, expmouse == subjID)
  # assumes exactly one positive and one negative sample per subject --
  # TODO confirm; note 'negpospre' differs from the 'negpos' column above
  possamp <- rownames(subset(mapsub, negpospre == "pos"))
  negsamp <- rownames(subset(mapsub, negpospre == "neg"))
  # remove ASVs not detected in either pos or neg fraction by zeroing both
  # counts, forcing NaN at the log-ratio calculation below:
  zeroesinpos <- (tab[, possamp] == 1)
  zeroesinneg <- (tab[, negsamp] == 1)
  bothzeroes <- zeroesinpos + zeroesinneg
  bothzeropositions <- (bothzeroes == 2)
  tab[bothzeropositions, possamp] <- 0
  tab[bothzeropositions, negsamp] <- 0
  # log10 ratio (pos/neg) across all taxa for this subject's sample pair:
  ICmat[, i] <- log(tab[, possamp], 10) - log(tab[, negsamp], 10)
}
|
# 21 May 2010: small changes to output when there is just one model. J. Fox
# 15 Aug 2010: changed name of function to compareCoefs to avoid name clash. J. Fox
# 18 May 2011: check for 'mer' objects, and handle them correctly. S. Weisberg
# 8 Sep 2011: check for 'lme' objects, and handle them correctly. S. Weisberg
# 11 Jan 2012: fix to work with any 'S4' object with a coef() method.
# suggested by David Hugh-Jones University of Warwick http://davidhughjones.googlepages.com
# 3 May 2012: fixed bug if models are less than full rank.
# 17 Sept 2012: suppressing printing calls when there are none. J. Fox
# 22 June 2013: tweaks for lme4. J. Fox
# 26 Sept 2014: cleaned up printing of calls. J. Fox
# 2016-07-20: added test for model classes. J. Fox
# Build a side-by-side table of coefficient estimates (and optionally
# standard errors) for one or more fitted models, printing the model calls
# and a formatted coefficient matrix, or returning the raw matrix when
# print = FALSE. Mixed models ('merMod'/'mer' from lme4, 'lme' from nlme)
# are compared on their fixed effects; other models need coef()/vcov().
#
# ...    : fitted model objects to compare
# se     : include a standard-error column for each model?
# print  : print calls and formatted table, or return the matrix?
# digits : significant digits for the printed table
compareCoefs <- function (..., se = TRUE, print = TRUE, digits = 3) {
# Recursively wrap a deparsed call string at separators (space/comma/=)
# so it fits the console width; break points inside quoted strings or
# after a '#' comment are protected from being used.
splitExpr <- function(expr, width=getOption("width") - 4, at="[ ,=]"){
# already multi-line: wrap each line independently
if (length(grep("\n", expr)) >0 ){
cmds <- strsplit(expr, "\n")[[1]]
allcmds <- character(length(cmds))
for (i in 1:length(cmds))
allcmds[i] <- splitExpr(cmds[i], width=width, at=at)
return(paste(allcmds, collapse="\n"))
}
# short enough: nothing to do
if (nchar(expr) <= width) return(expr)
# candidate break positions
where <- gregexpr(at, expr)[[1]]
if (where[1] < 0) return(expr)
singleQuotes <- gregexpr("'", expr)[[1]]
doubleQuotes <- gregexpr('"', expr)[[1]]
comment <- regexpr("#", expr)
# discard break points that fall inside single-quoted strings
if (singleQuotes[1] > 0 && (singleQuotes[1] < doubleQuotes[1] || doubleQuotes[1] < 0 ) && (singleQuotes[1] < comment[1] || comment[1] < 0 )){
nquotes <- length(singleQuotes)
if (nquotes < 2) stop("unbalanced quotes")
for(i in seq(nquotes/2))
where[(where > singleQuotes[2 * i - 1]) & (where < singleQuotes[2 * i])] <- NA
where <- na.omit(where)
}
# ... or inside double-quoted strings
else if (doubleQuotes[1] > 0 && (doubleQuotes[1] < singleQuotes[1] || singleQuotes[1] < 0) && (doubleQuotes[1] < comment[1] || comment[1] < 0 )){
nquotes <- length(doubleQuotes)
if (nquotes < 2) stop("unbalanced quotes")
for(i in seq(nquotes/2))
where[(where > doubleQuotes[2 * i - 1]) & (where < doubleQuotes[2 * i])] <- NA
where <- na.omit(where)
}
# ... or anything after a comment marker
else if (comment > 0){
where[where > comment] <- NA
where <- na.omit(where)
}
if (length(where) == 0) return(expr)
# break at the last separator that still fits; otherwise the first one
where2 <- where[where <= width]
where2 <- if (length(where2) == 0) where[1]
else where2[length(where2)]
# emit the head, then recurse on the remainder of the string
paste(substr(expr, 1, where2), "\n ",
Recall(substr(expr, where2 + 1, nchar(expr)), width, at), sep="")
}
# strip literal leading/trailing double quotes left by deparse()
removeExtraQuotes <- function(string) sub("\\\"$", "", sub("^\\\"", "", string))
# collapse runs of multiple spaces that deparse() can introduce
squeezeMultipleSpaces <- function(string) gsub(" {2,}", " ", string)
# n-ary set intersection, used to find classes shared by all models
intersection <- function(...){
args <- list(...)
if (length(args) == 2) intersect(args[[1]], args[[2]])
else intersect(args[[1]], do.call(intersection, args[-1]))
}
models <- list(...)
n.models <- length(models)
if (n.models < 1)
return(NULL)
# warn (but continue) when the models share no common class
if (n.models > 1){
classes <- lapply(models, class)
common.classes <- do.call(intersection, classes)
if (length(common.classes) == 0)
warning("models to be compared are of different classes")
}
# coefficient names: fixed effects for mixed models, coef() otherwise.
# NOTE(review): mixes scalar '||' with elementwise '|'; harmless here
# because inherits() returns a scalar, but '||' throughout would be
# conventional.
getnames <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer") |
inherits(model, "lme"))
names(fixef(model))
else names(coef(model))
}
# coefficient values, with the same mixed-model special case
getcoef <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer") |
inherits(model, "lme"))
fixef(model)
else coef(model)
}
# the model's call deparsed to a single string ("NULL" when absent)
getcall <- function(model) {
paste(deparse(if (isS4(model)) model@call else model$call), collapse="")
}
# covariance matrix of the estimates ('mer' objects need coercion)
getvar <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer"))
as.matrix(vcov(model))
else vcov(model)
}
# rows of the output table: union of coefficient names across models
coef.names <- unique(unlist(lapply(models, getnames)))
# one column per model, or an (Est., SE) pair per model when se = TRUE
table <- matrix(NA, length(coef.names), n.models * (1 + se))
rownames(table) <- coef.names
colnames(table) <- if (se)
if (n.models > 1)
paste(rep(c("Est.", "SE"), n.models), rep(1:n.models,
each = 2))
else c("Estimate", "Std. Error")
else if (n.models > 1)
paste(rep("Est.", n.models), 1:n.models)
else "Estimate"
# suppress call printing if any model lacks a recorded call
calls <- !any(sapply(models, getcall) == "NULL")
if (print == TRUE && calls)
cat("\nCall:")
# fill the table model by model, printing each call as we go
for (i in 1:n.models) {
model <- models[[i]]
fout <- getcall(model)
mod <- if (n.models > 1)
paste(i, ": ", sep = "")
else ""
if (print && calls)
cat(splitExpr(squeezeMultipleSpaces(paste("\n", mod, removeExtraQuotes(fout[1]),
sep = ""))))
# NOTE(review): getcall() collapses to a length-1 string, so this
# multi-element branch appears unreachable -- confirm before relying on it
if (print && calls && length(fout) > 1)
for (f in fout[-1]) cat("\n", splitExpr(squeezeMultipleSpaces(removeExtraQuotes(f))))
if (se) {
ests <- getcoef(model)
# second column holds the SEs; aliased (NA) coefficients keep NA
new <- cbind(ests, rep(NA, length(ests)))
new[!is.na(ests), 2] <- sqrt(diag(getvar(model)))
table[getnames(model), 2 * (i - 1) + c(1, 2)] <- new
}
else table[getnames(model), i] <- getcoef(model)
}
if (print == TRUE) {
cat("\n")
# formatted coefficient matrix; no test-statistic columns
printCoefmat(table, na.print = "", digits = digits, tst.ind = NULL)
}
else table
}
| /R/compareCoefs.R | no_license | jonathon-love/car3 | R | false | false | 5,297 | r | # 21 May 2010: small changes to output when there is just one model. J. Fox
# 15 Aug 2010: changed name of function to compareCoefs to avoid name clash. J. Fox
# 18 May 2011: check for 'mer' objects, and handle them correctly. S. Weisberg
# 8 Sep 2011: check for 'lme' objects, and handle them correctly. S. Weisberg
# 11 Jan 2012: fix to work with any 'S4' object with a coef() method.
# suggested by David Hugh-Jones University of Warwick http://davidhughjones.googlepages.com
# 3 May 2012: fixed bug if models are less than full rank.
# 17 Sept 2012: suppressing printing calls when there are none. J. Fox
# 22 June 2013: tweaks for lme4. J. Fox
# 26 Sept 2014: cleaned up printing of calls. J. Fox
# 2016-07-20: added test for model classes. J. Fox
# Build a side-by-side table of coefficient estimates (and optionally
# standard errors) for one or more fitted models, printing the model calls
# and a formatted coefficient matrix, or returning the raw matrix when
# print = FALSE. Mixed models ('merMod'/'mer' from lme4, 'lme' from nlme)
# are compared on their fixed effects; other models need coef()/vcov().
#
# ...    : fitted model objects to compare
# se     : include a standard-error column for each model?
# print  : print calls and formatted table, or return the matrix?
# digits : significant digits for the printed table
compareCoefs <- function (..., se = TRUE, print = TRUE, digits = 3) {
# Recursively wrap a deparsed call string at separators (space/comma/=)
# so it fits the console width; break points inside quoted strings or
# after a '#' comment are protected from being used.
splitExpr <- function(expr, width=getOption("width") - 4, at="[ ,=]"){
# already multi-line: wrap each line independently
if (length(grep("\n", expr)) >0 ){
cmds <- strsplit(expr, "\n")[[1]]
allcmds <- character(length(cmds))
for (i in 1:length(cmds))
allcmds[i] <- splitExpr(cmds[i], width=width, at=at)
return(paste(allcmds, collapse="\n"))
}
# short enough: nothing to do
if (nchar(expr) <= width) return(expr)
# candidate break positions
where <- gregexpr(at, expr)[[1]]
if (where[1] < 0) return(expr)
singleQuotes <- gregexpr("'", expr)[[1]]
doubleQuotes <- gregexpr('"', expr)[[1]]
comment <- regexpr("#", expr)
# discard break points that fall inside single-quoted strings
if (singleQuotes[1] > 0 && (singleQuotes[1] < doubleQuotes[1] || doubleQuotes[1] < 0 ) && (singleQuotes[1] < comment[1] || comment[1] < 0 )){
nquotes <- length(singleQuotes)
if (nquotes < 2) stop("unbalanced quotes")
for(i in seq(nquotes/2))
where[(where > singleQuotes[2 * i - 1]) & (where < singleQuotes[2 * i])] <- NA
where <- na.omit(where)
}
# ... or inside double-quoted strings
else if (doubleQuotes[1] > 0 && (doubleQuotes[1] < singleQuotes[1] || singleQuotes[1] < 0) && (doubleQuotes[1] < comment[1] || comment[1] < 0 )){
nquotes <- length(doubleQuotes)
if (nquotes < 2) stop("unbalanced quotes")
for(i in seq(nquotes/2))
where[(where > doubleQuotes[2 * i - 1]) & (where < doubleQuotes[2 * i])] <- NA
where <- na.omit(where)
}
# ... or anything after a comment marker
else if (comment > 0){
where[where > comment] <- NA
where <- na.omit(where)
}
if (length(where) == 0) return(expr)
# break at the last separator that still fits; otherwise the first one
where2 <- where[where <= width]
where2 <- if (length(where2) == 0) where[1]
else where2[length(where2)]
# emit the head, then recurse on the remainder of the string
paste(substr(expr, 1, where2), "\n ",
Recall(substr(expr, where2 + 1, nchar(expr)), width, at), sep="")
}
# strip literal leading/trailing double quotes left by deparse()
removeExtraQuotes <- function(string) sub("\\\"$", "", sub("^\\\"", "", string))
# collapse runs of multiple spaces that deparse() can introduce
squeezeMultipleSpaces <- function(string) gsub(" {2,}", " ", string)
# n-ary set intersection, used to find classes shared by all models
intersection <- function(...){
args <- list(...)
if (length(args) == 2) intersect(args[[1]], args[[2]])
else intersect(args[[1]], do.call(intersection, args[-1]))
}
models <- list(...)
n.models <- length(models)
if (n.models < 1)
return(NULL)
# warn (but continue) when the models share no common class
if (n.models > 1){
classes <- lapply(models, class)
common.classes <- do.call(intersection, classes)
if (length(common.classes) == 0)
warning("models to be compared are of different classes")
}
# coefficient names: fixed effects for mixed models, coef() otherwise.
# NOTE(review): mixes scalar '||' with elementwise '|'; harmless here
# because inherits() returns a scalar, but '||' throughout would be
# conventional.
getnames <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer") |
inherits(model, "lme"))
names(fixef(model))
else names(coef(model))
}
# coefficient values, with the same mixed-model special case
getcoef <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer") |
inherits(model, "lme"))
fixef(model)
else coef(model)
}
# the model's call deparsed to a single string ("NULL" when absent)
getcall <- function(model) {
paste(deparse(if (isS4(model)) model@call else model$call), collapse="")
}
# covariance matrix of the estimates ('mer' objects need coercion)
getvar <- function(model) {
if (inherits(model, "merMod") || inherits(model, "mer"))
as.matrix(vcov(model))
else vcov(model)
}
# rows of the output table: union of coefficient names across models
coef.names <- unique(unlist(lapply(models, getnames)))
# one column per model, or an (Est., SE) pair per model when se = TRUE
table <- matrix(NA, length(coef.names), n.models * (1 + se))
rownames(table) <- coef.names
colnames(table) <- if (se)
if (n.models > 1)
paste(rep(c("Est.", "SE"), n.models), rep(1:n.models,
each = 2))
else c("Estimate", "Std. Error")
else if (n.models > 1)
paste(rep("Est.", n.models), 1:n.models)
else "Estimate"
# suppress call printing if any model lacks a recorded call
calls <- !any(sapply(models, getcall) == "NULL")
if (print == TRUE && calls)
cat("\nCall:")
# fill the table model by model, printing each call as we go
for (i in 1:n.models) {
model <- models[[i]]
fout <- getcall(model)
mod <- if (n.models > 1)
paste(i, ": ", sep = "")
else ""
if (print && calls)
cat(splitExpr(squeezeMultipleSpaces(paste("\n", mod, removeExtraQuotes(fout[1]),
sep = ""))))
# NOTE(review): getcall() collapses to a length-1 string, so this
# multi-element branch appears unreachable -- confirm before relying on it
if (print && calls && length(fout) > 1)
for (f in fout[-1]) cat("\n", splitExpr(squeezeMultipleSpaces(removeExtraQuotes(f))))
if (se) {
ests <- getcoef(model)
# second column holds the SEs; aliased (NA) coefficients keep NA
new <- cbind(ests, rep(NA, length(ests)))
new[!is.na(ests), 2] <- sqrt(diag(getvar(model)))
table[getnames(model), 2 * (i - 1) + c(1, 2)] <- new
}
else table[getnames(model), i] <- getcoef(model)
}
if (print == TRUE) {
cat("\n")
# formatted coefficient matrix; no test-statistic columns
printCoefmat(table, na.print = "", digits = digits, tst.ind = NULL)
}
else table
}
|
#####################################################################################
#######This script was initially built by CRLandry, JBLeducq and modified by CEberlein
#####################################################################################
###it was further modified by GCharron
##and then edited by JFWolters
#####################################################################################
# Reads the consolidated colony-size data, maps foil IDs to strain names and
# nuclear/mito backgrounds, removes contaminated/unusable samples, and writes
# diploid colony-size differences between time points to tab-delimited files.
#####################################################################################
# reading data and preparing table
#####################################################################################
classes = c("numeric","numeric","character","numeric","factor","numeric","factor","factor")
# Row Col Name Size Media Timepoint Temp Array(D or H)
# NOTE(review): setwd() makes the script location-dependent; all relative
# paths below resolve under ./data
setwd("./data")
df = read.table("consolidated_data.txt",header=T, colClasses=classes)
## Add a column "Condition", which is the combination of Media and Temperature
Condition = paste(df$Media, df$Temp, sep = "_")
Well=paste("r",df$Row,"c",df$Col,sep="")
df$Condition = factor(Condition)
df$Well = Well
# #log transform the size
# logsize <- log(df$Size)
# df$LogSize = logsize
####RENAME THE STRAIN NAMES
####include vectors with nuclear and mito designation
info = read.table("JW Strain List for May 2016 mailed strains.txt",
header=T, sep = "\t", colClasses = c("character"))
# prefix foil numbers: rows 1-81 get "h", rows 82-129 get "d".
# NOTE(review): the 81/129 split is hard-coded; confirm it still matches
# the strain-list file if that file ever changes.
for(i in 1:81){
info$Foil.No.[i] = paste("h",info$Foil.No.[i],sep="")
}
for(i in 82:129){
info$Foil.No.[i] = paste("d",info$Foil.No.[i],sep="")
}
# map each foil ID in df to its nuclear/mito background and real strain name
df$Nuc = vector(length=nrow(df))
df$Mito = vector(length=nrow(df))
for(i in 1:nrow(info)){
id = info$Foil.No.[i]
df$Nuc[df$Name == id] = info$Nuclear[i]
df$Mito[df$Name == id] = info$Mito[i]
df$Name[df$Name == id] = info$Strain.Name[i] #order is important, must replace name last
}
# the "by" control keeps its own label in both background columns
df$Nuc[df$Name == "by"] = "by"
df$Mito[df$Name == "by"] = "by"
####CLEAN AND TRANSFORM THE DATA
#SEE data_cleaning_notes.txt
# contaminated = c("h18","h70","h73","h81","d96") #foil numbers
contaminated = c("273614NY12",
"SK1BC187",
"SK1Y12",
"Y12Y12",
"YJM975YJM975 rho YJM975/Y12 G1 d99",
"YJM975YJM975 rho YJM975/Y12 G1 d111",
"YJM975YJM975 rho YJM975/Y12 d113") #actual names
#need to go back and check if h66 or h67 needs to be removed
#removing h81 not because of contamination but because I am quite sure it is not the strain
#that I think it is, unfortunately this means S1s1 is gone from the screen
df = subset(df,!df$Name %in% contaminated)
##remove time point 1 due to problems with this set
df = subset(df, !(df$Time.Point == 4.53))
##remove the outer rings of spots because this data is not meaningful
df = subset(df, !(df$Row < 3 | df$Row > 30 | df$Col < 3 | df$Col > 46))
#The L2 nuclear background was unusable due to flocculation
#removing it from the data frame
df = subset(df,df$Nuc != "SK1")
###AVERAGE THE REPLICATES
agg = aggregate(formula = Size ~ Name + Condition + Time.Point + Array + Nuc + Mito, data=df, FUN=median)
#####Determine size difference average
#Take the size difference between time at 26 hours and time 0
# NOTE(review): the subtraction below relies on agg_t0 and agg_t26 having
# identical row order (same strain/condition in each row) -- verify, or
# use an explicit merge on the grouping columns.
agg_t0 = agg[agg$Time.Point == 0 , ]
agg_t26 = agg[agg$Time.Point == 26.02 ,]
size_diff = agg_t26$Size - agg_t0$Size
diff_df = cbind(agg_t0, size_diff)
#drop columns that no longer have meaning (positional indices are fragile)
diff_df = diff_df[, c(1,2,4,5,6,8)]
####Subset for diploids
diff_df = subset(diff_df, diff_df$Array == "D")
#record the size_diff data table
write.table(diff_df,file="diploid_size_diff.tab",row.names=FALSE,sep="\t")
####REPEAT BUT THIS TIME OUTPUT ALL REPLICATES
#Take the size difference between 47.15 hours and time 0 (the 26-hour
#version is kept below but commented out); same row-order caveat applies
df_t0 = df[df$Time.Point == 0 , ]
df_t26 = df[df$Time.Point == 26.02 ,]
df_t47 = df[df$Time.Point == 47.15, ]
# size_diff = df_t26$Size - df_t0$Size
size_diff = df_t47$Size - df_t0$Size
diff_df = cbind(df_t0, size_diff)
#drop columns that no longer have meaning
diff_df = diff_df[, c(1,2,3,5,7,8,9,11,12,13)]
####Subset for diploids
diff_df = subset(diff_df, diff_df$Array == "D")
#record the size_diff data table
write.table(diff_df,file="diploid_size_diff.all_reps.tab",row.names=FALSE,sep="\t")
| /compare_size_diploids.R | no_license | JFWolters/Wolters-Genetics-2018 | R | false | false | 4,277 | r | #####################################################################################
#######This script was initially built by CRLandry, JBLeducq and modified by CEberlein
#####################################################################################
###it was further modified by GCharron
##and then edited by JFWolters
#####################################################################################
# Reads the consolidated colony-size data, maps foil IDs to strain names and
# nuclear/mito backgrounds, removes contaminated/unusable samples, and writes
# diploid colony-size differences between time points to tab-delimited files.
#####################################################################################
# reading data and preparing table
#####################################################################################
classes = c("numeric","numeric","character","numeric","factor","numeric","factor","factor")
# Row Col Name Size Media Timepoint Temp Array(D or H)
# NOTE(review): setwd() makes the script location-dependent; all relative
# paths below resolve under ./data
setwd("./data")
df = read.table("consolidated_data.txt",header=T, colClasses=classes)
## Add a column "Condition", which is the combination of Media and Temperature
Condition = paste(df$Media, df$Temp, sep = "_")
Well=paste("r",df$Row,"c",df$Col,sep="")
df$Condition = factor(Condition)
df$Well = Well
# #log transform the size
# logsize <- log(df$Size)
# df$LogSize = logsize
####RENAME THE STRAIN NAMES
####include vectors with nuclear and mito designation
info = read.table("JW Strain List for May 2016 mailed strains.txt",
header=T, sep = "\t", colClasses = c("character"))
# prefix foil numbers: rows 1-81 get "h", rows 82-129 get "d".
# NOTE(review): the 81/129 split is hard-coded; confirm it still matches
# the strain-list file if that file ever changes.
for(i in 1:81){
info$Foil.No.[i] = paste("h",info$Foil.No.[i],sep="")
}
for(i in 82:129){
info$Foil.No.[i] = paste("d",info$Foil.No.[i],sep="")
}
# map each foil ID in df to its nuclear/mito background and real strain name
df$Nuc = vector(length=nrow(df))
df$Mito = vector(length=nrow(df))
for(i in 1:nrow(info)){
id = info$Foil.No.[i]
df$Nuc[df$Name == id] = info$Nuclear[i]
df$Mito[df$Name == id] = info$Mito[i]
df$Name[df$Name == id] = info$Strain.Name[i] #order is important, must replace name last
}
# the "by" control keeps its own label in both background columns
df$Nuc[df$Name == "by"] = "by"
df$Mito[df$Name == "by"] = "by"
####CLEAN AND TRANSFORM THE DATA
#SEE data_cleaning_notes.txt
# contaminated = c("h18","h70","h73","h81","d96") #foil numbers
contaminated = c("273614NY12",
"SK1BC187",
"SK1Y12",
"Y12Y12",
"YJM975YJM975 rho YJM975/Y12 G1 d99",
"YJM975YJM975 rho YJM975/Y12 G1 d111",
"YJM975YJM975 rho YJM975/Y12 d113") #actual names
#need to go back and check if h66 or h67 needs to be removed
#removing h81 not because of contamination but because I am quite sure it is not the strain
#that I think it is, unfortunately this means S1s1 is gone from the screen
df = subset(df,!df$Name %in% contaminated)
##remove time point 1 due to problems with this set
df = subset(df, !(df$Time.Point == 4.53))
##remove the outer rings of spots because this data is not meaningful
df = subset(df, !(df$Row < 3 | df$Row > 30 | df$Col < 3 | df$Col > 46))
#The L2 nuclear background was unusable due to flocculation
#removing it from the data frame
df = subset(df,df$Nuc != "SK1")
###AVERAGE THE REPLICATES
agg = aggregate(formula = Size ~ Name + Condition + Time.Point + Array + Nuc + Mito, data=df, FUN=median)
#####Determine size difference average
#Take the size difference between time at 26 hours and time 0
# NOTE(review): the subtraction below relies on agg_t0 and agg_t26 having
# identical row order (same strain/condition in each row) -- verify, or
# use an explicit merge on the grouping columns.
agg_t0 = agg[agg$Time.Point == 0 , ]
agg_t26 = agg[agg$Time.Point == 26.02 ,]
size_diff = agg_t26$Size - agg_t0$Size
diff_df = cbind(agg_t0, size_diff)
#drop columns that no longer have meaning (positional indices are fragile)
diff_df = diff_df[, c(1,2,4,5,6,8)]
####Subset for diploids
diff_df = subset(diff_df, diff_df$Array == "D")
#record the size_diff data table
write.table(diff_df,file="diploid_size_diff.tab",row.names=FALSE,sep="\t")
####REPEAT BUT THIS TIME OUTPUT ALL REPLICATES
#Take the size difference between 47.15 hours and time 0 (the 26-hour
#version is kept below but commented out); same row-order caveat applies
df_t0 = df[df$Time.Point == 0 , ]
df_t26 = df[df$Time.Point == 26.02 ,]
df_t47 = df[df$Time.Point == 47.15, ]
# size_diff = df_t26$Size - df_t0$Size
size_diff = df_t47$Size - df_t0$Size
diff_df = cbind(df_t0, size_diff)
#drop columns that no longer have meaning
diff_df = diff_df[, c(1,2,3,5,7,8,9,11,12,13)]
####Subset for diploids
diff_df = subset(diff_df, diff_df$Array == "D")
#record the size_diff data table
write.table(diff_df,file="diploid_size_diff.all_reps.tab",row.names=FALSE,sep="\t")
|
ponies <- c(
"Twilight Sparkle",
"Rainbow Dash",
"Pinkie Pie",
"Applejack",
"Rarity",
"FlutterShy"
)
rpony <- function(n) {
sample(ponies, n, replace = TRUE)
}
| /R/rpony.R | no_license | loubajuk/mylittlepony | R | false | false | 175 | r | ponies <- c(
"Twilight Sparkle",
"Rainbow Dash",
"Pinkie Pie",
"Applejack",
"Rarity",
"FlutterShy"
)
rpony <- function(n) {
sample(ponies, n, replace = TRUE)
}
|
testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = NaN, temp = numeric(0), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result) | /meteor/inst/testfiles/E_Penman/libFuzzer_E_Penman/E_Penman_valgrind_files/1612738496-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 217 | r | testlist <- list(Rext = numeric(0), Rs = numeric(0), Z = numeric(0), alpha = numeric(0), atmp = numeric(0), relh = NaN, temp = numeric(0), u = numeric(0))
result <- do.call(meteor:::E_Penman,testlist)
str(result) |
findwtdinteraction <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL,
atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL,
atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20,
type="response", approach="prototypical", data=NULL){
UseMethod("findwtdinteraction")
}
| /R/findwtdinteraction.R | no_license | davidaarmstrong/weights | R | false | false | 364 | r | findwtdinteraction <- function(x, across, by=NULL, at=NULL, acrosslevs=NULL, bylevs=NULL,
atlevs=NULL, weight=NULL, dvname=NULL, acclevnames=NULL, bylevnames=NULL,
atlevnames=NULL, stdzacross=FALSE, stdzby=FALSE, stdzat=FALSE, limitlevs=20,
type="response", approach="prototypical", data=NULL){
UseMethod("findwtdinteraction")
}
|
#' Remove probes
#'
#' Remove promiscuous probes that map to different reference ids (usually gene symbols).
#'
#' @param fdata Feature data (dataframe) with unique rows. The output of get_annotation() function
#' @param probe_col Probe column name in fdata dataframe
#' @param ref_col Column name of the reference id to be counted for each probe.
#'
#' @return ExpressionSet object with filtered assayData slot for promiscous probes
#' @export
#'
#' @examples
remove_probes <- function(fdata, probe_col, ref_col = "hgnc_symbol") {
fdata <- unique(na.omit(fdata))
id1 <- dplyr::sym(probe_col)
id2 <- dplyr::sym(ref_col)
# Remove promiscuous probes
id1 <- dplyr::sym(probe_col)
id2 <- dplyr::sym(ref_col)
rem_probes <- fdata %>%
dplyr::group_by((!!id1)) %>%
dplyr::summarise(c = dplyr::n_distinct((!!id2))) %>%
dplyr::filter(c > 1) %>%
dplyr::select((!!id1)) %>%
unlist(use.names = F)
# Subset fdata by probes that are not promiscuous
probes_in <- fdata[,probe_col] %in% rem_probes
fdata <- fdata[!probes_in,]
# Filter fdata containing not-duplicated values
dup <- sum(duplicated(fdata[,probe_col]))
fdata <- fdata %>%
dplyr::filter(!duplicated((!!id1)))
message(dup, " duplicated probes in feature data were removed.")
return(fdata)
}
| /R/remove_probes.R | no_license | cfreis/MicroarrayMethods | R | false | false | 1,298 | r | #' Remove probes
#'
#' Remove promiscuous probes that map to different reference ids (usually gene symbols).
#'
#' @param fdata Feature data (dataframe) with unique rows. The output of get_annotation() function
#' @param probe_col Probe column name in fdata dataframe
#' @param ref_col Column name of the reference id to be counted for each probe.
#'
#' @return ExpressionSet object with filtered assayData slot for promiscous probes
#' @export
#'
#' @examples
remove_probes <- function(fdata, probe_col, ref_col = "hgnc_symbol") {
fdata <- unique(na.omit(fdata))
id1 <- dplyr::sym(probe_col)
id2 <- dplyr::sym(ref_col)
# Remove promiscuous probes
id1 <- dplyr::sym(probe_col)
id2 <- dplyr::sym(ref_col)
rem_probes <- fdata %>%
dplyr::group_by((!!id1)) %>%
dplyr::summarise(c = dplyr::n_distinct((!!id2))) %>%
dplyr::filter(c > 1) %>%
dplyr::select((!!id1)) %>%
unlist(use.names = F)
# Subset fdata by probes that are not promiscuous
probes_in <- fdata[,probe_col] %in% rem_probes
fdata <- fdata[!probes_in,]
# Filter fdata containing not-duplicated values
dup <- sum(duplicated(fdata[,probe_col]))
fdata <- fdata %>%
dplyr::filter(!duplicated((!!id1)))
message(dup, " duplicated probes in feature data were removed.")
return(fdata)
}
|
library(wooldridge)
### Name: wagepan
### Title: wagepan
### Aliases: wagepan
### Keywords: datasets
### ** Examples
str(wagepan)
| /data/genthat_extracted_code/wooldridge/examples/wagepan.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 138 | r | library(wooldridge)
### Name: wagepan
### Title: wagepan
### Aliases: wagepan
### Keywords: datasets
### ** Examples
str(wagepan)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_game_info.R
\name{scrape_game_info}
\alias{scrape_game_info}
\title{Scrape MLB Players Data}
\usage{
scrape_game_info(gids)
}
\arguments{
\item{gids}{Gameday URLs vector.}
}
\value{
list
}
\description{
Function for obtaining MLB Players Data.
\href{http://gd2.mlb.com/components/game/mlb/year_2019/month_04/day_04/gid_2019_04_04_wasmlb_nynmlb_1/players.xml}{players.xml}
}
\examples{
data(game_ids, package = "pitchRx2")
gid <- str_subset(game_ids, "^gid_2019_04_05_")
scrape_game_info(gid)
}
| /man/scrape_game_info.Rd | permissive | pontsuyu/pitchRx2 | R | false | true | 579 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrape_game_info.R
\name{scrape_game_info}
\alias{scrape_game_info}
\title{Scrape MLB Players Data}
\usage{
scrape_game_info(gids)
}
\arguments{
\item{gids}{Gameday URLs vector.}
}
\value{
list
}
\description{
Function for obtaining MLB Players Data.
\href{http://gd2.mlb.com/components/game/mlb/year_2019/month_04/day_04/gid_2019_04_04_wasmlb_nynmlb_1/players.xml}{players.xml}
}
\examples{
data(game_ids, package = "pitchRx2")
gid <- str_subset(game_ids, "^gid_2019_04_05_")
scrape_game_info(gid)
}
|
##############################################################################
# BioC 3.0
# Created 23 Apr 2015
# Investigate why the LR is sometimes negative; Compare different mode
# Update 16 May 2015
# Simulate case with 2 transcripts with params that will lead to negative LR
##############################################################################
setwd("/home/gosia/Multinomial_project/Simulations_DM/")
out.dir <- "NegativeLR/"
dir.create(out.dir, showWarnings=F, recursive=T)
library(DM)
library(limma)
source("/home/gosia/R/R_Multinomial_project/DM_package_devel/0_my_printHead.R")
library(edgeR)
#### function to simulate data from DM
source("/home/gosia/R/R_Multinomial_project/Analysis_SimDM/simulate_from_DM.R")
### Source all R files in DM package
Rfiles <- list.files("/home/gosia/R/R_Multinomial_project/DM_package_devel/DM/R/", full.names=TRUE)
for(i in 1:length(Rfiles)) source(Rfiles[i])
##################################################################################
# Simulate data from two group null distribution with common dispersion
##################################################################################
### Scenario parameters
######### Check scenario
nBins <- 3
simPar <- list(s = "check", sample.size = 20, pi.org = rep(1, nBins)/nBins , g0.org = 100, nr.genes = 1e+04, nM = 150, tot = "uni")
######### Like in GEUVADIS for snp_19_17269822-ENSG00000099331.7 - one with negative LR and two transcripts
simPar <- list(s = "GEUVADIS_snp_19_17269822", sample.size = 100, pi.org = c(0.4, 0.6) , g0.org = 7, nr.genes = 1e+04, nM = 2000, tot = "uni")
######### Like in GEUVADIS for the SNP that have the most negative LR - snp_5_179056159-ENSG00000169045.13
piH <- c(0.4922335018, 0.1577883831, 0.0529355261, 0.0293984041, 0.0290847238, 0.0264224072, 0.0243318117, 0.0207018908, 0.0188256676, 0.0184684653, 0.0156071528, 0.0118260446, 0.0115589007, 0.0070278078, 0.0057829094, 0.0055859233, 0.0051052026, 0.0047757111, 0.0046757892, 0.0046102791, 0.0045847635, 0.0042218958, 0.0041134432, 0.0040331904, 0.0039526359, 0.0034897977, 0.0034433870, 0.0032155074, 0.0027587187, 0.0027284563, 0.0024924688, 0.0023142979, 0.0019838135, 0.0019837859, 0.0014639012, 0.0012791484, 0.0010303418, 0.0007425876, 0.0007168352, 0.0006505579, 0.0005237112, 0.0004658400, 0.0004015046, 0.0003565343, 0.0002020756, 0.0001042983)[1:10]
pi.org <- piH/sum(piH)
simPar <- list(s = "GEUVADIS_snp_5_179056159_10tr_df_g01", sample.size = 100, pi.org = pi.org, g0.org = 0.1092517, nr.genes = 1e+03, nM = 20000, tot = "uni")
######### Scenario with 2 transcripts and negative LR
simPar <- list(s = "negativeLR_3trans", sample.size = 100, group = factor(c(rep("C1", 80), rep("C2", 20))), pi.org = c(0.7, 0.2, 0.1) , g0.org = 5, nr.genes = 1e+03, nM = 1000, tot = "uni")
######### simulate...
mcCores <- 20
out.dir.s <- paste0(out.dir, "/", simPar$s, "/")
dir.create(out.dir.s, showWarnings=F, recursive=T)
sim <- simulate_from_DM(s = simPar$s, sample.size = simPar$sample.size, group = simPar$group, pi.org = simPar$pi.org, g0.org = simPar$g0.org, nr.genes = simPar$nr.genes, nM = simPar$nM, tot = simPar$tot, nD = simPar$nM, out.dir = out.dir.s , mc.cores=mcCores, save = FALSE)
save(sim, file = paste0(out.dir.s, "/sim.RData"))
##################################################################################
#### Run DM pipeline, but do not estimate dispersion. Use true value as common dispersion
##################################################################################
### load simulation data
# load()
### run DM
dgeDMList <- list()
modeList = c("constrOptim", "constrOptim2", "constrOptim2G", "optim2", "optim2NM", "FisherScoring")
### when using optim2:
# Error in optim(par = piInit[-k], fn = dmLogLikkm1, gr = dmScoreFunkm1, :
# L-BFGS-B needs finite values of 'fn'
# In addition: Warning messages:
# 1: In log(pi[i] * gamma0 + 1:y[i, j] - 1) : NaNs produced
# 2: In log(pi[i] * gamma0 + 1:y[i, j] - 1) : NaNs produced
### when using FisherScoring:
# * Negative piH: 0.5724435 0.1574793 0.0500953 0.04474325 0.0479419 0.01765365 0.06932008
# 0.04035913 0.0002681423 -0.0003042877
# piInit: 0.3635897 0.02963637 0.02267271 0.2320393 0.1595044 0.1328343 0.05497171 0.004189322
# 5.63362e-09 0.0005621153
mode <- modeList[3]
dge <- sim$dge
dgeDM <- dmFit(dge, group=NULL, dispersion = simPar$g0.org, mode = mode, epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode = mode, epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDMList[[mode]] <- dgeDM
save(dgeDMList, file = paste0(out.dir.s, "/dgeDMList.RData"))
cat("LR < 0 \n")
print(table(dgeDM$table$LR < 0))
head(dgeDM$table[dgeDM$table$LR < 0, ])
pdf(paste0(out.dir.s, "/hist_", mode,".pdf"))
hist(dgeDM$table$PValue, breaks = 100, col = "#1E90FF")
hist(dgeDM$table$LR, breaks = 100, col = "#1E90FF")
dev.off()
############ Compare constrOptim2 with constrOptim2G
names(dgeDMList)
table(dgeDMList[[1]]$table$LR == dgeDMList[[2]]$table$LR)
tab <- merge(dgeDMList[[1]]$table, dgeDMList[[2]]$table, by = "GeneID", suffixes = c("_co2g", "_co2"))
pdf(paste0(out.dir.s, "/hist_diffLR.pdf"))
hist(tab$LR_co2g - tab$LR_co2, breaks = 100, col = "#1E90FF")
dev.off()
tab[tab$LR_co2g < 0 | tab$LR_co2 < 0, ]
### LR < 0 for co2g
table(tab$LR_co2g < 0)
table(tab$LR_co2 < 0)
### more FP for co2
table(tab$FDR_co2g < 0.05)
table(tab$FDR_co2 < 0.05)
############ Check the piH estimates for the genes where LR < 0
library(ggplot2)
library(reshape2)
library(gridExtra)
library(RColorBrewer)
plotProportions <- function(dgeDM, genes2plot, plotPath){
pdf(plotPath, width = 10, height = 5)
for(g in 1:length(genes2plot)){
# g = 1
gene <- genes2plot[g]
# print(gene)
Condition <- dgeDM$samples$group
expr <- dgeDM$counts[[gene]]
# colnames(expr) <- metadata$SampleName
rownames(expr) <- subset(dgeDM$genes, gene_id==gene)$ete_id
tot <- colSums(expr)
labels <- strsplit2(rownames(expr), ":")[,2]
prop.smp <- data.frame( ete_id = labels, t(apply(expr, 1, function(t){ t / tot })))
n <- nrow(expr)
prop.est <- data.frame(ete_id = labels, dgeDM$fit[[gene]]$piH)
prop.est.null <- data.frame(ete_id = labels, dgeDM$fit.null[[gene]]$piH)
prop.smp.m <- melt(prop.smp, id.vars = "ete_id", variable.name = "Samples", value.name = "Proportions")
prop.smp.m$ete_id <- factor(prop.smp.m$ete_id, levels = unique(prop.smp.m$ete_id))
prop.smp.m$Samples <- factor(prop.smp.m$Samples)
prop.smp.m$Condition <- rep(Condition, each = nrow(prop.smp))
prop.est.m <- melt(prop.est, id.vars = "ete_id", variable.name = "Samples", value.name = "Proportions")
prop.est.m$ete_id <- factor(prop.est.m$ete_id, levels = unique(prop.est.m$ete_id))
prop.est.m$Samples <- factor(prop.est.m$Samples)
colnames(prop.est.null) <- c("ete_id", "Proportions")
### box plots with points - 2 groups only
ggb <- ggplot(prop.smp.m, aes(x = ete_id, y = Proportions)) +
theme_bw() +
theme(axis.text.x = element_text(angle = 20, vjust = 0.5), axis.text=element_text(size=12), axis.title=element_text(size=12, face="bold"), legend.position="none", plot.title = element_text(size=10)) +
ggtitle(paste0(gene, "\n TagwiseDispersion = ", dgeDM$fit[[gene]]$gamma0, "\n LR = ", dgeDM$table[dgeDM$table$GeneID == gene, "LR"], " / PValue = ", dgeDM$table[dgeDM$table$GeneID == gene, "PValue"])) +
geom_jitter(aes(fill = Condition, colour = factor(Condition, labels=c("C1b", "C2b"))), position = position_jitterdodge(dodge.width = 0.75), alpha = 0.5) +
geom_boxplot(aes(colour = Condition), fill = "white", outlier.size = NA, alpha = 0) +
geom_point(data = prop.est.m, aes(x = ete_id, y = Proportions, fill = Samples), position = position_jitterdodge(jitter.width = 0, jitter.height = 0), size = 2, shape = 19, colour = "black") +
geom_point(data = prop.est.null, aes(x = ete_id, y = Proportions), size = 3, shape = 18, colour = "orange") +
scale_colour_manual(values=c("C1"="firebrick", "C2"="dodgerblue4", "C1b"="firebrick1", "C2b" = "dodgerblue")) +
coord_cartesian(ylim = c(-0.1, 1.1))
print(ggb)
}
dev.off()
}
genes2plot <- head(dgeDM$table[order(dgeDM$table$LR, decreasing = FALSE), "GeneID"])
plotPath <- paste0(out.dir.s, "/Proportions_", mode,"_negativeLR.pdf")
plotProportions(dgeDM, genes2plot, plotPath)
### plot negative LR
tab <- tab[order(tab$LR_co2g, decreasing = FALSE), ]
### plot FP
tab <- tab[order(tab$PValue_co2g, decreasing = FALSE), ]
### only for one method
tab <- dgeDMList[[1]]$table
## negative LR
tab <- tab[order(tab$LR, decreasing = FALSE), ]
## FP
tab <- tab[order(tab$PValue, decreasing = FALSE), ]
genes2plot <- as.character(tab$GeneID[1:4])
mode <- c("constrOptim2G", "constrOptim2")[1]
dgeDM <- dgeDMList[[mode]]
plotPath <- paste0(out.dir.s, "/Proportions_", mode,"_FP.pdf")
plotProportions(dgeDM, genes2plot, plotPath)
| /simulations_dm/dmPlots_negativeLR.R | no_license | gosianow/multinomial_project | R | false | false | 9,218 | r | ##############################################################################
# BioC 3.0
# Created 23 Apr 2015
# Investigate why the LR is sometimes negative; Compare different mode
# Update 16 May 2015
# Simulate case with 2 transcripts with params that will lead to negative LR
##############################################################################
setwd("/home/gosia/Multinomial_project/Simulations_DM/")
out.dir <- "NegativeLR/"
dir.create(out.dir, showWarnings=F, recursive=T)
library(DM)
library(limma)
source("/home/gosia/R/R_Multinomial_project/DM_package_devel/0_my_printHead.R")
library(edgeR)
#### function to simulate data from DM
source("/home/gosia/R/R_Multinomial_project/Analysis_SimDM/simulate_from_DM.R")
### Source all R files in DM package
Rfiles <- list.files("/home/gosia/R/R_Multinomial_project/DM_package_devel/DM/R/", full.names=TRUE)
for(i in 1:length(Rfiles)) source(Rfiles[i])
##################################################################################
# Simulate data from two group null distribution with common dispersion
##################################################################################
### Scenario parameters
######### Check scenario
nBins <- 3
simPar <- list(s = "check", sample.size = 20, pi.org = rep(1, nBins)/nBins , g0.org = 100, nr.genes = 1e+04, nM = 150, tot = "uni")
######### Like in GEUVADIS for snp_19_17269822-ENSG00000099331.7 - one with negative LR and two transcripts
simPar <- list(s = "GEUVADIS_snp_19_17269822", sample.size = 100, pi.org = c(0.4, 0.6) , g0.org = 7, nr.genes = 1e+04, nM = 2000, tot = "uni")
######### Like in GEUVADIS for the SNP that have the most negative LR - snp_5_179056159-ENSG00000169045.13
piH <- c(0.4922335018, 0.1577883831, 0.0529355261, 0.0293984041, 0.0290847238, 0.0264224072, 0.0243318117, 0.0207018908, 0.0188256676, 0.0184684653, 0.0156071528, 0.0118260446, 0.0115589007, 0.0070278078, 0.0057829094, 0.0055859233, 0.0051052026, 0.0047757111, 0.0046757892, 0.0046102791, 0.0045847635, 0.0042218958, 0.0041134432, 0.0040331904, 0.0039526359, 0.0034897977, 0.0034433870, 0.0032155074, 0.0027587187, 0.0027284563, 0.0024924688, 0.0023142979, 0.0019838135, 0.0019837859, 0.0014639012, 0.0012791484, 0.0010303418, 0.0007425876, 0.0007168352, 0.0006505579, 0.0005237112, 0.0004658400, 0.0004015046, 0.0003565343, 0.0002020756, 0.0001042983)[1:10]
pi.org <- piH/sum(piH)
simPar <- list(s = "GEUVADIS_snp_5_179056159_10tr_df_g01", sample.size = 100, pi.org = pi.org, g0.org = 0.1092517, nr.genes = 1e+03, nM = 20000, tot = "uni")
######### Scenario with 2 transcripts and negative LR
simPar <- list(s = "negativeLR_3trans", sample.size = 100, group = factor(c(rep("C1", 80), rep("C2", 20))), pi.org = c(0.7, 0.2, 0.1) , g0.org = 5, nr.genes = 1e+03, nM = 1000, tot = "uni")
######### simulate...
mcCores <- 20
out.dir.s <- paste0(out.dir, "/", simPar$s, "/")
dir.create(out.dir.s, showWarnings=F, recursive=T)
sim <- simulate_from_DM(s = simPar$s, sample.size = simPar$sample.size, group = simPar$group, pi.org = simPar$pi.org, g0.org = simPar$g0.org, nr.genes = simPar$nr.genes, nM = simPar$nM, tot = simPar$tot, nD = simPar$nM, out.dir = out.dir.s , mc.cores=mcCores, save = FALSE)
save(sim, file = paste0(out.dir.s, "/sim.RData"))
##################################################################################
#### Run DM pipeline, but do not estimate dispersion. Use true value as common dispersion
##################################################################################
### load simulation data
# load()
### run DM
dgeDMList <- list()
modeList = c("constrOptim", "constrOptim2", "constrOptim2G", "optim2", "optim2NM", "FisherScoring")
### when using optim2:
# Error in optim(par = piInit[-k], fn = dmLogLikkm1, gr = dmScoreFunkm1, :
# L-BFGS-B needs finite values of 'fn'
# In addition: Warning messages:
# 1: In log(pi[i] * gamma0 + 1:y[i, j] - 1) : NaNs produced
# 2: In log(pi[i] * gamma0 + 1:y[i, j] - 1) : NaNs produced
### when using FisherScoring:
# * Negative piH: 0.5724435 0.1574793 0.0500953 0.04474325 0.0479419 0.01765365 0.06932008
# 0.04035913 0.0002681423 -0.0003042877
# piInit: 0.3635897 0.02963637 0.02267271 0.2320393 0.1595044 0.1328343 0.05497171 0.004189322
# 5.63362e-09 0.0005621153
mode <- modeList[3]
dge <- sim$dge
dgeDM <- dmFit(dge, group=NULL, dispersion = simPar$g0.org, mode = mode, epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDM <- dmTest(dgeDM, mode = mode, epsilon = 1e-05, maxIte = 1000, verbose=FALSE, mcCores = mcCores)
dgeDMList[[mode]] <- dgeDM
save(dgeDMList, file = paste0(out.dir.s, "/dgeDMList.RData"))
cat("LR < 0 \n")
print(table(dgeDM$table$LR < 0))
head(dgeDM$table[dgeDM$table$LR < 0, ])
pdf(paste0(out.dir.s, "/hist_", mode,".pdf"))
hist(dgeDM$table$PValue, breaks = 100, col = "#1E90FF")
hist(dgeDM$table$LR, breaks = 100, col = "#1E90FF")
dev.off()
############ Compare constrOptim2 with constrOptim2G
names(dgeDMList)
table(dgeDMList[[1]]$table$LR == dgeDMList[[2]]$table$LR)
tab <- merge(dgeDMList[[1]]$table, dgeDMList[[2]]$table, by = "GeneID", suffixes = c("_co2g", "_co2"))
pdf(paste0(out.dir.s, "/hist_diffLR.pdf"))
hist(tab$LR_co2g - tab$LR_co2, breaks = 100, col = "#1E90FF")
dev.off()
tab[tab$LR_co2g < 0 | tab$LR_co2 < 0, ]
### LR < 0 for co2g
table(tab$LR_co2g < 0)
table(tab$LR_co2 < 0)
### more FP for co2
table(tab$FDR_co2g < 0.05)
table(tab$FDR_co2 < 0.05)
############ Check the piH estimates for the genes where LR < 0
library(ggplot2)
library(reshape2)
library(gridExtra)
library(RColorBrewer)
plotProportions <- function(dgeDM, genes2plot, plotPath){
pdf(plotPath, width = 10, height = 5)
for(g in 1:length(genes2plot)){
# g = 1
gene <- genes2plot[g]
# print(gene)
Condition <- dgeDM$samples$group
expr <- dgeDM$counts[[gene]]
# colnames(expr) <- metadata$SampleName
rownames(expr) <- subset(dgeDM$genes, gene_id==gene)$ete_id
tot <- colSums(expr)
labels <- strsplit2(rownames(expr), ":")[,2]
prop.smp <- data.frame( ete_id = labels, t(apply(expr, 1, function(t){ t / tot })))
n <- nrow(expr)
prop.est <- data.frame(ete_id = labels, dgeDM$fit[[gene]]$piH)
prop.est.null <- data.frame(ete_id = labels, dgeDM$fit.null[[gene]]$piH)
prop.smp.m <- melt(prop.smp, id.vars = "ete_id", variable.name = "Samples", value.name = "Proportions")
prop.smp.m$ete_id <- factor(prop.smp.m$ete_id, levels = unique(prop.smp.m$ete_id))
prop.smp.m$Samples <- factor(prop.smp.m$Samples)
prop.smp.m$Condition <- rep(Condition, each = nrow(prop.smp))
prop.est.m <- melt(prop.est, id.vars = "ete_id", variable.name = "Samples", value.name = "Proportions")
prop.est.m$ete_id <- factor(prop.est.m$ete_id, levels = unique(prop.est.m$ete_id))
prop.est.m$Samples <- factor(prop.est.m$Samples)
colnames(prop.est.null) <- c("ete_id", "Proportions")
### box plots with points - 2 groups only
ggb <- ggplot(prop.smp.m, aes(x = ete_id, y = Proportions)) +
theme_bw() +
theme(axis.text.x = element_text(angle = 20, vjust = 0.5), axis.text=element_text(size=12), axis.title=element_text(size=12, face="bold"), legend.position="none", plot.title = element_text(size=10)) +
ggtitle(paste0(gene, "\n TagwiseDispersion = ", dgeDM$fit[[gene]]$gamma0, "\n LR = ", dgeDM$table[dgeDM$table$GeneID == gene, "LR"], " / PValue = ", dgeDM$table[dgeDM$table$GeneID == gene, "PValue"])) +
geom_jitter(aes(fill = Condition, colour = factor(Condition, labels=c("C1b", "C2b"))), position = position_jitterdodge(dodge.width = 0.75), alpha = 0.5) +
geom_boxplot(aes(colour = Condition), fill = "white", outlier.size = NA, alpha = 0) +
geom_point(data = prop.est.m, aes(x = ete_id, y = Proportions, fill = Samples), position = position_jitterdodge(jitter.width = 0, jitter.height = 0), size = 2, shape = 19, colour = "black") +
geom_point(data = prop.est.null, aes(x = ete_id, y = Proportions), size = 3, shape = 18, colour = "orange") +
scale_colour_manual(values=c("C1"="firebrick", "C2"="dodgerblue4", "C1b"="firebrick1", "C2b" = "dodgerblue")) +
coord_cartesian(ylim = c(-0.1, 1.1))
print(ggb)
}
dev.off()
}
genes2plot <- head(dgeDM$table[order(dgeDM$table$LR, decreasing = FALSE), "GeneID"])
plotPath <- paste0(out.dir.s, "/Proportions_", mode,"_negativeLR.pdf")
plotProportions(dgeDM, genes2plot, plotPath)
### plot negative LR
tab <- tab[order(tab$LR_co2g, decreasing = FALSE), ]
### plot FP
tab <- tab[order(tab$PValue_co2g, decreasing = FALSE), ]
### only for one method
tab <- dgeDMList[[1]]$table
## negative LR
tab <- tab[order(tab$LR, decreasing = FALSE), ]
## FP
tab <- tab[order(tab$PValue, decreasing = FALSE), ]
genes2plot <- as.character(tab$GeneID[1:4])
mode <- c("constrOptim2G", "constrOptim2")[1]
dgeDM <- dgeDMList[[mode]]
plotPath <- paste0(out.dir.s, "/Proportions_", mode,"_FP.pdf")
plotProportions(dgeDM, genes2plot, plotPath)
|
#READ THE TEST DATA
test_data_001<-readRDS("../testdata/test_data_001.rds")
test_data_002<-test_data_001[1:8,]
test_data_003<-test_data_001[1:9,]
test_data_004<-test_data_001[2:9,]
test_that("matrix is produced",{
expect_that(oddstable(test_data_001), is_a("matrix"))
})
test_that("matrix has the correct number of rows",{
expect_that(nrow(oddstable(test_data_001)), equals(9))
expect_that(nrow(oddstable(test_data_002)), equals(4))
expect_that(nrow(oddstable(test_data_004)), equals(4))
})
test_that("takes only input with even number of rows",{
expect_error(oddstable(test_data_003), "Aborting! The number of input rows is odd!")
})
| /tests/testthat/test-orm_oddstable.R | no_license | cran/ormPlot | R | false | false | 679 | r |
#READ THE TEST DATA
test_data_001<-readRDS("../testdata/test_data_001.rds")
test_data_002<-test_data_001[1:8,]
test_data_003<-test_data_001[1:9,]
test_data_004<-test_data_001[2:9,]
test_that("matrix is produced",{
expect_that(oddstable(test_data_001), is_a("matrix"))
})
test_that("matrix has the correct number of rows",{
expect_that(nrow(oddstable(test_data_001)), equals(9))
expect_that(nrow(oddstable(test_data_002)), equals(4))
expect_that(nrow(oddstable(test_data_004)), equals(4))
})
test_that("takes only input with even number of rows",{
expect_error(oddstable(test_data_003), "Aborting! The number of input rows is odd!")
})
|
testlist <- list(holes = integer(0), numholes = integer(0), x = c(3.95252516672997e-322, 1.32515051110526e-105, 2.1644539979134e+233, 2.44047694750493e-152, 1.06399915245291e+248, 3.68069868587423e+180, 1.63591854993985e-306, 0), y = numeric(0))
result <- do.call(decido:::earcut_cpp,testlist)
str(result) | /decido/inst/testfiles/earcut_cpp/libFuzzer_earcut_cpp/earcut_cpp_valgrind_files/1609874356-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 308 | r | testlist <- list(holes = integer(0), numholes = integer(0), x = c(3.95252516672997e-322, 1.32515051110526e-105, 2.1644539979134e+233, 2.44047694750493e-152, 1.06399915245291e+248, 3.68069868587423e+180, 1.63591854993985e-306, 0), y = numeric(0))
result <- do.call(decido:::earcut_cpp,testlist)
str(result) |
library(qqman)
fst50kb<-read.table(file="50kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst250kb<-read.table(file="250kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst500kb<-read.table(file="500kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst1mb<-read.table(file="1mbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst1.5mb<-read.table(file="1.5mbfst2.txt.windowed.weir.fst" ,header=TRUE)
manhattan(fst50kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"), main="Weir and Cockerham FST 50kb window")
abline(h=0.6,col="black")
manhattan(fst250kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 250kb window")
abline(h=0.6,col="black")
manhattan(fst500kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 500kb window")
abline(h=0.6,col="black")
manhattan(fst1mb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab="FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 1mb window")
abline(h=0.6,col="black")
manhattan(fst1.5mb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 1.5mb window")
abline(h=0.6,col="black")
#Rhesus Tajimas D
td50kb<-read.table(file="50kbTajD_rhe.Tajima.D" ,header=TRUE)
td250kb<-read.table(file="250kbTajD_rhe.Tajima.D" ,header=TRUE)
td500kb<-read.table(file="500kbTajD_rhe.Tajima.D" ,header=TRUE)
td750kb<-read.table(file="750kbTajD_rhe.Tajima.D" ,header=TRUE)
td1mb<-read.table(file="1mbTajD_rhe.Tajima.D" ,header=TRUE)
manhattan(td50kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 50kb window")
abline(h=0,col="black")
manhattan(td250kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 250kb window")
abline(h=0,col="black")
manhattan(td500kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 500kb window")
abline(h=0,col="black")
manhattan(td750kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 750kb window")
abline(h=0,col="black")
manhattan(td1mb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 1mb window")
abline(h=0,col="black")
##Rhesus Pi
pi50kb<-read.table(file="pi" ,header=TRUE)
pi250kb<-read.table(file="pi250kb_rhe.windowed.pi" ,header=TRUE)
pi500kb<-read.table(file="pi500kb_rhe.windowed.pi" ,header=TRUE)
pi750kb<-read.table(file="pi750kb_rhe.windowed.pi" ,header=TRUE)
pi1mb<-read.table(file="pi1mb_rhe.windowed.pi" ,header=TRUE)
manhattan(pi250kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 250kb window")
manhattan(pi500kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 500kb window")
manhattan(pi750kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 750kb window")
manhattan(pi1mb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 1mb window")
#Fascicularis Tajimas D
td50kb<-read.table(file="50kbTajD_fas.Tajima.D" ,header=TRUE)
td250kb<-read.table(file="250kbTajD_fas.Tajima.D" ,header=TRUE)
td500kb<-read.table(file="500kbTajD_fas.Tajima.D" ,header=TRUE)
td750kb<-read.table(file="750kbTajD_fas.Tajima.D" ,header=TRUE)
td1mb<-read.table(file="1mbTajD_fas.Tajima.D" ,header=TRUE)
manhattan(td50kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 50kb window")
abline(h=0,col="black")
manhattan(td250kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 250kb window")
abline(h=0,col="black")
manhattan(td500kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 500kb window")
abline(h=0,col="black")
manhattan(td750kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 750kb window")
abline(h=0,col="black")
manhattan(td1mb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 1mb window")
abline(h=0,col="black")
#Fascicularis Pi
pi50kb<-read.table(file="pi50kb_fas.windowed.pi" ,header=TRUE)
pi250kb<-read.table(file="pi250kb_fas.windowed.pi" ,header=TRUE)
pi500kb<-read.table(file="pi500kb_fas.windowed.pi" ,header=TRUE)
pi750kb<-read.table(file="pi750kb_fas.windowed.pi" ,header=TRUE)
pi1mb<-read.table(file="pi1mb_fas.windowed.pi" ,header=TRUE)
manhattan(pi50kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(-0,0.1), main="Pairwise Nucleotide Divergence 50kb window")
manhattan(pi250kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 250kb window")
manhattan(pi500kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 500kb window")
manhattan(pi750kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 750kb window")
manhattan(pi1mb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 1mb window")
#Dot plots
# Load pairwise-alignment dot-plot coordinates (Rhesus vs Fascicularis),
# one ".rdotplot" file per chromosome.
library(ggplot2)
# NOTE(review): ggplot2 is loaded (twice in this section) but the dot plots
# below use base graphics only.
chr1<-read.table("chr1.rdotplot",header=TRUE)
chr2<-read.table("chr2.rdotplot",header=TRUE)
chr3<-read.table("chr3.rdotplot",header=TRUE)
chr4<-read.table("chr4.rdotplot",header=TRUE)
chr5<-read.table("chr5.rdotplot",header=TRUE)
chr6<-read.table("chr6.rdotplot",header=TRUE)
chr7<-read.table("chr7.rdotplot",header=TRUE)
chr8<-read.table("chr8.rdotplot",header=TRUE)
chr9<-read.table("chr9.rdotplot",header=TRUE)
chr10<-read.table("chr10.rdotplot",header=TRUE)
chr11<-read.table("chr11.rdotplot",header=TRUE)
chr12<-read.table("chr12.rdotplot",header=TRUE)
chr13<-read.table("chr13.rdotplot",header=TRUE)
chr14<-read.table("chr14.rdotplot",header=TRUE)
chr15<-read.table("chr15.rdotplot",header=TRUE)
chr16<-read.table("chr16.rdotplot",header=TRUE)
chr17<-read.table("chr17.rdotplot",header=TRUE)
chr18<-read.table("chr18.rdotplot",header=TRUE)
# BUGFIX: was "19.rdotplot", breaking the "chrN.rdotplot" naming pattern used
# by every other chromosome; restored the "chr" prefix.  If the file on disk
# really is named "19.rdotplot", revert this.
chr19<-read.table("chr19.rdotplot",header=TRUE)
chr20<-read.table("chr20.rdotplot",header=TRUE)
chr21<-read.table("chr21.rdotplot",header=TRUE)
chr22<-read.table("chr22.rdotplot",header=TRUE)
library(ggplot2)
#chr1<-read.table(file="1.rdotplot", as.is=TRUE)
# Close the current graphics device before setting up the panel layout.
dev.off()
# Draw all 22 dot plots as line plots; note par(mfrow=c(5,4)) provides only
# 20 panels, so the last two plots start a fresh page.  The final two panels
# are titled "X" and "M" -- presumably the X and mitochondrial chromosomes
# stored as chr21/chr22; confirm.  (Leading spaces in some axis labels are
# inconsistent in the original and are preserved here.)
par(mfrow=c(5,4))
plot(chr1,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 1")
plot(chr2,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 2")
plot(chr3,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="3")
plot(chr4,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 4")
plot(chr5,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 5")
plot(chr6,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 6")
plot(chr7,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 7")
plot(chr8,type='l',xlab="Rhesus",ylab=" Fascicularis" , main="8")
plot(chr9,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 9")
plot(chr10,type='l',xlab="Rhesus",ylab=" Fascicularis" , main=" 10")
plot(chr11,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="11")
plot(chr12,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 12")
plot(chr13,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 13")
plot(chr14,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="14")
plot(chr15,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="15")
plot(chr16,type='l',xlab="Rhesus",ylab="Fascicularis" , main="16")
plot(chr17,type='l',xlab="Rhesus",ylab="Fascicularis" , main="17")
plot(chr18,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="18")
plot(chr19,type='l',xlab="Rhesus",ylab="Fascicularis" , main="19")
plot(chr20,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="20")
plot(chr21,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="X")
plot(chr22,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="M")
| /Manhatten_dot.R | no_license | aubcar/Masters_Work | R | false | false | 8,785 | r | library(qqman)
# Weir & Cockerham FST scans at several window sizes (".windowed.weir.fst"
# files, apparently vcftools output -- confirm).  The horizontal line at 0.6
# is apparently used as an outlier threshold for candidate windows -- confirm.
fst50kb<-read.table(file="50kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst250kb<-read.table(file="250kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst500kb<-read.table(file="500kbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst1mb<-read.table(file="1mbfst2.txt.windowed.weir.fst" ,header=TRUE)
fst1.5mb<-read.table(file="1.5mbfst2.txt.windowed.weir.fst" ,header=TRUE)
manhattan(fst50kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"), main="Weir and Cockerham FST 50kb window")
abline(h=0.6,col="black")
manhattan(fst250kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 250kb window")
abline(h=0.6,col="black")
manhattan(fst500kb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 500kb window")
abline(h=0.6,col="black")
manhattan(fst1mb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab="FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 1mb window")
abline(h=0.6,col="black")
manhattan(fst1.5mb,chr="CHROM",bp="BIN_START",p="WEIGHTED_FST",logp=FALSE,ylab=" FST",col=c("blue4","orange3"),main="Weir and Cockerham FST 1.5mb window")
abline(h=0.6,col="black")
#Rhesus Tajimas D
# Tajima's D per window for rhesus; the line at D = 0 marks the neutral
# expectation.
td50kb<-read.table(file="50kbTajD_rhe.Tajima.D" ,header=TRUE)
td250kb<-read.table(file="250kbTajD_rhe.Tajima.D" ,header=TRUE)
td500kb<-read.table(file="500kbTajD_rhe.Tajima.D" ,header=TRUE)
td750kb<-read.table(file="750kbTajD_rhe.Tajima.D" ,header=TRUE)
td1mb<-read.table(file="1mbTajD_rhe.Tajima.D" ,header=TRUE)
manhattan(td50kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 50kb window")
abline(h=0,col="black")
manhattan(td250kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 250kb window")
abline(h=0,col="black")
manhattan(td500kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 500kb window")
abline(h=0,col="black")
manhattan(td750kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 750kb window")
abline(h=0,col="black")
manhattan(td1mb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-2.5,2.5), main="Tajimas D 1mb window")
abline(h=0,col="black")
##Rhesus Pi
# Windowed nucleotide diversity for rhesus (".windowed.pi" files).
# BUGFIX: the 50kb file name was the truncated string "pi"; restored to
# "pi50kb_rhe.windowed.pi" to match the naming pattern of the other windows
# (cf. "pi50kb_fas.windowed.pi" in the fascicularis section).  If the file on
# disk really was named "pi", revert this.
pi50kb<-read.table(file="pi50kb_rhe.windowed.pi" ,header=TRUE)
pi250kb<-read.table(file="pi250kb_rhe.windowed.pi" ,header=TRUE)
pi500kb<-read.table(file="pi500kb_rhe.windowed.pi" ,header=TRUE)
pi750kb<-read.table(file="pi750kb_rhe.windowed.pi" ,header=TRUE)
pi1mb<-read.table(file="pi1mb_rhe.windowed.pi" ,header=TRUE)
# NOTE(review): unlike the fascicularis section, no manhattan() plot is drawn
# for the 50kb window here (pi50kb is read but unused) -- confirm whether the
# 50kb plot was omitted intentionally.
manhattan(pi250kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 250kb window")
manhattan(pi500kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 500kb window")
manhattan(pi750kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 750kb window")
manhattan(pi1mb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 1mb window")
#Fascicularis Tajimas D
# Tajima's D per window for fascicularis; the line at D = 0 marks the neutral
# expectation.
td50kb<-read.table(file="50kbTajD_fas.Tajima.D" ,header=TRUE)
td250kb<-read.table(file="250kbTajD_fas.Tajima.D" ,header=TRUE)
td500kb<-read.table(file="500kbTajD_fas.Tajima.D" ,header=TRUE)
td750kb<-read.table(file="750kbTajD_fas.Tajima.D" ,header=TRUE)
td1mb<-read.table(file="1mbTajD_fas.Tajima.D" ,header=TRUE)
manhattan(td50kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 50kb window")
abline(h=0,col="black")
manhattan(td250kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 250kb window")
abline(h=0,col="black")
manhattan(td500kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 500kb window")
abline(h=0,col="black")
manhattan(td750kb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 750kb window")
abline(h=0,col="black")
manhattan(td1mb,chr="CHROM",bp="BIN_START",p="TajimaD",snp="N_SNPS" ,logp=FALSE,ylab=" TD",col=c("blue4","orange3"),ylim=c(-3,3), main="Tajimas D 1mb window")
abline(h=0,col="black")
#Fascicularis Pi
# Windowed nucleotide diversity for fascicularis, plotted per window size.
pi50kb<-read.table(file="pi50kb_fas.windowed.pi" ,header=TRUE)
pi250kb<-read.table(file="pi250kb_fas.windowed.pi" ,header=TRUE)
pi500kb<-read.table(file="pi500kb_fas.windowed.pi" ,header=TRUE)
pi750kb<-read.table(file="pi750kb_fas.windowed.pi" ,header=TRUE)
pi1mb<-read.table(file="pi1mb_fas.windowed.pi" ,header=TRUE)
# NOTE(review): ylim=c(-0,0.1) below -- "-0" is just 0 in R; harmless, but
# inconsistent with the other calls.
manhattan(pi50kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(-0,0.1), main="Pairwise Nucleotide Divergence 50kb window")
manhattan(pi250kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 250kb window")
manhattan(pi500kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 500kb window")
manhattan(pi750kb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 750kb window")
manhattan(pi1mb,chr="CHROM",bp="BIN_START",p="PI",snp="N_VARIANTS",logp=FALSE,ylab="PI",col=c("blue4","orange3"),ylim=c(0,0.1), main="Pairwise Nucleotide Divergence 1mb window")
#Dot plots
# Load pairwise-alignment dot-plot coordinates (Rhesus vs Fascicularis),
# one ".rdotplot" file per chromosome.
library(ggplot2)
# NOTE(review): ggplot2 is loaded (twice in this section) but the dot plots
# below use base graphics only.
chr1<-read.table("chr1.rdotplot",header=TRUE)
chr2<-read.table("chr2.rdotplot",header=TRUE)
chr3<-read.table("chr3.rdotplot",header=TRUE)
chr4<-read.table("chr4.rdotplot",header=TRUE)
chr5<-read.table("chr5.rdotplot",header=TRUE)
chr6<-read.table("chr6.rdotplot",header=TRUE)
chr7<-read.table("chr7.rdotplot",header=TRUE)
chr8<-read.table("chr8.rdotplot",header=TRUE)
chr9<-read.table("chr9.rdotplot",header=TRUE)
chr10<-read.table("chr10.rdotplot",header=TRUE)
chr11<-read.table("chr11.rdotplot",header=TRUE)
chr12<-read.table("chr12.rdotplot",header=TRUE)
chr13<-read.table("chr13.rdotplot",header=TRUE)
chr14<-read.table("chr14.rdotplot",header=TRUE)
chr15<-read.table("chr15.rdotplot",header=TRUE)
chr16<-read.table("chr16.rdotplot",header=TRUE)
chr17<-read.table("chr17.rdotplot",header=TRUE)
chr18<-read.table("chr18.rdotplot",header=TRUE)
# BUGFIX: was "19.rdotplot", breaking the "chrN.rdotplot" naming pattern used
# by every other chromosome; restored the "chr" prefix.  If the file on disk
# really is named "19.rdotplot", revert this.
chr19<-read.table("chr19.rdotplot",header=TRUE)
chr20<-read.table("chr20.rdotplot",header=TRUE)
chr21<-read.table("chr21.rdotplot",header=TRUE)
chr22<-read.table("chr22.rdotplot",header=TRUE)
library(ggplot2)
#chr1<-read.table(file="1.rdotplot", as.is=TRUE)
# Close the current graphics device before setting up the panel layout.
dev.off()
# Draw all 22 dot plots as line plots; note par(mfrow=c(5,4)) provides only
# 20 panels, so the last two plots start a fresh page.  The final two panels
# are titled "X" and "M" -- presumably the X and mitochondrial chromosomes
# stored as chr21/chr22; confirm.  (Leading spaces in some axis labels are
# inconsistent in the original and are preserved here.)
par(mfrow=c(5,4))
plot(chr1,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 1")
plot(chr2,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 2")
plot(chr3,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="3")
plot(chr4,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 4")
plot(chr5,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 5")
plot(chr6,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 6")
plot(chr7,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 7")
plot(chr8,type='l',xlab="Rhesus",ylab=" Fascicularis" , main="8")
plot(chr9,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main=" 9")
plot(chr10,type='l',xlab="Rhesus",ylab=" Fascicularis" , main=" 10")
plot(chr11,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="11")
plot(chr12,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 12")
plot(chr13,type='l',xlab=" Rhesus",ylab="Fascicularis" , main=" 13")
plot(chr14,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="14")
plot(chr15,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="15")
plot(chr16,type='l',xlab="Rhesus",ylab="Fascicularis" , main="16")
plot(chr17,type='l',xlab="Rhesus",ylab="Fascicularis" , main="17")
plot(chr18,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="18")
plot(chr19,type='l',xlab="Rhesus",ylab="Fascicularis" , main="19")
plot(chr20,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="20")
plot(chr21,type='l',xlab=" Rhesus",ylab=" Fascicularis" , main="X")
plot(chr22,type='l',xlab=" Rhesus",ylab="Fascicularis" , main="M")
|
# Auto-extracted example for anominate::densplot.anominate.
library(anominate)
### Name: densplot.anominate
### Title: alpha-NOMINATE Density Plot Function
### Aliases: densplot.anominate
### Keywords: ideal point estimation, NOMINATE, Bayesian latent variable
### models
### ** Examples
# Load the 111th Senate roll-call data and a pre-fitted anominate object,
# then draw the density plot.
data(sen111)
data(sen111_anom)
densplot.anominate(sen111_anom)
| /data/genthat_extracted_code/anominate/examples/densplot.anominate.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 303 | r | library(anominate)
### Name: densplot.anominate
### Title: alpha-NOMINATE Density Plot Function
### Aliases: densplot.anominate
### Keywords: ideal point estimation, NOMINATE, Bayesian latent variable
### models
### ** Examples
# Load the 111th Senate roll-call data and a pre-fitted anominate object,
# then draw the density plot.
data(sen111)
data(sen111_anom)
densplot.anominate(sen111_anom)
|
# Example data frame with a misspelled column name ("oczo") and decimal
# commas in the character column `wiek` (Polish for "age").
DF <- data.frame(
  imie = c("Maja","Anna","Zosia","Anna"),
  wiek = c("40","12,5","25","16.6"),
  numer = c(1,2,NA,4),
  oczo = factor(c("niebieskie", "jasno-niebieskie", "ciemne", "niebieskie")),
  stringsAsFactors = FALSE)
# Rename the fourth column from "oczo" to "oczy".
names(DF)[4] <- "oczy"
# Replace decimal commas with dots; wrapping the assignment in parentheses
# makes the result print to the console.
(tmp <- gsub(",", ".", DF$wiek))
DF$wiek <- as.numeric(tmp) | /pogromcydanych/czyszczenie_danych.R | no_license | Transgredi/Playing-with-R | R | false | false | 409 | r | DF <- data.frame(
imie = c("Maja","Anna","Zosia","Anna"),
wiek = c("40","12,5","25","16.6"),
numer = c(1,2,NA,4),
oczo = factor(c("niebieskie", "jasno-niebieskie", "ciemne", "niebieskie")),
stringsAsFactors = FALSE)
# Rename the misspelled fourth column ("oczo") to "oczy".
colnames(DF)[4] <- "oczy"
# Thanks to the parentheses, the result of the assignment
# is printed to the console.
(tmp <- gsub(pattern = ",", replacement = ".", DF$wiek))
DF$wiek <- as.numeric(tmp) |
#------------------------------------------------------------------------------
# plot3: energy sub-metering for 2007-02-01/02, drawn as three overlaid line
# series and saved to plot3.png with a transparent background.
plotXsize <- 480
plotYsize <- 480
# First two columns (Date, Time) read as character, the remaining 7 as numeric.
readClasses <- c(rep("character",2), rep("numeric",7))
#data <- read.csv("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?", stringsAsFactors=FALSE, colClasses=readClasses)
# Reads file into data.frame
# NOTE: for practical reasons I set a limit on nrow to read just past the range
# that includes the dates that we want to examine. It is a quick hack
# to save some time.
# The script does an actual filtering on the dates in anycase.
data <- read.csv("household_power_consumption.txt",
nrow=100000,
header=TRUE,
sep=";",
na.strings="?",
stringsAsFactors=FALSE,
colClasses=readClasses)
# Converts 'Date' column into a proper date format
data$Date <- as.POSIXct(strptime(data$Date,"%d/%m/%Y"))
# Filters the data.frame on wanted dates
data <- data[data$Date == as.POSIXct("2007-02-01") | data$Date == as.POSIXct("2007-02-02"),]
# Creates new time column combining date and time
data$newTime <- as.POSIXct(strptime( paste(as.character(data$Date),data$Time), "%F %T"))
# Actual PLOT begins here.
# cex scales with the output width so larger PNGs keep proportional text.
png(filename="plot3.png", width=plotXsize, height=plotYsize, bg="transparent")
par(cex = plotXsize/480)
# Empty frame sized to hold all three series; each series is then drawn on top.
plot(c(data$newTime,data$newTime,data$newTime),
c(data$Sub_metering_1,data$Sub_metering_2,data$Sub_metering_3),
type="n",
xlab="",
ylab="Energy sub metering",
main="",
pty="s",
bg="transparent")
lines(data$newTime,data$Sub_metering_1,col="black")
lines(data$newTime,data$Sub_metering_3,col="blue")
lines(data$newTime,data$Sub_metering_2,col="red")
# Legend order (1, 2, 3) matches the col vector (black, red, blue).
legend(x="topright",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1))
dev.off()
#------------------------------------------------------------------------------
| /EDA1/plot3.R | no_license | pedrosan/DS_specialization | R | false | false | 1,931 | r | #------------------------------------------------------------------------------
plotXsize <- 480
plotYsize <- 480
readClasses <- c(rep("character",2), rep("numeric",7))
#data <- read.csv("household_power_consumption.txt", header=TRUE, sep=";", na.strings="?", stringsAsFactors=FALSE, colClasses=readClasses)
# Reads file into data.frame
# NOTE: for practical reasons I set a limit on nrow to read just past the range
# that includes the dates that we want to examine. It is a quick hack
# to save some time.
# The script does an actual filtering on the dates in anycase.
data <- read.csv("household_power_consumption.txt",
nrow=100000,
header=TRUE,
sep=";",
na.strings="?",
stringsAsFactors=FALSE,
colClasses=readClasses)
# Converts 'Date' column into a proper date format
data$Date <- as.POSIXct(strptime(data$Date,"%d/%m/%Y"))
# Filters the data.frame on wanted dates
data <- data[data$Date == as.POSIXct("2007-02-01") | data$Date == as.POSIXct("2007-02-02"),]
# Creates new time column combining date and time
data$newTime <- as.POSIXct(strptime( paste(as.character(data$Date),data$Time), "%F %T"))
# Actual PLOT begins here.
png(filename="plot3.png", width=plotXsize, height=plotYsize, bg="transparent")
par(cex = plotXsize/480)
plot(c(data$newTime,data$newTime,data$newTime),
c(data$Sub_metering_1,data$Sub_metering_2,data$Sub_metering_3),
type="n",
xlab="",
ylab="Energy sub metering",
main="",
pty="s",
bg="transparent")
lines(data$newTime,data$Sub_metering_1,col="black")
lines(data$newTime,data$Sub_metering_3,col="blue")
lines(data$newTime,data$Sub_metering_2,col="red")
legend(x="topright",
col=c("black","red","blue"),
legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),
lty=c(1,1,1))
dev.off()
#------------------------------------------------------------------------------
|
library(magrittr)
library(dplyr)
library(ggplot2)
library(readr)
library(fitdistrplus)
library(DAAG)
library("ggplot2")
library(anytime)
# Load the raw bitqy token-transfer edge list (from, to, unix time, amount).
bitqy <- read_delim('C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/networkbitqyTX.txt', delim = " ", col_names = F)
names(bitqy) <- c('fromID', 'toID', 'unixTime', 'tokenAmount')
# Token parameters; transfers >= decimals * supply (2e10) are treated as
# invalid outliers.  NOTE(review): the usual convention would be
# supply * 10^decimals -- confirm which threshold was intended.
decimals <- 2
supply <- 1 * 10^10
bitqyFiltered <-filter(bitqy,tokenAmount < decimals * supply) #filter out all outliers
# Count how many distinct recipients were involved in the outlier transfers.
bitqy_outliers<- filter(bitqy,tokenAmount >= decimals * supply)
user_outliers <- bitqy_outliers %>% group_by(toID) %>% summarise(n = n()) %>% ungroup
number_users_outliers<-nrow(user_outliers)
number_users_outliers
#get top X buyers data
buys<-bitqyFiltered%>% group_by(toID) %>% summarise(n = n()) %>% ungroup #change the supply and decimals amount
buys_sorted_dec<-buys[order(-buys$n),]
#top 30 active buyers and number of buys
top_30_buyers<-buys_sorted_dec%>%head(30)
top_30_buyers
########################################Question 1############################################
#####group by user pairs#####
# Count transfers per (fromID, toID) pair and keep pairs with fewer than 30
# transfers for the distribution fitting below.
# NOTE(review): the original code first ran an O(n^2) double loop that tried
# to canonicalise reversed (A,B)/(B,A) pairs, and then grouped by
# fromID*toID+fromID+toID -- but both results were immediately overwritten by
# the plain group_by(fromID, toID) below, so they had no effect on the final
# data.  The dead code has been removed; the output is unchanged.
buys_pairs<-bitqyFiltered%>% group_by(fromID, toID) %>% summarise(n = n()) %>% ungroup
buys_pair_sorted_asc<-buys_pairs[order(buys_pairs$n),]
buys_pair_less_30<-subset(buys_pair_sorted_asc,n<30)
# Alias used by the fitting code below.
buys_pair_data<-buys_pair_less_30
#####find out estimates of parameters of several distributions based on the buys_pairs data set#####
# Fit candidate distributions to the per-pair transfer counts by maximum
# likelihood, then compare fits with goodness-of-fit statistics and a
# Cullen-Frey graph (descdist).
exp_dis <- fitdist(buys_pair_data$n, 'exp')
exp_dis
gamma_dis <- fitdist(buys_pair_data$n, 'gamma')
gamma_dis
lnorm_dis <- fitdist(buys_pair_data$n, 'lnorm')
lnorm_dis
pois_dis <- fitdist(buys_pair_data$n, 'pois')
pois_dis
weibull_dis <- fitdist(buys_pair_data$n, 'weibull')
weibull_dis
gofstat(list(exp_dis, gamma_dis, lnorm_dis,pois_dis,weibull_dis))
descdist(buys_pair_data$n,boot=1000)
# Refit each candidate on the <30 subset and inspect summaries / CDF plots.
#lognorm
fit_lnorm <- fitdist(buys_pair_less_30$n,"lnorm")
summary(fit_lnorm)
plot(fit_lnorm)
cdfcomp(fit_lnorm)
#exp
fit_exp <- fitdist(buys_pair_less_30$n,"exp")
summary(fit_exp)
plot(fit_exp)
cdfcomp(fit_exp)
#gamma
fit_gamma <- fitdist(buys_pair_less_30$n,"gamma")
summary(fit_gamma)
plot(fit_gamma)
cdfcomp(fit_gamma)
#weibull
fit_weibull <- fitdist(buys_pair_less_30$n,"weibull")
summary(fit_weibull)
#normal
fit_normal <- fitdist(buys_pair_less_30$n,"norm")
summary(fit_normal)
#pois (the "#normal" label below was mislabelled in the original)
#normal
fit_pois <- fitdist(buys_pair_less_30$n,"pois")
summary(fit_pois)
#unif
fit_unif <- fitdist(buys_pair_less_30$n,"unif")
summary(fit_unif)
plot(fit_unif)
cdfcomp(fit_unif)
######################draw graph#######################
# Histogram of per-pair transfer counts overlaid with the fitted densities.
# NOTE(review): the distribution parameters below are hard-coded -- presumably
# copied from the fitdist output above; confirm they match the current data.
all_density <- ggplot(data=buys_pair_less_30) +
geom_histogram(bins=30,aes(x = buys_pair_less_30$n, ..density..)) +
stat_function(fun = dlnorm, args = list(meanlog = 0.9122759, sdlog = 0.9075324),
colour = "red")+
stat_function(fun = dgamma, args = list(shape = 1.2923088, rate=0.3361498),
colour = "blue")+
stat_function(fun=dexp, args=list(rate=0.2600901),colour="green")+
stat_function(fun=dweibull, args=list(shape=1.083871, scale=3.982783),colour="yellow")+
stat_function(fun=dpois, args=list(lambda=3.844821),colour="orange")+
xlab("No.Buys")
all_density
########################################Question 1############################################
########################################Question 2############################################
# Reload daily token prices and the transfer edge list, then join them by
# calendar day.
bitqy_prices <- read_delim("C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/bitqy", delim = "\t", col_names = T) #load token price data
names(bitqy_prices) <- make.names(names(bitqy_prices))
bitqy_prices <- bitqy_prices %>% mutate(date = as.Date(Date, format = '%m/%d/%Y'))
bitqy <- read_delim('C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/networkbitqyTX.txt', delim = " ", col_names = F)
names(bitqy) <- c('fromID', 'toID', 'unixTime', 'tokenAmount')
decimals <- 2
supply <- 1 * 10^10
bitqy_filtered <-filter(bitqy,tokenAmount < decimals * supply)
## convert data type of unixTime
bitqy_filtered <- bitqy_filtered %>%
mutate(date = anydate(unixTime))
names(bitqy_filtered) <- c('fromID', 'toID', 'unixTime', 'tokenAmount', 'date')
## merge the prices and edge
# Left join keeps every price day, even days with no transfers (NA edges).
bitqy_merged<-merge(x = bitqy_prices, y = bitqy_filtered, by = "date", all.x = TRUE)
################Determin K##########################
# For K = 1..30, restrict the merged price/transfer data to the top-K most
# active buyers, aggregate per day, and regress the next day's closing price
# on that day's transfer volume, transfer count and opening price.  Each model
# summary is appended to Final_Result.txt and its diagnostic plots are written
# to A<K>.png.  (The unused `count <- 1` from the original was removed.)
top_30_buyers <- buys_sorted_dec %>% head(30)
top_K <- c(1:30)
# Shift a vector n steps "left", padding the tail with NA, so that row i holds
# day i+n's closing price (the regression target).  Hoisted out of the loop;
# it was redefined identically on every iteration in the original.
shift <- function(x, n) {
  c(x[-(seq(n))], rep(NA, n))
}
for (val in top_K) {
  top_K_buyers <- buys_sorted_dec %>% head(val)
  filter_K_bitqy_merged <- filter(bitqy_merged, toID %in% top_K_buyers$toID)
  filter_K_bitqy_merged <- transform(filter_K_bitqy_merged, average_price = (Open + Close) / 2)
  filter_K_bitqy_merged$num_Date <- as.numeric(as.POSIXct(filter_K_bitqy_merged$date))
  filered <- filter_K_bitqy_merged %>% group_by(num_Date) %>% summarise(n = n(), Close = mean(Close), tokenAmount = sum(tokenAmount), Open = mean(Open))
  filered$new_Close <- shift(filered$Close, 1)
  num_rows <- nrow(filered)
  # BUGFIX: the original evaluated `filered[-num_rows,]` without assigning it,
  # so the last row (whose new_Close is NA) was never actually dropped.
  # lm() silently removed that NA row anyway, so fitted results are unchanged.
  filered <- filered[-num_rows, ]
  regression <- lm(filered$new_Close ~ filered$tokenAmount + filered$n + filered$Open)
  setwd("C:/Users/ygaoq/Desktop/bitqy")
  # NOTE(review): this "W<K>.txt" name is computed but never used -- all text
  # output goes to the fixed Final_Result.txt file below; confirm intent.
  yourfilename <- paste("W", val, ".txt", sep = "")
  capture.output(summary(regression),append = TRUE,file = "C:/Users/ygaoq/Desktop/bitqy/Final_Result.txt")
  summary(regression)
  par(mfcol = c(2, 2))
  setwd("C:/Users/ygaoq/Desktop/bitqy")
  yourfilename <- paste("A", val, ".png", sep = "")
  png(file = yourfilename)
  opar <- par(mfrow = c(2, 2))
  plot(regression)
  dev.off()
}
| /R_bitqy.R | no_license | y521gaoqi/Blockchain-Tokens-Data-Analytics | R | false | false | 6,122 | r | library(magrittr)
library(dplyr)
library(ggplot2)
library(readr)
library(fitdistrplus)
library(DAAG)
library("ggplot2")
library(anytime)
# Load the raw bitqy token-transfer edge list (from, to, unix time, amount).
bitqy <- read_delim('C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/networkbitqyTX.txt', delim = " ", col_names = F)
names(bitqy) <- c('fromID', 'toID', 'unixTime', 'tokenAmount')
# Token parameters; transfers >= decimals * supply (2e10) are treated as
# invalid outliers.  NOTE(review): the usual convention would be
# supply * 10^decimals -- confirm which threshold was intended.
decimals <- 2
supply <- 1 * 10^10
bitqyFiltered <-filter(bitqy,tokenAmount < decimals * supply) #filter out all outliers
# Count how many distinct recipients were involved in the outlier transfers.
bitqy_outliers<- filter(bitqy,tokenAmount >= decimals * supply)
user_outliers <- bitqy_outliers %>% group_by(toID) %>% summarise(n = n()) %>% ungroup
number_users_outliers<-nrow(user_outliers)
number_users_outliers
#get top X buyers data
buys<-bitqyFiltered%>% group_by(toID) %>% summarise(n = n()) %>% ungroup #change the supply and decimals amount
buys_sorted_dec<-buys[order(-buys$n),]
#top 30 active buyers and number of buys
top_30_buyers<-buys_sorted_dec%>%head(30)
top_30_buyers
########################################Question 1############################################
#####group by user pairs#####
# Count transfers per (fromID, toID) pair and keep pairs with fewer than 30
# transfers for the distribution fitting below.
# NOTE(review): the original code first ran an O(n^2) double loop that tried
# to canonicalise reversed (A,B)/(B,A) pairs, and then grouped by
# fromID*toID+fromID+toID -- but both results were immediately overwritten by
# the plain group_by(fromID, toID) below, so they had no effect on the final
# data.  The dead code has been removed; the output is unchanged.
buys_pairs<-bitqyFiltered%>% group_by(fromID, toID) %>% summarise(n = n()) %>% ungroup
buys_pair_sorted_asc<-buys_pairs[order(buys_pairs$n),]
buys_pair_less_30<-subset(buys_pair_sorted_asc,n<30)
# Alias used by the fitting code below.
buys_pair_data<-buys_pair_less_30
#####find out estimates of parameters of several distributions based on the buys_pairs data set#####
# Fit candidate distributions to the per-pair transfer counts by maximum
# likelihood, then compare fits with goodness-of-fit statistics and a
# Cullen-Frey graph (descdist).
exp_dis <- fitdist(buys_pair_data$n, 'exp')
exp_dis
gamma_dis <- fitdist(buys_pair_data$n, 'gamma')
gamma_dis
lnorm_dis <- fitdist(buys_pair_data$n, 'lnorm')
lnorm_dis
pois_dis <- fitdist(buys_pair_data$n, 'pois')
pois_dis
weibull_dis <- fitdist(buys_pair_data$n, 'weibull')
weibull_dis
gofstat(list(exp_dis, gamma_dis, lnorm_dis,pois_dis,weibull_dis))
descdist(buys_pair_data$n,boot=1000)
# Refit each candidate on the <30 subset and inspect summaries / CDF plots.
#lognorm
fit_lnorm <- fitdist(buys_pair_less_30$n,"lnorm")
summary(fit_lnorm)
plot(fit_lnorm)
cdfcomp(fit_lnorm)
#exp
fit_exp <- fitdist(buys_pair_less_30$n,"exp")
summary(fit_exp)
plot(fit_exp)
cdfcomp(fit_exp)
#gamma
fit_gamma <- fitdist(buys_pair_less_30$n,"gamma")
summary(fit_gamma)
plot(fit_gamma)
cdfcomp(fit_gamma)
#weibull
fit_weibull <- fitdist(buys_pair_less_30$n,"weibull")
summary(fit_weibull)
#normal
fit_normal <- fitdist(buys_pair_less_30$n,"norm")
summary(fit_normal)
#pois (the "#normal" label below was mislabelled in the original)
#normal
fit_pois <- fitdist(buys_pair_less_30$n,"pois")
summary(fit_pois)
#unif
fit_unif <- fitdist(buys_pair_less_30$n,"unif")
summary(fit_unif)
plot(fit_unif)
cdfcomp(fit_unif)
######################draw graph#######################
# Histogram of per-pair transfer counts overlaid with the fitted densities.
# NOTE(review): the distribution parameters below are hard-coded -- presumably
# copied from the fitdist output above; confirm they match the current data.
all_density <- ggplot(data=buys_pair_less_30) +
geom_histogram(bins=30,aes(x = buys_pair_less_30$n, ..density..)) +
stat_function(fun = dlnorm, args = list(meanlog = 0.9122759, sdlog = 0.9075324),
colour = "red")+
stat_function(fun = dgamma, args = list(shape = 1.2923088, rate=0.3361498),
colour = "blue")+
stat_function(fun=dexp, args=list(rate=0.2600901),colour="green")+
stat_function(fun=dweibull, args=list(shape=1.083871, scale=3.982783),colour="yellow")+
stat_function(fun=dpois, args=list(lambda=3.844821),colour="orange")+
xlab("No.Buys")
all_density
########################################Question 1############################################
########################################Question 2############################################
# Reload daily token prices and the transfer edge list, then join them by
# calendar day.
bitqy_prices <- read_delim("C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/bitqy", delim = "\t", col_names = T) #load token price data
names(bitqy_prices) <- make.names(names(bitqy_prices))
bitqy_prices <- bitqy_prices %>% mutate(date = as.Date(Date, format = '%m/%d/%Y'))
bitqy <- read_delim('C:/Users/ygaoq/OneDrive/MyDocuments/2019 Spring/Statistics/Project/Blockchain-Tokens-Data-Analytics/networkbitqyTX.txt', delim = " ", col_names = F)
names(bitqy) <- c('fromID', 'toID', 'unixTime', 'tokenAmount')
decimals <- 2
supply <- 1 * 10^10
bitqy_filtered <-filter(bitqy,tokenAmount < decimals * supply)
## convert data type of unixTime
bitqy_filtered <- bitqy_filtered %>%
mutate(date = anydate(unixTime))
names(bitqy_filtered) <- c('fromID', 'toID', 'unixTime', 'tokenAmount', 'date')
## merge the prices and edge
# Left join keeps every price day, even days with no transfers (NA edges).
bitqy_merged<-merge(x = bitqy_prices, y = bitqy_filtered, by = "date", all.x = TRUE)
################Determin K##########################
# For K = 1..30, restrict the merged price/transfer data to the top-K most
# active buyers, aggregate per day, and regress the next day's closing price
# on that day's transfer volume, transfer count and opening price.  Each model
# summary is appended to Final_Result.txt and its diagnostic plots are written
# to A<K>.png.  (The unused `count <- 1` from the original was removed.)
top_30_buyers <- buys_sorted_dec %>% head(30)
top_K <- c(1:30)
# Shift a vector n steps "left", padding the tail with NA, so that row i holds
# day i+n's closing price (the regression target).  Hoisted out of the loop;
# it was redefined identically on every iteration in the original.
shift <- function(x, n) {
  c(x[-(seq(n))], rep(NA, n))
}
for (val in top_K) {
  top_K_buyers <- buys_sorted_dec %>% head(val)
  filter_K_bitqy_merged <- filter(bitqy_merged, toID %in% top_K_buyers$toID)
  filter_K_bitqy_merged <- transform(filter_K_bitqy_merged, average_price = (Open + Close) / 2)
  filter_K_bitqy_merged$num_Date <- as.numeric(as.POSIXct(filter_K_bitqy_merged$date))
  filered <- filter_K_bitqy_merged %>% group_by(num_Date) %>% summarise(n = n(), Close = mean(Close), tokenAmount = sum(tokenAmount), Open = mean(Open))
  filered$new_Close <- shift(filered$Close, 1)
  num_rows <- nrow(filered)
  # BUGFIX: the original evaluated `filered[-num_rows,]` without assigning it,
  # so the last row (whose new_Close is NA) was never actually dropped.
  # lm() silently removed that NA row anyway, so fitted results are unchanged.
  filered <- filered[-num_rows, ]
  regression <- lm(filered$new_Close ~ filered$tokenAmount + filered$n + filered$Open)
  setwd("C:/Users/ygaoq/Desktop/bitqy")
  # NOTE(review): this "W<K>.txt" name is computed but never used -- all text
  # output goes to the fixed Final_Result.txt file below; confirm intent.
  yourfilename <- paste("W", val, ".txt", sep = "")
  capture.output(summary(regression),append = TRUE,file = "C:/Users/ygaoq/Desktop/bitqy/Final_Result.txt")
  summary(regression)
  par(mfcol = c(2, 2))
  setwd("C:/Users/ygaoq/Desktop/bitqy")
  yourfilename <- paste("A", val, ".png", sep = "")
  png(file = yourfilename)
  opar <- par(mfrow = c(2, 2))
  plot(regression)
  dev.off()
}
|
# Auto-extracted example for the ExpDes "ex4" data set (composting experiment,
# double factorial scheme in a completely randomised design).
library(ExpDes)
### Name: ex4
### Title: Composting: Doble Factorial scheme in CRD
### Aliases: ex4
### ** Examples
data(ex4)
## maybe str(ex4) ; plot(ex4) ...
| /data/genthat_extracted_code/ExpDes/examples/ex4.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 167 | r | library(ExpDes)
### Name: ex4
### Title: Composting: Doble Factorial scheme in CRD
### Aliases: ex4
### ** Examples
# Load the ex4 composting data set shipped with ExpDes.
data(ex4)
## maybe str(ex4) ; plot(ex4) ...
|
# Auto-extracted examples for psych::pairwiseCount and friends: counting,
# describing and imputing pairwise-complete cases in data with missing values.
library(psych)
### Name: pairwiseCount
### Title: Count number of pairwise cases for a data set with missing (NA)
### data and impute values.
### Aliases: pairwiseCount count.pairwise pairwiseDescribe pairwiseReport
### pairwiseImpute
### Keywords: models multivariate
### ** Examples
# Random matrices with values knocked out to create missingness patterns.
x <- matrix(rnorm(900),ncol=6)
y <- matrix(rnorm(450),ncol=3)
x[x < 0] <- NA
y[y > 1] <- NA
pairwiseCount(x)
pairwiseCount(y)
pairwiseCount(x,y)
pairwiseCount(x,diagonal=FALSE)
pairwiseDescribe(x,quant=c(.1,.25,.5,.75,.9))
#examine the structure of the ability data set
# NOTE(review): "letter.7" appears twice in the letters key below -- possibly
# a typo for another letter item; confirm against the ability data set.
keys <- list(ICAR16=colnames(ability),reasoning = cs(reason.4,reason.16,reason.17,reason.19),
letters=cs(letter.7, letter.33,letter.34,letter.58, letter.7),
matrix=cs(matrix.45,matrix.46,matrix.47,matrix.55),
rotate=cs(rotate.3,rotate.4,rotate.6,rotate.8))
pairwiseImpute(keys,ability)
| /data/genthat_extracted_code/psych/examples/count.pairwise.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 871 | r | library(psych)
### Name: pairwiseCount
### Title: Count number of pairwise cases for a data set with missing (NA)
### data and impute values.
### Aliases: pairwiseCount count.pairwise pairwiseDescribe pairwiseReport
### pairwiseImpute
### Keywords: models multivariate
### ** Examples
# Random matrices with values knocked out to create missingness patterns.
x <- matrix(rnorm(900),ncol=6)
y <- matrix(rnorm(450),ncol=3)
x[x < 0] <- NA
y[y > 1] <- NA
pairwiseCount(x)
pairwiseCount(y)
pairwiseCount(x,y)
pairwiseCount(x,diagonal=FALSE)
pairwiseDescribe(x,quant=c(.1,.25,.5,.75,.9))
#examine the structure of the ability data set
# NOTE(review): "letter.7" appears twice in the letters key below -- possibly
# a typo for another letter item; confirm against the ability data set.
keys <- list(ICAR16=colnames(ability),reasoning = cs(reason.4,reason.16,reason.17,reason.19),
letters=cs(letter.7, letter.33,letter.34,letter.58, letter.7),
matrix=cs(matrix.45,matrix.46,matrix.47,matrix.55),
rotate=cs(rotate.3,rotate.4,rotate.6,rotate.8))
pairwiseImpute(keys,ability)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coxS.R
\name{coxS}
\alias{coxS}
\title{coxS}
\usage{
coxS(Formula, data, weightsVar = 1, subset = "all", strataVar = "1",
lower = "~1")
}
\description{
A wrapper around \code{coxph}, mainly used in the shiny app.
}
| /man/coxS.Rd | permissive | sontron/madis | R | false | true | 282 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coxS.R
\name{coxS}
\alias{coxS}
\title{coxS}
\usage{
coxS(Formula, data, weightsVar = 1, subset = "all", strataVar = "1",
lower = "~1")
}
\description{
A function based on \code{coxph}, mainly used in the shiny app.
}
|
# ---- STANDARD ARIMA ----
# Tests for arima_boost() with the "arima_xgboost" engine: a (S)ARIMA model
# captures trend/seasonality and an xgboost model is fit to the ARIMA
# residuals (the booster is only trained when exogenous regressors exist).
context("TEST arima_boost: arima_xgboost")
# SETUP ----
# Data
# Single monthly series (id "M750") from the M4 competition sample data.
m750 <- m4_monthly %>% filter(id == "M750")
# Split Data 80/20
splits <- initial_time_split(m750, prop = 0.8)
# Model Spec
# SARIMA(3,1,3)(1,0,1)[12] base model; xgboost hyperparameters are set
# explicitly so they can be asserted against the fitted booster later.
model_spec <- arima_boost(
seasonal_period = 12,
non_seasonal_ar = 3,
non_seasonal_differences = 1,
non_seasonal_ma = 3,
seasonal_ar = 1,
seasonal_differences = 0,
seasonal_ma = 1,
mtry = 25,
trees = 250,
min_n = 4,
learn_rate = 0.1,
tree_depth = 7,
loss_reduction = 0.4,
sample_size = 0.9
) %>%
set_engine("arima_xgboost")
# PARSNIP ----
# * NO XREGS ----
# Fit Spec
# Outcome is log-transformed in the formula; forecasts therefore come back
# on the log scale and are exp()-ed before accuracy checks.
model_fit <- model_spec %>%
fit(log(value) ~ date, data = training(splits))
# Predictions
predictions_tbl <- model_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits))
# TESTS
test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", {
testthat::expect_s3_class(model_fit$fit, "arima_xgboost_fit_impl")
# $fit
testthat::expect_s3_class(model_fit$fit$models$model_1, "Arima")
testthat::expect_s3_class(model_fit$fit$data, "tbl_df")
testthat::expect_equal(names(model_fit$fit$data)[1], "date")
testthat::expect_true(is.null(model_fit$fit$extras$xreg_recipe))
# $fit xgboost
# A date-only formula provides no exogenous regressors, so no xgboost
# model should have been trained.
testthat::expect_identical(model_fit$fit$models$model_2, NULL)
# $preproc
testthat::expect_equal(model_fit$preproc$y_var, "value")
})
test_that("arima_boost: Arima, (No xregs), Test Predictions", {
# Structure
testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))
testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
# Forecasts are on the log scale; exp() back-transforms before residuals.
resid <- testing(splits)$value - exp(predictions_tbl$.value)
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
# * XREGS ----
# Fit Spec
# as.numeric(date) and the month factor become exogenous regressors, so
# the xgboost residual model (model_2) is trained this time.
model_fit <- model_spec %>%
fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), data = training(splits))
# Predictions
predictions_tbl <- model_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits))
# TESTS
test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", {
testthat::expect_s3_class(model_fit$fit, "arima_xgboost_fit_impl")
# Structure
testthat::expect_s3_class(model_fit$fit$data, "tbl_df")
testthat::expect_equal(names(model_fit$fit$data)[1], "date")
testthat::expect_true(!is.null(model_fit$fit$extras$xreg_recipe))
# $fit arima
testthat::expect_s3_class(model_fit$fit$models$model_1, "Arima")
# $fit xgboost
# The parsnip arguments must round-trip into the fitted booster's params.
testthat::expect_s3_class(model_fit$fit$models$model_2, "xgb.Booster")
testthat::expect_identical(model_fit$fit$models$model_2$params$eta, 0.1)
testthat::expect_identical(model_fit$fit$models$model_2$params$max_depth, 7)
testthat::expect_identical(model_fit$fit$models$model_2$params$gamma, 0.4)
testthat::expect_identical(model_fit$fit$models$model_2$params$colsample_bytree, 1)
testthat::expect_identical(model_fit$fit$models$model_2$params$min_child_weight, 4)
testthat::expect_identical(model_fit$fit$models$model_2$params$subsample, 0.9)
testthat::expect_identical(model_fit$fit$models$model_2$params$objective, "reg:squarederror")
# $preproc
testthat::expect_equal(model_fit$preproc$y_var, "value")
})
test_that("arima_boost: Arima (XREGS), Test Predictions", {
# Structure
testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))
testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
resid <- testing(splits)$value - exp(predictions_tbl$.value)
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
# ---- WORKFLOWS ----
# Same model as above, but preprocessing (log transform, month dummies,
# numeric date) is done via a recipe inside a workflow instead of the
# formula interface.
# Model Spec
model_spec <- arima_boost(
seasonal_period = 12,
non_seasonal_ar = 3,
non_seasonal_differences = 1,
non_seasonal_ma = 3,
seasonal_ar = 1,
seasonal_differences = 0,
seasonal_ma = 1,
mtry = 25,
trees = 250,
min_n = 4,
learn_rate = 0.1,
tree_depth = 7,
loss_reduction = 0.4,
sample_size = 0.9
) %>%
set_engine("arima_xgboost")
# Recipe spec
# skip = FALSE so the log step is also applied at bake/forecast time.
recipe_spec <- recipe(value ~ date, data = training(splits)) %>%
step_log(value, skip = FALSE) %>%
step_date(date, features = "month") %>%
step_mutate(date_num = as.numeric(date))
# Workflow
wflw <- workflow() %>%
add_recipe(recipe_spec) %>%
add_model(model_spec)
wflw_fit <- wflw %>%
fit(training(splits))
# Forecast
# actual_data is included, so the result holds both actuals and
# predictions; .value is exp()-ed back to the original scale here.
predictions_tbl <- wflw_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>%
mutate_at(vars(.value), exp)
# TESTS
# Verifies the workflow-fitted model object: engine class, stored training
# data, presence of the xreg recipe (the workflow recipe supplies xregs),
# the underlying Arima model, and that the xgboost hyperparameters
# round-tripped into the fitted booster.
test_that("arima_boost: Arima (workflow), Test Model Fit Object", {
testthat::expect_s3_class(wflw_fit$fit$fit$fit, "arima_xgboost_fit_impl")
# Structure
testthat::expect_s3_class(wflw_fit$fit$fit$fit$data, "tbl_df")
testthat::expect_equal(names(wflw_fit$fit$fit$fit$data)[1], "date")
testthat::expect_true(!is.null(wflw_fit$fit$fit$fit$extras$xreg_recipe))
# $fit arima
testthat::expect_s3_class(wflw_fit$fit$fit$fit$models$model_1, "Arima")
# $fit xgboost
testthat::expect_s3_class(wflw_fit$fit$fit$fit$models$model_2, "xgb.Booster")
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$eta, 0.1)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$max_depth, 7)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$gamma, 0.4)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$colsample_bytree, 1)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$min_child_weight, 4)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$subsample, 0.9)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$objective, "reg:squarederror")
# $preproc
# extract_mold() supersedes the deprecated pull_workflow_mold()
# (workflows >= 0.2.3); behavior is identical.
mld <- wflw_fit %>% workflows::extract_mold()
testthat::expect_equal(names(mld$outcomes), "value")
})
test_that("arima_boost: Arima (workflow), Test Predictions", {
# The forecast was produced with actual_data, so it should contain one
# row per training observation plus one per test observation.
full_data <- bind_rows(training(splits), testing(splits))
# Structure
testthat::expect_identical(nrow(full_data), nrow(predictions_tbl))
testthat::expect_identical(full_data$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
# Score only the forecast rows (.key == "prediction"), not the actuals.
predictions_tbl <- predictions_tbl %>% filter(.key == "prediction")
resid <- testing(splits)$value - predictions_tbl$.value
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
| /tests/testthat/test-algo-arima_boost-Arima.R | permissive | peterhaglich/modeltime | R | false | false | 7,089 | r | # ---- STANDARD ARIMA ----
# Tests for arima_boost() with the "arima_xgboost" engine: a (S)ARIMA model
# captures trend/seasonality and an xgboost model is fit to the ARIMA
# residuals (the booster is only trained when exogenous regressors exist).
context("TEST arima_boost: arima_xgboost")
# SETUP ----
# Data
# Single monthly series (id "M750") from the M4 competition sample data.
m750 <- m4_monthly %>% filter(id == "M750")
# Split Data 80/20
splits <- initial_time_split(m750, prop = 0.8)
# Model Spec
# SARIMA(3,1,3)(1,0,1)[12] base model; xgboost hyperparameters are set
# explicitly so they can be asserted against the fitted booster later.
model_spec <- arima_boost(
seasonal_period = 12,
non_seasonal_ar = 3,
non_seasonal_differences = 1,
non_seasonal_ma = 3,
seasonal_ar = 1,
seasonal_differences = 0,
seasonal_ma = 1,
mtry = 25,
trees = 250,
min_n = 4,
learn_rate = 0.1,
tree_depth = 7,
loss_reduction = 0.4,
sample_size = 0.9
) %>%
set_engine("arima_xgboost")
# PARSNIP ----
# * NO XREGS ----
# Fit Spec
# Outcome is log-transformed in the formula; forecasts therefore come back
# on the log scale and are exp()-ed before accuracy checks.
model_fit <- model_spec %>%
fit(log(value) ~ date, data = training(splits))
# Predictions
predictions_tbl <- model_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits))
# TESTS
test_that("arima_boost: Arima, (No xregs), Test Model Fit Object", {
testthat::expect_s3_class(model_fit$fit, "arima_xgboost_fit_impl")
# $fit
testthat::expect_s3_class(model_fit$fit$models$model_1, "Arima")
testthat::expect_s3_class(model_fit$fit$data, "tbl_df")
testthat::expect_equal(names(model_fit$fit$data)[1], "date")
testthat::expect_true(is.null(model_fit$fit$extras$xreg_recipe))
# $fit xgboost
# A date-only formula provides no exogenous regressors, so no xgboost
# model should have been trained.
testthat::expect_identical(model_fit$fit$models$model_2, NULL)
# $preproc
testthat::expect_equal(model_fit$preproc$y_var, "value")
})
test_that("arima_boost: Arima, (No xregs), Test Predictions", {
# Structure
testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))
testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
# Forecasts are on the log scale; exp() back-transforms before residuals.
resid <- testing(splits)$value - exp(predictions_tbl$.value)
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
# * XREGS ----
# Fit Spec
# as.numeric(date) and the month factor become exogenous regressors, so
# the xgboost residual model (model_2) is trained this time.
model_fit <- model_spec %>%
fit(log(value) ~ date + as.numeric(date) + month(date, label = TRUE), data = training(splits))
# Predictions
predictions_tbl <- model_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits))
# TESTS
test_that("arima_boost: Arima, (XREGS), Test Model Fit Object", {
testthat::expect_s3_class(model_fit$fit, "arima_xgboost_fit_impl")
# Structure
testthat::expect_s3_class(model_fit$fit$data, "tbl_df")
testthat::expect_equal(names(model_fit$fit$data)[1], "date")
testthat::expect_true(!is.null(model_fit$fit$extras$xreg_recipe))
# $fit arima
testthat::expect_s3_class(model_fit$fit$models$model_1, "Arima")
# $fit xgboost
# The parsnip arguments must round-trip into the fitted booster's params.
testthat::expect_s3_class(model_fit$fit$models$model_2, "xgb.Booster")
testthat::expect_identical(model_fit$fit$models$model_2$params$eta, 0.1)
testthat::expect_identical(model_fit$fit$models$model_2$params$max_depth, 7)
testthat::expect_identical(model_fit$fit$models$model_2$params$gamma, 0.4)
testthat::expect_identical(model_fit$fit$models$model_2$params$colsample_bytree, 1)
testthat::expect_identical(model_fit$fit$models$model_2$params$min_child_weight, 4)
testthat::expect_identical(model_fit$fit$models$model_2$params$subsample, 0.9)
testthat::expect_identical(model_fit$fit$models$model_2$params$objective, "reg:squarederror")
# $preproc
testthat::expect_equal(model_fit$preproc$y_var, "value")
})
test_that("arima_boost: Arima (XREGS), Test Predictions", {
# Structure
testthat::expect_identical(nrow(testing(splits)), nrow(predictions_tbl))
testthat::expect_identical(testing(splits)$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
resid <- testing(splits)$value - exp(predictions_tbl$.value)
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
# ---- WORKFLOWS ----
# Same model as above, but preprocessing (log transform, month dummies,
# numeric date) is done via a recipe inside a workflow instead of the
# formula interface.
# Model Spec
model_spec <- arima_boost(
seasonal_period = 12,
non_seasonal_ar = 3,
non_seasonal_differences = 1,
non_seasonal_ma = 3,
seasonal_ar = 1,
seasonal_differences = 0,
seasonal_ma = 1,
mtry = 25,
trees = 250,
min_n = 4,
learn_rate = 0.1,
tree_depth = 7,
loss_reduction = 0.4,
sample_size = 0.9
) %>%
set_engine("arima_xgboost")
# Recipe spec
# skip = FALSE so the log step is also applied at bake/forecast time.
recipe_spec <- recipe(value ~ date, data = training(splits)) %>%
step_log(value, skip = FALSE) %>%
step_date(date, features = "month") %>%
step_mutate(date_num = as.numeric(date))
# Workflow
wflw <- workflow() %>%
add_recipe(recipe_spec) %>%
add_model(model_spec)
wflw_fit <- wflw %>%
fit(training(splits))
# Forecast
# actual_data is included, so the result holds both actuals and
# predictions; .value is exp()-ed back to the original scale here.
predictions_tbl <- wflw_fit %>%
modeltime_calibrate(testing(splits)) %>%
modeltime_forecast(new_data = testing(splits), actual_data = training(splits)) %>%
mutate_at(vars(.value), exp)
# TESTS
# Verifies the workflow-fitted model object: engine class, stored training
# data, presence of the xreg recipe (the workflow recipe supplies xregs),
# the underlying Arima model, and that the xgboost hyperparameters
# round-tripped into the fitted booster.
test_that("arima_boost: Arima (workflow), Test Model Fit Object", {
testthat::expect_s3_class(wflw_fit$fit$fit$fit, "arima_xgboost_fit_impl")
# Structure
testthat::expect_s3_class(wflw_fit$fit$fit$fit$data, "tbl_df")
testthat::expect_equal(names(wflw_fit$fit$fit$fit$data)[1], "date")
testthat::expect_true(!is.null(wflw_fit$fit$fit$fit$extras$xreg_recipe))
# $fit arima
testthat::expect_s3_class(wflw_fit$fit$fit$fit$models$model_1, "Arima")
# $fit xgboost
testthat::expect_s3_class(wflw_fit$fit$fit$fit$models$model_2, "xgb.Booster")
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$eta, 0.1)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$max_depth, 7)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$gamma, 0.4)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$colsample_bytree, 1)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$min_child_weight, 4)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$subsample, 0.9)
testthat::expect_identical(wflw_fit$fit$fit$fit$models$model_2$params$objective, "reg:squarederror")
# $preproc
# extract_mold() supersedes the deprecated pull_workflow_mold()
# (workflows >= 0.2.3); behavior is identical.
mld <- wflw_fit %>% workflows::extract_mold()
testthat::expect_equal(names(mld$outcomes), "value")
})
test_that("arima_boost: Arima (workflow), Test Predictions", {
# The forecast was produced with actual_data, so it should contain one
# row per training observation plus one per test observation.
full_data <- bind_rows(training(splits), testing(splits))
# Structure
testthat::expect_identical(nrow(full_data), nrow(predictions_tbl))
testthat::expect_identical(full_data$date, predictions_tbl$.index)
# Out-of-Sample Accuracy Tests
# Score only the forecast rows (.key == "prediction"), not the actuals.
predictions_tbl <- predictions_tbl %>% filter(.key == "prediction")
resid <- testing(splits)$value - predictions_tbl$.value
# - Max Error less than 1500
testthat::expect_lte(max(abs(resid)), 1500)
# - MAE less than 700
testthat::expect_lte(mean(abs(resid)), 700)
})
|
# Spam classification by Bayes' theorem (textbook example).
# Events: B1 = message is spam, B2 = message is normal,
#         A  = message contains words from the watch list.
B1 <- 1700 # number of spam messages
B2 <- 3300 # number of normal messages
M <- 5000  # total number of messages
# Prior probability that a message is spam, P(B1)
PB1 <- B1 / M
message("The Probability is: ", PB1)
# Prior probability that a message is normal, P(B2)
PB2 <- B2 / M
message("The Probability is: ", PB2)
# Among the spam messages, 1343 contain words in the list;
# among the normal messages, only 297 do.
B1IntA <- 1343
B2IntA <- 297
# Conditional probabilities
PAB1 <- B1IntA / B1 # P(A|B1)
PAB2 <- B2IntA / B2 # P(A|B2)
# Bayes' theorem:
# P(B1|A) = P(A|B1)*P(B1) / (P(A|B1)*P(B1) + P(A|B2)*P(B2))
# The original code plugged in the raw counts B1/B2 instead of the priors
# PB1/PB2; the result is identical (the total M cancels), but using the
# priors matches the stated formula.
PB1A <- (PAB1 * PB1) / (PAB1 * PB1 + PAB2 * PB2)
PB1A
print("Since the probability is large there for the message is spam")
| /Miller_And_Freund_S_Probability_And_Statistics_For_Engineers_by_Richard_A._Johnson/CH3/EX3.31/EX3_31.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 667 | r | B1= 1700#message is spam
# Spam classification by Bayes' theorem (textbook example).
# Events: B1 = message is spam, B2 = message is normal,
#         A  = message contains words from the watch list.
# NOTE(review): in this copy the line defining B1 was mangled (fused into
# the preceding metadata row); it is restored here with the same value.
B1 <- 1700 # number of spam messages
B2 <- 3300 # number of normal messages
M <- 5000  # total number of messages
# Prior probability that a message is spam, P(B1)
PB1 <- B1 / M
message("The Probability is: ", PB1)
# Prior probability that a message is normal, P(B2)
PB2 <- B2 / M
message("The Probability is: ", PB2)
# Among the spam messages, 1343 contain words in the list;
# among the normal messages, only 297 do.
B1IntA <- 1343
B2IntA <- 297
# Conditional probabilities
PAB1 <- B1IntA / B1 # P(A|B1)
PAB2 <- B2IntA / B2 # P(A|B2)
# Bayes' theorem:
# P(B1|A) = P(A|B1)*P(B1) / (P(A|B1)*P(B1) + P(A|B2)*P(B2))
# The original code plugged in the raw counts B1/B2 instead of the priors
# PB1/PB2; the result is identical (the total M cancels), but using the
# priors matches the stated formula.
PB1A <- (PAB1 * PB1) / (PAB1 * PB1 + PAB2 * PB2)
PB1A
print("Since the probability is large there for the message is spam")
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
context("parallelize() and collect()")
# Mock data
# Numeric and string fixtures, as both atomic vectors and lists, used to
# exercise parallelize()/collect() round-trips below.
numVector <- c(-10:97)
numList <- list(sqrt(1), sqrt(2), sqrt(3), 4 ** 10)
strVector <- c("Dexter Morgan: I suppose I should be upset, even feel",
"violated, but I'm not. No, in fact, I think this is a friendly",
"message, like \"Hey, wanna play?\" and yes, I want to play. ",
"I really, really do.")
strList <- list("Dexter Morgan: Blood. Sometimes it sets my teeth on edge, ",
"other times it helps me control the chaos.",
"Dexter Morgan: Harry and Dorris Morgan did a wonderful job ",
"raising me. But they're both dead now. I didn't kill them. Honest.")
# Pairwise fixtures: lists of length-2 lists (key/value pairs).
numPairs <- list(list(1, 1), list(1, 2), list(2, 2), list(2, 3))
strPairs <- list(list(strList, strList), list(strList, strList))
# JavaSparkContext handle
jsc <- sparkR.init()
# Tests
test_that("parallelize() on simple vectors and lists returns an RDD", {
# Varying the third argument (number of slices) exercises different
# partition counts for each fixture.
numVectorRDD <- parallelize(jsc, numVector, 1)
numVectorRDD2 <- parallelize(jsc, numVector, 10)
numListRDD <- parallelize(jsc, numList, 1)
numListRDD2 <- parallelize(jsc, numList, 4)
strVectorRDD <- parallelize(jsc, strVector, 2)
strVectorRDD2 <- parallelize(jsc, strVector, 3)
strListRDD <- parallelize(jsc, strList, 4)
strListRDD2 <- parallelize(jsc, strList, 1)
rdds <- c(numVectorRDD,
numVectorRDD2,
numListRDD,
numListRDD2,
strVectorRDD,
strVectorRDD2,
strListRDD,
strListRDD2)
# Every result must be an RDD whose "jrdd" slot wraps a Java-side
# JavaRDD handle (a "jobj" reference).
for (rdd in rdds) {
expect_true(inherits(rdd, "RDD"))
expect_true(.hasSlot(rdd, "jrdd")
&& inherits(rdd@jrdd, "jobj")
&& isInstanceOf(rdd@jrdd, "org.apache.spark.api.java.JavaRDD"))
}
})
test_that("collect(), following a parallelize(), gives back the original collections", {
# collect() always returns a list, so vectors are compared via as.list().
numVectorRDD <- parallelize(jsc, numVector, 10)
expect_equal(collect(numVectorRDD), as.list(numVector))
numListRDD <- parallelize(jsc, numList, 1)
numListRDD2 <- parallelize(jsc, numList, 4)
expect_equal(collect(numListRDD), as.list(numList))
expect_equal(collect(numListRDD2), as.list(numList))
strVectorRDD <- parallelize(jsc, strVector, 2)
strVectorRDD2 <- parallelize(jsc, strVector, 3)
expect_equal(collect(strVectorRDD), as.list(strVector))
expect_equal(collect(strVectorRDD2), as.list(strVector))
strListRDD <- parallelize(jsc, strList, 4)
strListRDD2 <- parallelize(jsc, strList, 1)
expect_equal(collect(strListRDD), as.list(strList))
expect_equal(collect(strListRDD2), as.list(strList))
})
test_that("regression: collect() following a parallelize() does not drop elements", {
# 10 elements over 6 partitions splits unevenly; this guards against a
# past bug where uneven partitioning dropped elements.
# 10 %/% 6 = 1, ceiling(10 / 6) = 2
collLen <- 10
numPart <- 6
expected <- runif(collLen)
actual <- collect(parallelize(jsc, expected, numPart))
expect_equal(actual, as.list(expected))
})
test_that("parallelize() and collect() work for lists of pairs (pairwise data)", {
# use the pairwise logical to indicate pairwise data
numPairsRDDD1 <- parallelize(jsc, numPairs, 1)
numPairsRDDD2 <- parallelize(jsc, numPairs, 2)
numPairsRDDD3 <- parallelize(jsc, numPairs, 3)
expect_equal(collect(numPairsRDDD1), numPairs)
expect_equal(collect(numPairsRDDD2), numPairs)
expect_equal(collect(numPairsRDDD3), numPairs)
# can also leave out the parameter name, if the params are supplied in order
strPairsRDDD1 <- parallelize(jsc, strPairs, 1)
strPairsRDDD2 <- parallelize(jsc, strPairs, 2)
expect_equal(collect(strPairsRDDD1), strPairs)
expect_equal(collect(strPairsRDDD2), strPairs)
})
| /R/pkg/inst/tests/test_parallelize_collect.R | permissive | uncleGen/ps-on-spark | R | false | false | 4,392 | r | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
context("parallelize() and collect()")
# Mock data
# Numeric and string fixtures, as both atomic vectors and lists, used to
# exercise parallelize()/collect() round-trips below.
numVector <- c(-10:97)
numList <- list(sqrt(1), sqrt(2), sqrt(3), 4 ** 10)
strVector <- c("Dexter Morgan: I suppose I should be upset, even feel",
"violated, but I'm not. No, in fact, I think this is a friendly",
"message, like \"Hey, wanna play?\" and yes, I want to play. ",
"I really, really do.")
strList <- list("Dexter Morgan: Blood. Sometimes it sets my teeth on edge, ",
"other times it helps me control the chaos.",
"Dexter Morgan: Harry and Dorris Morgan did a wonderful job ",
"raising me. But they're both dead now. I didn't kill them. Honest.")
# Pairwise fixtures: lists of length-2 lists (key/value pairs).
numPairs <- list(list(1, 1), list(1, 2), list(2, 2), list(2, 3))
strPairs <- list(list(strList, strList), list(strList, strList))
# JavaSparkContext handle
jsc <- sparkR.init()
# Tests
test_that("parallelize() on simple vectors and lists returns an RDD", {
# Varying the third argument (number of slices) exercises different
# partition counts for each fixture.
numVectorRDD <- parallelize(jsc, numVector, 1)
numVectorRDD2 <- parallelize(jsc, numVector, 10)
numListRDD <- parallelize(jsc, numList, 1)
numListRDD2 <- parallelize(jsc, numList, 4)
strVectorRDD <- parallelize(jsc, strVector, 2)
strVectorRDD2 <- parallelize(jsc, strVector, 3)
strListRDD <- parallelize(jsc, strList, 4)
strListRDD2 <- parallelize(jsc, strList, 1)
rdds <- c(numVectorRDD,
numVectorRDD2,
numListRDD,
numListRDD2,
strVectorRDD,
strVectorRDD2,
strListRDD,
strListRDD2)
# Every result must be an RDD whose "jrdd" slot wraps a Java-side
# JavaRDD handle (a "jobj" reference).
for (rdd in rdds) {
expect_true(inherits(rdd, "RDD"))
expect_true(.hasSlot(rdd, "jrdd")
&& inherits(rdd@jrdd, "jobj")
&& isInstanceOf(rdd@jrdd, "org.apache.spark.api.java.JavaRDD"))
}
})
test_that("collect(), following a parallelize(), gives back the original collections", {
# collect() always returns a list, so vectors are compared via as.list().
numVectorRDD <- parallelize(jsc, numVector, 10)
expect_equal(collect(numVectorRDD), as.list(numVector))
numListRDD <- parallelize(jsc, numList, 1)
numListRDD2 <- parallelize(jsc, numList, 4)
expect_equal(collect(numListRDD), as.list(numList))
expect_equal(collect(numListRDD2), as.list(numList))
strVectorRDD <- parallelize(jsc, strVector, 2)
strVectorRDD2 <- parallelize(jsc, strVector, 3)
expect_equal(collect(strVectorRDD), as.list(strVector))
expect_equal(collect(strVectorRDD2), as.list(strVector))
strListRDD <- parallelize(jsc, strList, 4)
strListRDD2 <- parallelize(jsc, strList, 1)
expect_equal(collect(strListRDD), as.list(strList))
expect_equal(collect(strListRDD2), as.list(strList))
})
test_that("regression: collect() following a parallelize() does not drop elements", {
# 10 elements over 6 partitions splits unevenly; this guards against a
# past bug where uneven partitioning dropped elements.
# 10 %/% 6 = 1, ceiling(10 / 6) = 2
collLen <- 10
numPart <- 6
expected <- runif(collLen)
actual <- collect(parallelize(jsc, expected, numPart))
expect_equal(actual, as.list(expected))
})
test_that("parallelize() and collect() work for lists of pairs (pairwise data)", {
# use the pairwise logical to indicate pairwise data
numPairsRDDD1 <- parallelize(jsc, numPairs, 1)
numPairsRDDD2 <- parallelize(jsc, numPairs, 2)
numPairsRDDD3 <- parallelize(jsc, numPairs, 3)
expect_equal(collect(numPairsRDDD1), numPairs)
expect_equal(collect(numPairsRDDD2), numPairs)
expect_equal(collect(numPairsRDDD3), numPairs)
# can also leave out the parameter name, if the params are supplied in order
strPairsRDDD1 <- parallelize(jsc, strPairs, 1)
strPairsRDDD2 <- parallelize(jsc, strPairs, 2)
expect_equal(collect(strPairsRDDD1), strPairs)
expect_equal(collect(strPairsRDDD2), strPairs)
})
|
\name{plotmirror}
\alias{plotmirror}
\title{Plots 3-d mirror output in readable format}
\usage{
plotmirror(m)
}
\arguments{
\item{m}{A matrix with 3 rows, corresponding to the
output of a call to mirror with includeInfeasible =
TRUE.}
}
\value{
A ggplot object giving the arrow plot.
}
\description{
Plot the steps of a random walk on the simplex in 3
variables, using arrows to indicate the direction of each step.
}
\examples{
A <- matrix(1, ncol = 3)
x0 <- c(.2, -.2, 1)
m <- mirror(A, x0, n = 1, includeInfeasible = TRUE)
plotmirror(m)
}
| /man/plotmirror.Rd | no_license | MichaelJFlynn/kmatching | R | false | false | 525 | rd | \name{plotmirror}
\alias{plotmirror}
\title{Plots 3-d mirror output in readable format}
\usage{
plotmirror(m)
}
\arguments{
\item{m}{A matrix with 3 rows, corresponding to the
output of a call to mirror with includeInfeasible =
TRUE.}
}
\value{
A ggplot object giving the arrow plot.
}
\description{
Plot the steps of a random walk on the simplex in 3
variables, using arrows to indicate the direction of each step.
}
\examples{
A <- matrix(1, ncol = 3)
x0 <- c(.2, -.2, 1)
m <- mirror(A, x0, n = 1, includeInfeasible = TRUE)
plotmirror(m)
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.