content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Load a set of HTSeq output files and return a count matrix.
#
# fns               Character vector of HTSeq output file paths. If the vector
#                   is named, the names become the column names of the result;
#                   otherwise column names are derived from the file names,
#                   stripping any dot-separated prefix/suffix shared by all
#                   files.
# remove_unassigned Whether to drop HTSeq's special counters, whose IDs start
#                   with "__" (alignment_not_unique, ambiguous, no_feature,
#                   not_aligned, too_low_aQual).
#
# Returns a numeric matrix: one row per feature ID (union across files,
# missing entries filled with 0) and one column per file.
LoadHTseq <- function(fns, remove_unassigned = TRUE) {
  # Full argument names: the original `row=1`, `nr=`, `nc=` relied on partial
  # argument matching.
  ct <- lapply(fns, function(f) {
    read.table(f, sep = "\t", row.names = 1, stringsAsFactors = FALSE)
  })
  id <- sort(unique(unlist(lapply(ct, rownames), use.names = FALSE)))
  cnt <- matrix(0, nrow = length(id), ncol = length(ct),
                dimnames = list(id, names(fns)))
  for (i in seq_along(ct)) cnt[rownames(ct[[i]]), i] <- ct[[i]][, 1]
  # drop = FALSE keeps the result a matrix even with a single file or a
  # single remaining feature (the original collapsed to a vector and then
  # failed on colnames<-).
  if (remove_unassigned) cnt <- cnt[!grepl("__", rownames(cnt)), , drop = FALSE]
  if (is.null(colnames(cnt))) {
    nm <- basename(fns)
    parts <- strsplit(nm, ".", fixed = TRUE)
    pref <- vapply(parts, function(x) x[1], character(1))
    suff <- vapply(parts, function(x) x[length(x)], character(1))
    # Strip a shared prefix/suffix (plus the adjoining dot) using length
    # arithmetic rather than sub(), so prefixes containing regex
    # metacharacters (e.g. "+") cannot corrupt the derived names. As with
    # the original regex, a name is only trimmed when something remains
    # beyond the shared part.
    if (length(unique(pref)) == 1) {
      keep <- nchar(nm) > nchar(pref[1])
      nm[keep] <- substring(nm[keep], nchar(pref[1]) + 2)
    }
    if (length(unique(suff)) == 1) {
      keep <- nchar(nm) > nchar(suff[1])
      nm[keep] <- substr(nm[keep], 1, nchar(nm[keep]) - nchar(suff[1]) - 1)
    }
    colnames(cnt) <- nm
  } else {
    colnames(cnt) <- names(fns)
  }
  cnt
}
|
/R/LoadHTseq.R
|
no_license
|
shijianasdf/Rnaseq
|
R
| false
| false
| 1,251
|
r
|
# Load a set of HTSeq output files and return a count matrix.
#
# fns               Character vector of HTSeq output file paths. If the vector
#                   is named, the names become the column names of the result;
#                   otherwise column names are derived from the file names,
#                   stripping any dot-separated prefix/suffix shared by all
#                   files.
# remove_unassigned Whether to drop HTSeq's special counters, whose IDs start
#                   with "__" (alignment_not_unique, ambiguous, no_feature,
#                   not_aligned, too_low_aQual).
#
# Returns a numeric matrix: one row per feature ID (union across files,
# missing entries filled with 0) and one column per file.
LoadHTseq <- function(fns, remove_unassigned = TRUE) {
  # Full argument names: the original `row=1`, `nr=`, `nc=` relied on partial
  # argument matching.
  ct <- lapply(fns, function(f) {
    read.table(f, sep = "\t", row.names = 1, stringsAsFactors = FALSE)
  })
  id <- sort(unique(unlist(lapply(ct, rownames), use.names = FALSE)))
  cnt <- matrix(0, nrow = length(id), ncol = length(ct),
                dimnames = list(id, names(fns)))
  for (i in seq_along(ct)) cnt[rownames(ct[[i]]), i] <- ct[[i]][, 1]
  # drop = FALSE keeps the result a matrix even with a single file or a
  # single remaining feature (the original collapsed to a vector and then
  # failed on colnames<-).
  if (remove_unassigned) cnt <- cnt[!grepl("__", rownames(cnt)), , drop = FALSE]
  if (is.null(colnames(cnt))) {
    nm <- basename(fns)
    parts <- strsplit(nm, ".", fixed = TRUE)
    pref <- vapply(parts, function(x) x[1], character(1))
    suff <- vapply(parts, function(x) x[length(x)], character(1))
    # Strip a shared prefix/suffix (plus the adjoining dot) using length
    # arithmetic rather than sub(), so prefixes containing regex
    # metacharacters (e.g. "+") cannot corrupt the derived names. As with
    # the original regex, a name is only trimmed when something remains
    # beyond the shared part.
    if (length(unique(pref)) == 1) {
      keep <- nchar(nm) > nchar(pref[1])
      nm[keep] <- substring(nm[keep], nchar(pref[1]) + 2)
    }
    if (length(unique(suff)) == 1) {
      keep <- nchar(nm) > nchar(suff[1])
      nm[keep] <- substr(nm[keep], 1, nchar(nm[keep]) - nchar(suff[1]) - 1)
    }
    colnames(cnt) <- nm
  } else {
    colnames(cnt) <- names(fns)
  }
  cnt
}
|
# TEST summary.mulTree
# Unit tests for the internal helpers behind summary.mulTree().

# is.wholenumber(): TRUE only for numerics with no fractional part.
test_that("is.wholenumber works", {
  # Non-numeric input is an error
  expect_error(
    is.wholenumber("a")
  )
  # A fractional value is not a whole number
  expect_false(
    is.wholenumber(1.1)
  )
  # But rounding it makes it one
  expect_true(
    is.wholenumber(round(1.1))
  )
  expect_true(
    is.wholenumber(1)
  )
})

# prob.converter(): converts credibility-interval percentages into sorted
# lower/upper quantile probabilities (e.g. 50 -> 0.25, 0.75).
test_that("prob.converter works", {
  # Non-numeric input is an error
  expect_error(
    prob.converter("a")
  )
  # Transforming one CI yields a numeric pair
  expect_is(
    prob.converter(50), "numeric"
  )
  expect_equal(
    prob.converter(50), c(0.25, 0.75)
  )
  # Transforming multiple CIs interleaves them, sorted ascending
  expect_is(
    prob.converter(c(50, 95)), "numeric"
  )
  expect_equal(
    prob.converter(c(50, 95)), c(0.025, 0.25, 0.75, 0.975)
  )
  # Each input percentage yields two probabilities, so 100 inputs -> 200
  expect_equal(
    length(prob.converter(seq(1:100))), 200
  )
})
# Testing lapply.quantile
test_that("lapply.quantile works", {
# Errors
expect_error(
lapply.quantile("X", prob=c(50,95), cent.tend=mean)
)
expect_error(
lapply.quantile(rnorm(100), prob="X", cent.tend=mean)
)
expect_true(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[2]] != lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=median)[[2]]
)
# Output is a list
expect_is(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean), "list"
)
# Of two elements
expect_equal(
names(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)), c("quantiles", "central")
)
# First one is length 4
expect_equal(
length(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[1]]), 4
)
# Second one is length 1
expect_equal(
length(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[2]]), 1
)
# And works with additional arguments
expect_is(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean, na.rm=TRUE), "list"
)
})
# Testing lapply.hdr
test_that("lapply.hdr works", {
## Smooth hdr (internal)
smooth_simple <- smooth.hdr(hdrcde::hdr(rnorm(100), prob=c(50,95)), prob=c(50,95), "test_hrd")
expect_is(smooth_simple, "list")
expect_equal(names(smooth_simple), c("hdr", "mode", "falpha"))
expect_warning(smooth_bimod <- smooth.hdr(hdrcde::hdr((c(rnorm(50, 1, 1), rnorm(50, 10, 1))), prob=c(50,95)), prob=c(50,95), "test_hrd"))
expect_is(smooth_bimod, "list")
expect_equal(names(smooth_bimod), c("hdr", "mode", "falpha"))
# Errors
expect_error(
lapply.hdr("X", prob=c(50,95))
)
expect_warning(
expect_error(lapply.hdr(rnorm(100), prob="X"))
)
# Output is a list
expect_is(
lapply.hdr(rnorm(100), prob=c(50,95)), "list"
)
# Of two elements
expect_equal(
names(lapply.hdr(rnorm(100), prob=c(50,95))), c("hdr", "mode", "falpha")
)
# First one is length 4
expect_equal(
length(lapply.hdr(rnorm(100), prob=c(50,95))[[1]]), 4
)
# Second one is length 1
expect_equal(
length(lapply.hdr(rnorm(100), prob=c(50,95))[[2]]), 1
)
# And works with additional arguments
expect_is(
lapply.hdr(rnorm(100), prob=c(50,95), n=100), "list"
)
})
# Testing result.list.to.table
test_that("result.list.to.table works", {
list_test <- replicate(3, list("a"=rnorm(4), "b"=rnorm(sample(1:3, 1))), simplify = FALSE)
# Errors
expect_error(
result.list.to.table(NULL)
)
# Output is a matrix
expect_is(
result.list.to.table(list_test), "matrix"
)
# of dimension 3 by 5
expect_equal(
dim(result.list.to.table(list_test)), c(3,5)
)
})
# Loading the inbuilt data
data(lifespan.mcmc)
mulTree.results <- lifespan.mcmc
# Testing example
test_that("example works", {
# Errors
expect_error(
summary.mulTree(list(1))
)
expect_error(
summary(lifespan.mcmc, prob="A")
)
expect_error(
summary(lifespan.mcmc, use.hdr="why not")
)
expect_error(
expect_warning(summary(lifespan.mcmc, use.hdr=FALSE, cent.tend=matrix))
)
expect_error(
summary(lifespan.mcmc, prob = 101)
)
test <- lifespan.mcmc
test$Intercept <- 1
test$mass <- 1
test$volancy <- 1
test$phy.var <- 1
test$res.var <- 1
expect_error(
summary(test)
)
# Default example
test_example <- summary(lifespan.mcmc)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,5)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(mode hdr)","lower.CI(2.5)","lower.CI(25)","upper.CI(75)","upper.CI(97.5)")
)
# Example with different CI
test_example <- summary(lifespan.mcmc, prob = 75)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,3)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(mode hdr)","lower.CI(12.5)","upper.CI(87.5)")
)
# Example without hdr
test_example <- summary(lifespan.mcmc, use.hdr = FALSE)
test_example2 <- summary(lifespan.mcmc, use.hdr = FALSE, cent.tend=mean)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,5)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(median)","lower.CI(2.5)","lower.CI(25)","upper.CI(75)","upper.CI(97.5)")
)
})
|
/tests/testthat/test-summary.mulTree.R
|
no_license
|
TGuillerme/mulTree
|
R
| false
| false
| 5,613
|
r
|
# TEST summary.mulTree
# Testing is.wholenumber works
test_that("is.wholenumber works", {
# Error
expect_error(
is.wholenumber("a")
)
# Testing which number is whole
# Not this one
expect_false(
is.wholenumber(1.1)
)
# But this one is
expect_true(
is.wholenumber(round(1.1))
)
expect_true(
is.wholenumber(1)
)
})
# Testing if prob.converter works
test_that("prob.converter works", {
# Error
expect_error(
prob.converter("a")
)
# Transforming one CI
expect_is(
prob.converter(50), "numeric"
)
expect_equal(
prob.converter(50), c(0.25, 0.75)
)
# Transforming multiple CIs
expect_is(
prob.converter(c(50, 95)), "numeric"
)
expect_equal(
prob.converter(c(50, 95)), c(0.025, 0.25, 0.75, 0.975)
)
# And even more!
expect_equal(
length(prob.converter(seq(1:100))), 200
)
})
# Testing lapply.quantile
test_that("lapply.quantile works", {
# Errors
expect_error(
lapply.quantile("X", prob=c(50,95), cent.tend=mean)
)
expect_error(
lapply.quantile(rnorm(100), prob="X", cent.tend=mean)
)
expect_true(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[2]] != lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=median)[[2]]
)
# Output is a list
expect_is(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean), "list"
)
# Of two elements
expect_equal(
names(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)), c("quantiles", "central")
)
# First one is length 4
expect_equal(
length(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[1]]), 4
)
# Second one is length 1
expect_equal(
length(lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean)[[2]]), 1
)
# And works with additional arguments
expect_is(
lapply.quantile(rnorm(100), prob=c(50,95), cent.tend=mean, na.rm=TRUE), "list"
)
})
# Testing lapply.hdr
test_that("lapply.hdr works", {
## Smooth hdr (internal)
smooth_simple <- smooth.hdr(hdrcde::hdr(rnorm(100), prob=c(50,95)), prob=c(50,95), "test_hrd")
expect_is(smooth_simple, "list")
expect_equal(names(smooth_simple), c("hdr", "mode", "falpha"))
expect_warning(smooth_bimod <- smooth.hdr(hdrcde::hdr((c(rnorm(50, 1, 1), rnorm(50, 10, 1))), prob=c(50,95)), prob=c(50,95), "test_hrd"))
expect_is(smooth_bimod, "list")
expect_equal(names(smooth_bimod), c("hdr", "mode", "falpha"))
# Errors
expect_error(
lapply.hdr("X", prob=c(50,95))
)
expect_warning(
expect_error(lapply.hdr(rnorm(100), prob="X"))
)
# Output is a list
expect_is(
lapply.hdr(rnorm(100), prob=c(50,95)), "list"
)
# Of two elements
expect_equal(
names(lapply.hdr(rnorm(100), prob=c(50,95))), c("hdr", "mode", "falpha")
)
# First one is length 4
expect_equal(
length(lapply.hdr(rnorm(100), prob=c(50,95))[[1]]), 4
)
# Second one is length 1
expect_equal(
length(lapply.hdr(rnorm(100), prob=c(50,95))[[2]]), 1
)
# And works with additional arguments
expect_is(
lapply.hdr(rnorm(100), prob=c(50,95), n=100), "list"
)
})
# Testing result.list.to.table
test_that("result.list.to.table works", {
list_test <- replicate(3, list("a"=rnorm(4), "b"=rnorm(sample(1:3, 1))), simplify = FALSE)
# Errors
expect_error(
result.list.to.table(NULL)
)
# Output is a matrix
expect_is(
result.list.to.table(list_test), "matrix"
)
# of dimension 3 by 5
expect_equal(
dim(result.list.to.table(list_test)), c(3,5)
)
})
# Loading the inbuilt data
data(lifespan.mcmc)
mulTree.results <- lifespan.mcmc
# Testing example
test_that("example works", {
# Errors
expect_error(
summary.mulTree(list(1))
)
expect_error(
summary(lifespan.mcmc, prob="A")
)
expect_error(
summary(lifespan.mcmc, use.hdr="why not")
)
expect_error(
expect_warning(summary(lifespan.mcmc, use.hdr=FALSE, cent.tend=matrix))
)
expect_error(
summary(lifespan.mcmc, prob = 101)
)
test <- lifespan.mcmc
test$Intercept <- 1
test$mass <- 1
test$volancy <- 1
test$phy.var <- 1
test$res.var <- 1
expect_error(
summary(test)
)
# Default example
test_example <- summary(lifespan.mcmc)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,5)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(mode hdr)","lower.CI(2.5)","lower.CI(25)","upper.CI(75)","upper.CI(97.5)")
)
# Example with different CI
test_example <- summary(lifespan.mcmc, prob = 75)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,3)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(mode hdr)","lower.CI(12.5)","upper.CI(87.5)")
)
# Example without hdr
test_example <- summary(lifespan.mcmc, use.hdr = FALSE)
test_example2 <- summary(lifespan.mcmc, use.hdr = FALSE, cent.tend=mean)
expect_is(
test_example, "matrix"
)
expect_equal(
dim(test_example), c(5,5)
)
expect_equal(
unlist(dimnames(test_example)), c("Intercept","mass","volancy","phy.var","res.var","Estimates(median)","lower.CI(2.5)","lower.CI(25)","upper.CI(75)","upper.CI(97.5)")
)
})
|
# Survey sampling code for final project on Islands Data
# Aidan Draper
# December 4, 2018
#
# Draws a stratified sample of houses across three islands and appends each
# draw to a Google Sheet, re-drawing whenever a village/house combination has
# already been surveyed.
library(googlesheets)
set.seed(1337)

# Authorize and load the sheet of houses already surveyed
# (column 2 = village number, column 3 = house number).
gs_auth()
survey.df <- data.frame(gs_read(gs_key("1H-nJggWpOutHZG8YScab5UgrWqlnNDvaMWTDdtCytDA")))

sample_size <- 61  # total draws (14 + 20 + 27); kept for reference
# Allocation per island: 14 from island 1 (top), 20 from island 2 (middle),
# 27 from island 3 (bottom).
samples <- c(rep(1, 14), rep(2, 20), rep(3, 27))
# Number of houses per village; villages 1-6 belong to island 1,
# 7-15 to island 2, and 16-27 to island 3.
village_max <- c(1016, 1346, 818, 691, 763, 662,
                 738, 1152, 673, 470, 806, 534, 900, 1257, 315,
                 718, 591, 736, 810, 443, 420, 802, 178, 1610, 512, 1452, 836)
island_names <- c("top", "middle", "bottom")
# Villages per island, and the offset of each island's first village within
# village_max.
island_sizes <- c(6, 9, 12)
village_offsets <- c(0, 6, 15)

# Draw one (village, house) pair for an island, re-drawing until the
# combination is not already in `surveyed`. Returns c(village, house).
# NOTE: the original retry branches for islands 2 and 3 mistakenly resampled
# from island 1's village range (sample(1:6,1), no offset into village_max);
# this single parameterized helper fixes that copy-paste bug.
draw_unsurveyed_house <- function(n_villages, offset, village_max, surveyed) {
  repeat {
    village_num <- sample(seq_len(n_villages), 1)
    house <- sample(seq_len(village_max[village_num + offset]), 1)
    # First surveyed row for this village (NA if never surveyed; %in% NA
    # is FALSE, so the draw is then accepted -- same logic as the original).
    surveyed_row <- match(village_num, surveyed[, 2])
    if (!house %in% surveyed[surveyed_row, 3]) {
      return(c(village_num, house))
    }
  }
}

for (island_num in samples) {
  draw <- draw_unsurveyed_house(island_sizes[island_num],
                                village_offsets[island_num],
                                village_max, survey.df)
  village_num <- draw[1]
  house <- draw[2]
  cat("The sample village is", village_num, "and the house is", house, "\n")
  # Add to google sheets
  # NOTE(review): survey.df is not updated inside this loop, so two draws in
  # the same run could still collide -- confirm that is acceptable.
  gs_add_row(ss = gs_title("213_survey_data"), ws = "Sheet1",
             input = c(island_names[island_num], village_num, house))
}

# Once your list is in Google Sheets, set num_people to the household size and
# run the line below to pick which person in the house to sample.
num_people <- 2  # change this to the number of ppl in the house
sample(1:num_people, 1)
|
/final_survey.R
|
no_license
|
adraper2/stats-code
|
R
| false
| false
| 2,526
|
r
|
# Survey sampling code for final project on Islands Data
# Aidan Draper
# December 4, 2018
#
# Draws a stratified sample of houses across three islands and appends each
# draw to a Google Sheet, re-drawing whenever a village/house combination has
# already been surveyed.
library(googlesheets)
set.seed(1337)

# Authorize and load the sheet of houses already surveyed
# (column 2 = village number, column 3 = house number).
gs_auth()
survey.df <- data.frame(gs_read(gs_key("1H-nJggWpOutHZG8YScab5UgrWqlnNDvaMWTDdtCytDA")))

sample_size <- 61  # total draws (14 + 20 + 27); kept for reference
# Allocation per island: 14 from island 1 (top), 20 from island 2 (middle),
# 27 from island 3 (bottom).
samples <- c(rep(1, 14), rep(2, 20), rep(3, 27))
# Number of houses per village; villages 1-6 belong to island 1,
# 7-15 to island 2, and 16-27 to island 3.
village_max <- c(1016, 1346, 818, 691, 763, 662,
                 738, 1152, 673, 470, 806, 534, 900, 1257, 315,
                 718, 591, 736, 810, 443, 420, 802, 178, 1610, 512, 1452, 836)
island_names <- c("top", "middle", "bottom")
# Villages per island, and the offset of each island's first village within
# village_max.
island_sizes <- c(6, 9, 12)
village_offsets <- c(0, 6, 15)

# Draw one (village, house) pair for an island, re-drawing until the
# combination is not already in `surveyed`. Returns c(village, house).
# NOTE: the original retry branches for islands 2 and 3 mistakenly resampled
# from island 1's village range (sample(1:6,1), no offset into village_max);
# this single parameterized helper fixes that copy-paste bug.
draw_unsurveyed_house <- function(n_villages, offset, village_max, surveyed) {
  repeat {
    village_num <- sample(seq_len(n_villages), 1)
    house <- sample(seq_len(village_max[village_num + offset]), 1)
    # First surveyed row for this village (NA if never surveyed; %in% NA
    # is FALSE, so the draw is then accepted -- same logic as the original).
    surveyed_row <- match(village_num, surveyed[, 2])
    if (!house %in% surveyed[surveyed_row, 3]) {
      return(c(village_num, house))
    }
  }
}

for (island_num in samples) {
  draw <- draw_unsurveyed_house(island_sizes[island_num],
                                village_offsets[island_num],
                                village_max, survey.df)
  village_num <- draw[1]
  house <- draw[2]
  cat("The sample village is", village_num, "and the house is", house, "\n")
  # Add to google sheets
  # NOTE(review): survey.df is not updated inside this loop, so two draws in
  # the same run could still collide -- confirm that is acceptable.
  gs_add_row(ss = gs_title("213_survey_data"), ws = "Sheet1",
             input = c(island_names[island_num], village_num, house))
}

# Once your list is in Google Sheets, set num_people to the household size and
# run the line below to pick which person in the house to sample.
num_people <- 2  # change this to the number of ppl in the house
sample(1:num_people, 1)
|
# Brightness contrast illusion ("明るさの対比"): draws user-positioned gray
# squares so the same mid-gray center can be compared against different
# surround shades.
library(grid)
grid.newpage()
# Prompt the user for a comma-separated list of numbers and return it as a
# numeric vector (prompt is Japanese: "enter a vector, comma-separated").
selfvector <- function () {
vec <- readline("適当なベクトルを入力(,で区切り) : ")
as.numeric(unlist(strsplit(vec, ",")))
}
x1 <- selfvector()      # rectangle center x positions
y1 <- selfvector()      # rectangle center y positions
color1 <- selfvector()  # gray levels (0-1) for the outer squares
# Single numeric value from the user (prompt: "enter a number").
# NOTE(review): color2 is never used below -- the inner squares are drawn
# with a fixed gray(0.5); possibly gray(color2) was intended. Confirm.
color2 <- as.numeric(readline("適当な数値を入力:"))
# Outer surround squares (side 1/2), filled per color1.
grid.rect(x1, y1, 1/2, 1/2, gp = gpar(col = NA, fill = gray(color1)))
# Inner test squares (side 1/6), all the same mid gray.
grid.rect(x1, y1, 1/6, 1/6, gp = gpar(col = NA, fill = gray(0.5)))
|
/expt/brightness-contrast.r
|
no_license
|
T-Oxy/illusion-of-illusion
|
R
| false
| false
| 449
|
r
|
# Brightness contrast illusion ("明るさの対比"): draws user-positioned gray
# squares so the same mid-gray center can be compared against different
# surround shades.
library(grid)
grid.newpage()
# Prompt the user for a comma-separated list of numbers and return it as a
# numeric vector (prompt is Japanese: "enter a vector, comma-separated").
selfvector <- function () {
vec <- readline("適当なベクトルを入力(,で区切り) : ")
as.numeric(unlist(strsplit(vec, ",")))
}
x1 <- selfvector()      # rectangle center x positions
y1 <- selfvector()      # rectangle center y positions
color1 <- selfvector()  # gray levels (0-1) for the outer squares
# Single numeric value from the user (prompt: "enter a number").
# NOTE(review): color2 is never used below -- the inner squares are drawn
# with a fixed gray(0.5); possibly gray(color2) was intended. Confirm.
color2 <- as.numeric(readline("適当な数値を入力:"))
# Outer surround squares (side 1/2), filled per color1.
grid.rect(x1, y1, 1/2, 1/2, gp = gpar(col = NA, fill = gray(color1)))
# Inner test squares (side 1/6), all the same mid gray.
grid.rect(x1, y1, 1/6, 1/6, gp = gpar(col = NA, fill = gray(0.5)))
|
#' Prepare the internal datasets
#'
#' This function will create and save the datasets in the \code{R/sysdata.rda}
#' file.
#'
#' @param force If TRUE, the inst/extdata file will be created, even if it
#'   exists. Default: \code{FALSE}.
#'
#' @examples
#' prepare_internal_datasets()
prepare_internal_datasets <- function(force = FALSE) {
  # NOTE(review): sysdata is assigned but never used below --
  # devtools::use_data() chooses the output path itself (and is deprecated in
  # favor of usethis::use_data() in current devtools). Confirm before removing.
  sysdata <- "R/sysdata.rda"
  exp_description <- data_fantom_exp_description(force = force)
  enhancers_tpm <- data_fantom_enhancers("tpm", force = force)
  enhancers_counts <- data_fantom_enhancers("counts", force = force)
  devtools::use_data(exp_description, enhancers_tpm, enhancers_counts,
                     internal = TRUE, overwrite = TRUE)
}
#' Prepare the enhancers dataset
#'
#' This will download the Fantom's enhancers file and convert it into
#' \code{GRanges} format. The file will be downloaded in the
#' \code{inst/extdata/} directory.
#'
#' Download url:
#'   "http://fantom.gsc.riken.jp/5/datafiles/latest/extra/Enhancers/"
#' Filename:
#'   "mouse_permissive_enhancers_phase_1_and_2_expression_tpm_matrix.txt.gz"
#'
#' @param type Which expression matrix to fetch: "tpm" (default) or "counts".
#' @param force If TRUE, the inst/extdata file will be created, even if it
#'   exists. Default: \code{FALSE}.
#'
#' @return The \code{GRanges} produced.
#'
#' @examples
#' FantomEnhancers.mm9::data_fantom_enhancers()
data_fantom_enhancers <- function(type = c("tpm", "counts"), force = FALSE) {
  # match.arg() validates `type` and resolves the default to "tpm".
  type = match.arg(type)
  # Pick the FANTOM5 matrix file for the requested unit.
  if (type == "tpm") {
    filename <-
      "mouse_permissive_enhancers_phase_1_and_2_expression_tpm_matrix.txt.gz"
  } else {
    filename <-
      "mouse_permissive_enhancers_phase_1_and_2_expression_count_matrix.txt.gz"
  }
  url <- "http://fantom.gsc.riken.jp/5/datafiles/latest/extra/Enhancers/"
  url <- paste0(url, filename)
  filename <- paste0("inst/extdata/", filename)
  # download_file() is a project helper defined elsewhere in data-raw;
  # presumably `force` controls re-downloading an existing copy -- confirm.
  download_file(url, filename, force = force)
  # Import the enhancer matrix; first column "Id" holds "chr:start-end".
  enhancers <- read.table(gzfile(filename), header = TRUE,
                          stringsAsFactors = FALSE)
  # Split the "chr:start-end" IDs into seqnames/start/end columns.
  df <- data.frame(do.call("rbind", strsplit(enhancers$Id, "[:-]")))
  colnames(df) <- c("seqnames", "start", "end")
  # Coordinates come back as factors/characters; coerce to numeric.
  df[["start"]] <- as.numeric(as.character(df[["start"]]))
  df[["end"]] <- as.numeric(as.character(df[["end"]]))
  df[["strand"]] <- "*"
  # Keep all expression columns as GRanges metadata columns.
  df <- cbind(df, enhancers[,-1])
  GenomicRanges::makeGRangesFromDataFrame(df, keep.extra.columns = TRUE,
      seqinfo = GenomeInfoDb::Seqinfo(genome = "mm9"))
}
#' Prepare the exp_description dataset
#'
#' This will download the Fantom's experiment description file and import it as
#' a data.frame. The file will be downloaded in the \code{inst/extdata/}
#' directory.
#'
#' Download url:
#'   "http://fantom.gsc.riken.jp/5/datafiles/latest/basic/mouse.tissue.hCAGE/"
#' Filename:
#'   "inst/extdata/00_mouse.tissue.hCAGE.mm9.assay_sdrf.txt"
#'
#' @param force If TRUE, the inst/extdata file will be created, even if it
#'   exists. Default: \code{FALSE}.
#'
#' @return The \code{data.frame} produced.
#'
#' @examples
#' FantomEnhancers.mm9::data_fantom_exp_description()
data_fantom_exp_description <- function(force = FALSE) {
  # Download the SDRF experiment-description table.
  # NOTE(review): the original header also listed
  # "00_mouse.cell_line.hCAGE.mm9.assay_sdrf.txt", but only the tissue file
  # is downloaded here -- confirm whether the cell-line file is also needed.
  filename <- "00_mouse.tissue.hCAGE.mm9.assay_sdrf.txt"
  url <-
    "http://fantom.gsc.riken.jp/5/datafiles/latest/basic/mouse.tissue.hCAGE/"
  url <- paste0(url, filename)
  filename <- paste0("inst/extdata/", filename)
  # download_file() is a project helper defined elsewhere in data-raw.
  download_file(url, filename, force = force)
  # quote = "" disables quote handling so stray quote characters in fields
  # do not break parsing.
  read.table(filename, header = TRUE, sep = "\t", quote = "",
             stringsAsFactors = FALSE)
}
|
/data-raw/data.R
|
no_license
|
CharlesJB/FantomEnhancers.mm9
|
R
| false
| false
| 3,534
|
r
|
# Prepare the internal datasets
#
# This function will create and save the datasets in the \code{R/systada.rda}
# file.
#
# param force If TRUE, the inst/extdata file will be created, even if it
# exists. Default: \code{FALSE}.
#
# examples
# prepare_internal_datasets
prepare_internal_datasets <- function(force = FALSE) {
sysdata <- "R/sysdata.rda"
exp_description <- data_fantom_exp_description(force = force)
enhancers_tpm <- data_fantom_enhancers("tpm", force = force)
enhancers_counts <- data_fantom_enhancers("counts", force = force)
devtools::use_data(exp_description, enhancers_tpm, enhancers_counts,
internal = TRUE, overwrite = TRUE)
}
# Prepare the enhancers dataset
#
# This will download the Fantom's enhancers file and convert it into
# \code{GRanges} format The file will be downloaded in the \code{inst/extdata/}
# directory.
#
# Download url:
# "http://fantom.gsc.riken.jp/5/datafiles/latest/extra/Enhancers/"
# Filename:
# "mouse_permissive_enhancers_phase_1_and_2_expression_tpm_matrix.txt.gz"
#
# param force If TRUE, the inst/extdata file will be created, even if it
# exists. Default: \code{FALSE}.
#
# return The \code{GRanges} produced.
#
# examples
# FantomEnhancers.mm9::data_fantom_enhancers()
data_fantom_enhancers <- function(type = c("tpm", "counts"), force = FALSE) {
type = match.arg(type)
# Download enhancers tpm
if (type == "tpm") {
filename <-
"mouse_permissive_enhancers_phase_1_and_2_expression_tpm_matrix.txt.gz"
} else {
filename <-
"mouse_permissive_enhancers_phase_1_and_2_expression_count_matrix.txt.gz"
}
url <- "http://fantom.gsc.riken.jp/5/datafiles/latest/extra/Enhancers/"
url <- paste0(url, filename)
filename <- paste0("inst/extdata/", filename)
download_file(url, filename, force = force)
# Import the enhancer file
enhancers <- read.table(gzfile(filename), header = TRUE,
stringsAsFactors = FALSE)
# Convert to GRanges
df <- data.frame(do.call("rbind", strsplit(enhancers$Id, "[:-]")))
colnames(df) <- c("seqnames", "start", "end")
df[["start"]] <- as.numeric(as.character(df[["start"]]))
df[["end"]] <- as.numeric(as.character(df[["end"]]))
df[["strand"]] <- "*"
df <- cbind(df, enhancers[,-1])
GenomicRanges::makeGRangesFromDataFrame(df, keep.extra.columns = TRUE,
seqinfo = GenomeInfoDb::Seqinfo(genome = "mm9"))
}
# Prepare the exp_description dataset
#
# This will download the Fantom's experiment description file and import it as
# a data.frame. The file will be downloaded in the \code{inst/extdata/}
# directory.
#
# Download url:
# "http://fantom.gsc.riken.jp/5/datafiles/latest/basic/mouse.tissue.hCAGE/"
# Filename:
# "inst/extdata/00_mouse.cell_line.hCAGE.mm9.assay_sdrf.txt"
# "inst/extdata/00_mouse.tissue.hCAGE.mm9.assay_sdrf.txt"
#
# param force If TRUE, the inst/extdata file will be created, even if it
# exists. Default: \code{FALSE}.
#
# return The \code{data.frame} produced.
#
# examples
# FantomEnhancers.hg19::data_fantom_enhancers()
data_fantom_exp_description <- function(force = FALSE) {
# Download TSS
filename <- "00_mouse.tissue.hCAGE.mm9.assay_sdrf.txt"
url <-
"http://fantom.gsc.riken.jp/5/datafiles/latest/basic/mouse.tissue.hCAGE/"
url <- paste0(url, filename)
filename <- paste0("inst/extdata/", filename)
download_file(url, filename, force = force)
# Prepare the data.frame
read.table(filename, header = TRUE, sep = "\t", quote = "",
stringsAsFactors = FALSE)
}
|
library(datasets)

# Define a server for the Shiny app. When shiny sources server.R it uses the
# file's LAST expression as the server function, so this anonymous function
# must be the final value in the file. The stray trailing getwd() has been
# removed: it would have made a character string (the working directory) the
# file's final value instead of the server function.
function(input, output) {
  # Fill in the spot we created for a plot
  output$phonePlot <- renderPlot({
    # Render a barplot; WorldPhones is in thousands, hence * 1000
    barplot(WorldPhones[, input$region] * 1000,
            main = input$region,
            ylab = "Number of Telephones",
            xlab = "Year")
  })
}
|
/Shiny_app_assignment/server.R
|
no_license
|
guchiguchi/course9_Shiny_Application_and_Reproducible_Pitch
|
R
| false
| false
| 351
|
r
|
library(datasets)

# Define a server for the Shiny app. When shiny sources server.R it uses the
# file's LAST expression as the server function, so this anonymous function
# must be the final value in the file. The stray trailing getwd() has been
# removed: it would have made a character string (the working directory) the
# file's final value instead of the server function.
function(input, output) {
  # Fill in the spot we created for a plot
  output$phonePlot <- renderPlot({
    # Render a barplot; WorldPhones is in thousands, hence * 1000
    barplot(WorldPhones[, input$region] * 1000,
            main = input$region,
            ylab = "Number of Telephones",
            xlab = "Year")
  })
}
|
# Packages
library(shiny)
library(tidyverse)
library(ggplot2)  # already attached by tidyverse; harmless but redundant

# Set working directory.
# NOTE(review): a hard-coded setwd() makes this app machine-specific; app.R
# is normally run with the app directory as the working directory already.
setwd("/Users/User_2/Desktop/bees")

# Load the raw iNaturalist bee observations.
bees_raw <- read.csv("bees_raw.csv")

# Keep only records observed in the United States.
bees_us <- bees_raw %>%
  filter(place_country_name == "United States")

# US state polygons for the base map (ggplot2::map_data).
usa <- map_data("state")

# ui.R: title, a bee-family drop-down, and the heat-map output.
ui <- fluidPage(
  # Title
  titlePanel("iNaturalist Bee Observations"),
  # Sidebar with drop-down for family names
  sidebarLayout(
    sidebarPanel(
      selectInput(inputId = "family",
                  label = "Select Family:",
                  choices = list("Andrenidae" = "Andrenidae", "Apidae" = "Apidae", "Colletidae" = "Colletidae", "Halictidae" = "Halictidae", "Megachilidae" = "Megachilidae", "Melittidae" = "Melittidae"))
    ),
    # Main panel with ggplot output
    mainPanel(
      plotOutput("heatMap")
    )
  )
)

# server.R: filter by the selected family and render a 2-D binned heat map
# over the state map, clipped to the continental US extent.
server <- function(input, output) {
  # Reactive subset of bees_us for the selected family.
  dat <- reactive(
    bees_us %>%
      filter(taxon_family_name == input$family))
  # Using the filtered data to make the map.
  output$heatMap <- renderPlot({
    ggplot(dat()) +
      geom_polygon(data = usa, aes(x = long, y = lat, group = group), fill="lightgray", colour = "white")+
      geom_bin2d(aes(x = longitude, y = latitude), bins = 100)+
      scale_fill_continuous(type = "viridis") +
      xlim(-125,-65)+
      ylim(20,50)+
      theme_minimal()
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
/bee_map_app/app.R
|
no_license
|
maggieklope/Bee_size
|
R
| false
| false
| 1,493
|
r
|
#Packages
library(shiny)
library(tidyverse)
library(ggplot2)
#set working directory
setwd("/Users/User_2/Desktop/bees")
#loading data
bees_raw <- read.csv("bees_raw.csv")
#filter to us records
bees_us <- bees_raw %>%
filter(place_country_name == "United States")
#map data for polygons
usa <- map_data("state")
#ui.R
ui <- fluidPage(
# Title
titlePanel("iNaturalist Bee Observations"),
# Sidebar with drop-down for family names
sidebarLayout(
sidebarPanel(
selectInput(inputId = "family",
label = "Select Family:",
choices = list("Andrenidae" = "Andrenidae", "Apidae" = "Apidae", "Colletidae" = "Colletidae", "Halictidae" = "Halictidae", "Megachilidae" = "Megachilidae", "Melittidae" = "Melittidae"))
),
# Mainpanel with ggplot output
mainPanel(
plotOutput("heatMap")
)
)
)
#Server.R
server <- function(input, output) {
#reactive data for filtering out by family
dat <- reactive(
bees_us %>%
filter(taxon_family_name == input$family))
#using filtered data to make map
output$heatMap <- renderPlot({
ggplot(dat()) +
geom_polygon(data = usa, aes(x = long, y = lat, group = group), fill="lightgray", colour = "white")+
geom_bin2d(aes(x = longitude, y = latitude), bins = 100)+
scale_fill_continuous(type = "viridis") +
xlim(-125,-65)+
ylim(20,50)+
theme_minimal()
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Jojo Emerson
# March 27, 2019
# Star Dog scatter plot maker
# Purpose: create star/dog graphs for restaurant menu analytics
# Version: 1.4.0
# Changes:
# - improve ggplot download
# - add credit
library(shiny)
library(readxl)
library(ggplot2)
library(ggrepel)
library(writexl)
library(plotly)
library(tidyverse)
library(shinyjs)
library(dplyr)
# breaks(): vector of axis tick positions starting at `min`, stepping by
# `breaks_every`, never exceeding `max`.
#
# min          First (smallest) break value.
# max          Upper bound; the last break is the largest min + k*breaks_every
#              that is <= max.
# breaks_every Positive step between consecutive breaks.
#
# Returns a numeric vector of break positions. Equivalent to the original
# while loop that grew the vector one element at a time, but uses seq(),
# avoiding O(n^2) vector growth. The max < min edge case (original returned
# just `min`) is preserved explicitly.
breaks <- function(min, max, breaks_every){
  if (max < min) {
    return(min)
  }
  seq(from = min, to = max, by = breaks_every)
}
# UI: template download / data upload / create controls in a sidebar, with
# the plot, its option widgets, and an update button in the main panel.
ui <- fluidPage(
  # Enable shinyjs (used elsewhere in the app to show/hide outputs)
  useShinyjs(),
  # Title
  titlePanel("Star/Dog Graph Creator"),
  # Sidebar with a template download, data upload, and create graph button
  sidebarLayout(
    sidebarPanel(
      tags$h4("Download Excel Template:"),
      downloadButton(outputId = "template_download", label = "Download template"),
      tags$br(),tags$br(),tags$br(),
      tags$h4("Upload Data:"),
      "File must be .xlsx (Excel) format",
      fileInput("data_upload", label = NULL, multiple = FALSE,
      accept = c(".xlsx")),
      tags$h4("Create Star/Dog Graph:"),
      actionButton("create", label = "Create!"),
      tags$br(), tags$br(),
      tags$h4("Download current graph:"),
      downloadButton(outputId = "download_plot", label = "Download plot!")
    ),
    mainPanel(
      # Scatterplot of Star/Dog analysis; "stardog_update" starts hidden and
      # presumably replaces "stardog" after the user updates -- see server.
      fluidRow(
        plotOutput("stardog"),
        shinyjs::hidden(plotOutput("stardog_update")),
        tags$br(),tags$br(),tags$br()
      ),
      # Option widgets (rendered server-side via uiOutput), three columns
      fluidRow(
        # Graph options
        column(width = 4,
        uiOutput("graph_options")
        ),
        # Y axis options
        column(width = 4,
        uiOutput("y_options")
        ),
        # X axis options
        column(width = 4,
        uiOutput("x_options")
        )
      ),
      fluidRow(align = "center",
      # Update graph button (rendered server-side)
      uiOutput("update"),
      tags$br(),tags$br(),tags$br()
      )
    )
  ),
  fluidRow(
    # Footer with developer credit
    tags$footer(hr(),"Developer: ", tags$a(href="mailto:jgemerson93@gmail.com", "Joanna Emerson"),
    align = "left", style = "
bottom:0;
width:100%;
padding: 20px;
"
    )
  )
)
#SEVER#
server <- function(input, output, session) {
## Downloadable template ##
#Upload template
template<-read_xlsx("data/StarDogTemplate.xlsx")
#Download template on click
output$template_download <- downloadHandler(
filename = function() {
paste("StarDogTemplate",".xlsx", sep="")
},
content = function(file) {
write_xlsx(template, file)
}
)
## Data ##
dfs<-reactiveValues(data = NULL, update = NULL)
observeEvent(input$data_upload, {
inFile <- input$data_upload
dfs$data<-read_xlsx(inFile$datapath)
})
## Scatterplot ##
observeEvent(input$create, {
req(input$data_upload)
#Create Star/Dog scatterplot on clickd
#X axis: Sales
#Y axis: Contribution
#Points labeled with item name
#Quadrant lines are the median value for Sales/Contribution
final_plot<-reactive({
ggplot(dfs$data, aes(x=Sales, y=Contribution)) + geom_point(color = "orangered3") +
#Horizontal line at the median Contribution point
geom_hline(yintercept=median(dfs$data$Contribution), color = "blue", alpha = .7) +
#Bertical line at the median Sales point
geom_vline(xintercept=median(dfs$data$Sales), color = "blue", alpha = .7) +
#Scale of the y axis (Contribution)
#Defaults: ggplot to fit break points max/min of data are max/min of plot
scale_y_continuous(breaks=waiver(),limits= NULL) +
#Scale of the y axis (Sales)
#Defaults: ggplot to fit break points max/min of data are max/min of plot
scale_x_continuous(breaks=waiver(), limits = NULL) +
#Add repel labels of the items to the points on the graph
geom_text_repel(label=dfs$data$Item, size = 4) +
#Title
#Default: nothing
ggtitle("")
#theme done in render plot for download size regulation
})
output$stardog <- renderPlot(final_plot()+theme_minimal(base_size = 17))
download_plot<<-final_plot()+theme_minimal(base_size = 14)
#Store min and max values
ymin<-min(dfs$data$Contribution)
ymax<-max(dfs$data$Contribution)
xmin<-min(dfs$data$Sales)
xmax<-max(dfs$data$Sales)
#Render options buttons below plot
#Y axis options
output$y_options<-renderUI({
tagList(
#Minimum: 25% lower than actual minimum
numericInput("ymin", label = "Minimum Contribution value:", value = round((ymin-.25*ymin),0)),
#Maximum: 25% higher than actual maximum
numericInput("ymax", label = "Maximum Contribution value:", value = round(ymax+.25*ymax,0)),
#Breaks
numericInput("ybreaks", label = "Contribution ticks every:", value = 1)
)
})
#X axis buttons
output$x_options<-renderUI({
tagList(
#Minimum: 25% lower than actual minimum
numericInput("xmin", label = "Minimum Sales value:", value = round(xmin-.25*xmin,0)),
#Maximum: 25% higher than actual maximum
numericInput("xmax", label = "Maximum Sales value:", value = round(xmax+.25*xmax,0)),
#Breaks
numericInput("xbreaks", label = "Sales ticks every: ", value = 500)
)
})
#Overall graph options
output$graph_options<-renderUI({
tagList(
#Title
textInput("title", label = "Graph title:"),
#Graph by item category
selectInput("by_cat", label = "Graph by item category:",
choices = ifelse(is.na(dfs$data$Category), c("All categories"), c("All categories", unique(dfs$data$Category))),
selected = TRUE))
})
#Update button
output$update<-renderUI({
#Title
actionButton("update_graph", label = "Update graph!")
})
})
#update dataaset per category selection
observeEvent(input$by_cat, {
dfs$update<-filter(dfs$data, Category == input$by_cat)
})
#observe the update click
observeEvent(input$update_graph,{
req(input$by_cat)
ifelse(input$by_cat == "All categories",
dfs$update<-dfs$data,
dfs$update<-filter(dfs$data, Category == input$by_cat)
)
#hide original
shinyjs::hide("stardog")
#show update
shinyjs::show("stardog_update")
#Update scatterplot on update click
updated_plot<-reactive({
ggplot(dfs$update, aes(x=Sales, y=Contribution)) + geom_point(color = "orangered3") +
#Horizontal line at the median Contribution point
geom_hline(yintercept=median(dfs$update$Contribution), color = "blue", alpha = .7) +
#Bertical line at the median Sales point
geom_vline(xintercept=median(dfs$update$Sales), color = "blue", alpha = .7) +
#Scale of the y axis (Contribution)
#Defaults: ggplot to fit break points max/min of data are max/min of plot
scale_y_continuous(breaks=breaks(input$ymin, input$ymax, input$ybreaks),
limits= c(input$ymin, input$ymax)) +
#Scale of the y axis (Sales)
#Defaults: ggplot to fit break points max/min of data are max/min of plot
scale_x_continuous(breaks=breaks(input$xmin, input$xmax, input$xbreaks),
limits = c(input$xmin, input$xmax)) +
#Add repel labels of the items to the points on the graph
geom_text_repel(label=dfs$update$Item, size = 4) +
#Title
#Default: nothing
ggtitle(input$title)
#theme done in render plot for download size regulation
})
#Render updated plot
output$stardog_update<-renderPlot(updated_plot()+theme_minimal(base_size = 17))
download_plot<<-updated_plot()+theme_minimal(base_size = 14)
})
#Download plot on click
output$download_plot <-downloadHandler(
filename = function() {
paste("Stardog-",input$title, ".png", sep="")
},
content = function(file) {
ggsave(file, plot=download_plot, device = 'png', width = 8, height = 6, units = "in", dpi = 72)
}
)
}
# Run the application
shinyApp(ui = ui, server = server)
|
/app.R
|
no_license
|
jgemerson/StarDog
|
R
| false
| false
| 9,385
|
r
|
# Jojo Emerson
# March 27, 2019
# Star Dog scatter plot maker
# Purpose: create star/dog graphs for restaurant menu analytics
# Version: 1.4.0
# Changes:
# - improve ggplot download
# - add credit
library(shiny)
library(readxl)
library(ggplot2)
library(ggrepel)
library(writexl)
library(plotly)
library(tidyverse)
library(shinyjs)
library(dplyr)
#Breaks function:
#creates vector that begins at minimum axis value, increments every break #, and stops at max value
# Compute axis tick positions from `min` up to (at most) `max`.
#
# Returns a numeric vector starting at `min` and stepping by `breaks_every`
# for as long as values remain <= `max`, so `max` itself appears only when it
# lies exactly on a step. Mirrors the original accumulate-in-a-while-loop
# behaviour, including the degenerate case `max < min`, which yields just
# `min`. Used as the `breaks =` argument of scale_x/y_continuous().
#
# Improvements over the loop version: seq() avoids O(n^2) growth via repeated
# c(), avoids floating-point drift from repeated addition, and non-positive
# step sizes now fail fast instead of looping forever.
#
# @param min numeric; first (lowest) tick position.
# @param max numeric; upper bound for tick positions.
# @param breaks_every numeric; positive spacing between consecutive ticks.
# @return numeric vector of tick positions.
breaks <- function(min, max, breaks_every) {
  stopifnot(is.numeric(min), is.numeric(max), is.numeric(breaks_every),
            length(breaks_every) == 1, breaks_every > 0)
  # Original while-loop never executed when max < min and returned c(min);
  # seq() would error on the wrong-sign increment, so keep that behaviour.
  if (max < min) {
    return(min)
  }
  seq(from = min, to = max, by = breaks_every)
}
#UI#
# UI definition for the Star/Dog graph creator.
# Sidebar: template download, .xlsx data upload, "Create!" trigger, and a
# plot-download button. Main panel: the scatterplot (two plot outputs that
# the server toggles with shinyjs) plus option widgets rendered server-side
# via uiOutput() once data has been uploaded.
ui <- fluidPage(
#Enable shinyjs so the server can show/hide the two plot outputs
useShinyjs(),
# Title
titlePanel("Star/Dog Graph Creator"),
# Sidebar with a template download, data upload, and create graph button
sidebarLayout(
sidebarPanel(
tags$h4("Download Excel Template:"),
downloadButton(outputId = "template_download", label = "Download template"),
tags$br(),tags$br(),tags$br(),
tags$h4("Upload Data:"),
"File must be .xlsx (Excel) format",
fileInput("data_upload", label = NULL, multiple = FALSE,
accept = c(".xlsx")),
tags$h4("Create Star/Dog Graph:"),
actionButton("create", label = "Create!"),
tags$br(), tags$br(),
tags$h4("Download current graph:"),
downloadButton(outputId = "download_plot", label = "Download plot!")
),
mainPanel(
# Scatterplot of Star/Dog analysis: "stardog" is the initial plot;
# "stardog_update" starts hidden and replaces it after "Update graph!"
fluidRow(
plotOutput("stardog"),
shinyjs::hidden(plotOutput("stardog_update")),
tags$br(),tags$br(),tags$br()
),
# Options (all rendered by the server after the first plot is created)
fluidRow(
#Graph options (title, category filter)
column(width = 4,
uiOutput("graph_options")
),
#Y axis options (min/max/tick spacing)
column(width = 4,
uiOutput("y_options")
),
#X axis options (min/max/tick spacing)
column(width = 4,
uiOutput("x_options")
)
),
fluidRow(align = "center",
#"Update graph!" button, rendered once a plot exists
uiOutput("update"),
tags$br(),tags$br(),tags$br()
)
)
),
fluidRow(
#footer with developer credit
tags$footer(hr(),"Developer: ", tags$a(href="mailto:jgemerson93@gmail.com", "Joanna Emerson"),
align = "left", style = "
bottom:0;
width:100%;
padding: 20px;
"
)
)
)
# SERVER #
# Server logic: reads the uploaded workbook into a reactiveValues store,
# builds the initial Star/Dog scatterplot on "Create!", renders the axis and
# graph option widgets from the data's observed ranges, and rebuilds the plot
# (optionally filtered by item category) on "Update graph!".
# NOTE(review): `download_plot` is written with `<<-` into the global
# environment so the downloadHandler can see the latest plot; a reactiveVal
# would be safer for concurrent sessions -- TODO confirm single-user use.
server <- function(input, output, session) {
## Downloadable template ##
#Read the bundled template workbook once at session start
template<-read_xlsx("data/StarDogTemplate.xlsx")
#Serve the template on click
output$template_download <- downloadHandler(
filename = function() {
paste("StarDogTemplate",".xlsx", sep="")
},
content = function(file) {
write_xlsx(template, file)
}
)
## Data ##
#data = full uploaded table; update = category-filtered view of it
dfs<-reactiveValues(data = NULL, update = NULL)
observeEvent(input$data_upload, {
inFile <- input$data_upload
dfs$data<-read_xlsx(inFile$datapath)
})
## Scatterplot ##
observeEvent(input$create, {
req(input$data_upload)
#Create Star/Dog scatterplot on click
#X axis: Sales
#Y axis: Contribution
#Points labeled with item name
#Quadrant lines are the median value for Sales/Contribution
final_plot<-reactive({
ggplot(dfs$data, aes(x=Sales, y=Contribution)) + geom_point(color = "orangered3") +
#Horizontal line at the median Contribution point
geom_hline(yintercept=median(dfs$data$Contribution), color = "blue", alpha = .7) +
#Vertical line at the median Sales point
geom_vline(xintercept=median(dfs$data$Sales), color = "blue", alpha = .7) +
#Scale of the y axis (Contribution)
#Defaults: ggplot picks break points; max/min of data are max/min of plot
scale_y_continuous(breaks=waiver(),limits= NULL) +
#Scale of the x axis (Sales)
#Defaults: ggplot picks break points; max/min of data are max/min of plot
scale_x_continuous(breaks=waiver(), limits = NULL) +
#Add repel labels of the items to the points on the graph
geom_text_repel(label=dfs$data$Item, size = 4) +
#Title
#Default: nothing
ggtitle("")
#theme applied in renderPlot so the download copy can use a smaller base size
})
output$stardog <- renderPlot(final_plot()+theme_minimal(base_size = 17))
#Global copy used by the download handler (see NOTE at top)
download_plot<<-final_plot()+theme_minimal(base_size = 14)
#Store min and max values of the data to seed the axis option widgets
ymin<-min(dfs$data$Contribution)
ymax<-max(dfs$data$Contribution)
xmin<-min(dfs$data$Sales)
xmax<-max(dfs$data$Sales)
#Render options buttons below plot
#Y axis options
output$y_options<-renderUI({
tagList(
#Minimum: 25% lower than actual minimum
numericInput("ymin", label = "Minimum Contribution value:", value = round((ymin-.25*ymin),0)),
#Maximum: 25% higher than actual maximum
numericInput("ymax", label = "Maximum Contribution value:", value = round(ymax+.25*ymax,0)),
#Tick spacing
numericInput("ybreaks", label = "Contribution ticks every:", value = 1)
)
})
#X axis options
output$x_options<-renderUI({
tagList(
#Minimum: 25% lower than actual minimum
numericInput("xmin", label = "Minimum Sales value:", value = round(xmin-.25*xmin,0)),
#Maximum: 25% higher than actual maximum
numericInput("xmax", label = "Maximum Sales value:", value = round(xmax+.25*xmax,0)),
#Tick spacing
numericInput("xbreaks", label = "Sales ticks every: ", value = 500)
)
})
#Overall graph options
output$graph_options<-renderUI({
tagList(
#Title
textInput("title", label = "Graph title:"),
#Graph by item category
#NOTE(review): ifelse() is vectorized and is applied to the whole
#Category column here, so only its first element decides the choices;
#presumably `if (all(is.na(...)))` was intended -- TODO confirm
selectInput("by_cat", label = "Graph by item category:",
choices = ifelse(is.na(dfs$data$Category), c("All categories"), c("All categories", unique(dfs$data$Category))),
selected = TRUE))
})
#Update button
output$update<-renderUI({
actionButton("update_graph", label = "Update graph!")
})
})
#Refilter the working dataset whenever the category selection changes
observeEvent(input$by_cat, {
dfs$update<-filter(dfs$data, Category == input$by_cat)
})
#Rebuild the plot when "Update graph!" is clicked
observeEvent(input$update_graph,{
req(input$by_cat)
#NOTE(review): ifelse() used for scalar branching; it works only via the
#side effects of the assignments -- plain if/else would be clearer
ifelse(input$by_cat == "All categories",
dfs$update<-dfs$data,
dfs$update<-filter(dfs$data, Category == input$by_cat)
)
#hide the original plot
shinyjs::hide("stardog")
#show the updated plot
shinyjs::show("stardog_update")
#Updated scatterplot honouring the user-chosen axis limits/breaks/title
updated_plot<-reactive({
ggplot(dfs$update, aes(x=Sales, y=Contribution)) + geom_point(color = "orangered3") +
#Horizontal line at the median Contribution point
geom_hline(yintercept=median(dfs$update$Contribution), color = "blue", alpha = .7) +
#Vertical line at the median Sales point
geom_vline(xintercept=median(dfs$update$Sales), color = "blue", alpha = .7) +
#Y axis scale driven by the numericInput widgets via breaks()
scale_y_continuous(breaks=breaks(input$ymin, input$ymax, input$ybreaks),
limits= c(input$ymin, input$ymax)) +
#X axis scale driven by the numericInput widgets via breaks()
scale_x_continuous(breaks=breaks(input$xmin, input$xmax, input$xbreaks),
limits = c(input$xmin, input$xmax)) +
#Add repel labels of the items to the points on the graph
geom_text_repel(label=dfs$update$Item, size = 4) +
#User-supplied title (empty string by default)
ggtitle(input$title)
#theme applied in renderPlot so the download copy can use a smaller base size
})
#Render updated plot
output$stardog_update<-renderPlot(updated_plot()+theme_minimal(base_size = 17))
#Refresh the global download copy (see NOTE at top)
download_plot<<-updated_plot()+theme_minimal(base_size = 14)
})
#Write whichever plot was rendered last to a PNG on download
output$download_plot <-downloadHandler(
filename = function() {
paste("Stardog-",input$title, ".png", sep="")
},
content = function(file) {
ggsave(file, plot=download_plot, device = 'png', width = 8, height = 6, units = "in", dpi = 72)
}
)
}
# Run the application
shinyApp(ui = ui, server = server)
|
#' mlr learner registration for Weka's IBk (k-nearest-neighbours) classifier.
#'
#' Declares the learner's id, backing package, tunable hyperparameters and
#' capabilities so mlr can construct it via makeLearner("classif.IBk").
#' Parameter ids mirror Weka's IBk command-line switches: K is the
#' neighbourhood size (>= 1, default 1); I/F/E/X/W/A are forwarded verbatim
#' to Weka -- see RWeka::WOW("IBk") for their exact semantics (not asserted
#' here).
#' @export
makeRLearner.classif.IBk = function() {
makeRLearnerClassif(
cl = "classif.IBk",
package = "RWeka",
par.set = makeParamSet(
makeLogicalLearnerParam(id = "I"),
makeLogicalLearnerParam(id = "F"),
makeIntegerLearnerParam(id = "K", lower = 1L, default = 1L),
makeLogicalLearnerParam(id = "E"),
makeIntegerLearnerParam(id = "W", lower = 0L),
makeLogicalLearnerParam(id = "X"),
makeUntypedLearnerParam(id = "A", default = "weka.core.neighboursearch.LinearNNSearch")
),
# supports binary and multiclass tasks, numeric and factor features,
# and posterior probability prediction
properties = c("twoclass", "multiclass", "numerics", "factors", "prob"),
name = "K-nearest neighbours",
short.name = "IBk",
note = ""
)
}
#' Fit Weka's IBk classifier on the requested subset of an mlr task.
#'
#' Hyperparameters arriving via `...` are packed into a Weka control object
#' and forwarded to IBk. Missing values are passed through untouched
#' (na.action = na.pass) so Weka applies its own missing-value handling.
#' @export
trainLearner.classif.IBk = function(.learner, .task, .subset, .weights = NULL, ...) {
  task.formula = getTaskFormula(.task)
  train.data = getTaskData(.task, .subset)
  IBk(task.formula, data = train.data, control = Weka_control(...), na.action = na.pass)
}
#' Predict with a fitted IBk model.
#'
#' Requests class probabilities from the underlying Weka model when the
#' learner was configured with predict.type == "prob"; any other setting
#' yields hard class labels. Extra arguments are forwarded to predict().
#' @export
predictLearner.classif.IBk = function(.learner, .model, .newdata, ...) {
  wants.prob = identical(.learner$predict.type, "prob")
  pred.type = if (wants.prob) "prob" else "class"
  predict(.model$learner.model, newdata = .newdata, type = pred.type, ...)
}
|
/R/RLearner_classif_IBk.R
|
no_license
|
mmadsen/mlr
|
R
| false
| false
| 1,140
|
r
|
#' @export
makeRLearner.classif.IBk = function() {
makeRLearnerClassif(
cl = "classif.IBk",
package = "RWeka",
par.set = makeParamSet(
makeLogicalLearnerParam(id = "I"),
makeLogicalLearnerParam(id = "F"),
makeIntegerLearnerParam(id = "K", lower = 1L, default = 1L),
makeLogicalLearnerParam(id = "E"),
makeIntegerLearnerParam(id = "W", lower = 0L),
makeLogicalLearnerParam(id = "X"),
makeUntypedLearnerParam(id = "A", default = "weka.core.neighboursearch.LinearNNSearch")
),
properties = c("twoclass", "multiclass", "numerics", "factors", "prob"),
name = "K-nearest neighbours",
short.name = "IBk",
note = ""
)
}
#' @export
trainLearner.classif.IBk = function(.learner, .task, .subset, .weights = NULL, ...) {
ctrl = Weka_control(...)
IBk(getTaskFormula(.task), data = getTaskData(.task, .subset), control = ctrl, na.action = na.pass)
}
#' @export
predictLearner.classif.IBk = function(.learner, .model, .newdata, ...) {
type = switch(.learner$predict.type, prob = "prob", "class")
predict(.model$learner.model, newdata = .newdata, type = type, ...)
}
|
library(shiny)
library(ggplot2)
library(grid)
source("../../GetData.R")
source("../../GetImagesForCompound.R")
source("explore_compound.R")
source("explore_target.R")
source("explore_pathway.R")
source("explore_QC.R")
source("explore_QQ_density_plots.R")
source("explore_early_vs_late_acting.R")
source("lists.R")
# WARNING: The first time the app loads, it takes a minute to load images, plots. Afterwards, it runs smoothly.
#####################################################################################
###### --------------------- Define server logic ----------------------------- ######
#####################################################################################
# Note: The following error occurs when the "select compound" bar is empty
# Error in `$<-.data.frame`(`*tmp*`, "is.compound", value = FALSE) :
# replacement has 1 row, data has 0
# Define server logic
# Server for the compound/target/pathway exploration app. Each output is an
# independent renderPlot that subsets the preloaded per-marker data frames
# (data_tall_each_marker, confidence_intervals_each_marker, etc., sourced
# above) according to the corresponding input widget and delegates drawing to
# a plot_* helper from the sourced scripts.
# NOTE(review): the helper functions and the shared data objects are defined
# in the source()d files, so their exact semantics are not visible here.
shinyServer(function(input, output) {
# Sparkline for the selected compound and phenotypic marker.
output$compound.sparklines <- renderPlot({
# Get data for specified compound and marker
data_tall.compound_and_marker <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
# Plot data
plot_sparkline(data_tall.compound_and_marker, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound)
})
# Sparklines for the selected target and phenotypic marker.
output$target.sparklines <- renderPlot({
# Get data for specified target and marker
data_tall.target_and_marker <- subset(data_tall_each_marker[[input$target.marker]], Target.class..11Mar15. == input$target)
# Plot data
plot_target_sparklines(data_tall.target_and_marker, confidence_intervals_each_marker[[input$target.marker]], input$target.marker, input$target)
})
# Sparklines for the selected pathway and phenotypic marker.
# NOTE(review): filters on input$target.marker but labels/plots with
# input$pathway.marker -- looks inconsistent, confirm which was intended.
output$pathway.sparklines <- renderPlot({
# Get data for specified pathway and marker
data_tall.target_and_marker <- subset(data_tall_each_marker[[input$target.marker]], Pathway == input$pathway)
# Plot data
plot_pathway_sparklines(data_tall.target_and_marker, confidence_intervals_each_marker[[input$target.marker]], input$pathway.marker, input$pathway)
})
# Sparklines for all compounds sharing the selected compound's target,
# with the selected compound flagged via is.compound.
output$compound.target <- renderPlot({
# Get target for specified compound (taken from its first data row)
data_tall_compound.target <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
target <- data_tall_compound.target$Target.class..11Mar15.[1]
# Get all data with this target
data_tall.target <- subset(data_tall_each_marker[[input$marker]], Target.class..11Mar15. == target)
data_tall.target$is.compound <- FALSE
data_tall.target[data_tall.target$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_target(data_tall.target, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound, target)
})
# Sparklines for all compounds sharing the selected compound's pathway,
# with the selected compound flagged via is.compound.
output$compound.pathway <- renderPlot({
# Get pathway for specified compound (taken from its first data row)
data_tall_compound.pathway <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
pathway <- data_tall_compound.pathway$Pathway[1]
# Get all data with this pathway
data_tall.pathway <- subset(data_tall_each_marker[[input$marker]], Pathway == pathway)
data_tall.pathway$is.compound <- FALSE
data_tall.pathway[data_tall.pathway$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_pathway(data_tall.pathway, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound, pathway)
})
# Highlights sparklines for the selected target within all pathways.
output$target.pathway <- renderPlot({
# Get all data with target hilights
data_tall.pathways <- data_tall_each_marker[[input$target.marker]]
data_tall.pathways$is.target <- FALSE
data_tall.pathways[data_tall.pathways$Target.class..11Mar15. == input$target, ]$is.target <- TRUE
plot_pathway_hilight_target(data_tall.pathways, confidence_intervals_each_marker[[input$target.marker]], input$target.marker, input$target)
})
# Cluster view, highlighting the selected compound.
output$compound.cluster <- renderPlot({
# Mark the compound within the data frame
data_tall.cluster <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$marker]], phenotypic_markers[[input$marker]], input$clusters)
data_tall.cluster$is.compound <- FALSE
data_tall.cluster[data_tall.cluster$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_clusters(data_tall.cluster, input$compound, input$marker)
})
# Cluster view, highlighting compounds with the selected target.
output$target.cluster <- renderPlot({
# Get all data with target hilights
data_tall.clusters <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$target.marker]], phenotypic_markers[[input$target.marker]], input$target.clusters)
data_tall.clusters$is.target <- FALSE
data_tall.clusters[data_tall.clusters$Target.class..11Mar15. == input$target, ]$is.target <- TRUE
plot_target_clusters(data_tall.clusters, input$target, input$target.marker)
})
# Cluster view, highlighting compounds in the selected pathway.
output$pathway.cluster <- renderPlot({
# Get all data with pathway hilights
data_tall.clusters <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$pathway.marker]], phenotypic_markers[[input$pathway.marker]], input$pathway.clusters)
data_tall.clusters$is.pathway <- FALSE
data_tall.clusters[data_tall.clusters$Pathway == input$pathway, ]$is.pathway <- TRUE
plot_pathway_clusters(data_tall.clusters, input$pathway, input$pathway.marker)
})
# Displays a microscopy image for the selected compound/type/time point.
# NOTE(review): get_images() writes jpegs into www/ as a side effect on
# every render; warnings are suppressed, so fetch failures are silent.
output$display.image <- renderImage({
# Get images for that compound into www folder of shiny app
suppressWarnings(get_images(input$compound))
image_file <- paste("www/",image_types[[input$image.type]],"_t_",input$time.elapsed,".jpeg",sep="")
return(list(
src = image_file,
filetype = "image/jpeg",
height = 520,
width = 696
))
}, deleteFile = FALSE)
# Plot the negative controls vs treatment for specified phenotypic marker
output$QC.by.plate <- renderPlot({
plot_QC_by_plate(data_tall_each_marker[[input$QC.marker]], phenotypic_markers[[input$QC.marker]])
})
# Plot all sparklines for specified phenotypic marker
output$all.sparklines <- renderPlot({
plot_all_sparklines(data_tall_each_marker[[input$overview.marker]], phenotypic_markers[[input$overview.marker]])
})
# QQ plot for specified phenotypic marker and curve metric
output$qq.plot.one.metric <- renderPlot({
get_single_metric_qqplot(data_tall_each_marker[[input$metric.marker]], metrics[[input$metric]])
})
# Density plot for specified phenotypic marker and curve metric
output$density.plot.one.metric <- renderPlot({
get_single_metric_density(data_tall_each_marker[[input$metric.marker]], metrics[[input$metric]])
})
# Early- vs late-acting comparison for the specified marker
output$early.vs.late.acting <- renderPlot({
get_early_vs_late_acting(data_tall_each_marker[[input$early.vs.late.marker]], confidence_intervals_each_marker[[input$early.vs.late.marker]], phenotypic_markers[[input$early.vs.late.marker]], input$above.or.below.NC)
})
# # Generates table of additional information for the compound (disabled)
# output$compound.additional_info <- renderDataTable(
#   data_tall_each_marker[[input$marker]][data_tall_each_marker[[input$marker]]$Compound == input$compound, ]
# )
# TODO: add an export button for downloading the jpegs of the given compound
})
|
/Scripts/shiny_scripts/explore/server.R
|
no_license
|
mas29/ClarkeLab_github
|
R
| false
| false
| 7,978
|
r
|
library(shiny)
library(ggplot2)
library(grid)
source("../../GetData.R")
source("../../GetImagesForCompound.R")
source("explore_compound.R")
source("explore_target.R")
source("explore_pathway.R")
source("explore_QC.R")
source("explore_QQ_density_plots.R")
source("explore_early_vs_late_acting.R")
source("lists.R")
# WARNING: The first time the app loads, it takes a minute to load images, plots. Afterwards, it runs smoothly.
#####################################################################################
###### --------------------- Define server logic ----------------------------- ######
#####################################################################################
# Note: The following error occurs when the "select compound" bar is empty
# Error in `$<-.data.frame`(`*tmp*`, "is.compound", value = FALSE) :
# replacement has 1 row, data has 0
# Define server logic
shinyServer(function(input, output) {
# Generates a sparkline for the selected compound and phenotypic marker.
output$compound.sparklines <- renderPlot({
# Get data for specified compound and marker
data_tall.compound_and_marker <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
# Plot data
plot_sparkline(data_tall.compound_and_marker, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound)
})
# Generates sparklines for the selected target and phenotypic marker.
output$target.sparklines <- renderPlot({
# Get data for specified target and marker
data_tall.target_and_marker <- subset(data_tall_each_marker[[input$target.marker]], Target.class..11Mar15. == input$target)
# Plot data
plot_target_sparklines(data_tall.target_and_marker, confidence_intervals_each_marker[[input$target.marker]], input$target.marker, input$target)
})
# Generates sparklines for the selected pathway and phenotypic marker.
output$pathway.sparklines <- renderPlot({
# Get data for specified pathway and marker
data_tall.target_and_marker <- subset(data_tall_each_marker[[input$target.marker]], Pathway == input$pathway)
# Plot data
plot_pathway_sparklines(data_tall.target_and_marker, confidence_intervals_each_marker[[input$target.marker]], input$pathway.marker, input$pathway)
})
# Generates sparklines for target of the selected compound.
output$compound.target <- renderPlot({
# Get target for specified compound
data_tall_compound.target <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
target <- data_tall_compound.target$Target.class..11Mar15.[1]
# Get all data with this target
data_tall.target <- subset(data_tall_each_marker[[input$marker]], Target.class..11Mar15. == target)
data_tall.target$is.compound <- FALSE
data_tall.target[data_tall.target$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_target(data_tall.target, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound, target)
})
# Generates sparklines for pathway of the selected compound.
output$compound.pathway <- renderPlot({
# Get pathway for specified compound
data_tall_compound.pathway <- subset(data_tall_each_marker[[input$marker]], Compound == input$compound)
pathway <- data_tall_compound.pathway$Pathway[1]
# Get all data with this pathway
data_tall.pathway <- subset(data_tall_each_marker[[input$marker]], Pathway == pathway)
data_tall.pathway$is.compound <- FALSE
data_tall.pathway[data_tall.pathway$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_pathway(data_tall.pathway, confidence_intervals_each_marker[[input$marker]], input$marker, input$compound, pathway)
})
# Hilights sparklines for target in pathways.
output$target.pathway <- renderPlot({
# Get all data with target hilights
data_tall.pathways <- data_tall_each_marker[[input$target.marker]]
data_tall.pathways$is.target <- FALSE
data_tall.pathways[data_tall.pathways$Target.class..11Mar15. == input$target, ]$is.target <- TRUE
plot_pathway_hilight_target(data_tall.pathways, confidence_intervals_each_marker[[input$target.marker]], input$target.marker, input$target)
})
# Generates clusters, hilighting selected compound.
output$compound.cluster <- renderPlot({
# Mark the compound within the data frame
data_tall.cluster <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$marker]], phenotypic_markers[[input$marker]], input$clusters)
data_tall.cluster$is.compound <- FALSE
data_tall.cluster[data_tall.cluster$Compound == input$compound, ]$is.compound <- TRUE
# Plot data
plot_clusters(data_tall.cluster, input$compound, input$marker)
})
# Hilights sparklines for target in clusters.
output$target.cluster <- renderPlot({
# Get all data with target hilights
data_tall.clusters <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$target.marker]], phenotypic_markers[[input$target.marker]], input$target.clusters)
data_tall.clusters$is.target <- FALSE
data_tall.clusters[data_tall.clusters$Target.class..11Mar15. == input$target, ]$is.target <- TRUE
plot_target_clusters(data_tall.clusters, input$target, input$target.marker)
})
# Hilights sparklines for pathway in clusters.
output$pathway.cluster <- renderPlot({
# Get all data with pathway hilights
data_tall.clusters <- get_data_w_clusters(data_wide, data_tall_each_marker[[input$pathway.marker]], phenotypic_markers[[input$pathway.marker]], input$pathway.clusters)
data_tall.clusters$is.pathway <- FALSE
data_tall.clusters[data_tall.clusters$Pathway == input$pathway, ]$is.pathway <- TRUE
plot_pathway_clusters(data_tall.clusters, input$pathway, input$pathway.marker)
})
output$display.image <- renderImage({
# Get images for that compound into www folder of shiny app
suppressWarnings(get_images(input$compound))
image_file <- paste("www/",image_types[[input$image.type]],"_t_",input$time.elapsed,".jpeg",sep="")
return(list(
src = image_file,
filetype = "image/jpeg",
height = 520,
width = 696
))
}, deleteFile = FALSE)
# Plot the negative controls vs treatment for specified phenotypic marker
output$QC.by.plate <- renderPlot({
plot_QC_by_plate(data_tall_each_marker[[input$QC.marker]], phenotypic_markers[[input$QC.marker]])
})
# Plot all sparklines for specified phenotypic marker
output$all.sparklines <- renderPlot({
plot_all_sparklines(data_tall_each_marker[[input$overview.marker]], phenotypic_markers[[input$overview.marker]])
})
# QQ plot for specified phenotypic marker and curve metric
output$qq.plot.one.metric <- renderPlot({
get_single_metric_qqplot(data_tall_each_marker[[input$metric.marker]], metrics[[input$metric]])
})
# QQ plot for specified phenotypic marker and curve metric
output$density.plot.one.metric <- renderPlot({
get_single_metric_density(data_tall_each_marker[[input$metric.marker]], metrics[[input$metric]])
})
# QQ plot for specified phenotypic marker and curve metric
output$early.vs.late.acting <- renderPlot({
get_early_vs_late_acting(data_tall_each_marker[[input$early.vs.late.marker]], confidence_intervals_each_marker[[input$early.vs.late.marker]], phenotypic_markers[[input$early.vs.late.marker]], input$above.or.below.NC)
})
# # Generates table of additional information for the compound
# output$compound.additional_info <- renderDataTable(
# data_tall_each_marker[[input$marker]][data_tall_each_marker[[input$marker]]$Compound == input$compound, ]
# )
#!!!!!! would be nice to add an export button to the shiny app for exportin the jpegs of the given compound...
})
|
# sim by pastor
# Simulate the "pastor" case-control data set.
#
# Draws n = 2000 covariate values from Beta(4, 4), passes them through a
# steep reverse sigmoid (large x -> ~0, small x -> ~1), and generates
# Bernoulli outcomes with linear predictor .5*(1 - 2*x.expit). The first 500
# cases (y == 1) and first 500 controls (y == 0) are retained, mimicking a
# 1:1 case-control design.
#
# @param seed integer RNG seed for reproducibility.
# @return data.frame with columns y, x.star (raw covariate), x.star.expit
#   (transformed covariate) and x.bin.med (indicator for x.star above the
#   retained subsample's median).
# NOTE(review): assumes both classes contain >= 500 observations; if not,
# the [1:500] indexing would introduce NA rows -- highly unlikely at
# n = 2000 with these parameters, but not guarded against.
sim.pastor=function(seed) {
    set.seed(seed)
    n=2000
    x=rbeta(n,4,4)
    # reverse sigmoid transform of the covariate
    x.expit=1/(exp(60*(x-.5))+1)
    eta = .5*(1-2*x.expit)
    y=rbern(n, expit(eta))
    # keep the first 500 cases and the first 500 controls
    ind = c(which(y==1)[1:500], which(y==0)[1:500])
    dat=data.frame(y,x.star=x,x.star.expit=x.expit)[ind,]
    # median split computed on the retained subsample; the full-sample
    # version previously computed before subsetting was dead code (it was
    # never used and is removed here)
    dat$x.bin.med=ifelse(dat$x.star>median(dat$x.star),1,0)
    dat
}
# Simulate data under an elbow- or sigmoid-shaped covariate effect.
#
# The `label` string drives everything: its trailing digit ("3"/"4"/"5")
# picks the covariate mean mu; a "sigmoid..." prefix selects the covariate
# distribution (imbalanced / uniform / mixture / gamma / normal) and a
# 2-parameter-logistic transform expit.2pl(x, e., b.); an "elbow" prefix
# selects a hinge transform at threshold tr.. Outcomes y are Bernoulli with
# logit alpha + beta * transformed covariate. The observed x is x.star plus
# heteroscedastic measurement error (small above 3.5, growing below).
#
# @param n sample size.
# @param seed RNG seed.
# @param label scenario name, e.g. "sigmoidmix4" or "elbow5".
# @param alpha,beta intercept and slope on the transformed covariate scale.
# @param e.,b. 2PL parameters, required for "sigmoid" labels.
# @param tr. hinge threshold, required for "elbow" labels.
# @return data.frame with y, the true covariate x.star and its transform,
#   the error-prone x, and many thresholded/binned versions of both.
# NOTE(review): rbern, expit, expit.2pl and %+% are defined elsewhere in
# the package; their semantics are assumed, not visible here. The order of
# random draws (covariate, y, measurement error) must not change or seeded
# reproducibility breaks.
sim.my = function (n, seed, label, alpha, beta, e.=NULL, b.=NULL, tr.=NULL) {
set.seed(seed)
# trailing digit of the label fixes the covariate mean
if (endsWith(label,"5")) mu=5.7 else if (endsWith(label,"4")) mu=4.7 else if (endsWith(label,"3")) mu=3.7 else stop("label not recognized")
sd.x=1.6
# covariate distribution, chosen by label prefix
if(startsWith(label,"sigmoidimb")) { # imbalance: 2/3 normal, 1/3 folded below mu
x.star=c(rnorm(n-round(n/3), mu, sd=sd.x), mu-abs(rnorm(round(n/3), 0, sd=sd.x)))
} else if(startsWith(label,"sigmoidlin")) { # uniform over mu +/- 2*sd.x
x.star=runif(n)*4*sd.x + mu-2*sd.x
} else if(startsWith(label,"sigmoidmix")) { # mixture: 60% normal, 40% point mass
x.star=c(rnorm(n*.6, mu, sd=sd.x), rep(mu-2*sd.x, n*.4))
} else if(startsWith(label,"sigmoidgam")) { # gamma, standardized then rescaled
x.star=sd.x*scale(rgamma(n=n, 2.5, 1))+mu
} else { # default: plain normal
x.star=rnorm(n, mu, sd=sd.x)
}
# covariate transform entering the linear predictor
if (startsWith(label, "elbow")) {
# hinge at tr.: zero below, linear above
x.star.tr = ifelse(x.star>tr., x.star-tr., 0)
X=cbind(1, x.star.tr)
} else if (startsWith(label, "sigmoid")) {
# 2-parameter-logistic transform
x.star.expit = expit.2pl(x.star, e=e., b=b.)
X=cbind(1, x.star.expit)
} else stop ("label not supported: "%+%label)
y=rbern(n, expit (X %*% c(alpha, beta)))
# heteroscedastic measurement error: tiny above 3.5, growing linearly below
sd.err=ifelse(x.star>3.5, 0.05, 1.5-0.414*x.star)
x=rnorm(n, x.star, sd.err)
# assemble output with many pre-thresholded variants of x.star and x
dat=data.frame (
y=y,
x.star=x.star,
x.bin.med=ifelse(x.star>median(x.star), 1, 0),
x.tri = ifelse(x.star>quantile(x.star,2/3),"High",ifelse(x.star>quantile(x.star,1/3),"Medium","Low")),
x=x,
x.tr.1=ifelse(x>log(100), x, 0) ,
x.tr.2=ifelse(x>log(100), x, log(100)) ,
x.tr.3=ifelse(x>3.5, x, 0) ,
x.tr.4=ifelse(x>3.5, x, 3.5),
x.tr.5=ifelse(x>3.5, x, 3.5/2),
x.tr.6=ifelse(x>5, x, 5) ,
x.ind =ifelse(x>3.5, 0, 1),
x.bin.35=ifelse(x>3.5, 1, 0),
x.bin.6=ifelse(x>6, 1, 0) ,
x.bin.log100=ifelse(x>log(100), 1, 0)
)
dat$x.tri=as.factor(dat$x.tri)
# also attach the transformed truth used to generate y
if (startsWith(label, "elbow")) {
dat[["x.star.tr"]]=x.star.tr
} else if (startsWith(label, "sigmoid")) {
dat[["x.star.expit"]]=x.star.expit
} else stop ("label not supported: "%+%label)
dat
}
|
/R/simulations.R
|
no_license
|
ChuangWan/chngpt
|
R
| false
| false
| 2,724
|
r
|
# sim by pastor
sim.pastor=function(seed) {
set.seed(seed)
n=2000
x=rbeta(n,4,4)
x.expit=1/(exp(60*(x-.5))+1)
x.bin.med=ifelse(x>median(x),1,0)
eta = .5*(1-2*x.expit)
y=rbern(n, expit(eta))
ind = c(which(y==1)[1:500], which(y==0)[1:500])
dat=data.frame(y,x.star=x,x.star.expit=x.expit)[ind,]
dat$x.bin.med=ifelse(dat$x.star>median(dat$x.star),1,0)
dat
}
# Simulate data for change-point / sigmoid threshold models.
#   n      sample size;  seed  RNG seed
#   label  design name: the prefix selects the latent covariate distribution
#          and the mean model ("elbow*" = hinge at tr., "sigmoid*" =
#          two-parameter logistic via expit.2pl with location e., scale b.);
#          the suffix "3"/"4"/"5" selects the latent mean mu.
#   alpha, beta  intercept and slope on the transformed covariate (logit scale).
# Returns a data.frame with the outcome y, the latent covariate x.star, its
# error-prone surrogate x, and many thresholded/binned versions of both.
# NOTE(review): rbern/expit/expit.2pl and the "%+%" string-concat operator are
# supplied by an external package (e.g. kyotil) — confirm it is attached.
sim.my = function (n, seed, label, alpha, beta, e.=NULL, b.=NULL, tr.=NULL) {
    set.seed(seed)
    # Label suffix fixes the mean of the latent covariate.
    if (endsWith(label,"5")) mu=5.7 else if (endsWith(label,"4")) mu=4.7 else if (endsWith(label,"3")) mu=3.7 else stop("label not recognized")
    sd.x=1.6
    # Latent covariate distribution, chosen by the label prefix.
    if(startsWith(label,"sigmoidimb")) { # imbalance: 1/3 of mass folded below mu
        x.star=c(rnorm(n-round(n/3), mu, sd=sd.x), mu-abs(rnorm(round(n/3), 0, sd=sd.x)))
    } else if(startsWith(label,"sigmoidlin")) { # uniform over mu +/- 2*sd.x
        x.star=runif(n)*4*sd.x + mu-2*sd.x
    } else if(startsWith(label,"sigmoidmix")) { # mixture: 60% normal, 40% point mass
        x.star=c(rnorm(n*.6, mu, sd=sd.x), rep(mu-2*sd.x, n*.4))
    } else if(startsWith(label,"sigmoidgam")) { # standardized gamma, rescaled
        x.star=sd.x*scale(rgamma(n=n, 2.5, 1))+mu
    } else {
        x.star=rnorm(n, mu, sd=sd.x)
    }
    # Design matrix on the transformed covariate: hinge (elbow) or sigmoid.
    if (startsWith(label, "elbow")) {
        x.star.tr = ifelse(x.star>tr., x.star-tr., 0)
        X=cbind(1, x.star.tr)
    } else if (startsWith(label, "sigmoid")) {
        x.star.expit = expit.2pl(x.star, e=e., b=b.)
        X=cbind(1, x.star.expit)
    } else stop ("label not supported: "%+%label)
    # Bernoulli outcome from the logistic mean model.
    y=rbern(n, expit (X %*% c(alpha, beta)))
    # Heteroscedastic measurement error: small above 3.5, growing below it.
    sd.err=ifelse(x.star>3.5, 0.05, 1.5-0.414*x.star)
    x=rnorm(n, x.star, sd.err)
    # Assemble outcome, latent covariate, surrogate x and derived codings
    # (thresholds at log(100), 3.5, 5, 6; indicators; median/tercile splits).
    dat=data.frame (
        y=y,
        x.star=x.star,
        x.bin.med=ifelse(x.star>median(x.star), 1, 0),
        x.tri = ifelse(x.star>quantile(x.star,2/3),"High",ifelse(x.star>quantile(x.star,1/3),"Medium","Low")),
        x=x,
        x.tr.1=ifelse(x>log(100), x, 0) ,
        x.tr.2=ifelse(x>log(100), x, log(100)) ,
        x.tr.3=ifelse(x>3.5, x, 0) ,
        x.tr.4=ifelse(x>3.5, x, 3.5),
        x.tr.5=ifelse(x>3.5, x, 3.5/2),
        x.tr.6=ifelse(x>5, x, 5) ,
        x.ind =ifelse(x>3.5, 0, 1),
        x.bin.35=ifelse(x>3.5, 1, 0),
        x.bin.6=ifelse(x>6, 1, 0) ,
        x.bin.log100=ifelse(x>log(100), 1, 0)
    )
    dat$x.tri=as.factor(dat$x.tri)
    # Also expose the transformed latent covariate used in the mean model.
    if (startsWith(label, "elbow")) {
        dat[["x.star.tr"]]=x.star.tr
    } else if (startsWith(label, "sigmoid")) {
        dat[["x.star.expit"]]=x.star.expit
    } else stop ("label not supported: "%+%label)
    dat
}
|
testlist <- list(lims = structure(c(-Inf, NaN, Inf, -Inf), .Dim = c(2L, 2L )), points = structure(c(7.62592113861733e-315, NA, 0, 1.34178029629947e-309, 0, NaN), .Dim = 3:2))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
/palm/inst/testfiles/pbc_distances/libFuzzer_pbc_distances/pbc_distances_valgrind_files/1612987577-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 236
|
r
|
# Auto-generated regression case (libFuzzer/valgrind, per the file path) for
# palm's internal pbc_distances() routine: degenerate inputs (Inf/NaN limits,
# NA and subnormal point coordinates); the result structure is printed.
testlist <- list(lims = structure(c(-Inf, NaN, Inf, -Inf), .Dim = c(2L, 2L )), points = structure(c(7.62592113861733e-315, NA, 0, 1.34178029629947e-309, 0, NaN), .Dim = 3:2))
result <- do.call(palm:::pbc_distances,testlist)
str(result)
|
#' Calculate temporal trends of synchrony
#'
#' @description The function calculates temporal trends of spatial synchrony from a \code{data.frame} with tree-ring width chronologies using a moving window as described in Shestakova et al. (2016). This method splits the time variable (\code{varTime}) in 30 years windows plus a 5 years lag, and in each window the within- or between-group level (\code{varGroup}) synchronies are calculated. The function can also be used to find synchrony with similar time series \code{data.frame} from other fields.
#'
#' @usage sync.trend (formula, varTime="", varGroup="", data, window = 30, lag = 5,
#' null.mod = TRUE, selection.method = c("AIC", "AICc", "BIC"),
#' all.mod = FALSE, homoscedastic = TRUE, between.group = FALSE)
#'
#' @param formula a \code{formula} a typical model formula such as \code{Y ~ A}, where \code{Y} is usually tree-ring width and \code{A} may be a grouping factor such as the specific names of tree-ring width chronologies (\code{\link{conifersIP}}).
#' @param varTime a \code{character} specifying the time variable to consider in calculating synchrony estimates. Models with less than 10 different time-points may produce unreliable results.
#' @param varGroup a \code{character} grouping variable. In dendrochronological studies different grouping strategies can be used. We used here two strategies following taxonomic (i.e. species) or geographic (i.e. region) criteria.
#' @param data a \code{data.frame} with tree-ring chronologies, years and grouping variables as columns.
#' @param window an \code{integer} specifying the window size (i.e. number of years) to be used to calculate synchrony. Must be greater than or equal to 20 (>= 20). Defaults to 30.
#' @param lag an \code{integer} specifying the lag that the window is moving (i.e. number of \code{varTime} units the moving window advances) to be used to calculate synchrony. Must be greater than or equal to 1 (>= 1). Defaults to 5.
#' @param selection.method a \code{character} string of \code{"AIC"}, \code{"AICc"} or \code{"BIC"}, specifying the information criterion used for model selection.
#' @param null.mod a \code{logical} specifying if only the null model for general synchrony is fitted (broad evaluation, mBE). Default \code{TRUE}.
#' @param all.mod a \code{logical} specifying if all homoscedastic and heteroscedastic models should be fitted. Default \code{FALSE}.
#' @param homoscedastic a \code{logical} specifying if models should be an optional \code{varFunc} object or one-sided formula describing the within-group heteroscedasticity structure. Default \code{TRUE}
#' @param between.group a \code{logical} specifying if between-group synchrony is displayed instead of within-group synchrony. Default \code{FALSE}.
#'
#' @details The function fits by default (\code{"null.mod=T"}) the null model for general synchrony (broad evaluation, mBE) for a specified time window size and lag. If \code{"null.mod=F"} the function calculates \code{homoscedastic} or \code{heteroscedastic} versions of variance-covariance (VCOV) mixed models available (mBE, mNE, mCS, mUN, mHeNE, mHeCS, mHeUN; \code{\link{dendro.varcov}}) for each time window size and lag selected. In each window the best model is chosen based on the minimum information criterion selected between "AIC", "AICc" or "BIC".
#' When no \code{selection.method} is defined by default AIC is used.
#' If \code{"all.mod=T"} the function fits the homoscedastic and heteroscedastic versions of the 7 models (this is a highly time-consuming process).
#'
#' @return
#' The function returns a \code{data.frame} containing the following components:
#'
#' \itemize{\item{for \code{null.mod} \code{TRUE}:}}
#' \item{a_Group}{a column representing the within-group synchrony (mBE).}
#' \item{SE}{standard error of each observation.}
#' \item{Windlag}{a column representing the lag of the window used to split the time variable. A 0 value means that lag is 0, and then the defined time window starts from the minimum varTime value.}
#' \item{varTime}{a column representing the \code{varTime} variable.}
#'
#' \itemize{\item{for \code{null.mod} \code{FALSE}:}}
#' \item{Modname}{a column indicating the best model fit and the information criterion used.}
#' \item{GroupName}{a column indicating levels of the \code{varGroup} for each time-window selected.}
#' \item{a_Group}{a column indicating within-group synchrony for each \code{varGroup} level at time-window selected.}
#' \item{a_betw_Grp}{a column indicating between-group synchrony for each \code{varGroup} level at time-window selected. Only if \code{between.group} is set to \code{TRUE}.}
#' \item{SE}{standard error of each observation.}
#' \item{Windlag}{a column representing the lag of the window used to split the time variable. A 0 value means that lag is 0, and then the defined time window starts from minimun varTime value.}
#' \item{varTime}{a column representing the \code{varTime} variable window mean point.}
#'
#' @author
#' Josu G. Alday, Tatiana A. Shestakova, Victor Resco de Dios, Jordi Voltas
#'
#' @references Shestakova, T.A., Aguilera, M., Ferrio, J.P., Gutierrez, E. & Voltas, J. (2014). Unravelling spatiotemporal tree-ring signals in Mediterranean oaks: a variance-covariance modelling approach of carbon and oxygen isotope ratios. Tree Physiology 34: 819-838.
#' @references Shestakova, T.A., Gutierrez, E., Kirdyanov, A.V., Camarero, J.J., Genova, M., Knorre, A.A., Linares, J.C., Resco de Dios, V., Sanchez-Salguero, R. & Voltas, J. (2016). Forests synchronize their growth in contrasting Eurasian regions in response to climate warming. \emph{Proceedings of the National Academy of Sciences of the United States of America} 113: 662-667.
#'
#' @examples ## Calculate temporal trends of spatial synchrony for conifersIP data:
#' data(conifersIP)
#'
#' ##Fit the null.model temporal trend (mBE)
#' #using taxonomic grouping criteria (i.e. Species)
#' mBE.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Species",
#' data = conifersIP, null.mod = TRUE, window = 30, lag = 5)
#'
#' mBE.trend# it returns a data.frame
#'
#' \dontrun{
#' ##Fit homoscedastic within-group trends (mBE, mNE, mCS, mUN)
#' # using geographic grouping criteria (i.e. Region)
#' geo.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Region",
#' data = conifersIP, window = 30, lag = 5,
#' null.mod = FALSE, homoscedastic = TRUE)
#'
#' geo.trend#a data.frame with varGroup syncrony for each time window.
#'
#' ##Fit heteroscedastic between-group trends (mBE, mHeNE, mHeCS, mHeUN)
#' #using geographic grouping criteria (i.e. Region) and BIC
#' geo.het.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Region",
#' data = conifersIP, window = 30, lag = 5, null.mod = FALSE,
#' selection.method = c("BIC"), homoscedastic = FALSE,
#' between.group = TRUE)
#' geo.het.trend
#'
#' ##Fit homoscedastic and heterocedastic within-group trends
#' # using taxonomic grouping criteria (i.e. Species) and BIC
#' geo.tot.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Species",
#' data = conifersIP, window = 30, lag = 5,
#' selection.method = c("BIC"), all.mod = TRUE)
#' geo.tot.trend
#' }
#'
#' @import stats
#'
#' @export sync.trend
#'
#'
#'
sync.trend <- function(formula, varTime = "", varGroup = "", data = stop("A dataset must be provided"),
                       window = 30, lag = 5, null.mod = TRUE, selection.method = c("AIC", "AICc", "BIC"),
                       all.mod = FALSE, homoscedastic = TRUE, between.group = FALSE){
  # Moving-window synchrony trends (Shestakova et al. 2016): split `varTime`
  # into windows of `window` years advanced by `lag`, fit the VCOV mixed
  # models in each window and collect the selected synchrony estimates into
  # a data.frame of class "sync.trend".
  stopifnot(is.numeric(window), length(window) == 1, is.finite(window))
  if (window < 20) {stop("'window' must be >= 20")}
  stopifnot(is.numeric(lag), length(lag) == 1, is.finite(lag))
  if (lag < 1) {stop("'lag' must be >= 1")}
  data$vrTi <- data[, varTime]
  yrs.len <- max(data$vrTi) - min(data$vrTi) + 1
  if (yrs.len < window) {stop("'varTime' must be longer than the window length")}
  tini <- min(data$vrTi)
  # Number of windows that fit, their 0-based lag indices, and midpoints.
  wt <- floor((yrs.len - window)/lag) + 1
  lwind <- seq_len(wt) - 1
  yr <- tini + (window/2) + lag*lwind
  # Rows of `data` falling inside the i-th moving window (inclusive bounds).
  chop.window <- function(i) {
    data[data$vrTi >= (tini + lag*(i - 1)) & data$vrTi <= (tini + window + lag*(i - 1)), ]
  }
  if (null.mod) {
    # Broad evaluation only: fit the null model (mBE) in every window.
    mod.val <- lapply(seq_len(wt), function(i) {
      modHom <- dendro.varcov(formula, varTime = varTime, varGroup = varGroup,
                              data = chop.window(i), null.mod = TRUE)
      sync(modHom, modname = rownames(mod.table(modHom)))[1]
    })
    df <- do.call(rbind, lapply(mod.val, data.frame, stringsAsFactors = FALSE))[2:3]
    df$lagwindow <- lwind
    df$yr <- yr
    names(df) <- c("a_Group", "SE", "Windlag", varTime)
    output <- df
  } else {
    # Information criterion used to pick the best model per window; silently
    # falls back to AIC when an unknown value is supplied (documented
    # behaviour of the original implementation).
    if (any(selection.method %in% c("AIC", "AICc", "BIC"))) {
      sel.met <- match.arg(selection.method, c("AIC", "AICc", "BIC"))
    } else {
      sel.met <- c("AIC")
    }
    # sync() element 1 = within-group estimates, element 2 = between-group.
    sync.idx <- if (between.group) 2L else 1L
    mod.val <- lapply(seq_len(wt), function(i) {
      modHom <- dendro.varcov(formula, varTime = varTime, varGroup = varGroup,
                              data = chop.window(i), all.mod = all.mod,
                              homoscedastic = homoscedastic)
      mo.ta <- mod.table(modHom)
      # which.min() picks the first best model even when the criterion ties
      # (the original which(x == min(x)) could return several rows).
      w <- which.min(mo.ta[, sel.met])
      sync(modHom, modname = rownames(mo.ta)[w], trend.mBE = TRUE)[sync.idx]
    })
    mv <- do.call(rbind, lapply(mod.val, data.frame, stringsAsFactors = FALSE))
    if (between.group) {
      nl <- sum(1:(nlevels(data[, varGroup]) - 1))  # number of group pairs
      value.name <- "a_betw_Grp"
    } else {
      nl <- nlevels(data[, varGroup])               # one row per group level
      value.name <- "a_Group"
    }
    mv$lagwindow <- rep(lwind, each = nl)
    mv$yr <- rep(yr, each = nl)
    names(mv) <- c(paste("Modname", sel.met, sep = "_"), "GroupName", value.name,
                   "SE", "Windlag", varTime)
    output <- mv
  }
  class(output) <- c("sync.trend", "data.frame")
  return(invisible(output))
}
|
/R/sync.trend.R
|
no_license
|
cran/DendroSync
|
R
| false
| false
| 11,515
|
r
|
#' Calculate temporal trends of synchrony
#'
#' @description The function calculates temporal trends of spatial synchrony from a \code{data.frame} with tree-ring width chronologies using a moving window as described in Shestakova et al. (2016). This method splits the time variable (\code{varTime}) in 30 years windows plus a 5 years lag, and in each window the within- or between-group level (\code{varGroup}) synchronies are calculated. The function can also be used to find synchrony with similar time series \code{data.frame} from other fields.
#'
#' @usage sync.trend (formula, varTime="", varGroup="", data, window = 30, lag = 5,
#' null.mod = TRUE, selection.method = c("AIC", "AICc", "BIC"),
#' all.mod = FALSE, homoscedastic = TRUE, between.group = FALSE)
#'
#' @param formula a \code{formula} a typical model formula such as \code{Y ~ A}, where \code{Y} is usually tree-ring width and \code{A} may be a grouping factor such as the specific names of tree-ring width chronologies (\code{\link{conifersIP}}).
#' @param varTime a \code{character} specifying the time variable to consider in calculating synchrony estimates. Models with less than 10 different time-points may produce unreliable results.
#' @param varGroup a \code{character} grouping variable. In dendrochronological studies different grouping strategies can be used. We used here two strategies following taxonomic (i.e. species) or geographic (i.e. region) criteria.
#' @param data a \code{data.frame} with tree-ring chronologies, years and grouping variables as columns.
#' @param window an \code{integer} specifying the window size (i.e. number of years) to be used to calculate synchrony. Must be greater than or equal to 20 (>= 20). Defaults to 30.
#' @param lag an \code{integer} specifying the lag that the window is moving (i.e. number of \code{varTime} units the moving window advances) to be used to calculate synchrony. Must be greater than or equal to 1 (>= 1). Defaults to 5.
#' @param selection.method a \code{character} string of \code{"AIC"}, \code{"AICc"} or \code{"BIC"}, specifying the information criterion used for model selection.
#' @param null.mod a \code{logical} specifying if only the null model for general synchrony is fitted (broad evaluation, mBE). Default \code{TRUE}.
#' @param all.mod a \code{logical} specifying if all homoscedastic and heteroscedastic models should be fitted. Default \code{FALSE}.
#' @param homoscedastic a \code{logical} specifying if models should be an optional \code{varFunc} object or one-sided formula describing the within-group heteroscedasticity structure. Default \code{TRUE}
#' @param between.group a \code{logical} specifying if between-group synchrony is displayed instead of whitin-group synchrony. Default \code{FALSE}.
#'
#' @details The function fits by default (\code{"null.mod=T"}) the null model for general synchrony (broad evaluation, mBE) for a specified time window size and lag. If \code{"null.mod=F"} the function calculates \code{homoscedastic} or \code{heteroscedastic} versions of variance-covariance (VCOV) mixed models available (mBE, mNE, mCS, mUN, mHeNE, mHeCS, mHeUN; \code{\link{dendro.varcov}}) for each time window size and lag selected. In each window the best model is chosen based on the minimum information criterion selected between "AIC", "AICc" or "BIC".
#' When no \code{selection.method} is defined by default AIC is used.
#' If \code{"all.mod=T"} the functions fits the homoscedastic and heteroscedastic versions of the 7 models (this is a higly time consuming process).
#'
#' @return
#' The function returns a \code{data.frame} containing the following components:
#'
#' \itemize{\item{for \code{null.mod} \code{TRUE}:}}
#' \item{a_Group}{a column representing the within-group synchrony (mBE).}
#' \item{SE}{standard error of each observation.}
#' \item{Windlag}{a column representing the lag of the window used to split the time variable. A 0 value means that lag is 0, and then the defined time window starts from minimun varTime value.}
#' \item{varTime}{a column representing the \code{varTime} variable.}
#'
#' \itemize{\item{for \code{null.mod} \code{FALSE}:}}
#' \item{Modname}{a column indicating the best model fit and the information criterion used.}
#' \item{GroupName}{a column indicating levels of the \code{varGroup} for each time-window selected.}
#' \item{a_Group}{a column indicating within-group synchrony for each \code{varGroup} level at time-window selected.}
#' \item{a_betw_Grp}{a column indicating between-group synchrony for each \code{varGroup} level at time-window selected. Only if \code{between.group} is set to \code{TRUE}.}
#' \item{SE}{standard error of each observation.}
#' \item{Windlag}{a column representing the lag of the window used to split the time variable. A 0 value means that lag is 0, and then the defined time window starts from minimun varTime value.}
#' \item{varTime}{a column representing the \code{varTime} variable window mean point.}
#'
#' @author
#' Josu G. Alday, Tatiana A. Shestakova, Victor Resco de Dios, Jordi Voltas
#'
#' @references Shestakova, T.A., Aguilera, M., Ferrio, J.P., Gutierrez, E. & Voltas, J. (2014). Unravelling spatiotemporal tree-ring signals in Mediterranean oaks: a variance-covariance modelling approach of carbon and oxygen isotope ratios. Tree Physiology 34: 819-838.
#' @references Shestakova, T.A., Gutierrez, E., Kirdyanov, A.V., Camarero, J.J., Genova, M., Knorre, A.A., Linares, J.C., Resco de Dios, V., Sanchez-Salguero, R. & Voltas, J. (2016). Forests synchronize their growth in contrasting Eurasian regions in response to climate warming. \emph{Proceedings of the National Academy of Sciences of the United States of America} 113: 662-667.
#'
#' @examples ## Calculate temporal trends of spatial synchrony for conifersIP data:
#' data(conifersIP)
#'
#' ##Fit the null.model temporal trend (mBE)
#' #using taxonomic grouping criteria (i.e. Species)
#' mBE.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Species",
#' data = conifersIP, null.mod = TRUE, window = 30, lag = 5)
#'
#' mBE.trend# it returns a data.frame
#'
#' \dontrun{
#' ##Fit homoscedastic within-group trends (mBE, mNE, mCS, mUN)
#' # using geographic grouping criteria (i.e. Region)
#' geo.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Region",
#' data = conifersIP, window = 30, lag = 5,
#' null.mod = FALSE, homoscedastic = TRUE)
#'
#' geo.trend#a data.frame with varGroup syncrony for each time window.
#'
#' ##Fit heteroscedastic between-group trends (mBE, mHeNE, mHeCS, mHeUN)
#' #using geographic grouping criteria (i.e. Region) and BIC
#' geo.het.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Region",
#' data = conifersIP, window = 30, lag = 5, null.mod = FALSE,
#' selection.method = c("BIC"), homoscedastic = FALSE,
#' between.group = TRUE)
#' geo.het.trend
#'
#' ##Fit homoscedastic and heterocedastic within-group trends
#' # using taxonomic grouping criteria (i.e. Species) and BIC
#' geo.tot.trend <- sync.trend(TRW ~ Code, varTime = "Year", varGroup = "Species",
#' data = conifersIP, window = 30, lag = 5,
#' selection.method = c("BIC"), all.mod = TRUE)
#' geo.tot.trend
#' }
#'
#' @import stats
#'
#' @export sync.trend
#'
#'
#'
sync.trend <- function(formula, varTime = "", varGroup = "", data = stop("A dataset must be provided"),
                       window = 30, lag = 5, null.mod = TRUE, selection.method = c("AIC", "AICc", "BIC"),
                       all.mod = FALSE, homoscedastic = TRUE, between.group = FALSE){
  # Moving-window synchrony trends (Shestakova et al. 2016): split `varTime`
  # into windows of `window` years advanced by `lag`, fit the VCOV mixed
  # models in each window and collect the selected synchrony estimates into
  # a data.frame of class "sync.trend".
  stopifnot(is.numeric(window), length(window) == 1, is.finite(window))
  if (window < 20) {stop("'window' must be >= 20")}
  stopifnot(is.numeric(lag), length(lag) == 1, is.finite(lag))
  if (lag < 1) {stop("'lag' must be >= 1")}
  data$vrTi <- data[, varTime]
  yrs.len <- max(data$vrTi) - min(data$vrTi) + 1
  if (yrs.len < window) {stop("'varTime' must be longer than the window length")}
  tini <- min(data$vrTi)
  # Number of windows that fit, their 0-based lag indices, and midpoints.
  wt <- floor((yrs.len - window)/lag) + 1
  lwind <- seq_len(wt) - 1
  yr <- tini + (window/2) + lag*lwind
  # Rows of `data` falling inside the i-th moving window (inclusive bounds).
  chop.window <- function(i) {
    data[data$vrTi >= (tini + lag*(i - 1)) & data$vrTi <= (tini + window + lag*(i - 1)), ]
  }
  if (null.mod) {
    # Broad evaluation only: fit the null model (mBE) in every window.
    mod.val <- lapply(seq_len(wt), function(i) {
      modHom <- dendro.varcov(formula, varTime = varTime, varGroup = varGroup,
                              data = chop.window(i), null.mod = TRUE)
      sync(modHom, modname = rownames(mod.table(modHom)))[1]
    })
    df <- do.call(rbind, lapply(mod.val, data.frame, stringsAsFactors = FALSE))[2:3]
    df$lagwindow <- lwind
    df$yr <- yr
    names(df) <- c("a_Group", "SE", "Windlag", varTime)
    output <- df
  } else {
    # Information criterion used to pick the best model per window; silently
    # falls back to AIC when an unknown value is supplied (documented
    # behaviour of the original implementation).
    if (any(selection.method %in% c("AIC", "AICc", "BIC"))) {
      sel.met <- match.arg(selection.method, c("AIC", "AICc", "BIC"))
    } else {
      sel.met <- c("AIC")
    }
    # sync() element 1 = within-group estimates, element 2 = between-group.
    sync.idx <- if (between.group) 2L else 1L
    mod.val <- lapply(seq_len(wt), function(i) {
      modHom <- dendro.varcov(formula, varTime = varTime, varGroup = varGroup,
                              data = chop.window(i), all.mod = all.mod,
                              homoscedastic = homoscedastic)
      mo.ta <- mod.table(modHom)
      # which.min() picks the first best model even when the criterion ties
      # (the original which(x == min(x)) could return several rows).
      w <- which.min(mo.ta[, sel.met])
      sync(modHom, modname = rownames(mo.ta)[w], trend.mBE = TRUE)[sync.idx]
    })
    mv <- do.call(rbind, lapply(mod.val, data.frame, stringsAsFactors = FALSE))
    if (between.group) {
      nl <- sum(1:(nlevels(data[, varGroup]) - 1))  # number of group pairs
      value.name <- "a_betw_Grp"
    } else {
      nl <- nlevels(data[, varGroup])               # one row per group level
      value.name <- "a_Group"
    }
    mv$lagwindow <- rep(lwind, each = nl)
    mv$yr <- rep(yr, each = nl)
    names(mv) <- c(paste("Modname", sel.met, sep = "_"), "GroupName", value.name,
                   "SE", "Windlag", varTime)
    output <- mv
  }
  class(output) <- c("sync.trend", "data.frame")
  return(invisible(output))
}
|
## Neurosynth RecommendR HelpRs (could I be any lameR? [clearly yes]) ####
## This function is stolen from here: http://lamages.blogspot.com/2013/04/how-to-change-alpha-value-of-colours-in.html
## With some slight modifcations.
add.alpha <- function(col, alpha=0.65){
  # Return `col` as hex colour strings carrying the given alpha, vectorised
  # over `col` (replaces the original per-colour sapply/apply loop).
  # Like the original, the result is named by the input colours; assumes
  # `col` is an unnamed character vector, as used throughout this file.
  m <- grDevices::col2rgb(col)/255
  stats::setNames(grDevices::rgb(m[1, ], m[2, ], m[3, ], alpha=alpha), col)
}
nr.base.plot <- function(dat, ...) {
  # Base scatter panel: plot `dat` (two component columns) with axis limits
  # set to each column's data range padded by 10%, equal aspect ratio, and
  # dashed grey reference lines through the origin.
  lims <- apply(dat, 2, range) * 1.1
  plot(dat, xlim = lims[, 1], ylim = lims[, 2], asp = 1, ...)
  abline(h = 0, lty = 2, col = "grey80")
  abline(v = 0, lty = 2, col = "grey80")
}
## this can stay, but only as a helper that is never used by the App.
entry.and.neighbors <- function(dat, entry=NA, number.of.neighbors=10){
  # Return the row indices of `entry` (matched against the first column of
  # `dat`) and its `number.of.neighbors` nearest rows by squared Euclidean
  # distance in component space (columns component_1..component_5).
  # Index 1 of the result is always the entry itself (distance 0).
  # Scalar condition uses short-circuit ||, checking is.na first.
  if (is.na(entry) || !(entry %in% dat[, 1])) {
    warning("PMID or Stem does not exist in this list. We'll choose one for you!")
    entry <- sample(dat[, 1], 1)
  }
  this.entry <- which(as.character(dat[, 1]) == as.character(entry))
  comp <- as.matrix(dat[, paste0("component_", 1:5)])
  # Squared distance from the target row to every row (vectorised; replaces
  # the original row-wise apply()).
  euc.dists <- colSums((t(comp) - comp[this.entry, ])^2)
  order(euc.dists)[1:(number.of.neighbors + 1)]
}
studies.plot.panel <- function(studies.dat, studies.friends, color.selector=1, pmid="19789183",
                               number.of.neighbors=10, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot every study in component space; when `pmid` is present in
  # studies.dat$pubmed, highlight it (red) together with its precomputed
  # nearest neighbours from `studies.friends` (grey rings).
  # Returns list(message = "valid"/"invalid", results = highlighted rows or NULL).
  if(color.selector==1){
    col <- studies.dat$color   # per-cluster colours
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  # Shared base scatter + legend (factored out of the duplicated branches).
  draw.base <- function(){
    nr.base.plot(studies.dat[, c(comp.x, comp.y)],
                 xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
                 col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
    if(color.selector==1){
      legend("bottomleft", inset=.05, title="Cluster Colors",
             legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
             fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
    }
  }
  if( !(pmid %in% studies.dat[,'pubmed']) ){
    draw.base()
    return(list(message="invalid", results=NULL))
  }
  # First entry of the neighbour table is the study itself.
  friends <- studies.friends[pmid, 1:(number.of.neighbors+1)]
  draw.base()
  points(studies.dat[friends[-1], comp.x], studies.dat[friends[-1], comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
  points(studies.dat[friends[1], comp.x], studies.dat[friends[1], comp.y],
         bg="red", pch=21, cex=1.5)
  list(message="valid", results=studies.dat[friends,])
}
stems.plot.panel <- function(stems.dat, stems.friends, color.selector=1, stem="truth",
                             number.of.neighbors=10, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot all word stems in component space; when `stem` exists in
  # stems.dat$word, highlight it (red) plus its precomputed neighbours
  # (grey rings). Returns list(message=..., results=...) like
  # studies.plot.panel().
  # FIX: the original default was the self-referential `cex=cex`, which
  # errors ("promise already under evaluation") whenever cex is omitted;
  # 0.75 matches studies.plot.panel()'s default.
  if(color.selector==1){
    col <- stems.dat$color
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  draw.base <- function(){
    nr.base.plot(stems.dat[, c(comp.x, comp.y)],
                 xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
                 col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
    if(color.selector==1){
      legend("bottomleft", inset=.05, title="Cluster Colors",
             legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
             fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
    }
  }
  if( !(stem %in% stems.dat[,'word']) ){
    draw.base()
    return(list(message="invalid", results=NULL))
  }
  friends <- stems.friends[stem, 1:(number.of.neighbors+1)]  # [1] is the stem itself
  draw.base()
  points(stems.dat[friends[-1], comp.x], stems.dat[friends[-1], comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
  points(stems.dat[friends[1], comp.x], stems.dat[friends[1], comp.y],
         bg="red", pch=21, cex=1.5)
  list(message="valid", results=stems.dat[friends,])
}
years.plot.panel <- function(studies.dat, which.year="1997", color.selector = 1, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot all studies in component space and highlight those published in
  # `which.year` (grey rings).
  # FIX: the original default was the self-referential `cex=cex`, which
  # errors whenever cex is omitted; 0.75 matches studies.plot.panel().
  if(color.selector==1){
    col <- studies.dat$color
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  these.studies <- which(studies.dat$year==which.year)
  nr.base.plot(studies.dat[, c(comp.x, comp.y)],
               xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
               col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
  if(color.selector==1){
    legend("bottomleft", inset=.05, title="Cluster Colors",
           legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
           fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
  }
  points(studies.dat[these.studies, comp.x], studies.dat[these.studies, comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
}
journal.plot.panel <- function(studies.dat, which.journal="Neuron", color.selector=1, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot all studies in component space and highlight those published in
  # `which.journal` (grey rings).
  # FIX: the original default was the self-referential `cex=cex`, which
  # errors whenever cex is omitted; 0.75 matches studies.plot.panel().
  if(color.selector==1){
    col <- studies.dat$color
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  these.studies <- which(studies.dat$journal==which.journal)
  nr.base.plot(studies.dat[, c(comp.x, comp.y)],
               xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
               col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
  if(color.selector==1){
    legend("bottomleft", inset=.05, title="Cluster Colors",
           legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
           fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
  }
  points(studies.dat[these.studies, comp.x], studies.dat[these.studies, comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
}
|
/NeurosynthRecommendR/Helpers2.R
|
permissive
|
fahd09/neurosynth_semantic_map
|
R
| false
| false
| 6,746
|
r
|
## Neurosynth RecommendR HelpRs (could I be any lameR? [clearly yes]) ####
## This function is stolen from here: http://lamages.blogspot.com/2013/04/how-to-change-alpha-value-of-colours-in.html
## With some slight modifcations.
add.alpha <- function(col, alpha=0.65){
  # Return `col` as hex colour strings carrying the given alpha, vectorised
  # over `col` (replaces the original per-colour sapply/apply loop).
  # Like the original, the result is named by the input colours; assumes
  # `col` is an unnamed character vector, as used throughout this file.
  m <- grDevices::col2rgb(col)/255
  stats::setNames(grDevices::rgb(m[1, ], m[2, ], m[3, ], alpha=alpha), col)
}
nr.base.plot <- function(dat, ...) {
  # Base scatter panel: plot `dat` (two component columns) with axis limits
  # set to each column's data range padded by 10%, equal aspect ratio, and
  # dashed grey reference lines through the origin.
  lims <- apply(dat, 2, range) * 1.1
  plot(dat, xlim = lims[, 1], ylim = lims[, 2], asp = 1, ...)
  abline(h = 0, lty = 2, col = "grey80")
  abline(v = 0, lty = 2, col = "grey80")
}
## this can stay, but only as a helper that is never used by the App.
entry.and.neighbors <- function(dat, entry=NA, number.of.neighbors=10){
  # Return the row indices of `entry` (matched against the first column of
  # `dat`) and its `number.of.neighbors` nearest rows by squared Euclidean
  # distance in component space (columns component_1..component_5).
  # Index 1 of the result is always the entry itself (distance 0).
  # Scalar condition uses short-circuit ||, checking is.na first.
  if (is.na(entry) || !(entry %in% dat[, 1])) {
    warning("PMID or Stem does not exist in this list. We'll choose one for you!")
    entry <- sample(dat[, 1], 1)
  }
  this.entry <- which(as.character(dat[, 1]) == as.character(entry))
  comp <- as.matrix(dat[, paste0("component_", 1:5)])
  # Squared distance from the target row to every row (vectorised; replaces
  # the original row-wise apply()).
  euc.dists <- colSums((t(comp) - comp[this.entry, ])^2)
  order(euc.dists)[1:(number.of.neighbors + 1)]
}
studies.plot.panel <- function(studies.dat, studies.friends, color.selector=1, pmid="19789183",
                               number.of.neighbors=10, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot every study in component space; when `pmid` is present in
  # studies.dat$pubmed, highlight it (red) together with its precomputed
  # nearest neighbours from `studies.friends` (grey rings).
  # Returns list(message = "valid"/"invalid", results = highlighted rows or NULL).
  if(color.selector==1){
    col <- studies.dat$color   # per-cluster colours
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  # Shared base scatter + legend (factored out of the duplicated branches).
  draw.base <- function(){
    nr.base.plot(studies.dat[, c(comp.x, comp.y)],
                 xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
                 col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
    if(color.selector==1){
      legend("bottomleft", inset=.05, title="Cluster Colors",
             legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
             fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
    }
  }
  if( !(pmid %in% studies.dat[,'pubmed']) ){
    draw.base()
    return(list(message="invalid", results=NULL))
  }
  # First entry of the neighbour table is the study itself.
  friends <- studies.friends[pmid, 1:(number.of.neighbors+1)]
  draw.base()
  points(studies.dat[friends[-1], comp.x], studies.dat[friends[-1], comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
  points(studies.dat[friends[1], comp.x], studies.dat[friends[1], comp.y],
         bg="red", pch=21, cex=1.5)
  list(message="valid", results=studies.dat[friends,])
}
stems.plot.panel <- function(stems.dat, stems.friends, color.selector=1, stem="truth",
                             number.of.neighbors=10, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot all word stems in component space; when `stem` exists in
  # stems.dat$word, highlight it (red) plus its precomputed neighbours
  # (grey rings). Returns list(message=..., results=...) like
  # studies.plot.panel().
  # FIX: the original default was the self-referential `cex=cex`, which
  # errors ("promise already under evaluation") whenever cex is omitted;
  # 0.75 matches studies.plot.panel()'s default.
  if(color.selector==1){
    col <- stems.dat$color
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  draw.base <- function(){
    nr.base.plot(stems.dat[, c(comp.x, comp.y)],
                 xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
                 col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
    if(color.selector==1){
      legend("bottomleft", inset=.05, title="Cluster Colors",
             legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
             fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
    }
  }
  if( !(stem %in% stems.dat[,'word']) ){
    draw.base()
    return(list(message="invalid", results=NULL))
  }
  friends <- stems.friends[stem, 1:(number.of.neighbors+1)]  # [1] is the stem itself
  draw.base()
  points(stems.dat[friends[-1], comp.x], stems.dat[friends[-1], comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
  points(stems.dat[friends[1], comp.x], stems.dat[friends[1], comp.y],
         bg="red", pch=21, cex=1.5)
  list(message="valid", results=stems.dat[friends,])
}
years.plot.panel <- function(studies.dat, which.year="1997", color.selector = 1, alpha=.5, x.axis=1, y.axis=2, cex=.75){
  # Plot all studies in component space and highlight those published in
  # `which.year` (grey rings).
  # FIX: the original default was the self-referential `cex=cex`, which
  # errors whenever cex is omitted; 0.75 matches studies.plot.panel().
  if(color.selector==1){
    col <- studies.dat$color
  }else{
    col <- "grey80"
  }
  comp.x <- paste0("component_", x.axis)
  comp.y <- paste0("component_", y.axis)
  these.studies <- which(studies.dat$year==which.year)
  nr.base.plot(studies.dat[, c(comp.x, comp.y)],
               xlab=paste0("Component ", x.axis), ylab=paste0("Component ", y.axis),
               col=add.alpha(col, alpha=alpha), pch=20, axes=FALSE, cex=cex)
  if(color.selector==1){
    legend("bottomleft", inset=.05, title="Cluster Colors",
           legend=c("Knowledge+Language","Development","Sensorymotor", "Cognition", "Decisions+Emotion", "Genetics"),
           fill=c("#F8766D","#B79F00", "#00BA38", "#00BFC4", "#619CFF", "#F564E3"))
  }
  points(studies.dat[these.studies, comp.x], studies.dat[these.studies, comp.y],
         col="black", bg="grey80", pch=21, cex=cex*1.25)
}
# Plot the 2-D study embedding, highlighting all studies published in a
# given journal.
#
# Args:
#   studies.dat    Data frame with `journal`, `color`, and `component_*` columns.
#   which.journal  Journal name whose studies get ringed.
#   color.selector 1 = color points by cluster (with legend), else grey.
#   alpha          Point transparency passed to add.alpha().
#   x.axis, y.axis Which embedding components to plot.
#   cex            Base point size. (Was `cex = cex`, a self-referential
#                  default that raised "promise already under evaluation"
#                  whenever the argument was omitted.)
journal.plot.panel <- function(studies.dat, which.journal = "Neuron",
                               color.selector = 1, alpha = .5,
                               x.axis = 1, y.axis = 2, cex = 1) {
  col <- if (color.selector == 1) studies.dat$color else "grey80"
  xy.cols <- c(paste0("component_", x.axis), paste0("component_", y.axis))
  these.studies <- which(studies.dat$journal == which.journal)
  nr.base.plot(studies.dat[, xy.cols],
               xlab = paste0("Component ", x.axis),
               ylab = paste0("Component ", y.axis),
               col = add.alpha(col, alpha = alpha), pch = 20, axes = FALSE,
               cex = cex)
  if (color.selector == 1) {
    legend("bottomleft", inset = .05, title = "Cluster Colors",
           c("Knowledge+Language", "Development", "Sensorymotor", "Cognition",
             "Decisions+Emotion", "Genetics"),
           fill = c("#F8766D", "#B79F00", "#00BA38", "#00BFC4", "#619CFF",
                    "#F564E3"))
  }
  # Ring the studies from the requested journal.
  points(studies.dat[these.studies, xy.cols[1]],
         studies.dat[these.studies, xy.cols[2]],
         col = "black", bg = "grey80", pch = 21, cex = cex * 1.25)
}
|
#' ---
#' title: "Optimized version of GNG"
#' author: ""
#' date: ""
#' output:
#'  html_document:
#'    self_contained: false
#' ---
# For advanced use, consider OptimizedGNG: an asymptotically much faster
# implementation that does not sacrifice the quality of the graph.
# GNG's unique capability is online training.
library(gmum.r)
# Construct a 10,000-point spherical dataset (N=10000, not 10^6).
sphere.dataset <- gng.preset.sphere(N=10000)
# If you decide to use this advanced feature, you have to pass a
# range (bounding box) in which all of passed data feature values will reside
gng <- OptimizedGNG(sphere.dataset, max.nodes=1000, max.iter=10000, dim=3,
value.range=c(0,1))
# Plain GNG would take much longer; you can check it yourself:
# gng <- GNG(sphere.dataset, max.nodes=1000, max.iter=10000, dim=3)
# Assign every dataset point to its closest learned centroid.
findClosests(gng, calculateCentroids(gng), sphere.dataset)
# Plot results using spatial coordinates
plot(gng, layout=gng.plot.layout.v2d, vertex.size=6)
|
/demo/samples/gng.optimized.R
|
no_license
|
Silveryu/gng.r
|
R
| false
| false
| 1,001
|
r
|
#' ---
#' title: "Optimized version of GNG"
#' author: ""
#' date: ""
#' output:
#' html_document:
#' self_contained: false
#' ---
# For advanced used you might want to consider using OptimizedGNG
# , which asymptotically much faster implementation without
# sacrifying quality of the graph
# GNG unique capability is online training
library(gmum.r)
# Construct 10^6 points spherical dataset. Might take a while!
sphere.dataset <- gng.preset.sphere(N=10000)
# If you decide to use this advanced feature, you have to pass a
# range (bounding box) in which all of passed data feature values will reside
gng <- OptimizedGNG(sphere.dataset, max.nodes=1000, max.iter=10000, dim=3,
value.range=c(0,1))
# GNG would much longer, you can check it yourself
# gng <- GNG(sphere.dataset, max.nodes=1000, max.iter=10000, dim=3)
findClosests(gng, calculateCentroids(gng), sphere.dataset)
# Plot results using spatial coordinates
plot(gng, layout=gng.plot.layout.v2d, vertex.size=6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ergoInfo.R
\name{ergoInfo}
\alias{ergoInfo}
\title{Ergodicity Information Index}
\usage{
ergoInfo(dynEGA.object, use = c("edge.list", "unweighted", "weighted"))
}
\arguments{
\item{dynEGA.object}{A \code{\link[EGAnet]{dynEGA.ind.pop}} object}
\item{use}{Character.
A string indicating what network element will be used
to compute the algorithm complexity, the list of edges or the weights of the network.
Defaults to \code{use = "edge.list"}.
Current options are:
\itemize{
\item{\strong{\code{"edge.list"}}}
{Calculates the algorithm complexity using the list of edges.}
\item{\strong{\code{"unweighted"}}}
{Calculates the algorithm complexity using the binary weights of the network.
0 = edge absent and 1 = edge present}
\item{\strong{\code{"weighted"}}}
{Calculates the algorithm complexity using the weights of the network.}
}}
}
\value{
Returns a list containing:
\item{PrimeWeight}{The prime-weight encoding of the individual networks}
\item{PrimeWeight.pop}{The prime-weight encoding of the population network}
\item{Kcomp}{The Kolmogorov complexity of the prime-weight encoded individual networks}
\item{Kcomp.pop}{The Kolmogorov complexity of the prime-weight encoded population network}
\item{complexity}{The complexity metric proposed by Santora and Nicosia (2020)}
\item{EII}{The Ergodicity Information Index}
}
\description{
Computes the Ergodicity Information Index
}
\examples{
# Obtain data
sim.dynEGA <- sim.dynEGA # bypasses CRAN checks
\dontrun{
# Dynamic EGA individual and population structure
dyn.ega1 <- dynEGA.ind.pop(
data = sim.dynEGA[,-26], n.embed = 5, tau = 1,
delta = 1, id = 25, use.derivatives = 1,
ncores = 2, corr = "pearson"
)
# Compute empirical ergodicity information index
eii <- ergoInfo(
dynEGA.object = dyn.ega1,
use = "weighted"
)}
}
\author{
Hudson Golino <hfg9s at virginia.edu> and Alexander Christensen <alexpaulchristensen@gmail.com>
}
|
/man/ergoInfo.Rd
|
no_license
|
cran/EGAnet
|
R
| false
| true
| 1,986
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ergoInfo.R
\name{ergoInfo}
\alias{ergoInfo}
\title{Ergodicity Information Index}
\usage{
ergoInfo(dynEGA.object, use = c("edge.list", "unweighted", "weighted"))
}
\arguments{
\item{dynEGA.object}{A \code{\link[EGAnet]{dynEGA.ind.pop}} object}
\item{use}{Character.
A string indicating what network element will be used
to compute the algorithm complexity, the list of edges or the weights of the network.
Defaults to \code{use = "edge.list"}.
Current options are:
\itemize{
\item{\strong{\code{"edge.list"}}}
{Calculates the algorithm complexity using the list of edges.}
\item{\strong{\code{"unweighted"}}}
{Calculates the algorithm complexity using the binary weights of the network.
0 = edge absent and 1 = edge present}
\item{\strong{\code{"weighted"}}}
{Calculates the algorithm complexity using the weights of the network.}
}}
}
\value{
Returns a list containing:
\item{PrimeWeight}{The prime-weight encoding of the individual networks}
\item{PrimeWeight.pop}{The prime-weight encoding of the population network}
\item{Kcomp}{The Kolmogorov complexity of the prime-weight encoded individual networks}
\item{Kcomp.pop}{The Kolmogorov complexity of the prime-weight encoded population network}
\item{complexity}{The complexity metric proposed by Santora and Nicosia (2020)}
\item{EII}{The Ergodicity Information Index}
}
\description{
Computes the Ergodicity Information Index
}
\examples{
# Obtain data
sim.dynEGA <- sim.dynEGA # bypasses CRAN checks
\dontrun{
# Dynamic EGA individual and population structure
dyn.ega1 <- dynEGA.ind.pop(
data = sim.dynEGA[,-26], n.embed = 5, tau = 1,
delta = 1, id = 25, use.derivatives = 1,
ncores = 2, corr = "pearson"
)
# Compute empirical ergodicity information index
eii <- ergoInfo(
dynEGA.object = dyn.ega1,
use = "weighted"
)}
}
\author{
Hudson Golino <hfg9s at virginia.edu> and Alexander Christensen <alexpaulchristensen@gmail.com>
}
|
# MAE5870 - Time Series Analysis (seminar script)
# Rodrigo da Silva Cunha
# NUSP 7631302
# Spectral analysis and frequency-domain regression of the IBovespa index
# against exchange rates (EU/US, CH/US, REAL/US) and crude-oil prices.
# ------------------------------------------------------------------------------
# Required libraries
library(latex2exp)
library(astsa)
library(gridExtra)
library(dplyr)
# ------------------------------------------------------------------------------
# Load the data (absolute path -- adjust to your environment before running)
seminario_path = "/home/rodra/workspace/MAT5870/Seminário/Dados/seminario.csv"
seminario = read.csv(seminario_path)
seminario = seminario[c('ibov','dexeuus','dexchus','dexrealus','dcoilwtico')]
# First-difference every series to remove the trend before spectral analysis
ibov = diff(ts(seminario$ibov))
dexeuus = diff(ts(seminario$dexeuus))
dexchus = diff(ts(seminario$dexchus))
dexrealus = diff(ts(seminario$dexrealus))
dcoilwtico = diff(ts(seminario$dcoilwtico))
seminario_ts = data.frame(ibov, dexeuus, dexchus, dexrealus, dcoilwtico)
# ------------------------------------------------------------------------------
# Visualize the raw and the differenced series
plot.ts(seminario, main="Análise de Séries Financeiras")
plot.ts(seminario_ts, main="Séries Diferenciadas")
# ------------------------------------------------------------------------------
# Spectral analysis
Y = seminario_ts # Y holds the transformed (differenced) series
Nt = length(Y[,1])
# L = smoothing span, M = lag window for stoch.reg, tp = taper fraction
L = 401; M = 800; alpha = .001; fdr = .001; tp = .1;
H = 31 # forecast horizon (days)
nq = 3 # number of inputs (exchange rates)
## Spectral Matrix
Yspec = mvspec(Y, spans=L, kernel="daniell", detrend=TRUE, demean=FALSE,
taper=tp, plot=FALSE)
n = Yspec$n.used # effective sample size
Fr = Yspec$freq # fundamental freqs
n.freq = length(Fr) # number of frequencies
Yspec$bandwidth*sqrt(12) # = 0.050 - the bandwidth
# Hand-picked peak frequencies per series; row 1 = label, row 2 = legend position
Pks = cbind(c('IBovespa','topleft',.0619,.1390,.22,.381,.4828), #ibov
c('EU/US','topright',.017,.112,.157, .193,.335), #euus
c('CH/US','bottomright',.019,.06,.12, .32,.407), #chus
c('REAL/US','topright',0.0475,.087,.182, .315,.393), #realus
c('Crude Oil','topleft',.07,.21, .345,.3865,.438)) #coil
# Periodograms with the hand-picked peaks marked (legend lists periods in days)
# NOTE(review): the loop runs over 5:5, so only series 5 (Crude Oil) is
# plotted; '1:5' would plot every periodogram -- confirm the intent.
for (i in 5:5){
pks = as.numeric(Pks[3:length(Pks[,i]),i])
tm = Nt/(n*pks) # convert each peak frequency to a period in days
lplc = Pks[2,i]
name = Pks[1,i]
lgds = paste(as.character(round(tm,2)),'dias',sep=' ')
ltys = c(1:length(pks))
plot(Fr, Yspec$spec[,i], type="l", ylab="Espectro", xlab="Frequencia",
main=paste('Periodograma',name, '| Taper=', tp, sep=' '))
abline(v = pks, lty=ltys)
legend(lplc,legend=lgds,lty=ltys,bty='o', cex = .8)
}
## Coherencies
# Significance threshold for squared coherence at level alpha
Fq = qf(1-alpha, 2, L-2)
cn = Fq/(L-1+Fq)
plt.name = c("(a)","(b)","(c)","(d)","(e)","(f)")
par(mfrow=c(2,2), cex.lab=1.2)
# The coherencies are listed as 1,2,...,10=choose(5,2)
for (i in 2:5){
idx = 1 + (i - 1) * (i - 2)/2 # coherence index of the pair (series 1, series i)
plot(Fr, Yspec$coh[,idx], type="l", ylab="Sq Coherence", xlab="Frequency",
ylim=c(0,.4), main=c("Ibov with", names(seminario[i])))
abline(h = cn); text(.45,.98, plt.name[i-1], cex=1.2) }
## Multiple Coherency
# Multiple coherence of IBovespa (column 1) with every pair of inputs
par(mfrow=c(3,2))
coh.23 = stoch.reg(Y, cols.full = c(2,3), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e CH/US"))
coh.24 = stoch.reg(Y, cols.full = c(2,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e REAL/US"))
coh.25 = stoch.reg(Y, cols.full = c(2,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e Crude Oil"))
coh.34 = stoch.reg(Y, cols.full = c(3,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "CH/US e REAL/US"))
coh.35 = stoch.reg(Y, cols.full = c(3,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "CH/US e Crude Oil"))
coh.45 = stoch.reg(Y, cols.full = c(4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US e Crude Oil"))
# ... with every triple of inputs
par(mfrow=c(2,2))
coh.234 = stoch.reg(Y, cols.full = c(2,3,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US, EU/US, CH/US"))
coh.345 = stoch.reg(Y, cols.full = c(3,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, CH/US, Crude Oil"))
coh.245 = stoch.reg(Y, cols.full = c(2,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, REAL/US, Crude Oil"))
coh.235 = stoch.reg(Y, cols.full = c(2,3,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, CH/US, Crude Oil"))
# ... and with all four inputs together
par(mfrow=c(1,1))
coh.2345 = stoch.reg(Y, cols.full = c(2,3,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US, EU/US, CH/US e Crude Oil"))
# ------------------------------------------------------------------------------
# Frequency-domain regression
## Partial F
numer.df = 2*nq; denom.df = Yspec$df-2*nq
par(mfrow=c(4,1), mar=c(3,3,2,1)+.5, mgp = c(1.5,0.4,0), cex.lab=1.2)
out.234 = stoch.reg(Y, cols.full = c(2,3,4), cols.red = 4, alpha, L, M,
plot.which = "F.stat")
eF = out.234$eF
pvals = pf(eF, numer.df, denom.df, lower.tail = FALSE)
# False-discovery-rate cutoff drawn on the partial-F plot
pID = FDR(pvals, fdr); abline(h=c(eF[pID]), lty=2)
title(main = "Partial F Statistic")
# Regression Coefficients (impulse responses estimated by stoch.reg)
S = seq(from = -M/2+1, to = M/2 - 1, length = M-1)
plot(S, coh.234$Betahat[,1], type = "h", xlab = "", ylab = names(seminario[2]),
ylim = c(-20, 12), lwd=2)
abline(h=0); title(main = "Impulse Response Functions")
plot(S, coh.234$Betahat[,2], type = "h", xlab = "Index", ylab =
names(seminario[3]), ylim = c(-7, 3), lwd=2)
abline(h=0)
plot(S, coh.234$Betahat[,3], type = "h", xlab = "Index", ylab =
names(seminario[4]), ylim = c(-7, 5), lwd=2)
abline(h=0)
# Keep only the largest-magnitude impulse-response coefficients per input
euus_betas = data.frame(lag=S, beta=coh.234$Betahat[,1])
chus_betas = data.frame(lag=S, beta=coh.234$Betahat[,2])
realus_betas = data.frame(lag=S, beta=coh.234$Betahat[,3])
euus_b = euus_betas %>% filter(Mod(beta) > 8)
chus_b = chus_betas %>% filter(Mod(beta) > 3)
realus_b = realus_betas %>% filter(Mod(beta) > 3)
# Model 1: IBov on EU/US (lag 0), CH/US (lags 0-1) and REAL/US (lags 0-3)
sem = ts.intersect(
It=Y[,1],
E0=Y[,2],
C0=Y[,3],
C1=stats::lag(Y[,3],-1),
R0=Y[,4],
R1=stats::lag(Y[,4],-1),
R2=stats::lag(Y[,4],-2),
R3=stats::lag(Y[,4],-3))
(u = lm(sem[,1]~sem[,2:8], na.action=NULL))
# Residual diagnostics of the OLS fit
acf2(ts(resid(u)), main="ACF e PACF - Vt")
# SARIMA(1,0,1)x(1,0,0)[7] errors with the same regressors
mymodel = sarima(sem[,1], 1,0,1, 1,0,0,7, xreg=sem[,2:8])
# Forecast: hold out the last H observations
N = length(sem[,1]) # total number of observations
train = sem[1:(N-H),]
test = sem[(N-H+1):N,]
par(mfrow=c(1,1))
prev = sarima.for(
train[,1], H,
1,0,1,
1,0,0,7,
xreg=train[,2:8],
newxreg=test[,2:8])
# Undo the first-differencing to recover forecasts on the index level
ibov_hat = diffinv(rbind(data.matrix(train[,1]), data.matrix(prev$pred)))
se_hat = diffinv(prev$se)
se_hat = se_hat[2:length(se_hat)]
# Upper/lower one-standard-error bands, shifted back by the sample mean
up_hat = ts(ibov_hat[(N-H+1):N,] + se_hat + mean(seminario$ibov),
start=(N-H+1),
frequency=1)
lo_hat = ts(ibov_hat[(N-H+1):N,] - se_hat + mean(seminario$ibov),
start=(N-H+1),
frequency=1)
ibov1 = ibov_hat[1:(N-H),]
pred = ts(ibov_hat[(N-H+1):N,], start=(N-H+1), frequency=1)
# Model 1 regression: first plot, zoomed out
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2000,2441),
type='l',
main="Regressão Modelo 1 - IBov ~ REAL/US + CH/US + EU/US",
xlab="Dia", ylab="IBovespa")
# Shaded +/- 1 s.e. band around the forecast
xx = c(time(up_hat), rev(time(lo_hat))); yy = c(lo_hat, rev(up_hat))
polygon(xx, yy, border = 8, col = gray(.6, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # background grid
# Model 1 regression: second plot, zoomed in on the forecast window
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão Modelo 1 - IBov ~ REAL/US + CH/US + EU/US",
xlab="Dia", ylab="IBovespa")
xx = c(time(up_hat), rev(time(lo_hat))); yy = c(lo_hat, rev(up_hat))
polygon(xx, yy, border = 8, col = gray(.6, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # background grid
# Model 2: regression using only REAL/US
# Lagged-regression diagnostic to pick the relevant lags
lr = LagReg(Y[,4], Y[,1], L=L, M=M, threshold =200)
## Fit with the lags selected above (0-4, 9 and 11)
sem2 = ts.intersect(
I=Y[,1],
T0=Y[,4],
T1=stats::lag(Y[,4],-1),
T2=stats::lag(Y[,4],-2),
T3=stats::lag(Y[,4],-3),
T4=stats::lag(Y[,4],-4),
T9=stats::lag(Y[,4],-9),
T11=stats::lag(Y[,4],-11))
(u2 = lm(sem2[,1]~sem2[,2:8], na.action=NULL))
# Residual diagnostics of the OLS fit
acf2(ts(resid(u2)), main="ACF e PAFC - Vt")
# SARIMA(1,0,1)x(1,0,0)[7] errors with the same regressors
mymodel2 = sarima(sem2[,1], 1, 0, 1, 1, 0, 0, 7, xreg=sem2[,2:8])
# Forecast: hold out the last H observations
N = length(sem2[,1]) # total number of observations
train2 = sem2[1:(N-H),]
test2 = sem2[(N-H+1):N,]
par(mfrow=c(1,1))
prev2 = sarima.for(
train2[,1], H,
1,0,1,
1,0,0,7,
xreg=train2[,2:8],
newxreg=test2[,2:8])
# Undo the first-differencing to recover forecasts on the index level
ibov_hat2 = diffinv(rbind(data.matrix(train2[,1]), data.matrix(prev2$pred)))
se_hat2 = diffinv(prev2$se)
se_hat2 = se_hat2[2:length(se_hat2)]
# NOTE(review): the +8 start offset below presumably compensates for the 11
# lags dropped by ts.intersect -- confirm against model 1's indexing.
up_hat2 = ts(ibov_hat2[(N-H+1):N,] + se_hat2 + mean(seminario$ibov),
start=(N-H+1+8),
frequency=1)
lo_hat2 = ts(ibov_hat2[(N-H+1):N,] - se_hat2 + mean(seminario$ibov),
start=(N-H+1+8),
frequency=1)
ibov2 = ibov_hat2[1:(N-H),]
pred2 = ts(ibov_hat2[(N-H+1):N,], start=(N-H+1+8), frequency=1)
# Model 2 regression: first plot, zoomed out
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2000,2441),
type='l',
main="Regressão Modelo 2 - IBov ~ REAL/US",
xlab="Dia", ylab="IBovespa")
# Shaded +/- 1 s.e. band around the model-2 forecast
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.6, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # background grid
# Model 2 regression: second plot, zoomed in on the forecast window
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão Modelo 2 - IBov ~ REAL/US",
xlab="Dia", ylab="IBovespa")
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.6, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # background grid
# Overlay the forecasts of the two models on the zoomed-in window
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão - Modelos 1 e 2",
xlab="Dia", ylab="IBovespa")
polygon(xx, yy, border = 8, col = gray(.9, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.3, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=3)
grid(10, 5, lwd = 1) # background grid
legend('topleft',legend=c('Modelo 1', 'Modelo 2'),col=c(2,3), lty=c(1,1), cex = .8)
# Out-of-sample RMSE of each model.
# NOTE(review): test[,1]/test2[,1] are on the differenced scale while
# pred/pred2 were passed through diffinv (level scale) -- the two operands
# appear to be on different scales; confirm before trusting rmse1/rmse2.
rmse1 = sqrt(sum((test[,1] - pred)*(test[,1] - pred))/length(test[,1]))
rmse2 = sqrt(sum((test2[,1] - pred2)*(test2[,1] - pred2))/length(test2[,1]))
|
/seminario_spectral_analysis.R
|
no_license
|
rodra-go/mae5870_seminario
|
R
| false
| false
| 11,221
|
r
|
# MAE5870 - Análise de Séries Temporais
# Rodrigo da Silva Cunha
# NUSP 7631302
# ------------------------------------------------------------------------------
# Bibliotecas necessárias
library(latex2exp)
library(astsa)
library(gridExtra)
library(dplyr)
# ------------------------------------------------------------------------------
# Carregando os dados
seminario_path = "/home/rodra/workspace/MAT5870/Seminário/Dados/seminario.csv"
seminario = read.csv(seminario_path)
seminario = seminario[c('ibov','dexeuus','dexchus','dexrealus','dcoilwtico')]
ibov = diff(ts(seminario$ibov))
dexeuus = diff(ts(seminario$dexeuus))
dexchus = diff(ts(seminario$dexchus))
dexrealus = diff(ts(seminario$dexrealus))
dcoilwtico = diff(ts(seminario$dcoilwtico))
seminario_ts = data.frame(ibov, dexeuus, dexchus, dexrealus, dcoilwtico)
# ------------------------------------------------------------------------------
# Visualização das Séries
plot.ts(seminario, main="Análise de Séries Financeiras")
plot.ts(seminario_ts, main="Séries Diferenciadas")
# ------------------------------------------------------------------------------
# Análise Espectral
Y = seminario_ts # Y holds the transformed series
Nt = length(Y[,1])
L = 401; M = 800; alpha = .001; fdr = .001; tp = .1;
H = 31 # Horizonte de previsão
nq = 3 # number of inputs (exchange rates)
## Spectral Matrix
Yspec = mvspec(Y, spans=L, kernel="daniell", detrend=TRUE, demean=FALSE,
taper=tp, plot=FALSE)
n = Yspec$n.used # effective sample size
Fr = Yspec$freq # fundamental freqs
n.freq = length(Fr) # number of frequencies
Yspec$bandwidth*sqrt(12) # = 0.050 - the bandwidth
Pks = cbind(c('IBovespa','topleft',.0619,.1390,.22,.381,.4828), #ibov
c('EU/US','topright',.017,.112,.157, .193,.335), #euus
c('CH/US','bottomright',.019,.06,.12, .32,.407), #chus
c('REAL/US','topright',0.0475,.087,.182, .315,.393), #realus
c('Crude Oil','topleft',.07,.21, .345,.3865,.438)) #coil
# Periodogramas
for (i in 5:5){
pks = as.numeric(Pks[3:length(Pks[,i]),i])
tm = Nt/(n*pks)
lplc = Pks[2,i]
name = Pks[1,i]
lgds = paste(as.character(round(tm,2)),'dias',sep=' ')
ltys = c(1:length(pks))
plot(Fr, Yspec$spec[,i], type="l", ylab="Espectro", xlab="Frequencia",
main=paste('Periodograma',name, '| Taper=', tp, sep=' '))
abline(v = pks, lty=ltys)
legend(lplc,legend=lgds,lty=ltys,bty='o', cex = .8)
}
## Coherencies
Fq = qf(1-alpha, 2, L-2)
cn = Fq/(L-1+Fq)
plt.name = c("(a)","(b)","(c)","(d)","(e)","(f)")
par(mfrow=c(2,2), cex.lab=1.2)
# The coherencies are listed as 1,2,...,10=choose(5,2)
for (i in 2:5){
idx = 1 + (i - 1) * (i - 2)/2
plot(Fr, Yspec$coh[,idx], type="l", ylab="Sq Coherence", xlab="Frequency",
ylim=c(0,.4), main=c("Ibov with", names(seminario[i])))
abline(h = cn); text(.45,.98, plt.name[i-1], cex=1.2) }
## Multiple Coherency
par(mfrow=c(3,2))
coh.23 = stoch.reg(Y, cols.full = c(2,3), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e CH/US"))
coh.24 = stoch.reg(Y, cols.full = c(2,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e REAL/US"))
coh.25 = stoch.reg(Y, cols.full = c(2,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US e Crude Oil"))
coh.34 = stoch.reg(Y, cols.full = c(3,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "CH/US e REAL/US"))
coh.35 = stoch.reg(Y, cols.full = c(3,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "CH/US e Crude Oil"))
coh.45 = stoch.reg(Y, cols.full = c(4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US e Crude Oil"))
par(mfrow=c(2,2))
coh.234 = stoch.reg(Y, cols.full = c(2,3,4), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US, EU/US, CH/US"))
coh.345 = stoch.reg(Y, cols.full = c(3,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, CH/US, Crude Oil"))
coh.245 = stoch.reg(Y, cols.full = c(2,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, REAL/US, Crude Oil"))
coh.235 = stoch.reg(Y, cols.full = c(2,3,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "EU/US, CH/US, Crude Oil"))
par(mfrow=c(1,1))
coh.2345 = stoch.reg(Y, cols.full = c(2,3,4,5), cols.red = NULL, alpha, L, M,
plot.which = "coh")
title(main = c("IBovespa com", "REAL/US, EU/US, CH/US e Crude Oil"))
# ------------------------------------------------------------------------------
# Regressão no Domínio da Frequência
## Partial F
numer.df = 2*nq; denom.df = Yspec$df-2*nq
par(mfrow=c(4,1), mar=c(3,3,2,1)+.5, mgp = c(1.5,0.4,0), cex.lab=1.2)
out.234 = stoch.reg(Y, cols.full = c(2,3,4), cols.red = 4, alpha, L, M,
plot.which = "F.stat")
eF = out.234$eF
pvals = pf(eF, numer.df, denom.df, lower.tail = FALSE)
pID = FDR(pvals, fdr); abline(h=c(eF[pID]), lty=2)
title(main = "Partial F Statistic")
# Regression Coefficients
S = seq(from = -M/2+1, to = M/2 - 1, length = M-1)
plot(S, coh.234$Betahat[,1], type = "h", xlab = "", ylab = names(seminario[2]),
ylim = c(-20, 12), lwd=2)
abline(h=0); title(main = "Impulse Response Functions")
plot(S, coh.234$Betahat[,2], type = "h", xlab = "Index", ylab =
names(seminario[3]), ylim = c(-7, 3), lwd=2)
abline(h=0)
plot(S, coh.234$Betahat[,3], type = "h", xlab = "Index", ylab =
names(seminario[4]), ylim = c(-7, 5), lwd=2)
abline(h=0)
# Selecionando os coeficientes
euus_betas = data.frame(lag=S, beta=coh.234$Betahat[,1])
chus_betas = data.frame(lag=S, beta=coh.234$Betahat[,2])
realus_betas = data.frame(lag=S, beta=coh.234$Betahat[,3])
euus_b = euus_betas %>% filter(Mod(beta) > 8)
chus_b = chus_betas %>% filter(Mod(beta) > 3)
realus_b = realus_betas %>% filter(Mod(beta) > 3)
# Regressão
sem = ts.intersect(
It=Y[,1],
E0=Y[,2],
C0=Y[,3],
C1=stats::lag(Y[,3],-1),
R0=Y[,4],
R1=stats::lag(Y[,4],-1),
R2=stats::lag(Y[,4],-2),
R3=stats::lag(Y[,4],-3))
(u = lm(sem[,1]~sem[,2:8], na.action=NULL))
acf2(ts(resid(u)), main="ACF e PACF - Vt")
mymodel = sarima(sem[,1], 1,0,1, 1,0,0,7, xreg=sem[,2:8])
# Previsão
N = length(sem[,1]) # Tamanho do dataset de treino
train = sem[1:(N-H),]
test = sem[(N-H+1):N,]
par(mfrow=c(1,1))
prev = sarima.for(
train[,1], H,
1,0,1,
1,0,0,7,
xreg=train[,2:8],
newxreg=test[,2:8])
ibov_hat = diffinv(rbind(data.matrix(train[,1]), data.matrix(prev$pred)))
se_hat = diffinv(prev$se)
se_hat = se_hat[2:length(se_hat)]
up_hat = ts(ibov_hat[(N-H+1):N,] + se_hat + mean(seminario$ibov),
start=(N-H+1),
frequency=1)
lo_hat = ts(ibov_hat[(N-H+1):N,] - se_hat + mean(seminario$ibov),
start=(N-H+1),
frequency=1)
ibov1 = ibov_hat[1:(N-H),]
pred = ts(ibov_hat[(N-H+1):N,], start=(N-H+1), frequency=1)
# Regressão com 3 variaveis: primeiro plot, com menos zoom
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2000,2441),
type='l',
main="Regressão Modelo 1 - IBov ~ REAL/US + CH/US + EU/US",
xlab="Dia", ylab="IBovespa")
xx = c(time(up_hat), rev(time(lo_hat))); yy = c(lo_hat, rev(up_hat))
polygon(xx, yy, border = 8, col = gray(.6, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # grid only in y-direction
# Regressãoc com 3 variáveis: Segundo plot, com mais zoom
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão Modelo 1 - IBov ~ REAL/US + CH/US + EU/US",
xlab="Dia", ylab="IBovespa")
xx = c(time(up_hat), rev(time(lo_hat))); yy = c(lo_hat, rev(up_hat))
polygon(xx, yy, border = 8, col = gray(.6, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # grid only in y-direction
# Regressão com apenas REAL/US
# 5) Regressão
lr = LagReg(Y[,4], Y[,1], L=L, M=M, threshold =200)
## Ajuste
sem2 = ts.intersect(
I=Y[,1],
T0=Y[,4],
T1=stats::lag(Y[,4],-1),
T2=stats::lag(Y[,4],-2),
T3=stats::lag(Y[,4],-3),
T4=stats::lag(Y[,4],-4),
T9=stats::lag(Y[,4],-9),
T11=stats::lag(Y[,4],-11))
(u2 = lm(sem2[,1]~sem2[,2:8], na.action=NULL))
# Avaliação do erro final
acf2(ts(resid(u2)), main="ACF e PAFC - Vt")
# Ajuste de SARIMA para o erro
mymodel2 = sarima(sem2[,1], 1, 0, 1, 1, 0, 0, 7, xreg=sem2[,2:8])
# Previsão
N = length(sem2[,1]) # Tamanho do dataset de treino
train2 = sem2[1:(N-H),]
test2 = sem2[(N-H+1):N,]
par(mfrow=c(1,1))
prev2 = sarima.for(
train2[,1], H,
1,0,1,
1,0,0,7,
xreg=train2[,2:8],
newxreg=test2[,2:8])
ibov_hat2 = diffinv(rbind(data.matrix(train2[,1]), data.matrix(prev2$pred)))
se_hat2 = diffinv(prev2$se)
se_hat2 = se_hat2[2:length(se_hat2)]
up_hat2 = ts(ibov_hat2[(N-H+1):N,] + se_hat2 + mean(seminario$ibov),
start=(N-H+1+8),
frequency=1)
lo_hat2 = ts(ibov_hat2[(N-H+1):N,] - se_hat2 + mean(seminario$ibov),
start=(N-H+1+8),
frequency=1)
ibov2 = ibov_hat2[1:(N-H),]
pred2 = ts(ibov_hat2[(N-H+1):N,], start=(N-H+1+8), frequency=1)
# Regressão com 1 variavel: primeiro plot, com menos zoom
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2000,2441),
type='l',
main="Regressão Modelo 2 - IBov ~ REAL/US",
xlab="Dia", ylab="IBovespa")
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.6, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # grid only in y-direction
# Regressão com 1 variável: Segundo plot, com mais zoom
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão Modelo 2 - IBov ~ REAL/US",
xlab="Dia", ylab="IBovespa")
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.6, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=2)
grid(10, 5, lwd = 1) # grid only in y-direction
# Comparação dos dois modelos
plot(ibov1 + mean(seminario$ibov),
ylim=c(50000, 175000),
xlim=c(2300,2441),
type='l',
main="Regressão - Modelos 1 e 2",
xlab="Dia", ylab="IBovespa")
polygon(xx, yy, border = 8, col = gray(.9, alpha = .2))
lines(pred + mean(seminario$ibov), type="p", col=2)
xx2 = c(time(up_hat2), rev(time(lo_hat2))); yy2 = c(lo_hat2, rev(up_hat2))
polygon(xx2, yy2, border = 8, col = gray(.3, alpha = .2))
lines(pred2 + mean(seminario$ibov), type="p", col=3)
grid(10, 5, lwd = 1) # grid only in y-direction
legend('topleft',legend=c('Modelo 1', 'Modelo 2'),col=c(2,3), lty=c(1,1), cex = .8)
rmse1 = sqrt(sum((test[,1] - pred)*(test[,1] - pred))/length(test[,1]))
rmse2 = sqrt(sum((test2[,1] - pred2)*(test2[,1] - pred2))/length(test2[,1]))
|
# Plot 1 (Exploratory Data Analysis): histogram of household Global Active
# Power restricted to 2007-02-01 and 2007-02-02, written to plot1.png.
library(sqldf)
# NOTE(review): plyr/dplyr are loaded but never used in this script -- confirm.
library(plyr)
library(dplyr)
print("cleanup variables before starting")
rm(list = ls())
print("Reading data file")
file <- c("household_power_consumption.txt")
# Filter to the two target dates while reading: sqldf applies the WHERE
# clause during the load, so the full file is never held as a data frame.
data_subset <- read.csv.sql(file, header = T, sep=";", sql = "select * from file where (Date == '1/2/2007' OR Date == '2/2/2007')" )
## close all open connections left behind by read.csv.sql
closeAllConnections()
print(showConnections(all = TRUE))
## Combine the Date and Time columns into a single POSIXct column
print("Transform date and time columns ")
d_dt <- as.POSIXct(paste(data_subset$Date, data_subset$Time), format="%d/%m/%Y %H:%M:%S")
d1 <- cbind(d_dt, data_subset[3:9])
colnames(d1)[1] <- "Date"
print(names(d1))
## plot 1
## open the PNG device (480x480, the assignment's required size)
print("Opening device for printing")
png(filename = "plot1.png", width = 480, height = 480, units = "px")
print("Plotting histogram")
hist(d1$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col ="red")
print("closing device")
dev.off()
|
/ExData_Plotting1/plot1.R
|
no_license
|
sheelarani/-datasciencecoursera
|
R
| false
| false
| 984
|
r
|
library(sqldf)
library(plyr)
library(dplyr)
print("cleanup variables before starting")
rm(list = ls())
print("Reading data file")
file <- c("household_power_consumption.txt")
data_subset <- read.csv.sql(file, header = T, sep=";", sql = "select * from file where (Date == '1/2/2007' OR Date == '2/2/2007')" )
##close all open connetions
closeAllConnections()
print(showConnections(all = TRUE))
## Paste date and time columns in one column
print("Transform date and time columns ")
d_dt <- as.POSIXct(paste(data_subset$Date, data_subset$Time), format="%d/%m/%Y %H:%M:%S")
d1 <- cbind(d_dt, data_subset[3:9])
colnames(d1)[1] <- "Date"
print(names(d1))
## plot 1
## open device to write to png file
print("Opening device for printing")
png(filename = "plot1.png", width = 480, height = 480, units = "px")
print("Plotting histogram")
hist(d1$Global_active_power, main = "Global Active Power", xlab = "Global Active Power (kilowatts)", col ="red")
print("closing device")
dev.off()
|
# Draw a 2x2 panel of per-participant mean vitals against diastolic blood
# pressure, each with a least-squares regression line, into a LabKey PNG.
# `labkey.data` is supplied by the LabKey R report runtime.
#
# Cleanups vs. the original: removed the stray empty positional arguments in
# each plot() call (they passed a "missing" third argument) and the dead
# c11/c21 assignments, which captured plot()'s invisible NULL and were
# reassigned without ever being read.
library(Cairo);
# Collapse to one row per participant: mean of every column (non-numeric
# columns yield NA with a warning, matching the original behavior).
data_means <- aggregate(labkey.data, list(ParticipantID =
labkey.data$participantid), mean, na.rm = TRUE);
Cairo(file="${imgout:multiplot.png}", type="png")
op <- par(mfcol = c(2, 2)) # 2 x 2 pictures on one plot
# Weight vs. diastolic BP
plot(data_means$diastolicbloodpressure, data_means$weight_kg,
     xlab="Diastolic Blood Pressure (mm Hg)", ylab="Weight (kg)",
     mfg=c(1, 1))
abline(lsfit(data_means$diastolicbloodpressure, data_means$weight_kg))
# Systolic vs. diastolic BP
plot(data_means$diastolicbloodpressure, data_means$systolicbloodpressure,
     xlab="Diastolic Blood Pressure (mm Hg)",
     ylab="Systolic Blood Pressure (mm Hg)", mfg= c(2, 1))
abline(lsfit(data_means$diastolicbloodpressure, data_means$systolicbloodpressure))
# Pulse vs. diastolic BP
plot(data_means$diastolicbloodpressure, data_means$pulse,
     xlab="Diastolic Blood Pressure (mm Hg)",
     ylab="Pulse Rate (Beats/Minute)", mfg= c(1, 2))
abline(lsfit(data_means$diastolicbloodpressure, data_means$pulse))
# Temperature vs. diastolic BP
plot(data_means$diastolicbloodpressure, data_means$temp_c,
     xlab="Diastolic Blood Pressure (mm Hg)",
     ylab="Temperature (Degrees C)", mfg= c(2, 2))
abline(lsfit(data_means$diastolicbloodpressure, data_means$temp_c))
par(op); # restore graphics parameters
dev.off();
|
/sampledata/FolderExport/SampleWithSubfolders.folder/reports/R MultiPlot Regressions_ Diastolic BP.db_979.R
|
no_license
|
scchess/LabKey
|
R
| false
| false
| 1,217
|
r
|
## LabKey report: 2x2 grid of regressions of four measures against
## diastolic blood pressure, averaged per participant.
library(Cairo)
## labkey.data is injected by the LabKey report environment; average every
## column per participant before plotting.
data_means <- aggregate(labkey.data,
                        list(ParticipantID = labkey.data$participantid),
                        mean, na.rm = TRUE)
Cairo(file = "${imgout:multiplot.png}", type = "png")
op <- par(mfcol = c(2, 2))  # 2 x 2 pictures on one plot
## Each panel: scatter plot plus least-squares regression line.
## (The stray empty arguments and unused c11/c21 assignments of plot()'s
## NULL return value were removed.)
plot(data_means$diastolicbloodpressure, data_means$weight_kg,
     xlab = "Diastolic Blood Pressure (mm Hg)", ylab = "Weight (kg)",
     mfg = c(1, 1))
abline(lsfit(data_means$diastolicbloodpressure, data_means$weight_kg))
plot(data_means$diastolicbloodpressure, data_means$systolicbloodpressure,
     xlab = "Diastolic Blood Pressure (mm Hg)",
     ylab = "Systolic Blood Pressure (mm Hg)", mfg = c(2, 1))
abline(lsfit(data_means$diastolicbloodpressure, data_means$systolicbloodpressure))
plot(data_means$diastolicbloodpressure, data_means$pulse,
     xlab = "Diastolic Blood Pressure (mm Hg)",
     ylab = "Pulse Rate (Beats/Minute)", mfg = c(1, 2))
abline(lsfit(data_means$diastolicbloodpressure, data_means$pulse))
plot(data_means$diastolicbloodpressure, data_means$temp_c,
     xlab = "Diastolic Blood Pressure (mm Hg)",
     ylab = "Temperature (Degrees C)", mfg = c(2, 2))
abline(lsfit(data_means$diastolicbloodpressure, data_means$temp_c))
par(op)  # restore graphics parameters
dev.off()
|
# Nested function calls: log() and exp() are inverse functions,
# so log(exp(1)) is exactly 1.
resu <- log(exp(1))  # use <-, not =, for assignment
print(resu)
# Show the help page for the function.
?log
# Show the argument list of the function.
args(log)
print(log(8, base = 2))
# R matches positional arguments in the order shown in the help file (x, base).
print(log(8, 2))
# Show the help page covering all arithmetic operators.
help("+")
|
/functions.R
|
no_license
|
baleeiro17/HarvardX-s-Data-Science-
|
R
| false
| false
| 426
|
r
|
# Nested function calls: log() and exp() are inverse functions,
# so log(exp(1)) is exactly 1.
resu <- log(exp(1))  # use <-, not =, for assignment
print(resu)
# Show the help page for the function.
?log
# Show the argument list of the function.
args(log)
print(log(8, base = 2))
# R matches positional arguments in the order shown in the help file (x, base).
print(log(8, 2))
# Show the help page covering all arithmetic operators.
help("+")
|
#!/usr/bin/env Rscript
# Keep the best informative nr BLASTx hit per transcript, flag DMC1/REC8
# hits, and attach Trinotate annotations.
#############
# LIBRARIES #
#############
library(data.table)
library(tidyverse)
########
# MAIN #
########
# outfmt6 BLASTx table: standard 12 columns plus a 13th hit-description column
blast_res <- fread("output/03_deseq2/tissue_itWT_LRT/meiosis_sp/dmc1_rec8/dmc1_rec8_nr_blastx.outfmt6")
# Trinotate annotations for the longest isoform of each gene
trinotate <- fread('data/mh-transcriptome/output/trinotate/sorted/longest_isoform_annots.csv', na.strings="")
# Replace fread's default V1..V13 names with descriptive ones
setnames(blast_res, old=c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V13"),
         new=c("transcript_id", "hit", "%_identical_matches", "alignment_length", "no_mismatches", "no_gap_openings",
               "query_start", "query_end", "subject_start", "subject_end", "evalue", "bit_score", "annotation"))
##filter out hypothetical/unchar
unann_blast_no_hypo <- subset(blast_res, !grepl("uncharacter|hypothetical|unnamed|unknown|GSCOCG", annotation, ignore.case = TRUE))
##order so that in event of eval min. tie, which.min takes hit with highest bitscore
setorder(unann_blast_no_hypo, transcript_id, evalue, -bit_score)
##extract result with lowest evalue for each peptide, sorted by bit-value in case of e-value tie
# which.min returns the FIRST minimum, so after the setorder above any
# e-value tie resolves to the hit with the highest bit score.
min_evalues <- unann_blast_no_hypo[,.SD[which.min(evalue)], by=transcript_id]
# Restrict to the meiosis genes of interest
dmc1_rec8 <- subset(min_evalues, grepl("DMC1|REC8", annotation, ignore.case = TRUE))
dmc1_rec8_trinotate <- merge(dmc1_rec8, trinotate, by="transcript_id")
# NOTE(review): min_evalues (ALL best hits) is written to the dmc1_rec8
# output file, and dmc1_rec8_trinotate is never used -- confirm whether
# the DMC1/REC8 subset was intended here.
fwrite(min_evalues, "output/03_deseq2/tissue_itWT_LRT/meiosis_sp/dmc1_rec8/dmc1_rec8_hits.csv")
|
/src/meiosis_sp/dmc1_rec8_nrblastx_res.R
|
no_license
|
sarahinwood/mh-rnaseq
|
R
| false
| false
| 1,434
|
r
|
#!/usr/bin/env Rscript
# Keep the best informative nr BLASTx hit per transcript, flag DMC1/REC8
# hits, and attach Trinotate annotations.
#############
# LIBRARIES #
#############
library(data.table)
library(tidyverse)
########
# MAIN #
########
# outfmt6 BLASTx table: standard 12 columns plus a 13th hit-description column
blast_res <- fread("output/03_deseq2/tissue_itWT_LRT/meiosis_sp/dmc1_rec8/dmc1_rec8_nr_blastx.outfmt6")
# Trinotate annotations for the longest isoform of each gene
trinotate <- fread('data/mh-transcriptome/output/trinotate/sorted/longest_isoform_annots.csv', na.strings="")
# Replace fread's default V1..V13 names with descriptive ones
setnames(blast_res, old=c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10", "V11", "V12", "V13"),
         new=c("transcript_id", "hit", "%_identical_matches", "alignment_length", "no_mismatches", "no_gap_openings",
               "query_start", "query_end", "subject_start", "subject_end", "evalue", "bit_score", "annotation"))
##filter out hypothetical/unchar
unann_blast_no_hypo <- subset(blast_res, !grepl("uncharacter|hypothetical|unnamed|unknown|GSCOCG", annotation, ignore.case = TRUE))
##order so that in event of eval min. tie, which.min takes hit with highest bitscore
setorder(unann_blast_no_hypo, transcript_id, evalue, -bit_score)
##extract result with lowest evalue for each peptide, sorted by bit-value in case of e-value tie
# which.min returns the FIRST minimum, so after the setorder above any
# e-value tie resolves to the hit with the highest bit score.
min_evalues <- unann_blast_no_hypo[,.SD[which.min(evalue)], by=transcript_id]
# Restrict to the meiosis genes of interest
dmc1_rec8 <- subset(min_evalues, grepl("DMC1|REC8", annotation, ignore.case = TRUE))
dmc1_rec8_trinotate <- merge(dmc1_rec8, trinotate, by="transcript_id")
# NOTE(review): min_evalues (ALL best hits) is written to the dmc1_rec8
# output file, and dmc1_rec8_trinotate is never used -- confirm whether
# the DMC1/REC8 subset was intended here.
fwrite(min_evalues, "output/03_deseq2/tissue_itWT_LRT/meiosis_sp/dmc1_rec8/dmc1_rec8_hits.csv")
|
# Parse one HTML result row from a targetscan.org target table into a
# one-row data.frame with columns Ortholog, geneName, consSites and
# poorlySites (site counts are kept as character, as in the page source).
extractTSinfo <- function(x){
  # Substring of `html` between the first `open` marker and the
  # following `close` marker.
  clip <- function(html, open, close) {
    after_open <- strsplit(html, open)[[1]][2]
    strsplit(after_open, close)[[1]][1]
  }
  # Conserved / poorly-conserved site counts are the 2nd and 3rd
  # bold (<B>...</B>) fields of the row.
  bold_fields <- strsplit(x, "<B>")[[1]]
  data.frame(Ortholog    = clip(x, "target=new>", "</a></td>"),
             geneName    = clip(x, "<td nowrap>", "</td>"),
             consSites   = strsplit(bold_fields[2], "</B>")[[1]][1],
             poorlySites = strsplit(bold_fields[3], "</B>")[[1]][1])
}
# Scrape targetscan.org (release 7.1 pages) for the predicted targets of a
# miRNA and return them as a data.frame with columns Ortholog, geneName,
# consSites and poorlySites.  An empty data.frame is returned when the
# miRNA (and every offered family derivative) is not in the database.
#
# mirna:   miRNA name, e.g. "miR-9-5p" (mandatory).
# species: common species name ("Human", "Mouse", ...); guessed from the
#          miRNA's three-letter prefix when NULL.
# release: normalised (dot stripped) but otherwise unused here --
#          NOTE(review): the request URL is hard-coded to vert_71, so other
#          release values are silently ignored; confirm intent.
# maxOut:  maximum number of result rows to parse (NULL = all).
targetScan <- function(mirna=NULL, species=NULL, release="7.1", maxOut=NULL){
  # Starting values for later tests
  origMirna <- mirna
  notFound <- FALSE
  allChecked <- FALSE
  if(is.null(species)){
    # Guessing species from mirna prefix (e.g. "hsa-" -> Human)
    if(substr(mirna,1,3)=="hsa") species <- "Human"
    if(substr(mirna,1,3)=="mmu") species <- "Mouse"
    if(substr(mirna,1,3)=="rno") species <- "Rat"
    if(substr(mirna,1,3)=="ptr") species <- "Chimpanzee"
    if(substr(mirna,1,3)=="mml") species <- "Rhesus"
    if(substr(mirna,1,3)=="bta") species <- "Cow"
    if(substr(mirna,1,3)=="cfa") species <- "Dog"
    if(substr(mirna,1,3)=="mdo") species <- "Opossum"
    if(substr(mirna,1,3)=="gga") species <- "Chicken"
    if(substr(mirna,1,3)=="xtr") species <- "Frog"
  }
  # Input checks: mirna is mandatory; species is capitalised and validated.
  # (The runtime error string contains the typo "mirne"; left untouched.)
  if(is.null(mirna)) stop("No mirne name given. Use e.g. 'miR-9-5p'.")
  species <- paste(toupper(substr(species, 1, 1)), tolower(substr(species, 2, nchar(species))), sep="")
  species <- match.arg(species, c("Human", "Mouse", "Rat", "Chimpanzee", "Rhesus", "Cow", "Dog", "Opossum", "Chicken", "Frog"))
  release <- gsub("\\.","",release)
  # Retrieve the result page as one string per line
  tsAddress <- paste("http://www.targetscan.org/cgi-bin/targetscan/vert_71/targetscan.cgi?species=",species,"&mirg=",mirna,sep="")
  tsOut <- scan(tsAddress, what = "", sep = "\n", quiet = TRUE)
  # Check first if the miRNA is in the targetScan database
  if(sum(grepl("is not in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
    warning(mirna," is not in the targetScan database!\n")
    res <- data.frame(Ortholog=NULL,
                      geneName=NULL,
                      consSites=NULL,
                      poorlySites=NULL)
  }else {
    # Check first, if the targetScan result is unique
    if(sum(grepl("matches multiple families in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
      notFound <- TRUE
      # Collect the alternative family-member names offered by the page
      multFams <- tsOut[grepl("=miR",tsOut)]
      newMirnas <- character(length(multFams))
      for(i in 1:length(multFams)){
        temp <- strsplit(multFams[i], "</A")[[1]][1]
        newMirnas[i] <- strsplit(temp,'\">')[[1]][2]
      }
      warning("Multiple matches multiple families in the targetScan database for ",mirna,":\n",paste(newMirnas,collapse="; "),"\nOnly the first one is used!")
      # Take the first unique set of miRNAs
      # NOTE(review): `temp` may have length > 1, which would make
      # `takeThis` a vector; presumably only the first candidate is
      # intended -- confirm.
      temp <- which(grepl("/", newMirnas)==TRUE)
      takeThis <- 1
      if(length(temp)>0) takeThis <- temp + 1
      # It can happen that none of the derivatives of a miRNA is in the target database, fetch this case here
      while(notFound & !allChecked){
        # Start to check the first reasonable miRNA
        mirna <- newMirnas[takeThis]
        tsAddress <- paste("http://www.targetscan.org/cgi-bin/targetscan/vert_71/targetscan.cgi?species=",species,"&mirg=",mirna,sep="")
        tsOut <- scan(tsAddress, what = "", sep = "\n", quiet = TRUE)
        # If this is in the database, stop the searching and mark as found, if not go on until all possibilities are checked
        if(sum(grepl("is not in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
          if(takeThis < length(newMirnas)){
            takeThis <- takeThis + 1
          } else {
            allChecked <- TRUE
          }
        } else {
          notFound <- FALSE
        }
      }
    }
    # If none of the miRNA derivatives was found in the database, return an empty result.
    if(notFound){
      warning(mirna," is not in the targetScan database!\nAlso none of its derivatives",paste(newMirnas,collapse=","),"could be found")
      res <- data.frame(Ortholog=NULL,
                        geneName=NULL,
                        consSites=NULL,
                        poorlySites=NULL)
    } else {
      # Find the rows of interest (Assume it to be in the first 100 rows, if this isn't the case extend the search area)
      startRow <- grepl("<th>total</th>",tsOut[1:min(100,length(tsOut))])
      if(sum(startRow)!=1) startRow <- grepl("<th>total</th>",tsOut)
      if(sum(startRow)!=1) stop("ERROR: No table provided by targetScan.org!")
      startRow <- which(startRow==1)
      ifelse(is.null(maxOut), maxOut <- length(tsOut)-1, maxOut <- startRow + maxOut - 1)
      # Now extract the information and put them into a dataframe
      # The first row is a bit different, as it is contained in the header row, all others are then standardized
      temp1 <- strsplit(tsOut[startRow],"<td>")
      firstEntry <- paste(temp1[[1]][2],"<td>",temp1[[1]][3],sep="")
      res <- extractTSinfo(firstEntry)
      for(i in (startRow+1):maxOut){
        res <- rbind(res,extractTSinfo(tsOut[i]))
      }
    }
  }
  res
}
|
/R/targetScan.R
|
no_license
|
cran/hoardeR
|
R
| false
| false
| 5,469
|
r
|
# Parse one HTML result row from a targetscan.org target table into a
# one-row data.frame with columns Ortholog, geneName, consSites and
# poorlySites (site counts are kept as character, as in the page source).
extractTSinfo <- function(x){
  # Substring of `html` between the first `open` marker and the
  # following `close` marker.
  clip <- function(html, open, close) {
    after_open <- strsplit(html, open)[[1]][2]
    strsplit(after_open, close)[[1]][1]
  }
  # Conserved / poorly-conserved site counts are the 2nd and 3rd
  # bold (<B>...</B>) fields of the row.
  bold_fields <- strsplit(x, "<B>")[[1]]
  data.frame(Ortholog    = clip(x, "target=new>", "</a></td>"),
             geneName    = clip(x, "<td nowrap>", "</td>"),
             consSites   = strsplit(bold_fields[2], "</B>")[[1]][1],
             poorlySites = strsplit(bold_fields[3], "</B>")[[1]][1])
}
# Scrape targetscan.org (release 7.1 pages) for the predicted targets of a
# miRNA and return them as a data.frame with columns Ortholog, geneName,
# consSites and poorlySites.  An empty data.frame is returned when the
# miRNA (and every offered family derivative) is not in the database.
#
# mirna:   miRNA name, e.g. "miR-9-5p" (mandatory).
# species: common species name ("Human", "Mouse", ...); guessed from the
#          miRNA's three-letter prefix when NULL.
# release: normalised (dot stripped) but otherwise unused here --
#          NOTE(review): the request URL is hard-coded to vert_71, so other
#          release values are silently ignored; confirm intent.
# maxOut:  maximum number of result rows to parse (NULL = all).
targetScan <- function(mirna=NULL, species=NULL, release="7.1", maxOut=NULL){
  # Starting values for later tests
  origMirna <- mirna
  notFound <- FALSE
  allChecked <- FALSE
  if(is.null(species)){
    # Guessing species from mirna prefix (e.g. "hsa-" -> Human)
    if(substr(mirna,1,3)=="hsa") species <- "Human"
    if(substr(mirna,1,3)=="mmu") species <- "Mouse"
    if(substr(mirna,1,3)=="rno") species <- "Rat"
    if(substr(mirna,1,3)=="ptr") species <- "Chimpanzee"
    if(substr(mirna,1,3)=="mml") species <- "Rhesus"
    if(substr(mirna,1,3)=="bta") species <- "Cow"
    if(substr(mirna,1,3)=="cfa") species <- "Dog"
    if(substr(mirna,1,3)=="mdo") species <- "Opossum"
    if(substr(mirna,1,3)=="gga") species <- "Chicken"
    if(substr(mirna,1,3)=="xtr") species <- "Frog"
  }
  # Input checks: mirna is mandatory; species is capitalised and validated.
  # (The runtime error string contains the typo "mirne"; left untouched.)
  if(is.null(mirna)) stop("No mirne name given. Use e.g. 'miR-9-5p'.")
  species <- paste(toupper(substr(species, 1, 1)), tolower(substr(species, 2, nchar(species))), sep="")
  species <- match.arg(species, c("Human", "Mouse", "Rat", "Chimpanzee", "Rhesus", "Cow", "Dog", "Opossum", "Chicken", "Frog"))
  release <- gsub("\\.","",release)
  # Retrieve the result page as one string per line
  tsAddress <- paste("http://www.targetscan.org/cgi-bin/targetscan/vert_71/targetscan.cgi?species=",species,"&mirg=",mirna,sep="")
  tsOut <- scan(tsAddress, what = "", sep = "\n", quiet = TRUE)
  # Check first if the miRNA is in the targetScan database
  if(sum(grepl("is not in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
    warning(mirna," is not in the targetScan database!\n")
    res <- data.frame(Ortholog=NULL,
                      geneName=NULL,
                      consSites=NULL,
                      poorlySites=NULL)
  }else {
    # Check first, if the targetScan result is unique
    if(sum(grepl("matches multiple families in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
      notFound <- TRUE
      # Collect the alternative family-member names offered by the page
      multFams <- tsOut[grepl("=miR",tsOut)]
      newMirnas <- character(length(multFams))
      for(i in 1:length(multFams)){
        temp <- strsplit(multFams[i], "</A")[[1]][1]
        newMirnas[i] <- strsplit(temp,'\">')[[1]][2]
      }
      warning("Multiple matches multiple families in the targetScan database for ",mirna,":\n",paste(newMirnas,collapse="; "),"\nOnly the first one is used!")
      # Take the first unique set of miRNAs
      # NOTE(review): `temp` may have length > 1, which would make
      # `takeThis` a vector; presumably only the first candidate is
      # intended -- confirm.
      temp <- which(grepl("/", newMirnas)==TRUE)
      takeThis <- 1
      if(length(temp)>0) takeThis <- temp + 1
      # It can happen that none of the derivatives of a miRNA is in the target database, fetch this case here
      while(notFound & !allChecked){
        # Start to check the first reasonable miRNA
        mirna <- newMirnas[takeThis]
        tsAddress <- paste("http://www.targetscan.org/cgi-bin/targetscan/vert_71/targetscan.cgi?species=",species,"&mirg=",mirna,sep="")
        tsOut <- scan(tsAddress, what = "", sep = "\n", quiet = TRUE)
        # If this is in the database, stop the searching and mark as found, if not go on until all possibilities are checked
        if(sum(grepl("is not in our miRNA database",tsOut[1:min(100,length(tsOut))]))>0){
          if(takeThis < length(newMirnas)){
            takeThis <- takeThis + 1
          } else {
            allChecked <- TRUE
          }
        } else {
          notFound <- FALSE
        }
      }
    }
    # If none of the miRNA derivatives was found in the database, return an empty result.
    if(notFound){
      warning(mirna," is not in the targetScan database!\nAlso none of its derivatives",paste(newMirnas,collapse=","),"could be found")
      res <- data.frame(Ortholog=NULL,
                        geneName=NULL,
                        consSites=NULL,
                        poorlySites=NULL)
    } else {
      # Find the rows of interest (Assume it to be in the first 100 rows, if this isn't the case extend the search area)
      startRow <- grepl("<th>total</th>",tsOut[1:min(100,length(tsOut))])
      if(sum(startRow)!=1) startRow <- grepl("<th>total</th>",tsOut)
      if(sum(startRow)!=1) stop("ERROR: No table provided by targetScan.org!")
      startRow <- which(startRow==1)
      ifelse(is.null(maxOut), maxOut <- length(tsOut)-1, maxOut <- startRow + maxOut - 1)
      # Now extract the information and put them into a dataframe
      # The first row is a bit different, as it is contained in the header row, all others are then standardized
      temp1 <- strsplit(tsOut[startRow],"<td>")
      firstEntry <- paste(temp1[[1]][2],"<td>",temp1[[1]][3],sep="")
      res <- extractTSinfo(firstEntry)
      for(i in (startRow+1):maxOut){
        res <- rbind(res,extractTSinfo(tsOut[i]))
      }
    }
  }
  res
}
|
# run_analysis.R: build a tidy data set ("myData.csv") with the average of
# each mean/std measurement for each activity and each subject of the
# UCI HAR Dataset.

# Read one partition ("train" or "test") and join the subject ids, activity
# ids and the 561 feature columns on the shared row number (ID).
read_har_set <- function(kind) {
  base <- file.path("UCI HAR Dataset", kind)
  subjects <- read.table(file.path(base, paste0("subject_", kind, ".txt")),
                         col.names = "subject_id")
  subjects$ID <- as.numeric(rownames(subjects))
  features <- read.table(file.path(base, paste0("X_", kind, ".txt")))
  features$ID <- as.numeric(rownames(features))
  activities <- read.table(file.path(base, paste0("y_", kind, ".txt")),
                           col.names = "activity_id")
  activities$ID <- as.numeric(rownames(activities))
  merged <- merge(subjects, activities, all = TRUE)
  merge(merged, features, all = TRUE)
}

# Step 1: merge the training and the test sets to create one data set.
train <- read_har_set("train")
test <- read_har_set("test")
data1 <- rbind(train, test)

# Step 2: extract only the measurements on the mean and standard deviation.
features <- read.table("UCI HAR Dataset/features.txt",
                       col.names = c("feature_id", "feature_label"))
selected_features <- features[grepl("mean\\(\\)", features$feature_label) |
                                grepl("std\\(\\)", features$feature_label), ]
# Columns 1:3 are ID/subject_id/activity_id, so feature columns start at 4.
data2 <- data1[, c(1, 2, 3, selected_features$feature_id + 3)]

# Step 3: use descriptive activity names to name the activities.
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt",
                              col.names = c("activity_id", "activity_label"))
data3 <- merge(data2, activity_labels)

# Step 4: appropriately label the data set with descriptive variable names.
selected_features$feature_label <- gsub("\\(\\)", "", selected_features$feature_label)
selected_features$feature_label <- gsub("-", ".", selected_features$feature_label)
for (i in seq_along(selected_features$feature_label)) {
  colnames(data3)[i + 3] <- selected_features$feature_label[i]
}
data4 <- data3

# Step 5: create a second, independent tidy data set with the average of
# each variable for each activity and each subject.
drops <- c("ID", "activity_label")
data5 <- data4[, !(names(data4) %in% drops)]
aggdata <- aggregate(data5,
                     by = list(subject = data5$subject_id,
                               activity = data5$activity_id),
                     FUN = mean, na.rm = TRUE)
drops <- c("subject", "activity")
aggdata <- aggdata[, !(names(aggdata) %in% drops)]
aggdata <- merge(aggdata, activity_labels)
write.csv(file = "myData.csv", x = aggdata)
|
/run_analysis.R
|
no_license
|
Paritaoza/Getting-and-Cleaning-Data_Project
|
R
| false
| false
| 3,331
|
r
|
# run_analysis.R: build a tidy data set ("myData.csv") with the average of
# each mean/std measurement for each activity and each subject of the
# UCI HAR Dataset.

# Read one partition ("train" or "test") and join the subject ids, activity
# ids and the 561 feature columns on the shared row number (ID).
read_har_set <- function(kind) {
  base <- file.path("UCI HAR Dataset", kind)
  subjects <- read.table(file.path(base, paste0("subject_", kind, ".txt")),
                         col.names = "subject_id")
  subjects$ID <- as.numeric(rownames(subjects))
  features <- read.table(file.path(base, paste0("X_", kind, ".txt")))
  features$ID <- as.numeric(rownames(features))
  activities <- read.table(file.path(base, paste0("y_", kind, ".txt")),
                           col.names = "activity_id")
  activities$ID <- as.numeric(rownames(activities))
  merged <- merge(subjects, activities, all = TRUE)
  merge(merged, features, all = TRUE)
}

# Step 1: merge the training and the test sets to create one data set.
train <- read_har_set("train")
test <- read_har_set("test")
data1 <- rbind(train, test)

# Step 2: extract only the measurements on the mean and standard deviation.
features <- read.table("UCI HAR Dataset/features.txt",
                       col.names = c("feature_id", "feature_label"))
selected_features <- features[grepl("mean\\(\\)", features$feature_label) |
                                grepl("std\\(\\)", features$feature_label), ]
# Columns 1:3 are ID/subject_id/activity_id, so feature columns start at 4.
data2 <- data1[, c(1, 2, 3, selected_features$feature_id + 3)]

# Step 3: use descriptive activity names to name the activities.
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt",
                              col.names = c("activity_id", "activity_label"))
data3 <- merge(data2, activity_labels)

# Step 4: appropriately label the data set with descriptive variable names.
selected_features$feature_label <- gsub("\\(\\)", "", selected_features$feature_label)
selected_features$feature_label <- gsub("-", ".", selected_features$feature_label)
for (i in seq_along(selected_features$feature_label)) {
  colnames(data3)[i + 3] <- selected_features$feature_label[i]
}
data4 <- data3

# Step 5: create a second, independent tidy data set with the average of
# each variable for each activity and each subject.
drops <- c("ID", "activity_label")
data5 <- data4[, !(names(data4) %in% drops)]
aggdata <- aggregate(data5,
                     by = list(subject = data5$subject_id,
                               activity = data5$activity_id),
                     FUN = mean, na.rm = TRUE)
drops <- c("subject", "activity")
aggdata <- aggdata[, !(names(aggdata) %in% drops)]
aggdata <- merge(aggdata, activity_labels)
write.csv(file = "myData.csv", x = aggdata)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geodimension_complete_relation_by_geography.R
\name{complete_relation_by_geography}
\alias{complete_relation_by_geography}
\alias{complete_relation_by_geography.geodimension}
\title{Complete relation by geography}
\usage{
complete_relation_by_geography(
gd,
lower_level_name = NULL,
upper_level_name = NULL
)
\method{complete_relation_by_geography}{geodimension}(
gd,
lower_level_name = NULL,
upper_level_name = NULL
)
}
\arguments{
\item{gd}{A \code{geodimension} object.}
\item{lower_level_name}{A string, name of the lower level.}
\item{upper_level_name}{A string, name of the upper level.}
}
\value{
A \code{geodimension} object.
}
\description{
Two levels can be related by attributes or by geography (if the upper level
has polygon-type geometry). Once related, if there are unrelated instances,
you can try to relate those instances using this function, which considers
alternative geographic relationships.
}
\details{
For example, if the lower level has associated point and polygon geometries,
only point geometry is considered to establish the initial relationship.
Polygon geometry is also considered in this function.
It does not necessarily succeed trying to relate the instances.
}
\examples{
library(tidyr)
library(sf)
ui <- gd_us \%>\%
get_unrelated_instances(lower_level_name = "state",
upper_level_name = "division")
gd <- gd_us \%>\%
complete_relation_by_geography(lower_level_name = "state",
upper_level_name = "division")
}
\seealso{
Other level association functions:
\code{\link{add_level}()},
\code{\link{geodimension}()},
\code{\link{get_unrelated_instances}()},
\code{\link{relate_levels}()}
}
\concept{level association functions}
|
/man/complete_relation_by_geography.Rd
|
permissive
|
josesamos/geodimension
|
R
| false
| true
| 1,815
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geodimension_complete_relation_by_geography.R
\name{complete_relation_by_geography}
\alias{complete_relation_by_geography}
\alias{complete_relation_by_geography.geodimension}
\title{Complete relation by geography}
\usage{
complete_relation_by_geography(
gd,
lower_level_name = NULL,
upper_level_name = NULL
)
\method{complete_relation_by_geography}{geodimension}(
gd,
lower_level_name = NULL,
upper_level_name = NULL
)
}
\arguments{
\item{gd}{A \code{geodimension} object.}
\item{lower_level_name}{A string, name of the lower level.}
\item{upper_level_name}{A string, name of the upper level.}
}
\value{
A \code{geodimension} object.
}
\description{
Two levels can be related by attributes or by geography (if the upper level
has polygon-type geometry). Once related, if there are unrelated instances,
you can try to relate those instances using this function, which considers
alternative geographic relationships.
}
\details{
For example, if the lower level has associated point and polygon geometries,
only point geometry is considered to establish the initial relationship.
Polygon geometry is also considered in this function.
It does not necessarily succeed trying to relate the instances.
}
\examples{
library(tidyr)
library(sf)
ui <- gd_us \%>\%
get_unrelated_instances(lower_level_name = "state",
upper_level_name = "division")
gd <- gd_us \%>\%
complete_relation_by_geography(lower_level_name = "state",
upper_level_name = "division")
}
\seealso{
Other level association functions:
\code{\link{add_level}()},
\code{\link{geodimension}()},
\code{\link{get_unrelated_instances}()},
\code{\link{relate_levels}()}
}
\concept{level association functions}
|
## plot2.R: line plot of Global Active Power over 2007-02-01 / 2007-02-02.
## Exploratory read of a few rows to inspect the column classes.
initTable <- read.table("household_power_consumption.txt", nrows = 10,
                        sep = ";", header = TRUE)
tbClasses <- sapply(initTable, class)
## Load enough rows to cover the two target dates.
initTable <- read.table("household_power_consumption.txt", nrows = 103000,
                        sep = ";", header = TRUE, stringsAsFactors = FALSE)
## Keep only 1 and 2 February 2007.
## BUG FIX: the second condition compared the whole data frame
## (initTable == "1/2/2007") to the date string; it must compare the
## Date column.
powEconData <- initTable[initTable$Date == "2/2/2007" |
                           initTable$Date == "1/2/2007", ]
## Combine Date and Time into a single POSIXct column (vectorised paste
## instead of a per-row sapply).
dateStrings <- gsub("/", "-", paste(powEconData$Date, powEconData$Time))
datesPowEc <- strptime(dateStrings, "%d-%m-%Y %H:%M:%S")
powEconData$DateTime <- datesPowEc
## Convert the measurement columns (all but Date, Time and the trailing
## DateTime column) to numeric.
for (i in 3:(length(powEconData) - 1)) {
  powEconData[, i] <- as.numeric(powEconData[, i])
}
## Second plot
png("plot2.png")
ok <- !is.na(powEconData$Global_active_power)
plot(powEconData$DateTime[ok], powEconData$Global_active_power[ok],
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
/plot2.R
|
no_license
|
caramirezal/ExData_Plotting1
|
R
| false
| false
| 1,025
|
r
|
## plot2.R: line plot of Global Active Power over 2007-02-01 / 2007-02-02.
## Exploratory read of a few rows to inspect the column classes.
initTable <- read.table("household_power_consumption.txt", nrows = 10,
                        sep = ";", header = TRUE)
tbClasses <- sapply(initTable, class)
## Load enough rows to cover the two target dates.
initTable <- read.table("household_power_consumption.txt", nrows = 103000,
                        sep = ";", header = TRUE, stringsAsFactors = FALSE)
## Keep only 1 and 2 February 2007.
## BUG FIX: the second condition compared the whole data frame
## (initTable == "1/2/2007") to the date string; it must compare the
## Date column.
powEconData <- initTable[initTable$Date == "2/2/2007" |
                           initTable$Date == "1/2/2007", ]
## Combine Date and Time into a single POSIXct column (vectorised paste
## instead of a per-row sapply).
dateStrings <- gsub("/", "-", paste(powEconData$Date, powEconData$Time))
datesPowEc <- strptime(dateStrings, "%d-%m-%Y %H:%M:%S")
powEconData$DateTime <- datesPowEc
## Convert the measurement columns (all but Date, Time and the trailing
## DateTime column) to numeric.
for (i in 3:(length(powEconData) - 1)) {
  powEconData[, i] <- as.numeric(powEconData[, i])
}
## Second plot
png("plot2.png")
ok <- !is.na(powEconData$Global_active_power)
plot(powEconData$DateTime[ok], powEconData$Global_active_power[ok],
     type = "l", xlab = "", ylab = "Global Active Power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tt_afun_utils.R
\name{make_afun}
\alias{make_afun}
\title{Create custom analysis function wrapping existing function}
\usage{
make_afun(
fun,
.stats = NULL,
.formats = NULL,
.labels = NULL,
.indent_mods = NULL,
.ungroup_stats = NULL,
...,
.null_ref_cells = ".in_ref_col" \%in\% names(formals(fun))
)
}
\arguments{
\item{fun}{function. The function to be wrapped in a new customized analysis fun. Should return named list.}
\item{.stats}{character. Names of elements to keep from \code{fun}'s full output.}
\item{.formats}{ANY. vector/list of formats to override any defaults applied by \code{fun}.}
\item{.labels}{character. Vector of labels to override defaults returned by \code{fun}}
\item{.indent_mods}{integer. Named vector of indent modifiers for the generated rows.}
\item{.ungroup_stats}{character. Vector of names, which must match elements of \code{.stats}}
\item{...}{dots. Additional arguments to \code{fun} which effectively become new defaults. These can still be
overridden by extra args within a split.}
\item{.null_ref_cells}{logical(1). Should cells for the reference column be NULL-ed
by the returned analysis function. Defaults to \code{TRUE} if \code{fun} accepts \code{.in_ref_col} as a formal argument. Note this argument occurs after \code{...} so it must be \emph{fully} specified by name when set.}
}
\value{
A function suitable for use in \code{\link{analyze}} with element selection, reformatting, and relabeling
performed automatically.
}
\description{
Create custom analysis function wrapping existing function
}
\note{
setting \code{.ungroup_stats} to non-null changes the \emph{structure} of the value(s) returned by
\code{fun}, rather than just labeling (\code{.labels}), formatting (\code{.formats}), and selecting amongst
(\code{.stats}) them. This means that subsequent \code{make_afun} calls to customize the output further
both can and must operate on the new structure, \emph{NOT} the original structure returned by \code{fun}.
See the final pair of examples below.
}
\examples{
s_summary <- function(x) {
stopifnot(is.numeric(x))
list(
n = sum(!is.na(x)),
mean_sd = c(mean = mean(x), sd = sd(x)),
min_max = range(x)
)
}
s_summary(iris$Sepal.Length)
a_summary <- make_afun(
fun = s_summary,
.formats = c(n = "xx", mean_sd = "xx.xx (xx.xx)", min_max = "xx.xx - xx.xx"),
.labels = c(n = "n", mean_sd = "Mean (sd)", min_max = "min - max")
)
a_summary(x = iris$Sepal.Length)
a_summary2 <- make_afun(a_summary, .stats = c("n", "mean_sd"))
a_summary2(x = iris$Sepal.Length)
a_summary3 <- make_afun(a_summary, .formats = c(mean_sd = "(xx.xxx, xx.xxx)"))
s_foo <- function(df, .N_col, a = 1, b = 2) {
list(
nrow_df = nrow(df),
.N_col = .N_col,
a = a,
b = b
)
}
s_foo(iris, 40)
a_foo <- make_afun(s_foo, b = 4,
.formats = c(nrow_df = "xx.xx", ".N_col" = "xx.", a = "xx", b = "xx.x"),
.labels = c(nrow_df = "Nrow df", ".N_col" = "n in cols", a = "a value", b = "b value"),
.indent_mods = c(nrow_df = 2L, a = 1L)
)
a_foo(iris, .N_col = 40)
a_foo2 <- make_afun(a_foo, .labels = c(nrow_df = "Number of Rows"))
a_foo(iris, .N_col = 40)
#grouping and further customization
s_grp <- function(df, .N_col, a = 1, b = 2) {
list(
nrow_df = nrow(df),
.N_col = .N_col,
letters = list(a = a,
b = b)
)
}
a_grp <- make_afun(s_grp, b = 3, .labels = c(nrow_df = "row count", .N_col = "count in column"),
.formats = c(nrow_df = "xx.", .N_col = "xx."),
.indent_mod = c(letters = 1L),
.ungroup_stats ="letters")
a_grp(iris, 40)
a_aftergrp <- make_afun(a_grp, .stats = c("nrow_df", "b"), .formats = c(b = "xx."))
a_aftergrp(iris, 40)
s_ref <- function(x, .in_ref_col, .ref_group) {
list(
mean_diff = mean(x) - mean(.ref_group)
)
}
a_ref <- make_afun(s_ref, .labels = c( mean_diff = "Mean Difference from Ref"))
a_ref(iris$Sepal.Length, .in_ref_col = TRUE, 1:10)
a_ref(iris$Sepal.Length, .in_ref_col = FALSE, 1:10)
}
\seealso{
\code{\link[=analyze]{analyze()}}
}
|
/man/make_afun.Rd
|
permissive
|
jcheng5/rtables
|
R
| false
| true
| 4,169
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tt_afun_utils.R
\name{make_afun}
\alias{make_afun}
\title{Create custom analysis function wrapping existing function}
\usage{
make_afun(
fun,
.stats = NULL,
.formats = NULL,
.labels = NULL,
.indent_mods = NULL,
.ungroup_stats = NULL,
...,
.null_ref_cells = ".in_ref_col" \%in\% names(formals(fun))
)
}
\arguments{
\item{fun}{function. The function to be wrapped in a new customized analysis fun. Should return named list.}
\item{.stats}{character. Names of elements to keep from \code{fun}'s full output.}
\item{.formats}{ANY. vector/list of formats to override any defaults applied by \code{fun}.}
\item{.labels}{character. Vector of labels to override defaults returned by \code{fun}}
\item{.indent_mods}{integer. Named vector of indent modifiers for the generated rows.}
\item{.ungroup_stats}{character. Vector of names, which must match elements of \code{.stats}}
\item{...}{dots. Additional arguments to \code{fun} which effectively become new defaults. These can still be
overridden by extra args within a split.}
\item{.null_ref_cells}{logical(1). Should cells for the reference column be NULL-ed
by the returned analysis function. Defaults to \code{TRUE} if \code{fun} accepts \code{.in_ref_col} as a formal argument. Note this argument occurs after \code{...} so it must be \emph{fully} specified by name when set.}
}
\value{
A function suitable for use in \code{\link{analyze}} with element selection, reformatting, and relabeling
performed automatically.
}
\description{
Create custom analysis function wrapping existing function
}
\note{
setting \code{.ungroup_stats} to non-null changes the \emph{structure} of the value(s) returned by
\code{fun}, rather than just labeling (\code{.labels}), formatting (\code{.formats}), and selecting amongst
(\code{.stats}) them. This means that subsequent \code{make_afun} calls to customize the output further
both can and must operate on the new structure, \emph{NOT} the original structure returned by \code{fun}.
See the final pair of examples below.
}
\examples{
s_summary <- function(x) {
stopifnot(is.numeric(x))
list(
n = sum(!is.na(x)),
mean_sd = c(mean = mean(x), sd = sd(x)),
min_max = range(x)
)
}
s_summary(iris$Sepal.Length)
a_summary <- make_afun(
fun = s_summary,
.formats = c(n = "xx", mean_sd = "xx.xx (xx.xx)", min_max = "xx.xx - xx.xx"),
.labels = c(n = "n", mean_sd = "Mean (sd)", min_max = "min - max")
)
a_summary(x = iris$Sepal.Length)
a_summary2 <- make_afun(a_summary, .stats = c("n", "mean_sd"))
a_summary2(x = iris$Sepal.Length)
a_summary3 <- make_afun(a_summary, .formats = c(mean_sd = "(xx.xxx, xx.xxx)"))
s_foo <- function(df, .N_col, a = 1, b = 2) {
list(
nrow_df = nrow(df),
.N_col = .N_col,
a = a,
b = b
)
}
s_foo(iris, 40)
a_foo <- make_afun(s_foo, b = 4,
.formats = c(nrow_df = "xx.xx", ".N_col" = "xx.", a = "xx", b = "xx.x"),
.labels = c(nrow_df = "Nrow df", ".N_col" = "n in cols", a = "a value", b = "b value"),
.indent_mods = c(nrow_df = 2L, a = 1L)
)
a_foo(iris, .N_col = 40)
a_foo2 <- make_afun(a_foo, .labels = c(nrow_df = "Number of Rows"))
a_foo(iris, .N_col = 40)
#grouping and further customization
s_grp <- function(df, .N_col, a = 1, b = 2) {
list(
nrow_df = nrow(df),
.N_col = .N_col,
letters = list(a = a,
b = b)
)
}
a_grp <- make_afun(s_grp, b = 3, .labels = c(nrow_df = "row count", .N_col = "count in column"),
.formats = c(nrow_df = "xx.", .N_col = "xx."),
.indent_mod = c(letters = 1L),
.ungroup_stats ="letters")
a_grp(iris, 40)
a_aftergrp <- make_afun(a_grp, .stats = c("nrow_df", "b"), .formats = c(b = "xx."))
a_aftergrp(iris, 40)
s_ref <- function(x, .in_ref_col, .ref_group) {
list(
mean_diff = mean(x) - mean(.ref_group)
)
}
a_ref <- make_afun(s_ref, .labels = c( mean_diff = "Mean Difference from Ref"))
a_ref(iris$Sepal.Length, .in_ref_col = TRUE, 1:10)
a_ref(iris$Sepal.Length, .in_ref_col = FALSE, 1:10)
}
\seealso{
\code{\link[=analyze]{analyze()}}
}
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqWcmUndoUndoConfigInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#' @field bundle_location
#' @field service_location
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqWcmUndoUndoConfigInfo <- R6::R6Class(
  'ComDayCqWcmUndoUndoConfigInfo',
  public = list(
    # All fields are optional. The five location/description fields hold
    # length-1 character vectors; `properties` holds a
    # ComDayCqWcmUndoUndoConfigProperties R6 object.
    `pid` = NULL,
    `title` = NULL,
    `description` = NULL,
    `properties` = NULL,
    `bundle_location` = NULL,
    `service_location` = NULL,
    # Construct the model, validating each supplied argument.
    # Missing arguments leave the corresponding field NULL.
    initialize = function(`pid`, `title`, `description`, `properties`, `bundle_location`, `service_location`){
      if (!missing(`pid`)) {
        stopifnot(is.character(`pid`), length(`pid`) == 1)
        self$`pid` <- `pid`
      }
      if (!missing(`title`)) {
        stopifnot(is.character(`title`), length(`title`) == 1)
        self$`title` <- `title`
      }
      if (!missing(`description`)) {
        stopifnot(is.character(`description`), length(`description`) == 1)
        self$`description` <- `description`
      }
      if (!missing(`properties`)) {
        stopifnot(R6::is.R6(`properties`))
        self$`properties` <- `properties`
      }
      if (!missing(`bundle_location`)) {
        stopifnot(is.character(`bundle_location`), length(`bundle_location`) == 1)
        self$`bundle_location` <- `bundle_location`
      }
      if (!missing(`service_location`)) {
        stopifnot(is.character(`service_location`), length(`service_location`) == 1)
        self$`service_location` <- `service_location`
      }
    },
    # Return a plain named list representation (NULL fields omitted),
    # suitable for serialization with jsonlite.
    toJSON = function() {
      ComDayCqWcmUndoUndoConfigInfoObject <- list()
      if (!is.null(self$`pid`)) {
        ComDayCqWcmUndoUndoConfigInfoObject[['pid']] <- self$`pid`
      }
      if (!is.null(self$`title`)) {
        ComDayCqWcmUndoUndoConfigInfoObject[['title']] <- self$`title`
      }
      if (!is.null(self$`description`)) {
        ComDayCqWcmUndoUndoConfigInfoObject[['description']] <- self$`description`
      }
      if (!is.null(self$`properties`)) {
        # Nested R6 object is flattened to its own list representation.
        ComDayCqWcmUndoUndoConfigInfoObject[['properties']] <- self$`properties`$toJSON()
      }
      if (!is.null(self$`bundle_location`)) {
        ComDayCqWcmUndoUndoConfigInfoObject[['bundle_location']] <- self$`bundle_location`
      }
      if (!is.null(self$`service_location`)) {
        ComDayCqWcmUndoUndoConfigInfoObject[['service_location']] <- self$`service_location`
      }
      ComDayCqWcmUndoUndoConfigInfoObject
    },
    # Populate this object from a JSON string; fields absent from the
    # JSON are left untouched.
    fromJSON = function(ComDayCqWcmUndoUndoConfigInfoJson) {
      ComDayCqWcmUndoUndoConfigInfoObject <- jsonlite::fromJSON(ComDayCqWcmUndoUndoConfigInfoJson)
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`pid`)) {
        self$`pid` <- ComDayCqWcmUndoUndoConfigInfoObject$`pid`
      }
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`title`)) {
        self$`title` <- ComDayCqWcmUndoUndoConfigInfoObject$`title`
      }
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`description`)) {
        self$`description` <- ComDayCqWcmUndoUndoConfigInfoObject$`description`
      }
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`properties`)) {
        # Round-trip the parsed sub-structure through JSON so the nested
        # R6 object can parse it with its own fromJSON().
        propertiesObject <- ComDayCqWcmUndoUndoConfigProperties$new()
        propertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmUndoUndoConfigInfoObject$properties, auto_unbox = TRUE))
        self$`properties` <- propertiesObject
      }
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`bundle_location`)) {
        self$`bundle_location` <- ComDayCqWcmUndoUndoConfigInfoObject$`bundle_location`
      }
      if (!is.null(ComDayCqWcmUndoUndoConfigInfoObject$`service_location`)) {
        self$`service_location` <- ComDayCqWcmUndoUndoConfigInfoObject$`service_location`
      }
    },
    # Serialize this object to a JSON string.
    # Fixed: the generated sprintf() template emitted string values without
    # surrounding quotes (e.g. {"pid": some text}) and pushed the
    # `properties` list through %s, producing invalid JSON. Delegating to
    # jsonlite quotes and escapes all values correctly.
    toJSONString = function() {
      as.character(jsonlite::toJSON(self$toJSON(), auto_unbox = TRUE))
    },
    # Populate this object from a JSON string, overwriting every field
    # (fields absent from the JSON become NULL).
    fromJSONString = function(ComDayCqWcmUndoUndoConfigInfoJson) {
      ComDayCqWcmUndoUndoConfigInfoObject <- jsonlite::fromJSON(ComDayCqWcmUndoUndoConfigInfoJson)
      self$`pid` <- ComDayCqWcmUndoUndoConfigInfoObject$`pid`
      self$`title` <- ComDayCqWcmUndoUndoConfigInfoObject$`title`
      self$`description` <- ComDayCqWcmUndoUndoConfigInfoObject$`description`
      ComDayCqWcmUndoUndoConfigPropertiesObject <- ComDayCqWcmUndoUndoConfigProperties$new()
      self$`properties` <- ComDayCqWcmUndoUndoConfigPropertiesObject$fromJSON(jsonlite::toJSON(ComDayCqWcmUndoUndoConfigInfoObject$properties, auto_unbox = TRUE))
      self$`bundle_location` <- ComDayCqWcmUndoUndoConfigInfoObject$`bundle_location`
      self$`service_location` <- ComDayCqWcmUndoUndoConfigInfoObject$`service_location`
    }
  )
)
|
/clients/r/generated/R/ComDayCqWcmUndoUndoConfigInfo.r
|
permissive
|
shinesolutions/swagger-aem-osgi
|
R
| false
| false
| 5,261
|
r
|
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech
#' ComDayCqWcmUndoUndoConfigInfo Class
#'
#' @field pid
#' @field title
#' @field description
#' @field properties
#' @field bundle_location
#' @field service_location
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqWcmUndoUndoConfigInfo <- R6::R6Class(
  'ComDayCqWcmUndoUndoConfigInfo',
  public = list(
    # Optional model fields; string fields are length-1 character vectors,
    # `properties` is a ComDayCqWcmUndoUndoConfigProperties R6 object.
    `pid` = NULL,
    `title` = NULL,
    `description` = NULL,
    `properties` = NULL,
    `bundle_location` = NULL,
    `service_location` = NULL,
    # Validate and store each argument that the caller actually supplied.
    initialize = function(`pid`, `title`, `description`, `properties`, `bundle_location`, `service_location`){
      if (!missing(`pid`)) {
        stopifnot(is.character(`pid`), length(`pid`) == 1)
        self$`pid` <- `pid`
      }
      if (!missing(`title`)) {
        stopifnot(is.character(`title`), length(`title`) == 1)
        self$`title` <- `title`
      }
      if (!missing(`description`)) {
        stopifnot(is.character(`description`), length(`description`) == 1)
        self$`description` <- `description`
      }
      if (!missing(`properties`)) {
        stopifnot(R6::is.R6(`properties`))
        self$`properties` <- `properties`
      }
      if (!missing(`bundle_location`)) {
        stopifnot(is.character(`bundle_location`), length(`bundle_location`) == 1)
        self$`bundle_location` <- `bundle_location`
      }
      if (!missing(`service_location`)) {
        stopifnot(is.character(`service_location`), length(`service_location`) == 1)
        self$`service_location` <- `service_location`
      }
    },
    # Return a named list of the non-NULL fields, in declaration order;
    # the nested `properties` object is flattened via its own toJSON().
    toJSON = function() {
      fields <- list(
        pid = self$`pid`,
        title = self$`title`,
        description = self$`description`,
        properties = if (!is.null(self$`properties`)) self$`properties`$toJSON(),
        bundle_location = self$`bundle_location`,
        service_location = self$`service_location`
      )
      fields[!vapply(fields, is.null, logical(1))]
    },
    # Fill in this object from a JSON string; keys absent from the JSON
    # leave the corresponding fields unchanged.
    fromJSON = function(ComDayCqWcmUndoUndoConfigInfoJson) {
      parsed <- jsonlite::fromJSON(ComDayCqWcmUndoUndoConfigInfoJson)
      if (!is.null(parsed$`pid`)) {
        self$`pid` <- parsed$`pid`
      }
      if (!is.null(parsed$`title`)) {
        self$`title` <- parsed$`title`
      }
      if (!is.null(parsed$`description`)) {
        self$`description` <- parsed$`description`
      }
      if (!is.null(parsed$`properties`)) {
        # Round-trip the sub-structure through JSON so the nested R6
        # object can consume it with its own fromJSON().
        nested <- ComDayCqWcmUndoUndoConfigProperties$new()
        nested$fromJSON(jsonlite::toJSON(parsed$properties, auto_unbox = TRUE))
        self$`properties` <- nested
      }
      if (!is.null(parsed$`bundle_location`)) {
        self$`bundle_location` <- parsed$`bundle_location`
      }
      if (!is.null(parsed$`service_location`)) {
        self$`service_location` <- parsed$`service_location`
      }
    },
    # Render the object through the generated sprintf template.
    # NOTE(review): string values are interpolated unquoted, so the output
    # is not valid JSON for character fields -- behavior preserved as-is.
    toJSONString = function() {
      sprintf(
        '{
           "pid": %s,
           "title": %s,
           "description": %s,
           "properties": %s,
           "bundle_location": %s,
           "service_location": %s
        }',
        self$`pid`,
        self$`title`,
        self$`description`,
        self$`properties`$toJSON(),
        self$`bundle_location`,
        self$`service_location`
      )
    },
    # Overwrite every field from a JSON string (missing keys become NULL).
    fromJSONString = function(ComDayCqWcmUndoUndoConfigInfoJson) {
      parsed <- jsonlite::fromJSON(ComDayCqWcmUndoUndoConfigInfoJson)
      self$`pid` <- parsed$`pid`
      self$`title` <- parsed$`title`
      self$`description` <- parsed$`description`
      nested <- ComDayCqWcmUndoUndoConfigProperties$new()
      self$`properties` <- nested$fromJSON(jsonlite::toJSON(parsed$properties, auto_unbox = TRUE))
      self$`bundle_location` <- parsed$`bundle_location`
      self$`service_location` <- parsed$`service_location`
    }
  )
)
|
/Normalidad shapiro wilk.R
|
no_license
|
JonathanF5/Prueba-shapiro-wilk-en-r
|
R
| false
| false
| 1,551
|
r
| ||
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Generate test fixtures.
#'
#' @examples
#' main();
#' Generate test fixtures.
#'
#' Draws random samples, runs Pearson correlation tests for the two-sided,
#' "less", and "greater" alternatives, and writes each result as a JSON
#' fixture file next to this script.
#'
#' @examples
#' main();
main <- function() {
	#' Get the script filepath.
	#'
	#' @return The absolute path of this script
	#'
	#' @examples
	#' filepath <- get_script_path();
	get_script_path <- function() {
		args <- commandArgs( trailingOnly = FALSE );
		needle <- "--file=";
		match <- grep( needle, args );
		if ( length( match ) > 0 ) {
			# Rscript:
			filepath <- sub( needle, "", args[match] );
		} else {
			ls_vars <- ls( sys.frames()[[1]] )
			if ( "fileName" %in% ls_vars ) {
				# Source'd via RStudio:
				filepath <- sys.frames()[[1]]$fileName; # nolint
			} else {
				# Source'd via R console:
				filepath <- sys.frames()[[1]]$ofile;
			}
		}
		return( normalizePath( filepath ) );
	}
	#' Convert a data structure to JSON.
	#'
	#' @param x A data structure to convert
	#' @return JSON blob
	to_json <- function( x ) {
		return( jsonlite::toJSON( x, digits = 16, auto_unbox = TRUE ) );
	}
	#' Generate an output absolute filepath based on the script directory.
	#'
	#' @param name An output filename
	#' @return An absolute filepath
	get_filepath <- function( name ) {
		return( paste( source_dir, "/", name, sep = "" ) );
	}
	#' Run a Pearson correlation test and write the result as a JSON fixture.
	#'
	#' @param name Output filename (e.g. "twosided.json")
	#' @param x First sample
	#' @param y Second sample
	#' @param alternative Alternative hypothesis forwarded to cor.test
	write_fixture <- function( name, x, y, alternative = "two.sided" ) {
		out <- cor.test( x, y, method = "pearson", alternative = alternative );
		fixture <- to_json( list(
			x = x,
			y = y,
			statistic = out$statistic,
			pValue = out$p.value,
			ci = out$conf.int
		) );
		write( fixture, get_filepath( name ) );
	}
	# Get the directory of this script (fixtures are written beside it):
	source_dir <- dirname( get_script_path() );
	# NOTE(review): no set.seed() call, so fixture values differ on every
	# run; confirm whether reproducible fixtures are desired.
	# Two-sided test on samples with true correlation rho:
	rho <- 0.5;
	x <- rnorm( 200 );
	y <- rnorm( 200, 0.0, sqrt( 1.0 - rho*rho ) ) + rho*x;
	write_fixture( "twosided.json", x, y );
	# One-sided ("less") test on negatively correlated samples:
	x <- rnorm( 200 );
	y <- rnorm( 200 ) - 0.5*x;
	write_fixture( "less.json", x, y, alternative = "less" );
	# One-sided ("greater") test on weakly negatively correlated samples:
	x <- rnorm( 200 );
	y <- rnorm( 200 ) - 0.1*x;
	write_fixture( "greater.json", x, y, alternative = "greater" );
}
main();
|
/lib/node_modules/@stdlib/stats/pcorrtest/test/fixtures/r/runner.R
|
permissive
|
doc22940/stdlib
|
R
| false
| false
| 3,342
|
r
|
#!/usr/bin/env Rscript
#
# @license Apache-2.0
#
# Copyright (c) 2018 The Stdlib Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Set the precision to 16 digits:
options( digits = 16 );
#' Generate test fixtures.
#'
#' @examples
#' main();
#' Generate test fixtures.
#'
#' @examples
#' main();
main <- function() {
	#' Resolve the absolute path of the currently executing script.
	#'
	#' @return The absolute path of this script
	get_script_path <- function() {
		cli_args <- commandArgs( trailingOnly = FALSE );
		prefix <- "--file=";
		hits <- grep( prefix, cli_args );
		if ( length( hits ) > 0 ) {
			# Invoked via Rscript:
			path <- sub( prefix, "", cli_args[hits] );
		} else {
			frame_vars <- ls( sys.frames()[[1]] )
			if ( "fileName" %in% frame_vars ) {
				# Source'd from RStudio:
				path <- sys.frames()[[1]]$fileName; # nolint
			} else {
				# Source'd from a plain R console:
				path <- sys.frames()[[1]]$ofile;
			}
		}
		return( normalizePath( path ) );
	}
	#' Serialize a data structure to a JSON blob with 16 significant digits.
	#'
	#' @param value A data structure to convert
	#' @return JSON blob
	to_json <- function( value ) {
		return( jsonlite::toJSON( value, digits = 16, auto_unbox = TRUE ) );
	}
	#' Build an absolute output path inside the script directory.
	#'
	#' @param name An output filename
	#' @return An absolute filepath
	get_filepath <- function( name ) {
		return( paste( source_dir, "/", name, sep = "" ) );
	}
	# Directory containing this script:
	source_dir <- dirname( get_script_path() );
	# Fixture 1: two-sided alternative with true correlation rho = 0.5.
	rho <- 0.5;
	x <- rnorm( 200 );
	y <- rnorm( 200, 0.0, sqrt( 1.0 - rho*rho ) ) + rho*x;
	out <- cor.test( x, y, method = "pearson" );
	twosided <- to_json( list(
		x = x,
		y = y,
		statistic = out$statistic,
		pValue = out$p.value,
		ci = out$conf.int
	) );
	write( twosided, get_filepath( "twosided.json" ) );
	# Fixture 2: "less" alternative on negatively correlated samples.
	x <- rnorm( 200 );
	y <- rnorm( 200 ) - 0.5*x;
	out <- cor.test( x, y, method = "pearson", alternative = "less" );
	less <- to_json( list(
		x = x,
		y = y,
		statistic = out$statistic,
		pValue = out$p.value,
		ci = out$conf.int
	) );
	write( less, get_filepath( "less.json" ) );
	# Fixture 3: "greater" alternative on weakly negatively correlated samples.
	x <- rnorm( 200 );
	y <- rnorm( 200 ) - 0.1*x;
	out <- cor.test( x, y, method = "pearson", alternative = "greater" );
	greater <- to_json( list(
		x = x,
		y = y,
		statistic = out$statistic,
		pValue = out$p.value,
		ci = out$conf.int
	) );
	write( greater, get_filepath( "greater.json" ) );
}
main();
|
## Returns, per forecast, the t statistic of a one-sample t-test that the
## squared forecast errors have mean equal to the model's expected loss
## under the test (`expected.loss.test`). Extra args in ... are forwarded
## to t.test().
setMethod("CenteredTTest", "oos.forecast",
          function(object,...)
          mapply(function(e, mu)
                 t.test(e^2, mu = mu, ...)$statistic,
                 e = ForecastErrors(object),
                 mu = expected.loss.test(object),...))
## Returns the t statistic for a test that the difference in the two
## models' squared-error loss equals its known value. The default is a
## two-sided test; that can be changed as usual for the t-test via ...
## NOTE(review): the original comment said this returns the *p-value*,
## but the code extracts `$statistic` from t.test -- confirm intent.
setMethod("CenteredTTest", "oos.pair",
          function(object,...)
          mapply(function(e1, e2, mu)
                 t.test(e1^2 - e2^2, mu = mu, ...)$statistic,
                 e1 = ForecastErrors(model.null(object)),
                 e2 = ForecastErrors(model.alt(object)),
                 mu = expected.loss.test(object),...))
|
/package/fwPackage_1.0_source/R/CenteredTTest.R
|
no_license
|
grayclhn/oos-overfit
|
R
| false
| false
| 805
|
r
|
## Returns, per forecast, the t statistic of a one-sample t-test that the
## squared forecast errors have mean equal to the model's expected loss
## under the test (`expected.loss.test`). Extra args in ... are forwarded
## to t.test().
setMethod("CenteredTTest", "oos.forecast",
          function(object,...)
          mapply(function(e, mu)
                 t.test(e^2, mu = mu, ...)$statistic,
                 e = ForecastErrors(object),
                 mu = expected.loss.test(object),...))
## Returns the t statistic for a test that the difference in the two
## models' squared-error loss equals its known value. The default is a
## two-sided test; that can be changed as usual for the t-test via ...
## NOTE(review): the original comment said this returns the *p-value*,
## but the code extracts `$statistic` from t.test -- confirm intent.
setMethod("CenteredTTest", "oos.pair",
          function(object,...)
          mapply(function(e1, e2, mu)
                 t.test(e1^2 - e2^2, mu = mu, ...)$statistic,
                 e1 = ForecastErrors(model.null(object)),
                 e2 = ForecastErrors(model.alt(object)),
                 mu = expected.loss.test(object),...))
|
\name{TopicModelcontrol-class}
\docType{class}
\alias{OPTcontrol-class}
\alias{TopicModelcontrol-class}
\alias{CTM_VEMcontrol-class}
\alias{LDAcontrol-class}
\alias{LDA_VEMcontrol-class}
\alias{LDA_Gibbscontrol-class}
\alias{coerce,NULL,LDA_VEMcontrol-method}
\alias{coerce,list,LDA_VEMcontrol-method}
\alias{coerce,NULL,LDA_Gibbscontrol-method}
\alias{coerce,list,LDA_Gibbscontrol-method}
\alias{coerce,NULL,CTM_VEMcontrol-method}
\alias{coerce,list,CTM_VEMcontrol-method}
\alias{coerce,NULL,OPTcontrol-method}
\alias{coerce,list,OPTcontrol-method}
\title{Different classes for controlling the estimation of topic models}
\description{
Classes to control the estimation of topic models which are inheriting
from the virtual base class \code{"TopicModelcontrol"}.
}
\section{Objects from the Class}{
Objects can be created from named lists.
}
\section{Slots}{
Class \code{"TopicModelcontrol"} contains
\describe{
\item{\code{seed}:}{Object of class \code{"integer"}; used to set
the seed in the external code for VEM estimation and to call
\code{set.seed} for Gibbs sampling. For Gibbs sampling it can also
be set to \code{NA} (default) to avoid changing the seed of the
random number generator in the model fitting call.}
\item{\code{verbose}:}{Object of class \code{"integer"}. If a
positive integer, then the progress is reported every
\code{verbose} iterations. If 0 (default), no output is generated
during model fitting.}
\item{\code{save}:}{Object of class \code{"integer"}. If a positive
integer the estimated model is saved all \code{verbose}
iterations. If 0 (default), no output is generated during model
fitting.}
\item{\code{prefix}:}{Object of class \code{"character"}; path
indicating where to save the intermediate results.}
\item{\code{nstart}:}{Object of class \code{"integer"}. Number of
repeated random starts.}
\item{\code{best}:}{Object of class \code{"logical"}; if \code{TRUE}
only the model with the maximum (posterior) likelihood is returned,
by default equals \code{TRUE}.}
\item{\code{keep}:}{Object of class \code{"integer"}; if a positive
integer, the log-likelihood is saved every \code{keep} iterations.}
\item{\code{estimate.beta}:}{Object of class \code{"logical"};
controls if beta, the term distribution of the topics, is fixed,
by default equals \code{TRUE}.}
}
Class \code{"VEMcontrol"} contains
\describe{
\item{\code{var}:}{Object of class \code{"OPTcontrol"}; controls the
variational inference for a single document, by default
\code{iter.max} equals 500 and \code{tol} 10^-6.}
\item{\code{em}:}{Object of class \code{"OPTcontrol"}; controls the
variational EM algorithm, by default \code{iter.max} equals 1000
and \code{tol} 10^-4.}
\item{\code{initialize}:}{Object of class \code{"character"}; one of
\code{"random"}, \code{"seeded"} and \code{"model"}, by default
equals \code{"random"}.}
}
Class \code{"LDAcontrol"} extends class \code{"TopicModelcontrol"} and
has the additional slots
\describe{
\item{\code{alpha}:}{Object of class \code{"numeric"}; initial
value for alpha.}
}
Class \code{"LDA_VEMcontrol"} extends classes
\code{"LDAcontrol"} and \code{"VEMcontrol"} and has the
additional slots
\describe{
\item{\code{estimate.alpha}:}{Object of class \code{"logical"};
indicates if the parameter alpha is fixed a-priori or estimated, by
default equals \code{TRUE}.}
}
Class \code{"LDA_Gibbscontrol"} extends classes
\code{"LDAcontrol"} and has the additional slots
\describe{
\item{\code{delta}:}{Object of class \code{"numeric"}; initial value
for delta, by default equals 0.1.}
\item{\code{iter}:}{Object of class \code{"integer"}; number of
Gibbs iterations, by default equals 2000.}
\item{\code{thin}:}{Object of class \code{"integer"}; number of
omitted in-between Gibbs iterations, by default equals \code{iter}.}
\item{\code{burnin}:}{Object of class \code{"integer"}; number of
omitted Gibbs iterations at beginning, by default equals 0.}
\item{\code{initialize}:}{Object of class \code{"character"};
one of \code{"random"}, \code{"beta"} and \code{"z"}, by
default equals \code{"random"}.}
}
Class \code{"CTM_VEMcontrol"} extends classes
\code{"TopicModelcontrol"} and \code{"VEMcontrol"} and has the
additional slots
\describe{
\item{\code{cg}:}{Object of class \code{"OPTcontrol"}; controls the
conjugate gradient iterations in fitting the variational mean and
variance per document, by default \code{iter.max} equals 500 and
\code{tol} 10^-5.}
}
Class \code{"OPTcontrol"} contains
\describe{
\item{\code{iter.max}:}{Object of class \code{"integer"}; maximum
number of iterations.}
\item{\code{tol}:}{Object of class \code{"numeric"}; tolerance for
convergence check.}
}
}
\author{Bettina Gruen}
\keyword{classes}
|
/man/tmcontrol-class.Rd
|
no_license
|
cran/topicmodels
|
R
| false
| false
| 5,051
|
rd
|
\name{TopicModelcontrol-class}
\docType{class}
\alias{OPTcontrol-class}
\alias{TopicModelcontrol-class}
\alias{CTM_VEMcontrol-class}
\alias{LDAcontrol-class}
\alias{LDA_VEMcontrol-class}
\alias{LDA_Gibbscontrol-class}
\alias{coerce,NULL,LDA_VEMcontrol-method}
\alias{coerce,list,LDA_VEMcontrol-method}
\alias{coerce,NULL,LDA_Gibbscontrol-method}
\alias{coerce,list,LDA_Gibbscontrol-method}
\alias{coerce,NULL,CTM_VEMcontrol-method}
\alias{coerce,list,CTM_VEMcontrol-method}
\alias{coerce,NULL,OPTcontrol-method}
\alias{coerce,list,OPTcontrol-method}
\title{Different classes for controlling the estimation of topic models}
\description{
Classes to control the estimation of topic models which are inheriting
from the virtual base class \code{"TopicModelcontrol"}.
}
\section{Objects from the Class}{
Objects can be created from named lists.
}
\section{Slots}{
Class \code{"TopicModelcontrol"} contains
\describe{
\item{\code{seed}:}{Object of class \code{"integer"}; used to set
the seed in the external code for VEM estimation and to call
\code{set.seed} for Gibbs sampling. For Gibbs sampling it can also
be set to \code{NA} (default) to avoid changing the seed of the
random number generator in the model fitting call.}
\item{\code{verbose}:}{Object of class \code{"integer"}. If a
positive integer, then the progress is reported every
\code{verbose} iterations. If 0 (default), no output is generated
during model fitting.}
\item{\code{save}:}{Object of class \code{"integer"}. If a positive
integer the estimated model is saved all \code{verbose}
iterations. If 0 (default), no output is generated during model
fitting.}
\item{\code{prefix}:}{Object of class \code{"character"}; path
indicating where to save the intermediate results.}
\item{\code{nstart}:}{Object of class \code{"integer"}. Number of
repeated random starts.}
\item{\code{best}:}{Object of class \code{"logical"}; if \code{TRUE}
only the model with the maximum (posterior) likelihood is returned,
by default equals \code{TRUE}.}
\item{\code{keep}:}{Object of class \code{"integer"}; if a positive
integer, the log-likelihood is saved every \code{keep} iterations.}
\item{\code{estimate.beta}:}{Object of class \code{"logical"};
controls if beta, the term distribution of the topics, is fixed,
by default equals \code{TRUE}.}
}
Class \code{"VEMcontrol"} contains
\describe{
\item{\code{var}:}{Object of class \code{"OPTcontrol"}; controls the
variational inference for a single document, by default
\code{iter.max} equals 500 and \code{tol} 10^-6.}
\item{\code{em}:}{Object of class \code{"OPTcontrol"}; controls the
variational EM algorithm, by default \code{iter.max} equals 1000
and \code{tol} 10^-4.}
\item{\code{initialize}:}{Object of class \code{"character"}; one of
\code{"random"}, \code{"seeded"} and \code{"model"}, by default
equals \code{"random"}.}
}
Class \code{"LDAcontrol"} extends class \code{"TopicModelcontrol"} and
has the additional slots
\describe{
\item{\code{alpha}:}{Object of class \code{"numeric"}; initial
value for alpha.}
}
Class \code{"LDA_VEMcontrol"} extends classes
\code{"LDAcontrol"} and \code{"VEMcontrol"} and has the
additional slots
\describe{
\item{\code{estimate.alpha}:}{Object of class \code{"logical"};
indicates if the parameter alpha is fixed a-priori or estimated, by
default equals \code{TRUE}.}
}
Class \code{"LDA_Gibbscontrol"} extends classes
\code{"LDAcontrol"} and has the additional slots
\describe{
\item{\code{delta}:}{Object of class \code{"numeric"}; initial value
for delta, by default equals 0.1.}
\item{\code{iter}:}{Object of class \code{"integer"}; number of
Gibbs iterations, by default equals 2000.}
\item{\code{thin}:}{Object of class \code{"integer"}; number of
omitted in-between Gibbs iterations, by default equals \code{iter}.}
\item{\code{burnin}:}{Object of class \code{"integer"}; number of
omitted Gibbs iterations at beginning, by default equals 0.}
\item{\code{initialize}:}{Object of class \code{"character"};
one of \code{"random"}, \code{"beta"} and \code{"z"}, by
default equals \code{"random"}.}
}
Class \code{"CTM_VEMcontrol"} extends classes
\code{"TopicModelcontrol"} and \code{"VEMcontrol"} and has the
additional slots
\describe{
\item{\code{cg}:}{Object of class \code{"OPTcontrol"}; controls the
conjugate gradient iterations in fitting the variational mean and
variance per document, by default \code{iter.max} equals 500 and
\code{tol} 10^-5.}
}
Class \code{"OPTcontrol"} contains
\describe{
\item{\code{iter.max}:}{Object of class \code{"integer"}; maximum
number of iterations.}
\item{\code{tol}:}{Object of class \code{"numeric"}; tolerance for
convergence check.}
}
}
\author{Bettina Gruen}
\keyword{classes}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitModels.R
\name{fitOneModel}
\alias{fitOneModel}
\title{fitOneModel
Train a single subtype model.}
\usage{
fitOneModel(Xbin, Ybin, params = list(max_depth = 2, eta = 0.5, nrounds =
33, nthread = 5))
}
\arguments{
\item{Xbin}{Gene expression matrix.}
\item{Ybin}{Phenotype vector.}
\item{params}{Params for xgboost.}
}
\value{
A single xgboost classifier.
}
\description{
fitOneModel
Train a single subtype model.
}
\examples{
modC1 <- fitOneModel(ebppGeneExpr, phenotype)
}
|
/man/fitOneModel.Rd
|
permissive
|
biodan25/ImmuneSubtypeClassifier
|
R
| false
| true
| 587
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fitModels.R
\name{fitOneModel}
\alias{fitOneModel}
\title{fitOneModel
Train a single subtype model.}
\usage{
fitOneModel(Xbin, Ybin, params = list(max_depth = 2, eta = 0.5, nrounds =
33, nthread = 5))
}
\arguments{
\item{Xbin}{Gene expression matrix.}
\item{Ybin}{Phenotype vector.}
\item{params}{Params for xgboost.}
}
\value{
A single xgboost classifier.
}
\description{
fitOneModel
Train a single subtype model.
}
\examples{
modC1 <- fitOneModel(ebppGeneExpr, phenotype)
}
|
# Infectious Disease Laboratory Statistical Data Analysis Script
# Written by: Christopher Sinclair - Research Programmer
# Supervisors: Dr. William Mattingly and Stephen Furmanek
# Install any necessary packages
install.packages('tidyverse')
install.packages('forecast')
library(tidyverse)
library(forecast)
# Set any options needed for plots or data wrangling
options(scipen = 5) # turns off scientitic notation for large numbers
# Generic query function for querying data from the local MySQL server.
#
# Args:
#   q: SQL query string.
# Returns:
#   A data frame holding the query result.
query <- function(q) {
  library(RMySQL) # use the RMySQL package
  # NOTE(review): credentials are hardcoded; consider Sys.getenv() instead.
  conn <- dbConnect(MySQL(), user="root", password="****", host="127.0.0.1") # mysql connection string
  # Close *this* connection when the function exits, even if dbGetQuery()
  # errors. (The original disconnected every open MySQL connection via
  # dbListConnections(), tearing down connections owned by other code, and
  # leaked the connection whenever the query failed.)
  on.exit(dbDisconnect(conn), add = TRUE)
  dbGetQuery(conn, q) # run query and return result
}
####### Amount billed by top ten tests over five years #######
total_charges <- query("select sum(total_charge) from labmgmt_db.invoices") # query for total charges over the five year period
all_ids <- query("select test_id from labmgmt_db.patient_test") # query for every test id in the database
top_ids <- lapply(seq_along(1:10), function(i) { # gather the top ten tests that have the most billed (sales)
names(sort(-table(all_ids)))[i] # neat little trick for finding the mode of a list
})
top_ids <- unlist(top_ids) # turn the list into a numeric vector
top_test_charges <- lapply(seq_along(top_ids), function(i,q) { # find the amount charged for the top ten tests
id_query <- paste("select amount from labmgmt_db.test where id=", q[i])
query(id_query)
}, q = top_ids)
top_test_charges <- unlist(top_test_charges)
top_test_descs <- lapply(seq_along(top_ids), function(i, q) { # grab the test descriptions for the top ten tests (this is overwritten with abbreviations)
desc_query <- paste("select description from labmgmt_db.test where id=", q[i])
query(desc_query)
}, q = top_ids)
top_test_descs <- unlist(top_test_descs)
number_of_bills <- lapply(seq_along(top_ids), function(i,q) { # query the number of times each test was actually billed
num_query <- paste("select count(distinct(id)) FROM labmgmt_db.patient_test where test_id =", q[i])
query(num_query)
}, q = top_ids)
number_of_bills <- unlist(number_of_bills)
individual_test_charges <- c(1:10)
for (i in 1:length(top_test_charges)) {
individual_test_charges[i] <- top_test_charges[i] * number_of_bills[i] # calculate aggregate billing for each test over the five year period
}
top_test_descs <- c("QFT-CI", "LPN", "MPN", "CPN", "QFT-CI (UL)", "LYME", "QFT", "RIC-G", "RIC-M", "ECP") # manually entered in abbreviated test descriptions because the descriptions queried from the database are too long for the graph
barplot(individual_test_charges, names.arg=top_test_descs, ylim=c(0,1000000), xlab="Top Ordered Tests", ylab = "Amount Billed", main="Amount Billed for Top Ordered Tests",yaxt="n")
axis(2, at=seq(0,1000000,100000), labels=paste(seq(0,1000,100), "k", sep=""))
####### Time Series analysis on the amount billed from the laboratory per month #######
# Month boundaries from Aug 2013 through Jan 2018 (54 boundaries -> 53 intervals).
months<-seq.Date(from=as.Date("2013-08-01"), to=as.Date("2018-01-01"), by="month") # create a vector with each month from 2013 to 2015
# Sum of charges for each interval [months[i], months[i + 1]]. Iterate only
# to length(months) - 1: the original looped over every boundary, so its last
# iteration indexed one past the end of `months` and sent MySQL a malformed
# "between '2018-01-01' and 'NA'" clause.
amt_billed_by_month <- lapply(seq_len(length(months) - 1), function(i,q) {
  monthly_query <- paste("select sum(charge) as charges from labmgmt_db.invoice_records where date_entered between '",q[i], "' and '",q[i + 1],"'") # query for sums of charges per month
  query(monthly_query) # query the database
}, q=months)
amt_billed_by_month <- unlist(amt_billed_by_month) # transform list into vector
amt_billed_by_month[is.na(amt_billed_by_month)] <- 0 # replace NA values with 0s (months with no invoices)
amt_billed_by_month <- amt_billed_by_month[1:53] # keep the 53 complete months
months <- months[1:53] # align month labels with the retained values
#plot(months, amt_billed_by_month, type="o", pch=20, cex=1.5, lwd=1, lty=1, ylim=c(0,160000), yaxt="n", xlab = "Month", ylab="Amount Billed", main = "Amount Billed per Month") # plot amount billed by month, lineplot with dots, size of dot = 20, width of dot = 1.5, line width = 2, line type = 1
#axis(2, at=seq(0,160000,10000), labels=paste(seq(0,160,10),"k", sep=""))
#abline(h=seq(0,160000,10000), lty=3)
# Monthly billing as a time series starting Aug 2013, 12 observations/year.
amt <- ts(amt_billed_by_month, start=c(2013,8), frequency = 12) # create a time series on the amount billed by month
print(amt)
library(forecast)
plot(decompose(amt)) # if there appears to be a non-random trend, you need to difference it
# order = c(AR, I, MA); AR = no. autoregressive terms; MA = no. moving avg terms; I = is it differenced (use 1 for single-lag observation differencing)
p <- Arima(amt, order=c(1,1,1)) # create an ARIMA model with 1 autoregressive term, 1 lagging differenced term, and 1 moving average term
acf(diff(amt)) # number of peaks past 1 is number of autoregressive terms
pacf(diff(amt)) # number of peaks is number of moving average terms
forecast(p,4) # forecast of ARIMA model with 4 predicted data points
plot(forecast(p, 4), ylim=c(0,160000), yaxt="n", type="o", pch=20, cex=1.5, lwd=1, lty=1, xlab = "Month", ylab = "Amount Billed", main = "Forecasts from ARIMA(1,1,1) on Amount Billed per Month")
axis(2, at=seq(0,160000,40000), labels=paste(seq(0,160,40),"k",sep=""))
abline(h=seq(0,160000,10000), lty=3)
summary(p)
|
/lab_data_analysis.r
|
no_license
|
christophersinclair/Lab-Revenue-Analysis
|
R
| false
| false
| 5,800
|
r
|
# Infectious Disease Laboratory Statistical Data Analysis Script
# Written by: Christopher Sinclair - Research Programmer
# Supervisors: Dr. William Mattingly and Stephen Furmanek
# Install any necessary packages — only when they are missing, so the script
# does not re-download and re-install them on every run (the original called
# install.packages() unconditionally).
if (!requireNamespace("tidyverse", quietly = TRUE)) install.packages("tidyverse")
if (!requireNamespace("forecast", quietly = TRUE)) install.packages("forecast")
library(tidyverse)
library(forecast)
# Set any options needed for plots or data wrangling
options(scipen = 5) # turns off scientific notation for large numbers
# Generic query function for querying data from local MySQL.
#
# Runs a single SQL statement against the local server and returns the result
# as a data frame. The connection is registered with on.exit() so it is
# released even when dbGetQuery() fails — the original only disconnected
# after a successful query (leaking the connection on error), and it did so
# by tearing down *every* open MySQL connection in the session.
#
# @param q Character scalar: the SQL statement to execute.
# @return A data.frame holding the query result.
query <- function(q) { # function to query mysql
  library(RMySQL) # use the RMySQL package
  conn <- dbConnect(MySQL(), user="root", password="****", host="127.0.0.1") # mysql connection string
  on.exit(dbDisconnect(conn), add = TRUE) # guaranteed cleanup, even on error
  dbGetQuery(conn, q) # run query and return the result
}
####### Amount billed by top ten tests over five years #######
# Pull the grand total billed and the full list of billed test ids.
total_charges <- query("select sum(total_charge) from labmgmt_db.invoices") # query for total charges over the five year period
# NOTE(review): total_charges is not referenced again in this script —
# confirm whether it is still needed.
all_ids <- query("select test_id from labmgmt_db.patient_test") # query for every test id in the database
# Ten most frequently billed test ids: tabulate every test id, sort the
# negated counts (so the largest counts come first) and keep the first ten
# names. Replaces the original element-by-element lapply, which recomputed
# table()/sort() once per element; the ordering (including ties) is the same.
top_ids <- names(sort(-table(all_ids)))[1:10]
# Per-test charge for each of the top ten tests.
top_test_charges <- lapply(seq_along(top_ids), function(i,q) { # find the amount charged for the top ten tests
  id_query <- paste("select amount from labmgmt_db.test where id=", q[i])
  query(id_query)
}, q = top_ids)
top_test_charges <- unlist(top_test_charges)
# Full descriptions of the top ten tests. These are replaced with manual
# abbreviations just before plotting, so they are informational only.
top_test_descs <- lapply(seq_along(top_ids), function(i, q) { # grab the test descriptions for the top ten tests (this is overwritten with abbreviations)
  desc_query <- paste("select description from labmgmt_db.test where id=", q[i])
  query(desc_query)
}, q = top_ids)
top_test_descs <- unlist(top_test_descs)
# Number of times each test was actually billed (distinct patient_test ids).
number_of_bills <- lapply(seq_along(top_ids), function(i,q) { # query the number of times each test was actually billed
  num_query <- paste("select count(distinct(id)) FROM labmgmt_db.patient_test where test_id =", q[i])
  query(num_query)
}, q = top_ids)
number_of_bills <- unlist(number_of_bills)
# Aggregate billing per test over the five-year period: per-test charge
# times the number of times that test was billed. Vectorised; replaces the
# original element-by-element `for (i in 1:length(...))` loop, which produced
# exactly the same values.
individual_test_charges <- top_test_charges * number_of_bills
# Abbreviated test descriptions, entered manually because the descriptions
# queried from the database are too long for the axis labels.
top_test_descs <- c("QFT-CI", "LPN", "MPN", "CPN", "QFT-CI (UL)", "LYME", "QFT", "RIC-G", "RIC-M", "ECP") # manually entered in abbreviated test descriptions because the descriptions queried from the database are too long for the graph
barplot(individual_test_charges, names.arg=top_test_descs, ylim=c(0,1000000), xlab="Top Ordered Tests", ylab = "Amount Billed", main="Amount Billed for Top Ordered Tests",yaxt="n")
# Y axis labelled in thousands ("0k" ... "1000k").
axis(2, at=seq(0,1000000,100000), labels=paste(seq(0,1000,100), "k", sep=""))
####### Time Series analysis on the amount billed from the laboratory per month #######
# Month boundaries from Aug 2013 through Jan 2018 (54 boundaries -> 53 intervals).
months<-seq.Date(from=as.Date("2013-08-01"), to=as.Date("2018-01-01"), by="month") # create a vector with each month from 2013 to 2015
# Sum of charges for each interval [months[i], months[i + 1]]. Iterate only
# to length(months) - 1: the original looped over every boundary, so its last
# iteration indexed one past the end of `months` and sent MySQL a malformed
# "between '2018-01-01' and 'NA'" clause.
amt_billed_by_month <- lapply(seq_len(length(months) - 1), function(i,q) {
  monthly_query <- paste("select sum(charge) as charges from labmgmt_db.invoice_records where date_entered between '",q[i], "' and '",q[i + 1],"'") # query for sums of charges per month
  query(monthly_query) # query the database
}, q=months)
amt_billed_by_month <- unlist(amt_billed_by_month) # transform list into vector
amt_billed_by_month[is.na(amt_billed_by_month)] <- 0 # replace NA values with 0s (months with no invoices)
amt_billed_by_month <- amt_billed_by_month[1:53] # keep the 53 complete months
months <- months[1:53] # align month labels with the retained values
#plot(months, amt_billed_by_month, type="o", pch=20, cex=1.5, lwd=1, lty=1, ylim=c(0,160000), yaxt="n", xlab = "Month", ylab="Amount Billed", main = "Amount Billed per Month") # plot amount billed by month, lineplot with dots, size of dot = 20, width of dot = 1.5, line width = 2, line type = 1
#axis(2, at=seq(0,160000,10000), labels=paste(seq(0,160,10),"k", sep=""))
#abline(h=seq(0,160000,10000), lty=3)
# Monthly billing as a time series starting Aug 2013, 12 observations/year.
amt <- ts(amt_billed_by_month, start=c(2013,8), frequency = 12) # create a time series on the amount billed by month
print(amt)
library(forecast)
plot(decompose(amt)) # if there appears to be a non-random trend, you need to difference it
# order = c(AR, I, MA); AR = no. autoregressive terms; MA = no. moving avg terms; I = is it differenced (use 1 for single-lag observation differencing)
p <- Arima(amt, order=c(1,1,1)) # create an ARIMA model with 1 autoregressive term, 1 lagging differenced term, and 1 moving average term
acf(diff(amt)) # number of peaks past 1 is number of autoregressive terms
pacf(diff(amt)) # number of peaks is number of moving average terms
forecast(p,4) # forecast of ARIMA model with 4 predicted data points
plot(forecast(p, 4), ylim=c(0,160000), yaxt="n", type="o", pch=20, cex=1.5, lwd=1, lty=1, xlab = "Month", ylab = "Amount Billed", main = "Forecasts from ARIMA(1,1,1) on Amount Billed per Month")
axis(2, at=seq(0,160000,40000), labels=paste(seq(0,160,40),"k",sep=""))
abline(h=seq(0,160000,10000), lty=3)
summary(p)
|
########################################
# processing large-scale data
# date: 2021.01.25 - 01.25
# author: Jing Xiao
# ref: https://jieandze1314.osca.top/03/03-14
#
# Walkthrough of a single-cell RNA-seq workflow on the 10x PBMC4k data set
# (download -> QC -> normalisation -> dimensionality reduction -> clustering),
# followed by notes on approximate algorithms, parallel back-ends, and
# out-of-memory (HDF5-backed) matrices for large data.
########################################
# rm all objects --------------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the current workspace; acceptable for an
# interactive tutorial script, but do not source() this from other code.
rm(list = ls())
# set work directory ----------------------------------------------------------
# NOTE(review): machine-specific absolute path; adjust before running elsewhere.
# work_dir <- "/home1/jxiao/project/scRNA-seq/data/test_data"
work_dir <- "D:/JLab/project/scRNA-seq/data/test_data"
setwd(work_dir)
# load data -------------------------------------------------------------------
#################### PBMC
# Download the raw PBMC4k data from 10x Genomics.
library(BiocFileCache)
# Create (or reuse) a raw_data cache directory under the current working directory.
bfc <- BiocFileCache("raw_data", ask = FALSE)
# Download link:
# http://cf.10xgenomics.com/samples/cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz
raw_path <- bfcrpath(
    bfc,
    file.path(
        "http://cf.10xgenomics.com/samples",
        "cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz"
    )
)
# Extract the archive into a new pbmc4k directory under the working directory.
untar(raw_path, exdir = file.path(getwd(), "pbmc4k"))
library(DropletUtils)
fname <- file.path(getwd(), "pbmc4k/raw_gene_bc_matrices/GRCh38")
sce_pbmc <- read10xCounts(fname, col.names = TRUE)
dim(sce_pbmc)
# [1] 33694 737280
# gene annotation -------------------------------------------------------------
# Consolidate gene identifiers: use the gene symbol where it is unique,
# otherwise fall back to the Ensembl ID.
library(scater)
rownames(sce_pbmc) <- uniquifyFeatureNames(
    rowData(sce_pbmc)$ID,
    rowData(sce_pbmc)$Symbol
)
# Add the chromosome of each gene (used below to flag mitochondrial genes).
# BiocManager::install("EnsDb.Hsapiens.v86")
library(EnsDb.Hsapiens.v86)
location <- mapIds(
    EnsDb.Hsapiens.v86,
    keys = rowData(sce_pbmc)$ID,
    column = "SEQNAME",
    keytype = "GENEID"
)
# detect dropout -----------------------------------------------------------
# Distinguish real cells from empty droplets; keep barcodes at FDR <= 0.1%.
set.seed(100)
e_out <- emptyDrops(counts(sce_pbmc))
sce_pbmc_filtered <- sce_pbmc[, which(e_out$FDR <= 0.001)]
dim(sce_pbmc_filtered)
# [1] 33694 4300
# qc, especially for mitochondrial --------------------------------------------
# Drop cells whose mitochondrial read percentage is an upper outlier.
stats <- perCellQCMetrics(
    sce_pbmc_filtered,
    subsets = list(Mito = which(location == "MT"))
)
high_mito <- isOutlier(stats$subsets_Mito_percent, type = "higher")
sce_pbmc_final <- sce_pbmc_filtered[, !high_mito]
dim(sce_pbmc_final)
# [1] 33694 3985
# normalization by deconvolution -----------------------------------------------
library(scran)
set.seed(1000)
clust_pbmc <- quickCluster(sce_pbmc_final)
sce_pbmc_final <- computeSumFactors(
    sce_pbmc_final,
    cluster = clust_pbmc
)
# logNormCounts()
sce_pbmc_final <- logNormCounts(sce_pbmc_final)
# measure the degree of change by data distribution ---------------------------
# and HVGs selection by proportion
# Model per-gene variance against Poisson technical noise and keep the top
# 10% most highly variable genes (HVGs).
set.seed(1001)
dec_pbmc_pois <- modelGeneVarByPoisson(sce_pbmc_final)
top_hvgs_pbmc <- getTopHVGs(dec_pbmc_pois, prop = 0.1)
length(top_hvgs_pbmc)
# [1] 1599
# dimension reduce, using three methods ---------------------------------------
##### PCA
set.seed(10000)
sce_pbmc_final <- denoisePCA(
    sce_pbmc_final,
    subset.row = top_hvgs_pbmc,
    technical = dec_pbmc_pois
)
dim(reducedDim(sce_pbmc_final, "PCA"))
# [1] 3985 9
##### t-SNE
set.seed(100000)
sce_pbmc_final <- runTSNE(sce_pbmc_final, dimred = "PCA")
dim(reducedDim(sce_pbmc_final, "TSNE"))
# [1] 3985 2
##### UMAP
set.seed(1000000)
sce_pbmc_final <- runUMAP(sce_pbmc_final, dimred = "PCA")
dim(reducedDim(sce_pbmc_final, "UMAP"))
# [1] 3985 2
# clustering ------------------------------------------------------------------
##### graph-based clustering
# Build a shared nearest-neighbour graph (SNNG) from the top PCs.
library(scran)
g_pbmc_k10 <- buildSNNGraph(sce_pbmc_final, k = 10, use.dimred = "PCA")
# Identify clusters.
clust_pbmc_k10 <- igraph::cluster_walktrap(g_pbmc_k10)$membership
table(clust_pbmc_k10)
# clust_pbmc_k10
#   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16
# 205 508 541  56 374 125  46 432 302 867  47 155 166  61  84  16
# Store the cluster labels as a factor on the SingleCellExperiment object.
library(scater)
colLabels(sce_pbmc_final) <- factor(clust_pbmc_k10)
sce_pbmc_final
# class: SingleCellExperiment
# dim: 33694 3985
# metadata(1): Samples
# assays(2): counts logcounts
# rownames(33694): RP11-34P13.3 FAM138A ... AC213203.1 FAM231B
# rowData names(2): ID Symbol
# colnames(3985): AAACCTGAGAAGGCCT-1 AAACCTGAGACAGACC-1 ... TTTGTCAGTTAAGACA-1
#   TTTGTCATCCCAAGAT-1
# colData names(4): Sample Barcode sizeFactor label
# reducedDimNames(3): PCA TSNE UMAP
# altExpNames(0):
##### fast approximations
## approximate nearest-neighbour search
library(scran)
# NOTE(review): unconditional install on every run; consider guarding with
# if (!requireNamespace("BiocNeighbors", quietly = TRUE)).
BiocManager::install("BiocNeighbors")
library(BiocNeighbors)
snn_gr <- buildSNNGraph(
    sce_pbmc_final,
    BNPARAM = AnnoyParam(),
    use.dimred = "PCA"
)
# Identify clusters and compare them with the exact graph's clusters.
clusters <- igraph::cluster_walktrap(snn_gr)$membership
table(Exact = colLabels(sce_pbmc_final), Approx = clusters)
#       Approx
# Exact    1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16
#    1   205   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0
#    2     0 479   0   0   2   0   0   0   0  27   0   0   0   0   0   0
#    3     0   0 540   0   1   0   0   0   0   0   0   0   0   0   0   0
#    4     0   0   0  55   0   0   0   0   0   1   0   0   0   0   0   0
#    5     0  25   0   0 349   0   0   0   0   0   0   0   0   0   0   0
#    6     0   0   0   0   0 125   0   0   0   0   0   0   0   0   0   0
#    7     0   0   0   0   0   0  46   0   0   0   0   0   0   0   0   0
#    8     0   0   0   0   0   0   0 432   0   0   0   0   0   0   0   0
#    9     0   0   0   1   0   0   0  10 291   0   0   0   0   0   0   0
#    10    0  28   0   0   0   0   0   0   0 839   0   0   0   0   0   0
#    11    0   0   0   0   0   0   0   0   0   0  47   0   0   0   0   0
#    12    0   0   0   0   0   0   0   0   0   0   0 155   0   0   0   0
#    13    0   0   0   0   0   0   0   0   0   0   0   0 166   0   0   0
#    14    0   0   0   0   0   0   0   0   0   0   0   0   0  61   0   0
#    15    0   0   0   0   0   0   0   0   0   0   0   0   0   0  84   0
#    16    0   0   0   0   0   0   0   0   0   0   0   0   0   0   0  16
## singular value decomposition (SVD)
library(scater)
library(BiocSingular)
# method_1, randomized SVD (RSVD)
set.seed(101000)
r_out <- runPCA(
    sce_pbmc_final,
    ncomponents = 20,
    BSPARAM = RandomParam()
)
str(reducedDim(r_out))
# num [1:3985, 1:20] 15.05 13.43 -8.67 -7.74 6.45 ...
# - attr(*, "dimnames")=List of 2
# ..$ : chr [1:3985] "AAACCTGAGAAGGCCT-1" "AAACCTGAGACAGACC-1" "AAACCTGAGGCATGGT-1" "AAACCTGCAAGGTTCT-1" ...
# ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# - attr(*, "varExplained")= num [1:20] 85.36 40.43 23.22 8.99 6.66 ...
# - attr(*, "percentVar")= num [1:20] 19.85 9.4 5.4 2.09 1.55 ...
# - attr(*, "rotation")= num [1:500, 1:20] 0.203 0.1834 0.1779 0.1063 0.0647 ...
# ..- attr(*, "dimnames")=List of 2
# .. ..$ : chr [1:500] "LYZ" "S100A9" "S100A8" "HLA-DRA" ...
# .. ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# method_2, IRLBA
set.seed(101001)
i_out <- runPCA(
    sce_pbmc_final,
    ncomponents = 20,
    BSPARAM = IrlbaParam()
)
str(reducedDim(i_out))
# num [1:3985, 1:20] 15.05 13.43 -8.67 -7.74 6.45 ...
# - attr(*, "dimnames")=List of 2
# ..$ : chr [1:3985] "AAACCTGAGAAGGCCT-1" "AAACCTGAGACAGACC-1" "AAACCTGAGGCATGGT-1" "AAACCTGCAAGGTTCT-1" ...
# ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# - attr(*, "varExplained")= num [1:20] 85.36 40.43 23.22 8.99 6.66 ...
# - attr(*, "percentVar")= num [1:20] 19.85 9.4 5.4 2.09 1.55 ...
# - attr(*, "rotation")= num [1:500, 1:20] 0.203 0.1834 0.1779 0.1063 0.0647 ...
# ..- attr(*, "dimnames")=List of 2
# .. ..$ : chr [1:500] "LYZ" "S100A9" "S100A8" "HLA-DRA" ...
# .. ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# parallel computing -----------------------------------------------------------
library(BiocParallel)
##### List the registered parallel back-ends; the first entry is the default.
registered()
# $SnowParam
# class: SnowParam
# bpisup: FALSE; bpnworkers: 6; bptasks: 0; bpjobname: BPJOB
# bplog: FALSE; bpthreshold: INFO; bpstopOnError: TRUE
# bpRNGseed: ; bptimeout: 2592000; bpprogressbar: FALSE
# bpexportglobals: TRUE
# bplogdir: NA
# bpresultdir: NA
# cluster type: SOCK
#
# $SerialParam
# class: SerialParam
# bpisup: FALSE; bpnworkers: 1; bptasks: 0; bpjobname: BPJOB
# bplog: FALSE; bpthreshold: INFO; bpstopOnError: TRUE
# bpRNGseed: ; bptimeout: 2592000; bpprogressbar: FALSE
# bpexportglobals: TRUE
# bplogdir: NA
# bpresultdir: NA
##### Change the default back-end
default <- registered()
# Switch the default to BatchtoolsParam
register(BatchtoolsParam(workers = 10), default = TRUE)
names(registered())
# Restore the original settings
for(param in rev(default)) {
    register(param)
}
##### Usage
# The same computation with different back-ends
dec_pbmc_mc <- modelGeneVar(sce_pbmc_final, BPPARAM = MulticoreParam(2))
dec_pbmc_snow <- modelGeneVar(sce_pbmc_final, BPPARAM = SnowParam(5))
##### Using BatchtoolsParam() on a SLURM HPC
# 10 jobs in total, each with a 2 h walltime, 8 GB of memory, and 1 CPU
bpp <- BatchtoolsParam(
    10,
    cluster = "slurm",
    resources = list(
        walltime = 7200,
        memory = 8000,
        ncpus = 1
    )
)
# data that may not fit in memory ----------------------------------------------
# Take 20,000 of the 1.3 million brain cells
# NOTE(review): unconditional install on every run; consider guarding with
# if (!requireNamespace("TENxBrainData", quietly = TRUE)).
BiocManager::install("TENxBrainData")
library(TENxBrainData)
sce_brain <- TENxBrainData20k()
sce_brain
# class: SingleCellExperiment
# dim: 27998 20000
# metadata(0):
# assays(1): counts
# rownames: NULL
# rowData names(2): Ensembl Symbol
# colnames: NULL
# colData names(4): Barcode Sequence Library Mouse
# reducedDimNames(0):
# altExpNames(0):
# Inspect the count matrix: HDF5-backed, so the data stay on disk.
counts(sce_brain)
# <27998 x 20000> matrix of class HDF5Matrix and type "integer":
#          [,1] [,2] [,3] [,4] ... [,19997] [,19998] [,19999] [,20000]
#     [1,]    0    0    0    0 .        0        0        0        0
#     [2,]    0    0    0    0 .        0        0        0        0
#     [3,]    0    0    0    0 .        0        0        0        0
#     [4,]    0    0    0    0 .        0        0        0        0
#     [5,]    0    0    0    0 .        0        0        0        0
#      ...    .    .    .    . .        .        .        .        .
# [27994,]    0    0    0    0 .        0        0        0        0
# [27995,]    0    0    0    1 .        0        2        0        0
# [27996,]    0    0    0    0 .        0        1        0        0
# [27997,]    0    0    0    0 .        0        0        0        0
# [27998,]    0    0    0    0 .        0        0        0        0
# In-memory size of the R object (tiny: just a reference to the HDF5 file).
object.size(counts(sce_brain))
# 2560 bytes
# Actual on-disk size of the backing HDF5 file.
file.info(path(counts(sce_brain)))$size
# [1] 76264332
|
/scRNA-seq/codes/processing_large_data_14.R
|
no_license
|
hsuh001/project
|
R
| false
| false
| 10,926
|
r
|
########################################
# processing large-scale data
# date: 2021.01.25 - 01.25
# author: Jing Xiao
# ref: https://jieandze1314.osca.top/03/03-14
#
# Walkthrough of a single-cell RNA-seq workflow on the 10x PBMC4k data set
# (download -> QC -> normalisation -> dimensionality reduction -> clustering),
# followed by notes on approximate algorithms, parallel back-ends, and
# out-of-memory (HDF5-backed) matrices for large data.
########################################
# rm all objects --------------------------------------------------------------
# NOTE(review): rm(list = ls()) wipes the current workspace; acceptable for an
# interactive tutorial script, but do not source() this from other code.
rm(list = ls())
# set work directory ----------------------------------------------------------
# NOTE(review): machine-specific absolute path; adjust before running elsewhere.
# work_dir <- "/home1/jxiao/project/scRNA-seq/data/test_data"
work_dir <- "D:/JLab/project/scRNA-seq/data/test_data"
setwd(work_dir)
# load data -------------------------------------------------------------------
#################### PBMC
# Download the raw PBMC4k data from 10x Genomics.
library(BiocFileCache)
# Create (or reuse) a raw_data cache directory under the current working directory.
bfc <- BiocFileCache("raw_data", ask = FALSE)
# Download link:
# http://cf.10xgenomics.com/samples/cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz
raw_path <- bfcrpath(
    bfc,
    file.path(
        "http://cf.10xgenomics.com/samples",
        "cell-exp/2.1.0/pbmc4k/pbmc4k_raw_gene_bc_matrices.tar.gz"
    )
)
# Extract the archive into a new pbmc4k directory under the working directory.
untar(raw_path, exdir = file.path(getwd(), "pbmc4k"))
library(DropletUtils)
fname <- file.path(getwd(), "pbmc4k/raw_gene_bc_matrices/GRCh38")
sce_pbmc <- read10xCounts(fname, col.names = TRUE)
dim(sce_pbmc)
# [1] 33694 737280
# gene annotation -------------------------------------------------------------
# Consolidate gene identifiers: use the gene symbol where it is unique,
# otherwise fall back to the Ensembl ID.
library(scater)
rownames(sce_pbmc) <- uniquifyFeatureNames(
    rowData(sce_pbmc)$ID,
    rowData(sce_pbmc)$Symbol
)
# Add the chromosome of each gene (used below to flag mitochondrial genes).
# BiocManager::install("EnsDb.Hsapiens.v86")
library(EnsDb.Hsapiens.v86)
location <- mapIds(
    EnsDb.Hsapiens.v86,
    keys = rowData(sce_pbmc)$ID,
    column = "SEQNAME",
    keytype = "GENEID"
)
# detect dropout -----------------------------------------------------------
# Distinguish real cells from empty droplets; keep barcodes at FDR <= 0.1%.
set.seed(100)
e_out <- emptyDrops(counts(sce_pbmc))
sce_pbmc_filtered <- sce_pbmc[, which(e_out$FDR <= 0.001)]
dim(sce_pbmc_filtered)
# [1] 33694 4300
# qc, especially for mitochondrial --------------------------------------------
# Drop cells whose mitochondrial read percentage is an upper outlier.
stats <- perCellQCMetrics(
    sce_pbmc_filtered,
    subsets = list(Mito = which(location == "MT"))
)
high_mito <- isOutlier(stats$subsets_Mito_percent, type = "higher")
sce_pbmc_final <- sce_pbmc_filtered[, !high_mito]
dim(sce_pbmc_final)
# [1] 33694 3985
# normalization by deconvolution -----------------------------------------------
library(scran)
set.seed(1000)
clust_pbmc <- quickCluster(sce_pbmc_final)
sce_pbmc_final <- computeSumFactors(
    sce_pbmc_final,
    cluster = clust_pbmc
)
# logNormCounts()
sce_pbmc_final <- logNormCounts(sce_pbmc_final)
# measure the degree of change by data distribution ---------------------------
# and HVGs selection by proportion
# Model per-gene variance against Poisson technical noise and keep the top
# 10% most highly variable genes (HVGs).
set.seed(1001)
dec_pbmc_pois <- modelGeneVarByPoisson(sce_pbmc_final)
top_hvgs_pbmc <- getTopHVGs(dec_pbmc_pois, prop = 0.1)
length(top_hvgs_pbmc)
# [1] 1599
# dimension reduce, using three methods ---------------------------------------
##### PCA
set.seed(10000)
sce_pbmc_final <- denoisePCA(
    sce_pbmc_final,
    subset.row = top_hvgs_pbmc,
    technical = dec_pbmc_pois
)
dim(reducedDim(sce_pbmc_final, "PCA"))
# [1] 3985 9
##### t-SNE
set.seed(100000)
sce_pbmc_final <- runTSNE(sce_pbmc_final, dimred = "PCA")
dim(reducedDim(sce_pbmc_final, "TSNE"))
# [1] 3985 2
##### UMAP
set.seed(1000000)
sce_pbmc_final <- runUMAP(sce_pbmc_final, dimred = "PCA")
dim(reducedDim(sce_pbmc_final, "UMAP"))
# [1] 3985 2
# clustering ------------------------------------------------------------------
##### graph-based clustering
# Build a shared nearest-neighbour graph (SNNG) from the top PCs.
library(scran)
g_pbmc_k10 <- buildSNNGraph(sce_pbmc_final, k = 10, use.dimred = "PCA")
# Identify clusters.
clust_pbmc_k10 <- igraph::cluster_walktrap(g_pbmc_k10)$membership
table(clust_pbmc_k10)
# clust_pbmc_k10
#   1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16
# 205 508 541  56 374 125  46 432 302 867  47 155 166  61  84  16
# Store the cluster labels as a factor on the SingleCellExperiment object.
library(scater)
colLabels(sce_pbmc_final) <- factor(clust_pbmc_k10)
sce_pbmc_final
# class: SingleCellExperiment
# dim: 33694 3985
# metadata(1): Samples
# assays(2): counts logcounts
# rownames(33694): RP11-34P13.3 FAM138A ... AC213203.1 FAM231B
# rowData names(2): ID Symbol
# colnames(3985): AAACCTGAGAAGGCCT-1 AAACCTGAGACAGACC-1 ... TTTGTCAGTTAAGACA-1
#   TTTGTCATCCCAAGAT-1
# colData names(4): Sample Barcode sizeFactor label
# reducedDimNames(3): PCA TSNE UMAP
# altExpNames(0):
##### fast approximations
## approximate nearest-neighbour search
library(scran)
# NOTE(review): unconditional install on every run; consider guarding with
# if (!requireNamespace("BiocNeighbors", quietly = TRUE)).
BiocManager::install("BiocNeighbors")
library(BiocNeighbors)
snn_gr <- buildSNNGraph(
    sce_pbmc_final,
    BNPARAM = AnnoyParam(),
    use.dimred = "PCA"
)
# Identify clusters and compare them with the exact graph's clusters.
clusters <- igraph::cluster_walktrap(snn_gr)$membership
table(Exact = colLabels(sce_pbmc_final), Approx = clusters)
#       Approx
# Exact    1   2   3   4   5   6   7   8   9  10  11  12  13  14  15  16
#    1   205   0   0   0   0   0   0   0   0   0   0   0   0   0   0   0
#    2     0 479   0   0   2   0   0   0   0  27   0   0   0   0   0   0
#    3     0   0 540   0   1   0   0   0   0   0   0   0   0   0   0   0
#    4     0   0   0  55   0   0   0   0   0   1   0   0   0   0   0   0
#    5     0  25   0   0 349   0   0   0   0   0   0   0   0   0   0   0
#    6     0   0   0   0   0 125   0   0   0   0   0   0   0   0   0   0
#    7     0   0   0   0   0   0  46   0   0   0   0   0   0   0   0   0
#    8     0   0   0   0   0   0   0 432   0   0   0   0   0   0   0   0
#    9     0   0   0   1   0   0   0  10 291   0   0   0   0   0   0   0
#    10    0  28   0   0   0   0   0   0   0 839   0   0   0   0   0   0
#    11    0   0   0   0   0   0   0   0   0   0  47   0   0   0   0   0
#    12    0   0   0   0   0   0   0   0   0   0   0 155   0   0   0   0
#    13    0   0   0   0   0   0   0   0   0   0   0   0 166   0   0   0
#    14    0   0   0   0   0   0   0   0   0   0   0   0   0  61   0   0
#    15    0   0   0   0   0   0   0   0   0   0   0   0   0   0  84   0
#    16    0   0   0   0   0   0   0   0   0   0   0   0   0   0   0  16
## singular value decomposition (SVD)
library(scater)
library(BiocSingular)
# method_1, randomized SVD (RSVD)
set.seed(101000)
r_out <- runPCA(
    sce_pbmc_final,
    ncomponents = 20,
    BSPARAM = RandomParam()
)
str(reducedDim(r_out))
# num [1:3985, 1:20] 15.05 13.43 -8.67 -7.74 6.45 ...
# - attr(*, "dimnames")=List of 2
# ..$ : chr [1:3985] "AAACCTGAGAAGGCCT-1" "AAACCTGAGACAGACC-1" "AAACCTGAGGCATGGT-1" "AAACCTGCAAGGTTCT-1" ...
# ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# - attr(*, "varExplained")= num [1:20] 85.36 40.43 23.22 8.99 6.66 ...
# - attr(*, "percentVar")= num [1:20] 19.85 9.4 5.4 2.09 1.55 ...
# - attr(*, "rotation")= num [1:500, 1:20] 0.203 0.1834 0.1779 0.1063 0.0647 ...
# ..- attr(*, "dimnames")=List of 2
# .. ..$ : chr [1:500] "LYZ" "S100A9" "S100A8" "HLA-DRA" ...
# .. ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# method_2, IRLBA
set.seed(101001)
i_out <- runPCA(
    sce_pbmc_final,
    ncomponents = 20,
    BSPARAM = IrlbaParam()
)
str(reducedDim(i_out))
# num [1:3985, 1:20] 15.05 13.43 -8.67 -7.74 6.45 ...
# - attr(*, "dimnames")=List of 2
# ..$ : chr [1:3985] "AAACCTGAGAAGGCCT-1" "AAACCTGAGACAGACC-1" "AAACCTGAGGCATGGT-1" "AAACCTGCAAGGTTCT-1" ...
# ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# - attr(*, "varExplained")= num [1:20] 85.36 40.43 23.22 8.99 6.66 ...
# - attr(*, "percentVar")= num [1:20] 19.85 9.4 5.4 2.09 1.55 ...
# - attr(*, "rotation")= num [1:500, 1:20] 0.203 0.1834 0.1779 0.1063 0.0647 ...
# ..- attr(*, "dimnames")=List of 2
# .. ..$ : chr [1:500] "LYZ" "S100A9" "S100A8" "HLA-DRA" ...
# .. ..$ : chr [1:20] "PC1" "PC2" "PC3" "PC4" ...
# parallel computing -----------------------------------------------------------
library(BiocParallel)
##### List the registered parallel back-ends; the first entry is the default.
registered()
# $SnowParam
# class: SnowParam
# bpisup: FALSE; bpnworkers: 6; bptasks: 0; bpjobname: BPJOB
# bplog: FALSE; bpthreshold: INFO; bpstopOnError: TRUE
# bpRNGseed: ; bptimeout: 2592000; bpprogressbar: FALSE
# bpexportglobals: TRUE
# bplogdir: NA
# bpresultdir: NA
# cluster type: SOCK
#
# $SerialParam
# class: SerialParam
# bpisup: FALSE; bpnworkers: 1; bptasks: 0; bpjobname: BPJOB
# bplog: FALSE; bpthreshold: INFO; bpstopOnError: TRUE
# bpRNGseed: ; bptimeout: 2592000; bpprogressbar: FALSE
# bpexportglobals: TRUE
# bplogdir: NA
# bpresultdir: NA
##### Change the default back-end
default <- registered()
# Switch the default to BatchtoolsParam
register(BatchtoolsParam(workers = 10), default = TRUE)
names(registered())
# Restore the original settings
for(param in rev(default)) {
    register(param)
}
##### Usage
# The same computation with different back-ends
dec_pbmc_mc <- modelGeneVar(sce_pbmc_final, BPPARAM = MulticoreParam(2))
dec_pbmc_snow <- modelGeneVar(sce_pbmc_final, BPPARAM = SnowParam(5))
##### Using BatchtoolsParam() on a SLURM HPC
# 10 jobs in total, each with a 2 h walltime, 8 GB of memory, and 1 CPU
bpp <- BatchtoolsParam(
    10,
    cluster = "slurm",
    resources = list(
        walltime = 7200,
        memory = 8000,
        ncpus = 1
    )
)
# data that may not fit in memory ----------------------------------------------
# Take 20,000 of the 1.3 million brain cells
# NOTE(review): unconditional install on every run; consider guarding with
# if (!requireNamespace("TENxBrainData", quietly = TRUE)).
BiocManager::install("TENxBrainData")
library(TENxBrainData)
sce_brain <- TENxBrainData20k()
sce_brain
# class: SingleCellExperiment
# dim: 27998 20000
# metadata(0):
# assays(1): counts
# rownames: NULL
# rowData names(2): Ensembl Symbol
# colnames: NULL
# colData names(4): Barcode Sequence Library Mouse
# reducedDimNames(0):
# altExpNames(0):
# Inspect the count matrix: HDF5-backed, so the data stay on disk.
counts(sce_brain)
# <27998 x 20000> matrix of class HDF5Matrix and type "integer":
#          [,1] [,2] [,3] [,4] ... [,19997] [,19998] [,19999] [,20000]
#     [1,]    0    0    0    0 .        0        0        0        0
#     [2,]    0    0    0    0 .        0        0        0        0
#     [3,]    0    0    0    0 .        0        0        0        0
#     [4,]    0    0    0    0 .        0        0        0        0
#     [5,]    0    0    0    0 .        0        0        0        0
#      ...    .    .    .    . .        .        .        .        .
# [27994,]    0    0    0    0 .        0        0        0        0
# [27995,]    0    0    0    1 .        0        2        0        0
# [27996,]    0    0    0    0 .        0        1        0        0
# [27997,]    0    0    0    0 .        0        0        0        0
# [27998,]    0    0    0    0 .        0        0        0        0
# In-memory size of the R object (tiny: just a reference to the HDF5 file).
object.size(counts(sce_brain))
# 2560 bytes
# Actual on-disk size of the backing HDF5 file.
file.info(path(counts(sce_brain)))$size
# [1] 76264332
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{gibbsHMM_2}
\alias{gibbsHMM_2}
\title{gibbsHMM_PT}
\usage{
gibbsHMM_2(YZ, M = 2000, K = 10, alphaMAX = 1, alphaMin = 1e-30,
lab = "sim", type = "First")
}
\arguments{
\item{x,}{alpha, log=False}
}
\description{
density of dirichlet
}
\examples{
dDirichlet(c(.1, .9), c(0.1,0.1))
}
\keyword{dirichlet}
|
/man/gibbsHMM_2.Rd
|
no_license
|
zoevanhavre/Zhmm.0
|
R
| false
| false
| 363
|
rd
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{gibbsHMM_2}
\alias{gibbsHMM_2}
\title{gibbsHMM_PT}
\usage{
gibbsHMM_2(YZ, M = 2000, K = 10, alphaMAX = 1, alphaMin = 1e-30,
lab = "sim", type = "First")
}
\arguments{
\item{x,}{alpha, log=False}
}
\description{
density of dirichlet
}
\examples{
dDirichlet(c(.1, .9), c(0.1,0.1))
}
\keyword{dirichlet}
|
#' ---
#' title: "CS2102/19 Earthquake Structure Analysis"
#' author: "Francisco Castro (fgcastro@wpi.edu)"
#' date: "18 April 2016"
#' ---
#==================================================
# NOTES AND DEPENDENCIES
#==================================================
# This script is for plotting the code structure counts of students.
# Pre-load the following packages:
library(tidyr)
library(dplyr)
library(ggplot2)
library(data.table)
library(gridExtra)
library(grid)
#==================================================
# GLOBALS SETUP
#==================================================
# Set working directory
# NOTE(review): machine-specific absolute path + setwd(); adjust (or switch to
# a project-relative path) before running on another machine.
working_dir <- "C:/Git Repositories/files"
setwd(working_dir)#; getwd()
# Set file names (file_name1 is the older CS2102 data set, kept for reference)
# file_name1 <- "coding-2102-earthquake.csv"
file_name2 <- "coding-19-earthquake.csv"
# MASTER DATA SETUP
#==================================================
# Read file
# coding_data1 <- read.csv(file_name1)
coding_data1 <- read.csv(file_name2)
# Convert column types as needed
coding_data1$School <- as.factor(coding_data1$School)
coding_data1$Course <- as.factor(coding_data1$Course)
coding_data1$StudyID <- as.character(coding_data1$StudyID)
coding_data1$Lang <- as.factor(coding_data1$Lang)
coding_data1$Problem <- as.factor(coding_data1$Problem)
coding_data1$Subgroup <- as.factor(coding_data1$Subgroup)
coding_data1$SolutionID <- as.factor(coding_data1$SolutionID)
coding_data1$Bin <- as.factor(coding_data1$Bin)
coding_data1$Structure <- as.factor(coding_data1$Structure)
coding_data1$Helpers <- as.character(coding_data1$Helpers)
coding_data1$Builtins <- as.character(coding_data1$Builtins)
coding_data1$Notes <- as.character(coding_data1$Notes)
# Get data information
# str(coding_data1); summary(coding_data1)
# str(coding_data2); summary(coding_data2)
#==================================================
# EXTRACT RELATED DATA POINTS
#==================================================
# EARTHQUAKE
# Keep only the columns needed for the structure analysis.
clean_data1 <- coding_data1[, c("StudyID", "Subgroup", "SolutionID", "Bin", "Structure")]
# Split into separate data frames for SolutionID = 1 and SolutionID = 2.
sol_id1 <- clean_data1[clean_data1$SolutionID == "1",]
sol_id2 <- clean_data1[clean_data1$SolutionID == "2",]
# Occurrence counts of each (Bin, Structure) combination.
bin_structs_count1 <- data.frame(count(clean_data1, Bin, Structure))
setnames(bin_structs_count1, c("n"), c("Counts"))
# Occurrence counts per Bin.
bin_count <- data.frame(count(clean_data1, Bin))
setnames(bin_count, c("n"), c("Counts"))
# Combine each student's two solutions into one row: solution 1's bin and
# structure, plus solution 2's bin and structure for the same StudyID.
combined_sols1 <- sol_id1[,c("StudyID", "Subgroup", "Bin", "Structure")]
setnames(combined_sols1, c("Bin", "Structure"), c("Bin1", "Structure1"))
# Look up solution 2 by StudyID with match(). The original compared the two
# StudyID vectors element-wise (==), which only works when both subsets are
# in exactly the same student order and silently misaligns or drops rows
# otherwise; match() is order-independent and yields the same values when
# the orders do coincide.
sol2_idx <- match(combined_sols1$StudyID, sol_id2$StudyID)
combined_sols1$Bin2 <- sol_id2$Bin[sol2_idx]
combined_sols1$Structure2 <- sol_id2$Structure[sol2_idx]
# Students whose two solutions fall in the same bin.
same_bins1 <- combined_sols1[combined_sols1$Bin1 == combined_sols1$Bin2,]
# Counts of each bin among the same-bin students.
same_bins_count1 <- data.frame(count(same_bins1, Bin1))
setnames(same_bins_count1, c("n"), c("Counts"))
# Students whose two solutions fall in different bins.
diff_bins1 <- combined_sols1[combined_sols1$Bin1 != combined_sols1$Bin2,]
# Students with an AllTogether solution for both Bin1 and Bin2.
# NOTE: AllTogether is either SingleTraverse or NestedTraverse
allin1trav_both1 <- diff_bins1[(diff_bins1$Bin1 == "SingleTraverse" | diff_bins1$Bin1 == "NestedTraverse") &
                                 (diff_bins1$Bin2 == "SingleTraverse" | diff_bins1$Bin2 == "NestedTraverse"),]
# Remove the rows with AllTogether solutions for both Bin1 and Bin2.
# NOTE(review): relies on dplyr::setdiff (row-wise on data frames) masking
# base::setdiff — confirm dplyr is attached when this runs.
diff_bins1 <- setdiff(diff_bins1, allin1trav_both1)
# Rows where at least one of the two solutions is an AllTogether.
diff_bins_allin1trav1 <- diff_bins1[diff_bins1$Bin1 == "SingleTraverse" | diff_bins1$Bin1 == "NestedTraverse" |
                           diff_bins1$Bin2 == "SingleTraverse" | diff_bins1$Bin2 == "NestedTraverse",]
# Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
diff_bins_allin1trav1[diff_bins_allin1trav1$Bin2 == "SingleTraverse" | diff_bins_allin1trav1$Bin2 == "NestedTraverse",
                      c("Bin1","Structure1","Bin2","Structure2")] <-
  diff_bins_allin1trav1[diff_bins_allin1trav1$Bin2 == "SingleTraverse" | diff_bins_allin1trav1$Bin2 == "NestedTraverse"
                        , c("Bin2","Structure2","Bin1","Structure1")]
diff_bins_allin1trav_count1 <- data.frame(count(diff_bins_allin1trav1, Bin1, Bin2))
setnames(diff_bins_allin1trav_count1, c("n"), c("Counts"))
# Subset with Bin1 == SingleTraverse.
diff_bins_singltrav1 <- diff_bins_allin1trav1[diff_bins_allin1trav1$Bin1 == "SingleTraverse",]
# Counts of the other (Bin2) bins paired with a SingleTraverse solution.
diff_bins_singltrav_count1 <- data.frame(count(diff_bins_singltrav1, Bin2))
setnames(diff_bins_singltrav_count1, c("n"), c("Counts"))
# Subset with Bin1 == NestedTraverse.
diff_bins_nstdtrav1 <- diff_bins_allin1trav1[diff_bins_allin1trav1$Bin1 == "NestedTraverse",]
# Counts of the other (Bin2) bins paired with a NestedTraverse solution.
diff_bins_nstdtrav_count1 <- data.frame(count(diff_bins_nstdtrav1, Bin2))
setnames(diff_bins_nstdtrav_count1, c("n"), c("Counts"))
# All remaining pairs that used neither SingleTraverse nor NestedTraverse.
diff_bins_other1 <- diff_bins1[diff_bins1$Bin1 != "SingleTraverse" & diff_bins1$Bin2 != "SingleTraverse" &
                                 diff_bins1$Bin1 != "NestedTraverse" & diff_bins1$Bin2 != "NestedTraverse",]
# # FOR CS2102: 1102 BACKGROUND
#
# # Create data frame of students with 1102 background
# studs_1102 <- clean_data1[clean_data1$Subgroup == "1102-a" | clean_data1$Subgroup == "1102-b",]
#
# # Create data frame with occurrence counts of structures for each bin for students with 1102 background
# bin_structs1102_count <- data.frame(count(studs_1102, Bin, Structure))
# setnames(bin_structs1102_count, c("n"), c("Counts"))
#
# # Create separate data frames for each of SolutionID = 1 and SolutionID = 2
# sol_id1_1102 <- studs_1102[studs_1102$SolutionID == "1",]
# sol_id2_1102 <- studs_1102[studs_1102$SolutionID == "2",]
#
# # Create data frame combining solutions 1 & 2 bins and structures in each student row entry
# combined_1102 <- sol_id1_1102[,c("StudyID", "Subgroup", "Bin", "Structure")]
# setnames(combined_1102, c("Bin", "Structure"), c("Bin1", "Structure1"))
# combined_1102$Bin2 <- sol_id2_1102[combined_1102$StudyID == sol_id2_1102$StudyID, c("Bin")]
# combined_1102$Structure2 <- sol_id2_1102[combined_1102$StudyID == sol_id2_1102$StudyID, c("Structure")]
#
# # Create data frame containing students who has same Bin1 and Bin2
# same_bins1102 <- combined_1102[combined_1102$Bin1 == combined_1102$Bin2,]
#
# # Create data frame that counts occurrences of structures for each bin
# same_bins_count1102 <- data.frame(count(same_bins1102, Bin1))
# setnames(same_bins_count1102, c("n"), c("Counts"))
#
# # Create data frame containing students who has different Bin1 and Bin2
# diff_bins1102 <- combined_1102[combined_1102$Bin1 != combined_1102$Bin2,]
#
# # Create data frame containing students who has an AllTogether solution for Bin1 and Bin2
# # NOTE: AllTogether is either SingleTraverse or NestedTraverse
# allin1trav_both1102 <- diff_bins1102[(diff_bins1102$Bin1 == "SingleTraverse" | diff_bins1102$Bin1 == "NestedTraverse") &
# (diff_bins1102$Bin2 == "SingleTraverse" | diff_bins1102$Bin2 == "NestedTraverse"),]
#
# # Modify diff_bins1 so that it removes all rows with AllTogether solutions for both Bin1 and Bin2
# diff_bins1102 <- setdiff(diff_bins1102, allin1trav_both1102)
#
# # Create data frame with at least one of the solutions as an AllTogether
# diff_bins_allin1trav1102 <- diff_bins1102[diff_bins1102$Bin1 == "SingleTraverse" | diff_bins1102$Bin1 == "NestedTraverse" |
# diff_bins1102$Bin2 == "SingleTraverse" | diff_bins1102$Bin2 == "NestedTraverse",]
#
# # Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
# diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin2 == "SingleTraverse" | diff_bins_allin1trav1102$Bin2 == "NestedTraverse",
# c("Bin1","Structure1","Bin2","Structure2")] <-
# diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin2 == "SingleTraverse" | diff_bins_allin1trav1102$Bin2 == "NestedTraverse",
# c("Bin2","Structure2","Bin1","Structure1")]
#
# diff_bins_allin1trav_count1102 <- data.frame(count(diff_bins_allin1trav1102, Bin1, Bin2))
# setnames(diff_bins_allin1trav_count1102, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in SingleTraverse
# diff_bins_singltrav1102 <- diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin1 == "SingleTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one SingleTrav
# diff_bins_singltrav_count1102 <- data.frame(count(diff_bins_singltrav1102, Bin2))
# setnames(diff_bins_singltrav_count1102, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in NestedTraverse
# diff_bins_nstdtrav1102 <- diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin1 == "NestedTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one NestedTrav
# diff_bins_nstdtrav_count1102 <- data.frame(count(diff_bins_nstdtrav1102, Bin2))
# setnames(diff_bins_nstdtrav_count1102, c("n"), c("Counts"))
#
# # Create data frame with all other solutions that didn't use SingleTraverse or NestedTraverse
# diff_bins_other1102 <- diff_bins1102[diff_bins1102$Bin1 != "SingleTraverse" & diff_bins1102$Bin2 != "SingleTraverse" &
# diff_bins1102$Bin1 != "NestedTraverse" & diff_bins1102$Bin2 != "NestedTraverse",]
# # FOR CS2102: 1101 BACKGROUND
#
# # Create data frame of students with 1101 background
# studs_1101 <- clean_data1[clean_data1$Subgroup == "1101-a" |
# clean_data1$Subgroup == "1101-b" |
# clean_data1$Subgroup == "1101-c",]
#
# # Create data frame with occurrence counts of structures for each bin for students with 1101 background
# bin_structs1101_count <- data.frame(count(studs_1101, Bin, Structure))
# setnames(bin_structs1101_count, c("n"), c("Counts"))
#
# # Create separate data frames for each of SolutionID = 1 and SolutionID = 2
# sol_id1_1101 <- studs_1101[studs_1101$SolutionID == "1",]
# sol_id2_1101 <- studs_1101[studs_1101$SolutionID == "2",]
#
# # Create data frame combining solutions 1 & 2 bins and structures in each student row entry
# combined_1101 <- sol_id1_1101[,c("StudyID", "Subgroup", "Bin", "Structure")]
# setnames(combined_1101, c("Bin", "Structure"), c("Bin1", "Structure1"))
# combined_1101$Bin2 <- sol_id2_1101[combined_1101$StudyID == sol_id2_1101$StudyID, c("Bin")]
# combined_1101$Structure2 <- sol_id2_1101[combined_1101$StudyID == sol_id2_1101$StudyID, c("Structure")]
#
# # Create data frame containing students who has same Bin1 and Bin2
# same_bins1101 <- combined_1101[combined_1101$Bin1 == combined_1101$Bin2,]
#
# # Create data frame that counts occurrences of structures for each bin
# same_bins_count1101 <- data.frame(count(same_bins1101, Bin1))
# setnames(same_bins_count1101, c("n"), c("Counts"))
#
# # Create data frame containing students who has different Bin1 and Bin2
# diff_bins1101 <- combined_1101[combined_1101$Bin1 != combined_1101$Bin2,]
#
# # Create data frame containing students who has an AllTogether solution for Bin1 and Bin2
# # NOTE: AllTogether is either SingleTraverse or NestedTraverse
# allin1trav_both1101 <- diff_bins1101[(diff_bins1101$Bin1 == "SingleTraverse" | diff_bins1101$Bin1 == "NestedTraverse") &
# (diff_bins1101$Bin2 == "SingleTraverse" | diff_bins1101$Bin2 == "NestedTraverse"),]
#
# # Modify diff_bins1 so that it removes all rows with AllTogether solutions for both Bin1 and Bin2
# diff_bins1101 <- setdiff(diff_bins1101, allin1trav_both1101)
#
# # Create data frame with at least one of the solutions as an AllTogether
# diff_bins_allin1trav1101 <- diff_bins1101[diff_bins1101$Bin1 == "SingleTraverse" | diff_bins1101$Bin1 == "NestedTraverse" |
# diff_bins1101$Bin2 == "SingleTraverse" | diff_bins1101$Bin2 == "NestedTraverse",]
#
# # Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
# diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin2 == "SingleTraverse" | diff_bins_allin1trav1101$Bin2 == "NestedTraverse",
# c("Bin1","Structure1","Bin2","Structure2")] <-
# diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin2 == "SingleTraverse" | diff_bins_allin1trav1101$Bin2 == "NestedTraverse",
# c("Bin2","Structure2","Bin1","Structure1")]
#
# diff_bins_allin1trav_count1101 <- data.frame(count(diff_bins_allin1trav1101, Bin1, Bin2))
# setnames(diff_bins_allin1trav_count1101, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in SingleTraverse
# diff_bins_singltrav1101 <- diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin1 == "SingleTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one SingleTrav
# diff_bins_singltrav_count1101 <- data.frame(count(diff_bins_singltrav1101, Bin2))
# setnames(diff_bins_singltrav_count1101, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in NestedTraverse
# diff_bins_nstdtrav1101 <- diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin1 == "NestedTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one NestedTrav
# diff_bins_nstdtrav_count1101 <- data.frame(count(diff_bins_nstdtrav1101, Bin2))
# setnames(diff_bins_nstdtrav_count1101, c("n"), c("Counts"))
#
# # Create data frame with all other solutions that didn't use SingleTraverse or NestedTraverse
# diff_bins_other1101 <- diff_bins1101[diff_bins1101$Bin1 != "SingleTraverse" & diff_bins1101$Bin2 != "SingleTraverse" &
# diff_bins1101$Bin1 != "NestedTraverse" & diff_bins1101$Bin2 != "NestedTraverse",]
#==================================================
# GRAPHS
#==================================================
# EARTHQUAKE
# ALL STRUCTURE OCCURRENCES PER BIN OVER ALL SOLUTIONS
# Note: Stacked bar visualization for each code structure bin
# Compute midpoints of bars, for each structure in each bin; store in variable pos
# pos = cumulative count minus half the segment's own count, i.e. the vertical
# center of each stacked-bar segment — used below to position text labels.
bin_structs_midpoint1 <- group_by(bin_structs_count1, Bin) %>% mutate(pos = cumsum(Counts) - (0.5 * Counts))
# Graph all structures, grouped by bin
# Stacked horizontal bars: one bar per Bin, segments filled by Structure,
# with white count labels centered on each segment via the pos column.
g_all_solns1 <- ggplot(data = bin_structs_midpoint1, aes(x = Bin, y = Counts))
g_all_solns1 + geom_bar(aes(fill = Structure), stat = "identity") +
  coord_flip() +
  # ggtitle("CS2102: All solutions") +
  ggtitle("CS19: All solutions") +
  theme(legend.position = "bottom") +
  geom_text(aes(label = Counts, y = pos), color = "white")
# Graph number of occurrence per bin
# Replace 'SingleTraverse' and 'NestedTraverse' factors for uniformity
# (rename the factor levels so the axis labels read "...Traversal").
levels(bin_count$Bin)[levels(bin_count$Bin) == "SingleTraverse"] <- "SingleTraversal"
levels(bin_count$Bin)[levels(bin_count$Bin) == "NestedTraverse"] <- "NestedTraversal"
# Horizontal bar chart of solution counts per plan bin, with white bold count
# labels just inside the bar ends (hjust = 1.3) and a fixed 0-30 count axis.
ggplot(data=bin_count, aes(x=Bin, y=Counts)) +
  geom_bar(aes(fill=Bin), stat='identity') +
  coord_flip() +
  theme(legend.position='none',
        axis.ticks.x=element_blank(),
        panel.grid.major = element_blank(),
        axis.text=element_text(size=12),
        axis.title=element_text(size=14,face="bold"),
        aspect.ratio=0.7) +
  geom_text(aes(label=Counts), hjust=1.3, color='white', fontface='bold', size=5) +
  scale_y_continuous(limits=c(0,30), expand=c(0,0)) +
  labs(x='Plan\n', y='\nNumber of Solutions in each Plan Type')
# STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1 <- ggplot(data = same_bins_count1, aes(x = Bin1, y = Counts))
# g_same_bins1 + geom_bar(aes(fill = Bin1), stat = "identity") +
# # ggtitle("CS2102: Students with same bins for solutions 1, 2") +
# ggtitle("CS19: Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
# STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1 <- ggplot(data = diff_bins_allin1trav_count1, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1 + geom_bar(aes(fill = Bin2), stat = "identity") +
# # ggtitle("CS2102: Students with [Single/Nested]Traverse as one of the solutions") +
# ggtitle("CS19: Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1 <- tableGrob(allin1trav_both1)
# grob_title <- textGrob("Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1)
# STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1 <- tableGrob(diff_bins_other1)
# grob_title <- textGrob("Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1)
# FOR STUDENTS WITH 1102 BACKGROUND
# # Compute midpoints of bars, for each structure in each bin; store in variable pos
# bin_structs1102_midpoint <- group_by(bin_structs1102_count, Bin) %>%
# mutate(pos = cumsum(Counts) - (0.5 * Counts))
#
# # Graph all structures, grouped by bin
# g_all_solns1102 <- ggplot(data = bin_structs1102_midpoint, aes(x = Bin, y = Counts))
# g_all_solns1102 + geom_bar(aes(fill = Structure), stat = "identity") +
# coord_flip() +
# ggtitle("CS2102: All solutions from students with 1102 background") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts, y = pos), color = "white")
#
# # 1102 STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1102 <- ggplot(data = same_bins_count1102, aes(x = Bin1, y = Counts))
# g_same_bins1102 + geom_bar(aes(fill = Bin1), stat = "identity") +
# ggtitle("CS2102: 1102 Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
#
# # 1102 STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1102 <- ggplot(data = diff_bins_allin1trav_count1102, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1102 + geom_bar(aes(fill = Bin2), stat = "identity") +
# ggtitle("CS2102: 1102 Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# # 1102 STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1102 <- tableGrob(allin1trav_both1102)
# grob_title <- textGrob("1102 Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1102)
# 1102 STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1102 <- tableGrob(diff_bins_other1102)
# grob_title <- textGrob("1102 Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1102)
# FOR STUDENTS WITH 1101 BACKGROUND
# # Compute midpoints of bars, for each structure in each bin; store in variable pos
# bin_structs1101_midpoint <- group_by(bin_structs1101_count, Bin) %>%
# mutate(pos = cumsum(Counts) - (0.5 * Counts))
# #
# # Graph all structures, grouped by bin
# g_all_solns1101 <- ggplot(data = bin_structs1101_midpoint, aes(x = Bin, y = Counts))
# g_all_solns1101 + geom_bar(aes(fill = Structure), stat = "identity") +
# coord_flip() +
# ggtitle("CS2102: All solutions from students with 1101 background") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts, y = pos), color = "white")
# #
# # 1101 STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1101 <- ggplot(data = same_bins_count1101, aes(x = Bin1, y = Counts))
# g_same_bins1101 + geom_bar(aes(fill = Bin1), stat = "identity") +
# ggtitle("CS2102: 1101 Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
# #
# # 1101 STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1101 <- ggplot(data = diff_bins_allin1trav_count1101, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1101 + geom_bar(aes(fill = Bin2), stat = "identity") +
# ggtitle("CS2102: 1101 Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# #
# # 1101 STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1101 <- tableGrob(allin1trav_both1101)
# grob_title <- textGrob("1101 Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1101)
# #
# # 1101 STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1101 <- tableGrob(diff_bins_other1101)
# grob_title <- textGrob("1101 Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1101)
|
/analysis/data-analysis-earthquake.r
|
no_license
|
franciscastro/research-plan-composition
|
R
| false
| false
| 22,293
|
r
|
#' ---
#' title: "CS2102/19 Earthquake Structure Analysis"
#' author: "Francisco Castro (fgcastro@wpi.edu)"
#' date: "18 April 2016"
#' ---
#==================================================
# NOTES AND DEPENDENCIES
#==================================================
# This script is for plotting the code structure counts of students.
# Pre-load the following packages:
library(tidyr)
library(dplyr)
library(ggplot2)
library(data.table)
library(gridExtra)
library(grid)
#==================================================
# GLOBALS SETUP
#==================================================
# Set working directory
# NOTE(review): a hard-coded, machine-specific path combined with setwd() makes
# this script non-portable; consider relative paths or here::here() instead.
working_dir <- "C:/Git Repositories/files"
setwd(working_dir)#; getwd()
# Set file names
# file_name1 is the CS2102 data set; file_name2 is the CS19 data set
# (only one of them is loaded below).
# file_name1 <- "coding-2102-earthquake.csv"
file_name2 <- "coding-19-earthquake.csv"
#==================================================
# MASTER DATA SETUP
#==================================================
# Load the coding spreadsheet for the course under analysis.
# (Swap in file_name1 here to analyze the CS2102 data instead.)
coding_data1 <- read.csv(file_name2)

# Coerce columns to the types the analysis expects, done column-group-wise
# rather than one assignment per column.  Categorical codes become factors;
# free-text fields stay character.
factor_cols <- c("School", "Course", "Lang", "Problem",
                 "Subgroup", "SolutionID", "Bin", "Structure")
char_cols <- c("StudyID", "Helpers", "Builtins", "Notes")
coding_data1[factor_cols] <- lapply(coding_data1[factor_cols], as.factor)
coding_data1[char_cols] <- lapply(coding_data1[char_cols], as.character)

# Inspect the data if needed:
# str(coding_data1); summary(coding_data1)
# str(coding_data2); summary(coding_data2)
#==================================================
# EXTRACT RELATED DATA POINTS
#==================================================
# EARTHQUAKE
# Create data frame with just the needed columns
clean_data1 <- coding_data1[, c("StudyID", "Subgroup", "SolutionID", "Bin", "Structure")]
# Create separate data frames for each of SolutionID = 1 and SolutionID = 2
sol_id1 <- clean_data1[clean_data1$SolutionID == "1",]
sol_id2 <- clean_data1[clean_data1$SolutionID == "2",]
# Create data frame with occurrence counts of structures for each bin
bin_structs_count1 <- data.frame(count(clean_data1, Bin, Structure))
setnames(bin_structs_count1, c("n"), c("Counts"))
# Create data frame with occurrence counts for each bin
bin_count <- data.frame(count(clean_data1, Bin))
setnames(bin_count, c("n"), c("Counts"))
# Combine each student's two solutions into one row: Bin1/Structure1 come from
# solution 1, Bin2/Structure2 from solution 2, paired by StudyID.
combined_sols1 <- sol_id1[,c("StudyID", "Subgroup", "Bin", "Structure")]
setnames(combined_sols1, c("Bin", "Structure"), c("Bin1", "Structure1"))
# BUGFIX: the original used an elementwise test
# (combined_sols1$StudyID == sol_id2$StudyID), which silently mispairs or
# drops rows whenever the two solution subsets are not in the exact same
# StudyID order (and recycles if their lengths differ).  match() pairs rows
# by StudyID regardless of order, and yields the identical result when the
# rows were already aligned.
sol2_row <- match(combined_sols1$StudyID, sol_id2$StudyID)
combined_sols1$Bin2 <- sol_id2$Bin[sol2_row]
combined_sols1$Structure2 <- sol_id2$Structure[sol2_row]
# Students whose two solutions landed in the same plan bin, plus per-bin counts
# (count()'s "n" column is renamed "Counts").
same_bins1 <- combined_sols1[combined_sols1$Bin1 == combined_sols1$Bin2, ]
same_bins_count1 <- data.frame(count(same_bins1, Bin1))
setnames(same_bins_count1, "n", "Counts")

# Students whose two solutions landed in different plan bins.
diff_bins1 <- combined_sols1[combined_sols1$Bin1 != combined_sols1$Bin2, ]

# "AllTogether" = a traversal-in-one plan: either SingleTraverse or
# NestedTraverse.  Find students where BOTH differing solutions are
# AllTogether plans...
bin1_is_trav <- diff_bins1$Bin1 == "SingleTraverse" | diff_bins1$Bin1 == "NestedTraverse"
bin2_is_trav <- diff_bins1$Bin2 == "SingleTraverse" | diff_bins1$Bin2 == "NestedTraverse"
allin1trav_both1 <- diff_bins1[bin1_is_trav & bin2_is_trav, ]

# ...and remove those rows from diff_bins1 (dplyr's setdiff operates row-wise
# on data frames).
diff_bins1 <- setdiff(diff_bins1, allin1trav_both1)
# Create data frame with at least one of the solutions as an AllTogether
diff_bins_allin1trav1 <- diff_bins1[diff_bins1$Bin1 == "SingleTraverse" | diff_bins1$Bin1 == "NestedTraverse" |
                                      diff_bins1$Bin2 == "SingleTraverse" | diff_bins1$Bin2 == "NestedTraverse",]
# Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
# The RHS is evaluated in full before the assignment happens, so Bin1/Structure1
# and Bin2/Structure2 exchange values simultaneously for the selected rows.
diff_bins_allin1trav1[diff_bins_allin1trav1$Bin2 == "SingleTraverse" | diff_bins_allin1trav1$Bin2 == "NestedTraverse",
                      c("Bin1","Structure1","Bin2","Structure2")] <-
  diff_bins_allin1trav1[diff_bins_allin1trav1$Bin2 == "SingleTraverse" | diff_bins_allin1trav1$Bin2 == "NestedTraverse"
                        , c("Bin2","Structure2","Bin1","Structure1")]
# Tally how often each (Bin1, Bin2) plan pairing occurs; dplyr::count() names
# its tally column "n", which is renamed "Counts" for the graphs below.
diff_bins_allin1trav_count1 <- data.frame(count(diff_bins_allin1trav1, Bin1, Bin2))
setnames(diff_bins_allin1trav_count1, c("n"), c("Counts"))
# Split diff_bins_allin1trav1 by its (already normalized) Bin1 plan and tally
# the companion Bin2 plans for each split; count()'s "n" column becomes "Counts".
is_single1 <- diff_bins_allin1trav1$Bin1 == "SingleTraverse"
is_nested1 <- diff_bins_allin1trav1$Bin1 == "NestedTraverse"

# Students whose Bin1 solution is SingleTraverse, plus counts of their other plan
diff_bins_singltrav1 <- diff_bins_allin1trav1[is_single1, ]
diff_bins_singltrav_count1 <- data.frame(count(diff_bins_singltrav1, Bin2))
setnames(diff_bins_singltrav_count1, "n", "Counts")

# Students whose Bin1 solution is NestedTraverse, plus counts of their other plan
diff_bins_nstdtrav1 <- diff_bins_allin1trav1[is_nested1, ]
diff_bins_nstdtrav_count1 <- data.frame(count(diff_bins_nstdtrav1, Bin2))
setnames(diff_bins_nstdtrav_count1, "n", "Counts")

# Remaining students: neither of their two solutions was a SingleTraverse or
# NestedTraverse plan.
no_trav_bin1 <- diff_bins1$Bin1 != "SingleTraverse" & diff_bins1$Bin1 != "NestedTraverse"
no_trav_bin2 <- diff_bins1$Bin2 != "SingleTraverse" & diff_bins1$Bin2 != "NestedTraverse"
diff_bins_other1 <- diff_bins1[no_trav_bin1 & no_trav_bin2, ]
# # FOR CS2102: 1102 BACKGROUND
#
# # Create data frame of students with 1102 background
# studs_1102 <- clean_data1[clean_data1$Subgroup == "1102-a" | clean_data1$Subgroup == "1102-b",]
#
# # Create data frame with occurrence counts of structures for each bin for students with 1102 background
# bin_structs1102_count <- data.frame(count(studs_1102, Bin, Structure))
# setnames(bin_structs1102_count, c("n"), c("Counts"))
#
# # Create separate data frames for each of SolutionID = 1 and SolutionID = 2
# sol_id1_1102 <- studs_1102[studs_1102$SolutionID == "1",]
# sol_id2_1102 <- studs_1102[studs_1102$SolutionID == "2",]
#
# # Create data frame combining solutions 1 & 2 bins and structures in each student row entry
# combined_1102 <- sol_id1_1102[,c("StudyID", "Subgroup", "Bin", "Structure")]
# setnames(combined_1102, c("Bin", "Structure"), c("Bin1", "Structure1"))
# combined_1102$Bin2 <- sol_id2_1102[combined_1102$StudyID == sol_id2_1102$StudyID, c("Bin")]
# combined_1102$Structure2 <- sol_id2_1102[combined_1102$StudyID == sol_id2_1102$StudyID, c("Structure")]
#
# # Create data frame containing students who has same Bin1 and Bin2
# same_bins1102 <- combined_1102[combined_1102$Bin1 == combined_1102$Bin2,]
#
# # Create data frame that counts occurrences of structures for each bin
# same_bins_count1102 <- data.frame(count(same_bins1102, Bin1))
# setnames(same_bins_count1102, c("n"), c("Counts"))
#
# # Create data frame containing students who has different Bin1 and Bin2
# diff_bins1102 <- combined_1102[combined_1102$Bin1 != combined_1102$Bin2,]
#
# # Create data frame containing students who has an AllTogether solution for Bin1 and Bin2
# # NOTE: AllTogether is either SingleTraverse or NestedTraverse
# allin1trav_both1102 <- diff_bins1102[(diff_bins1102$Bin1 == "SingleTraverse" | diff_bins1102$Bin1 == "NestedTraverse") &
# (diff_bins1102$Bin2 == "SingleTraverse" | diff_bins1102$Bin2 == "NestedTraverse"),]
#
# # Modify diff_bins1 so that it removes all rows with AllTogether solutions for both Bin1 and Bin2
# diff_bins1102 <- setdiff(diff_bins1102, allin1trav_both1102)
#
# # Create data frame with at least one of the solutions as an AllTogether
# diff_bins_allin1trav1102 <- diff_bins1102[diff_bins1102$Bin1 == "SingleTraverse" | diff_bins1102$Bin1 == "NestedTraverse" |
# diff_bins1102$Bin2 == "SingleTraverse" | diff_bins1102$Bin2 == "NestedTraverse",]
#
# # Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
# diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin2 == "SingleTraverse" | diff_bins_allin1trav1102$Bin2 == "NestedTraverse",
# c("Bin1","Structure1","Bin2","Structure2")] <-
# diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin2 == "SingleTraverse" | diff_bins_allin1trav1102$Bin2 == "NestedTraverse",
# c("Bin2","Structure2","Bin1","Structure1")]
#
# diff_bins_allin1trav_count1102 <- data.frame(count(diff_bins_allin1trav1102, Bin1, Bin2))
# setnames(diff_bins_allin1trav_count1102, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in SingleTraverse
# diff_bins_singltrav1102 <- diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin1 == "SingleTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one SingleTrav
# diff_bins_singltrav_count1102 <- data.frame(count(diff_bins_singltrav1102, Bin2))
# setnames(diff_bins_singltrav_count1102, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in NestedTraverse
# diff_bins_nstdtrav1102 <- diff_bins_allin1trav1102[diff_bins_allin1trav1102$Bin1 == "NestedTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one NestedTrav
# diff_bins_nstdtrav_count1102 <- data.frame(count(diff_bins_nstdtrav1102, Bin2))
# setnames(diff_bins_nstdtrav_count1102, c("n"), c("Counts"))
#
# # Create data frame with all other solutions that didn't use SingleTraverse or NestedTraverse
# diff_bins_other1102 <- diff_bins1102[diff_bins1102$Bin1 != "SingleTraverse" & diff_bins1102$Bin2 != "SingleTraverse" &
# diff_bins1102$Bin1 != "NestedTraverse" & diff_bins1102$Bin2 != "NestedTraverse",]
# # FOR CS2102: 1101 BACKGROUND
#
# # Create data frame of students with 1101 background
# studs_1101 <- clean_data1[clean_data1$Subgroup == "1101-a" |
# clean_data1$Subgroup == "1101-b" |
# clean_data1$Subgroup == "1101-c",]
#
# # Create data frame with occurrence counts of structures for each bin for students with 1101 background
# bin_structs1101_count <- data.frame(count(studs_1101, Bin, Structure))
# setnames(bin_structs1101_count, c("n"), c("Counts"))
#
# # Create separate data frames for each of SolutionID = 1 and SolutionID = 2
# sol_id1_1101 <- studs_1101[studs_1101$SolutionID == "1",]
# sol_id2_1101 <- studs_1101[studs_1101$SolutionID == "2",]
#
# # Create data frame combining solutions 1 & 2 bins and structures in each student row entry
# combined_1101 <- sol_id1_1101[,c("StudyID", "Subgroup", "Bin", "Structure")]
# setnames(combined_1101, c("Bin", "Structure"), c("Bin1", "Structure1"))
# combined_1101$Bin2 <- sol_id2_1101[combined_1101$StudyID == sol_id2_1101$StudyID, c("Bin")]
# combined_1101$Structure2 <- sol_id2_1101[combined_1101$StudyID == sol_id2_1101$StudyID, c("Structure")]
#
# # Create data frame containing students who has same Bin1 and Bin2
# same_bins1101 <- combined_1101[combined_1101$Bin1 == combined_1101$Bin2,]
#
# # Create data frame that counts occurrences of structures for each bin
# same_bins_count1101 <- data.frame(count(same_bins1101, Bin1))
# setnames(same_bins_count1101, c("n"), c("Counts"))
#
# # Create data frame containing students who has different Bin1 and Bin2
# diff_bins1101 <- combined_1101[combined_1101$Bin1 != combined_1101$Bin2,]
#
# # Create data frame containing students who has an AllTogether solution for Bin1 and Bin2
# # NOTE: AllTogether is either SingleTraverse or NestedTraverse
# allin1trav_both1101 <- diff_bins1101[(diff_bins1101$Bin1 == "SingleTraverse" | diff_bins1101$Bin1 == "NestedTraverse") &
# (diff_bins1101$Bin2 == "SingleTraverse" | diff_bins1101$Bin2 == "NestedTraverse"),]
#
# # Modify diff_bins1 so that it removes all rows with AllTogether solutions for both Bin1 and Bin2
# diff_bins1101 <- setdiff(diff_bins1101, allin1trav_both1101)
#
# # Create data frame with at least one of the solutions as an AllTogether
# diff_bins_allin1trav1101 <- diff_bins1101[diff_bins1101$Bin1 == "SingleTraverse" | diff_bins1101$Bin1 == "NestedTraverse" |
# diff_bins1101$Bin2 == "SingleTraverse" | diff_bins1101$Bin2 == "NestedTraverse",]
#
# # Swap the values so that all Single/NestedTraverse-s are under Bin1, swap corresponding Structures
# diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin2 == "SingleTraverse" | diff_bins_allin1trav1101$Bin2 == "NestedTraverse",
# c("Bin1","Structure1","Bin2","Structure2")] <-
# diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin2 == "SingleTraverse" | diff_bins_allin1trav1101$Bin2 == "NestedTraverse",
# c("Bin2","Structure2","Bin1","Structure1")]
#
# diff_bins_allin1trav_count1101 <- data.frame(count(diff_bins_allin1trav1101, Bin1, Bin2))
# setnames(diff_bins_allin1trav_count1101, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in SingleTraverse
# diff_bins_singltrav1101 <- diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin1 == "SingleTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one SingleTrav
# diff_bins_singltrav_count1101 <- data.frame(count(diff_bins_singltrav1101, Bin2))
# setnames(diff_bins_singltrav_count1101, c("n"), c("Counts"))
#
# # Create data frame from diff_bins_allin1trav1 that has Bin1 in NestedTraverse
# diff_bins_nstdtrav1101 <- diff_bins_allin1trav1101[diff_bins_allin1trav1101$Bin1 == "NestedTraverse",]
#
# # Create data frame that counts occurrences of other bins for students with one NestedTrav
# diff_bins_nstdtrav_count1101 <- data.frame(count(diff_bins_nstdtrav1101, Bin2))
# setnames(diff_bins_nstdtrav_count1101, c("n"), c("Counts"))
#
# # Create data frame with all other solutions that didn't use SingleTraverse or NestedTraverse
# diff_bins_other1101 <- diff_bins1101[diff_bins1101$Bin1 != "SingleTraverse" & diff_bins1101$Bin2 != "SingleTraverse" &
# diff_bins1101$Bin1 != "NestedTraverse" & diff_bins1101$Bin2 != "NestedTraverse",]
#==================================================
# GRAPHS
#==================================================
# EARTHQUAKE
# ALL STRUCTURE OCCURRENCES PER BIN OVER ALL SOLUTIONS
# Note: Stacked bar visualization for each code structure bin
# Label midpoints: for each structure within a bin, pos is the vertical center
# of its stacked-bar segment (cumulative count minus half the segment height).
bin_structs_midpoint1 <- bin_structs_count1 %>%
  group_by(Bin) %>%
  mutate(pos = cumsum(Counts) - 0.5 * Counts)

# Stacked horizontal bar chart: one bar per Bin, segments filled by Structure,
# white count labels centered on each segment via pos.
g_all_solns1 <- ggplot(data = bin_structs_midpoint1, aes(x = Bin, y = Counts))
g_all_solns1 +
  geom_bar(aes(fill = Structure), stat = "identity") +
  coord_flip() +
  # Use ggtitle("CS2102: All solutions") when plotting the CS2102 data set.
  ggtitle("CS19: All solutions") +
  theme(legend.position = "bottom") +
  geom_text(aes(label = Counts, y = pos), color = "white")
# Bar chart of solution counts per plan bin.
# Rename the "*Traverse" factor levels to "*Traversal" so the axis labels are
# uniform with the terminology used elsewhere.
bin_levels <- levels(bin_count$Bin)
bin_levels[bin_levels == "SingleTraverse"] <- "SingleTraversal"
bin_levels[bin_levels == "NestedTraverse"] <- "NestedTraversal"
levels(bin_count$Bin) <- bin_levels

# Horizontal bars with white bold count labels just inside the bar ends
# (hjust = 1.3) and a fixed 0-30 count axis.
ggplot(bin_count, aes(x = Bin, y = Counts)) +
  geom_bar(aes(fill = Bin), stat = "identity") +
  coord_flip() +
  theme(legend.position = "none",
        axis.ticks.x = element_blank(),
        panel.grid.major = element_blank(),
        axis.text = element_text(size = 12),
        axis.title = element_text(size = 14, face = "bold"),
        aspect.ratio = 0.7) +
  geom_text(aes(label = Counts), hjust = 1.3, color = "white",
            fontface = "bold", size = 5) +
  scale_y_continuous(limits = c(0, 30), expand = c(0, 0)) +
  labs(x = "Plan\n", y = "\nNumber of Solutions in each Plan Type")
# STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1 <- ggplot(data = same_bins_count1, aes(x = Bin1, y = Counts))
# g_same_bins1 + geom_bar(aes(fill = Bin1), stat = "identity") +
# # ggtitle("CS2102: Students with same bins for solutions 1, 2") +
# ggtitle("CS19: Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
# STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1 <- ggplot(data = diff_bins_allin1trav_count1, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1 + geom_bar(aes(fill = Bin2), stat = "identity") +
# # ggtitle("CS2102: Students with [Single/Nested]Traverse as one of the solutions") +
# ggtitle("CS19: Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1 <- tableGrob(allin1trav_both1)
# grob_title <- textGrob("Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1)
# STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1 <- tableGrob(diff_bins_other1)
# grob_title <- textGrob("Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1)
# FOR STUDENTS WITH 1102 BACKGROUND
# # Compute midpoints of bars, for each structure in each bin; store in variable pos
# bin_structs1102_midpoint <- group_by(bin_structs1102_count, Bin) %>%
# mutate(pos = cumsum(Counts) - (0.5 * Counts))
#
# # Graph all structures, grouped by bin
# g_all_solns1102 <- ggplot(data = bin_structs1102_midpoint, aes(x = Bin, y = Counts))
# g_all_solns1102 + geom_bar(aes(fill = Structure), stat = "identity") +
# coord_flip() +
# ggtitle("CS2102: All solutions from students with 1102 background") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts, y = pos), color = "white")
#
# # 1102 STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1102 <- ggplot(data = same_bins_count1102, aes(x = Bin1, y = Counts))
# g_same_bins1102 + geom_bar(aes(fill = Bin1), stat = "identity") +
# ggtitle("CS2102: 1102 Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
#
# # 1102 STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1102 <- ggplot(data = diff_bins_allin1trav_count1102, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1102 + geom_bar(aes(fill = Bin2), stat = "identity") +
# ggtitle("CS2102: 1102 Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# # 1102 STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1102 <- tableGrob(allin1trav_both1102)
# grob_title <- textGrob("1102 Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1102)
# 1102 STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1102 <- tableGrob(diff_bins_other1102)
# grob_title <- textGrob("1102 Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1102)
# FOR STUDENTS WITH 1101 BACKGROUND
# # Compute midpoints of bars, for each structure in each bin; store in variable pos
# bin_structs1101_midpoint <- group_by(bin_structs1101_count, Bin) %>%
# mutate(pos = cumsum(Counts) - (0.5 * Counts))
# #
# # Graph all structures, grouped by bin
# g_all_solns1101 <- ggplot(data = bin_structs1101_midpoint, aes(x = Bin, y = Counts))
# g_all_solns1101 + geom_bar(aes(fill = Structure), stat = "identity") +
# coord_flip() +
# ggtitle("CS2102: All solutions from students with 1101 background") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts, y = pos), color = "white")
# #
# # 1101 STUDENTS WITH SAME BINS FOR BOTH SOLUTIONS
# g_same_bins1101 <- ggplot(data = same_bins_count1101, aes(x = Bin1, y = Counts))
# g_same_bins1101 + geom_bar(aes(fill = Bin1), stat = "identity") +
# ggtitle("CS2102: 1101 Students with same bins for solutions 1, 2") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15))
# #
# # 1101 STUDENTS WITH DIFFERENT BINS, BUT ONE IN SINGLETRAVERSE/NESTEDTRAVERSE BIN
# g_diff_bins_allin1trav1101 <- ggplot(data = diff_bins_allin1trav_count1101, aes(x = Bin2, y = Counts))
# g_diff_bins_allin1trav1101 + geom_bar(aes(fill = Bin2), stat = "identity") +
# ggtitle("CS2102: 1101 Students with [Single/Nested]Traverse as one of the solutions") +
# theme(legend.position = "bottom") +
# geom_text(aes(label = Counts), vjust = -0.5) +
# scale_y_continuous(limits = c(0,15)) +
# facet_grid(Bin1 ~ .)
# #
# # 1101 STUDENTS WITH DIFFERENT SOLUTIONS BUT BOTH IN ALLTOGETHER
# g_allin1trav_both1101 <- tableGrob(allin1trav_both1101)
# grob_title <- textGrob("1101 Students with different solutions but both in AllTogether")
# grid.arrange(grob_title, g_allin1trav_both1101)
# #
# # 1101 STUDENTS WHO DID NOT DO SINGLETRAVERSE OR NESTED TRAVERSE
# g_diff_bins_other1101 <- tableGrob(diff_bins_other1101)
# grob_title <- textGrob("1101 Students who did not do SingleTraverse or NestedTraverse")
# grid.arrange(grob_title, g_diff_bins_other1101)
|
##' Compute the minimum sample size to reach a specified target confidence using either the
##' exact (discrete) method or the asymptotic (continuous) approximation.
##'
##' @author Alex Venzin
##' @export
##' @param N The total number of items in the population
##' @param nh The number of judgmental samples
##' @param fnr The false negative probability
##' @param rho The expectation relation multiplier
##' @param thresh The number of items that one is willing to accept following sampling that
##' are unacceptable
##' @param prior.prob The prior probability of observing an unacceptable item in the judgmental
##' subpopulation
##' @param targetConf The target confidence you are trying to achieve
##' @param method Can be one of 'discrete' (exact) or 'continuous' (asymptotic
##' approximation). If \code{(1-pct.clean)*N} is large, then the 'continuous'
##' method is significantly faster, but less accurate
##' @param nProcesses The number of parallel processes required to calculate the exact confidence
##' @return A list with the required number of samples and the achieved/target
##' confidence; on failure of the continuous method, a list echoing the inputs
sampleSize.fnr <- function(N, nh, fnr, rho, thresh, prior.prob, targetConf,
                           method = c('discrete', 'continuous'), nProcesses = 1){

  method <- match.arg(method)

  if(method == 'discrete'){

    # Signed distance between the confidence achieved with nl additional
    # samples and the target; a root of this function is the sample size.
    objFun <- function(nl){
      calcConf.fnr(n1 = nh, n2 = nl, fnr = fnr,
                   r = rho, N = N, prior.prob = prior.prob,
                   thresh = thresh, nProcesses = nProcesses, method = 'lbeta') - targetConf
    }

    if(objFun(N-nh) < 0){
      # Even sampling the entire remaining population cannot reach targetConf
      warning('Number of samples required exceeds number of items in population.')
      out <- list('samples' = Inf, 'confidence' = NaN)
    } else if(objFun(0) > 0){
      # The judgmental samples alone already achieve the target confidence
      message('No samples required to achieve target confidence')
      achieved.conf <- calcConf.fnr(n1 = nh, n2 = 0, fnr = fnr,
                                    r = rho, N = N, prior.prob = prior.prob,
                                    thresh = thresh, nProcesses = nProcesses, method = 'lbeta')
      out <- list('samples' = 0, 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    } else {
      # Bracketed root search over [0, N - nh]; round up to whole samples.
      samps <- uniroot(objFun, c(0, N-nh), tol = 1e-08)$root
      achieved.conf <- calcConf.fnr(n1 = nh, n2 = ceiling(samps), fnr = fnr,
                                    r = rho, N = N, prior.prob = prior.prob,
                                    thresh = thresh, nProcesses = nProcesses,
                                    method = 'lbeta')
      out <- list('samples' = ceiling(samps), 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    }

  } else {

    # Continuous approximation based on the hypergeometric confidence.
    objFun <- function(nl){
      fnrConf.hgeo(n1 = nh, n2 = nl, N = N, fnr = fnr, r = rho,
                   prior.prob = prior.prob, thresh = thresh) - targetConf
    }

    # The approximation can fail numerically, so probe both ends of the
    # search interval with try() before attempting the root search.
    test1 <- try(objFun(N-nh))
    test2 <- try(objFun(0))

    if(inherits(test1, 'try-error')){
      message('Algorithm failure. Returning parameters.')
      out <- list(nh = nh, nl = N-nh, N = N, fnr = fnr, rho = rho,
                  prior.prob = prior.prob, thresh = thresh,
                  targetConf = targetConf)
    } else if(test1 < 0){
      message('Number of samples required exceeds number of items in population.')
      out <- list('samples' = Inf, 'confidence' = NaN)
    } else if(inherits(test2, 'try-error')){
      message('Algorithm failure. Returning parameters.')
      out <- list(nh = nh, nl = 0, N = N, fnr = fnr, rho = rho,
                  prior.prob = prior.prob, thresh = thresh,
                  targetConf = targetConf)
    } else if(test2 > 0){
      message('No samples required to achieve target confidence')
      achieved.conf <- fnrConf.hgeo(n1 = nh, n2 = 0, N = N, fnr = fnr, r = rho,
                                    prior.prob = prior.prob, thresh = thresh)
      out <- list('samples' = 0, 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    } else {
      samps <- try(uniroot(objFun, c(0, N-nh), tol = 1e-08)$root)
      if(inherits(samps, 'try-error')){
        message('Algorithm failure. Returning parameters.')
        out <- list(nh = nh, nl = NA, N = N, fnr = fnr, rho = rho,
                    prior.prob = prior.prob, thresh = thresh,
                    targetConf = targetConf)
      } else {
        achieved.conf <- fnrConf.hgeo(n1 = nh, n2 = ceiling(samps), N = N, fnr = fnr, r = rho,
                                      prior.prob = prior.prob, thresh = thresh)
        out <- list('samples' = ceiling(samps), 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
      }
    }
  }

  return(out)
}
|
/cjr/R/sampleSize.fnr.R
|
no_license
|
lhsego/sUtils
|
R
| false
| false
| 4,885
|
r
|
##' Compute the minimum sample size to reach a specified target confidence using either the
##' exact (discrete) method or the asymptotic (continuous) approximation.
##'
##' @author Alex Venzin
##' @export
##' @param N The total number of items in the population
##' @param nh The number of judgmental samples
##' @param fnr The false negative probability
##' @param rho The expectation relation multiplier
##' @param thresh The number of items that one is willing to accept following sampling that
##' are unacceptable
##' @param prior.prob The prior probability of observing an unacceptable item in the judgmental
##' subpopulation
##' @param targetConf The target confidence you are trying to achieve
##' @param method Can be one of 'discrete' (exact) or 'continuous' (asymptotic
##' approximation). If \code{(1-pct.clean)*N} is large, then the 'continuous'
##' method is significantly faster, but less accurate
##' @param nProcesses The number of parallel processes required to calculate the exact confidence
##' @return A list with the required number of samples and the achieved/target
##' confidence; on failure of the continuous method, a list echoing the inputs
sampleSize.fnr <- function(N, nh, fnr, rho, thresh, prior.prob, targetConf,
                           method = c('discrete', 'continuous'), nProcesses = 1){

  method <- match.arg(method)

  if(method == 'discrete'){

    # Signed distance between the confidence achieved with nl additional
    # samples and the target; a root of this function is the sample size.
    objFun <- function(nl){
      calcConf.fnr(n1 = nh, n2 = nl, fnr = fnr,
                   r = rho, N = N, prior.prob = prior.prob,
                   thresh = thresh, nProcesses = nProcesses, method = 'lbeta') - targetConf
    }

    if(objFun(N-nh) < 0){
      # Even sampling the entire remaining population cannot reach targetConf
      warning('Number of samples required exceeds number of items in population.')
      out <- list('samples' = Inf, 'confidence' = NaN)
    } else if(objFun(0) > 0){
      # The judgmental samples alone already achieve the target confidence
      message('No samples required to achieve target confidence')
      achieved.conf <- calcConf.fnr(n1 = nh, n2 = 0, fnr = fnr,
                                    r = rho, N = N, prior.prob = prior.prob,
                                    thresh = thresh, nProcesses = nProcesses, method = 'lbeta')
      out <- list('samples' = 0, 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    } else {
      # Bracketed root search over [0, N - nh]; round up to whole samples.
      samps <- uniroot(objFun, c(0, N-nh), tol = 1e-08)$root
      achieved.conf <- calcConf.fnr(n1 = nh, n2 = ceiling(samps), fnr = fnr,
                                    r = rho, N = N, prior.prob = prior.prob,
                                    thresh = thresh, nProcesses = nProcesses,
                                    method = 'lbeta')
      out <- list('samples' = ceiling(samps), 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    }

  } else {

    # Continuous approximation based on the hypergeometric confidence.
    objFun <- function(nl){
      fnrConf.hgeo(n1 = nh, n2 = nl, N = N, fnr = fnr, r = rho,
                   prior.prob = prior.prob, thresh = thresh) - targetConf
    }

    # The approximation can fail numerically, so probe both ends of the
    # search interval with try() before attempting the root search.
    test1 <- try(objFun(N-nh))
    test2 <- try(objFun(0))

    if(inherits(test1, 'try-error')){
      message('Algorithm failure. Returning parameters.')
      out <- list(nh = nh, nl = N-nh, N = N, fnr = fnr, rho = rho,
                  prior.prob = prior.prob, thresh = thresh,
                  targetConf = targetConf)
    } else if(test1 < 0){
      message('Number of samples required exceeds number of items in population.')
      out <- list('samples' = Inf, 'confidence' = NaN)
    } else if(inherits(test2, 'try-error')){
      message('Algorithm failure. Returning parameters.')
      out <- list(nh = nh, nl = 0, N = N, fnr = fnr, rho = rho,
                  prior.prob = prior.prob, thresh = thresh,
                  targetConf = targetConf)
    } else if(test2 > 0){
      message('No samples required to achieve target confidence')
      achieved.conf <- fnrConf.hgeo(n1 = nh, n2 = 0, N = N, fnr = fnr, r = rho,
                                    prior.prob = prior.prob, thresh = thresh)
      out <- list('samples' = 0, 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
    } else {
      samps <- try(uniroot(objFun, c(0, N-nh), tol = 1e-08)$root)
      if(inherits(samps, 'try-error')){
        message('Algorithm failure. Returning parameters.')
        out <- list(nh = nh, nl = NA, N = N, fnr = fnr, rho = rho,
                    prior.prob = prior.prob, thresh = thresh,
                    targetConf = targetConf)
      } else {
        achieved.conf <- fnrConf.hgeo(n1 = nh, n2 = ceiling(samps), N = N, fnr = fnr, r = rho,
                                      prior.prob = prior.prob, thresh = thresh)
        out <- list('samples' = ceiling(samps), 'achieved.conf' = achieved.conf, 'true.conf' = targetConf)
      }
    }
  }

  return(out)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{imagebuilder}
\alias{imagebuilder}
\title{EC2 Image Builder}
\usage{
imagebuilder(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
EC2 Image Builder is a fully managed Amazon Web Services service that
makes it easier to automate the creation, management, and deployment of
customized, secure, and up-to-date "golden" server images that are
pre-installed and pre-configured with software and settings to meet
specific IT standards.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- imagebuilder(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.compute:imagebuilder_cancel_image_creation]{cancel_image_creation} \tab CancelImageCreation cancels the creation of Image\cr
\link[paws.compute:imagebuilder_create_component]{create_component} \tab Creates a new component that can be used to build, validate, test, and assess your image\cr
\link[paws.compute:imagebuilder_create_container_recipe]{create_container_recipe} \tab Creates a new container recipe\cr
\link[paws.compute:imagebuilder_create_distribution_configuration]{create_distribution_configuration} \tab Creates a new distribution configuration\cr
\link[paws.compute:imagebuilder_create_image]{create_image} \tab Creates a new image\cr
\link[paws.compute:imagebuilder_create_image_pipeline]{create_image_pipeline} \tab Creates a new image pipeline\cr
\link[paws.compute:imagebuilder_create_image_recipe]{create_image_recipe} \tab Creates a new image recipe\cr
\link[paws.compute:imagebuilder_create_infrastructure_configuration]{create_infrastructure_configuration} \tab Creates a new infrastructure configuration\cr
\link[paws.compute:imagebuilder_delete_component]{delete_component} \tab Deletes a component build version\cr
\link[paws.compute:imagebuilder_delete_container_recipe]{delete_container_recipe} \tab Deletes a container recipe\cr
\link[paws.compute:imagebuilder_delete_distribution_configuration]{delete_distribution_configuration} \tab Deletes a distribution configuration\cr
\link[paws.compute:imagebuilder_delete_image]{delete_image} \tab Deletes an Image Builder image resource\cr
\link[paws.compute:imagebuilder_delete_image_pipeline]{delete_image_pipeline} \tab Deletes an image pipeline\cr
\link[paws.compute:imagebuilder_delete_image_recipe]{delete_image_recipe} \tab Deletes an image recipe\cr
\link[paws.compute:imagebuilder_delete_infrastructure_configuration]{delete_infrastructure_configuration} \tab Deletes an infrastructure configuration\cr
\link[paws.compute:imagebuilder_get_component]{get_component} \tab Gets a component object\cr
\link[paws.compute:imagebuilder_get_component_policy]{get_component_policy} \tab Gets a component policy\cr
\link[paws.compute:imagebuilder_get_container_recipe]{get_container_recipe} \tab Retrieves a container recipe\cr
\link[paws.compute:imagebuilder_get_container_recipe_policy]{get_container_recipe_policy} \tab Retrieves the policy for a container recipe\cr
\link[paws.compute:imagebuilder_get_distribution_configuration]{get_distribution_configuration} \tab Gets a distribution configuration\cr
\link[paws.compute:imagebuilder_get_image]{get_image} \tab Gets an image\cr
\link[paws.compute:imagebuilder_get_image_pipeline]{get_image_pipeline} \tab Gets an image pipeline\cr
\link[paws.compute:imagebuilder_get_image_policy]{get_image_policy} \tab Gets an image policy\cr
\link[paws.compute:imagebuilder_get_image_recipe]{get_image_recipe} \tab Gets an image recipe\cr
\link[paws.compute:imagebuilder_get_image_recipe_policy]{get_image_recipe_policy} \tab Gets an image recipe policy\cr
\link[paws.compute:imagebuilder_get_infrastructure_configuration]{get_infrastructure_configuration} \tab Gets an infrastructure configuration\cr
\link[paws.compute:imagebuilder_get_workflow_execution]{get_workflow_execution} \tab Get the runtime information that was logged for a specific runtime instance of the workflow\cr
\link[paws.compute:imagebuilder_get_workflow_step_execution]{get_workflow_step_execution} \tab Get the runtime information that was logged for a specific runtime instance of the workflow step\cr
\link[paws.compute:imagebuilder_import_component]{import_component} \tab Imports a component and transforms its data into a component document\cr
\link[paws.compute:imagebuilder_import_vm_image]{import_vm_image} \tab When you export your virtual machine (VM) from its virtualization environment, that process creates a set of one or more disk container files that act as snapshots of your VM’s environment, settings, and data\cr
\link[paws.compute:imagebuilder_list_component_build_versions]{list_component_build_versions} \tab Returns the list of component build versions for the specified semantic version\cr
\link[paws.compute:imagebuilder_list_components]{list_components} \tab Returns the list of components that can be filtered by name, or by using the listed filters to streamline results\cr
\link[paws.compute:imagebuilder_list_container_recipes]{list_container_recipes} \tab Returns a list of container recipes\cr
\link[paws.compute:imagebuilder_list_distribution_configurations]{list_distribution_configurations} \tab Returns a list of distribution configurations\cr
\link[paws.compute:imagebuilder_list_image_build_versions]{list_image_build_versions} \tab Returns a list of image build versions\cr
\link[paws.compute:imagebuilder_list_image_packages]{list_image_packages} \tab List the Packages that are associated with an Image Build Version, as determined by Amazon Web Services Systems Manager Inventory at build time\cr
\link[paws.compute:imagebuilder_list_image_pipeline_images]{list_image_pipeline_images} \tab Returns a list of images created by the specified pipeline\cr
\link[paws.compute:imagebuilder_list_image_pipelines]{list_image_pipelines} \tab Returns a list of image pipelines\cr
\link[paws.compute:imagebuilder_list_image_recipes]{list_image_recipes} \tab Returns a list of image recipes\cr
\link[paws.compute:imagebuilder_list_images]{list_images} \tab Returns the list of images that you have access to\cr
\link[paws.compute:imagebuilder_list_image_scan_finding_aggregations]{list_image_scan_finding_aggregations} \tab Returns a list of image scan aggregations for your account\cr
\link[paws.compute:imagebuilder_list_image_scan_findings]{list_image_scan_findings} \tab Returns a list of image scan findings for your account\cr
\link[paws.compute:imagebuilder_list_infrastructure_configurations]{list_infrastructure_configurations} \tab Returns a list of infrastructure configurations\cr
\link[paws.compute:imagebuilder_list_tags_for_resource]{list_tags_for_resource} \tab Returns the list of tags for the specified resource\cr
\link[paws.compute:imagebuilder_list_workflow_executions]{list_workflow_executions} \tab Returns a list of workflow runtime instance metadata objects for a specific image build version\cr
\link[paws.compute:imagebuilder_list_workflow_step_executions]{list_workflow_step_executions} \tab Shows runtime data for each step in a runtime instance of the workflow that you specify in the request\cr
\link[paws.compute:imagebuilder_put_component_policy]{put_component_policy} \tab Applies a policy to a component\cr
\link[paws.compute:imagebuilder_put_container_recipe_policy]{put_container_recipe_policy} \tab Applies a policy to a container image\cr
\link[paws.compute:imagebuilder_put_image_policy]{put_image_policy} \tab Applies a policy to an image\cr
\link[paws.compute:imagebuilder_put_image_recipe_policy]{put_image_recipe_policy} \tab Applies a policy to an image recipe\cr
\link[paws.compute:imagebuilder_start_image_pipeline_execution]{start_image_pipeline_execution} \tab Manually triggers a pipeline to create an image\cr
\link[paws.compute:imagebuilder_tag_resource]{tag_resource} \tab Adds a tag to a resource\cr
\link[paws.compute:imagebuilder_untag_resource]{untag_resource} \tab Removes a tag from a resource\cr
\link[paws.compute:imagebuilder_update_distribution_configuration]{update_distribution_configuration} \tab Updates a new distribution configuration\cr
\link[paws.compute:imagebuilder_update_image_pipeline]{update_image_pipeline} \tab Updates an image pipeline\cr
\link[paws.compute:imagebuilder_update_infrastructure_configuration]{update_infrastructure_configuration} \tab Updates a new infrastructure configuration
}
}
\examples{
\dontrun{
svc <- imagebuilder()
svc$cancel_image_creation(
Foo = 123
)
}
}
|
/man/imagebuilder.Rd
|
no_license
|
cran/paws
|
R
| false
| true
| 10,080
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.R
\name{imagebuilder}
\alias{imagebuilder}
\title{EC2 Image Builder}
\usage{
imagebuilder(config = list())
}
\arguments{
\item{config}{Optional configuration of credentials, endpoint, and/or region.
\itemize{
\item{\strong{access_key_id}:} {AWS access key ID}
\item{\strong{secret_access_key}:} {AWS secret access key}
\item{\strong{session_token}:} {AWS temporary session token}
\item{\strong{profile}:} {The name of a profile to use. If not given, then the default profile is used.}
\item{\strong{anonymous}:} {Set anonymous credentials.}
\item{\strong{endpoint}:} {The complete URL to use for the constructed client.}
\item{\strong{region}:} {The AWS Region used in instantiating the client.}
\item{\strong{close_connection}:} {Immediately close all HTTP connections.}
\item{\strong{timeout}:} {The time in seconds till a timeout exception is thrown when attempting to make a connection. The default is 60 seconds.}
\item{\strong{s3_force_path_style}:} {Set this to \code{true} to force the request to use path-style addressing, i.e., \verb{http://s3.amazonaws.com/BUCKET/KEY}.}
}}
}
\value{
A client for the service. You can call the service's operations using
syntax like \code{svc$operation(...)}, where \code{svc} is the name you've assigned
to the client. The available operations are listed in the
Operations section.
}
\description{
EC2 Image Builder is a fully managed Amazon Web Services service that
makes it easier to automate the creation, management, and deployment of
customized, secure, and up-to-date "golden" server images that are
pre-installed and pre-configured with software and settings to meet
specific IT standards.
}
\section{Service syntax}{
\if{html}{\out{<div class="sourceCode">}}\preformatted{svc <- imagebuilder(
config = list(
credentials = list(
creds = list(
access_key_id = "string",
secret_access_key = "string",
session_token = "string"
),
profile = "string",
anonymous = "logical"
),
endpoint = "string",
region = "string",
close_connection = "logical",
timeout = "numeric",
s3_force_path_style = "logical"
)
)
}\if{html}{\out{</div>}}
}
\section{Operations}{
\tabular{ll}{
\link[paws.compute:imagebuilder_cancel_image_creation]{cancel_image_creation} \tab CancelImageCreation cancels the creation of Image\cr
\link[paws.compute:imagebuilder_create_component]{create_component} \tab Creates a new component that can be used to build, validate, test, and assess your image\cr
\link[paws.compute:imagebuilder_create_container_recipe]{create_container_recipe} \tab Creates a new container recipe\cr
\link[paws.compute:imagebuilder_create_distribution_configuration]{create_distribution_configuration} \tab Creates a new distribution configuration\cr
\link[paws.compute:imagebuilder_create_image]{create_image} \tab Creates a new image\cr
\link[paws.compute:imagebuilder_create_image_pipeline]{create_image_pipeline} \tab Creates a new image pipeline\cr
\link[paws.compute:imagebuilder_create_image_recipe]{create_image_recipe} \tab Creates a new image recipe\cr
\link[paws.compute:imagebuilder_create_infrastructure_configuration]{create_infrastructure_configuration} \tab Creates a new infrastructure configuration\cr
\link[paws.compute:imagebuilder_delete_component]{delete_component} \tab Deletes a component build version\cr
\link[paws.compute:imagebuilder_delete_container_recipe]{delete_container_recipe} \tab Deletes a container recipe\cr
\link[paws.compute:imagebuilder_delete_distribution_configuration]{delete_distribution_configuration} \tab Deletes a distribution configuration\cr
\link[paws.compute:imagebuilder_delete_image]{delete_image} \tab Deletes an Image Builder image resource\cr
\link[paws.compute:imagebuilder_delete_image_pipeline]{delete_image_pipeline} \tab Deletes an image pipeline\cr
\link[paws.compute:imagebuilder_delete_image_recipe]{delete_image_recipe} \tab Deletes an image recipe\cr
\link[paws.compute:imagebuilder_delete_infrastructure_configuration]{delete_infrastructure_configuration} \tab Deletes an infrastructure configuration\cr
\link[paws.compute:imagebuilder_get_component]{get_component} \tab Gets a component object\cr
\link[paws.compute:imagebuilder_get_component_policy]{get_component_policy} \tab Gets a component policy\cr
\link[paws.compute:imagebuilder_get_container_recipe]{get_container_recipe} \tab Retrieves a container recipe\cr
\link[paws.compute:imagebuilder_get_container_recipe_policy]{get_container_recipe_policy} \tab Retrieves the policy for a container recipe\cr
\link[paws.compute:imagebuilder_get_distribution_configuration]{get_distribution_configuration} \tab Gets a distribution configuration\cr
\link[paws.compute:imagebuilder_get_image]{get_image} \tab Gets an image\cr
\link[paws.compute:imagebuilder_get_image_pipeline]{get_image_pipeline} \tab Gets an image pipeline\cr
\link[paws.compute:imagebuilder_get_image_policy]{get_image_policy} \tab Gets an image policy\cr
\link[paws.compute:imagebuilder_get_image_recipe]{get_image_recipe} \tab Gets an image recipe\cr
\link[paws.compute:imagebuilder_get_image_recipe_policy]{get_image_recipe_policy} \tab Gets an image recipe policy\cr
\link[paws.compute:imagebuilder_get_infrastructure_configuration]{get_infrastructure_configuration} \tab Gets an infrastructure configuration\cr
\link[paws.compute:imagebuilder_get_workflow_execution]{get_workflow_execution} \tab Get the runtime information that was logged for a specific runtime instance of the workflow\cr
\link[paws.compute:imagebuilder_get_workflow_step_execution]{get_workflow_step_execution} \tab Get the runtime information that was logged for a specific runtime instance of the workflow step\cr
\link[paws.compute:imagebuilder_import_component]{import_component} \tab Imports a component and transforms its data into a component document\cr
\link[paws.compute:imagebuilder_import_vm_image]{import_vm_image} \tab When you export your virtual machine (VM) from its virtualization environment, that process creates a set of one or more disk container files that act as snapshots of your VM’s environment, settings, and data\cr
\link[paws.compute:imagebuilder_list_component_build_versions]{list_component_build_versions} \tab Returns the list of component build versions for the specified semantic version\cr
\link[paws.compute:imagebuilder_list_components]{list_components} \tab Returns the list of components that can be filtered by name, or by using the listed filters to streamline results\cr
\link[paws.compute:imagebuilder_list_container_recipes]{list_container_recipes} \tab Returns a list of container recipes\cr
\link[paws.compute:imagebuilder_list_distribution_configurations]{list_distribution_configurations} \tab Returns a list of distribution configurations\cr
\link[paws.compute:imagebuilder_list_image_build_versions]{list_image_build_versions} \tab Returns a list of image build versions\cr
\link[paws.compute:imagebuilder_list_image_packages]{list_image_packages} \tab List the Packages that are associated with an Image Build Version, as determined by Amazon Web Services Systems Manager Inventory at build time\cr
\link[paws.compute:imagebuilder_list_image_pipeline_images]{list_image_pipeline_images} \tab Returns a list of images created by the specified pipeline\cr
\link[paws.compute:imagebuilder_list_image_pipelines]{list_image_pipelines} \tab Returns a list of image pipelines\cr
\link[paws.compute:imagebuilder_list_image_recipes]{list_image_recipes} \tab Returns a list of image recipes\cr
\link[paws.compute:imagebuilder_list_images]{list_images} \tab Returns the list of images that you have access to\cr
\link[paws.compute:imagebuilder_list_image_scan_finding_aggregations]{list_image_scan_finding_aggregations} \tab Returns a list of image scan aggregations for your account\cr
\link[paws.compute:imagebuilder_list_image_scan_findings]{list_image_scan_findings} \tab Returns a list of image scan findings for your account\cr
\link[paws.compute:imagebuilder_list_infrastructure_configurations]{list_infrastructure_configurations} \tab Returns a list of infrastructure configurations\cr
\link[paws.compute:imagebuilder_list_tags_for_resource]{list_tags_for_resource} \tab Returns the list of tags for the specified resource\cr
\link[paws.compute:imagebuilder_list_workflow_executions]{list_workflow_executions} \tab Returns a list of workflow runtime instance metadata objects for a specific image build version\cr
\link[paws.compute:imagebuilder_list_workflow_step_executions]{list_workflow_step_executions} \tab Shows runtime data for each step in a runtime instance of the workflow that you specify in the request\cr
\link[paws.compute:imagebuilder_put_component_policy]{put_component_policy} \tab Applies a policy to a component\cr
\link[paws.compute:imagebuilder_put_container_recipe_policy]{put_container_recipe_policy} \tab Applies a policy to a container image\cr
\link[paws.compute:imagebuilder_put_image_policy]{put_image_policy} \tab Applies a policy to an image\cr
\link[paws.compute:imagebuilder_put_image_recipe_policy]{put_image_recipe_policy} \tab Applies a policy to an image recipe\cr
\link[paws.compute:imagebuilder_start_image_pipeline_execution]{start_image_pipeline_execution} \tab Manually triggers a pipeline to create an image\cr
\link[paws.compute:imagebuilder_tag_resource]{tag_resource} \tab Adds a tag to a resource\cr
\link[paws.compute:imagebuilder_untag_resource]{untag_resource} \tab Removes a tag from a resource\cr
\link[paws.compute:imagebuilder_update_distribution_configuration]{update_distribution_configuration} \tab Updates a distribution configuration\cr
\link[paws.compute:imagebuilder_update_image_pipeline]{update_image_pipeline} \tab Updates an image pipeline\cr
\link[paws.compute:imagebuilder_update_infrastructure_configuration]{update_infrastructure_configuration} \tab Updates an infrastructure configuration
}
}
\examples{
\dontrun{
svc <- imagebuilder()
svc$cancel_image_creation(
Foo = 123
)
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vec_dmeasure.R
\name{vec_dmeasure.internal}
\alias{vec_dmeasure.internal}
\title{Evaluate the unit measurement model density function for each unit}
\usage{
vec_dmeasure.internal(object, y, x, times, params, log = FALSE,
.gnsi = TRUE, ...)
}
\description{
Evaluate the unit measurement model density function for each unit
}
|
/man/vec_dmeasure.Rd
|
no_license
|
nanyyyyyy/spatPomp
|
R
| false
| true
| 405
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/vec_dmeasure.R
\name{vec_dmeasure.internal}
\alias{vec_dmeasure.internal}
\title{Evaluate the unit measurement model density function for each unit}
\usage{
vec_dmeasure.internal(object, y, x, times, params, log = FALSE,
.gnsi = TRUE, ...)
}
\description{
Evaluate the unit measurement model density function for each unit
}
|
#### About ----
# This code prepares data and generates maps for Census tracts and ZCTAs by their RUCA code classification.
#### Set up ----
library(tmap)
library(sf)
library(tidyverse)
# NOTE(review): setwd() makes the script machine-specific; consider relative
# paths or here::here() instead.
setwd("~/git/rural-urban-classification")
#### Projections -----
# Every layer is reprojected to an Albers equal-area projection; Alaska and
# Hawaii get their own projections and are mapped separately below.
# Set CRS, lower 48 - EPSG 102003 USA_Contiguous_Albers_Equal_Area_Conic
crs <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
# Alaska, EPSG:3338 https://epsg.io/3338
crs_alaska = "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs "
# Hawaii, ESRI:102007 https://epsg.io/102007
crs_hawaii = "+proj=aea +lat_1=8 +lat_2=18 +lat_0=13 +lon_0=-157 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"
#### Load geometries -----
# ZCTA geometry files
zips.sf <- st_read("data_raw/geometryFiles/tl_2018_zcta/zctas2018.shp") %>%
st_transform(crs)
# Tract geometry files
tracts.sf <- st_read("data_raw/geometryFiles/tl_2018_tract/") %>%
st_transform(crs)
# States geometry files
states.sf <- st_read("data_raw/geometryFiles/tl_2018_state/") %>%
st_transform(crs)
# Broken down state geometries
# Lower-48 states only (FIPS 02 = Alaska, 15 = Hawaii are excluded here)
states48.sf <- states.sf %>%
filter(!STATEFP %in% c("02", "15"))
# Alaska map
alaska.sf <- states.sf %>% filter(STATEFP == "02") %>% st_transform(crs_alaska)
# Hawaii map
hawaii.sf <- states.sf %>% filter(STATEFP == "15") %>% st_transform(crs_hawaii)
#### Tract Maps ----
# Load data
rucaT <- read.csv("data_final/UrbanSubRural_T.csv")
# Zero-pad tract FIPS codes to the full 11 characters so they match GEOID
# (read.csv drops leading zeros).
# Fix: sprintf("%011s", ...) is not portable — R documents the '0' flag as
# platform-dependent for %s (it pads with spaces on many platforms).
# str_pad (stringr, loaded via tidyverse) pads with zeros everywhere.
rucaT$tractFIPS <- str_pad(as.character(rucaT$tractFIPS), 11, pad = "0")
# Merge with geometry
# NOTE(review): the default merge is an inner join, so tracts absent from the
# RUCA table are dropped here; the all = TRUE merge further below keeps them.
rucaT.sf <- merge(tracts.sf, rucaT, by.x = "GEOID", by.y = "tractFIPS")
rucaT.sf <- st_transform(rucaT.sf, crs)
str(rucaT.sf)
# Alaska tracts
alaskaT.sf <- rucaT.sf %>% filter(STATEFP == "02") %>%
st_transform(crs_alaska)
# Hawaii tracts
hawaiiT.sf <- rucaT.sf %>% filter(STATEFP == "15") %>%
st_transform(crs_hawaii)
# Continental US tracts
usT.sf <- rucaT.sf %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Color scheme: rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Redo sf merge, but keep all variables in rucaT (all = TRUE keeps tracts
# absent from the RUCA table; their rurality is NA, so they can be shown
# as "missing" on the map below).
rucaT.sf2 <- merge(tracts.sf, rucaT, by.x = "GEOID", by.y = "tractFIPS", all = TRUE)
usT.sf2 <- rucaT.sf2 %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Map & show missing tracts (NA rurality drawn in black)
# Fix: usT.sf2 must be created before this map is built (it was previously
# referenced before its definition), and the object saved must be tract_map2
# (tract_map_missing was never created).
tract_map2 <-
tm_shape(usT.sf2) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "black",
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, main.title = "Census Tracts by Rural, Suburban, Urban Classification")
tmap_save(tract_map2, "figs/tract_map_missing.png")
# Update dataset to include all GEOIDs; unmatched tracts default to "Rural"
rucaT.sf_all <- rucaT.sf2
rucaT.sf_all$rurality <- ifelse(is.na(rucaT.sf2$rurality), "Rural", rucaT.sf2$rurality)
# New tract datasets - use these
# (these intentionally overwrite the alaskaT.sf / hawaiiT.sf built from the
# inner-join merge above, now derived from the complete rucaT.sf_all data)
# Alaska tracts
alaskaT.sf <- rucaT.sf_all %>% filter(STATEFP == "02") %>%
st_transform(crs_alaska)
# Hawaii tracts
hawaiiT.sf <- rucaT.sf_all %>% filter(STATEFP == "15") %>%
st_transform(crs_hawaii)
# Continental US tracts
usT.sf_all <- rucaT.sf_all %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Map
# NOTE(review): alaskaT_map and hawaiiT_map are built but never written with
# tmap_save — confirm whether they should be saved to figs/ as well.
alaskaT_map <-
tm_shape(alaskaT.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(alaska.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
hawaiiT_map <-
tm_shape(hawaiiT.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(hawaii.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
# US Tract Map (no title; NA tracts blend into the rural color and the NA
# legend entry is hidden)
tract_map_notitle <-
tm_shape(usT.sf_all) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "#c7eae5", showNA = FALSE,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
tmap_save(tract_map_notitle, "figs/tract_map_notitle.png")
# No legend
tract_map_all_nolegend <-
tm_shape(usT.sf_all) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "#c7eae5", showNA = FALSE,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
tmap_save(tract_map_all_nolegend, "figs/tract_map_nolegend.png")
#### Disaggregated Tract Maps - Urban, Rural, Suburban -----
# One single-class map per classification: the class of interest is filled in
# its palette color over faint tract outlines and state borders.
# Color scheme
# rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Rural map
rural <- usT.sf_all %>% filter(rurality == "Rural")
rural_map <-
tm_shape(rural) +
tm_fill(col = "rurality", palette = "#c7eae5",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(rural_map, "figs/rural_map.png")
# Suburban map
suburban <- usT.sf_all %>% filter(rurality == "Suburban")
suburban_map <-
tm_shape(suburban) +
tm_fill(col = "rurality", palette = "#5ab4ac",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(suburban_map, "figs/suburban_map.png")
# Urban map
urban <- usT.sf_all %>% filter(rurality == "Urban")
urban_map <-
tm_shape(urban) +
tm_fill(col = "rurality", palette = "#01665e",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(urban_map, "figs/urban_map.png")
#### ZCTA Maps ----
# Load data
rucaZ <- read.csv("data_final/RuralSubUrban_Z.csv")
# Zero-pad ZIP codes to 5 characters (read.csv drops leading zeros).
# Fix: sprintf("%05s", ...) is not portable — R documents the '0' flag as
# platform-dependent for %s. str_pad (stringr, loaded via tidyverse) pads
# with zeros everywhere.
rucaZ$ZIP_CODE <- str_pad(as.character(rucaZ$ZIP_CODE), 5, pad = "0")
# Merge with geometry (inner join: ZCTAs absent from the RUCA table are dropped)
rucaZ.sf <- merge(zips.sf, rucaZ, by.x = "ZCTA5CE10", by.y = "ZIP_CODE")
rucaZ.sf <- st_transform(rucaZ.sf, crs)
# Alaska zip codes
alaskaZ.sf <- rucaZ.sf %>% filter(STATE == "AK") %>%
st_transform(crs_alaska)
# Hawaii zip codes
hawaiiZ.sf <- rucaZ.sf %>% filter(STATE == "HI") %>%
st_transform(crs_hawaii)
# Continental US zip codes
usZ.sf <- rucaZ.sf %>% filter(!STATE %in% c("AK", "HI")) %>%
st_transform(crs)
# ZCTA Map
# Color scheme: rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
ZCTA_map <-
tm_shape(usZ.sf) +
tm_fill(col = "rurality", palette = rural.cols,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, main.title = "ZCTAs by Rural, Suburban, Urban Classification")
tmap_save(ZCTA_map, "figs/zcta_map.png")
ZCTA_map_notitle <-
tm_shape(usZ.sf) +
tm_fill(col = "rurality", palette = rural.cols,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
tmap_save(ZCTA_map_notitle, "figs/zcta_map_notitle.png")
# NOTE(review): the AK/HI maps below are built but never saved with
# tmap_save — confirm whether they should be written to figs/ as well.
alaskaZ_map <-
tm_shape(alaskaZ.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(alaska.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
hawaiiZ_map <-
tm_shape(hawaiiZ.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(hawaii.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
#### Disaggregated ZCTA Maps - Urban, Rural, Suburban -----
# Same single-class layout as the disaggregated tract maps, but at ZCTA level.
# Color scheme
# rural, suburban, urban
#rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Rural map
ruralZ <- usZ.sf %>% filter(rurality == "Rural")
ruralZ_map <-
tm_shape(ruralZ) +
tm_fill(col = "rurality", palette = "#c7eae5",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(ruralZ_map, "figs/ruralZ_map.png")
# Suburban map
suburbanZ <- usZ.sf %>% filter(rurality == "Suburban")
suburbanZ_map <-
tm_shape(suburbanZ) +
tm_fill(col = "rurality", palette = "#5ab4ac",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(suburbanZ_map, "figs/suburbanZ_map.png")
# Urban map
urbanZ <- usZ.sf %>% filter(rurality == "Urban")
urbanZ_map <-
tm_shape(urbanZ) +
tm_fill(col = "rurality", palette = "#01665e",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(urbanZ_map, "figs/urbanZ_map.png")
|
/code/rsu_maps.R
|
no_license
|
spaykin/rural-urban-classification
|
R
| false
| false
| 9,033
|
r
|
#### About ----
# This code prepares data and generates maps for Census tracts and ZCTAs by their RUCA code classification.
#### Set up ----
library(tmap)
library(sf)
library(tidyverse)
setwd("~/git/rural-urban-classification")
#### Projections -----
# Set CRS, lower 48 - EPSG 102003 USA_Contiguous_Albers_Equal_Area_Conic
crs <- "+proj=aea +lat_1=29.5 +lat_2=45.5 +lat_0=37.5 +lon_0=-96 +x_0=0 +y_0=0 +ellps=GRS80 +datum=NAD83 +units=m +no_defs"
# Alaska, EPSG:3338 https://epsg.io/3338
crs_alaska = "+proj=aea +lat_1=55 +lat_2=65 +lat_0=50 +lon_0=-154 +x_0=0 +y_0=0 +ellps=GRS80 +towgs84=0,0,0,0,0,0,0 +units=m +no_defs "
# Hawaii, ESRI:102007 https://epsg.io/102007
crs_hawaii = "+proj=aea +lat_1=8 +lat_2=18 +lat_0=13 +lon_0=-157 +x_0=0 +y_0=0 +datum=NAD83 +units=m +no_defs"
#### Load geometries -----
# ZCTA geometry files
zips.sf <- st_read("data_raw/geometryFiles/tl_2018_zcta/zctas2018.shp") %>%
st_transform(crs)
# Tract geometry files
tracts.sf <- st_read("data_raw/geometryFiles/tl_2018_tract/") %>%
st_transform(crs)
# States geometry files
states.sf <- st_read("data_raw/geometryFiles/tl_2018_state/") %>%
st_transform(crs)
# Broken down state geometries
states48.sf <- states.sf %>%
filter(!STATEFP %in% c("02", "15"))
# Alaska map
alaska.sf <- states.sf %>% filter(STATEFP == "02") %>% st_transform(crs_alaska)
# Hawaii map
hawaii.sf <- states.sf %>% filter(STATEFP == "15") %>% st_transform(crs_hawaii)
#### Tract Maps ----
# Load data
rucaT <- read.csv("data_final/UrbanSubRural_T.csv")
rucaT$tractFIPS <- sprintf("%011s", as.character(rucaT$tractFIPS))
# Merge with geometry
rucaT.sf <- merge(tracts.sf, rucaT, by.x = "GEOID", by.y = "tractFIPS")
rucaT.sf <- st_transform(rucaT.sf, crs)
str(rucaT.sf)
# Alaska tracts
alaskaT.sf <- rucaT.sf %>% filter(STATEFP == "02") %>%
st_transform(crs_alaska)
# Hawaii tracts
hawaiiT.sf <- rucaT.sf %>% filter(STATEFP == "15") %>%
st_transform(crs_hawaii)
# Continental US tracts
usT.sf <- rucaT.sf %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Color scheme
# rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Redo sf merge, but keep all variables in rucaT (all = TRUE keeps tracts
# absent from the RUCA table; their rurality is NA, so they can be shown
# as "missing" on the map below).
rucaT.sf2 <- merge(tracts.sf, rucaT, by.x = "GEOID", by.y = "tractFIPS", all = TRUE)
usT.sf2 <- rucaT.sf2 %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Map & show missing tracts (NA rurality drawn in black)
# Fix: usT.sf2 must be created before this map is built (it was previously
# referenced before its definition), and the object saved must be tract_map2
# (tract_map_missing was never created).
tract_map2 <-
tm_shape(usT.sf2) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "black",
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, main.title = "Census Tracts by Rural, Suburban, Urban Classification")
tmap_save(tract_map2, "figs/tract_map_missing.png")
# Update dataset to include all GEOIDs; unmatched tracts default to "Rural"
rucaT.sf_all <- rucaT.sf2
rucaT.sf_all$rurality <- ifelse(is.na(rucaT.sf2$rurality), "Rural", rucaT.sf2$rurality)
# New tract datasets - use these
# Alaska tracts
alaskaT.sf <- rucaT.sf_all %>% filter(STATEFP == "02") %>%
st_transform(crs_alaska)
# Hawaii tracts
hawaiiT.sf <- rucaT.sf_all %>% filter(STATEFP == "15") %>%
st_transform(crs_hawaii)
# Continental US tracts
usT.sf_all <- rucaT.sf_all %>% filter(!STATEFP %in% c("02", "15")) %>%
st_transform(crs)
# Map
alaskaT_map <-
tm_shape(alaskaT.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(alaska.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
hawaiiT_map <-
tm_shape(hawaiiT.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(hawaii.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
# US Tract Map
tract_map_notitle <-
tm_shape(usT.sf_all) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "#c7eae5", showNA = FALSE,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
tmap_save(tract_map_notitle, "figs/tract_map_notitle.png")
# No legend
tract_map_all_nolegend <-
tm_shape(usT.sf_all) +
tm_fill(col = "rurality", palette = rural.cols, colorNA = "#c7eae5", showNA = FALSE,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
tmap_save(tract_map_all_nolegend, "figs/tract_map_nolegend.png")
#### Disaggregated Tract Maps - Urban, Rural, Suburban -----
# Color scheme
# rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Rural map
rural <- usT.sf_all %>% filter(rurality == "Rural")
rural_map <-
tm_shape(rural) +
tm_fill(col = "rurality", palette = "#c7eae5",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(rural_map, "figs/rural_map.png")
# Suburban map
suburban <- usT.sf_all %>% filter(rurality == "Suburban")
suburban_map <-
tm_shape(suburban) +
tm_fill(col = "rurality", palette = "#5ab4ac",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(suburban_map, "figs/suburban_map.png")
# Urban map
urban <- usT.sf_all %>% filter(rurality == "Urban")
urban_map <-
tm_shape(urban) +
tm_fill(col = "rurality", palette = "#01665e",
title = "") +
tm_shape(usT.sf_all) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(urban_map, "figs/urban_map.png")
#### ZCTA Maps ----
# Load data
rucaZ <- read.csv("data_final/RuralSubUrban_Z.csv")
rucaZ$ZIP_CODE <- sprintf("%05s", as.character(rucaZ$ZIP_CODE))
# Merge with geometry
rucaZ.sf <- merge(zips.sf, rucaZ, by.x = "ZCTA5CE10", by.y = "ZIP_CODE")
rucaZ.sf <- st_transform(rucaZ.sf, crs)
# Alaksa zip codes
alaskaZ.sf <- rucaZ.sf %>% filter(STATE == "AK") %>%
st_transform(crs_alaska)
# Hawaii zip codes
hawaiiZ.sf <- rucaZ.sf %>% filter(STATE == "HI") %>%
st_transform(crs_hawaii)
# Continental US zip codes
usZ.sf <- rucaZ.sf %>% filter(!STATE %in% c("AK", "HI")) %>%
st_transform(crs)
# ZCTA Map
# Color scheme
# rural, suburban, urban
rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
ZCTA_map <-
tm_shape(usZ.sf) +
tm_fill(col = "rurality", palette = rural.cols,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, main.title = "ZCTAs by Rural, Suburban, Urban Classification")
tmap_save(ZCTA_map, "figs/zcta_map.png")
ZCTA_map_notitle <-
tm_shape(usZ.sf) +
tm_fill(col = "rurality", palette = rural.cols,
title = "Classification") +
tm_shape(states48.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE,
legend.title.size = 1.5,
legend.text.size = 1)
tmap_save(ZCTA_map_notitle, "figs/zcta_map_notitle.png")
alaskaZ_map <-
tm_shape(alaskaZ.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(alaska.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
hawaiiZ_map <-
tm_shape(hawaiiZ.sf) +
tm_fill(col = "rurality", palette = rural.cols) +
tm_shape(hawaii.sf) +
tm_borders(alpha = 0.7, lwd = 0.5) +
tm_layout(frame = FALSE, legend.show = FALSE)
#### Disaggregated ZCTA Maps - Urban, Rural, Suburban -----
# Color scheme
# rural, suburban, urban
#rural.cols <- c("#c7eae5", "#5ab4ac", "#01665e")
# Rural map
ruralZ <- usZ.sf %>% filter(rurality == "Rural")
ruralZ_map <-
tm_shape(ruralZ) +
tm_fill(col = "rurality", palette = "#c7eae5",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(ruralZ_map, "figs/ruralZ_map.png")
# Suburban map
suburbanZ <- usZ.sf %>% filter(rurality == "Suburban")
suburbanZ_map <-
tm_shape(suburbanZ) +
tm_fill(col = "rurality", palette = "#5ab4ac",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(suburbanZ_map, "figs/suburbanZ_map.png")
# Urban map
urbanZ <- usZ.sf %>% filter(rurality == "Urban")
urbanZ_map <-
tm_shape(urbanZ) +
tm_fill(col = "rurality", palette = "#01665e",
title = "") +
tm_shape(usZ.sf) + tm_borders(alpha = 0.3, lwd = 0.1) +
tm_shape(states48.sf) +
tm_borders(alpha = 1, lwd = 0.6) +
tm_layout(frame = FALSE,
legend.show = FALSE)
tmap_save(urbanZ_map, "figs/urbanZ_map.png")
|
#' @rdname rocCurve
#'
#' @description
#' A method to visualize the performance in the classification of synthesis, degradation
#' and processing rates based on the comparison of the original simulated rates and the one
#' obtained by the function \code{\link{modelRates}}. For each rate, classification performance is measured
#' in terms of sensitivity and specificity using a ROC curve analysis. False negatives (FN) represent cases
#' where the rate is identified as constant while it was simulated as varying. False positives (FP) represent
#' cases where INSPEcT identified a rate as varying while it was simulated as constant. On the contrary,
#' true positives (TP) and negatives (TN) are cases of correct classification of varying and constant rates, respectively.
#' Consequently, sensitivity and specificity are computed using increasing thresholds for the brown p-values,
#' and the ability of correctly classifying a rate is measured through the area under the curve (AUC) for each rate.
#' @param object An object of class INSPEcT_model, with true rates
#' @param object2 A modeled object of class INSPEcT
#' @param plot A logical indicating whether ROC curves should be plotted or not
#' @param comparative A logical indicating whether the cross-prediction should be visualized. When this mode is selected,
#' the p-values assigned to the variability of one rate (e.g. synthesis) are tested against the variability the other rates
#' (e.g. processing and degradation). Cross-prediction ROC curves are plotted with dashed lines.
#' @return A list of objects of class pROC with summary of each roc curve
#' @seealso \code{\link{makeSimModel}}, \code{\link{makeSimDataset}}, \code{\link{rocThresholds}}
#' @examples
#' if( Sys.info()["sysname"] != "Windows" ) {
#' nascentInspObj <- readRDS(system.file(package='INSPEcT', 'nascentInspObj.rds'))
#'
#' simRates<-makeSimModel(nascentInspObj, 1000, seed=1)
#'
#' # newTpts<-simRates@params$tpts
#' # nascentSim2replicates<-makeSimDataset(object=simRates
#' # ,tpts=newTpts
#' # ,nRep=3
#' # ,NoNascent=FALSE
#' # ,seed=1)
#' # nascentSim2replicates<-modelRates(nascentSim2replicates[1:100]
#' # ,seed=1)
#' # (not evaluated to save computational time)
#'
#' data("nascentSim2replicates",package='INSPEcT')
#'
#' rocCurve(simRates[1:100],nascentSim2replicates)
#' title("3rep. 11t.p. Total and nascent RNA", line=3)
#' }
setMethod('rocCurve', signature(object='INSPEcT_model', object2='INSPEcT'),
function(object, object2, plot=TRUE, comparative=FALSE) {
## Compare the true (simulated) variability classes stored in `object`
## against the variability p-values of the modeled object `object2`:
## one ROC curve per rate ('a' synthesis, 'b' degradation, 'c' processing).
if( !.hasSlot(object2, 'version') ) {
stop("This object is OBSOLETE and cannot work with the current version of INSPEcT.")
}
# reduce the set of genes to the one present in the modeled object
if( length(object@ratesSpecs) != length(featureNames(object2)) )
object <- object[as.numeric(featureNames(object2))]
if(!comparative)
{
## obtain the response (true class)
## geneClass returns strings containing 'a'/'b'/'c' for the rates that
## were simulated as varying.
allResponses <- geneClass(object)
## in case the classification is based on model selection (i.e. functional NoNascent)
## plot also the AIC information
plotAIC <- (object2@NoNascent & !object2@NF)
if( plotAIC ) {
## Pure AIC selection: class with the minimum AIC per gene
AICsTmp <- AIC_internal(object2@model)
AICclass <- colnames(AICsTmp)[apply(AICsTmp,1,which.min)]
aCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a")
bCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b")
cCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c")
}
ratePvals <- ratePvals(object2)
# AT LEAST ONE FINITE PVALUE FOR EACH CONTROL CLASS!!!
## Each ROC needs both response classes present, otherwise store AUC = NA
## and skip plotting for that rate. direction=">" because low p-values
## predict the positive (varying) class.
responseAlpha <- as.numeric(grepl('a', allResponses))
if( length(table(responseAlpha))>1 ) {
rAlpha <- roc(response=responseAlpha, predictor=ratePvals$synthesis,direction=">")
} else {
rAlpha <- list(auc=NA)
}
responseBeta <- as.numeric(grepl('b', allResponses))
if( length(table(responseBeta))>1 ) {
rBeta <- roc(response=responseBeta, predictor=ratePvals$degradation,direction=">")
} else {
rBeta <- list(auc=NA)
}
responseGamma <- as.numeric(grepl('c', allResponses))
if( length(table(responseGamma))>1 ) {
rGamma <- roc(response=responseGamma, predictor=ratePvals$processing,direction=">")
} else {
rGamma <- list(auc=NA)
}
if( plot ) {
## NOTE(review): positional access into pROC roc objects is used below
## ([[2]] sensitivities, [[3]] specificities, [[4]] thresholds) and
## [[1]] is tested with is.na() to detect the AUC=NA placeholder;
## confirm this ordering against the installed pROC version.
legendText <- paste(
c('synthesis', 'processing', 'degradation')
, ' - AUC='
, signif(c(as.numeric(rAlpha$auc), as.numeric(rGamma$auc), as.numeric(rBeta$auc)), 3)
, sep=''
)
legendText <- c(legendText,'p = 0.05')
if( plotAIC ) legendText <- c(legendText,'AIC')
if( !is.na(rAlpha[[1]]) ) {
plot.roc(rAlpha, col='red', lwd=4)
## dot marks the ROC point closest to the p = 0.05 threshold
alphaIdx <- which.min(abs(0.05-rAlpha[[4]]))
points(rAlpha[[3]][alphaIdx],rAlpha[[2]][alphaIdx],pch=16,col='red',cex=2.5)
if( plotAIC ) points(aCoordinatesAIC["specificity"],aCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
}
if( !is.na(rGamma[[1]]) ) {
plot.roc(rGamma, col='navy', lwd=4, add=!(is.na(rAlpha[[1]]) & is.na(rBeta[[1]])))
gammaIdx <- which.min(abs(0.05-rGamma[[4]]))
points(rGamma[[3]][gammaIdx],rGamma[[2]][gammaIdx],pch=16,col='navy',cex=2.5)
if( plotAIC ) points(cCoordinatesAIC["specificity"],cCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
}
if( !is.na(rBeta[[1]]) ) {
plot.roc(rBeta, col='deepskyblue', lwd=4, add=!is.na(rAlpha[[1]]))
betaIdx <- which.min(abs(0.05-rBeta[[4]]))
points(rBeta[[3]][betaIdx],rBeta[[2]][betaIdx],pch=16,col='deepskyblue',cex=2.5)
if( plotAIC ) points(bCoordinatesAIC["specificity"],bCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
}
legend('bottomright', legend=legendText
, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
lty=c(1,1,1,NA,NA),
pch=c(NA,NA,NA,16,17), lwd=4)
}
## return the roc objects
out <- list(synthesis=rAlpha, degradation=rBeta, processing=rGamma)
} else { ########## comparative mode, show also cross-classifications
## obtain the response
## Each r<X><Y> below classifies true class <X> using the p-values of
## rate <Y>; self-classifications (XX) are drawn solid, cross-
## classifications dashed.
allResponses <- geneClass(object)
ratePvals <- ratePvals(object2)
rAlphaAlpha <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
rGammaAlpha <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
rBetaAlpha <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
rAlphaGamma <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
rGammaGamma <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
rBetaGamma <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
rAlphaBeta <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
rGammaBeta <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
rBetaBeta <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
## in case the classification is based on model selection (i.e. functional NoNascent)
## plot also the AIC information
plotAIC <- object2@NoNascent & !object2@NF
if( plotAIC ) {
## Pure AIC selection
## NOTE(review): this branch uses AIC(object2) while the non-comparative
## branch uses AIC_internal(object2@model) — confirm both return the
## same per-gene AIC table.
AICsTmp <- AIC(object2)
AICclass <- colnames(AICsTmp)[apply(AICsTmp,1,which.min)]
aaCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a")
gaCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a",predictor="c")
baCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a",predictor="b")
acCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c",predictor="a")
ccCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c")
bcCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c",predictor="b")
abCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b",predictor="a")
cbCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b",predictor="c")
bbCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b")
}
if( plot ) {
## three panels: synthesis / processing / degradation classification
oldMfrow <- par()$mfrow
par(mfrow=c(1,3))
if( !is.na(rAlphaAlpha[[1]]) ) {
plot.roc(rAlphaAlpha, col='red', lwd=4, add=FALSE, main="Synthesis rate classification")
alphaAlphaIdx <- which.min(abs(0.05-rAlphaAlpha[[4]]))
points(rAlphaAlpha[[3]][alphaAlphaIdx],rAlphaAlpha[[2]][alphaAlphaIdx],pch=16,col='red',cex=2.5)
if( plotAIC ) points(aaCoordinatesAIC["specificity"],aaCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
}
if( !is.na(rGammaAlpha[[1]]) ) {
plot.roc(rGammaAlpha, col='navy', lwd=4, add=!is.na(rAlphaAlpha[[1]]), lty=3)
gammaAlphaIdx <- which.min(abs(0.05-rGammaAlpha[[4]]))
points(rGammaAlpha[[3]][gammaAlphaIdx],rGammaAlpha[[2]][gammaAlphaIdx],pch=16,col='navy',cex=2.5)
if( plotAIC ) points(gaCoordinatesAIC["specificity"],gaCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
}
if( !is.na(rBetaAlpha[[1]]) ) {
plot.roc(rBetaAlpha, col='deepskyblue', lwd=4, add=!(is.na(rAlphaAlpha[[1]]) & is.na(rGammaAlpha[[1]])), lty=3)
betaAlphaIdx <- which.min(abs(0.05-rBetaAlpha[[4]]))
points(rBetaAlpha[[3]][betaAlphaIdx],rBetaAlpha[[2]][betaAlphaIdx],pch=16,col='deepskyblue',cex=2.5)
if( plotAIC ) points(baCoordinatesAIC["specificity"],baCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
}
legendText <- paste(
c('synthesis', 'processing', 'degradation')
, ' - AUC='
, signif(c(as.numeric(rAlphaAlpha$auc), as.numeric(rGammaAlpha$auc), as.numeric(rBetaAlpha$auc)), 3)
, sep=''
)
legendText <- c(legendText,'p = 0.05')
if( plotAIC ) legendText <- c(legendText,'AIC')
legend('bottomright', legend=legendText
, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
lty=c(1,3,3,NA,NA),
pch=c(NA,NA,NA,16,17), lwd=4)
if( !is.na(rAlphaGamma[[1]]) ) {
plot.roc(rAlphaGamma, col='red', lwd=4, lty=3, add=FALSE, main="Processing rate classification")
alphaGammaIdx <- which.min(abs(0.05-rAlphaGamma[[4]]))
points(rAlphaGamma[[3]][alphaGammaIdx],rAlphaGamma[[2]][alphaGammaIdx],pch=16,col='red',cex=2.5)
if( plotAIC ) points(acCoordinatesAIC["specificity"],acCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
}
if( !is.na(rGammaGamma[[1]]) ) {
plot.roc(rGammaGamma, col='navy', lwd=4, add=!is.na(rAlphaGamma[[1]]))
gammaGammaIdx <- which.min(abs(0.05-rGammaGamma[[4]]))
points(rGammaGamma[[3]][gammaGammaIdx],rGammaGamma[[2]][gammaGammaIdx],pch=16,col='navy',cex=2.5)
if( plotAIC ) points(ccCoordinatesAIC["specificity"],ccCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
}
if( !is.na(rBetaGamma[[1]]) ) {
plot.roc(rBetaGamma, col='deepskyblue', lwd=4, lty=3, add=!(is.na(rAlphaGamma[[1]]) & is.na(rGammaGamma[[1]])))
betaGammaIdx <- which.min(abs(0.05-rBetaGamma[[4]]))
points(rBetaGamma[[3]][betaGammaIdx],rBetaGamma[[2]][betaGammaIdx],pch=16,col='deepskyblue',cex=2.5)
if( plotAIC ) points(bcCoordinatesAIC["specificity"],bcCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
}
legendText <- paste(
c('synthesis', 'processing', 'degradation')
, ' - AUC='
, signif(c(as.numeric(rAlphaGamma$auc), as.numeric(rGammaGamma$auc), as.numeric(rBetaGamma$auc)), 3)
, sep=''
)
legendText <- c(legendText,'p = 0.05')
if( plotAIC ) legendText <- c(legendText,'AIC')
legend('bottomright', legend=legendText
, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
lty=c(3,1,3,NA,NA),
pch=c(NA,NA,NA,16,17), lwd=4)
if( !is.na(rAlphaBeta[[1]]) ) {
plot.roc(rAlphaBeta, col='red', lwd=4, lty=3, add=FALSE, main="Degradation rate classification")
alphaBetaIdx <- which.min(abs(0.05-rAlphaBeta[[4]]))
points(rAlphaBeta[[3]][alphaBetaIdx],rAlphaBeta[[2]][alphaBetaIdx],pch=16,col='red',cex=2.5)
if( plotAIC ) points(abCoordinatesAIC["specificity"],abCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
}
if( !is.na(rGammaBeta[[1]]) ) {
plot.roc(rGammaBeta, col='navy', lwd=4, lty=3, add=!is.na(rAlphaBeta[[1]]))
gammaBetaIdx <- which.min(abs(0.05-rGammaBeta[[4]]))
points(rGammaBeta[[3]][gammaBetaIdx],rGammaBeta[[2]][gammaBetaIdx],pch=16,col='navy',cex=2.5)
if( plotAIC ) points(cbCoordinatesAIC["specificity"],cbCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
}
if( !is.na(rBetaBeta[[1]]) ) {
plot.roc(rBetaBeta, col='deepskyblue', lwd=4, add=!(is.na(rAlphaBeta[[1]]) & is.na(rGammaBeta[[1]])))
betaBetaIdx <- which.min(abs(0.05-rBetaBeta[[4]]))
points(rBetaBeta[[3]][betaBetaIdx],rBetaBeta[[2]][betaBetaIdx],pch=16,col='deepskyblue',cex=2.5)
if( plotAIC ) points(bbCoordinatesAIC["specificity"],bbCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
}
legendText <- paste(
c('synthesis', 'processing', 'degradation')
, ' - AUC='
, signif(c(as.numeric(rAlphaBeta$auc), as.numeric(rGammaBeta$auc), as.numeric(rBetaBeta$auc)), 3)
, sep=''
)
legendText <- c(legendText,'p = 0.05')
if( plotAIC ) legendText <- c(legendText,'AIC')
legend('bottomright', legend=legendText
, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
lty=c(3,3,1,NA,NA),
pch=c(NA,NA,NA,16,17), lwd=4)
# restore par settings
par(mfrow=oldMfrow)
}
out <- list("ClassifyAlphaWithAlpha"=rAlphaAlpha
,"ClassifyBetaWithAlpha"=rBetaAlpha
,"ClassifyGammaWithAlpha"=rGammaAlpha
,"ClassifyAlphaWithGamma"=rAlphaGamma
,"ClassifyBetaWithGamma"=rBetaGamma
,"ClassifyGammaWithGamma"=rGammaGamma
,"ClassifyAlphaWithBeta"=rAlphaBeta
,"ClassifyBetaWithBeta"=rBetaBeta
,"ClassifyGammaWithBeta"=rGammaBeta)
}
})
# Compute the (specificity, sensitivity) coordinates of the AIC-based model
# selection so it can be overlaid as a point on a pROC ROC plot.
#
# AICclass     character vector: model class selected by AIC for each gene
# allResponses character vector: true (simulated) class of each gene
# class        letter coding the rate tested in AICclass (e.g. "a" synthesis)
# predictor    letter coding the rate tested in allResponses; defaults to
#              'class' (same-rate classification)
#
# Returns c(specificity=..., sensitivity=...).
coordinatesAIC <- function(AICclass,allResponses,class,predictor=NULL)
{
	if(is.null(predictor)) predictor <- class
	# true positives: genes truly varying that AIC also calls varying
	TP <- length(which(grepLogic(predictor,allResponses)&grepLogic(class,AICclass)))
	P <- length(grep(predictor,allResponses))
	# true negatives: genes truly constant that AIC also calls constant
	TN <- length(which(!grepLogic(predictor,allResponses)&!grepLogic(class,AICclass)))
	N <- length(allResponses) - P
	# BUGFIX: sensitivity is the true positive rate (TP/P) and specificity the
	# true negative rate (TN/N); the original assignments were swapped, so the
	# AIC point was plotted with its x/y coordinates exchanged.
	sensitivityAIC <- TP/P
	specificityAIC <- TN/N
	c(specificity=specificityAIC,sensitivity=sensitivityAIC)
}
|
/R/rocCurve-methods.R
|
no_license
|
mfurla/INSPEcT
|
R
| false
| false
| 15,633
|
r
|
#' @rdname rocCurve
#'
#' @description
#' A method to visualize the performance in the classification of synthesis, degradation
#' and processing rates based on the comparison of the original simulated rates and the one
#' obtained by the function \code{\link{modelRates}}. For each rate, classification performance is measured
#' in terms of sensitivity and specificity using a ROC curve analysis. False negatives (FN) represent cases
#' where the rate is identified as constant while it was simulated as varying. False positives (FP) represent
#' cases where INSPEcT identified a rate as varying while it was simulated as constant. On the contrary,
#' true positives (TP) and negatives (TN) are cases of correct classification of varying and constant rates, respectively.
#' Consequently, sensitivity and specificity are computed using increasing thresholds for the brown p-values,
#' and the ability of correctly classifying a rate is measured through the area under the curve (AUC) for each rate.
#' @param object An object of class INSPEcT_model, with true rates
#' @param object2 An modeled object of class INSPEcT
#' @param plot A logical indicating whether ROC curves should be plotted or not
#' @param comparative A logical indicating whether the cross-prediction should be visualized. When this mode is selected,
#' the p-values assigned to the variability of one rate (e.g. synthesis) are tested against the variability the other rates
#' (e.g. processing and degradation). Cross-prediction ROC curves are plotted with dashed lines.
#' @return A list of objects of class pROC with summary of each roc curve
#' @seealso \code{\link{makeSimModel}}, \code{\link{makeSimDataset}}, \code{\link{rocThresholds}}
#' @examples
#' if( Sys.info()["sysname"] != "Windows" ) {
#' nascentInspObj <- readRDS(system.file(package='INSPEcT', 'nascentInspObj.rds'))
#'
#' simRates<-makeSimModel(nascentInspObj, 1000, seed=1)
#'
#' # newTpts<-simRates@params$tpts
#' # nascentSim2replicates<-makeSimDataset(object=simRates
#' # ,tpts=newTpts
#' # ,nRep=3
#' # ,NoNascent=FALSE
#' # ,seed=1)
#' # nascentSim2replicates<-modelRates(nascentSim2replicates[1:100]
#' # ,seed=1)
#' # (not evaluated to save computational time)
#'
#' data("nascentSim2replicates",package='INSPEcT')
#'
#' rocCurve(simRates[1:100],nascentSim2replicates)
#' title("3rep. 11t.p. Total and nascent RNA", line=3)
#' }
setMethod('rocCurve', signature(object='INSPEcT_model', object2='INSPEcT'), 
	function(object, object2, plot=TRUE, comparative=FALSE) {
	if( !.hasSlot(object2, 'version') ) {
		stop("This object is OBSOLETE and cannot work with the current version of INSPEcT.")
	}
	# reduce the set of genes to the one present in the modeled object
	if( length(object@ratesSpecs) != length(featureNames(object2)) )
		object <- object[as.numeric(featureNames(object2))]
	if(!comparative)
	{
		## obtain the response (true class)
		allResponses <- geneClass(object)
		## in case the classification is based on model selection (i.e. functional NoNascent)
		## plot also the AIC information
		## (scalar condition on length-1 slots: use '&&' rather than '&')
		plotAIC <- (object2@NoNascent && !object2@NF)
		if( plotAIC ) {
			## Pure AIC selection
			AICsTmp <- AIC_internal(object2@model)
			AICclass <- colnames(AICsTmp)[apply(AICsTmp,1,which.min)]
			aCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a")
			bCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b")
			cCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c")
		}
		ratePvals <- ratePvals(object2)
		# AT LEAST ONE FINITE PVALUE FOR EACH CONTROL CLASS!!!
		# roc() requires both classes present in the response; otherwise keep
		# a placeholder with auc=NA so the plotting code below skips the curve.
		responseAlpha <- as.numeric(grepl('a', allResponses))
		if( length(table(responseAlpha))>1 ) {
			rAlpha <- roc(response=responseAlpha, predictor=ratePvals$synthesis,direction=">")
		} else {
			rAlpha <- list(auc=NA)
		}
		responseBeta <- as.numeric(grepl('b', allResponses))
		if( length(table(responseBeta))>1 ) {
			rBeta <- roc(response=responseBeta, predictor=ratePvals$degradation,direction=">")
		} else {
			rBeta <- list(auc=NA)
		}
		responseGamma <- as.numeric(grepl('c', allResponses))
		if( length(table(responseGamma))>1 ) {
			rGamma <- roc(response=responseGamma, predictor=ratePvals$processing,direction=">")
		} else {
			rGamma <- list(auc=NA)
		}
		if( plot ) {
			legendText <- paste(
				c('synthesis', 'processing', 'degradation')
				, ' - AUC='
				, signif(c(as.numeric(rAlpha$auc), as.numeric(rGamma$auc), as.numeric(rBeta$auc)), 3)
				, sep=''
			)
			legendText <- c(legendText,'p = 0.05')
			if( plotAIC ) legendText <- c(legendText,'AIC')
			# pROC object components: [[2]] sensitivities, [[3]] specificities,
			# [[4]] thresholds; the point nearest the p = 0.05 threshold is
			# highlighted on each curve (and the AIC point, when available).
			if( !is.na(rAlpha[[1]]) ) {
				plot.roc(rAlpha, col='red', lwd=4)
				alphaIdx <- which.min(abs(0.05-rAlpha[[4]]))
				points(rAlpha[[3]][alphaIdx],rAlpha[[2]][alphaIdx],pch=16,col='red',cex=2.5)
				if( plotAIC ) points(aCoordinatesAIC["specificity"],aCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
			}
			# NOTE(review): the 'add' condition below also consults rBeta even
			# though rBeta is plotted later; if only processing/degradation
			# curves exist the first plot.roc call may attempt add=TRUE on an
			# empty device — confirm intended behavior.
			if( !is.na(rGamma[[1]]) ) {
				plot.roc(rGamma, col='navy', lwd=4, add=!(is.na(rAlpha[[1]]) & is.na(rBeta[[1]])))
				gammaIdx <- which.min(abs(0.05-rGamma[[4]]))
				points(rGamma[[3]][gammaIdx],rGamma[[2]][gammaIdx],pch=16,col='navy',cex=2.5)
				if( plotAIC ) points(cCoordinatesAIC["specificity"],cCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
			}
			if( !is.na(rBeta[[1]]) ) {
				plot.roc(rBeta, col='deepskyblue', lwd=4, add=!is.na(rAlpha[[1]]))
				betaIdx <- which.min(abs(0.05-rBeta[[4]]))
				points(rBeta[[3]][betaIdx],rBeta[[2]][betaIdx],pch=16,col='deepskyblue',cex=2.5)
				if( plotAIC ) points(bCoordinatesAIC["specificity"],bCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
			}
			legend('bottomright', legend=legendText
				, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
				lty=c(1,1,1,NA,NA),
				pch=c(NA,NA,NA,16,17), lwd=4)
		}
		## return the roc objects
		out <- list(synthesis=rAlpha, degradation=rBeta, processing=rGamma)
	} else { ########## comparative mode, show also cross-classifications
		## obtain the response
		allResponses <- geneClass(object)
		ratePvals <- ratePvals(object2)
		# 3x3 cross-classification: each rate's p-values are used to classify
		# the true variability of every rate; roc() failures (e.g. a class
		# absent from the response) fall back to an auc=NA placeholder.
		rAlphaAlpha <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
					, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
		rGammaAlpha <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
					, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
		rBetaAlpha <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
					, predictor=ratePvals[,"synthesis"],direction=">"), error=function(e) list(auc=NA))
		rAlphaGamma <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
					, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
		rGammaGamma <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
					, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
		rBetaGamma <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
					, predictor=ratePvals[,"processing"],direction=">"), error=function(e) list(auc=NA))
		rAlphaBeta <- tryCatch(roc(response=as.numeric(grepl('a', allResponses))
					, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
		rGammaBeta <- tryCatch(roc(response=as.numeric(grepl('c', allResponses))
					, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
		rBetaBeta <- tryCatch(roc(response=as.numeric(grepl('b', allResponses))
					, predictor=ratePvals[,"degradation"],direction=">"), error=function(e) list(auc=NA))
		## in case the classification is based on model selection (i.e. functional NoNascent)
		## plot also the AIC information
		plotAIC <- object2@NoNascent && !object2@NF
		if( plotAIC ) {
			## Pure AIC selection
			# NOTE(review): the non-comparative branch uses
			# AIC_internal(object2@model) here — confirm both calls return
			# the same per-gene AIC table.
			AICsTmp <- AIC(object2)
			AICclass <- colnames(AICsTmp)[apply(AICsTmp,1,which.min)]
			aaCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a")
			gaCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a",predictor="c")
			baCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="a",predictor="b")
			acCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c",predictor="a")
			ccCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c")
			bcCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="c",predictor="b")
			abCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b",predictor="a")
			cbCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b",predictor="c")
			bbCoordinatesAIC <- coordinatesAIC(AICclass=AICclass,allResponses=allResponses,class="b")
		}
		if( plot ) {
			oldMfrow <- par()$mfrow
			par(mfrow=c(1,3))
			# panel 1: classification of synthesis-rate variability; the
			# same-rate curve is solid, cross-rate curves are dashed (lty=3)
			if( !is.na(rAlphaAlpha[[1]]) ) {
				plot.roc(rAlphaAlpha, col='red', lwd=4, add=FALSE, main="Synthesis rate classification")
				alphaAlphaIdx <- which.min(abs(0.05-rAlphaAlpha[[4]]))
				points(rAlphaAlpha[[3]][alphaAlphaIdx],rAlphaAlpha[[2]][alphaAlphaIdx],pch=16,col='red',cex=2.5)
				if( plotAIC ) points(aaCoordinatesAIC["specificity"],aaCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
			}
			if( !is.na(rGammaAlpha[[1]]) ) {
				plot.roc(rGammaAlpha, col='navy', lwd=4, add=!is.na(rAlphaAlpha[[1]]), lty=3)
				gammaAlphaIdx <- which.min(abs(0.05-rGammaAlpha[[4]]))
				points(rGammaAlpha[[3]][gammaAlphaIdx],rGammaAlpha[[2]][gammaAlphaIdx],pch=16,col='navy',cex=2.5)
				if( plotAIC ) points(gaCoordinatesAIC["specificity"],gaCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
			}
			if( !is.na(rBetaAlpha[[1]]) ) {
				plot.roc(rBetaAlpha, col='deepskyblue', lwd=4, add=!(is.na(rAlphaAlpha[[1]]) & is.na(rGammaAlpha[[1]])), lty=3)
				betaAlphaIdx <- which.min(abs(0.05-rBetaAlpha[[4]]))
				points(rBetaAlpha[[3]][betaAlphaIdx],rBetaAlpha[[2]][betaAlphaIdx],pch=16,col='deepskyblue',cex=2.5)
				if( plotAIC ) points(baCoordinatesAIC["specificity"],baCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
			}
			legendText <- paste(
				c('synthesis', 'processing', 'degradation')
				, ' - AUC='
				, signif(c(as.numeric(rAlphaAlpha$auc), as.numeric(rGammaAlpha$auc), as.numeric(rBetaAlpha$auc)), 3)
				, sep=''
			)
			legendText <- c(legendText,'p = 0.05')
			if( plotAIC ) legendText <- c(legendText,'AIC')
			legend('bottomright', legend=legendText
				, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
				lty=c(1,3,3,NA,NA),
				pch=c(NA,NA,NA,16,17), lwd=4)
			# panel 2: classification of processing-rate variability
			if( !is.na(rAlphaGamma[[1]]) ) {
				plot.roc(rAlphaGamma, col='red', lwd=4, lty=3, add=FALSE, main="Processing rate classification")
				alphaGammaIdx <- which.min(abs(0.05-rAlphaGamma[[4]]))
				points(rAlphaGamma[[3]][alphaGammaIdx],rAlphaGamma[[2]][alphaGammaIdx],pch=16,col='red',cex=2.5)
				if( plotAIC ) points(acCoordinatesAIC["specificity"],acCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
			}
			if( !is.na(rGammaGamma[[1]]) ) {
				plot.roc(rGammaGamma, col='navy', lwd=4, add=!is.na(rAlphaGamma[[1]]))
				gammaGammaIdx <- which.min(abs(0.05-rGammaGamma[[4]]))
				points(rGammaGamma[[3]][gammaGammaIdx],rGammaGamma[[2]][gammaGammaIdx],pch=16,col='navy',cex=2.5)
				if( plotAIC ) points(ccCoordinatesAIC["specificity"],ccCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
			}
			if( !is.na(rBetaGamma[[1]]) ) {
				plot.roc(rBetaGamma, col='deepskyblue', lwd=4, lty=3, add=!(is.na(rAlphaGamma[[1]]) & is.na(rGammaGamma[[1]])))
				betaGammaIdx <- which.min(abs(0.05-rBetaGamma[[4]]))
				points(rBetaGamma[[3]][betaGammaIdx],rBetaGamma[[2]][betaGammaIdx],pch=16,col='deepskyblue',cex=2.5)
				if( plotAIC ) points(bcCoordinatesAIC["specificity"],bcCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
			}
			legendText <- paste(
				c('synthesis', 'processing', 'degradation')
				, ' - AUC='
				, signif(c(as.numeric(rAlphaGamma$auc), as.numeric(rGammaGamma$auc), as.numeric(rBetaGamma$auc)), 3)
				, sep=''
			)
			legendText <- c(legendText,'p = 0.05')
			if( plotAIC ) legendText <- c(legendText,'AIC')
			legend('bottomright', legend=legendText
				, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
				lty=c(3,1,3,NA,NA),
				pch=c(NA,NA,NA,16,17), lwd=4)
			# panel 3: classification of degradation-rate variability
			if( !is.na(rAlphaBeta[[1]]) ) {
				plot.roc(rAlphaBeta, col='red', lwd=4, lty=3, add=FALSE, main="Degradation rate classification")
				alphaBetaIdx <- which.min(abs(0.05-rAlphaBeta[[4]]))
				points(rAlphaBeta[[3]][alphaBetaIdx],rAlphaBeta[[2]][alphaBetaIdx],pch=16,col='red',cex=2.5)
				if( plotAIC ) points(abCoordinatesAIC["specificity"],abCoordinatesAIC["sensitivity"],pch=17,col='red',cex=2.5)
			}
			if( !is.na(rGammaBeta[[1]]) ) {
				plot.roc(rGammaBeta, col='navy', lwd=4, lty=3, add=!is.na(rAlphaBeta[[1]]))
				gammaBetaIdx <- which.min(abs(0.05-rGammaBeta[[4]]))
				points(rGammaBeta[[3]][gammaBetaIdx],rGammaBeta[[2]][gammaBetaIdx],pch=16,col='navy',cex=2.5)
				if( plotAIC ) points(cbCoordinatesAIC["specificity"],cbCoordinatesAIC["sensitivity"],pch=17,col='navy',cex=2.5)
			}
			if( !is.na(rBetaBeta[[1]]) ) {
				plot.roc(rBetaBeta, col='deepskyblue', lwd=4, add=!(is.na(rAlphaBeta[[1]]) & is.na(rGammaBeta[[1]])))
				betaBetaIdx <- which.min(abs(0.05-rBetaBeta[[4]]))
				points(rBetaBeta[[3]][betaBetaIdx],rBetaBeta[[2]][betaBetaIdx],pch=16,col='deepskyblue',cex=2.5)
				if( plotAIC ) points(bbCoordinatesAIC["specificity"],bbCoordinatesAIC["sensitivity"],pch=17,col='deepskyblue',cex=2.5)
			}
			legendText <- paste(
				c('synthesis', 'processing', 'degradation')
				, ' - AUC='
				, signif(c(as.numeric(rAlphaBeta$auc), as.numeric(rGammaBeta$auc), as.numeric(rBetaBeta$auc)), 3)
				, sep=''
			)
			legendText <- c(legendText,'p = 0.05')
			if( plotAIC ) legendText <- c(legendText,'AIC')
			legend('bottomright', legend=legendText
				, col=c('red', 'navy', 'deepskyblue','grey70','grey70'),
				lty=c(3,3,1,NA,NA),
				pch=c(NA,NA,NA,16,17), lwd=4)
			# restore par settings
			par(mfrow=oldMfrow)
		}
		out <- list("ClassifyAlphaWithAlpha"=rAlphaAlpha
					,"ClassifyBetaWithAlpha"=rBetaAlpha
					,"ClassifyGammaWithAlpha"=rGammaAlpha
					,"ClassifyAlphaWithGamma"=rAlphaGamma
					,"ClassifyBetaWithGamma"=rBetaGamma
					,"ClassifyGammaWithGamma"=rGammaGamma
					,"ClassifyAlphaWithBeta"=rAlphaBeta
					,"ClassifyBetaWithBeta"=rBetaBeta
					,"ClassifyGammaWithBeta"=rGammaBeta)
	}
})
# Compute the (specificity, sensitivity) coordinates of the AIC-based model
# selection so it can be overlaid as a point on a pROC ROC plot.
#
# AICclass     character vector: model class selected by AIC for each gene
# allResponses character vector: true (simulated) class of each gene
# class        letter coding the rate tested in AICclass (e.g. "a" synthesis)
# predictor    letter coding the rate tested in allResponses; defaults to
#              'class' (same-rate classification)
#
# Returns c(specificity=..., sensitivity=...).
coordinatesAIC <- function(AICclass,allResponses,class,predictor=NULL)
{
	if(is.null(predictor)) predictor <- class
	# true positives: genes truly varying that AIC also calls varying
	TP <- length(which(grepLogic(predictor,allResponses)&grepLogic(class,AICclass)))
	P <- length(grep(predictor,allResponses))
	# true negatives: genes truly constant that AIC also calls constant
	TN <- length(which(!grepLogic(predictor,allResponses)&!grepLogic(class,AICclass)))
	N <- length(allResponses) - P
	# BUGFIX: sensitivity is the true positive rate (TP/P) and specificity the
	# true negative rate (TN/N); the original assignments were swapped, so the
	# AIC point was plotted with its x/y coordinates exchanged.
	sensitivityAIC <- TP/P
	specificityAIC <- TN/N
	c(specificity=specificityAIC,sensitivity=sensitivityAIC)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-dams.R
\docType{data}
\name{dams}
\alias{dams}
\title{dams}
\format{A tibble with 511 rows and 9 variables:
\describe{
\item{city}{The city closest to the dam}
\item{state}{The state the dam is in}
\item{river}{The river that the dam is on}
\item{year_completed}{The year the dam was constructed}
\item{hazard}{The hazard rating of the dam. (H = High; L = Low; S = Significant; U = Undetermined)}
\item{eap}{Status of the dam's emergency action plan. (Y = Yes; N = No; NR = Not Required;)}
\item{latitude}{The latitude of the dam}
\item{longitude}{The longitude of the dam}
}}
\usage{
dams
}
\description{
This data comes from the National Inventory of Dams, \url{https://nid-test.sec.usace.army.mil/ords/f?p=105:1::::::}
}
\examples{
\dontrun{
dams
}
}
\keyword{datasets}
|
/man/dams.Rd
|
no_license
|
uncoast-unconf/ggcart
|
R
| false
| true
| 855
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data-dams.R
\docType{data}
\name{dams}
\alias{dams}
\title{dams}
\format{A tibble with 511 rows and 9 variables:
\describe{
\item{city}{The city closest to the dam}
\item{state}{The state the dam is in}
\item{river}{The river that the dam is on}
\item{year_completed}{The year the dam was constructed}
\item{hazard}{The hazard rating of the dam. (H = High; L = Low; S = Significant; U = Undetermined)}
\item{eap}{Status of the dam's emergency action plan. (Y = Yes; N = No; NR = Not Required;)}
\item{latitude}{The latitude of the dam}
\item{longitude}{The longitude of the dam}
}}
\usage{
dams
}
\description{
This data comes from the National Inventory of Dams, \url{https://nid-test.sec.usace.army.mil/ords/f?p=105:1::::::}
}
\examples{
\dontrun{
dams
}
}
\keyword{datasets}
|
### implement methods for PLS ####
## Fit PLS (partial least squares) models on an SImageSet against a numeric
## response matrix 'y' (one row per pixel, one column per response). One fit
## with the largest requested 'ncomp' is computed and shared across all
## requested component counts; the returned object also carries predictions
## for the full dataset (including pixels dropped for missing responses).
setMethod("PLS", signature = c(x = "SImageSet", y = "matrix"),
	function(x, y, ncomp = 3,
		method = "nipals",
		center = TRUE,
		scale = FALSE,
		iter.max = 100, ...)
	{
		method <- match.arg(method)
		ncomps <- sort(ncomp)
		if ( max(ncomps) > nrow(x) )
			.stop("PLS: Can't fit more components than extent of dataset")
		# drop pixels whose response contains any NA before fitting
		nas <- apply(y, 1, function(yi) any(is.na(yi)))
		newx <- x
		newy <- y
		if ( any(nas) ) {
			.message("PLS: Removing missing observations.")
			x <- x[,!nas]
			y <- y[!nas,]
		}
		.time.start()
		.message("PLS: Centering data.")
		# observations x features, optionally centered/scaled
		Xt <- t(as.matrix(iData(x)))
		Xt <- scale(Xt, center=center, scale=scale)
		Y <- scale(y, center=center, scale=scale)
		if ( center ) {
			center <- attr(Xt, "scaled:center")
			Ycenter <- attr(Y, "scaled:center")
		} else {
			Ycenter <- FALSE
		}
		if ( scale ) {
			scale <- attr(Xt, "scaled:scale")
			Yscale <- attr(Y, "scaled:scale")
		} else {
			# unit scaling factors; a dead 'Yscale <- FALSE' assignment that
			# was immediately overwritten here has been removed
			scale <- rep(1, ncol(Xt))
			names(scale) <- colnames(Xt)
			Yscale <- rep(1, ncol(Y))
			names(Yscale) <- colnames(Y)
		}
		.message("PLS: Fitting partial least squares components.")
		fit <- .PLS.fit(Xt, Y, ncomp=max(ncomps), method=method, iter.max=iter.max)
		# one result entry per requested component count, all sharing the fit
		result <- lapply(ncomps, function(ncomp) {
			append(fit, list(y=newy, ncomp=ncomp,
				method=method, center=center, scale=scale,
				Ycenter=Ycenter, Yscale=Yscale))
		})
		model <- AnnotatedDataFrame(
			data=data.frame(ncomp=sapply(result, function(fit) fit$ncomp)),
			varMetadata=data.frame(
				labelDescription=c(ncomp="Number of PLS Components")))
		featureNames(model) <- .format.data.labels(pData(model))
		names(result) <- .format.data.labels(pData(model))
		object <- new("PLS",
			pixelData=x@pixelData,
			featureData=x@featureData,
			experimentData=x@experimentData,
			protocolData=x@protocolData,
			resultData=result,
			modelData=model)
		.time.stop()
		# predict on the full (unfiltered) dataset
		predict(object, newx=newx, newy=newy)
	})
## Coerce a non-matrix response to the matrix form the PLS workhorse expects:
## numeric responses become a one-column matrix, anything else is treated as a
## class label and expanded to a binary indicator matrix (one column per
## level) with the original factor stashed in attribute "PLS:y".
setMethod("PLS", signature = c(x = "SImageSet", y = "ANY"),
	function(x, y, ...)
	{
		if ( !is.numeric(y) ) {
			fac <- as.factor(y)
			indicator <- sapply(levels(fac),
				function(lvl) as.integer(fac == lvl))
			attr(indicator, "PLS:y") <- fac
			PLS(x, indicator, ...)
		} else {
			PLS(x, as.matrix(y), ...)
		}
	})
## Predict from a fitted PLS model on new imaging data: scales 'newx' with
## the centering/scaling learned at training time, computes fitted values for
## every stored component count, and (for factor responses) derives a class
## label per pixel by which.max over the fitted indicator columns.
setMethod("predict", "PLS",
	function(object, newx, newy, ...)
	{
		if ( !is(newx, "iSet") )
			.stop("'newx' must inherit from 'iSet'")
		.time.start()
		# features x pixels -> observations x features, using training scaling
		Xt <- t(as.matrix(iData(newx)))
		Xt <- scale(Xt, center=object$center[[1]], scale=object$scale[[1]])
		Y <- object$y[[1]]
		if ( missing(newy) ) {
			missing.newy <- TRUE
		} else {
			missing.newy <- FALSE
		}
		result <- lapply(object@resultData, function(res) {
			.message("PLS: Predicting for ncomp = ", res$ncomp, ".")
			pred <- .PLS.predict(Xt, Y, ncomp=res$ncomp,
				loadings=res$loadings, weights=res$weights,
				Yweights=res$Yweights)
			# BUGFIX: test the flags stored in the result (res$Ycenter /
			# res$Yscale); the original read the not-yet-defined locals
			# 'Ycenter'/'Yscale', which failed for models fitted with
			# center=FALSE.
			if ( is.logical(res$Ycenter) && !res$Ycenter ) {
				Ycenter <- 0
			} else {
				Ycenter <- res$Ycenter
			}
			if ( is.logical(res$Yscale) && !res$Yscale ) {
				Yscale <- 1
			} else {
				Yscale <- res$Yscale
			}
			# undo the response scaling so fitted values are on y's scale
			pred$fitted <- t(Yscale * t(pred$fitted) + Ycenter)
			if ( is.factor(object$y) || !missing.newy ) {
				pred$classes <- factor(apply(pred$fitted, 1, which.max))
				if ( !is.null(attr(newy, "PLS:y")) ) {
					newy <- attr(newy, "PLS:y")
					levels(pred$classes) <- levels(newy)
				} else {
					levels(pred$classes) <- levels(object$y[[1]])
				}
			}
			res[names(pred)] <- pred
			if ( !missing.newy )
				res$y <- newy
			res
		})
		.message("PLS: Done.")
		.time.stop()
		new("PLS",
			pixelData=newx@pixelData,
			featureData=newx@featureData,
			experimentData=newx@experimentData,
			protocolData=newx@protocolData,
			resultData=result,
			modelData=object@modelData)
	})
### implement methods for OPLS ####
## Fit OPLS (orthogonal PLS) models on an SImageSet; mirrors the PLS
## constructor but removes Y-orthogonal variation from X before the final
## single-component PLS fit (see .OPLS.fit). 'keep.Xnew' controls whether the
## deflated data matrices are retained in the predictions.
setMethod("OPLS", signature = c(x = "SImageSet", y = "matrix"),
	function(x, y, ncomp = 3,
		method = "nipals",
		center = TRUE,
		scale = FALSE,
		keep.Xnew = TRUE,
		iter.max = 100, ...)
	{
		method <- match.arg(method)
		ncomps <- sort(ncomp)
		if ( max(ncomps) > nrow(x) )
			.stop("OPLS: Can't fit more components than extent of dataset")
		# drop pixels whose response contains any NA before fitting
		nas <- apply(y, 1, function(yi) any(is.na(yi)))
		newx <- x
		newy <- y
		if ( any(nas) ) {
			.message("OPLS: Removing missing observations.")
			x <- x[,!nas]
			y <- y[!nas,]
		}
		.time.start()
		.message("OPLS: Centering data.")
		# observations x features, optionally centered/scaled
		Xt <- t(as.matrix(iData(x)))
		Xt <- scale(Xt, center=center, scale=scale)
		Y <- scale(y, center=center, scale=scale)
		if ( center ) {
			center <- attr(Xt, "scaled:center")
			Ycenter <- attr(Y, "scaled:center")
		} else {
			Ycenter <- FALSE
		}
		if ( scale ) {
			scale <- attr(Xt, "scaled:scale")
			Yscale <- attr(Y, "scaled:scale")
		} else {
			# unit scaling factors; a dead 'Yscale <- FALSE' assignment that
			# was immediately overwritten here has been removed
			scale <- rep(1, ncol(Xt))
			names(scale) <- colnames(Xt)
			Yscale <- rep(1, ncol(Y))
			names(Yscale) <- colnames(Y)
		}
		.message("OPLS: Fitting orthogonal partial least squares components.")
		fit <- .OPLS.fit(Xt, Y, ncomp=max(ncomps), method=method, iter.max=iter.max)
		# one result entry per requested component count, all sharing the fit
		result <- lapply(ncomps, function(ncomp) {
			append(fit, list(y=newy, ncomp=ncomp,
				method=method, center=center, scale=scale,
				Ycenter=Ycenter, Yscale=Yscale))
		})
		model <- AnnotatedDataFrame(
			data=data.frame(ncomp=sapply(result, function(fit) fit$ncomp)),
			varMetadata=data.frame(
				labelDescription=c(ncomp="Number of PLS Components")))
		featureNames(model) <- .format.data.labels(pData(model))
		names(result) <- .format.data.labels(pData(model))
		object <- new("OPLS",
			pixelData=x@pixelData,
			featureData=x@featureData,
			experimentData=x@experimentData,
			protocolData=x@protocolData,
			resultData=result,
			modelData=model)
		.time.stop()
		# predict on the full (unfiltered) dataset
		predict(object, newx=newx, newy=newy, keep.Xnew=keep.Xnew, ...)
	})
## Coerce a non-matrix response for OPLS: numeric responses become a
## one-column matrix, anything else is treated as a class label and expanded
## to a binary indicator matrix (one column per level) with the original
## factor stashed in attribute "OPLS:y".
setMethod("OPLS", signature = c(x = "SImageSet", y = "ANY"),
	function(x, y, ...)
	{
		if ( !is.numeric(y) ) {
			fac <- as.factor(y)
			indicator <- sapply(levels(fac),
				function(lvl) as.integer(fac == lvl))
			attr(indicator, "OPLS:y") <- fac
			OPLS(x, indicator, ...)
		} else {
			OPLS(x, as.matrix(y), ...)
		}
	})
## Predict from a fitted OPLS model: removes the Y-orthogonal variation from
## the (training-scaled) new data, then predicts with the one-component PLS
## fit; optionally drops the large deflated matrices when keep.Xnew=FALSE.
setMethod("predict", "OPLS",
	function(object, newx, newy, keep.Xnew = TRUE, ...)
	{
		if ( !is(newx, "iSet") )
			.stop("'newx' must inherit from 'iSet'")
		.time.start()
		# features x pixels -> observations x features, using training scaling
		Xt <- t(as.matrix(iData(newx)))
		Xt <- scale(Xt, center=object$center[[1]], scale=object$scale[[1]])
		Y <- object$y[[1]]
		if ( missing(newy) ) {
			missing.newy <- TRUE
		} else {
			missing.newy <- FALSE
		}
		result <- lapply(object@resultData, function(res) {
			.message("OPLS: Predicting for ncomp = ", res$ncomp, ".")
			pred <- .OPLS.predict(Xt, Y, ncomp=res$ncomp, method=res$method,
				loadings=res$loadings, Oloadings=res$Oloadings,
				weights=res$weights, Oweights=res$Oweights,
				Yweights=res$Yweights)
			# BUGFIX: test the flags stored in the result (res$Ycenter /
			# res$Yscale); the original read the not-yet-defined locals
			# 'Ycenter'/'Yscale', which failed for models fitted with
			# center=FALSE.
			if ( is.logical(res$Ycenter) && !res$Ycenter ) {
				Ycenter <- 0
			} else {
				Ycenter <- res$Ycenter
			}
			if ( is.logical(res$Yscale) && !res$Yscale ) {
				Yscale <- 1
			} else {
				Yscale <- res$Yscale
			}
			# undo the response scaling so fitted values are on y's scale
			pred$fitted <- t(Yscale * t(pred$fitted) + Ycenter)
			if ( is.factor(object$y) || !missing.newy ) {
				pred$classes <- factor(apply(pred$fitted, 1, which.max))
				if ( !is.null(attr(newy, "OPLS:y")) ) {
					newy <- attr(newy, "OPLS:y")
					levels(pred$classes) <- levels(newy)
				} else {
					levels(pred$classes) <- levels(object$y[[1]])
				}
			}
			res[names(pred)] <- pred
			if ( !keep.Xnew ) {
				# drop the (potentially large) deflated data matrices
				res$Xnew <- NULL
				res$Xortho <- NULL
			}
			if ( !missing.newy )
				res$y <- newy
			res
		})
		.message("OPLS: Done.")
		.time.stop()
		new("OPLS",
			pixelData=newx@pixelData,
			featureData=newx@featureData,
			experimentData=newx@experimentData,
			protocolData=newx@protocolData,
			resultData=result,
			modelData=object@modelData)
	})
# Dispatch PLS fitting to the requested algorithm (currently only NIPALS);
# unknown methods raise an error.
.PLS.fit <- function(X, Y, ncomp, method, iter.max) {
	switch(method,
		nipals = nipals.PLS(X, Y, ncomp=ncomp, iter.max=iter.max),
		stop("PLS method ", method, " not found"))
}
# Predict with a fitted PLS model using the first 'ncomp' components.
# Builds the projection matrix R = W (P'W)^-1 and regression coefficients
# B = R Q', then returns scores, fitted values, the truncated loadings /
# weights / Y-weights, and the projection/coefficient matrices. Column names
# of the fitted values follow the response's levels (factor) or colnames.
.PLS.predict <- function(X, Y, ncomp, loadings, weights, Yweights) {
	idx <- seq_len(ncomp)
	P <- loadings[,idx,drop=FALSE]
	W <- weights[,idx,drop=FALSE]
	Q <- Yweights[,idx,drop=FALSE]
	R <- W %*% solve(crossprod(P, W))	# projection
	B <- tcrossprod(R, Q)	# regression coefficients
	Tscores <- X %*% R
	Yhat <- X %*% B
	labels <- if ( is.factor(Y) ) levels(Y) else colnames(Y)
	rownames(Yweights) <- labels
	colnames(B) <- labels
	colnames(Yhat) <- labels
	list(scores=Tscores, fitted=Yhat,
		loadings=P, weights=W,
		Yweights=Yweights[,idx,drop=FALSE],
		projection=R, coefficients=B)
}
# Fit OPLS: extract 'ncomp' Y-orthogonal components from X, deflate X by the
# orthogonal variation, then fit a single-component PLS model on the deflated
# matrix. Returns the nipals.OPLS fit augmented with the PLS loadings,
# weights and Y-weights.
.OPLS.fit <- function(X, Y, ncomp, method, iter.max) {
	if ( method == "nipals" ) {
		fit <- nipals.OPLS(X, Y, ncomp=ncomp, iter.max=iter.max)
	} else {
		stop("OPLS method ", method, " not found")
	}
	# (the original re-ran nipals.OPLS unconditionally here, computing the
	# same fit twice; that redundant call and an unused 'nas' vector were
	# removed)
	Oloadings <- fit$Oloadings[,1:ncomp,drop=FALSE]
	Oweights <- fit$Oweights[,1:ncomp,drop=FALSE]
	Oscores <- X %*% Oweights
	Xortho <- tcrossprod(Oscores, Oloadings)
	Xnew <- X - Xortho
	fit1 <- nipals.PLS(Xnew, Y, ncomp=1, iter.max=iter.max)
	fit$loadings <- fit1$loadings
	fit$weights <- fit1$weights
	fit$Yweights <- fit1$Yweights
	fit
}
# Predict with a fitted OPLS model: remove the Y-orthogonal variation spanned
# by the first 'ncomp' orthogonal components, then delegate to the
# one-component PLS prediction on the deflated matrix. The 'method' and
# 'iter.max' arguments are accepted for interface compatibility but unused.
.OPLS.predict <- function(X, Y, ncomp, method, loadings, Oloadings,
	weights, Oweights, Yweights, iter.max)
{
	keep <- seq_len(ncomp)
	Po <- Oloadings[,keep,drop=FALSE]
	Wo <- Oweights[,keep,drop=FALSE]
	To <- X %*% Wo
	orthogonal <- tcrossprod(To, Po)
	deflated <- X - orthogonal
	pred <- .PLS.predict(deflated, Y, ncomp=1, loadings=loadings,
		weights=weights, Yweights=Yweights)
	append(pred, list(Xnew=deflated, Xortho=orthogonal, Oscores=To,
		Oloadings=Po, Oweights=Wo))
}
|
/R/stats-PLS.R
|
no_license
|
EmilySekera/Cardinal
|
R
| false
| false
| 9,904
|
r
|
### implement methods for PLS ####
## Fit PLS (partial least squares) models on an SImageSet against a numeric
## response matrix 'y' (one row per pixel, one column per response). One fit
## with the largest requested 'ncomp' is computed and shared across all
## requested component counts; the returned object also carries predictions
## for the full dataset (including pixels dropped for missing responses).
setMethod("PLS", signature = c(x = "SImageSet", y = "matrix"),
	function(x, y, ncomp = 3,
		method = "nipals",
		center = TRUE,
		scale = FALSE,
		iter.max = 100, ...)
	{
		method <- match.arg(method)
		ncomps <- sort(ncomp)
		if ( max(ncomps) > nrow(x) )
			.stop("PLS: Can't fit more components than extent of dataset")
		# drop pixels whose response contains any NA before fitting
		nas <- apply(y, 1, function(yi) any(is.na(yi)))
		newx <- x
		newy <- y
		if ( any(nas) ) {
			.message("PLS: Removing missing observations.")
			x <- x[,!nas]
			y <- y[!nas,]
		}
		.time.start()
		.message("PLS: Centering data.")
		# observations x features, optionally centered/scaled
		Xt <- t(as.matrix(iData(x)))
		Xt <- scale(Xt, center=center, scale=scale)
		Y <- scale(y, center=center, scale=scale)
		if ( center ) {
			center <- attr(Xt, "scaled:center")
			Ycenter <- attr(Y, "scaled:center")
		} else {
			Ycenter <- FALSE
		}
		if ( scale ) {
			scale <- attr(Xt, "scaled:scale")
			Yscale <- attr(Y, "scaled:scale")
		} else {
			# unit scaling factors; a dead 'Yscale <- FALSE' assignment that
			# was immediately overwritten here has been removed
			scale <- rep(1, ncol(Xt))
			names(scale) <- colnames(Xt)
			Yscale <- rep(1, ncol(Y))
			names(Yscale) <- colnames(Y)
		}
		.message("PLS: Fitting partial least squares components.")
		fit <- .PLS.fit(Xt, Y, ncomp=max(ncomps), method=method, iter.max=iter.max)
		# one result entry per requested component count, all sharing the fit
		result <- lapply(ncomps, function(ncomp) {
			append(fit, list(y=newy, ncomp=ncomp,
				method=method, center=center, scale=scale,
				Ycenter=Ycenter, Yscale=Yscale))
		})
		model <- AnnotatedDataFrame(
			data=data.frame(ncomp=sapply(result, function(fit) fit$ncomp)),
			varMetadata=data.frame(
				labelDescription=c(ncomp="Number of PLS Components")))
		featureNames(model) <- .format.data.labels(pData(model))
		names(result) <- .format.data.labels(pData(model))
		object <- new("PLS",
			pixelData=x@pixelData,
			featureData=x@featureData,
			experimentData=x@experimentData,
			protocolData=x@protocolData,
			resultData=result,
			modelData=model)
		.time.stop()
		# predict on the full (unfiltered) dataset
		predict(object, newx=newx, newy=newy)
	})
## Coerce a non-matrix response to the matrix form the PLS workhorse expects:
## numeric responses become a one-column matrix, anything else is treated as a
## class label and expanded to a binary indicator matrix (one column per
## level) with the original factor stashed in attribute "PLS:y".
setMethod("PLS", signature = c(x = "SImageSet", y = "ANY"),
	function(x, y, ...)
	{
		if ( !is.numeric(y) ) {
			fac <- as.factor(y)
			indicator <- sapply(levels(fac),
				function(lvl) as.integer(fac == lvl))
			attr(indicator, "PLS:y") <- fac
			PLS(x, indicator, ...)
		} else {
			PLS(x, as.matrix(y), ...)
		}
	})
## Predict from a fitted PLS model on new imaging data: scales 'newx' with
## the centering/scaling learned at training time, computes fitted values for
## every stored component count, and (for factor responses) derives a class
## label per pixel by which.max over the fitted indicator columns.
setMethod("predict", "PLS",
	function(object, newx, newy, ...)
	{
		if ( !is(newx, "iSet") )
			.stop("'newx' must inherit from 'iSet'")
		.time.start()
		# features x pixels -> observations x features, using training scaling
		Xt <- t(as.matrix(iData(newx)))
		Xt <- scale(Xt, center=object$center[[1]], scale=object$scale[[1]])
		Y <- object$y[[1]]
		if ( missing(newy) ) {
			missing.newy <- TRUE
		} else {
			missing.newy <- FALSE
		}
		result <- lapply(object@resultData, function(res) {
			.message("PLS: Predicting for ncomp = ", res$ncomp, ".")
			pred <- .PLS.predict(Xt, Y, ncomp=res$ncomp,
				loadings=res$loadings, weights=res$weights,
				Yweights=res$Yweights)
			# BUGFIX: test the flags stored in the result (res$Ycenter /
			# res$Yscale); the original read the not-yet-defined locals
			# 'Ycenter'/'Yscale', which failed for models fitted with
			# center=FALSE.
			if ( is.logical(res$Ycenter) && !res$Ycenter ) {
				Ycenter <- 0
			} else {
				Ycenter <- res$Ycenter
			}
			if ( is.logical(res$Yscale) && !res$Yscale ) {
				Yscale <- 1
			} else {
				Yscale <- res$Yscale
			}
			# undo the response scaling so fitted values are on y's scale
			pred$fitted <- t(Yscale * t(pred$fitted) + Ycenter)
			if ( is.factor(object$y) || !missing.newy ) {
				pred$classes <- factor(apply(pred$fitted, 1, which.max))
				if ( !is.null(attr(newy, "PLS:y")) ) {
					newy <- attr(newy, "PLS:y")
					levels(pred$classes) <- levels(newy)
				} else {
					levels(pred$classes) <- levels(object$y[[1]])
				}
			}
			res[names(pred)] <- pred
			if ( !missing.newy )
				res$y <- newy
			res
		})
		.message("PLS: Done.")
		.time.stop()
		new("PLS",
			pixelData=newx@pixelData,
			featureData=newx@featureData,
			experimentData=newx@experimentData,
			protocolData=newx@protocolData,
			resultData=result,
			modelData=object@modelData)
	})
### implement methods for OPLS ####
## Fit OPLS (orthogonal PLS) models on an SImageSet; mirrors the PLS
## constructor but removes Y-orthogonal variation from X before the final
## single-component PLS fit (see .OPLS.fit). 'keep.Xnew' controls whether the
## deflated data matrices are retained in the predictions.
setMethod("OPLS", signature = c(x = "SImageSet", y = "matrix"),
	function(x, y, ncomp = 3,
		method = "nipals",
		center = TRUE,
		scale = FALSE,
		keep.Xnew = TRUE,
		iter.max = 100, ...)
	{
		method <- match.arg(method)
		ncomps <- sort(ncomp)
		if ( max(ncomps) > nrow(x) )
			.stop("OPLS: Can't fit more components than extent of dataset")
		# drop pixels whose response contains any NA before fitting
		nas <- apply(y, 1, function(yi) any(is.na(yi)))
		newx <- x
		newy <- y
		if ( any(nas) ) {
			.message("OPLS: Removing missing observations.")
			x <- x[,!nas]
			y <- y[!nas,]
		}
		.time.start()
		.message("OPLS: Centering data.")
		# observations x features, optionally centered/scaled
		Xt <- t(as.matrix(iData(x)))
		Xt <- scale(Xt, center=center, scale=scale)
		Y <- scale(y, center=center, scale=scale)
		if ( center ) {
			center <- attr(Xt, "scaled:center")
			Ycenter <- attr(Y, "scaled:center")
		} else {
			Ycenter <- FALSE
		}
		if ( scale ) {
			scale <- attr(Xt, "scaled:scale")
			Yscale <- attr(Y, "scaled:scale")
		} else {
			# unit scaling factors; a dead 'Yscale <- FALSE' assignment that
			# was immediately overwritten here has been removed
			scale <- rep(1, ncol(Xt))
			names(scale) <- colnames(Xt)
			Yscale <- rep(1, ncol(Y))
			names(Yscale) <- colnames(Y)
		}
		.message("OPLS: Fitting orthogonal partial least squares components.")
		fit <- .OPLS.fit(Xt, Y, ncomp=max(ncomps), method=method, iter.max=iter.max)
		# one result entry per requested component count, all sharing the fit
		result <- lapply(ncomps, function(ncomp) {
			append(fit, list(y=newy, ncomp=ncomp,
				method=method, center=center, scale=scale,
				Ycenter=Ycenter, Yscale=Yscale))
		})
		model <- AnnotatedDataFrame(
			data=data.frame(ncomp=sapply(result, function(fit) fit$ncomp)),
			varMetadata=data.frame(
				labelDescription=c(ncomp="Number of PLS Components")))
		featureNames(model) <- .format.data.labels(pData(model))
		names(result) <- .format.data.labels(pData(model))
		object <- new("OPLS",
			pixelData=x@pixelData,
			featureData=x@featureData,
			experimentData=x@experimentData,
			protocolData=x@protocolData,
			resultData=result,
			modelData=model)
		.time.stop()
		# predict on the full (unfiltered) dataset
		predict(object, newx=newx, newy=newy, keep.Xnew=keep.Xnew, ...)
	})
## Coerce a non-matrix response for OPLS: numeric responses become a
## one-column matrix, anything else is treated as a class label and expanded
## to a binary indicator matrix (one column per level) with the original
## factor stashed in attribute "OPLS:y".
setMethod("OPLS", signature = c(x = "SImageSet", y = "ANY"),
	function(x, y, ...)
	{
		if ( !is.numeric(y) ) {
			fac <- as.factor(y)
			indicator <- sapply(levels(fac),
				function(lvl) as.integer(fac == lvl))
			attr(indicator, "OPLS:y") <- fac
			OPLS(x, indicator, ...)
		} else {
			OPLS(x, as.matrix(y), ...)
		}
	})
## Predict from a fitted OPLS model: removes the Y-orthogonal variation from
## the (training-scaled) new data, then predicts with the one-component PLS
## fit; optionally drops the large deflated matrices when keep.Xnew=FALSE.
setMethod("predict", "OPLS",
	function(object, newx, newy, keep.Xnew = TRUE, ...)
	{
		if ( !is(newx, "iSet") )
			.stop("'newx' must inherit from 'iSet'")
		.time.start()
		# features x pixels -> observations x features, using training scaling
		Xt <- t(as.matrix(iData(newx)))
		Xt <- scale(Xt, center=object$center[[1]], scale=object$scale[[1]])
		Y <- object$y[[1]]
		if ( missing(newy) ) {
			missing.newy <- TRUE
		} else {
			missing.newy <- FALSE
		}
		result <- lapply(object@resultData, function(res) {
			.message("OPLS: Predicting for ncomp = ", res$ncomp, ".")
			pred <- .OPLS.predict(Xt, Y, ncomp=res$ncomp, method=res$method,
				loadings=res$loadings, Oloadings=res$Oloadings,
				weights=res$weights, Oweights=res$Oweights,
				Yweights=res$Yweights)
			# BUGFIX: test the flags stored in the result (res$Ycenter /
			# res$Yscale); the original read the not-yet-defined locals
			# 'Ycenter'/'Yscale', which failed for models fitted with
			# center=FALSE.
			if ( is.logical(res$Ycenter) && !res$Ycenter ) {
				Ycenter <- 0
			} else {
				Ycenter <- res$Ycenter
			}
			if ( is.logical(res$Yscale) && !res$Yscale ) {
				Yscale <- 1
			} else {
				Yscale <- res$Yscale
			}
			# undo the response scaling so fitted values are on y's scale
			pred$fitted <- t(Yscale * t(pred$fitted) + Ycenter)
			if ( is.factor(object$y) || !missing.newy ) {
				pred$classes <- factor(apply(pred$fitted, 1, which.max))
				if ( !is.null(attr(newy, "OPLS:y")) ) {
					newy <- attr(newy, "OPLS:y")
					levels(pred$classes) <- levels(newy)
				} else {
					levels(pred$classes) <- levels(object$y[[1]])
				}
			}
			res[names(pred)] <- pred
			if ( !keep.Xnew ) {
				# drop the (potentially large) deflated data matrices
				res$Xnew <- NULL
				res$Xortho <- NULL
			}
			if ( !missing.newy )
				res$y <- newy
			res
		})
		.message("OPLS: Done.")
		.time.stop()
		new("OPLS",
			pixelData=newx@pixelData,
			featureData=newx@featureData,
			experimentData=newx@experimentData,
			protocolData=newx@protocolData,
			resultData=result,
			modelData=object@modelData)
	})
.PLS.fit <- function(X, Y, ncomp, method, iter.max) {
  # Dispatch to the requested PLS fitting backend.
  # Only the NIPALS algorithm is currently implemented.
  if ( method != "nipals" )
    stop("PLS method ", method, " not found")
  nipals.PLS(X, Y, ncomp=ncomp, iter.max=iter.max)
}
.PLS.predict <- function(X, Y, ncomp, loadings, weights, Yweights) {
  # Predict from fitted PLS loadings/weights using the first 'ncomp'
  # components. X is assumed to be already centered/scaled.
  keep <- seq_len(ncomp)
  P <- loadings[, keep, drop=FALSE]
  W <- weights[, keep, drop=FALSE]
  C <- Yweights[, keep, drop=FALSE]
  # Projection matrix R = W (P'W)^-1 maps X into the score space
  R <- W %*% solve(crossprod(P, W))
  # Regression coefficients B = R C'
  B <- tcrossprod(R, C)
  scores <- X %*% R
  fitted <- X %*% B
  # Label the response dimension with the class levels (factor Y) or
  # the response column names (matrix Y).
  response.names <- if ( is.factor(Y) ) levels(Y) else colnames(Y)
  rownames(Yweights) <- response.names
  colnames(B) <- response.names
  colnames(fitted) <- response.names
  list(scores=scores, fitted=fitted,
    loadings=P, weights=W,
    Yweights=Yweights[, keep, drop=FALSE],
    projection=R, coefficients=B)
}
.OPLS.fit <- function(X, Y, ncomp, method, iter.max) {
  # Fit an OPLS model: estimate 'ncomp' Y-orthogonal components, remove
  # them from X, then fit a single predictive PLS component on the
  # orthogonally-corrected data.
  if ( method == "nipals" ) {
    fit <- nipals.OPLS(X, Y, ncomp=ncomp, iter.max=iter.max)
  } else {
    stop("OPLS method ", method, " not found")
  }
  # (FIX: removed a redundant, unconditional second call to
  # nipals.OPLS that refit the identical model and doubled the
  # computation; also removed the unused 'nas' local.)
  Oloadings <- fit$Oloadings[,1:ncomp,drop=FALSE]
  Oweights <- fit$Oweights[,1:ncomp,drop=FALSE]
  Oscores <- X %*% Oweights
  Xortho <- tcrossprod(Oscores, Oloadings)
  # Orthogonal signal correction: strip Y-orthogonal variation from X
  Xnew <- X - Xortho
  # One predictive component on the corrected data
  fit1 <- nipals.PLS(Xnew, Y, ncomp=1, iter.max=iter.max)
  fit$loadings <- fit1$loadings
  fit$weights <- fit1$weights
  fit$Yweights <- fit1$Yweights
  fit
}
.OPLS.predict <- function(X, Y, ncomp, method, loadings, Oloadings,
  weights, Oweights, Yweights, iter.max)
{
  # Remove the Y-orthogonal variation from X, then predict with the
  # single predictive PLS component fit on the corrected data.
  # ('method' and 'iter.max' are accepted for interface compatibility
  # but are not needed at prediction time.)
  keep <- seq_len(ncomp)
  Po <- Oloadings[, keep, drop=FALSE]
  Wo <- Oweights[, keep, drop=FALSE]
  To <- X %*% Wo
  Xortho <- tcrossprod(To, Po)
  Xcorrected <- X - Xortho
  pred <- .PLS.predict(Xcorrected, Y, ncomp=1, loadings=loadings,
    weights=weights, Yweights=Yweights)
  c(pred, list(Xnew=Xcorrected, Xortho=Xortho, Oscores=To,
    Oloadings=Po, Oweights=Wo))
}
|
#' A function to load a module function from url or disk.
#'
#' Loads a module function into the global environment ready to be used in a
#' zoon workflow. This function is mostly for use while developing modules.
#' Workflows run with modules defined locally are no longer reproducible and
#' so are discouraged and will be tagged as 'unreproducible'.
#'
#' @param module A string that describes the location of the R file. Can be
#'   a full URL or a path to a local file.
#'
#' @return Name of the function. Adds function to global namespace.
#' @name LoadModule
#' @export
LoadModule <- function(module) {
  # module must be a string
  # Candidate URL for the module in the user's configured zoon repository
  # and branch (set via options() elsewhere in the package).
  zoonURL <- sprintf(
    "%s/%s/R/%s.R",
    options("zoonRepo"),
    options("zoonRepoBranch"),
    module
  )
  # Resolution order: local file path, then the repository URL built
  # above, then 'module' itself treated as a URL.
  # If module is a path, load module
  if (file.exists(module)) {
    txt <- parse(text = paste(readLines(module), collapse = "\n"))
    # If zoonURL is a zoon repo url, load module. Could probably do the
    # same thing as GetModule here to avoid a repeated web call.
  } else if (url.exists(zoonURL, .opts = list(ssl.verifypeer = FALSE))) {
    txt <- parse(text = getURL(zoonURL, ssl.verifypeer = FALSE))
    # If module on its own is a url, load module
  } else if (url.exists(module, .opts = list(ssl.verifypeer = FALSE))) {
    txt <- parse(text = getURL(module, ssl.verifypeer = FALSE))
    # Otherwise throw an error, suggesting near-miss module names found
    # by fuzzy matching against the repository listing.
  } else {
    modList <- GetModuleList()
    # agrep with max.distance = 0.3 tolerates roughly 30% edits between
    # the requested name and known module names.
    module_idx <- agrep(module, unlist(modList), max.distance = 0.3)
    closeMatches <- unlist(modList)[module_idx]
    if (length(closeMatches) == 0) {
      stop ("Can't find '", module,
            "' or any modules with closely matching names.")
    } else if (length(closeMatches) == 1) {
      stop ("Can't find '", module,
            "'. Did you mean '", closeMatches, "'?")
    } else {
      stop ("Can't find '", module, "'. Did you mean one of '",
            paste(closeMatches, collapse = "', "), "'?")
    }
  }
  # Load to global environment
  eval(txt, envir = globalenv())
  # Return the actual name of the module that has been loaded.
  # Don't just return 'module' argument as that can be url/path
  # which isn't useful.
  # The parsed code is evaluated a second time in this function's local
  # environment so ls() can reveal the newly defined name.
  # NOTE(review): this assumes the module file defines exactly one new
  # object and that this function has no other locals — fragile, but
  # preserved as-is.
  eval(txt)
  new.func.name <- ls()[!ls() %in% c("module", "txt", "zoonURL")]
  return(new.func.name)
}
# A function to get a module function.
#
# Checks for the module in the global namespace, then the zoon repo. Then reads
# the module source and loads the function.
#
# @param module A string that describes the location of the R file. Can be a
#   module name assuming the module is in global namespace or
#   github.com/zoonproject/modules. Otherwise can be a full URL or a local
#   file.
# @param forceReproducible Do we want to force the function to get modules from
#   the zoon repo, even if they exist in the global namespace.
# @param environment The environment (by default the caller's) in which the
#   module function is defined as a side effect.
#
# @return Name of the module, carrying a "version" attribute ("local copy"
#   when taken from the global environment). The module function itself is
#   defined in 'environment'.
# @name GetModule
GetModule <- function(module, forceReproducible, environment = parent.frame()) {
  # URL to module in user's favourite repo & branch
  zoonURL <- sprintf(
    "%s/%s/R/%s.R",
    options("zoonRepo"),
    options("zoonRepoBranch"),
    module
  )
  # If the module is in global namespace, use that function
  # unless forceReproducible is TRUE, in which case we want to get it
  # from the repo.
  #
  # Get module from zoonURL otherwise.
  module_exists <- exists(module,
                          where = ".GlobalEnv",
                          mode = "function",
                          inherits = FALSE)
  if (module_exists & !forceReproducible) {
    # Copy the globally defined function into the target environment and
    # mark it as a local (non-reproducible) copy.
    assign(module,
           eval(parse(text = module), envir = globalenv()),
           envir = environment)
    attr(module, "version") <- "local copy"
    return(module)
  } else {
    # RCurl::getURL; SSL verification deliberately disabled (legacy).
    rawText <- getURL(zoonURL, ssl.verifypeer = FALSE)
  }
  # getURL returns a "404: Not Found" body if no webpage was found.
  # Use this to avoid two web calls.
  if (grepl("^404: Not Found", rawText)) {
    stop('Cannot find "', module,
         '". Check that the module is on the zoon repository ',
         'or in the global namespace.')
  }
  # Parse text from webpage.
  txt <- parse(text = rawText)
  # Evaluate the parsed text in the workflow call environment, defining
  # the module function there.
  eval(txt, envir = environment)
  # Assign version attribute (parsed from the roxygen @section Version tag)
  attr(module, "version") <- GetModuleVersion(rawText)
  return(module)
}
# A function to apply GetModule to a list correctly.
# @param modules A list from CheckModList() giving details for
#   one or more modules.
# @param forceReproducible Logical to determine whether the modules should be
#   taken from the repo even if they exist locally, to enforce
#   reproducibility.
# @param environment Environment in which the module functions are defined.
# @name LapplyGetModule
LapplyGetModule <- function (modules,
                             forceReproducible,
                             environment = parent.frame()) {
  # Fetch one module and append its function name and version to the
  # module's descriptor list.
  fetch_one <- function(mod) {
    got <- GetModule(as.character(mod$module),
                     forceReproducible,
                     environment)
    c(mod, func = got, version = attr(got, "version"))
  }
  lapply(modules, fetch_one)
}
#' RunModels
#'
#' A function to train and predict crossvalidation folds and train one full
#' model and predict any external validation data. This function is primarily
#' used internally but can be used when running workflows interactively (see
#' vignette \code{basic zoon useage})
#'
#' @param df Dataframe from process module. Should contain columns value, type,
#'   lon, lat and fold, a number indicating which cross validation set a
#'   datapoint is in. If fold is 0 then this is considered external validation
#'   data. If all data is 0 or 1, then no cross validation is run.
#'
#' @param modelFunction String giving the name of the model function which is in
#'   turn the name of the module.
#'
#' @param paras All other parameters that should be passed to the model
#'   function. i.e. model[[1]]$paras
#'
#' @param workEnv The environment name of the workflow call environment.
#'
#' @return A list of length 2 containing the model trained on all data and a
#'   data.frame which contains value, type, fold, lon, lat, predictions and then
#'   all environmental variables.
#' @export
#' @name RunModels
RunModels <- function(df, modelFunction, paras, workEnv) {
  # Count non zero folds
  # 0 are for external validation only.
  k <- length(unique(df$fold)[unique(df$fold) != 0])
  # Doing predictions requires handling of NAs in subsets
  # of the data (subsets = folds). This breaks if there
  # is already a na.action attribute on df, so we remove it
  if ("na.action" %in% names(attributes(df)))
    attributes(df) <- attributes(df)[!names(attributes(df)) %in% "na.action"]
  # Init. output dataframe with predictions column
  # Old versions of modules dont use this attribute
  ## REMOVE ONCE MODULES UPDATED ##
  if ("covCols" %in% names(attributes(df))) {
    # Rebuild df with a 'predictions' column inserted just before the
    # covariate columns (identified by the 'covCols' attribute).
    dfOut <- cbindZoon(
      subsetColumnsZoon(df, !colnames(df) %in% attr(df, "covCols")),
      cbind(predictions = NA, df[colnames(df) %in% attr(df, "covCols")])
    )
  } else {
    # Legacy layout: columns 1-5 are metadata, 6+ are covariates.
    dfOut <- cbind(df[, 1:5], predictions = NA, df[, 6:NCOL(df)])
    names(dfOut)[7:ncol(dfOut)] <- names(df)[6:ncol(df)]
  }
  # We don't know that they want cross validation.
  # If they do, k>1, then run model k times and predict out of bag
  # Skip otherwise
  if (k > 1) {
    for (i in 1:k) {
      # Train on every row whose fold differs from i.
      # NOTE(review): this keeps the external-validation rows (fold 0)
      # in the CV training data — confirm this is intended.
      modelFold <- do.call(
        modelFunction, c(
          .df = list(df[df$fold != i, ]),
          paras
        ),
        envir = workEnv
      )
      # Predict the held-out fold i using only covariate columns.
      # Old versions of modules dont use this attribute
      ## REMOVE ONCE MODULES UPDATED ##
      if ("covCols" %in% names(attributes(df))) {
        pred <- ZoonPredict(
          modelFold,
          newdata = subsetColumnsZoon(
            df[df$fold == i, ],
            attr(df, "covCols")
          )
        )
      } else {
        pred <- ZoonPredict(
          modelFold,
          newdata = df[df$fold == i, 6:NCOL(df), drop = FALSE]
        )
      }
      dfOut$predictions[df$fold == i] <- pred
    }
  }
  # Run model on all data except external validation data
  m <- do.call(
    modelFunction, c(.df = list(df[df$fold != 0, ]), paras),
    envir = workEnv
  )
  # If an external validation dataset exists (fold 0), predict it with
  # the full model.
  if (0 %in% df$fold) {
    # Old versions of modules dont use this attribute
    ## REMOVE ONCE MODULES UPDATED ##
    if ("covCols" %in% names(attributes(df))) {
      pred <- ZoonPredict(
        m,
        newdata = subsetColumnsZoon(
          df[df$fold == 0, ],
          attr(df, "covCols")
        )
      )
    } else {
      pred <- ZoonPredict(
        m,
        newdata = df[df$fold == 0, 6:NCOL(df), drop = FALSE]
      )
    }
    dfOut$predictions[df$fold == 0] <- pred
  }
  # Return list of crossvalid and external validation predictions
  # This list is then the one list that has everything in it.
  out <- list(model = m, data = dfOut)
  return(out)
}
# CheckModList
#
# Helper to sort module arguments into lists with common structure.
# Any input ends up as
# list(module = moduleName, paras = list(paraName1 = para, paraName2 = 2)).
# See tests/testthat/testZoon.R for the list of potential input.
# @param x A 'call' or 'character' from substitute(occurrence) etc. So either
#   quoted which yields a character substitute('module1') or unquoted which
#   yields a call substitute(module1).
# @return A list of one descriptor list per module, each with elements
#   'module' and 'paras'; chained inputs additionally carry attr "chain".
# @name CheckModList
CheckModList <- function(x) {
  # Should accept occurrence = 'module1', but NOT
  # occurrence = 'module1(k=2)', or occurrence = 'list(mod1, mod1)'
  if (inherits(x, "character")) {
    if (grepl("[^\' | ^\"]", x) & grepl("[( | )]", x)) {
      stop(paste(
        "If specifying module arguments please use the form",
        "Module(para = 2), without quotes. No special characters should exist",
        "in module names."
      ))
    }
  }
  # If argument is passed as unquoted moduleName: occurrence = ModuleName
  # (idiom fix: inherits() instead of comparing class(x) to a string)
  if (inherits(x, "name")) {
    ModuleList <- list(list(module = as.character(x), paras = list()))
    # If list of modules given: occurrence = list(Mod1, Mod2).
    # For list(Mod1(k=2), Mod2(p = 3)), parameters are sorted in
    # FormatModuleList
  } else if (x[[1]] == "list") {
    listCall <- as.list(x)
    listCall[[1]] <- NULL
    ModuleList <- lapply(listCall, FormatModuleList)
    # If Chained modules given: occurrence = Chain(Mod1, Mod2)
  } else if (x[[1]] == "Chain") {
    listCall <- as.list(x)
    listCall[[1]] <- NULL
    ModuleList <- lapply(listCall, FormatModuleList)
    attr(ModuleList, "chain") <- TRUE
    # If replicated modules given: occurrence = Replicate(Mod1, n)
    # (Replicate is evaluated to expand into a list of calls first)
  } else if (x[[1]] == "Replicate") {
    listCall <- eval(x)
    ModuleList <- lapply(listCall, FormatModuleList)
    # If unquoted module w/ paras given: occurrence = Module1(k=2)
  } else if (inherits(x[[1]], "name")) {
    # Parameters
    paras <- as.list(x)
    paras[[1]] <- NULL
    ModuleList <- list(list(module = as.character(x[[1]]), paras = paras))
    # Deal with all quoted forms
    # Can include 'module1', 'module(para = 2)', 'module(p = 2, q = 'wer')'
  } else if (inherits(x, "character")) {
    ModuleList <- list(SplitArgs(x))
  } else {
    stop(paste("Please check the format of argument", as.character(x)))
  }
  return(ModuleList)
}
# Split a string into a module and its arguments
#
# Takes a string (from workflow$call) and splits it into a module name
# and a named list of its (still unevaluated, textual) arguments.
#
# @param string A string of the form "moduleName" or
#   "moduleName(parameter = 2, parameter2 = 3)"
# @return list(module = <name>, paras = <named list of argument strings>)
# @name SplitArgs
SplitArgs <- function(string) {
  # Everything before the opening parenthesis is the module name; a bare
  # name (no parentheses) passes through unchanged.
  mod_name <- gsub("(^)(.*)(\\(.*$)", "\\2", string)
  arg_text <- if (grepl("\\(", string)) {
    gsub("(^.*\\()(.*)(\\)$)", "\\2", string)
  } else {
    ""
  }
  # Split "a = 1, b = 2" into name/value pairs, stripping spaces.
  pieces <- strsplit(arg_text, ",")[[1]]
  halves <- strsplit(pieces, "=")
  vals <- lapply(halves, function(h) gsub(" ", "", h[2]))
  names(vals) <- unlist(lapply(halves, function(h) gsub(" ", "", h[1])))
  list(module = mod_name, paras = vals)
}
# FormatModuleList
#
# Little helper to format module lists. Returns a list where $module is
# the module name (a symbol) and $paras is a (possibly empty) named list
# of the parameters given for the module.
# @param x An object of class 'name' or 'call' e.g. module1 or module1(k=2).
# @name FormatModuleList
FormatModuleList <- function(x) {
  # as.list() on a call yields the function name followed by its
  # arguments; on a bare name it yields just that name.
  parts <- as.list(x)
  # Everything after the first element is the parameter list.
  list(module = parts[[1]], paras = parts[-1])
}
#' ExtractAndCombData
#'
#' Simply extract covariates from rasters and combine with occurrence data.
#' This function is primarily used internally but can be used when
#' running workflows interactively (see vignette \code{basic zoon useage})
#'
#' @param occurrence A data frame from an occurrence module
#' @param ras Environmental raster layer, stack or brick.
#' @return A list with elements \code{df} (occurrence data plus extracted
#'   covariate columns) and \code{ras} (the input raster), the format
#'   required by process modules.
#' @export
#' @name ExtractAndCombData
ExtractAndCombData <- function(occurrence, ras) {
  # Drop occurrence rows with missing coordinates
  NArows <- apply(
    occurrence[, c("longitude", "latitude")],
    1,
    function(x) any(is.na(x))
  )
  if (any(NArows)) {
    warning (sum(NArows), " row(s) of occurrence data have NA values ",
             "for latitude/longitude and will be removed")
    occurrence <- occurrence[!NArows, ]
  }
  # Reproject occurrence coordinates only when both the raster and the
  # occurrence data declare a CRS.
  if (is.na(projection(ras))) {
    message("Covariate raster does not have a projection, zoon will assume ",
            "this is in the same projection as your occurrence data")
  } else if ("crs" %in% tolower(colnames(occurrence))) {
    occurrence <- TransformCRS(
      occurrence = occurrence,
      ras_projection = projection(ras)
    )
  }
  # Check that all points are within the raster extent
  bad.coords <- is.na(cellFromXY(
    ras,
    occurrence[, c("longitude", "latitude")]
  ))
  if (any(bad.coords)) {
    nr_before <- nrow(occurrence)
    occurrence <- occurrence[!bad.coords, ]
    nr_after <- nrow(occurrence)
    if (nr_after > 0) {
      # (FIX: warning() concatenates its arguments with no separator, so
      # the counts previously ran straight into the message text, e.g.
      # "2occurrence points ... leaving5occurrence points". Spaces added.)
      warning (nr_before - nr_after, " occurrence points are outside ",
               "the raster extent and have been removed before modelling ",
               "leaving ", nr_after, " occurrence points")
    } else if (nr_after == 0) {
      warning ("All occurrence points are outside the raster extent. ",
               "Try changing your raster.")
    }
  }
  # extract covariates from lat long values in df.
  ras.values <- raster::extract(ras, occurrence[, c('longitude', 'latitude')])
  if (length(ras.values) == 0) {
    occurrenceCovariates <- NULL
    warning ("Locations in the occurrence data did not match your raster ",
             "so no covariate data were extracted. ",
             "This is only a good idea if you are creating simulated data ",
             "in the process module")
  } else {
    if (any(is.na(ras.values))) {
      # (FIX: space added for the same warning-concatenation reason.)
      warning(sum(is.na(ras.values)), " extracted covariate values are NA. ",
              "This may cause issues for some models")
    }
    occurrenceCovariates <- as.matrix(ras.values)
    colnames(occurrenceCovariates) <- names(ras)
  }
  df <- cbindZoon(occurrence, occurrenceCovariates)
  # assign call_path attribute to this new object
  attr(df, "call_path") <- attr(occurrence, "call_path")
  # record the covariate column names
  attr(df, "covCols") <- names(ras)
  # Return as list of df and ras as required by process modules
  return(list(df = df, ras = ras))
}
#' Chain modules together
#'
#' \code{Chain} combines multiple modules of the same module type such that they
#' are executed sequentially and their outputs combined. For example, process
#' modules may be \code{Chain}ed to carry out successive processing operations.
#' By contrast, \code{list}ing modules of the same type would split the workflow
#' into multiple parallel workflows, each using a different module at this step.
#'
#' Similarly for occurrence or covariate modules the datasets are joined (row-
#' or layer-wise) whereas \code{list} would carry out separate analyses. Model
#' and output modules may not be chained. Developers should note that this
#' function is not actually used - calls using \code{Chain} are parsed by
#' workflow, with behaviour similar to this function.
#'
#' @param ... List of modules to be chained.
#' @export
#' @name Chain
Chain <- function(...) {
  # Collect the modules and flag the list so workflow() treats them as a
  # sequential chain rather than parallel alternatives.
  structure(list(...), chain = TRUE)
}
# SortArgs
#
# Helper to take substituted (deparsed) args from a workflow call and
# paste them back into a single runnable workflow() call string.
# @param occSub,covSub,proSub,modSub,outSub Deparsed text of the
#   occurrence, covariate, process, model and output arguments.
# @param forceReproducible Logical; included verbatim in the call text.
# @return A length-one character vector containing the workflow() call.
# @name SortArgs
SortArgs <- function (occSub,
                      covSub,
                      proSub,
                      modSub,
                      outSub,
                      forceReproducible) {
  # (FIX: the result was previously captured in a dead local assignment
  # ('call <- ...') and so returned invisibly; return it directly.)
  paste0(
    "workflow(",
    "occurrence = ", occSub,
    ", covariate = ", covSub,
    ", process = ", proSub,
    ", model = ", modSub,
    ", output = ", outSub,
    ", forceReproducible = ", as.character(forceReproducible),
    ")"
  )
}
# SplitCall
#
# Helper to split a character-string workflow call, as inherited from
# zoonWorkflow, into its constituent arguments.
# @param call A character string of a valid zoon workflow call.
# @return A named character vector with one element per workflow argument.
# @name SplitCall
SplitCall <- function(call) {
  # For each argument, match three groups (prefix, argument text,
  # suffix) and substitute the whole string with just the middle group.
  patterns <- c(
    occurrence        = "(.*occurrence = )(.*)(, covariate.*$)",
    covariate         = "(.*covariate = )(.*)(, process.*$)",
    process           = "(.*process = )(.*)(, model.*$)",
    model             = "(.*model = )(.*)(, output.*$)",
    output            = "(.*output = )(.*)(, forceReproducible.*$)",
    forceReproducible = "(.*forceReproducible = )(.*)())"
  )
  # vapply preserves the pattern names, giving a named result vector.
  vapply(patterns, function(p) gsub(p, "\\2", call), character(1))
}
# ErrorModule
#
# Function used in tryCatch calls in workflow.
# If an error is caught, report useful messages and then stop().
#
# @param cond The error message that was caught in tryCatch.
# @param mod Which stage has failed? 1=occurrence, 2=covariate,
#   3=ExtractAndCombData, 4=process, 5=model, 6=output.
# @param e The workflow call environment (accepted for interface
#   compatibility; not used here).
# @name ErrorModule
ErrorModule <- function(cond, mod, e) {
  # Human-readable name for each failure point, indexed by 'mod'
  failure_points <- c(
    "occurrence module.",
    "covariate module.",
    paste("ExtractAndCombData, a function that combines",
          "occurrence and covariate data."),
    "process module.",
    "model module.",
    "output module."
  )
  where <- failure_points[mod]
  # Echo the caught error(s) for the user
  message("Caught errors:\n", cond)
  message()
  # Throw error. The call for this error is meaningless so don't print it.
  stop(paste("Stopping workflow due to error in", where, "\n"), call. = FALSE)
}
# PasteAndDep
#
# Paste and deparse. Helper to format substituted args. If arguments were
# chained or listed then deparse() may return multiple lines; join them
# back into a single string.
# @param x An unevaluated expression (from substitute()).
# @name PasteAndDep
PasteAndDep <- function(x) {
  pieces <- deparse(x)
  paste(pieces, collapse = " ")
}
# Writeable
#
# Check whether we can write to a given filepath, throw an error if not.
# Inspired by the is.writeable functionality in assertthat.
# @param dir A length-one character path to an existing, writeable
#   directory.
# @name Writeable
Writeable <- function(dir) {
  # (FIX: the checks previously did not short-circuit, so non-character
  # or non-existent input crashed inside file.exists()/file.access()
  # before the intended error message could be produced.)
  OK <- is.character(dir) &&
    length(dir) == 1 &&
    file.exists(dir) &&
    file.access(dir, mode = 2)[[1]] == 0
  if (!OK) stop("directory is not writeable ", dir)
}
#' Get MaxEnt
#'
#' Helper function to get the MaxEnt java executable and install it in
#' the right locations for zoon modules that use `dismo::maxent` and
#' `biomod2`.
#'
#' @details Since MaxEnt may not be distributed other than via the MaxEnt
#'   website, users must download the file themselves and place it in the
#'   right location for R packages to access it. This function helps with
#'   that. Just run \code{GetMaxEnt()} and follow the prompts in the
#'   terminal.
#'
#' @export
#' @name GetMaxEnt
#' @importFrom utils browseURL
GetMaxEnt <- function() {
  # Guide the user through downloading 'maxent.jar' and copy it to the
  # locations dismo and biomod2 expect.
  browser_txt <- paste("\nTo get MaxEnt working, you'll need to download the",
                       "executable file from the MaxEnt website. The website",
                       "will require you to give some details, once you've",
                       "done this please download the 'maxent.jar' file to",
                       "somewhere memorable (you'll have to find it again in",
                       "a second).\n\nPress return to launch the MaxEnt",
                       "website and continue.")
  chooser_txt <- paste("\n\n\n\nzoon now needs to copy the 'maxent.jar' file",
                       "to the correct locations.\n\nPress return to locate",
                       "the 'maxent.jar' file you just downloaded")
  # Step one: prompt, wait for return, then open the download page
  message(browser_txt)
  invisible(readline())
  browseURL("http://www.cs.princeton.edu/~schapire/maxent/")
  # Step two: prompt, wait for return, then let the user pick the file
  message(chooser_txt)
  invisible(readline())
  jar <- file.choose()
  # Refuse anything that is not the expected executable
  if (basename(jar) != "maxent.jar") {
    stop("the file selected was not 'maxent.jar'")
  }
  # dismo looks in its installed java/ directory; biomod2 looks in the
  # current working directory.
  dismo_loc <- paste0(system.file(package = "dismo"), "/java/maxent.jar")
  biomod2_loc <- "./maxent.jar"
  dismo_ok <- file.copy(jar, dismo_loc, overwrite = TRUE)
  biomod2_ok <- file.copy(jar, biomod2_loc, overwrite = TRUE)
  # Message, warn, or error depending on the level of success
  if (dismo_ok && biomod2_ok) {
    message("maxent.jar successfully deployed for MaxEnt ",
            "and BiomodModel modules")
  } else if (dismo_ok) {
    warning ("maxent.jar successfully deployed for MaxEnt module, ",
             "but not for BiomodModel module")
  } else if (biomod2_ok) {
    warning ("maxent.jar successfully deployed for BiomodModel module, ",
             "but not for MaxEnt module")
  } else {
    stop ("maxent.jar not deployed for MaxEnt or BiomodModel modules")
  }
}
# AddDefaultParas
#
# Adds the default (workflow-internal) parameters and their descriptions
# to the parameters list in BuildModule.
# @param paras The original named list of parameter descriptions
# @param type The module type used to allocate the default arguments
# @return The parameter list with the canonical internal-argument
#   descriptions prepended (replacing any user-supplied versions).
AddDefaultParas <- function(paras, type) {
  # Names of the hidden, workflow-supplied arguments per module type
  hidden_args <- list(
    occurrence = NULL, covariate = NULL, process = c(".data"),
    model = c(".df"), output = c(".model", ".ras")
  )
  # Drop any user-supplied descriptions of internal arguments; the
  # canonical descriptions below always win.
  paras <- paras[!names(paras) %in% hidden_args[[type]]]
  default_paras <- list(
    occurrence = NULL,
    covariate = NULL,
    process = list(.data = paste(
      "\\strong{Internal parameter, do not use in the workflow function}.",
      "\\code{.data} is a list of a data frame and a raster object returned",
      "from occurrence modules and covariate modules respectively.",
      "\\code{.data} is passed automatically in workflow from the occurrence",
      "and covariate modules to the process module(s) and should not be passed",
      "by the user."
    )),
    model = list(.df = paste(
      "\\strong{Internal parameter, do not use in the workflow function}.",
      "\\code{.df} is data frame that combines the occurrence data and",
      "covariate data. \\code{.df} is passed automatically in workflow from",
      "the process module(s) to the model module(s) and should not be",
      "passed by the user."
    )),
    output = list(
      .model = paste(
        "\\strong{Internal parameter, do not use in the workflow function}.",
        "\\code{.model} is list of a data frame (\\code{data}) and a model",
        "object (\\code{model}). \\code{.model} is passed automatically in",
        "workflow, combining data from the model module(s) and process",
        "module(s) to the output module(s) and should not be passed by",
        "the user."
      ),
      .ras = paste(
        "\\strong{Internal parameter, do not use in the workflow function}.",
        "\\code{.ras} is a raster layer, brick or stack object. \\code{.ras}",
        "is passed automatically in workflow from the covariate module(s) to",
        "the output module(s) and should not be passed by the user."
      )
    )
  )
  # Prepend the defaults so internal arguments are documented first
  c(default_paras[[type]], paras)
}
# StringToCall
#
# Takes a string and converts it to an unevaluated call. Useful for
# taking the call from a workflow that has been run and re-running it.
# @param x A character vector; only the first element is parsed, and
#   only the first expression of that element is returned.
StringToCall <- function(x) {
  exprs <- parse(text = x[[1]])
  exprs[[1]]
}
# GetModuleVersion
#
# Using the raw text returned from a GetURL call to github,
# extract the module's version number from its roxygen
# "@section Version: " tag.
# @param rawText The full module source as a single string.
GetModuleVersion <- function(rawText) {
  tag <- "@section Version: "
  # Break the source into lines and keep the one(s) carrying the tag
  all_lines <- strsplit(rawText, "\n")[[1]]
  version_line <- all_lines[grep(tag, all_lines)]
  # The version is everything after the end of the tag match
  hit <- gregexpr(tag, version_line)
  from <- hit[[1]] + attr(hit[[1]], "match.length")
  substr(version_line, from, nchar(version_line))
}
# tryCatchModule
#
# Runs a call to a module in testing and handles any error in a way
# that makes debugging easier: on failure, a 'moduleError' character
# message is returned containing the error and a copy-pasteable
# reproduction snippet.
# @param expr Expression to evaluate (lazily, inside tryCatch).
# @param code_chunk Character vector of code lines reproducing the call.
# @param fun Name of the module function, used in the debugonce() hint.
# @param debug Include a debugonce() hint in the message?
tryCatchModule <- function(expr, code_chunk, fun, debug = TRUE) {
  handler <- function(err, func = fun, debug_f = debug) {
    debug_hint <- if (debug_f) paste0("debugonce(", func, ")\n") else ""
    msg <- paste(
      "\nYour module failed to run with default parameters\n",
      "ERROR:", err,
      "\nYou can debug this error by running the following code chunk",
      "\n===========\n",
      debug_hint,
      paste(code_chunk, collapse = "\n"),
      "\n==========="
    )
    # Tag the message so callers can distinguish it from a real result
    class(msg) <- "moduleError"
    msg
  }
  tryCatch(expr = expr, error = handler)
}
# tryCatchWorkflow
#
# Runs a workflow in testing and handles errors that may occur in a way
# that makes debugging easier (delegates to tryCatchModule).
#
# @param expr The (unevaluated) workflow expression to run.
# @param placeholder String in the deparsed expression to replace with
#   the real function name 'fun' when printing the reproduction snippet.
# @param fun Name of the module function under test.
tryCatchWorkflow <- function(expr, placeholder, fun) {
  # Deparse the unevaluated call so we can show a runnable snippet;
  # substitute() must run before 'expr' is forced.
  code_temp <- paste0(trimws(capture.output(print(substitute(expr)))))
  # Strip surrounding braces and swap the placeholder for the actual
  # module name so the printed snippet is copy-pasteable.
  code_chunk <- gsub(placeholder, fun, trimws(gsub("[{}]", "", code_temp)))
  # debug = FALSE: a debugonce() hint is not useful for a whole workflow
  tryCatchModule(
    expr = expr, code_chunk = code_chunk,
    fun = fun, debug = FALSE
  )
}
|
/R/zoonHelpers.R
|
no_license
|
rdpalacio/zoon
|
R
| false
| false
| 27,497
|
r
|
#' A function to load a module function from url or disk.
#'
#' Loads a module function into the global environment ready to be used in a
#' zoon workflow. This function is mostly for use while developing modules.
#' Workflows run with modules defined locally are no longer reproducible and
#' so are discouraged and will be tagged as 'unreproducible'.
#'
#' @param module A string that describes the location of the R file. Can be
#'   a full URL or a path to a local file.
#'
#' @return Name of the function. Adds function to global namespace.
#' @name LoadModule
#' @export
LoadModule <- function(module) {
  # module must be a string
  # Candidate URL for the module in the user's configured zoon repository
  # and branch (set via options() elsewhere in the package).
  zoonURL <- sprintf(
    "%s/%s/R/%s.R",
    options("zoonRepo"),
    options("zoonRepoBranch"),
    module
  )
  # Resolution order: local file path, then the repository URL built
  # above, then 'module' itself treated as a URL.
  # If module is a path, load module
  if (file.exists(module)) {
    txt <- parse(text = paste(readLines(module), collapse = "\n"))
    # If zoonURL is a zoon repo url, load module. Could probably do the
    # same thing as GetModule here to avoid a repeated web call.
  } else if (url.exists(zoonURL, .opts = list(ssl.verifypeer = FALSE))) {
    txt <- parse(text = getURL(zoonURL, ssl.verifypeer = FALSE))
    # If module on its own is a url, load module
  } else if (url.exists(module, .opts = list(ssl.verifypeer = FALSE))) {
    txt <- parse(text = getURL(module, ssl.verifypeer = FALSE))
    # Otherwise throw an error, suggesting near-miss module names found
    # by fuzzy matching against the repository listing.
  } else {
    modList <- GetModuleList()
    # agrep with max.distance = 0.3 tolerates roughly 30% edits between
    # the requested name and known module names.
    module_idx <- agrep(module, unlist(modList), max.distance = 0.3)
    closeMatches <- unlist(modList)[module_idx]
    if (length(closeMatches) == 0) {
      stop ("Can't find '", module,
            "' or any modules with closely matching names.")
    } else if (length(closeMatches) == 1) {
      stop ("Can't find '", module,
            "'. Did you mean '", closeMatches, "'?")
    } else {
      stop ("Can't find '", module, "'. Did you mean one of '",
            paste(closeMatches, collapse = "', "), "'?")
    }
  }
  # Load to global environment
  eval(txt, envir = globalenv())
  # Return the actual name of the module that has been loaded.
  # Don't just return 'module' argument as that can be url/path
  # which isn't useful.
  # The parsed code is evaluated a second time in this function's local
  # environment so ls() can reveal the newly defined name.
  # NOTE(review): this assumes the module file defines exactly one new
  # object and that this function has no other locals — fragile, but
  # preserved as-is.
  eval(txt)
  new.func.name <- ls()[!ls() %in% c("module", "txt", "zoonURL")]
  return(new.func.name)
}
# A function to get a module function.
#
# Checks for the module in the global namespace, then the zoon repo. Then reads
# the module source and loads the function.
#
# @param module A string that describes the location of the R file. Can be a
#   module name assuming the module is in global namespace or
#   github.com/zoonproject/modules. Otherwise can be a full URL or a local
#   file.
# @param forceReproducible Do we want to force the function to get modules from
#   the zoon repo, even if they exist in the global namespace.
# @param environment The environment (by default the caller's) in which the
#   module function is defined as a side effect.
#
# @return Name of the module, carrying a "version" attribute ("local copy"
#   when taken from the global environment). The module function itself is
#   defined in 'environment'.
# @name GetModule
GetModule <- function(module, forceReproducible, environment = parent.frame()) {
  # URL to module in user's favourite repo & branch
  zoonURL <- sprintf(
    "%s/%s/R/%s.R",
    options("zoonRepo"),
    options("zoonRepoBranch"),
    module
  )
  # If the module is in global namespace, use that function
  # unless forceReproducible is TRUE, in which case we want to get it
  # from the repo.
  #
  # Get module from zoonURL otherwise.
  module_exists <- exists(module,
                          where = ".GlobalEnv",
                          mode = "function",
                          inherits = FALSE)
  if (module_exists & !forceReproducible) {
    # Copy the globally defined function into the target environment and
    # mark it as a local (non-reproducible) copy.
    assign(module,
           eval(parse(text = module), envir = globalenv()),
           envir = environment)
    attr(module, "version") <- "local copy"
    return(module)
  } else {
    # RCurl::getURL; SSL verification deliberately disabled (legacy).
    rawText <- getURL(zoonURL, ssl.verifypeer = FALSE)
  }
  # getURL returns a "404: Not Found" body if no webpage was found.
  # Use this to avoid two web calls.
  if (grepl("^404: Not Found", rawText)) {
    stop('Cannot find "', module,
         '". Check that the module is on the zoon repository ',
         'or in the global namespace.')
  }
  # Parse text from webpage.
  txt <- parse(text = rawText)
  # Evaluate the parsed text in the workflow call environment, defining
  # the module function there.
  eval(txt, envir = environment)
  # Assign version attribute (parsed from the roxygen @section Version tag)
  attr(module, "version") <- GetModuleVersion(rawText)
  return(module)
}
# A function to apply GetModule to a list correctly.
# @param modules A list from CheckModList() giving details for
#   one or more modules.
# @param forceReproducible Logical to determine whether the modules should be
#   taken from the repo even if they exist locally, to enforce
#   reproducibility.
# @param environment Environment in which the module functions are defined.
# @name LapplyGetModule
LapplyGetModule <- function (modules,
                             forceReproducible,
                             environment = parent.frame()) {
  # Fetch one module and append its function name and version to the
  # module's descriptor list.
  fetch_one <- function(mod) {
    got <- GetModule(as.character(mod$module),
                     forceReproducible,
                     environment)
    c(mod, func = got, version = attr(got, "version"))
  }
  lapply(modules, fetch_one)
}
#' RunModels
#'
#' A function to train and predict crossvalidation folds and train one full
#' model and predict any external validation data. This function is primarily
#' used internally but can be used when running workflows interactively (see
#' vignette \code{basic zoon useage})
#'
#' @param df Dataframe from process module. Should contain columns value, type,
#' lon, lat and fold, a number indicating which cross validation set a
#' datapoint is in. If fold is 0 then this is considered external validation
#' data If all data is 0 or 1, then no cross validation is run.
#'
#' @param modelFunction String giving the name of the model function which is in
#' turn the name of the module.
#'
#' @param paras All other parameters that should be passed to the model
#' function. i.e. model[[1]]$paras
#'
#' @param workEnv The environment name of the workflow call environment.
#'
#' @return A list of length 2 containing the model trained on all data and a
#' data.frame which contains value, type, fold, lon, lat, predictions and then
#' all environmental variables.
#' @export
#' @name RunModels
RunModels <- function(df, modelFunction, paras, workEnv) {
  # Count non zero folds.
  # Fold 0 is reserved for external validation only.
  k <- length(unique(df$fold)[unique(df$fold) != 0])
  # Doing predictions requires handling of NAs in subsets
  # of the data (subsets = folds). This breaks if there
  # is already a na.action attribute on df, so we remove it.
  if ("na.action" %in% names(attributes(df)))
    attributes(df) <- attributes(df)[!names(attributes(df)) %in% "na.action"]
  # Initialise the output dataframe with a predictions column.
  # Old versions of modules dont use the "covCols" attribute.
  ## REMOVE ONCE MODULES UPDATED ##
  if ("covCols" %in% names(attributes(df))) {
    # New-style data: non-covariate columns first, then predictions, then
    # the covariate columns identified by the covCols attribute.
    dfOut <- cbindZoon(
      subsetColumnsZoon(df, !colnames(df) %in% attr(df, "covCols")),
      cbind(predictions = NA, df[colnames(df) %in% attr(df, "covCols")])
    )
  } else {
    # Old-style data: columns 1-5 are assumed to be value/type/lon/lat/fold
    # and everything from column 6 onwards is a covariate.
    dfOut <- cbind(df[, 1:5], predictions = NA, df[, 6:NCOL(df)])
    names(dfOut)[7:ncol(dfOut)] <- names(df)[6:ncol(df)]
  }
  # We don't know that they want cross validation.
  # If they do, k > 1, then run the model k times and predict out of bag.
  # Skip otherwise.
  if (k > 1) {
    for (i in 1:k) {
      # Train on every non-external fold except fold i.
      modelFold <- do.call(
        modelFunction, c(
          .df = list(df[df$fold != i, ]),
          paras
        ),
        envir = workEnv
      )
      # Old versions of modules dont use the "covCols" attribute.
      ## REMOVE ONCE MODULES UPDATED ##
      if ("covCols" %in% names(attributes(df))) {
        # Predict the held-out fold using only the covariate columns.
        pred <- ZoonPredict(
          modelFold,
          newdata = subsetColumnsZoon(
            df[df$fold == i, ],
            attr(df, "covCols")
          )
        )
      } else {
        pred <- ZoonPredict(
          modelFold,
          newdata = df[df$fold == i, 6:NCOL(df), drop = FALSE]
        )
      }
      # Store the out-of-bag predictions against the held-out rows.
      dfOut$predictions[df$fold == i] <- pred
    }
  }
  # Run the model on all data except external validation data (fold 0).
  m <- do.call(
    modelFunction, c(.df = list(df[df$fold != 0, ]), paras),
    envir = workEnv
  )
  # If an external validation dataset exists, predict that too.
  if (0 %in% df$fold) {
    # Old versions of modules dont use the "covCols" attribute.
    ## REMOVE ONCE MODULES UPDATED ##
    if ("covCols" %in% names(attributes(df))) {
      pred <- ZoonPredict(
        m,
        newdata = subsetColumnsZoon(
          df[df$fold == 0, ],
          attr(df, "covCols")
        )
      )
    } else {
      pred <- ZoonPredict(
        m,
        newdata = df[df$fold == 0, 6:NCOL(df), drop = FALSE]
      )
    }
    dfOut$predictions[df$fold == 0] <- pred
  }
  # Return the full-data model plus the data with cross-validation and
  # external-validation predictions filled in.
  out <- list(model = m, data = dfOut)
  return(out)
}
# CheckModList
#
# Helper to sort module arguments into lists with common structure.
# Want any input to end up as
# list(module = moduleName, paras = list(paraName1 = para, paraName2 = 2)).
# See tests/testthat/testZoon.R for list of potential input.
# @param x A 'call' or 'character' from substitute(occurrence) etc. So either
# quoted which yields a character substitute('module1') or unquotes which
# yields a call substitute(module1).
# @name CheckModList
# Check passing as quoted. e.g. occurrence = "ModuleName(k=2)"
# Also occurrence = "list(mod1, mod2)" is probably bad.
CheckModList <- function(x) {
  # Normalise any accepted module argument form into
  # list(list(module = name, paras = list(...)), ...).
  # Should accept occurrence = 'module1', but NOT
  # occurrence = 'module1(k=2)', or occurrence = 'list(mod1, mod1)'.
  if (inherits(x, "character")) {
    if (grepl("[^\' | ^\"]", x) & grepl("[( | )]", x)) {
      stop(paste(
        "If specifying module arguments please use the form",
        "Module(para = 2), without quotes. No special characters should exist",
        "in module names."
      ))
    }
  }
  # If argument is passed as unquoted moduleName: occurrence = ModuleName,
  if (class(x) == "name") {
    ModuleList <- list(list(module = as.character(x), paras = list()))
    # If list of modules given: occurrence = list(Mod1, Mod2),
    # If list(Mod1(k=2), Mod2(p = 3)), parameters sorted in
    # FormatModuleList
  } else if (x[[1]] == "list") {
    listCall <- as.list(x)
    listCall[[1]] <- NULL
    ModuleList <- lapply(listCall, FormatModuleList)
    # If Chained modules given: occurrence = Chain(Mod1, Mod2),
    # same as list but tagged with a "chain" attribute.
  } else if (x[[1]] == "Chain") {
    listCall <- as.list(x)
    listCall[[1]] <- NULL
    ModuleList <- lapply(listCall, FormatModuleList)
    attr(ModuleList, "chain") <- TRUE
    # Replicate(...) is evaluated first; it is expected to return a list of
    # module calls which are then formatted like a plain list.
  } else if (x[[1]] == "Replicate") {
    listCall <- eval(x)
    ModuleList <- lapply(listCall, FormatModuleList)
    # If unquoted module w/ paras given: occurrence = Module1(k=2)
  } else if (identical(class(x[[1]]), "name")) {
    # Parameters: everything in the call after the function name.
    paras <- as.list(x)
    paras[[1]] <- NULL
    ModuleList <- list(list(module = as.character(x[[1]]), paras = paras))
    # Deal with all quoted forms.
    # Can include 'module1', 'module(para = 2)', 'module(p = 2, q = 'wer')'
    # (only reached for character input that passed the check above).
  } else if (inherits(x, "character")) {
    ModuleList <- list(SplitArgs(x))
  } else {
    stop(paste("Please check the format of argument", as.character(x)))
  }
  return(ModuleList)
}
# Split a string into a module and it's arguments
#
# A function that takes a string (from workflow$call) and splits it into a
# module name and it's arguments.
#
# @param string A string of the form "moduleName" or
# "moduleName(parameter = 2, parameter2 = 3)"
#
# @name SplitArgs
SplitArgs <- function(string) {
  # Split "moduleName" or "moduleName(p1 = 2, p2 = 3)" into the module name
  # and a named list of (still unparsed, character) argument values.
  # Everything before the opening parenthesis is the module name; strings
  # without parentheses are returned unchanged by gsub.
  module <- gsub("(^)(.*)(\\(.*$)", "\\2", string)
  if (grepl("\\(", string)) {
    args <- gsub("(^.*\\()(.*)(\\)$)", "\\2", string)
  } else {
    args <- ""
  }
  # Bug fix: previously an empty argument string (e.g. "module" or
  # "module()") produced a single NA parameter named "". Return an empty
  # parameter list instead.
  if (!nzchar(trimws(args))) {
    return(list(module = module, paras = list()))
  }
  sepArgs <- (strsplit(args, ","))[[1]]
  # Strip whitespace from each "name = value" pair; values stay character.
  arguments <- lapply(
    strsplit(sepArgs, "="),
    function(x) gsub(" ", "", x[2])
  )
  names(arguments) <- unlist(lapply(
    strsplit(sepArgs, "="),
    function(x) gsub(" ", "", x[1])
  ))
  return(list(module = module, paras = arguments))
}
# FormatModuleList
#
# Little helper to format module lists. Want to return a list
# newList$module is the module name and newList$paras is a list
# of the parameters given for the module.
# @param x An object of class 'name' or 'call' e.g. module1 or module1(k=2).
# @name FormatModuleList
FormatModuleList <- function(x) {
  # Coerce the 'name' or 'call' into a list: the first element is always
  # the module name, any remaining (named) elements are its parameters.
  parts <- as.list(x)
  list(module = parts[[1]], paras = parts[-1])
}
#' ExtractAndCombData
#'
#' Simply extract covariates from rasters and combine with occurrence data.
#' This function is primarily used internally but can be used when
#' running workflows interactively (see vignette \code{basic zoon useage})
#'
#' @param occurrence A data frame from an occurrence module
#' @param ras Environmental raster layer, stack or brick.
#' @export
#' @name ExtractAndCombData
ExtractAndCombData <- function(occurrence, ras) {
  # Drop occurrence rows with missing coordinates; they cannot be matched
  # to raster cells.
  NArows <- apply(
    occurrence[, c("longitude", "latitude")],
    1,
    function(x) any(is.na(x))
  )
  if (any(NArows)) {
    warning (sum(NArows), " row(s) of occurrence data have NA values ",
             "for latitude/longitude and will be removed")
    occurrence <- occurrence[!NArows, ]
  }
  # If the raster has a projection and the occurrence data carry a crs
  # column, reproject the occurrence coordinates to match the raster.
  if (is.na(projection(ras))) {
    message("Covariate raster does not have a projection, zoon will assume ",
            "this is in the same projection as your occurrence data")
  } else if ("crs" %in% tolower(colnames(occurrence))) {
    occurrence <- TransformCRS(
      occurrence = occurrence,
      ras_projection = projection(ras)
    )
  }
  # Check that all points are within the raster extent.
  bad.coords <- is.na(cellFromXY(
    ras,
    occurrence[, c("longitude", "latitude")]
  ))
  if (any(bad.coords)) {
    nr_before <- nrow(occurrence)
    occurrence <- occurrence[!bad.coords, ]
    nr_after <- nrow(occurrence)
    if (nr_after > 0) {
      # Bug fix: separators added so counts are no longer glued to words
      # (previously printed e.g. "12occurrence points ... leaving130occurrence").
      warning (nr_before - nr_after, " occurrence points are outside ",
               "the raster extent and have been removed before modelling, ",
               "leaving ", nr_after, " occurrence points")
    } else if (nr_after == 0) {
      warning ("All occurrence points are outside the raster extent. ",
               "Try changing your raster.")
    }
  }
  # Extract covariate values at the remaining occurrence coordinates.
  ras.values <- raster::extract(ras, occurrence[, c('longitude', 'latitude')])
  if (length(ras.values) == 0) {
    occurrenceCovariates <- NULL
    warning ("Locations in the occurrence data did not match your raster ",
             "so no covariate data were extracted. ",
             "This is only a good idea if you are creating simulated data ",
             "in the process module")
  } else {
    if (any(is.na(ras.values))) {
      # Bug fix: space added after the count (was "3extracted covariate...").
      warning(sum(is.na(ras.values)), " extracted covariate values are NA. ",
              "This may cause issues for some models")
    }
    occurrenceCovariates <- as.matrix(ras.values)
    colnames(occurrenceCovariates) <- names(ras)
  }
  df <- cbindZoon(occurrence, occurrenceCovariates)
  # Preserve the module call path recorded on the occurrence data.
  attr(df, "call_path") <- attr(occurrence, "call_path")
  # Record which columns are covariates so downstream modules can find them.
  attr(df, "covCols") <- names(ras)
  # Return as list of df and ras as required by process modules.
  return(list(df = df, ras = ras))
}
#' Chain modules together
#'
#' \code{Chain} combines multiple modules of the same module type such that they
#' are executed sequentially and their outputs combined. For example, process
#' modules may be \code{Chain}ed to carry out successive processing operations.
#' By contrast, \code{list}ing modules of the same type would split the workflow
#' into multiple parallel workflows, each using a different module at this step.
#'
#' Similarly for occurrence or covariate modules the datasets are joined (row-
#' or layer-wise) whereas \code{list} would carry out separate analyses. Model
#' and output modules may not be chained. Developers should note that this
#' function is not actually used - calls using \code{Chain} are parsed by
#' workflow, with behaviour similar to this function.
#'
#' @param ... List of modules to be chained.
#' @export
#' @name Chain
Chain <- function(...) {
  # Collect the modules and tag the list so workflow parsing recognises it
  # as a sequential chain rather than a list of parallel alternatives.
  modules <- list(...)
  structure(modules, chain = TRUE)
}
# SortArgs
#
#
# Helper to take substituted args from workflow call and paste them into
# a runeable workflow function.
# @name SortArgs
SortArgs <- function (occSub,
                      covSub,
                      proSub,
                      modSub,
                      outSub,
                      forceReproducible) {
  # Reconstruct a runnable workflow() call as a single character string.
  # Fix: previously the string was assigned to a dead local variable as the
  # last expression, so it was only returned invisibly; return it directly.
  paste0(
    "workflow(",
    "occurrence = ", occSub,
    ", covariate = ", covSub,
    ", process = ", proSub,
    ", model = ", modSub,
    ", output = ", outSub,
    ", forceReproducible = ", as.character(forceReproducible),
    ")"
  )
}
# SplitCall
#
# Helper to split a character string workflow call, as inherited from
# zoonWorkflow into it's constituent arguments
# @param call A character string of a valid zoon workflow call.
# @name SplitCall
SplitCall <- function(call) {
  # Regex to find each argument within call.
  # Find 3 patterns and sub the whole string with just the middle pattern;
  # the middle pattern is the argument value.
  occurrence <- gsub("(.*occurrence = )(.*)(, covariate.*$)", "\\2", call)
  covariate <- gsub("(.*covariate = )(.*)(, process.*$)", "\\2", call)
  process <- gsub("(.*process = )(.*)(, model.*$)", "\\2", call)
  model <- gsub("(.*model = )(.*)(, output.*$)", "\\2", call)
  output <- gsub("(.*output = )(.*)(, forceReproducible.*$)", "\\2", call)
  # Fix: the original pattern "(.*forceReproducible = )(.*)())" contained an
  # empty group plus a stray unescaped parenthesis; anchor on the closing
  # ")" at the end of the call instead, consistent with the patterns above.
  forceReproducible <- gsub("(.*forceReproducible = )(.*)(\\)$)", "\\2", call)
  # Make vector and add names.
  split <- c(occurrence, covariate, process, model, output, forceReproducible)
  names(split) <- c(
    "occurrence", "covariate", "process",
    "model", "output", "forceReproducible"
  )
  return(split)
}
# ErrorModule
#
# Function used in tryCatch calls in workflow.
# If an error is caught, return some useful messages.
# Then stop().
# cond is the error messages passed by try catch.
# mod is the modules number (1:5) to give NULLS to the correct modules.
#
# @param e The workflow call environment
# @param cond The error message that was caught in tryCatch.
# @param mod Which module has failed? 1=occurrence, 2=covariate, 3=process
# 4=model, 5=output.
# @name ErrorModule
ErrorModule <- function(cond, mod, e) {
  # Human-readable names for each workflow stage, indexed by `mod`.
  stageNames <- c(
    "occurrence module.",
    "covariate module.",
    paste("ExtractAndCombData, a function that combines",
          "occurrence and covariate data."),
    "process module.",
    "model module.",
    "output module."
  )
  failedStage <- stageNames[mod]
  # Report the underlying error(s) caught by tryCatch, then a blank line.
  message("Caught errors:\n", cond)
  message()
  # Abort the workflow; the call itself is meaningless so suppress it.
  stop(paste("Stopping workflow due to error in", failedStage, "\n"),
       call. = FALSE)
}
# PasteAndDep
#
# Paste and deparse. Helper to format substituted args. If arguments were
# chained or listed then substitute gives a list. We want to paste it back
# together.
# @name PasteAndDep
PasteAndDep <- function(x) {
  # deparse() may split long expressions over several strings; collapse
  # them back into one space-separated string.
  deparsed <- deparse(x)
  paste(deparsed, collapse = " ")
}
# Writeable
#
# Check whether we can write to a given filepath, throw an error if not.
# Inspired by the is.writeable functionality in assertthat
# @name Writeable
Writeable <- function(dir) {
  # Run every check (no short-circuiting, matching the original behaviour)
  # and raise a single error if any of them fails.
  valid <- TRUE
  if (!is.character(dir)) valid <- FALSE
  if (length(dir) != 1) valid <- FALSE
  if (!file.exists(dir)) valid <- FALSE
  if (file.access(dir, mode = 2)[[1]] != 0) valid <- FALSE
  if (!valid) stop("directory is not writeable ", dir)
}
#' Get MaxEnt
#'
#' Helper function to get the MaxEnt java executable and install it in
#' the right locations for zoon modules that use `dismo::maxent` and
#' `biomod2`.
#'
#' @details Since MaxEnt may not be distributed other than via the MaxEnt
#' website, users must download the file themselves and place it in the
#' right location for R packages to access it. This function helps with that.
#' Just run \code{GetMaxEnt()} and follow the prompts in the terminal.
#'
#' @export
#' @name GetMaxEnt
#' @importFrom utils browseURL
GetMaxEnt <- function() {
  # Interactive helper: sends the user to download the MaxEnt executable,
  # then locates the downloaded file and copies it into place. Uses
  # readline()/file.choose(), so it requires an interactive session.
  # define the prompts shown to the user
  browser_txt <- paste("\nTo get MaxEnt working, you'll need to download the",
                       "executable file from the MaxEnt website. The website",
                       "will require you to give some details, once you've",
                       "done this please download the 'maxent.jar' file to",
                       "somewhere memorable (you'll have to find it again in",
                       "a second).\n\nPress return to launch the MaxEnt",
                       "website and continue.")
  chooser_txt <- paste("\n\n\n\nzoon now needs to copy the 'maxent.jar' file",
                       "to the correct locations.\n\nPress return to locate",
                       "the 'maxent.jar' file you just downloaded")
  # step one, download the file
  message(browser_txt) # speak to user
  invisible(readline()) # make them hit return
  browseURL("http://www.cs.princeton.edu/~schapire/maxent/") # open the browser
  # step two, choose the file
  message(chooser_txt) # speak to user
  invisible(readline()) # make them hit return
  file <- file.choose()
  # check it's maxent.jar
  if (basename(file) != "maxent.jar") {
    stop("the file selected was not 'maxent.jar'")
  }
  # copy to dismo's and biomod2's preferred locations
  # (biomod2 expects the jar in the current working directory)
  dismo_loc <- paste0(system.file(package = "dismo"), "/java/maxent.jar")
  biomod2_loc <- "./maxent.jar"
  dismo_success <- file.copy(file, dismo_loc, overwrite = TRUE)
  biomod2_success <- file.copy(file, biomod2_loc, overwrite = TRUE)
  # message, warn, or error depending on the level of success
  if (dismo_success) {
    if (biomod2_success) {
      message("maxent.jar successfully deployed for MaxEnt ",
              "and BiomodModel modules")
    } else {
      warning ("maxent.jar successfully deployed for MaxEnt module, ",
               "but not for BiomodModel module")
    }
  } else if (biomod2_success) {
    warning ("maxent.jar successfully deployed for BiomodModel module, ",
             "but not for MaxEnt module")
  } else {
    stop ("maxent.jar not deployed for MaxEnt or BiomodModel modules")
  }
}
# AddDefaultParas
#
# Adds the default parameters and their descriptions to the parameters list in
# BuildModule.
# @param paras The orginial named list of paramenter descriptions
# @param type The module type used to allocated the default arguements
AddDefaultParas <- function(paras, type) {
  # Names of the internal (dot-prefixed) arguments that each module type
  # receives automatically from workflow().
  defArgs <- list(
    occurrence = NULL, covariate = NULL, process = c(".data"),
    model = c(".df"), output = c(".model", ".ras")
  )
  # Drop any user-supplied descriptions of those defaults, then prepend
  # the canonical descriptions below so they always come first.
  paras <- paras[!names(paras) %in% defArgs[[type]]]
  # Canonical Rd-formatted descriptions of the internal arguments.
  default_paras <- list(
    occurrence = NULL,
    covariate = NULL,
    process = list(.data = paste(
      "\\strong{Internal parameter, do not use in the workflow function}.",
      "\\code{.data} is a list of a data frame and a raster object returned",
      "from occurrence modules and covariate modules respectively.",
      "\\code{.data} is passed automatically in workflow from the occurrence",
      "and covariate modules to the process module(s) and should not be passed",
      "by the user."
    )),
    model = list(.df = paste(
      "\\strong{Internal parameter, do not use in the workflow function}.",
      "\\code{.df} is data frame that combines the occurrence data and",
      "covariate data. \\code{.df} is passed automatically in workflow from",
      "the process module(s) to the model module(s) and should not be",
      "passed by the user."
    )),
    output = list(
      .model = paste(
        "\\strong{Internal parameter, do not use in the workflow function}.",
        "\\code{.model} is list of a data frame (\\code{data}) and a model",
        "object (\\code{model}). \\code{.model} is passed automatically in",
        "workflow, combining data from the model module(s) and process",
        "module(s) to the output module(s) and should not be passed by",
        "the user."
      ),
      .ras = paste(
        "\\strong{Internal parameter, do not use in the workflow function}.",
        "\\code{.ras} is a raster layer, brick or stack object. \\code{.ras}",
        "is passed automatically in workflow from the covariate module(s) to",
        "the output module(s) and should not be passed by the user."
      )
    )
  )
  # Add these defaults to the front of the para list.
  return(c(default_paras[[type]], paras))
}
# StringToCall
#
# takes a string and converts it to a call. This is useful for taking the
# call from a workflow that has been run and re-running it.
# @param x The string
StringToCall <- function(x) {
  # Parse the (first element of the) string and return the first parsed
  # expression as a call object, suitable for re-running a workflow.
  parsed <- parse(text = x[[1]])
  parsed[[1]]
}
# GetModuleVersion
#
# Using the raw text returned from a GetURL call to github
# this extracts the version number
GetModuleVersion <- function(rawText) {
  # Split the raw module source into individual lines.
  moduleLines <- strsplit(rawText, "\n")[[1]]
  # Locate the roxygen line carrying the version tag.
  versionLine <- moduleLines[grep("@section Version: ", moduleLines)]
  # Find where the tag ends; the version string follows immediately and
  # runs to the end of the line.
  tagPos <- gregexpr("@section Version: ", versionLine)
  versionStart <- tagPos[[1]] + attr(tagPos[[1]], "match.length")
  substr(versionLine, versionStart, nchar(versionLine))
}
# tryCatchModule
#
# This function runs a call to a module in testing and handles
# error that may occur in a way to make debugging easier
tryCatchModule <- function(expr, code_chunk, fun, debug = TRUE) {
  # On failure, build a "moduleError"-classed message that includes the
  # original error and a copy-pasteable code chunk for debugging.
  handler <- function(err) {
    debug_hint <- if (debug) paste0("debugonce(", fun, ")\n") else ""
    msg <- paste(
      "\nYour module failed to run with default parameters\n",
      "ERROR:", err,
      "\nYou can debug this error by running the following code chunk",
      "\n===========\n",
      debug_hint,
      paste(code_chunk, collapse = "\n"),
      "\n==========="
    )
    structure(msg, class = "moduleError")
  }
  tryCatch(expr = expr, error = handler)
}
# tryCatchWorkflow
#
# This function runs a workflow in testing and handles
# error that may occur in a way to make debugging easier
tryCatchWorkflow <- function(expr, placeholder, fun) {
  # Capture the unevaluated workflow expression as printed text, strip
  # braces and surrounding whitespace, and substitute the placeholder with
  # the real module name to build a reproducible code chunk.
  deparsed <- trimws(capture.output(print(substitute(expr))))
  cleaned <- trimws(gsub("[{}]", "", deparsed))
  code_chunk <- gsub(placeholder, fun, cleaned)
  # Delegate to the module error handler; no debugonce() hint for whole
  # workflows.
  tryCatchModule(
    expr = expr, code_chunk = code_chunk,
    fun = fun, debug = FALSE
  )
}
|
## collections-dashboard-prep
# Prep accessions data from EMu for dashboard-prep
#
# 1) In EMu, retrieve Accession records for dashboard,
# 05-May-2017 dataset includes all efmnhtransactions records retrieved in this query:
#
# select all
# from efmnhtransactions
# where true and
# (
# (TraTransactionType contains 'incoming')
# or
# (TraTransactionType contains 'outgoing')
# )
# or
# (
# (LoaLoanType contains 'incoming')
# or
# (LoaLoanType contains 'outgoing')
# )
# or
# (TraTransactionType contains '\"Exhibit Loan\"')
# or
# (LoaLoanType contains '\"Exhibit Loan\"')
#
# 2) Report them out with "DashboardTran" report
# - see collections-dashboard "Help" page for details on which fields are included in report
# - If under 200k records, report all at one time.
# - Don't rename reported files. Keep them together in one folder.
#
# 3) Run this script
# - NOTE: May need to re-set working directory to folder containing reported csv's
# (see lines 59 & 60)
#
# Filter to only include records where:
# - date => 2005
# - Transaction (or Loan) Type = incoming or outgoing
# (exhibitions may be included later)
# - Counts use Items not "Object" fields
# Announce progress; this script is sourced as one step of a longer pipeline.
print(paste(date(), "-- ...finished setting up Experience data. Starting dash026LoansPrep.R"))
# point to the directory containing the set of "Group" csv's from EMu
# NOTE(review): relies on a global `origdir` defined by an earlier pipeline
# script, and changes the working directory as a side effect.
setwd(paste0(origdir,"/data01raw/emuLoans"))
# Import raw EMu Accession data ####
Loan1 <- read.csv(file="efmnhtra.csv", stringsAsFactors = F)
# Ignore these for now -- but here in case detail is needed later
#temp = list.files(path=".", pattern=".csv")
#list2env(
#  lapply(setNames(temp, make.names(temp)),
#         read.csv), envir = .GlobalEnv)
## May need to convert efmnhtra.csv fields to as.character if import in batch
##Group1.csv <- Group1.csv[,-1]
##Group1.csv$row <- sequence(rle(as.character(InvCount.csv$efmnhtransactions_key))$lengths)
##Group1.csv <- spread(InvCount.csv, row, InvCount, sep="_")
#
#InvCount.csv <- InvCount.csv[,-1]
#InvDescr.csv <- InvDescr.csv[,-1]
#InvGeogr.csv <- InvGeogr.csv[,-1]
#InvTrans.csv <- InvTrans.csv[,-1]
#
#InvCount.csv$row <- sequence(rle(as.character(InvCount.csv$efmnhtransactions_key))$lengths)
#InvCount1 <- spread(InvCount.csv, row, InvCount, sep="_")
##InvCount1$sumInv <- summarise(InvCount1, sumItems = row_1+row_2+row_3+row_4+row_5)
#
#InvDescr.csv$row <- sequence(rle(as.character(InvDescr.csv$efmnhtransactions_key))$lengths)
#InvDescr1 <- spread(InvDescr.csv, row, InvDescription, sep="_")
#
#InvGeogr.csv$row <- sequence(rle(as.character(InvGeogr.csv$efmnhtransactions_key))$lengths)
#InvGeogr1 <- spread(InvGeogr.csv, row, InvGeography, sep="_")
#
#InvTrans.csv$row <- sequence(rle(as.character(InvTrans.csv$efmnhtransactions_key))$lengths)
#InvTrans1 <- spread(InvTrans.csv, row, InvTransactionType, sep="_")
# Replace missing item counts with zero so later aggregations are not NA.
Loan1$ObcTotalItems[which(is.na(Loan1$ObcTotalItems)==T)] <- 0
Loan1$ObuTotalItems[which(is.na(Loan1$ObuTotalItems)==T)] <- 0
# NOTE(review): the line below duplicates the one above and is a no-op;
# possibly a different column was intended -- confirm against the report.
Loan1$ObuTotalItems[which(is.na(Loan1$ObuTotalItems)==T)] <- 0
# Loan year = first 4 characters of TraDateProcessed
# (assumes dates are exported year-first -- TODO confirm export format).
Loan1$LoanYear <- as.integer(substr(Loan1$TraDateProcessed, 1, 4))
# Keep records processed from 2005 onwards; upper bound guards bad dates.
Loan1 <- Loan1[which(Loan1$LoanYear>2004 & Loan1$LoanYear<2030),]
# Total items loaned per (year, catalogue); result columns are
# Group.1 = LoanYear, Group.2 = AccCatalogue, x = sum.
LoanSum <- aggregate(Loan1$TraTotalItemsLoaned, list(Loan1$LoanYear, Loan1$AccCatalogue), sum)
# Number of loan transactions per (catalogue, year).
LoanCount <- dplyr::count(Loan1, AccCatalogue, LoanYear)
# Drop zero/NA groups before renaming.
LoanSum2 <- LoanSum[which(LoanSum$x>0),]
LoanCount2 <- LoanCount[which(LoanCount$n>0 & is.na(LoanCount$n)==F),]
# Rename positionally: aggregate() grouped by (year, catalogue) in that
# order, dplyr::count() by (catalogue, year).
colnames(LoanSum2) = c("LoanYear","DarCollectionCode","SumItems")
colnames(LoanCount2) = c("DarCollectionCode","LoanYear","CountLoans")
# This can be exported for full count & sum dataset
LoanSumCount <- merge(LoanSum2, LoanCount2, by=c("LoanYear","DarCollectionCode"),all=T)
## This would be an alternate data structure (in case Pete needs particular setup for particular chart)
# LoanCount3 <- spread(LoanCount2, LoanYear, CountLoans)
|
/dash026LoansPrep.R
|
permissive
|
magpiedin/collections-dashboard-prep
|
R
| false
| false
| 4,163
|
r
|
## collections-dashboard-prep
# Prep accessions data from EMu for dashboard-prep
#
# 1) In EMu, retrieve Accession records for dashboard,
# 05-May-2017 dataset includes all efmnhtransactions records retrieved in this query:
#
# select all
# from efmnhtransactions
# where true and
# (
# (TraTransactionType contains 'incoming')
# or
# (TraTransactionType contains 'outgoing')
# )
# or
# (
# (LoaLoanType contains 'incoming')
# or
# (LoaLoanType contains 'outgoing')
# )
# or
# (TraTransactionType contains '\"Exhibit Loan\"')
# or
# (LoaLoanType contains '\"Exhibit Loan\"')
#
# 2) Report them out with "DashboardTran" report
# - see collections-dashboard "Help" page for details on which fields are included in report
# - If under 200k records, report all at one time.
# - Don't rename reported files. Keep them together in one folder.
#
# 3) Run this script
# - NOTE: May need to re-set working directory to folder containing reported csv's
# (see lines 59 & 60)
#
# Filter to only include records where:
# - date => 2005
# - Transaction (or Loan) Type = incoming or outgoing
# (exhibitions may be included later)
# - Counts use Items not "Object" fields
# Announce progress; this script is sourced as one step of a longer pipeline.
print(paste(date(), "-- ...finished setting up Experience data. Starting dash026LoansPrep.R"))
# point to the directory containing the set of "Group" csv's from EMu
# NOTE(review): relies on a global `origdir` defined by an earlier pipeline
# script, and changes the working directory as a side effect.
setwd(paste0(origdir,"/data01raw/emuLoans"))
# Import raw EMu Accession data ####
Loan1 <- read.csv(file="efmnhtra.csv", stringsAsFactors = F)
# Ignore these for now -- but here in case detail is needed later
#temp = list.files(path=".", pattern=".csv")
#list2env(
#  lapply(setNames(temp, make.names(temp)),
#         read.csv), envir = .GlobalEnv)
## May need to convert efmnhtra.csv fields to as.character if import in batch
##Group1.csv <- Group1.csv[,-1]
##Group1.csv$row <- sequence(rle(as.character(InvCount.csv$efmnhtransactions_key))$lengths)
##Group1.csv <- spread(InvCount.csv, row, InvCount, sep="_")
#
#InvCount.csv <- InvCount.csv[,-1]
#InvDescr.csv <- InvDescr.csv[,-1]
#InvGeogr.csv <- InvGeogr.csv[,-1]
#InvTrans.csv <- InvTrans.csv[,-1]
#
#InvCount.csv$row <- sequence(rle(as.character(InvCount.csv$efmnhtransactions_key))$lengths)
#InvCount1 <- spread(InvCount.csv, row, InvCount, sep="_")
##InvCount1$sumInv <- summarise(InvCount1, sumItems = row_1+row_2+row_3+row_4+row_5)
#
#InvDescr.csv$row <- sequence(rle(as.character(InvDescr.csv$efmnhtransactions_key))$lengths)
#InvDescr1 <- spread(InvDescr.csv, row, InvDescription, sep="_")
#
#InvGeogr.csv$row <- sequence(rle(as.character(InvGeogr.csv$efmnhtransactions_key))$lengths)
#InvGeogr1 <- spread(InvGeogr.csv, row, InvGeography, sep="_")
#
#InvTrans.csv$row <- sequence(rle(as.character(InvTrans.csv$efmnhtransactions_key))$lengths)
#InvTrans1 <- spread(InvTrans.csv, row, InvTransactionType, sep="_")
# Replace missing item counts with zero so later aggregations are not NA.
Loan1$ObcTotalItems[which(is.na(Loan1$ObcTotalItems)==T)] <- 0
Loan1$ObuTotalItems[which(is.na(Loan1$ObuTotalItems)==T)] <- 0
# NOTE(review): the line below duplicates the one above and is a no-op;
# possibly a different column was intended -- confirm against the report.
Loan1$ObuTotalItems[which(is.na(Loan1$ObuTotalItems)==T)] <- 0
# Loan year = first 4 characters of TraDateProcessed
# (assumes dates are exported year-first -- TODO confirm export format).
Loan1$LoanYear <- as.integer(substr(Loan1$TraDateProcessed, 1, 4))
# Keep records processed from 2005 onwards; upper bound guards bad dates.
Loan1 <- Loan1[which(Loan1$LoanYear>2004 & Loan1$LoanYear<2030),]
# Total items loaned per (year, catalogue); result columns are
# Group.1 = LoanYear, Group.2 = AccCatalogue, x = sum.
LoanSum <- aggregate(Loan1$TraTotalItemsLoaned, list(Loan1$LoanYear, Loan1$AccCatalogue), sum)
# Number of loan transactions per (catalogue, year).
LoanCount <- dplyr::count(Loan1, AccCatalogue, LoanYear)
# Drop zero/NA groups before renaming.
LoanSum2 <- LoanSum[which(LoanSum$x>0),]
LoanCount2 <- LoanCount[which(LoanCount$n>0 & is.na(LoanCount$n)==F),]
# Rename positionally: aggregate() grouped by (year, catalogue) in that
# order, dplyr::count() by (catalogue, year).
colnames(LoanSum2) = c("LoanYear","DarCollectionCode","SumItems")
colnames(LoanCount2) = c("DarCollectionCode","LoanYear","CountLoans")
# This can be exported for full count & sum dataset
LoanSumCount <- merge(LoanSum2, LoanCount2, by=c("LoanYear","DarCollectionCode"),all=T)
## This would be an alternate data structure (in case Pete needs particular setup for particular chart)
# LoanCount3 <- spread(LoanCount2, LoanYear, CountLoans)
|
\name{magnifSeven}
\alias{magnifSeven}
\title{This is a function to compute the 7 statistics of daily streamflow
used by Archfield et al., under revision (June 2013). Input to the function is a
time series of streamflow with date in the format Y-m-d. Data should be arranged in
two columns with names: 1) date and 2) discharge.
Created May 29, 2013 and functions are modified from previous versions of this code.}
\usage{
magnifSeven(timeseries1)
}
\arguments{
\item{timeseries1}{data frame of daily flow data}
}
\value{
magnif7 data frame of calculated statistics
}
\description{
This is a function to compute the 7 statistics of daily
streamflow used by Archfield et al., under revision (June
2013). Input to the function is a time series of streamflow
with date in the format Y-m-d. Data should be arranged in
two columns with names: 1) date and 2) discharge. Created
May 29, 2013 and functions are modified from previous
versions of this code.
}
\examples{
timeseries1<-sampleData
timeseries1<-data.frame(timeseries1$date,timeseries1$discharge,timeseries1$month_val,timeseries1$year_val,stringsAsFactors=FALSE)
timeseries1$date<-as.Date(timeseries1$timeseries1.date,"\%m/\%d/\%y")
timeseries1<-data.frame(timeseries1$date,timeseries1$timeseries1.discharge,timeseries1$timeseries1.month_val,timeseries1$timeseries1.year_val,stringsAsFactors=FALSE)
colnames(timeseries1)<-c("date","discharge","month_val","year_val")
magnifSeven(timeseries1)
}
|
/man/magnifSeven.Rd
|
no_license
|
tsangyp/EflowStats
|
R
| false
| false
| 1,450
|
rd
|
\name{magnifSeven}
\alias{magnifSeven}
\title{This is a function to compute the 7 statistics of daily streamflow
used by Archfield et al., under revision (June 2013). Input to the function is a
time series of streamflow with date in the format Y-m-d. Data should be arranged in
two columns with names: 1) date and 2) discharge.
Created May 29, 2013 and functions are modified from previous versions of this code.}
\usage{
magnifSeven(timeseries1)
}
\arguments{
\item{timeseries1}{data frame of daily flow data}
}
\value{
magnif7 data frame of calculated statistics
}
\description{
This is a function to compute the 7 statistics of daily
streamflow used by Archfield et al., under revision (June
2013). Input to the function is a time series of streamflow
with date in the format Y-m-d. Data should be arranged in
two columns with names: 1) date and 2) discharge. Created
May 29, 2013 and functions are modified from previous
versions of this code.
}
\examples{
timeseries1<-sampleData
timeseries1<-data.frame(timeseries1$date,timeseries1$discharge,timeseries1$month_val,timeseries1$year_val,stringsAsFactors=FALSE)
timeseries1$date<-as.Date(timeseries1$timeseries1.date,"\%m/\%d/\%y")
timeseries1<-data.frame(timeseries1$date,timeseries1$timeseries1.discharge,timeseries1$timeseries1.month_val,timeseries1$timeseries1.year_val,stringsAsFactors=FALSE)
colnames(timeseries1)<-c("date","discharge","month_val","year_val")
magnifSeven(timeseries1)
}
|
# Integration tests for adding elements into slide placeholders with officer:
# styled text runs, paragraphs, images, raw XML, and internal/external hyperlinks.
context("add elements in slides")
source("utils.R")
# Append a styled run into an empty body placeholder, then prepend a second
# run with pos = "before" and check the concatenated paragraph text.
test_that("add text into placeholder", {
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_empty(type = "body")
  sm <- slide_summary(doc)
  expect_equal(nrow(sm), 1)
  small_red <- fp_text(color = "red", font.size = 14)
  doc <- doc %>%
    ph_add_par(level = 2) %>%
    ph_add_text(str = "chunk 1", style = small_red )
  sm <- slide_summary(doc)
  expect_equal(sm[1, ]$text, "chunk 1")
  doc <- doc %>%
    ph_add_text(str = "this is ", style = small_red, pos = "before" )
  sm <- slide_summary(doc)
  expect_equal(sm[1, ]$text, "this is chunk 1")
})
# ph_add_par() on a placeholder that already contains text should append a new
# paragraph (two a:p nodes) rather than replace the existing one.
# (Typo "alrady" in the description kept verbatim -- it is the test's name.)
test_that("ph_add_par append when text alrady exists", {
  small_red <- fp_text(color = "red", font.size = 14)
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with("This is a ", location = ph_location_type(type = "body")) %>%
    ph_add_par(level = 2)
  doc <- doc %>%
    ph_add_text(str = "test", style = small_red )
  sm <- slide_summary(doc)
  expect_equal(sm$text, "This is a test")
  xmldoc <- doc$slide$get_slide(1)$get()
  txt <- xml_find_all(xmldoc, "//p:spTree/p:sp/p:txBody/a:p") %>% xml_text()
  expect_equal(txt, c("This is a ", "test"))
})
# An href on a run must create an "External" relationship in the slide's rels
# and an a:hlinkClick node referencing that relationship id.
test_that("ph_add_text with hyperlink", {
  small_red <- fp_text(color = "red", font.size = 14)
  href_ <- "https://cran.r-project.org"
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with("This is a ", location = ph_location_type(type = "body")) %>%
    ph_add_par(level = 2) %>%
    ph_add_text(str = "test", style = small_red, href = href_ )
  xmldoc <- doc$slide$get_slide(1)$get()
  rel_df <- doc$slide$get_slide(1)$rel_df()
  expect_true( href_ %in% rel_df$target )
  row_id_ <- which( rel_df$target_mode %in% "External" & rel_df$target %in% href_ )
  rid <- rel_df[row_id_, "id"]
  xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
  node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
  expect_equal( xml_text(node_), "test")
})
# external_img() with use_loc_size = FALSE must keep the image's own
# width/height instead of the placeholder's.
test_that("add img into placeholder", {
  skip_on_os("windows")
  img.file <- file.path( R.home("doc"), "html", "logo.jpg" )
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with(external_img(img.file, height = 1.06, width = 1.39), location = ph_location_type(type = "body"),
            use_loc_size = FALSE )
  sm <- slide_summary(doc)
  expect_equal(nrow(sm), 1)
  expect_equal(sm$cx, 1.39)
  expect_equal(sm$cy, 1.06)
})
# fpar() chunks with different formats must become one run per chunk with the
# expected fill colors and bold attributes in the slide XML.
test_that("add formatted par into placeholder", {
  bold_face <- shortcuts$fp_bold(font.size = 30)
  bold_redface <- update(bold_face, color = "red")
  fpar_ <- fpar(ftext("Hello ", prop = bold_face),
                ftext("World", prop = bold_redface ),
                ftext(", how are you?", prop = bold_face ) )
  doc <- read_pptx() %>%
    add_slide(layout = "Title and Content", master = "Office Theme") %>%
    ph_empty(location = ph_location_type(type = "body")) %>%
    ph_add_fpar(value = fpar_, type = "body", level = 2)
  sm <- slide_summary(doc)
  expect_equal(nrow(sm), 1)
  expect_equal(sm[1, ]$text, "Hello World, how are you?")
  xmldoc <- doc$slide$get_slide(id = 1)$get()
  cols <- xml_find_all(xmldoc, "//a:rPr/a:solidFill/a:srgbClr") %>% xml_attr("val")
  expect_equal(cols, c("000000", "FF0000", "000000") )
  expect_equal(xml_find_all(xmldoc, "//a:rPr") %>% xml_attr("b"), rep("1",3))
})
# Raw presentation XML can be inserted both into a typed placeholder and at an
# explicit location; both resulting shapes carry the fragment's text.
test_that("add xml into placeholder", {
  xml_str <- "<p:sp xmlns:a=\"http://schemas.openxmlformats.org/drawingml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\" xmlns:p=\"http://schemas.openxmlformats.org/presentationml/2006/main\"><p:nvSpPr><p:cNvPr id=\"\" name=\"\"/><p:cNvSpPr><a:spLocks noGrp=\"1\"/></p:cNvSpPr><p:nvPr><p:ph type=\"title\"/></p:nvPr></p:nvSpPr><p:spPr/>\n<p:txBody><a:bodyPr/><a:lstStyle/><a:p><a:r><a:rPr/><a:t>Hello world 1</a:t></a:r></a:p></p:txBody></p:sp>"
  library(xml2)
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with(value = as_xml_document(xml_str), location = ph_location_type(type = "body")) %>%
    ph_with(value = as_xml_document(xml_str), location = ph_location(left = 1, top = 1, width = 3, height = 3))
  sm <- slide_summary(doc)
  expect_equal(nrow(sm), 2)
  expect_equal(sm[1,]$text, "Hello world 1")
  expect_equal(sm[2,]$text, "Hello world 1")
})
# slide_index on ph_add_text should register an internal relationship to
# slide1.xml and attach the hlinkClick to the run.
test_that("link to another shape", {
  small_red <- fp_text(color = "red", font.size = 14)
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with(location = ph_location_type(type = "body"), value = "This is a ") %>%
    ph_add_par(level = 2) %>%
    ph_add_text(str = "test", style = small_red, slide_index = 1 )
  xmldoc <- doc$slide$get_slide(1)$get()
  rel_df <- doc$slide$get_slide(1)$rel_df()
  expect_true( "slide1.xml" %in% rel_df$target )
  row_id_ <- which( rel_df$target %in% "slide1.xml" )
  rid <- rel_df[row_id_, "id"]
  xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
  node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
  expect_equal( xml_text(node_), "test")
})
# NOTE(review): this test duplicates the description (and essentially the body)
# of the earlier "ph_add_text with hyperlink" test, differing only in the
# argument order passed to ph_with() -- consider renaming or removing one.
test_that("ph_add_text with hyperlink", {
  small_red <- fp_text(color = "red", font.size = 14)
  href_ <- "https://cran.r-project.org"
  doc <- read_pptx() %>%
    add_slide("Title and Content", "Office Theme") %>%
    ph_with(location = ph_location_type(type = "body"), value = "This is a ") %>%
    ph_add_par(level = 2) %>%
    ph_add_text(str = "test", style = small_red, href = href_ )
  xmldoc <- doc$slide$get_slide(1)$get()
  rel_df <- doc$slide$get_slide(1)$rel_df()
  expect_true( href_ %in% rel_df$target )
  row_id_ <- which( rel_df$target_mode %in% "External" & rel_df$target %in% href_ )
  rid <- rel_df[row_id_, "id"]
  xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
  node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
  expect_equal( xml_text(node_), "test")
})
# ph_slidelink() should create a relationship from slide 1 to slide 2 and
# attach the hlinkClick at the shape (p:cNvPr) level, not on a run.
test_that("slidelink shape", {
  doc <- read_pptx() %>%
    add_slide(layout = "Title and Content", master = "Office Theme") %>%
    ph_with("Un titre 1", location = ph_location_type(type = "title")) %>%
    ph_with("text 1", location = ph_location_type(type = "body")) %>%
    add_slide(layout = "Title and Content", master = "Office Theme") %>%
    ph_with("Un titre 2", location = ph_location_type(type = "title")) %>%
    on_slide( index = 1)
  slide_summary(doc)
  doc <- doc %>%
    ph_slidelink(type = "body", slide_index = 2 )
  rel_df <- doc$slide$get_slide(1)$rel_df()
  slide_filename <- doc$slide$get_metadata()$name[2]
  expect_true( slide_filename %in% rel_df$target )
  row_id_ <- which( is.na(rel_df$target_mode) & rel_df$target %in% slide_filename )
  rid <- rel_df[row_id_, "id"]
  xpath_ <- sprintf("//p:sp[p:nvSpPr/p:cNvPr/a:hlinkClick/@r:id='%s']", rid)
  node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
  expect_false( inherits(node_, "xml_missing") )
})
# ph_hyperlink() attaches an external URL at the shape level, addressed by
# shape id rather than placeholder type.
test_that("hyperlink shape", {
  doc <- read_pptx()
  doc <- add_slide(doc, layout = "Title and Content", master = "Office Theme")
  doc <- ph_with(x = doc, location = ph_location_type(type = "title"), value = "Un titre 1")
  doc <- add_slide(doc, layout = "Title and Content", master = "Office Theme")
  doc <- ph_with(x = doc, location = ph_location_type(type = "title"), value = "Un titre 2")
  doc <- on_slide(doc, 1)
  doc <- ph_hyperlink(x = doc, id_chr = "2", href = "https://cran.r-project.org")
  rel_df <- doc$slide$get_slide(1)$rel_df()
  expect_true( "https://cran.r-project.org" %in% rel_df$target )
  row_id_ <- which( !is.na(rel_df$target_mode) & rel_df$target %in% "https://cran.r-project.org" )
  rid <- rel_df[row_id_, "id"]
  xpath_ <- sprintf("//p:sp[p:nvSpPr/p:cNvPr/a:hlinkClick/@r:id='%s']", rid)
  node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
  expect_false( inherits(node_, "xml_missing") )
})
# Clean up any pptx files the tests wrote to the working directory.
unlink("*.pptx")
|
/tests/testthat/test-pptx-add.R
|
no_license
|
Kill3rbee/officer
|
R
| false
| false
| 7,988
|
r
|
context("add elements in slides")
source("utils.R")
test_that("add text into placeholder", {
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_empty(type = "body")
sm <- slide_summary(doc)
expect_equal(nrow(sm), 1)
small_red <- fp_text(color = "red", font.size = 14)
doc <- doc %>%
ph_add_par(level = 2) %>%
ph_add_text(str = "chunk 1", style = small_red )
sm <- slide_summary(doc)
expect_equal(sm[1, ]$text, "chunk 1")
doc <- doc %>%
ph_add_text(str = "this is ", style = small_red, pos = "before" )
sm <- slide_summary(doc)
expect_equal(sm[1, ]$text, "this is chunk 1")
})
test_that("ph_add_par append when text alrady exists", {
small_red <- fp_text(color = "red", font.size = 14)
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with("This is a ", location = ph_location_type(type = "body")) %>%
ph_add_par(level = 2)
doc <- doc %>%
ph_add_text(str = "test", style = small_red )
sm <- slide_summary(doc)
expect_equal(sm$text, "This is a test")
xmldoc <- doc$slide$get_slide(1)$get()
txt <- xml_find_all(xmldoc, "//p:spTree/p:sp/p:txBody/a:p") %>% xml_text()
expect_equal(txt, c("This is a ", "test"))
})
test_that("ph_add_text with hyperlink", {
small_red <- fp_text(color = "red", font.size = 14)
href_ <- "https://cran.r-project.org"
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with("This is a ", location = ph_location_type(type = "body")) %>%
ph_add_par(level = 2) %>%
ph_add_text(str = "test", style = small_red, href = href_ )
xmldoc <- doc$slide$get_slide(1)$get()
rel_df <- doc$slide$get_slide(1)$rel_df()
expect_true( href_ %in% rel_df$target )
row_id_ <- which( rel_df$target_mode %in% "External" & rel_df$target %in% href_ )
rid <- rel_df[row_id_, "id"]
xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
expect_equal( xml_text(node_), "test")
})
test_that("add img into placeholder", {
skip_on_os("windows")
img.file <- file.path( R.home("doc"), "html", "logo.jpg" )
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with(external_img(img.file, height = 1.06, width = 1.39), location = ph_location_type(type = "body"),
use_loc_size = FALSE )
sm <- slide_summary(doc)
expect_equal(nrow(sm), 1)
expect_equal(sm$cx, 1.39)
expect_equal(sm$cy, 1.06)
})
test_that("add formatted par into placeholder", {
bold_face <- shortcuts$fp_bold(font.size = 30)
bold_redface <- update(bold_face, color = "red")
fpar_ <- fpar(ftext("Hello ", prop = bold_face),
ftext("World", prop = bold_redface ),
ftext(", how are you?", prop = bold_face ) )
doc <- read_pptx() %>%
add_slide(layout = "Title and Content", master = "Office Theme") %>%
ph_empty(location = ph_location_type(type = "body")) %>%
ph_add_fpar(value = fpar_, type = "body", level = 2)
sm <- slide_summary(doc)
expect_equal(nrow(sm), 1)
expect_equal(sm[1, ]$text, "Hello World, how are you?")
xmldoc <- doc$slide$get_slide(id = 1)$get()
cols <- xml_find_all(xmldoc, "//a:rPr/a:solidFill/a:srgbClr") %>% xml_attr("val")
expect_equal(cols, c("000000", "FF0000", "000000") )
expect_equal(xml_find_all(xmldoc, "//a:rPr") %>% xml_attr("b"), rep("1",3))
})
test_that("add xml into placeholder", {
xml_str <- "<p:sp xmlns:a=\"http://schemas.openxmlformats.org/drawingml/2006/main\" xmlns:r=\"http://schemas.openxmlformats.org/officeDocument/2006/relationships\" xmlns:p=\"http://schemas.openxmlformats.org/presentationml/2006/main\"><p:nvSpPr><p:cNvPr id=\"\" name=\"\"/><p:cNvSpPr><a:spLocks noGrp=\"1\"/></p:cNvSpPr><p:nvPr><p:ph type=\"title\"/></p:nvPr></p:nvSpPr><p:spPr/>\n<p:txBody><a:bodyPr/><a:lstStyle/><a:p><a:r><a:rPr/><a:t>Hello world 1</a:t></a:r></a:p></p:txBody></p:sp>"
library(xml2)
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with(value = as_xml_document(xml_str), location = ph_location_type(type = "body")) %>%
ph_with(value = as_xml_document(xml_str), location = ph_location(left = 1, top = 1, width = 3, height = 3))
sm <- slide_summary(doc)
expect_equal(nrow(sm), 2)
expect_equal(sm[1,]$text, "Hello world 1")
expect_equal(sm[2,]$text, "Hello world 1")
})
test_that("link to another shape", {
small_red <- fp_text(color = "red", font.size = 14)
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with(location = ph_location_type(type = "body"), value = "This is a ") %>%
ph_add_par(level = 2) %>%
ph_add_text(str = "test", style = small_red, slide_index = 1 )
xmldoc <- doc$slide$get_slide(1)$get()
rel_df <- doc$slide$get_slide(1)$rel_df()
expect_true( "slide1.xml" %in% rel_df$target )
row_id_ <- which( rel_df$target %in% "slide1.xml" )
rid <- rel_df[row_id_, "id"]
xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
expect_equal( xml_text(node_), "test")
})
test_that("ph_add_text with hyperlink", {
small_red <- fp_text(color = "red", font.size = 14)
href_ <- "https://cran.r-project.org"
doc <- read_pptx() %>%
add_slide("Title and Content", "Office Theme") %>%
ph_with(location = ph_location_type(type = "body"), value = "This is a ") %>%
ph_add_par(level = 2) %>%
ph_add_text(str = "test", style = small_red, href = href_ )
xmldoc <- doc$slide$get_slide(1)$get()
rel_df <- doc$slide$get_slide(1)$rel_df()
expect_true( href_ %in% rel_df$target )
row_id_ <- which( rel_df$target_mode %in% "External" & rel_df$target %in% href_ )
rid <- rel_df[row_id_, "id"]
xpath_ <- sprintf("//p:sp/p:txBody/a:p/a:r[a:rPr/a:hlinkClick/@r:id='%s']", rid)
node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
expect_equal( xml_text(node_), "test")
})
test_that("slidelink shape", {
doc <- read_pptx() %>%
add_slide(layout = "Title and Content", master = "Office Theme") %>%
ph_with("Un titre 1", location = ph_location_type(type = "title")) %>%
ph_with("text 1", location = ph_location_type(type = "body")) %>%
add_slide(layout = "Title and Content", master = "Office Theme") %>%
ph_with("Un titre 2", location = ph_location_type(type = "title")) %>%
on_slide( index = 1)
slide_summary(doc)
doc <- doc %>%
ph_slidelink(type = "body", slide_index = 2 )
rel_df <- doc$slide$get_slide(1)$rel_df()
slide_filename <- doc$slide$get_metadata()$name[2]
expect_true( slide_filename %in% rel_df$target )
row_id_ <- which( is.na(rel_df$target_mode) & rel_df$target %in% slide_filename )
rid <- rel_df[row_id_, "id"]
xpath_ <- sprintf("//p:sp[p:nvSpPr/p:cNvPr/a:hlinkClick/@r:id='%s']", rid)
node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
expect_false( inherits(node_, "xml_missing") )
})
test_that("hyperlink shape", {
doc <- read_pptx()
doc <- add_slide(doc, layout = "Title and Content", master = "Office Theme")
doc <- ph_with(x = doc, location = ph_location_type(type = "title"), value = "Un titre 1")
doc <- add_slide(doc, layout = "Title and Content", master = "Office Theme")
doc <- ph_with(x = doc, location = ph_location_type(type = "title"), value = "Un titre 2")
doc <- on_slide(doc, 1)
doc <- ph_hyperlink(x = doc, id_chr = "2", href = "https://cran.r-project.org")
rel_df <- doc$slide$get_slide(1)$rel_df()
expect_true( "https://cran.r-project.org" %in% rel_df$target )
row_id_ <- which( !is.na(rel_df$target_mode) & rel_df$target %in% "https://cran.r-project.org" )
rid <- rel_df[row_id_, "id"]
xpath_ <- sprintf("//p:sp[p:nvSpPr/p:cNvPr/a:hlinkClick/@r:id='%s']", rid)
node_ <- xml_find_first(doc$slide$get_slide(1)$get(), xpath_ )
expect_false( inherits(node_, "xml_missing") )
})
unlink("*.pptx")
|
# Validate and normalise dose-response inputs into the internal representation
# used by downstream curve-fitting code.
#
# Exactly one of `viability` or `Hill_fit` must be supplied:
#  - conc + viability: paired dose-response observations. NAs are removed
#    pairwise, values are sorted by concentration, concentrations are put on
#    the log10 scale and viabilities on the [0, 1] fraction scale.
#  - Hill_fit (+ optionally conc): previously fitted Hill parameters where
#    [[2]] is Einf and [[3]] is EC50; Einf is rescaled to a fraction and EC50
#    to log10 as needed.
#
# Arguments:
#  conc              numeric vector of concentrations (raw or log10; see conc_as_log)
#  viability         numeric vector of viabilities (percent or fraction; see viability_as_pct)
#  Hill_fit          list or vector of Hill parameters ([[2]] Einf, [[3]] EC50)
#  conc_as_log       TRUE if `conc` (and EC50) are already log10-transformed
#  viability_as_pct  TRUE if viability (and Einf) are percentages, not fractions
#  trunc             truncate viabilities into [0, 1] after rescaling
#  verbose           logical/numeric; set to 2 to see debug printouts
#
# Returns a list containing some of: log_conc, viability, Hill_fit, all on the
# internal scales described above.
sanitizeInput <- function(conc,
                          viability,
                          Hill_fit,
                          conc_as_log = FALSE,
                          viability_as_pct = TRUE,
                          trunc = TRUE,
                          verbose = TRUE) # Set to 2 to see debug printouts
{
  ## --- flag validation -----------------------------------------------------
  if (!is.logical(conc_as_log)) {
    print(conc_as_log)
    stop("'conc_as_log' is not a logical.")
  }
  if (!is.logical(viability_as_pct)) {
    print(viability_as_pct)
    stop("'viability_as_pct' is not a logical.")
  }
  if (!is.logical(trunc)) {
    print(trunc)
    stop("'trunc' is not a logical.")
  }
  if (!is.finite(verbose)) {
    stop("'verbose' should be a logical (or numerical) argument.")
  }
  ## --- branch 1: raw dose-response observations ----------------------------
  if (!missing(viability) && !missing(conc) && missing(Hill_fit)) {
    if (length(conc) != length(viability)) {
      if (verbose == 2) {
        print(conc)
        print(viability)
      }
      stop("Log concentration vector is not of same length as viability vector.")
    }
    # Drop observations where exactly one of the pair is missing.
    if (any(is.na(conc) & (!is.na(viability)))) {
      warning("Missing concentrations with non-missing viability values encountered. Removing viability values corresponding to those concentrations")
      myx <- !is.na(conc)
      conc <- as.numeric(conc[myx])
      viability <- as.numeric(viability[myx])
    }
    if (any((!is.na(conc)) & is.na(viability))) {
      warning("Missing viability with non-missing concentrations values encountered. Removing concentrations values corresponding to those viabilities")
      myx <- !is.na(viability)
      conc <- as.numeric(conc[myx])
      viability <- as.numeric(viability[myx])
    }
    # Any NAs left at this point are paired, so independent removal below
    # removes the same rows from both vectors and keeps them aligned.
    conc <- as.numeric(conc[!is.na(conc)])
    viability <- as.numeric(viability[!is.na(viability)])
    ## check that function inputs are appropriate
    if (!all(is.finite(conc))) {
      print(conc)
      stop("Concentration vector contains elements which are not real numbers.")
    }
    if (!all(is.finite(viability))) {
      print(viability)
      stop("Viability vector contains elements which are not real numbers.")
    }
    if (min(viability) < 0) {
      if (verbose) {
        warning("Warning: Negative viability data.")
      }
    }
    # Negative control is 100 on the percent scale, 1 on the fraction scale.
    if (max(viability) > (1 + 99 * viability_as_pct)) {
      if (verbose) {
        warning("Warning: Viability data exceeds negative control.")
      }
    }
    if (conc_as_log == FALSE && min(conc) < 0) {
      if (verbose == 2) {
        print(conc)
        print(conc_as_log)
      }
      stop("Negative concentrations encountered. Concentration data may be inappropriate, or 'conc_as_log' flag may be set incorrectly.")
    }
    # Heuristic checks for a mis-set viability_as_pct flag.
    if (viability_as_pct == TRUE && max(viability) < 5) {
      warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
      if (verbose == 2) {
        print(viability)
        print(viability_as_pct)
      }
    }
    if (viability_as_pct == FALSE && max(viability) > 5) {
      warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
      if (verbose == 2) {
        print(viability)
        print(viability_as_pct)
      }
    }
    if (is.unsorted(conc)) {
      warning("Concentration Values were unsorted. Sorting concentration and ordering viability in same order")
      myx <- order(conc)
      conc <- conc[myx]
      viability <- viability[myx]
    }
    ## convert dose-response data to the internal representation
    if (conc_as_log == FALSE) {
      # Zero concentrations cannot be log-transformed; drop them pairwise.
      ii <- which(conc == 0)
      if (length(ii) > 0) {
        conc <- conc[-ii]
        viability <- viability[-ii]
      }
      log_conc <- log10(conc)
    } else {
      log_conc <- conc
    }
    if (viability_as_pct == TRUE) {
      viability <- viability / 100
    }
    if (trunc) {
      viability <- pmin(as.numeric(viability), 1)
      viability <- pmax(as.numeric(viability), 0)
    }
    return(list("log_conc" = log_conc, "viability" = viability))
  }
  ## --- branch 2: pre-fitted Hill parameters --------------------------------
  if (!missing(Hill_fit) && missing(viability)) {
    if (is.list(Hill_fit)) {
      Hill_fit <- unlist(Hill_fit)
    }
    # Hill_fit[[2]] is Einf, Hill_fit[[3]] is EC50.
    if (conc_as_log == FALSE && Hill_fit[[3]] < 0) {
      print("EC50 passed in as:")
      print(Hill_fit[[3]])
      stop("'conc_as_log' flag may be set incorrectly, as the EC50 is negative when positive value is expected.")
    }
    if (viability_as_pct == FALSE && Hill_fit[[2]] > 1) {
      print("Einf passed in as:")
      print(Hill_fit[[2]])
      warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
    }
    if (conc_as_log == FALSE) {
      Hill_fit[[3]] <- log10(Hill_fit[[3]])
    }
    if (viability_as_pct == TRUE) {
      Hill_fit[[2]] <- Hill_fit[[2]] / 100
    }
    if (missing(conc)) {
      return(list("Hill_fit" = Hill_fit))
    } else {
      # Sanitize the accompanying concentration grid as in branch 1.
      conc <- as.numeric(conc[!is.na(conc)])
      if (!all(is.finite(conc))) {
        print(conc)
        stop("Concentration vector contains elements which are not real numbers.")
      }
      if (conc_as_log == FALSE && min(conc) < 0) {
        print(conc)
        print(conc_as_log)
        stop("Negative concentrations encountered. Concentration data may be inappropriate, or 'conc_as_log' flag may be set incorrectly.")
      }
      # BUGFIX: sort *before* the log transform. The original sorted `conc`
      # after computing log_conc, so the sorted vector was discarded and the
      # returned log_conc stayed unsorted.
      if (is.unsorted(conc)) {
        myx <- order(conc)
        conc <- conc[myx]
      }
      if (conc_as_log == FALSE) {
        ii <- which(conc == 0)
        if (length(ii) > 0) {
          conc <- conc[-ii]
        }
        log_conc <- log10(conc)
      } else {
        log_conc <- conc
      }
      return(list("Hill_fit" = Hill_fit, "log_conc" = log_conc))
    }
  }
  ## --- invalid argument combinations ---------------------------------------
  if (!missing(Hill_fit) && !missing(viability)) {
    stop("Please pass in only one of 'Hill_fit' and 'viability', it is unclear which to use in the computation.")
  }
  if (missing(Hill_fit) && missing(viability)) {
    stop("Both 'Hill_fit' and 'viability' missing, please pass in some data!")
  }
}
|
/R/SanityCheck.R
|
no_license
|
cran/ToxicoGx
|
R
| false
| false
| 6,007
|
r
|
sanitizeInput <- function(conc,
viability,
Hill_fit,
conc_as_log = FALSE,
viability_as_pct = TRUE,
trunc = TRUE,
verbose = TRUE) # Set to 2 to see debug printouts
{
if (is.logical(conc_as_log) == FALSE) {
print(conc_as_log)
stop("'conc_as_log' is not a logical.")
}
if (is.logical(viability_as_pct) == FALSE) {
print(viability_as_pct)
stop("'viability_as_pct' is not a logical.")
}
if (is.logical(trunc) == FALSE) {
print(trunc)
stop("'trunc' is not a logical.")
}
if(!is.finite(verbose)){
stop("'verbose' should be a logical (or numerical) argument.")
}
if(!missing(viability)&&!missing(conc)&&missing(Hill_fit))
{
if (length(conc) != length(viability)) {
if(verbose==2){
print(conc)
print(viability)
}
stop("Log concentration vector is not of same length as viability vector.")
}
if( any(is.na(conc)&(!is.na(viability)))){
warning("Missing concentrations with non-missing viability values encountered. Removing viability values correspoding to those concentrations")
myx <- !is.na(conc)
conc <- as.numeric(conc[myx])
viability <- as.numeric(viability[myx])
}
if(any((!is.na(conc))&is.na(viability))){
warning("Missing viability with non-missing concentrations values encountered. Removing concentrations values correspoding to those viabilities")
myx <- !is.na(viability)
conc <- as.numeric(conc[myx])
viability <- as.numeric(viability[myx])
}
conc <- as.numeric(conc[!is.na(conc)])
viability <- as.numeric(viability[!is.na(viability)])
#CHECK THAT FUNCTION INPUTS ARE APPROPRIATE
if (prod(is.finite(conc)) != 1) {
print(conc)
stop("Concentration vector contains elements which are not real numbers.")
}
if (prod(is.finite(viability)) != 1) {
print(viability)
stop("Viability vector contains elements which are not real numbers.")
}
if (min(viability) < 0) {
if (verbose) {
warning("Warning: Negative viability data.")
}
}
if (max(viability) > (1 + 99 * viability_as_pct)) {
if (verbose) {
warning("Warning: Viability data exceeds negative control.")
}
}
if (conc_as_log == FALSE && min(conc) < 0) {
if (verbose == 2) {
print(conc)
print(conc_as_log)
}
stop("Negative concentrations encountered. Concentration data may be inappropriate, or 'conc_as_log' flag may be set incorrectly.")
}
if (viability_as_pct == TRUE && max(viability) < 5) {
warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
if (verbose == 2) {
print(viability)
print(viability_as_pct)
}
}
if (viability_as_pct == FALSE && max(viability) > 5) {
warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
if (verbose == 2) {
print(viability)
print(viability_as_pct)
}
}
if(is.unsorted(conc)){
warning("Concentration Values were unsorted. Sorting concentration and ordering viability in same order")
myx <- order(conc)
conc <- conc[myx]
viability <- viability[myx]
}
#CONVERT DOSE-RESPONSE DATA TO APPROPRIATE INTERNAL REPRESENTATION
if (conc_as_log == FALSE ) {
ii <- which(conc == 0)
if(length(ii) > 0) {
conc <- conc[-ii]
viability <- viability[-ii]
}
log_conc <- log10(conc)
} else {
log_conc <- conc
}
if (viability_as_pct == TRUE) {
viability <- viability / 100
}
if (trunc) {
viability = pmin(as.numeric(viability), 1)
viability = pmax(as.numeric(viability), 0)
}
return(list("log_conc"=log_conc, "viability"=viability))
}
if(!missing(Hill_fit) && missing(viability)){
if(is.list(Hill_fit)){
Hill_fit <- unlist(Hill_fit)
}
if (conc_as_log == FALSE && Hill_fit[[3]] < 0) {
print("EC50 passed in as:")
print(Hill_fit[[3]])
stop("'conc_as_log' flag may be set incorrectly, as the EC50 is negative when positive value is expected.")
}
if (viability_as_pct == FALSE && Hill_fit[[2]] > 1) {
print("Einf passed in as:")
print(Hill_fit[[2]])
warning("Warning: 'viability_as_pct' flag may be set incorrectly.")
}
if (conc_as_log == FALSE){
Hill_fit[[3]] <- log10(Hill_fit[[3]])
}
if (viability_as_pct == TRUE){
Hill_fit[[2]] <- Hill_fit[[2]]/100
}
if(missing(conc)){
return(list("Hill_fit"=Hill_fit))
} else {
conc <- as.numeric(conc[!is.na(conc)])
if (prod(is.finite(conc)) != 1) {
print(conc)
stop("Concentration vector contains elements which are not real numbers.")
}
if (conc_as_log == FALSE && min(conc) < 0) {
print(conc)
print(conc_as_log)
stop("Negative concentrations encountered. Concentration data may be inappropriate, or 'conc_as_log' flag may be set incorrectly.")
}
if (conc_as_log == FALSE ) {
ii <- which(conc == 0)
if(length(ii) > 0) {
conc <- conc[-ii]
}
log_conc <- log10(conc)
} else {
log_conc <- conc
}
if(is.unsorted(conc)){
myx <- order(conc)
conc <- conc[myx]
}
return(list("Hill_fit"=Hill_fit, "log_conc" = log_conc))
}
}
if(!missing(Hill_fit)&&!missing(viability)){
stop("Please pass in only one of 'Hill_fit' and 'viability', it is unclear which to use in the computation.")
}
if(missing(Hill_fit)&&missing(viability)){
stop("Both 'Hill_fit' and 'viability' missing, please pass in some data!")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GitConnectionTest.r
\docType{data}
\name{GitConnectionTest}
\alias{GitConnectionTest}
\title{GitConnectionTest Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
GitConnectionTest
}
\description{
GitConnectionTest Class
}
\section{Fields}{
\describe{
\item{\code{id}}{}
\item{\code{description}}{}
\item{\code{can}}{}
}}
\keyword{datasets}
|
/man/GitConnectionTest.Rd
|
permissive
|
tynesjo/lookr
|
R
| false
| true
| 452
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GitConnectionTest.r
\docType{data}
\name{GitConnectionTest}
\alias{GitConnectionTest}
\title{GitConnectionTest Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
GitConnectionTest
}
\description{
GitConnectionTest Class
}
\section{Fields}{
\describe{
\item{\code{id}}{}
\item{\code{description}}{}
\item{\code{can}}{}
}}
\keyword{datasets}
|
start <- as.POSIXlt(Sys.time())
library(testthat)
library(EuPathDB)
# Context label matches this file (test_021_txdb_fungidb.R); the original
# string was garbled across two lines ("011_txdb_fungidb\n12\n").
context("021_txdb_fungidb")
webservice <- "fungidb"
# Build a TxDb for S. pombe from FungiDB metadata and check the package name.
wanted <- "pombe"
testing <- download_eupath_metadata(overwrite=FALSE, webservice=webservice)
entry <- get_eupath_entry(species=wanted, metadata=testing)
txdb <- make_eupath_txdb(entry=entry, install=FALSE)
actual <- as.character(txdb[["txdb_name"]])
expected <- "TxDb.Schizosaccharomyces.pombe.972h.FungiDB.v49"
test_that("Do we get some txdb information?", {
  # expect_equal(object, expected): argument order fixed for consistency with
  # the second test below.
  expect_equal(actual, expected)
})
# Repeat for A. versicolor, reusing the already-downloaded metadata.
wanted <- "versicolor"
entry <- get_eupath_entry(species=wanted, metadata=testing)
txdb <- make_eupath_txdb(entry=entry, install=FALSE)
actual <- as.character(txdb[["txdb_name"]])
expected <- "TxDb.Aspergillus.versicolor.CBS.583.65.FungiDB.v49"
test_that("Do we get some txdb information?", {
  expect_equal(actual, expected)
})
end <- as.POSIXlt(Sys.time())
elapsed <- round(x=as.numeric(end) - as.numeric(start))
# Report the correct script name (the original said "010annotation_eupathdb.R").
message(paste0("\nFinished 021_txdb_fungidb.R in ", elapsed, " seconds."))
|
/tests/testthat/test_021_txdb_fungidb.R
|
no_license
|
khughitt/EuPathDB
|
R
| false
| false
| 1,027
|
r
|
start <- as.POSIXlt(Sys.time())
library(testthat)
library(EuPathDB)
context("011_txdb_fungidb
12\n")
webservice <- "fungidb"
wanted <- "pombe"
testing <- download_eupath_metadata(overwrite=FALSE, webservice=webservice)
entry <- get_eupath_entry(species=wanted, metadata=testing)
txdb <- make_eupath_txdb(entry=entry, install=FALSE)
actual <- as.character(txdb[["txdb_name"]])
expected <- "TxDb.Schizosaccharomyces.pombe.972h.FungiDB.v49"
test_that("Do we get some txdb information?", {
expect_equal(expected, actual)
})
wanted <- "versicolor"
entry <- get_eupath_entry(species=wanted, metadata=testing)
txdb <- make_eupath_txdb(entry=entry, install=FALSE)
actual <- as.character(txdb[["txdb_name"]])
expected <- "TxDb.Aspergillus.versicolor.CBS.583.65.FungiDB.v49"
test_that("Do we get some txdb information?", {
expect_equal(actual, expected)
})
end <- as.POSIXlt(Sys.time())
elapsed <- round(x=as.numeric(end) - as.numeric(start))
message(paste0("\nFinished 010annotation_eupathdb.R in ", elapsed, " seconds."))
|
#Getting and Cleaning Data
#Project
# Here are the data for the project:
#
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# This file create ./data folder in the working directory and download the zip files there.
#create ./data folder
if(!file.exists("data")){dir.create("data")}
#download file
fileurl="https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destfile="./data/data.zip"
download.file(fileurl,destfile)
datedownloaded=date()
|
/download_file.R
|
no_license
|
superpapper/Getting-and-Cleaning-Data-Project
|
R
| false
| false
| 527
|
r
|
# Getting and Cleaning Data -- course project.
#
# Data source:
# https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip
#
# This script creates a ./data folder in the working directory (when it does
# not already exist) and downloads the dataset zip file into it.

# Create the ./data folder when missing.
if (!file.exists("data")) {
  dir.create("data")
}

# Download the zip archive and record when it was fetched.
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
destfile <- "./data/data.zip"
download.file(url = fileurl, destfile = destfile)
datedownloaded <- date()
|
# Eric Cartaya
library(dplyr)
library(tidyverse) #declarations
library(psych)
library(olsrr)
library(rrcov)
library(ggpubr)
library(HDtest)
library(cluster)
library(lmtest)
library(fpc)
library(ggplot2)
corona_mental <- read.csv("F:/School/Multivariate Stat/Indicators_of_Anxiety_or_Depression_Based_on_Reported_Frequency_of_Symptoms_During_Last_7_Days.csv")#getting the file
#View(corona_mental)#viewing the table
corona_mental <- corona_mental %>% select(-Quartile.range) %>% select(-State) %>% select(-Confidence.Interval) %>% select(-Week.Label)
#library(MASS)
write.csv(or_table,"F:\\School\\Multivariate Stat\\temp.csv", row.names = TRUE)
|
/project_code.R
|
no_license
|
ericcartaya/CovidDepressionMultivariateStatEC2020
|
R
| false
| false
| 672
|
r
|
# Eric Cartaya
# Loads CDC survey data on anxiety/depression symptom frequency during the
# pandemic, drops columns not needed downstream, and writes a table out.
library(dplyr)
library(tidyverse) #declarations
library(psych)
library(olsrr)
library(rrcov)
library(ggpubr)
library(HDtest)
library(cluster)
library(lmtest)
library(fpc)
library(ggplot2)
# Read the raw indicator CSV from a local path.
corona_mental <- read.csv("F:/School/Multivariate Stat/Indicators_of_Anxiety_or_Depression_Based_on_Reported_Frequency_of_Symptoms_During_Last_7_Days.csv")#getting the file
#View(corona_mental)#viewing the table
# Drop metadata columns that are not used in the analysis.
corona_mental <- corona_mental %>% select(-Quartile.range) %>% select(-State) %>% select(-Confidence.Interval) %>% select(-Week.Label)
#library(MASS)
# NOTE(review): 'or_table' is never defined in this script, so this
# write.csv() call fails unless it already exists in the session -- confirm
# the intended object (perhaps 'corona_mental').
write.csv(or_table,"F:\\School\\Multivariate Stat\\temp.csv", row.names = TRUE)
|
#' Perform Analyses
#'
#' Performs batch jags analyses. It goes through the folders defined by \code{...}
#' and pulls out the data of name \code{name} (by default \code{"data"} and then fits
#' the models defined in \code{models}. Note \code{beep = TRUE} has caused
#' crashed with RStudio on windows.
#'
#' @param models A jags_model object.
#' @param ... A series of character vectors or a list of character vectors of
#' folders to cycle through.
#' @param niters A count of the number of iterations to perform.
#' @param mode A string of the opts_jagr mode.
#' @param name A string of the name of the data file.
#' @param beep A flag indicating whether to beep on completion.
#' @export
perform_analyses <- function(models, ..., niters = 10^3, mode = "current",
name = "data",
beep = getOption("poiscon.beep", default = FALSE)) {
assert_that(is.jags_model(models))
assert_that(is.count(niters))
assert_that(is.string(mode))
assert_that(is.string(name))
assert_that(is.flag(beep) && noNA(beep))
opts <- opts_jagr()
on.exit(opts_jagr(opts))
opts_jagr(mode = mode)
args <- list(...)
nargs <- length(args)
analysis <- function(models, name, niters) {
data <- load_rdata(name)
analysis <- jags_analysis(models, data = data, niters = niters)
save_analysis(analysis)
print(summary(analysis))
save_tables(analysis)
save_plots(analysis)
}
if (nargs == 0) {
analysis(models = models, name = name, niters = niters)
} else {
folders <- t(expand.grid(...))
for (i in 1:ncol(folders)) {
print(set_folders(as.character(as.list(unlist(folders[, i])))))
analysis(models = models, name = name, niters = niters)
}
}
if(beep)
beepr::beep(10)
invisible(TRUE)
}
#' Plots Residuals
#'
#' Plots
#'
#' @param ... character vectors of folders to cycle through
#' @export
plot_residuals_analyses <- function(...) {
args <- list(...)
nargs <- length(args)
plot_residuals_analysis <- function() {
analysis <- load_analysis()
plot_residuals(analysis)
}
if (nargs == 0) {
plot_residuals_analysis()
} else {
folders <- t(expand.grid(...))
for (i in 1:ncol(folders)) {
print(set_folders(as.character(as.list(unlist(folders[, i])))))
plot_residuals_analysis()
}
}
beepr::beep(10)
invisible(TRUE)
}
|
/R/perform-analyses.R
|
no_license
|
poissonconsulting/poiscon
|
R
| false
| false
| 2,410
|
r
|
#' Perform Analyses
#'
#' Performs batch jags analyses. It goes through the folders defined by \code{...}
#' and pulls out the data of name \code{name} (by default \code{"data"}) and then fits
#' the models defined in \code{models}. Note \code{beep = TRUE} has caused
#' crashes with RStudio on windows.
#'
#' @param models A jags_model object.
#' @param ... A series of character vectors or a list of character vectors of
#' folders to cycle through.
#' @param niters A count of the number of iterations to perform.
#' @param mode A string of the opts_jagr mode.
#' @param name A string of the name of the data file.
#' @param beep A flag indicating whether to beep on completion.
#' @export
perform_analyses <- function(models, ..., niters = 10^3, mode = "current",
                             name = "data",
                             beep = getOption("poiscon.beep", default = FALSE)) {
  # Validate all inputs up front.
  assert_that(is.jags_model(models))
  assert_that(is.count(niters))
  assert_that(is.string(mode))
  assert_that(is.string(name))
  assert_that(is.flag(beep) && noNA(beep))
  # Save the current jagr options and restore them on exit, so switching
  # the mode below does not leak into the caller's session.
  opts <- opts_jagr()
  on.exit(opts_jagr(opts))
  opts_jagr(mode = mode)
  args <- list(...)
  nargs <- length(args)
  # Helper: load the data, run the jags analysis, and persist all outputs
  # (analysis object, summary tables and plots) in the current folder.
  analysis <- function(models, name, niters) {
    data <- load_rdata(name)
    analysis <- jags_analysis(models, data = data, niters = niters)
    save_analysis(analysis)
    print(summary(analysis))
    save_tables(analysis)
    save_plots(analysis)
  }
  if (nargs == 0) {
    # No folders supplied: analyse in the current folder only.
    analysis(models = models, name = name, niters = niters)
  } else {
    # Each column of 'folders' is one combination of the supplied folder
    # vectors (cartesian product built via expand.grid).
    folders <- t(expand.grid(...))
    for (i in 1:ncol(folders)) {
      print(set_folders(as.character(as.list(unlist(folders[, i])))))
      analysis(models = models, name = name, niters = niters)
    }
  }
  if(beep)
    beepr::beep(10)
  invisible(TRUE)
}
#' Plots Residuals
#'
#' Plots residuals for the saved analysis in each of the supplied folders.
#'
#' @param ... character vectors of folders to cycle through
#' @export
plot_residuals_analyses <- function(...) {
  # Helper: load the saved analysis in the current folder and plot it.
  plot_one <- function() {
    plot_residuals(load_analysis())
  }
  folder_sets <- list(...)
  if (length(folder_sets) == 0) {
    # No folders supplied: operate on the current folder only.
    plot_one()
  } else {
    # One column per combination of the supplied folder vectors.
    combos <- t(expand.grid(...))
    for (col in seq_len(ncol(combos))) {
      print(set_folders(as.character(as.list(unlist(combos[, col])))))
      plot_one()
    }
  }
  beepr::beep(10)
  invisible(TRUE)
}
|
nGrid = c(50,100,250,1000)
regressionF = function(X){
return(sin(X*2*pi))
}
pGridF = function(n,exponential=F){
if(exponential){
return(round(exp(seq(0,log(n/100),length=11)))[-1])
}else{
return(2:10)
}
}
PhiF = function(X,p){
return(X^(1:p))
}
varAndBiasF = function(featureMat,f_star){
svd.out = svd(featureMat)
U = svd.out$u
D = svd.out$d
return(list('var'=sum(D^(-2)),
'bias'=f_star - U %*% t(U) %*% f_star,
'd'=D))
}
n = 10000
X = (1:n)/n
pGrid = pGridF(n)
f_star = regressionF(X)
biasOut = rep(0,length(pGrid))
varOut = rep(0,length(pGrid))
pSweep = 0
for(p in pGrid){
print(p)
pSweep = pSweep + 1
featureMat = scale(t(sapply(X,PhiF,p)))
varAndBias = varAndBiasF(featureMat,f_star)
biasOut[pSweep] = sum(varAndBias$bias**2)
varOut[pSweep] = varAndBias$var
}
plot(pGrid,biasOut,type='l',col='red')
lines(pGrid,varOut,col='blue')
risk = biasOut+varOut
points(pGrid,risk,col='green')
|
/SML2015/polynomial.R
|
no_license
|
darrenho/darrenho.github.io
|
R
| false
| false
| 995
|
r
|
# Candidate sample sizes for the simulation study.
nGrid <- c(50, 100, 250, 1000)

# True regression function: one full sine cycle over the unit interval.
regressionF <- function(X) {
  sin(X * 2 * pi)
}
# Grid of candidate polynomial degrees: either a fixed 2..10 grid, or a
# log-spaced grid from 1 up to n/100 when exponential = TRUE.
pGridF <- function(n, exponential = F) {
  if (!exponential) {
    return(2:10)
  }
  round(exp(seq(0, log(n / 100), length.out = 11)))[-1]
}
# Polynomial feature map: raise X to each power 1..p.
PhiF <- function(X, p) {
  X^(1:p)
}
# Variance/bias decomposition of the least-squares projection of f_star
# onto the column span of featureMat:
#   var  -- sum of inverse squared singular values
#   bias -- component of f_star orthogonal to the column span
#   d    -- the singular values themselves
varAndBiasF <- function(featureMat, f_star) {
  decomp <- svd(featureMat)
  basis <- decomp$u
  sing_vals <- decomp$d
  fitted <- basis %*% t(basis) %*% f_star
  list("var" = sum(sing_vals^(-2)),
       "bias" = f_star - fitted,
       "d" = sing_vals)
}
# Simulation: on a fine grid of inputs in (0, 1], sweep the polynomial
# degree p and record the squared bias and the variance term of the
# least-squares fit of the true regression function.
n = 10000
X = (1:n)/n
pGrid = pGridF(n)
f_star = regressionF(X)
# Preallocate one slot per candidate degree.
biasOut = rep(0,length(pGrid))
varOut = rep(0,length(pGrid))
pSweep = 0
for(p in pGrid){
  print(p)
  pSweep = pSweep + 1
  # n x p design matrix of standardized polynomial features.
  featureMat = scale(t(sapply(X,PhiF,p)))
  varAndBias = varAndBiasF(featureMat,f_star)
  biasOut[pSweep] = sum(varAndBias$bias**2)
  varOut[pSweep] = varAndBias$var
}
# Plot bias^2 (red line), variance (blue line) and their sum -- the
# risk -- as green points, all against the degree grid.
plot(pGrid,biasOut,type='l',col='red')
lines(pGrid,varOut,col='blue')
risk = biasOut+varOut
points(pGrid,risk,col='green')
|
### Data Visualization Lab
# 1/7/2016
## you will need the charm city circulator dataset:
library(ggplot2)
library(tidyr)
library(dplyr)
library(lubridate)
library(stringr)
# read in data
circ = read.csv("http://www.aejaffe.com/winterR_2016/data/Charm_City_Circulator_Ridership.csv",
header=TRUE,as.is=TRUE)
# covert dates
circ = mutate(circ, date = mdy(date))
# change colnames for reshaping
colnames(circ) = colnames(circ) %>%
str_replace("Board", ".Board") %>%
str_replace("Alight", ".Alight") %>%
str_replace("Average", ".Average")
circ$daily = NULL # remove
# make long
long = gather(circ, "var", "number",
starts_with("orange"),
starts_with("purple"), starts_with("green"),
starts_with("banner"))
# separate
long = separate_(long, "var", into = c("line", "type"),
sep = "[.]")
## take just average ridership per day
avg = filter(long, type == "Average")
avg = filter(avg, complete.cases(avg))
# Using ggplot2:
# 1. plot average ridership by date.
# a. Color the points by route (orange, purple, green, banner)
qplot(x = date, y = number, data= avg, colour = line)
# b. Color the points by day of the week
qplot(x = date, y = number, data= avg, colour = day)
# 2. Replot 1a where the colors of the points are the
# name of the route (with banner --> blue)
pal = c("blue", "darkgreen","orange","purple")
qplot(x = date, y = number, data= avg, colour = line) +
scale_colour_manual(values=pal)
# 3. plot average ridership by date with one panel per route
qplot(x = date, y = number, data= avg, facets = ~line)
qplot(x = date, y = number, data= avg, facets = ~line,
colour = line) + scale_colour_manual(values=pal)
# 4. plot average ridership by date with separate panels
# by day of the week, colored by route
qplot(x = date, y = number, data= avg, facets = ~day,
colour = line) + scale_colour_manual(values=pal)
# Using base R graphics:
# 5. plot average ridership on the orange route versus date
# as a solid line, and add dashed "error" lines based
# on the boardings and alightings.
# the line colors should be orange.
orange = filter(long, line=="orange")
orangeList = split(orange, orange$type)
plot(number ~ date, data =orangeList$Average,
col="orange", type="l", subset= 1:100)
lines(number ~ date, data =orangeList$Alighting,
col="orange", type="l", lty=2)
lines(number ~ date, data =orangeList$Boarding,
col="orange", type="l", lty=2)
orange = gather(orange, type, number)
|
/Data_Visualization/lab/Data_Visualization_Lab_key.R
|
permissive
|
anhnguyendepocen/summerR_2016
|
R
| false
| false
| 2,491
|
r
|
### Data Visualization Lab
# 1/7/2016
## you will need the charm city circulator dataset:
library(ggplot2)
library(tidyr)
library(dplyr)
library(lubridate)
library(stringr)
# read in data (downloaded directly from the course website)
circ = read.csv("http://www.aejaffe.com/winterR_2016/data/Charm_City_Circulator_Ridership.csv",
                header=TRUE,as.is=TRUE)
# convert dates from month/day/year strings to Date objects
circ = mutate(circ, date = mdy(date))
# change colnames for reshaping: insert a "." so that line and measurement
# type can later be split apart with separate_()
colnames(circ) = colnames(circ) %>%
  str_replace("Board", ".Board") %>%
  str_replace("Alight", ".Alight") %>%
  str_replace("Average", ".Average")
circ$daily = NULL # remove
# make long: one row per (date, line, measurement type)
# NOTE(review): gather()/separate_() are superseded by pivot_longer()/
# separate() in current tidyr -- kept as-is to match the 2016 lab.
long = gather(circ, "var", "number",
              starts_with("orange"),
              starts_with("purple"), starts_with("green"),
              starts_with("banner"))
# separate the "line.type" column on the literal dot
long = separate_(long, "var", into = c("line", "type"),
                 sep = "[.]")
## take just average ridership per day
avg = filter(long, type == "Average")
avg = filter(avg, complete.cases(avg))
# Using ggplot2:
# 1. plot average ridership by date.
#   a. Color the points by route (orange, purple, green, banner)
qplot(x = date, y = number, data= avg, colour = line)
#   b. Color the points by day of the week
qplot(x = date, y = number, data= avg, colour = day)
# 2. Replot 1a where the colors of the points are the
#   name of the route (with banner --> blue)
# (palette order matches the alphabetical factor levels: banner, green,
#  orange, purple)
pal = c("blue", "darkgreen","orange","purple")
qplot(x = date, y = number, data= avg, colour = line) +
  scale_colour_manual(values=pal)
# 3. plot average ridership by date with one panel per route
qplot(x = date, y = number, data= avg, facets = ~line)
qplot(x = date, y = number, data= avg, facets = ~line,
      colour = line) + scale_colour_manual(values=pal)
# 4. plot average ridership by date with separate panels
#   by day of the week, colored by route
qplot(x = date, y = number, data= avg, facets = ~day,
      colour = line) + scale_colour_manual(values=pal)
# Using base R graphics:
# 5. plot average ridership on the orange route versus date
#   as a solid line, and add dashed "error" lines based
#   on the boardings and alightings.
#   the line colors should be orange.
orange = filter(long, line=="orange")
# split into one data frame per measurement type (Average/Boarding/Alighting)
orangeList = split(orange, orange$type)
plot(number ~ date, data =orangeList$Average,
     col="orange", type="l", subset= 1:100)
lines(number ~ date, data =orangeList$Alighting,
      col="orange", type="l", lty=2)
lines(number ~ date, data =orangeList$Boarding,
      col="orange", type="l", lty=2)
orange = gather(orange, type, number)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{GetPageImage}
\alias{GetPageImage}
\title{Get a raster image of a selected page}
\usage{
GetPageImage(i)
}
\arguments{
\item{i}{index of the selected subject}
}
\value{
raster of the image.
}
\description{
Download the image from the website (unless it's already cached).
}
|
/packages/oldWeather5/man/GetPageImage.Rd
|
no_license
|
oldweather/oldWeather5
|
R
| false
| false
| 375
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{GetPageImage}
\alias{GetPageImage}
\title{Get a raster image of a selected page}
\usage{
GetPageImage(i)
}
\arguments{
\item{i}{index of the selected subject}
}
\value{
raster of the image.
}
\description{
Download the image from the website (unless it's already cached).
}
|
\name{smooth.spec}
\alias{smooth.spec}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Smooth a spectrum using loess.
}
\description{
Performs smoothing of a spectrum using loess with a desired smoothing scale.
}
\usage{
smooth.spec(spec, sc)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{spec}{
List with a structure similar to that generated by gen.spec
}
\item{sc}{
Smoothing scale
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{spec}{List with the same structure as the input spec, but with smoothing applied}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Luke Davies
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
spec<-get.spec(paste(.libPaths(),'/fourXPS/data/ExampleSpec.fits',sep=''))
spec.plot(spec, main='Example Spectrum', xlim=c(4500,9000), col='navy', lwd=0.5)
spec2<-smooth.spec(spec,0.02)
spec.plot(spec2, col='red', oplot=T, lwd=2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ }
\keyword{ }% __ONLY ONE__ keyword per line
|
/man/smooth.spec.Rd
|
no_license
|
lukejdavies/spec.tools
|
R
| false
| false
| 1,270
|
rd
|
\name{smooth.spec}
\alias{smooth.spec}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Smooth a spectrum using loess.
}
\description{
Performs smoothing of a spectrum using loess with a desired smoothing scale.
}
\usage{
smooth.spec(spec, sc)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{spec}{
List with a structure similar to that generated by gen.spec
}
\item{sc}{
Smoothing scale
}
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
\item{spec}{List with the same structure as the input spec, but with smoothing applied}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Luke Davies
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
spec<-get.spec(paste(.libPaths(),'/fourXPS/data/ExampleSpec.fits',sep=''))
spec.plot(spec, main='Example Spectrum', xlim=c(4500,9000), col='navy', lwd=0.5)
spec2<-smooth.spec(spec,0.02)
spec.plot(spec2, col='red', oplot=T, lwd=2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ }
\keyword{ }% __ONLY ONE__ keyword per line
|
randEEG <-
function(n.classes=2,n.rec=10,n.channels = 20,n.signals=250,ar="default", ma="default", order="default", vars = c(1,2)) {
n.elec = n.channels
if(ar=="default")
ar = mat.or.vec(n.classes,1)
if(ma=="default")
ma = mat.or.vec(n.classes,1)
if(order=="default")
order = mat.or.vec(n.classes,3)
#--------------------------------------------------------------
#INI: transforming vector into matrices to avoid errors
if (n.classes==1){
if (is.null(nrow(order))) order<-t(as.matrix(order))
if (is.null(nrow(ar))) ar<-t(as.matrix(ar))
if (is.null(nrow(ma))) ma<-t(as.matrix(ma))
} else {
if (is.null(nrow(order))) order<-t(as.matrix(order))
if (is.null(nrow(ar))) ar<-as.matrix(ar)
if (is.null(nrow(ma))) ma<-as.matrix(ma)
}
#END: transforming vector into matrices to avoid errors
#--------------------------------------------------------------
#--------------------------------------------------------------
#INI: input control
for(i in 1:length(vars))
{
if(vars[i]<=0) stop("Parameter 'vars': All entries must be greater than 0.")}
if(n.classes<=0) stop("Parameter 'n.classes': This number must be greater than 0.")
if(n.rec<=0) stop("Parameter 'n.rec': This number must be greater than 0.")
if(n.elec<=0) stop("Parameter 'n.channels': This number must be greater than 0.")
if(n.signals<=0) stop("Parameter 'n.signals': This number must be greater than 0.")
if (nrow(order)!=n.classes & nrow(order)!=(n.classes*n.elec)) stop(
"Parameter 'order': \nThe number of rows in 'order' must be equal to 'n.classes' or 'n.classes*n.channels'")
if (nrow(order)!=nrow(ar)) stop("Parameter 'ar': \nMatrix 'ar' must have the same number of rows of 'order'.")
if (nrow(order)!=nrow(ma)) stop("Parameter 'ma': \nMatrix 'ma' must have the same number of rows of 'order'.")
if (nrow(order)!=length(vars)) stop("Parameter 'vars': \nThe length of 'vars' must be equal to the number of classes.")
m<-max(order[,1])
if (ncol(ar)>m & m>0) {ar<-ar[,1:m] ; warning("Matrices 'order' and 'ar' do not conform. Some parameters were deleted in 'ar' matrix to proceed the simulation.")}
repeat{ if (ncol(ar)<m) ar<-cbind(ar,numeric(nrow(ar))) else break;}
m<-max(order[,3])
if (ncol(ma)>m & m>0) {ma<-ma[,1:m]; warning("Matrices 'order' and 'ma' do not conform. Some parameters were deleted in 'ma' matrix to proceed the simulation.")}
repeat{ if (ncol(ma)<m) ma<-cbind(ma,numeric(nrow(ma))) else break;}
if (length(vars)!=n.classes & length(var)!=(n.classes*n.elec)) stop("Parameter 'vars': \nlength of 'vars' must be equal to the number of classes or equal to 'n.classes'*'n.channels'.")
#END: input control
#--------------------------------------------------------------
data <- mat.or.vec(n.signals*n.classes*n.rec,n.elec)
reps <- numeric(n.signals*n.classes*n.rec)
class <- numeric(n.signals*n.classes*n.rec)
for (i in 1:n.rec)
{
reps[((i-1)*n.signals+1):(i*n.signals)]<-rep(i,n.signals)
}
for (i in 1:n.classes)
{
class[((i-1)*n.signals*n.rec+1):(i*n.signals*n.rec)] <- rep(i,n.signals*n.rec) ;
if (i>1) reps[((i-1)*n.signals*n.rec+1):(i*n.signals*n.rec)]<-reps[1:(n.signals*n.rec)]
}
if (nrow(order)==n.classes)
{
for (i in 1:n.elec)
{
for (j in 1:n.classes)
{
for (k in 1:n.rec)
{
ord <- order[j,]
if (ord[1]==0)
{
AR<-c()
} else
{
AR<-ar[j,1:ord[1]]
}
if (ord[3]==0)
{
MA<-c()
} else
{
MA<-ma[j,1:ord[3]]
}
data[((j-1)*n.signals*n.rec+(k-1)*n.signals+1):((j-1)*n.signals*n.rec+k*n.signals),i] <- tail(arima.sim(n = n.signals, list(ar=AR,ma=MA,order=ord),sd = sqrt(vars[j])),n.signals)
}
}
}
} else
{
for (i in 1:n.elec)
{
for (j in 1:n.classes)
{
for (k in 1:n.rec)
{
ind <- (j-1)*n.elec+i
ord <- order[ind,]
if (ord[1]==0) {AR<-c()} else {AR<-ar[ind,1:ord[1]]}
if (ord[3]==0) {MA<-0} else {MA<-ma[ind,1:ord[3]]}
data[((j-1)*n.signals*n.rec+(k-1)*n.signals+1):((j-1)*n.signals*n.rec+k*n.signals),i] <- arima.sim(n = n.signals, list(ar=AR,ma=MA,order=ord),sd = sqrt(vars[ind]))
}
}
}
}
result <- list(
data = data , classes.Id = class , rec.Id = reps , n.classes=n.classes, n.rec = n.rec, n.channels=n.elec,n.signals=n.signals,vars=vars
)
class(result) <- "RandEEG"
return(result)
}
print.RandEEG <-
function(x, ...){
cat("Class: RandEEG \n \n")
cat("Description: Simulation of EEG data \n")
cat("Number of classes:",x$n.classes,"\n")
cat("Number of recordings of each class:",x$n.rec,"\n")
cat("Number of channels:",x$n.channels,"\n")
cat("Number of signals in each recording:",x$n.signals,"\n")
cat("Variance for each class:",x$vars,"\n")
}
summary.RandEEG <-
function(object, ...){
x<- object
cat("Class: RandEEG \n \n")
cat("Description: Simulation of EEG data \n")
cat("Number of classes:",x$n.classes,"\n")
cat("Number of recordings of each class:",x$n.rec,"\n")
cat("Number of channels:",x$n.channels,"\n")
cat("Number of signals in each recording:",x$n.signals,"\n")
cat("Variance for each class:",x$vars,"\n")
}
|
/R/randEEG.R
|
no_license
|
cran/eegAnalysis
|
R
| false
| false
| 5,556
|
r
|
# Simulate multi-channel EEG recordings as ARIMA processes.
#
# One ARIMA model is used per class (when 'order' has 'n.classes' rows) or
# per class/channel combination (when it has 'n.classes * n.channels' rows).
# Returns an object of class "RandEEG" holding the stacked signal matrix
# together with class and recording identifiers.
#
# Arguments:
#   n.classes  number of classes to simulate (> 0).
#   n.rec      number of recordings per class (> 0).
#   n.channels number of channels (electrodes) per recording (> 0).
#   n.signals  number of samples per recording (> 0).
#   ar, ma     matrices of AR/MA coefficients, one row per row of 'order';
#              the string "default" means all zeros (pure white noise).
#   order      matrix of ARIMA (p, d, q) orders; "default" means all zeros.
#   vars       innovation variances, one per row of 'order'.
randEEG <-
function(n.classes=2,n.rec=10,n.channels = 20,n.signals=250,ar="default", ma="default", order="default", vars = c(1,2)) {
  n.elec = n.channels
  # BUG FIX: use identical() instead of '==' -- the caller may pass numeric
  # vectors/matrices, and a length > 1 condition inside if() is an error in
  # R >= 4.2 (and only compared the first element before that).
  if(identical(ar, "default"))
    ar = mat.or.vec(n.classes,1)
  if(identical(ma, "default"))
    ma = mat.or.vec(n.classes,1)
  if(identical(order, "default"))
    order = mat.or.vec(n.classes,3)
  #--------------------------------------------------------------
  #INI: transforming vector into matrices to avoid errors
  # Plain vectors have NULL nrow(); promote them to matrices so the
  # row-based checks below work uniformly.
  if (n.classes==1){
    if (is.null(nrow(order))) order<-t(as.matrix(order))
    if (is.null(nrow(ar))) ar<-t(as.matrix(ar))
    if (is.null(nrow(ma))) ma<-t(as.matrix(ma))
  } else {
    if (is.null(nrow(order))) order<-t(as.matrix(order))
    if (is.null(nrow(ar))) ar<-as.matrix(ar)
    if (is.null(nrow(ma))) ma<-as.matrix(ma)
  }
  #END: transforming vector into matrices to avoid errors
  #--------------------------------------------------------------
  #--------------------------------------------------------------
  #INI: input control
  for(i in seq_along(vars))
  {
    if(vars[i]<=0) stop("Parameter 'vars': All entries must be greater than 0.")}
  if(n.classes<=0) stop("Parameter 'n.classes': This number must be greater than 0.")
  if(n.rec<=0) stop("Parameter 'n.rec': This number must be greater than 0.")
  if(n.elec<=0) stop("Parameter 'n.channels': This number must be greater than 0.")
  if(n.signals<=0) stop("Parameter 'n.signals': This number must be greater than 0.")
  if (nrow(order)!=n.classes && nrow(order)!=(n.classes*n.elec)) stop(
    "Parameter 'order': \nThe number of rows in 'order' must be equal to 'n.classes' or 'n.classes*n.channels'")
  if (nrow(order)!=nrow(ar)) stop("Parameter 'ar': \nMatrix 'ar' must have the same number of rows of 'order'.")
  if (nrow(order)!=nrow(ma)) stop("Parameter 'ma': \nMatrix 'ma' must have the same number of rows of 'order'.")
  if (nrow(order)!=length(vars)) stop("Parameter 'vars': \nThe length of 'vars' must be equal to the number of classes.")
  # Trim or zero-pad the coefficient matrices so their column count matches
  # the largest AR (resp. MA) order requested.  drop=FALSE keeps them as
  # matrices when trimming to a single column.
  m<-max(order[,1])
  if (ncol(ar)>m && m>0) {ar<-ar[,1:m,drop=FALSE] ; warning("Matrices 'order' and 'ar' do not conform. Some parameters were deleted in 'ar' matrix to proceed the simulation.")}
  repeat{ if (ncol(ar)<m) ar<-cbind(ar,numeric(nrow(ar))) else break;}
  m<-max(order[,3])
  if (ncol(ma)>m && m>0) {ma<-ma[,1:m,drop=FALSE]; warning("Matrices 'order' and 'ma' do not conform. Some parameters were deleted in 'ma' matrix to proceed the simulation.")}
  repeat{ if (ncol(ma)<m) ma<-cbind(ma,numeric(nrow(ma))) else break;}
  # BUG FIX: the original tested length(var) -- 'var' is the base variance
  # function (length 1), so this check spuriously rejected valid per-channel
  # specifications.
  if (length(vars)!=n.classes && length(vars)!=(n.classes*n.elec)) stop("Parameter 'vars': \nlength of 'vars' must be equal to the number of classes or equal to 'n.classes'*'n.channels'.")
  #END: input control
  #--------------------------------------------------------------
  # Output layout: rows are stacked first by class, then by recording;
  # one column per channel.
  data <- mat.or.vec(n.signals*n.classes*n.rec,n.elec)
  reps <- numeric(n.signals*n.classes*n.rec)
  class <- numeric(n.signals*n.classes*n.rec)
  # Recording identifiers for the first class ...
  for (i in 1:n.rec)
  {
    reps[((i-1)*n.signals+1):(i*n.signals)]<-rep(i,n.signals)
  }
  # ... replicated for every further class, plus the class identifiers.
  for (i in 1:n.classes)
  {
    class[((i-1)*n.signals*n.rec+1):(i*n.signals*n.rec)] <- rep(i,n.signals*n.rec) ;
    if (i>1) reps[((i-1)*n.signals*n.rec+1):(i*n.signals*n.rec)]<-reps[1:(n.signals*n.rec)]
  }
  if (nrow(order)==n.classes)
  {
    # One ARIMA model per class, shared by all channels.
    for (i in 1:n.elec)
    {
      for (j in 1:n.classes)
      {
        for (k in 1:n.rec)
        {
          ord <- order[j,]
          # arima.sim() requires length(ar) == p and length(ma) == q, so a
          # zero order must map to an empty (NULL) coefficient vector.
          if (ord[1]==0) {AR<-c()} else {AR<-ar[j,1:ord[1]]}
          if (ord[3]==0) {MA<-c()} else {MA<-ma[j,1:ord[3]]}
          data[((j-1)*n.signals*n.rec+(k-1)*n.signals+1):((j-1)*n.signals*n.rec+k*n.signals),i] <- tail(arima.sim(n = n.signals, list(ar=AR,ma=MA,order=ord),sd = sqrt(vars[j])),n.signals)
        }
      }
    }
  } else
  {
    # One ARIMA model per class/channel combination.
    for (i in 1:n.elec)
    {
      for (j in 1:n.classes)
      {
        for (k in 1:n.rec)
        {
          ind <- (j-1)*n.elec+i
          ord <- order[ind,]
          if (ord[1]==0) {AR<-c()} else {AR<-ar[ind,1:ord[1]]}
          # BUG FIX: this branch used MA<-0 for a zero MA order, which makes
          # arima.sim() stop with "inconsistent specification of 'ma' order".
          if (ord[3]==0) {MA<-c()} else {MA<-ma[ind,1:ord[3]]}
          data[((j-1)*n.signals*n.rec+(k-1)*n.signals+1):((j-1)*n.signals*n.rec+k*n.signals),i] <- arima.sim(n = n.signals, list(ar=AR,ma=MA,order=ord),sd = sqrt(vars[ind]))
        }
      }
    }
  }
  result <- list(
    data = data , classes.Id = class , rec.Id = reps , n.classes=n.classes, n.rec = n.rec, n.channels=n.elec,n.signals=n.signals,vars=vars
  )
  class(result) <- "RandEEG"
  return(result)
}
# S3 print method for "RandEEG" objects.
#
# Writes a short, human-readable description of the simulation settings
# (counts of classes, recordings, channels, samples and the innovation
# variances) to the console via cat().
print.RandEEG <-
function(x, ...){
  cat("Class: RandEEG \n \n")
  cat("Description: Simulation of EEG data \n")
  # Label/value pairs, printed in a fixed order matching the original layout.
  fields <- list(
    "Number of classes:" = x$n.classes,
    "Number of recordings of each class:" = x$n.rec,
    "Number of channels:" = x$n.channels,
    "Number of signals in each recording:" = x$n.signals,
    "Variance for each class:" = x$vars
  )
  for (label in names(fields)) {
    cat(label, fields[[label]], "\n")
  }
}
# S3 summary method for "RandEEG" objects; prints the same overview of the
# simulation settings as the print method.
summary.RandEEG <-
function(object, ...){
  cat("Class: RandEEG \n \n")
  cat("Description: Simulation of EEG data \n")
  cat("Number of classes:", object$n.classes, "\n")
  cat("Number of recordings of each class:", object$n.rec, "\n")
  cat("Number of channels:", object$n.channels, "\n")
  cat("Number of signals in each recording:", object$n.signals, "\n")
  cat("Variance for each class:", object$vars, "\n")
}
|
#' @importFrom log4r debug
#' @export
f2 <- function(x, logger=create.logger(NULL)) {
debug(logger, paste("f2() input is:", x))
y <- x-1
debug(logger, paste("f2() output is:", y))
y
}
|
/R/f2.r
|
no_license
|
00mjk/rseedpkg
|
R
| false
| false
| 192
|
r
|
#' @importFrom log4r debug
#' @export
f2 <- function(x, logger=create.logger(NULL)) {
  # Log the input, subtract one, then log and return the result.
  debug(logger, paste("f2() input is:", x))
  result <- x - 1
  debug(logger, paste("f2() output is:", result))
  result
}
|
rm( list=ls() )
sink( 'Rout/setup.Rout' )
load( paste0( 'parsed_data/', 'base', '.Rdata' ) )
X <- scale(G)
Yb2Z <- function(yb){
Z <- t(sapply( yb, function(x){
if( is.na(x) ){
return( c( NA, NA ) )
} else if( x == 0 ){
return( c( 0, 1 ) )
} else if( x == 1 ){
return( c( 1, 0 ) )
}
}))
Z * sqrt(nrow(Z)/sum(Z^2))
}
## load K
load( 'parsed_data/K.Rdata' )
K <- K[rownames(X),rownames(X)]
K <- K*nrow(K)/sum(diag(K))
y <- 1+as.numeric( Yb[,1] == max(Yb[,1],na.rm=T) )
stressall <- rowSums( Yb[,paste0( 'LS.', 1:16 )] )
stresspc <- svd( Yb[,paste0( 'LS.', 1:16 )] )$u[,1]
stresspc2 <- svd( scale(Yb[,paste0( 'LS.', (1:16)[-c(11,15)] )]) )$u[,1]
Y <- cbind( Y, stressall, stresspc, stresspc2 )
save( Yb2Z, y, X, K, Y, Yb, file='Rdata/data.Rdata' )
ppsub <- c(7,8,16:ncol(Yb),ncol(Yb)+2, ncol(Yb)+4:(ncol(Y)))
qqsub <- c(1:6 ,ncol(Yb)+c(0,1:3,10,11,12,13,14,15 ))
ppsub <- ppsub[-c(4,13,17)] # LS.2 is redundant with sepdivwid; LS.11 has only 3 in ctrls; LS.15 has only 17
# LS.6/13 have only 52/43 in ctrls
envs <- c(colnames(Yb),colnames(Y))
envs[ppsub]
envs[qqsub]
envs[qqsub]
qqsub <- qqsub[-c(9,15)]
envs[qqsub]
rbind(
round( colSums( Yb[Yb[,1]==1,intersect( ppsub, 1:ncol(Yb) )], na.rm=T ), 2 ),
round( colSums( Yb[Yb[,1]==0,intersect( ppsub, 1:ncol(Yb) )], na.rm=T ), 2 )
)
Ybnames <- c( 'Major Depression', 'Melancholia', "Panic Disorder", 'Anxiety Disorder', "Dysthymia", "Postnatal Depression",
"CSA", "Stress", "Father MD", "Mother MD", 'Agoraphobia', 'Social Phobia', 'Animal Phobia', 'Situational Phobia', 'Blood Phobia',
'Close Family Death', 'Divorced_Separated', 'Unemployed', 'Fired', 'Finanical Crisis', 'Legal Probems', 'Serious Illness',
'Serious Accident', 'Natural Disaster', 'Witnessed Violence', 'Raped', 'Physically Attacked', 'Abused as Child ', 'Neglected as Child', 'Violently Threatened', 'Other Terrible Events', 'Widowed' )
Ynames <- c( 'Total MD Episodes', 'Neuroticism', 'Family History', 'Cold Mother', 'Authoritarian Mother', 'Protective Mother', 'Cold Father', 'Authoritarian Father', 'Protective Father','Pre-Menstrual MD', 'Height', 'BMI', 'Mitochondrial DNA', 'Telomere Length', 'StressAll', 'StressPC', 'StressPC2' )
rbind(
Ybnames,
round( colSums( Yb[Yb[,1]==1,], na.rm=T ), 2 ),
round( colSums( Yb[Yb[,1]==0,], na.rm=T ), 2 )
)
B <- length(Ybnames)
P <- length(Ynames)
save( B, P, Ybnames, Ynames, envs, ppsub, qqsub, file='Rdata/setup.Rdata' )
|
/converge/setup_data.R
|
no_license
|
andywdahl/gxemm-scripts
|
R
| false
| false
| 2,441
|
r
|
# Setup script: assembles genotype, kinship and phenotype objects for the
# downstream GxEMM analyses and saves them as .Rdata files.
# NOTE(review): rm(list=ls()) at the top of a script clears the session --
# kept as-is since downstream scripts appear to rely on a clean workspace.
rm( list=ls() )
sink( 'Rout/setup.Rout' )
load( paste0( 'parsed_data/', 'base', '.Rdata' ) )
# Standardize the genotype matrix (G is loaded from base.Rdata).
X <- scale(G)
# Convert a binary phenotype vector into a two-column indicator matrix
# (column 1 = case, column 2 = control; NA rows stay NA), rescaled so the
# squared entries sum to the number of rows.
Yb2Z <- function(yb){
  Z <- t(sapply( yb, function(x){
    if( is.na(x) ){
      return( c( NA, NA ) )
    } else if( x == 0 ){
      return( c( 0, 1 ) )
    } else if( x == 1 ){
      return( c( 1, 0 ) )
    }
  }))
  Z * sqrt(nrow(Z)/sum(Z^2))
}
## load K
load( 'parsed_data/K.Rdata' )
# Align the kinship matrix with the genotype rows and normalize its trace.
K <- K[rownames(X),rownames(X)]
K <- K*nrow(K)/sum(diag(K))
# 1/2-coded outcome from the first binary phenotype column.
y <- 1+as.numeric( Yb[,1] == max(Yb[,1],na.rm=T) )
# Aggregate the 16 life-stress (LS) items: row sum, first left singular
# vector, and the first left singular vector excluding items 11 and 15.
stressall <- rowSums( Yb[,paste0( 'LS.', 1:16 )] )
stresspc <- svd( Yb[,paste0( 'LS.', 1:16 )] )$u[,1]
stresspc2 <- svd( scale(Yb[,paste0( 'LS.', (1:16)[-c(11,15)] )]) )$u[,1]
Y <- cbind( Y, stressall, stresspc, stresspc2 )
save( Yb2Z, y, X, K, Y, Yb, file='Rdata/data.Rdata' )
# Column index subsets into c(colnames(Yb), colnames(Y)) selecting the
# environment variables used downstream.
ppsub <- c(7,8,16:ncol(Yb),ncol(Yb)+2, ncol(Yb)+4:(ncol(Y)))
qqsub <- c(1:6 ,ncol(Yb)+c(0,1:3,10,11,12,13,14,15 ))
ppsub <- ppsub[-c(4,13,17)] # LS.2 is redundant with sepdivwid; LS.11 has only 3 in ctrls; LS.15 has only 17
# LS.6/13 have only 52/43 in ctrls
envs <- c(colnames(Yb),colnames(Y))
# Echo the selected names into the sink log for manual inspection.
envs[ppsub]
envs[qqsub]
envs[qqsub]
qqsub <- qqsub[-c(9,15)]
envs[qqsub]
# Per-group (case/control) column sums for the selected binary phenotypes.
rbind(
  round( colSums( Yb[Yb[,1]==1,intersect( ppsub, 1:ncol(Yb) )], na.rm=T ), 2 ),
  round( colSums( Yb[Yb[,1]==0,intersect( ppsub, 1:ncol(Yb) )], na.rm=T ), 2 )
)
# Human-readable labels for the binary (Ybnames) and quantitative (Ynames)
# phenotypes; order must match the columns of Yb and Y respectively.
Ybnames <- c( 'Major Depression', 'Melancholia', "Panic Disorder", 'Anxiety Disorder', "Dysthymia", "Postnatal Depression",
  "CSA", "Stress", "Father MD", "Mother MD", 'Agoraphobia', 'Social Phobia', 'Animal Phobia', 'Situational Phobia', 'Blood Phobia',
  'Close Family Death', 'Divorced_Separated', 'Unemployed', 'Fired', 'Finanical Crisis', 'Legal Probems', 'Serious Illness',
  'Serious Accident', 'Natural Disaster', 'Witnessed Violence', 'Raped', 'Physically Attacked', 'Abused as Child ', 'Neglected as Child', 'Violently Threatened', 'Other Terrible Events', 'Widowed' )
Ynames <- c( 'Total MD Episodes', 'Neuroticism', 'Family History', 'Cold Mother', 'Authoritarian Mother', 'Protective Mother', 'Cold Father', 'Authoritarian Father', 'Protective Father','Pre-Menstrual MD', 'Height', 'BMI', 'Mitochondrial DNA', 'Telomere Length', 'StressAll', 'StressPC', 'StressPC2' )
# Case/control counts for every binary phenotype, labelled for the log.
rbind(
  Ybnames,
  round( colSums( Yb[Yb[,1]==1,], na.rm=T ), 2 ),
  round( colSums( Yb[Yb[,1]==0,], na.rm=T ), 2 )
)
B <- length(Ybnames)
P <- length(Ynames)
save( B, P, Ybnames, Ynames, envs, ppsub, qqsub, file='Rdata/setup.Rdata' )
|
#!/usr/local/bin/Rscript
library(caret)
library("fpc")
library(ROCR)
library(ipred)
library(rpart)
library(party)
library(e1071)
args <- commandArgs(TRUE)
model <- readRDS(args[1])
ind <- read.table(args[2],sep=';',header=FALSE)
goal <- as.numeric(args[3])
prediction <- predict(model, newdata=ind)
probability <- unlist(treeresponse(model, ind), use.names=F)[goal+1]
print(prediction)
print(probability)
|
/StructuralEntropy/tester.r
|
no_license
|
hdg7/EEE
|
R
| false
| false
| 411
|
r
|
#!/usr/local/bin/Rscript
# Command-line prediction helper: loads a saved model from an RDS file,
# reads a semicolon-separated feature row, and prints the predicted class
# together with the probability of the requested goal class.
#
# Usage: tester.r <model.rds> <features.csv> <goal-class-index>
# NOTE(review): treeresponse() suggests a party-package tree model -- confirm
# the model type produced by the companion training script.
library(caret)
library("fpc")
library(ROCR)
library(ipred)
library(rpart)
library(party)
library(e1071)
args <- commandArgs(TRUE)
# args[1]: path to an RDS file containing the fitted model.
model <- readRDS(args[1])
# args[2]: one observation, semicolon-separated, no header row.
ind <- read.table(args[2],sep=';',header=FALSE)
# args[3]: goal class index; goal+1 below converts it to R's 1-based indexing.
goal <- as.numeric(args[3])
prediction <- predict(model, newdata=ind)
# treeresponse() yields per-class probabilities; pick the goal class entry.
probability <- unlist(treeresponse(model, ind), use.names=F)[goal+1]
print(prediction)
print(probability)
|
#Part 1: (Plot for 1960 and 2013)
#Execute below code to generate three new vectors
Country_Code <- c("ABW","AFG","AGO","ALB","ARE","ARG","ARM","ATG","AUS","AUT","AZE","BDI","BEL","BEN","BFA","BGD","BGR","BHR","BHS","BIH","BLR","BLZ","BOL","BRA","BRB","BRN","BTN","BWA","CAF","CAN","CHE","CHL","CHN","CIV","CMR","COG","COL","COM","CPV","CRI","CUB","CYP","CZE","DEU","DJI","DNK","DOM","DZA","ECU","EGY","ERI","ESP","EST","ETH","FIN","FJI","FRA","FSM","GAB","GBR","GEO","GHA","GIN","GMB","GNB","GNQ","GRC","GRD","GTM","GUM","GUY","HKG","HND","HRV","HTI","HUN","IDN","IND","IRL","IRN","IRQ","ISL","ITA","JAM","JOR","JPN","KAZ","KEN","KGZ","KHM","KIR","KOR","KWT","LAO","LBN","LBR","LBY","LCA","LKA","LSO","LTU","LUX","LVA","MAC","MAR","MDA","MDG","MDV","MEX","MKD","MLI","MLT","MMR","MNE","MNG","MOZ","MRT","MUS","MWI","MYS","NAM","NCL","NER","NGA","NIC","NLD","NOR","NPL","NZL","OMN","PAK","PAN","PER","PHL","PNG","POL","PRI","PRT","PRY","PYF","QAT","ROU","RUS","RWA","SAU","SDN","SEN","SGP","SLB","SLE","SLV","SOM","SSD","STP","SUR","SVK","SVN","SWE","SWZ","SYR","TCD","TGO","THA","TJK","TKM","TLS","TON","TTO","TUN","TUR","TZA","UGA","UKR","URY","USA","UZB","VCT","VEN","VIR","VNM","VUT","WSM","YEM","ZAF","COD","ZMB","ZWE")
Life_Expectancy_At_Birth_1960 <- c(65.5693658536586,32.328512195122,32.9848292682927,62.2543658536585,52.2432195121951,65.2155365853659,65.8634634146342,61.7827317073171,70.8170731707317,68.5856097560976,60.836243902439,41.2360487804878,69.7019512195122,37.2782682926829,34.4779024390244,45.8293170731707,69.2475609756098,52.0893658536585,62.7290487804878,60.2762195121951,67.7080975609756,59.9613658536585,42.1183170731707,54.2054634146342,60.7380487804878,62.5003658536585,32.3593658536585,50.5477317073171,36.4826341463415,71.1331707317073,71.3134146341463,57.4582926829268,43.4658048780488,36.8724146341463,41.523756097561,48.5816341463415,56.716756097561,41.4424390243903,48.8564146341463,60.5761951219512,63.9046585365854,69.5939268292683,70.3487804878049,69.3129512195122,44.0212682926829,72.1765853658537,51.8452682926829,46.1351219512195,53.215,48.0137073170732,37.3629024390244,69.1092682926829,67.9059756097561,38.4057073170732,68.819756097561,55.9584878048781,69.8682926829268,57.5865853658537,39.5701219512195,71.1268292682927,63.4318536585366,45.8314634146342,34.8863902439024,32.0422195121951,37.8404390243902,36.7330487804878,68.1639024390244,59.8159268292683,45.5316341463415,61.2263414634146,60.2787317073171,66.9997073170732,46.2883170731707,64.6086585365854,42.1000975609756,68.0031707317073,48.6403170731707,41.1719512195122,69.691756097561,44.945512195122,48.0306829268293,73.4286585365854,69.1239024390244,64.1918292682927,52.6852682926829,67.6660975609756,58.3675853658537,46.3624146341463,56.1280731707317,41.2320243902439,49.2159756097561,53.0013170731707,60.3479512195122,43.2044634146342,63.2801219512195,34.7831707317073,42.6411951219512,57.303756097561,59.7471463414634,46.5107073170732,69.8473170731707,68.4463902439024,69.7868292682927,64.6609268292683,48.4466341463415,61.8127804878049,39.9746829268293,37.2686341463415,57.0656341463415,60.6228048780488,28.2116097560976,67.6017804878049,42.7363902439024,63.7056097560976,48.3688048780488,35.0037073170732,43.48309756
09756,58.7452195121951,37.7736341463415,59.4753414634146,46.8803902439024,58.6390243902439,35.5150487804878,37.1829512195122,46.9988292682927,73.3926829268293,73.549756097561,35.1708292682927,71.2365853658537,42.6670731707317,45.2904634146342,60.8817073170732,47.6915853658537,57.8119268292683,38.462243902439,67.6804878048781,68.7196097560976,62.8089268292683,63.7937073170732,56.3570487804878,61.2060731707317,65.6424390243903,66.0552926829268,42.2492926829268,45.6662682926829,48.1876341463415,38.206,65.6598292682927,49.3817073170732,30.3315365853659,49.9479268292683,36.9658780487805,31.6767073170732,50.4513658536585,59.6801219512195,69.9759268292683,68.9780487804878,73.0056097560976,44.2337804878049,52.768243902439,38.0161219512195,40.2728292682927,54.6993170731707,56.1535365853659,54.4586829268293,33.7271219512195,61.3645365853659,62.6575853658537,42.009756097561,45.3844146341463,43.6538780487805,43.9835609756098,68.2995365853659,67.8963902439025,69.7707317073171,58.8855365853659,57.7238780487805,59.2851219512195,63.7302195121951,59.0670243902439,46.4874878048781,49.969512195122,34.3638048780488,49.0362926829268,41.0180487804878,45.1098048780488,51.5424634146342)
Life_Expectancy_At_Birth_2013 <- c(75.3286585365854,60.0282682926829,51.8661707317073,77.537243902439,77.1956341463415,75.9860975609756,74.5613658536585,75.7786585365854,82.1975609756098,80.890243902439,70.6931463414634,56.2516097560976,80.3853658536585,59.3120243902439,58.2406341463415,71.245243902439,74.4658536585366,76.5459512195122,75.0735365853659,76.2769268292683,72.4707317073171,69.9820487804878,67.9134390243903,74.1224390243903,75.3339512195122,78.5466585365854,69.1029268292683,64.3608048780488,49.8798780487805,81.4011219512195,82.7487804878049,81.1979268292683,75.3530243902439,51.2084634146342,55.0418048780488,61.6663902439024,73.8097317073171,62.9321707317073,72.9723658536585,79.2252195121951,79.2563902439025,79.9497804878049,78.2780487804878,81.0439024390244,61.6864634146342,80.3024390243903,73.3199024390244,74.5689512195122,75.648512195122,70.9257804878049,63.1778780487805,82.4268292682927,76.4243902439025,63.4421951219512,80.8317073170732,69.9179268292683,81.9682926829268,68.9733902439024,63.8435853658537,80.9560975609756,74.079512195122,61.1420731707317,58.216487804878,59.9992682926829,54.8384146341464,57.2908292682927,80.6341463414634,73.1935609756098,71.4863902439024,78.872512195122,66.3100243902439,83.8317073170732,72.9428536585366,77.1268292682927,62.4011463414634,75.2682926829268,68.7046097560976,67.6604146341463,81.0439024390244,75.1259756097561,69.4716829268293,83.1170731707317,82.290243902439,73.4689268292683,73.9014146341463,83.3319512195122,70.45,60.9537804878049,70.2024390243902,67.7720487804878,65.7665853658537,81.459756097561,74.462756097561,65.687243902439,80.1288780487805,60.5203902439024,71.6576829268293,74.9127073170732,74.2402926829268,49.3314634146342,74.1634146341464,81.7975609756098,73.9804878048781,80.3391463414634,73.7090487804878,68.811512195122,64.6739024390244,76.6026097560976,76.5326585365854,75.1870487804878,57.5351951219512,80.7463414634146,65.6540975609756,74.7583658536585,69.0618048780488,54.641512195122,62.8027073170732,
74.46,61.466,74.567512195122,64.3438780487805,77.1219512195122,60.8281463414634,52.4421463414634,74.514756097561,81.1048780487805,81.4512195121951,69.222,81.4073170731707,76.8410487804878,65.9636829268293,77.4192195121951,74.2838536585366,68.1315609756097,62.4491707317073,76.8487804878049,78.7111951219512,80.3731707317073,72.7991707317073,76.3340731707317,78.4184878048781,74.4634146341463,71.0731707317073,63.3948292682927,74.1776341463415,63.1670487804878,65.878756097561,82.3463414634146,67.7189268292683,50.3631219512195,72.4981463414634,55.0230243902439,55.2209024390244,66.259512195122,70.99,76.2609756097561,80.2780487804878,81.7048780487805,48.9379268292683,74.7157804878049,51.1914878048781,59.1323658536585,74.2469268292683,69.4001707317073,65.4565609756098,67.5223658536585,72.6403414634147,70.3052926829268,73.6463414634147,75.1759512195122,64.2918292682927,57.7676829268293,71.159512195122,76.8361951219512,78.8414634146341,68.2275853658537,72.8108780487805,74.0744146341464,79.6243902439024,75.756487804878,71.669243902439,73.2503902439024,63.583512195122,56.7365853658537,58.2719268292683,59.2373658536585,55.633)
#(c) Kirill Eremenko, www.superdatascience.com
# Lookup table of life expectancy (1960 and 2013), keyed by country code.
df <- data.frame(Country_Code = Country_Code,
                 Life_Expectancy_At_Birth_1960 = Life_Expectancy_At_Birth_1960,
                 Life_Expectancy_At_Birth_2013 = Life_Expectancy_At_Birth_2013)
p <- '/Users/youssefkindo/Downloads/R_file/P2-Section5-Homework-Data.csv'
data <- read.csv(p)
# Split the long-format demographic data into one frame per year.
# BUG FIX: df_2013 was built as data[split_1960,] -- it referenced
# split_1960 before it was defined AND would have selected the 1960 rows.
split_2013 <- data$Year == 2013
df_2013 <- data[split_2013,]
split_1960 <- data$Year == 1960
df_1960 <- data[split_1960,]
# Attach the life-expectancy columns to each year's rows by country code.
merged_2013 <- merge(df_2013, df, by.x="Country.Code", by.y="Country_Code")
merged_1960 <- merge(df_1960, df, by.x="Country.Code", by.y="Country_Code")
library(ggplot2)
# BUG FIX: the 2013 panel plotted the 1960 life expectancy on the x axis.
qplot(data=merged_2013, x=Life_Expectancy_At_Birth_2013, y=Fertility.Rate, color=Region, size=I(5), shape=I(19))
qplot(data=merged_1960, x=Life_Expectancy_At_Birth_1960, y=Fertility.Rate, color=Region, size=I(5), shape=I(19))
|
/Demographic_analysis.r
|
no_license
|
issoufkindo/Demographics-analysis
|
R
| false
| false
| 8,361
|
r
|
#Part 1: (Plot for 1960 and 2013)
#Execute below code to generate three new vectors
Country_Code <- c("ABW","AFG","AGO","ALB","ARE","ARG","ARM","ATG","AUS","AUT","AZE","BDI","BEL","BEN","BFA","BGD","BGR","BHR","BHS","BIH","BLR","BLZ","BOL","BRA","BRB","BRN","BTN","BWA","CAF","CAN","CHE","CHL","CHN","CIV","CMR","COG","COL","COM","CPV","CRI","CUB","CYP","CZE","DEU","DJI","DNK","DOM","DZA","ECU","EGY","ERI","ESP","EST","ETH","FIN","FJI","FRA","FSM","GAB","GBR","GEO","GHA","GIN","GMB","GNB","GNQ","GRC","GRD","GTM","GUM","GUY","HKG","HND","HRV","HTI","HUN","IDN","IND","IRL","IRN","IRQ","ISL","ITA","JAM","JOR","JPN","KAZ","KEN","KGZ","KHM","KIR","KOR","KWT","LAO","LBN","LBR","LBY","LCA","LKA","LSO","LTU","LUX","LVA","MAC","MAR","MDA","MDG","MDV","MEX","MKD","MLI","MLT","MMR","MNE","MNG","MOZ","MRT","MUS","MWI","MYS","NAM","NCL","NER","NGA","NIC","NLD","NOR","NPL","NZL","OMN","PAK","PAN","PER","PHL","PNG","POL","PRI","PRT","PRY","PYF","QAT","ROU","RUS","RWA","SAU","SDN","SEN","SGP","SLB","SLE","SLV","SOM","SSD","STP","SUR","SVK","SVN","SWE","SWZ","SYR","TCD","TGO","THA","TJK","TKM","TLS","TON","TTO","TUN","TUR","TZA","UGA","UKR","URY","USA","UZB","VCT","VEN","VIR","VNM","VUT","WSM","YEM","ZAF","COD","ZMB","ZWE")
Life_Expectancy_At_Birth_1960 <- c(65.5693658536586,32.328512195122,32.9848292682927,62.2543658536585,52.2432195121951,65.2155365853659,65.8634634146342,61.7827317073171,70.8170731707317,68.5856097560976,60.836243902439,41.2360487804878,69.7019512195122,37.2782682926829,34.4779024390244,45.8293170731707,69.2475609756098,52.0893658536585,62.7290487804878,60.2762195121951,67.7080975609756,59.9613658536585,42.1183170731707,54.2054634146342,60.7380487804878,62.5003658536585,32.3593658536585,50.5477317073171,36.4826341463415,71.1331707317073,71.3134146341463,57.4582926829268,43.4658048780488,36.8724146341463,41.523756097561,48.5816341463415,56.716756097561,41.4424390243903,48.8564146341463,60.5761951219512,63.9046585365854,69.5939268292683,70.3487804878049,69.3129512195122,44.0212682926829,72.1765853658537,51.8452682926829,46.1351219512195,53.215,48.0137073170732,37.3629024390244,69.1092682926829,67.9059756097561,38.4057073170732,68.819756097561,55.9584878048781,69.8682926829268,57.5865853658537,39.5701219512195,71.1268292682927,63.4318536585366,45.8314634146342,34.8863902439024,32.0422195121951,37.8404390243902,36.7330487804878,68.1639024390244,59.8159268292683,45.5316341463415,61.2263414634146,60.2787317073171,66.9997073170732,46.2883170731707,64.6086585365854,42.1000975609756,68.0031707317073,48.6403170731707,41.1719512195122,69.691756097561,44.945512195122,48.0306829268293,73.4286585365854,69.1239024390244,64.1918292682927,52.6852682926829,67.6660975609756,58.3675853658537,46.3624146341463,56.1280731707317,41.2320243902439,49.2159756097561,53.0013170731707,60.3479512195122,43.2044634146342,63.2801219512195,34.7831707317073,42.6411951219512,57.303756097561,59.7471463414634,46.5107073170732,69.8473170731707,68.4463902439024,69.7868292682927,64.6609268292683,48.4466341463415,61.8127804878049,39.9746829268293,37.2686341463415,57.0656341463415,60.6228048780488,28.2116097560976,67.6017804878049,42.7363902439024,63.7056097560976,48.3688048780488,35.0037073170732,43.48309756
09756,58.7452195121951,37.7736341463415,59.4753414634146,46.8803902439024,58.6390243902439,35.5150487804878,37.1829512195122,46.9988292682927,73.3926829268293,73.549756097561,35.1708292682927,71.2365853658537,42.6670731707317,45.2904634146342,60.8817073170732,47.6915853658537,57.8119268292683,38.462243902439,67.6804878048781,68.7196097560976,62.8089268292683,63.7937073170732,56.3570487804878,61.2060731707317,65.6424390243903,66.0552926829268,42.2492926829268,45.6662682926829,48.1876341463415,38.206,65.6598292682927,49.3817073170732,30.3315365853659,49.9479268292683,36.9658780487805,31.6767073170732,50.4513658536585,59.6801219512195,69.9759268292683,68.9780487804878,73.0056097560976,44.2337804878049,52.768243902439,38.0161219512195,40.2728292682927,54.6993170731707,56.1535365853659,54.4586829268293,33.7271219512195,61.3645365853659,62.6575853658537,42.009756097561,45.3844146341463,43.6538780487805,43.9835609756098,68.2995365853659,67.8963902439025,69.7707317073171,58.8855365853659,57.7238780487805,59.2851219512195,63.7302195121951,59.0670243902439,46.4874878048781,49.969512195122,34.3638048780488,49.0362926829268,41.0180487804878,45.1098048780488,51.5424634146342)
Life_Expectancy_At_Birth_2013 <- c(75.3286585365854,60.0282682926829,51.8661707317073,77.537243902439,77.1956341463415,75.9860975609756,74.5613658536585,75.7786585365854,82.1975609756098,80.890243902439,70.6931463414634,56.2516097560976,80.3853658536585,59.3120243902439,58.2406341463415,71.245243902439,74.4658536585366,76.5459512195122,75.0735365853659,76.2769268292683,72.4707317073171,69.9820487804878,67.9134390243903,74.1224390243903,75.3339512195122,78.5466585365854,69.1029268292683,64.3608048780488,49.8798780487805,81.4011219512195,82.7487804878049,81.1979268292683,75.3530243902439,51.2084634146342,55.0418048780488,61.6663902439024,73.8097317073171,62.9321707317073,72.9723658536585,79.2252195121951,79.2563902439025,79.9497804878049,78.2780487804878,81.0439024390244,61.6864634146342,80.3024390243903,73.3199024390244,74.5689512195122,75.648512195122,70.9257804878049,63.1778780487805,82.4268292682927,76.4243902439025,63.4421951219512,80.8317073170732,69.9179268292683,81.9682926829268,68.9733902439024,63.8435853658537,80.9560975609756,74.079512195122,61.1420731707317,58.216487804878,59.9992682926829,54.8384146341464,57.2908292682927,80.6341463414634,73.1935609756098,71.4863902439024,78.872512195122,66.3100243902439,83.8317073170732,72.9428536585366,77.1268292682927,62.4011463414634,75.2682926829268,68.7046097560976,67.6604146341463,81.0439024390244,75.1259756097561,69.4716829268293,83.1170731707317,82.290243902439,73.4689268292683,73.9014146341463,83.3319512195122,70.45,60.9537804878049,70.2024390243902,67.7720487804878,65.7665853658537,81.459756097561,74.462756097561,65.687243902439,80.1288780487805,60.5203902439024,71.6576829268293,74.9127073170732,74.2402926829268,49.3314634146342,74.1634146341464,81.7975609756098,73.9804878048781,80.3391463414634,73.7090487804878,68.811512195122,64.6739024390244,76.6026097560976,76.5326585365854,75.1870487804878,57.5351951219512,80.7463414634146,65.6540975609756,74.7583658536585,69.0618048780488,54.641512195122,62.8027073170732,
74.46,61.466,74.567512195122,64.3438780487805,77.1219512195122,60.8281463414634,52.4421463414634,74.514756097561,81.1048780487805,81.4512195121951,69.222,81.4073170731707,76.8410487804878,65.9636829268293,77.4192195121951,74.2838536585366,68.1315609756097,62.4491707317073,76.8487804878049,78.7111951219512,80.3731707317073,72.7991707317073,76.3340731707317,78.4184878048781,74.4634146341463,71.0731707317073,63.3948292682927,74.1776341463415,63.1670487804878,65.878756097561,82.3463414634146,67.7189268292683,50.3631219512195,72.4981463414634,55.0230243902439,55.2209024390244,66.259512195122,70.99,76.2609756097561,80.2780487804878,81.7048780487805,48.9379268292683,74.7157804878049,51.1914878048781,59.1323658536585,74.2469268292683,69.4001707317073,65.4565609756098,67.5223658536585,72.6403414634147,70.3052926829268,73.6463414634147,75.1759512195122,64.2918292682927,57.7676829268293,71.159512195122,76.8361951219512,78.8414634146341,68.2275853658537,72.8108780487805,74.0744146341464,79.6243902439024,75.756487804878,71.669243902439,73.2503902439024,63.583512195122,56.7365853658537,58.2719268292683,59.2373658536585,55.633)
#(c) Kirill Eremenko, www.superdatascience.com
# Lookup table of life expectancy (1960 and 2013), keyed by country code.
df <- data.frame(Country_Code = Country_Code,
                 Life_Expectancy_At_Birth_1960 = Life_Expectancy_At_Birth_1960,
                 Life_Expectancy_At_Birth_2013 = Life_Expectancy_At_Birth_2013)
p <- '/Users/youssefkindo/Downloads/R_file/P2-Section5-Homework-Data.csv'
data <- read.csv(p)
# Split the long-format demographic data into one frame per year.
# BUG FIX: df_2013 was built as data[split_1960,] -- it referenced
# split_1960 before it was defined AND would have selected the 1960 rows.
split_2013 <- data$Year == 2013
df_2013 <- data[split_2013,]
split_1960 <- data$Year == 1960
df_1960 <- data[split_1960,]
# Attach the life-expectancy columns to each year's rows by country code.
merged_2013 <- merge(df_2013, df, by.x="Country.Code", by.y="Country_Code")
merged_1960 <- merge(df_1960, df, by.x="Country.Code", by.y="Country_Code")
library(ggplot2)
# BUG FIX: the 2013 panel plotted the 1960 life expectancy on the x axis.
qplot(data=merged_2013, x=Life_Expectancy_At_Birth_2013, y=Fertility.Rate, color=Region, size=I(5), shape=I(19))
qplot(data=merged_1960, x=Life_Expectancy_At_Birth_1960, y=Fertility.Rate, color=Region, size=I(5), shape=I(19))
|
#!/usr/bin/Rscript --no-save
# Compare the CERA20C and 20CR2c ensembles
# Print quality - A0 format
library(GSDF.TWCR)
library(GSDF.CERA20C)
library(GSDF.WeatherMap)
library(grid)
# Case to plot; the date is read from this list by the data-fetch calls.
# (was 'opt = list(...)' -- use '<-' for assignment)
opt <- list(
  year = 1987,
  month = 10,
  day = 16,
  hour = 0
)
Imagedir<-"."   # output directory for the pdf

# Base-map styling
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'land.colour',rgb(150,150,150,255,
                                                         maxColorValue=255))
Options<-WeatherMap.set.option(Options,'sea.colour',rgb(200,200,200,255,
                                                        maxColorValue=255))
Options<-WeatherMap.set.option(Options,'ice.colour',rgb(230,230,230,255,
                                                        maxColorValue=255))
# Rotated-pole view covering +-75 degrees of latitude.
# NOTE: 'range' shadows base::range() for the remainder of the script.
range<-75
Options<-WeatherMap.set.option(Options,'lat.min',range*-1)
Options<-WeatherMap.set.option(Options,'lat.max',range)
Options<-WeatherMap.set.option(Options,'lon.min',range*-1/sqrt(2))
Options<-WeatherMap.set.option(Options,'lon.max',range/sqrt(2))
Options<-WeatherMap.set.option(Options,'pole.lon',180)
Options<-WeatherMap.set.option(Options,'pole.lat',181)
Options$vp.lon.min<-Options$lon.min
Options$vp.lon.max<-Options$lon.max
Options<-WeatherMap.set.option(Options,'wrap.spherical',FALSE)  # was F
# Observation markers (gold dots)
Options<-WeatherMap.set.option(Options,'obs.size',1)
Options<-WeatherMap.set.option(Options,'obs.colour',rgb(255,215,0,255,
                                                        maxColorValue=255))
Options$ice.points<-1000000
# Pressure-contour styling (was '=' assignments -- use '<-')
Options$mslp.base    <- 101325 # Base value for anomalies
Options$mslp.range   <- 50000  # Anomaly for max contour
Options$mslp.step    <- 1000   # Smaller -> more contours
Options$mslp.tpscale <- 5      # Smaller -> contours less transparent
Options$mslp.lwd     <- 10.0
Options$precip.colour <- c(0,0.2,0)
contour.levels<-seq(Options$mslp.base-Options$mslp.range,
                    Options$mslp.base+Options$mslp.range,
                    Options$mslp.step)

# Load the 0.25 degree orography
orog<-GSDF.ncdf.load(sprintf("%s/orography/elev.0.25-deg.nc",Sys.getenv('SCRATCH')),'data',
                     lat.range=c(-90,90),lon.range=c(-180,360))
orog$data[orog$data<0]<-0 # sea-surface, not sea-bottom
is.na(orog$data[orog$data==0])<-TRUE
# 1-km orography (slow, needs lots of ram)
# NOTE: this if(TRUE) block replaces the 0.25-degree field loaded above;
# flip to FALSE to keep the coarser (faster) orography.
if(TRUE) {
  orog<-GSDF.ncdf.load(sprintf("%s/orography/ETOPO2v2c_ud_rl.nc",Sys.getenv('SCRATCH')),'z',
                       lat.range=c(-90,90),lon.range=c(-180,360))
  orog$data[orog$data<0]<-0 # sea-surface, not sea-bottom
  is.na(orog$data[orog$data==0])<-TRUE
}
TWCR.get.member.at.hour<-function(variable,year,month,day,hour,member=1,version='3.5.1') {
  # Fetch the full 20CR ensemble slice for one variable and time, then pull
  # out a single ensemble member as a 2d field.
  #   variable : name of the 20CR variable to load
  #   member   : ensemble member to extract (default 1)
  #   version  : 20CR dataset version string (default '3.5.1')
  ens.slice <- TWCR.get.members.slice.at.hour(variable, year, month, day,
                                              hour, version=version)
  one.member <- GSDF.select.from.1d(ens.slice, 'ensemble', member)
  gc()  # the full ensemble slice is large; reclaim it promptly
  return(one.member)
}
# Get the CERA20C grid data (same as ERA40)
# Only boxes with centre.lat<0 are kept -- presumably only the southern
# hemisphere is drawn; confirm against the rest of the script.
# NOTE(review): 'gc' shadows base::gc as a data object; later gc() calls
# still resolve to the function because R searches for a function object.
gc<-readRDS('ERA_grids/ERA40_grid.Rdata')
w<-which(gc$centre.lat<0)
gc<-gc[w,]
# And the 20CR grid data
gt<-readRDS('ERA_grids/TWCR_grid.Rdata')
w<-which(gt$centre.lat<0)
gt<-gt[w,]
# And the ERA5 grid - use for precip
g5<-readRDS('ERA_grids/ERA5_grid.Rdata')
w<-which(g5$centre.lat<0)
g5<-g5[w,]
# gp: ERA5 boxes shrunk 5% towards their centres (leaves a thin gap
# between neighbouring boxes when they are drawn).
gp<-g5
gp$min.lon<-gp$min.lon+(gp$centre.lon-gp$min.lon)*0.05
gp$min.lat<-gp$min.lat+(gp$centre.lat-gp$min.lat)*0.05
gp$max.lon<-gp$max.lon+(gp$centre.lon-gp$max.lon)*0.05
gp$max.lat<-gp$max.lat+(gp$centre.lat-gp$max.lat)*0.05
# Plot the orography - raster background, fast
draw.land.flat<-function(Options) {
  # Rasterise the global orography field ('orog') as a flat land mask:
  # cells with elevation > 0 get Options$land.colour, everything else is
  # fully transparent.  Drawn as one grid.raster call for speed.
  rotated <- GSDF.WeatherMap:::WeatherMap.rotate.pole(GSDF:::GSDF.pad.longitude(orog), Options)
  lon.vals <- rotated$dimensions[[GSDF.find.dimension(rotated, 'lon')]]$values
  cell.cols <- rep(rgb(0, 0, 0, 0), length(rotated$data))
  land.cells <- which(rotated$data > 0)
  cell.cols[land.cells] <- Options$land.colour
  img <- matrix(cell.cols, ncol = length(lon.vals), byrow = TRUE)
  # Width/centre in native (lon) units; the extra grid step covers the
  # half-cell overhang at each edge.
  img.width <- max(lon.vals) - min(lon.vals) + (lon.vals[2] - lon.vals[1])
  img.centre <- (max(lon.vals) + min(lon.vals)) / 2
  grid.raster(img,
              x = unit(img.centre, 'native'),
              y = unit(0, 'native'),
              width = unit(img.width, 'native'),
              height = unit(180, 'native'))
}
# Boxes that straddle the date line need to be rationalised and duplicated
polish.longitudes<-function(lats,lons) {
  # lats/lons: 4 x n arrays of grid-box corner coordinates (one column per
  # box).  A box whose consecutive corners differ by more than 200 degrees
  # of longitude straddles the date line: its eastern (>0) corners are
  # shifted down by 360 so the box is contiguous, and a duplicate shifted
  # up by 360 is appended so both sides of the wrap get drawn.
  # Returns list(lat=, lon=) with the duplicates appended as extra columns.
  wrap.gap <- 200
  straddlers <- which(abs(lons[1,]-lons[2,]) > wrap.gap |
                      abs(lons[2,]-lons[3,]) > wrap.gap |
                      abs(lons[3,]-lons[4,]) > wrap.gap |
                      abs(lons[4,]-lons[1,]) > wrap.gap)
  extra.lat <- array(dim=c(4, length(straddlers)))
  extra.lon <- array(dim=c(4, length(straddlers)))
  for (k in seq_along(straddlers)) {
    box <- straddlers[k]
    eastern <- which(lons[, box] > 0)
    lons[eastern, box] <- lons[eastern, box] - 360
    extra.lon[, k] <- lons[, box] + 360
    extra.lat[, k] <- lats[, box]
  }
  list(lat=cbind(lats, extra.lat),
       lon=cbind(lons, extra.lon))
}
# Sub-divide the grid for regions where higher resolution is necessary
# Quarter every input box: each box (4 corners, one column) becomes four
# boxes built from its corners, edge midpoints and centre point.
#   lats/lons: 4 x n arrays (or plain vectors of length 4n) of box corners;
#              corner order 1..4 = bottom-left, bottom-right, top-right,
#              top-left (as built in draw.by.rgg).
# Returns list(lats=, lons=): 4 x 4n arrays; columns k..k+3 are the
# bottom-left, bottom-right, top-right and top-left quarters of one box.
double.resolution<-function(lats,lons) {
    if(!is.array(lons)) lons<-array(data=lons,dim=c(4,length(lons)/4))
    if(!is.array(lats)) lats<-array(data=lats,dim=c(4,length(lats)/4))
    sub.lons<-array(dim=c(4,length(lons)))
    sub.lats<-array(dim=c(4,length(lats)))
    # sub.idx marks the first of each group of 4 output columns
    sub.idx<-seq(1,length(lons),4)
   # bottom left
    # corners: original BL, bottom-edge midpoint, box centre, left-edge midpoint
    sub.lons[1,sub.idx]<-lons[1,]
    sub.lons[2,sub.idx]<-(lons[1,]+lons[2,])/2
    sub.lons[3,sub.idx]<-(lons[1,]+lons[2,]+
                          lons[3,]+lons[4,])/4
    sub.lons[4,sub.idx]<-(lons[1,]+lons[4,])/2
    sub.lats[1,sub.idx]<-lats[1,]
    sub.lats[2,sub.idx]<-(lats[1,]+lats[2,])/2
    sub.lats[3,sub.idx]<-(lats[1,]+lats[2,]+
                          lats[3,]+lats[4,])/4
    sub.lats[4,sub.idx]<-(lats[1,]+lats[4,])/2
   # bottom right
    # reuses the shared midpoints/centre already computed for the BL quarter
    sub.lons[1,sub.idx+1]<-sub.lons[2,sub.idx]
    sub.lons[2,sub.idx+1]<-lons[2,]
    sub.lons[3,sub.idx+1]<-(lons[2,]+lons[3,])/2
    sub.lons[4,sub.idx+1]<-sub.lons[3,sub.idx]
    sub.lats[1,sub.idx+1]<-sub.lats[2,sub.idx]
    sub.lats[2,sub.idx+1]<-lats[2,]
    sub.lats[3,sub.idx+1]<-(lats[2,]+lats[3,])/2
    sub.lats[4,sub.idx+1]<-sub.lats[3,sub.idx]
   # Top right
    sub.lons[1,sub.idx+2]<-sub.lons[3,sub.idx]
    sub.lons[2,sub.idx+2]<-sub.lons[3,sub.idx+1]
    sub.lons[3,sub.idx+2]<-lons[3,]
    sub.lons[4,sub.idx+2]<-(lons[3,]+lons[4,])/2
    sub.lats[1,sub.idx+2]<-sub.lats[3,sub.idx]
    sub.lats[2,sub.idx+2]<-sub.lats[3,sub.idx+1]
    sub.lats[3,sub.idx+2]<-lats[3,]
    sub.lats[4,sub.idx+2]<-(lats[3,]+lats[4,])/2
   # Top left
    sub.lons[1,sub.idx+3]<-sub.lons[4,sub.idx]
    sub.lons[2,sub.idx+3]<-sub.lons[3,sub.idx]
    sub.lons[3,sub.idx+3]<-sub.lons[4,sub.idx+2]
    sub.lons[4,sub.idx+3]<-lons[4,]
    sub.lats[1,sub.idx+3]<-sub.lats[4,sub.idx]
    sub.lats[2,sub.idx+3]<-sub.lats[3,sub.idx]
    sub.lats[3,sub.idx+3]<-sub.lats[4,sub.idx+2]
    sub.lats[4,sub.idx+3]<-lats[4,]
    return(list(lats=sub.lats,lons=sub.lons))
}
# Draw a field, point by point - using a reduced gaussian grid
#
#   field: GSDF field to colour by (interpolated to each box centre).
#   grid:  table of grid boxes (centre/min/max lat and lon per box).
#   colour.function: maps interpolated values to colours (NA = skip box).
#   selection.function(lat, lon): TRUE for rotated-pole coordinates inside
#     the region this half of the figure shows.
# Boxes wholly inside the region are drawn directly; boxes cut by the
# region boundary are recursively quartered (double.resolution) and only
# the fragments wholly inside are drawn, stopping once fragments are
# smaller than ~0.1 degrees.
draw.by.rgg<-function(field,grid,colour.function,selection.function,Options,
                      grid.colour=rgb(0.8,0.8,0.8,0),grid.lwd=0,grid.lty=1) {
  field<-GSDF:::GSDF.pad.longitude(field) # Extras for periodic boundary conditions
  value.points<-GSDF.interpolate.2d(field,grid$centre.lon,grid$centre.lat)
  col<-colour.function(value.points)
  # One polygon batch per distinct colour (far fewer grid calls than per-box)
  for(group in unique(na.omit(col))) {
      w<-which(col==group)
      # Corner coordinates (4 x n) of every box with this colour;
      # corner order: bottom-left, bottom-right, top-right, top-left
      vert.lon<-array(dim=c(4,length(w)))
      vert.lon[1,]<-grid$min.lon[w]
      vert.lon[2,]<-grid$max.lon[w]
      vert.lon[3,]<-grid$max.lon[w]
      vert.lon[4,]<-grid$min.lon[w]
      vert.lat<-array(dim=c(4,length(w)))
      vert.lat[1,]<-grid$min.lat[w]
      vert.lat[2,]<-grid$min.lat[w]
      vert.lat[3,]<-grid$max.lat[w]
      vert.lat[4,]<-grid$max.lat[w]
      # Nudge points exactly opposite the rotated pole (rotation is
      # ill-defined there)
      w<-which(vert.lon-Options$pole.lon==180)
      if(length(w)>0) vert.lon[w]<-vert.lon[w]+0.0001
      # Rotate each corner into the plot's pole-rotated coordinates
      for(v in seq(1,4)) {
        p.r<-GSDF.ll.to.rg(vert.lat[v,],vert.lon[v,],Options$pole.lat,Options$pole.lon)
        vert.lat[v,]<-p.r$lat
        vert.lon[v,]<-p.r$lon
      }
      # Wrap longitudes into the viewport range
      w<-which(vert.lon>Options$vp.lon.max)
      if(length(w)>0) vert.lon[w]<-vert.lon[w]-360
      w<-which(vert.lon<Options$vp.lon.min)
      if(length(w)>0) vert.lon[w]<-vert.lon[w]+360
      # Fix up (and duplicate) boxes that straddle the date line
      pl<-polish.longitudes(vert.lat,vert.lon)
      vert.lat<-pl$lat
      vert.lon<-pl$lon
      inside<-array(data=selection.function(vert.lat,vert.lon),
                    dim=c(4,length(vert.lat[1,])))
      # Fill in the fiddly area near the boundary
      # (boxes with some, but not all, corners inside the region)
      w<-which((inside[1,] | inside[2,] | inside[3,] | inside[4,]) &
               !(inside[1,] & inside[2,] & inside[3,] & inside[4,]))
      boundary.lat<-vert.lat[,w]
      boundary.lon<-vert.lon[,w]
      while(length(boundary.lon)>0) {
          # Quarter every straddling box; draw the quarters now wholly
          # inside, keep subdividing the rest
          bp<-double.resolution(boundary.lat,boundary.lon)
          boundary.lat<-bp$lats
          boundary.lon<-bp$lons
          gp<-gpar(col=rgb(0.8,0.8,0.8,0),fill=group,lwd=0)
          inside<-array(data=selection.function(boundary.lat,boundary.lon),
                        dim=c(4,length(boundary.lat[1,])))
          w<-which(inside[1,] & inside[2,] & inside[3,] & inside[4,])
          if(length(w)>0) {
              grid.polygon(x=unit(as.vector(boundary.lon[,w]),'native'),
                           y=unit(as.vector(boundary.lat[,w]),'native'),
                           id.lengths=rep(4,length(w)),
                           gp=gp)
          }
          # Fragments still cut by the boundary go round again
          w<-which((inside[1,] | inside[2,] | inside[3,] | inside[4,]) &
                   !(inside[1,] & inside[2,] & inside[3,] & inside[4,]))
          if(length(w)==0) break
          boundary.lat<-boundary.lat[,w]
          boundary.lon<-boundary.lon[,w]
          # Single-column subsets drop to vectors; restore the 4 x n shape
          if(!is.array(boundary.lon)) boundary.lon<-array(data=boundary.lon,
                                                          dim=c(4,length(boundary.lon)/4))
          if(!is.array(boundary.lat)) boundary.lat<-array(data=boundary.lat,
                                                          dim=c(4,length(boundary.lat)/4))
          # Stop subdividing once fragments get below ~0.1 degrees across
          d.lat<-abs(boundary.lat[2,]-boundary.lat[1,])+
                 abs(boundary.lat[3,]-boundary.lat[2,])+
                 abs(boundary.lat[4,]-boundary.lat[3,])+
                 abs(boundary.lat[4,]-boundary.lat[1,])
          d.lon<-abs(boundary.lon[2,]-boundary.lon[1,])+
                 abs(boundary.lon[3,]-boundary.lon[2,])+
                 abs(boundary.lon[4,]-boundary.lon[3,])+
                 abs(boundary.lon[4,]-boundary.lon[1,])
          w<-which(d.lat>0.1 | d.lon>0.1)
          if(length(w)==0) break
          boundary.lat<-boundary.lat[,w]
          boundary.lon<-boundary.lon[,w]
      }
      # add all the normal points inside the boundaries
      inside<-array(data=selection.function(vert.lat,vert.lon),
                    dim=c(4,length(vert.lat[1,])))
      w<-which(inside[1,] & inside[2,] & inside[3,] & inside[4,])
      if(length(w)==0) next
      vert.lat<-vert.lat[,w]
      vert.lon<-vert.lon[,w]
      if(!is.array(vert.lat)) vert.lat<-array(data=vert.lat,dim=c(4,length(vert.lat)/4))
      if(!is.array(vert.lon)) vert.lon<-array(data=vert.lon,dim=c(4,length(vert.lon)/4))
      gp<-gpar(col=grid.colour,fill=group,lwd=grid.lwd,lty=grid.lty)
      #gp<-gpar(col=rgb(0.8,0.8,0.8,0),fill=group,lwd=0)
      grid.polygon(x=unit(as.vector(vert.lon),'native'),
                   y=unit(as.vector(vert.lat),'native'),
                   id.lengths=rep(4,dim(vert.lat)[2]),
                   gp=gp)
  }
}
# Subdivide a segmented line into shorter segments
# Double the resolution of one contour segment (a list with x, y and level,
# as returned by contourLines): the midpoint of every pair of consecutive
# vertices is inserted between them.
# BUG FIX: the result list was built as list(x=..., x=...) -- the second
# element was meant to be 'y'; the duplicate left a stray all-NA element
# named 'x' in every returned segment.
upline<-function(line) {
   nseg<-length(line$x)
   result<-list(x=rep(NA,nseg*2-1),
                y=rep(NA,nseg*2-1))
   sq<-seq(1,nseg*2-1,2)       # odd slots: the original vertices
   result$x[sq]<-line$x
   result$y[sq]<-line$y
   sq<-seq(2,nseg*2-2,2)       # even slots: midpoints of their neighbours
   result$x[sq]<-(result$x[sq-1]+result$x[sq+1])/2
   result$y[sq]<-(result$y[sq-1]+result$y[sq+1])/2
   result$level<-line$level
   return(result)
}
draw.pressure<-function(mslp,selection.function,Options,colour=c(0,0,0,1)) {
  # Contour the MSLP field inside the region picked out by selection.function.
  #   mslp: GSDF mean-sea-level-pressure field.
  #   selection.function(lat, lon): TRUE for points that should be drawn;
  #     contour points outside the region are set NA so those segments
  #     are not plotted.
  #   colour: RGBA (0-1) components for the contour lines.
  # Contours at or below Options$mslp.base are solid (lty 1), those above
  # are dashed (lty 5).
  M<-GSDF.WeatherMap:::WeatherMap.rotate.pole(mslp,Options)
  M<-GSDF:::GSDF.pad.longitude(M) # Extras for periodic boundary conditions
  lats<-M$dimensions[[GSDF.find.dimension(M,'lat')]]$values
  longs<-M$dimensions[[GSDF.find.dimension(M,'lon')]]$values
  # contourLines needs increasing coordinates within the viewport range,
  # so reorder/wrap and regrid when necessary
  maxl<-Options$vp.lon.max+2
  if(lats[2]<lats[1] || longs[2]<longs[1] || max(longs) > maxl ) {
    if(lats[2]<lats[1]) lats<-rev(lats)
    if(longs[2]<longs[1]) longs<-rev(longs)
    longs[longs>maxl]<-longs[longs>maxl]-(maxl*2)
    longs<-sort(longs)
    M2<-M
    M2$dimensions[[GSDF.find.dimension(M,'lat')]]$values<-lats
    M2$dimensions[[GSDF.find.dimension(M,'lon')]]$values<-longs
    M<-GSDF.regrid.2d(M,M2)
  }
  z<-matrix(data=M$data,nrow=length(longs),ncol=length(lats))
  lines<-contourLines(longs,lats,z,
                      levels=contour.levels)
  # BUG FIX: was 'if(!is.na(lines) && length(lines)>0)'.  contourLines()
  # returns a list, so is.na() yields a vector (logical(0) when empty) and
  # the '&&' condition itself fails; the length check is the correct guard.
  if(length(lines)>0) {
    for(i in seq_along(lines)) {
       #lines[[i]]<-upline(upline(lines[[i]]))
       # tp is computed but currently unused (transparency is not applied
       # to the line colour below)
       tp<-min(1,(abs(lines[[i]]$level-Options$mslp.base)/
                  Options$mslp.tpscale))
       lt<-5
       lwd<-1
       if(lines[[i]]$level<=Options$mslp.base) {
           lt<-1
           lwd<-1
       }
       gp<-gpar(col=rgb(colour[1],colour[2],colour[3],colour[4]),
                lwd=Options$mslp.lwd*lwd,lty=lt)
       res<-tryCatch({
           # Blank out contour points outside the selected region
           for(p in seq_along(lines[[i]]$x)) {
              if(!selection.function(lines[[i]]$y[p],lines[[i]]$x[p])) {
                 is.na(lines[[i]]$y[p])<-TRUE
                 is.na(lines[[i]]$x[p])<-TRUE
              }
           }
           grid.xspline(x=unit(lines[[i]]$x,'native'),
                        y=unit(lines[[i]]$y,'native'),
                        gp=gp)
       }, warning = function(w) {
           print(w)
       }, error = function(e) {
          print(e)
       }, finally = {
          # Do nothing
       })
    }
  }
}
draw.precipitation<-function(prate,value.function,selection.function,Options) {
  # Draw precipitation as short slanted green streaks whose density/length
  # follows the precipitation rate.
  #   prate: GSDF precipitation-rate field.
  #   value.function: maps interpolated rates to an intensity in (0,1] or
  #     NA where nothing should be drawn (e.g. set.precip.value).
  #   selection.function(lat, lon): TRUE inside the region to draw.
  # Stochastic: uses runif() for streak length and position jitter.
  prate<-GSDF.WeatherMap:::WeatherMap.rotate.pole(prate,Options)
  res<-0.2   # candidate-streak spacing in degrees
  # Regular lat/lon lattice of candidate streak positions
  lon.points<-seq(-180,180,res)+Options$vp.lon.min+180
  lat.points<-seq(-90,90,res)
  lon.ex<-sort(rep(lon.points,length(lat.points)))
  lat.ex<-rep(lat.points,length(lon.points))
  value.points<-GSDF.interpolate.2d(prate,lon.ex,lat.ex)
  value<-value.function(value.points)
  # Drop lattice points where value.function declined to draw
  w<-which(is.na(value))
  if(length(w)>0) {
      lon.ex<-lon.ex[-w]
      lat.ex<-lat.ex[-w]
      value<-value[-w]
  }
  # Random streak half-length scaled by intensity, plus positional jitter
  # of up to half the lattice spacing
  scale<-runif(length(lon.ex),min=0.03,max=0.07)*4*value
  lat.jitter<-runif(length(lon.ex),min=res*-1/2,max=res/2)
  lon.jitter<-runif(length(lon.ex),min=res*-1/2,max=res/2)
  # Each streak runs from (x+s/2, y+s) to (x-s/2, y-s): a slanted dash
  vert.lat<-array(dim=c(2,length(lat.ex)))
  vert.lat[1,]<-lat.ex+lat.jitter+scale
  vert.lat[2,]<-lat.ex+lat.jitter-scale
  vert.lon<-array(dim=c(2,length(lon.ex)))
  vert.lon[1,]<-lon.ex+lon.jitter+scale/2
  vert.lon[2,]<-lon.ex+lon.jitter-scale/2
  # Keep only streaks with both endpoints inside the region
  inside<-array(data=selection.function(vert.lat,vert.lon),
                dim=c(2,length(vert.lat[1,])))
  w<-which(inside[1,] & inside[2,])
  vert.lat<-vert.lat[,w]
  vert.lon<-vert.lon[,w]
  gp<-gpar(col=rgb(0,0.2,0,1,1),fill=rgb(0,0.2,0,1,1),lwd=0.5)
  grid.polyline(x=unit(as.vector(vert.lon),'native'),
                y=unit(as.vector(vert.lat),'native'),
                id.lengths=rep(2,dim(vert.lat)[2]),
                gp=gp)
}
draw.streamlines<-function(s,selection.function,Options) {
  # Draw pre-computed wind streamlines as splines, keeping only streamlines
  # whose three control points all fall inside the selected region.
  #   s: list with matrices 'x' and 'y' (one row per streamline, one column
  #      per control point) and a 'shape' element for grid.xspline.
  line.style <- set.streamline.GC(Options)
  ok <- array(data = selection.function(s[['y']], s[['x']]),
              dim = c(length(s[['x']][, 1]), 3))
  wanted <- which(ok[, 1] & ok[, 2] & ok[, 3])
  xs <- s[['x']][wanted, ]
  ys <- s[['y']][wanted, ]
  # t() interleaves the control points streamline-by-streamline, matching
  # the id.lengths grouping
  grid.xspline(x = unit(as.vector(t(xs)), 'native'),
               y = unit(as.vector(t(ys)), 'native'),
               id.lengths = rep(Options$wind.vector.points, length(xs[, 1])),
               shape = s[['shape']],
               arrow = Options$wind.vector.arrow,
               gp = line.style)
}
# Choose TWCR points to plot
select.TWCR<-function(lat,lon) {
  # Select the 20CR side of the split figure: points left of centre.lon and
  # above a shallow diagonal, or above centre.lat and left of a steep
  # diagonal.  centre.lat and centre.lon are globals defined elsewhere in
  # the script (not visible in this chunk) -- confirm before reuse.
  keep <- rep(FALSE, length(lat))
  diag.lat <- centre.lat + (lon - centre.lon) * 0.1
  diag.lon <- centre.lon + (lat - centre.lat) * 2
  chosen <- which((lon < centre.lon & lat > diag.lat) |
                  (lat > centre.lat & lon < diag.lon))
  if (length(chosen) > 0) keep[chosen] <- TRUE
  return(keep)
}
# Intensity function for precipitation
set.precip.value<-function(rate) {
  # Map precipitation rates to a stochastic drawing intensity.
  # Rates are sqrt-compressed, scaled to (0,1] against a maximum threshold,
  # and each point is then kept with probability equal to its intensity
  # (one runif draw per input).  Returns the intensity for kept points and
  # NA for dropped or below-threshold points.
  lo <- 0.0025
  hi <- 0.03
  amplitude <- sqrt(pmax(0, rate))
  intensity <- pmax(0, pmin(1, amplitude / hi))
  shown <- rep(NA, length(amplitude))
  draw <- runif(length(amplitude), 0, 1)
  kept <- which(draw < intensity & amplitude > lo)
  if (length(kept) > 0) shown[kept] <- intensity[kept]
  return(shown)
}
# Colour function for precipitation
set.precip.colour<-function(rate) {
  # Map precipitation rates to translucent dark-green colours: opacity is
  # the sqrt-compressed rate scaled against a maximum threshold and capped
  # at 0.9.  Rates below the minimum threshold come back as NA (not drawn);
  # NA inputs are treated as zero rate via the na.rm pmax/pmin.
  lo <- 0.0025
  hi <- 0.03
  amplitude <- sqrt(pmax(0, rate, na.rm=TRUE))
  opacity <- pmax(0, pmin(0.9, amplitude / hi, na.rm=TRUE), na.rm=TRUE)
  opacity[is.na(opacity)] <- 0
  shade <- rgb(0, 0.2, 0, opacity)
  shade[which(amplitude < lo)] <- NA
  return(shade)
}
# Colour function for t2m
set.t2m.colour<-function(temperature,Trange=5) {
  # Map 2m-temperature anomalies to colours: translucent red for warm
  # (>= 0) anomalies, translucent blue for cold ones.  Alpha is
  # sqrt(|anomaly|) clamped to [0, Trange], normalised, rounded to 2 d.p.
  # and halved (so the maximum opacity is 0.5).  NA input stays NA.
  shade <- rep(NA, length(temperature))
  alpha.of <- function(x) {
    round(pmax(0, pmin(Trange, sqrt(x))) / Trange, 2) * 0.5
  }
  warm <- which(temperature >= 0)
  if (length(warm) > 0) {
    shade[warm] <- rgb(1, 0, 0, alpha.of(temperature[warm]))
  }
  cold <- which(temperature < 0)
  if (length(cold) > 0) {
    shade[cold] <- rgb(0, 0, 1, alpha.of(-temperature[cold]))
  }
  return(shade)
}
#Colour function for ice
# Colour for sea-ice cover: white with alpha equal to the ice fraction
# (rounded to 2 d.p.); cells with no ice get NA so nothing is drawn.
set.ice.colour<-function(ice) {
  shades<-rep(NA,length(ice))
  covered<-which(ice>0)
  if(length(covered)>0) {
    shades[covered]<-rgb(1,1,1,round(ice[covered],2))
  }
  return(shades)
}
# Want to plot obs coverage rather than observations - make pseudo
# observations indicating coverage.
# Plot observation COVERAGE (one marker per occupied 1-degree cell)
# rather than every individual observation.
# obs: data frame with Latitude/Longitude columns; Options: WeatherMap
# options (pole rotation, viewport longitude range, marker size/colour).
plot.obs.coverage<-function(obs,Options) {
  # Rotate obs positions if the plot uses a rotated pole
  if(Options$pole.lon!=0 || Options$pole.lat!=90) {
    l2<-GSDF.ll.to.rg(obs$Latitude,obs$Longitude,Options$pole.lat,Options$pole.lon)
    obs$Longitude<-l2$lon
    obs$Latitude<-l2$lat
  }
  if(length(obs$Latitude)<1) return()
  # Wrap longitudes into the viewport range (vp.* overrides, if set)
  lon.m<-Options$lon.min
  if(!is.null(Options$vp.lon.min)) lon.m<-Options$vp.lon.min
  w<-which(obs$Longitude<lon.m)
  if(length(w)>0) obs$Longitude[w]<-obs$Longitude[w]+360
  lon.m<-Options$lon.max
  if(!is.null(Options$vp.lon.max)) lon.m<-Options$vp.lon.max
  w<-which(obs$Longitude>lon.m)
  if(length(w)>0) obs$Longitude[w]<-obs$Longitude[w]-360
  # Deduplicate to one observation per grid cell.
  # NOTE(review): the '*1' multiplier gives 1-degree bins, not the
  # ".5/degree" the original comment claimed -- confirm intent.
  idx<-sprintf("%4d%4d",as.integer(obs$Latitude*1),as.integer(obs$Longitude*1))
  w<-which(duplicated(idx))
  if(length(w)>0) obs<-obs[-w,]
  gp<-gpar(col=Options$obs.colour,fill=Options$obs.colour)
  grid.points(x=unit(obs$Longitude,'native'),
              y=unit(obs$Latitude,'native'),
              size=unit(Options$obs.size,'native'),
              pch=21,gp=gp)
}
# Colour function for streamlines
# Graphical parameters for wind streamlines: opaque mid-grey, width 1.5.
# 'Options' is accepted for interface consistency but is not used.
set.streamline.GC<-function(Options) {
  grey<-rgb(125,125,125,255,maxColorValue=255)
  gpar(col=grey,fill=grey,lwd=1.5)
}
# Make the actual plot
# Bug fix: the old sprintf() passed undefined variables (year, month, day,
# hour -- only opt$year etc. exist) as extra arguments, which errors at
# evaluation time ('object "year" not found'); the format string has no
# placeholders anyway, so the filename is a constant.
image.name<-"CERA_20CR.pdf"
ifile.name<-sprintf("%s/%s",Imagedir,image.name)
pdf(ifile.name,
width=46.8,
height=33.1,
bg='white',
family='Helvetica',
pointsize=24)
# CERA half
pushViewport(viewport(x=unit(1/4,'npc'),y=unit(1/2,'npc'),
width=unit(0.5-0.01,'npc'),
height=unit(1.0-0.01,'npc')))
base.gp<-gpar(fontfamily='Helvetica',fontface='bold',col='black')
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
grid.polygon(x=c(0,1,1,0),y=c(0,0,1,1),gp=gpar(col=Options$sea.colour,
fill=Options$sea.colour))
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0,gp=base.gp,clip='on'))
icec<-CERA20C.get.slice.at.hour('icec',opt$year,opt$month,opt$day,opt$hour)
draw.by.rgg(icec,g5,set.ice.colour,function(lat,lon) return(rep(TRUE,length(lat))),Options)
draw.land.flat(Options)
for(member in seq(0,9)) {
select.CERA20C<-function(lat,lon) {
result<-rep(TRUE,length(lat))
return(result)
}
mslp<-CERA20C.get.slice.at.hour('prmsl',opt$year,opt$month,opt$day,opt$hour,member=member)
draw.pressure(mslp,select.CERA20C,Options,colour=c(0,0,1,0.1))
}
popViewport()
popViewport() # End of CERA half
# 20CR half
pushViewport(viewport(x=unit(3/4,'npc'),y=unit(1/2,'npc'),
width=unit(0.5-0.01,'npc'),
height=unit(1.0-0.01,'npc')))
base.gp<-gpar(fontfamily='Helvetica',fontface='bold',col='black')
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
grid.polygon(x=c(0,1,1,0),y=c(0,0,1,1),gp=gpar(col=Options$sea.colour,
fill=Options$sea.colour))
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
extension=0,gp=base.gp,clip='on'))
icec<-TWCR.get.slice.at.hour('icec',opt$year,opt$month,opt$day,opt$hour,version='3.5.1')
draw.by.rgg(icec,g5,set.ice.colour,function(lat,lon) return(rep(TRUE,length(lat))),Options)
draw.land.flat(Options)
for(member in seq(1,56)) {
select.TWCR<-function(lat,lon) {
result<-rep(TRUE,length(lat))
return(result)
}
mslp<-TWCR.get.member.at.hour('prmsl',opt$year,opt$month,opt$day,opt$hour,member=member,version='3.5.1')
draw.pressure(mslp,select.TWCR,Options,colour=c(0,0,1,0.01))
}
obs<-TWCR.get.obs(opt$year,opt$month,opt$day,opt$hour,version='3.5.1')
plot.obs.coverage(obs,Options)
popViewport()
popViewport() # End of 20CR half
dev.off()
|
/CERA_v_20CR/spaghetti.R
|
no_license
|
philip-brohan/Posters
|
R
| false
| false
| 22,744
|
r
|
#!/usr/bin/Rscript --no-save
# Compare the CERA20C and 20CR2c ensembles
# Print quality - A0 format
library(GSDF.TWCR)
library(GSDF.CERA20C)
library(GSDF.WeatherMap)
library(grid)
opt = list(
year = 1987,
month = 10,
day = 16,
hour = 0
)
Imagedir<-"."
Options<-WeatherMap.set.option(NULL)
Options<-WeatherMap.set.option(Options,'land.colour',rgb(150,150,150,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'sea.colour',rgb(200,200,200,255,
maxColorValue=255))
Options<-WeatherMap.set.option(Options,'ice.colour',rgb(230,230,230,255,
maxColorValue=255))
range<-75
Options<-WeatherMap.set.option(Options,'lat.min',range*-1)
Options<-WeatherMap.set.option(Options,'lat.max',range)
Options<-WeatherMap.set.option(Options,'lon.min',range*-1/sqrt(2))
Options<-WeatherMap.set.option(Options,'lon.max',range/sqrt(2))
Options<-WeatherMap.set.option(Options,'pole.lon',180)
Options<-WeatherMap.set.option(Options,'pole.lat',181)
Options$vp.lon.min<-Options$lon.min
Options$vp.lon.max<-Options$lon.max
Options<-WeatherMap.set.option(Options,'wrap.spherical',F)
Options<-WeatherMap.set.option(Options,'obs.size',1)
Options<-WeatherMap.set.option(Options,'obs.colour',rgb(255,215,0,255,
maxColorValue=255))
Options$ice.points<-1000000
Options$mslp.base=101325 # Base value for anomalies
Options$mslp.range=50000 # Anomaly for max contour
Options$mslp.step=1000 # Smaller -> more contours
Options$mslp.tpscale=5 # Smaller -> contours less transparent
Options$mslp.lwd=10.0
Options$precip.colour=c(0,0.2,0)
contour.levels<-seq(Options$mslp.base-Options$mslp.range,
Options$mslp.base+Options$mslp.range,
Options$mslp.step)
# Load the 0.25 degree orography
orog<-GSDF.ncdf.load(sprintf("%s/orography/elev.0.25-deg.nc",Sys.getenv('SCRATCH')),'data',
lat.range=c(-90,90),lon.range=c(-180,360))
orog$data[orog$data<0]<-0 # sea-surface, not sea-bottom
is.na(orog$data[orog$data==0])<-TRUE
# 1-km orography (slow, needs lots of ram)
if(TRUE) {
orog<-GSDF.ncdf.load(sprintf("%s/orography/ETOPO2v2c_ud_rl.nc",Sys.getenv('SCRATCH')),'z',
lat.range=c(-90,90),lon.range=c(-180,360))
orog$data[orog$data<0]<-0 # sea-surface, not sea-bottom
is.na(orog$data[orog$data==0])<-TRUE
}
# Fetch a single ensemble member of a 20CR field at a given hour.
# Loads the full multi-member slice and selects the requested member
# along the 'ensemble' dimension.
TWCR.get.member.at.hour<-function(variable,year,month,day,hour,member=1,version='3.5.1') {
  slice.all<-TWCR.get.members.slice.at.hour(variable,year,month,day,
                                            hour,version=version)
  slice.one<-GSDF.select.from.1d(slice.all,'ensemble',member)
  gc()  # release the (large) full-ensemble slice promptly
  slice.one
}
# Get the CERA20C grid data (same as ERA40)
# NOTE(review): 'gc' masks base::gc() in the global environment; gc()
# *calls* elsewhere still resolve to the function, but the name is
# unfortunate.
gc<-readRDS('ERA_grids/ERA40_grid.Rdata')
# Southern hemisphere cells only (this plot is polar, pole.lat = 181)
w<-which(gc$centre.lat<0)
gc<-gc[w,]
# And the 20CR grid data
gt<-readRDS('ERA_grids/TWCR_grid.Rdata')
w<-which(gt$centre.lat<0)
gt<-gt[w,]
# And the ERA5 grid - use for precip
g5<-readRDS('ERA_grids/ERA5_grid.Rdata')
w<-which(g5$centre.lat<0)
g5<-g5[w,]
# Copy of the ERA5 grid with each box shrunk 5% towards its centre,
# leaving hairline gaps between adjacent boxes when drawn
gp<-g5
gp$min.lon<-gp$min.lon+(gp$centre.lon-gp$min.lon)*0.05
gp$min.lat<-gp$min.lat+(gp$centre.lat-gp$min.lat)*0.05
gp$max.lon<-gp$max.lon+(gp$centre.lon-gp$max.lon)*0.05
gp$max.lat<-gp$max.lat+(gp$centre.lat-gp$max.lat)*0.05
# Plot the orography - raster background, fast
# Draw the land mask as a single raster image (fast, no polygons).
# Uses the global 'orog' orography field: cells with elevation > 0 are
# painted in Options$land.colour, everything else stays transparent.
draw.land.flat<-function(Options) {
  # Rotate the orography to the plot pole; pad longitudes for wrap-around
  land<-GSDF.WeatherMap:::WeatherMap.rotate.pole(GSDF:::GSDF.pad.longitude(orog),Options)
  lons<-land$dimensions[[GSDF.find.dimension(land,'lon')]]$values
  base.colour<-Options$land.colour
  # Fully transparent by default; land points get the land colour
  plot.colours<-rep(rgb(0,0,0,0),length(land$data))
  w<-which(land$data>0)
  plot.colours[w]<-base.colour
  # One matrix row per latitude band -- assumes land$data is stored
  # latitude-major; TODO confirm against GSDF's data layout
  m<-matrix(plot.colours, ncol=length(lons), byrow=TRUE)
  # Raster extent: full longitude span plus one cell, centred on the data
  r.w<-max(lons)-min(lons)+(lons[2]-lons[1])
  r.c<-(max(lons)+min(lons))/2
  # NOTE(review): the bare ',,' leaves grid.raster's second positional
  # argument missing, so its default is used -- harmless but worth tidying
  grid.raster(m,,
              x=unit(r.c,'native'),
              y=unit(0,'native'),
              width=unit(r.w,'native'),
              height=unit(180,'native'))
}
# Boxes that straddle the date line need to be rationalised and duplicated
# Normalise grid boxes that straddle the dateline: shift the positive
# longitudes of each straddling box down by 360 and append a duplicate
# box shifted up by 360, so both copies draw correctly on a continuous
# longitude axis. Inputs/outputs are 4 x n vertex arrays.
polish.longitudes<-function(lats,lons) {
  # A box straddles the dateline when any pair of adjacent vertices is
  # more than 200 degrees of longitude apart.
  gap<-function(a,b) abs(lons[a,]-lons[b,])>200
  straddlers<-which(gap(1,2) | gap(2,3) | gap(3,4) | gap(4,1))
  extra.lat<-array(dim=c(4,length(straddlers)))
  extra.lon<-array(dim=c(4,length(straddlers)))
  for(k in seq_along(straddlers)) {
    col<-straddlers[k]
    pos<-which(lons[,col]>0)
    lons[pos,col]<-lons[pos,col]-360
    extra.lon[,k]<-lons[,col]+360
    extra.lat[,k]<-lats[,col]
  }
  list(lat=cbind(lats,extra.lat),
       lon=cbind(lons,extra.lon))
}
# Sub-divide the grid for regions where higher resolution is necessary
# Split every 4-vertex grid box into four quarter boxes by inserting
# edge midpoints and the box centre. Used to refine boxes lying on a
# selection boundary until they are small enough to classify cleanly.
# Inputs are 4 x n arrays of vertex latitudes/longitudes (vertex order:
# bottom-left, bottom-right, top-right, top-left); plain vectors of
# length 4n are reshaped. Returns a list with 4 x 4n arrays -- note the
# element names are 'lats'/'lons' (plural), unlike polish.longitudes().
double.resolution<-function(lats,lons) {
  if(!is.array(lons)) lons<-array(data=lons,dim=c(4,length(lons)/4))
  if(!is.array(lats)) lats<-array(data=lats,dim=c(4,length(lats)/4))
  sub.lons<-array(dim=c(4,length(lons)))
  sub.lats<-array(dim=c(4,length(lats)))
  # Each input column expands into 4 consecutive output columns;
  # sub.idx points at the first (bottom-left) quarter of each group.
  sub.idx<-seq(1,length(lons),4)
  # bottom left: original BL vertex, two edge midpoints, box centre
  sub.lons[1,sub.idx]<-lons[1,]
  sub.lons[2,sub.idx]<-(lons[1,]+lons[2,])/2
  sub.lons[3,sub.idx]<-(lons[1,]+lons[2,]+
                        lons[3,]+lons[4,])/4
  sub.lons[4,sub.idx]<-(lons[1,]+lons[4,])/2
  sub.lats[1,sub.idx]<-lats[1,]
  sub.lats[2,sub.idx]<-(lats[1,]+lats[2,])/2
  sub.lats[3,sub.idx]<-(lats[1,]+lats[2,]+
                        lats[3,]+lats[4,])/4
  sub.lats[4,sub.idx]<-(lats[1,]+lats[4,])/2
  # bottom right: reuses midpoints/centre already stored for bottom left
  sub.lons[1,sub.idx+1]<-sub.lons[2,sub.idx]
  sub.lons[2,sub.idx+1]<-lons[2,]
  sub.lons[3,sub.idx+1]<-(lons[2,]+lons[3,])/2
  sub.lons[4,sub.idx+1]<-sub.lons[3,sub.idx]
  sub.lats[1,sub.idx+1]<-sub.lats[2,sub.idx]
  sub.lats[2,sub.idx+1]<-lats[2,]
  sub.lats[3,sub.idx+1]<-(lats[2,]+lats[3,])/2
  sub.lats[4,sub.idx+1]<-sub.lats[3,sub.idx]
  # Top right
  sub.lons[1,sub.idx+2]<-sub.lons[3,sub.idx]
  sub.lons[2,sub.idx+2]<-sub.lons[3,sub.idx+1]
  sub.lons[3,sub.idx+2]<-lons[3,]
  sub.lons[4,sub.idx+2]<-(lons[3,]+lons[4,])/2
  sub.lats[1,sub.idx+2]<-sub.lats[3,sub.idx]
  sub.lats[2,sub.idx+2]<-sub.lats[3,sub.idx+1]
  sub.lats[3,sub.idx+2]<-lats[3,]
  sub.lats[4,sub.idx+2]<-(lats[3,]+lats[4,])/2
  # Top left
  sub.lons[1,sub.idx+3]<-sub.lons[4,sub.idx]
  sub.lons[2,sub.idx+3]<-sub.lons[3,sub.idx]
  sub.lons[3,sub.idx+3]<-sub.lons[4,sub.idx+2]
  sub.lons[4,sub.idx+3]<-lons[4,]
  sub.lats[1,sub.idx+3]<-sub.lats[4,sub.idx]
  sub.lats[2,sub.idx+3]<-sub.lats[3,sub.idx]
  sub.lats[3,sub.idx+3]<-sub.lats[4,sub.idx+2]
  sub.lats[4,sub.idx+3]<-lats[4,]
  return(list(lats=sub.lats,lons=sub.lons))
}
# Draw a field, point by point - using a reduced gaussian grid
# Draw a gridded field as filled polygons on a reduced gaussian grid.
# field: GSDF field; grid: data frame of box corners/centres (min/max/
# centre lat & lon); colour.function maps interpolated values to colours
# (NA = don't draw); selection.function(lat,lon) masks the drawn region.
# Boxes cut by the selection boundary are recursively subdivided with
# double.resolution() until inside fragments are small enough to draw.
draw.by.rgg<-function(field,grid,colour.function,selection.function,Options,
                      grid.colour=rgb(0.8,0.8,0.8,0),grid.lwd=0,grid.lty=1) {
  field<-GSDF:::GSDF.pad.longitude(field) # Extras for periodic boundary conditions
  # One value (and so one colour) per grid-box centre
  value.points<-GSDF.interpolate.2d(field,grid$centre.lon,grid$centre.lat)
  col<-colour.function(value.points)
  # Draw all boxes of each distinct colour as one grid.polygon call
  for(group in unique(na.omit(col))) {
    w<-which(col==group)
    # Assemble 4 x n vertex arrays (BL, BR, TR, TL) for this colour group
    vert.lon<-array(dim=c(4,length(w)))
    vert.lon[1,]<-grid$min.lon[w]
    vert.lon[2,]<-grid$max.lon[w]
    vert.lon[3,]<-grid$max.lon[w]
    vert.lon[4,]<-grid$min.lon[w]
    vert.lat<-array(dim=c(4,length(w)))
    vert.lat[1,]<-grid$min.lat[w]
    vert.lat[2,]<-grid$min.lat[w]
    vert.lat[3,]<-grid$max.lat[w]
    vert.lat[4,]<-grid$max.lat[w]
    # Nudge vertices exactly opposite the pole to avoid a singularity
    # in the rotation
    w<-which(vert.lon-Options$pole.lon==180)
    if(length(w)>0) vert.lon[w]<-vert.lon[w]+0.0001
    # Rotate each vertex row into plot (rotated-grid) coordinates
    for(v in seq(1,4)) {
      p.r<-GSDF.ll.to.rg(vert.lat[v,],vert.lon[v,],Options$pole.lat,Options$pole.lon)
      vert.lat[v,]<-p.r$lat
      vert.lon[v,]<-p.r$lon
    }
    # Wrap longitudes into the viewport range
    w<-which(vert.lon>Options$vp.lon.max)
    if(length(w)>0) vert.lon[w]<-vert.lon[w]-360
    w<-which(vert.lon<Options$vp.lon.min)
    if(length(w)>0) vert.lon[w]<-vert.lon[w]+360
    # Duplicate boxes straddling the dateline
    pl<-polish.longitudes(vert.lat,vert.lon)
    vert.lat<-pl$lat
    vert.lon<-pl$lon
    inside<-array(data=selection.function(vert.lat,vert.lon),
                  dim=c(4,length(vert.lat[1,])))
    # Fill in the fiddly area near the boundary: boxes with some (but not
    # all) vertices inside are repeatedly quartered; fully-inside
    # fragments are drawn, fully-outside ones dropped.
    w<-which((inside[1,] | inside[2,] | inside[3,] | inside[4,]) &
             !(inside[1,] & inside[2,] & inside[3,] & inside[4,]))
    boundary.lat<-vert.lat[,w]
    boundary.lon<-vert.lon[,w]
    while(length(boundary.lon)>0) {
      bp<-double.resolution(boundary.lat,boundary.lon)
      boundary.lat<-bp$lats
      boundary.lon<-bp$lons
      # Boundary fragments are drawn borderless, in the group colour
      gp<-gpar(col=rgb(0.8,0.8,0.8,0),fill=group,lwd=0)
      inside<-array(data=selection.function(boundary.lat,boundary.lon),
                    dim=c(4,length(boundary.lat[1,])))
      w<-which(inside[1,] & inside[2,] & inside[3,] & inside[4,])
      if(length(w)>0) {
        grid.polygon(x=unit(as.vector(boundary.lon[,w]),'native'),
                     y=unit(as.vector(boundary.lat[,w]),'native'),
                     id.lengths=rep(4,length(w)),
                     gp=gp)
      }
      # Keep subdividing the still-straddling fragments ...
      w<-which((inside[1,] | inside[2,] | inside[3,] | inside[4,]) &
               !(inside[1,] & inside[2,] & inside[3,] & inside[4,]))
      if(length(w)==0) break
      boundary.lat<-boundary.lat[,w]
      boundary.lon<-boundary.lon[,w]
      if(!is.array(boundary.lon)) boundary.lon<-array(data=boundary.lon,
                                                      dim=c(4,length(boundary.lon)/4))
      if(!is.array(boundary.lat)) boundary.lat<-array(data=boundary.lat,
                                                      dim=c(4,length(boundary.lat)/4))
      # ... but stop once fragments are smaller than ~0.1 degrees
      # (perimeter measured as the sum of edge extents)
      d.lat<-abs(boundary.lat[2,]-boundary.lat[1,])+
             abs(boundary.lat[3,]-boundary.lat[2,])+
             abs(boundary.lat[4,]-boundary.lat[3,])+
             abs(boundary.lat[4,]-boundary.lat[1,])
      d.lon<-abs(boundary.lon[2,]-boundary.lon[1,])+
             abs(boundary.lon[3,]-boundary.lon[2,])+
             abs(boundary.lon[4,]-boundary.lon[3,])+
             abs(boundary.lon[4,]-boundary.lon[1,])
      w<-which(d.lat>0.1 | d.lon>0.1)
      if(length(w)==0) break
      boundary.lat<-boundary.lat[,w]
      boundary.lon<-boundary.lon[,w]
    }
    # add all the normal points inside the boundaries
    inside<-array(data=selection.function(vert.lat,vert.lon),
                  dim=c(4,length(vert.lat[1,])))
    w<-which(inside[1,] & inside[2,] & inside[3,] & inside[4,])
    if(length(w)==0) next
    vert.lat<-vert.lat[,w]
    vert.lon<-vert.lon[,w]
    if(!is.array(vert.lat)) vert.lat<-array(data=vert.lat,dim=c(4,length(vert.lat)/4))
    if(!is.array(vert.lon)) vert.lon<-array(data=vert.lon,dim=c(4,length(vert.lon)/4))
    gp<-gpar(col=grid.colour,fill=group,lwd=grid.lwd,lty=grid.lty)
    #gp<-gpar(col=rgb(0.8,0.8,0.8,0),fill=group,lwd=0)
    grid.polygon(x=unit(as.vector(vert.lon),'native'),
                 y=unit(as.vector(vert.lat),'native'),
                 id.lengths=rep(4,dim(vert.lat)[2]),
                 gp=gp)
  }
}
# Subdivide a segmented line into shorter segments
# Double the resolution of a contour line by inserting the midpoint of
# every segment. 'line' is a contourLines()-style list with numeric x, y
# and a contour 'level'; the result has 2*n-1 points and the same level.
upline<-function(line) {
  nseg<-length(line$x)
  # Bug fix: the second list component was previously also named 'x',
  # leaving a stray all-NA $x element in the output and forcing $y to be
  # grown from NULL by the assignments below.
  result<-list(x=rep(NA_real_,nseg*2-1),
               y=rep(NA_real_,nseg*2-1))
  odd<-seq(1,nseg*2-1,2)
  result$x[odd]<-line$x
  result$y[odd]<-line$y
  # Guard: seq(2, 0, 2) errors for a single-point line, so only
  # interpolate when there is at least one segment.
  if(nseg>1) {
    even<-seq(2,nseg*2-2,2)
    result$x[even]<-(result$x[even-1]+result$x[even+1])/2
    result$y[even]<-(result$y[even-1]+result$y[even+1])/2
  }
  result$level<-line$level
  return(result)
}
# Draw MSLP contours for one field, masked by selection.function.
# mslp: GSDF pressure field; selection.function(lat,lon) -> logical --
# points where it is FALSE are blanked (NA) so the spline breaks there;
# colour: RGBA components in [0,1]. Contour levels come from the
# file-level 'contour.levels' vector.
draw.pressure<-function(mslp,selection.function,Options,colour=c(0,0,0,1)) {
  M<-GSDF.WeatherMap:::WeatherMap.rotate.pole(mslp,Options)
  M<-GSDF:::GSDF.pad.longitude(M) # Extras for periodic boundary conditions
  lats<-M$dimensions[[GSDF.find.dimension(M,'lat')]]$values
  longs<-M$dimensions[[GSDF.find.dimension(M,'lon')]]$values
  # contourLines needs both axes ascending and longitudes within range;
  # regrid onto a compliant copy of the field when necessary.
  maxl<-Options$vp.lon.max+2
  if(lats[2]<lats[1] || longs[2]<longs[1] || max(longs) > maxl ) {
    if(lats[2]<lats[1]) lats<-rev(lats)
    if(longs[2]<longs[1]) longs<-rev(longs)
    longs[longs>maxl]<-longs[longs>maxl]-(maxl*2)
    longs<-sort(longs)
    M2<-M
    M2$dimensions[[GSDF.find.dimension(M,'lat')]]$values<-lats
    M2$dimensions[[GSDF.find.dimension(M,'lon')]]$values<-longs
    M<-GSDF.regrid.2d(M,M2)
  }
  z<-matrix(data=M$data,nrow=length(longs),ncol=length(lats))
  lines<-contourLines(longs,lats,z,
                      levels=contour.levels)
  # Bug fix: contourLines() returns a list, so the previous test
  # '!is.na(lines) && length(lines)>0' applied is.na() elementwise and
  # relied on '&&' silently using only the first element -- an error in
  # R >= 4.3, and a zero-length-condition error when no contours exist.
  # A plain length check is all that is needed.
  if(length(lines)>0) {
    for(i in seq_along(lines)) {
      # Contours at or below the base pressure are solid (lty 1); those
      # above are dashed (lty 5). (The old per-branch 'lwd' bookkeeping
      # and the unused transparency value 'tp' have been removed: lwd
      # was 1 in every branch, so the width is always Options$mslp.lwd.)
      lt<-5
      if(lines[[i]]$level<=Options$mslp.base) {
        lt<-1
      }
      gp<-gpar(col=rgb(colour[1],colour[2],colour[3],colour[4]),
               lwd=Options$mslp.lwd,lty=lt)
      res<-tryCatch({
        # Blank points outside the selected region so the spline breaks
        # instead of being drawn across the excluded area.
        for(p in seq_along(lines[[i]]$x)) {
          if(!selection.function(lines[[i]]$y[p],lines[[i]]$x[p])) {
            is.na(lines[[i]]$y[p])<-TRUE
            is.na(lines[[i]]$x[p])<-TRUE
          }
        }
        grid.xspline(x=unit(lines[[i]]$x,'native'),
                     y=unit(lines[[i]]$y,'native'),
                     gp=gp)
      }, warning = function(w) {
        print(w)
      }, error = function(e) {
        print(e)
      }, finally = {
        # Do nothing
      })
    }
  }
}
# Draw precipitation as short slanted green strokes whose length scales
# with the rain rate. value.function maps interpolated rates to an
# intensity in (0,1] or NA (don't draw) -- it is stochastic, so heavier
# rain gives denser strokes; selection.function masks the drawn region.
draw.precipitation<-function(prate,value.function,selection.function,Options) {
  prate<-GSDF.WeatherMap:::WeatherMap.rotate.pole(prate,Options)
  # Regular 0.2-degree sample grid covering the viewport
  res<-0.2
  lon.points<-seq(-180,180,res)+Options$vp.lon.min+180
  lat.points<-seq(-90,90,res)
  lon.ex<-sort(rep(lon.points,length(lat.points)))
  lat.ex<-rep(lat.points,length(lon.points))
  value.points<-GSDF.interpolate.2d(prate,lon.ex,lat.ex)
  value<-value.function(value.points)
  # Drop sample points with nothing to draw
  w<-which(is.na(value))
  if(length(w)>0) {
    lon.ex<-lon.ex[-w]
    lat.ex<-lat.ex[-w]
    value<-value[-w]
  }
  # Randomised stroke length (scaled by intensity) and position jitter
  # within each sample cell
  scale<-runif(length(lon.ex),min=0.03,max=0.07)*4*value
  lat.jitter<-runif(length(lon.ex),min=res*-1/2,max=res/2)
  lon.jitter<-runif(length(lon.ex),min=res*-1/2,max=res/2)
  # Each stroke is a 2-point segment, slanted (dlat = 2 * dlon)
  vert.lat<-array(dim=c(2,length(lat.ex)))
  vert.lat[1,]<-lat.ex+lat.jitter+scale
  vert.lat[2,]<-lat.ex+lat.jitter-scale
  vert.lon<-array(dim=c(2,length(lon.ex)))
  vert.lon[1,]<-lon.ex+lon.jitter+scale/2
  vert.lon[2,]<-lon.ex+lon.jitter-scale/2
  # Keep only strokes with both endpoints inside the selected region
  inside<-array(data=selection.function(vert.lat,vert.lon),
                dim=c(2,length(vert.lat[1,])))
  w<-which(inside[1,] & inside[2,])
  vert.lat<-vert.lat[,w]
  vert.lon<-vert.lon[,w]
  # NOTE(review): rgb(0,0.2,0,1,1) passes 1 as rgb()'s fifth positional
  # argument ('names'), not maxColorValue -- harmless, but check intent.
  gp<-gpar(col=rgb(0,0.2,0,1,1),fill=rgb(0,0.2,0,1,1),lwd=0.5)
  grid.polyline(x=unit(as.vector(vert.lon),'native'),
                y=unit(as.vector(vert.lat),'native'),
                id.lengths=rep(2,dim(vert.lat)[2]),
                gp=gp)
}
# Draw wind streamlines as x-splines.
# s: list with 'x' and 'y' point matrices (one streamline per row) and a
# 'shape' spline parameter; selection.function masks the drawn region.
# NOTE(review): the inside mask is shaped (n, 3), i.e. it assumes three
# points per streamline, while id.lengths uses
# Options$wind.vector.points -- these must agree; confirm upstream.
draw.streamlines<-function(s,selection.function,Options) {
  gp<-set.streamline.GC(Options)
  # Keep only streamlines with all their points inside the region
  inside<-array(data=selection.function(s[['y']],s[['x']]),
                dim=c(length(s[['x']][,1]),3))
  w<-which(inside[,1] & inside[,2] & inside[,3])
  s[['x']]<-s[['x']][w,]
  s[['y']]<-s[['y']][w,]
  # Transpose so points of each streamline are consecutive, as
  # id.lengths expects
  grid.xspline(x=unit(as.vector(t(s[['x']])),'native'),
               y=unit(as.vector(t(s[['y']])),'native'),
               id.lengths=rep(Options$wind.vector.points,length(s[['x']][,1])),
               shape=s[['shape']],
               arrow=Options$wind.vector.arrow,
               gp=gp)
}
# Choose TWCR points to plot
# Select which points fall in the "20CR" sector of a split plot.
# Returns a logical vector: TRUE for points above/left of two slanted
# boundary lines passing through (centre.lat, centre.lon).
# NOTE(review): centre.lat/centre.lon are free variables that are never
# defined in this script, and the top-level plotting code below shadows
# this function with an all-TRUE local version -- presumably a leftover
# from an earlier script; confirm before reuse.
select.TWCR<-function(lat,lon) {
  # Boundary lines: shallow slope (0.1) in lon->lat, steep slope (2) in lat->lon
  lat.boundary<-centre.lat+(lon-centre.lon)*0.1
  lon.boundary<-centre.lon+(lat-centre.lat)*2
  result<-rep(FALSE,length(lat))
  w<-which((lon<centre.lon & lat>lat.boundary) |
           (lat>centre.lat & lon<lon.boundary))
  if(length(w)>0) result[w]<-TRUE
  return(result)
}
# Intensity function for precipitation
# Map precipitation rate to a drawing intensity in (0,1].
# Rates are square-root compressed; points are kept stochastically with
# probability equal to their intensity, so heavier rain plots denser.
# Returns NA for points that are dropped or below the minimum threshold.
set.precip.value<-function(rate) {
  lo<-0.0025   # no precipitation drawn below this (compressed) rate
  hi<-0.03     # compressed rate at which intensity saturates at 1
  compressed<-sqrt(pmax(0,rate))
  intensity<-pmax(0,pmin(1,compressed/hi))
  # One uniform draw per point: keep with probability 'intensity'
  keep<-which(runif(length(compressed),0,1)<intensity & compressed>lo)
  out<-rep(NA,length(compressed))
  if(length(keep)>0) out[keep]<-intensity[keep]
  return(out)
}
# Colour function for precipitation
# Colour for precipitation shading: dark green whose alpha scales with the
# square-root-compressed rain rate, capped at 0.9. Rates below the minimum
# threshold yield NA (nothing drawn); NA rates are treated as zero.
set.precip.colour<-function(rate) {
  lo<-0.0025   # below this compressed rate: transparent (NA)
  hi<-0.03     # compressed rate at which alpha saturates (at 0.9)
  compressed<-sqrt(pmax(0,rate,na.rm=TRUE))
  alpha<-pmax(0,pmin(0.9,compressed/hi,na.rm=TRUE),na.rm=TRUE)
  alpha[is.na(alpha)]<-0
  colours<-rgb(0,0.2,0,alpha)
  colours[compressed<lo]<-NA
  return(colours)
}
# Colour function for t2m
# Colour for 2m-temperature anomalies: warm anomalies fade in as red,
# cold anomalies as blue. Alpha ramps with sqrt(|anomaly|) up to Trange
# and then saturates; the maximum alpha is 0.5. NA anomalies stay NA.
set.t2m.colour<-function(temperature,Trange=5) {
  # Shared alpha ramp: sqrt-compress, clamp at Trange, normalise, round
  ramp<-function(x) round(pmin(Trange,sqrt(x))/Trange,2)
  result<-rep(NA,length(temperature))
  warm<-which(temperature>=0)
  if(length(warm)>0) {
    result[warm]<-rgb(1,0,0,ramp(temperature[warm])*0.5)
  }
  cold<-which(temperature<0)
  if(length(cold)>0) {
    result[cold]<-rgb(0,0,1,ramp(-temperature[cold])*0.5)
  }
  return(result)
}
#Colour function for ice
# Colour for sea-ice cover: white with alpha equal to the ice fraction
# (rounded to 2 d.p.); cells with no ice get NA so nothing is drawn.
set.ice.colour<-function(ice) {
  shades<-rep(NA,length(ice))
  covered<-which(ice>0)
  if(length(covered)>0) {
    shades[covered]<-rgb(1,1,1,round(ice[covered],2))
  }
  return(shades)
}
# Want to plot obs coverage rather than observations - make pseudo
# observations indicating coverage.
# Plot observation COVERAGE (one marker per occupied 1-degree cell)
# rather than every individual observation.
# obs: data frame with Latitude/Longitude columns; Options: WeatherMap
# options (pole rotation, viewport longitude range, marker size/colour).
plot.obs.coverage<-function(obs,Options) {
  # Rotate obs positions if the plot uses a rotated pole
  if(Options$pole.lon!=0 || Options$pole.lat!=90) {
    l2<-GSDF.ll.to.rg(obs$Latitude,obs$Longitude,Options$pole.lat,Options$pole.lon)
    obs$Longitude<-l2$lon
    obs$Latitude<-l2$lat
  }
  if(length(obs$Latitude)<1) return()
  # Wrap longitudes into the viewport range (vp.* overrides, if set)
  lon.m<-Options$lon.min
  if(!is.null(Options$vp.lon.min)) lon.m<-Options$vp.lon.min
  w<-which(obs$Longitude<lon.m)
  if(length(w)>0) obs$Longitude[w]<-obs$Longitude[w]+360
  lon.m<-Options$lon.max
  if(!is.null(Options$vp.lon.max)) lon.m<-Options$vp.lon.max
  w<-which(obs$Longitude>lon.m)
  if(length(w)>0) obs$Longitude[w]<-obs$Longitude[w]-360
  # Deduplicate to one observation per grid cell.
  # NOTE(review): the '*1' multiplier gives 1-degree bins, not the
  # ".5/degree" the original comment claimed -- confirm intent.
  idx<-sprintf("%4d%4d",as.integer(obs$Latitude*1),as.integer(obs$Longitude*1))
  w<-which(duplicated(idx))
  if(length(w)>0) obs<-obs[-w,]
  gp<-gpar(col=Options$obs.colour,fill=Options$obs.colour)
  grid.points(x=unit(obs$Longitude,'native'),
              y=unit(obs$Latitude,'native'),
              size=unit(Options$obs.size,'native'),
              pch=21,gp=gp)
}
# Colour function for streamlines
# Graphical parameters for wind streamlines: opaque mid-grey, width 1.5.
# 'Options' is accepted for interface consistency but is not used.
set.streamline.GC<-function(Options) {
  grey<-rgb(125,125,125,255,maxColorValue=255)
  gpar(col=grey,fill=grey,lwd=1.5)
}
# Make the actual plot
# Bug fix: the old sprintf() passed undefined variables (year, month, day,
# hour -- only opt$year etc. exist) as extra arguments, which errors at
# evaluation time ('object "year" not found'); the format string has no
# placeholders anyway, so the filename is a constant.
image.name<-"CERA_20CR.pdf"
# Build the full output path and open the PDF device (A0 landscape:
# 46.8 x 33.1 inches, per the header's "Print quality - A0 format")
ifile.name<-sprintf("%s/%s",Imagedir,image.name)
pdf(ifile.name,
    width=46.8,
    height=33.1,
    bg='white',
    family='Helvetica',
    pointsize=24)
# CERA half
# Left-hand panel: CERA-20C ensemble (10 members, numbered 0-9)
pushViewport(viewport(x=unit(1/4,'npc'),y=unit(1/2,'npc'),
                      width=unit(0.5-0.01,'npc'),
                      height=unit(1.0-0.01,'npc')))
base.gp<-gpar(fontfamily='Helvetica',fontface='bold',col='black')
# Map extent: viewport overrides (vp.*) take precedence over defaults
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
# Sea-coloured background for the whole panel
grid.polygon(x=c(0,1,1,0),y=c(0,0,1,1),gp=gpar(col=Options$sea.colour,
                                               fill=Options$sea.colour))
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
                          extension=0,gp=base.gp,clip='on'))
# Sea ice, then land, then one pressure-contour set per ensemble member
icec<-CERA20C.get.slice.at.hour('icec',opt$year,opt$month,opt$day,opt$hour)
draw.by.rgg(icec,g5,set.ice.colour,function(lat,lon) return(rep(TRUE,length(lat))),Options)
draw.land.flat(Options)
for(member in seq(0,9)) {
  # Select everything -- this whole panel shows CERA
  select.CERA20C<-function(lat,lon) {
    result<-rep(TRUE,length(lat))
    return(result)
  }
  mslp<-CERA20C.get.slice.at.hour('prmsl',opt$year,opt$month,opt$day,opt$hour,member=member)
  draw.pressure(mslp,select.CERA20C,Options,colour=c(0,0,1,0.1))
}
popViewport()
popViewport() # End of CERA half
# 20CR half
# Right-hand panel: 20CR v3.5.1 ensemble (56 members) plus obs coverage;
# more members, so each contour is drawn more transparently (alpha 0.01)
pushViewport(viewport(x=unit(3/4,'npc'),y=unit(1/2,'npc'),
                      width=unit(0.5-0.01,'npc'),
                      height=unit(1.0-0.01,'npc')))
base.gp<-gpar(fontfamily='Helvetica',fontface='bold',col='black')
lon.min<-Options$lon.min
if(!is.null(Options$vp.lon.min)) lon.min<-Options$vp.lon.min
lon.max<-Options$lon.max
if(!is.null(Options$vp.lon.max)) lon.max<-Options$vp.lon.max
lat.min<-Options$lat.min
if(!is.null(Options$vp.lat.min)) lat.min<-Options$vp.lat.min
lat.max<-Options$lat.max
if(!is.null(Options$vp.lat.max)) lat.max<-Options$vp.lat.max
grid.polygon(x=c(0,1,1,0),y=c(0,0,1,1),gp=gpar(col=Options$sea.colour,
                                               fill=Options$sea.colour))
pushViewport(dataViewport(c(lon.min,lon.max),c(lat.min,lat.max),
                          extension=0,gp=base.gp,clip='on'))
icec<-TWCR.get.slice.at.hour('icec',opt$year,opt$month,opt$day,opt$hour,version='3.5.1')
draw.by.rgg(icec,g5,set.ice.colour,function(lat,lon) return(rep(TRUE,length(lat))),Options)
draw.land.flat(Options)
for(member in seq(1,56)) {
  # Select everything -- this whole panel shows 20CR (shadows the
  # unused boundary-based select.TWCR defined above)
  select.TWCR<-function(lat,lon) {
    result<-rep(TRUE,length(lat))
    return(result)
  }
  mslp<-TWCR.get.member.at.hour('prmsl',opt$year,opt$month,opt$day,opt$hour,member=member,version='3.5.1')
  draw.pressure(mslp,select.TWCR,Options,colour=c(0,0,1,0.01))
}
# Mark where assimilated observations are available
obs<-TWCR.get.obs(opt$year,opt$month,opt$day,opt$hour,version='3.5.1')
plot.obs.coverage(obs,Options)
popViewport()
popViewport() # End of 20CR half
dev.off()
|
# Joint log-likelihood for the RCI model (autosomal) under the constraint
# that the first two coefficients are equal: par[1] is duplicated to
# expand the parameter vector before it is passed to the unconstrained
# components.
# NOTE(review): 'sex' is accepted but unused here (twosex=FALSE is
# hard-coded) -- presumably kept so the signature matches the
# unconstrained variants; confirm against callers.
`ll.jRCI.A.cons1` =
function(par, yi, ind.lst, X, sex, ni, ni0, xs, iphi, theta){
  # Duplicate the leading parameter to impose the equality constraint
  par1 = c(par[1], par)
  # Total-read-count component (single-sex model)
  logL = ll.tRCI.A(par0=par1, yi=yi, ind.lst=ind.lst, X=X, twosex=FALSE, iphi=iphi)
  # Add the allele-specific-count component (uses parameters 2 and 3)
  logL = logL + ll.aRC(par0=par1[c(2, 3)], ni, ni0, xs, theta)
  return(logL)
}
|
/R/ll.jRCI.A.cons1.R
|
no_license
|
cran/rxSeq
|
R
| false
| false
| 280
|
r
|
# Joint log-likelihood for the constrained RCI model (autosomal): the
# first parameter is duplicated so the two leading coefficients are tied,
# then the total-count and allele-specific-count log-likelihoods are
# summed. ('sex' is part of the shared signature but unused; twosex=FALSE.)
`ll.jRCI.A.cons1` =
function(par, yi, ind.lst, X, sex, ni, ni0, xs, iphi, theta){
  expanded = c(par[1], par)
  total.part = ll.tRCI.A(par0=expanded, yi=yi, ind.lst=ind.lst, X=X, twosex=FALSE, iphi=iphi)
  allele.part = ll.aRC(par0=expanded[c(2, 3)], ni, ni0, xs, theta)
  return(total.part + allele.part)
}
|
#' Links layer
#'
#' @inheritParams ggplot2::layer
#' @inheritParams ggplot2::geom_curve
#' @section Aesthetics:
#' \code{geom_links2()} understands the following aesthetics (required aesthetics are in bold):
#' \itemize{
#' \item \strong{\code{x}}
#' \item \strong{\code{y}}
#' \item \strong{\code{xend}}
#' \item \strong{\code{yend}}
#' \item \code{alpha}
#' \item \code{colour}
#' \item \code{fill}
#' \item \code{group}
#' \item \code{linetype}
#' \item \code{size}
#' }
#' @importFrom ggplot2 layer ggproto GeomCurve GeomPoint draw_key_path
#' @importFrom grid gTree
#' @rdname geom_links2
#' @author Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
#' @export
geom_links2 <- function(mapping = NULL,
                        data = NULL,
                        stat = "identity",
                        position = "identity",
                        ...,
                        curvature = 0,
                        angle = 90,
                        ncp = 5,
                        arrow = NULL,
                        arrow.fill = NULL,
                        lineend = "butt",
                        na.rm = FALSE,
                        show.legend = NA,
                        inherit.aes = TRUE) {
  # Standard ggplot2 layer constructor: curve-specific arguments
  # (curvature/angle/ncp/arrow/arrow.fill/lineend) and anything in '...'
  # are forwarded to GeomLinks2 via 'params'; the rest is the usual
  # layer() plumbing.
  layer(
    data = data,
    mapping = mapping,
    stat = stat,
    geom = GeomLinks2,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = list(
      curvature = curvature,
      angle = angle,
      ncp = ncp,
      arrow = arrow,
      arrow.fill = arrow.fill,
      lineend = lineend,
      na.rm = na.rm,
      ...
    )
  )
}
#' @rdname geom_links2
#' @format NULL
#' @usage NULL
#' @export
# Geom backing geom_links2(): draws each link as a curve (via GeomCurve)
# and decorates both endpoints with node points (via GeomPoint).
# Node appearance (shape/colour/fill/size) is controlled by geom
# parameters, not mapped aesthetics; each may be length 1 (both ends)
# or length 2 (start, end). rm.dup drops duplicate node positions so
# shared endpoints are drawn once.
GeomLinks2 <- ggproto(
  "GeomLinks2", GeomCurve,
  draw_panel = function(self, data, panel_params, coord, rm.dup = TRUE,
                        node.shape = 21, node.colour = "blue", node.fill = "red",
                        node.size = 2, curvature = 0, angle = 90, ncp = 5, arrow = NULL,
                        arrow.fill = NULL, lineend = "butt", na.rm = FALSE) {
    # Aesthetic columns to carry over to the node data: everything except
    # the positions and the line aesthetics, which the nodes override
    aesthetics <- setdiff(names(data), c("x", "y", "xend", "yend", "colour",
                                         "fill", "size", "linetype"))
    # Split each node parameter into start/end values (recycled if length 1)
    start.colour <- node.colour[1]
    end.colour <- if(length(node.colour) > 1) node.colour[2] else node.colour[1]
    start.fill <- node.fill[1]
    end.fill <- if(length(node.fill) > 1) node.fill[2] else node.fill[1]
    start.shape <- node.shape[1]
    end.shape <- if(length(node.shape) > 1) node.shape[2] else node.shape[1]
    start.size <- node.size[1]
    end.size <- if(length(node.size) > 1) node.size[2] else node.size[1]
    # Point data for the link start (x, y) and end (xend, yend)
    start.data <- new_data_frame(
      list(x = data$x,
           y = data$y,
           colour = start.colour,
           fill = start.fill,
           shape = start.shape,
           size = start.size,
           stroke = 0.5))
    end.data <- new_data_frame(
      list(x = data$xend,
           y = data$yend,
           colour = end.colour,
           fill = end.fill,
           shape = end.shape,
           size = end.size,
           stroke = 0.5))
    # Re-attach remaining aesthetics; optionally deduplicate node rows
    if(isTRUE(rm.dup)) {
      start.data <- cbind(start.data, data[aesthetics])[!duplicated(start.data), , drop = FALSE]
      end.data <- cbind(end.data, data[aesthetics])[!duplicated(end.data), , drop = FALSE]
    } else {
      start.data <- cbind(start.data, data[aesthetics])
      end.data <- cbind(end.data, data[aesthetics])
    }
    # Curves first, then nodes on top, combined into one named gTree
    ggname(
      "geom_links2",
      grid::gTree(
        children = grid::gList(
          GeomCurve$draw_panel(data, panel_params, coord, curvature = curvature,
                               angle = angle, ncp = ncp, arrow = arrow,
                               arrow.fill = arrow.fill, lineend = lineend,
                               na.rm = na.rm),
          GeomPoint$draw_panel(start.data, panel_params, coord),
          GeomPoint$draw_panel(end.data, panel_params, coord)
        )
      )
    )
  },
  draw_key = draw_key_path
)
|
/R/geom-link2.R
|
no_license
|
catherian-cat/ggcor
|
R
| false
| false
| 4,027
|
r
|
#' Links layer
#'
#' @inheritParams ggplot2::layer
#' @inheritParams ggplot2::geom_curve
#' @section Aesthetics:
#' \code{geom_links2()} understands the following aesthetics (required aesthetics are in bold):
#' \itemize{
#' \item \strong{\code{x}}
#' \item \strong{\code{y}}
#' \item \strong{\code{xend}}
#' \item \strong{\code{yend}}
#' \item \code{alpha}
#' \item \code{colour}
#' \item \code{fill}
#' \item \code{group}
#' \item \code{linetype}
#' \item \code{size}
#' }
#' @importFrom ggplot2 layer ggproto GeomCurve GeomPoint draw_key_path
#' @importFrom grid gTree
#' @rdname geom_links2
#' @author Houyun Huang, Lei Zhou, Jian Chen, Taiyun Wei
#' @export
geom_links2 <- function(mapping = NULL,
                        data = NULL,
                        stat = "identity",
                        position = "identity",
                        ...,
                        curvature = 0,
                        angle = 90,
                        ncp = 5,
                        arrow = NULL,
                        arrow.fill = NULL,
                        lineend = "butt",
                        na.rm = FALSE,
                        show.legend = NA,
                        inherit.aes = TRUE) {
  # Gather the geom-specific parameters (plus any extras in '...') up
  # front, then hand everything to ggplot2's layer() with GeomLinks2.
  geom_params <- list(
    curvature = curvature,
    angle = angle,
    ncp = ncp,
    arrow = arrow,
    arrow.fill = arrow.fill,
    lineend = lineend,
    na.rm = na.rm,
    ...
  )
  layer(
    mapping = mapping,
    data = data,
    stat = stat,
    geom = GeomLinks2,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = geom_params
  )
}
#' @rdname geom_links2
#' @format NULL
#' @usage NULL
#' @export
# Geom backing geom_links2(): draws each link as a curve (via GeomCurve)
# and decorates both endpoints with node points (via GeomPoint).
# Node appearance (shape/colour/fill/size) is controlled by geom
# parameters, not mapped aesthetics; each may be length 1 (both ends)
# or length 2 (start, end). rm.dup drops duplicate node positions so
# shared endpoints are drawn once.
GeomLinks2 <- ggproto(
  "GeomLinks2", GeomCurve,
  draw_panel = function(self, data, panel_params, coord, rm.dup = TRUE,
                        node.shape = 21, node.colour = "blue", node.fill = "red",
                        node.size = 2, curvature = 0, angle = 90, ncp = 5, arrow = NULL,
                        arrow.fill = NULL, lineend = "butt", na.rm = FALSE) {
    # Aesthetic columns to carry over to the node data: everything except
    # the positions and the line aesthetics, which the nodes override
    aesthetics <- setdiff(names(data), c("x", "y", "xend", "yend", "colour",
                                         "fill", "size", "linetype"))
    # Split each node parameter into start/end values (recycled if length 1)
    start.colour <- node.colour[1]
    end.colour <- if(length(node.colour) > 1) node.colour[2] else node.colour[1]
    start.fill <- node.fill[1]
    end.fill <- if(length(node.fill) > 1) node.fill[2] else node.fill[1]
    start.shape <- node.shape[1]
    end.shape <- if(length(node.shape) > 1) node.shape[2] else node.shape[1]
    start.size <- node.size[1]
    end.size <- if(length(node.size) > 1) node.size[2] else node.size[1]
    # Point data for the link start (x, y) and end (xend, yend)
    start.data <- new_data_frame(
      list(x = data$x,
           y = data$y,
           colour = start.colour,
           fill = start.fill,
           shape = start.shape,
           size = start.size,
           stroke = 0.5))
    end.data <- new_data_frame(
      list(x = data$xend,
           y = data$yend,
           colour = end.colour,
           fill = end.fill,
           shape = end.shape,
           size = end.size,
           stroke = 0.5))
    # Re-attach remaining aesthetics; optionally deduplicate node rows
    if(isTRUE(rm.dup)) {
      start.data <- cbind(start.data, data[aesthetics])[!duplicated(start.data), , drop = FALSE]
      end.data <- cbind(end.data, data[aesthetics])[!duplicated(end.data), , drop = FALSE]
    } else {
      start.data <- cbind(start.data, data[aesthetics])
      end.data <- cbind(end.data, data[aesthetics])
    }
    # Curves first, then nodes on top, combined into one named gTree
    ggname(
      "geom_links2",
      grid::gTree(
        children = grid::gList(
          GeomCurve$draw_panel(data, panel_params, coord, curvature = curvature,
                               angle = angle, ncp = ncp, arrow = arrow,
                               arrow.fill = arrow.fill, lineend = lineend,
                               na.rm = na.rm),
          GeomPoint$draw_panel(start.data, panel_params, coord),
          GeomPoint$draw_panel(end.data, panel_params, coord)
        )
      )
    )
  },
  draw_key = draw_key_path
)
|
# usage:
# R --vanilla --args "/data/myHistogram.png" "1,2,3,4"
# Render a histogram of replicative lifespans to a PNG file, with a rug
# of individual observations and an overlaid horizontal boxplot.
#
# filename  -- output PNG path
# lifespans -- numeric vector of lifespans (cell divisions)
makeHistogram = function(filename, lifespans) {
    png(filename=filename, width=250, height=150, units="px", pointsize=16, bg="white");
    # Close the device even if plotting fails part-way through
    on.exit(dev.off());
    par(mar=c(3,3,0.5,1)); # trim margin around plot [b,l,t,r]
    par(tcl=0.35); # switch tick marks to insides of axes
    par(mgp=c(1.5,0.2,0)); # set margin lines; default c(3,1,0) [title,labels,line]
    par(xaxs="r", yaxs="r"); # extend axis limits
    par(lwd=2); # line width 2px
    hist(lifespans, breaks=10, lty=1, lwd=2, xlab="age (cell divisions)", ylab="count", main="");
    # Bug fix: ticksize and lwd were quoted strings ("0.1", "1");
    # rug() does arithmetic on ticksize, and graphics lwd must be
    # numeric, so character values error at draw time.
    rug(lifespans, ticksize=0.1, lwd=1, side=1, col="red");
    par(new=TRUE); # overlay the boxplot on the same region (T -> TRUE)
    boxplot(lifespans, horizontal=TRUE, axes=FALSE, col="red", xlim=c(1,6), at=3);
    par(new=FALSE);
}
# Command-line interface: arg 1 = output PNG path, arg 2 = comma-separated
# lifespan list, e.g. "1,2,3,4" (see usage comment at the top of the file)
args = commandArgs(TRUE);
filename = args[1];
# Split the comma-separated list and coerce it to a numeric vector
lifespans = lapply(strsplit(args[2], ','), as.numeric)[[1]];
makeHistogram(filename, lifespans);
|
/src/Application/Model/PlotService/makeHistogram.R
|
no_license
|
bradyo/rls-db-builder
|
R
| false
| false
| 930
|
r
|
# usage:
# R --vanilla --args "/data/myHistogram.png" "1,2,3,4"
# Render a histogram of replicative lifespans to a PNG file, with a rug
# of individual observations and an overlaid horizontal boxplot.
#
# filename  -- output PNG path
# lifespans -- numeric vector of lifespans (cell divisions)
makeHistogram = function(filename, lifespans) {
    png(filename=filename, width=250, height=150, units="px", pointsize=16, bg="white");
    # Close the device even if plotting fails part-way through
    on.exit(dev.off());
    par(mar=c(3,3,0.5,1)); # trim margin around plot [b,l,t,r]
    par(tcl=0.35); # switch tick marks to insides of axes
    par(mgp=c(1.5,0.2,0)); # set margin lines; default c(3,1,0) [title,labels,line]
    par(xaxs="r", yaxs="r"); # extend axis limits
    par(lwd=2); # line width 2px
    hist(lifespans, breaks=10, lty=1, lwd=2, xlab="age (cell divisions)", ylab="count", main="");
    # Bug fix: ticksize and lwd were quoted strings ("0.1", "1");
    # rug() does arithmetic on ticksize, and graphics lwd must be
    # numeric, so character values error at draw time.
    rug(lifespans, ticksize=0.1, lwd=1, side=1, col="red");
    par(new=TRUE); # overlay the boxplot on the same region (T -> TRUE)
    boxplot(lifespans, horizontal=TRUE, axes=FALSE, col="red", xlim=c(1,6), at=3);
    par(new=FALSE);
}
args = commandArgs(TRUE);
filename = args[1];
lifespans = lapply(strsplit(args[2], ','), as.numeric)[[1]];
makeHistogram(filename, lifespans);
|
# Regression script for bug 0524CR1100: a weighted 4-set Venn diagram drawn
# with the Chow-Ruskey layout should yield retrievable face labels.
library(Vennerable)
# Four sets named after the first four months; weights 1..16 label the
# 2^4 regions of the diagram.
V4a <- Venn(SetNames=month.name[1:4],Weight=1:16)
CR4a <- compute.Venn(V4a,type="ChowRuskey")
# Printing the face labels is the check: it must not error.
VennGetFaceLabels(CR4a)
|
/tests/bug0524CR1100.R
|
no_license
|
JeremyBowyer/Vennerable
|
R
| false
| false
| 139
|
r
|
library(Vennerable)
V4a <- Venn(SetNames=month.name[1:4],Weight=1:16)
CR4a <- compute.Venn(V4a,type="ChowRuskey")
VennGetFaceLabels(CR4a)
|
<!-- Generated by pkgdown: do not edit by hand -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Lines Annotation — anno_lines • ComplexHeatmap</title>
<!-- jquery -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<!-- Bootstrap -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha256-916EbMg70RQy9LHiGkXzG8hSg9EdNy97GazNG/aiY1w=" crossorigin="anonymous" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha256-U5ZEeKfGNOja007MMD3YBI0A3OSZOQbeG6z2f2Y0hu8=" crossorigin="anonymous"></script>
<!-- Font Awesome icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.7.1/css/all.min.css" integrity="sha256-nAmazAk6vS34Xqo0BSrTb+abbtFlgsFK7NKSi6o7Y78=" crossorigin="anonymous" />
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.7.1/css/v4-shims.min.css" integrity="sha256-6qHlizsOWFskGlwVOKuns+D1nB6ssZrHQrNj1wGplHc=" crossorigin="anonymous" />
<!-- clipboard.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.min.js" integrity="sha256-FiZwavyI2V6+EXO1U+xzLG3IKldpiTFf3153ea9zikQ=" crossorigin="anonymous"></script>
<!-- headroom.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/headroom/0.9.4/headroom.min.js" integrity="sha256-DJFC1kqIhelURkuza0AvYal5RxMtpzLjFhsnVIeuk+U=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/headroom/0.9.4/jQuery.headroom.min.js" integrity="sha256-ZX/yNShbjqsohH1k95liqY9Gd8uOiE1S4vZc+9KQ1K4=" crossorigin="anonymous"></script>
<!-- pkgdown -->
<link href="../pkgdown.css" rel="stylesheet">
<script src="../pkgdown.js"></script>
<meta property="og:title" content="Lines Annotation — anno_lines" />
<meta property="og:description" content="Lines Annotation" />
<meta name="twitter:card" content="summary" />
<!-- mathjax -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js" integrity="sha256-nvJJv9wWKEm88qvoQl9ekL2J+k/RWIsaSScxxlsrv8k=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/config/TeX-AMS-MML_HTMLorMML.js" integrity="sha256-84DKXVJXs0/F8OTMzX4UR909+jtl4G7SPypPavF+GfA=" crossorigin="anonymous"></script>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container template-reference-topic">
<header>
<div class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<span class="navbar-brand">
<a class="navbar-link" href="../index.html">ComplexHeatmap</a>
<span class="version label label-default" data-toggle="tooltip" data-placement="bottom" title="Released version">2.1.0</span>
</span>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li>
<a href="../index.html">
<span class="fas fa fas fa-home fa-lg"></span>
</a>
</li>
<li>
<a href="../reference/index.html">Reference</a>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-expanded="false">
Articles
<span class="caret"></span>
</a>
<ul class="dropdown-menu" role="menu">
<li>
<a href="../articles/complex_heatmap.html">UNKNOWN TITLE</a>
</li>
<li>
<a href="../articles/most_probably_asked_questions.html">UNKNOWN TITLE</a>
</li>
</ul>
</li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li>
<a href="https://github.com/jokergoo/ComplexHeatmap">
<span class="fab fa fab fa-github fa-lg"></span>
</a>
</li>
</ul>
</div><!--/.nav-collapse -->
</div><!--/.container -->
</div><!--/.navbar -->
</header>
<div class="row">
<div class="col-md-9 contents">
<div class="page-header">
<h1>Lines Annotation</h1>
<div class="hidden name"><code>anno_lines.rd</code></div>
</div>
<div class="ref-description">
<p>Lines Annotation</p>
</div>
<pre class="usage"><span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='no'>x</span>, <span class='kw'>which</span> <span class='kw'>=</span> <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='st'>"column"</span>, <span class='st'>"row"</span>), <span class='kw'>border</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(),
<span class='kw'>add_points</span> <span class='kw'>=</span> <span class='no'>smooth</span>, <span class='kw'>smooth</span> <span class='kw'>=</span> <span class='fl'>FALSE</span>, <span class='kw'>pch</span> <span class='kw'>=</span> <span class='fl'>16</span>, <span class='kw'>size</span> <span class='kw'>=</span> <span class='fu'>unit</span>(<span class='fl'>2</span>, <span class='st'>"mm"</span>), <span class='kw'>pt_gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(), <span class='kw'>ylim</span> <span class='kw'>=</span> <span class='kw'>NULL</span>,
<span class='kw'>extend</span> <span class='kw'>=</span> <span class='fl'>0.05</span>, <span class='kw'>axis</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>axis_param</span> <span class='kw'>=</span> <span class='fu'><a href='default_axis_param.rd.html'>default_axis_param</a></span>(<span class='no'>which</span>),
<span class='kw'>width</span> <span class='kw'>=</span> <span class='kw'>NULL</span>, <span class='kw'>height</span> <span class='kw'>=</span> <span class='kw'>NULL</span>)</pre>
<h2 class="hasAnchor" id="arguments"><a class="anchor" href="#arguments"></a>Arguments</h2>
<table class="ref-arguments">
<colgroup><col class="name" /><col class="desc" /></colgroup>
<tr>
<th>x</th>
<td><p>The value vector. The value can be a vector or a matrix. The length of the vector or the number of rows of the matrix is taken as the number of the observations of the annotation.</p></td>
</tr>
<tr>
<th>which</th>
<td><p>Whether it is a column annotation or a row annotation?</p></td>
</tr>
<tr>
<th>border</th>
<td><p>Wether draw borders of the annotation region?</p></td>
</tr>
<tr>
<th>gp</th>
<td><p>Graphic parameters for lines. The length of each graphic parameter can be 1, or number of columns of <code>x</code> is <code>x</code> is a matrix.</p></td>
</tr>
<tr>
<th>add_points</th>
<td><p>Whether to add points on the lines?</p></td>
</tr>
<tr>
<th>smooth</th>
<td><p>If it is <code>TRUE</code>, smoothing by <code><a href='https://rdrr.io/r/stats/loess.html'>loess</a></code> is performed. If it is <code>TRUE</code>, <code>add_points</code> is set to <code>TRUE</code> by default.</p></td>
</tr>
<tr>
<th>pch</th>
<td><p>Point type. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>size</th>
<td><p>Point size, the value should be a <code><a href='https://rdrr.io/r/grid/unit.html'>unit</a></code> object. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>pt_gp</th>
<td><p>Graphic parameters for points. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>ylim</th>
<td><p>Data ranges. By default it is <code><a href='https://rdrr.io/r/base/range.html'>range(x)</a></code>.</p></td>
</tr>
<tr>
<th>extend</th>
<td><p>The extension to both side of <code>ylim</code>. The value is a percent value corresponding to <code>ylim[2] - ylim[1]</code>.</p></td>
</tr>
<tr>
<th>axis</th>
<td><p>Whether to add axis?</p></td>
</tr>
<tr>
<th>axis_param</th>
<td><p>parameters for controlling axis. See <code><a href='default_axis_param.rd.html'>default_axis_param</a></code> for all possible settings and default parameters.</p></td>
</tr>
<tr>
<th>width</th>
<td><p>Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.</p></td>
</tr>
<tr>
<th>height</th>
<td><p>Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.</p></td>
</tr>
</table>
<h2 class="hasAnchor" id="value"><a class="anchor" href="#value"></a>Value</h2>
<p>An annotation function which can be used in <code><a href='HeatmapAnnotation.rd.html'>HeatmapAnnotation</a></code>.</p>
<h2 class="hasAnchor" id="see-also"><a class="anchor" href="#see-also"></a>See also</h2>
<div class='dont-index'><p><a href='https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation'>https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation</a></p></div>
<h2 class="hasAnchor" id="examples"><a class="anchor" href="#examples"></a>Examples</h2>
<pre class="examples"><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/stats/Uniform.html'>runif</a></span>(<span class='fl'>10</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"anno_lines"</span>)</div><div class='img'><img src='anno_lines-1.png' alt='' width='700' height='433' /></div><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/cbind.html'>cbind</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>:<span class='fl'>5</span>, <span class='fl'>1</span>:<span class='fl'>5</span>), <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>5</span>:<span class='fl'>1</span>, <span class='fl'>5</span>:<span class='fl'>1</span>)), <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>2</span>:<span class='fl'>3</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"matrix"</span>)</div><div class='img'><img src='anno_lines-2.png' alt='' width='700' height='433' /></div><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/cbind.html'>cbind</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>:<span class='fl'>5</span>, <span class='fl'>1</span>:<span class='fl'>5</span>), <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>5</span>:<span class='fl'>1</span>, <span class='fl'>5</span>:<span class='fl'>1</span>)), <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>2</span>:<span class='fl'>3</span>),
<span class='kw'>add_points</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>pt_gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>5</span>:<span class='fl'>6</span>), <span class='kw'>pch</span> <span class='kw'>=</span> <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>, <span class='fl'>16</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"matrix"</span>)</div><div class='img'><img src='anno_lines-3.png' alt='' width='700' height='433' /></div></pre>
</div>
<div class="col-md-3 hidden-xs hidden-sm" id="sidebar">
<h2>Contents</h2>
<ul class="nav nav-pills nav-stacked">
<li><a href="#arguments">Arguments</a></li>
<li><a href="#value">Value</a></li>
<li><a href="#see-also">See also</a></li>
<li><a href="#examples">Examples</a></li>
</ul>
</div>
</div>
<footer>
<div class="copyright">
<p>Developed by Zuguang Gu.</p>
</div>
<div class="pkgdown">
<p>Site built with <a href="https://pkgdown.r-lib.org/">pkgdown</a> 1.4.1.</p>
</div>
</footer>
</div>
</body>
</html>
|
/docs/reference/anno_lines.rd
|
permissive
|
jokergoo/ComplexHeatmap
|
R
| false
| false
| 13,682
|
rd
|
<!-- Generated by pkgdown: do not edit by hand -->
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Lines Annotation — anno_lines • ComplexHeatmap</title>
<!-- jquery -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/jquery/3.3.1/jquery.min.js" integrity="sha256-FgpCb/KJQlLNfOu91ta32o/NMZxltwRo8QtmkMRdAu8=" crossorigin="anonymous"></script>
<!-- Bootstrap -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/css/bootstrap.min.css" integrity="sha256-916EbMg70RQy9LHiGkXzG8hSg9EdNy97GazNG/aiY1w=" crossorigin="anonymous" />
<script src="https://cdnjs.cloudflare.com/ajax/libs/twitter-bootstrap/3.3.7/js/bootstrap.min.js" integrity="sha256-U5ZEeKfGNOja007MMD3YBI0A3OSZOQbeG6z2f2Y0hu8=" crossorigin="anonymous"></script>
<!-- Font Awesome icons -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.7.1/css/all.min.css" integrity="sha256-nAmazAk6vS34Xqo0BSrTb+abbtFlgsFK7NKSi6o7Y78=" crossorigin="anonymous" />
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/5.7.1/css/v4-shims.min.css" integrity="sha256-6qHlizsOWFskGlwVOKuns+D1nB6ssZrHQrNj1wGplHc=" crossorigin="anonymous" />
<!-- clipboard.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/clipboard.js/2.0.4/clipboard.min.js" integrity="sha256-FiZwavyI2V6+EXO1U+xzLG3IKldpiTFf3153ea9zikQ=" crossorigin="anonymous"></script>
<!-- headroom.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/headroom/0.9.4/headroom.min.js" integrity="sha256-DJFC1kqIhelURkuza0AvYal5RxMtpzLjFhsnVIeuk+U=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/headroom/0.9.4/jQuery.headroom.min.js" integrity="sha256-ZX/yNShbjqsohH1k95liqY9Gd8uOiE1S4vZc+9KQ1K4=" crossorigin="anonymous"></script>
<!-- pkgdown -->
<link href="../pkgdown.css" rel="stylesheet">
<script src="../pkgdown.js"></script>
<meta property="og:title" content="Lines Annotation — anno_lines" />
<meta property="og:description" content="Lines Annotation" />
<meta name="twitter:card" content="summary" />
<!-- mathjax -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/MathJax.js" integrity="sha256-nvJJv9wWKEm88qvoQl9ekL2J+k/RWIsaSScxxlsrv8k=" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/config/TeX-AMS-MML_HTMLorMML.js" integrity="sha256-84DKXVJXs0/F8OTMzX4UR909+jtl4G7SPypPavF+GfA=" crossorigin="anonymous"></script>
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
<body>
<div class="container template-reference-topic">
<header>
<div class="navbar navbar-default navbar-fixed-top" role="navigation">
<div class="container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<span class="navbar-brand">
<a class="navbar-link" href="../index.html">ComplexHeatmap</a>
<span class="version label label-default" data-toggle="tooltip" data-placement="bottom" title="Released version">2.1.0</span>
</span>
</div>
<div id="navbar" class="navbar-collapse collapse">
<ul class="nav navbar-nav">
<li>
<a href="../index.html">
<span class="fas fa fas fa-home fa-lg"></span>
</a>
</li>
<li>
<a href="../reference/index.html">Reference</a>
</li>
<li class="dropdown">
<a href="#" class="dropdown-toggle" data-toggle="dropdown" role="button" aria-expanded="false">
Articles
<span class="caret"></span>
</a>
<ul class="dropdown-menu" role="menu">
<li>
<a href="../articles/complex_heatmap.html">UNKNOWN TITLE</a>
</li>
<li>
<a href="../articles/most_probably_asked_questions.html">UNKNOWN TITLE</a>
</li>
</ul>
</li>
</ul>
<ul class="nav navbar-nav navbar-right">
<li>
<a href="https://github.com/jokergoo/ComplexHeatmap">
<span class="fab fa fab fa-github fa-lg"></span>
</a>
</li>
</ul>
</div><!--/.nav-collapse -->
</div><!--/.container -->
</div><!--/.navbar -->
</header>
<div class="row">
<div class="col-md-9 contents">
<div class="page-header">
<h1>Lines Annotation</h1>
<div class="hidden name"><code>anno_lines.rd</code></div>
</div>
<div class="ref-description">
<p>Lines Annotation</p>
</div>
<pre class="usage"><span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='no'>x</span>, <span class='kw'>which</span> <span class='kw'>=</span> <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='st'>"column"</span>, <span class='st'>"row"</span>), <span class='kw'>border</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(),
<span class='kw'>add_points</span> <span class='kw'>=</span> <span class='no'>smooth</span>, <span class='kw'>smooth</span> <span class='kw'>=</span> <span class='fl'>FALSE</span>, <span class='kw'>pch</span> <span class='kw'>=</span> <span class='fl'>16</span>, <span class='kw'>size</span> <span class='kw'>=</span> <span class='fu'>unit</span>(<span class='fl'>2</span>, <span class='st'>"mm"</span>), <span class='kw'>pt_gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(), <span class='kw'>ylim</span> <span class='kw'>=</span> <span class='kw'>NULL</span>,
<span class='kw'>extend</span> <span class='kw'>=</span> <span class='fl'>0.05</span>, <span class='kw'>axis</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>axis_param</span> <span class='kw'>=</span> <span class='fu'><a href='default_axis_param.rd.html'>default_axis_param</a></span>(<span class='no'>which</span>),
<span class='kw'>width</span> <span class='kw'>=</span> <span class='kw'>NULL</span>, <span class='kw'>height</span> <span class='kw'>=</span> <span class='kw'>NULL</span>)</pre>
<h2 class="hasAnchor" id="arguments"><a class="anchor" href="#arguments"></a>Arguments</h2>
<table class="ref-arguments">
<colgroup><col class="name" /><col class="desc" /></colgroup>
<tr>
<th>x</th>
<td><p>The value vector. The value can be a vector or a matrix. The length of the vector or the number of rows of the matrix is taken as the number of the observations of the annotation.</p></td>
</tr>
<tr>
<th>which</th>
<td><p>Whether it is a column annotation or a row annotation?</p></td>
</tr>
<tr>
<th>border</th>
<td><p>Wether draw borders of the annotation region?</p></td>
</tr>
<tr>
<th>gp</th>
<td><p>Graphic parameters for lines. The length of each graphic parameter can be 1, or number of columns of <code>x</code> is <code>x</code> is a matrix.</p></td>
</tr>
<tr>
<th>add_points</th>
<td><p>Whether to add points on the lines?</p></td>
</tr>
<tr>
<th>smooth</th>
<td><p>If it is <code>TRUE</code>, smoothing by <code><a href='https://rdrr.io/r/stats/loess.html'>loess</a></code> is performed. If it is <code>TRUE</code>, <code>add_points</code> is set to <code>TRUE</code> by default.</p></td>
</tr>
<tr>
<th>pch</th>
<td><p>Point type. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>size</th>
<td><p>Point size, the value should be a <code><a href='https://rdrr.io/r/grid/unit.html'>unit</a></code> object. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>pt_gp</th>
<td><p>Graphic parameters for points. The length setting is the same as <code>gp</code>.</p></td>
</tr>
<tr>
<th>ylim</th>
<td><p>Data ranges. By default it is <code><a href='https://rdrr.io/r/base/range.html'>range(x)</a></code>.</p></td>
</tr>
<tr>
<th>extend</th>
<td><p>The extension to both side of <code>ylim</code>. The value is a percent value corresponding to <code>ylim[2] - ylim[1]</code>.</p></td>
</tr>
<tr>
<th>axis</th>
<td><p>Whether to add axis?</p></td>
</tr>
<tr>
<th>axis_param</th>
<td><p>parameters for controlling axis. See <code><a href='default_axis_param.rd.html'>default_axis_param</a></code> for all possible settings and default parameters.</p></td>
</tr>
<tr>
<th>width</th>
<td><p>Width of the annotation. The value should be an absolute unit. Width is not allowed to be set for column annotation.</p></td>
</tr>
<tr>
<th>height</th>
<td><p>Height of the annotation. The value should be an absolute unit. Height is not allowed to be set for row annotation.</p></td>
</tr>
</table>
<h2 class="hasAnchor" id="value"><a class="anchor" href="#value"></a>Value</h2>
<p>An annotation function which can be used in <code><a href='HeatmapAnnotation.rd.html'>HeatmapAnnotation</a></code>.</p>
<h2 class="hasAnchor" id="see-also"><a class="anchor" href="#see-also"></a>See also</h2>
<div class='dont-index'><p><a href='https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation'>https://jokergoo.github.io/ComplexHeatmap-reference/book/heatmap-annotations.html#lines-annotation</a></p></div>
<h2 class="hasAnchor" id="examples"><a class="anchor" href="#examples"></a>Examples</h2>
<pre class="examples"><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/stats/Uniform.html'>runif</a></span>(<span class='fl'>10</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"anno_lines"</span>)</div><div class='img'><img src='anno_lines-1.png' alt='' width='700' height='433' /></div><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/cbind.html'>cbind</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>:<span class='fl'>5</span>, <span class='fl'>1</span>:<span class='fl'>5</span>), <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>5</span>:<span class='fl'>1</span>, <span class='fl'>5</span>:<span class='fl'>1</span>)), <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>2</span>:<span class='fl'>3</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"matrix"</span>)</div><div class='img'><img src='anno_lines-2.png' alt='' width='700' height='433' /></div><div class='input'><span class='no'>anno</span> <span class='kw'>=</span> <span class='fu'><a href='anno_lines.rd.html'>anno_lines</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/cbind.html'>cbind</a></span>(<span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>:<span class='fl'>5</span>, <span class='fl'>1</span>:<span class='fl'>5</span>), <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>5</span>:<span class='fl'>1</span>, <span class='fl'>5</span>:<span class='fl'>1</span>)), <span class='kw'>gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>2</span>:<span class='fl'>3</span>),
<span class='kw'>add_points</span> <span class='kw'>=</span> <span class='fl'>TRUE</span>, <span class='kw'>pt_gp</span> <span class='kw'>=</span> <span class='fu'>gpar</span>(<span class='kw'>col</span> <span class='kw'>=</span> <span class='fl'>5</span>:<span class='fl'>6</span>), <span class='kw'>pch</span> <span class='kw'>=</span> <span class='fu'><a href='https://rdrr.io/r/base/c.html'>c</a></span>(<span class='fl'>1</span>, <span class='fl'>16</span>))
<span class='fu'><a href='draw-dispatch.rd.html'>draw</a></span>(<span class='no'>anno</span>, <span class='kw'>test</span> <span class='kw'>=</span> <span class='st'>"matrix"</span>)</div><div class='img'><img src='anno_lines-3.png' alt='' width='700' height='433' /></div></pre>
</div>
<div class="col-md-3 hidden-xs hidden-sm" id="sidebar">
<h2>Contents</h2>
<ul class="nav nav-pills nav-stacked">
<li><a href="#arguments">Arguments</a></li>
<li><a href="#value">Value</a></li>
<li><a href="#see-also">See also</a></li>
<li><a href="#examples">Examples</a></li>
</ul>
</div>
</div>
<footer>
<div class="copyright">
<p>Developed by Zuguang Gu.</p>
</div>
<div class="pkgdown">
<p>Site built with <a href="https://pkgdown.r-lib.org/">pkgdown</a> 1.4.1.</p>
</div>
</footer>
</div>
</body>
</html>
|
#' @title Hydraulic drilling experiment.
#'
#' @description A data set concerning hydraulic drilling in rock
#'
#' @details The time \code{TIME} to drill five feet in rock is measured at different depths \code{DEPTH}
#'
#' @format A data frame with 17 rows and 2 variables:
#' \describe{
#'   \item{DEPTH}{Depth at which drilling occurs, feet}
#'   \item{TIME}{Time to drill 5 feet, minutes}
#' }
#'
#' @docType data
#' @keywords datasets
#' @source \url{https://www.routledge.com/Statistics-for-Engineering-and-the-Sciences-Sixth-Edition/Mendenhall-Sincich/p/book/9781498728850}
"drill"
|
/R/drill.R
|
permissive
|
lilyraye/Intro2R-COVID
|
R
| false
| false
| 551
|
r
|
#' @title Hydraulic drilling experiment.
#'
#' @description A data set concerning hydraulic drilling in rock
#'
#' @details The time \code{TIME} to drill five feet in rock is measured at different depths \code{DEPTH}
#'
#' @format A data frame with 17 rows and 2 variables:
#' \describe{
#' \item{DEPTH}{Depth at which drilling occurs, feet}
#' \item{TIME}{Time to drill 5 feet, minutes}
#' }
#'
#' @source \url{https://www.routledge.com/Statistics-for-Engineering-and-the-Sciences-Sixth-Edition/Mendenhall-Sincich/p/book/9781498728850}
"drill"
|
# Verify that the serial and parallel Jensen-Shannon distance
# implementations compiled from cpp/distance.cpp agree on the same input.
context("distance")

test_that("distance works with Rcpp", {
  sourceCpp("cpp/distance.cpp")
  # 1000 rows of 10 uniform draws, normalised so each row sums to 1
  # (each row is a discrete probability distribution).
  probs <- matrix(runif(10000), ncol = 10)
  probs <- probs / rowSums(probs)
  expect_equal(rcpp_js_distance(probs), rcpp_parallel_js_distance(probs))
})
|
/testthat/test-distance.R
|
no_license
|
RcppCore/RcppParallelTests
|
R
| false
| false
| 293
|
r
|
context( "distance" )
test_that( "distance works with Rcpp", {
sourceCpp( "cpp/distance.cpp" )
n = 1000
m = matrix(runif(n*10), ncol = 10)
m = m/rowSums(m)
expect_equal(
rcpp_js_distance(m),
rcpp_parallel_js_distance(m)
)
})
|
# Count the number of fixations per trial.
#
# Args:
#   data: data frame of single fixations with (at least) the columns
#         sub (subject id), item, seq (trial sequence) and cond (condition).
#         Each input row is assumed to be one fixation — TODO confirm
#         against the upstream preprocessing.
#
# Returns: a data frame with one row per subject x item, holding the
#   subject/item/seq/cond identifiers and nfix, the number of fixation
#   rows observed for that trial. Returns NULL for empty input.
nFix <- function(data) {
  cat("Processing data for subject... ")
  rows <- list()
  # Iterate over the actual subject ids rather than 1:length(unique(...)),
  # so non-consecutive or non-numeric subject codes are handled correctly
  # (the original silently dropped subjects whose id != their index).
  for (s in unique(data$sub)) {
    cat(paste(s, " ", sep = ""))
    items <- sort(unique(data$item[data$sub == s]))  # trials this subject saw
    for (it in items) {
      # which() drops NA matches, mirroring the original subset() semantics
      trial <- data[which(data$sub == s & data$item == it), , drop = FALSE]
      rows[[length(rows) + 1]] <- data.frame(
        sub  = trial$sub[1],
        item = trial$item[1],
        seq  = trial$seq[1],
        cond = trial$cond[1],
        nfix = nrow(trial)   # one input row per fixation
      )
    }
  }
  # Collected in a list and bound once, instead of growing the result with
  # rbind() inside the loop (which copies the accumulator every iteration).
  do.call(rbind, rows)
}
|
/Experiment 1a/functions/nFix.R
|
no_license
|
martin-vasilev/preview_costs
|
R
| false
| false
| 1,122
|
r
|
nFix<- function(data){
sub<- NULL; item<- NULL; seq<- NULL; cond<- NULL; word<- NULL; p<- NULL
nitems<- NULL; n<- NULL; p1<- NULL; p2<- NULL;
dataN<- NULL; dataT<- NULL;
nfix1<- NULL; nfix2<-NULL; nfix<- NULL; sound<- NULL
cat("Processing data for subject... ");
for(i in 1:length(unique(data$sub))){ # for each subect..
nitems<- unique(data$item[data$sub==i])# trials that participant saw
nitems<- sort(nitems)
cat(paste(i, " ", sep=""));
for(j in 1: length(nitems)){ # for each item of subect i
n<- subset(data, sub==i & item==nitems[j]) # subset data for subect i & item j
sub<- n$sub[1]
item<- n$item[1]
seq<- n$seq[1]
cond<- n$cond[1]
nfix<- nrow(n)
dataT<- data.frame(sub, item, seq, cond, nfix)
sub<- NULL; item<- NULL; seq<- NULL; cond<- NULL; word<- NULL; p<- NULL;
p1<- NULL; p2<- NULL; sound<- NULL
nfix1<- NULL; nfix2<- NULL; nfix<-NULL;
dataN<- rbind(dataN, dataT)
} # end of j
} # end of i
return(dataN)
}
|
#' cofeatureR: Generate Cofeature Matrices
#'
#' Generate cofeature (feature by sample) matrices. The package
#' utilizes ggplot2::geom_tile to generate the matrix allowing for easy
#' customization of additions from the base matrix.
#'
#' @docType package
#' @name cofeatureR
#' @importFrom stats setNames
NULL
# NOTE: NULL is the conventional placeholder object for package-level
# roxygen documentation; the block above documents the package itself.
|
/R/cofeatureR-package.R
|
no_license
|
tinyheero/cofeatureR
|
R
| false
| false
| 314
|
r
|
#' cofeatureR: Generate Cofeature Matrices
#'
#' Generate cofeature (feature by sample) matrices. The package
#' utilizes ggplot2::geom_tile to generate the matrix allowing for easy
#' customization of additions from the base matrix.
#'
#' @docType package
#' @name cofeatureR
#' @importFrom stats setNames
NULL
|
# --- Setup ----------------------------------------------------------------
# library() errors immediately if a dependency is missing, unlike require(),
# whose FALSE return value was being ignored here.
library(foreach)
library(doMC)
library(GenomicRanges)

set.seed(20180102)

# SSHFS = TRUE when the script runs against the locally mounted copy of the
# cluster filesystem (paths prefixed with /media/electron, fewer cores).
SSHFS <- FALSE
setDir <- if (SSHFS) "/media/electron" else ""
registerDoMC(if (SSHFS) 2 else 30)

outFold <- "check_shuff_sizegene0"
# dir.create() is portable and avoids shelling out; recursive = TRUE matches
# the semantics of `mkdir -p`.
dir.create(outFold, recursive = TRUE, showWarnings = FALSE)

### generate randomTADs using the shuffle randomization
shuffle_sourceFile <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/ezh2_utils_fct.R")
# Source the helper file in a throwaway environment, keep only the one
# function we need, and rebind its enclosure to the global environment so it
# can see globals defined in this script.
shuffle_chromoPartition_v1 <- local({
  source(shuffle_sourceFile, local = TRUE)
  environment(shuffle_chromoPartition_v1) <- .GlobalEnv
  shuffle_chromoPartition_v1
})

assign_sourceFile <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2/TAD_DE_utils.R")
# Same pattern: extract assignGene2TADs without polluting the global env
# with everything else the sourced file defines.
assignGene2TADs <- local({
  source(assign_sourceFile, local = TRUE)
  environment(assignGene2TADs) <- .GlobalEnv
  assignGene2TADs
})
# Overlay the kernel density curves of every element of size_list on a
# single plot, one colour per element, with a legend.
#
# Args:
#   size_list: named list of numeric vectors (NAs are dropped per vector).
#   plotTit:   main title of the plot.
#   legTxt:    legend labels; defaults to the list names with each n appended.
#   legPos:    legend position keyword (e.g. "topright").
#   my_ylab, my_xlab: axis labels.
plot_multiDens <- function(size_list, plotTit="", legTxt=NULL, legPos="topright", my_ylab="density", my_xlab="") {
  clean <- lapply(size_list, na.omit)
  dens <- lapply(clean, density)
  names(dens) <- names(size_list)
  nObs <- vapply(clean, length, integer(1))  # non-NA count per vector
  # Empty canvas spanning the union of all curves' supports and heights.
  allX <- unlist(lapply(dens, `[[`, "x"))
  allY <- unlist(lapply(dens, `[[`, "y"))
  plot(NA, xlim = range(allX), ylim = range(allY),
       main = plotTit, xlab = my_xlab, ylab = my_ylab)
  for (k in seq_along(dens)) {
    lines(dens[[k]], col = k)
  }
  if (is.null(legTxt)) {
    legTxt <- paste0(names(dens), " (n=", nObs, ")")
  }
  legend(legPos, legend = legTxt, fill = seq_along(dens), bty = "n")
}
# --- Load observed TAD partition, gene coordinates, expression ------------
# TAD-size filter bounds: keep TADs with at least minThresh genes and at most
# the maxQuantile quantile of the gene-count distribution.
minThresh <- 3
maxQuantile <- 0.99
### TAD data
TADpos_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_TADposDT.txt")
TADdt <- read.delim(TADpos_file, header=F, col.names=c("chromo", "region", "start", "end"), stringsAsFactors = F)
# Expected layout (tab-separated, no header):
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
### gene file
entrezDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/gencode.v19.gene.minimal.chr_notAmbiguous_removeDupMZ_changedHeader.txt")
entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors=F)
### gene2tad file => for check
gene2tadDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_gene2tadDT.txt")
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names=c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = FALSE)
# Expected layout:
#LINC00115 chr1 761586 762902 chr1_TAD1
#FAM41C chr1 803451 812283 chr1_TAD1
#SAMD11 chr1 860260 879955 chr1_TAD1
#NOC2L chr1 879584 894689 chr1_TAD1
stopifnot(!any(duplicated(gene2tadDT$entrezID)))
# Gene-ID-indexed lookup table used to verify the re-assignment below.
tmp_gene2tadDT <- gene2tadDT[,c("entrezID", "region")]
rownames(tmp_gene2tadDT) <- tmp_gene2tadDT$entrezID
### expression table
rnaseqDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/CL/cell.lines.byGene_noDup_MZ.RData")
# load() returns the name of the restored object; eval(parse(...)) then fetches
# its value without needing to hard-code the object name.
exprDT <- eval(parse(text = load(rnaseqDT_file)))
# Rownames may hold multiple symbols joined by "///"; split them all out.
expr_genes <- as.character(unlist(sapply(rownames(exprDT), function(x) unlist(strsplit(x,split="///")))))
### take only the genes for which I have expression
entrezDT <- entrezDT[entrezDT$symbol %in% expr_genes,]
######### assign the genes
g2t_dt <- assignGene2TADs(regionDT=TADdt, geneDT = entrezDT, assignMethod = "maxOverlap")
### check that with the true data, I should have the same gene2tad assignment
# Sanity check: re-assigning genes to the observed TADs must reproduce the
# published gene2tad table for every gene present in both.
innerGenes <- intersect(g2t_dt$entrezID, gene2tadDT$entrezID)
length(innerGenes)
tmp_g2t_dt <- g2t_dt
stopifnot(!any(duplicated(tmp_g2t_dt$entrezID)))
rownames(tmp_g2t_dt) <- tmp_g2t_dt$entrezID
inter_true_g2t_dt <- tmp_gene2tadDT[innerGenes,]
inter_assigned_g2t_dt <- tmp_g2t_dt[innerGenes,]
stopifnot(all(inter_true_g2t_dt$region == inter_assigned_g2t_dt$region))
##################################################################################################################################################################
##################################################################################################################################################################
# FROM NOW, WORK ONLY WITH TADs
# (drop inter-TAD/boundary regions; only region names containing "_TAD" kept)
gene2tadDT <- gene2tadDT[grep("_TAD", gene2tadDT$region),]
# Observed number of genes per TAD (named numeric vector, name = region id).
obs_genes_TAD <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
head(obs_genes_TAD)
maxThresh <- as.numeric(quantile(obs_genes_TAD, probs=maxQuantile))
sizeFilter_obs <- obs_genes_TAD[obs_genes_TAD >= minThresh & obs_genes_TAD <= maxThresh]
length(sizeFilter_obs)
# Per-chromosome end coordinate, used as the chromosome size for shuffling.
maxEndDT <- aggregate(end~chromo, data=TADdt, FUN=max, na.rm=TRUE)
TADdt <- TADdt[grep("_TAD", TADdt$region),]
domainDT <- TADdt[,c("chromo", "start", "end")]
all_chromo <- intersect(domainDT$chromo, gene2tadDT$chromo)
################################################################################# NOW SHUFFLE AND ASSIGN
nPerm=1000
outFile <- paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata")
# NOTE: the permutation loop below is commented out because its result has
# already been computed and saved; the script now just load()s the Rdata file.
# Uncomment to regenerate (expensive: nPerm x all chromosomes, parallelized).
# shuff_genesByTAD_withSize <- foreach(i_perm=1:nPerm) %dopar% {
#   allChr_randTADdt <- foreach(chromo = all_chromo, .combine="rbind") %do% {
#     cat(paste0("... perm ", i_perm, " - ", chromo, "\n"))
#     chrEnd <- maxEndDT$end[maxEndDT$chromo == chromo]
#     # select the initial set of TADs for this chromosome
#     chromo_domainDT <- domainDT[domainDT$chromo == chromo,]
#     randTADdt <- shuffle_chromoPartition_v1(domainDT=chromo_domainDT, chrSize = chrEnd , preservePattern=FALSE)
#     randTADdt$region <- paste0(chromo, "_TAD", 1:nrow(randTADdt))
#     # the area of the chromo covered by TADs should be the same before/after shuffling !
#     stopifnot(abs(sum(chromo_domainDT$end-chromo_domainDT$start) - sum(randTADdt$end-randTADdt$start)) < 1e-10)
#     # ensure not overlapping
#     if(nrow(randTADdt) > 1) {
#       for(i in 2:nrow(randTADdt))
#         stopifnot(randTADdt$start[i] > randTADdt$end[i-1])
#     }
#     # ensure all starts smaller than ends
#     stopifnot(randTADdt$start < randTADdt$end)
#     randTADdt
#   } # end building new
#
#   shuff_g2t_dt <- assignGene2TADs(regionDT=allChr_randTADdt, geneDT = entrezDT, assignMethod = "maxOverlap")
#   shuff_g2t_dt$region <- factor(as.character(shuff_g2t_dt$region), levels = as.character(allChr_randTADdt$region))
#   stopifnot(!any(is.na(shuff_g2t_dt$region)))
#
#   tmp <- setNames(as.numeric(table(shuff_g2t_dt$region)), names(table(shuff_g2t_dt$region)))
#   allChr_randTADdt$size <- allChr_randTADdt$end-allChr_randTADdt$start+1
#   stopifnot(!any(duplicated(allChr_randTADdt$region)))
#   stopifnot(all(names(tmp) %in% allChr_randTADdt$region))
#   rownames(allChr_randTADdt) <- allChr_randTADdt$region
#   data.frame(region = names(tmp),
#              nbrGenes = tmp,
#              size = allChr_randTADdt[names(tmp),]$size,
#              stringsAsFactors = F)
#
# }
# save(shuff_genesByTAD_withSize, file = outFile)
# cat(paste0("... written: ", outFile, "\n"))
# Restore the precomputed permutations: a list (one entry per permutation) of
# data.frames with columns region / nbrGenes / size.
load(paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata"))
n_TADs <- sapply(shuff_genesByTAD_withSize, length)
##################################################################################################################################################################
###################################################################################################### size of the regions without genes
##################################################################################################################################################################
# Re-load the observed TAD and gene2tad tables from scratch (the earlier copies
# were filtered/mutated above), then build the observed region-size table.
TADpos_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_TADposDT.txt")
TADdt <- read.delim(TADpos_file, header=F, col.names=c("chromo", "region", "start", "end"), stringsAsFactors = F)
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
TADdt <- TADdt[grepl("_TAD", TADdt$region),]
TADdt$size <- TADdt$end - TADdt$start + 1
### gene file
entrezDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/gencode.v19.gene.minimal.chr_notAmbiguous_removeDupMZ_changedHeader.txt")
entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors=F)
### gene2tad file => for check
gene2tadDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_gene2tadDT.txt")
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names=c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = FALSE)
#LINC00115 chr1 761586 762902 chr1_TAD1
gene2tadDT <- gene2tadDT[grep("_TAD", gene2tadDT$region),]
# Use the TAD table as factor levels so table() also reports TADs with 0 genes.
gene2tadDT$region <- factor(as.character(gene2tadDT$region), levels = as.character(TADdt$region))
obs_genes_TAD <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
head(obs_genes_TAD)
stopifnot(!any(duplicated(TADdt$region)))
stopifnot(all(names(obs_genes_TAD) %in% TADdt$region))
rownames(TADdt) <- TADdt$region
# Observed per-TAD summary: region id, gene count, size in bp.
obs_regions_size <- data.frame(region = names(obs_genes_TAD),
                               nbrGenes = obs_genes_TAD,
                               size = TADdt[names(obs_genes_TAD),]$size,
                               stringsAsFactors = F)
##################################################################################################################################################################
# Observed vs shuffled: TADs containing no gene at all.
obs_0_length_DT <- obs_regions_size[obs_regions_size$nbrGenes == 0,]
obs_0_nbr <- sum(obs_regions_size$nbrGenes == 0)
load(paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata"))
# Per permutation: sizes of the zero-gene TADs ...
shuff_0_length_list <- lapply(shuff_genesByTAD_withSize, function(x) {
  x$size[x$nbrGenes == 0]
})
# ... and how many zero-gene TADs there are.
shuff_0_nbr_list <- lapply(shuff_genesByTAD_withSize, function(x) {
  sum(x$nbrGenes == 0)
})
##################################################################################################################################################################
###################################################################################################### number of the regions without genes
##################################################################################################################################################################
outFile <- paste0(outFold, "/", "nbrTADs_0genes.png")
png(outFile, width=600, height=600)
plot_multiDens(list(shuff = unlist(shuff_0_nbr_list)),
               my_xlab = "nbr TADs",
               plotTit = "Nbr of regions with 0 genes")
legend("topleft", legend = paste0("nbr obs.: ", obs_0_nbr), bty="n")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
##################################################################################################################################################################
###################################################################################################### size of the regions without genes
outFile <- paste0(outFold, "/", "sizeBpTADs_0genes.png")
png(outFile, width=600, height=600)
plot_multiDens(list(obs = obs_0_length_DT$size, shuff = unlist(shuff_0_length_list)),
               my_xlab = "size of the TAD (bp)",
               plotTit = "size of the regions with 0 genes")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
# Same comparison on a log10 scale (TAD sizes span orders of magnitude).
outFile <- paste0(outFold, "/", "sizeBpTADs_0genes_log10.png")
png(outFile, width=600, height=600)
plot_multiDens(list(obs = log10(obs_0_length_DT$size), shuff = log10(unlist(shuff_0_length_list))),
               my_xlab = "size of the TADs (bp) [log10]",
               plotTit = "size of the regions with 0 genes")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
|
/check_shuff_sizegene0.R
|
no_license
|
marzuf/TAD_DE_pipeline_v2
|
R
| false
| false
| 12,069
|
r
|
require(foreach)
require(doMC)
require(GenomicRanges)
set.seed(20180102)
SSHFS <- F
setDir <- ifelse(SSHFS, "/media/electron", "")
registerDoMC(ifelse(SSHFS, 2, 30))
outFold <- "check_shuff_sizegene0"
system(paste0("mkdir -p ", outFold))
### generate randomTADs using the shuffle randomization
shuffle_sourceFile <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/ezh2_utils_fct.R")
shuffle_chromoPartition_v1 <- local({
source(shuffle_sourceFile, local = TRUE)
environment(shuffle_chromoPartition_v1) <- .GlobalEnv
shuffle_chromoPartition_v1
})
assign_sourceFile <- paste0(setDir, "/mnt/ed4/marie/scripts/TAD_DE_pipeline_v2/TAD_DE_utils.R")
assignGene2TADs <- local({
source(assign_sourceFile, local = TRUE)
environment(assignGene2TADs) <- .GlobalEnv
assignGene2TADs
})
# Overlay kernel density curves for several numeric vectors on one plot.
# size_list: named list of numeric vectors (NAs dropped per vector);
# legTxt defaults to "<name> (n=<count>)" labels; draws on the current device.
plot_multiDens <- function(size_list, plotTit="", legTxt=NULL, legPos="topright", my_ylab="density", my_xlab="") {
  dens <- lapply(size_list, function(x) density(na.omit(x)))
  names(dens) <- names(size_list)
  # Number of non-NA observations per vector, reported in the legend.
  lengthDens <- unlist(lapply(size_list, function(x) length(na.omit(x))))
  # Empty canvas spanning the union of all density supports/heights.
  plot(NA, xlim=range(sapply(dens, "[", "x")), ylim=range(sapply(dens, "[", "y")),
       main=plotTit, xlab=my_xlab, ylab=my_ylab)
  # One curve per vector, colored 1..k to match the legend fill.
  foo <- mapply(lines, dens, col=1:length(dens))
  if(is.null(legTxt)){
    # legTxt <- names(dens)
    legTxt <- paste0(names(dens), " (n=", lengthDens, ")")
  }
  legend(legPos, legend=legTxt, fill=1:length(dens), bty='n')
}
minThresh <- 3
maxQuantile <- 0.99
### TAD data
TADpos_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_TADposDT.txt")
TADdt <- read.delim(TADpos_file, header=F, col.names=c("chromo", "region", "start", "end"), stringsAsFactors = F)
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
### gene file
entrezDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/gencode.v19.gene.minimal.chr_notAmbiguous_removeDupMZ_changedHeader.txt")
entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors=F)
### gene2tad file => for check
gene2tadDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_gene2tadDT.txt")
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names=c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = FALSE)
#LINC00115 chr1 761586 762902 chr1_TAD1
#FAM41C chr1 803451 812283 chr1_TAD1
#SAMD11 chr1 860260 879955 chr1_TAD1
#NOC2L chr1 879584 894689 chr1_TAD1
stopifnot(!any(duplicated(gene2tadDT$entrezID)))
tmp_gene2tadDT <- gene2tadDT[,c("entrezID", "region")]
rownames(tmp_gene2tadDT) <- tmp_gene2tadDT$entrezID
### expression table
rnaseqDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/CL/cell.lines.byGene_noDup_MZ.RData")
exprDT <- eval(parse(text = load(rnaseqDT_file)))
expr_genes <- as.character(unlist(sapply(rownames(exprDT), function(x) unlist(strsplit(x,split="///")))))
### take only the genes for which I have expression
entrezDT <- entrezDT[entrezDT$symbol %in% expr_genes,]
######### assign the genes
g2t_dt <- assignGene2TADs(regionDT=TADdt, geneDT = entrezDT, assignMethod = "maxOverlap")
### check that with the true data, I should have the same gene2tad assignment
innerGenes <- intersect(g2t_dt$entrezID, gene2tadDT$entrezID)
length(innerGenes)
tmp_g2t_dt <- g2t_dt
stopifnot(!any(duplicated(tmp_g2t_dt$entrezID)))
rownames(tmp_g2t_dt) <- tmp_g2t_dt$entrezID
inter_true_g2t_dt <- tmp_gene2tadDT[innerGenes,]
inter_assigned_g2t_dt <- tmp_g2t_dt[innerGenes,]
stopifnot(all(inter_true_g2t_dt$region == inter_assigned_g2t_dt$region))
##################################################################################################################################################################
##################################################################################################################################################################
# FROM NOW, WORK ONLY WITH TADs
gene2tadDT <- gene2tadDT[grep("_TAD", gene2tadDT$region),]
obs_genes_TAD <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
head(obs_genes_TAD)
maxThresh <- as.numeric(quantile(obs_genes_TAD, probs=maxQuantile))
sizeFilter_obs <- obs_genes_TAD[obs_genes_TAD >= minThresh & obs_genes_TAD <= maxThresh]
length(sizeFilter_obs)
maxEndDT <- aggregate(end~chromo, data=TADdt, FUN=max, na.rm=TRUE)
TADdt <- TADdt[grep("_TAD", TADdt$region),]
domainDT <- TADdt[,c("chromo", "start", "end")]
all_chromo <- intersect(domainDT$chromo, gene2tadDT$chromo)
################################################################################# NOW SHUFFLE AND ASSIGN
nPerm=1000
outFile <- paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata")
# shuff_genesByTAD_withSize <- foreach(i_perm=1:nPerm) %dopar% {
# allChr_randTADdt <- foreach(chromo = all_chromo, .combine="rbind") %do% {
# cat(paste0("... perm ", i_perm, " - ", chromo, "\n"))
# chrEnd <- maxEndDT$end[maxEndDT$chromo == chromo]
# # select the initial seet of TADs for this chromosome
# chromo_domainDT <- domainDT[domainDT$chromo == chromo,]
# randTADdt <- shuffle_chromoPartition_v1(domainDT=chromo_domainDT, chrSize = chrEnd , preservePattern=FALSE)
# randTADdt$region <- paste0(chromo, "_TAD", 1:nrow(randTADdt))
# # the area of the chromo covered by TADs should be the same before/after shuffling !
# stopifnot(abs(sum(chromo_domainDT$end-chromo_domainDT$start) - sum(randTADdt$end-randTADdt$start)) < 1e-10)
# # ensure not overlapping
# if(nrow(randTADdt) > 1) {
# for(i in 2:nrow(randTADdt))
# stopifnot(randTADdt$start[i] > randTADdt$end[i-1])
# }
# # ensure all starts smaller than ends
# stopifnot(randTADdt$start < randTADdt$end)
# randTADdt
# } # end building new
#
# shuff_g2t_dt <- assignGene2TADs(regionDT=allChr_randTADdt, geneDT = entrezDT, assignMethod = "maxOverlap")
# shuff_g2t_dt$region <- factor(as.character(shuff_g2t_dt$region), levels = as.character(allChr_randTADdt$region))
# stopifnot(!any(is.na(shuff_g2t_dt$region)))
#
# tmp <- setNames(as.numeric(table(shuff_g2t_dt$region)), names(table(shuff_g2t_dt$region)))
# allChr_randTADdt$size <- allChr_randTADdt$end-allChr_randTADdt$start+1
# stopifnot(!any(duplicated(allChr_randTADdt$region)))
# stopifnot(all(names(tmp) %in% allChr_randTADdt$region))
# rownames(allChr_randTADdt) <- allChr_randTADdt$region
# data.frame(region = names(tmp),
# nbrGenes = tmp,
# size = allChr_randTADdt[names(tmp),]$size,
# stringsAsFactors = F)
#
# }
# save(shuff_genesByTAD_withSize, file = outFile)
# cat(paste0("... written: ", outFile, "\n"))
load(paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata"))
n_TADs <- sapply(shuff_genesByTAD_withSize, length)
##################################################################################################################################################################
###################################################################################################### size of the regions without genes
##################################################################################################################################################################
TADpos_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_TADposDT.txt")
TADdt <- read.delim(TADpos_file, header=F, col.names=c("chromo", "region", "start", "end"), stringsAsFactors = F)
#chr1 chr1_TAD1 750001 1300000
#chr1 chr1_TAD2 2750001 3650000
#chr1 chr1_TAD3 3650001 4150000
TADdt <- TADdt[grepl("_TAD", TADdt$region),]
TADdt$size <- TADdt$end - TADdt$start + 1
### gene file
entrezDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/GENE_DATA_MZ/gencode.v19.gene.minimal.chr_notAmbiguous_removeDupMZ_changedHeader.txt")
entrezDT <- read.delim(entrezDT_file, header=T, stringsAsFactors=F)
### gene2tad file => for check
gene2tadDT_file <- paste0(setDir, "/mnt/ed4/marie/scripts/EZH2_final_MAPQ/06_12_50kb_MAPQFILTER/consensus/gene2tad/KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1/TopDom/TopDom_KARPAS_DMSO_LY19WT_DMSO_LY19Y646F_DMSO_WSU_DMSO_c0.75_r100000_v0_w-1_gene2tadDT.txt")
gene2tadDT <- read.delim(gene2tadDT_file, header=F, col.names=c("entrezID", "chromo", "start", "end", "region"), stringsAsFactors = FALSE)
#LINC00115 chr1 761586 762902 chr1_TAD1
gene2tadDT <- gene2tadDT[grep("_TAD", gene2tadDT$region),]
gene2tadDT$region <- factor(as.character(gene2tadDT$region), levels = as.character(TADdt$region))
obs_genes_TAD <- setNames(as.numeric(table(gene2tadDT$region)), names(table(gene2tadDT$region)))
head(obs_genes_TAD)
stopifnot(!any(duplicated(TADdt$region)))
stopifnot(all(names(obs_genes_TAD) %in% TADdt$region))
rownames(TADdt) <- TADdt$region
obs_regions_size <- data.frame(region = names(obs_genes_TAD),
nbrGenes = obs_genes_TAD,
size = TADdt[names(obs_genes_TAD),]$size,
stringsAsFactors = F)
##################################################################################################################################################################
obs_0_length_DT <- obs_regions_size[obs_regions_size$nbrGenes == 0,]
obs_0_nbr <- sum(obs_regions_size$nbrGenes == 0)
load(paste0(outFold, "/", "shuff_genesByTAD_withSize.Rdata"))
shuff_0_length_list <- lapply(shuff_genesByTAD_withSize, function(x) {
x$size[x$nbrGenes == 0]
})
shuff_0_nbr_list <- lapply(shuff_genesByTAD_withSize, function(x) {
sum(x$nbrGenes == 0)
})
##################################################################################################################################################################
###################################################################################################### number of the regions without genes
##################################################################################################################################################################
outFile <- paste0(outFold, "/", "nbrTADs_0genes.png")
png(outFile, width=600, height=600)
plot_multiDens(list(shuff = unlist(shuff_0_nbr_list)),
my_xlab = "nbr TADs",
plotTit = "Nbr of regions with 0 genes")
legend("topleft", legend = paste0("nbr obs.: ", obs_0_nbr), bty="n")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
##################################################################################################################################################################
###################################################################################################### size of the regions without genes
outFile <- paste0(outFold, "/", "sizeBpTADs_0genes.png")
png(outFile, width=600, height=600)
plot_multiDens(list(obs = obs_0_length_DT$size, shuff = unlist(shuff_0_length_list)),
my_xlab = "size of the TAD (bp)",
plotTit = "size of the regions with 0 genes")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
outFile <- paste0(outFold, "/", "sizeBpTADs_0genes_log10.png")
png(outFile, width=600, height=600)
plot_multiDens(list(obs = log10(obs_0_length_DT$size), shuff = log10(unlist(shuff_0_length_list))),
my_xlab = "size of the TADs (bp) [log10]",
plotTit = "size of the regions with 0 genes")
foo <- dev.off()
cat(paste0("... written: ", outFile, "\n"))
|
# Extracted examples for the tester package's scalar predicates.
# A "scalar" here is a single (length-1) non-matrix value; the signed variants
# additionally require a strictly positive / strictly negative number, so
# non-numeric input and NA yield FALSE (per the inline expected results).
library(tester)
### Name: is_scalar
### Title: Is scalar
### Aliases: is_negative_scalar is_not_scalar is_positive_scalar is_scalar
### ** Examples
is_scalar(1) # TRUE
is_scalar(pi) # TRUE
is_scalar(1:5) # FALSE
is_scalar(matrix(runif(4), 2, 2)) # FALSE
is_not_scalar(1:5) # TRUE
is_not_scalar(NULL) # TRUE
is_not_scalar(matrix(runif(4), 2, 2)) # TRUE
is_positive_scalar(1.0) # TRUE
is_positive_scalar(0) # FALSE
is_positive_scalar(-10) # FALSE
is_positive_scalar("hoskdflksfd") # FALSE
is_positive_scalar(NA) # FALSE
is_negative_scalar(-1) # TRUE
is_negative_scalar(0) # FALSE
is_negative_scalar(10) # FALSE
is_negative_scalar("hoskdflksfd") # FALSE
is_negative_scalar(NA) # FALSE
|
/data/genthat_extracted_code/tester/examples/is_scalar.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 709
|
r
|
library(tester)
### Name: is_scalar
### Title: Is scalar
### Aliases: is_negative_scalar is_not_scalar is_positive_scalar is_scalar
### ** Examples
is_scalar(1) # TRUE
is_scalar(pi) # TRUE
is_scalar(1:5) # FALSE
is_scalar(matrix(runif(4), 2, 2)) # FALSE
is_not_scalar(1:5) # TRUE
is_not_scalar(NULL) # TRUE
is_not_scalar(matrix(runif(4), 2, 2)) # TRUE
is_positive_scalar(1.0) # TRUE
is_positive_scalar(0) # FALSE
is_positive_scalar(-10) # FALSE
is_positive_scalar("hoskdflksfd") # FALSE
is_positive_scalar(NA) # FALSE
is_negative_scalar(-1) # TRUE
is_negative_scalar(0) # FALSE
is_negative_scalar(10) # FALSE
is_negative_scalar("hoskdflksfd") # FALSE
is_negative_scalar(NA) # FALSE
|
#' A simple function for removing cookies
#'
#' This function looks for cookies used for logging into the NBN. Sometimes
#' this login process can go wrong, using this function you can delete cookies
#' resolving some issues.
#'
#' @export
#' @param path The directory where the cookies are stored, this is your working
#' directory.
#' @param remove Logical, if TRUE the cookie file is deleted if FALSE the existance
#' of the file is reported.
#' @return NULL, invisibly. Called for its side effect (console message and,
#' optionally, deletion of the cookie file).
#' @author Tom August, CEH \email{tomaug@@ceh.ac.uk}
#' @examples \dontrun{
#' NBNCookies()
#' }
NBNCookies <- function(path = getwd(), remove = FALSE){
    # Check the cookie file directly rather than listing the whole directory:
    # same result, and avoids scanning potentially large directories.
    cookie_file <- file.path(path, 'rnbn_cookies.txt')
    if(file.exists(cookie_file)){
        if(remove){
            unlink(cookie_file)
            cat('Cookies deleted\n')
        } else {
            cat('Cookies found, not deleted\n')
        }
    } else {
        cat('No cookies file found\n')
    }
    invisible(NULL)
}
|
/R/NBNCookies.r
|
no_license
|
JNCC-UK/rnbn
|
R
| false
| false
| 1,047
|
r
|
#' A simple function for removing cookies
#'
#' Looks for the NBN login cookie file (\code{rnbn_cookies.txt}) in the given
#' directory. The NBN login process can occasionally go wrong; deleting the
#' cookie file with this function resolves some of those issues.
#'
#' @export
#' @param path The directory where the cookies are stored; defaults to the
#' working directory.
#' @param remove Logical. If \code{TRUE} the cookie file is deleted; if
#' \code{FALSE} only the existence of the file is reported.
#' @return NULL
#' @author Tom August, CEH \email{tomaug@@ceh.ac.uk}
#' @examples \dontrun{
#' NBNCookies()
#' }
NBNCookies <- function(path = getwd(), remove = FALSE){
    has_cookie <- 'rnbn_cookies.txt' %in% list.files(path = path)
    if(!has_cookie){
        cat('No cookies file found\n')
    } else if(remove){
        unlink(file.path(path, 'rnbn_cookies.txt'))
        cat('Cookies deleted\n')
    } else {
        cat('Cookies found, not deleted\n')
    }
}
|
# Extracted examples for fitPoly::fitOneMarker — fitting mixture models to
# signal ratios of a single bi-allelic marker at several ploidy levels.
library(fitPoly)
### Name: fitOneMarker
### Title: Function to fit multiple mixture models to signal ratios of a
###   single bi-allelic marker
### Aliases: fitOneMarker
### ** Examples
## No test:
# These examples run for a total of about 9 sec.
data(fitPoly_data)
# triploid, no specified populations
fp <- fitOneMarker(ploidy=3, marker="mrk039",
                   data=fitPoly_data$ploidy3$dat3x)
# tetraploid, specified populations
# plot of the fitted model saved in tempdir()
fp <- fitOneMarker(ploidy=4, marker=2,
                   data=fitPoly_data$ploidy4$dat4x,
                   population=fitPoly_data$ploidy4$pop4x,
                   pop.parents=fitPoly_data$ploidy4$pop.par4x,
                   plot="fitted",
                   plot.dir=paste0(tempdir(),"/fpPlots4x"))
# hexaploid, specified populations, start values for means,
# plot of the fitted model saved in tempdir()
fp <- fitOneMarker(ploidy=6, marker=1,
                   data=fitPoly_data$ploidy6$dat6x,
                   population=fitPoly_data$ploidy6$pop6x,
                   pop.parents=fitPoly_data$ploidy6$pop.par6x,
                   startmeans=fitPoly_data$ploidy6$startmeans6x,
                   plot="fitted", plot.dir=paste0(tempdir(),"/fpPlots6x"))
## End(No test)
|
/data/genthat_extracted_code/fitPoly/examples/fitOneMarker.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 1,297
|
r
|
library(fitPoly)
### Name: fitOneMarker
### Title: Function to fit multiple mixture models to signal ratios of a
### single bi-allelic marker
### Aliases: fitOneMarker
### ** Examples
## No test:
# These examples run for a total of about 9 sec.
data(fitPoly_data)
# triploid, no specified populations
fp <- fitOneMarker(ploidy=3, marker="mrk039",
data=fitPoly_data$ploidy3$dat3x)
# tetraploid, specified populations
# plot of the fitted model saved in tempdir()
fp <- fitOneMarker(ploidy=4, marker=2,
data=fitPoly_data$ploidy4$dat4x,
population=fitPoly_data$ploidy4$pop4x,
pop.parents=fitPoly_data$ploidy4$pop.par4x,
plot="fitted",
plot.dir=paste0(tempdir(),"/fpPlots4x"))
# hexaploid, specified populations, start values for means,
# plot of the fitted model saved in tempdir()
fp <- fitOneMarker(ploidy=6, marker=1,
data=fitPoly_data$ploidy6$dat6x,
population=fitPoly_data$ploidy6$pop6x,
pop.parents=fitPoly_data$ploidy6$pop.par6x,
startmeans=fitPoly_data$ploidy6$startmeans6x,
plot="fitted", plot.dir=paste0(tempdir(),"/fpPlots6x"))
## End(No test)
|
#' print attribute.profile.class
#'
#' Prints the \code{results} slot of the object.
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @method print attribute.profile.class
#' @export
setMethod(
  f = "print",
  signature = signature(x = 'attribute.profile.class'),
  definition = function(x) print(slot(x, "results"))
)
#' head attribute.profile.class
#'
#' Returns the first rows of the \code{results} slot of the object.
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @method head attribute.profile.class
#' @export
setMethod(
  f = "head",
  signature = signature(x = 'attribute.profile.class'),
  definition = function(x) head(slot(x, "results"))
)
#' summary attribute.profile.class
#'
#' Summarises, per attribute profile, how many respondents were assigned to it
#' (via the \code{max.class} column of the results slot) and the corresponding
#' proportion, alongside the profile's 0/1 attribute pattern.
#'
#' @param object \code{\link{attribute.profile.class}} input object
#' @param verbose a logical. If TRUE, additional diagnostics are printed.
#' @return (invisibly) a data frame with one row per attribute profile:
#'   profile id, one column per attribute, counts and proportion.
#' @method summary attribute.profile.class
#' @export
setMethod(
  f = "summary",
  signature = signature(object = 'attribute.profile.class'),
  definition = function(object, verbose = TRUE, ...){
    results <- slot(object, "results")
    nresults <- nrow(results)
    pmatrix <- slot(object, "attribute.profile.matrix")
    # Count respondents per assigned profile (plyr::ddply must be available);
    # 'proportion' starts as a raw count and is normalised just below.
    attribute.profile <- ddply(results, .(max.class), summarize, counts = length(max.class),
                               proportion = length(max.class))
    attribute.profile$proportion <- with(attribute.profile, round(proportion/nresults, 3))
    attribute.profile <- rename(attribute.profile, c('max.class' = 'attribute.profile'))
    # Attach the 0/1 attribute pattern of each profile; rows of pmatrix are
    # indexed by the profile number (assumed 1-based -- TODO confirm).
    attribute.profile <- cbind(attribute.profile, pmatrix[attribute.profile$attribute.profile,])
    # Reorder columns: profile id, attribute columns, then counts/proportion.
    attribute.profile <- attribute.profile[, c('attribute.profile', colnames(pmatrix), 'counts', 'proportion')]
    if (verbose){
      cat(sprintf("\nNumber of attribute profiles: %d", nrow(attribute.profile)))
      cat("\nAttribute profile counts and proportion: \n")
      print(as.data.frame(attribute.profile, row.names = NULL))
    }
    invisible(attribute.profile)
  }
)
#' plot attribute.profile.class
#'
#' Plots either the mean membership probability of each attribute profile
#' across respondents (\code{type = 'mean'}, bar chart) or a per-respondent
#' mastery tableplot (\code{type = 'profile'}).
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @param type a string containing either \code{mean} or \code{profile}
#' @method plot attribute.profile.class
#' @export
setMethod(
  f = "plot",
  signature = signature(x = 'attribute.profile.class', y = "missing"),
  definition = function(x, y, type = 'mean', ...)
  {
    results <- slot(x, "results")
    if (type == 'mean'){
      pmatrix <- slot(x, "attribute.profile.matrix")
      # Columns whose names contain a digit hold the per-profile probabilities.
      # BUG FIX: the original call was grep('[0-9]+', names(results), ) with a
      # stray trailing comma; R rejects empty arguments in closure calls
      # ("argument 3 is empty"), so type = 'mean' always errored.
      melted.attr.profile <- melt(results, id.vars = "id",
                                  measure.vars = grep('[0-9]+', names(results)),
                                  value.name = "mean.attr.profile",
                                  variable.name = "attr.profile.number")
      # Average each profile's membership probability over all respondents.
      means.attr.profile <- ddply(melted.attr.profile, .(attr.profile.number), summarize, mean.attr.profile = mean(mean.attr.profile))
      # Legend labels: the 0/1 attribute pattern of each profile, e.g. "1,0,1".
      means.attr.profile$profile.labels <- sapply(as.numeric(levels(means.attr.profile$attr.profile.number)), function(x) paste(pmatrix[x, ], collapse = ","))
      print(ggplot(means.attr.profile, aes(x = attr.profile.number, y = mean.attr.profile, fill = attr.profile.number)) + geom_bar(stat = "identity") +
              scale_fill_discrete(name = "Attribute Profile", labels = means.attr.profile$profile.labels) +
              ylim(0,1) + ylab("Mean Mastery Proportion") + xlab("Attribute Profile") + ggtitle("Mean Attribute Profile Mastery"))
    }
    if (type == 'profile'){
      # results is assumed to hold an id column and a max.class column besides
      # the probability columns, hence ncol - 2 groups -- TODO confirm.
      ngroups <- ncol(results) - 2
      PlotSkillMasteryTableplot(results, ngroups, is.max.class = TRUE)
    }
  }
)
|
/dcmr/R/attribute_profile_class_methods.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 3,444
|
r
|
#' print attribute.profile.class
#'
#' Prints the \code{results} slot of the object.
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @method print attribute.profile.class
#' @export
setMethod(
  f = "print",
  signature = signature(x = 'attribute.profile.class'),
  definition = function(x)
  {
    print(x@results)
  }
)
#' head attribute.profile.class
#'
#' Returns the first rows of the \code{results} slot of the object.
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @method head attribute.profile.class
#' @export
setMethod(
  f = "head",
  signature = signature(x = 'attribute.profile.class'),
  definition = function(x)
  {
    head(x@results)
  }
)
#' summary attribute.profile.class
#'
#' Summarises, per attribute profile, the respondent counts and proportions
#' (based on the \code{max.class} assignments) next to the profile's 0/1
#' attribute pattern; returns the summary table invisibly.
#'
#' @param object \code{\link{attribute.profile.class}} input object
#' @param verbose a logical. If TRUE, additional diagnostics are printed.
#' @method summary attribute.profile.class
#' @export
setMethod(
  f = "summary",
  signature = signature(object = 'attribute.profile.class'),
  definition = function(object, verbose = TRUE, ...){
    results <- slot(object, "results")
    nresults <- nrow(results)
    pmatrix <- slot(object, "attribute.profile.matrix")
    # Count respondents per assigned profile (requires plyr to be attached);
    # 'proportion' starts as a raw count and is normalised below.
    attribute.profile <- ddply(results, .(max.class), summarize, counts = length(max.class),
                               proportion = length(max.class))
    attribute.profile$proportion <- with(attribute.profile, round(proportion/nresults, 3))
    attribute.profile <- rename(attribute.profile, c('max.class' = 'attribute.profile'))
    # Attach each profile's attribute pattern (rows of pmatrix indexed by the
    # profile number, assumed 1-based -- TODO confirm).
    attribute.profile <- cbind(attribute.profile, pmatrix[attribute.profile$attribute.profile,])
    attribute.profile <- attribute.profile[, c('attribute.profile', colnames(pmatrix), 'counts', 'proportion')]
    if (verbose){
      cat(sprintf("\nNumber of attribute profiles: %d", nrow(attribute.profile)))
      cat("\nAttribute profile counts and proportion: \n")
      print(as.data.frame(attribute.profile, row.names = NULL))
    }
    invisible(attribute.profile)
  }
)
#' plot attribute.profile.class
#'
#' Plots either the mean membership probability of each attribute profile
#' across respondents (\code{type = 'mean'}, bar chart) or a per-respondent
#' mastery tableplot (\code{type = 'profile'}).
#'
#' @param x \code{\link{attribute.profile.class}} input object
#' @param type a string containing either \code{mean} or \code{profile}
#' @method plot attribute.profile.class
#' @export
setMethod(
  f = "plot",
  signature = signature(x = 'attribute.profile.class', y = "missing"),
  definition = function(x, y, type = 'mean', ...)
  {
    results <- slot(x, "results")
    if (type == 'mean'){
      pmatrix <- slot(x, "attribute.profile.matrix")
      # Columns whose names contain a digit hold the per-profile probabilities.
      # BUG FIX: the original grep() call carried a stray trailing comma,
      # an empty argument that R rejects at run time ("argument 3 is empty").
      melted.attr.profile <- melt(results, id.vars = "id",
                                  measure.vars = grep('[0-9]+', names(results)),
                                  value.name = "mean.attr.profile",
                                  variable.name = "attr.profile.number")
      # Average each profile's membership probability over all respondents.
      means.attr.profile <- ddply(melted.attr.profile, .(attr.profile.number), summarize, mean.attr.profile = mean(mean.attr.profile))
      # Legend labels: the 0/1 attribute pattern of each profile, e.g. "1,0,1".
      means.attr.profile$profile.labels <- sapply(as.numeric(levels(means.attr.profile$attr.profile.number)), function(x) paste(pmatrix[x, ], collapse = ","))
      print(ggplot(means.attr.profile, aes(x = attr.profile.number, y = mean.attr.profile, fill = attr.profile.number)) + geom_bar(stat = "identity") +
              scale_fill_discrete(name = "Attribute Profile", labels = means.attr.profile$profile.labels) +
              ylim(0,1) + ylab("Mean Mastery Proportion") + xlab("Attribute Profile") + ggtitle("Mean Attribute Profile Mastery"))
    }
    if (type == 'profile'){
      # results is assumed to hold an id and a max.class column besides the
      # probability columns, hence ncol - 2 groups -- TODO confirm.
      ngroups <- ncol(results) - 2
      PlotSkillMasteryTableplot(results, ngroups, is.max.class = TRUE)
    }
  }
)
|
load('networks_list_global.Rdata')
# Generalized simulation of adoption curves, Moran's index, and other metrics.
# Dimensions are read from the restored networks_list_global object.
qtd_simulacoes_validas <- length(networks_list_global$limiteinferior_)
qtd_anos_simulacao<- length(networks_list_global$moran_obs_[1,])
qtd_cidades<-length(networks_list_global$infection_[1,])
# City names and simulation-year names come from the exposure matrix dimnames.
names_cidades<-attr(networks_list_global$exposure_,'dimnames')[[1]]
names_anos_simulacao<-attr(networks_list_global$exposure_,'dimnames')[[2]]
names_simulacoes <- 1:qtd_simulacoes_validas
# Unique lower bounds: one curve will be generated per lower interval bound.
curvas <- unique(networks_list_global$limiteinferior_)
print(curvas)
# Locate, for each unique lower bound, the ranges of intervals sharing it.
intervalos <- findInterval(curvas, networks_list_global$limiteinferior_)
print(intervalos)
# Will print the various curves, with the number of edges per year.
idx_anterior_ini_lim_inf<-1 #1276
for (i in 1:length(curvas)) {
#i<-97
lim_inf <- curvas[i]
idx_atual_fim_lim_inf <- intervalos[i]
nomeCurva<-paste('(',lim_inf,')')
qtd_intervalos_na_linha <- idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1
print(paste('Curva com nome ',nomeCurva,
' vai de ',idx_anterior_ini_lim_inf,' a ',idx_atual_fim_lim_inf,
' com ',(qtd_intervalos_na_linha),' elementos.'))
if (i == 1) { # gera uma plotagem inicial
plot(NULL,
xlim=c(min(networks_list_global$limitesuperior_-lim_inf),max(networks_list_global$limitesuperior_-lim_inf)),
ylim=c(min(networks_list_global$qtd_arestas_),max(networks_list_global$qtd_arestas_)),
main='Arestas por intervalo de tempo de deslocamento',
ylab='Qtd de arestas',
xlab='Amplitude do intervalo')
}
x<-networks_list_global$limitesuperior_[idx_atual_fim_lim_inf]
y<-networks_list_global$qtd_arestas_[idx_atual_fim_lim_inf]
if (qtd_intervalos_na_linha>= 10) text(x,y,nomeCurva)
print(paste('Plotando curva terminando em ',x,',',y))
lines(networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf],
networks_list_global$qtd_arestas_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf])
idx_anterior_ini_lim_inf<-idx_atual_fim_lim_inf+1
}
# vai imprimir as várias curvas, com o índice de moran para os vários anos (2009 a 2017)
for (j in 1:length(names_anos_simulacao)) {
# j <- 2
ano <- names_anos_simulacao[j]
idx_anterior_ini_lim_inf<-1 #1276
for (i in 1:length(curvas)) {
lim_inf <- curvas[i]
idx_atual_fim_lim_inf <- intervalos[i]
nomeCurva<-paste('(',lim_inf,')')
qtd_intervalos_na_linha <- idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1
print(paste('Curva com nome ',nomeCurva,
' vai de ',idx_anterior_ini_lim_inf,' a ',idx_atual_fim_lim_inf,
' com ',(idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1),' elementos.'))
if (i == 1) { # gera uma plotagem inicial
plot(NULL,
xlim=c(
min(networks_list_global$limitesuperior_-
networks_list_global$limiteinferior_),
max(networks_list_global$limitesuperior_-networks_list_global$limiteinferior_)),
ylim=c(min(networks_list_global$moran_obs_[,j]),max(networks_list_global$moran_obs_[,j])),
main='Moran I / adoção LIRAa / Intervalo de deslocamento',
ylab=paste('Moran I (',ano,')'),
xlab='Amplitude do intervalo')
}
validos<-(networks_list_global$moran_pval_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf,j])<1e-3
amplitudes<-networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]-
networks_list_global$limiteinferior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]
amplitudes_validas<-amplitudes[validos]
x<-max(amplitudes_validas)
y<-networks_list_global$moran_obs_[idx_atual_fim_lim_inf,j]
print(paste('Plotando curva terminando em ',x,',',y))
valores_x<-networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]-
networks_list_global$limiteinferior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]
valores_y<-networks_list_global$moran_obs_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf,j]
cor_=sample(rainbow(10))
lines(valores_x[validos],
valores_y[validos],col=cor_)
if (length(valores_x[validos]) && qtd_intervalos_na_linha>= 3)
text(x,y,nomeCurva,col=cor_)
idx_anterior_ini_lim_inf<-idx_atual_fim_lim_inf+1
}
}
|
/arquivo/plotagemCurvas.R
|
no_license
|
acgabriel3/difusaoDeUmaInovacao
|
R
| false
| false
| 4,480
|
r
|
# plotagemCurvas.R
# Plots the simulation results stored in `networks_list_global.Rdata`:
#   (1) number of network edges per displacement-time interval, and
#   (2) Moran's I of LIRAa adoption, one figure per simulated year.
# One curve is drawn for each distinct lower bound of the interval.
load('networks_list_global.Rdata')
# Generalized simulation of the adoption curves, Moran's index and others.
qtd_simulacoes_validas <- length(networks_list_global$limiteinferior_)
qtd_anos_simulacao<- length(networks_list_global$moran_obs_[1,])
qtd_cidades<-length(networks_list_global$infection_[1,])
names_cidades<-attr(networks_list_global$exposure_,'dimnames')[[1]]
names_anos_simulacao<-attr(networks_list_global$exposure_,'dimnames')[[2]]
# NOTE(review): qtd_cidades, names_cidades and names_simulacoes are not used
# below — confirm whether they are needed elsewhere.
names_simulacoes <- 1:qtd_simulacoes_validas
# Unique lower bounds; one curve will be generated for each distinct value.
curvas <- unique(networks_list_global$limiteinferior_)
print(curvas)
# For each unique lower bound, the index of the last interval sharing it.
intervalos <- findInterval(curvas, networks_list_global$limiteinferior_)
print(intervalos)
# Plot one curve per lower bound: number of edges vs. interval amplitude.
idx_anterior_ini_lim_inf<-1 # first index of the current lower-bound run
for (i in 1:length(curvas)) {
  #i<-97  (debugging leftover)
  lim_inf <- curvas[i]
  idx_atual_fim_lim_inf <- intervalos[i]
  nomeCurva<-paste('(',lim_inf,')')
  qtd_intervalos_na_linha <- idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1
  print(paste('Curva com nome ',nomeCurva,
              ' vai de ',idx_anterior_ini_lim_inf,' a ',idx_atual_fim_lim_inf,
              ' com ',(qtd_intervalos_na_linha),' elementos.'))
  if (i == 1) { # set up the empty canvas on the first iteration
    plot(NULL,
         xlim=c(min(networks_list_global$limitesuperior_-lim_inf),max(networks_list_global$limitesuperior_-lim_inf)),
         ylim=c(min(networks_list_global$qtd_arestas_),max(networks_list_global$qtd_arestas_)),
         main='Arestas por intervalo de tempo de deslocamento',
         ylab='Qtd de arestas',
         xlab='Amplitude do intervalo')
  }
  # Label position: end point of the current curve.
  x<-networks_list_global$limitesuperior_[idx_atual_fim_lim_inf]
  y<-networks_list_global$qtd_arestas_[idx_atual_fim_lim_inf]
  # Only label curves long enough to be readable.
  if (qtd_intervalos_na_linha>= 10) text(x,y,nomeCurva)
  print(paste('Plotando curva terminando em ',x,',',y))
  lines(networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf],
        networks_list_global$qtd_arestas_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf])
  idx_anterior_ini_lim_inf<-idx_atual_fim_lim_inf+1
}
# Plot one curve per lower bound: Moran's I vs. interval amplitude,
# one figure per simulated year (2009-2017).
for (j in 1:length(names_anos_simulacao)) {
  # j <- 2  (debugging leftover)
  ano <- names_anos_simulacao[j]
  idx_anterior_ini_lim_inf<-1 # first index of the current lower-bound run
  for (i in 1:length(curvas)) {
    lim_inf <- curvas[i]
    idx_atual_fim_lim_inf <- intervalos[i]
    nomeCurva<-paste('(',lim_inf,')')
    qtd_intervalos_na_linha <- idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1
    print(paste('Curva com nome ',nomeCurva,
                ' vai de ',idx_anterior_ini_lim_inf,' a ',idx_atual_fim_lim_inf,
                ' com ',(idx_atual_fim_lim_inf-idx_anterior_ini_lim_inf+1),' elementos.'))
    if (i == 1) { # set up the empty canvas on the first iteration
      plot(NULL,
           xlim=c(
             min(networks_list_global$limitesuperior_-
                   networks_list_global$limiteinferior_),
             max(networks_list_global$limitesuperior_-networks_list_global$limiteinferior_)),
           ylim=c(min(networks_list_global$moran_obs_[,j]),max(networks_list_global$moran_obs_[,j])),
           main='Moran I / adoção LIRAa / Intervalo de deslocamento',
           ylab=paste('Moran I (',ano,')'),
           xlab='Amplitude do intervalo')
    }
    # Keep only points whose Moran p-value is below 1e-3.
    validos<-(networks_list_global$moran_pval_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf,j])<1e-3
    amplitudes<-networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]-
      networks_list_global$limiteinferior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]
    amplitudes_validas<-amplitudes[validos]
    # NOTE(review): max() of an empty vector returns -Inf with a warning when
    # no point passes the p-value filter — confirm this is acceptable.
    x<-max(amplitudes_validas)
    y<-networks_list_global$moran_obs_[idx_atual_fim_lim_inf,j]
    print(paste('Plotando curva terminando em ',x,',',y))
    valores_x<-networks_list_global$limitesuperior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]-
      networks_list_global$limiteinferior_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf]
    valores_y<-networks_list_global$moran_obs_[idx_anterior_ini_lim_inf:idx_atual_fim_lim_inf,j]
    # NOTE(review): sample(rainbow(10)) returns a 10-colour vector;
    # sample(rainbow(10), 1) was probably intended — confirm.
    cor_=sample(rainbow(10))
    lines(valores_x[validos],
          valores_y[validos],col=cor_)
    if (length(valores_x[validos]) && qtd_intervalos_na_linha>= 3)
      text(x,y,nomeCurva,col=cor_)
    idx_anterior_ini_lim_inf<-idx_atual_fim_lim_inf+1
  }
}
|
# Two-class classifier for iris species (Iris-setosa vs Iris-versicolor)
# using sepal length (V1) and sepal width (V2) and a Newton-type update.
# NOTE(review): the final decision boundary (abline) is hard-coded from a
# previous run; the working directory is machine specific.
# Set the working directory (machine-specific path).
setwd('/Users/fiona/RWorkspace/iris')
# Read the iris data; header = F gives columns V1..V5.
iris <- read.csv('iris.csv',header = F)
# Keep the first 100 rows (two species) and two features plus the label.
iris.train <- iris[1:100,c(1,2,5)]
# Encode the species label as 0/1 (second factor level -> 1).
# NOTE(review): under R >= 4.0 read.csv() returns character columns, so
# as.numeric() would yield NAs — confirm stringsAsFactors/TRUE is in effect.
iris.train$V5 <- as.numeric(iris.train$V5)-1
# Class proportions p1 (label 1) and p0 (label 0).
count0 <- nrow(iris.train[iris.train$V5 == 0,])
count1 <- nrow(iris.train[iris.train$V5 == 1,])
countall <- nrow(iris.train)
p1 <- count1/countall
p0 <- count0/countall
# Design matrix x = [V1, V2, bias] and label vector y.
xb <- 1
x <- cbind(iris.train[,1:2], xb)
y <- iris.train[,3]
# First-order derivative (gradient) accumulated over all samples.
# NOTE(review): uses the constant class prior p1 instead of the per-sample
# predicted probability sigmoid(beita . xi) — confirm this simplification.
m <- countall
derivative1 <- c(0,0,0)
for(i in 1:m){
  xi <- matrix(x[i,],nrow = 1, ncol = 3)
  xi  # no-op: leftover from interactive inspection
  xi <- apply(xi, 2, as.numeric)  # data.frame row -> plain numeric vector
  yi <- as.numeric(y[i])
  y_p <- yi - p1
  derivative1 <- derivative1 - (xi* y_p)
}
# Second-order derivative (Hessian) accumulated over all samples.
# NOTE(review): xi %*% txi is the scalar inner product x_i . x_i; a full
# Hessian would use the 3x3 outer product txi %*% xi — confirm intent.
derivative2 <- 0;
for(i in 1:m){
  xi <- matrix(x[i,],nrow = 1, ncol = 3)
  xi <- apply(xi, 2, as.numeric)
  txi <- matrix(x[i,],nrow = 3, ncol = 1)
  txi <- apply(txi, 2, as.numeric)
  xtx <- xi%*%txi
  derivative2 <- derivative2 + xtx*p1*p0
}
# Initial coefficient vector (two weights + bias).
beita <- c(1,1,1)
# Newton-style iterations.
# NOTE(review): 1/derivative2 is the element-wise reciprocal, not a matrix
# inverse (solve); it works here only because derivative2 is 1x1.
for(i in 1:1000){
  beita <- beita - ((1/derivative2)*derivative1)
}
# Scatter plot of the two classes with the fitted decision boundary.
x1 <- iris.train[iris.train$V5 == 0, 1]
y1 <- iris.train[iris.train$V5 == 0, 2]
x2 <- iris.train[iris.train$V5 == 1, 1]
y2 <- iris.train[iris.train$V5 == 1, 2]
plot(x1, y1, pch = 1, col = "red", axes =TRUE, xlab="sepal length", ylab="sepal width", xlim=c(4,7), ylim=c(1,5))
points(x2,y2)
# NOTE(review): intercept/slope hard-coded from a previous fit — confirm they
# match the `beita` computed above.
abline(-4.5, 1.447368)
|
/PatternRecognition/iris/homework.R
|
no_license
|
Fiona-Gao/StudyWork
|
R
| false
| false
| 1,638
|
r
|
# Two-class classifier for iris species (Iris-setosa vs Iris-versicolor)
# using sepal length (V1) and sepal width (V2) and a Newton-type update.
# NOTE(review): the final decision boundary (abline) is hard-coded from a
# previous run; the working directory is machine specific.
# Set the working directory (machine-specific path).
setwd('/Users/fiona/RWorkspace/iris')
# Read the iris data; header = F gives columns V1..V5.
iris <- read.csv('iris.csv',header = F)
# Keep the first 100 rows (two species) and two features plus the label.
iris.train <- iris[1:100,c(1,2,5)]
# Encode the species label as 0/1 (second factor level -> 1).
# NOTE(review): under R >= 4.0 read.csv() returns character columns, so
# as.numeric() would yield NAs — confirm stringsAsFactors/TRUE is in effect.
iris.train$V5 <- as.numeric(iris.train$V5)-1
# Class proportions p1 (label 1) and p0 (label 0).
count0 <- nrow(iris.train[iris.train$V5 == 0,])
count1 <- nrow(iris.train[iris.train$V5 == 1,])
countall <- nrow(iris.train)
p1 <- count1/countall
p0 <- count0/countall
# Design matrix x = [V1, V2, bias] and label vector y.
xb <- 1
x <- cbind(iris.train[,1:2], xb)
y <- iris.train[,3]
# First-order derivative (gradient) accumulated over all samples.
# NOTE(review): uses the constant class prior p1 instead of the per-sample
# predicted probability sigmoid(beita . xi) — confirm this simplification.
m <- countall
derivative1 <- c(0,0,0)
for(i in 1:m){
  xi <- matrix(x[i,],nrow = 1, ncol = 3)
  xi  # no-op: leftover from interactive inspection
  xi <- apply(xi, 2, as.numeric)  # data.frame row -> plain numeric vector
  yi <- as.numeric(y[i])
  y_p <- yi - p1
  derivative1 <- derivative1 - (xi* y_p)
}
# Second-order derivative (Hessian) accumulated over all samples.
# NOTE(review): xi %*% txi is the scalar inner product x_i . x_i; a full
# Hessian would use the 3x3 outer product txi %*% xi — confirm intent.
derivative2 <- 0;
for(i in 1:m){
  xi <- matrix(x[i,],nrow = 1, ncol = 3)
  xi <- apply(xi, 2, as.numeric)
  txi <- matrix(x[i,],nrow = 3, ncol = 1)
  txi <- apply(txi, 2, as.numeric)
  xtx <- xi%*%txi
  derivative2 <- derivative2 + xtx*p1*p0
}
# Initial coefficient vector (two weights + bias).
beita <- c(1,1,1)
# Newton-style iterations.
# NOTE(review): 1/derivative2 is the element-wise reciprocal, not a matrix
# inverse (solve); it works here only because derivative2 is 1x1.
for(i in 1:1000){
  beita <- beita - ((1/derivative2)*derivative1)
}
# Scatter plot of the two classes with the fitted decision boundary.
x1 <- iris.train[iris.train$V5 == 0, 1]
y1 <- iris.train[iris.train$V5 == 0, 2]
x2 <- iris.train[iris.train$V5 == 1, 1]
y2 <- iris.train[iris.train$V5 == 1, 2]
plot(x1, y1, pch = 1, col = "red", axes =TRUE, xlab="sepal length", ylab="sepal width", xlim=c(4,7), ylim=c(1,5))
points(x2,y2)
# NOTE(review): intercept/slope hard-coded from a previous fit — confirm they
# match the `beita` computed above.
abline(-4.5, 1.447368)
|
\name{plot.correlation}
\alias{plot.correlation}
\title{Plot a correlation matrix}
\usage{
\method{plot}{correlation}(x, y=NULL, ...)
}
\arguments{
\item{x}{A result of \link{correlation} function;}
\item{y}{Not used;}
\item{...}{Not used.}
}
\description{
Plot a correlation matrix.
}
\details{
Plot \code{x}, the result of \link{correlation}.
}
%\references{
% References
%}
\author{Diego Pasqualin \email{dpasqualin@inf.ufpr.br}}
\seealso{
\link{correlation}
}
%\keyword{phylogenetics}
\keyword{utilities}
|
/man/plot.correlation.Rd
|
no_license
|
dpasqualin/sfreemap
|
R
| false
| false
| 536
|
rd
|
\name{plot.correlation}
\alias{plot.correlation}
\title{Plot a correlation matrix}
\usage{
\method{plot}{correlation}(x, y=NULL, ...)
}
\arguments{
\item{x}{A result of \link{correlation} function;}
\item{y}{Not used;}
\item{...}{Not used.}
}
\description{
Plot a correlation matrix.
}
\details{
Plot \code{x}, the result of \link{correlation}.
}
%\references{
% References
%}
\author{Diego Pasqualin \email{dpasqualin@inf.ufpr.br}}
\seealso{
\link{correlation}
}
%\keyword{phylogenetics}
\keyword{utilities}
|
#
# Shiny dashboard exploring the football results in `match.data.csv`.
#
# Tabs:
#   * Barras       - bar chart of a user-selected column, facetted by away team
#   * Probabilidad - static probability images
#   * Data Table   - interactive table of the raw data
#   * Imágen       - static odds ("momios") images
#
# You can run the application by clicking the 'Run App' button above.
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(shinydashboard)
#install.packages("shinythemes")
library(shinythemes)

# NOTE(review): machine-specific absolute path; `match` also shadows
# base::match(). Both kept unchanged to preserve the app's behaviour.
match<-read.csv("C:/Users/leona/Documents/Bedu/Postworks/Postworks/Postwork8/match.data.csv")

# ---- UI --------------------------------------------------------------------
ui <-
  fluidPage(
    dashboardPage(
      dashboardHeader(title = "Basic dashboard"),
      dashboardSidebar(
        sidebarMenu(
          menuItem("Barras", tabName = "Dashboard", icon = icon("dashboard")),
          menuItem("Probabilidad", tabName = "Probabilidad", icon = icon("file-picture-o")),
          menuItem("Data Table", tabName = "data_table", icon = icon("table")),
          menuItem("Imágen", tabName = "img", icon = icon("file-picture-o"))
        )
      ),
      dashboardBody(
        tabItems(
          # Bar chart tab
          tabItem(tabName = "Dashboard",
                  fluidRow(
                    titlePanel("Gráfica de Barras"),
                    selectInput("x", "Seleccione el valor de X",
                                choices = names(match)),
                    # fix: removed an empty trailing argument after box(),
                    # which makes R raise "argument is empty" at call time
                    box(plotOutput("plot1", height = 500, width = 500))
                  )
          ),
          # Static probability images
          tabItem(tabName = "Probabilidad",
                  fluidRow(
                    titlePanel(h3("Probabilidad")),
                    img(src = "P3FTAG.png",
                        height = 350, width = 500),
                    img(src = "P3FTHG.png",
                        height = 350, width = 500)
                  )
          ),
          # Raw data table
          tabItem(tabName = "data_table",
                  fluidRow(
                    titlePanel(h3("Data Table")),
                    dataTableOutput("data_table")
                  )
          ),
          # Static odds images
          tabItem(tabName = "img",
                  fluidRow(
                    titlePanel(h3("Momios")),
                    img(src = "momiosmax.png",
                        height = 350, width = 500),
                    img(src = "momiosprom.png",
                        height = 350, width = 500)
                  )
          )
        )
      )
    )
  )

# ---- Server ----------------------------------------------------------------
server <- function(input, output) {
  # Bar chart of the column chosen in the sidebar, facetted by away team.
  # fix: dropped the dead `z <- match[,input$zz]` line (no `zz` input exists
  # in the UI) and a redundant library(ggplot2) call inside the server.
  output$plot1 <- renderPlot({
    x <- match[,input$x]
    ggplot(match, aes(x=x)) +
      geom_bar() + facet_wrap(vars(away.team))
  })
  # Interactive table of the raw match data.
  output$data_table <- renderDataTable( {match},
                                        options = list(aLengthMenu = c(5,25,50),
                                                       iDisplayLength = 5)
  )
}

# Run the application
shinyApp(ui = ui, server = server)
|
/Postworks/Postwork8/Post8/app.R
|
no_license
|
LeonarZuma/Postworks
|
R
| false
| false
| 3,796
|
r
|
#
# Shiny dashboard exploring the football results in `match.data.csv`.
#
# Tabs:
#   * Barras       - bar chart of a user-selected column, facetted by away team
#   * Probabilidad - static probability images
#   * Data Table   - interactive table of the raw data
#   * Imágen       - static odds ("momios") images
#
# You can run the application by clicking the 'Run App' button above.
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(shiny)
library(ggplot2)
library(shinydashboard)
#install.packages("shinythemes")
library(shinythemes)

# NOTE(review): machine-specific absolute path; `match` also shadows
# base::match(). Both kept unchanged to preserve the app's behaviour.
match<-read.csv("C:/Users/leona/Documents/Bedu/Postworks/Postworks/Postwork8/match.data.csv")

# ---- UI --------------------------------------------------------------------
ui <-
  fluidPage(
    dashboardPage(
      dashboardHeader(title = "Basic dashboard"),
      dashboardSidebar(
        sidebarMenu(
          menuItem("Barras", tabName = "Dashboard", icon = icon("dashboard")),
          menuItem("Probabilidad", tabName = "Probabilidad", icon = icon("file-picture-o")),
          menuItem("Data Table", tabName = "data_table", icon = icon("table")),
          menuItem("Imágen", tabName = "img", icon = icon("file-picture-o"))
        )
      ),
      dashboardBody(
        tabItems(
          # Bar chart tab
          tabItem(tabName = "Dashboard",
                  fluidRow(
                    titlePanel("Gráfica de Barras"),
                    selectInput("x", "Seleccione el valor de X",
                                choices = names(match)),
                    # fix: removed an empty trailing argument after box(),
                    # which makes R raise "argument is empty" at call time
                    box(plotOutput("plot1", height = 500, width = 500))
                  )
          ),
          # Static probability images
          tabItem(tabName = "Probabilidad",
                  fluidRow(
                    titlePanel(h3("Probabilidad")),
                    img(src = "P3FTAG.png",
                        height = 350, width = 500),
                    img(src = "P3FTHG.png",
                        height = 350, width = 500)
                  )
          ),
          # Raw data table
          tabItem(tabName = "data_table",
                  fluidRow(
                    titlePanel(h3("Data Table")),
                    dataTableOutput("data_table")
                  )
          ),
          # Static odds images
          tabItem(tabName = "img",
                  fluidRow(
                    titlePanel(h3("Momios")),
                    img(src = "momiosmax.png",
                        height = 350, width = 500),
                    img(src = "momiosprom.png",
                        height = 350, width = 500)
                  )
          )
        )
      )
    )
  )

# ---- Server ----------------------------------------------------------------
server <- function(input, output) {
  # Bar chart of the column chosen in the sidebar, facetted by away team.
  # fix: dropped the dead `z <- match[,input$zz]` line (no `zz` input exists
  # in the UI) and a redundant library(ggplot2) call inside the server.
  output$plot1 <- renderPlot({
    x <- match[,input$x]
    ggplot(match, aes(x=x)) +
      geom_bar() + facet_wrap(vars(away.team))
  })
  # Interactive table of the raw match data.
  output$data_table <- renderDataTable( {match},
                                        options = list(aLengthMenu = c(5,25,50),
                                                       iDisplayLength = 5)
  )
}

# Run the application
shinyApp(ui = ui, server = server)
|
###########################################################################/**
# @RdocFunction WHInit
#
# @title "Initialization of the W and H matrices"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{An KxI @matrix where I is the number of arrays and
# K is the number of probes where K should be even (K=2L).}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @list:
# \item{W}{A Kx2 @matrix of initial probe-affinity estimates.}
# \item{H}{A 2xI @matrix of initial allele-specific copy-number estimates.}
# \item{status}{An @integer specifying the status of the initialization:
# 0=normal case, 1=only one allele (either AA or BB), or
# 2=all samples are AB.
# }
# }
#
# \details{
# The allele-specific copy number estimates are estimated using a
# naive genotyping algorithm.
# The probe-affinities are estimated using a pseudo inverse.
# }
#
# @keyword internal
#*/###########################################################################
WHInit <- function(V, ...) {
  # Initialize the W (probe affinities, Kx2) and H (allele-specific copy
  # numbers, 2xI) matrices from the KxI signal matrix V, where the first
  # K/2 rows are the allele-A probes (PMA) and the last K/2 rows the
  # allele-B probes (PMB).
  #
  # Returns list(W, H, status) with status:
  #   0 = normal case, 1 = only one allele (AA or BB), 2 = all samples AB.

  # Number of arrays
  I <- ncol(V);
  # Number of probes
  K <- nrow(V);
  # Number of probe pairs
  L <- as.integer(K/2);
  # A small positive value used in place of zero/negative estimates
  eps <- 0.0001;

  H <- matrix(0, nrow=2L, ncol=I);
  W <- matrix(0, nrow=K, ncol=2L);

  rrA <- 1:L;
  rrB <- (L+1):K;
  rrBA <- c(rrB, rrA);

  PMA <- V[rrA,,drop=FALSE];
  PMB <- V[rrB,,drop=FALSE];

  # Naive genotyping by majority vote over probes: a sample is called
  # A-present when PMA > 0.5*PMB on most probes, and B-present when
  # PMB > 0.5*PMA on most probes, so the columns of H are AA = (1,0),
  # BB = (0,1), and AB = (1,1).
  # NOTE(review): the original comments described a 2x threshold while the
  # code uses a 0.5x factor — the 0.5x code path is kept as-is.
  H[1,] <- as.integer(colMeans(PMA > 0.5*PMB) > 0.5);
  H[2,] <- as.integer(colMeans(PMB > 0.5*PMA) > 0.5);

  summary <- 2*H[1L,] + H[2L,];
  dummy <- unique(summary);
  status <- 0L;

  # If all the samples have the same genotype, it is a special case
  if (length(dummy) == 1L) {
    # We have only one genotype
    if (prod(H[,1L]) == 0) {
      # Case AA or BB (one of the two alleles is absent everywhere)
      # Use the median for the first column of W
      W[,1L] <- rowMedians(V)/2;
      # Flip it for the second column
      W[,2L] <- W[rrBA,1L];
      # Total copy number is 2 on the single present allele
      H <- H*2;
      # BUGFIX: after H <- H*2 the entries are 0 or 2, so the original test
      # (H[2L,1L] == 1) could never detect the BB case and the allele
      # columns were never swapped; compare against 2 instead.
      if (H[2L,1L] == 2){
        # It was BB: swap the allele columns of W and rows of H
        W <- W[,c(2L,1L),drop=FALSE];
        H <- H[c(2L,1L),,drop=FALSE];
      }
      status <- 1L;
    } else {
      # All samples are AB
      W[,1L] <- rowMedians(V);
      W[,2L] <- W[,1L];
      # In this case there is no way to compute the cross hybridization;
      # we assume that there is none (just to assume something :-)
      W[rrB,1L] <- eps;
      W[rrA,2L] <- eps;
      status <- 2L;
    }
  } else {
    # Normal case: rescale each genotype column of H to sum to 2 copies
    aux <- colSums(H);
    aux <- rep(aux, times=2L);
    dim(aux) <- c((length(aux)/2), 2);
    aux <- t(aux);
    H <- 2 * H/aux;
    H[is.na(H)] <- 0;
    # Probe affinities via pseudo inverse of H:  V ~ W %*% H
    W <- t(miqr.solve(t(H),t(V)));
    W[W < 0] <- eps;
    # Genotyping errors show up as too-similar allele columns; compare the
    # correlation of W[,1] against the flipped vs. unflipped W[,2].
    corDiff <- cor(W[,1L],W[rrBA,2L]) - cor(W[,1L],W[,2L]);
    if (is.na(corDiff) || corDiff < 0.1) {
      # Fall back to solving for a single allele profile
      W0 <- W;
      W[,1L] <- rowMedians(W0);
      W[,2L] <- W0[rrBA,1L];
      H <- miqr.solve(W, V);
      H[H < 0] <- 0;
      status <- 1L;
    }
  }

  # Sanity check (may be removed in the future /HB 2009-03-24)
  stopifnot(nrow(W) == K && ncol(W) == 2L);
  stopifnot(nrow(H) == 2L && ncol(H) == I);

  list(W=W, H=H, status=status);
} # WHInit()
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Added Rdoc comments.
# o Cleaned up code.
# 2009-02-02 [MO]
# o Change some code and make it more efficient
# 2009-01-28 [HB]
# o BUG FIX: The code of WHInit() assumed 20 probes in one place.
############################################################################
|
/R/WHInit.R
|
no_license
|
HenrikBengtsson/ACNE
|
R
| false
| false
| 3,921
|
r
|
###########################################################################/**
# @RdocFunction WHInit
#
# @title "Initialization of the W and H matrices"
#
# \description{
# @get "title".
# }
#
# @synopsis
#
# \arguments{
# \item{V}{An KxI @matrix where I is the number of arrays and
# K is the number of probes where K should be even (K=2L).}
# \item{...}{Not used.}
# }
#
# \value{
# Returns a @list:
# \item{W}{A Kx2 @matrix of initial probe-affinity estimates.}
# \item{H}{A 2xI @matrix of initial allele-specific copy-number estimates.}
# \item{status}{An @integer specifying the status of the initialization:
# 0=normal case, 1=only one allele (either AA or BB), or
# 2=all samples are AB.
# }
# }
#
# \details{
# The allele-specific copy number estimates are estimated using a
# naive genotyping algorithm.
# The probe-affinities are estimated using a pseudo inverse.
# }
#
# @keyword internal
#*/###########################################################################
WHInit <- function(V, ...) {
  # Initialize the W (probe affinities, Kx2) and H (allele-specific copy
  # numbers, 2xI) matrices from the KxI signal matrix V, where the first
  # K/2 rows are the allele-A probes (PMA) and the last K/2 rows the
  # allele-B probes (PMB).
  #
  # Returns list(W, H, status) with status:
  #   0 = normal case, 1 = only one allele (AA or BB), 2 = all samples AB.

  # Number of arrays
  I <- ncol(V);
  # Number of probes
  K <- nrow(V);
  # Number of probe pairs
  L <- as.integer(K/2);
  # A small positive value used in place of zero/negative estimates
  eps <- 0.0001;

  H <- matrix(0, nrow=2L, ncol=I);
  W <- matrix(0, nrow=K, ncol=2L);

  rrA <- 1:L;
  rrB <- (L+1):K;
  rrBA <- c(rrB, rrA);

  PMA <- V[rrA,,drop=FALSE];
  PMB <- V[rrB,,drop=FALSE];

  # Naive genotyping by majority vote over probes: a sample is called
  # A-present when PMA > 0.5*PMB on most probes, and B-present when
  # PMB > 0.5*PMA on most probes, so the columns of H are AA = (1,0),
  # BB = (0,1), and AB = (1,1).
  # NOTE(review): the original comments described a 2x threshold while the
  # code uses a 0.5x factor — the 0.5x code path is kept as-is.
  H[1,] <- as.integer(colMeans(PMA > 0.5*PMB) > 0.5);
  H[2,] <- as.integer(colMeans(PMB > 0.5*PMA) > 0.5);

  summary <- 2*H[1L,] + H[2L,];
  dummy <- unique(summary);
  status <- 0L;

  # If all the samples have the same genotype, it is a special case
  if (length(dummy) == 1L) {
    # We have only one genotype
    if (prod(H[,1L]) == 0) {
      # Case AA or BB (one of the two alleles is absent everywhere)
      # Use the median for the first column of W
      W[,1L] <- rowMedians(V)/2;
      # Flip it for the second column
      W[,2L] <- W[rrBA,1L];
      # Total copy number is 2 on the single present allele
      H <- H*2;
      # BUGFIX: after H <- H*2 the entries are 0 or 2, so the original test
      # (H[2L,1L] == 1) could never detect the BB case and the allele
      # columns were never swapped; compare against 2 instead.
      if (H[2L,1L] == 2){
        # It was BB: swap the allele columns of W and rows of H
        W <- W[,c(2L,1L),drop=FALSE];
        H <- H[c(2L,1L),,drop=FALSE];
      }
      status <- 1L;
    } else {
      # All samples are AB
      W[,1L] <- rowMedians(V);
      W[,2L] <- W[,1L];
      # In this case there is no way to compute the cross hybridization;
      # we assume that there is none (just to assume something :-)
      W[rrB,1L] <- eps;
      W[rrA,2L] <- eps;
      status <- 2L;
    }
  } else {
    # Normal case: rescale each genotype column of H to sum to 2 copies
    aux <- colSums(H);
    aux <- rep(aux, times=2L);
    dim(aux) <- c((length(aux)/2), 2);
    aux <- t(aux);
    H <- 2 * H/aux;
    H[is.na(H)] <- 0;
    # Probe affinities via pseudo inverse of H:  V ~ W %*% H
    W <- t(miqr.solve(t(H),t(V)));
    W[W < 0] <- eps;
    # Genotyping errors show up as too-similar allele columns; compare the
    # correlation of W[,1] against the flipped vs. unflipped W[,2].
    corDiff <- cor(W[,1L],W[rrBA,2L]) - cor(W[,1L],W[,2L]);
    if (is.na(corDiff) || corDiff < 0.1) {
      # Fall back to solving for a single allele profile
      W0 <- W;
      W[,1L] <- rowMedians(W0);
      W[,2L] <- W0[rrBA,1L];
      H <- miqr.solve(W, V);
      H[H < 0] <- 0;
      status <- 1L;
    }
  }

  # Sanity check (may be removed in the future /HB 2009-03-24)
  stopifnot(nrow(W) == K && ncol(W) == 2L);
  stopifnot(nrow(H) == 2L && ncol(H) == I);

  list(W=W, H=H, status=status);
} # WHInit()
############################################################################
# HISTORY:
# 2009-03-24 [HB]
# o Added Rdoc comments.
# o Cleaned up code.
# 2009-02-02 [MO]
# o Change some code and make it more efficient
# 2009-01-28 [HB]
# o BUG FIX: The code of WHInit() assumed 20 probes in one place.
############################################################################
|
/rscript/textmining.R
|
no_license
|
r3dmaohong/TextMining
|
R
| false
| false
| 6,106
|
r
| ||
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ISOMetaIdentifier.R
\docType{class}
\name{ISOMetaIdentifier}
\alias{ISOMetaIdentifier}
\title{ISOMetaIdentifier}
\format{\code{\link{R6Class}} object.}
\usage{
ISOMetaIdentifier
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO MetaIdentifier
}
\description{
ISOMetaIdentifier
}
\section{Methods}{
\describe{
\item{\code{new(xml, code)}}{
This method is used to instantiate an ISOMetaIdentifier
}
}
}
\examples{
md <- ISOMetaIdentifier$new(code = "identifier")
xml <- md$encode()
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\references{
ISO 19115:2003 - Geographic information -- Metadata
}
\keyword{ISO}
\keyword{identifier}
\keyword{meta}
|
/man/ISOMetaIdentifier.Rd
|
no_license
|
sebkopf/geometa
|
R
| false
| false
| 765
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/ISOMetaIdentifier.R
\docType{class}
\name{ISOMetaIdentifier}
\alias{ISOMetaIdentifier}
\title{ISOMetaIdentifier}
\format{\code{\link{R6Class}} object.}
\usage{
ISOMetaIdentifier
}
\value{
Object of \code{\link{R6Class}} for modelling an ISO MetaIdentifier
}
\description{
ISOMetaIdentifier
}
\section{Methods}{
\describe{
\item{\code{new(xml, code)}}{
This method is used to instantiate an ISOMetaIdentifier
}
}
}
\examples{
md <- ISOMetaIdentifier$new(code = "identifier")
xml <- md$encode()
}
\author{
Emmanuel Blondel <emmanuel.blondel1@gmail.com>
}
\references{
ISO 19115:2003 - Geographic information -- Metadata
}
\keyword{ISO}
\keyword{identifier}
\keyword{meta}
|
# Exercise 3: interactive Shiny apps
# Load the shiny, ggplot2, and dplyr libraries
library("shiny")
library("ggplot2")
library("dplyr")

# Random sample of rows from ggplot2's `diamonds` data set, drawn once at
# startup so every session browses the same subset.
# NOTE(review): the exercise text asked for 3000 rows; 1000 is kept here to
# preserve the app's current behaviour.
diamonds_sample <- sample_n(diamonds, 1000)

# Min/max price over the ENTIRE diamonds data set (slider bounds).
price_range <- range(diamonds$price)

# Column names of `diamonds`, offered as choices in the feature dropdown.
select_values <- colnames(diamonds)

# ---- UI --------------------------------------------------------------------
ui <- fluidPage(
  titlePanel("Diamond Viewer"),
  # Price range selector; starts at the full min/max range.
  sliderInput(
    label = "Price (in dollars)",
    inputId = "price",
    min = price_range[1],
    max = price_range[2],
    value = price_range
  ),
  # Column of `diamonds` to put on the x-axis.
  selectInput(
    label = "Feature of Interest",
    inputId = "feature",
    choices = select_values,
    selected = "carat"
  ),
  # Toggle for the optional trendline overlay.
  checkboxInput(
    label = "Show Trendline",
    inputId = "has_trendline",
    value = TRUE
  ),
  plotOutput(outputId = "my_plot"),
  # Bonus: table of the observations currently shown in the plot.
  dataTableOutput(outputId = "my_data_table")
)

# ---- Server ----------------------------------------------------------------
server <- function(input, output) {
  # Diamonds whose price falls inside the selected range.
  # fix: made the comparison inclusive (>=, <=) so the slider's extreme
  # positions no longer silently drop the cheapest/most expensive diamonds.
  filtered <- reactive({
    diamonds_sample %>%
      filter(price >= input$price[1] & price <= input$price[2])
  })

  output$my_plot <- renderPlot({
    # Scatter plot of price against the chosen feature, coloured by clarity.
    p <- ggplot(filtered(), aes_string(x = input$feature, y = "price")) +
      geom_point(aes_string(color = "clarity"))
    # fix: the "Show Trendline" checkbox was declared but never used;
    # overlay a single smoothed trendline when it is ticked.
    if (input$has_trendline) {
      p <- p + geom_smooth(se = FALSE)
    }
    p
  })

  # fix: `my_data_table` was declared in the UI but never rendered;
  # show the currently filtered observations.
  output$my_data_table <- renderDataTable(
    filtered(),
    options = list(iDisplayLength = 5)
  )
}

# Create a new `shinyApp()` using the above ui and server
shinyApp(ui, server)

## Double Bonus: For fun, can you make a similar browser for the `mpg` data set?
## it makes the bonus data table a lot more useful!
|
/chapter-19-exercises/exercise-3/app.R
|
permissive
|
emilyliu1999/book-exercises
|
R
| false
| false
| 3,508
|
r
|
# Exercise 3: interactive Shiny apps
# Load the shiny, ggplot2, and dplyr libraries
library("shiny")
library("ggplot2")
library("dplyr")

# Random sample of rows from ggplot2's `diamonds` data set, drawn once at
# startup so every session browses the same subset.
# NOTE(review): the exercise text asked for 3000 rows; 1000 is kept here to
# preserve the app's current behaviour.
diamonds_sample <- sample_n(diamonds, 1000)

# Min/max price over the ENTIRE diamonds data set (slider bounds).
price_range <- range(diamonds$price)

# Column names of `diamonds`, offered as choices in the feature dropdown.
select_values <- colnames(diamonds)

# ---- UI --------------------------------------------------------------------
ui <- fluidPage(
  titlePanel("Diamond Viewer"),
  # Price range selector; starts at the full min/max range.
  sliderInput(
    label = "Price (in dollars)",
    inputId = "price",
    min = price_range[1],
    max = price_range[2],
    value = price_range
  ),
  # Column of `diamonds` to put on the x-axis.
  selectInput(
    label = "Feature of Interest",
    inputId = "feature",
    choices = select_values,
    selected = "carat"
  ),
  # Toggle for the optional trendline overlay.
  checkboxInput(
    label = "Show Trendline",
    inputId = "has_trendline",
    value = TRUE
  ),
  plotOutput(outputId = "my_plot"),
  # Bonus: table of the observations currently shown in the plot.
  dataTableOutput(outputId = "my_data_table")
)

# ---- Server ----------------------------------------------------------------
server <- function(input, output) {
  # Diamonds whose price falls inside the selected range.
  # fix: made the comparison inclusive (>=, <=) so the slider's extreme
  # positions no longer silently drop the cheapest/most expensive diamonds.
  filtered <- reactive({
    diamonds_sample %>%
      filter(price >= input$price[1] & price <= input$price[2])
  })

  output$my_plot <- renderPlot({
    # Scatter plot of price against the chosen feature, coloured by clarity.
    p <- ggplot(filtered(), aes_string(x = input$feature, y = "price")) +
      geom_point(aes_string(color = "clarity"))
    # fix: the "Show Trendline" checkbox was declared but never used;
    # overlay a single smoothed trendline when it is ticked.
    if (input$has_trendline) {
      p <- p + geom_smooth(se = FALSE)
    }
    p
  })

  # fix: `my_data_table` was declared in the UI but never rendered;
  # show the currently filtered observations.
  output$my_data_table <- renderDataTable(
    filtered(),
    options = list(iDisplayLength = 5)
  )
}

# Create a new `shinyApp()` using the above ui and server
shinyApp(ui, server)

## Double Bonus: For fun, can you make a similar browser for the `mpg` data set?
## it makes the bonus data table a lot more useful!
|
#' Make predictor level bubble plots
#'
#' @param dat A data frame with the columns area, year
#' @param variable The column to summarize (character value)
#' @param group_id The events to count over (e.g. `"fishing_event_id"` or `"trip_id"`)
#' @param ncol Number of facetted columns (facets by area)
#'
#' @return A ggplot object
#' @export
#'
#' @examples
#' d <- data.frame(year = rep(1991:2010, each = 20),
#'   spp_catch = rbinom(20 * 20, size = 1, prob = 0.8),
#'   fishing_event_id = rep(1:20, 20),
#'   my_predictor = sample(gl(20, 20), size = 400),
#'   area = rep("a", 400))
#' plot_predictor_bubbles(d, "my_predictor")
plot_predictor_bubbles <- function(dat, variable,
  group_id = "fishing_event_id", ncol = 2) {
  # Unique events per area/year/predictor level among positive catches only:
  temp_pos <- dat %>%
    filter(spp_catch > 0) %>%
    group_by(area, year, !!rlang::sym(variable)) %>%
    summarise(n = length(unique(!!rlang::sym(group_id)))) %>%
    group_by(area, !!rlang::sym(variable)) %>%
    mutate(n_tot = sum(n)) %>%
    ungroup()
  # Unique events per area/year/predictor level among all records:
  temp_all <- dat %>%
    group_by(area, year, !!rlang::sym(variable)) %>%
    summarise(n = length(unique(!!rlang::sym(group_id)))) %>%
    group_by(area, !!rlang::sym(variable)) %>%
    mutate(n_tot = sum(n)) %>%
    ungroup()
  # Filled bubbles = positive sets; open bubbles = all sets.
  p <- temp_pos %>%
    ggplot(aes_string("as.factor(year)", y = variable)) +
    geom_point(aes_string(size = "n", fill = "n"), alpha = 0.4, pch = 21) +
    geom_point(data = temp_all, aes_string(size = "n"), alpha = 0.4, pch = 21) +
    facet_wrap(~area, scales = "free", ncol = ncol) +
    ggplot2::scale_x_discrete(breaks = seq(1950, 2020, 5)) +
    xlab("") + ylab(firstup(gsub("_", " ", variable))) +
    labs(size = paste0("Number of\n", group_id)) +
    labs(fill = paste0("Number of\n", group_id)) +
    ggplot2::scale_size_continuous(range = c(0, 7), breaks = c(1, 10, 100, 500, 1000)) +
    ggplot2::scale_fill_viridis_c(trans = "log", breaks = c(1, 10, 100, 500, 1000)) +
    guides(fill = guide_legend(reverse = TRUE), size = guide_legend(reverse = TRUE)) +
    theme_pbs()
  # Thin the y-axis labels when there are many factor levels. The original
  # branches (`> 13 & < 40` and `> 41`) left a gap: counts of exactly 40 or
  # 41 levels matched neither; the branches now partition the range at 40.
  n_levels <- length(levels(temp_all[[variable]]))
  if (n_levels > 13 && n_levels <= 40) {
    # show every 2nd level
    p <- p + ggplot2::scale_y_discrete(breaks = levels(temp_all[[variable]])[c(TRUE, FALSE)])
  }
  if (n_levels > 40) {
    # show every 5th level
    p <- p + ggplot2::scale_y_discrete(breaks = levels(temp_all[[variable]])[c(TRUE, rep(FALSE, 4))])
  }
  p
}
|
/R/plot_predictor_bubbles.R
|
no_license
|
pbs-assess/gfplot
|
R
| false
| false
| 2,340
|
r
|
#' Make predictor level bubble plots
#'
#' @param dat A data frame with the columns area, year
#' @param variable The column to summarize (character value)
#' @param group_id The events to count over (e.g. `"fishing_event_id"` or `"trip_id"`)
#' @param ncol Number of facetted columns (facets by area)
#'
#' @return A ggplot object
#' @export
#'
#' @examples
#' d <- data.frame(year = rep(1991:2010, each = 20),
#'   spp_catch = rbinom(20 * 20, size = 1, prob = 0.8),
#'   fishing_event_id = rep(1:20, 20),
#'   my_predictor = sample(gl(20, 20), size = 400),
#'   area = rep("a", 400))
#' plot_predictor_bubbles(d, "my_predictor")
plot_predictor_bubbles <- function(dat, variable,
  group_id = "fishing_event_id", ncol = 2) {
  # Unique events per area/year/predictor level among positive catches only:
  temp_pos <- dat %>%
    filter(spp_catch > 0) %>%
    group_by(area, year, !!rlang::sym(variable)) %>%
    summarise(n = length(unique(!!rlang::sym(group_id)))) %>%
    group_by(area, !!rlang::sym(variable)) %>%
    mutate(n_tot = sum(n)) %>%
    ungroup()
  # Unique events per area/year/predictor level among all records:
  temp_all <- dat %>%
    group_by(area, year, !!rlang::sym(variable)) %>%
    summarise(n = length(unique(!!rlang::sym(group_id)))) %>%
    group_by(area, !!rlang::sym(variable)) %>%
    mutate(n_tot = sum(n)) %>%
    ungroup()
  # Filled bubbles = positive sets; open bubbles = all sets.
  p <- temp_pos %>%
    ggplot(aes_string("as.factor(year)", y = variable)) +
    geom_point(aes_string(size = "n", fill = "n"), alpha = 0.4, pch = 21) +
    geom_point(data = temp_all, aes_string(size = "n"), alpha = 0.4, pch = 21) +
    facet_wrap(~area, scales = "free", ncol = ncol) +
    ggplot2::scale_x_discrete(breaks = seq(1950, 2020, 5)) +
    xlab("") + ylab(firstup(gsub("_", " ", variable))) +
    labs(size = paste0("Number of\n", group_id)) +
    labs(fill = paste0("Number of\n", group_id)) +
    ggplot2::scale_size_continuous(range = c(0, 7), breaks = c(1, 10, 100, 500, 1000)) +
    ggplot2::scale_fill_viridis_c(trans = "log", breaks = c(1, 10, 100, 500, 1000)) +
    guides(fill = guide_legend(reverse = TRUE), size = guide_legend(reverse = TRUE)) +
    theme_pbs()
  # Thin the y-axis labels when there are many factor levels. The original
  # branches (`> 13 & < 40` and `> 41`) left a gap: counts of exactly 40 or
  # 41 levels matched neither; the branches now partition the range at 40.
  n_levels <- length(levels(temp_all[[variable]]))
  if (n_levels > 13 && n_levels <= 40) {
    # show every 2nd level
    p <- p + ggplot2::scale_y_discrete(breaks = levels(temp_all[[variable]])[c(TRUE, FALSE)])
  }
  if (n_levels > 40) {
    # show every 5th level
    p <- p + ggplot2::scale_y_discrete(breaks = levels(temp_all[[variable]])[c(TRUE, rep(FALSE, 4))])
  }
  p
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_lr.R
\name{create_lr}
\alias{create_lr}
\title{Create Linear Regression Model}
\usage{
create_lr(data = data, param, param2, line = "white",
points = "green", theme = "dark", multi = T)
}
\arguments{
\item{data}{a dataframe of a user's playlists.}
\item{param}{a numeric variable in data}
\item{line}{a string with a color name}
\item{points}{a string with a color name}
\item{theme}{a string that has valid values "light" or "dark"}
\item{multi}{a boolean}
\item{param2}{another numeric variable in data}
}
\value{
linear regression model(s) for each user's playlists or combined playlists.
}
\description{
\code{create_lr} takes a dataframe, two arguments, a line color, a point color, a theme,
and a boolean indicating if the user wants to see a multiple linear regression models per playlist.
}
\details{
This function uses \code{ggplot2} and \code{rlang}.
}
\examples{
data(christmas_playlists)
create_lr(
data = christmas_playlists,
param = tempo,
param2 = valence,
points = "darkgreen",
line = "red",
theme = "light",
multi = T
)
}
\author{
Belen Rodriguez <brodriguez@wesleyan.edu>
Kim Pham <spham@wesleyan.edu>
}
|
/man/create_lr.Rd
|
no_license
|
brodriguez97/spotiverseR
|
R
| false
| true
| 1,215
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_lr.R
\name{create_lr}
\alias{create_lr}
\title{Create Linear Regression Model}
\usage{
create_lr(data = data, param, param2, line = "white",
points = "green", theme = "dark", multi = T)
}
\arguments{
\item{data}{a dataframe of a user's playlists.}
\item{param}{a numeric variable in data}
\item{line}{a string with a color name}
\item{points}{a string with a color name}
\item{theme}{a string that has valid values "light" or "dark"}
\item{multi}{a boolean}
\item{param2}{another numeric variable in data}
}
\value{
linear regression model(s) for each user's playlists or combined playlists.
}
\description{
\code{create_lr} takes a dataframe, two arguments, a line color, a point color, a theme,
and a boolean indicating if the user wants to see a multiple linear regression models per playlist.
}
\details{
This function uses \code{ggplot2} and \code{rlang}.
}
\examples{
data(christmas_playlists)
create_lr(
data = christmas_playlists,
param = tempo,
param2 = valence,
points = "darkgreen",
line = "red",
theme = "light",
multi = T
)
}
\author{
Belen Rodriguez <brodriguez@wesleyan.edu>
Kim Pham <spham@wesleyan.edu>
}
|
\name{.requestEFetchMethod}
\title{Calls the appropriate interface method for this operation and stores the results.}
\usage{.requestEFetchMethod(EFObj)}
\description{Calls the appropriate interface method for this operation and stores the results.}
\section{Warning}{
This function should not be called by the user.
}
\value{The modified object from the arguments, if no errors occurred.}
\seealso{\code{\link[=requestEFetch]{requestEFetch}}}
\alias{.requestEFetchMethod,EFetchClass-method}
\alias{.requestEFetchMethod}
\author{Martin Schumann}
\arguments{\item{EFObj}{An object of this class}}
|
/RNCBI-0.9/R-Source-Win/man/requestEFetchMethod.Rd
|
no_license
|
MartinUndDerWolf/rncbi
|
R
| false
| false
| 608
|
rd
|
\name{.requestEFetchMethod}
\title{Calls the appropriate interface method for this operation and stores the results.}
\usage{.requestEFetchMethod(EFObj)}
\description{Calls the appropriate interface method for this operation and stores the results.}
\section{Warning}{
This function should not be called by the user.
}
\value{The modified object from the arguments, if no errors occurred.}
\seealso{\code{\link[=requestEFetch]{requestEFetch}}}
\alias{.requestEFetchMethod,EFetchClass-method}
\alias{.requestEFetchMethod}
\author{Martin Schumann}
\arguments{\item{EFObj}{An object of this class}}
|
#' log likelihood function for microbiome mixture
#'
#' Evaluate the log likelihood for the microbiome sample mixture model at
#' particular values of the mixture proportion `p` and the error rate `e`.
#'
#' @param tab Dataset of read counts as 3d array of size 3x3x2, genotype in
#'     first sample x genotype in second sample x allele in read.
#' @param p Contaminant probability (proportion of mixture coming from the
#'     second sample).
#' @param e Sequencing error rate.
#'
#' @examples
#' data(mbmixdata)
#' mbmix_loglik(mbmixdata, p=0.74, e=0.002)
#'
#' @return The log likelihood evaluated at `p` and `e`.
#'
#' @export
mbmix_loglik <- function(tab, p, e = 0) {
    # Validate the count table and both scalar parameters up front.
    stopifnot(length(dim(tab)) == 3, all(dim(tab) == c(3, 3, 2)))
    stopifnot(length(p) == 1, !is.na(p), p >= 0, p <= 1)
    stopifnot(length(e) == 1, !is.na(e), e >= 0, e <= 1)

    # P(read carries allele A | genotype pair), before sequencing error.
    # Rows index the first sample's genotype, columns the second's.
    prob_a <- rbind(c(1,             1 - p + p/2, 1 - p),
                    c((1 - p)/2 + p, 1/2,         (1 - p)/2),
                    c(p,             p/2,         0))
    prob_b <- 1 - prob_a

    # Fold in the sequencing error rate; slice 1 = allele A observed,
    # slice 2 = allele B observed. (Named `pr` rather than reusing `p`,
    # which the original shadowed.)
    pr <- array(c(prob_a * (1 - e) + prob_b * e,
                  prob_b * (1 - e) + prob_a * e),
                dim = c(3, 3, 2))

    # Sum of count-weighted log probabilities over nonzero cells only
    # (cells with zero counts contribute nothing and may have pr == 0).
    keep <- tab > 0
    sum(log(pr[keep]) * tab[keep])
}
# One-parameter version of the log likelihood, taking the error rate e as
# fixed. Vectorized over p: a vector of mixture proportions yields a numeric
# vector of log likelihoods. vapply() replaces sapply() so the return type
# is guaranteed numeric regardless of input.
mbmix_loglik_fixede <- function(p, tab, e = 0.002) {
    if (length(p) > 1) {
        return(vapply(p, mbmix_loglik_fixede, numeric(1), tab = tab, e = e))
    }
    mbmix_loglik(tab, p, e)
}
# One-parameter version of the log likelihood, taking the mixture proportion
# p as fixed. Vectorized over e: a vector of error rates yields a numeric
# vector of log likelihoods. vapply() replaces sapply() so the return type
# is guaranteed numeric regardless of input.
mbmix_loglik_fixedp <- function(e, tab, p = 1e-6) {
    if (length(e) > 1) {
        return(vapply(e, mbmix_loglik_fixedp, numeric(1), tab = tab, p = p))
    }
    mbmix_loglik(tab, p, e)
}
# Log likelihood with both parameters packed into a single leading vector,
# theta = c(p, e) -- convenient for optimizers (e.g. optim()) that expect
# one parameter vector as the first argument.
mbmix_loglik_pe <- function(theta, tab) {
    mbmix_loglik(tab, theta[1], theta[2])
}
|
/R/mbmix_loglik.R
|
permissive
|
kbroman/mbmixture
|
R
| false
| false
| 1,611
|
r
|
#' log likelihood function for microbiome mixture
#'
#' Evaluate the log likelihood for the microbiome sample mixture model at
#' particular values of the mixture proportion `p` and the error rate `e`.
#'
#' @param tab Dataset of read counts as 3d array of size 3x3x2, genotype in
#'     first sample x genotype in second sample x allele in read.
#' @param p Contaminant probability (proportion of mixture coming from the
#'     second sample).
#' @param e Sequencing error rate.
#'
#' @examples
#' data(mbmixdata)
#' mbmix_loglik(mbmixdata, p=0.74, e=0.002)
#'
#' @return The log likelihood evaluated at `p` and `e`.
#'
#' @export
mbmix_loglik <- function(tab, p, e = 0) {
    # Validate the count table and both scalar parameters up front.
    stopifnot(length(dim(tab)) == 3, all(dim(tab) == c(3, 3, 2)))
    stopifnot(length(p) == 1, !is.na(p), p >= 0, p <= 1)
    stopifnot(length(e) == 1, !is.na(e), e >= 0, e <= 1)

    # P(read carries allele A | genotype pair), before sequencing error.
    # Rows index the first sample's genotype, columns the second's.
    prob_a <- rbind(c(1,             1 - p + p/2, 1 - p),
                    c((1 - p)/2 + p, 1/2,         (1 - p)/2),
                    c(p,             p/2,         0))
    prob_b <- 1 - prob_a

    # Fold in the sequencing error rate; slice 1 = allele A observed,
    # slice 2 = allele B observed. (Named `pr` rather than reusing `p`,
    # which the original shadowed.)
    pr <- array(c(prob_a * (1 - e) + prob_b * e,
                  prob_b * (1 - e) + prob_a * e),
                dim = c(3, 3, 2))

    # Sum of count-weighted log probabilities over nonzero cells only
    # (cells with zero counts contribute nothing and may have pr == 0).
    keep <- tab > 0
    sum(log(pr[keep]) * tab[keep])
}
# One-parameter version of the log likelihood, taking the error rate e as
# fixed. Vectorized over p: a vector of mixture proportions yields a numeric
# vector of log likelihoods. vapply() replaces sapply() so the return type
# is guaranteed numeric regardless of input.
mbmix_loglik_fixede <- function(p, tab, e = 0.002) {
    if (length(p) > 1) {
        return(vapply(p, mbmix_loglik_fixede, numeric(1), tab = tab, e = e))
    }
    mbmix_loglik(tab, p, e)
}
# One-parameter version of the log likelihood, taking the mixture proportion
# p as fixed. Vectorized over e: a vector of error rates yields a numeric
# vector of log likelihoods. vapply() replaces sapply() so the return type
# is guaranteed numeric regardless of input.
mbmix_loglik_fixedp <- function(e, tab, p = 1e-6) {
    if (length(e) > 1) {
        return(vapply(e, mbmix_loglik_fixedp, numeric(1), tab = tab, p = p))
    }
    mbmix_loglik(tab, p, e)
}
# Log likelihood with both parameters packed into a single leading vector,
# theta = c(p, e) -- convenient for optimizers (e.g. optim()) that expect
# one parameter vector as the first argument.
mbmix_loglik_pe <- function(theta, tab) {
    mbmix_loglik(tab, theta[1], theta[2])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CAPM.beta.R
\name{SFM.beta}
\alias{SFM.beta}
\alias{CAPM.beta}
\alias{CAPM.beta.bull}
\alias{CAPM.beta.bear}
\alias{TimingRatio}
\alias{SFM.beta.bull}
\alias{SFM.beta.bear}
\title{Calculate single factor model (CAPM) beta}
\usage{
SFM.beta(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt",
warning = T
)
SFM.beta.bull(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt"
)
SFM.beta.bear(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt"
)
TimingRatio(Ra, Rb, Rf = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{Rf}{risk free rate, in same period as your returns}
\item{\dots}{Other parameters like max.it or bb specific to lmrobdetMM regression.}
\item{digits}{(Optional): Number of digits to round the results to. Defaults to 3.}
\item{benchmarkCols}{(Optional): Boolean to show the benchmarks as columns. Defaults to TRUE.}
\item{method}{(Optional): string representing linear regression model, "LS" for Least Squares
and "Robust" for robust. Defaults to "LS".}
\item{family}{(Optional):
If method == "Robust":
This is a string specifying the name of the family of loss function
to be used (current valid options are "bisquare", "opt" and "mopt").
Incomplete entries will be matched to the current valid options.
Defaults to "mopt".
Else: the parameter is ignored}
\item{warning}{(Optional): Boolean to show warnings or not. Defaults to TRUE.}
}
\description{
The single factor model or CAPM Beta is the beta of an asset to the variance
and covariance of an initial portfolio. Used to determine diversification potential.
}
\details{
This function uses a linear intercept model to achieve the same results as
the symbolic model used by \code{\link{BetaCoVariance}}
\deqn{\beta_{a,b}=\frac{CoV_{a,b}}{\sigma_{b}}=\frac{\sum((R_{a}-\bar{R_{a}})(R_{b}-\bar{R_{b}}))}{\sum(R_{b}-\bar{R_{b}})^{2}}}{beta
= cov(Ra,Rb)/var(R)}
Ruppert(2004) reports that this equation will give the estimated slope of
the linear regression of \eqn{R_{a}}{Ra} on \eqn{R_{b}}{Rb} and that this
slope can be used to determine the risk premium or excess expected return
(see Eq. 7.9 and 7.10, p. 230-231).
Two other functions apply the same notion of best fit to positive and
negative market returns, separately. The \code{SFM.beta.bull} is a
regression for only positive market returns, which can be used to understand
the behavior of the asset or portfolio in positive or 'bull' markets.
Alternatively, \code{SFM.beta.bear} provides the calculation on negative
market returns.
The \code{TimingRatio} may help assess whether the manager is a good timer
of asset allocation decisions. The ratio, which is calculated as
\deqn{TimingRatio =\frac{\beta^{+}}{\beta^{-}}}{Timing Ratio = beta+/beta-}
is best when greater than one in a rising market and less than one in a
falling market.
While the classical CAPM has been almost completely discredited by the
literature, it is an example of a simple single factor model,
comparing an asset to any arbitrary benchmark.
}
\examples{
data(managers)
SFM.beta(managers[, "HAM1"], managers[, "SP500 TR"], Rf = managers[, "US 3m TR"])
SFM.beta(managers[,1:3], managers[,8:10], Rf=.035/12)
SFM.beta(managers[,1], managers[,8:10], Rf=.035/12, benchmarkCols=FALSE)
betas <- SFM.beta(managers[,1:6],
managers[,8:10],
Rf=.035/12, method="Robust",
family="opt", bb=0.25, max.it=200, digits=4)
betas["HAM1", ]
betas[, "Beta : SP500 TR"]
SFM.beta.bull(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
SFM.beta.bull(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust")
SFM.beta.bear(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
SFM.beta.bear(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust")
TimingRatio(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
TimingRatio(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust", family="mopt")
chart.Regression(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
fit="conditional",
main="Conditional Beta")
}
\references{
Sharpe, W.F. Capital Asset Prices: A theory of market
equilibrium under conditions of risk. \emph{Journal of finance}, vol 19,
1964, 425-442. \cr Ruppert, David. \emph{Statistics and Finance, an
Introduction}. Springer. 2004. \cr Bacon, Carl. \emph{Practical portfolio
performance measurement and attribution}. Wiley. 2004. \cr
}
\seealso{
\code{\link{BetaCoVariance}} \code{\link{SFM.alpha}}
\code{\link{CAPM.utils}}
}
\author{
Dhairya Jain, Peter Carl
}
|
/man/SFM.beta.Rd
|
no_license
|
braverock/PerformanceAnalytics
|
R
| false
| true
| 5,079
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/CAPM.beta.R
\name{SFM.beta}
\alias{SFM.beta}
\alias{CAPM.beta}
\alias{CAPM.beta.bull}
\alias{CAPM.beta.bear}
\alias{TimingRatio}
\alias{SFM.beta.bull}
\alias{SFM.beta.bear}
\title{Calculate single factor model (CAPM) beta}
\usage{
SFM.beta(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt",
warning = T
)
SFM.beta.bull(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt"
)
SFM.beta.bear(
Ra,
Rb,
Rf = 0,
...,
digits = 3,
benchmarkCols = T,
method = "LS",
family = "mopt"
)
TimingRatio(Ra, Rb, Rf = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{Rf}{risk free rate, in same period as your returns}
\item{\dots}{Other parameters like max.it or bb specific to lmrobdetMM regression.}
\item{digits}{(Optional): Number of digits to round the results to. Defaults to 3.}
\item{benchmarkCols}{(Optional): Boolean to show the benchmarks as columns. Defaults to TRUE.}
\item{method}{(Optional): string representing linear regression model, "LS" for Least Squares
and "Robust" for robust. Defaults to "LS".}
\item{family}{(Optional):
If method == "Robust":
This is a string specifying the name of the family of loss function
to be used (current valid options are "bisquare", "opt" and "mopt").
Incomplete entries will be matched to the current valid options.
Defaults to "mopt".
Else: the parameter is ignored}
\item{warning}{(Optional): Boolean to show warnings or not. Defaults to TRUE.}
}
\description{
The single factor model or CAPM Beta is the beta of an asset to the variance
and covariance of an initial portfolio. Used to determine diversification potential.
}
\details{
This function uses a linear intercept model to achieve the same results as
the symbolic model used by \code{\link{BetaCoVariance}}
\deqn{\beta_{a,b}=\frac{CoV_{a,b}}{\sigma_{b}}=\frac{\sum((R_{a}-\bar{R_{a}})(R_{b}-\bar{R_{b}}))}{\sum(R_{b}-\bar{R_{b}})^{2}}}{beta
= cov(Ra,Rb)/var(R)}
Ruppert(2004) reports that this equation will give the estimated slope of
the linear regression of \eqn{R_{a}}{Ra} on \eqn{R_{b}}{Rb} and that this
slope can be used to determine the risk premium or excess expected return
(see Eq. 7.9 and 7.10, p. 230-231).
Two other functions apply the same notion of best fit to positive and
negative market returns, separately. The \code{SFM.beta.bull} is a
regression for only positive market returns, which can be used to understand
the behavior of the asset or portfolio in positive or 'bull' markets.
Alternatively, \code{SFM.beta.bear} provides the calculation on negative
market returns.
The \code{TimingRatio} may help assess whether the manager is a good timer
of asset allocation decisions. The ratio, which is calculated as
\deqn{TimingRatio =\frac{\beta^{+}}{\beta^{-}}}{Timing Ratio = beta+/beta-}
is best when greater than one in a rising market and less than one in a
falling market.
While the classical CAPM has been almost completely discredited by the
literature, it is an example of a simple single factor model,
comparing an asset to any arbitrary benchmark.
}
\examples{
data(managers)
SFM.beta(managers[, "HAM1"], managers[, "SP500 TR"], Rf = managers[, "US 3m TR"])
SFM.beta(managers[,1:3], managers[,8:10], Rf=.035/12)
SFM.beta(managers[,1], managers[,8:10], Rf=.035/12, benchmarkCols=FALSE)
betas <- SFM.beta(managers[,1:6],
managers[,8:10],
Rf=.035/12, method="Robust",
family="opt", bb=0.25, max.it=200, digits=4)
betas["HAM1", ]
betas[, "Beta : SP500 TR"]
SFM.beta.bull(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
SFM.beta.bull(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust")
SFM.beta.bear(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
SFM.beta.bear(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust")
TimingRatio(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"])
TimingRatio(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
method="Robust", family="mopt")
chart.Regression(managers[, "HAM2"],
managers[, "SP500 TR"],
Rf = managers[, "US 3m TR"],
fit="conditional",
main="Conditional Beta")
}
\references{
Sharpe, W.F. Capital Asset Prices: A theory of market
equilibrium under conditions of risk. \emph{Journal of finance}, vol 19,
1964, 425-442. \cr Ruppert, David. \emph{Statistics and Finance, an
Introduction}. Springer. 2004. \cr Bacon, Carl. \emph{Practical portfolio
performance measurement and attribution}. Wiley. 2004. \cr
}
\seealso{
\code{\link{BetaCoVariance}} \code{\link{SFM.alpha}}
\code{\link{CAPM.utils}}
}
\author{
Dhairya Jain, Peter Carl
}
|
# Themes
# Theme functions -------------------------------------------------------------
#' pilot ggplot theme
#'
#' This ggplot theme implements the pilot chart style. The theme
#' offers various options for easily controlling plot settings so that you
#' don't need to resort to additional uses of the ggplot2 \code{theme} function
#' in most cases.
#'
#' The main arguments are \code{axes}, \code{grid}, \code{caption_position},
#' and \code{legend_position}. Arguments are also available to set the color and
#' font properties of elements of the plot.
#'
#' When creating plots using this theme, the title and subtitle elements may
#' be set using ggplot2's \code{labs} function, which provides the traditional
#' ggplot title alignment. Alternatively, the package also provides a function
#' that aligns the title and subtitle with the left-hand edge of the y-axis
#' text rather than the left-hand edge of the plotting area.
#'
#' To set the title and subtitle using this approach, first create a plot in
#' the normal way, without specifying any title or subtitle in \code{labs}.
#' Then use the \code{add_pilot_titles} function to add the title and subtitle
#' elements. Using \code{add_pilot_titles} also has the benefit of correctly
#' setting the distance between the bottom of the title and the rest of the
#' plot when no subtitle is needed.
#'
#' The arguments for controlling the properties of text elements in this
#' function include options for setting the properties for the title and
#' subtitle. These will control titles and subtitles that are set using the
#' ggplot2 \code{labs} function. You can separately configure the properties
#' of the title and subtitle in \code{add_pilot_titles} if you need to change
#' the appearance of the title elements when they are set in the recommended
#' way.
#'
#' @param axes A string indicating which axes should have lines and ticks.
#' Specify which axes to show by including the matching characters in the
#' string: "t" for top, "r" for right, "b" for bottom, "l" for left. You will
#' need to ensure this argument is consistent with the axes settings in your
#' plot for the lines and ticks to be displayed. The default is an empty
#' string, meaning ticks and lines for the bottom and left axes are shown by
#' default.
#' @param grid A string indicating which gridlines should be shown. Specify
#' the gridlines to show by including the matching characters in the string:
#' "h" for horizontal, "v" for vertical. The default is "hv",
#' meaning both gridlines are shown by default.
#' @param legend_position A string indicating the position of the legend. Valid
#' positions are "top", "right", "bottom", "left", "top-right", "top-left",
#' "bottom-right", "bottom-left", and "none". The default is "right".
#' @param caption_position A string indicating the horizontal position of the
#' caption. Valid positions are "left" or "right". The default is "right".
#' @param title_family A string indicating the font-family to use for the
#' title. The default depends on the operating system.
#' @param subtitle_family A string indicating the font-family to use for the
#' subtitle. The default depends on the operating system.
#' @param axis_title_family A string indicating the font-family to use for
#' axis titles. The default depends on the operating system.
#' @param axis_text_family A string indicating the font-family to use for
#' axis text. The default depends on the operating system.
#' @param legend_title_family A string indicating the font-family to use for
#' legend titles. The default depends on the operating system.
#' @param legend_text_family A string indicating the font-family to use for
#' legend text. The default depends on the operating system.
#' @param facet_title_family A string indicating the font-family to use for
#' facet titles. The default depends on the operating system.
#' @param caption_family A string indicating the font-family to use for
#' captions. The default depends on the operating system.
#' @param title_size An integer indicating the font size to use for the title
#' in points. The default is 17 points.
#' @param subtitle_size An integer indicating the font size to use for the
#' subtitle in points. The default is 12 points.
#' @param axis_title_size An integer indicating the font size to use for axis
#' titles in points. The default is 11 points.
#' @param axis_text_size An integer indicating the font size to use for axis
#' text in points. The default is 11 points.
#' @param legend_title_size An integer indicating the font size to use for
#' legend titles in points. The default is 11 points.
#' @param legend_text_size An integer indicating the font size to use for
#' legend text in points. The default is 10 points.
#' @param facet_title_size An integer indicating the font size to use for
#' facet titles in points. The default is 10 points.
#' @param caption_size An integer indicating the font size to use for captions
#' in points. The default is 9 points.
#' @param title_color An RGB hex string indicating the color to use for the
#' title. The default is "#404040".
#' @param subtitle_color An RGB hex string indicating the color to use for the
#' subtitle. The default is "#404040".
#' @param axis_title_color An RGB hex string indicating the color to use for
#' axis titles. The default is "#404040".
#' @param axis_text_color An RGB hex string indicating the color to use for
#' axis text. The default is "#404040".
#' @param legend_title_color An RGB hex string indicating the color to use for
#' legend titles. The default is "#404040".
#' @param legend_text_color An RGB hex string indicating the color to use for
#' legend text. The default is "#404040".
#' @param facet_title_color An RGB hex string indicating the color to use for
#' facet titles. The default is "#303030".
#' @param caption_color An RGB hex string indicating the color to use for
#' captions. The default is "#404040".
#' @param background_color An RGB hex string indicating the color to use for
#' the background. The default is "#ffffff".
#' @param axis_line_color An RGB hex string indicating the color to use for
#' the axis lines. The default is "#a6a6a6".
#' @param grid_color An RGB hex string indicating the color to use for the
#' gridlines. The default is "#dad5d1".
#' @return A ggplot2 theme that implements the Commons Library style.
#' @export
theme_pilot <- function (
axes = "",
grid = "hv",
legend_position = "right",
caption_position = "right",
title_family = getOption("pilot.title_family"),
subtitle_family =getOption("pilot.subtitle_family"),
axis_title_family = getOption("pilot.axis_title_family"),
axis_text_family = getOption("pilot.axis_text_family"),
legend_title_family = getOption("pilot.legend_title_family"),
legend_text_family = getOption("pilot.legend_text_family"),
facet_title_family = getOption("pilot.facet_title_family"),
caption_family = getOption("pilot.caption_family"),
title_size = 17,
subtitle_size = 12,
axis_title_size = 11,
axis_text_size = 11,
legend_title_size = 11,
legend_text_size = 10,
facet_title_size = 10,
caption_size = 9,
title_color = "#404040",
subtitle_color = "#404040",
axis_title_color = "#404040",
axis_text_color = "#404040",
legend_title_color = "#404040",
legend_text_color = "#404040",
facet_title_color = "#303030",
caption_color = "#404040",
background_color = "#ffffff",
axis_line_color = "#404040",
grid_color = "#d8d8d8") {
# Set the caption horizontal justification
if (stringr::str_detect(caption_position, "left")) {
caption_hjust = 0
} else if (stringr::str_detect(caption_position, "right")) {
caption_hjust = 1
} else {
stop("The caption_position should be \"left\" or \"right\"")
}
# Baseline theme
theme_pilot <- ggplot2::theme(
plot.background = ggplot2::element_rect(
fill = background_color,
size = 0),
plot.margin = ggplot2::margin(
t = 20,
r = 20,
b = 20,
l = 20, unit = "pt"),
plot.title = ggplot2::element_text(
family = title_family,
color = title_color,
face = "bold",
hjust = 0,
size = title_size,
margin = ggplot2::margin(
t = 0,
r = 0,
b = 5,
l = 0, unit = "pt")),
plot.subtitle = ggplot2::element_text(
family = subtitle_family,
color = subtitle_color,
face = "plain",
hjust = 0,
size = subtitle_size,
margin = ggplot2::margin(
t = 0,
r = 0,
b = 24,
l = 0, unit = "pt")),
plot.caption = ggplot2::element_text(
family = caption_family,
color = caption_color,
hjust = caption_hjust,
size = caption_size,
margin = ggplot2::margin(
t = 24,
r = 0,
b = 0,
l = 0, unit = "pt")),
plot.caption.position = "plot",
panel.spacing = ggplot2::unit(20, "pt"),
panel.border = ggplot2::element_blank(),
panel.background = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
panel.grid.major = ggplot2::element_blank(),
panel.grid.minor = ggplot2::element_blank(),
axis.line = ggplot2::element_blank(),
axis.line.x.top = ggplot2::element_blank(),
axis.line.y.right = ggplot2::element_blank(),
axis.line.x.bottom = ggplot2::element_blank(),
axis.line.y.left = ggplot2::element_blank(),
axis.ticks = ggplot2::element_blank(),
axis.title = ggplot2::element_text(
family = axis_title_family,
color = axis_title_color,
size = axis_title_size),
axis.title.x = ggplot2::element_text(
margin = ggplot2::margin(
t = 12,
r = 0,
b = 0,
l = 0, unit = "pt")),
axis.title.x.top = ggplot2::element_text(
margin = ggplot2::margin(
t = 0,
b = 12, unit = "pt")),
axis.title.y = ggplot2::element_text(
angle = 90,
margin = ggplot2::margin(
t = 0,
r = 12,
b = 0,
l = 0, unit = "pt")),
axis.title.y.right = ggplot2::element_text(
angle = 90,
margin = ggplot2::margin(
r = 0,
l = 12, unit = "pt")),
axis.text = ggplot2::element_text(
family = axis_text_family,
color = axis_text_color,
size = axis_text_size),
axis.text.x = ggplot2::element_text(
margin = ggplot2::margin(
t = 5,
r = 0,
b = 0,
l = 0, unit = "pt")),
axis.text.x.top = ggplot2::element_text(
margin = ggplot2::margin(
t = 0,
b = 5, unit = "pt")),
axis.text.y = ggplot2::element_text(
hjust = 1,
margin = ggplot2::margin(
t = 0,
r = 5,
b = 0,
l = 0, unit = "pt")),
axis.text.y.right = ggplot2::element_text(
hjust = 0,
margin = ggplot2::margin(
r = 0,
l = 5, unit = "pt")),
legend.background = ggplot2::element_rect(
color = NULL,
fill = background_color,
size = 0),
legend.key = ggplot2::element_rect(
color = background_color,
fill = background_color),
legend.title = ggplot2::element_text(
family = legend_title_family,
color = legend_title_color,
face = "bold",
size = legend_title_size),
legend.text = ggplot2::element_text(
family = legend_text_family,
color = legend_text_color,
size = legend_text_size),
strip.background = ggplot2::element_rect(
color = background_color,
fill = background_color),
strip.text = ggplot2::element_text(
family = facet_title_family,
color = facet_title_color,
size = facet_title_size,
face = "bold"))
# Axes
axis_line <- ggplot2::element_line(
color = axis_line_color,
size = 0.3,
linetype = "solid")
if (stringr::str_detect(axes, "t")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
axis.line.x.top = axis_line,
axis.ticks.x.top = axis_line)
}
if (stringr::str_detect(axes, "r")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
axis.line.y.right = axis_line,
axis.ticks.y.right = axis_line)
}
if (stringr::str_detect(axes, "b")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
axis.line.x.bottom = axis_line,
axis.ticks.x.bottom = axis_line)
}
if (stringr::str_detect(axes, "l")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
axis.line.y.left = axis_line,
axis.ticks.y.left = axis_line)
}
# Gridlines
grid_line <- ggplot2::element_line(
color = grid_color,
size = 0.35,
linetype = "solid")
if (stringr::str_detect(grid, "v")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(panel.grid.major.x = grid_line)
}
if (stringr::str_detect(grid, "h")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(panel.grid.major.y = grid_line)
}
# Legend
if (legend_position %in% c("top", "right", "bottom", "left", "none")) {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(legend.position = legend_position)
} else if (legend_position == "top-right") {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
legend.position = "top",
legend.direction = "horizontal",
legend.justification = c(1,0))
} else if (legend_position == "top-left") {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
legend.position = "top",
legend.direction = "horizontal",
legend.justification = c(0,1))
} else if (legend_position == "bottom-right") {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.justification = c(1,0))
} else if (legend_position == "bottom-left") {
theme_pilot <- theme_pilot %+replace%
ggplot2::theme(
legend.position = "bottom",
legend.direction = "horizontal",
legend.justification = c(0,1))
} else {
stop(paste(
"The legend_position should be one of:",
"\"top\", \"right\", \"bottom\", \"left\",",
"\"top-right\", \"top-left\", \"bottom-right\", \"bottom-left\""))
}
theme_pilot
}
# Function to add correctly aligned titles ------------------------------------
#' Add titles to a plot using the pilot theme style
#'
#' Use this function to add titles to a plot that uses \code{theme_pilot}.
#' Using this function to set the title and/or subtitle will ensure that title
#' elements are aligned with the left-hand edge of the y-axis text, rather than
#' the left-hand edge of the plotting area.
#'
#' To use the function, first create a plot using \code{theme_pilot}
#' without setting the title or subtitle, capturing the plot object in a
#' variable. Pass the plot to \code{add_pilot_titles} along with the title
#' and subtitle you want to use. This function will return the same plot with
#' titles added to the top and the correct spacing between the titles and the
#' rest of the plot area.
#'
#' Arguments are also available to set the font properties of the title and
#' subtitle elements of the plot.
#'
#' @param plot A ggplot2 plot object to which titles will be added.
#' @param title A string containing the title to add to the plot. Use NULL if
#' you do not want a title.
#' @param subtitle A string containing the subtitle to add to the plot. Use
#' NULL if you do not want a subtitle.
#' @param title_family A string indicating the font-family to use for the
#' title. The default depends on the operating system.
#' @param subtitle_family A string indicating the font-family to use for the
#' subtitle. The default depends on the operating system.
#' @param title_size An integer indicating the font size to use for the title
#' in points. The default is 17 points.
#' @param subtitle_size An integer indicating the font size to use for the
#' subtitle in points. The default is 12 points.
#' @param title_color An RGB hex string indicating the color to use for the
#' title. The default is "#404040".
#' @param subtitle_color An RGB hex string indicating the color to use for the
#' subtitle. The default is "#404040".
#' @param background_color An RGB hex string indicating the color to use for
#' the background. The default is "#ffffff".
#' @return A copy of the input plot with a title and/or subtitle added.
#' @export
add_pilot_titles <- function(
  plot,
  title = NULL,
  subtitle = NULL,
  title_family = getOption("pilot.title_family"),
  subtitle_family = getOption("pilot.subtitle_family"),
  title_size = 17,
  subtitle_size = 12,
  title_color = "#404040",
  subtitle_color = "#404040",
  background_color = "#ffffff") {
  # If no titles are provided, return the plot unmodified
  if (is.null(title) && is.null(subtitle)) return(plot)
  # Build the annotation theme from the main theme, applying the requested
  # family, size and color for the title elements and the background fill.
  # (Previously the size, color and background arguments were accepted but
  # silently ignored.) Adding a theme with `+` merges element properties,
  # so hjust, face and margins from theme_pilot are preserved.
  theme_titles <- theme_pilot() + ggplot2::theme(
    plot.background = ggplot2::element_rect(
      fill = background_color,
      size = 0),
    plot.title = ggplot2::element_text(
      family = title_family,
      color = title_color,
      size = title_size),
    plot.subtitle = ggplot2::element_text(
      family = subtitle_family,
      color = subtitle_color,
      size = subtitle_size))
  # If no subtitle is provided, adjust the title's bottom padding so the
  # title sits the correct distance above the plot body
  if (is.null(subtitle)) {
    theme_titles <- theme_titles + ggplot2::theme(
      plot.title = ggplot2::element_text(
        family = title_family,
        color = title_color,
        size = title_size,
        margin = ggplot2::margin(b = 24, unit = "pt")))
  }
  # Remove margin padding from the plot before re-adding with the titles,
  # which are attached as a patchwork annotation so they align with the
  # left-hand edge of the axis text
  plot +
    ggplot2::theme(plot.margin = ggplot2::margin(
      t = 0,
      r = 0,
      b = 0,
      l = 0, unit = "pt")) +
    patchwork::plot_annotation(
      title = title,
      subtitle = subtitle,
      theme = theme_titles)
}
|
/R/themes.R
|
permissive
|
olihawkins/pilot
|
R
| false
| false
| 19,129
|
r
|
# Themes
# Theme functions -------------------------------------------------------------
#' pilot ggplot theme
#'
#' This ggplot theme implements the pilot chart style. The theme
#' offers various options for easily controlling plot settings so that you
#' don't need to resort to additional uses of the ggplot2 \code{theme} function
#' in most cases.
#'
#' The main arguments are \code{axes}, \code{grid}, \code{caption_position},
#' and \code{legend_positon}. Arguments are also available to set the color and
#' font properties of elements of the plot.
#'
#' When creating plots using this theme, the title and subtitle elements may
#' be set using ggplot2's \code{labs} function, which provides the traditional
#' ggplot title alignment. Alternatively, the package also provides a function
#' that aligns the title and subtitle with the left-hand edge of the y-axis
#' text rather than the left-hand edge of the plotting area.
#'
#' To set the title and subtitle using this approach, first create a plot in
#' the normal way, without specifying any title or subtitle in \code{labs}.
#' Then use the \code{add_pilot_titles} function to add the title and subtitle
#' elements. Using \code{add_pilot_titles} also has the benefit of correctly
#' setting the distance between the bottom of the title and the rest of the
#' plot when no subtitle is needed.
#'
#' The arguments for controlling the properties of text elements in this
#' function include options for setting the properties for the title and
#' subtitle. These will control titles and subtitles that are set using the
#' ggplot2 \code{labs} function. You can separately configure the properties
#' of the title and subtitle in \code{add_pilot_titles} if you need to change
#' the appearance of the title elements when they are set in the recommended
#' way.
#'
#' @param axes A string indicating which axes should have lines and ticks.
#' Specify which axes to show by including the matching characters in the
#' string: "t" for top, "r" for right, "b" for bottom, "l" for left. You will
#' need to ensure this argument is consistent with the axes settings in your
#' plot for the lines and ticks to be displayed. The default is an empty
#' string, meaning no axis lines or ticks are shown by default.
#' @param grid A string indicating which gridlines should be shown. Specify
#' the gridlines to show by including the matching characters in the string:
#' "h" for horizontal, "v" for vertical. The default is "hv",
#' meaning both gridlines are shown by default.
#' @param legend_position A string indicating the position of the legend. Valid
#' positions are "top", "right", "bottom", "left", "top-right", "top-left",
#' "bottom-right", "bottom-left", and "none". The default is "right".
#' @param caption_position A string indicating the horizontal position of the
#' caption. Valid positions are "left" or "right". The default is "right".
#' @param title_family A string indicating the font-family to use for the
#' title. The default depends on the operating system.
#' @param subtitle_family A string indicating the font-family to use for the
#' subtitle. The default depends on the operating system.
#' @param axis_title_family A string indicating the font-family to use for
#' axis titles. The default depends on the operating system.
#' @param axis_text_family A string indicating the font-family to use for
#' axis text. The default depends on the operating system.
#' @param legend_title_family A string indicating the font-family to use for
#' legend titles. The default depends on the operating system.
#' @param legend_text_family A string indicating the font-family to use for
#' legend text. The default depends on the operating system.
#' @param facet_title_family A string indicating the font-family to use for
#' facet titles. The default depends on the operating system.
#' @param caption_family A string indicating the font-family to use for
#' captions. The default depends on the operating system.
#' @param title_size An integer indicating the font size to use for the title
#' in points. The default is 17 points.
#' @param subtitle_size An integer indicating the font size to use for the
#' subtitle in points. The default is 12 points.
#' @param axis_title_size An integer indicating the font size to use for axis
#' titles in points. The default is 11 points.
#' @param axis_text_size An integer indicating the font size to use for axis
#' text in points. The default is 11 points.
#' @param legend_title_size An integer indicating the font size to use for
#' legend titles in points. The default is 11 points.
#' @param legend_text_size An integer indicating the font size to use for
#' legend text in points. The default is 10 points.
#' @param facet_title_size An integer indicating the font size to use for
#' facet titles in points. The default is 10 points.
#' @param caption_size An integer indicating the font size to use for captions
#' in points. The default is 9 points.
#' @param title_color An RGB hex string indicating the color to use for the
#' title. The default is "#404040".
#' @param subtitle_color An RGB hex string indicating the color to use for the
#' subtitle. The default is "#404040".
#' @param axis_title_color An RGB hex string indicating the color to use for
#' axis titles. The default is "#404040".
#' @param axis_text_color An RGB hex string indicating the color to use for
#' axis text. The default is "#404040".
#' @param legend_title_color An RGB hex string indicating the color to use for
#' legend titles. The default is "#404040".
#' @param legend_text_color An RGB hex string indicating the color to use for
#' legend text. The default is "#404040".
#' @param facet_title_color An RGB hex string indicating the color to use for
#' facet titles. The default is "#303030".
#' @param caption_color An RGB hex string indicating the color to use for
#' captions. The default is "#404040".
#' @param background_color An RGB hex string indicating the color to use for
#' the background. The default is "#ffffff".
#' @param axis_line_color An RGB hex string indicating the color to use for
#'   the axis lines. The default is "#404040".
#' @param grid_color An RGB hex string indicating the color to use for the
#'   gridlines. The default is "#d8d8d8".
#' @return A ggplot2 theme that implements the pilot style.
#' @export
theme_pilot <- function (
  axes = "",
  grid = "hv",
  legend_position = "right",
  caption_position = "right",
  title_family = getOption("pilot.title_family"),
  subtitle_family =getOption("pilot.subtitle_family"),
  axis_title_family = getOption("pilot.axis_title_family"),
  axis_text_family = getOption("pilot.axis_text_family"),
  legend_title_family = getOption("pilot.legend_title_family"),
  legend_text_family = getOption("pilot.legend_text_family"),
  facet_title_family = getOption("pilot.facet_title_family"),
  caption_family = getOption("pilot.caption_family"),
  title_size = 17,
  subtitle_size = 12,
  axis_title_size = 11,
  axis_text_size = 11,
  legend_title_size = 11,
  legend_text_size = 10,
  facet_title_size = 10,
  caption_size = 9,
  title_color = "#404040",
  subtitle_color = "#404040",
  axis_title_color = "#404040",
  axis_text_color = "#404040",
  legend_title_color = "#404040",
  legend_text_color = "#404040",
  facet_title_color = "#303030",
  caption_color = "#404040",
  background_color = "#ffffff",
  axis_line_color = "#404040",
  grid_color = "#d8d8d8") {
  # Set the caption horizontal justification: hjust 0 = left-aligned,
  # hjust 1 = right-aligned. Any other value of caption_position is an error.
  if (stringr::str_detect(caption_position, "left")) {
    caption_hjust = 0
  } else if (stringr::str_detect(caption_position, "right")) {
    caption_hjust = 1
  } else {
    stop("The caption_position should be \"left\" or \"right\"")
  }
  # Baseline theme
  # Every axis line, tick and gridline starts as element_blank(); the
  # axes/grid arguments selectively re-enable them further below.
  # Note: the local variable deliberately reuses the function's name; it is
  # built up in stages and returned at the end.
  theme_pilot <- ggplot2::theme(
    plot.background = ggplot2::element_rect(
      fill = background_color,
      size = 0),
    plot.margin = ggplot2::margin(
      t = 20,
      r = 20,
      b = 20,
      l = 20, unit = "pt"),
    plot.title = ggplot2::element_text(
      family = title_family,
      color = title_color,
      face = "bold",
      hjust = 0,
      size = title_size,
      margin = ggplot2::margin(
        t = 0,
        r = 0,
        b = 5,
        l = 0, unit = "pt")),
    plot.subtitle = ggplot2::element_text(
      family = subtitle_family,
      color = subtitle_color,
      face = "plain",
      hjust = 0,
      size = subtitle_size,
      margin = ggplot2::margin(
        t = 0,
        r = 0,
        b = 24,
        l = 0, unit = "pt")),
    plot.caption = ggplot2::element_text(
      family = caption_family,
      color = caption_color,
      hjust = caption_hjust,
      size = caption_size,
      margin = ggplot2::margin(
        t = 24,
        r = 0,
        b = 0,
        l = 0, unit = "pt")),
    plot.caption.position = "plot",
    panel.spacing = ggplot2::unit(20, "pt"),
    panel.border = ggplot2::element_blank(),
    panel.background = ggplot2::element_blank(),
    panel.grid = ggplot2::element_blank(),
    panel.grid.major = ggplot2::element_blank(),
    panel.grid.minor = ggplot2::element_blank(),
    axis.line = ggplot2::element_blank(),
    axis.line.x.top = ggplot2::element_blank(),
    axis.line.y.right = ggplot2::element_blank(),
    axis.line.x.bottom = ggplot2::element_blank(),
    axis.line.y.left = ggplot2::element_blank(),
    axis.ticks = ggplot2::element_blank(),
    axis.title = ggplot2::element_text(
      family = axis_title_family,
      color = axis_title_color,
      size = axis_title_size),
    axis.title.x = ggplot2::element_text(
      margin = ggplot2::margin(
        t = 12,
        r = 0,
        b = 0,
        l = 0, unit = "pt")),
    axis.title.x.top = ggplot2::element_text(
      margin = ggplot2::margin(
        t = 0,
        b = 12, unit = "pt")),
    axis.title.y = ggplot2::element_text(
      angle = 90,
      margin = ggplot2::margin(
        t = 0,
        r = 12,
        b = 0,
        l = 0, unit = "pt")),
    axis.title.y.right = ggplot2::element_text(
      angle = 90,
      margin = ggplot2::margin(
        r = 0,
        l = 12, unit = "pt")),
    axis.text = ggplot2::element_text(
      family = axis_text_family,
      color = axis_text_color,
      size = axis_text_size),
    axis.text.x = ggplot2::element_text(
      margin = ggplot2::margin(
        t = 5,
        r = 0,
        b = 0,
        l = 0, unit = "pt")),
    axis.text.x.top = ggplot2::element_text(
      margin = ggplot2::margin(
        t = 0,
        b = 5, unit = "pt")),
    axis.text.y = ggplot2::element_text(
      hjust = 1,
      margin = ggplot2::margin(
        t = 0,
        r = 5,
        b = 0,
        l = 0, unit = "pt")),
    axis.text.y.right = ggplot2::element_text(
      hjust = 0,
      margin = ggplot2::margin(
        r = 0,
        l = 5, unit = "pt")),
    legend.background = ggplot2::element_rect(
      color = NULL,
      fill = background_color,
      size = 0),
    legend.key = ggplot2::element_rect(
      color = background_color,
      fill = background_color),
    legend.title = ggplot2::element_text(
      family = legend_title_family,
      color = legend_title_color,
      face = "bold",
      size = legend_title_size),
    legend.text = ggplot2::element_text(
      family = legend_text_family,
      color = legend_text_color,
      size = legend_text_size),
    strip.background = ggplot2::element_rect(
      color = background_color,
      fill = background_color),
    strip.text = ggplot2::element_text(
      family = facet_title_family,
      color = facet_title_color,
      size = facet_title_size,
      face = "bold"))
  # Axes
  # Re-enable the line and ticks for each side named in `axes` ("t", "r",
  # "b", "l"). %+replace% substitutes these elements wholesale rather than
  # merging properties into the blanks set above.
  axis_line <- ggplot2::element_line(
    color = axis_line_color,
    size = 0.3,
    linetype = "solid")
  if (stringr::str_detect(axes, "t")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        axis.line.x.top = axis_line,
        axis.ticks.x.top = axis_line)
  }
  if (stringr::str_detect(axes, "r")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        axis.line.y.right = axis_line,
        axis.ticks.y.right = axis_line)
  }
  if (stringr::str_detect(axes, "b")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        axis.line.x.bottom = axis_line,
        axis.ticks.x.bottom = axis_line)
  }
  if (stringr::str_detect(axes, "l")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        axis.line.y.left = axis_line,
        axis.ticks.y.left = axis_line)
  }
  # Gridlines
  # "v" enables vertical gridlines (major x breaks), "h" horizontal ones
  # (major y breaks); minor gridlines stay disabled.
  grid_line <- ggplot2::element_line(
    color = grid_color,
    size = 0.35,
    linetype = "solid")
  if (stringr::str_detect(grid, "v")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(panel.grid.major.x = grid_line)
  }
  if (stringr::str_detect(grid, "h")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(panel.grid.major.y = grid_line)
  }
  # Legend
  # Plain keywords map directly to legend.position; compound keywords like
  # "top-right" map to a side plus a horizontal layout and a justification
  # that pushes the legend to the named corner.
  if (legend_position %in% c("top", "right", "bottom", "left", "none")) {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(legend.position = legend_position)
  } else if (legend_position == "top-right") {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        legend.position = "top",
        legend.direction = "horizontal",
        legend.justification = c(1,0))
  } else if (legend_position == "top-left") {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        legend.position = "top",
        legend.direction = "horizontal",
        legend.justification = c(0,1))
  } else if (legend_position == "bottom-right") {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        legend.position = "bottom",
        legend.direction = "horizontal",
        legend.justification = c(1,0))
  } else if (legend_position == "bottom-left") {
    theme_pilot <- theme_pilot %+replace%
      ggplot2::theme(
        legend.position = "bottom",
        legend.direction = "horizontal",
        legend.justification = c(0,1))
  } else {
    stop(paste(
      "The legend_position should be one of:",
      "\"top\", \"right\", \"bottom\", \"left\",",
      "\"top-right\", \"top-left\", \"bottom-right\", \"bottom-left\""))
  }
  # Return the assembled theme
  theme_pilot
}
# Function to add correctly aligned titles ------------------------------------
#' Add titles to a plot using the pilot theme style
#'
#' Use this function to add titles to a plot that uses \code{theme_pilot}.
#' Using this function to set the title and/or subtitle will ensure that title
#' elements are aligned with the left-hand edge of the y-axis text, rather than
#' the left-hand edge of the plotting area.
#'
#' To use the function, first create a plot using \code{theme_pilot}
#' without setting the title or subtitle, capturing the plot object in a
#' variable. Pass the plot to \code{add_pilot_titles} along with the title
#' and subtitle you want to use. This function will return the same plot with
#' titles added to the top and the correct spacing between the titles and the
#' rest of the plot area.
#'
#' Arguments are also available to set the font properties of the title and
#' subtitle elements of the plot.
#'
#' @param plot A ggplot2 plot object to which titles will be added.
#' @param title A string containing the title to add to the plot. Use NULL if
#' you do not want a title.
#' @param subtitle A string containing the subtitle to add to the plot. Use
#' NULL if you do not want a subtitle.
#' @param title_family A string indicating the font-family to use for the
#' title. The default depends on the operating system.
#' @param subtitle_family A string indicating the font-family to use for the
#' subtitle. The default depends on the operating system.
#' @param title_size An integer indicating the font size to use for the title
#' in points. The default is 17 points.
#' @param subtitle_size An integer indicating the font size to use for the
#' subtitle in points. The default is 12 points.
#' @param title_color An RGB hex string indicating the color to use for the
#' title. The default is "#404040".
#' @param subtitle_color An RGB hex string indicating the color to use for the
#' subtitle. The default is "#404040".
#' @param background_color An RGB hex string indicating the color to use for
#' the background. The default is "#ffffff".
#' @return A copy of the input plot with a title and/or subtitle added.
#' @export
add_pilot_titles <- function(
  plot,
  title = NULL,
  subtitle = NULL,
  title_family = getOption("pilot.title_family"),
  subtitle_family = getOption("pilot.subtitle_family"),
  title_size = 17,
  subtitle_size = 12,
  title_color = "#404040",
  subtitle_color = "#404040",
  background_color = "#ffffff") {
  # If no titles are provided, return the plot unmodified
  if (is.null(title) && is.null(subtitle)) return(plot)
  # Build the annotation theme from the main theme, applying the requested
  # family, size and color for the title elements and the background fill.
  # (Previously the size, color and background arguments were accepted but
  # silently ignored.) Adding a theme with `+` merges element properties,
  # so hjust, face and margins from theme_pilot are preserved.
  theme_titles <- theme_pilot() + ggplot2::theme(
    plot.background = ggplot2::element_rect(
      fill = background_color,
      size = 0),
    plot.title = ggplot2::element_text(
      family = title_family,
      color = title_color,
      size = title_size),
    plot.subtitle = ggplot2::element_text(
      family = subtitle_family,
      color = subtitle_color,
      size = subtitle_size))
  # If no subtitle is provided, adjust the title's bottom padding so the
  # title sits the correct distance above the plot body
  if (is.null(subtitle)) {
    theme_titles <- theme_titles + ggplot2::theme(
      plot.title = ggplot2::element_text(
        family = title_family,
        color = title_color,
        size = title_size,
        margin = ggplot2::margin(b = 24, unit = "pt")))
  }
  # Remove margin padding from the plot before re-adding with the titles,
  # which are attached as a patchwork annotation so they align with the
  # left-hand edge of the axis text
  plot +
    ggplot2::theme(plot.margin = ggplot2::margin(
      t = 0,
      r = 0,
      b = 0,
      l = 0, unit = "pt")) +
    patchwork::plot_annotation(
      title = title,
      subtitle = subtitle,
      theme = theme_titles)
}
|
# Assert that two values are equal, ignoring attributes and names.
# The originating call is printed so the test log records which
# comparison was executed.
check <- function(a, b)
{
  print(match.call())
  result <- all.equal(a, b, check.attributes = FALSE, check.names = FALSE)
  # all.equal() returns TRUE on success, or a character vector describing
  # the differences; surface that description instead of stopifnot()'s
  # generic "not all TRUE" message.
  if (!isTRUE(result)) {
    stop("check failed: ", paste(result, collapse = "; "), call. = FALSE)
  }
  invisible(TRUE)
}
# Load the scidb client and read the test configuration from the
# environment; the tests below run only when SCIDB_TEST_HOST is set.
library("scidb")
host = Sys.getenv("SCIDB_TEST_HOST")
# A scalar string comparison already yields TRUE/FALSE, so there is no
# need to wrap it in ifelse(). Unset variables compare unequal to 'true'.
test_with_security = Sys.getenv("SCIDB_TEST_WITH_SECURITY") == 'true'
# All tests require a live SciDB instance; everything is skipped when no
# test host is configured.
if (nchar(host) > 0)
{
  # Connect anonymously, or as root over HTTPS for security-enabled builds.
  if (!test_with_security) {
    db = scidbconnect(host)
  } else {
    db = scidbconnect(username = 'root', password = 'Paradigm4',
                      protocol = 'https', port = 8083)
  }
  # 1 Data movement tests
  # upload data frame
  x = as.scidb(db, iris)
  a = schema(x, "attributes")$name
  # binary download
  check(iris[, 1:4], as.R(x)[, a][, 1:4])
  # iquery binary download
  check(iris[, 1:4], iquery(db, x, return=TRUE)[, a][, 1:4])
  # iquery CSV download
  check(iris[, 1:4], iquery(db, x, return=TRUE, binary=FALSE)[, a][, 1:4])
  # as.R only attributes
  check(iris[, 1], as.R(x, only_attributes=TRUE)[, 1])
  # only attributes and optional skipping of metadata query by supplying schema in full and abbreviated forms
  check(as.R(db$op_count(x))$count, nrow(as.R(x)))
  check(as.R(db$op_count(x))$count, nrow(as.R(x, only_attributes=TRUE)))
  a = scidb(db, x@name, schema=schema(x))
  check(as.R(db$op_count(x))$count, nrow(as.R(a)))
  # Abbreviated form: strip the dimension spec from the schema string
  a = scidb(db, x@name, schema=gsub("\\[.*", "", schema(x)))
  check(as.R(db$op_count(x))$count, nrow(as.R(a)))
  # upload vector
  check(1:5, as.R(as.scidb(db, 1:5))[, 2])
  # upload matrix
  x = matrix(rnorm(100), 10)
  # byrow=TRUE reassembles the downloaded values into the original layout
  check(x, matrix(as.R(as.scidb(db, x))[, 3], 10, byrow=TRUE))
  # upload csparse matrix
  # also check shorthand projection syntax
  x = Matrix::sparseMatrix(i=sample(10, 10), j=sample(10, 10), x=runif(10))
  y = as.R(as.scidb(db, x))
  # downloaded dimensions are zero-based; shift back to R's 1-based indices
  check(x, Matrix::sparseMatrix(i=y$i + 1, j=y$j + 1, x=y$val))
  # issue #126
  df = as.data.frame(matrix(runif(10*100), 10, 100))
  sdf = as.scidb(db, df)
  check(df, as.R(sdf, only_attributes=TRUE))
  # issue #130: NA values and the literal string "NA" must round-trip
  df = data.frame(x1 = c("NA", NA), x2 = c(0.13, NA), x3 = c(TRUE, NA), stringsAsFactors=FALSE)
  x = as.scidb(db, df)
  check(df, as.R(x, only_attributes=TRUE))
  # upload n-d array
  # XXX WRITE ME, not implemented yet
  # garbage collection
  gc()
  # 2 AFL tests
  # Issue #128
  i = 4
  j = 6
  # Here i and j refer to the array dimensions, not the R variables above
  x = db$build("<v:double>[i=1:2,2,0, j=1:3,1,0]", i * j)
  check(sort(as.R(x)$v), c(1, 2, 2, 3, 4, 6))
  # `w` is an unevaluated attribute name (non-standard evaluation); R()
  # escapes back to the values of the R variables i and j
  x = db$apply(x, w, R(i) * R(j))
  # Need as.integer() for integer64 coversion below
  check(as.integer(as.R(x)$w), rep(24, 6))
  # 3 Miscellaneous tests
  # issue #156 type checks
  # int64 option
  if (!test_with_security) {
    db = scidbconnect(host, int64=TRUE)
  } else {
    db = scidbconnect(username = 'root', password = 'Paradigm4',
                      protocol = 'https', port = 8083, int64=TRUE)
  }
  x = db$build("<v:int64>[i=1:2,2,0]", i)
  check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
  if (!test_with_security) {
    db = scidbconnect(host, int64=FALSE)
  } else {
    db = scidbconnect(username = 'root', password = 'Paradigm4',
                      protocol = 'https', port = 8083, int64=FALSE)
  }
  x = db$build("<v:int64>[i=1:2,2,0]", i)
  check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
  # Issue #157
  x = as.R(scidb(db, "build(<v:float>[i=1:5], sin(i))"), binary = FALSE)
  # Issue #163: binary blobs round-trip through serialize()
  x = as.scidb(db, serialize(1:5, NULL))
  y = as.R(x)
  check(y$val[[1]], serialize(1:5,NULL))
  iquery(db, "build(<val:binary>[i=1:2,10,0], null)", return=TRUE)
  # Test for issue #161
  iquery(db, "op_count(list())", return=TRUE, only_attributes=TRUE, binary=FALSE)
  # Test for issue #158
  x = iquery(db, "join(op_count(build(<val:int32>[i=0:234,100,0],random())),op_count(build(<val:int32>[i=0:1234,100,0],random())))",
             schema = "<apples:uint64, oranges:uint64>[i=0:1,1,0]", return=TRUE)
  check(names(x), c("i", "apples", "oranges"))
  # issue #160 deal with partial schema string
  x = iquery(db, "project(list(), name)", schema="<name:string>[No]", return=TRUE)
  check(names(x), c("No", "name"))
  # A variety of equivalent schema spellings must all be accepted
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i; j]")
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3:0:3;j=1:3:0:3]")
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3,1,0,j=1:3,1,0]")
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3,1,0;j=1:3,1,0]")
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3;j=1:3]")
  iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i,j]")
  # basic types from scalars
  lapply(list(TRUE, "x", 420L, pi), function(x) check(x, as.R(as.scidb(db, x))$val))
  # trickier types
  x = Sys.Date()
  check(as.POSIXct(x, tz="UTC"), as.R(as.scidb(db, x))$val)
  x = iris$Species
  # factors come back as character
  check(as.character(x), as.R(as.scidb(db, x))$val)
  # type conversion from data frames
  x = data.frame(a=420L, b=pi, c=TRUE, d=factor("yellow"), e="SciDB", f=as.POSIXct(Sys.Date(), tz="UTC"), stringsAsFactors=FALSE)
  # issue #164 improper default value parsing
  # best-effort cleanup: the remove() may fail if the array does not exist
  tryCatch(iquery (db, "remove(x)"), error=invisible)
  iquery(db, "create array x <x:double not null default 1>[i=1:10]")
  as.R(scidb(db, "x"))
  tryCatch(iquery (db, "remove(x)"), error=invisible)
  # issue #158 support empty dimension spec []
  iquery(db, "apply(build(<val:double>[i=1:3], random()), x, 'abc')", return=TRUE,
         schema="<val:double, x:string>[]", only_attributes=TRUE)
  # issue #172 (uint16 not supported)
  iquery(db, "list('instances')", return=TRUE, binary=TRUE)
  # Test for references and garbage collection in AFL statements
  x = store(db, db$build("<x:double>[i=1:1,1,0]", R(pi)))
  y = db$apply(x, "y", 2)
  rm(x)
  # gc() must not drop the stored array while `y` still references it
  gc()
  as.R(y)
  rm(y)
  # Issue 191 scoping issue example
  a = db$build("<val:double>[x=1:10]", 'random()')
  b = db$aggregate(a, "sum(val)")
  as.R(b)
  # Same sequence from inside a function scope must also work
  foo = function()
  {
    c = db$build("<val:double>[x=1:10]", 'random()')
    d = db$aggregate(c, "sum(val)")
    as.R(d)
  }
  foo()
  # Issue 193 Extreme numeric values get truncated on upload
  upload_data <- data.frame(a = 1.23456e-50)
  upload_ref <- as.scidb(db, upload_data)
  download_data <- as.R(upload_ref, only_attributes = TRUE)
  stopifnot(upload_data$a == download_data$a)
  # Issue 195 Empty data.frame(s)
  # For every supported SciDB type, a one-row and a zero-row download
  # (with and without dimension columns) must agree on column classes.
  for (scidb_type in names(scidb:::.scidbtypes))
  for (only_attributes in c(FALSE, TRUE)) {
    one_df <- iquery(
      db,
      paste("build(<x:", scidb_type, ">[i=0:0], null)"),
      only_attributes = only_attributes,
      return = TRUE)
    empty_df <- iquery(
      db,
      paste("filter(build(<x:", scidb_type, ">[i=0:0], null), false)"),
      only_attributes = only_attributes,
      return = TRUE)
    # The attribute column is first unless a dimension column precedes it
    index <- 1 + ifelse(only_attributes, 0, 1)
    if (class(one_df) == "data.frame") {
      stopifnot(class(one_df[, index]) == class(empty_df[, index]))
      merge(one_df, empty_df)
    }
    else {
      stopifnot(class(one_df[[index]]) == class(empty_df[[index]]))
      mapply(c, one_df, empty_df)
    }
  }
  # Issue 195 Coerce very small floating point values to 0
  small_df <- data.frame(a = .Machine$double.xmin,
                         b = .Machine$double.xmin / 10, # Will be coerced to 0
                         c = -.Machine$double.xmin,
                         d = -.Machine$double.xmin / 10) # Will be coerced to 0
  small_df_db <- as.R(as.scidb(db, small_df), only_attributes = TRUE)
  small_df_fix <- small_df
  small_df_fix$b <- 0
  small_df_fix$d <- 0
  print(small_df_fix)
  print(small_df_db)
  check(small_df_db, small_df_fix)
}
|
/tests/a.R
|
no_license
|
arko-bhattacharya/SciDBR
|
R
| false
| false
| 7,605
|
r
|
# Equality assertion used throughout this test script: values must match
# once attributes and names are ignored. Prints the originating call so
# the test log shows which comparison ran.
check <- function(a, b) {
  print(match.call())
  stopifnot(all.equal(a, b, check.attributes = FALSE, check.names = FALSE))
}
# Load the scidb client and read the test configuration from the
# environment; the tests below run only when SCIDB_TEST_HOST is set.
library("scidb")
host = Sys.getenv("SCIDB_TEST_HOST")
# A scalar string comparison already yields TRUE/FALSE, so there is no
# need to wrap it in ifelse(). Unset variables compare unequal to 'true'.
test_with_security = Sys.getenv("SCIDB_TEST_WITH_SECURITY") == 'true'
if (nchar(host) > 0)
{
if (!test_with_security) {
db = scidbconnect(host)
} else {
db = scidbconnect(username = 'root', password = 'Paradigm4',
protocol = 'https', port = 8083)
}
# 1 Data movement tests
# upload data frame
x = as.scidb(db, iris)
a = schema(x, "attributes")$name
# binary download
check(iris[, 1:4], as.R(x)[, a][, 1:4])
# iquery binary download
check(iris[, 1:4], iquery(db, x, return=TRUE)[, a][, 1:4])
# iquery CSV download
check(iris[, 1:4], iquery(db, x, return=TRUE, binary=FALSE)[, a][, 1:4])
# as.R only attributes
check(iris[, 1], as.R(x, only_attributes=TRUE)[, 1])
# only attributes and optional skipping of metadata query by supplying schema in full and abbreviated forms
check(as.R(db$op_count(x))$count, nrow(as.R(x)))
check(as.R(db$op_count(x))$count, nrow(as.R(x, only_attributes=TRUE)))
a = scidb(db, x@name, schema=schema(x))
check(as.R(db$op_count(x))$count, nrow(as.R(a)))
a = scidb(db, x@name, schema=gsub("\\[.*", "", schema(x)))
check(as.R(db$op_count(x))$count, nrow(as.R(a)))
# upload vector
check(1:5, as.R(as.scidb(db, 1:5))[, 2])
# upload matrix
x = matrix(rnorm(100), 10)
check(x, matrix(as.R(as.scidb(db, x))[, 3], 10, byrow=TRUE))
# upload csparse matrix
# also check shorthand projection syntax
x = Matrix::sparseMatrix(i=sample(10, 10), j=sample(10, 10), x=runif(10))
y = as.R(as.scidb(db, x))
check(x, Matrix::sparseMatrix(i=y$i + 1, j=y$j + 1, x=y$val))
# issue #126
df = as.data.frame(matrix(runif(10*100), 10, 100))
sdf = as.scidb(db, df)
check(df, as.R(sdf, only_attributes=TRUE))
# issue #130
df = data.frame(x1 = c("NA", NA), x2 = c(0.13, NA), x3 = c(TRUE, NA), stringsAsFactors=FALSE)
x = as.scidb(db, df)
check(df, as.R(x, only_attributes=TRUE))
# upload n-d array
# XXX WRITE ME, not implemented yet
# garbage collection
gc()
# 2 AFL tests
# Issue #128
i = 4
j = 6
x = db$build("<v:double>[i=1:2,2,0, j=1:3,1,0]", i * j)
check(sort(as.R(x)$v), c(1, 2, 2, 3, 4, 6))
x = db$apply(x, w, R(i) * R(j))
# Need as.integer() for integer64 coversion below
check(as.integer(as.R(x)$w), rep(24, 6))
# 3 Miscellaneous tests
# issue #156 type checks
# int64 option
if (!test_with_security) {
db = scidbconnect(host, int64=TRUE)
} else {
db = scidbconnect(username = 'root', password = 'Paradigm4',
protocol = 'https', port = 8083, int64=TRUE)
}
x = db$build("<v:int64>[i=1:2,2,0]", i)
check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
if (!test_with_security) {
db = scidbconnect(host, int64=FALSE)
} else {
db = scidbconnect(username = 'root', password = 'Paradigm4',
protocol = 'https', port = 8083, int64=FALSE)
}
x = db$build("<v:int64>[i=1:2,2,0]", i)
check(as.R(x), as.R(as.scidb(db, as.R(x, TRUE))))
# Issue #157
x = as.R(scidb(db, "build(<v:float>[i=1:5], sin(i))"), binary = FALSE)
# Issue #163
x = as.scidb(db, serialize(1:5, NULL))
y = as.R(x)
check(y$val[[1]], serialize(1:5,NULL))
iquery(db, "build(<val:binary>[i=1:2,10,0], null)", return=TRUE)
# Test for issue #161
iquery(db, "op_count(list())", return=TRUE, only_attributes=TRUE, binary=FALSE)
# Test for issue #158
x = iquery(db, "join(op_count(build(<val:int32>[i=0:234,100,0],random())),op_count(build(<val:int32>[i=0:1234,100,0],random())))",
schema = "<apples:uint64, oranges:uint64>[i=0:1,1,0]", return=TRUE)
check(names(x), c("i", "apples", "oranges"))
# issue #160 deal with partial schema string
x = iquery(db, "project(list(), name)", schema="<name:string>[No]", return=TRUE)
check(names(x), c("No", "name"))
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i; j]")
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3:0:3;j=1:3:0:3]")
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3,1,0,j=1:3,1,0]")
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3,1,0;j=1:3,1,0]")
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i=1:3;j=1:3]")
iquery(db, "build(<val:double>[i=1:3;j=1:3], random())", return=T, schema="<val:double>[i,j]")
# basic types from scalars
lapply(list(TRUE, "x", 420L, pi), function(x) check(x, as.R(as.scidb(db, x))$val))
# trickier types
x = Sys.Date()
check(as.POSIXct(x, tz="UTC"), as.R(as.scidb(db, x))$val)
x = iris$Species
check(as.character(x), as.R(as.scidb(db, x))$val)
# type conversion from data frames
x = data.frame(a=420L, b=pi, c=TRUE, d=factor("yellow"), e="SciDB", f=as.POSIXct(Sys.Date(), tz="UTC"), stringsAsFactors=FALSE)
# issue #164 improper default value parsing
tryCatch(iquery (db, "remove(x)"), error=invisible)
iquery(db, "create array x <x:double not null default 1>[i=1:10]")
as.R(scidb(db, "x"))
tryCatch(iquery (db, "remove(x)"), error=invisible)
# issue #158 support empty dimension spec []
iquery(db, "apply(build(<val:double>[i=1:3], random()), x, 'abc')", return=TRUE,
schema="<val:double, x:string>[]", only_attributes=TRUE)
# issue #172 (uint16 not supported)
iquery(db, "list('instances')", return=TRUE, binary=TRUE)
# Test for references and garbage collection in AFL statements
x = store(db, db$build("<x:double>[i=1:1,1,0]", R(pi)))
y = db$apply(x, "y", 2)
rm(x)
gc()
as.R(y)
rm(y)
# Issue 191 scoping issue example
a = db$build("<val:double>[x=1:10]", 'random()')
b = db$aggregate(a, "sum(val)")
as.R(b)
foo = function()
{
c = db$build("<val:double>[x=1:10]", 'random()')
d = db$aggregate(c, "sum(val)")
as.R(d)
}
foo()
# Issue 193 Extreme numeric values get truncated on upload
upload_data <- data.frame(a = 1.23456e-50)
upload_ref <- as.scidb(db, upload_data)
download_data <- as.R(upload_ref, only_attributes = TRUE)
stopifnot(upload_data$a == download_data$a)
# Issue 195 Empty data.frame(s)
for (scidb_type in names(scidb:::.scidbtypes))
for (only_attributes in c(FALSE, TRUE)) {
one_df <- iquery(
db,
paste("build(<x:", scidb_type, ">[i=0:0], null)"),
only_attributes = only_attributes,
return = TRUE)
empty_df <- iquery(
db,
paste("filter(build(<x:", scidb_type, ">[i=0:0], null), false)"),
only_attributes = only_attributes,
return = TRUE)
index <- 1 + ifelse(only_attributes, 0, 1)
if (class(one_df) == "data.frame") {
stopifnot(class(one_df[, index]) == class(empty_df[, index]))
merge(one_df, empty_df)
}
else {
stopifnot(class(one_df[[index]]) == class(empty_df[[index]]))
mapply(c, one_df, empty_df)
}
}
# Issue 195 Coerce very small floating point values to 0
small_df <- data.frame(a = .Machine$double.xmin,
b = .Machine$double.xmin / 10, # Will be coerced to 0
c = -.Machine$double.xmin,
d = -.Machine$double.xmin / 10) # Will be coerced to 0
small_df_db <- as.R(as.scidb(db, small_df), only_attributes = TRUE)
small_df_fix <- small_df
small_df_fix$b <- 0
small_df_fix$d <- 0
print(small_df_fix)
print(small_df_db)
check(small_df_db, small_df_fix)
}
|
# AFGR May 4 2017
# This script simulates data with different noise and correlation structures.
# The alpha tuning function is then applied to observe any patterns in data
# with known structure.

# First thing we need to do is load our library(s) and helper functions.
source('/home/adrose/hiLo/scripts/04_CognitiveModels/functions/functions.R')
install_load('foreach', 'doParallel', 'glmnet', 'bootstrap', 'psych')

# Create a symmetric 5x5 factor correlation matrix with small off-diagonal
# entries and a unit diagonal.
Phi <- matrix(runif(25, -0.1, 0.1), 5, 5)
for (i in 1:5) {
  for (j in 1:5) {
    Phi[i, j] <- Phi[j, i]
  }
}
diag(Phi) <- 1

# Each scenario pairs a noise half-width for the loading matrix with a fixed
# loading on the first factor; wider noise is paired with a weaker signal.
noiseWidth <- c(0.1, 0.2, 0.25, 0.3, 0.35)
signalLoad <- c(0.9, 0.8, 0.7, 0.6, 0.5)

# Simulate each scenario in order (preserving the original RNG call sequence)
# and collect the optimal alpha values per scenario.
alphaVals <- vector("list", length(noiseWidth))
for (k in seq_along(noiseWidth)) {
  fx <- matrix(runif(500, -noiseWidth[k], noiseWidth[k]), 100, 5)
  fx[, 1] <- signalLoad[k]
  observed <- sim.structure(fx = fx, Phi = Phi, n = 5000)$observed
  alphaVals[[k]] <- returnOptAlpha(observed[, 2:100], observed[, 1], nCor = 30)
}

# Write one histogram per scenario to a single PDF, in scenario order.
pdf('simulatedDataAlphaHist.pdf')
for (vals in alphaVals) {
  hist(vals)
}
dev.off()
|
/scripts/04_CognitiveModels/scripts/validateAlphaSelection.R
|
no_license
|
PennBBL/hiLo
|
R
| false
| false
| 1,556
|
r
|
# AFGR May 4 2017
# This script simulates data with different noise and correlation structures.
# The alpha tuning function is then applied to observe any patterns in data
# with known structure.

# First thing we need to do is load our library(s) and helper functions.
source('/home/adrose/hiLo/scripts/04_CognitiveModels/functions/functions.R')
install_load('foreach', 'doParallel', 'glmnet', 'bootstrap', 'psych')

# Create a symmetric 5x5 factor correlation matrix with small off-diagonal
# entries and a unit diagonal.
Phi <- matrix(runif(25, -0.1, 0.1), 5, 5)
for (i in 1:5) {
  for (j in 1:5) {
    Phi[i, j] <- Phi[j, i]
  }
}
diag(Phi) <- 1

# Each scenario pairs a noise half-width for the loading matrix with a fixed
# loading on the first factor; wider noise is paired with a weaker signal.
noiseWidth <- c(0.1, 0.2, 0.25, 0.3, 0.35)
signalLoad <- c(0.9, 0.8, 0.7, 0.6, 0.5)

# Simulate each scenario in order (preserving the original RNG call sequence)
# and collect the optimal alpha values per scenario.
alphaVals <- vector("list", length(noiseWidth))
for (k in seq_along(noiseWidth)) {
  fx <- matrix(runif(500, -noiseWidth[k], noiseWidth[k]), 100, 5)
  fx[, 1] <- signalLoad[k]
  observed <- sim.structure(fx = fx, Phi = Phi, n = 5000)$observed
  alphaVals[[k]] <- returnOptAlpha(observed[, 2:100], observed[, 1], nCor = 30)
}

# Write one histogram per scenario to a single PDF, in scenario order.
pdf('simulatedDataAlphaHist.pdf')
for (vals in alphaVals) {
  hist(vals)
}
dev.off()
|
#----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#----------------------------------------------------------------------------
library(RSuite)
library(testthat)

source("R/test_utils.R")
source("R/project_management.R")

context("Testing if building succeeds for package dependant on stringr (roxygen issue)")

test_that_managed("Build package which dependents on stringr", {
  # Set up a throwaway test project backed by the CRAN and Dir repo adapters,
  # containing one package that depends on stringr.
  prj <- init_test_project(repo_adapters = c("CRAN", "Dir"))
  create_test_package("TestPackage", prj, deps = "stringr")

  # Installing dependencies should pull stringr and its transitive deps.
  # Use TRUE rather than the reassignable shorthand T.
  RSuite::prj_install_deps(prj, clean = TRUE)
  expect_that_packages_installed(
    c("stringr", "magrittr", "stringi", "logging"),
    prj)

  # Building the project should additionally install the test package itself.
  RSuite::prj_build(prj)
  expect_that_packages_installed(
    c("TestPackage", "stringr", "magrittr", "stringi", "logging"),
    prj)
})
|
/tests/test_stringr_dep_build.R
|
permissive
|
gitter-badger/RSuite-1
|
R
| false
| false
| 853
|
r
|
#----------------------------------------------------------------------------
# RSuite
# Copyright (c) 2017, WLOG Solutions
#----------------------------------------------------------------------------
library(RSuite)
library(testthat)

source("R/test_utils.R")
source("R/project_management.R")

context("Testing if building succeeds for package dependant on stringr (roxygen issue)")

test_that_managed("Build package which dependents on stringr", {
  # Set up a throwaway test project backed by the CRAN and Dir repo adapters,
  # containing one package that depends on stringr.
  prj <- init_test_project(repo_adapters = c("CRAN", "Dir"))
  create_test_package("TestPackage", prj, deps = "stringr")

  # Installing dependencies should pull stringr and its transitive deps.
  # Use TRUE rather than the reassignable shorthand T.
  RSuite::prj_install_deps(prj, clean = TRUE)
  expect_that_packages_installed(
    c("stringr", "magrittr", "stringi", "logging"),
    prj)

  # Building the project should additionally install the test package itself.
  RSuite::prj_build(prj)
  expect_that_packages_installed(
    c("TestPackage", "stringr", "magrittr", "stringi", "logging"),
    prj)
})
|
############################################################
### Power Simulation code
############################################################
# edit 5/21/20 added in ability to standardize scores and weight scores

# Read the arguments passed by the bash driver scripts. Positions 6:14 match
# the layout of a plain commandArgs() call (no trailingOnly = TRUE), so these
# indices must not change without also changing the callers.
args <- commandArgs()
chr <- args[6]
numPairs <- args[7]
seed <- args[8]
gene <- args[9]
YPrev <- args[10]
s <- args[11]
ORSize <- args[12]
percentageAssoc <- args[13]
lowLDTF <- args[14]

# Coerce the raw string arguments to numeric values. YPrev and s arrive as
# whole-number percentages and are converted to proportions here.
chr <- as.integer(chr)
seed <- as.integer(seed)
numPairs <- as.integer(numPairs)
YPrev <- as.integer(YPrev) / 100
s <- as.integer(s) / 100
percentageAssoc <- as.integer(percentageAssoc)

# Set random seed for reproducibility.
set.seed(seed)

# Source needed functions.
source('/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/MainPipeLineFunctionsToSource_v2.R')

# Convert the low-LD flag from its string form ("TRUE"/"FALSE") to a logical.
LowLD <- identical(lowLDTF, "TRUE")

# Define score weights if wanted (none by default).
scoreWeights <- c()

# Gamma under the null hypothesis.
null_Gamma <- c(0)

# Effect-size gamma for the requested odds-ratio magnitude. Currently unused
# by the call below (which runs the null gamma) but kept for parity with the
# alternative-hypothesis pipeline scripts.
if (ORSize == "small") {
  gammaEff <- c(0.14)
} else if (ORSize == "medium") {
  gammaEff <- c(0.41)
} else {
  gammaEff <- c(0.69)
}

############################################################
# For original, unstandardized and unweighted tests;
# only R geno SNPs are associated.
RunPowerPipelineLocalGhat_RSNPs(chr = chr, gene = gene, numPairs = numPairs, YPrev = YPrev, s = s, Gamma = null_Gamma, TrueScore = "IBS.gene", ORSize = ORSize, standardizeScores = FALSE, weightedScores = FALSE, percentageAssoc = percentageAssoc, LowLD = LowLD)
|
/JST_sPVE_Code/MainPipeline_Power_Ghat_RSNPs.R
|
no_license
|
arthurvickie/Multi-Marker_Method
|
R
| false
| false
| 1,525
|
r
|
############################################################
### Power Simulation code
############################################################
# edit 5/21/20 added in ability to standardize scores and weight scores

# Read the arguments passed by the bash driver scripts. Positions 6:14 match
# the layout of a plain commandArgs() call (no trailingOnly = TRUE), so these
# indices must not change without also changing the callers.
args <- commandArgs()
chr <- args[6]
numPairs <- args[7]
seed <- args[8]
gene <- args[9]
YPrev <- args[10]
s <- args[11]
ORSize <- args[12]
percentageAssoc <- args[13]
lowLDTF <- args[14]

# Coerce the raw string arguments to numeric values. YPrev and s arrive as
# whole-number percentages and are converted to proportions here.
chr <- as.integer(chr)
seed <- as.integer(seed)
numPairs <- as.integer(numPairs)
YPrev <- as.integer(YPrev) / 100
s <- as.integer(s) / 100
percentageAssoc <- as.integer(percentageAssoc)

# Set random seed for reproducibility.
set.seed(seed)

# Source needed functions.
source('/home/vlynn/Paper_II_Sims/HapGen_Files/Scripts/MainPipeLineFunctionsToSource_v2.R')

# Convert the low-LD flag from its string form ("TRUE"/"FALSE") to a logical.
LowLD <- identical(lowLDTF, "TRUE")

# Define score weights if wanted (none by default).
scoreWeights <- c()

# Gamma under the null hypothesis.
null_Gamma <- c(0)

# Effect-size gamma for the requested odds-ratio magnitude. Currently unused
# by the call below (which runs the null gamma) but kept for parity with the
# alternative-hypothesis pipeline scripts.
if (ORSize == "small") {
  gammaEff <- c(0.14)
} else if (ORSize == "medium") {
  gammaEff <- c(0.41)
} else {
  gammaEff <- c(0.69)
}

############################################################
# For original, unstandardized and unweighted tests;
# only R geno SNPs are associated.
RunPowerPipelineLocalGhat_RSNPs(chr = chr, gene = gene, numPairs = numPairs, YPrev = YPrev, s = s, Gamma = null_Gamma, TrueScore = "IBS.gene", ORSize = ORSize, standardizeScores = FALSE, weightedScores = FALSE, percentageAssoc = percentageAssoc, LowLD = LowLD)
|
\name{facet_gridx}
\alias{FacetGridx}
\alias{facet_gridx}
\title{Lay out panels in a rectangular/tabular manner, with added options}
\description{
This is an enhanced version of ggplot2's
\code{facet_grid}. It adds options to control whether
the facet rows and/or columns are associated with the
axes, and displayed on the left/bottom rather than the
right/top.
}
|
/man/facet_gridx.Rd
|
no_license
|
RmeanyMAN/ggplothacks
|
R
| false
| false
| 376
|
rd
|
\name{facet_gridx}
\alias{FacetGridx}
\alias{facet_gridx}
\title{Lay out panels in a rectangular/tabular manner, with added options}
\description{
This is an enhanced version of ggplot2's
\code{facet_grid}. It adds options to control whether
the facet rows and/or columns are associated with the
axes, and displayed on the left/bottom rather than the
right/top.
}
|
# Load likelihood machinery, the tree, and the transition-matrix helpers.
source("masternegloglikereduced1.R")
source("eudicottree.R")
library("expm")
source("Qmatrixwoodherb3.R")
source("Pruning2.R")

# Chromosome-number observations for the eudicot taxa.
bichrom.dataset <- read.table("eudicotvals.txt", header = FALSE, sep = ",",
                              stringsAsFactors = FALSE)

# Largest chromosome number modelled; the state space has
# 2 * (last.state + 1) states (woody/herbaceous x chromosome count).
last.state <- 50

# Starting parameter values (one row of uniform draws, flattened to a vector).
uniform.samples <- read.csv("sample429.csv", header = FALSE)
a <- as.numeric(t(uniform.samples))

# Uniform root-state prior over all states.
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))

# Slots 1-9: back-transformed MLE parameter estimates; slot 10: the attained
# negative log-likelihood. All NA if the optimization fails.
results <- rep(0, 10)
mle <- try(optim(par = a, fn = negloglikelihood.wh, method = "Nelder-Mead",
                 bichrom.phy = angiosperm.tree, bichrom.data = bichrom.dataset,
                 max.chromosome = last.state, pi.0 = p.0), silent = TRUE)
print(mle)

# Use inherits() rather than comparing class() with ==; class() can return a
# vector of length > 1, which makes the == comparison fragile.
if (inherits(mle, "try-error")) {
  results <- rep(NA, 10)
} else {
  results[1:9] <- exp(mle$par)
  results[10] <- mle$value
}
write.table(results, file = "results429.csv", sep = ",")
|
/Reduced model optimizations/explorelikereduced429.R
|
no_license
|
roszenil/Bichromdryad
|
R
| false
| false
| 750
|
r
|
# Load likelihood machinery, the tree, and the transition-matrix helpers.
source("masternegloglikereduced1.R")
source("eudicottree.R")
library("expm")
source("Qmatrixwoodherb3.R")
source("Pruning2.R")

# Chromosome-number observations for the eudicot taxa.
bichrom.dataset <- read.table("eudicotvals.txt", header = FALSE, sep = ",",
                              stringsAsFactors = FALSE)

# Largest chromosome number modelled; the state space has
# 2 * (last.state + 1) states (woody/herbaceous x chromosome count).
last.state <- 50

# Starting parameter values (one row of uniform draws, flattened to a vector).
uniform.samples <- read.csv("sample429.csv", header = FALSE)
a <- as.numeric(t(uniform.samples))

# Uniform root-state prior over all states.
p.0 <- rep(1, 2 * (last.state + 1)) / (2 * (last.state + 1))

# Slots 1-9: back-transformed MLE parameter estimates; slot 10: the attained
# negative log-likelihood. All NA if the optimization fails.
results <- rep(0, 10)
mle <- try(optim(par = a, fn = negloglikelihood.wh, method = "Nelder-Mead",
                 bichrom.phy = angiosperm.tree, bichrom.data = bichrom.dataset,
                 max.chromosome = last.state, pi.0 = p.0), silent = TRUE)
print(mle)

# Use inherits() rather than comparing class() with ==; class() can return a
# vector of length > 1, which makes the == comparison fragile.
if (inherits(mle, "try-error")) {
  results <- rep(NA, 10)
} else {
  results[1:9] <- exp(mle$par)
  results[10] <- mle$value
}
write.table(results, file = "results429.csv", sep = ",")
|
library(sqldf)

# Pull only the two target dates out of the raw power-consumption file.
power <- read.csv.sql("./household_power_consumption.txt",
                      sql = "select * from file where Date IN ('1/2/2007','2/2/2007')",
                      header = TRUE, sep = ";")
closeAllConnections()

# Combine the separate Date and Time columns into one POSIXct timestamp and
# attach it as a new "datetime" column.
stamp <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
powerDT <- cbind(power, datetime = stamp)
str(powerDT)

# Render the global-active-power time series to a PNG device.
png(file = "plot2.png")
plot(powerDT$datetime, powerDT$Global_active_power, type = "s",
     ylab = "Global Active Power (kilowatts)", xlab = " ")
dev.off()
|
/plot2.R
|
no_license
|
noname2use/ExData_Plotting1
|
R
| false
| false
| 452
|
r
|
library(sqldf)

# Pull only the two target dates out of the raw power-consumption file.
power <- read.csv.sql("./household_power_consumption.txt",
                      sql = "select * from file where Date IN ('1/2/2007','2/2/2007')",
                      header = TRUE, sep = ";")
closeAllConnections()

# Combine the separate Date and Time columns into one POSIXct timestamp and
# attach it as a new "datetime" column.
stamp <- as.POSIXct(paste(power$Date, power$Time), format = "%d/%m/%Y %H:%M:%S")
powerDT <- cbind(power, datetime = stamp)
str(powerDT)

# Render the global-active-power time series to a PNG device.
png(file = "plot2.png")
plot(powerDT$datetime, powerDT$Global_active_power, type = "s",
     ylab = "Global Active Power (kilowatts)", xlab = " ")
dev.off()
|
library(grankp, lib.loc="~newton/Rlibs")

## Code to look at parameters where the modal ordering differs from the
## posterior-mean ordering.
# Example (found by Nick when we were revising the rvalue paper).
t <- c(0.2, 1.9, 0.8)
mu <- c(0.85, 1.55, 0.95) / (1 + t)
t <- c(0.2, 1.9, 0.8)
sigma2 <- t / (1 + t)

# Convert (mu, sigma2) into the integer parameterization expected by grankp.
n <- 2000
ss <- as.integer(round(n / sigma2))
rr <- ss * (1 - mu / sqrt(n))

# All 3! orderings of the three units, in the same order as the original
# hand-unrolled code (so the names and indices are unchanged).
perms <- list(c(1, 2, 3), c(1, 3, 2), c(2, 1, 3),
              c(2, 3, 1), c(3, 1, 2), c(3, 2, 1))

# Exact posterior probability of each ordering via grankp.
test <- numeric(6)
for (k in seq_along(perms)) {
  ord <- perms[[k]]
  test[k] <- grankp(ss[ord], rr[ord], log.p = FALSE)
  names(test)[k] <- paste(ord, collapse = "-")
}

## Simulation check: estimate the same ordering probabilities by Monte Carlo.
B <- 10^7
x <- rnorm(B, mean = mu[1], sd = sqrt(sigma2[1]))
y <- rnorm(B, mean = mu[2], sd = sqrt(sigma2[2]))
z <- rnorm(B, mean = mu[3], sd = sqrt(sigma2[3]))

# For each permutation, the empirical frequency that the draws fall in that
# strict order (first > second > third). A list avoids copying the big vectors.
samples <- list(x, y, z)
sim <- test
for (k in seq_along(perms)) {
  ord <- perms[[k]]
  sim[k] <- mean(samples[[ord[1]]] > samples[[ord[2]]] &
                 samples[[ord[2]]] > samples[[ord[3]]])
}
|
/manuscript/R/bayes1.R
|
no_license
|
wiscstatman/GammaRank
|
R
| false
| false
| 1,453
|
r
|
library(grankp, lib.loc="~newton/Rlibs")

## Code to look at parameters where the modal ordering differs from the
## posterior-mean ordering.
# Example (found by Nick when we were revising the rvalue paper).
t <- c(0.2, 1.9, 0.8)
mu <- c(0.85, 1.55, 0.95) / (1 + t)
t <- c(0.2, 1.9, 0.8)
sigma2 <- t / (1 + t)

# Convert (mu, sigma2) into the integer parameterization expected by grankp.
n <- 2000
ss <- as.integer(round(n / sigma2))
rr <- ss * (1 - mu / sqrt(n))

# All 3! orderings of the three units, in the same order as the original
# hand-unrolled code (so the names and indices are unchanged).
perms <- list(c(1, 2, 3), c(1, 3, 2), c(2, 1, 3),
              c(2, 3, 1), c(3, 1, 2), c(3, 2, 1))

# Exact posterior probability of each ordering via grankp.
test <- numeric(6)
for (k in seq_along(perms)) {
  ord <- perms[[k]]
  test[k] <- grankp(ss[ord], rr[ord], log.p = FALSE)
  names(test)[k] <- paste(ord, collapse = "-")
}

## Simulation check: estimate the same ordering probabilities by Monte Carlo.
B <- 10^7
x <- rnorm(B, mean = mu[1], sd = sqrt(sigma2[1]))
y <- rnorm(B, mean = mu[2], sd = sqrt(sigma2[2]))
z <- rnorm(B, mean = mu[3], sd = sqrt(sigma2[3]))

# For each permutation, the empirical frequency that the draws fall in that
# strict order (first > second > third). A list avoids copying the big vectors.
samples <- list(x, y, z)
sim <- test
for (k in seq_along(perms)) {
  ord <- perms[[k]]
  sim[k] <- mean(samples[[ord[1]]] > samples[[ord[2]]] &
                 samples[[ord[2]]] > samples[[ord[3]]])
}
|
# BUG FIX: ggplot() lives in the ggplot2 package; there is no CRAN package
# named "ggplot", so library(ggplot) fails at load time.
library(ggplot2)

frag <- read.table('/home/thuy/gitrepo/Umberjack_Benchmark/simulations/data/benchmark_art_profile/seq_error_rate.fraglen.csv', sep=",", header=TRUE)
head(frag)
summary(frag)

# Sam filenames encode the simulation parameters as underscore-separated
# tokens, e.g.:
# _indel0.00045_qs0_cover9_fragmean136_fragstd94_seed6298981545792848188.sam
samsplit <- strsplit(as.character(frag$Sam), "_")

# Pull the numeric value of the token tagged with `prefix` out of each
# tokenized filename (e.g. prefix = "cover" extracts 9 from "cover9").
extract_param <- function(tokens, prefix) {
  unlist(lapply(tokens, function(x) {
    tagged <- x[grep(prefix, x)]
    as.numeric(sub(prefix, "", tagged))
  }))
}

frag$indel <- extract_param(samsplit, "indel")
frag$qualshift <- extract_param(samsplit, "qs")
frag$cover <- extract_param(samsplit, "cover")
frag$fragmean <- extract_param(samsplit, "fragmean")
frag$fragstd <- extract_param(samsplit, "fragstd")

# Restrict to the short-fragment runs and inspect them.
shortfrag <- frag[frag$fragmean < 200, ]
head(shortfrag)
summary(shortfrag)

# One fragment-length histogram per coverage level, weighted by read count.
shortfrag$cover <- as.factor(shortfrag$cover)
fig <- ggplot(shortfrag, aes(x=Fraglen, weight=Count)) +
  #geom_density(color="black", fill="blue") +
  geom_histogram(color="black", fill="blue", binwidth=50) +
  facet_wrap(~cover)
print(fig)
|
/R/benchmark_art_fraglen.R
|
no_license
|
tnguyencfe/Umberjack_Benchmark
|
R
| false
| false
| 1,934
|
r
|
# BUG FIX: ggplot() lives in the ggplot2 package; there is no CRAN package
# named "ggplot", so library(ggplot) fails at load time.
library(ggplot2)

frag <- read.table('/home/thuy/gitrepo/Umberjack_Benchmark/simulations/data/benchmark_art_profile/seq_error_rate.fraglen.csv', sep=",", header=TRUE)
head(frag)
summary(frag)

# Sam filenames encode the simulation parameters as underscore-separated
# tokens, e.g.:
# _indel0.00045_qs0_cover9_fragmean136_fragstd94_seed6298981545792848188.sam
samsplit <- strsplit(as.character(frag$Sam), "_")

# Pull the numeric value of the token tagged with `prefix` out of each
# tokenized filename (e.g. prefix = "cover" extracts 9 from "cover9").
extract_param <- function(tokens, prefix) {
  unlist(lapply(tokens, function(x) {
    tagged <- x[grep(prefix, x)]
    as.numeric(sub(prefix, "", tagged))
  }))
}

frag$indel <- extract_param(samsplit, "indel")
frag$qualshift <- extract_param(samsplit, "qs")
frag$cover <- extract_param(samsplit, "cover")
frag$fragmean <- extract_param(samsplit, "fragmean")
frag$fragstd <- extract_param(samsplit, "fragstd")

# Restrict to the short-fragment runs and inspect them.
shortfrag <- frag[frag$fragmean < 200, ]
head(shortfrag)
summary(shortfrag)

# One fragment-length histogram per coverage level, weighted by read count.
shortfrag$cover <- as.factor(shortfrag$cover)
fig <- ggplot(shortfrag, aes(x=Fraglen, weight=Count)) +
  #geom_density(color="black", fill="blue") +
  geom_histogram(color="black", fill="blue", binwidth=50) +
  facet_wrap(~cover)
print(fig)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.