content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
######################################
# This script:
# reads in testing data (generates dummy data if not running on real data)
# processes testing data
# sense checks the processed data
# checks that the fup-params n_any and n_pos used in the study definition were appropriate
# plots the distribution of the testing behaviour variables
# saves the data
######################################
# Preliminaries ----
## Import libraries ----
library('tidyverse')
library('lubridate')
library('arrow')
library('here')
library('glue')
## import local functions and parameters ---
source(here("analysis", "design.R"))
source(here("lib", "functions", "utility.R"))
source(here("lib", "functions", "survival.R"))
## import command-line arguments ----
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
  # no arguments supplied: fall back to defaults for interactive/local runs
  cohort <- "over12"
  vaxn <- 1L
} else if (length(args) < 2) {
  # fail with a clear message instead of a "subscript out of bounds" error
  stop("Expected two arguments: cohort and vaxn", call. = FALSE)
} else {
  cohort <- args[[1]]
  # commandArgs() returns character; coerce so vaxn has the same type
  # (integer) as the default branch and "vax{vaxn}" paths are stable
  vaxn <- as.integer(args[[2]])
}
## create output directories ----
outdir <- ghere("output", cohort, "vax{vaxn}", "covidtests")
# one subdirectory per pipeline stage; dir_create() is idempotent
for (stage in c("extract", "process", "checks")) {
  fs::dir_create(file.path(outdir, stage))
}
# import data ----
# template path; glue() below substitutes {arm} for each treatment arm
studydef_path <- file.path(outdir, "extract", "input_covidtests_{arm}.feather")
# stack the treated and control extracts into one dataset
data_studydef_dummy <- bind_rows(
# read extracted data for treated and control
lapply(
c("treated", "control"),
function(arm) read_feather(glue(studydef_path))
)
) %>%
# because date types are not returned consistently by cohort extractor
mutate(across(ends_with("_date"), ~ as.Date(.))) %>%
# because of a bug in cohort extractor -- remove once fixed
mutate(patient_id = as.integer(patient_id))
# create labels for covidtestcuts (to match the ones generated by the cuts() function)
# each label is the half-open interval "(lower,upper]" for consecutive cut points
covidtestcuts_labels <- paste0(
  "(", covidtestcuts[-length(covidtestcuts)], ",", covidtestcuts[-1], "]"
)
# use externally created dummy data if not running in the server
# check variables are as they should be
# an empty or "expectations" backend means we are running locally, so build
# custom dummy data and validate it against the study-definition extract
if(Sys.getenv("OPENSAFELY_BACKEND") %in% c("", "expectations")){
# generate custom dummy data
data_custom_dummy <- local({
# set seed so that dummy data results are reproducible
set.seed(10)
# marginal probabilities used to simulate test type and symptom status
case_category <- c("LFT_Only"=0.4, "PCR_Only"=0.5, "LFT_WithPCR"=0.1)
symptomatic <- c("N"=0.4, "Y"=0.6)
# start from the matched cohort so dummy rows align with matched patients
data_tests <- read_rds(ghere("output", cohort,"vax{vaxn}", "match", "data_matched.rds")) %>%
select(patient_id, trial_date)
# add number of tests variables
# one anytest_*_n / postest_*_n pair per period, with postest <= anytest
data_tests <- data_tests %>%
bind_cols(map_dfc(
.x = seq_along(covidtestcuts_labels)-1, #-1 because python starts at 0
.f = ~tibble(
!! sym(str_c("anytest_", .x, "_n")) := rpois(n=nrow(data_tests), lambda = 2),
!! sym(str_c("postest_", .x, "_n")) := as.integer(pmax(
!! sym(str_c("anytest_", .x, "_n")) - rpois(n=nrow(data_tests), lambda = 1),
0
))
)
))
# NOTE(review): rbernoulli() is deprecated and removed in purrr >= 1.0;
# consider runif(n) < p if purrr is ever upgraded
data_tests <- data_tests %>%
mutate(
# first anytest during the period
anytest_1_day = if_else(
rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
as.integer(runif(
n=nrow(data_tests),
-prebaselineperiods*postbaselinedays,
baselinedays+postbaselineperiods*postbaselinedays
))
),
# symptom category of anytest_1_day
anytest_1_symptomatic = if_else(
is.na(anytest_1_day),
NA_integer_,
as.integer(rbernoulli(n=nrow(data_tests), p=symptomatic[["Y"]]))
),
# first positive test in the period (anytest_1_day with added missingness for negative tests)
postest_1_day = if_else(
rbernoulli(n = nrow(data_tests), p=0.5),
NA_integer_,
anytest_1_day
),
# first positive test in the period and ever (postest_1_day with added missingness for those that have had a positive test before the period)
firstpostest_day = if_else(
is.na(postest_1_day) | rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
postest_1_day
),
# type of test on firstpostest_day
firstpostest_category = factor(
if_else(
is.na(firstpostest_day),
NA_character_,
sample(x=names(case_category), size = nrow(data_tests), prob = unname(case_category), replace=TRUE)
),
levels = names(case_category)
)
)
# derive tests 2..n_any (positives up to n_pos); n_any and n_pos presumably
# come from design.R -- confirm they are >= 2 or this loop runs backwards
for (i in 2:n_any) {
# derive subsequent anytest_*_day and anytest_*_symptomatic
data_tests <- data_tests %>%
mutate(
!! sym(glue("anytest_{i}_day")) := if_else(
rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
!! sym(glue("anytest_{i-1}_day")) + as.integer(runif(n=nrow(data_tests), 1, 50))
),
!! sym(glue("anytest_{i}_symptomatic")) := if_else(
is.na(!! sym(glue("anytest_{i}_day"))),
NA_integer_,
as.integer(rbernoulli(n=nrow(data_tests), p=symptomatic[["Y"]]))
)
)
# derive subsequent postest_*_day
if (i <= n_pos) {
data_tests <- data_tests %>%
mutate(
!! sym(glue("postest_{i}_day")) := if_else(
rbernoulli(n = nrow(data_tests), p=0.5),
NA_integer_,
!! sym(glue("anytest_{i}_day"))
)
)
}
}
# convert day offsets to calendar dates anchored on trial_date and label
# symptomatic indicators with the N/Y factor levels
data_tests <- data_tests %>%
mutate(across(matches("anytest_\\d+_symptomatic"), factor, levels = c(0,1), labels = names(symptomatic))) %>%
mutate(across(ends_with("_day"), ~ as.Date(as.character(trial_date + .)))) %>%
rename_with(~str_replace(., "_day", "_date"), ends_with("_day"))
return(data_tests)
})
# check the custom dummy data has exactly the same variables as the
# study-definition extract; stop with the offending names otherwise
not_in_studydef <- names(data_custom_dummy)[!( names(data_custom_dummy) %in% names(data_studydef_dummy) )]
not_in_custom <- names(data_studydef_dummy)[!( names(data_studydef_dummy) %in% names(data_custom_dummy) )]
if(length(not_in_custom)!=0) stop(
paste(
"These variables are in studydef but not in custom: ",
paste(not_in_custom, collapse=", ")
)
)
if(length(not_in_studydef)!=0) stop(
paste(
"These variables are in custom but not in studydef: ",
paste(not_in_studydef, collapse=", ")
)
)
# reorder columns
data_studydef_dummy <- data_studydef_dummy[,names(data_custom_dummy)]
# compare column classes across the two datasets and stop on any mismatch
unmatched_types <- cbind(
map_chr(data_studydef_dummy, ~paste(class(.), collapse=", ")),
map_chr(data_custom_dummy, ~paste(class(.), collapse=", "))
)[ (map_chr(data_studydef_dummy, ~paste(class(.), collapse=", ")) != map_chr(data_custom_dummy, ~paste(class(.), collapse=", ")) ), ] %>%
as.data.frame() %>% rownames_to_column()
if(nrow(unmatched_types)>0) stop(
#unmatched_types
"inconsistent typing in studydef : dummy dataset\n",
apply(unmatched_types, 1, function(row) paste(paste(row, collapse=" : "), "\n"))
)
# locally, analysis proceeds on the validated custom dummy data
data_extract <- data_custom_dummy
rm(data_studydef_dummy, data_custom_dummy)
} else {
# on the real backend, use the study-definition extract as-is
data_extract <- data_studydef_dummy
rm(data_studydef_dummy)
}
# summarise and save data_extract
# NOTE(review): the filename says "input_treated" but data_extract contains
# both treated and control arms -- confirm the name is intentional
my_skim(data_extract, path = file.path(outdir, "extract", "input_treated_skim.txt"))
# derive per-period follow-up time (persondays) for each matched patient
data_split <- local({
# derive censor date and time until censoring
data_matched <- read_rds(ghere("output", cohort,"vax{vaxn}", "match", "data_matched.rds")) %>%
mutate(
# censored at the earliest of deregistration, death, end of maximum
# follow-up, or the matched control becoming treated
censor_date = pmin(
dereg_date,
death_date,
# study_dates$testend_date,
trial_date - 1 + maxfup,
controlistreated_date - 1,
na.rm = TRUE
),
tte_censor = as.integer(censor_date-(trial_date-1)),
ind_outcome = 0
# censor_date = trial_date + maxfup -1 # use this to overwrite above definition until issue with `patients.minimum_of()` and date arithmetic is fixed
) %>%
select(patient_id, trial_date, treated, censor_date, tte_censor) %>%
# new_id uniquely identifies each patient x trial_date pair for tmerge()
group_by(patient_id, trial_date) %>%
mutate(new_id = cur_group_id()) %>%
ungroup()
# generate dataset with postbaselinecuts
# one row per id x post-baseline period, carrying the period start time
fup_split <- data_matched %>%
select(new_id) %>%
uncount(weights = length(postbaselinecuts)-1, .id="period_id") %>%
mutate(fupstart_time = postbaselinecuts[period_id]) %>%
droplevels() %>%
select(new_id, period_id, fupstart_time)
# split time until censoring by postbaseline cuts
# NOTE(review): tmerge()/tdc() are survival-package functions -- presumably
# made available via lib/functions/survival.R; confirm they are in scope
data_split <-
tmerge(
data1 = data_matched,
data2 = data_matched,
id = new_id,
tstart = 0,
tstop = tte_censor
) %>%
# add post-treatment periods
tmerge(
data1 = .,
data2 = fup_split,
id = new_id,
period_id = tdc(fupstart_time, period_id)
) %>%
mutate(
fup_cut = cut(
tstop, # because closed on the right
breaks = covidtestcuts,
right=TRUE
)
) %>%
transmute(
patient_id, trial_date, treated, censor_date, fup_cut,
persondays = as.integer(tstop-tstart)
) %>%
as_tibble()
return(data_split)
})
# reshape data_extract to data_anytest_long
# one row per extracted test date, with symptomatic status, result,
# first-ever-positive category, censoring flag, and binned period
data_anytest_long <- data_extract %>%
# select recurring date variables
select(patient_id, trial_date, matches(c("\\w+test_\\d+_date", "\\w+test_\\d+_symptomatic"))) %>%
# rename to make it easier to reshape
# moves the numeric index prefix to the front so it can be pivoted out below
rename_with(
.fn = ~str_c(str_extract(.x, "\\d+_"), str_remove(.x, "\\d+_")),
.cols = matches("\\w+test_\\d+_\\w+")
) %>%
pivot_longer(
cols = matches("\\d+_\\w+test_\\w+"),
names_to = c("index", ".value"),
names_pattern = "(.*)_(.*_.*)",
values_drop_na = TRUE
) %>%
# a postest_date is only present when the corresponding anytest was positive
mutate(anytest_result = if_else(is.na(postest_date), "negative", "positive")) %>%
select(-index, -postest_date) %>%
left_join(
data_extract %>% select(patient_id, trial_date, starts_with("firstpostest")),
# joining on "anytest_date" = "firstpostest_date" here means that firstpostest_category joined to date of individual's first ever postest
by = c("patient_id", "trial_date", "anytest_date" = "firstpostest_date")
) %>%
# join censor date
left_join(
data_split %>%
distinct(patient_id, trial_date, treated, censor_date),
by = c("patient_id", "trial_date")
) %>%
# censor = TRUE when the test occurred after the individual's censor date
mutate(censor = if_else(anytest_date <= censor_date, FALSE, TRUE)) %>%
# bin anytest_date
mutate(
anytest_cut=cut(
as.integer(anytest_date - trial_date),
breaks = covidtestcuts,
right=TRUE
)
) %>%
select(-anytest_date, -censor_date) %>% # remove unused
# remove any that are outside the time periods of interest (if this is the case for anytest_cut, it will be the case for all)
filter(!is.na(anytest_cut)) %>%
arrange(patient_id, trial_date, anytest_cut)
# calculate the sum of events per period
# result: one row per patient x trial_date x period with event counts,
# extracted totals, persondays, and a pre/post-baseline label
data_anytest_sum <- data_anytest_long %>%
# sum the number of tests per period
group_by(patient_id, trial_date, treated, anytest_cut) %>%
summarise(
# sum all dates (this is just used to check value of n in study definition if correct)
sum_anytest_uncensored=n(),
sum_postest_uncensored=sum(anytest_result=="positive", na.rm=TRUE),
# all variables below in this summarise() are derived from censored dates:
# sum censored dates (this will be used to calculate testing rates)
sum_anytest=sum(!censor),
# number of positive tests
sum_postest=sum(anytest_result=="positive" & !censor, na.rm=TRUE),
# number of symptomatic tests
sum_symptomatic=sum(anytest_symptomatic=="Y" & !censor, na.rm = TRUE),
# number of firstpostest
sum_firstpostest=sum(!is.na(firstpostest_category) & !censor),
# number of each firstpostest type
sum_lftonly=sum(firstpostest_category=="LFT_Only" & !censor, na.rm = TRUE),
sum_pcronly=sum(firstpostest_category=="PCR_Only" & !censor, na.rm = TRUE),
sum_both=sum(firstpostest_category=="LFT_WithPCR" & !censor, na.rm = TRUE),
.groups="keep"
) %>%
ungroup() %>%
# complete the grid: add a row for every patient x period with no tests
full_join(
expand(data_split, nesting(patient_id, trial_date, treated), fup_cut),
by = c("patient_id", "trial_date", "treated", "anytest_cut"= "fup_cut")
) %>%
# the grid rows just added have NA counts; set them to zero
replace_na(
replace=list(
sum_anytest_uncensored=0,
sum_postest_uncensored=0,
sum_anytest=0,
sum_postest=0,
sum_symptomatic=0,
sum_firstpostest=0,
sum_lftonly=0,
sum_pcronly=0,
sum_both=0
)
) %>%
# join the total number of tests per period (extracted with returning="number_of_matches_in_period" in study definition)
left_join(
data_extract %>%
select(patient_id, trial_date, matches("\\w+test_\\d+_n")) %>%
pivot_longer(
cols = matches("\\w+test_\\d+_n"),
names_pattern = "(.*test)_(.*)_n",
names_to = c(".value", "fup_cut")
) %>%
mutate(across(
fup_cut,
~factor(as.integer(.x)+1, # +1 because python starts at 0
levels = seq_along(covidtestcuts_labels),
labels = covidtestcuts_labels
))),
by = c("patient_id", "trial_date", "anytest_cut" = "fup_cut")
) %>%
# join data_split for persondays of follow-up per period
left_join(
data_split %>% select(patient_id, trial_date, treated, fup_cut, persondays),
by = c("patient_id", "trial_date", "treated", "anytest_cut" = "fup_cut")
) %>%
# label periods as pre or post baseline
mutate(
period = factor(if_else(
as.character(anytest_cut) %in% covidtestcuts_labels[1:fup_params$prebaselineperiods],
"prebaseline",
"postbaseline"
), levels = c("prebaseline", "postbaseline")
)
) %>%
# fill in persondays for prebaseline periods
# (must be postbaselinedays, otherwise they wouldn't be eligible)
mutate(across(
persondays,
~if_else(
period=="prebaseline",
as.integer(postbaselinedays),
persondays
))) %>%
arrange(patient_id, treated, trial_date, anytest_cut)
# checks ----
# sense checks, written to the job log; top-level pipeline results are
# auto-printed when run via Rscript
# (fix: the separator/header cat() calls previously lacked trailing "\n",
# so consecutive log lines ran together)
cat("-----------------\n")
cat("Sense checks ----\n")
# each patient has at most one first-ever positive test, so all of these
# per-patient totals must be <= 1
cat("Check maximum counts derived from firstpostest = 1:\n")
data_anytest_sum %>%
  # sum all events within each patient_id, trial_date
  group_by(patient_id, trial_date) %>%
  summarise(across(c(sum_firstpostest, sum_lftonly, sum_pcronly, sum_both), sum)) %>%
  ungroup() %>%
  # max across all patient_id, trial_date
  summarise(across(c(sum_firstpostest, sum_lftonly, sum_pcronly, sum_both), max))
cat("When persondays=NA (i.e. patient censored before period), check sums of censored counts are always zero:\n")
# NOTE(review): if no rows have persondays NA, min()/max() run on empty
# vectors and warn (returning Inf/-Inf) -- harmless for a log check
data_anytest_sum %>%
  filter(is.na(persondays)) %>%
  # group_by(anytest_cut, persondays) %>%
  summarise(across(
    c(sum_anytest, sum_postest, sum_symptomatic, sum_firstpostest, sum_lftonly, sum_pcronly, sum_both),
    list(min=min, max=max)
  )) %>%
  # ungroup() %>%
  pivot_longer(
    cols=starts_with("sum"),
    names_pattern = "sum_(.*)_(.*)",
    names_to = c("name", ".value")
  )
# check that the sums of the anytest_*_date variables match the anytest*_*_n variables
# if not, it's a flag that we need to increase n in the study definition
cat("------------------------------------------\n")
cat("Check `n_any` and `n_pos` appropriate ----\n")
cat("Summarise number of tests missing per person per period when summing dates:\n")
data_anytest_sum %>%
  mutate(
    # extracted total minus tests recovered from the *_date variables;
    # positive values mean n_any / n_pos were too small in the study definition
    n_missing_anytest = anytest - sum_anytest_uncensored,
    n_missing_postest = postest - sum_postest_uncensored
  ) %>%
  group_by(anytest_cut) %>%
  summarise(across(
    starts_with("n_missing"),
    list(min=min, max=max, mean=mean, median=median)
  ), .groups = "keep") %>%
  ungroup() %>%
  pivot_longer(
    cols = starts_with("n_missing"),
    names_pattern = "n_missing_(.*)_(.*)",
    names_to = c("result", ".value")
  ) %>%
  arrange(result, anytest_cut) %>%
  group_split(result) %>% as.list()
# Plot, per period, the tests recovered from the extracted *_date variables
# as a percentage of the extracted *_n totals. Percentages well below 100%
# flag that n_any / n_pos in the study definition should be increased.
# `result` is "anytest" or "postest"; `data` defaults to the global summary
# (added parameter is backward-compatible -- existing calls are unchanged).
plot_function <- function(result, data = data_anytest_sum) {
  plotpath <- file.path(outdir, "checks", glue("check_n_{result}.png"))
  cat(glue("see {plotpath} for sum({result}_*_date) as a percent of {result}_*_n per period"), "\n")
  p <- data %>%
    # percent of the extracted count accounted for by summed date variables
    mutate(percent = 100*!!sym(glue("sum_{result}_uncensored"))/!!sym(result)) %>%
    ggplot(aes(x=percent, colour=period)) +
    geom_freqpoly(binwidth=1) +
    facet_wrap(~anytest_cut, scales = "free_y", nrow=2) +
    theme_bw() +
    theme(legend.position = "bottom")
  ggsave(filename = plotpath, plot = p, width = 20, height = 15, units = "cm")
  return(p)
}
plot_function("anytest")
plot_function("postest")
cat("------------------------------------------")
cat("Check distribution of event counts ----\n")
plotpath <- file.path(outdir, "checks", "check_n_{result}.png")
cat(glue("see {plotpath} for distribution of event counts"), "\n")
data_anytest_sum %>%
select(-ends_with("uncensored")) %>%
select(patient_id, trial_date, treated, anytest_cut, starts_with("sum_")) %>%
pivot_longer(
cols = starts_with("sum")
) %>%
mutate(across(treated, factor, levels=0:1, labels = c("control", "treated"))) %>%
mutate(across(name,
~factor(
str_remove_all(.x, "sum_"),
levels = c("anytest", "symptomatic", "postest", "firstpostest", "lftonly", "pcronly", "both"))
)) %>%
ggplot(aes(x = value, y = ..density.., colour = treated)) +
geom_freqpoly(binwidth = 1) +
facet_grid(anytest_cut~name) +
scale_color_discrete(name=NULL) +
theme_bw() +
theme(legend.position = "bottom")
ggsave(
filename = file.path(outdir, "checks", glue("check_counts_dist.png")),
width = 15, height = 20, units = "cm"
)
# save dataset ----
# final analysis dataset: one row per patient x trial_date x period with
# censored event counts and persondays of follow-up
data_anytest_sum %>%
# only keep periods during which the patient was still under follow-up
filter(!is.na(persondays)) %>%
# keep only the censored sums (drop the *_uncensored check columns)
select(-ends_with("uncensored")) %>%
select(patient_id, trial_date, treated, anytest_cut, persondays, starts_with("sum_")) %>%
write_rds(file.path(outdir, "process", "data_anytest_sum.rds"), compress = "gz")
| /analysis/covidtests/process_covidtests.R | permissive | opensafely/vaccine-effectiveness-in-kids | R | false | false | 17,874 | r | ######################################
# This script:
# reads in testing data (generates dummy data if not running on real data)
# processes testing data
# sense checks the processed data
# checks that fup-params n_any and n_pos were used in the study definition were appropriate
# plots the distribution of the testing behaviour variables
# saves the data
######################################
# Preliminaries ----
## Import libraries ----
library('tidyverse')
library('lubridate')
library('arrow')
library('here')
library('glue')
## import local functions and parameters ---
source(here("analysis", "design.R"))
source(here("lib", "functions", "utility.R"))
source(here("lib", "functions", "survival.R"))
## import command-line arguments ----
args <- commandArgs(trailingOnly = TRUE)
if (length(args) == 0) {
cohort <- "over12"
vaxn <- 1L
} else {
cohort <- args[[1]]
vaxn <- args[[2]]
}
## create output directories ----
outdir <- ghere("output", cohort,"vax{vaxn}", "covidtests")
fs::dir_create(file.path(outdir, "extract"))
fs::dir_create(file.path(outdir, "process"))
fs::dir_create(file.path(outdir, "checks"))
# import data ----
studydef_path <- file.path(outdir, "extract", "input_covidtests_{arm}.feather")
data_studydef_dummy <- bind_rows(
# read extracted data for treated and control
lapply(
c("treated", "control"),
function(arm) read_feather(glue(studydef_path))
)
) %>%
# because date types are not returned consistently by cohort extractor
mutate(across(ends_with("_date"), ~ as.Date(.))) %>%
# because of a bug in cohort extractor -- remove once fixed
mutate(patient_id = as.integer(patient_id))
# create labels for covidtestcuts (to match the ones generated by the cuts() function)
covidtestcuts_labels <- as.character(glue("({covidtestcuts[-length(covidtestcuts)]},{covidtestcuts[-1]}]"))
# use externally created dummy data if not running in the server
# check variables are as they should be
if(Sys.getenv("OPENSAFELY_BACKEND") %in% c("", "expectations")){
# generate custom dummy data
data_custom_dummy <- local({
# set seed so that dummy data results are reproducible
set.seed(10)
case_category <- c("LFT_Only"=0.4, "PCR_Only"=0.5, "LFT_WithPCR"=0.1)
symptomatic <- c("N"=0.4, "Y"=0.6)
data_tests <- read_rds(ghere("output", cohort,"vax{vaxn}", "match", "data_matched.rds")) %>%
select(patient_id, trial_date)
# add number of tests variables
data_tests <- data_tests %>%
bind_cols(map_dfc(
.x = seq_along(covidtestcuts_labels)-1, #-1 because python starts at 0
.f = ~tibble(
!! sym(str_c("anytest_", .x, "_n")) := rpois(n=nrow(data_tests), lambda = 2),
!! sym(str_c("postest_", .x, "_n")) := as.integer(pmax(
!! sym(str_c("anytest_", .x, "_n")) - rpois(n=nrow(data_tests), lambda = 1),
0
))
)
))
data_tests <- data_tests %>%
mutate(
# first anytest during the period
anytest_1_day = if_else(
rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
as.integer(runif(
n=nrow(data_tests),
-prebaselineperiods*postbaselinedays,
baselinedays+postbaselineperiods*postbaselinedays
))
),
# symptom category of anytest_1_day
anytest_1_symptomatic = if_else(
is.na(anytest_1_day),
NA_integer_,
as.integer(rbernoulli(n=nrow(data_tests), p=symptomatic[["Y"]]))
),
# first positive test in the period (anytest_1_day with added missingness for negative tests)
postest_1_day = if_else(
rbernoulli(n = nrow(data_tests), p=0.5),
NA_integer_,
anytest_1_day
),
# first positive test in the period and ever (postest_1_day with added missingness for those that have had a positive test before the period)
firstpostest_day = if_else(
is.na(postest_1_day) | rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
postest_1_day
),
# type of test on firstpostest_day
firstpostest_category = factor(
if_else(
is.na(firstpostest_day),
NA_character_,
sample(x=names(case_category), size = nrow(data_tests), prob = unname(case_category), replace=TRUE)
),
levels = names(case_category)
)
)
for (i in 2:n_any) {
# derive subsequent anytest_*_day and anytest_*_symptomatic
data_tests <- data_tests %>%
mutate(
!! sym(glue("anytest_{i}_day")) := if_else(
rbernoulli(n = nrow(data_tests), p=0.3),
NA_integer_,
!! sym(glue("anytest_{i-1}_day")) + as.integer(runif(n=nrow(data_tests), 1, 50))
),
!! sym(glue("anytest_{i}_symptomatic")) := if_else(
is.na(!! sym(glue("anytest_{i}_day"))),
NA_integer_,
as.integer(rbernoulli(n=nrow(data_tests), p=symptomatic[["Y"]]))
)
)
# derive subsequent postest_*_day
if (i <= n_pos) {
data_tests <- data_tests %>%
mutate(
!! sym(glue("postest_{i}_day")) := if_else(
rbernoulli(n = nrow(data_tests), p=0.5),
NA_integer_,
!! sym(glue("anytest_{i}_day"))
)
)
}
}
data_tests <- data_tests %>%
mutate(across(matches("anytest_\\d+_symptomatic"), factor, levels = c(0,1), labels = names(symptomatic))) %>%
mutate(across(ends_with("_day"), ~ as.Date(as.character(trial_date + .)))) %>%
rename_with(~str_replace(., "_day", "_date"), ends_with("_day"))
return(data_tests)
})
not_in_studydef <- names(data_custom_dummy)[!( names(data_custom_dummy) %in% names(data_studydef_dummy) )]
not_in_custom <- names(data_studydef_dummy)[!( names(data_studydef_dummy) %in% names(data_custom_dummy) )]
if(length(not_in_custom)!=0) stop(
paste(
"These variables are in studydef but not in custom: ",
paste(not_in_custom, collapse=", ")
)
)
if(length(not_in_studydef)!=0) stop(
paste(
"These variables are in custom but not in studydef: ",
paste(not_in_studydef, collapse=", ")
)
)
# reorder columns
data_studydef_dummy <- data_studydef_dummy[,names(data_custom_dummy)]
unmatched_types <- cbind(
map_chr(data_studydef_dummy, ~paste(class(.), collapse=", ")),
map_chr(data_custom_dummy, ~paste(class(.), collapse=", "))
)[ (map_chr(data_studydef_dummy, ~paste(class(.), collapse=", ")) != map_chr(data_custom_dummy, ~paste(class(.), collapse=", ")) ), ] %>%
as.data.frame() %>% rownames_to_column()
if(nrow(unmatched_types)>0) stop(
#unmatched_types
"inconsistent typing in studydef : dummy dataset\n",
apply(unmatched_types, 1, function(row) paste(paste(row, collapse=" : "), "\n"))
)
data_extract <- data_custom_dummy
rm(data_studydef_dummy, data_custom_dummy)
} else {
data_extract <- data_studydef_dummy
rm(data_studydef_dummy)
}
# summarise and save data_extract
my_skim(data_extract, path = file.path(outdir, "extract", "input_treated_skim.txt"))
data_split <- local({
# derive censor date and time until censoring
data_matched <- read_rds(ghere("output", cohort,"vax{vaxn}", "match", "data_matched.rds")) %>%
mutate(
censor_date = pmin(
dereg_date,
death_date,
# study_dates$testend_date,
trial_date - 1 + maxfup,
controlistreated_date - 1,
na.rm = TRUE
),
tte_censor = as.integer(censor_date-(trial_date-1)),
ind_outcome = 0
# censor_date = trial_date + maxfup -1 # use this to overwrite above definition until issue with `patients.minimum_of()` and date arithmetic is fixed
) %>%
select(patient_id, trial_date, treated, censor_date, tte_censor) %>%
group_by(patient_id, trial_date) %>%
mutate(new_id = cur_group_id()) %>%
ungroup()
# generate dataset with postbaselinecuts
fup_split <- data_matched %>%
select(new_id) %>%
uncount(weights = length(postbaselinecuts)-1, .id="period_id") %>%
mutate(fupstart_time = postbaselinecuts[period_id]) %>%
droplevels() %>%
select(new_id, period_id, fupstart_time)
# split time until censoring by postbaseline cuts
data_split <-
tmerge(
data1 = data_matched,
data2 = data_matched,
id = new_id,
tstart = 0,
tstop = tte_censor
) %>%
# add post-treatment periods
tmerge(
data1 = .,
data2 = fup_split,
id = new_id,
period_id = tdc(fupstart_time, period_id)
) %>%
mutate(
fup_cut = cut(
tstop, # because closed on the right
breaks = covidtestcuts,
right=TRUE
)
) %>%
transmute(
patient_id, trial_date, treated, censor_date, fup_cut,
persondays = as.integer(tstop-tstart)
) %>%
as_tibble()
return(data_split)
})
# reshape data_extract to data_anytest_long
data_anytest_long <- data_extract %>%
# select recurring date variables
select(patient_id, trial_date, matches(c("\\w+test_\\d+_date", "\\w+test_\\d+_symptomatic"))) %>%
# rename to make it easier to reshape
rename_with(
.fn = ~str_c(str_extract(.x, "\\d+_"), str_remove(.x, "\\d+_")),
.cols = matches("\\w+test_\\d+_\\w+")
) %>%
pivot_longer(
cols = matches("\\d+_\\w+test_\\w+"),
names_to = c("index", ".value"),
names_pattern = "(.*)_(.*_.*)",
values_drop_na = TRUE
) %>%
mutate(anytest_result = if_else(is.na(postest_date), "negative", "positive")) %>%
select(-index, -postest_date) %>%
left_join(
data_extract %>% select(patient_id, trial_date, starts_with("firstpostest")),
# joining on "anytest_date" = "firstpostest_date" here means that firstpostest_category joined to date of individual's first ever postest
by = c("patient_id", "trial_date", "anytest_date" = "firstpostest_date")
) %>%
# join censor date
left_join(
data_split %>%
distinct(patient_id, trial_date, treated, censor_date),
by = c("patient_id", "trial_date")
) %>%
mutate(censor = if_else(anytest_date <= censor_date, FALSE, TRUE)) %>%
# bin anytest_date
mutate(
anytest_cut=cut(
as.integer(anytest_date - trial_date),
breaks = covidtestcuts,
right=TRUE
)
) %>%
select(-anytest_date, -censor_date) %>% # remove unused
# remove any that are outside the time periods of interest (if this is the case for anytest_cut, it will be the case for all)
filter(!is.na(anytest_cut)) %>%
arrange(patient_id, trial_date, anytest_cut)
# calculate the sum of events per period
data_anytest_sum <- data_anytest_long %>%
# sum the number of tests per period
group_by(patient_id, trial_date, treated, anytest_cut) %>%
summarise(
# sum all dates (this is just used to check value of n in study definition if correct)
sum_anytest_uncensored=n(),
sum_postest_uncensored=sum(anytest_result=="positive", na.rm=TRUE),
# all variables below in this summarise() are derived from censored dates:
# sum censored dates (this will be used to calculate testing rates)
sum_anytest=sum(!censor),
# number of positive tests
sum_postest=sum(anytest_result=="positive" & !censor, na.rm=TRUE),
# number of symtpomatic tests
sum_symptomatic=sum(anytest_symptomatic=="Y" & !censor, na.rm = TRUE),
# number of firstpostest
sum_firstpostest=sum(!is.na(firstpostest_category) & !censor),
# number of each firstpostest type
sum_lftonly=sum(firstpostest_category=="LFT_Only" & !censor, na.rm = TRUE),
sum_pcronly=sum(firstpostest_category=="PCR_Only" & !censor, na.rm = TRUE),
sum_both=sum(firstpostest_category=="LFT_WithPCR" & !censor, na.rm = TRUE),
.groups="keep"
) %>%
ungroup() %>%
full_join(
expand(data_split, nesting(patient_id, trial_date, treated), fup_cut),
by = c("patient_id", "trial_date", "treated", "anytest_cut"= "fup_cut")
) %>%
replace_na(
replace=list(
sum_anytest_uncensored=0,
sum_postest_uncensored=0,
sum_anytest=0,
sum_postest=0,
sum_symptomatic=0,
sum_firstpostest=0,
sum_lftonly=0,
sum_pcronly=0,
sum_both=0
)
) %>%
# join the total number of tests per period (extracted with returning="number_of_matches_in_period" in study definition)
left_join(
data_extract %>%
select(patient_id, trial_date, matches("\\w+test_\\d+_n")) %>%
pivot_longer(
cols = matches("\\w+test_\\d+_n"),
names_pattern = "(.*test)_(.*)_n",
names_to = c(".value", "fup_cut")
) %>%
mutate(across(
fup_cut,
~factor(as.integer(.x)+1, # +1 because python starts at 0
levels = seq_along(covidtestcuts_labels),
labels = covidtestcuts_labels
))),
by = c("patient_id", "trial_date", "anytest_cut" = "fup_cut")
) %>%
# join data_split for persondays of follow-up per period
left_join(
data_split %>% select(patient_id, trial_date, treated, fup_cut, persondays),
by = c("patient_id", "trial_date", "treated", "anytest_cut" = "fup_cut")
) %>%
# label periods as pre or post baseline
mutate(
period = factor(if_else(
as.character(anytest_cut) %in% covidtestcuts_labels[1:fup_params$prebaselineperiods],
"prebaseline",
"postbaseline"
), levels = c("prebaseline", "postbaseline")
)
) %>%
# fill in persondays for prebaseline periods
# (must be postbaselinedays, otherwise they wouldn't be eligible)
mutate(across(
persondays,
~if_else(
period=="prebaseline",
as.integer(postbaselinedays),
persondays
))) %>%
arrange(patient_id, treated, trial_date, anytest_cut)
# checks ----
# sense check
cat("-----------------")
cat("Sense checks ----\n")
cat("Check maximum counts derived from firstpostest = 1:")
data_anytest_sum %>%
# sum all events within each patient_id, trial_date
group_by(patient_id, trial_date) %>%
summarise(across(c(sum_firstpostest, sum_lftonly, sum_pcronly, sum_both), sum)) %>%
ungroup() %>%
# max across all patient_id, trial_date
summarise(across(c(sum_firstpostest, sum_lftonly, sum_pcronly, sum_both), max))
cat("When persondays=NA (i.e. patient censored before period), check sums of censored counts are always zero:\n")
data_anytest_sum %>%
filter(is.na(persondays)) %>%
# group_by(anytest_cut, persondays) %>%
summarise(across(
c(sum_anytest, sum_postest, sum_symptomatic, sum_firstpostest, sum_lftonly, sum_pcronly, sum_both),
list(min=min, max=max)
)) %>%
# ungroup() %>%
pivot_longer(
cols=starts_with("sum"),
names_pattern = "sum_(.*)_(.*)",
names_to = c("name", ".value")
)
# check that the sums of the anytest_*_date variables match the anytest*_*_n variables
# if not, it's a flag that we need to increase n in the study definition
cat("------------------------------------------")
cat("Check `n_any` and `n_pos` appropriate ----\n")
cat ("Summarise number of tests missing per person per period when summing dates:\n")
data_anytest_sum %>%
mutate(
n_missing_anytest = anytest - sum_anytest_uncensored,
n_missing_postest = postest - sum_postest_uncensored
) %>%
group_by(anytest_cut) %>%
summarise(across(
starts_with("n_missing"),
list(min=min, max=max, mean=mean, median=median)
), .groups = "keep") %>%
ungroup() %>%
pivot_longer(
cols = starts_with("n_missing"),
names_pattern = "n_missing_(.*)_(.*)",
names_to = c("result", ".value")
) %>%
arrange(result, anytest_cut) %>%
group_split(result) %>% as.list()
plot_function <- function(result) {
plotpath <- file.path(outdir, "checks", glue("check_n_{result}.png"))
cat(glue("see {plotpath} for sum({result}_*_date) as a percent of {result}_*_n per period"), "\n")
p <- data_anytest_sum %>%
mutate(percent = 100*!!sym(glue("sum_{result}_uncensored"))/!!sym(result)) %>%
ggplot(aes(x=percent, colour=period)) +
geom_freqpoly(binwidth=1) +
facet_wrap(~anytest_cut, scales = "free_y", nrow=2) +
theme_bw() +
theme(legend.position = "bottom")
ggsave(filename = plotpath, plot = p, width = 20, height = 15, units = "cm")
return(p)
}
plot_function("anytest")
plot_function("postest")
cat("------------------------------------------")
cat("Check distribution of event counts ----\n")
plotpath <- file.path(outdir, "checks", "check_n_{result}.png")
cat(glue("see {plotpath} for distribution of event counts"), "\n")
data_anytest_sum %>%
select(-ends_with("uncensored")) %>%
select(patient_id, trial_date, treated, anytest_cut, starts_with("sum_")) %>%
pivot_longer(
cols = starts_with("sum")
) %>%
mutate(across(treated, factor, levels=0:1, labels = c("control", "treated"))) %>%
mutate(across(name,
~factor(
str_remove_all(.x, "sum_"),
levels = c("anytest", "symptomatic", "postest", "firstpostest", "lftonly", "pcronly", "both"))
)) %>%
ggplot(aes(x = value, y = ..density.., colour = treated)) +
geom_freqpoly(binwidth = 1) +
facet_grid(anytest_cut~name) +
scale_color_discrete(name=NULL) +
theme_bw() +
theme(legend.position = "bottom")
ggsave(
filename = file.path(outdir, "checks", glue("check_counts_dist.png")),
width = 15, height = 20, units = "cm"
)
# save dataset ----
data_anytest_sum %>%
filter(!is.na(persondays)) %>%
# keep only sum_*censor
select(-ends_with("uncensored")) %>%
select(patient_id, trial_date, treated, anytest_cut, persondays, starts_with("sum_")) %>%
write_rds(file.path(outdir, "process", "data_anytest_sum.rds"), compress = "gz")
|
# read positional command-line arguments:
# 1) doublet-simulation method, 2) doublet percentage, 3) replicate id
args <- commandArgs(TRUE)
method     <- args[1]
percentage <- args[2]
replicate  <- args[3]
#install DoubletFinder
##remotes::install_github('chris-mcginnis-ucsf/DoubletFinder')
#load DoubletFinder
library(DoubletFinder)
# Move into the dataset folder for this method/percentage/replicate.
# NOTE(review): setwd() on an absolute scratch path makes the script
# machine-specific; consider passing the base directory as an argument.
base_dir <- paste0(
  "/scratch/antwerpen/206/vsc20688/master_thesis/leishmania/diploid/dataset_5/synthetic_",
  method
)
setwd(file.path(base_dir, paste0(percentage, "percent"), paste0(percentage, "_", replicate)))
library(dplyr)
library(Seurat)
library(patchwork)
# Load the 10x count matrix and run the standard Seurat preprocessing chain
# (normalisation, variable features, scaling, PCA, UMAP) that DoubletFinder
# expects before parameter sweeping.
output.data <- Read10X(data.dir = "./matrix")
output <- CreateSeuratObject(output.data)
output <- NormalizeData(output)
output <- FindVariableFeatures(output, selection.method = "vst", nfeatures = 2000)
output <- ScaleData(output)
output <- RunPCA(output)
output <- RunUMAP(output, dims = 1:10)
#pK identification (no ground-truth)
sweep.res.list_output <- paramSweep_v3(output, PCs = 1:10, sct = FALSE)
sweep.stats_output <- summarizeSweep(sweep.res.list_output, GT = FALSE)
# NOTE(review): this pK table is computed but never read below -- the
# doubletFinder_v3 call hard-codes pK = 0.09 instead of taking the optimum
# from find.pK(). Confirm whether that is intentional.
bcvmn_output <- find.pK(sweep.stats_output)
#homotypic doublet proportion estimate
# NOTE(review): no clustering step (FindNeighbors/FindClusters) was run above,
# so meta.data$ClusteringResults is presumably NULL here -- confirm that
# modelHomotypic() behaves as intended in that case.
annotations <- output@meta.data$ClusteringResults
homotypic.prop <- modelHomotypic(annotations)
# expected number of doublets: 6.34% of cells
nExp.poi <- round(0.0634*nrow(output@meta.data))
# homotypic-adjusted expectation. NOTE(review): computed but not used -- the
# doubletFinder_v3 call below passes the unadjusted nExp.poi.
nExp.poi.adj <- round(nExp.poi*(1-homotypic.prop))
#run DoubletFinder with varying classification stringencies
output <- doubletFinder_v3(output, PCs = 1:10, pN = 0.25, pK = 0.09, nExp = nExp.poi, reuse.pANN = FALSE, sct = FALSE)
#how does the output of DoubletFinder look like?
##output[[]]
# DoubletFinder stores its classification ("Singlet"/"Doublet") in the 5th
# metadata column; keep it alongside the cell barcodes.
output_DoubletFinder <- data.frame(classification = output@meta.data[, 5])
output_DoubletFinder$barcodes <- rownames(output@meta.data)
# Select the barcodes classified as doublets. The original looped over a
# hard-coded 1:5000 (wrong for any other dataset size) and re-ran setwd() and
# write.csv() on every matching row; a vectorised subset over the actual row
# count is correct for any input and writes the file once.
doublets <- data.frame(
  barcodes = output_DoubletFinder$barcodes[output_DoubletFinder$classification == "Doublet"]
)
# write the doublet barcodes into the DoubletFinder subfolder of this replicate
setwd(paste0("/scratch/antwerpen/206/vsc20688/master_thesis/leishmania/diploid/dataset_5/synthetic_",
             method,
             "/",
             percentage,
             "percent/",
             percentage,
             "_",
             replicate,
             "/DoubletFinder/"))
write.csv(doublets, "doublets_DF.csv")
##output_backup <- output_DoubletFinder
| /DF_synthetic.R | no_license | isabellesmits/Master_Thesis_2021 | R | false | false | 2,904 | r | args = commandArgs(TRUE)
method = args[1]
percentage = args[2]
replicate = args[3]
#install DoubletFinder
##remotes::install_github('chris-mcginnis-ucsf/DoubletFinder')
#load DoubletFinder
library(DoubletFinder)
setwd(paste0("/scratch/antwerpen/206/vsc20688/master_thesis/leishmania/diploid/dataset_5/synthetic_",
method,
"/",
percentage,
"percent/",
percentage,
"_",
replicate,
"/"))
library(dplyr)
library(Seurat)
library(patchwork)
output.data <- Read10X(data.dir = "./matrix")
output <- CreateSeuratObject(output.data)
output <- NormalizeData(output)
output <- FindVariableFeatures(output, selection.method = "vst", nfeatures = 2000)
output <- ScaleData(output)
output <- RunPCA(output)
output <- RunUMAP(output, dims = 1:10)
#pK identification (no ground-truth)
sweep.res.list_output <- paramSweep_v3(output, PCs = 1:10, sct = FALSE)
sweep.stats_output <- summarizeSweep(sweep.res.list_output, GT = FALSE)
bcvmn_output <- find.pK(sweep.stats_output)
#homotypic doublet proportion estimate
annotations <- output@meta.data$ClusteringResults
homotypic.prop <- modelHomotypic(annotations)
nExp.poi <- round(0.0634*nrow(output@meta.data))
nExp.poi.adj <- round(nExp.poi*(1-homotypic.prop))
#run DoubletFinder with varying classification stringencies
output <- doubletFinder_v3(output, PCs = 1:10, pN = 0.25, pK = 0.09, nExp = nExp.poi, reuse.pANN = FALSE, sct = FALSE)
#how does the output of DoubletFinder look like?
##output[[]]
#try to retrieve only the column containing doublet info
##output@meta.data[,5]
#save column containing doublet info in dataframe
output_DoubletFinder <- data.frame(output@meta.data[,5])
#add column barcodes to this dataframe
#this column contains the barcodes of the cells
output_DoubletFinder$barcodes <- rownames(output@meta.data)
#filter only barcodes whereoff DF.classifications is 'doublet'
#there are 5000 observations in dataframe output_DoubletFinder, so 5000 rows
#make object with length 5000
rows <- c(1:5000)
count <- 0
#create empty dataframe doublets
##barcodes of doublets will be saved in this dataframe
doublets <- data.frame(barcodes=character())
for (row in rows) {
if (output_DoubletFinder[row, 1] == "Doublet"){
count <- count + 1
doublets[count, "barcodes"] <- output_DoubletFinder[row, "barcodes"]
setwd(paste0("/scratch/antwerpen/206/vsc20688/master_thesis/leishmania/diploid/dataset_5/synthetic_",
method,
"/",
percentage,
"percent/",
percentage,
"_",
replicate,
"/DoubletFinder/"))
write.csv(doublets, "doublets_DF.csv")
}
}
##output_backup <- output_DoubletFinder
|
#Delosh, Jason Predict 401 SEC60 Data Analysis 2 R Code
require(ggplot2)
require(moments)
require(fBasics)
library(gridExtra)
library(rockchalk)
library(flux)
# NOTE(review): `ab` is read but never used below (df2 from mydataused.csv is
# the working dataset), and sep=" " on a .csv is unusual -- confirm delimiter.
ab <- read.csv(file.path("abalones.csv"),sep=" ")
da2<-read.csv('mydataused.csv')
df2<-data.frame(da2) #set ab as dataframe
str(da2) #review mydata structure
head(da2) #review header data
tail(da2) #review tail data
sum(is.na(df2))  # count of missing values in the working data
#Q1
#Q1a)
#note: ratio=shuck/volume
# Histogram and normal Q-Q plot of the raw shuck/volume ratio, with mean
# (red) and median (yellow) marked, then sample skewness and kurtosis.
par(mfrow=c(1,2))
hist(df2$RATIOc,
include.lowest=TRUE, right=TRUE,
main='Histogram of Ratio:\nShuck to Volume',
xlab='Ratio (g/cm^3)',
ylab='Frequency',
col='light green',
xlim=c(min(df2$RATIOc)-0.05,max(df2$RATIOc)+0.05))
abline(v=mean(df2$RATIOc),col='red',lwd=3,lty=3)
abline(v=median(df2$RATIOc),col='yellow',lwd=3,lty=3)
legend(0.175,300,legend=c(sprintf('red=mean:%s',round(mean(df2$RATIOc),3)),
sprintf('yellow=median:%s',round(median(df2$RATIOc),3))))
# NOTE(review): "Quanitles" below is a typo in a plot label ("Quantiles")
qqnorm(df2$RATIOc,ylab='Sample Quanitles for Abalone Ratio',
main='Q-Q Plot of Abalone Ratio\nvs. Norm Plot', col='red')
qqline(df2$RATIOc,col='green')
par(mfrow=c(1,1))
moments::skewness(df2$RATIOc)
moments::kurtosis(df2$RATIOc)
#Q1b
# Same diagnostics after a log10 transform of the ratio.
df2$L_Ratio<-log10(df2$RATIOc)
str(df2)
par(mfrow=c(1,2))
hist(df2$L_Ratio,
include.lowest=TRUE, right=TRUE,
main='Histogram of Log10 Transformed\nAbalone Ratio:Shuck to Volume',
xlab='Log10 Ratio (g/cm^3)',
ylab='Frequency',
col='light green',
xlim=c(min(df2$L_Ratio)-0.05,max(df2$L_Ratio)+0.05))
abline(v=mean(df2$L_Ratio),col='red',lwd=3,lty=2)
abline(v=median(df2$L_Ratio),col='yellow',lwd=3,lty=3)
legend('topright',legend=c(sprintf('red=mean:%s',round(mean(df2$L_Ratio),3)),
sprintf('yellow=median:%s',round(median(df2$L_Ratio),3))))
qqnorm(df2$L_Ratio,ylab='Sample Quanitles for Log10 Transformed Abalone Ratio',
main='Q-Q Plot of Abalone Log10\nRatio vs. Norm Plot', col='red')
qqline(df2$L_Ratio,col='green')
par(mfrow=c(1,1))
moments::skewness(df2$L_Ratio)
moments::kurtosis(df2$L_Ratio)
# notched boxplots of the transformed ratio per age class
ggplot(df2,aes(x=CLASS,y=L_Ratio))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Log 10 Ratio: Shuck to Volume')+xlab('Class')
#Q1c
# Bartlett tests of homogeneity of variance across class and sex
bartlett.test(df2$L_Ratio~df2$CLASS)
bartlett.test(df2$L_Ratio~df2$SEX)
#Q2
#Q2a
# Two-way ANOVA of L_Ratio: with the CLASS x SEX interaction, then without.
aovL_Ratio_i<-aov(L_Ratio~factor(CLASS)*factor(SEX), data=df2)
summary(aovL_Ratio_i)
aovL_Ratio_ni<-aov(L_Ratio~factor(CLASS)+factor(SEX), data=df2)
summary(aovL_Ratio_ni)
#Q2b
# pairwise comparisons for the no-interaction model
TukeyHSD(aovL_Ratio_ni)
#Q3a
# collapse M and F into a single ADULT level (rockchalk::combineLevels);
# the remaining levels are I (infant) and ADULT
df2$Type<-combineLevels(df2$SEX,levs=c("M","F"),"ADULT")
str(df2)
#hist(df2$VOLUMEc=="ADULT")
# NOTE(review): aes(x=df2$VOLUMEc) bypasses tidy evaluation of the data
# argument; aes(x=VOLUMEc) is preferred -- confirm before changing.
ggplot(df2,aes(x=df2$VOLUMEc,group=Type))+
geom_histogram(color='blue',binwidth = 25)+facet_grid(~Type)+
xlab('Volume')+ylab('Frequency')+
ggtitle("Abalone Volume Frequency Grouped by Maturity")
#Q3b
# log10 transforms for the shuck-vs-volume relationship
df2$L_Shuck<-log10(df2$SHUCK)
df2$L_Volume<-log10(df2$VOLUMEc)
# 2x2 panel: raw and log10 scatter plots, coloured by CLASS (top) and Type (bottom)
grid.arrange(
ggplot(df2,aes(x=VOLUMEc,y=SHUCK,color=CLASS,name="Class"))+
geom_point(size=2)+xlab("Volume")+ylab("Shuck")+
ggtitle("Volume vs. Shuck"),
ggplot(df2,aes(x=L_Volume,y=L_Shuck,color=CLASS))+
geom_point(size=2)+xlab("Log 10 Volume")+ylab("Log 10 Shuck")+
ggtitle("Log10 Transformation: Volume vs. Shuck"),
ggplot(df2,aes(x=VOLUMEc,y=SHUCK,color=Type))+
geom_point(size=2)+xlab("Volume")+ylab("Shuck")+
ggtitle("Volume vs. Shuck"),
ggplot(df2,aes(x=L_Volume,y=L_Shuck,color=Type))+
geom_point(size=2)+xlab("Log 10 Volume")+ylab("Log 10 Shuck")+
ggtitle("Log10 Transformation: Volume vs. Shuck"),
ncol=2)
#Q4a
# multiple regression of log shuck weight on log volume, class and maturity
shuckregress<-lm(L_Shuck~L_Volume+CLASS+Type,data=df2)
summary(shuckregress)
#Q5a
# Normality diagnostics for the regression residuals.
par(mfrow=c(1,2))
hist(shuckregress$residuals, col='light blue',
main="Histogram: ANOVA Residuals",
xlab='Residuals',ylab='Frequency',
breaks=30)
qqnorm(shuckregress$residuals,ylab='Sample Quanitles',
main='Q-Q Plot of ANOVA Residuals', col='red')
qqline(shuckregress$residuals,col='green')
par(mfrow=c(1,1))
moments::skewness(shuckregress$residuals)
moments::kurtosis(shuckregress$residuals)
#Q5b
# Residuals vs. log volume (coloured by CLASS and Type) and residual boxplots.
grid.arrange(
ggplot(shuckregress,aes(x=df2$L_Volume,y=shuckregress$residuals))+
geom_point(aes(color=CLASS))+xlab('Log10 Volume')+ylab('Residuals')+
theme(legend.position='top'),
ggplot(shuckregress,aes(x=df2$L_Volume,y=shuckregress$residuals))+
geom_point(aes(color=Type))+xlab('Log10 Volume')+ylab('Residuals')+
theme(legend.position='top'),
ggplot(shuckregress,aes(x=CLASS,y=shuckregress$residuals))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Residuals')+xlab('Class'),
ggplot(shuckregress,aes(x=Type,y=shuckregress$residuals))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Residuals')+xlab('Type'),
ncol=2)
#test homogeneity of residuals
bartlett.test(shuckregress$residuals~CLASS,data=df2)
#Q6
#Q6a
# Cumulative proportion of infants and adults with volume <= each of 1000
# grid points spanning the observed volume range.
infant <- df2$Type=="I"
adult <- df2$Type=="ADULT"
maxvol <- max(df2$VOLUMEc)
minvol <- min(df2$VOLUMEc)
difvol <- (maxvol-minvol)/1000
# preallocate the result vectors instead of growing them inside the loop
propI <- numeric(1000)
propA <- numeric(1000)
volVal <- numeric(1000)
totI <- sum(infant)  # number of infants
totA <- sum(adult)   # number of adults
for (k in seq_len(1000)) {
  value <- minvol + k*difvol
  volVal[k] <- value
  propI[k] <- sum(df2$VOLUMEc[infant] <= value)/totI
  propA[k] <- sum(df2$VOLUMEc[adult] <= value)/totA
}
propdf <- data.frame(volVal, propI, propA)
head(propdf)
head(propI, 20)
head(propA, 20)
head(volVal, 20)
#Q6b
# Grid volume at which half of each group falls below (median split on the grid).
nI<-sum(propI<=0.5)
splitI<-minvol+(nI+0.5)*difvol
nA<-sum(propA<=0.5)
splitA<-minvol+(nA+0.5)*difvol
# NOTE(review): rsplitI and rsplitA are computed but never used below
rsplitI=round(splitI,2)
rsplitA=round(splitA,2)
# Cumulative proportion curves with the two split volumes and the 0.5 line.
ggplot(propdf)+
geom_line(aes(x=volVal,y=propI, colour='Infant_Prop'),size=1)+
geom_line(aes(x=volVal,y=propA, colour='Adult_Prop'),size=1)+
geom_vline(xintercept=splitA)+
geom_text(aes(splitA+25,0.48,label=round(splitA,2)))+
geom_vline(xintercept=splitI,show.legend=TRUE)+
geom_text(aes(splitI+25,0.48,label=round(splitI,2)))+
geom_hline(yintercept=0.5)+
xlab('Volume')+ylab('Proportion')+
ggtitle("Propotion of Non-Harvetes Infants and Adults")+
scale_colour_manual(name="Line Color",
values=c('Infant_Prop'="red", 'Adult_Prop'="blue"))
#Q7a,b,c
# Difference in harvest proportions (adult minus infant) across the volume
# grid, with a loess-smoothed version; the vertical line marks the volume at
# which the smoothed difference is maximal.
A_I_diff <- (1-propA)-(1-propI)
yloessA <- loess(1-propA~volVal, span=0.25, family=c('symmetric'))
yloessI <- loess(1-propI~volVal, span=0.25, family=c('symmetric'))
smDiff <- predict(yloessA)-predict(yloessI)
# volume (not grid index) at the maximum smoothed difference; the original
# plotted the raw index which.max(smDiff) on the volume axis, which is wrong
# unless the grid happens to coincide with 1..1000
maxdiff_vol <- volVal[which.max(smDiff)]
ggplot(propdf)+
  geom_line(aes(x=volVal, y=A_I_diff, colour='Actual'), size=1)+
  geom_line(aes(x=volVal, y=smDiff, colour='Smooth Curve'), size=1)+
  geom_vline(xintercept=maxdiff_vol)+
  geom_text(aes(maxdiff_vol+20, 0,
                label=sprintf('Volume:%s', round(maxdiff_vol, 1)), angle=90))+
  xlab('Volume')+ylab('Proportion Difference')+
  ggtitle('Harvest Proportion Difference: Adult vs. Infant')+
  scale_colour_manual(name="Line Color",
                      values=c('Actual'="blue", 'Smooth Curve'="red"))
#Q7d
# harvest proportions at the max-difference grid point
maxdiffI <- (1-propI)[which.max(smDiff)]
maxdiffI
maxdiffA <- (1-propA)[which.max(smDiff)]
maxdiffA
#Q8
#Q8a
#get volume cuttoff corresponding to the smallest volume value greater
#than the largest volume among class A1 infants **Zero Harvest
# threshI is a VOLUME (a value from volVal), not a grid index
threshI<-volVal[volVal>max(df2[df2$CLASS=='A1'&
df2$Type=='I','VOLUMEc'])][1]
#calculate proportions for infants and adults with the threshI cutoff
harPropI<-sum(df2[df2$Type=='I',"VOLUMEc"]>threshI)/sum(df2$Type=='I')
harPropA<-sum(df2[df2$Type=='ADULT',"VOLUMEc"]>threshI)/sum(df2$Type=='ADULT')
#Q8b
#calculate the proportion of adults to be harvested based on the
#smallest difference between adult proportion and (1-propI)
# harVolA2 is likewise a volume value taken from the grid
harVolA2<-volVal[which.min(abs(propA-(1-propI)))]
harPropI2<-sum(df2[df2$Type=='I',"VOLUMEc"]>harVolA2)/sum(df2$Type=='I')
harPropA2<-sum(df2[df2$Type=='ADULT',"VOLUMEc"]>harVolA2)/sum(df2$Type=='ADULT')
#Q9
# ROC-style curve of adult vs. infant harvest proportions with the three
# candidate volume cutoffs marked.
ggplot(propdf)+
  geom_line(aes(x=(1-propI), y=(1-propA)), color='red', size=1)+
  geom_abline(intercept=0, slope=1, linetype=2)+
  geom_point(aes(harPropI, harPropA))+
  geom_text(aes(harPropI+0.18, harPropA,
                label=sprintf('No Infant ClassA Vol=:%s', round(threshI, 1))))+
  geom_point(aes(harPropI2, harPropA2))+
  geom_text(aes(harPropI2+0.18, harPropA2,
                label=sprintf('Equal Harvest Vol=:%s', round(harVolA2, 2))))+
  geom_point(aes(maxdiffI, maxdiffA))+ #q7d
  geom_text(aes(maxdiffI+0.18, maxdiffA,
                label=sprintf('Max Prop Diff Vol=:%s', round(volVal[which.max(smDiff)], 1))))+
  ggtitle('ROC Curve: Adult vs. Infant Harvest Populations')+
  xlab('Infant Harvest Proportion')+ylab('Adult Harvest Proportion')
#area under the curve
auc((1-propI), (1-propA))
#total proportional yield at each cutoff
# threshI and harVolA2 are volume values, so compare VOLUMEc against them
# directly; the original indexed volVal[] with a volume, which silently
# truncated the volume to an integer grid index and used the wrong cutoff.
zhdiffy <- round(sum(df2$VOLUMEc>=threshI)/(totI+totA), 2)
ehdiffy <- round(sum(df2$VOLUMEc>=harVolA2)/(totI+totA), 2)
maxdiffy <- round(sum(df2$VOLUMEc>=volVal[which.max(smDiff)])/(totI+totA), 2)
cutoff <- c('No Infant ClassA', 'Equal Harvest', 'Max Prop Diff')
# report cutoff *volumes*; which.max() alone is a grid index, not a volume
volumes <- c(threshI, harVolA2, volVal[which.max(smDiff)])
tpr <- c(harPropA, harPropA2, maxdiffA)
fpr <- c(harPropI, harPropI2, maxdiffI)
ppy <- c(zhdiffy, ehdiffy, maxdiffy)
summdf <- data.frame(cutoff, volumes, tpr, fpr, ppy)
colnames(summdf) <- c('Cutoff Type', 'Volume', 'True Pos Rate',
                      'False Pos Rate', 'Prop Yield')
# round the numeric columns for display
data.frame(lapply(summdf, function(y) if (is.numeric(y)) round(y, 2) else y))
| /Stats_and_Math/DescriptiveStats.R | no_license | yangboyubyron/DS_Recipes | R | false | false | 9,374 | r | #Delosh, Jason Predict 401 SEC60 Data Analysis 2 R Code
require(ggplot2)
require(moments)
require(fBasics)
library(gridExtra)
library(rockchalk)
library(flux)
ab <- read.csv(file.path("abalones.csv"),sep=" ")
da2<-read.csv('mydataused.csv')
df2<-data.frame(da2) #set ab as dataframe
str(da2) #review mydata structure
head(da2) #review header data
tail(da2) #review tail data
sum(is.na(df2))
#Q1
#Q1a)
#note: ratio=shuck/volume
par(mfrow=c(1,2))
hist(df2$RATIOc,
include.lowest=TRUE, right=TRUE,
main='Histogram of Ratio:\nShuck to Volume',
xlab='Ratio (g/cm^3)',
ylab='Frequency',
col='light green',
xlim=c(min(df2$RATIOc)-0.05,max(df2$RATIOc)+0.05))
abline(v=mean(df2$RATIOc),col='red',lwd=3,lty=3)
abline(v=median(df2$RATIOc),col='yellow',lwd=3,lty=3)
legend(0.175,300,legend=c(sprintf('red=mean:%s',round(mean(df2$RATIOc),3)),
sprintf('yellow=median:%s',round(median(df2$RATIOc),3))))
qqnorm(df2$RATIOc,ylab='Sample Quanitles for Abalone Ratio',
main='Q-Q Plot of Abalone Ratio\nvs. Norm Plot', col='red')
qqline(df2$RATIOc,col='green')
par(mfrow=c(1,1))
moments::skewness(df2$RATIOc)
moments::kurtosis(df2$RATIOc)
#Q1b
df2$L_Ratio<-log10(df2$RATIOc)
str(df2)
par(mfrow=c(1,2))
hist(df2$L_Ratio,
include.lowest=TRUE, right=TRUE,
main='Histogram of Log10 Transformed\nAbalone Ratio:Shuck to Volume',
xlab='Log10 Ratio (g/cm^3)',
ylab='Frequency',
col='light green',
xlim=c(min(df2$L_Ratio)-0.05,max(df2$L_Ratio)+0.05))
abline(v=mean(df2$L_Ratio),col='red',lwd=3,lty=2)
abline(v=median(df2$L_Ratio),col='yellow',lwd=3,lty=3)
legend('topright',legend=c(sprintf('red=mean:%s',round(mean(df2$L_Ratio),3)),
sprintf('yellow=median:%s',round(median(df2$L_Ratio),3))))
qqnorm(df2$L_Ratio,ylab='Sample Quanitles for Log10 Transformed Abalone Ratio',
main='Q-Q Plot of Abalone Log10\nRatio vs. Norm Plot', col='red')
qqline(df2$L_Ratio,col='green')
par(mfrow=c(1,1))
moments::skewness(df2$L_Ratio)
moments::kurtosis(df2$L_Ratio)
ggplot(df2,aes(x=CLASS,y=L_Ratio))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Log 10 Ratio: Shuck to Volume')+xlab('Class')
#Q1c
bartlett.test(df2$L_Ratio~df2$CLASS)
bartlett.test(df2$L_Ratio~df2$SEX)
#Q2
#Q2a
aovL_Ratio_i<-aov(L_Ratio~factor(CLASS)*factor(SEX), data=df2)
summary(aovL_Ratio_i)
aovL_Ratio_ni<-aov(L_Ratio~factor(CLASS)+factor(SEX), data=df2)
summary(aovL_Ratio_ni)
#Q2b
TukeyHSD(aovL_Ratio_ni)
#Q3a
df2$Type<-combineLevels(df2$SEX,levs=c("M","F"),"ADULT")
str(df2)
#hist(df2$VOLUMEc=="ADULT")
ggplot(df2,aes(x=df2$VOLUMEc,group=Type))+
geom_histogram(color='blue',binwidth = 25)+facet_grid(~Type)+
xlab('Volume')+ylab('Frequency')+
ggtitle("Abalone Volume Frequency Grouped by Maturity")
#Q3b
df2$L_Shuck<-log10(df2$SHUCK)
df2$L_Volume<-log10(df2$VOLUMEc)
grid.arrange(
ggplot(df2,aes(x=VOLUMEc,y=SHUCK,color=CLASS,name="Class"))+
geom_point(size=2)+xlab("Volume")+ylab("Shuck")+
ggtitle("Volume vs. Shuck"),
ggplot(df2,aes(x=L_Volume,y=L_Shuck,color=CLASS))+
geom_point(size=2)+xlab("Log 10 Volume")+ylab("Log 10 Shuck")+
ggtitle("Log10 Transformation: Volume vs. Shuck"),
ggplot(df2,aes(x=VOLUMEc,y=SHUCK,color=Type))+
geom_point(size=2)+xlab("Volume")+ylab("Shuck")+
ggtitle("Volume vs. Shuck"),
ggplot(df2,aes(x=L_Volume,y=L_Shuck,color=Type))+
geom_point(size=2)+xlab("Log 10 Volume")+ylab("Log 10 Shuck")+
ggtitle("Log10 Transformation: Volume vs. Shuck"),
ncol=2)
#Q4a
shuckregress<-lm(L_Shuck~L_Volume+CLASS+Type,data=df2)
summary(shuckregress)
#Q5a
par(mfrow=c(1,2))
hist(shuckregress$residuals, col='light blue',
main="Histogram: ANOVA Residuals",
xlab='Residuals',ylab='Frequency',
breaks=30)
qqnorm(shuckregress$residuals,ylab='Sample Quanitles',
main='Q-Q Plot of ANOVA Residuals', col='red')
qqline(shuckregress$residuals,col='green')
par(mfrow=c(1,1))
moments::skewness(shuckregress$residuals)
moments::kurtosis(shuckregress$residuals)
#Q5b
grid.arrange(
ggplot(shuckregress,aes(x=df2$L_Volume,y=shuckregress$residuals))+
geom_point(aes(color=CLASS))+xlab('Log10 Volume')+ylab('Residuals')+
theme(legend.position='top'),
ggplot(shuckregress,aes(x=df2$L_Volume,y=shuckregress$residuals))+
geom_point(aes(color=Type))+xlab('Log10 Volume')+ylab('Residuals')+
theme(legend.position='top'),
ggplot(shuckregress,aes(x=CLASS,y=shuckregress$residuals))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Residuals')+xlab('Class'),
ggplot(shuckregress,aes(x=Type,y=shuckregress$residuals))+
geom_boxplot(outlier.color='red',
outlier.shape =1, outlier.size=3,
notch=TRUE)+
ylab('Residuals')+xlab('Type'),
ncol=2)
#test homogeneity of residuals
bartlett.test(shuckregress$residuals~CLASS,data=df2)
#Q6
#Q6a
infant<-df2$Type=="I"
adult<-df2$Type=="ADULT"
maxvol<-max(df2$VOLUMEc)
minvol<-min(df2$VOLUMEc)
difvol<-(maxvol-minvol)/1000
propI<-numeric(0)
propA<-numeric(0)
volVal<-numeric(0)
totI<-length(df2$Type[infant]) #gets count in the column
totA<-length(df2$Type[adult]) #gets count in the column
for (k in 1:1000) {
value<-minvol+k*difvol
volVal[k]<-value
propI[k]<-sum(df2$VOLUMEc[infant]<=value)/totI
propA[k]<-sum(df2$VOLUMEc[adult]<=value)/totA
}
propdf<-data.frame(volVal,propI,propA)
head(propdf)
head(propI, 20)
head(propA, 20)
head(volVal, 20)
#Q6b
nI<-sum(propI<=0.5)
splitI<-minvol+(nI+0.5)*difvol
nA<-sum(propA<=0.5)
splitA<-minvol+(nA+0.5)*difvol
rsplitI=round(splitI,2)
rsplitA=round(splitA,2)
ggplot(propdf)+
geom_line(aes(x=volVal,y=propI, colour='Infant_Prop'),size=1)+
geom_line(aes(x=volVal,y=propA, colour='Adult_Prop'),size=1)+
geom_vline(xintercept=splitA)+
geom_text(aes(splitA+25,0.48,label=round(splitA,2)))+
geom_vline(xintercept=splitI,show.legend=TRUE)+
geom_text(aes(splitI+25,0.48,label=round(splitI,2)))+
geom_hline(yintercept=0.5)+
xlab('Volume')+ylab('Proportion')+
ggtitle("Propotion of Non-Harvetes Infants and Adults")+
scale_colour_manual(name="Line Color",
values=c('Infant_Prop'="red", 'Adult_Prop'="blue"))
#Q7a,b,c
A_I_diff<-(1-propA)-(1-propI)
yloessA<-loess(1-propA~volVal,span=0.25, family=c('symmetric'))
yloessI<-loess(1-propI~volVal,span=0.25, family=c('symmetric'))
smDiff<-predict(yloessA)-predict(yloessI)
ggplot(propdf)+
geom_line(aes(x=volVal,y=A_I_diff, colour='Actual'),size=1)+
geom_line(aes(x=volVal,y=smDiff, colour='Smooth Curve'),size=1)+
geom_vline(xintercept=which.max(smDiff))+
geom_text(aes(which.max(smDiff)+20,0,
label=sprintf('Volume:%s',which.max(smDiff)),angle=90))+
xlab('Volume')+ylab('Proportion Difference')+
ggtitle('Harvest Proportion Difference: Adult vs. Infant')+
scale_colour_manual(name="Line Color",
values=c('Actual'="blue", 'Smooth Curve'="red"))
#Q7d
maxdiffI<-(1-propI)[which.max(smDiff)]
maxdiffI
maxdiffA<-(1-propA)[which.max(smDiff)]
maxdiffA
#Q8
#Q8a
#get volume cuttoff corresponding to the smallest volume value greater
#than the largest volume among class A1 infants **Zero Harvest
threshI<-volVal[volVal>max(df2[df2$CLASS=='A1'&
df2$Type=='I','VOLUMEc'])][1]
#calculate proportions for infants and adults with the threshI cutoff
harPropI<-sum(df2[df2$Type=='I',"VOLUMEc"]>threshI)/sum(df2$Type=='I')
harPropA<-sum(df2[df2$Type=='ADULT',"VOLUMEc"]>threshI)/sum(df2$Type=='ADULT')
#Q8b
#calculate the proportion of adults to be harvested based on the
#smallest difference between adult proportion and (1-propI)
harVolA2<-volVal[which.min(abs(propA-(1-propI)))]
harPropI2<-sum(df2[df2$Type=='I',"VOLUMEc"]>harVolA2)/sum(df2$Type=='I')
harPropA2<-sum(df2[df2$Type=='ADULT',"VOLUMEc"]>harVolA2)/sum(df2$Type=='ADULT')
#Q9
ggplot(propdf)+
geom_line(aes(x=(1-propI),y=(1-propA)),color='red',size=1)+
geom_abline(intercept=0, slope=1, linetype=2)+
geom_point(aes(harPropI,harPropA))+
geom_text(aes(harPropI+0.18,harPropA,
label=sprintf('No Infant ClassA Vol=:%s',round(threshI,1))))+
geom_point(aes(harPropI2,harPropA2))+
geom_text(aes(harPropI2+0.18,harPropA2,
label=sprintf('Equal Harvest Vol=:%s',round(harVolA2,2))))+
geom_point(aes(maxdiffI,maxdiffA))+ #q7d
geom_text(aes(maxdiffI+0.18,maxdiffA,
label=sprintf('Max Prop Diff Vol=:%s',which.max(smDiff))))+
ggtitle('ROC Curve: Adult vs. Infant Harvest Populations')+
xlab('Infant Harvest Proportion')+ylab('Adult Harvest Proportion')
#area under the curve
auc((1-propI),(1-propA))
#total proportional yield
zhdiffy<-round(sum(df2$VOLUMEc>=volVal[threshI])/(totI+totA),2)
ehdiffy<-round(sum(df2$VOLUMEc>=volVal[harVolA2])/(totI+totA),2)
maxdiffy<-round(sum(df2$VOLUMEc>=volVal[which.max(smDiff)])/(totI+totA),2)
cutoff<-c('No Infant ClassA','Equal Harvest','Max Prop Diff')
volumes<-c(threshI,harVolA2,which.max(smDiff))
tpr<-c(harPropA,harPropA2,maxdiffA)
fpr<-c(harPropI,harPropI2,maxdiffI)
ppy<-c(zhdiffy,ehdiffy,maxdiffy)
summdf<-data.frame(cutoff,volumes,tpr,fpr,ppy)
colnames(summdf)<-c('Cutoff Type','Volume','True Pos Rate',
'False Pos Rate','Prop Yield')
data.frame(lapply(summdf,function(y) if (is.numeric(y)) round(y,2) else y))
|
#pdf("Sims_estimating_mu_over_s_DIFF_s_values.pdf")
pdf("TRY.pdf")
# Sampling-design simulation: for each mutation rate mu and selective cost s,
# run the compiled single-site simulator, then estimate how precisely mu/s can
# be recovered from binomial sequence samples (50/100/200 seqs per patient)
# across varying numbers of patients.
# Define the sampling grids before the plotting code uses them; in the
# original, samplesizes was only assigned inside the inner loop, AFTER the
# axis() call that reads it, so a fresh session errored on the first plot.
samplesizes <- c(1,3,10,30,100,300,1000)  # candidate numbers of patients
diffnumseqs <- c(50,100,200)              # sequences sampled per patient
for (Ne in c(10000)){
  #currently Ne cannot be changed in the sims
  NUMRUNS <- 1; numoutputs <- 10000
  system("./Code_and_shellscript/make_HIV1site") #compile the code
  for (mu in c(0.000002,0.00002,0.0002)){
    #for (cost in c(0.001,0.005,0.01,0.05,0.1,0.2)){
    for (cost in c(0.01,0.05,0.1)){
      print("")
      print(paste("cost",cost))
      print("")
      print(paste("mu",mu))
      print("")
      avepivalues <- vector()
      for (seed in seq_len(NUMRUNS)){
        # write a small shell script that feeds the parameters to the simulator
        x <- "#!/bin/bash"
        x <- c(x, paste("mu=",mu,sep=""))
        x <- c(x, paste("cost=",cost,sep=""))
        outputfrequency <- min(c(2*Ne, ceiling(5/cost)))
        x <- c(x, paste("output_every_Xgen=",outputfrequency,sep=""))
        x <- c(x, paste("numgen_inN=",(numoutputs+2)*outputfrequency/Ne,sep=""))
        x <- c(x, paste("start_output=",2*outputfrequency/Ne,sep=""))
        x <- c(x, paste("for seed in",seed))
        x <- c(x, "do",
               "echo \"", "$seed", "$mu", "$cost",
               "$output_every_Xgen", "$numgen_inN", "$start_output",
               paste("\" | ./Code_and_shellscript/HIVevolution_HIV1site >../Data/Link",cost,"_",seed,".txt",sep=""),
               "done")
        write(x, file="./Code_and_shellscript/tempscript.sh")
        system("chmod 775 ./Code_and_shellscript/tempscript.sh")
        #Run tempscript.sh
        system("./Code_and_shellscript/tempscript.sh")
        #READ FREQS FILE
        simdata <- read.csv(paste("../Data/Link",cost,"_",seed,".txt",sep=""), sep="\t", header=TRUE)
        # empty frame; 95% ranges are drawn as rectangles per sample size below
        plot(c(0,0), col=0, xlim=c(-0.5,3.5), ylim=c(-1*mu/cost,8*mu/cost), xlab="Num Patients", xaxt="n", ylab="95% of observed average freq of allele", main=paste("Ne",Ne,", mu",mu,", Theta",2*Ne*mu ,", cost",cost))
        axis(1, at=log10(samplesizes), labels=samplesizes)
        abline(h=mu/cost, lty=2)  # true equilibrium frequency mu/s
        for (num_seqs_per_patient in diffnumseqs){
          # binomial sampling noise: rbinom is vectorised over prob, so the
          # original per-row loop is unnecessary
          simdata$est_freq <- rbinom(nrow(simdata), num_seqs_per_patient, simdata$freq)/num_seqs_per_patient
          for (num_patients in samplesizes){
            # 1000 resampled means of num_patients sampled time points
            list_averages <- replicate(1000, mean(sample(simdata$est_freq, num_patients)))
            co <- which(diffnumseqs==num_seqs_per_patient)
            X <- (which(diffnumseqs==num_seqs_per_patient)-2)*0.1
            print(paste(num_seqs_per_patient,num_patients,log10(num_patients)-0.02+X))
            rect(log10(num_patients)-0.02+X, sort(list_averages)[25], log10(num_patients)+0.02+X, sort(list_averages)[975], col=co)
            text(log10(num_patients)+X, (-1+co/8)*mu/cost, paste(round(sort(list_averages)[25]/(mu/cost),2),"-",round(sort(list_averages)[975]/(mu/cost),2)), cex=0.5)
          }
          # legend text anchored at the last (largest) num_patients position
          text(log10(num_patients),6.5*mu/cost,"bars and",cex=0.8)
          text(log10(num_patients),6*mu/cost,"numbers indicate range",cex=0.8)
          text(log10(num_patients),5.5*mu/cost,"of 95% of estimates",cex=0.8)
          text(log10(num_patients),5.*mu/cost,"black:50, red: 100, ",cex=0.8)
          text(log10(num_patients),4.5*mu/cost,"green: 200 sequences/pat",cex=0.8)
          text(-0.3,1.1*mu/cost,"mu/s",cex=0.8)
        }}}}}
dev.off()
#rbinom(n, size, prob)
| /RScriptMutationSelection.r | no_license | pleunipennings/EstimatingMuOverS | R | false | false | 3,616 | r |
#pdf("Sims_estimating_mu_over_s_DIFF_s_values.pdf")
pdf("TRY.pdf")
for (Ne in c(10000)){
#currently Ne cannot be changed in the sims
#DataOverview<-data.frame("N"=Ne,"mu"=0,"cost"=0,"num_runs"=0,"datapointsperrun"=0,"equiPi"=0,"VarPi"=0,"expectedPi"=0,"t_half"=0)
n=0
NUMRUNS=1; numoutputs=10000
system("./Code_and_shellscript/make_HIV1site") #compile the code
for (mu in c(0.000002,0.00002,0.0002)){
# for (mu in c(0.00002)){
#for (cost in c(0.001,0.005,0.01,0.05,0.1,0.2)){
for (cost in c(0.01,0.05,0.1)){
print("")
print(paste("cost",cost))
print("")
print(paste("mu",mu))
print("")
avepivalues<-vector()
for (seed in 1:NUMRUNS){
#make script
x<-"#!/bin/bash"
x<-c(x,paste("mu=",mu,sep=""))
x<-c(x,paste("cost=",cost,sep=""))
outputfrequency=min(c(2*Ne,ceiling(5/cost)))
x<-c(x,paste("output_every_Xgen=",outputfrequency,sep=""))
x<-c(x,paste("numgen_inN=",(numoutputs+2)*outputfrequency/Ne,sep=""))
x<-c(x,paste("start_output=",2*outputfrequency/Ne,sep=""))
x<-c(x,paste("for seed in",seed))
x<-c(x,"do",
"echo \"", "$seed", "$mu", "$cost",
"$output_every_Xgen", "$numgen_inN", "$start_output",
paste("\" | ./Code_and_shellscript/HIVevolution_HIV1site >../Data/Link",cost,"_",seed,".txt",sep=""),
"done")
write(x,file="./Code_and_shellscript/tempscript.sh")
system("chmod 775 ./Code_and_shellscript/tempscript.sh")
#Run tempscript.sh
system("./Code_and_shellscript/tempscript.sh")
#READ FREQS FILE
read.csv(paste("../Data/Link",cost,"_",seed,".txt",sep=""),sep="\t",header=TRUE)->simdata
#in stead of the real frequencies, lets assume we have a sample from each patient of, say, 100, seqs.
plot(c(0,0),col=0,xlim=c(-0.5,3.5),ylim=c(-1*mu/cost,8*mu/cost),xlab="Num Patients",xaxt="n",ylab="95% of observed average freq of allele",main=paste("Ne",Ne,", mu",mu,", Theta",2*Ne*mu ,", cost",cost))
axis(1, at=log10(samplesizes), labels=samplesizes)
abline(h=mu/cost,lty=2)
diffnumseqs <- c(50,100,200)
for (num_seqs_per_patient in diffnumseqs){
for (i in 1:length(simdata$freq)){
simdata$est_freq[i]<-rbinom(1,num_seqs_per_patient,simdata$freq[i])/num_seqs_per_patient}
#system(paste("rm ","../Data/Link",cost,"_",seed,".txt",sep=""))
samplesizes<-c(1,3,10,30,100,300,1000)
for (num_patients in samplesizes){
list_averages<-vector()
for (i in 1:1000){
list_averages<-c(list_averages,mean(sample(simdata$est_freq,num_patients)))}
co=which(diffnumseqs==num_seqs_per_patient)
X=(which(diffnumseqs==num_seqs_per_patient)-2)*0.1
print(paste(num_seqs_per_patient,num_patients,log10(num_patients)-0.02+X))
rect(log10(num_patients)-0.02+X,sort(list_averages)[25],log10(num_patients)+0.02 +X,sort(list_averages)[975],col=co)
# text(log10(num_patients)+X,sort(list_averages)[975]+0.15*mu/cost+(X+0.2)/200,paste(round(sort(list_averages)[25]/(mu/cost),2),"-",round(sort(list_averages)[975]/(mu/cost),2)),cex=0.6)
text(log10(num_patients)+X,(-1+co/8)*mu/cost,paste(round(sort(list_averages)[25]/(mu/cost),2),"-",round(sort(list_averages)[975]/(mu/cost),2)),cex=0.5)
}
text(log10(num_patients),6.5*mu/cost,"bars and",cex=0.8)
text(log10(num_patients),6*mu/cost,"numbers indicate range",cex=0.8)
text(log10(num_patients),5.5*mu/cost,"of 95% of estimates",cex=0.8)
text(log10(num_patients),5.*mu/cost,"black:50, red: 100, ",cex=0.8)
text(log10(num_patients),4.5*mu/cost,"green: 200 sequences/pat",cex=0.8)
text(-0.3,1.1*mu/cost,"mu/s",cex=0.8)
}}}}}
dev.off()
#rbinom(n, size, prob)
|
# Lagrange interpolation demo: build the Lagrange basis polynomials on N
# equally spaced nodes in [-1, 1], approximate f by the interpolant, and
# inspect the pointwise error and the partial interpolants.

N <- 8
xs <- seq(-1, 1, length = N)  # interpolation nodes; sample(1:20, 10) for random nodes
(xs <- xs[order(xs)])         # ensure ascending order (and echo the nodes)
xs

# j-th Lagrange basis polynomial evaluated at x (vectorised in x).
# Uses the global nodes `xs` and node count `N`; L(xs[i], j) is 1 when
# i == j and 0 otherwise.
L <- function(x, j){
  tmp <- 1
  for(i in 1:N){
    if(i != j) tmp <- tmp*(x - xs[i])/(xs[j] - xs[i])
  }
  tmp
}

# Target function to interpolate.
# (Moved above the plotting code: it was originally defined AFTER its first
# use, so running the script top-to-bottom failed with "object 'f' not found".)
f <- function(x) x^3*exp(-1.1*x)*sin(x)

# Interpolating polynomial through the first k nodes:
# P_k(x) = sum_{i=1}^{k} f(xs[i]) * L_i(x). Vectorised in x.
Pk <- function(x, k){
  tmp <- 0
  for(i in 1:k){
    tmp <- tmp + f(xs[i])*L(x, i)
  }
  tmp
}

files <- FALSE  # set TRUE to write the figures to PDF files

## Basis polynomials L_0 ... L_{N-1}
if(files) pdf("Li.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
plot(function(x) L(x, 1), xlim=c(min(xs), max(xs)), ylim=c(-1, 1), las=1,
     xlab=expression(x), ylab=expression(L[0](x)))
points(xs, sapply(xs, function(x) L(x, 1)), pch=19)
for(i in 2:N){
  plot(function(x) L(x, i), xlim=c(min(xs), max(xs)), ylim=c(-1, 1), las=1,
       xlab=expression(x), ylab=bquote(paste(L[.(i-1)](x))))
  points(xs, sapply(xs, function(x) L(x, i)), pch=19)
}
if(files) dev.off()

## Scaled basis terms f(x_i) * L_i(x)
if(files) pdf("fLi.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
plot(function(x) f(xs[1])*L(x, 1), xlim=c(min(xs), max(xs)), ylim=c(-3, 3), las=1,
     xlab=expression(x), ylab=expression(f(x[0])*L[0](x)))
points(xs, sapply(xs, function(x) f(xs[1])*L(x, 1)), pch=19)
for(i in 2:N){
  plot(function(x) f(xs[i])*L(x, i), xlim=c(min(xs), max(xs)), ylim=c(-3, 3), las=1,
       xlab=expression(x), ylab=bquote(paste(f(x[.(i-1)])*L[.(i-1)](x))))
  points(xs, sapply(xs, function(x) f(xs[i])*L(x, i)), pch=19)
}
if(files) dev.off()

## Full interpolant vs f, and the pointwise error
if(files) pdf("fP.pdf", height=5, width=10)
par(mfrow=c(1, 2), mar=c(4.1, 5.1, 1.1, 1.1))
plot(f, xlim=c(min(xs), max(xs)), ylab="Function and approximation", las=1, lwd=2)
plot(function(x) Pk(x, N), xlim=c(min(xs), max(xs)), add=TRUE, col="gray")
points(xs, sapply(xs, f), pch=19)
# BUG FIX: this line read `sapply(xs, P)` but no `P` was ever defined --
# evaluate the full interpolant Pk(., N) at the nodes instead.
points(xs, sapply(xs, function(x) Pk(x, N)), pch=19, col='gray')
plot(function(x) f(x) - Pk(x, N), xlim=c(min(xs), max(xs)),
     ylab="Error (f(x) - P(x))\n", las=1)
points(xs, 0*xs, pch=19, col=1)
if(files) dev.off()

## Partial interpolants P_{N,i} built from the first i nodes only
if(files) pdf("fPk.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
for(i in 1:N){
  plot(f, xlim=c(min(xs), max(xs)),
       ylab=bquote(paste("Function, approximation, and ", P[.(N)][","][.(i)](x))),
       las=1, lwd=2, ylim=c(0, 3))
  # plot(P, xlim=c(min(xs), max(xs)), add=TRUE, col="gray")
  plot(function(x) Pk(x, i), xlim=c(min(xs), max(xs)), add=TRUE, col="gray", lwd=2)
  points(xs[1:i], sapply(xs[1:i], f), pch=19)
}
if(files) dev.off()
| /books/ana/assets/2_interpolation/lagrange2.R | no_license | seanteachesmath/seanteachesmath.github.io | R | false | false | 2,278 | r | N <- 8
xs <- seq(-1, 1, length=N)#sample(1:20, 10)
(xs <- xs[order(xs)])
xs
L <- function(x, j){
tmp <- 1
for(i in 1:N){
if(i!=j)tmp <- tmp*(x-xs[i])/(xs[j] - xs[i])
}
tmp
}
files <- F
if(files)pdf("Li.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
plot(function(x)L(x,1), xlim=c(min(xs), max(xs)), ylim=c(-1, 1), las=1, xlab=expression(x), ylab=expression(L[0](x)))
points(xs, sapply(xs, function(x)L(x,1)), pch=19)
for(i in 2:N){
plot(function(x)L(x,i), xlim=c(min(xs), max(xs)), ylim=c(-1, 1), las=1, xlab=expression(x), ylab=bquote(paste(L[.(i-1)](x))))
points(xs, sapply(xs, function(x)L(x,i)), pch=19)
}
if(files)dev.off()
if(files)pdf("fLi.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
plot(function(x)f(xs[1])*L(x,1), xlim=c(min(xs), max(xs)), ylim=c(-3, 3), las=1, xlab=expression(x), ylab=expression(f(x[0])*L[0](x)))
points(xs, sapply(xs, function(x)f(xs[1])*L(x,1)), pch=19)
for(i in 2:N){
plot(function(x)f(xs[i])*L(x,i), xlim=c(min(xs), max(xs)), ylim=c(-3, 3), las=1, xlab=expression(x), ylab=bquote(paste(f(x[.(i-1)])*L[.(i-1)](x))))
points(xs, sapply(xs, function(x)f(xs[i])*L(x,i)), pch=19)
}
if(files)dev.off()
f <- function(x)x^3*exp(-1.1*x)*sin(x)
Pk <- function(x, k){
tmp <- 0
for(i in 1:k){
tmp <- tmp + f(xs[i])*L(x, i)
}
tmp
}
## Full interpolant vs f, and the pointwise error
if(files)pdf("fP.pdf", height=5, width=10)
par(mfrow=c(1,2), mar=c(4.1, 5.1, 1.1, 1.1))
plot(f, xlim=c(min(xs), max(xs)), ylab="Function and approximation", las=1, lwd=2)
plot(function(x)Pk(x,N), xlim=c(min(xs), max(xs)), add=TRUE, col="gray")
points(xs, sapply(xs, f), pch=19)
# BUG FIX: was `sapply(xs, P)` -- `P` is undefined; use the full interpolant Pk(., N)
points(xs, sapply(xs, function(x) Pk(x, N)), pch=19, col='gray')
plot(function(x) f(x) - Pk(x, N), xlim=c(min(xs), max(xs)), ylab="Error (f(x) - P(x))\n", las=1)
points(xs, 0*xs, pch=19, col=1)
if(files)dev.off()
if(files)pdf("fPk.pdf", height=5, width=10)
par(mfrow=c(2, ceiling(N/2)), mar=c(4.1, 4.1, 1.1, 1.1))
for(i in 1:N){
plot(f, xlim=c(min(xs), max(xs)), ylab= bquote(paste("Function, approximation, and ", P[.(N)][","][.(i)](x))), las=1, lwd=2, ylim=c(0, 3))
# plot(P, xlim=c(min(xs), max(xs)), add=T, col="gray")
plot(function(x)Pk(x,i), xlim=c(min(xs), max(xs)), add=T, col="gray", lwd=2)
points(xs[1:i], sapply(xs[1:i], f), pch=19)
}
if(files)dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcCleaning.R
\name{zoneFusion2}
\alias{zoneFusion2}
\title{zoneFusion2 basic function for merging 2 zones}
\usage{
zoneFusion2(zoneMain, zoneSuppr, simplitol = 0.001)
}
\arguments{
\item{zoneMain}{zone to merge into}
\item{zoneSuppr}{zone to remove by merging it into main zone}
\item{simplitol}{tolerance for spatial polygons geometry simplification}
}
\value{
a zone
}
\description{
zoneFusion2 basic function for merging 2 zones
}
\details{
merge 2 zones, called by zoneFusion3 and zoneFusion4
}
\examples{
data(resZTest)
Z=resZTest$zonePolygone
plotZ(Z)
sp::plot(zoneFusion2(Z[[6]],Z[[2]]),add=TRUE,col="blue")
}
| /man/zoneFusion2.Rd | no_license | hazaeljones/geozoning | R | false | true | 699 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funcCleaning.R
\name{zoneFusion2}
\alias{zoneFusion2}
\title{zoneFusion2 basic function for merging 2 zones}
\usage{
zoneFusion2(zoneMain, zoneSuppr, simplitol = 0.001)
}
\arguments{
\item{zoneMain}{zone to merge into}
\item{zoneSuppr}{zone to remove by merging it into main zone}
\item{simplitol}{tolerance for spatial polygons geometry simplification}
}
\value{
a zone
}
\description{
zoneFusion2 basic function for merging 2 zones
}
\details{
merge 2 zones, called by zoneFusion3 and zoneFusion4
}
\examples{
data(resZTest)
Z=resZTest$zonePolygone
plotZ(Z)
sp::plot(zoneFusion2(Z[[6]],Z[[2]]),add=TRUE,col="blue")
}
|
# fix.Configuration.R
# Standardise the Configuration labels so they match the tables in the
# draft final report v10. Canonical labels:
#   Trunk & 3 Branches, Hybrid Mini-Manifold, Central Manifold,
#   Two Heaters, One-zone w/o Recirc
# Each section first inspects the rows matching a pattern (across all
# tables), then overwrites their label in place with data.table's `:=`.

# distinct labels before cleaning
DT_relative[, .(Configuration = unique(Configuration))]

# --- "Trunk & 3 Branches" ---
DT_relative[str_detect(Configuration, "Branch"),
            .(Configuration, Identification, table)][order(table)]
DT_relative[str_detect(Configuration, "Branch"),
            Configuration := "Trunk & 3 Branches"]

# --- "Hybrid Mini-Manifold" ---
DT_relative[str_detect(Configuration, "manifold"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "manifold"),
            Configuration := "Hybrid Mini-Manifold"]

# --- "Central Manifold" ---
DT_relative[str_detect(Configuration, "Home Run"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "Home Run"),
            Configuration := "Central Manifold"]

# --- "One-zone w/o Recirc" ---
DT_relative[str_detect(Configuration, "zone"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "zone"),
            Configuration := "One-zone w/o Recirc"]

# distinct labels after cleaning
DT_relative[, .(Configuration = unique(Configuration))]
| /fix.Configuration.R | no_license | jim-lutz/draw_patterns | R | false | false | 1,481 | r | # fix.Configuration.R
# Standardise the Configuration labels so they match the tables in the
# draft final report v10. Canonical labels:
#   Trunk & 3 Branches, Hybrid Mini-Manifold, Central Manifold,
#   Two Heaters, One-zone w/o Recirc
# Each section first inspects the rows matching a pattern (across all
# tables), then overwrites their label in place with data.table's `:=`.

# distinct labels before cleaning
DT_relative[, .(Configuration = unique(Configuration))]

# --- "Trunk & 3 Branches" ---
DT_relative[str_detect(Configuration, "Branch"),
            .(Configuration, Identification, table)][order(table)]
DT_relative[str_detect(Configuration, "Branch"),
            Configuration := "Trunk & 3 Branches"]

# --- "Hybrid Mini-Manifold" ---
DT_relative[str_detect(Configuration, "manifold"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "manifold"),
            Configuration := "Hybrid Mini-Manifold"]

# --- "Central Manifold" ---
DT_relative[str_detect(Configuration, "Home Run"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "Home Run"),
            Configuration := "Central Manifold"]

# --- "One-zone w/o Recirc" ---
DT_relative[str_detect(Configuration, "zone"),
            .(Configuration, Identification, table)]
DT_relative[str_detect(Configuration, "zone"),
            Configuration := "One-zone w/o Recirc"]

# distinct labels after cleaning
DT_relative[, .(Configuration = unique(Configuration))]
|
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Fit and run fixed and mixed effects Carlina stochastic IPM
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# NOTE(review): rm(list=ls()) in a shared script is discouraged -- it wipes
# the caller's workspace; prefer running the script in a fresh R session.
rm(list=ls(all=TRUE))
library(doBy)
library(lme4)
library(MCMCglmm)
# fixed seed so the random year sequence (and hence all results) is reproducible
set.seed(53241986)
## Working directory must be set here, so the source()'s below run
# NOTE(review): hard-coded machine-specific paths; collaborators must edit
# `root` to run this. Consider relative paths or here::here().
root=ifelse(.Platform$OS.type=="windows","c:/repos","~/Repos");
setwd(paste(root,"/ipm_book/Rcode/c2",sep=""));
source("../utilities/Standard Graphical Pars.R");
root=ifelse(.Platform$OS.type=="windows","c:/repos","~/Repos");
setwd(paste(root,"/ipm_book/Rcode/c7/Carlina",sep=""));
# presumably provides m.par.est (yearly parameter matrix used below) -- TODO confirm
load("Yearly parameters.Rdata")
# expected to define the demographic functions used below
# (p_bz, s_z, b_z, c_0z1, G_z1z) and the kernel builder mk_K -- TODO confirm
source("Carlina Demog Funs DI.R")
#####################################################################
#Stochastic perturbation analysis
#####################################################################
nBigMatrix <- 100   # number of mesh points in the kernel discretisation
n.est <- 20000      # number of iterations for the stochastic estimates
n.runin <- 500      # burn-in dropped from both ends of the w/v time series
minsize <- 1.5      # lower size limit passed to mk_K
maxsize <- 5        # upper size limit passed to mk_K
n.years <-20        # number of yearly parameter sets sampled from
# Stochastic perturbation (elasticity) analysis of the Carlina IPM.
#
# Arguments:
#   params    matrix of demographic parameters, one column per year
#   n.est     number of model iterations used for the stochastic estimates
#   n.runin   burn-in dropped from BOTH ends of the series before averaging
#   C.t       array (year x z1 x z): year-specific perturbation kernels
#   C.t.mean  array (year x z1 x z): perturbation kernels with the focal
#             demographic function held at its across-year mean
#
# Returns a list: meshpts, bin width h, elasticity surfaces elas.s (total)
# and elas.s.mean (mean-function component), the mean kernel, and Ls, the
# stochastic growth rate (exp of the average log growth rate).
#
# NOTE(review): also depends on the globals n.years, nBigMatrix, minsize,
# maxsize and on mk_K() sourced from "Carlina Demog Funs DI.R".
stoc_pert_analysis<-function(params,n.est,n.runin,C.t,C.t.mean){
# random sequence of years driving the environmental stochasticity
year.i <- sample(1:n.years,n.est+1,replace=TRUE)
# discretised kernel for each year
K.year.i <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.K<-mk_K(nBigMatrix,params[,i],minsize,maxsize)
K.year.i[i,,] <- year.K$K
}
h <- year.K$h;
meshpts <- year.K$meshpts
#Calculate mean kernel and its dominant right (w) / left (v) eigenvectors
mean.kernel <- apply(K.year.i,2:3,mean)
w <- Re(eigen(mean.kernel)$vectors[,1]);
v <- Re(eigen(t(mean.kernel))$vectors[,1]);
# scale eigenvectors so that <v,w>=1
w <- abs(w)/sum(h*abs(w))
v <- abs(v)
v <- v/(h*sum(v*w))
cat(h*sum(v*w)," should = 1","\n")
# Estimate the stochastic growth rate lambda_s
#initialize variables
nt<-rep(1/nBigMatrix,nBigMatrix)
rt.V <- rt.N <- rep(NA,n.est)
#Iterate model
for (year.t in 1:n.est){
if(year.t%%10000==0) cat("iterate: ", year.t,"\n");
#iterate model with year-specific kernel
nt1<-K.year.i[year.i[year.t],,] %*% nt
sum.nt1<-sum(nt1)
#Calculate log growth rates (v-weighted and total-population versions)
rt.V[year.t] <- log(sum(nt1*v)/sum(nt*v))
rt.N[year.t] <- log(sum(nt1)/sum(nt))
nt <- nt1 / sum.nt1
}
# lambda_s on the arithmetic scale (cf. the old version below, which
# returned the log-scale mean)
Ls <- exp(mean(rt.V))
### Forward iteration: population structure time series w_t ###
wt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in 1:n.est) {
K <- K.year.i[year.i[i],,]
wt[i+1,] <-K %*% wt[i,]
wt[i+1,] <-wt[i+1,]/sum(wt[i+1,]);
if(i%%10000==0) cat("wt ",i,"\n")
}
### Backward iteration: reproductive value time series v_t ###
vt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in (n.est+1):2) {
K <- K.year.i[year.i[i],,]
vt[i-1,] <- vt[i,] %*% K
vt[i-1,] <- vt[i-1,]/sum(vt[i-1,]);
if(i%%10000==0) cat("vt ",i,"\n")
}
# Accumulate the elasticity surfaces over the post-burn-in window
elas.s <- matrix(0,nBigMatrix,nBigMatrix)
elas.s.mean <- matrix(0,nBigMatrix,nBigMatrix)
for (year.t in n.runin:(n.est-n.runin)) {
#standard calculations needed for the various formulae
vt1.wt <- outer(vt[year.t+1,],wt[year.t,],FUN="*")
vt1.C.wt <- vt1.wt * C.t[year.i[year.t],,]
vt1.C.wt.mean <- vt1.wt * C.t.mean[year.i[year.t],,]
K <- K.year.i[year.i[year.t],,]
vt1.K.wt <- sum(vt[year.t+1,] * (K %*% wt[year.t,]))
#calculation of the standard elasticities
elas.s <-elas.s + (vt1.C.wt) / vt1.K.wt;
elas.s.mean <-elas.s.mean + (vt1.C.wt.mean) / vt1.K.wt;
}
# average over the (n.est - 2*n.runin + 1) retained time steps
elas.s <- elas.s/(n.est-2*n.runin+1)
elas.s.mean <- elas.s.mean/(n.est-2*n.runin+1)
return(list(meshpts=year.K$meshpts, h=h, elas.s=elas.s, elas.s.mean=elas.s.mean, mean.kernel=mean.kernel, Ls=Ls))
}
########################################################################
# Perturbation analysis: probability of flowering function p_b(z)
########################################################################

# parameter set to analyse (yearly estimates)
params.to.use <- m.par.est

# mesh points and bin width used to evaluate the perturbation kernels
year.K <- mk_K(nBigMatrix, params.to.use[, 1], minsize, maxsize)
meshpts <- year.K$meshpts
h <- year.K$h

# across-year mean of the flowering function, evaluated at the mesh points
p_bz.mean <- Reduce(`+`, lapply(seq_len(n.years), function(i) {
  p_bz(meshpts, params.to.use[, i])
})) / n.years
# Year-specific perturbation kernel for the flowering function:
# p_b(z) times the (recruitment - growth) contrast, i.e. how the kernel
# trades the growth pathway for the recruitment pathway when p_b changes.
# z1 = size at t+1, z = size at t; must be vectorised (called via outer()).
Ct_z1z <- function(z1,z,m.par){
return( p_bz(z, m.par) * s_z(z, m.par) *
( m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) - G_z1z(z1, z, m.par)) )
}
# evaluate on the mesh for each year; h is the integration bin width
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
# Perturbation kernel with the flowering function held at its across-year mean.
# BUG FIX: `p_bz.mean` is a vector over the mesh points. Used bare inside the
# vectorised outer() call, it was recycled along the FIRST expanded argument
# (z1, which cycles fastest), so the "mean" flowering probability varied with
# size at t+1 instead of size at t. Index it by z explicitly so it lines up
# with p_bz(z) in the year-specific kernel above.
Ct_z1z_mean <- function(z1,z,m.par){
  p.mean <- p_bz.mean[match(z, meshpts)]   # mean p_b evaluated at z
  return( p.mean * s_z(z, m.par) *
        ( m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) - G_z1z(z1, z, m.par)) )
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
# Run the stochastic perturbation analysis for the flowering function
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
meshpts <- pert.K$meshpts
# collapse the elasticity surfaces over z1 (rows) to get functions of z
elas.s <- apply(pert.K$elas.s,2,sum)
elas.s.mean <- apply(pert.K$elas.s.mean,2,sum)
elas.s.sd <- elas.s - elas.s.mean                   # variability component = total - mean
sens.mean <- elas.s.mean * pert.K$Ls / p_bz.mean    # elasticity -> sensitivity
# 2x2 panel figure: total / mean / sd elasticities and the mean sensitivity
set_graph_pars("panel4")
plot(meshpts,elas.s,type="l",xlab="Size (t), z",ylab=expression(e[S] ^p[b]))
add_panel_label("a")
plot(meshpts,elas.s.mean,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{p[b]*","*mu}))
add_panel_label("b")
plot(meshpts,elas.s.sd,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{p[b]*","*sigma}))
add_panel_label("c")
plot(meshpts,sens.mean,type="l",xlab="Size (t), z",ylab=expression(s[S] ^{p[b]*","*mu}))
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinapbElasSens.eps")
########################################################################
# Perturbation analysis: survival function s(z)
########################################################################

# across-year mean of the survival function, evaluated at the mesh points
s.zmean <- Reduce(`+`, lapply(seq_len(n.years), function(i) {
  s_z(meshpts, params.to.use[, i])
})) / n.years
# Year-specific perturbation kernel for survival: s(z) scales BOTH pathways
# (growth of non-flowering plants plus recruitment from flowering plants),
# so this is the full kernel. z1 = size at t+1, z = size at t.
Ct_z1z <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) +
s_z(z, m.par) * p_bz(z, m.par) * m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) )
}
# evaluate on the mesh for each year; h is the integration bin width
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
# Perturbation kernel with the survival function held at its across-year mean.
# BUG FIX: `s.zmean` is a vector over the mesh points. Used bare inside the
# vectorised outer() call, it was recycled along the FIRST expanded argument
# (z1, which cycles fastest), so the "mean" survival varied with size at t+1
# instead of size at t. Index it by z explicitly so it lines up with s_z(z).
Ct_z1z_mean <- function(z1,z,m.par){
  s.mean <- s.zmean[match(z, meshpts)]   # mean survival evaluated at z
  return( s.mean * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) +
          s.mean * p_bz(z, m.par) * m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) )
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
# Run the stochastic perturbation analysis for the survival function
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
#max(pert.K$elas.tmp-apply(pert.K$elas.s,2,sum))
meshpts <- pert.K$meshpts
# collapse the elasticity surfaces over z1 (rows) to get functions of z
elas.s <- apply(pert.K$elas.s,2,sum)
elas.s.mean <- apply(pert.K$elas.s.mean,2,sum)
elas.s.sd <- elas.s - elas.s.mean                  # variability component
sens.mean <- elas.s.mean * pert.K$Ls / s.zmean     # elasticity -> sensitivity
# Because the perturbation kernel equals the full kernel here,
# the survival elasticities should sum to 1 (sanity check):
#Check sum elasticities is 1
cat(sum(pert.K$elas.s)," should be 1","\n")
# 2x2 panel figure: total / mean / sd elasticities and the mean sensitivity
set_graph_pars("panel4")
plot(meshpts,elas.s,type="l",xlab="Size (t), z",ylab=expression(e[S] ^s(z)))
add_panel_label("a")
plot(meshpts,elas.s.mean,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{s(z)*","*mu}))
add_panel_label("b")
plot(meshpts,elas.s.sd,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{s(z)*","*sigma}))
add_panel_label("c")
plot(meshpts,sens.mean,type="l",xlab="Size (t), z",ylab=expression(s[S] ^{s(z)*","*mu}))
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinasElasSens.eps")
########################################################################
# Perturbation analysis: growth kernel G(z1, z)
########################################################################

# across-year mean growth kernel evaluated on the mesh (z1 in rows, z in columns)
G.mean <- Reduce(`+`, lapply(seq_len(n.years), function(i) {
  outer(meshpts, meshpts, G_z1z, m.par = params.to.use[, i])
})) / n.years
# Year-specific perturbation kernel for growth: the growth pathway only,
# i.e. survival times the probability of NOT flowering times G(z1, z).
Ct_z1z <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) )
}
# evaluate on the mesh for each year; h is the integration bin width
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
# Perturbation kernel with the growth kernel held at its across-year mean.
# CAUTION: `G.mean` is an nBigMatrix x nBigMatrix matrix (z1 in rows, z in
# columns). Inside the vectorised outer() call the elementwise product
# flattens G.mean column-major, which happens to line up with outer's
# evaluation order (z1 cycling fastest) -- so the result is correct, but
# ONLY when this function is called as outer(meshpts, meshpts, ...) over
# the full mesh. Do not call it with any other argument shapes.
Ct_z1z_mean <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) ) * G.mean)
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
# Run the stochastic perturbation analysis for the growth kernel.
# Growth perturbations are surfaces in (z, z1), so these results are
# plotted as image/contour panels rather than curves.
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
meshpts <- pert.K$meshpts
elas.s.sd <- pert.K$elas.s - pert.K$elas.s.mean       # variability component
sens.mean <- pert.K$elas.s.mean * pert.K$Ls / G.mean  # elasticity -> sensitivity
## set up the plots
# NOTE(review): the doubled `ikeep <- ikeep <-` is a harmless typo
ikeep <- ikeep <- which(meshpts>1.5 & meshpts<5) # use to extract a region to plot
set_graph_pars("panel4")
## plot the growth sensitivity and elasticity surfaces
image(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s[ikeep,ikeep]),
add=TRUE)
add_panel_label("a")
## plot the offspring size kernel sensitivity and elasticity surfaces
image(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s.mean[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s.mean[ikeep,ikeep]), add=TRUE)
add_panel_label("b")
image(meshpts[ikeep], meshpts[ikeep], t(elas.s.sd [ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(elas.s.sd [ikeep,ikeep]), add=TRUE)
add_panel_label("c")
image(meshpts[ikeep], meshpts[ikeep], t(sens.mean[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(sens.mean[ikeep,ikeep]), add=TRUE)
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinaGElasSens.eps")
########################################################
#Old code sums before averaging and explicit loop...
# DEPRECATED: superseded by stoc_pert_analysis() above. Kept for reference.
# Differences from the new version:
#   * elasticities are accumulated per-column with sapply instead of as
#     full (z1, z) surfaces, so elas.s here is a vector over z;
#   * Ls is returned as mean(rt.V), i.e. the LOG-scale growth rate,
#     whereas the new function returns exp(mean(rt.V)) -- NOTE(review):
#     callers must not mix the two scales.
stoc.pert.analysis.old<-function(params,n.est,n.runin,C.t,C.t.mean){
# random sequence of years driving the environmental stochasticity
year.i <- sample(1:n.years,n.est+1,replace=TRUE)
K.year.i <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.K<-mk_K(nBigMatrix,params[,i],minsize,maxsize)
K.year.i[i,,] <- year.K$K
}
h <- year.K$h;
meshpts <- year.K$meshpts
#Calculate mean kernel and its dominant right (w) / left (v) eigenvectors
mean.kernel <- apply(K.year.i,2:3,mean)
w <- Re(eigen(mean.kernel)$vectors[,1]);
v <- Re(eigen(t(mean.kernel))$vectors[,1]);
# scale eigenvectors so that <v,w>=1
w <- abs(w)/sum(h*abs(w))
v <- abs(v)
v <- v/(h*sum(v*w))
cat(h*sum(v*w)," should = 1","\n")
# Estimate the stochastic growth rate
#initialize variables
nt<-rep(1/nBigMatrix,nBigMatrix)
rt.V <- rt.N <- rep(NA,n.est)
#Iterate model
for (year.t in 1:n.est){
if(year.t%%10000==0) cat("iterate: ", year.t,"\n");
#iterate model with year-specific kernel
nt1<-K.year.i[year.i[year.t],,] %*% nt
sum.nt1<-sum(nt1)
#Calculate log growth rates
rt.V[year.t] <- log(sum(nt1*v)/sum(nt*v))
rt.N[year.t] <- log(sum(nt1)/sum(nt))
nt <- nt1 / sum.nt1
}
# log-scale growth rate (new version returns exp of this -- see note above)
Ls <- mean(rt.V)
### Get wt and Rt time series ###
wt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in 1:n.est) {
K <- K.year.i[year.i[i],,]
wt[i+1,] <-K %*% wt[i,]
wt[i+1,] <-wt[i+1,]/sum(wt[i+1,]);
if(i%%10000==0) cat("wt ",i,"\n")
}
### Get vt time series ###
vt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in (n.est+1):2) {
K <- K.year.i[year.i[i],,]
vt[i-1,] <- vt[i,] %*% K
vt[i-1,] <- vt[i-1,]/sum(vt[i-1,]);
if(i%%10000==0) cat("vt ",i,"\n")
}
# column-wise (function of z) elasticity accumulators
elas.s <- rep(0,nBigMatrix)
elas.s.mean <- rep(0,nBigMatrix)
for (year.t in n.runin:(n.est-n.runin)) {
#standard calculations needed for the various formulae
vt1.C.wt <- sapply(1:nBigMatrix,function(z0) sum(vt[year.t+1,] *
(C.t[year.i[year.t],,z0] * wt[year.t,z0])))
vt1.C.wt.mean <- sapply(1:nBigMatrix,function(z0) sum(vt[year.t+1,] *
(C.t.mean[year.i[year.t],,z0] * wt[year.t,z0])))
K <- K.year.i[year.i[year.t],,]
vt1.K.wt <- sum(vt[year.t+1,] * (K %*% wt[year.t,]))
# # vt1.above <- rep(NA,nBigMatrix)
# for(z0 in 1:nBigMatrix){
# pr.pb.c0.G <- params["p.r",year.i[year.t]] * b_z(meshpts[z0],params[,year.i[year.t]]) *
# c_0z1(meshpts,params[,year.i[year.t]]) -
# G_z1z(meshpts,meshpts[z0],params[,year.i[year.t]])
# pb.s.wt <- p_bz(meshpts[z0],params[,year.i[year.t]]) *
# s_z(meshpts[z0],params[,year.i[year.t]])
# vt1.above[z0] <- sum(vt[year.t+1,] * pb.s.wt * (pr.pb.c0.G * wt[year.t,z0])) *h
# }
#calculation of the standard elasticities
elas.s <-elas.s + (vt1.C.wt) / vt1.K.wt;
elas.s.mean <-elas.s.mean + (vt1.C.wt.mean) / vt1.K.wt;
}
# average over the retained time steps
elas.s <- elas.s/(n.est-2*n.runin+1)
elas.s.mean <- elas.s.mean/(n.est-2*n.runin+1)
return(list(meshpts=year.K$meshpts, h=h, elas.s=elas.s, elas.s.mean=elas.s.mean, mean.kernel=mean.kernel, Ls=Ls))
}
| /ipmbook-code/c7/Carlina/Carlina Fixed Effects K fun pert.R | no_license | aekendig/population-modeling-techniques | R | false | false | 13,831 | r | ## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
## Fit and run fixed and mixed effects effects Carlina stochastic IPM
## ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
rm(list=ls(all=TRUE))
library(doBy)
library(lme4)
library(MCMCglmm)
set.seed(53241986)
## Working directory must be set here, so the source()'s below run
root=ifelse(.Platform$OS.type=="windows","c:/repos","~/Repos");
setwd(paste(root,"/ipm_book/Rcode/c2",sep=""));
source("../utilities/Standard Graphical Pars.R");
root=ifelse(.Platform$OS.type=="windows","c:/repos","~/Repos");
setwd(paste(root,"/ipm_book/Rcode/c7/Carlina",sep=""));
load("Yearly parameters.Rdata")
source("Carlina Demog Funs DI.R")
#####################################################################
#Stochastic perturbation analysis
#####################################################################
nBigMatrix <- 100
n.est <- 20000
n.runin <- 500
minsize <- 1.5
maxsize <- 5
n.years <-20
stoc_pert_analysis<-function(params,n.est,n.runin,C.t,C.t.mean){
year.i <- sample(1:n.years,n.est+1,replace=TRUE)
K.year.i <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.K<-mk_K(nBigMatrix,params[,i],minsize,maxsize)
K.year.i[i,,] <- year.K$K
}
h <- year.K$h;
meshpts <- year.K$meshpts
#Calculate mean kernel, v and w
mean.kernel <- apply(K.year.i,2:3,mean)
w <- Re(eigen(mean.kernel)$vectors[,1]);
v <- Re(eigen(t(mean.kernel))$vectors[,1]);
# scale eigenvectors <v,w>=1
w <- abs(w)/sum(h*abs(w))
v <- abs(v)
v <- v/(h*sum(v*w))
cat(h*sum(v*w)," should = 1","\n")
#Esimate Lambda s
#initialize variables
nt<-rep(1/nBigMatrix,nBigMatrix)
rt.V <- rt.N <- rep(NA,n.est)
#Iterate model
for (year.t in 1:n.est){
if(year.t%%10000==0) cat("iterate: ", year.t,"\n");
#iterate model with year-specific kernel
nt1<-K.year.i[year.i[year.t],,] %*% nt
sum.nt1<-sum(nt1)
#Calculate log growth rates
rt.V[year.t] <- log(sum(nt1*v)/sum(nt*v))
rt.N[year.t] <- log(sum(nt1)/sum(nt))
nt <- nt1 / sum.nt1
}
Ls <- exp(mean(rt.V))
### Get wt and Rt time series ###
wt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in 1:n.est) {
K <- K.year.i[year.i[i],,]
wt[i+1,] <-K %*% wt[i,]
wt[i+1,] <-wt[i+1,]/sum(wt[i+1,]);
if(i%%10000==0) cat("wt ",i,"\n")
}
### Get vt time series ###
vt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in (n.est+1):2) {
K <- K.year.i[year.i[i],,]
vt[i-1,] <- vt[i,] %*% K
vt[i-1,] <- vt[i-1,]/sum(vt[i-1,]);
if(i%%10000==0) cat("vt ",i,"\n")
}
elas.s <- matrix(0,nBigMatrix,nBigMatrix)
elas.s.mean <- matrix(0,nBigMatrix,nBigMatrix)
for (year.t in n.runin:(n.est-n.runin)) {
#standard calculations needed for the various formulae
vt1.wt <- outer(vt[year.t+1,],wt[year.t,],FUN="*")
vt1.C.wt <- vt1.wt * C.t[year.i[year.t],,]
vt1.C.wt.mean <- vt1.wt * C.t.mean[year.i[year.t],,]
K <- K.year.i[year.i[year.t],,]
vt1.K.wt <- sum(vt[year.t+1,] * (K %*% wt[year.t,]))
#calculation of the standard elasticities
elas.s <-elas.s + (vt1.C.wt) / vt1.K.wt;
elas.s.mean <-elas.s.mean + (vt1.C.wt.mean) / vt1.K.wt;
}
elas.s <- elas.s/(n.est-2*n.runin+1)
elas.s.mean <- elas.s.mean/(n.est-2*n.runin+1)
return(list(meshpts=year.K$meshpts, h=h, elas.s=elas.s, elas.s.mean=elas.s.mean, mean.kernel=mean.kernel, Ls=Ls))
}
########################################################################
#Let's do the probability of flowering function
########################################################################
#Select the parameters to use
params.to.use <- m.par.est
#Calculate meshpts and h for evaluating the perturbation kernels
year.K <- mk_K(nBigMatrix,params.to.use[,1],minsize,maxsize)
meshpts <- year.K$meshpts
h <- year.K$h
#First calculate the mean function and perturbation kernels
p_bz.mean <- 0
for(i in 1:n.years){
p_bz.mean <- p_bz.mean + p_bz(meshpts, params.to.use[,i])
}
p_bz.mean <- p_bz.mean/n.years
Ct_z1z <- function(z1,z,m.par){
return( p_bz(z, m.par) * s_z(z, m.par) *
( m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) - G_z1z(z1, z, m.par)) )
}
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
# Perturbation kernel with the flowering function held at its across-year mean.
# BUG FIX: the bare vector `p_bz.mean`, recycled inside the vectorised outer()
# call, lined up with the first expanded argument z1 instead of z. Index by z.
Ct_z1z_mean <- function(z1,z,m.par){
  p.mean <- p_bz.mean[match(z, meshpts)]   # mean p_b evaluated at z
  return( p.mean * s_z(z, m.par) *
        ( m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) - G_z1z(z1, z, m.par)) )
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
meshpts <- pert.K$meshpts
elas.s <- apply(pert.K$elas.s,2,sum)
elas.s.mean <- apply(pert.K$elas.s.mean,2,sum)
elas.s.sd <- elas.s - elas.s.mean
sens.mean <- elas.s.mean * pert.K$Ls / p_bz.mean
set_graph_pars("panel4")
plot(meshpts,elas.s,type="l",xlab="Size (t), z",ylab=expression(e[S] ^p[b]))
add_panel_label("a")
plot(meshpts,elas.s.mean,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{p[b]*","*mu}))
add_panel_label("b")
plot(meshpts,elas.s.sd,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{p[b]*","*sigma}))
add_panel_label("c")
plot(meshpts,sens.mean,type="l",xlab="Size (t), z",ylab=expression(s[S] ^{p[b]*","*mu}))
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinapbElasSens.eps")
########################################################################
#Let's do the survival function
########################################################################
#First calculate the mean function and perturbation kernels
s.zmean <- 0
for(i in 1:n.years){
s.zmean <- s.zmean + s_z(meshpts, params.to.use[,i])
}
s.zmean <- s.zmean/n.years
Ct_z1z <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) +
s_z(z, m.par) * p_bz(z, m.par) * m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) )
}
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
# Perturbation kernel with the survival function held at its across-year mean.
# BUG FIX: the bare vector `s.zmean`, recycled inside the vectorised outer()
# call, lined up with the first expanded argument z1 instead of z. Index by z.
Ct_z1z_mean <- function(z1,z,m.par){
  s.mean <- s.zmean[match(z, meshpts)]   # mean survival evaluated at z
  return( s.mean * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) +
          s.mean * p_bz(z, m.par) * m.par["p.r"] * b_z(z, m.par) * c_0z1(z1, m.par) )
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
#max(pert.K$elas.tmp-apply(pert.K$elas.s,2,sum))
meshpts <- pert.K$meshpts
elas.s <- apply(pert.K$elas.s,2,sum)
elas.s.mean <- apply(pert.K$elas.s.mean,2,sum)
elas.s.sd <- elas.s - elas.s.mean
sens.mean <- elas.s.mean * pert.K$Ls / s.zmean
#Check sum elasticities is 1
cat(sum(pert.K$elas.s)," should be 1","\n")
set_graph_pars("panel4")
plot(meshpts,elas.s,type="l",xlab="Size (t), z",ylab=expression(e[S] ^s(z)))
add_panel_label("a")
plot(meshpts,elas.s.mean,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{s(z)*","*mu}))
add_panel_label("b")
plot(meshpts,elas.s.sd,type="l",xlab="Size (t), z",ylab=expression(e[S] ^{s(z)*","*sigma}))
add_panel_label("c")
plot(meshpts,sens.mean,type="l",xlab="Size (t), z",ylab=expression(s[S] ^{s(z)*","*mu}))
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinasElasSens.eps")
########################################################################
#Let's do the growth function
########################################################################
#First calculate the mean function and perturbation kernels
G.mean <- 0
for(i in 1:n.years){
G.mean <- G.mean + outer(meshpts, meshpts, G_z1z, m.par = params.to.use[,i])
}
G.mean <- G.mean/n.years
Ct_z1z <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) )*G_z1z(z1, z, m.par) )
}
C.pert <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z, m.par = params.to.use[,i]))
C.pert[i,,] <- year.C
}
Ct_z1z_mean <- function(z1,z,m.par){
return( s_z(z, m.par) * (1- p_bz(z, m.par) ) * G.mean)
}
C.pert.mean <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.C <-h * (outer(meshpts, meshpts, Ct_z1z_mean, m.par = params.to.use[,i]))
C.pert.mean[i,,] <- year.C
}
pert.K <- stoc_pert_analysis(params.to.use, n.est, n.runin, C.pert, C.pert.mean)
meshpts <- pert.K$meshpts
elas.s.sd <- pert.K$elas.s - pert.K$elas.s.mean
sens.mean <- pert.K$elas.s.mean * pert.K$Ls / G.mean
## set up the plots
ikeep <- ikeep <- which(meshpts>1.5 & meshpts<5) # use to extract a region to plot
set_graph_pars("panel4")
## plot the growth sensitivity and elasticity surfaces
image(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s[ikeep,ikeep]),
add=TRUE)
add_panel_label("a")
## plot the offspring size kernel sensitivity and elasticity surfaces
image(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s.mean[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(pert.K$elas.s.mean[ikeep,ikeep]), add=TRUE)
add_panel_label("b")
image(meshpts[ikeep], meshpts[ikeep], t(elas.s.sd [ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(elas.s.sd [ikeep,ikeep]), add=TRUE)
add_panel_label("c")
image(meshpts[ikeep], meshpts[ikeep], t(sens.mean[ikeep,ikeep]),
col=grey(seq(0.6, 1, length=100)),
xlab="Size (t), z", ylab="Size (t+1), z\'")
contour(meshpts[ikeep], meshpts[ikeep], t(sens.mean[ikeep,ikeep]), add=TRUE)
add_panel_label("d")
dev.copy2eps(file="~/Repos/ipm_book/c7/figures/CarlinaGElasSens.eps")
########################################################
#Old code sums before averaging and explicit loop...
#
# Stochastic perturbation analysis of a kernel component (superseded version).
# Simulates a random sequence of yearly kernels and accumulates the stochastic
# elasticities of the kernel component supplied in C.t / C.t.mean.
#
# Arguments:
#   params   - matrix of yearly parameter vectors (one column per year)
#   n.est    - number of iterations used to estimate the stochastic growth rate
#   n.runin  - burn-in iterations dropped at both ends of the elasticity sums
#   C.t      - array (year x z' x z) of the year-specific kernel component
#   C.t.mean - array (year x z' x z) of the mean-parameter kernel component
#
# Returns a list with meshpts, h, elas.s, elas.s.mean, mean.kernel and Ls.
#
# NOTE(review): relies on globals n.years, nBigMatrix, minsize, maxsize and
# mk_K() being defined in the calling environment -- confirm before reuse.
stoc.pert.analysis.old<-function(params,n.est,n.runin,C.t,C.t.mean){
#draw a random year for every iteration (one extra for the final state)
year.i <- sample(1:n.years,n.est+1,replace=TRUE)
#build the iteration kernel for each year once, up front
K.year.i <- array(NA,c(n.years,nBigMatrix,nBigMatrix))
for(i in 1:n.years){
year.K<-mk_K(nBigMatrix,params[,i],minsize,maxsize)
K.year.i[i,,] <- year.K$K
}
#mesh width and mesh points are the same for every year's kernel
h <- year.K$h;
meshpts <- year.K$meshpts
#Calculate mean kernel, v and w
mean.kernel <- apply(K.year.i,2:3,mean)
w <- Re(eigen(mean.kernel)$vectors[,1]);
v <- Re(eigen(t(mean.kernel))$vectors[,1]);
# scale eigenvectors <v,w>=1
w <- abs(w)/sum(h*abs(w))
v <- abs(v)
v <- v/(h*sum(v*w))
cat(h*sum(v*w)," should = 1","\n")
#Estimate Lambda s
#initialize variables
nt<-rep(1/nBigMatrix,nBigMatrix)
rt.V <- rt.N <- rep(NA,n.est)
#Iterate model
for (year.t in 1:n.est){
if(year.t%%10000==0) cat("iterate: ", year.t,"\n");
#iterate model with year-specific kernel
nt1<-K.year.i[year.i[year.t],,] %*% nt
sum.nt1<-sum(nt1)
#Calculate log growth rates (v-weighted, and plain total-population)
rt.V[year.t] <- log(sum(nt1*v)/sum(nt*v))
rt.N[year.t] <- log(sum(nt1)/sum(nt))
#renormalise so nt tracks population structure, not size
nt <- nt1 / sum.nt1
}
#stochastic growth rate: mean of the v-weighted one-step log growth rates
Ls <- mean(rt.V)
### Get wt and Rt time series ###
#forward-iterate the population-structure time series wt
wt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in 1:n.est) {
K <- K.year.i[year.i[i],,]
wt[i+1,] <-K %*% wt[i,]
wt[i+1,] <-wt[i+1,]/sum(wt[i+1,]);
if(i%%10000==0) cat("wt ",i,"\n")
}
### Get vt time series ###
#backward-iterate the reproductive-value time series vt
vt<-matrix(1/nBigMatrix, nrow=n.est+1, ncol=nBigMatrix);
for (i in (n.est+1):2) {
K <- K.year.i[year.i[i],,]
vt[i-1,] <- vt[i,] %*% K
vt[i-1,] <- vt[i-1,]/sum(vt[i-1,]);
if(i%%10000==0) cat("vt ",i,"\n")
}
#accumulate elasticity sums over the central (post burn-in) iterations
elas.s <- rep(0,nBigMatrix)
elas.s.mean <- rep(0,nBigMatrix)
for (year.t in n.runin:(n.est-n.runin)) {
#standard calculations needed for the various formulae
vt1.C.wt <- sapply(1:nBigMatrix,function(z0) sum(vt[year.t+1,] *
(C.t[year.i[year.t],,z0] * wt[year.t,z0])))
vt1.C.wt.mean <- sapply(1:nBigMatrix,function(z0) sum(vt[year.t+1,] *
(C.t.mean[year.i[year.t],,z0] * wt[year.t,z0])))
K <- K.year.i[year.i[year.t],,]
vt1.K.wt <- sum(vt[year.t+1,] * (K %*% wt[year.t,]))
# # vt1.above <- rep(NA,nBigMatrix)
# for(z0 in 1:nBigMatrix){
# pr.pb.c0.G <- params["p.r",year.i[year.t]] * b_z(meshpts[z0],params[,year.i[year.t]]) *
# c_0z1(meshpts,params[,year.i[year.t]]) -
# G_z1z(meshpts,meshpts[z0],params[,year.i[year.t]])
# pb.s.wt <- p_bz(meshpts[z0],params[,year.i[year.t]]) *
# s_z(meshpts[z0],params[,year.i[year.t]])
# vt1.above[z0] <- sum(vt[year.t+1,] * pb.s.wt * (pr.pb.c0.G * wt[year.t,z0])) *h
# }
#calculation of the standard elasticities
elas.s <-elas.s + (vt1.C.wt) / vt1.K.wt;
elas.s.mean <-elas.s.mean + (vt1.C.wt.mean) / vt1.K.wt;
}
#average the accumulated sums over the number of iterations used
elas.s <- elas.s/(n.est-2*n.runin+1)
elas.s.mean <- elas.s.mean/(n.est-2*n.runin+1)
return(list(meshpts=year.K$meshpts, h=h, elas.s=elas.s, elas.s.mean=elas.s.mean, mean.kernel=mean.kernel, Ls=Ls))
}
|
#' @importFrom bigWig bed.step.bpQuery.bigWig load.bigWig unload.bigWig
#' @importFrom dREG genomic_data_model
#' @importFrom data.table rbindlist
#' @importFrom snow getMPIcluster
#' @importFrom snowfall sfExport sfInit sfLapply sfRemoveAll sfStop
#utility functions
#look up the length of one chromosome in a chromInfo table
#(column 1 = chromosome name, column 2 = chromosome length)
get.chrom.length<-function(chr, chrom.info.table){
  is.target <- chrom.info.table[,1]==chr
  return(chrom.info.table[is.target,2])
}
#extract to construct bedgraph and then to bigwig
#Convert a bedGraph-formatted temp file into a bigWig file.
#  filename  - output bigWig path
#  temp.bg   - path to the (unsorted) bedGraph text file
#  chromInfo - path to a two-column chromosome-size table
#Relies on the external tools `sort` and `bedGraphToBigWig` being on PATH;
#failures of either command are deliberately non-fatal (wrapped in try()).
tobigwig<-function(filename, temp.bg, chromInfo){
#get bedgraph
bedgraph.sorted=tempfile()
options(scipen =99) # not to use scientific notation when writing out
#write bedgraph formatted dataframes to tempfile
#write.table(bedgraph,file= bedgraph.file,quote=F,sep="\t",col.names=F,row.names=F)
#sort by chromosome, then numerically by start, and drop duplicate lines
command=paste("LC_ALL=C sort -k1,1 -k2,2n", temp.bg, "| uniq >", bedgraph.sorted,sep=" ")
#browser()
try(system(command))
command=paste("bedGraphToBigWig", bedgraph.sorted, chromInfo ,filename, sep=" ")
#cat(command,"\n")
try(system(command))
#unlink(bedgraph.file)
unlink(bedgraph.sorted)
}
#bedtools merge: sort a bed data.frame and collapse overlapping intervals
#by piping it through the external `mergeBed` tool; returns the merged
#intervals as a data.frame
bedTools.merge<-function(bed)
{
  in.file = tempfile()
  out.file = tempfile()
  options(scipen =99) # not to use scientific notation when writing out
  #dump the bed data.frame to a tab-separated tempfile
  write.table(bed,file=in.file,quote=F,sep="\t",col.names=F,row.names=F)
  #sort, merge, and capture the result in a second tempfile
  command=paste("LC_ALL=C sort -k1,1 -k2,2n",in.file,"| mergeBed -i stdin >",out.file,sep=" ")
  try(system(command))
  merged=read.table(out.file,header=F)
  unlink(in.file)
  unlink(out.file)
  return(merged)
}
#run an arbitrary two-input bedtools command (e.g. "bedtools intersect")
#on two bed data.frames and return the tool's output as a data.frame
bedTools.2in<-function(functionstring="bedIntersect",bed1,bed2,opt.string="")
{
  a.file = tempfile()
  b.file = tempfile()
  out.file = tempfile()
  options(scipen =99) # not to use scientific notation when writing out
  #dump both bed data.frames to tab-separated tempfiles
  write.table(bed1,file=a.file,quote=F,sep="\t",col.names=F,row.names=F)
  write.table(bed2,file=b.file,quote=F,sep="\t",col.names=F,row.names=F)
  #assemble and run the shell command, capturing output in a tempfile
  command=paste(functionstring,"-a",a.file,"-b",b.file,opt.string,">",out.file,sep=" ")
  try(system(command))
  res=read.table(out.file,header=F)
  unlink(a.file)
  unlink(b.file)
  unlink(out.file)
  return(res)
}
#dREG HD peak calling functions
#scan for continuous peak runs
#Given a sorted vector of candidate positions, group maximal runs of
#consecutive integers and return one row per run:
#cbind(first position, last position + 1).
#An empty input yields a 0-row, 2-column matrix.
scan_for_peak<-function(potential.peak.positions){
  n.pos <- length(potential.peak.positions)
  if (n.pos == 0) return(cbind(numeric(0), numeric(0)))
  #a gap after index i means positions i and i+1 are not consecutive
  gap.after <- which(diff(potential.peak.positions) != 1)
  run.starts <- c(1, gap.after + 1)
  run.ends <- c(gap.after, n.pos)
  return(cbind(potential.peak.positions[run.starts],
               potential.peak.positions[run.ends] + 1))
}
#Filter peaks by the first-derivative "change of sign" criterion.
#A peak (row of `peaks`: start/end in genome coordinates) is kept when the
#spline's first derivative changes sign (or touches zero) between some
#adjacent pair of positions inside it.
#  peaks     - numeric matrix of candidate peaks, one row per peak
#  spl.model - smooth.spline fit over 1-based positions within the region
#  start     - genome coordinate of spline position 1, used to translate
#              genome coordinates into spline coordinates
#Returns the surviving rows as a 2-column matrix.
#Fixes the 1:nrow(peaks) footgun (zero-row input no longer iterates) and
#replaces O(n^2) vector growth with vapply().
D1.criterion.filter<-function(peaks, spl.model, start){
  peak.pass.or.not <- vapply(seq_len(nrow(peaks)), function(i) {
    spline.positions <- (peaks[i,1]-start+2):(peaks[i,2]-start+1)
    #the peak passes if any adjacent pair (j-1, j) brackets a sign change
    any(vapply(spline.positions, function(j) {
      D1.val <- predict(spl.model, x=c(j, (j-1)), deriv=1)$y
      prod(D1.val) <= 0
    }, logical(1)))
  }, logical(1))
  return(matrix(peaks[peak.pass.or.not,], byrow=FALSE, ncol=2))
}
#Call peaks within one predicted region by spline-smoothing the signal.
#  predicted_data - numeric vector of per-base imputed signal; must carry
#                   "chrom", "start" and "end" attributes locating the region
#  knots.ratio    - region length per spline degree of freedom (larger =>
#                   smoother fit => fewer, broader peaks)
#  background     - minimum raw signal a position must exceed
#Returns a data.frame (chrom, start, end) of peaks, or NULL (implicitly)
#when no candidate peak survives filtering.
split_peak<-function(predicted_data, knots.ratio, background){
#print(attr(predicted_data,"start"))
full.vec<-seq(attr(predicted_data,"start"),(attr(predicted_data,"end")-1))
positions.ONEbased<-full.vec-full.vec[1]+1
#chose degree of freedom proportional to peak length
df<-as.integer(length(full.vec)/knots.ratio)+3
#print(length(positions.ONEbased))
#print(length(predicted_data))
dnase.spl<-smooth.spline(positions.ONEbased,predicted_data,df=df)
#NOTE(review): D0.val is computed but never used below -- the background
#filter uses the raw predicted_data instead; confirm this is intentional
D0.val<-predict(dnase.spl,x=c(1:length(full.vec)))$y
D2.val<-predict(dnase.spl,x=c(1:length(full.vec)),deriv=2)$y
#get the positions below 2nd derivative cutoff (concave part of the curve)
potential.peak.positions.D2<-full.vec[D2.val<0]
#get the positions above the Dnase value background level
potential.peak.positions.D0 <- full.vec[predicted_data> background]
#candidates must satisfy both the concavity and the background criterion
potential.peak.positions<-intersect (potential.peak.positions.D2, potential.peak.positions.D0)
#scan for continuous peak that passes minimun peak length defined
peaks<-scan_for_peak(potential.peak.positions)
#further filtering the peak using "change of sign criterion" of 1st order derivative
if(!is.null(peaks)){
if(nrow(peaks)!=0){
peaks.D1.filtered <- D1.criterion.filter(peaks, dnase.spl, start=attr(predicted_data,"start"))
peak.dataframe<-cbind.data.frame(rep(attr(predicted_data,"chrom"), nrow(peaks.D1.filtered)), peaks.D1.filtered)
return (peak.dataframe)
}
}
}
#for regions of outlier (too high) PRO-seq reads, rarely found in training
#examples, scale the whole vector down so its maximum matches the reference
#maximum seen in training; inputs already within range pass through unchanged.
#This aims at fixing the chunk region in prediction.
fix_distribution<-function(dat){
  #training.max reference determined by 150K+ training examples over true +ve
  #dREG regions (both dnase and grocap +ve): 90% percentile of
  #max(training example input vectors)
  training.max <- 4.341949
  observed.max <- max(dat)
  if (observed.max <= training.max) {
    return(dat)
  }
  return(dat / (observed.max / training.max))
}
#dREG_HD_pred functions
#Build the feature matrix for one bed interval by querying read counts from
#the plus/minus bigWig files through dREG's C entry point.
#  bed_line     - one row of a bed data.frame (chrom, start, end)
#  zoom         - list(window sizes, half window counts) from the genomic
#                 data model
#  bigwig_plus  - path to the plus-strand bigWig
#  bigwig_minus - path to the minus-strand bigWig
#  total        - total read count used for depth normalisation
#Returns a data.frame: chrom, position, position + 1, feature columns.
dREG_HD_get_dat <-function( bed_line, zoom, bigwig_plus, bigwig_minus,total){
chrom<-bed_line[[1]]
start<-as.integer(bed_line[[2]])
#bed ends are exclusive, so the last queried base is end - 1
end<-as.integer(bed_line[[3]])-1
positions<-c(start:end)
#scaling using total read depth
dat_unscaled <- .Call("get_genomic_data_R", as.character(rep(chrom,length(positions))), as.integer(positions), as.character(bigwig_plus), as.character(bigwig_minus), zoom, FALSE, PACKAGE= "dREG")
#reads-per-million style normalisation by total depth
dat_unscaled <-unlist (dat_unscaled)/(total/1E6)
#reshape to one row per position; NOTE(review): cbind(rep(), ...) binds
#nothing and appears to be a no-op -- confirm it can be dropped
dat<- cbind(rep(),t(matrix(dat_unscaled, ncol=NROW(positions))))
#pull outlier regions back into the training distribution
dat<-fix_distribution(dat)
stopifnot(nrow(dat)==length(positions))
cbind.data.frame(chrom, positions, positions+1, dat)
}
#@param bed is a data.frame from the block of bed_file
#
#Impute DNase signal for every base of `bed` and append predictions to the
#bedGraph file `temp.bg`.  Feature matrices are built in parallel chunks on
#a snowfall SOCK cluster; the SVM prediction runs on GPU via Rgtsvm when
#use_rgtsvm is TRUE, otherwise inside each worker via e1071.  The function's
#useful effect is the rows appended to temp.bg; it returns NULL.
run_dREG_HD_pred<-function(gdm, bed,bigwig_plus,bigwig_minus, model,total, temp.bg, ncores, use_rgtsvm) {
stopifnot(NROW(gdm@window_sizes) == NROW(gdm@half_nWindows))
zoom<- list(as.integer(gdm@window_sizes), as.integer(gdm@half_nWindows))
#never use more chunks than there are bed rows
if (nrow(bed)<ncores) blocks=nrow(bed)
else blocks= ncores
#chunk i covers bed rows (line.cutoff[i]+1)..line.cutoff[i+1]
line.cutoff<-as.integer(seq(from=0, to=nrow(bed),length.out= blocks+1))
if(use_rgtsvm){
#workers only build the feature matrix; prediction happens on the GPU below
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext, zoom, bigwig_plus, bigwig_minus,total){
requireNamespace("dREG")
do.call(rbind.data.frame,apply(dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),],MARGIN=1,FUN= dREG_HD_get_dat,zoom= zoom, bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus,total= total))
}
sfInit(parallel = TRUE, cpus = blocks, type = "SOCK" )
#NOTE(review): the data is also passed explicitly via sfLapply arguments
#below, so this export may be redundant -- confirm before removing
sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total");
#sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total","dREG_HD_get_dat","fix_distribution");
dat<-do.call(rbind.data.frame,sfLapply(x=1:blocks,fun= cpu.fun, line.cutoff= line.cutoff, dREG_bed_ext= bed,zoom= zoom,bigwig_plus = bigwig_plus, bigwig_minus = bigwig_minus, total = total))
sfStop()
#sfRemoveAll()
#first three columns are chrom/start/end, the rest are features
pos<-dat[,1:3]
ret <- Rgtsvm::predict.gtsvm(model,dat[,4:ncol(dat)])
rm(dat)
gc(verbose=TRUE, reset=TRUE)
#every base of every interval must have received a prediction
stopifnot(nrow(pos)==sum(bed$V3-bed$V2))
options(scipen =99)
write.table(cbind.data.frame(pos,ret),file=temp.bg,quote=F,sep="\t",col.names=F,row.names=F,append = TRUE)
}
else{
#CPU path: each worker both builds the features and runs the prediction
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext, zoom, bigwig_plus, bigwig_minus,total,model) {
requireNamespace("dREG")
requireNamespace("e1071")
dat <-do.call(rbind.data.frame,apply(dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),],MARGIN=1,FUN= dREG_HD_get_dat,zoom= zoom, bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus,total= total))
pos<-dat[,1:3]
ret <- predict(model,dat[,4:ncol(dat)])
rm(dat)
gc(verbose=TRUE, reset=TRUE)
cbind.data.frame(pos,ret)
}
sfInit(parallel = TRUE, cpus = blocks, type = "SOCK" )
sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total")
dat<-do.call(rbind.data.frame,sfLapply(x=1:blocks,fun= cpu.fun, line.cutoff= line.cutoff, dREG_bed_ext= bed,zoom= zoom,bigwig_plus = bigwig_plus, bigwig_minus = bigwig_minus, total = total,model=model))
sfStop()
#NOTE(review): sfRemoveAll() after sfStop() -- the cluster is already
#stopped at this point; confirm this call still does anything
sfRemoveAll()
stopifnot(nrow(dat)==sum(bed$V3-bed$V2))
options(scipen =99)
write.table(dat,file=temp.bg,quote=F,sep="\t",col.names=F,row.names=F,append = TRUE)
}
gc(verbose=TRUE, reset=TRUE)
return(NULL)
}
#Split a bed data.frame into blocks that each fit in the GPU memory budget.
#Returns a vector of row-index cutoffs: block i covers rows
#(cutoff[i]+1)..cutoff[i+1].
#Fixes two defects of the previous version: an interval larger than the
#budget by itself no longer causes an infinite loop (it now gets its own
#block), and the row index can no longer run past nrow(bed) (which read an
#out-of-range row and produced NA arithmetic).
split.bed.evenly<-function(bed){
  #memory budget: 1.8 GB of doubles, 123 feature values per example
  GPU<-1.8
  tot.num.examples <-as.integer(GPU*1024*1024*1024/8/123)
  line.cutoff<-c(0)
  current.row<-1
  n.rows <- nrow(bed)
  while(current.row <= n.rows){
    #always take at least one interval per block, even an oversized one
    current.num.examples <- bed[current.row,3]-bed[current.row,2]
    current.row <- current.row+1
    #greedily add following intervals while the block stays within budget
    while(current.row <= n.rows &&
          current.num.examples + (bed[current.row,3]-bed[current.row,2]) <= tot.num.examples){
      current.num.examples <- current.num.examples + (bed[current.row,3]-bed[current.row,2])
      current.row <- current.row+1
    }
    line.cutoff<-c(line.cutoff,current.row-1)
  }
  return(line.cutoff)
}
#Derive a chromosome-size table (V1 = name, V2 = length) from the headers of
#the two strand bigWig objects; when both strands report a chromosome, the
#larger size wins.
get.chromosome.info <- function(bw.plus, bw.minus)
{
  chrom.tab <- rbind(
    cbind(bw.plus$chroms, bw.plus$chromSizes),
    cbind(bw.minus$chroms, bw.minus$chromSizes)
  )
  chrom.names <- unique(chrom.tab[,1])
  chrom.sizes <- vapply(
    chrom.names,
    function(chr) max(as.numeric(chrom.tab[chrom.tab[,1]==chr,2])),
    numeric(1)
  )
  return(data.frame(V1=chrom.names, V2=unname(chrom.sizes)))
}
#main function runs prediction for blocks of dREG_bed in parallel
#dREG_HD<-function(bed_path, bigwig_plus, bigwig_minus, #chromInfo, model, ncores=1, use_rgtsvm=FALSE){
#' @export
dREG_HD<-function(bed_path, bigwig_plus, bigwig_minus, model, ncores=1, use_rgtsvm=FALSE){
#Impute per-base DNase-I signal over (extended) dREG sites, write it out as
#a bigWig, then call dREG-HD peaks at a relaxed and a stringent setting.
#  bed_path     - path to the dREG site bed file; outputs are written next
#                 to it (<bed_path>_imputedDnase.bw, *_dREG_HD_relaxed.bed,
#                 *_dREG_HD_stringent.bed)
#  bigwig_plus / bigwig_minus - strand-specific read bigWig paths
#  model        - trained imputation SVM (e1071, or Rgtsvm when use_rgtsvm)
#  ncores       - number of parallel workers
#  use_rgtsvm   - run the SVM prediction on GPU via Rgtsvm
#Step1: imputing Dnase-I signal in parallel mode
message("running dREG-HD on ", bed_path);
bw.plus <- load.bigWig(bigwig_plus);
bw.minus <- load.bigWig(bigwig_minus);
#total read depth used for normalisation
total<-abs(bw.plus$mean*bw.plus$basesCovered)+abs(bw.minus$mean*bw.minus$basesCovered);
#extract chromosome sizes while the bigWig handles are still open;
#previously this ran after unload.bigWig had already released the handles
chrom.info.table <- get.chromosome.info( bw.plus, bw.minus );
unload.bigWig(bw.plus);
unload.bigWig(bw.minus);
ext=200 #impute on extended dREG site
dREG_bed<-read.table(bed_path);
chromInfo <- tempfile("chrom.info.");
write.table( chrom.info.table, file=chromInfo, quote=F, row.names=F, col.names=F, sep="\t");
#extend each dREG site by `ext` bp, clipped to [0, chromosome length]
dREG_bed_ext<-cbind.data.frame(dREG_bed$V1, apply(cbind(0,(dREG_bed$V2-ext)), MARGIN=1, FUN=max), apply(cbind(sapply(as.character(dREG_bed$V1), FUN= get.chrom.length, chrom.info.table),(dREG_bed$V3+ext)), MARGIN=1, FUN=min));
dREG_bed_ext<-bedTools.merge(dREG_bed_ext);
if(use_rgtsvm) class(model)<-"gtsvm"
gdm<-genomic_data_model(60,30);
#split the merged sites into GPU-memory-sized blocks
line.cutoff<-split.bed.evenly(dREG_bed_ext);
blocks<-length(line.cutoff)-1;
message("number of total blocks=",blocks)
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext,bigwig_plus, bigwig_minus, gdm, model,total, temp.bg, ncores, use_rgtsvm){
message(idx);
dREG_bed_ext_part<-dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),]
run_dREG_HD_pred(gdm=gdm, bed= dREG_bed_ext_part,bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus, model= model,total= total, temp.bg= temp.bg, ncores= ncores, use_rgtsvm= use_rgtsvm)
rm(list=ls());
gc(verbose=TRUE, reset=TRUE);
return(NULL);
}
temp.bg=tempfile();
#blocks run sequentially here; parallelism happens inside run_dREG_HD_pred
lapply(c(1:blocks),FUN= cpu.fun,line.cutoff= line.cutoff, dREG_bed_ext= dREG_bed_ext,bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus, gdm=gdm, model=model,total=total, temp.bg= temp.bg,ncores= ncores, use_rgtsvm= use_rgtsvm)
#Step2: generate bigwig file from the accumulated bedGraph predictions
bw.filename<-paste(bed_path,"_imputedDnase.bw",sep="")
tobigwig(filename=bw.filename, temp.bg = temp.bg, chromInfo= chromInfo)
unlink(temp.bg);
rm(model);
gc(verbose=TRUE, reset=TRUE);
#Step three generate dREG HD peaks
imputed_dnase.bw <- load.bigWig(bw.filename);
returned_pred_data <- bed.step.bpQuery.bigWig(bw= imputed_dnase.bw,bed= dREG_bed_ext,step=1);
unload.bigWig(imputed_dnase.bw);
#split the per-region signal list into ncores roughly equal chunks
line.cutoff=as.integer(seq(from=0, to=length(returned_pred_data),length.out= ncores+1 ))
returned_pred_data_list <- list();
for( i in 1:(length(line.cutoff)-1) )
returned_pred_data_list[[i]] <- returned_pred_data[ c((line.cutoff[i]+1):line.cutoff[i+1])];
rm(returned_pred_data);
gc(verbose=TRUE, reset=TRUE);
cpu.fun<-function( pred_data, knots.ratio, background){
rbindlist(lapply( pred_data, FUN= split_peak, knots.ratio= knots.ratio, background= background));
}
#relaxed mode
message("calling peaks under relaxed condition");
# snowfall is used instead of mclapply to avoid ncores-fold memory blow-up
sfInit(parallel = TRUE, cpus = ncores, type = "SOCK" );
dREG_HD_bed <- rbindlist( sfLapply( x = returned_pred_data_list,fun= cpu.fun, knots.ratio= 397.4,background=0.02 ) );
sfStop();
dREG_HD.filename <- paste(bed_path,"_dREG_HD_relaxed.bed",sep="");
#keep only peaks overlapping an original dREG site, then merge overlaps
dREG_HD_bed.intersected<-bedTools.2in ("bedtools intersect -u", dREG_HD_bed, dREG_bed);
dREG_HD_bed.intersected.merged<-bedTools.merge(dREG_HD_bed.intersected);
write.table(dREG_HD_bed.intersected.merged,file=dREG_HD.filename,sep="\t",row.names=FALSE,quote=FALSE,col.names=FALSE);
rm(list=c("dREG_HD_bed","dREG_HD_bed.intersected","dREG_HD_bed.intersected.merged"));
gc(verbose=TRUE, reset=TRUE);
message("calling peaks under stringent condition");
#stringent mode
sfInit(parallel = TRUE, cpus = ncores, type = "SOCK" );
dREG_HD_bed <- rbindlist( sfLapply(x = returned_pred_data_list,fun= cpu.fun, knots.ratio= 1350, background= 0.02723683 ) );
sfStop();
dREG_HD.filename <- paste(bed_path,"_dREG_HD_stringent.bed",sep="");
dREG_HD_bed.intersected<-bedTools.2in ("bedtools intersect -u", dREG_HD_bed, dREG_bed);
dREG_HD_bed.intersected.merged<-bedTools.merge(dREG_HD_bed.intersected);
write.table(dREG_HD_bed.intersected.merged,file=dREG_HD.filename,sep="\t",row.names=FALSE,quote=FALSE,col.names=FALSE);
}
| /dREG.HD/R/dREG_HD_GPU_working.R | no_license | Danko-Lab/dREG.HD | R | false | false | 14,681 | r | #' @importFrom bigWig bed.step.bpQuery.bigWig load.bigWig unload.bigWig
#' @importFrom dREG genomic_data_model
#' @importFrom data.table rbindlist
#' @importFrom snow getMPIcluster
#' @importFrom snowfall sfExport sfInit sfLapply sfRemoveAll sfStop
#utility functions
#look up the length of one chromosome in a chromInfo table
#(column 1 = chromosome name, column 2 = chromosome length)
get.chrom.length<-function(chr, chrom.info.table){
  is.target <- chrom.info.table[,1]==chr
  return(chrom.info.table[is.target,2])
}
#extract to construct bedgraph and then to bigwig
#Convert a bedGraph-formatted temp file into a bigWig file.
#  filename  - output bigWig path
#  temp.bg   - path to the (unsorted) bedGraph text file
#  chromInfo - path to a two-column chromosome-size table
#Relies on the external tools `sort` and `bedGraphToBigWig` being on PATH;
#failures of either command are deliberately non-fatal (wrapped in try()).
tobigwig<-function(filename, temp.bg, chromInfo){
#get bedgraph
bedgraph.sorted=tempfile()
options(scipen =99) # not to use scientific notation when writing out
#write bedgraph formatted dataframes to tempfile
#write.table(bedgraph,file= bedgraph.file,quote=F,sep="\t",col.names=F,row.names=F)
#sort by chromosome, then numerically by start, and drop duplicate lines
command=paste("LC_ALL=C sort -k1,1 -k2,2n", temp.bg, "| uniq >", bedgraph.sorted,sep=" ")
#browser()
try(system(command))
command=paste("bedGraphToBigWig", bedgraph.sorted, chromInfo ,filename, sep=" ")
#cat(command,"\n")
try(system(command))
#unlink(bedgraph.file)
unlink(bedgraph.sorted)
}
#bedtools merge: sort a bed data.frame and collapse overlapping intervals
#by piping it through the external `mergeBed` tool; returns the merged
#intervals as a data.frame
bedTools.merge<-function(bed)
{
  in.file = tempfile()
  out.file = tempfile()
  options(scipen =99) # not to use scientific notation when writing out
  #dump the bed data.frame to a tab-separated tempfile
  write.table(bed,file=in.file,quote=F,sep="\t",col.names=F,row.names=F)
  #sort, merge, and capture the result in a second tempfile
  command=paste("LC_ALL=C sort -k1,1 -k2,2n",in.file,"| mergeBed -i stdin >",out.file,sep=" ")
  try(system(command))
  merged=read.table(out.file,header=F)
  unlink(in.file)
  unlink(out.file)
  return(merged)
}
#run an arbitrary two-input bedtools command (e.g. "bedtools intersect")
#on two bed data.frames and return the tool's output as a data.frame
bedTools.2in<-function(functionstring="bedIntersect",bed1,bed2,opt.string="")
{
  a.file = tempfile()
  b.file = tempfile()
  out.file = tempfile()
  options(scipen =99) # not to use scientific notation when writing out
  #dump both bed data.frames to tab-separated tempfiles
  write.table(bed1,file=a.file,quote=F,sep="\t",col.names=F,row.names=F)
  write.table(bed2,file=b.file,quote=F,sep="\t",col.names=F,row.names=F)
  #assemble and run the shell command, capturing output in a tempfile
  command=paste(functionstring,"-a",a.file,"-b",b.file,opt.string,">",out.file,sep=" ")
  try(system(command))
  res=read.table(out.file,header=F)
  unlink(a.file)
  unlink(b.file)
  unlink(out.file)
  return(res)
}
#dREG HD peak calling functions
#scan for continuous peak runs
#Given a sorted vector of candidate positions, group maximal runs of
#consecutive integers and return one row per run:
#cbind(first position, last position + 1).
#An empty input yields a 0-row, 2-column matrix.
scan_for_peak<-function(potential.peak.positions){
  n.pos <- length(potential.peak.positions)
  if (n.pos == 0) return(cbind(numeric(0), numeric(0)))
  #a gap after index i means positions i and i+1 are not consecutive
  gap.after <- which(diff(potential.peak.positions) != 1)
  run.starts <- c(1, gap.after + 1)
  run.ends <- c(gap.after, n.pos)
  return(cbind(potential.peak.positions[run.starts],
               potential.peak.positions[run.ends] + 1))
}
#Filter peaks by the first-derivative "change of sign" criterion.
#A peak (row of `peaks`: start/end in genome coordinates) is kept when the
#spline's first derivative changes sign (or touches zero) between some
#adjacent pair of positions inside it.
#  peaks     - numeric matrix of candidate peaks, one row per peak
#  spl.model - smooth.spline fit over 1-based positions within the region
#  start     - genome coordinate of spline position 1, used to translate
#              genome coordinates into spline coordinates
#Returns the surviving rows as a 2-column matrix.
#Fixes the 1:nrow(peaks) footgun (zero-row input no longer iterates) and
#replaces O(n^2) vector growth with vapply().
D1.criterion.filter<-function(peaks, spl.model, start){
  peak.pass.or.not <- vapply(seq_len(nrow(peaks)), function(i) {
    spline.positions <- (peaks[i,1]-start+2):(peaks[i,2]-start+1)
    #the peak passes if any adjacent pair (j-1, j) brackets a sign change
    any(vapply(spline.positions, function(j) {
      D1.val <- predict(spl.model, x=c(j, (j-1)), deriv=1)$y
      prod(D1.val) <= 0
    }, logical(1)))
  }, logical(1))
  return(matrix(peaks[peak.pass.or.not,], byrow=FALSE, ncol=2))
}
#Call peaks within one predicted region by spline-smoothing the signal.
#  predicted_data - numeric vector of per-base imputed signal; must carry
#                   "chrom", "start" and "end" attributes locating the region
#  knots.ratio    - region length per spline degree of freedom (larger =>
#                   smoother fit => fewer, broader peaks)
#  background     - minimum raw signal a position must exceed
#Returns a data.frame (chrom, start, end) of peaks, or NULL (implicitly)
#when no candidate peak survives filtering.
split_peak<-function(predicted_data, knots.ratio, background){
#print(attr(predicted_data,"start"))
full.vec<-seq(attr(predicted_data,"start"),(attr(predicted_data,"end")-1))
positions.ONEbased<-full.vec-full.vec[1]+1
#chose degree of freedom proportional to peak length
df<-as.integer(length(full.vec)/knots.ratio)+3
#print(length(positions.ONEbased))
#print(length(predicted_data))
dnase.spl<-smooth.spline(positions.ONEbased,predicted_data,df=df)
#NOTE(review): D0.val is computed but never used below -- the background
#filter uses the raw predicted_data instead; confirm this is intentional
D0.val<-predict(dnase.spl,x=c(1:length(full.vec)))$y
D2.val<-predict(dnase.spl,x=c(1:length(full.vec)),deriv=2)$y
#get the positions below 2nd derivative cutoff (concave part of the curve)
potential.peak.positions.D2<-full.vec[D2.val<0]
#get the positions above the Dnase value background level
potential.peak.positions.D0 <- full.vec[predicted_data> background]
#candidates must satisfy both the concavity and the background criterion
potential.peak.positions<-intersect (potential.peak.positions.D2, potential.peak.positions.D0)
#scan for continuous peak that passes minimun peak length defined
peaks<-scan_for_peak(potential.peak.positions)
#further filtering the peak using "change of sign criterion" of 1st order derivative
if(!is.null(peaks)){
if(nrow(peaks)!=0){
peaks.D1.filtered <- D1.criterion.filter(peaks, dnase.spl, start=attr(predicted_data,"start"))
peak.dataframe<-cbind.data.frame(rep(attr(predicted_data,"chrom"), nrow(peaks.D1.filtered)), peaks.D1.filtered)
return (peak.dataframe)
}
}
}
#for regions of outlier (too high) PRO-seq reads, rarely found in training
#examples, scale the whole vector down so its maximum matches the reference
#maximum seen in training; inputs already within range pass through unchanged.
#This aims at fixing the chunk region in prediction.
fix_distribution<-function(dat){
  #training.max reference determined by 150K+ training examples over true +ve
  #dREG regions (both dnase and grocap +ve): 90% percentile of
  #max(training example input vectors)
  training.max <- 4.341949
  observed.max <- max(dat)
  if (observed.max <= training.max) {
    return(dat)
  }
  return(dat / (observed.max / training.max))
}
#dREG_HD_pred functions
#Build the feature matrix for one bed interval by querying read counts from
#the plus/minus bigWig files through dREG's C entry point.
#  bed_line     - one row of a bed data.frame (chrom, start, end)
#  zoom         - list(window sizes, half window counts) from the genomic
#                 data model
#  bigwig_plus  - path to the plus-strand bigWig
#  bigwig_minus - path to the minus-strand bigWig
#  total        - total read count used for depth normalisation
#Returns a data.frame: chrom, position, position + 1, feature columns.
dREG_HD_get_dat <-function( bed_line, zoom, bigwig_plus, bigwig_minus,total){
chrom<-bed_line[[1]]
start<-as.integer(bed_line[[2]])
#bed ends are exclusive, so the last queried base is end - 1
end<-as.integer(bed_line[[3]])-1
positions<-c(start:end)
#scaling using total read depth
dat_unscaled <- .Call("get_genomic_data_R", as.character(rep(chrom,length(positions))), as.integer(positions), as.character(bigwig_plus), as.character(bigwig_minus), zoom, FALSE, PACKAGE= "dREG")
#reads-per-million style normalisation by total depth
dat_unscaled <-unlist (dat_unscaled)/(total/1E6)
#reshape to one row per position; NOTE(review): cbind(rep(), ...) binds
#nothing and appears to be a no-op -- confirm it can be dropped
dat<- cbind(rep(),t(matrix(dat_unscaled, ncol=NROW(positions))))
#pull outlier regions back into the training distribution
dat<-fix_distribution(dat)
stopifnot(nrow(dat)==length(positions))
cbind.data.frame(chrom, positions, positions+1, dat)
}
#@param bed is a data.frame from the block of bed_file
#
#Impute DNase signal for every base of `bed` and append predictions to the
#bedGraph file `temp.bg`.  Feature matrices are built in parallel chunks on
#a snowfall SOCK cluster; the SVM prediction runs on GPU via Rgtsvm when
#use_rgtsvm is TRUE, otherwise inside each worker via e1071.  The function's
#useful effect is the rows appended to temp.bg; it returns NULL.
run_dREG_HD_pred<-function(gdm, bed,bigwig_plus,bigwig_minus, model,total, temp.bg, ncores, use_rgtsvm) {
stopifnot(NROW(gdm@window_sizes) == NROW(gdm@half_nWindows))
zoom<- list(as.integer(gdm@window_sizes), as.integer(gdm@half_nWindows))
#never use more chunks than there are bed rows
if (nrow(bed)<ncores) blocks=nrow(bed)
else blocks= ncores
#chunk i covers bed rows (line.cutoff[i]+1)..line.cutoff[i+1]
line.cutoff<-as.integer(seq(from=0, to=nrow(bed),length.out= blocks+1))
if(use_rgtsvm){
#workers only build the feature matrix; prediction happens on the GPU below
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext, zoom, bigwig_plus, bigwig_minus,total){
requireNamespace("dREG")
do.call(rbind.data.frame,apply(dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),],MARGIN=1,FUN= dREG_HD_get_dat,zoom= zoom, bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus,total= total))
}
sfInit(parallel = TRUE, cpus = blocks, type = "SOCK" )
#NOTE(review): the data is also passed explicitly via sfLapply arguments
#below, so this export may be redundant -- confirm before removing
sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total");
#sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total","dREG_HD_get_dat","fix_distribution");
dat<-do.call(rbind.data.frame,sfLapply(x=1:blocks,fun= cpu.fun, line.cutoff= line.cutoff, dREG_bed_ext= bed,zoom= zoom,bigwig_plus = bigwig_plus, bigwig_minus = bigwig_minus, total = total))
sfStop()
#sfRemoveAll()
#first three columns are chrom/start/end, the rest are features
pos<-dat[,1:3]
ret <- Rgtsvm::predict.gtsvm(model,dat[,4:ncol(dat)])
rm(dat)
gc(verbose=TRUE, reset=TRUE)
#every base of every interval must have received a prediction
stopifnot(nrow(pos)==sum(bed$V3-bed$V2))
options(scipen =99)
write.table(cbind.data.frame(pos,ret),file=temp.bg,quote=F,sep="\t",col.names=F,row.names=F,append = TRUE)
}
else{
#CPU path: each worker both builds the features and runs the prediction
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext, zoom, bigwig_plus, bigwig_minus,total,model) {
requireNamespace("dREG")
requireNamespace("e1071")
dat <-do.call(rbind.data.frame,apply(dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),],MARGIN=1,FUN= dREG_HD_get_dat,zoom= zoom, bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus,total= total))
pos<-dat[,1:3]
ret <- predict(model,dat[,4:ncol(dat)])
rm(dat)
gc(verbose=TRUE, reset=TRUE)
cbind.data.frame(pos,ret)
}
sfInit(parallel = TRUE, cpus = blocks, type = "SOCK" )
sfExport("blocks","line.cutoff","bed","zoom","bigwig_plus","bigwig_minus","total")
dat<-do.call(rbind.data.frame,sfLapply(x=1:blocks,fun= cpu.fun, line.cutoff= line.cutoff, dREG_bed_ext= bed,zoom= zoom,bigwig_plus = bigwig_plus, bigwig_minus = bigwig_minus, total = total,model=model))
sfStop()
#NOTE(review): sfRemoveAll() after sfStop() -- the cluster is already
#stopped at this point; confirm this call still does anything
sfRemoveAll()
stopifnot(nrow(dat)==sum(bed$V3-bed$V2))
options(scipen =99)
write.table(dat,file=temp.bg,quote=F,sep="\t",col.names=F,row.names=F,append = TRUE)
}
gc(verbose=TRUE, reset=TRUE)
return(NULL)
}
#Split a bed data.frame into blocks that each fit in the GPU memory budget.
#Returns a vector of row-index cutoffs: block i covers rows
#(cutoff[i]+1)..cutoff[i+1].
#Fixes two defects of the previous version: an interval larger than the
#budget by itself no longer causes an infinite loop (it now gets its own
#block), and the row index can no longer run past nrow(bed) (which read an
#out-of-range row and produced NA arithmetic).
split.bed.evenly<-function(bed){
  #memory budget: 1.8 GB of doubles, 123 feature values per example
  GPU<-1.8
  tot.num.examples <-as.integer(GPU*1024*1024*1024/8/123)
  line.cutoff<-c(0)
  current.row<-1
  n.rows <- nrow(bed)
  while(current.row <= n.rows){
    #always take at least one interval per block, even an oversized one
    current.num.examples <- bed[current.row,3]-bed[current.row,2]
    current.row <- current.row+1
    #greedily add following intervals while the block stays within budget
    while(current.row <= n.rows &&
          current.num.examples + (bed[current.row,3]-bed[current.row,2]) <= tot.num.examples){
      current.num.examples <- current.num.examples + (bed[current.row,3]-bed[current.row,2])
      current.row <- current.row+1
    }
    line.cutoff<-c(line.cutoff,current.row-1)
  }
  return(line.cutoff)
}
#Derive a chromosome-size table (V1 = name, V2 = length) from the headers of
#the two strand bigWig objects; when both strands report a chromosome, the
#larger size wins.
get.chromosome.info <- function(bw.plus, bw.minus)
{
  chrom.tab <- rbind(
    cbind(bw.plus$chroms, bw.plus$chromSizes),
    cbind(bw.minus$chroms, bw.minus$chromSizes)
  )
  chrom.names <- unique(chrom.tab[,1])
  chrom.sizes <- vapply(
    chrom.names,
    function(chr) max(as.numeric(chrom.tab[chrom.tab[,1]==chr,2])),
    numeric(1)
  )
  return(data.frame(V1=chrom.names, V2=unname(chrom.sizes)))
}
#main function runs prediction for blocks of dREG_bed in parallel
#dREG_HD<-function(bed_path, bigwig_plus, bigwig_minus, #chromInfo, model, ncores=1, use_rgtsvm=FALSE){
#' @export
dREG_HD<-function(bed_path, bigwig_plus, bigwig_minus, model, ncores=1, use_rgtsvm=FALSE){
#Impute per-base DNase-I signal over (extended) dREG sites, write it out as
#a bigWig, then call dREG-HD peaks at a relaxed and a stringent setting.
#  bed_path     - path to the dREG site bed file; outputs are written next
#                 to it (<bed_path>_imputedDnase.bw, *_dREG_HD_relaxed.bed,
#                 *_dREG_HD_stringent.bed)
#  bigwig_plus / bigwig_minus - strand-specific read bigWig paths
#  model        - trained imputation SVM (e1071, or Rgtsvm when use_rgtsvm)
#  ncores       - number of parallel workers
#  use_rgtsvm   - run the SVM prediction on GPU via Rgtsvm
#Step1: imputing Dnase-I signal in parallel mode
message("running dREG-HD on ", bed_path);
bw.plus <- load.bigWig(bigwig_plus);
bw.minus <- load.bigWig(bigwig_minus);
#total read depth used for normalisation
total<-abs(bw.plus$mean*bw.plus$basesCovered)+abs(bw.minus$mean*bw.minus$basesCovered);
#extract chromosome sizes while the bigWig handles are still open;
#previously this ran after unload.bigWig had already released the handles
chrom.info.table <- get.chromosome.info( bw.plus, bw.minus );
unload.bigWig(bw.plus);
unload.bigWig(bw.minus);
ext=200 #impute on extended dREG site
dREG_bed<-read.table(bed_path);
chromInfo <- tempfile("chrom.info.");
write.table( chrom.info.table, file=chromInfo, quote=F, row.names=F, col.names=F, sep="\t");
#extend each dREG site by `ext` bp, clipped to [0, chromosome length]
dREG_bed_ext<-cbind.data.frame(dREG_bed$V1, apply(cbind(0,(dREG_bed$V2-ext)), MARGIN=1, FUN=max), apply(cbind(sapply(as.character(dREG_bed$V1), FUN= get.chrom.length, chrom.info.table),(dREG_bed$V3+ext)), MARGIN=1, FUN=min));
dREG_bed_ext<-bedTools.merge(dREG_bed_ext);
if(use_rgtsvm) class(model)<-"gtsvm"
gdm<-genomic_data_model(60,30);
#split the merged sites into GPU-memory-sized blocks
line.cutoff<-split.bed.evenly(dREG_bed_ext);
blocks<-length(line.cutoff)-1;
message("number of total blocks=",blocks)
cpu.fun<-function(idx, line.cutoff, dREG_bed_ext,bigwig_plus, bigwig_minus, gdm, model,total, temp.bg, ncores, use_rgtsvm){
message(idx);
dREG_bed_ext_part<-dREG_bed_ext[c((line.cutoff[idx]+1):line.cutoff[idx+1]),]
run_dREG_HD_pred(gdm=gdm, bed= dREG_bed_ext_part,bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus, model= model,total= total, temp.bg= temp.bg, ncores= ncores, use_rgtsvm= use_rgtsvm)
rm(list=ls());
gc(verbose=TRUE, reset=TRUE);
return(NULL);
}
temp.bg=tempfile();
#blocks run sequentially here; parallelism happens inside run_dREG_HD_pred
lapply(c(1:blocks),FUN= cpu.fun,line.cutoff= line.cutoff, dREG_bed_ext= dREG_bed_ext,bigwig_plus= bigwig_plus, bigwig_minus= bigwig_minus, gdm=gdm, model=model,total=total, temp.bg= temp.bg,ncores= ncores, use_rgtsvm= use_rgtsvm)
#Step2: generate bigwig file from the accumulated bedGraph predictions
bw.filename<-paste(bed_path,"_imputedDnase.bw",sep="")
tobigwig(filename=bw.filename, temp.bg = temp.bg, chromInfo= chromInfo)
unlink(temp.bg);
rm(model);
gc(verbose=TRUE, reset=TRUE);
#Step three generate dREG HD peaks
imputed_dnase.bw <- load.bigWig(bw.filename);
returned_pred_data <- bed.step.bpQuery.bigWig(bw= imputed_dnase.bw,bed= dREG_bed_ext,step=1);
unload.bigWig(imputed_dnase.bw);
#split the per-region signal list into ncores roughly equal chunks
line.cutoff=as.integer(seq(from=0, to=length(returned_pred_data),length.out= ncores+1 ))
returned_pred_data_list <- list();
for( i in 1:(length(line.cutoff)-1) )
returned_pred_data_list[[i]] <- returned_pred_data[ c((line.cutoff[i]+1):line.cutoff[i+1])];
rm(returned_pred_data);
gc(verbose=TRUE, reset=TRUE);
cpu.fun<-function( pred_data, knots.ratio, background){
rbindlist(lapply( pred_data, FUN= split_peak, knots.ratio= knots.ratio, background= background));
}
#relaxed mode
message("calling peaks under relaxed condition");
# snowfall is used instead of mclapply to avoid ncores-fold memory blow-up
sfInit(parallel = TRUE, cpus = ncores, type = "SOCK" );
dREG_HD_bed <- rbindlist( sfLapply( x = returned_pred_data_list,fun= cpu.fun, knots.ratio= 397.4,background=0.02 ) );
sfStop();
dREG_HD.filename <- paste(bed_path,"_dREG_HD_relaxed.bed",sep="");
#keep only peaks overlapping an original dREG site, then merge overlaps
dREG_HD_bed.intersected<-bedTools.2in ("bedtools intersect -u", dREG_HD_bed, dREG_bed);
dREG_HD_bed.intersected.merged<-bedTools.merge(dREG_HD_bed.intersected);
write.table(dREG_HD_bed.intersected.merged,file=dREG_HD.filename,sep="\t",row.names=FALSE,quote=FALSE,col.names=FALSE);
rm(list=c("dREG_HD_bed","dREG_HD_bed.intersected","dREG_HD_bed.intersected.merged"));
gc(verbose=TRUE, reset=TRUE);
message("calling peaks under stringent condition");
#stringent mode
sfInit(parallel = TRUE, cpus = ncores, type = "SOCK" );
dREG_HD_bed <- rbindlist( sfLapply(x = returned_pred_data_list,fun= cpu.fun, knots.ratio= 1350, background= 0.02723683 ) );
sfStop();
dREG_HD.filename <- paste(bed_path,"_dREG_HD_stringent.bed",sep="");
dREG_HD_bed.intersected<-bedTools.2in ("bedtools intersect -u", dREG_HD_bed, dREG_bed);
dREG_HD_bed.intersected.merged<-bedTools.merge(dREG_HD_bed.intersected);
write.table(dREG_HD_bed.intersected.merged,file=dREG_HD.filename,sep="\t",row.names=FALSE,quote=FALSE,col.names=FALSE);
}
|
#' Read in csv-File as dataframe
#'
#' This function checks if a given csv-file exists and if so reads it in
#' silently as a dataframe.
#'
#' @param filename A character string giving the path to the file to read in
#'
#' @return This function returns the data from the given file as a tibble
#'   (tbl_df) data frame
#'
#' @note Errors can result from the given filename not being a valid path to
#'   an existing file or the file not being encoded in the right format
#'
#' @importFrom readr read_csv
#' @importFrom dplyr as_tibble
#'
#' @export
fars_read <- function(filename) {
  # Fail fast with a clear message instead of letting read_csv error cryptically.
  if(!file.exists(filename))
    stop("file '", filename, "' does not exist")
  # suppressMessages() hides read_csv's column-specification chatter.
  data <- suppressMessages({
    readr::read_csv(filename, progress = FALSE)
  })
  # dplyr::tbl_df() is deprecated; as_tibble() (re-exported by dplyr from
  # tibble) is its documented replacement and returns the same tbl_df-classed
  # object, so callers are unaffected.
  dplyr::as_tibble(data)
}
#' Generate filename for given year
#'
#' Builds the name of the compressed accident-data file that corresponds to
#' the supplied year.
#'
#' @param year The year for which to generate the filename, as an integer or
#'   any type that can be meaningfully converted with as.integer
#'
#' @return The filename in the format "accident_YEAR.csv.bz2", with YEAR
#'   replaced by the given year
#'
#' @examples
#' make_filename(1991)
#' @export
make_filename <- function(year) {
  # Coerce first so "2005" and 2005.0 both yield the same name as 2005L.
  yr <- as.integer(year)
  paste0("accident_", yr, ".csv.bz2")
}
#' Read in data for several years
#'
#' Reads the accident data for multiple years as a list of data frames.
#' Years whose file cannot be read are skipped with a warning and yield NULL.
#'
#' @param years Collection of years as vector or list where each element is
#'   in integer format or can be converted with as.integer
#'
#' @return A list with one element per requested year: a data frame with
#'   columns MONTH and year, or NULL if that year's file could not be read
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr select
#'
#' @note Errors in this function can result from errors thrown by the
#'   fars_read function or an invalid years parameter being passed
#'
#' @export
fars_read_years <- function(years) {
  # Helper: load one year's file and reduce it to a MONTH/year table.
  read_one_year <- function(year) {
    path <- make_filename(year)
    tryCatch(
      {
        monthly <- dplyr::mutate(fars_read(path), year = year)
        dplyr::select(monthly, MONTH, year)
      },
      error = function(e) {
        # A missing/unreadable file becomes a warning plus NULL, so the
        # remaining years are still processed.
        warning("invalid year: ", year)
        NULL
      }
    )
  }
  lapply(years, read_one_year)
}
#' Summarize Monthly Accident Number for each year
#'
#' For a given collection of years this function reads in the accident data
#' and returns a summary which contains one row for each calendar month, with
#' the number of accidents in that month for the different years in separate
#' columns.
#'
#' @param years Collection of years as vector or list where each element is
#'   in integer format or can be converted with as.integer
#'
#' @return A data frame with one row per calendar month (up to 12) and one
#'   count column per year that could be read, plus the MONTH column.
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr group_by
#' @importFrom dplyr summarize
#' @importFrom dplyr n
#' @importFrom tidyr spread
#'
#' @export
fars_summarize_years <- function(years) {
  dat_list <- fars_read_years(years)
  # Years that failed to load come back as NULL and are silently dropped by
  # bind_rows().
  dat <- dplyr::bind_rows(dat_list)
  grouped <- dplyr::group_by(dat, year, MONTH)
  # n() and %>% were previously used unqualified without being imported,
  # which fails when the package is loaded without attaching dplyr/magrittr.
  # The pipeline is therefore written as sequential namespaced calls and n()
  # is qualified (and imported above).
  counts <- dplyr::summarize(grouped, n = dplyr::n())
  tidyr::spread(counts, year, n)
}
#' Visualize Accidents on State Map
#'
#' For a selected year and state the locations of all accidents are shown on
#' the map of that state.
#'
#' @param state.num Valid state number as integer selecting which state to plot
#' @param year Year for which to plot the data as integer
#'
#' @return Returns plot displaying longitude and latitude of the accidents on
#'   the map (drawn as a side effect); invisibly NULL when there is nothing
#'   to plot
#'
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @export
fars_map_state <- function(state.num, year) {
  filename <- make_filename(year)
  data <- fars_read(filename)
  state.num <- as.integer(state.num)
  # Validate the state code against the codes actually present in this
  # year's data before filtering.
  if(!(state.num %in% unique(data$STATE)))
    stop("invalid STATE number: ", state.num)
  data.sub <- dplyr::filter(data, STATE == state.num)
  if(nrow(data.sub) == 0L) {
    message("no accidents to plot")
    return(invisible(NULL))
  }
  # Out-of-range coordinates are blanked to NA so they do not distort the
  # plot window; presumably values > 900 / > 90 are the dataset's
  # "unknown location" sentinels -- TODO confirm against the FARS codebook.
  is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
  is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
  with(data.sub, {
    maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
              xlim = range(LONGITUD, na.rm = TRUE))
    # pch = 46 draws each accident as a single tiny dot.
    graphics::points(LONGITUD, LATITUDE, pch = 46)
  })
}
| /R/fars_functions.R | no_license | ckoopmann/fars | R | false | false | 4,692 | r | #' Read in csv-File as dataframe
#'
#' This function checks if a given csv-file exists and if so reads it in silently as a dataframe
#'
#' @param filename A character string giving the path to the file to read in
#'
#' @return This function returns the data from the given file as a dataframe
#'
#' @note Errors can result from the given filename not being a valid path to an existing file or the file not being encoded in the right format
#'
#' @importFrom readr read_csv
#' @importFrom dplyr tbl_df
#'
#' @export
fars_read <- function(filename) {
if(!file.exists(filename))
stop("file '", filename, "' does not exist")
data <- suppressMessages({
readr::read_csv(filename, progress = FALSE)
})
dplyr::tbl_df(data)
}
#' Generate filename for given year
#'
#' This function determines the filename including extensions of the accident data
#' of the given year
#'
#' @param year The year for which to generate the filename as integer or type that can be meaningfully converted with as.integer
#'
#' @return This function returns the filename in the format "accident_YEAR.csv.bz2" with YEAR replaced with the given year
#'
#' @examples
#' make_filename(1991)
#' @export
make_filename <- function(year) {
year <- as.integer(year)
sprintf("accident_%d.csv.bz2", year)
}
#' Read in data for several years
#'
#' This function reads in the accident data for multiple years as a list of dataframes. If one of the years can not be
#' read in the function skips that year with a warning
#'
#' @param years Collection of years as vector or list where each element is in integer format or can be converted with as.integer
#'
#' @return This function returns a list of dataframes where each dataframe contains the data for one year
#'
#' @importFrom dplyr mutate
#' @importFrom dplyr select
#'
#' @note Errors in this function can result from errors thrown by the fars_read function or invalid years parameter being passed
#'
#' @export
fars_read_years <- function(years) {
lapply(years, function(year) {
file <- make_filename(year)
tryCatch({
dat <- fars_read(file)
dplyr::mutate(dat, year = year) %>%
dplyr::select(MONTH, year)
}, error = function(e) {
warning("invalid year: ", year)
return(NULL)
})
})
}
#' Summarize Monthly Accident Number for each year
#'
#' For a given collection of years this function reads in the accident data and then returns a summary of the data
#' which contains one row for each calendar month containing the number of accidents in that month for different years
#' in separate columns
#'
#' @param years Collection of years as vector or list where each element is in integer format or can be converted with as.integer
#'
#' @return This function returns a data frame with dimension (12,length(years)).
#'
#' @importFrom dplyr bind_rows
#' @importFrom dplyr group_by
#' @importFrom dplyr summarize
#' @importFrom tidyr spread
#'
#' @export
fars_summarize_years <- function(years) {
dat_list <- fars_read_years(years)
dplyr::bind_rows(dat_list) %>%
dplyr::group_by(year, MONTH) %>%
dplyr::summarize(n = n()) %>%
tidyr::spread(year, n)
}
#' Visualize Accidents on State Map
#'
#' For a selected year and state the locations of all accidents are shown on the map of that state
#'
#' @param state.num Valid State number as integer selecting which state to plot
#' @param year Year for which to plot the data as integer
#'
#' @return Returns plot displaying longitude and latitude of the accidents on the map
#'
#' @importFrom maps map
#' @importFrom graphics points
#'
#' @export
fars_map_state <- function(state.num, year) {
filename <- make_filename(year)
data <- fars_read(filename)
state.num <- as.integer(state.num)
if(!(state.num %in% unique(data$STATE)))
stop("invalid STATE number: ", state.num)
data.sub <- dplyr::filter(data, STATE == state.num)
if(nrow(data.sub) == 0L) {
message("no accidents to plot")
return(invisible(NULL))
}
is.na(data.sub$LONGITUD) <- data.sub$LONGITUD > 900
is.na(data.sub$LATITUDE) <- data.sub$LATITUDE > 90
with(data.sub, {
maps::map("state", ylim = range(LATITUDE, na.rm = TRUE),
xlim = range(LONGITUD, na.rm = TRUE))
graphics::points(LONGITUD, LATITUDE, pch = 46)
})
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mq_operations.R
\name{mq_create_broker}
\alias{mq_create_broker}
\title{Creates a broker}
\usage{
mq_create_broker(
AuthenticationStrategy = NULL,
AutoMinorVersionUpgrade,
BrokerName,
Configuration = NULL,
CreatorRequestId = NULL,
DeploymentMode,
EncryptionOptions = NULL,
EngineType,
EngineVersion,
HostInstanceType,
LdapServerMetadata = NULL,
Logs = NULL,
MaintenanceWindowStartTime = NULL,
PubliclyAccessible,
SecurityGroups = NULL,
StorageType = NULL,
SubnetIds = NULL,
Tags = NULL,
Users
)
}
\arguments{
\item{AuthenticationStrategy}{Optional. The authentication strategy used to secure the broker. The
default is SIMPLE.}
\item{AutoMinorVersionUpgrade}{[required] Enables automatic upgrades to new minor versions for brokers, as new
versions are released and supported by Amazon MQ. Automatic upgrades
occur during the scheduled maintenance window of the broker or after a
manual broker reboot. Set to true by default, if no value is specified.}
\item{BrokerName}{[required] Required. The broker's name. This value must be unique in your AWS
account, 1-50 characters long, must contain only letters, numbers,
dashes, and underscores, and must not contain white spaces, brackets,
wildcard characters, or special characters.}
\item{Configuration}{A list of information about the configuration.}
\item{CreatorRequestId}{The unique ID that the requester receives for the created broker. Amazon
MQ passes your ID with the API action. Note: We recommend using a
Universally Unique Identifier (UUID) for the creatorRequestId. You may
omit the creatorRequestId if your application doesn't require
idempotency.}
\item{DeploymentMode}{[required] Required. The broker's deployment mode.}
\item{EncryptionOptions}{Encryption options for the broker. Does not apply to RabbitMQ brokers.}
\item{EngineType}{[required] Required. The type of broker engine. Currently, Amazon MQ supports
ACTIVEMQ and RABBITMQ.}
\item{EngineVersion}{[required] Required. The broker engine's version. For a list of supported engine
versions, see \href{https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/}{Supported engines}.}
\item{HostInstanceType}{[required] Required. The broker's instance type.}
\item{LdapServerMetadata}{Optional. The metadata of the LDAP server used to authenticate and
authorize connections to the broker. Does not apply to RabbitMQ brokers.}
\item{Logs}{Enables Amazon CloudWatch logging for brokers.}
\item{MaintenanceWindowStartTime}{The parameters that determine the WeeklyStartTime.}
\item{PubliclyAccessible}{[required] Enables connections from applications outside of the VPC that hosts the
broker's subnets. Set to false by default, if no value is provided.}
\item{SecurityGroups}{The list of rules (1 minimum, 125 maximum) that authorize connections to
brokers.}
\item{StorageType}{The broker's storage type.}
\item{SubnetIds}{The list of groups that define which subnets and IP ranges the broker
can use from different Availability Zones. If you specify more than one
subnet, the subnets must be in different Availability Zones. Amazon MQ
will not be able to create VPC endpoints for your broker with multiple
subnets in the same Availability Zone. A SINGLE_INSTANCE deployment
requires one subnet (for example, the default subnet). An
ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two
subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no
subnet requirements when deployed with public accessibility. Deployment
without public accessibility requires at least one subnet.
If you specify subnets in a \href{https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html}{shared VPC}
for a RabbitMQ broker, the associated VPC to which the specified subnets
belong must be owned by your AWS account. Amazon MQ will not be able to
create VPC endpoints in VPCs that are not owned by your AWS account.}
\item{Tags}{Create tags when creating the broker.}
\item{Users}{[required] Required. The list of broker users (persons or applications) who can
access queues and topics. This value can contain only alphanumeric
characters, dashes, periods, underscores, and tildes (- . _ ~). This
value must be 2-100 characters long.
Amazon MQ for RabbitMQ
When you create an Amazon MQ for RabbitMQ broker, one and only one
administrative user is accepted and created when a broker is first
provisioned. All subsequent broker users are created by making RabbitMQ
API calls directly to brokers or via the RabbitMQ web console.}
}
\description{
Creates a broker. Note: This API is asynchronous.
See \url{https://www.paws-r-sdk.com/docs/mq_create_broker/} for full documentation.
}
\keyword{internal}
| /man/mq_create_broker.Rd | no_license | cran/paws.application.integration | R | false | true | 4,768 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mq_operations.R
\name{mq_create_broker}
\alias{mq_create_broker}
\title{Creates a broker}
\usage{
mq_create_broker(
AuthenticationStrategy = NULL,
AutoMinorVersionUpgrade,
BrokerName,
Configuration = NULL,
CreatorRequestId = NULL,
DeploymentMode,
EncryptionOptions = NULL,
EngineType,
EngineVersion,
HostInstanceType,
LdapServerMetadata = NULL,
Logs = NULL,
MaintenanceWindowStartTime = NULL,
PubliclyAccessible,
SecurityGroups = NULL,
StorageType = NULL,
SubnetIds = NULL,
Tags = NULL,
Users
)
}
\arguments{
\item{AuthenticationStrategy}{Optional. The authentication strategy used to secure the broker. The
default is SIMPLE.}
\item{AutoMinorVersionUpgrade}{[required] Enables automatic upgrades to new minor versions for brokers, as new
versions are released and supported by Amazon MQ. Automatic upgrades
occur during the scheduled maintenance window of the broker or after a
manual broker reboot. Set to true by default, if no value is specified.}
\item{BrokerName}{[required] Required. The broker's name. This value must be unique in your AWS
account, 1-50 characters long, must contain only letters, numbers,
dashes, and underscores, and must not contain white spaces, brackets,
wildcard characters, or special characters.}
\item{Configuration}{A list of information about the configuration.}
\item{CreatorRequestId}{The unique ID that the requester receives for the created broker. Amazon
MQ passes your ID with the API action. Note: We recommend using a
Universally Unique Identifier (UUID) for the creatorRequestId. You may
omit the creatorRequestId if your application doesn't require
idempotency.}
\item{DeploymentMode}{[required] Required. The broker's deployment mode.}
\item{EncryptionOptions}{Encryption options for the broker. Does not apply to RabbitMQ brokers.}
\item{EngineType}{[required] Required. The type of broker engine. Currently, Amazon MQ supports
ACTIVEMQ and RABBITMQ.}
\item{EngineVersion}{[required] Required. The broker engine's version. For a list of supported engine
versions, see \href{https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/}{Supported engines}.}
\item{HostInstanceType}{[required] Required. The broker's instance type.}
\item{LdapServerMetadata}{Optional. The metadata of the LDAP server used to authenticate and
authorize connections to the broker. Does not apply to RabbitMQ brokers.}
\item{Logs}{Enables Amazon CloudWatch logging for brokers.}
\item{MaintenanceWindowStartTime}{The parameters that determine the WeeklyStartTime.}
\item{PubliclyAccessible}{[required] Enables connections from applications outside of the VPC that hosts the
broker's subnets. Set to false by default, if no value is provided.}
\item{SecurityGroups}{The list of rules (1 minimum, 125 maximum) that authorize connections to
brokers.}
\item{StorageType}{The broker's storage type.}
\item{SubnetIds}{The list of groups that define which subnets and IP ranges the broker
can use from different Availability Zones. If you specify more than one
subnet, the subnets must be in different Availability Zones. Amazon MQ
will not be able to create VPC endpoints for your broker with multiple
subnets in the same Availability Zone. A SINGLE_INSTANCE deployment
requires one subnet (for example, the default subnet). An
ACTIVE_STANDBY_MULTI_AZ Amazon MQ for ActiveMQ deployment requires two
subnets. A CLUSTER_MULTI_AZ Amazon MQ for RabbitMQ deployment has no
subnet requirements when deployed with public accessibility. Deployment
without public accessibility requires at least one subnet.
If you specify subnets in a \href{https://docs.aws.amazon.com/vpc/latest/userguide/vpc-sharing.html}{shared VPC}
for a RabbitMQ broker, the associated VPC to which the specified subnets
belong must be owned by your AWS account. Amazon MQ will not be able to
create VPC endpoints in VPCs that are not owned by your AWS account.}
\item{Tags}{Create tags when creating the broker.}
\item{Users}{[required] Required. The list of broker users (persons or applications) who can
access queues and topics. This value can contain only alphanumeric
characters, dashes, periods, underscores, and tildes (- . _ ~). This
value must be 2-100 characters long.
Amazon MQ for RabbitMQ
When you create an Amazon MQ for RabbitMQ broker, one and only one
administrative user is accepted and created when a broker is first
provisioned. All subsequent broker users are created by making RabbitMQ
API calls directly to brokers or via the RabbitMQ web console.}
}
\description{
Creates a broker. Note: This API is asynchronous.
See \url{https://www.paws-r-sdk.com/docs/mq_create_broker/} for full documentation.
}
\keyword{internal}
|
###############################################
##
## DarkSky: RShiny ~ Server
##
## author: Ryan Johnson
## created: Aug 2019
##
###############################################
# Shiny server for the DarkSky city app.  Exposes the selected city's
# coordinates and Wikipedia page as reactives, and renders four leaflet maps
# plus a scraped city-description table.
# NOTE(review): relies on `city.data`, leaflet, the %>% pipe and rvest's
# read_html()/html_nodes() being provided by helpers.R / global setup --
# none of that is visible in this file.
shinyServer(function(input, output) {
  ## ========================================
  ## Reactive Values
  ## ========================================
  # Lat/lng of the currently selected city (only the two listed cities are
  # supported by the selector).
  map.coordinates <- reactive({
    switch(
      input$map.city,
      Seattle = list(lng = -122.3320708, lat = 47.6062095),
      `Santa Clara` = list(lng = -121.955238, lat = 37.354107)
    )
  })
  # Wikipedia article URL of the selected city; scraped in CityDescription.
  wikipedia.url <- reactive({
    switch(
      input$map.city,
      Seattle = "https://en.wikipedia.org/wiki/Seattle",
      `Santa Clara` = "https://en.wikipedia.org/wiki/Santa_Clara,_California"
    )
  })
  # Weather forecast lookup -- currently disabled (both provider calls are
  # commented out), so this reactive yields NULL.
  map.forecast <- reactive({
    # darksky::get_forecast_for(map.coordinates()$lat, map.coordinates()$lng, today())
    # owmr::get_current("Seattle", units = "metric")
  })
  ## ========================================
  ## Server Output
  ## ========================================
  # Street map centred on the selected city, with an inset mini-map.
  output$GeographyMap <- renderLeaflet({
    # map parameters
    map.coordinates <- map.coordinates()
    map.zoom <- 11
    # map setup
    m <- leaflet() %>% addTiles() %>%
      setView(lng = map.coordinates$lng, lat = map.coordinates$lat, zoom = map.zoom) %>%
      addMiniMap(zoomLevelOffset = -4)
    # render map
    m
  })
  # One clickable marker per US city in `city.data`.
  output$USMap <- renderLeaflet({
    # map setup
    m <- leaflet(city.data %>% dplyr::filter(Country == "US")) %>% addTiles() %>%
      addMarkers(lng = ~Long, lat = ~Lat, popup = ~City) # %>% suspendScroll()
    # render map
    m
  })
  # European cities (Germany, France, Netherlands) from `city.data`.
  output$EuropeMap <- renderLeaflet({
    # map setup
    m <- leaflet(city.data %>% dplyr::filter(Country %in% c("DE", "FR", "NL"))) %>% addTiles() %>%
      addMarkers(lng = ~Long, lat = ~Lat, popup = ~City)
    # render map
    m
  })
  # Same view as GeographyMap but without the mini-map; presumably intended
  # to get a topographic tile layer eventually -- TODO confirm.
  output$TopographyMap <- renderLeaflet({
    # map parameters
    map.coordinates <- map.coordinates()
    map.zoom <- 11
    # map setup
    m <- leaflet() %>% addTiles() %>%
      setView(lng = map.coordinates$lng, lat = map.coordinates$lat, zoom = map.zoom)
    # render map
    m
  })
  # Scrapes the Wikipedia infobox ("vcard" table) of the selected city and
  # tidies it into a two-column table for display.
  # NOTE(review): the slice()/cell indices below are hard-wired to the current
  # layout of each city's infobox and will silently break when Wikipedia
  # changes -- verify here first if the rendered table looks wrong.
  output$CityDescription <- renderTable({
    city <- input$map.city
    # scrape wikipedia
    info.box <- wikipedia.url() %>%
      read_html() %>%
      html_nodes("table.vcard") %>%
      html_table(header=FALSE, fill=TRUE) %>%
      .[[1]] %>% as.data.frame()
    clean.info.box <- switch(
      city,
      Seattle = info.box %>%
        # select non-image features
        dplyr::slice(c(5:6, 10:37, 39:nrow(info.box))) %>%
        # remove annotations
        dplyr::mutate(X1 = gsub("\\[.*\\]", "", X1),
                      X2 = gsub("\\[.*\\]", "", X2)) %>%
        do({
          # city nickname
          .[1,1] = 'Nickname'
          .[1,2] = gsub(".*:", "", .[1,2])
          # city motto
          .[2,1] = 'Motto'
          .[2,2] = gsub(".*:", "", .[2,2])
          # city government
          .[8,2] = ""
          # city area
          .[13,2] = ""
          # city population
          .[20,2] = ""
          # ----
          .
        }),
      `Santa Clara` = info.box %>%
        # select non-image features
        dplyr::slice(c(9:30, 32:nrow(info.box))) %>%
        # remove annotations
        dplyr::mutate(X1 = gsub("\\[.*\\]", "", X1),
                      X2 = gsub("\\[.*\\]", "", X2)) %>%
        do({
          # city government
          .[6,2] = ""
          # city area
          .[11,2] = ""
          # city population
          .[16,2] = ""
          # ----
          .
        })
    )
    # render table
    clean.info.box
  }, colnames = FALSE, hover = TRUE)
})
| /RCode/shiny-app/server.R | no_license | rtjohnson12/Otenki | R | false | false | 4,694 | r | ###############################################
##
## DarkSky: RShiny ~ Server
##
## author: Ryan Johnson
## created: Aug 2019
##
###############################################
shinyServer(function(input, output) {
## ========================================
## Reactive Values
## ========================================
map.coordinates <- reactive({
switch(
input$map.city,
Seattle = list(lng = -122.3320708, lat = 47.6062095),
`Santa Clara` = list(lng = -121.955238, lat = 37.354107)
)
})
wikipedia.url <- reactive({
switch(
input$map.city,
Seattle = "https://en.wikipedia.org/wiki/Seattle",
`Santa Clara` = "https://en.wikipedia.org/wiki/Santa_Clara,_California"
)
})
map.forecast <- reactive({
# darksky::get_forecast_for(map.coordinates()$lat, map.coordinates()$lng, today())
# owmr::get_current("Seattle", units = "metric")
})
## ========================================
## Server Output
## ========================================
output$GeographyMap <- renderLeaflet({
# map parameters
map.coordinates <- map.coordinates()
map.zoom <- 11
# map setup
m <- leaflet() %>% addTiles() %>%
setView(lng = map.coordinates$lng, lat = map.coordinates$lat, zoom = map.zoom) %>%
addMiniMap(zoomLevelOffset = -4)
# render map
m
})
output$USMap <- renderLeaflet({
# map setup
m <- leaflet(city.data %>% dplyr::filter(Country == "US")) %>% addTiles() %>%
addMarkers(lng = ~Long, lat = ~Lat, popup = ~City) # %>% suspendScroll()
# render map
m
})
output$EuropeMap <- renderLeaflet({
# map setup
m <- leaflet(city.data %>% dplyr::filter(Country %in% c("DE", "FR", "NL"))) %>% addTiles() %>%
addMarkers(lng = ~Long, lat = ~Lat, popup = ~City)
# render map
m
})
output$TopographyMap <- renderLeaflet({
# map parameters
map.coordinates <- map.coordinates()
map.zoom <- 11
# map setup
m <- leaflet() %>% addTiles() %>%
setView(lng = map.coordinates$lng, lat = map.coordinates$lat, zoom = map.zoom)
# render map
m
})
output$CityDescription <- renderTable({
city <- input$map.city
# scrape wikipedia
info.box <- wikipedia.url() %>%
read_html() %>%
html_nodes("table.vcard") %>%
html_table(header=FALSE, fill=TRUE) %>%
.[[1]] %>% as.data.frame()
clean.info.box <- switch(
city,
Seattle = info.box %>%
# select non-image features
dplyr::slice(c(5:6, 10:37, 39:nrow(info.box))) %>%
# remove annotations
dplyr::mutate(X1 = gsub("\\[.*\\]", "", X1),
X2 = gsub("\\[.*\\]", "", X2)) %>%
do({
# city nickname
.[1,1] = 'Nickname'
.[1,2] = gsub(".*:", "", .[1,2])
# city motto
.[2,1] = 'Motto'
.[2,2] = gsub(".*:", "", .[2,2])
# city government
.[8,2] = ""
# city area
.[13,2] = ""
# city population
.[20,2] = ""
# ----
.
}),
`Santa Clara` = info.box %>%
# select non-image features
dplyr::slice(c(9:30, 32:nrow(info.box))) %>%
# remove annotations
dplyr::mutate(X1 = gsub("\\[.*\\]", "", X1),
X2 = gsub("\\[.*\\]", "", X2)) %>%
do({
# city government
.[6,2] = ""
# city area
.[11,2] = ""
# city population
.[16,2] = ""
# ----
.
})
)
# render table
clean.info.box
}, colnames = FALSE, hover = TRUE)
})
|
library(shiny)
library(shinyjs)
library(ggvis)
library(d3heatmap)
library(phyloseq)
library(ape)
library(plotly)
source("helpers.R")
# Alpha-diversity indices offered in the Diversity tab.
alpha.methods <- c("Shannon", "Simpson", "InvSimpson")
# Weighted UniFrac, Bray-Curtis
beta.methods <- c("wUniFrac", "bray")
# Taxonomy ranks selectable throughout the UI.
tax.name <- c('superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family',
'genus', 'species', 'no rank')
# Normalization method labels (defined here but not referenced elsewhere in
# this file -- presumably consumed by a sourced ui_* file; confirm).
norm.methods <- c('EBayes coreOTU Normalization',
'Quantile coreOTU Normalization', 'Library Size Scaling')
# Abundance measure labels; presumably PathoScope report score columns --
# confirm against the report format.
measure.type <- c('Final Guess', 'Final Best Hit', 'Final High Confidence Hit')
# Smallest group size across the levels of a batch/condition vector; used
# below to bound UI inputs that must not exceed the smallest group.
#
# @param batch1 A vector of batch/condition labels (one entry per sample).
# @return Integer: the number of samples in the least-populated level.
minbatch <- function(batch1){
  # split() coerces its grouping argument to a factor, so the explicit
  # as.factor() round-trip of the original is unnecessary; lengths() yields
  # the per-level group sizes directly, replacing the 1:length()/lapply
  # construction (which also crashed on empty input).
  min(lengths(split(batch1, batch1)))
}
# Load the pre-processed input prepared by helpers.R; all UI choices below
# are derived from its phyloseq object.
shinyInput <- getShinyInput()
pstat <- shinyInput$pstat
# Sample-level covariates: columns of the phyloseq sample_data slot.
covariates <- colnames(sample_data(pstat))
# Covariates with fewer than 8 levels: only these are offered where a value
# is mapped to a color bar / palette.  Vectorized subset replaces the
# original grow-with-c() loop over 1:length(covariates).
covariates.colorbar <- covariates[
  vapply(
    covariates,
    function(cv) length(unique(sample_data(pstat)[[cv]])) < 8,
    logical(1)
  )
]
# Covariates with exactly 2 levels: required for the two-group comparisons
# (beta-diversity tests, differential/abundance analysis).  Vectorized
# subset replaces the original grow-with-c() loop.
covariates.two.levels <- covariates[
  vapply(
    covariates,
    function(cv) length(unique(sample_data(pstat)[[cv]])) == 2,
    logical(1)
  )
]
# Upper bounds for numeric UI inputs: smallest group size of the first
# (batch) and second (condition) sample-data columns.
maxbatchElems <- minbatch(c(pstat@sam_data[,1])[[1]])
maxcondElems <- minbatch(c(pstat@sam_data[,2])[[1]])
# Display defaults and the hard limit for gene/OTU listings.
defaultDisp <- 30
defaultGenesDisp <- 10
maxGenes <- dim(pstat@otu_table)[1]
# sample names (columns of the OTU count matrix)
sample.names.all <- colnames(pstat@otu_table@.Data)
# load ui tabs right before calling shinyUI()
source("ui_01_upload.R", local = TRUE) #creates shiny_panel_upload variable
source("ui_02_filter.R", local = TRUE) #creates shiny_panel_upload variable
source("ui_07_biomarker.R", local = TRUE) #creates shiny_panel_upload variable
# Top-level PathoStat UI: a navbarPage whose title embeds the installed
# package version.  The Upload, Filter and Biomarker tabs are assembled in
# the sourced ui_0*.R files; the remaining tabs are defined inline below and
# draw their choices from the covariates/tax.name vectors computed above.
shinyUI(navbarPage(paste("PathoStat v", packageVersion("PathoStat"), sep = ""), id="PathoStat", fluid=TRUE,
theme = "bootstrap.min.css",
tabPanel("Upload", shiny_panel_upload),
tabPanel("Data Summary/Filtering", shiny_panel_filter),
# --- Relative Abundance: per-rank RA plot and annotated heatmap ---
tabPanel("Relative Abundance",
sidebarLayout(
sidebarPanel(
#selectizeInput('taxl', 'Taxonomy Level', choices = setNames(
# tax.abb, tax.name)),
selectizeInput('taxl', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectInput("select_condition", "Select Condition:",
covariates),
width=3
),
mainPanel(
tabsetPanel(
tabPanel("Taxonomy level RA",
ggvisOutput("TaxRelAbundancePlot")),
# new heatmap
tabPanel("Heatmap",
helpText("Note: Only variables with less than 8 levels could be mapped to color bar."),
fluidRow(
column(3, selectInput("select_heatmap_condition_1", "Add colorbar based on:",
covariates.colorbar)),
column(3, selectInput("select_heatmap_condition_2", "Add second colorbar based on:",
covariates.colorbar)),
column(3, checkboxInput("checkbox_heatmap_scale", "Row scaling", value = TRUE)),
column(3, checkboxInput("checkbox_heatmap", "Add colorbar", value = TRUE))
),
plotOutput("Heatmap", height="550px"),
downloadButton('download_heatmap_pdf', 'Download heatmap PDF'))
), width=9
)
)
),
# --- Diversity: alpha/beta diversity plots, statistical tests and tables ---
tabPanel("Diversity",
tabsetPanel(
tabPanel("Alpha Diversity",
br(),
sidebarLayout(
sidebarPanel(
br(),
selectizeInput('taxl.alpha', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectInput("select_alpha_div_condition", "Compare between:",
covariates.colorbar),
selectInput("select_alpha_div_method", "Choose method:",
alpha.methods)
),
mainPanel(
br(),
tabsetPanel(
tabPanel("Boxplot",
plotlyOutput("AlphaDiversity"),
actionButton("download_alpha", "Download Alpha diversity pdf"),
helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")
),
tabPanel("Taxa number Barplot",
plotlyOutput("AlphaDiversityBarplot")
),
tabPanel("Statistical Test",
selectInput("select_alpha_stat_method","Non-parametric Test", c("Mann-Whitney","Kruskal-Wallis")),
tableOutput("alpha.stat.test")
),
tabPanel("Alpha Diversity Table",
br(),
downloadButton('download_table_alpha', 'Download this table'),
DT::dataTableOutput("table.alpha")
)
)
)
)
),
tabPanel("Beta Diversity",
br(),
sidebarLayout(
sidebarPanel(
selectizeInput('taxl.beta', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectInput("select_beta_div_method", "Choose method:",
beta.methods),
helpText("Only variables with 2 levels are supported for boxplot and stat test here." ),
selectInput("select_beta_condition", "Select condition",
covariates.two.levels)
),
mainPanel(
br(),
tabsetPanel(
tabPanel("Heatmap",
br(),
fluidRow(
column(3, selectInput("select_beta_heatmap_condition_1", "Add colorbar on:",
covariates.colorbar)),
column(3, selectInput("select_beta_heatmap_condition_2", "Add second colorbar on:",
covariates.colorbar))
),
checkboxInput("checkbox_beta_heatmap", "Add colorbar", value = TRUE),
plotOutput("BetaDiversityHeatmap"),
downloadButton('download_beta_heatmap_pdf', 'Download heatmap PDF')
),
tabPanel("Boxplot",
plotlyOutput("BetaDiversityBoxplot"),
actionButton("download_beta_boxplot", "Download pdf"),
helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")
),
tabPanel("Statistical Test",
selectInput("select_beta_stat_method","Select Test", c("PERMANOVA", "Kruskal-Wallis", "Mann-Whitney")),
numericInput("num.permutation.permanova", "Number of permutations", value = 999, max = 2000),
tableOutput("beta.stat.test")
),
tabPanel("Beta Diversity Table",
br(),
downloadButton('download_table_beta', 'Download this table'),
DT::dataTableOutput("table.beta")
)
)
)
)
)
)
),
# --- Dimension Reduction: PCA / PCoA plots and variance tables ---
tabPanel("Dimension Reduction",
sidebarLayout(
sidebarPanel(
numericInput('xcol.new', 'Principal Component (x-axis)', 1,
min = 1, max = 50),
numericInput('ycol.new', 'Principal Component (y-axis)', 2,
min = 1, max = 50),
selectizeInput('taxl.pca', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectInput("select_pca_color", "Color points by:",
covariates),
selectInput("select_pca_shape", "Shape points by:",
covariates.colorbar),
width=3
),
mainPanel(
tabsetPanel(
tabPanel("PCA plot",
# plotly pdf export is done via an actionButton workaround here,
# as plotly's built-in image export needs a paid license
plotlyOutput("pca.plotly"),
actionButton("download_pca", "Download PCA pdf"),
helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")),
tabPanel("PCA variance", DT::dataTableOutput("PCAtable")),
tabPanel("PCoA plot",
plotlyOutput("pcoa.plotly"),
selectInput("pcoa.method", "PCoA method:",
beta.methods),
actionButton("download_pcoa", "Download PCoA pdf"),
helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")),
tabPanel("PCoA variance", DT::dataTableOutput("PCoAtable"))
)
)
)
),
# --- Differential Analysis: DESeq2, edgeR and presence/absence tests ---
tabPanel("Differential Analysis",
tabsetPanel(
tabPanel("Deseq2",
sidebarLayout(
sidebarPanel(
selectizeInput('taxl.da', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectizeInput('da.condition', 'Select condition',
choices = covariates.two.levels),
selectizeInput('da.condition.covariate', 'Select (multiple) covariates',
choices = covariates, multiple = TRUE),
helpText("Continuous covariates would be automatically cut into factors with 3 levels."),
numericInput('da.count.cutoff', 'Minumum count cut-off', 500,
min = 1, max = 5000),
numericInput('da.padj.cutoff', 'Choose padj cut-off', 0.5,
min = 1e-100, max = 1),
width=3
),
mainPanel(
tabPanel("DeSeq2",
tabsetPanel(
tabPanel("DE output",
DT::dataTableOutput("DeSeq2Table.new"),
downloadButton("download_deseq_tb", "Download this table")
)
)
), width=9
)
)
),
tabPanel("edgeR",
sidebarLayout(
sidebarPanel(
selectizeInput('taxl.edger', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectizeInput('edger.condition', 'Select condition',
choices = covariates.two.levels),
helpText("Continuous covariates would be automatically cut into factors with 3 levels."),
numericInput('edger.padj.cutoff', 'Choose padj cut-off', 0.5,
min = 1e-100, max = 1),
width=3
),
mainPanel(
tabPanel("edgeR",
tabsetPanel(
tabPanel("DE output",
DT::dataTableOutput("edgerTable.new"),
downloadButton("download_edger_tb", "Download this table")
)
)
), width=9
)
)
),
tabPanel("Abundance analysis",
sidebarLayout(
sidebarPanel(
selectizeInput('taxl.pa', 'Taxonomy Level', choices = tax.name,
selected='no rank'),
selectizeInput('pa.condition', 'Select condition',
choices = covariates.two.levels),
numericInput('pa.count.cutoff', 'Minumum count cut-off', 500,
min = 1, max = 5000),
numericInput('pa.padj.cutoff', 'Choose padj cut-off', 0.5,
min = 1e-100, max = 1),
width=3
),
mainPanel(
tabPanel("Test output",
tabsetPanel(
tabPanel("output",
selectizeInput('pa.method', 'Select test method',
choices = c("Fisher Exact Test", "Chi-squared Test", "Mann-Whitney Test")),
DT::dataTableOutput("pa.test"),
downloadButton("download_pa_test", "Download this table")
)
)
), width=9
)
)
)
)
),
# --- Biomarker tab (built in ui_07_biomarker.R) and placeholder Pathway tab ---
tabPanel("Biomarker", shiny_panel_biomarker),
tabPanel("Pathway (Under construction by Tyler)")
# tabPanel("Time Series",
# tabsetPanel(
# tabPanel("Visualization",
# sidebarLayout(
# sidebarPanel(
# selectInput(inputId="Allusset",
# label="Visualization column",
# choices = colnames(shinyInput$pstat@sam_data)),
# checkboxInput(inputId="Allurar",
# label="Rarefaction? (maximum reads of minimal
# sample count)"),
# selectInput(inputId="Alluglom", label="Agglomerate taxa",
# choices = colnames(shinyInput$pstat@tax_table)),
# uiOutput("Allustax"),
# downloadButton('downloadAlluvialPlot',
# 'Download Plot')
# ),
# mainPanel(
# plotOutput("TimePlotVisu",height = "600px")
# )
# )
# )
# )
# )
)
)
| /inst/shiny/PathoStat/ui.R | no_license | tseanlu/PathoStat | R | false | false | 16,290 | r | library(shiny)
library(shinyjs)
library(ggvis)
library(d3heatmap)
library(phyloseq)
library(ape)
library(plotly)
source("helpers.R")
alpha.methods <- c("Shannon", "Simpson", "InvSimpson")
# Weigthed Unifrac, Bray-Curtis
beta.methods <- c("wUniFrac", "bray")
tax.name <- c('superkingdom', 'kingdom', 'phylum', 'class', 'order', 'family',
'genus', 'species', 'no rank')
norm.methods <- c('EBayes coreOTU Normalization',
'Quantile coreOTU Normalization', 'Library Size Scaling')
measure.type <- c('Final Guess', 'Final Best Hit', 'Final High Confidence Hit')
minbatch <- function(batch1){
batch2 <- as.factor(batch1)
batch3 <- split(batch1,batch2)
return(min(unlist(lapply(1:length(batch3),
function(x) length(batch3[[x]])))))
}
# Pull the PathoStat data object prepared at load time and derive the
# covariate lists used to populate the select widgets below.
shinyInput <- getShinyInput()
pstat <- shinyInput$pstat
# Hoisted out of the loop: sample_data(pstat) was re-evaluated on every
# iteration in the original code.
sam_data_df <- sample_data(pstat)
covariates <- colnames(sam_data_df)
# Covariates with < 8 levels can be mapped to a heatmap colorbar; those
# with exactly 2 levels are usable for two-group comparisons/tests.
# (Iterating over the vector itself avoids the 1:length() footgun when
# there are no covariates; both loops are merged into one pass.)
covariates.colorbar <- c()
covariates.two.levels <- c()
for (cv in covariates) {
  num.levels <- length(unique(sam_data_df[[cv]]))
  if (num.levels < 8) {
    covariates.colorbar <- c(covariates.colorbar, cv)
  }
  if (num.levels == 2) {
    covariates.two.levels <- c(covariates.two.levels, cv)
  }
}
# Smallest batch / condition group sizes (first two sample-data columns).
maxbatchElems <- minbatch(c(pstat@sam_data[, 1])[[1]])
maxcondElems <- minbatch(c(pstat@sam_data[, 2])[[1]])
defaultDisp <- 30       # default number of elements to display
defaultGenesDisp <- 10  # default number of genes to display
maxGenes <- dim(pstat@otu_table)[1]
# Sample names from the OTU count matrix.
sample.names.all <- colnames(pstat@otu_table@.Data)
# load ui tabs right before calling shinyUI()
source("ui_01_upload.R", local = TRUE) #creates shiny_panel_upload variable
source("ui_02_filter.R", local = TRUE) #creates shiny_panel_upload variable
source("ui_07_biomarker.R", local = TRUE) #creates shiny_panel_upload variable
# ---- Main PathoStat UI ---------------------------------------------------
# One navbarPage tab per analysis module. The Upload, Data Summary/Filtering
# and Biomarker panels are built in the sourced ui_0*.R files; the remaining
# panels are defined inline below.
# Fix: the two "Minumum count cut-off" labels are corrected to "Minimum".
shinyUI(navbarPage(paste("PathoStat v", packageVersion("PathoStat"), sep = ""), id="PathoStat", fluid=TRUE,
    theme = "bootstrap.min.css",
    tabPanel("Upload", shiny_panel_upload),
    tabPanel("Data Summary/Filtering", shiny_panel_filter),
    # Stacked relative-abundance view plus an annotated heatmap.
    tabPanel("Relative Abundance",
        sidebarLayout(
            sidebarPanel(
                selectizeInput('taxl', 'Taxonomy Level', choices = tax.name,
                    selected='no rank'),
                selectInput("select_condition", "Select Condition:",
                    covariates),
                width=3
            ),
            mainPanel(
                tabsetPanel(
                    tabPanel("Taxonomy level RA",
                        ggvisOutput("TaxRelAbundancePlot")),
                    tabPanel("Heatmap",
                        helpText("Note: Only variables with less than 8 levels could be mapped to color bar."),
                        fluidRow(
                            column(3, selectInput("select_heatmap_condition_1", "Add colorbar based on:",
                                covariates.colorbar)),
                            column(3, selectInput("select_heatmap_condition_2", "Add second colorbar based on:",
                                covariates.colorbar)),
                            column(3, checkboxInput("checkbox_heatmap_scale", "Row scaling", value = TRUE)),
                            column(3, checkboxInput("checkbox_heatmap", "Add colorbar", value = TRUE))
                        ),
                        plotOutput("Heatmap", height="550px"),
                        downloadButton('download_heatmap_pdf', 'Download heatmap PDF'))
                ), width=9
            )
        )
    ),
    # Alpha (within-sample) and beta (between-sample) diversity analyses.
    tabPanel("Diversity",
        tabsetPanel(
            tabPanel("Alpha Diversity",
                br(),
                sidebarLayout(
                    sidebarPanel(
                        br(),
                        selectizeInput('taxl.alpha', 'Taxonomy Level', choices = tax.name,
                            selected='no rank'),
                        selectInput("select_alpha_div_condition", "Compare between:",
                            covariates.colorbar),
                        selectInput("select_alpha_div_method", "Choose method:",
                            alpha.methods)
                    ),
                    mainPanel(
                        br(),
                        tabsetPanel(
                            tabPanel("Boxplot",
                                plotlyOutput("AlphaDiversity"),
                                actionButton("download_alpha", "Download Alpha diversity pdf"),
                                helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")
                            ),
                            tabPanel("Taxa number Barplot",
                                plotlyOutput("AlphaDiversityBarplot")
                            ),
                            tabPanel("Statistical Test",
                                selectInput("select_alpha_stat_method", "Non-parametric Test", c("Mann-Whitney", "Kruskal-Wallis")),
                                tableOutput("alpha.stat.test")
                            ),
                            tabPanel("Alpha Diversity Table",
                                br(),
                                downloadButton('download_table_alpha', 'Download this table'),
                                DT::dataTableOutput("table.alpha")
                            )
                        )
                    )
                )
            ),
            tabPanel("Beta Diversity",
                br(),
                sidebarLayout(
                    sidebarPanel(
                        selectizeInput('taxl.beta', 'Taxonomy Level', choices = tax.name,
                            selected='no rank'),
                        selectInput("select_beta_div_method", "Choose method:",
                            beta.methods),
                        helpText("Only variables with 2 levels are supported for boxplot and stat test here."),
                        selectInput("select_beta_condition", "Select condition",
                            covariates.two.levels)
                    ),
                    mainPanel(
                        br(),
                        tabsetPanel(
                            tabPanel("Heatmap",
                                br(),
                                fluidRow(
                                    column(3, selectInput("select_beta_heatmap_condition_1", "Add colorbar on:",
                                        covariates.colorbar)),
                                    column(3, selectInput("select_beta_heatmap_condition_2", "Add second colorbar on:",
                                        covariates.colorbar))
                                ),
                                checkboxInput("checkbox_beta_heatmap", "Add colorbar", value = TRUE),
                                plotOutput("BetaDiversityHeatmap"),
                                downloadButton('download_beta_heatmap_pdf', 'Download heatmap PDF')
                            ),
                            tabPanel("Boxplot",
                                plotlyOutput("BetaDiversityBoxplot"),
                                actionButton("download_beta_boxplot", "Download pdf"),
                                helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")
                            ),
                            tabPanel("Statistical Test",
                                selectInput("select_beta_stat_method", "Select Test", c("PERMANOVA", "Kruskal-Wallis", "Mann-Whitney")),
                                numericInput("num.permutation.permanova", "Number of permutations", value = 999, max = 2000),
                                tableOutput("beta.stat.test")
                            ),
                            tabPanel("Beta Diversity Table",
                                br(),
                                downloadButton('download_table_beta', 'Download this table'),
                                DT::dataTableOutput("table.beta")
                            )
                        )
                    )
                )
            )
        )
    ),
    # PCA / PCoA ordination.
    tabPanel("Dimension Reduction",
        sidebarLayout(
            sidebarPanel(
                numericInput('xcol.new', 'Principal Component (x-axis)', 1,
                    min = 1, max = 50),
                numericInput('ycol.new', 'Principal Component (y-axis)', 2,
                    min = 1, max = 50),
                selectizeInput('taxl.pca', 'Taxonomy Level', choices = tax.name,
                    selected='no rank'),
                selectInput("select_pca_color", "Color points by:",
                    covariates),
                selectInput("select_pca_shape", "Shape points by:",
                    covariates.colorbar),
                width=3
            ),
            mainPanel(
                tabsetPanel(
                    tabPanel("PCA plot",
                        # plotly's built-in pdf export requires a paid
                        # licence, so the download button opens the figure
                        # externally instead.
                        plotlyOutput("pca.plotly"),
                        actionButton("download_pca", "Download PCA pdf"),
                        helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")),
                    tabPanel("PCA variance", DT::dataTableOutput("PCAtable")),
                    tabPanel("PCoA plot",
                        plotlyOutput("pcoa.plotly"),
                        selectInput("pcoa.method", "PCoA method:",
                            beta.methods),
                        actionButton("download_pcoa", "Download PCoA pdf"),
                        helpText("Note: Wait for 8-10s after clicking DOWNLOAD, and the figure will be opened externally.")),
                    tabPanel("PCoA variance", DT::dataTableOutput("PCoAtable"))
                )
            )
        )
    ),
    # Differential abundance: DESeq2, edgeR and simple count-based tests.
    tabPanel("Differential Analysis",
        tabsetPanel(
            tabPanel("Deseq2",
                sidebarLayout(
                    sidebarPanel(
                        selectizeInput('taxl.da', 'Taxonomy Level', choices = tax.name,
                            selected='no rank'),
                        selectizeInput('da.condition', 'Select condition',
                            choices = covariates.two.levels),
                        selectizeInput('da.condition.covariate', 'Select (multiple) covariates',
                            choices = covariates, multiple = TRUE),
                        helpText("Continuous covariates would be automatically cut into factors with 3 levels."),
                        numericInput('da.count.cutoff', 'Minimum count cut-off', 500,
                            min = 1, max = 5000),
                        numericInput('da.padj.cutoff', 'Choose padj cut-off', 0.5,
                            min = 1e-100, max = 1),
                        width=3
                    ),
                    mainPanel(
                        tabPanel("DeSeq2",
                            tabsetPanel(
                                tabPanel("DE output",
                                    DT::dataTableOutput("DeSeq2Table.new"),
                                    downloadButton("download_deseq_tb", "Download this table")
                                )
                            )
                        ), width=9
                    )
                )
            ),
            tabPanel("edgeR",
                sidebarLayout(
                    sidebarPanel(
                        selectizeInput('taxl.edger', 'Taxonomy Level', choices = tax.name,
                            selected='no rank'),
                        selectizeInput('edger.condition', 'Select condition',
                            choices = covariates.two.levels),
                        helpText("Continuous covariates would be automatically cut into factors with 3 levels."),
                        numericInput('edger.padj.cutoff', 'Choose padj cut-off', 0.5,
                            min = 1e-100, max = 1),
                        width=3
                    ),
                    mainPanel(
                        tabPanel("edgeR",
                            tabsetPanel(
                                tabPanel("DE output",
                                    DT::dataTableOutput("edgerTable.new"),
                                    downloadButton("download_edger_tb", "Download this table")
                                )
                            )
                        ), width=9
                    )
                )
            ),
            tabPanel("Abundance analysis",
                sidebarLayout(
                    sidebarPanel(
                        selectizeInput('taxl.pa', 'Taxonomy Level', choices = tax.name,
                            selected='no rank'),
                        selectizeInput('pa.condition', 'Select condition',
                            choices = covariates.two.levels),
                        numericInput('pa.count.cutoff', 'Minimum count cut-off', 500,
                            min = 1, max = 5000),
                        numericInput('pa.padj.cutoff', 'Choose padj cut-off', 0.5,
                            min = 1e-100, max = 1),
                        width=3
                    ),
                    mainPanel(
                        tabPanel("Test output",
                            tabsetPanel(
                                tabPanel("output",
                                    selectizeInput('pa.method', 'Select test method',
                                        choices = c("Fisher Exact Test", "Chi-squared Test", "Mann-Whitney Test")),
                                    DT::dataTableOutput("pa.test"),
                                    downloadButton("download_pa_test", "Download this table")
                                )
                            )
                        ), width=9
                    )
                )
            )
        )
    ),
    tabPanel("Biomarker", shiny_panel_biomarker),
    tabPanel("Pathway (Under construction by Tyler)")
    # A "Time Series" tab (alluvial visualization) was sketched here but is
    # not yet implemented; see version control history for the draft.
))
|
devtools::install_github("ropenscilabs/changes")
devtools::install_github("hadley/pillar")
devtools::install_github('JianhuaHuang/streamlineR')
library(changes) #git
library(skimr) # 数据探查: 统计量
library(visdat) # 数据探查:数据类型 缺失值
library(otvPlots) # 支持自动可视化变量分布,并为大型数据集计算时间汇总汇总统计数据
library(streamlineR) # 建模流程 自动分变量组 woe iv 的分析工具
library(caret)
library(mlr)
library(manipulate)
library(MatchingFrontier) #实现了对不平衡(在处理组和控制组之间)和样本大小的同步联合优化与精简,优化了分组匹配效率
library(miic)#实现了一种信息理论方法,它从纯粹的观测数据中学习因果或非因果图模型,同时还包括未观察到的潜在变量的影响.
library(usethis)#自动化包和项目设置任务,包括建立单元测试,测试覆盖率,持续集成,Git,GitHub,许可证,RStudio项目等等,否则需要手动执行.
## 降维
library(Rtsne) # t-随机临域嵌入法 降维,同时保留数据的局部和全局结构。
#http://blog.csdn.net/uncle_ll/article/details/59482203
library(ggfortify)#简单易用的统一的界面来用一行代码来对许多受欢迎的R软件包结果进行二维可视化的一个R工具包
#https://terrytangyuan.github.io/2015/11/24/ggfortify-intro/
#https://zhuanlan.zhihu.com/p/28172816
# Fix: the stray ":" after library(workflowr) glued this line to the next
# statement through the sequence operator, producing a runtime error.
library(workflowr) # reproducible-research workflow framework supporting collaboration
browseVignettes("workflowr")
library(mleap)# http://mleap-docs.combust.ml/ # 未安装成功
library(CBDA)# 实现一个集合预测器(“超级学习者”),使用基于过滤和全面感知的方法对大数据进行分类.
library(featurefinder) #通过使用rpart分类和回归树对模型残差进行详细分析,实现了一种用于查找模型特征的方法.
library(iilassso)# 未安装成功 #用可独立解释的Lasso实现用于拟合线性、逻辑回归模型的高效算法.
library(live)#live : 通过了解关键因素,实施解释复杂机器学习模型的方法
library(mlrCPO) #提供了一套工具集,使得mlr(机器学习框架)更为丰富,可兼容多种预处理操作符(CPOS).
library(varrank) #提供用于执行变量排序和特征选择的计算工具箱.
install.packages("spFSR")# 特征选择
library(SmartEDA) # 未安装成功 # SmartEDA : 提供自动描述数据结构和关系的功能,以便于探索性数据分析.
library(anomalize) # 实现一个“整洁”工作流程,用于检测时间序列数据中的异常情况,支持分解时间序列、检测异常和将多个时间序列的正常数据与异常数据进行分离等功能.
library(table1) # 提供用于创建用于描述性统计的HTML表格的功能.
library(foolbox)# 提供一个操作函数的框架,并使用元编程来翻译.
library(graphframes)#graphframes : 提供了sparklyr包的扩展,提供了基于DataFrame的接口来处理图形.
library(pkgnet) #pkgnet : 使用图论的工具来分析R包中的函数与其导入的包之间的依赖关系,并量化它们的复杂性等.
library(promises) # 提供了在R中进行异步编程的基本抽象.
library(ERSA)# ERSA : 提供函数对回归计算的结果进行展示,同时该功能可以集成到shiny中进行可视化显示.
library(prepplot) #prepplot : 提供一个单独的函数,用于创建基础绘图区域,可以轻松制作自定义图形.
library(ggstatsplot)# ggstatsplot 未安装成功: 对ggplot2进行扩展,生成带有统计试验结果的可视化图形.
| /工具函数包/rpack2learn.R | no_license | longhua8800w/learnR | R | false | false | 3,664 | r |
devtools::install_github("ropenscilabs/changes")
devtools::install_github("hadley/pillar")
devtools::install_github('JianhuaHuang/streamlineR')
library(changes) #git
library(skimr) # 数据探查: 统计量
library(visdat) # 数据探查:数据类型 缺失值
library(otvPlots) # 支持自动可视化变量分布,并为大型数据集计算时间汇总汇总统计数据
library(streamlineR) # 建模流程 自动分变量组 woe iv 的分析工具
library(caret)
library(mlr)
library(manipulate)
library(MatchingFrontier) #实现了对不平衡(在处理组和控制组之间)和样本大小的同步联合优化与精简,优化了分组匹配效率
library(miic)#实现了一种信息理论方法,它从纯粹的观测数据中学习因果或非因果图模型,同时还包括未观察到的潜在变量的影响.
library(usethis)#自动化包和项目设置任务,包括建立单元测试,测试覆盖率,持续集成,Git,GitHub,许可证,RStudio项目等等,否则需要手动执行.
## 降维
library(Rtsne) # t-随机临域嵌入法 降维,同时保留数据的局部和全局结构。
#http://blog.csdn.net/uncle_ll/article/details/59482203
library(ggfortify)#简单易用的统一的界面来用一行代码来对许多受欢迎的R软件包结果进行二维可视化的一个R工具包
#https://terrytangyuan.github.io/2015/11/24/ggfortify-intro/
#https://zhuanlan.zhihu.com/p/28172816
# Fix: the stray ":" after library(workflowr) glued this line to the next
# statement through the sequence operator, producing a runtime error.
library(workflowr) # reproducible-research workflow framework supporting collaboration
browseVignettes("workflowr")
library(mleap)# http://mleap-docs.combust.ml/ # 未安装成功
library(CBDA)# 实现一个集合预测器(“超级学习者”),使用基于过滤和全面感知的方法对大数据进行分类.
library(featurefinder) #通过使用rpart分类和回归树对模型残差进行详细分析,实现了一种用于查找模型特征的方法.
library(iilassso)# 未安装成功 #用可独立解释的Lasso实现用于拟合线性、逻辑回归模型的高效算法.
library(live)#live : 通过了解关键因素,实施解释复杂机器学习模型的方法
library(mlrCPO) #提供了一套工具集,使得mlr(机器学习框架)更为丰富,可兼容多种预处理操作符(CPOS).
library(varrank) #提供用于执行变量排序和特征选择的计算工具箱.
install.packages("spFSR")# 特征选择
library(SmartEDA) # 未安装成功 # SmartEDA : 提供自动描述数据结构和关系的功能,以便于探索性数据分析.
library(anomalize) # 实现一个“整洁”工作流程,用于检测时间序列数据中的异常情况,支持分解时间序列、检测异常和将多个时间序列的正常数据与异常数据进行分离等功能.
library(table1) # 提供用于创建用于描述性统计的HTML表格的功能.
library(foolbox)# 提供一个操作函数的框架,并使用元编程来翻译.
library(graphframes)#graphframes : 提供了sparklyr包的扩展,提供了基于DataFrame的接口来处理图形.
library(pkgnet) #pkgnet : 使用图论的工具来分析R包中的函数与其导入的包之间的依赖关系,并量化它们的复杂性等.
library(promises) # 提供了在R中进行异步编程的基本抽象.
library(ERSA)# ERSA : 提供函数对回归计算的结果进行展示,同时该功能可以集成到shiny中进行可视化显示.
library(prepplot) #prepplot : 提供一个单独的函数,用于创建基础绘图区域,可以轻松制作自定义图形.
library(ggstatsplot)# ggstatsplot 未安装成功: 对ggplot2进行扩展,生成带有统计试验结果的可视化图形.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{simulation_data_emiss_means}
\alias{simulation_data_emiss_means}
\title{Parsed simulation results for emission distribution between-subject means.}
\format{
A list with two entries:
data_preprocessed: A data frame with 323,937 rows and 14 variables:
\describe{
\item{iteration_id}{string. unique id of the iteration}
\item{emiss_var_short}{string. One of 'EEG_mean_beta', 'EOG_median_theta' or 'EOG_min_beta'.}
\item{state}{string. Latent state. One of 'state1' (Awake), 'state2' (NREM) or 'state3' (REM)}
\item{median}{numeric. MAP value (median) of the posterior distribution.}
\item{SE}{numeric. MAP value (SE) of the posterior distribution.}
\item{var}{string. Concatenation of 'emiss_var_short' plus the statistic (median/SE) plus the
state name.}
\item{true_val}{numeric. Value of the population parameter used in the simulation study. These values are
the same as those used in the sleepsimR library. See
<https://github.com/JasperHG90/sleepsimR/blob/master/R/zzz.R>}
\item{lower}{numeric. Lower 95\% CCI for the median MAP value.}
\item{upper}{numeric. Upper 95\% CCI for the median MAP value.}
\item{scenario_id}{string .unique id of the simulation scenario}
\item{n}{int. number of subjects}
\item{n_t}{int. number of observed data points for each subject}
\item{zeta}{float. between-subject variance for the means of the emission distributions}
\item{Q}{float. between-subject variance for the intercepts of the transition-probability matrix}
}
summary_by_scenario: A data frame with 1,296 rows and 21 variables. In my study, I use 144 scenarios. Given
that I use three emission variables, each of which has 3 latent states, each scenario is represented 9 times
in this dataset.
\describe{
\item{scenario_id}{string .unique id of the simulation scenario}
\item{emiss_var_short}{string. One of 'EEG_mean_beta', 'EOG_median_theta' or 'EOG_min_beta'.}
\item{bias}{bias of the parameter estimates in the scenario relative to the population parameter.}
\item{bias_mcmc_se}{MCMC standard error of bias estimate.}
\item{empirical_se}{Empirical standard error of the parameter estimates in the scenario.}
\item{empirical_se_mcmc_se}{MCMC standard error of the empirical standard error.}
\item{MSE}{MSE of the parameter estimates in the scenario relative to the population parameter.}
\item{MSE_mcmc_se}{MCMC standard error of the MSE estimate.}
\item{coverage}{Percentage of cases in a scenario where the population parameter is included in
the 95\% CCI.}
\item{coverage_mcmc_se}{MCMC standard error of the coverage estimate.}
\item{bias_corr_coverage}{Percentage of cases in a scenario where the average estimated parameter
value is included in the 95\% CCI.}
\item{bias_corr_coverage_mcmc_se}{MCMC standard error of the bias corrected coverage estimate.}
\item{multimodal}{P-value of a test for multimodality in the distribution of parameter estimates.
See \link[multimode]{modetest}.}
\item{modSE}{Model standard error of the parameter estimates in the scenario.}
\item{modSE_mcmc_se}{MCMC standard error of the modSE estimate.}
\item{n}{int. number of subjects}
\item{n_t}{int. number of observed data points for each subject}
\item{zeta}{float. between-subject variance for the means of the emission distributions}
\item{Q}{float. between-subject variance for the intercepts of the transition-probability matrix}
}
}
\usage{
simulation_data_emiss_means
}
\description{
This dataset contains two separate datasets in a list. The first dataset is raw data that contains all
parameter estimates obtained from the simulation results (median MAP, SE MAP, lower 95\% CCI and upper
95\% CCI). The second dataset summarizes the raw in terms of the simulation metrics (bias, MSE etc.) for
each simulation scenario. To reproduce these datasets, follow the instructions in the
"data-raw/3_preprocess_simulation_results" found in this R library.
}
\seealso{
The simulation was conducted using the following two programs. The first program is the resource
manager found at <https://github.com/JasperHG90/sleepsimR-api/releases>. In particular, versions 1.3.1,
1.3.2 and 1.3.3 were used. These versions contain different iterations of the simulation study. For more
information, see \link[sleepsimRdata]{scen}, \link[sleepsimRdata]{scen_subs} and
\link[sleepsimRdata]{scen_rerun}. The program used to run the simulations can be found here
<https://github.com/JasperHG90/sleepsimR-run/releases>. In particular, version 1.3 was used. The
simulations were executed on a cluster managed by SURF <https://www.surf.nl/en>. For more information
about the architectural design of the simulation study, visit
<https://github.com/JasperHG90/sleepsimR-documentation>.
For a definition of the simulation metrics, see: Morris, Tim P., Ian R. White, and Michael J. Crowther. "Using simulation studies to evaluate statistical methods." Statistics in medicine 38.11 (2019): 2074-2102.
}
\keyword{datasets}
| /man/simulation_data_emiss_means.Rd | permissive | JasperHG90/sleepsimRdata | R | false | true | 5,109 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{simulation_data_emiss_means}
\alias{simulation_data_emiss_means}
\title{Parsed simulation results for emission distribution between-subject means.}
\format{
A list with two entries:
data_preprocessed: A data frame with 323.937 rows and 14 variables. :
\describe{
\item{iteration_id}{string. unique id of the iteration}
\item{emiss_var_short}{string. One of 'EEG_mean_beta', 'EOG_median_theta' or 'EOG_min_beta'.}
\item{state}{string. Latent state. One of 'state1' (Awake), 'state2' (NREM) or 'state3' (REM)}
\item{median}{numeric. MAP value (median) of the posterior distribution.}
\item{SE}{numeric. MAP value (SE) of the posterior distribution.}
\item{var}{string. Concatenation of 'emiss_var_short' plus the statistic (median/SE) plus the
state name.}
\item{true_val}{numeric. Value of the population parameter used in the simulation study. These values are
the same as those used in the sleepsimR library. See
<https://github.com/JasperHG90/sleepsimR/blob/master/R/zzz.R>}
\item{lower}{numeric. Lower 95\% CCI for the median MAP value.}
\item{upper}{numeric. Upper 95\% CCI for the median MAP value.}
\item{scenario_id}{string .unique id of the simulation scenario}
\item{n}{int. number of subjects}
\item{n_t}{int. number of observed data points for each subject}
\item{zeta}{float. between-subject variance for the means of the emission distributions}
\item{Q}{float. between-subject variance for the intercepts of the transition-probability matrix}
}
summary_by_scenario: A data frame with 1.296 rows and 21 variables. In my study, I use 144 scenarios. Given
that I use three emission variables, each of which has 3 latent states, each scenario is represented 9 times
in this dataset.
\describe{
\item{scenario_id}{string .unique id of the simulation scenario}
\item{emiss_var_short}{string. One of 'EEG_mean_beta', 'EOG_median_theta' or 'EOG_min_beta'.}
\item{bias}{bias of the parameter estimates in the scenario relative to the population parameter.}
\item{bias_mcmc_se}{MCMC standard error of bias estimate.}
\item{empirical_se}{Empirical standard error of the parameter estimates in the scenario.}
\item{empirical_se_mcmc_se}{MCMC standard error of the empirical standard error.}
\item{MSE}{MSE of the parameter estimates in the scenario relative to the population parameter.}
\item{MSE_mcmc_se}{MCMC standard error of the MSE estimate.}
\item{coverage}{Percentage of cases in a scenario where the population parameter is included in
the 95\% CCI.}
\item{coverage_mcmc_se}{MCMC standard error of the coverage estimate.}
\item{bias_corr_coverage}{Percentage of cases in a scenario where the average estimated parameter
value is included in the 95\% CCI.}
\item{bias_corr_coverage_mcmc_se}{MCMC standard error of the bias corrected coverage estimate.}
\item{multimodal}{P-value of a test for multimodality in the distribution of parameter estimates.
See \link[multimode]{modetest}.}
\item{modSE}{Model standard error of the parameter estimates in the scenario.}
\item{modSE_mcmc_se}{MCMC standard error of the modSE estimate.}
\item{n}{int. number of subjects}
\item{n_t}{int. number of observed data points for each subject}
\item{zeta}{float. between-subject variance for the means of the emission distributions}
\item{Q}{float. between-subject variance for the intercepts of the transition-probability matrix}
}
}
\usage{
simulation_data_emiss_means
}
\description{
This dataset contains two separate datasets in a list. The first dataset is raw data that contains all
parameter estimates obtained from the simulation results (median MAP, SE MAP, lower 95\% CCI and upper
95\% CCI). The second dataset summarizes the raw in terms of the simulation metrics (bias, MSE etc.) for
each simulation scenario. To reproduce these datasets, follow the instructions in the
"data-raw/3_preprocess_simulation_results" found in this R library.
}
\seealso{
The simulation was conducted using the following two programs. The first program is the resource
manager found at <https://github.com/JasperHG90/sleepsimR-api/releases>. In particular, versions 1.3.1,
1.3.2 and 1.3.3 were used. These versions contain different iterations of the simulation study. For more
information, see \link[sleepsimRdata]{scen}, \link[sleepsimRdata]{scen_subs} and
\link[sleepsimRdata]{scen_rerun}. The program used to run the simulations can be found here
<https://github.com/JasperHG90/sleepsimR-run/releases>. In particular, version 1.3 was used. The
simulations were executed on a cluster managed by SURF <https://www.surf.nl/en>. For more information
about the architectural design of the simulation study, visit
<https://github.com/JasperHG90/sleepsimR-documentation>.
For a definition of the simulation metrics, see: Morris, Tim P., Ian R. White, and Michael J. Crowther. "Using simulation studies to evaluate statistical methods." Statistics in medicine 38.11 (2019): 2074-2102.
}
\keyword{datasets}
|
if(!require(tidyverse)) install.packages("tidyverse")
if(!require(kableExtra)) install.packages("kableExtra")
if(!require(tidyr)) install.packages("tidyr")
if(!require(tidyverse)) install.packages("tidyverse")
if(!require(stringr)) install.packages("stringr")
if(!require(forcats)) install.packages("forcats")
if(!require(ggplot2)) install.packages("ggplot2")
library(dplyr)
library(tidyverse)
library(kableExtra)
library(tidyr)
library(stringr)
library(forcats)
library(ggplot2)
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
# ---- Data acquisition ----------------------------------------------------
# Download the MovieLens 10M archive into a temp file and parse the two
# "::"-delimited data files it contains.
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
                      col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
# movieId arrives as a factor; convert it back to the numeric id via levels.
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
                                           title = as.character(title),
                                           genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Hold out 10% of ratings for validation. Keep only users/movies that also
# appear in the training split (so every validation row can be predicted)
# and return the dropped rows to edx.
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
validation <- temp %>%
  semi_join(edx, by = "movieId") %>%
  semi_join(edx, by = "userId")
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
RMSE <- function(true_ratings = NULL, predicted_ratings = NULL) {
  # Root mean squared error between observed and predicted ratings.
  residuals <- true_ratings - predicted_ratings
  sqrt(mean(residuals^2))
}
#edx %>% summarize(Users = n_distinct(userId),
# Movies = n_distinct(movieId)) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",font_size = 10,full_width = FALSE)
# ---- Feature engineering -------------------------------------------------
# Identical transformations must be applied to both the training set (edx)
# and the hold-out set (validation); shared helpers replace the duplicated
# pipelines of the original script.

# Derive the rating date and its year/month components from the epoch
# timestamp.
add_date_features <- function(df) {
  df$date <- as.POSIXct(df$timestamp, origin = "1970-01-01")
  df$yearOfRate <- format(df$date, "%Y")
  df$monthOfRate <- format(df$date, "%m")
  df
}

# Split "Title (YYYY)" into a clean title and an integer release year.
# NOTE(review): for range years "YYYY-YYYY" the original code takes
# str_split(...)[1] of the whole result matrix, i.e. the first element
# overall rather than per row; that behaviour is preserved as-is.
extract_release_year <- function(df) {
  df %>%
    mutate(title = str_trim(title)) %>%
    extract(title, c("titleTemp", "release"),
            regex = "^(.*) \\(([0-9 \\-]*)\\)$", remove = F) %>%
    mutate(release = if_else(str_length(release) > 4,
                             as.integer(str_split(release, "-", simplify = T)[1]),
                             as.integer(release))) %>%
    mutate(title = if_else(is.na(titleTemp), title, titleTemp)) %>%
    select(-titleTemp)
}

# Explode the pipe-separated genres column into one row per (rating, genre).
explode_genres <- function(df) {
  df %>%
    mutate(genre = fct_explicit_na(genres, na_level = "(no genres listed)")) %>%
    separate_rows(genre, sep = "\\|")
}

# Keep only the modelling columns and coerce the derived fields to numeric.
finalize_columns <- function(df) {
  df <- df %>%
    select(userId, movieId, rating, title, genre, release, yearOfRate, monthOfRate)
  df$yearOfRate <- as.numeric(df$yearOfRate)
  df$monthOfRate <- as.numeric(df$monthOfRate)
  df$release <- as.numeric(df$release)
  df
}

edx <- edx %>%
  add_date_features() %>%
  extract_release_year() %>%
  explode_genres() %>%
  finalize_columns()
validation <- validation %>%
  add_date_features() %>%
  extract_release_year() %>%
  explode_genres() %>%
  finalize_columns()
#head(edx) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(count = n()) %>%
# arrange(desc(count)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(mean = mean(rating)) %>%
# arrange(desc(mean)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(median = median(rating)) %>%
# arrange(desc(median)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(count = n()) %>%
# arrange(desc(count)) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(mean = mean(rating)) %>%
# arrange(desc(mean)) %>%
# head(n=35) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(median = median(rating)) %>%
# arrange(desc(median)) %>%
# head(n=35) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
# ---- Model comparison ----------------------------------------------------
# Each section below fits one model on `edx`, predicts on `validation`, and
# appends the resulting RMSE to the `results` table.

# Baseline: predict the global mean rating for every observation.
paste("The mean is:", as.character(mean(edx$rating)))
mu_hat <- mean(edx$rating)
rmse_mean_model_result <- RMSE(validation$rating, mu_hat)
results <- data.frame(model="Naive Mean-Baseline Model", RMSE=rmse_mean_model_result)
#------
# Movie effect: b_i is each movie's mean deviation from the global mean.
mu_hat <- mean(edx$rating)
movie_avgs <- edx %>% group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))
rmse_movie_model <- validation %>%left_join(movie_avgs, by='movieId') %>%
  mutate(pred = mu_hat + b_i) %>% pull(pred)
rmse_movie_model_result <- RMSE(validation$rating, rmse_movie_model)
results <- results %>% add_row(model="Movie-Based Model", RMSE=rmse_movie_model_result)
#------
# Movie + user effects: b_u is each user's mean residual after removing b_i.
mu_hat <- mean(edx$rating)
movie_avgs <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))
user_avgs <- edx %>%
  left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))
rmse_movie_user_model <- validation %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  mutate(pred = mu_hat + b_i + b_u) %>%
  pull(pred)
rmse_movie_user_model_result <- RMSE(validation$rating, rmse_movie_user_model)
results <- results %>% add_row(model="Movie+User Based Model", RMSE=rmse_movie_user_model_result)
#-------
# Movie + user + genre effects: b_u_g is the per-genre mean residual after
# removing both the movie and user effects.
mu_hat <- mean(edx$rating)
movie_avgs <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))
user_avgs <- edx %>%
  left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))
genre_pop <- edx %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  group_by(genre) %>%
  summarize(b_u_g = mean(rating - mu_hat - b_i - b_u))
rmse_movie_user_genre_model <- validation %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  left_join(genre_pop, by='genre') %>%
  mutate(pred = mu_hat + b_i + b_u + b_u_g) %>%
  pull(pred)
rmse_movie_user_genre_model_result <- RMSE(validation$rating, rmse_movie_user_genre_model)
results <- results %>% add_row(model="Movie+User+Genre Based Model", RMSE=rmse_movie_user_genre_model_result)
#-------=-
# Regularized movie effect: shrink b_i with penalty lambda; the grid search
# keeps the lambda with minimum validation RMSE.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 10, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  predicted_ratings <- validation %>%left_join(b_i, by='movieId') %>%
    mutate(pred = mu_hat + b_i) %>%pull(pred)
  return(RMSE(validation$rating, predicted_ratings))
})
# df and min_lambda appear to be kept for plotting/inspection in the report
# — they are not used below (TODO confirm before removing).
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_model <- min(rmses)
results <- results %>% add_row(model="Regularized Movie-Based Model", RMSE=rmse_regularized_movie_model)
# Regularized movie + user effects: same grid search, shrinking b_i and b_u.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 15, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  b_u <- edx %>%
    left_join(b_i, by='movieId') %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu_hat) / (n() + lambda))
  predicted_ratings <- validation %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    mutate(pred = mu_hat + b_i + b_u) %>%
    pull(pred)
  return(RMSE(validation$rating, predicted_ratings))
})
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_user_model <- min(rmses)
results <- results %>% add_row(model="Regularized Movie+User Based Model", RMSE=rmse_regularized_movie_user_model)
# Regularized movie + user + genre effects.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 15, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  b_u <- edx %>%
    left_join(b_i, by='movieId') %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu_hat) / (n() + lambda))
  b_u_g <- edx %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    group_by(genre) %>%
    summarize(b_u_g = sum(rating - b_i - mu_hat - b_u) / (n() + lambda))
  predicted_ratings <- validation %>%
    left_join(b_i, by='movieId') %>%
    left_join(b_u, by='userId') %>%
    left_join(b_u_g, by='genre') %>%
    mutate(pred = mu_hat + b_i + b_u + b_u_g) %>%
    pull(pred)
  return(RMSE(validation$rating, predicted_ratings))
})
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_user_genre_model <- min(rmses)
results <- results %>% add_row(model="Regularized Movie+User+Genre Based Model", RMSE=rmse_regularized_movie_user_genre_model)
results %>%
kable() %>%
kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
position = "center",
font_size = 10,
full_width = FALSE) | /RMSE.r | no_license | sulaiman-alawad/thefinalproject | R | false | false | 11,785 | r | if(!require(tidyverse)) install.packages("tidyverse")
if(!require(kableExtra)) install.packages("kableExtra")
if(!require(tidyr)) install.packages("tidyr")
if(!require(tidyverse)) install.packages("tidyverse")
if(!require(stringr)) install.packages("stringr")
if(!require(forcats)) install.packages("forcats")
if(!require(ggplot2)) install.packages("ggplot2")
library(dplyr)
library(tidyverse)
library(kableExtra)
library(tidyr)
library(stringr)
library(forcats)
library(ggplot2)
if(!require(tidyverse)) install.packages("tidyverse", repos = "http://cran.us.r-project.org")
if(!require(caret)) install.packages("caret", repos = "http://cran.us.r-project.org")
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- read.table(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
set.seed(1)
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
rm(dl, ratings, movies, test_index, temp, movielens, removed)
RMSE <- function(true_ratings = NULL, predicted_ratings = NULL) {
sqrt(mean((true_ratings - predicted_ratings)^2))
}
#edx %>% summarize(Users = n_distinct(userId),
# Movies = n_distinct(movieId)) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",font_size = 10,full_width = FALSE)
# ---- Feature engineering (applied identically to edx and validation) ----
# The original duplicated this whole pipeline for both data frames; it is
# factored into one helper so the two copies cannot drift apart.
prep_ratings <- function(df) {
  df %>%
    # Rating timestamp -> POSIXct, then numeric year/month of the rating.
    mutate(date = as.POSIXct(timestamp, origin = "1970-01-01"),
           yearOfRate = as.numeric(format(date, "%Y")),
           monthOfRate = as.numeric(format(date, "%m")),
           title = str_trim(title)) %>%
    # Pull the release year out of titles like "Movie Name (1995)".
    extract(title, c("titleTemp", "release"),
            regex = "^(.*) \\(([0-9 \\-]*)\\)$", remove = F) %>%
    # NOTE(review): for year ranges ("1983-1985") the original indexes the
    # split matrix with [1] (first element overall, not per row); that
    # behavior is preserved here -- confirm whether [, 1] was intended.
    mutate(release = if_else(str_length(release) > 4,
                             as.integer(str_split(release, "-", simplify = T)[1]),
                             as.integer(release)),
           title = if_else(is.na(titleTemp), title, titleTemp)) %>%
    select(-titleTemp) %>%
    # One row per (rating, genre) pair.
    mutate(genre = fct_explicit_na(genres, na_level = "(no genres listed)")) %>%
    separate_rows(genre, sep = "\\|") %>%
    mutate(release = as.numeric(release)) %>%
    select(userId, movieId, rating, title, genre, release, yearOfRate, monthOfRate)
}

edx <- prep_ratings(edx)
validation <- prep_ratings(validation)
#head(edx) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(count = n()) %>%
# arrange(desc(count)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(mean = mean(rating)) %>%
# arrange(desc(mean)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(title) %>%
# summarise(median = median(rating)) %>%
# arrange(desc(median)) %>%
# head(n=25) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(count = n()) %>%
# arrange(desc(count)) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(mean = mean(rating)) %>%
# arrange(desc(mean)) %>%
# head(n=35) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
#edx %>%
# group_by(genre) %>%
# summarise(median = median(rating)) %>%
# arrange(desc(median)) %>%
# head(n=35) %>%
# kable() %>%
# kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
# position = "center",
# font_size = 10,
# full_width = FALSE)
# Overall mean rating; printed for reference.
paste("The mean is:", as.character(mean(edx$rating)))

# The original recomputed mu_hat (4x), movie_avgs (3x) and user_avgs (2x)
# with identical code; each is now computed once -- results are unchanged.
mu_hat <- mean(edx$rating)

## Model 1: naive mean baseline -- predict mu_hat for every rating.
rmse_mean_model_result <- RMSE(validation$rating, mu_hat)
results <- data.frame(model="Naive Mean-Baseline Model", RMSE=rmse_mean_model_result)

## Movie effect b_i: per-movie mean deviation from the overall mean.
movie_avgs <- edx %>%
  group_by(movieId) %>%
  summarize(b_i = mean(rating - mu_hat))

## Model 2: movie effect.
rmse_movie_model <- validation %>%
  left_join(movie_avgs, by='movieId') %>%
  mutate(pred = mu_hat + b_i) %>%
  pull(pred)
rmse_movie_model_result <- RMSE(validation$rating, rmse_movie_model)
results <- results %>% add_row(model="Movie-Based Model", RMSE=rmse_movie_model_result)

## User effect b_u: per-user mean residual after removing mu_hat and b_i.
user_avgs <- edx %>%
  left_join(movie_avgs, by='movieId') %>%
  group_by(userId) %>%
  summarize(b_u = mean(rating - mu_hat - b_i))

## Model 3: movie + user effects.
rmse_movie_user_model <- validation %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  mutate(pred = mu_hat + b_i + b_u) %>%
  pull(pred)
rmse_movie_user_model_result <- RMSE(validation$rating, rmse_movie_user_model)
results <- results %>% add_row(model="Movie+User Based Model", RMSE=rmse_movie_user_model_result)

## Genre effect b_u_g: per-genre mean residual after movie and user effects.
genre_pop <- edx %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  group_by(genre) %>%
  summarize(b_u_g = mean(rating - mu_hat - b_i - b_u))

## Model 4: movie + user + genre effects.
rmse_movie_user_genre_model <- validation %>%
  left_join(movie_avgs, by='movieId') %>%
  left_join(user_avgs, by='userId') %>%
  left_join(genre_pop, by='genre') %>%
  mutate(pred = mu_hat + b_i + b_u + b_u_g) %>%
  pull(pred)
rmse_movie_user_genre_model_result <- RMSE(validation$rating, rmse_movie_user_genre_model)
results <- results %>% add_row(model="Movie+User+Genre Based Model", RMSE=rmse_movie_user_genre_model_result)
#--- Regularized models: tune the penalty lambda on the validation RMSE ---

## Regularized movie effect.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 10, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  predicted_ratings <- validation %>%
    left_join(b_i, by = 'movieId') %>%
    mutate(pred = mu_hat + b_i) %>%
    pull(pred)
  RMSE(validation$rating, predicted_ratings)
})
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_model <- min(rmses)
results <- results %>%
  add_row(model="Regularized Movie-Based Model", RMSE=rmse_regularized_movie_model)

## Regularized movie + user effects.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 15, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  b_u <- edx %>%
    left_join(b_i, by = 'movieId') %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu_hat) / (n() + lambda))
  predicted_ratings <- validation %>%
    left_join(b_i, by = 'movieId') %>%
    left_join(b_u, by = 'userId') %>%
    mutate(pred = mu_hat + b_i + b_u) %>%
    pull(pred)
  RMSE(validation$rating, predicted_ratings)
})
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_user_model <- min(rmses)
results <- results %>%
  add_row(model="Regularized Movie+User Based Model", RMSE=rmse_regularized_movie_user_model)

## Regularized movie + user + genre effects.
mu_hat <- mean(edx$rating)
lambdas <- seq(0, 15, 0.1)
rmses <- sapply(lambdas, function(lambda) {
  b_i <- edx %>%
    group_by(movieId) %>%
    summarize(b_i = sum(rating - mu_hat) / (n() + lambda))
  b_u <- edx %>%
    left_join(b_i, by = 'movieId') %>%
    group_by(userId) %>%
    summarize(b_u = sum(rating - b_i - mu_hat) / (n() + lambda))
  b_u_g <- edx %>%
    left_join(b_i, by = 'movieId') %>%
    left_join(b_u, by = 'userId') %>%
    group_by(genre) %>%
    summarize(b_u_g = sum(rating - b_i - mu_hat - b_u) / (n() + lambda))
  predicted_ratings <- validation %>%
    left_join(b_i, by = 'movieId') %>%
    left_join(b_u, by = 'userId') %>%
    left_join(b_u_g, by = 'genre') %>%
    mutate(pred = mu_hat + b_i + b_u + b_u_g) %>%
    pull(pred)
  RMSE(validation$rating, predicted_ratings)
})
df <- data.frame(RMSE = rmses, lambdas = lambdas)
min_lambda <- lambdas[which.min(rmses)]
rmse_regularized_movie_user_genre_model <- min(rmses)
results <- results %>%
  add_row(model="Regularized Movie+User+Genre Based Model", RMSE=rmse_regularized_movie_user_genre_model)
# Render the final model-comparison table.
# (The original line ended in a stray "|" -- a dataset-extraction artifact
# that left the expression syntactically incomplete; removed.)
results %>%
  kable() %>%
  kable_styling(bootstrap_options = c("striped", "hover", "condensed", "responsive"),
                position = "center",
                font_size = 10,
                full_width = FALSE)
###############################
##
## Project: MetaboGuru
##
## Purpose: TypeI Partial mismatch for Score test with Laplacian
##
## Author: Charlie Carpenter
## Email: charles.carpenter@cuanschutz.edu
##
## Date Created: 2020-11-13
##
## ---------------------------
## Notes:
##
##
## ---------------------------
# Helpful Functions -------------------------------------------------------
`%nin%` <- Negate(`%in%`)
# Load simulation helpers (Part_Davie_* functions, graph utilities, etc.).
source('PartialNetworkSimFunctions.R')

nsim <- 10000   ## number of Monte Carlo replicates
n    <- 160     ## sample size per replicate
# mX <- matrix(1, n)
# b0 <- 0.2644 ## intercept term
sd.y <- 1.3688  ## standard deviation of Y
tau  <- 1       ## tuning parameter for the regularization kernel of the
                ## normalized Laplacian

# Fixed covariate design: binary group indicator plus a uniform covariate.
set.seed(4)
X <- data.frame(
  X1 = rep(0:1, each = n / 2),
  X2 = runif(n, 0, 5)
)

b0 <- c(0.2644, 0.5, 0.25)        ## intercept and covariate coefficients
H0.form <- formula(Y ~ X1 + X2)   ## null-model formula (no network effect)
pp <- 0.70                        ## proportion of permuted nodes (partial mismatch)
# Same Size ---------------------------------------------------------------
# For each network size p, simulate `nsim` scale-free graphs, run the score
# test (Laplacian kernel) under a 70% partially mismatched network of the
# same size, and record the empirical Type I error (share of p < 0.05).

# * p = 15 ----
p <- 15
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_ss_t1_15 <- plyr::ldply(graph.list, Part_Davie_SameSize,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_ss_t1_15$pos_def); unique(part70_davie_L_ss_t1_15$TypeI)

# * p = 30 ----
p <- 30
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_ss_t1_30 <- plyr::ldply(graph.list, Part_Davie_SameSize,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_ss_t1_30$pos_def); unique(part70_davie_L_ss_t1_30$TypeI)

# * p = 45 ----
p <- 45
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_ss_t1_45 <- plyr::ldply(graph.list, Part_Davie_SameSize,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_ss_t1_45$pos_def); unique(part70_davie_L_ss_t1_45$TypeI)
# Small Graph -------------------------------------------------------------
# Same design as above, but the observed network is a smaller graph than the
# true one (Part_Davie_SmallGraph).

# * p = 15 ----
p <- 15
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_sm_t1_15 <- plyr::ldply(graph.list, Part_Davie_SmallGraph,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_sm_t1_15$pos_def); unique(part70_davie_L_sm_t1_15$TypeI)

# * p = 30 ----
p <- 30
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_sm_t1_30 <- plyr::ldply(graph.list, Part_Davie_SmallGraph,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_sm_t1_30$pos_def); unique(part70_davie_L_sm_t1_30$TypeI)

# * p = 45 ----
p <- 45
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_sm_t1_45 <- plyr::ldply(graph.list, Part_Davie_SmallGraph,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_sm_t1_45$pos_def); unique(part70_davie_L_sm_t1_45$TypeI)
# Diff Density ------------------------------------------------------------
# The observed network differs from the true one in density: extra edges are
# added with probability 0.05 ("dm") or 0.15 ("dh"). Both runs for a given p
# reuse the same simulated graphs, and the RNG stream deliberately continues
# from the dm run into the dh run, exactly as in the original script.

# * p = 15 ----
p <- 15
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_dm_t1_15 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.05) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dm_t1_15$pos_def); unique(part70_davie_L_dm_t1_15$TypeI)

part70_davie_L_dh_t1_15 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.15) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dh_t1_15$pos_def); unique(part70_davie_L_dh_t1_15$TypeI)

# * p = 30 ----
p <- 30
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_dm_t1_30 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.05) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dm_t1_30$pos_def); unique(part70_davie_L_dm_t1_30$TypeI)

part70_davie_L_dh_t1_30 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.15) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dh_t1_30$pos_def); unique(part70_davie_L_dh_t1_30$TypeI)

# * p = 45 ----
p <- 45
set.seed(2)
graph.list <- lapply(seq_len(nsim), function(i) sample_pa(n = p, directed = FALSE))
zz <- rep(0, p)
part70_davie_L_dm_t1_45 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.05) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dm_t1_45$pos_def); unique(part70_davie_L_dm_t1_45$TypeI)

part70_davie_L_dh_t1_45 <- plyr::ldply(graph.list, Part_Davie_DiffDens,
                                       perc.perm = pp, include.network = "L",
                                       H0.form = H0.form, data = X, b0 = b0,
                                       sd.y = sd.y, zz = zz, tau = tau,
                                       new.edge.prob = 0.15) %>%
  mutate(TypeI = sum(pVal < 0.05, na.rm = TRUE) / n())
sum(part70_davie_L_dh_t1_45$pos_def); unique(part70_davie_L_dh_t1_45$TypeI)
| /SimulationFunctions/PartialMismatchSimulations/Type_I/P_70/Part70_Score_TypeI_Lap.R | no_license | CharlieCarpenter/PaIRKAT | R | false | false | 8,391 | r | ###############################
##
## Project: MetaboGuru
##
## Purpose: TypeI Partial mismatch for Score test with Laplacian
##
## Author: Charlie Carpenter
## Email: charles.carpenter@cuanschutz.edu
##
## Date Created: 2020-11-13
##
## ---------------------------
## Notes:
##
##
## ---------------------------
# Helpful Functions -------------------------------------------------------
`%nin%` <- Negate(`%in%`)
source('PartialNetworkSimFunctions.R')
nsim <- 10000 ## number of simulations
n <- 160 ## sample size
# mX <- matrix(1, n)
# b0 <- 0.2644 ## intercept term
sd.y <- 1.3688 ## standard deviation of Y
tau <- 1 ## Tuning parameter for regularization kernel of normalized laplacian
set.seed(4)
X <- data.frame(X1 = rep(0:1, each = n/2),
X2 = runif(n, 0, 5))
b0 <- c(0.2644, 0.5, 0.25)
H0.form <- formula(Y~X1+X2)
pp <- 0.70
# Same Size ---------------------------------------------------------------
# * 15 --------------------------------------------------------------------
p <- 15 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_ss_t1_15 <- plyr::ldply(graph.list, Part_Davie_SameSize, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_ss_t1_15$pos_def); unique(part70_davie_L_ss_t1_15$TypeI)
# * 30 --------------------------------------------------------------------
p <- 30 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_ss_t1_30 <- plyr::ldply(graph.list, Part_Davie_SameSize, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_ss_t1_30$pos_def); unique(part70_davie_L_ss_t1_30$TypeI)
# * 45 --------------------------------------------------------------------
p <- 45 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_ss_t1_45 <- plyr::ldply(graph.list, Part_Davie_SameSize, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_ss_t1_45$pos_def); unique(part70_davie_L_ss_t1_45$TypeI)
# Small Graph -------------------------------------------------------------
# * 15 --------------------------------------------------------------------
p <- 15 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_sm_t1_15 <- plyr::ldply(graph.list, Part_Davie_SmallGraph, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_sm_t1_15$pos_def); unique(part70_davie_L_sm_t1_15$TypeI)
# * 30 --------------------------------------------------------------------
p <- 30 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_sm_t1_30 <- plyr::ldply(graph.list, Part_Davie_SmallGraph, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_sm_t1_30$pos_def); unique(part70_davie_L_sm_t1_30$TypeI)
# * 45 --------------------------------------------------------------------
p <- 45 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_sm_t1_45 <- plyr::ldply(graph.list, Part_Davie_SmallGraph, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_sm_t1_45$pos_def); unique(part70_davie_L_sm_t1_45$TypeI)
# Diff Density ------------------------------------------------------------
# * 15 --------------------------------------------------------------------
p <- 15 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_dm_t1_15 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.05) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dm_t1_15$pos_def); unique(part70_davie_L_dm_t1_15$TypeI)
## ## ## ##
part70_davie_L_dh_t1_15 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.15) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dh_t1_15$pos_def); unique(part70_davie_L_dh_t1_15$TypeI)
# * 30 --------------------------------------------------------------------
p <- 30 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_dm_t1_30 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.05) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dm_t1_30$pos_def); unique(part70_davie_L_dm_t1_30$TypeI)
## ## ## ##
part70_davie_L_dh_t1_30 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.15) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dh_t1_30$pos_def); unique(part70_davie_L_dh_t1_30$TypeI)
# * 45 --------------------------------------------------------------------
p <- 45 ## size of network
set.seed(2)
graph.list <- lapply(1:nsim, function(x) sample_pa(n = p, directed = F))
zz <- rep(0, p)
part70_davie_L_dm_t1_45 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.05) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dm_t1_45$pos_def); unique(part70_davie_L_dm_t1_45$TypeI)
## ## ## ##
part70_davie_L_dh_t1_45 <- plyr::ldply(graph.list, Part_Davie_DiffDens, perc.perm = pp,
include.network = "L",
H0.form=H0.form, data = X, b0=b0,
sd.y=sd.y, zz=zz, tau=tau,
new.edge.prob=0.15) %>%
mutate(TypeI = sum(pVal < 0.05, na.rm = T)/n())
sum(part70_davie_L_dh_t1_45$pos_def); unique(part70_davie_L_dh_t1_45$TypeI)
|
#' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# @rdname catm
# message() wrapper with cat()-like semantics: accepts a `sep` argument and,
# unlike message(), does not append a trailing newline by default.
catm <- function(..., sep = " ", appendLF = FALSE) {
    text <- paste(..., sep = sep)
    message(text, appendLF = appendLF)
}
# Used to display verbose messages for tokens_select and dfm_select: reports
# how many features/documents were kept or removed, plus how many were
# padded, all on a single message line built incrementally via catm().
message_select <- function(selection, nfeats, ndocs, nfeatspad = 0, ndocspad = 0) {
    fmt <- function(n) format(n, big.mark = ",", scientific = FALSE)
    plural <- function(n) if (n != 1L) "s" else ""
    catm(if (selection == "keep") "kept" else "removed", " ",
         fmt(nfeats), " feature", plural(nfeats), sep = "")
    if (ndocs > 0) {
        catm(" and ", fmt(ndocs), " document", plural(ndocs), sep = "")
    }
    if ((nfeatspad + ndocspad) > 0) {
        catm(", padded ", sep = "")
    }
    if (nfeatspad > 0) {
        catm(fmt(nfeatspad), " feature", plural(nfeatspad), sep = "")
    }
    if (ndocspad > 0) {
        if (nfeatspad > 0) catm(" and ", sep = "")
        catm(fmt(ndocspad), " document", plural(ndocspad), sep = "")
    }
    catm("", appendLF = TRUE)  # terminate the message line
}
##
## Reassign the S4 slots of `x_new` from `x_org`.
## Necessary when some operation from the Matrix class obliterates them.
## Structural slots (Dim, Dimnames, i, p, x, factors) plus any `exceptions`
## are never copied; failed assignments are silently ignored, as before.
## Ken B
reassign_slots <- function(x_new, x_org, exceptions = NULL) {
    skip <- c("Dim", "Dimnames", "i", "p", "x", "factors", exceptions)
    for (nm in setdiff(slotNames(class(x_org)), skip)) {
        try({
            slot(x_new, nm) <- slot(x_org, nm)
        }, silent = TRUE)
    }
    x_new
}
#' Replacement function extending base::attributes()
#'
#' Sets the attributes of \code{x}. With \code{overwrite = FALSE}, existing
#' attributes are kept and only attributes not already present are added.
#' @param x an object
#' @param overwrite if \code{TRUE}, overwrite old attributes
#' @param value new attributes
#' @keywords internal
"attributes<-" <- function(x, overwrite = TRUE, value) {
    if (!overwrite) {
        old <- base::attributes(x)
        value <- c(old, value[!(names(value) %in% names(old))])
    }
    base::attributes(x) <- value
    return(x)
}
#' Replacement function to assign multiple slots to an S4 object
#'
#' Assigns each named element of \code{value} to the matching slot of
#' \code{x}, skipping names that are not defined slots, names listed in
#' \code{exceptions}, and values whose \code{typeof()} does not match the
#' declared slot class string (preserving the original's strict check).
#' @param x an S4 object
#' @param exceptions slots to ignore
#' @param value a list of attributes extracted by attributes()
#' @keywords internal
"slots<-" <- function(x, exceptions = c("Dim", "Dimnames", "i", "p", "x", "factors"), value) {
    defined <- methods::getSlots(class(x)[1])
    candidates <- setdiff(intersect(names(value), names(defined)), exceptions)
    for (nm in candidates) {
        if (identical(typeof(value[[nm]]), defined[[nm]])) {
            methods::slot(x, nm) <- value[[nm]]
        }
    }
    return(x)
}
#' utility function to create a object with new set of attributes
#'
#' @param x an underlying R object of a new object
#' @param what type of object to create; currently only "tokens" is supported
#' @param attrs attributes of a new object
#' @param overwrite_attributes overwrite attributes of the input object, if \code{TRUE}
#' @param ... additional attributes passed to \code{structure()}
#' @keywords internal
create <- function(x, what, attrs = NULL, overwrite_attributes = FALSE, ...) {
    # BUG FIX: the original left `class` undefined for any `what` other than
    # "tokens", which made structure() fail with an obscure error; fail fast
    # with an explicit message instead.
    if (what == 'tokens') {
        class <- c('tokens', 'list')
    } else {
        stop("unknown object type: ", what)
    }
    x <- structure(x, class = class, ...)
    if (!is.null(attrs)) {
        attributes(x, overwrite_attributes) <- attrs
    }
    return(x)
}
#' convert various input as pattern to a vector used in tokens_select,
#' tokens_compound and kwic.
#' @inheritParams pattern
#' @inheritParams valuetype
#' @param case_insensitive ignore the case of dictionary values if \code{TRUE}
#' @param concatenator concatenator that join multi-word expression in tokens object
#' @param remove_unigram ignore single-word patterns if \code{TRUE}
#' @seealso regex2id
#' @keywords internal
pattern2id <- function(pattern, types, valuetype, case_insensitive,
                       concatenator = '_', remove_unigram = FALSE) {
    if (is.dfm(pattern))
        stop('dfm cannot be used as pattern')
    # BUG FIX: the original condition was
    # `is.collocations(pattern) || is.collocations(pattern)`; the duplicated
    # call was redundant and has been removed (logically identical).
    if (is.collocations(pattern)) {
        # collocations: split each multi-word collocation on whitespace and
        # map each word to its type id; drop collocations with unknown words
        pattern <- stri_split_charclass(pattern$collocation, "\\p{Z}")
        pattern_id <- lapply(pattern, function(x) fastmatch::fmatch(x, types))
        pattern_id <- pattern_id[sapply(pattern_id, function(x) all(!is.na(x)))]
    } else {
        if (is.dictionary(pattern)) {
            pattern <- unlist(pattern, use.names = FALSE)
            pattern <- split_dictionary_values(pattern, concatenator)
        } else {
            pattern <- as.list(pattern)
        }
        if (remove_unigram)
            pattern <- pattern[lengths(pattern) > 1] # drop single-word pattern
        pattern_id <- regex2id(pattern, types, valuetype, case_insensitive)
    }
    # keep the human-readable pattern alongside the resolved ids
    attr(pattern_id, 'pattern') <- stri_c_list(pattern, sep = ' ')
    return(pattern_id)
}
#' internal function for \code{select_types()} to check if a string is a
#' regular expression
#'
#' A string containing any regex metacharacter is treated as a regular
#' expression rather than a fixed pattern.
#' @param x a character string to be tested (expected scalar; the underlying
#'   stringi call recycles elementwise for longer inputs)
#' @keywords internal
is_regex <- function(x){
    specials <- c(".", "(", ")", "^", "{", "}", "+", "$", "*", "?", "[", "]", "\\")
    any(stri_detect_fixed(x, specials))
}
#' internal function for \code{select_types()} to escape regular expressions
#'
#' Escapes regex metacharacters while leaving the glob wildcards * and ?
#' untouched, so \code{utils::glob2rx()} can still interpret them afterwards.
#' @param x character vector to be escaped
#' @keywords internal
escape_regex <- function(x){
    stri_replace_all_regex(x, "([.()^\\{\\}+$\\[\\]\\\\])", "\\\\$1") # allow glob
}
# Check ... arguments against a list of permissible argument names and warn
# about any that will not be used. Returns NULL invisibly.
check_dots <- function(dots, permissible_args = NULL) {
    if (length(dots) == 0) return()
    impermissible <- setdiff(names(dots), permissible_args)
    if (length(impermissible)) {
        label <- if (length(impermissible) > 1) "Arguments " else "Argument "
        warning(label, paste(impermissible, collapse = ', '), " not used.",
                noBreaks. = TRUE, call. = FALSE)
    }
}
#' Print friendly object class not defined message
#'
#' Checks valid methods and issues a friendlier error message in case the
#' method is undefined for the supplied object type.
#' @param object_class character describing the object class (currently
#'   unused; the message lists the classes with registered S3 methods)
#' @param function_name character which is the function name
#' @keywords internal
#' @examples
#' # as.tokens.default <- function(x, concatenator = "", ...) {
#' #     stop(friendly_class_undefined_message(class(x), "as.tokens"))
#' # }
friendly_class_undefined_message <- function(object_class, function_name) {
    method_names <- as.character(utils::methods(function_name))
    valid_object_types <-
        stringi::stri_extract_last_regex(method_names, "(data\\.){0,1}\\w+$")
    valid_object_types <- valid_object_types[valid_object_types != "default"]
    paste0(function_name, "() only works on ",
           paste(valid_object_types, collapse = ", "),
           " objects.")
}
| /R/utils.R | no_license | datascience2017/quanteda | R | false | false | 7,032 | r | #' @importFrom magrittr %>%
#' @export
magrittr::`%>%`
# rdname catm
# messages() with some of the same syntax as cat(): takes a sep argument and
# does not append a newline by default
catm <- function(..., sep = " ", appendLF = FALSE) {
message(paste(..., sep = sep), appendLF = appendLF)
}
# used in displaying verbose messages for tokens_select and dfm_select
message_select <- function(selection, nfeats, ndocs, nfeatspad = 0, ndocspad = 0) {
catm(if (selection == "keep") "kept" else "removed", " ",
format(nfeats, big.mark = ",", scientific = FALSE),
" feature", if (nfeats != 1L) "s" else "", sep = "")
if (ndocs > 0) {
catm(" and ",
format(ndocs, big.mark=",", scientific = FALSE),
" document", if (ndocs != 1L) "s" else "",
sep = "")
}
if ((nfeatspad + ndocspad) > 0) {
catm(", padded ", sep = "")
}
if (nfeatspad > 0) {
catm(format(nfeatspad, big.mark=",", scientific = FALSE),
" feature", if (nfeatspad != 1L) "s" else "",
sep = "")
}
if (ndocspad > 0) {
if (nfeatspad > 0) catm(" and ", sep = "")
catm(format(ndocspad, big.mark=",", scientific = FALSE),
" document", if (ndocspad != 1L) "s" else "",
sep = "")
}
catm("", appendLF = TRUE)
}
##
## reassign the slots to an S4 dfm-like object
## necessary when some operation from the Matrix class obliterates them
## Ken B
reassign_slots <- function(x_new, x_org, exceptions = NULL) {
snames <- slotNames(class(x_org))
snames <- setdiff(snames, c("Dim", "Dimnames", "i", "p", "x", "factors", exceptions))
for (sname in snames) {
try({
slot(x_new, sname) <- slot(x_org, sname)
}, silent = TRUE)
}
x_new
}
#' function extending base::attributes()
#' @param x an object
#' @param overwrite if \code{TRUE}, overwrite old attributes
#' @param value new attributes
#' @keywords internal
"attributes<-" <- function(x, overwrite = TRUE, value) {
if (overwrite) {
base::attributes(x) <- value
} else {
base::attributes(x) <- c(base::attributes(x), value[!(names(value) %in% names(base::attributes(x)))])
}
return(x)
}
#' function to assign multiple slots to a S4 object
#' @param x an S4 object
#' @param exceptions slots to ignore
#' @param value a list of attributes extracted by attributes()
#' @keywords internal
"slots<-" <- function(x, exceptions = c("Dim", "Dimnames", "i", "p", "x", "factors"), value) {
slots <- methods::getSlots(class(x)[1])
for (sname in names(value)) {
if (!sname %in% names(slots) || sname %in% exceptions) next
if (!identical(typeof(value[[sname]]), slots[[sname]])) next
methods::slot(x, sname) <- value[[sname]]
}
return(x)
}
#' utility function to create a object with new set of attributes
#'
#' @param x an underlying R object of a new object
#' @param what type of object to create; currently only "tokens" is supported
#' @param attrs attributes of a new object
#' @param overwrite_attributes overwrite attributes of the input object, if \code{TRUE}
#' @param ... additional attributes passed to \code{structure()}
#' @keywords internal
create <- function(x, what, attrs = NULL, overwrite_attributes = FALSE, ...) {
    # BUG FIX: the original left `class` undefined for any `what` other than
    # "tokens", which made structure() fail with an obscure error; fail fast
    # with an explicit message instead.
    if (what == 'tokens') {
        class <- c('tokens', 'list')
    } else {
        stop("unknown object type: ", what)
    }
    x <- structure(x, class = class, ...)
    if (!is.null(attrs)) {
        attributes(x, overwrite_attributes) <- attrs
    }
    return(x)
}
#' convert various input as pattern to a vector used in tokens_select,
#' tokens_compound and kwic.
#' @inheritParams pattern
#' @inheritParams valuetype
#' @param case_insensitive ignore the case of dictionary values if \code{TRUE}
#' @param concatenator concatenator that join multi-word expression in tokens object
#' @param remove_unigram ignore single-word patterns if \code{TRUE}
#' @seealso regex2id
#' @keywords internal
pattern2id <- function(pattern, types, valuetype, case_insensitive,
                       concatenator = '_', remove_unigram = FALSE) {
    if (is.dfm(pattern))
        stop('dfm cannot be used as pattern')
    ## BUG FIX: the condition was `is.collocations(pattern) ||
    ## is.collocations(pattern)' -- a duplicated operand; a single
    ## test is equivalent.
    if (is.collocations(pattern)) {
        ## Split multi-word collocations on whitespace and map each word
        ## to its type id; drop collocations containing unmatched words.
        pattern <- stri_split_charclass(pattern$collocation, "\\p{Z}")
        pattern_id <- lapply(pattern, function(x) fastmatch::fmatch(x, types))
        pattern_id <- pattern_id[sapply(pattern_id, function(x) all(!is.na(x)))]
    } else {
        if (is.dictionary(pattern)) {
            pattern <- unlist(pattern, use.names = FALSE)
            pattern <- split_dictionary_values(pattern, concatenator)
        } else {
            pattern <- as.list(pattern)
        }
        if (remove_unigram)
            pattern <- pattern[lengths(pattern) > 1] # drop single-word pattern
        pattern_id <- regex2id(pattern, types, valuetype, case_insensitive)
    }
    ## Record the human-readable pattern alongside the matched ids.
    attr(pattern_id, 'pattern') <- stri_c_list(pattern, sep = ' ')
    return(pattern_id)
}
#' internal function for \code{select_types()} to check if a string is a regular expression
#' @param x a character string to be tested
#' @keywords internal
is_regex <- function(x){
    ## Treat x as a regex if it contains any regex metacharacter.
    metachars <- c(".", "(", ")", "^", "{", "}", "+", "$", "*", "?", "[", "]", "\\")
    any(stri_detect_fixed(x, metachars))
}
#' internal function for \code{select_types()} to escape regular expressions
#'
#' This function escapes glob patterns before \code{utils:glob2rx()}, therefore * and ?
#' are unescaped.
#'
#' Backslash-escapes the regex metacharacters \code{. ( ) ^ { } + $ [ ] \\}
#' while leaving the glob wildcards \code{*} and \code{?} intact, so the
#' result can be converted by \code{utils::glob2rx()}.
#' @param x character vector to be escaped
#' @keywords internal
escape_regex <- function(x){
    #stri_replace_all_regex(x, "([.()^\\{\\}+$*\\[\\]\\\\])", "\\\\$1") # escape any
    stri_replace_all_regex(x, "([.()^\\{\\}+$\\[\\]\\\\])", "\\\\$1") # allow glob
}
# function to check dots arguments against a list of permissible arguments
# Warns (without breaking) when any element of `dots` is not named in
# `permissible_args`; returns NULL invisibly when `dots` is empty.
check_dots <- function(dots, permissible_args = NULL) {
    if (length(dots) == 0) return()
    impermissible_args <- setdiff(names(dots), permissible_args)
    n_bad <- length(impermissible_args)
    if (n_bad > 0) {
        ## Pluralise "Argument" when more than one name is unused.
        warning("Argument", if (n_bad > 1) "s " else " ",
                paste(impermissible_args, collapse = ', '), " not used.",
                noBreaks. = TRUE, call. = FALSE)
    }
}
#' Print friendly object class not defined message
#'
#' Checks valid methods and issues a friendlier error message in case the method is
#' undefined for the supplied object type.
#' @param object_class character describing the object class
#' @param function_name character which is the function name
#' @keywords internal
#' @examples
#' # as.tokens.default <- function(x, concatenator = "", ...) {
#' #     stop(friendly_class_undefined_message(class(x), "as.tokens"))
#' # }
friendly_class_undefined_message <- function(object_class, function_name) {
    ## List the S3 methods defined for the generic, then extract the
    ## class suffix of each method name (handling "data.frame" etc.).
    method_names <- as.character(utils::methods(function_name))
    valid_object_types <- stringi::stri_extract_last_regex(method_names, "(data\\.){0,1}\\w+$")
    valid_object_types <- valid_object_types[valid_object_types != "default"]
    paste0(function_name, "() only works on ",
           paste(valid_object_types, collapse = ", "),
           " objects.")
}
|
\name{AddMonths}
\alias{AddMonths}
\title{Add a Month to a Date
}
\description{Naively adding a number of months to a date will in some cases lead to an invalid date; think of e.g. 2012-01-30 + 1 month. \cr AddMonths ensures that the result is always a valid date, e.g.
\code{as.Date("2013-01-31") + 1 month} will be \code{"2013-02-28"}. If number \code{n} is negative, the months will be subtracted.
}
\usage{
AddMonths(x, n, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a Date object (or something which can be coerced by \code{\link{as.Date}}(x, ...) to such an object)
to which a number of months has to be added.
%% ~~Describe \code{x} here~~
}
\item{n}{the number of months to be added. If n is negative the months will be subtracted.
%% ~~Describe \code{n} here~~
}
\item{\dots}{the dots are passed to \code{\link{as.Date}}, e.g. for supplying \code{origin}.
%% ~~Describe \code{ceiling} here~~
}
}
\details{All parameters will be recycled if necessary.
}
\value{a vector of class \code{Date} with the same dimension as \code{x}, containing the transformed dates.
}
\references{
Thanks to Antonio: \url{http://stackoverflow.com/questions/14169620/add-a-month-to-a-date}
}
\author{Andri Signorell <andri@signorell.net>, based on code by Roland Rapold and Antonio
}
\seealso{\code{\link{AddMonthsYM}}; Date functions: \code{\link{Year}}, \code{\link{Month}}, etc.
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# characters will be coerced to Date
AddMonths("2013-01-31", 1)
# negative n
AddMonths(as.Date("2013-03-31"), -1)
# Arguments will be recycled
# (with warning if the longer is not a multiple of length of shorter)
AddMonths(c("2013-01-31", "2013-03-31", "2013-10-31", "2013-12-31"), c(1,-1))
x <- as.POSIXct(c("2015-01-31", "2015-08-31"))
n <- c(1, 3)
AddMonths(x, n)
# mind the origin if x supplied as numeric ...
x <- as.numeric(as.Date(x))
AddMonths(x, n, origin=as.Date("1970-01-01"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ chron }
| /man/AddMonths.Rd | no_license | mainwaringb/DescTools | R | false | false | 2,098 | rd | \name{AddMonths}
\alias{AddMonths}
\title{Add a Month to a Date
}
\description{Naively adding a number of months to a date will in some cases lead to an invalid date; think of e.g. 2012-01-30 + 1 month. \cr AddMonths ensures that the result is always a valid date, e.g.
\code{as.Date("2013-01-31") + 1 month} will be \code{"2013-02-28"}. If number \code{n} is negative, the months will be subtracted.
}
\usage{
AddMonths(x, n, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{a Date object (or something which can be coerced by \code{\link{as.Date}}(x, ...) to such an object)
to which a number of months has to be added.
%% ~~Describe \code{x} here~~
}
\item{n}{the number of months to be added. If n is negative the months will be subtracted.
%% ~~Describe \code{n} here~~
}
\item{\dots}{the dots are passed to \code{\link{as.Date}}, e.g. for supplying \code{origin}.
%% ~~Describe \code{ceiling} here~~
}
}
\details{All parameters will be recycled if necessary.
}
\value{a vector of class \code{Date} with the same dimension as \code{x}, containing the transformed dates.
}
\references{
Thanks to Antonio: \url{http://stackoverflow.com/questions/14169620/add-a-month-to-a-date}
}
\author{Andri Signorell <andri@signorell.net>, based on code by Roland Rapold and Antonio
}
\seealso{\code{\link{AddMonthsYM}}; Date functions: \code{\link{Year}}, \code{\link{Month}}, etc.
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# characters will be coerced to Date
AddMonths("2013-01-31", 1)
# negative n
AddMonths(as.Date("2013-03-31"), -1)
# Arguments will be recycled
# (with warning if the longer is not a multiple of length of shorter)
AddMonths(c("2013-01-31", "2013-03-31", "2013-10-31", "2013-12-31"), c(1,-1))
x <- as.POSIXct(c("2015-01-31", "2015-08-31"))
n <- c(1, 3)
AddMonths(x, n)
# mind the origin if x supplied as numeric ...
x <- as.numeric(as.Date(x))
AddMonths(x, n, origin=as.Date("1970-01-01"))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ chron }
|
#' Plotting estimated locations
#'
#' Plots estimated densities of animal locations, which are latent
#' variables in SECR models.
#'
#' @param fit A fitted model from \link{fit.ascr}.
#' @param id A numeric vector with row numbers from
#' \code{fit$args$capt}, indicating which individuals' locations
#' are to be plotted.
#' @param infotypes A character vector indicating the type(s) of
#' information to be used when plotting the estimated density of
#' location. Elements can be a subset of \code{"capt"},
#' \code{"bearing"}, \code{"dist"}, \code{"ss"}, \code{"toa"},
#' \code{"combined"}, and \code{"all"}, where \code{"capt"} shows
#' estimated location only using detection locations,
#' \code{"combined"} combines all information types together, and
#' \code{"all"} plots all possible contour types. When signal
#' strength information is used in the model fit, \code{"capt"}
#' and \code{"ss"} are equivalent as the signal strength
#' information is built into the detection function. By default,
#' only the most informative contour is plotted, i.e.,
#' \code{"capt"} if the model was fitted with no additional
#' information, and \code{"combined"} otherwise.
#' @param combine Logical, if \code{TRUE} then the information types
#' specified in \code{infotypes} are combined into a single
#' contour. If \code{FALSE} then separate contours are plotted for
#' each information type.
#' @param xlim A numeric vector of length 2, giving the x coordinate
#' range.
#' @param ylim A numeric vector of length 2, giving the y coordinate
#' range.
#' @param mask A matrix with two columns. Each row provides Cartesian
#' coordinates for the location of a mask point. The function
#' \link[ascr]{create.mask} will return a suitable object. The
#' mask used to fit the model \code{fit} will be used by default;
#' this argument is usually used when estimated location contours
#' need to be plotted to a higher resolution than this.
#' @param levels A numeric vector giving the values to be associated
#' with the plotted contours.
#' @param nlevels The number of contour levels desired. Ignored if
#' \code{levels} is provided.
#' @param density Logical, if \code{TRUE}, the labels on contours (and
#' the levels specified by \code{levels}) refer to the density of
#' the estimated distribution of the individual's location. If
#' \code{FALSE}, the labels on contours (and the levels specified
#' by \code{levels}) refer to the probability of the individual
#' being located within the associated contour under the estimated
#' distribution of the individual's location.
#' @param cols A list with named components corresponding to each
#' contour type (i.e., a subset of \code{"capt"},
#' \code{"bearing"}, \code{"dist"}, \code{"toa"}, and
#' \code{"combined"}). Each component provides the colour of the
#' associated contour type (e.g., using a character string such as
#' \code{"red"}, or a call to the function
#' \link[grDevices]{rgb}). By default, if only one contour is to
#' be plotted, it will be plotted in black. Alternatively, a
#' vector with a single element, specifying the colour for all
#' contours.
#' @param ltys The line type of the contours, with the same required
#' syntax as \code{cols}; see \link{par}.
#' @param trap.col The colour of the points representing detector
#' locations.
#' @param circle.traps Logical, if \code{TRUE} circles are plotted
#' around traps that made a detection of the individual in
#' question.
#' @param show.labels Logical, if \code{TRUE}, contours are labelled
#' with the appropriate probability density (if \code{density} is
#' \code{TRUE}), or the corresponding probability of the
#' individual being within the associated contour, under the
#' estimated density (if \code{density} is \code{FALSE}).
#' @param plot.contours Logical, if \code{TRUE}, contours are
#' plotted. Note that, if \code{FALSE}, nothing corresponding to
#' the density of the individuals' locations is plotted unless
#' \code{plot.estlocs} is \code{TRUE}.
#' @param plot.estlocs Logical, if \code{TRUE}, dots are plotted at
#' the mode of the combined densities. If a density has more than
#' a single mode (and the modes have the same density value) then
#' a dot will be plotted for each.
#' @param keep.estlocs Logical, if \code{TRUE}, the locations of the
#' estimated locations are returned.
#' @param plot.arrows Logical, if \code{TRUE}, arrows indicating the
#' estimated bearing to the individual are plotted from detectors
#' at which detections were made.
#' @param plot.circles Logical, if \code{TRUE}, circles indicating the
#' estimated distance to the individual are plotted around
#' detectors at which detections were made.
#' @param arrow.length Numeric, providing the length of the arrows
#' (only used if \code{plot.arrows} is \code{TRUE}).
#' @param show.legend Logical, if \code{TRUE}, a legend will be added
#' to the plot.
#' @param show.axes Logical, if \code{TRUE}, axes will be added to the
#' plot.
#' @param add Logical, if \code{TRUE}, contours will be added to an
#' existing plot.
#'
#' @examples
#' locations(example$fits$simple.hn, 1)
#' locations(example$fits$simple.hn, 1, levels = c(0.50, 0.90, 0.95))
#' \dontrun{
#' fine.mask <- create.mask(example$traps, 20, spacing = 0.2)
#' locations(example$fits$bearing.hn, 1, infotypes = "all", mask = fine.mask)
#' }
#'
#' @export
locations <- function(fit, id, infotypes = NULL, combine = FALSE,
                      xlim = range(mask[, 1]),
                      ylim = range(mask[, 2]), mask = get.mask(fit),
                      levels = NULL, nlevels = 10, density = FALSE,
                      cols = list(combined = "black", capt = "purple",
                          ss = "orange", bearing = "green", dist = "brown", toa = "blue"),
                      ltys = list(combined = "solid", capt = "solid",
                          ss = "solid", bearing = "solid", dist = "solid", toa = "solid"),
                      trap.col = "red", circle.traps = TRUE,
                      show.labels = TRUE, plot.contours = TRUE,
                      plot.estlocs = FALSE,
                      keep.estlocs = FALSE,
                      plot.arrows = "bearing" %in% fit$infotypes,
                      plot.circles = "dist" %in% fit$infotypes & !("bearing" %in% fit$infotypes),
                      arrow.length = NULL,
                      show.legend = FALSE, show.axes = TRUE, add = FALSE){
    ## Error for locations() with a directional model.
    if (!is.null(fit$args$ss.opts$directional)){
        if (fit$args$ss.opts$directional){
            stop("The locations() function has not yet been implemented for directional model fits.")
        }
    }
    if (!is.null(fit$args$ss.opts$het.source)){
        if (fit$args$ss.opts$het.source){
            stop("The locations() function has not yet been implemented for heterogeneous source strength model fits.")
        }
    }
    if (fit$first.calls){
        stop("The locations() function has not yet been implemented for first-call models.")
    }
    ## Error if combine specified without infotypes.
    if (missing(infotypes) & combine){
        stop("Argument `combine' is only useful if `infotypes' is provided.")
    }
    ## Saving estimated locations.
    if (keep.estlocs){
        estlocs <- matrix(0, nrow = length(id), ncol = 2)
        j <- 1
    }
    ## Setting up plotting area.
    if (!add){
        plot.new()
        plot.window(xlim = xlim, ylim = ylim, asp = 1)
        box()
        if (show.axes){
            axis(1)
            axis(2)
        }
    }
    ## Ignoring 'nlevels' if 'levels' is provided.
    if (!is.null(levels)){
        nlevels <- length(levels)
    }
    ## Logical value for whether or not any additional information was
    ## used in model fit.
    any.infotypes <- length(fit$infotypes) > 0
    ## Setting default infotypes.
    if (is.null(infotypes)){
        if (any.infotypes){
            infotypes <- "combined"
        } else {
            infotypes <- "capt"
        }
    }
    ## Error if "combined" is used when there is no additional information.
    if ("combined" %in% infotypes & !any.infotypes){
        stop("No additional information used in model 'fit', so a \"combined\" contour cannot be plotted.")
    }
    ## Working out which contours to plot.
    if ("all" %in% infotypes){
        infotypes <- c(fit$infotypes, "capt", "combined"[any.infotypes])
    }
    ## Note: "ss" is deliberately kept as its own infotype; an earlier
    ## version remapped it to "capt" (see the commented-out line below).
    ##infotypes[infotypes == "ss"] <- "capt"
    infotypes <- unique(infotypes)
    ## Setting colour to "black" if there is only one contour to be plotted.
    if (missing(cols)){
        if (length(infotypes) == 1){
            cols <- "black"
        }
    }
    if (missing(ltys)){
        if (length(infotypes) == 1){
            ltys <- "solid"
        }
    }
    if (length(cols) == 1){
        cols.save <- cols
        cols <- vector(mode = "list", length = length(infotypes))
        names(cols) <- infotypes
        cols[infotypes] <- cols.save
    }
    if (length(ltys) == 1){
        ltys.save <- ltys
        ltys <- vector(mode = "list", length = length(infotypes))
        names(ltys) <- infotypes
        ltys[infotypes] <- ltys.save
        if (combine){
            ltys["combined"] <- ltys.save
        }
    }
    plot.types <- c("combined", "capt", "ss", "bearing", "dist", "toa") %in% infotypes
    names(plot.types) <- c("combined", "capt", "ss", "bearing", "dist", "toa")
    if (combine){
        plot.types["combined"] <- TRUE
    }
    ## Some error catching: drop contour types whose information was not
    ## used in the model fit.
    for (i in c("bearing", "dist", "toa")){
        if (plot.types[i] & !fit$fit.types[i]){
            msg <- paste("Contours for information type '", i, "' cannot be plotted as this information was not used in the model 'fit'", sep = "")
            warning(msg)
            plot.types[i] <- FALSE
        }
    }
    traps <- get.traps(fit)
    detfn <- fit$args$detfn
    ss.link <- fit$args$ss.opts$ss.link
    dists <- distances(traps, mask)
    ## Calculating density due to animal locations.
    p.det <- p.dot(fit = fit, points = mask)
    ## Divide by normalising constant (no conversion to square metres here;
    ## cf. show.contour()).
    a <- attr(mask, "area")
    f.x <- p.det/(a*sum(p.det))
    ## Calculating conditional density of capture history, given location.
    for (i in id){
        if (plot.types["combined"]){
            if ((!combine) | (combine & plot.types["capt"])){
                f.combined <- f.x
            } else {
                f.combined <- 0*f.x + 1
            }
        }
        capt <- fit$args$capt$bincapt[i, ]
        ## Contour due to capture history.
        if (plot.types["capt"] | plot.types["combined"] | plot.types["ss"]){
            det.pars <- get.par(fit, fit$detpars, as.list = TRUE)
            if (fit$fit.types["ss"]){
                det.pars$cutoff <- fit$args$ss.opts$cutoff
            }
            det.probs <- calc.detfn(dists, detfn, det.pars, ss.link)
            f.capt <- colProds(det.probs*capt + (1 - det.probs)*(1 - capt))
            if (plot.types["capt"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.capt, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$capt,
                                 lty = ltys$capt, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (fit$fit.types["ss"]){
                f.ss.capt <- ss.density(fit, i, mask, dists)
                f.ss <- f.ss.capt/f.capt
                ## Replace f.capt with the ss-based density so that
                ## f.combined below remains correct.
                f.capt <- f.ss.capt
                ## Guard against division by zero above: Inf entries
                ## arise where the original f.capt was zero.
                f.ss[f.ss == Inf] <- 0
                if (plot.types["ss"]){
                    if (!combine){
                        show.contour(mask = mask, dens = f.x*f.ss, levels = levels,
                                     nlevels = nlevels, prob = !density, col = cols$ss,
                                     lty = ltys$ss, show.labels = show.labels,
                                     plot.contours = plot.contours)
                    }
                }
            } else {
                ## No signal-strength information: use a flat (all-ones)
                ## ss density. BUG FIX: this was previously assigned to
                ## the unused name `fit.ss', leaving `f.ss' undefined
                ## (or stale from a previous iteration of the id loop)
                ## when used below.
                f.ss <- f.capt*0 + 1
            }
            ## Fold the capture-history and ss components into f.combined
            ## according to which of them the user asked to include.
            if (plot.types["combined"] & !combine){
                f.combined <- f.combined*f.capt
            } else if (plot.types["combined"] & combine){
                f.true.capt <- f.capt/f.ss
                f.true.capt[f.ss == 0] <- 0
                if (plot.types["capt"] & plot.types["ss"]){
                    f.combined <- f.combined*f.capt
                } else if (plot.types["capt"] & !plot.types["ss"]){
                    f.combined <- f.combined*f.true.capt
                } else if (!plot.types["capt"] & plot.types["ss"]){
                    f.combined <- f.combined*f.ss
                }
            }
        }
        ## Contour due to estimated bearings.
        if (plot.types["bearing"] | plot.types["combined"] & fit$fit.types["bearing"]){
            f.bearing <- bearing.density(fit, i, mask)
            if (plot.types["bearing"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.bearing, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$bearing,
                                 lty = ltys$bearing, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["bearing"])){
                    f.combined <- f.combined*f.bearing
                }
            }
        }
        ## Contour due to estimated distances.
        if (plot.types["dist"] | plot.types["combined"] & fit$fit.types["dist"]){
            f.dist <- dist.density(fit, i, mask, dists)
            if (plot.types["dist"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.dist, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$dist,
                                 lty = ltys$dist, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["dist"])){
                    f.combined <- f.combined*f.dist
                }
            }
            if (plot.circles){
                show.circles(fit, i, trap.col)
            }
        }
        ## Contour due to measured times of arrival. TOA only informs
        ## location when at least two detectors made a detection.
        if (plot.types["toa"] | plot.types["combined"] &
            fit$fit.types["toa"] & sum(capt) > 1){
            f.toa <- toa.density(fit, i, mask, dists)
            if (plot.types["toa"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.toa, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$toa,
                                 lty = ltys$toa, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["toa"])){
                    f.combined <- f.combined*f.toa
                }
            }
        }
        ## Combined contour.
        if (plot.types["combined"]){
            show.contour(mask = mask, dens = f.combined, levels = levels,
                         nlevels = nlevels, prob = !density, col = cols$combined,
                         lty = ltys$combined, show.labels = show.labels,
                         plot.contours = plot.contours)
        }
        if (plot.estlocs){
            if ((any.infotypes & plot.types["combined"]) | !any.infotypes){
                f.estlocs <- if (any.infotypes) f.combined else f.capt*f.x
                mode.points <- which(f.estlocs == max(f.estlocs))
                points(mask[mode.points, 1], mask[mode.points, 2],
                       pch = 16, col = "black")
                if (keep.estlocs){
                    estlocs[j, ] <- c(mask[mode.points, 1], mask[mode.points, 2])
                    j <- j + 1
                }
            }
        }
    }
    ## Plotting traps, and circles around them.
    if (!add){
        points(traps, col = trap.col, pch = 4, lwd = 2)
        if (circle.traps){
            if (length(id) == 1){
                points(traps[capt == 1, , drop = FALSE], col = trap.col, cex = 2, lwd = 2)
            }
        }
    }
    ## Plotting arrows for estimated bearings.
    ## NOTE(review): this block sits outside the loop over `id` and uses
    ## `i` and `capt` left over from its final iteration, so arrows are
    ## drawn for the last individual only -- confirm this is intended.
    if (fit$fit.types["bearing"]){
        if (plot.arrows){
            if (fit$fit.types["dist"]){
                arrow.length <- fit$args$capt$dist[i, capt == 1]
            }
            show.arrows(fit, i, arrow.length, trap.col)
        }
    }
    ## Making legend.
    if (show.legend){
        legend.labels <- infotypes
        legend.cols <- c(cols[infotypes], recursive = TRUE)
        legend.ltys <- c(ltys[infotypes], recursive = TRUE)
        legend("topright", legend = infotypes, lty = legend.ltys, col = legend.cols, bg = "white")
    }
    if (keep.estlocs){
        out <- list(estlocs = estlocs)
    } else {
        out <- invisible(TRUE)
    }
    out
}
## Helper to get stuff in the right form for contour().
## Converts the per-mask-point density vector `dens` into the grid matrix
## layout required by contour(), works out contour levels (either raw
## density levels or levels enclosing requested probability masses), and
## draws the contours onto the current plot. No-op if plot.contours is FALSE.
show.contour <- function(mask, dens, nlevels, levels, prob, col = "black", lty = 1, show.labels, plot.contours){
    if (plot.contours){
        ## Divide densities by normalising constant before plotting.
        ## NOTE(review): this reads attr(mask, "a") and multiplies by
        ## 10000 (presumably hectares -> square metres), whereas the
        ## caller normalises with attr(mask, "area") -- confirm both
        ## attributes exist and are consistent.
        a <- attr(mask, "a")*10000
        ## Note conversion of area to square metres.
        dens <- dens/(a*sum(dens))
        ## Mask points lie on a regular grid; rebuild that grid as a
        ## matrix `z`, leaving NA at grid cells not covered by the mask.
        unique.x <- sort(unique(mask[, 1]))
        unique.y <- sort(unique(mask[, 2]))
        z <- matrix(NA, nrow = length(unique.x), ncol = length(unique.y))
        n.mask <- nrow(mask)
        for (i in 1:n.mask){
            x <- mask[i, 1]
            y <- mask[i, 2]
            index.x <- which(x == unique.x)
            index.y <- which(y == unique.y)
            z[index.x, index.y] <- dens[i]
        }
        ## Sorting out levels.
        if (is.null(levels)){
            levels <- pretty(range(z, finite = TRUE), nlevels)
        } else {
            if (prob){
                ## Translate requested probability masses into density
                ## levels: for each requested mass, pick the density value
                ## at which the cumulative (sorted decreasing) density
                ## first reaches it.
                z.sort <- sort(z, decreasing = TRUE)
                probs.sort <- cumsum(z.sort)/sum(z.sort)
                prob.levels <- levels
                levels <- numeric(nlevels)
                for (i in 1:nlevels){
                    levels[i] <- z.sort[which(abs(probs.sort - prob.levels[i]) ==
                                              min(abs(probs.sort - prob.levels[i])))[1]]
                }
            }
        }
        if (prob){
            ## Label each contour with the probability mass it encloses.
            labels <- character(nlevels)
            for (i in 1:nlevels){
                labels[i] <- format(round(sum(z[z > levels[i]], na.rm = TRUE)/
                                          sum(z, na.rm = TRUE), 2), nsmall = 2)
            }
        } else {
            labels <- NULL
        }
        contour(x = unique.x, y = unique.y, z = z, levels = levels, labels = labels,
                col = col, lty = lty, drawlabels = show.labels, add = TRUE)
    }
}
## Calculating density due to estimated bearings.
## For each mask point, returns the joint von Mises density of the
## observed bearings across all detectors that detected individual `id`.
bearing.density <- function(fit, id, mask){
    capt <- fit$args$capt$bincapt[id, ]
    detected <- capt == 1
    obs.bearings <- fit$args$capt$bearing[id, detected]
    kappa <- get.par(fit, "kappa")
    ## Bearings from each detecting trap to every mask point.
    expected.bearings <- bearings(get.traps(fit)[detected, , drop = FALSE], mask)
    n.det <- sum(capt)
    dens <- matrix(0, nrow = n.det, ncol = nrow(mask))
    for (k in seq_len(n.det)){
        dens[k, ] <- dvm(obs.bearings[k], mu = expected.bearings[k, ], kappa = kappa)
    }
    ## Product over detectors gives the joint density per mask point.
    colProds(dens)
}
## Calculating density due to estimated distances.
## For each mask point, returns the joint gamma density of the observed
## distance estimates across all detectors that detected individual `id`.
dist.density <- function(fit, id, mask, dists){
    capt <- fit$args$capt$bincapt[id, ]
    detected <- capt == 1
    det.dists <- dists[detected, , drop = FALSE]
    obs.dists <- fit$args$capt$dist[id, detected]
    alpha <- get.par(fit, "alpha")
    ## Gamma rate parameter chosen so that the mean equals the true
    ## trap-to-point distance.
    rates <- alpha/det.dists
    n.det <- sum(capt)
    dens <- matrix(0, nrow = n.det, ncol = nrow(mask))
    for (k in seq_len(n.det)){
        dens[k, ] <- dgamma(obs.dists[k], shape = alpha, rate = rates[k, ])
    }
    ## Product over detectors gives the joint density per mask point.
    colProds(dens)
}
## Calculating density due to signal strengths.
## For each mask point, returns the joint likelihood of the observed
## signal-strength capture history of individual `id` across all traps:
## non-detections contribute one minus the detection probability, and
## detections contribute the normal density of the recorded strength.
ss.density <- function(fit, id, mask, dists){
    capt <- fit$args$capt$bincapt[id, ]
    ss.capt <- fit$args$capt$ss[id, ]
    det.pars <- get.par(fit, fit$detpars, cutoff = TRUE, as.list = TRUE)
    detfn <- fit$args$detfn
    ss.link <- fit$args$ss.opts$ss.link
    n.traps <- nrow(get.traps(fit))
    dens <- matrix(0, nrow = n.traps, ncol = nrow(mask))
    for (trap in seq_len(n.traps)){
        if (capt[trap] == 0){
            dens[trap, ] <- 1 - calc.detfn(dists[trap, ], detfn, det.pars, ss.link)
        } else if (capt[trap] == 1){
            ## Expected signal strength decays linearly with distance.
            expected.ss <- det.pars[["b0.ss"]] - det.pars[["b1.ss"]]*dists[trap, ]
            dens[trap, ] <- dnorm(ss.capt[trap], expected.ss, det.pars[["sigma.ss"]])
        } else {
            stop("The binary capture history must only contain 0s and 1s.")
        }
    }
    ## Product over traps gives the joint density per mask point.
    colProds(dens)
}
## Calculating density due to measured times of arrival (TOA).
## For each mask point, returns the joint density of the observed arrival
## times at the detectors that detected individual `id`, under the
## estimated TOA measurement-error model.
toa.density <- function(fit, id, mask, dists){
    capt <- fit$args$capt$bincapt[id, ]
    ## Keep distances from detecting traps only.
    dists <- dists[capt == 1, ]
    toa.capt <- fit$args$capt$toa[id, capt == 1]
    sigma.toa <- get.par(fit, "sigma.toa")
    ## Implied call times (arrival time minus travel time) for each
    ## detector/mask-point combination.
    prod.times <- toa.capt - dists/fit$args$sound.speed
    ## Sum of squared deviations of implied call times per mask point.
    toa.ssq <- aaply(prod.times, 2, function(x) sum((x - mean(x))^2))
    ## BUG FIX: the result was previously assigned to a dead variable
    ## `out', so the function returned its value invisibly; return the
    ## expression directly instead.
    (2*pi*sigma.toa^2)^((1 - sum(capt))/2)*
        exp(toa.ssq/(-2*sigma.toa^2))
}
## Plots arrows on traps where a detection was made, showing estimated bearing.
show.arrows <- function(fit, id, arrow.length = NULL, trap.col){
    ## Default arrow length: 5% of the smaller plot dimension.
    usr <- par("usr")
    if (is.null(arrow.length)){
        arrow.length <- 0.05*min(diff(range(usr[1:2])), diff(range(usr[3:4])))
    }
    capt <- fit$args$capt$bincapt[id, ]
    obs.bearings <- fit$args$capt$bearing[id, capt == 1]
    detector.pos <- get.traps(fit)[which(capt == 1), , drop = FALSE]
    ## Arrow tips offset from each detector along the observed bearing
    ## (bearings measured clockwise from north, hence sin/cos for x/y).
    tip.x <- detector.pos[, 1] + sin(obs.bearings)*arrow.length
    tip.y <- detector.pos[, 2] + cos(obs.bearings)*arrow.length
    arrows(detector.pos[, 1], detector.pos[, 2], tip.x, tip.y,
           length = 0.1, col = trap.col, lwd = 2)
}
## Plots circles around traps where a detection was made, showing estimated distance.
show.circles <- function(fit, id, trap.col){
    capt <- fit$args$capt$bincapt[id, ]
    est.dists <- fit$args$capt$dist[id, capt == 1]
    detector.pos <- get.traps(fit)[which(capt == 1), , drop = FALSE]
    ## One circle per detecting trap, radius = estimated distance.
    for (k in seq_len(nrow(detector.pos))){
        circles(detector.pos[k, ], est.dists[k], col = trap.col, lwd = 2)
    }
}
## Draws a circle of the given radius around `centre` on the current plot;
## `...` is forwarded to lines().
circles <- function(centre, radius, ...){
    theta <- seq(0, 2*pi, length.out = 100)
    lines(centre[1] + sin(theta)*radius, centre[2] + cos(theta)*radius, ...)
}
| /R/locations.r | no_license | cmjt/ascr | R | false | false | 23,455 | r | #' Plotting estimated locations
#'
#' Plots estimated densities of animal locations, which are latent
#' variables in SECR models.
#'
#' @param fit A fitted model from \link{fit.ascr}.
#' @param id A numeric vector with row numbers from
#' \code{fit$args$capt}, indicating which individuals' locations
#' are to be plotted.
#' @param infotypes A character vector indicating the type(s) of
#' information to be used when plotting the estimated density of
#' location. Elements can be a subset of \code{"capt"},
#' \code{"bearing"}, \code{"dist"}, \code{"ss"}, \code{"toa"},
#' \code{"combined"}, and \code{"all"}, where \code{"capt"} shows
#' estimated location only using detection locations,
#' \code{"combined"} combines all information types together, and
#' \code{"all"} plots all possible contour types. When signal
#' strength information is used in the model fit, \code{"capt"}
#' and \code{"ss"} are equivalent as the signal strength
#' information is built into the detection function. By default,
#' only the most informative contour is plotted, i.e.,
#' \code{"capt"} if the model was fitted with no additional
#' information, and \code{"combined"} otherwise.
#' @param combine Logical, if \code{TRUE} then the information types
#' specified in \code{infotypes} are combined into a single
#' contour. If \code{FALSE} then separate contours are plotted for
#' each information type.
#' @param xlim A numeric vector of length 2, giving the x coordinate
#' range.
#' @param ylim A numeric vector of length 2, giving the y coordinate
#' range.
#' @param mask A matrix with two columns. Each row provides Cartesian
#' coordinates for the location of a mask point. The function
#' \link[ascr]{create.mask} will return a suitable object. The
#' mask used to fit the model \code{fit} will be used by default;
#' this argument is usually used when estimated location contours
#' need to be plotted to a higher resolution than this.
#' @param levels A numeric vector giving the values to be associated
#' with the plotted contours.
#' @param nlevels The number of contour levels desired. Ignored if
#' \code{levels} is provided.
#' @param density Logical, if \code{TRUE}, the labels on contours (and
#' the levels specified by \code{levels}) refer to the density of
#' the estimated distribution of the individual's location. If
#' \code{FALSE}, the labels on contours (and the levels specified
#' by \code{levels}) refer to the probability of the individual
#' being located within the associated contour under the estimated
#' distribution of the individual's location.
#' @param cols A list with named components corresponding to each
#' contour type (i.e., a subset of \code{"capt"},
#' \code{"bearing"}, \code{"dist"}, \code{"toa"}, and
#' \code{"combined"}). Each component provides the colour of the
#' associated contour type (e.g., using a character string such as
#' \code{"red"}, or a call to the function
#' \link[grDevices]{rgb}). By default, if only one contour is to
#' be plotted, it will be plotted in black. Alternatively, a
#' vector with a single element, specifying the colour for all
#' contours.
#' @param ltys The line type of the contours, with the same required
#' syntax as \code{cols}; see \link{par}.
#' @param trap.col The colour of the points representing detector
#' locations.
#' @param circle.traps Logical, if \code{TRUE} circles are plotted
#' around traps that made a detection of the individual in
#' question.
#' @param show.labels Logical, if \code{TRUE}, contours are labelled
#' with the appropriate probability density (if \code{density} is
#' \code{TRUE}), or the corresponding probability of the
#' individual being within the associated contour, under the
#' estimated density (if \code{density} is \code{FALSE}).
#' @param plot.contours Logical, if \code{TRUE}, contours are
#' plotted. Note that, if \code{FALSE}, nothing corresponding to
#' the density of the individuals' locations is plotted unless
#' \code{plot.estlocs} is \code{TRUE}.
#' @param plot.estlocs Logical, if \code{TRUE}, dots are plotted at
#' the mode of the combined densities. If a density has more than
#' a single mode (and the modes have the same density value) then
#' a dot will be plotted for each.
#' @param keep.estlocs Logical, if \code{TRUE}, the locations of the
#' estimated locations are returned.
#' @param plot.arrows Logical, if \code{TRUE}, arrows indicating the
#' estimated bearing to the individual are plotted from detectors
#' at which detections were made.
#' @param plot.circles Logical, if \code{TRUE}, circles indicating the
#' estimated distance to the individual are plotted around
#' detectors at which detections were made.
#' @param arrow.length Numeric, providing the length of the arrows
#' (only used if \code{plot.arrows} is \code{TRUE}).
#' @param show.legend Logical, if \code{TRUE}, a legend will be added
#' to the plot.
#' @param show.axes Logical, if \code{TRUE}, axes will be added to the
#' plot.
#' @param add Logical, if \code{TRUE}, contours will be added to an
#' existing plot.
#'
#' @examples
#' locations(example$fits$simple.hn, 1)
#' locations(example$fits$simple.hn, 1, levels = c(0.50, 0.90, 0.95))
#' \dontrun{
#' fine.mask <- create.mask(example$traps, 20, spacing = 0.2)
#' locations(example$fits$bearing.hn, 1, infotypes = "all", mask = fine.mask)
#' }
#'
#' @export
locations <- function(fit, id, infotypes = NULL, combine = FALSE,
                      xlim = range(mask[, 1]),
                      ylim = range(mask[, 2]), mask = get.mask(fit),
                      levels = NULL, nlevels = 10, density = FALSE,
                      cols = list(combined = "black", capt = "purple",
                          ss = "orange", bearing = "green", dist = "brown", toa = "blue"),
                      ltys = list(combined = "solid", capt = "solid",
                          ss = "solid", bearing = "solid", dist = "solid", toa = "solid"),
                      trap.col = "red", circle.traps = TRUE,
                      show.labels = TRUE, plot.contours = TRUE,
                      plot.estlocs = FALSE,
                      keep.estlocs = FALSE,
                      plot.arrows = "bearing" %in% fit$infotypes,
                      plot.circles = "dist" %in% fit$infotypes & !("bearing" %in% fit$infotypes),
                      arrow.length = NULL,
                      show.legend = FALSE, show.axes = TRUE, add = FALSE){
    ## Fail early for model classes locations() does not yet support.
    if (!is.null(fit$args$ss.opts$directional)){
        if (fit$args$ss.opts$directional){
            stop("The locations() function has not yet been implemented for directional model fits.")
        }
    }
    if (!is.null(fit$args$ss.opts$het.source)){
        if (fit$args$ss.opts$het.source){
            stop("The locations() function has not yet been implemented for heterogeneous source strength model fits.")
        }
    }
    if (fit$first.calls){
        stop("The locations() function has not yet been implemented for first-call models.")
    }
    ## Error if combine specified without infotypes.
    if (missing(infotypes) & combine){
        stop("Argument `combine' is only useful if `infotypes' is provided.")
    }
    ## Storage for estimated locations; one row per requested individual.
    if (keep.estlocs){
        estlocs <- matrix(0, nrow = length(id), ncol = 2)
        j <- 1
    }
    ## Setting up plotting area.
    if (!add){
        plot.new()
        plot.window(xlim = xlim, ylim = ylim, asp = 1)
        box()
        if (show.axes){
            axis(1)
            axis(2)
        }
    }
    ## Ignoring 'nlevels' if 'levels' is provided.
    if (!is.null(levels)){
        nlevels <- length(levels)
    }
    ## Whether any additional information was used in the model fit.
    any.infotypes <- length(fit$infotypes) > 0
    ## Setting default infotypes.
    if (is.null(infotypes)){
        if (any.infotypes){
            infotypes <- "combined"
        } else {
            infotypes <- "capt"
        }
    }
    ## Error if "combined" is used when there is no additional information.
    if ("combined" %in% infotypes & !any.infotypes){
        stop("No additional information used in model 'fit', so a \"combined\" contour cannot be plotted.")
    }
    ## Working out which contours to plot; "all" expands to every
    ## information type used in the fit, plus "capt" (and "combined"
    ## when extra information is available).
    if ("all" %in% infotypes){
        infotypes <- c(fit$infotypes, "capt", "combined"[any.infotypes])
    }
    infotypes <- unique(infotypes)
    ## Setting colour/linetype to a single default if there is only one
    ## contour to be plotted.
    if (missing(cols)){
        if (length(infotypes) == 1){
            cols <- "black"
        }
    }
    if (missing(ltys)){
        if (length(infotypes) == 1){
            ltys <- "solid"
        }
    }
    ## Recycle a single colour/linetype across all requested infotypes.
    if (length(cols) == 1){
        cols.save <- cols
        cols <- vector(mode = "list", length = length(infotypes))
        names(cols) <- infotypes
        cols[infotypes] <- cols.save
    }
    if (length(ltys) == 1){
        ltys.save <- ltys
        ltys <- vector(mode = "list", length = length(infotypes))
        names(ltys) <- infotypes
        ltys[infotypes] <- ltys.save
        if (combine){
            ltys["combined"] <- ltys.save
        }
    }
    plot.types <- c("combined", "capt", "ss", "bearing", "dist", "toa") %in% infotypes
    names(plot.types) <- c("combined", "capt", "ss", "bearing", "dist", "toa")
    if (combine){
        plot.types["combined"] <- TRUE
    }
    ## Warn about (and drop) information types the model was not
    ## fitted with.
    for (i in c("bearing", "dist", "toa")){
        if (plot.types[i] & !fit$fit.types[i]){
            msg <- paste("Contours for information type '", i, "' cannot be plotted as this information was not used in the model 'fit'", sep = "")
            warning(msg)
            plot.types[i] <- FALSE
        }
    }
    traps <- get.traps(fit)
    detfn <- fit$args$detfn
    ss.link <- fit$args$ss.opts$ss.link
    dists <- distances(traps, mask)
    ## Calculating density due to animal locations.
    p.det <- p.dot(fit = fit, points = mask)
    ## Divide by normalising constant; not conversion to square metres.
    a <- attr(mask, "area")
    f.x <- p.det/(a*sum(p.det))
    ## Calculating conditional density of capture history, given location.
    for (i in id){
        if (plot.types["combined"]){
            if ((!combine) | (combine & plot.types["capt"])){
                f.combined <- f.x
            } else {
                f.combined <- 0*f.x + 1
            }
        }
        capt <- fit$args$capt$bincapt[i, ]
        ## Contour due to capture history.
        if (plot.types["capt"] | plot.types["combined"] | plot.types["ss"]){
            det.pars <- get.par(fit, fit$detpars, as.list = TRUE)
            if (fit$fit.types["ss"]){
                det.pars$cutoff <- fit$args$ss.opts$cutoff
            }
            det.probs <- calc.detfn(dists, detfn, det.pars, ss.link)
            f.capt <- colProds(det.probs*capt + (1 - det.probs)*(1 - capt))
            if (plot.types["capt"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.capt, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$capt,
                                 lty = ltys$capt, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (fit$fit.types["ss"]){
                f.ss.capt <- ss.density(fit, i, mask, dists)
                ## Density contribution of the signal strengths alone;
                ## replacing f.capt with the full signal-strength
                ## density keeps f.combined correct below.
                f.ss <- f.ss.capt/f.capt
                f.capt <- f.ss.capt
                ## Zero out mask points where f.capt was zero.
                f.ss[f.ss == Inf] <- 0
                if (plot.types["ss"]){
                    if (!combine){
                        show.contour(mask = mask, dens = f.x*f.ss, levels = levels,
                                     nlevels = nlevels, prob = !density, col = cols$ss,
                                     lty = ltys$ss, show.labels = show.labels,
                                     plot.contours = plot.contours)
                    }
                }
            } else {
                ## No signal-strength information: flat contribution.
                ## (Bug fix: this was previously assigned to `fit.ss',
                ## leaving `f.ss' undefined in the combine branch below.)
                f.ss <- f.capt*0 + 1
            }
            ## Combine the capture-history and signal-strength
            ## contributions, undoing whichever component was not
            ## requested when `combine' is in effect.
            if (plot.types["combined"] & !combine){
                f.combined <- f.combined*f.capt
            } else if (plot.types["combined"] & combine){
                f.true.capt <- f.capt/f.ss
                f.true.capt[f.ss == 0] <- 0
                if (plot.types["capt"] & plot.types["ss"]){
                    f.combined <- f.combined*f.capt
                } else if (plot.types["capt"] & !plot.types["ss"]){
                    f.combined <- f.combined*f.true.capt
                } else if (!plot.types["capt"] & plot.types["ss"]){
                    f.combined <- f.combined*f.ss
                }
            }
        }
        ## Contour due to estimated bearings.
        if ((plot.types["bearing"] | plot.types["combined"]) & fit$fit.types["bearing"]){
            f.bearing <- bearing.density(fit, i, mask)
            if (plot.types["bearing"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.bearing, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$bearing,
                                 lty = ltys$bearing, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["bearing"])){
                    f.combined <- f.combined*f.bearing
                }
            }
        }
        ## Contour due to estimated distances.
        if ((plot.types["dist"] | plot.types["combined"]) & fit$fit.types["dist"]){
            f.dist <- dist.density(fit, i, mask, dists)
            if (plot.types["dist"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.dist, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$dist,
                                 lty = ltys$dist, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["dist"])){
                    f.combined <- f.combined*f.dist
                }
            }
            if (plot.circles){
                show.circles(fit, i, trap.col)
            }
        }
        ## Contour due to measured times of arrival; only meaningful
        ## with detections at more than one trap.
        if ((plot.types["toa"] | plot.types["combined"]) &
            fit$fit.types["toa"] & sum(capt) > 1){
            f.toa <- toa.density(fit, i, mask, dists)
            if (plot.types["toa"]){
                if (!combine){
                    show.contour(mask = mask, dens = f.x*f.toa, levels = levels,
                                 nlevels = nlevels, prob = !density, col = cols$toa,
                                 lty = ltys$toa, show.labels = show.labels,
                                 plot.contours = plot.contours)
                }
            }
            if (plot.types["combined"]){
                if ((!combine) | (combine & plot.types["toa"])){
                    f.combined <- f.combined*f.toa
                }
            }
        }
        ## Combined contour.
        if (plot.types["combined"]){
            show.contour(mask = mask, dens = f.combined, levels = levels,
                         nlevels = nlevels, prob = !density, col = cols$combined,
                         lty = ltys$combined, show.labels = show.labels,
                         plot.contours = plot.contours)
        }
        if (plot.estlocs){
            if ((any.infotypes & plot.types["combined"]) | !any.infotypes){
                f.estlocs <- if (any.infotypes) f.combined else f.capt*f.x
                mode.points <- which(f.estlocs == max(f.estlocs))
                points(mask[mode.points, 1], mask[mode.points, 2],
                       pch = 16, col = "black")
                if (keep.estlocs){
                    ## estlocs has one row per individual, so only the
                    ## first mode is stored when modes are tied.
                    estlocs[j, ] <- c(mask[mode.points[1], 1],
                                      mask[mode.points[1], 2])
                    j <- j + 1
                }
            }
        }
    }
    ## Plotting traps, and circles around them.
    if (!add){
        points(traps, col = trap.col, pch = 4, lwd = 2)
        if (circle.traps){
            if (length(id) == 1){
                points(traps[capt == 1, , drop = FALSE], col = trap.col, cex = 2, lwd = 2)
            }
        }
    }
    ## Plotting arrows for estimated bearings.
    ## NOTE(review): `i' here is the last element of `id', so arrows
    ## are only drawn for the final individual when length(id) > 1 --
    ## confirm this is intended.
    if (fit$fit.types["bearing"]){
        if (plot.arrows){
            if (fit$fit.types["dist"]){
                arrow.length <- fit$args$capt$dist[i, capt == 1]
            }
            show.arrows(fit, i, arrow.length, trap.col)
        }
    }
    ## Making legend.
    if (show.legend){
        legend.labels <- infotypes
        legend.cols <- c(cols[infotypes], recursive = TRUE)
        legend.ltys <- c(ltys[infotypes], recursive = TRUE)
        legend("topright", legend = legend.labels, lty = legend.ltys,
               col = legend.cols, bg = "white")
    }
    ## Return estimated locations visibly when requested; otherwise
    ## return TRUE invisibly (assigning invisible() loses invisibility).
    if (keep.estlocs){
        list(estlocs = estlocs)
    } else {
        invisible(TRUE)
    }
}
## Helper to get densities into the right form for contour().
## mask is a two-column matrix of grid coordinates with an "area"
## attribute (hectares); dens gives a density value per mask point.
show.contour <- function(mask, dens, nlevels, levels, prob, col = "black", lty = 1, show.labels, plot.contours){
    if (plot.contours){
        ## Divide densities by normalising constant before plotting.
        ## Use the exact attribute name: the previous attr(mask, "a")
        ## only worked through partial matching (cf. attr(mask, "area")
        ## elsewhere in this file). Note conversion of area to square
        ## metres.
        a <- attr(mask, "area")*10000
        dens <- dens/(a*sum(dens))
        ## Rearranging mask densities into a matrix for contour().
        unique.x <- sort(unique(mask[, 1]))
        unique.y <- sort(unique(mask[, 2]))
        z <- matrix(NA, nrow = length(unique.x), ncol = length(unique.y))
        z[cbind(match(mask[, 1], unique.x),
                match(mask[, 2], unique.y))] <- dens
        ## Sorting out levels.
        if (is.null(levels)){
            levels <- pretty(range(z, finite = TRUE), nlevels)
        } else if (prob){
            ## Translate requested coverage probabilities into the
            ## density levels that most closely achieve them.
            z.sort <- sort(z, decreasing = TRUE)
            probs.sort <- cumsum(z.sort)/sum(z.sort)
            prob.levels <- levels
            levels <- numeric(nlevels)
            for (i in seq_len(nlevels)){
                levels[i] <- z.sort[which(abs(probs.sort - prob.levels[i]) ==
                                          min(abs(probs.sort - prob.levels[i])))[1]]
            }
        }
        ## Label each contour with the probability mass it encloses.
        if (prob){
            labels <- character(nlevels)
            for (i in seq_len(nlevels)){
                labels[i] <- format(round(sum(z[z > levels[i]], na.rm = TRUE)/
                                          sum(z, na.rm = TRUE), 2), nsmall = 2)
            }
        } else {
            labels <- NULL
        }
        contour(x = unique.x, y = unique.y, z = z, levels = levels, labels = labels,
                col = col, lty = lty, drawlabels = show.labels, add = TRUE)
    }
}
## Calculating density due to estimated bearings.
bearing.density <- function(fit, id, mask){
    ## Traps that detected this individual, and the bearings they recorded.
    bin.capt <- fit$args$capt$bincapt[id, ]
    obs.bearings <- fit$args$capt$bearing[id, bin.capt == 1]
    kappa <- get.par(fit, "kappa")
    expected.bearings <- bearings(get.traps(fit)[bin.capt == 1, , drop = FALSE], mask)
    n.det <- sum(bin.capt)
    dens <- matrix(0, nrow = n.det, ncol = nrow(mask))
    ## Von Mises density of each observed bearing about the bearing to
    ## every mask point.
    for (k in seq_len(n.det)){
        dens[k, ] <- dvm(obs.bearings[k], mu = expected.bearings[k, ], kappa = kappa)
    }
    ## Returning densities.
    colProds(dens)
}
## Calculating density due to estimated distances.
dist.density <- function(fit, id, mask, dists){
    ## Traps that detected this individual, and the distances they estimated.
    bin.capt <- fit$args$capt$bincapt[id, ]
    det.dists <- dists[bin.capt == 1, , drop = FALSE]
    obs.dists <- fit$args$capt$dist[id, bin.capt == 1]
    alpha <- get.par(fit, "alpha")
    n.det <- sum(bin.capt)
    dens <- matrix(0, nrow = n.det, ncol = nrow(mask))
    ## Gamma rate parameter per trap/mask-point pair.
    rates <- alpha/det.dists
    for (k in seq_len(n.det)){
        dens[k, ] <- dgamma(obs.dists[k], shape = alpha, rate = rates[k, ])
    }
    ## Returning densities.
    colProds(dens)
}
## Calculating density due to observed signal strengths.
ss.density <- function(fit, id, mask, dists){
    bin.capt <- fit$args$capt$bincapt[id, ]
    obs.ss <- fit$args$capt$ss[id, ]
    pars <- get.par(fit, fit$detpars, cutoff = TRUE, as.list = TRUE)
    det.fn <- fit$args$detfn
    link <- fit$args$ss.opts$ss.link
    trap.total <- nrow(get.traps(fit))
    dens <- matrix(0, nrow = trap.total, ncol = nrow(mask))
    for (trap in seq_len(trap.total)){
        trap.dists <- dists[trap, ]
        if (bin.capt[trap] == 0){
            ## Nondetection: probability of missing the call at each
            ## mask point.
            dens[trap, ] <- 1 - calc.detfn(trap.dists, det.fn, pars, link)
        } else if (bin.capt[trap] == 1){
            ## Detection: density of the observed signal strength about
            ## its expected value given distance.
            expected.ss <- pars[["b0.ss"]] - pars[["b1.ss"]]*trap.dists
            dens[trap, ] <- dnorm(obs.ss[trap], expected.ss, pars[["sigma.ss"]])
        } else {
            stop("The binary capture history must only contain 0s and 1s.")
        }
    }
    colProds(dens)
}
## Calculating density due to measured times of arrival.
toa.density <- function(fit, id, mask, dists){
    capt <- fit$args$capt$bincapt[id, ]
    ## drop = FALSE keeps a matrix even with a single detection
    ## (previously a lone detection collapsed this to a vector and
    ## broke the aaply() call below).
    dists <- dists[capt == 1, , drop = FALSE]
    toa.capt <- fit$args$capt$toa[id, capt == 1]
    sigma.toa <- get.par(fit, "sigma.toa")
    ## Implied production times at each mask point.
    prod.times <- toa.capt - dists/fit$args$sound.speed
    ## Sum of squared deviations of production times per mask point.
    toa.ssq <- aaply(prod.times, 2, function(x) sum((x - mean(x))^2))
    ## Return the density explicitly (previously left as an invisible
    ## assignment).
    (2*pi*sigma.toa^2)^((1 - sum(capt))/2)*
        exp(toa.ssq/(-2*sigma.toa^2))
}
## Plots arrows on traps where a detection was made, showing estimated bearing.
show.arrows <- function(fit, id, arrow.length = NULL, trap.col){
    plot.xlim <- par("usr")[c(1, 2)]
    plot.ylim <- par("usr")[c(3, 4)]
    ## Default arrow length: 5% of the shorter plot dimension.
    if (is.null(arrow.length)){
        arrow.length <- 0.05*min(diff(range(plot.xlim)), diff(range(plot.ylim)))
    }
    bin.capt <- fit$args$capt$bincapt[id, ]
    obs.bearings <- fit$args$capt$bearing[id, bin.capt == 1]
    det.traps <- get.traps(fit)[which(bin.capt == 1), , drop = FALSE]
    ## Bearings are measured clockwise from north, hence sin for x and
    ## cos for y.
    dx <- sin(obs.bearings)*arrow.length
    dy <- cos(obs.bearings)*arrow.length
    arrows(det.traps[, 1], det.traps[, 2], det.traps[, 1] + dx, det.traps[, 2] + dy,
           length = 0.1, col = trap.col, lwd = 2)
}
## Plots circles around traps where a detection was made, showing estimated distance.
show.circles <- function(fit, id, trap.col){
    capt <- fit$args$capt$bincapt[id, ]
    dist.capt <- fit$args$capt$dist[id, capt == 1]
    trappos <- get.traps(fit)[which(capt == 1), , drop = FALSE]
    ## seq_len() guards against the 1:0 loop the old 1:nrow(trappos)
    ## produced when there were no detections.
    for (i in seq_len(nrow(trappos))){
        centre <- trappos[i, ]
        radius <- dist.capt[i]
        circles(centre, radius, col = trap.col, lwd = 2)
    }
}
## Draws a circle of the given radius about centre = c(x, y); extra
## arguments are passed on to lines().
circles <- function(centre, radius, ...){
    ## Trace the circle with 100 points, starting due north.
    theta <- seq(0, 2*pi, length.out = 100)
    lines(centre[1] + radius*sin(theta),
          centre[2] + radius*cos(theta), ...)
}
|
## Jackknife estimate of the bias and standard error of a statistic.
##
## Computes the pseudo-values n*theta(full) - (n-1)*theta(leave-one-out)
## for each observation, then summarises them.
##
## Arguments:
##   v1:       numeric vector of observations.
##   statfunc: the statistic to jackknife (default sd).
## Returns a list with the full-sample statistic (mu0), the jackknife
## bias estimate, and the jackknife standard error.
Jackknife <- function(v1, statfunc = sd) {
    n1 <- length(v1)
    mu0 <- statfunc(v1)
    ## Pseudo-values, preallocated via vapply instead of growing a
    ## vector with c() inside the loop.
    jackvec <- vapply(seq_len(n1),
                      function(i) n1*mu0 - (n1 - 1)*statfunc(v1[-i]),
                      numeric(1))
    list(mu0 = mu0,
         jackbias = mean(jackvec) - mu0,
         jacksd = sd(jackvec))
}
##original jackknife
#CI: mean(pseudo) +/- t(0.975, n-1) * sqrt(var(pseudo)/n)
#mean(pseudo) + qt(0.975, length(x)-1)*sqrt(var(pseudo)/length(x))
#mean(pseudo) - qt(0.975, length(x)-1)*sqrt(var(pseudo)/length(x))
#bootstrap:
#mean0 - (sd0/sqrt(n0))*qnorm(1-alpha/2)
## Jackknife estimate of a statistic with two confidence intervals:
## one from the quantiles of the pseudo-values ("jack") and one from
## the normal approximation.
##
## Arguments:
##   v1:       numeric vector of observations.
##   statfunc: the statistic to jackknife (default sd).
##   alpha:    significance level (default 0.05 for 95% intervals).
Jackknife_confidence_interval <- function( v1, statfunc=sd, alpha = 0.05 ) {
    n1 <- length(v1)
    mu0 <- statfunc(v1)
    ## Pseudo-values, preallocated via vapply instead of growing a
    ## vector with c() inside the loop.
    jackvec <- vapply(seq_len(n1),
                      function(i) n1*mu0 - (n1 - 1)*statfunc(v1[-i]),
                      numeric(1))
    jackbias <- mean(jackvec) - mu0
    jacksd <- sd(jackvec)
    ## Pivot the pseudo-value quantiles about the sample mean.
    lq <- quantile(jackvec, alpha/2)
    uq <- quantile(jackvec, 1 - alpha/2)
    se <- sd(v1)/sqrt(n1)
    LB <- mean(v1) - se*uq
    UB <- mean(v1) - se*lq
    ## Normal-approximation interval for comparison.
    NLB <- mean(v1) - se*qnorm(1 - alpha/2)
    NUB <- mean(v1) + se*qnorm(1 - alpha/2)
    list(mu0 = mu0, jackbias = jackbias, jacksd = jacksd,
         jack.confidence.interval = c(LB, UB),
         normal.confidence.interval = c(NLB, NUB))
}
## Trial variant: confidence intervals from studentised leave-one-out
## statistics (pivotal) alongside the normal approximation.
Jackknife_confidence_interval_trial <- function(v1, statfunc = sd, alpha = 0.05) {
    n1 <- length(v1)
    mean0 <- statfunc(v1)
    sd0 <- sd(v1)
    ## Leave-one-out statistics and their standard deviations.
    loo.stat <- vapply(seq_len(n1), function(i) statfunc(v1[-i]), numeric(1))
    loo.sd <- vapply(seq_len(n1), function(i) sd(v1[-i]), numeric(1))
    ## Studentised deviations of the leave-one-out statistics.
    jackvec <- (loo.stat - mean0)/(loo.sd/sqrt(n1 - 1))
    jackbias <- mean(loo.stat - mean0)
    lq <- quantile(jackvec, alpha/2)
    uq <- quantile(jackvec, 1 - alpha/2)
    se0 <- sd0/sqrt(n1)
    list(pivotal.confidence.interval = c(mean0 - se0*uq, mean0 - se0*lq),
         normal.confidence.interval = c(mean0 - se0*qnorm(1 - alpha/2),
                                        mean0 + se0*qnorm(1 - alpha/2)))
}
## Demonstration on the integers 1..1000 (use <- for assignment; the
## c() wrapper around 1:1000 was redundant).
v1 <- 1:1000
Jackknife(v1)
Jackknife_confidence_interval_trial(v1, mean)
## Q1 ##
| /Bootstrap/R codes/jackknife.R | permissive | prakruti-joshi/Probability-and-Statistical-Inference | R | false | false | 2,630 | r | ##original jackknife
Jackknife<-function( v1, statfunc=sd ) {
n1 <- length(v1)
jackvec <- NULL
mu0 <- statfunc(v1)
for(i in 1:n1)
{
mua<-statfunc(v1[-i])
jackvec<-c(jackvec, n1*(mu0)-(n1-1)*mua)
}
jackbias<-mean(jackvec)-mu0
jacksd<-sd(jackvec)
#list(mu0=mu0,jackbias=jackbias,jacksd=jacksd, mean0 = mean0, jack_mean = jack_mean)
list(mu0=mu0,jackbias=jackbias,jacksd=jacksd)
}
##original jackknife
#mean(pseudo) ±(t0.975,n−1)* root(var(pseudo)/n)
#mean(pseudo) + qt(0.975,length(x)-1)*sqrt(var(pseudo)/length(x))
#mean(pseudo) - qt(0.975,length(x)-1)*sqrt(var(pseudo)/length(x))
#bootstrap:
#mean0-(sd0/sqrt(n0))*qnorm(1-alpha/2)
Jackknife_confidence_interval <- function( v1, statfunc=sd, alpha = 0.05 ) {
n1 <- length(v1)
jackvec <- NULL
mu0 <- statfunc(v1)
for(i in 1:n1)
{
mua<-statfunc(v1[-i])
jackvec<-c(jackvec, n1*(mu0)-(n1-1)*mua)
}
jackbias<-mean(jackvec)-mu0
jacksd<-sd(jackvec)
lq<-quantile(jackvec,alpha/2)
uq<-quantile(jackvec,1-alpha/2)
#ADD the other two confidence intervals.
#incorporate into the bootstrap confidence interval (what algebra supports this?) and output result
LB<-mean(v1)-(sd(v1)/sqrt(n1))*uq
UB<-mean(v1)-(sd(v1)/sqrt(n1))*lq
NLB<-mean(v1)-(sd(v1)/sqrt(n1))*qnorm(1-alpha/2)
NUB<-mean(v1)+(sd(v1)/sqrt(n1))*qnorm(1-alpha/2)
#list(mu0=mu0,jackbias=jackbias,jacksd=jacksd, mean0 = mean0, jack_mean = jack_mean)
list(mu0=mu0,jackbias=jackbias,jacksd=jacksd, jack.confidence.interval=c(LB,UB), normal.confidence.interval=c(NLB,NUB))
}
Jackknife_confidence_interval_trial <-function( v1, statfunc=sd, alpha = 0.05 ) {
n1 <- length(v1)
jackvec <- NULL
jackbiasvec <- NULL
mean0 <- statfunc(v1)
sd0 <- sd(v1)
for(i in 1:n1)
{
meanb <- statfunc(v1[-i])
sdb<-sqrt(var(v1[-i]))
#jackvec<-c(jackvec, n1*(mu0)-(n1-1)*mua)
jackvec<-c(jackvec,(meanb-mean0)/(sdb/sqrt(n1-1)))
jackbiasvec<-c(jackbiasvec,meanb-mean0)
}
jackbias<-mean(jackbiasvec)
#jacksd<-sd(jackvec)
lq<-quantile(jackvec, alpha/2)
uq<-quantile(jackvec, 1-alpha/2)
LB<-mean0-(sd0/sqrt(n1))*uq
UB<-mean0-(sd0/sqrt(n1))*lq
##NORMAL
NLB<-mean0-(sd0/sqrt(n1))*qnorm(1-alpha/2)
NUB<-mean0+(sd0/sqrt(n1))*qnorm(1-alpha/2)
list(pivotal.confidence.interval = c(LB,UB), normal.confidence.interval=c(NLB,NUB))
}
v1 = c(1:1000)
Jackknife(v1)
Jackknife_confidence_interval_trial(v1,mean)
## Q1 ##
|
library(dplyr)
library(parsnip)
library(stringr)
library(tidyverse)
library(ggplot2)
# Attach the documented NEDS 2016 CORE column names to the raw
# positional columns (V1-V56) of the sampled extract.
neds2016_CORE = rename(`NEDS_2016_CORE.sample.random.(1)`, AGE = V1, AMONTH = V2, AWEEKEND = V3, DIED_VISIT = V4, DISCWT = V5, DISP_ED = V6, DQTR = V7, DXVER = V8,
                       EDEVENT = V9, FEMALE = V10, HCUPFILE = V11, HOSP_ED = V12, I10_DX1 = V13, I10_DX2 = V14, I10_DX3 = V15, I10_DX4 = V16, I10_DX5 = V17,
                       I10_DX6 = V18, I10_DX7 = V19, I10_DX8 = V20, I10_DX9 = V21, I10_DX10 = V22, I10_DX11 = V23, I10_DX12 = V24, I10_DX13 = V25, I10_DX14 = V26, I10_DX15 = V27,
                       I10_DX16 = V28, I10_DX17 = V29, I10_DX18 = V30, I10_DX19 = V31, I10_DX20 = V32, I10_DX21 = V33, I10_DX22 = V34, I10_DX23 = V35, I10_DX24 = V36, I10_DX25 = V37, I10_DX26 = V38,
                       I10_DX27 = V39, I10_DX28 = V40, I10_DX29 = V41, I10_DX30 = V42, I10_ECAUSE1 = V43, I10_ECAUSE2 = V44, I10_ECAUSE3 = V45, I10_ECAUSE4 = V46, I10_NDX = V47,
                       I10_NECAUSE = V48, KEY_ED = V49, NEDS_STRATUM = V50, PAY1 = V51, PAY2 = V52, PL_NCHS = V53, TOTCHG_ED = V54, YEAR = V55, ZIPINC_QRTL = V56)
neds2016_CORE$Alcohol_Abuse <- ifelse(neds2016_CORE$I10_DX1 == "F1010"| neds2016_CORE$I10_DX1 == "F1011"| neds2016_CORE$I10_DX1 == "F10120"| neds2016_CORE$I10_DX1 == "F10121"| neds2016_CORE$I10_DX1 == "F10129"| neds2016_CORE$I10_DX1 == "F1014"| neds2016_CORE$I10_DX1 == "F10150"| neds2016_CORE$I10_DX1 == "F10151"| neds2016_CORE$I10_DX1 == "F10159"| neds2016_CORE$I10_DX1 == "F10180"| neds2016_CORE$I10_DX1 == "F10181"| neds2016_CORE$I10_DX1 == "F10182"| neds2016_CORE$I10_DX1 == "F10188"| neds2016_CORE$I10_DX1 == "F1019",1,0)
# Keep only rows with a valid income quartile (1-4).
neds2016_CORE <- neds2016_CORE %>%
  filter(ZIPINC_QRTL %in% 1:4)
# Recode the -9 sentinel in admission month to 9.
# NOTE(review): this folds "missing month" into month 9 -- confirm that
# is intended rather than dropping those rows.
neds2016_CORE$AMONTH <- ifelse(neds2016_CORE$AMONTH == -9, 9, neds2016_CORE$AMONTH)
# Keep only rows where sex is recorded as 0 or 1.
neds2016_CORE <- neds2016_CORE %>%
  filter(FEMALE %in% c("0", "1"))
# Convert the analysis columns to their modelling types in one mutate:
# categorical variables become factors, age becomes numeric.
neds2016_CORE <- neds2016_CORE %>%
  mutate(FEMALE = as.factor(FEMALE),
         AGE = as.numeric(AGE),
         Alcohol_Abuse = as.factor(Alcohol_Abuse),
         ZIPINC_QRTL = as.factor(ZIPINC_QRTL),
         YEAR = as.factor(YEAR),
         AMONTH = as.factor(AMONTH))
summary(neds2016_CORE)
# Alcohol-abuse visits by year.
neds2016_time <- neds2016_CORE %>%
  filter(Alcohol_Abuse == 1) %>%
  select(Alcohol_Abuse, YEAR)
ggplot(neds2016_time, aes(x = YEAR)) +
  geom_bar()
# Alcohol-abuse visits by recorded sex.
neds2016_sex <- neds2016_CORE %>%
  filter(Alcohol_Abuse == 1) %>%
  select(Alcohol_Abuse, FEMALE)
summary(neds2016_sex)
ggplot(neds2016_sex, aes(x = FEMALE)) +
  geom_bar()
# Bucket age into three bands (ifelse keeps missing ages as NA).
neds2016_CORE <- neds2016_CORE %>%
  mutate(Age_type = ifelse(AGE <= 44, "18-44 years", "45-64 years"),
         Age_type = ifelse(AGE >= 65, "65 years or older", Age_type),
         Age_type = as.factor(Age_type))
# Alcohol-abuse visits by age band.
neds2016_Age_type <- neds2016_CORE %>%
  filter(Alcohol_Abuse == 1) %>%
  select(Alcohol_Abuse, Age_type)
summary(neds2016_Age_type)
ggplot(neds2016_Age_type, aes(x = Age_type)) +
  geom_bar()
# Alcohol-abuse visits by ZIP-income quartile.
neds2016_Quartile <- neds2016_CORE %>%
  filter(Alcohol_Abuse == 1) %>%
  select(Alcohol_Abuse, ZIPINC_QRTL)
summary(neds2016_Quartile)
ggplot(neds2016_Quartile, aes(x = ZIPINC_QRTL)) +
  geom_bar()
# Alcohol-abuse visits by admission month, compared with the full sample.
neds2016_AMONTH <- neds2016_CORE %>%
  filter(Alcohol_Abuse == 1) %>%
  select(Alcohol_Abuse, AMONTH)
summary(neds2016_CORE$AMONTH)
summary(neds2016_AMONTH$AMONTH)
ggplot(neds2016_AMONTH, aes(x = AMONTH)) +
  geom_bar()
# Choosing the modelling variables.
neds2016_1 <- neds2016_CORE %>%
  select(FEMALE, ZIPINC_QRTL, Alcohol_Abuse, Age_type, AMONTH)
str(neds2016_1)
# Logistic regression of alcohol abuse on all remaining covariates.
logistic <- glm(Alcohol_Abuse ~ ., data = neds2016_1, family = "binomial")
summary(logistic)
# McFadden's pseudo R-squared and the likelihood-ratio test p-value.
ll.null <- logistic$null.deviance/-2
ll.proposed <- logistic$deviance/-2
(ll.null - ll.proposed)/ll.null
1 - pchisq(2*(ll.proposed - ll.null), df = (length(logistic$coefficients) - 1))
# Fitted probabilities ranked from lowest to highest for plotting.
predicted.data <- data.frame(probability.of.alcohol_abuse = logistic$fitted.values,
                             Alcohol_Abuse = neds2016_1$Alcohol_Abuse)
predicted.data <- predicted.data[order(predicted.data$probability.of.alcohol_abuse, decreasing = FALSE), ]
# seq_len() is safe even for an empty frame (1:nrow would give 1:0).
predicted.data$rank <- seq_len(nrow(predicted.data))
library(ggplot2)
library(cowplot)
ggplot(data = predicted.data, aes(x = rank, y = probability.of.alcohol_abuse)) +
  geom_point(aes(color = Alcohol_Abuse), alpha = 1, shape = 4, stroke = 2) +
  xlab("Index") +
  ylab("Predicted probability of alcohol abuse")
ggsave("Alcohol_abuse_probabilities.pdf")
summary(neds2016_Age_type)
library(parsnip)
library(stringr)
library(tidyverse)
library(ggplot2)
neds2016_CORE = rename(`NEDS_2016_CORE.sample.random.(1)`, AGE = V1, AMONTH = V2, AWEEKEND = V3, DIED_VISIT = V4, DISCWT = V5, DISP_ED = V6, DQTR = V7, DXVER = V8,
EDEVENT = V9, FEMALE = V10, HCUPFILE = V11, HOSP_ED = V12, I10_DX1 = V13, I10_DX2 = V14, I10_DX3 = V15, I10_DX4 = V16, I10_DX5 = V17,
I10_DX6 = V18, I10_DX7 = V19, I10_DX8 = V20, I10_DX9 = V21, I10_DX10 = V22, I10_DX11 = V23, I10_DX12 = V24, I10_DX13 = V25, I10_DX14 = V26, I10_DX15 = V27,
I10_DX16 = V28, I10_DX17 = V29, I10_DX18 = V30, I10_DX19 = V31, I10_DX20 = V32, I10_DX21 = V33, I10_DX22 = V34, I10_DX23 = V35, I10_DX24 = V36, I10_DX25 = V37, I10_DX26 = V38,
I10_DX27 = V39, I10_DX28 = V40, I10_DX29 = V41, I10_DX30 = V42, I10_ECAUSE1 = V43, I10_ECAUSE2 = V44, I10_ECAUSE3 = V45, I10_ECAUSE4 = V46, I10_NDX = V47,
I10_NECAUSE = V48, KEY_ED = V49, NEDS_STRATUM = V50, PAY1 = V51, PAY2 = V52, PL_NCHS = V53, TOTCHG_ED = V54, YEAR = V55, ZIPINC_QRTL = V56)
neds2016_CORE$Alcohol_Abuse <- ifelse(neds2016_CORE$I10_DX1 == "F1010"| neds2016_CORE$I10_DX1 == "F1011"| neds2016_CORE$I10_DX1 == "F10120"| neds2016_CORE$I10_DX1 == "F10121"| neds2016_CORE$I10_DX1 == "F10129"| neds2016_CORE$I10_DX1 == "F1014"| neds2016_CORE$I10_DX1 == "F10150"| neds2016_CORE$I10_DX1 == "F10151"| neds2016_CORE$I10_DX1 == "F10159"| neds2016_CORE$I10_DX1 == "F10180"| neds2016_CORE$I10_DX1 == "F10181"| neds2016_CORE$I10_DX1 == "F10182"| neds2016_CORE$I10_DX1 == "F10188"| neds2016_CORE$I10_DX1 == "F1019",1,0)
neds2016_CORE <- neds2016_CORE %>%
filter(ZIPINC_QRTL == 1 | ZIPINC_QRTL == 2 | ZIPINC_QRTL == 3 | ZIPINC_QRTL == 4)
neds2016_CORE$AMONTH <- ifelse(neds2016_CORE$AMONTH == -9, 9, neds2016_CORE$AMONTH)
neds2016_CORE <- neds2016_CORE %>%
filter(FEMALE == "0" | FEMALE == "1" )
# Converting FEMALE, AGE, SAlcohol_Abuse, ZIPINC_QRTL, YEAR
neds2016_CORE$FEMALE <- as.factor(neds2016_CORE$FEMALE)
neds2016_CORE$AGE <- as.numeric(neds2016_CORE$AGE)
neds2016_CORE$Alcohol_Abuse <- as.factor(neds2016_CORE$Alcohol_Abuse)
neds2016_CORE$ZIPINC_QRTL <- as.factor(neds2016_CORE$ZIPINC_QRTL)
neds2016_CORE$YEAR <- as.factor(neds2016_CORE$YEAR)
neds2016_CORE$AMONTH <- as.factor(neds2016_CORE$AMONTH)
summary(neds2016_CORE)
neds2016_time <- neds2016_CORE %>%
select(Alcohol_Abuse, YEAR) %>%
filter(Alcohol_Abuse == 1)
ggplot(data = neds2016_time) +
geom_bar(mapping = aes(x=YEAR))
neds2016_sex <- neds2016_CORE %>%
select(Alcohol_Abuse, FEMALE) %>%
filter(Alcohol_Abuse == 1)
summary(neds2016_sex)
ggplot(data = neds2016_sex) +
geom_bar(mapping = aes(x=FEMALE))
neds2016_CORE$Age_type <- ifelse(neds2016_CORE$AGE <= 44,"18-44 years","45-64 years")
neds2016_CORE$Age_type <- ifelse(neds2016_CORE$AGE >= 65, "65 years or older", neds2016_CORE$Age_type)
neds2016_CORE$Age_type <- as.factor(neds2016_CORE$Age_type)
neds2016_Age_type <- neds2016_CORE %>%
select(Alcohol_Abuse, Age_type) %>%
filter(Alcohol_Abuse == 1)
summary(neds2016_Age_type)
ggplot(data = neds2016_Age_type) +
geom_bar(mapping = aes(x=Age_type))
neds2016_Quartile <- neds2016_CORE %>%
select(Alcohol_Abuse, ZIPINC_QRTL) %>%
filter(Alcohol_Abuse == 1)
summary(neds2016_Quartile)
ggplot(data = neds2016_Quartile) +
geom_bar(mapping = aes(x=ZIPINC_QRTL))
neds2016_AMONTH <- neds2016_CORE %>%
select(Alcohol_Abuse, AMONTH) %>%
filter(Alcohol_Abuse == 1)
summary(neds2016_CORE$AMONTH)
summary(neds2016_AMONTH$AMONTH)
ggplot(data = neds2016_AMONTH) +
geom_bar(mapping = aes(x=AMONTH))
#Choosing the variables
neds2016_1 <- neds2016_CORE %>%
select(FEMALE,ZIPINC_QRTL, Alcohol_Abuse,Age_type,AMONTH)
str(neds2016_1)
#glm model
logistic <- glm(Alcohol_Abuse ~.,data = neds2016_1, family = "binomial")
summary(logistic)
#Alcohol_Abuse=5.879135+
ll.null <- logistic$null.deviance/-2
ll.proposed <- logistic$deviance/-2
(ll.null - ll.proposed)/ll.null
1 - pchisq(2*(ll.proposed - ll.null), df=(length(logistic$coefficients)-1))
predicted.data <- data.frame(probability.of.alcohol_abuse=logistic$fitted.values,Alcohol_Abuse=neds2016_1$Alcohol_Abuse)
predicted.data <- predicted.data[order(predicted.data$probability.of.alcohol_abuse,decreasing = FALSE),]
predicted.data$rank <- 1:nrow(predicted.data)
library(ggplot2)
library(cowplot)
ggplot(data=predicted.data, aes(x=rank, y=probability.of.alcohol_abuse))+
geom_point(aes(color=Alcohol_Abuse), alpha=1, shape=4, stroke=2)+
xlab("Index")+
ylab("Predicted probability of alcohol abuse")
ggsave("Alcohol_abuse_probabilities.pdf")
summary(neds2016_Age_type) |
#-------------------------Assignment 13.2----------------------------
# Visualize the correlation between all variables in a meaningful and
# clear way. Find out the top 3 reasons for having more crime in a city.
# Uses the data from assignment 13.1.
getwd()
crimemodel <- read.csv("Users/Pragati/Desktop/Assignment/crimemodel.csv")
View(crimemodel)
str(crimemodel)
library(dplyr)
# Encode the character columns as numeric factor codes so they can
# enter the correlation matrix.
crimemodel1 <- crimemodel %>%
  mutate(day = as.numeric(as.factor(day)),
         month = as.numeric(as.factor(month)),
         season = as.numeric(as.factor(season)))
names(crimemodel1)
str(crimemodel1)
# Correlation matrix over all model variables, visualised with psych.
correlation <- cor(crimemodel1[, c("Beat", "count", "Arrest", "day", "month",
                                   "past.crime.1", "past.crime.7",
                                   "past.crime.30", "past.arrest.30",
                                   "crime.trend", "policing", "season")])
psych::cor.plot(correlation)
#-------------------------------------------------------------------------
# b. What is the difference between covariance and correlation? Take an
# example from this dataset and show the differences, if any.
# Covariance is a systematic relationship between a pair of random
# variables wherein a change in one variable is reciprocated by an
# equivalent change in the other variable.
# It is an unscaled measure of association: it lies between -Inf and
# +Inf, and a change of scale changes the covariance.
# Correlation is a statistical measure that indicates how strongly two
# variables are related.
# It is the scaled version of covariance: it lies between -1 and +1,
# a change of scale does not affect it, and it is unit-free.
# Correlation is the special case of covariance obtained when the data
# are standardised.
cov(crimemodel1$count, crimemodel1$past.crime.30)
cor(crimemodel1$count, crimemodel1$past.crime.30)
# Covariances are hard to compare because the value ranges over
# (-Inf, +Inf) and depends on the measurement units: the same data in
# different units give a different covariance.
# We therefore 'normalize' the covariance by dividing it by a quantity
# that represents the spread of both covariates, which yields a value
# guaranteed to lie between -1 and 1: the correlation.
#-------------------------Assignment 13.2----------------------------
# Visualize the correlation between all variables in a meaningful and clear way
# of representing. Find out top 3 reasons for having more crime in a city.
# We reuse the data of assignment 13.1.
getwd()
# NOTE(review): the path has no drive prefix, so it resolves relative to the
# working directory -- confirm it should not be "C:/Users/...".
crimemodel <- read.csv("Users/Pragati/Desktop/Assignment/crimemodel.csv")
View(crimemodel)
str(crimemodel)
library(dplyr)
crimemodel1 <- crimemodel
# Recode the categorical columns as numeric factor codes so they can enter cor().
crimemodel1 <- mutate(crimemodel, day = as.numeric(as.factor(day)),
                      month = as.numeric(as.factor(month)),
                      season = as.numeric(as.factor(season)))
names(crimemodel1)
str(crimemodel1)
# Pairwise correlations of the selected numeric columns, visualised as a
# correlation plot (psych::cor.plot).
correlation <- cor(crimemodel1[,c("Beat","count","Arrest","day","month","past.crime.1",
                                  "past.crime.7","past.crime.30","past.arrest.30","crime.trend",
                                  "policing","season")])
psych::cor.plot(correlation)
#-------------------------------------------------------------------------
# b. What is the difference between co-variance and correlation? Take an
# example from this dataset and show the differences if any?
# Covariance is a systematic relationship between a pair of random variables wherein a
# change in one variable is reciprocated by an equivalent change in another variable.
# It is a measure of association that lies between -Inf and +Inf; a change in scale affects covariance.
# Correlation is a statistical measure that indicates how strongly two variables are related.
# It is a scaled version of covariance, lies between -1 and +1,
# and a change in scale does not affect the correlation. It is a unit-free measure.
# Correlation is a special case of covariance which can be obtained when the data is standardised.
cov(crimemodel1$count, crimemodel1$past.crime.30)
cor(crimemodel1$count, crimemodel1$past.crime.30)
# Covariances are hard to compare because the value ranges from -Inf to +Inf;
# we get a different covariance when we compute it in other units.
# Then we need to 'normalize' the covariance, i.e. divide the covariance
# by something that represents the diversity and scale in both the covariates,
# and end up with a value that is assured to be between -1 and 1: the correlation.
|
#' Creates a bar plot of the top 5 most present adapter sequences.
#' @param ac_sorted Sorted table of adapters and counts.
#' @param output_file File to save data frame to. Default NA.
#' @return Barplot of top 5 most frequent adapter sequences.
#'
#' @examples
#' if(.Platform$OS.type != "windows") {
#' infile <- system.file("extdata", "test.fq.gz", package = "qckitfastq")
#' ac_sorted <- adapter_content(infile)
#' plot_adapter_content(ac_sorted)
#' }
#' @importFrom utils write.csv
#' @export
plot_adapter_content <- function(ac_sorted,output_file=NA){
if(.Platform$OS.type == "windows") {
stop("This function is not available on Windows due to the lack of C++14 support, sorry.")
}
ac <- ac_sorted[seq_len(5)]
df<-data.frame(names(ac),as.numeric(ac))
names(df) <- c("Adapters", "Counts")
p <- ggplot2::ggplot(data=df,ggplot2::aes(x=.data$Adapters,y=.data$Counts)) +
ggplot2::geom_bar(stat="identity") +
ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1)) +
ggplot2::ggtitle("Top 5 adapters and their counts in reads file") +
ggplot2::xlab("Adapters") +
ggplot2::ylab("Counts")
if (!is.na(output_file)){ggplot2::ggsave(file=output_file,p)}
return(p)
} | /R/plot_adapter_content.R | no_license | compbiocore/qckitfastq | R | false | false | 1,254 | r | #' Creates a bar plot of the top 5 most present adapter sequences.
#' @param ac_sorted Sorted table of adapters and counts.
#' @param output_file File to save data frame to. Default NA.
#' @return Barplot of top 5 most frequent adapter sequences.
#'
#' @examples
#' if(.Platform$OS.type != "windows") {
#' infile <- system.file("extdata", "test.fq.gz", package = "qckitfastq")
#' ac_sorted <- adapter_content(infile)
#' plot_adapter_content(ac_sorted)
#' }
#' @importFrom utils write.csv
#' @export
plot_adapter_content <- function(ac_sorted,output_file=NA){
  # Bar plot of the most frequent adapter sequences in `ac_sorted`
  # (a sorted, named table of adapter -> count). Optionally saved to
  # `output_file` via ggsave; the ggplot object is always returned.
  if(.Platform$OS.type == "windows") {
    stop("This function is not available on Windows due to the lack of C++14 support, sorry.")
  }
  # Take at most the top 5 adapters. The original seq_len(5) indexing
  # produced NA entries whenever fewer than 5 adapters were present.
  ac <- ac_sorted[seq_len(min(5L, length(ac_sorted)))]
  df <- data.frame(names(ac), as.numeric(ac))
  names(df) <- c("Adapters", "Counts")
  p <- ggplot2::ggplot(data=df, ggplot2::aes(x=.data$Adapters, y=.data$Counts)) +
    ggplot2::geom_bar(stat="identity") +
    # Rotate x labels so long adapter sequences remain readable.
    ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, hjust = 1)) +
    ggplot2::ggtitle("Top 5 adapters and their counts in reads file") +
    ggplot2::xlab("Adapters") +
    ggplot2::ylab("Counts")
  # ggsave infers the output device from the file extension.
  if (!is.na(output_file)){ggplot2::ggsave(file=output_file,p)}
  return(p)
}
library(tidyverse)
library(sas7bdat)
library(haven)
library(stringr)
# NOTE(review): installing packages from inside an analysis script re-installs
# on every run; do it once interactively instead.
# install.packages("haven")

# haven::read_sas() parses SAS data files; the result is stored as a tibble.
dob <- read_sas('G:/Property_ORGPROJ/Orgproj/AnalyticsGrp/tom/scr/adhoc_06192018/dob.sas7bdat')
dob.df <- as.data.frame(dob)
class(dob)
class(dob.df)
str(dob)

# Jobs completed 2015-04-01 .. 2016-03-30 with a permit issued after 2014-01-31.
# (These comparisons are on the raw column values -- confirm the date columns
# are in a "YYYY-MM-DD"-sortable format before relying on them.)
#& dob$PERMIT_DATE<"2016-01-01"
completed.jobs.2015 <- dob.df[(dob.df$COMPLETION_DATE < "2016-03-30" & dob.df$COMPLETION_DATE > "2015-04-01" & dob.df$PERMIT_DATE > "2014-01-31"), ]

# Filtering a tibble: first confirm the completion dates are usable.
all(is.na(dob$COMPLETION_DATE))
head(dob$COMPLETION_DATE[!is.na(dob$COMPLETION_DATE)])
completed.jobs.2015 <- filter(dob, COMPLETION_DATE < "2016-03-30" & COMPLETION_DATE > "2015-04-01")
head(dob$BORO)
head(completed.jobs.2015)
completed.jobs.2015$BORO[0:4]

# Create BBL codes: borough * 1e9 + block * 1e4 + lot.
completed.jobs.2015$BBL <- as.numeric(completed.jobs.2015$BORO) * 1000000000 + as.numeric(completed.jobs.2015$BLOCK) * 10000 + as.numeric(completed.jobs.2015$LOT)

# Uncompleted jobs that were likely being finished in the year.
# Fixed: the original line ended in a dangling "&", which silently fused with
# the following table() call into one nonsensical expression. The mask is now
# a standalone (currently unused) object.
permit.after.2014 <- dob$PERMIT_DATE > "2014-12-31"
table(dob$JOB_TYPE)
# Fixed: the original subset `dob[,c('BORO''BLOCK','LOT',)]` was a syntax
# error (missing comma and trailing comma) that made the whole file unparseable.
dob[, c('BORO', 'BLOCK', 'LOT')]
head(completed.jobs.2015$BORO)
head(as.numeric(completed.jobs.2015$BORO))
head(completed.jobs.2015$BBL)

# Read in the newbase files (two snapshots).
nb <- read_sas('G:/Property_ORGPROJ/Orgproj/ORG_DATA_FOLDER/orgproj/newbase/newbasb_keep_d051917s.sas7bdat')
nb2 <- read_sas('G:/Property_ORGPROJ/Orgproj/ORG_DATA_FOLDER/orgproj/newbase/newbasb_keep_d052416s.sas7bdat')

# Search the column names for square-footage and lease-related fields.
cols <- names(nb)
bools2 <- grepl('SQFT|SF', cols)
bools <- grepl('LEASE|BORO|BLOCK|LOT|GROSS|GRO|NET|BLO', cols)
sum(bools2)
cols[bools2]
table(nb$NETFLG)

# Export BBL components, the net flag and square-footage columns.
tgt <- c('BORO', 'BLOCK', 'LOT', 'NETFLG', 'COMSQFT', 'RESSQFT', 'GARSQFT')
portion <- nb2[, tgt]
write.csv(portion, 'G:/Property/Luis_C/aws/fy16_determination.csv', row.names = FALSE)
| /phys_sas_parser.R | no_license | calleja/nnn | R | false | false | 1,922 | r | library(tidyverse)
library(sas7bdat)
library(haven)
library(stringr)
install.packages("haven")
#readr package for reading and parsing sas data files... it is stored as a tibble
dob<-read_sas('G:/Property_ORGPROJ/Orgproj/AnalyticsGrp/tom/scr/adhoc_06192018/dob.sas7bdat')
dob.df<-as.data.frame(dob)
class(dob)
class(dob.df)
str(dob)
#& dob$PERMIT_DATE<"2016-01-01"
completed.jobs.2015<-dob.df[(dob.df$COMPLETION_DATE<"2016-03-30" & dob.df$COMPLETION_DATE>"2015-04-01" & dob.df$PERMIT_DATE>"2014-01-31"),]
#filtering a tibble
all(is.na(dob$COMPLETION_DATE))
head(dob$COMPLETION_DATE[!is.na(dob$COMPLETION_DATE)])
completed.jobs.2015<-filter(dob,COMPLETION_DATE<"2016-03-30" & COMPLETION_DATE>"2015-04-01")
head(dob$BORO)
head(completed.jobs.2015)
completed.jobs.2015$BORO[0:4]
#create BBL codes
completed.jobs.2015$BBL<-as.numeric(completed.jobs.2015$BORO)*1000000000+as.numeric(completed.jobs.2015$BLOCK)*10000 + as.numeric(completed.jobs.2015$LOT)
#uncompleted jobs that were likely being finished in the year
dob$PERMIT_DATE>"2014-12-31" &
table(dob$JOB_TYPE)
dob[,c('BORO''BLOCK','LOT',)]
head(completed.jobs.2015$BORO)
head(as.numeric(completed.jobs.2015$BORO))
head(completed.jobs.2015$BBL)
#read in newbase file
nb<-read_sas('G:/Property_ORGPROJ/Orgproj/ORG_DATA_FOLDER/orgproj/newbase/newbasb_keep_d051917s.sas7bdat')
nb2<-read_sas('G:/Property_ORGPROJ/Orgproj/ORG_DATA_FOLDER/orgproj/newbase/newbasb_keep_d052416s.sas7bdat')
#read in newbase file from EC2
cols<-names(nb)
#apply regex and search for NNN
bools2<-grepl('SQFT|SF',cols)
bools<-grepl('LEASE|BORO|BLOCK|LOT|GROSS|GRO|NET|BLO',cols)
sum(bools2)
cols[bools2]
table(nb$NETFLG)
#export BBL and netflg
tgt<-c('BORO','BLOCK','LOT','NETFLG','COMSQFT','RESSQFT','GARSQFT')
portion<-nb2[,tgt]
write.csv(portion,'G:/Property/Luis_C/aws/fy16_determination.csv',row.names=FALSE)
|
c(1, 2, 3)
| /Chapter08/07_simple_vector.R | permissive | PacktPublishing/Hands-On-Data-Science-with-SQL-Server-2017 | R | false | false | 11 | r | c(1, 2, 3)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Build a matrix wrapper that can memoise its inverse.
# Returns a list of accessors: getorigin/setorigin for the matrix itself,
# getinverse/setinverse for the cached inverse (NULL until one is stored).
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    getorigin = function() x,
    setorigin = function(new_matrix) {
      # Replacing the matrix invalidates any previously cached inverse.
      x <<- new_matrix
      cached_inverse <<- NULL
    },
    getinverse = function() cached_inverse,
    setinverse = function(value) cached_inverse <<- value
  )
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', reusing the copy cached in
  ## the makeCacheMatrix wrapper when one is available.
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    result <- solve(x$getorigin())
    x$setinverse(result)
    return(result)
  }
  message("get in cache data")
  cached
}
| /cachematrix.R | no_license | dshen19/ProgrammingAssignment2 | R | false | false | 938 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
m <- NULL
getorigin <- function() x
setorigin <- function(y) {
x <<- y
m <<- NULL
}
getinverse <- function() m
setinverse <- function(inv) m <<- inv
list(getorigin = getorigin,
setorigin = setorigin,
getinverse = getinverse,
setinverse = setinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m <- x$getinverse()
if (!is.null(m)) {
message("get in cache data")
return (m)
}
org <- x$getorigin()
inv <- solve(org)
x$setinverse(inv)
inv
}
|
\name{summary.cgOneFactorFit}
\docType{methods}
\alias{summary,cgOneFactorFit-method}
\alias{summary.cgOneFactorFit}
\title{
Summary display of a One Factor Model Fit object with some format options
}
\description{
Summary printing of a \code{cgOneFactorFit} object,
which contains fitted model information.
}
\usage{
\S4method{summary}{cgOneFactorFit}(object, title = NULL, endptname = NULL, ...)
}
\arguments{
\item{object }{
An \code{\link{cgOneFactorFit}} object.
}
\item{title }{
The title printed out with the summary of the fitted model(s).
If \code{NULL}, it is set to be "Fitted Model Summaries of"
the \code{analysisname} value in the \code{settings} slot of the
\code{\link{cgOneFactorFit}} object.
}
\item{endptname }{
The endpoint name, printed out with the fitted model information.
If \code{NULL}, it is set to
the \code{endptname} value in the \code{settings} slot of the
\code{\link{cgOneFactorFit}} object.
}
\item{\dots }{
Additional arguments. Only one is currently valid:
\describe{
\item{\code{model} }{For \code{cgOneFactorFit}
objects that have output derived from
classical least squares \code{\link{lm}} or resistant & robust
\code{\link{rlm}} fits, the following argument values are possible:
\describe{
\item{\code{"both"}}{Both the ordinary
classical least squares and resistant & robust model fit
summaries are printed.
This is the default when both fits are present in
the \code{cgOneFactorFit} object specified in the \code{object}
argument.
}
\item{\code{"olsonly"}}{Only the ordinary
classical least squares model fit summary is printed.
}
\item{\code{"rronly"}}{Only the resistant &
robust model fit summary is printed.
}
}
}
}
For other possible \code{cgOneFactorFit}
components such as
accelerated failure time or unequal variance model fits, the \code{model}
argument is not relevant, and the single model fit summary is printed
for these model types.
}
}
\details{
The object summary is printed using a mix of \code{cat} and \code{print}
calls. See \code{\link{cgOneFactorFit}}
for details of the \code{*fit} and other object slots.
This method simply echoes summary methods for individual fit classes,
such as \code{\link{lm}} and \code{\link{rlm}}.
}
\value{
\code{summary.cgOneFactorFit} returns
\code{\link{invisible}}. The main purpose is the side
effect of printing to the current output connection, which is
typically the console.
}
\author{
Bill Pikounis [aut, cre, cph], John Oleynick [aut], Eva Ye [ctb]
}
\note{
Contact \email{cg@billpikounis.net} for bug reports, questions,
concerns, and comments.
}
\seealso{
\code{\link{cgOneFactorFit}}
}
\examples{
data(canine)
canine.data <- prepareCGOneFactorData(canine, format="groupcolumns",
analysisname="Canine",
endptname="Prostate Volume",
endptunits=expression(plain(cm)^3),
digits=1, logscale=TRUE, refgrp="CC")
canine.fit <- fit(canine.data)
summary(canine.fit)
}
| /man/summary.cgOneFactorFit.Rd | no_license | cran/cg | R | false | false | 3,324 | rd | \name{summary.cgOneFactorFit}
\docType{methods}
\alias{summary,cgOneFactorFit-method}
\alias{summary.cgOneFactorFit}
\title{
Summary display of a One Factor Model Fit object with some format options
}
\description{
Summary printing of a \code{cgOneFactorFit} object,
which contains fitted model information.
}
\usage{
\S4method{summary}{cgOneFactorFit}(object, title = NULL, endptname = NULL, ...)
}
\arguments{
\item{object }{
An \code{\link{cgOneFactorFit}} object.
}
\item{title }{
The title printed out with the summary of the fitted model(s).
If \code{NULL}, it is set to be "Fitted Model Summaries of"
the \code{analysisname} value in the \code{settings} slot of the
\code{\link{cgOneFactorFit}} object.
}
\item{endptname }{
The endpoint name, printed out with the fitted model information.
If \code{NULL}, it is set to
the \code{endptname} value in the \code{settings} slot of the
\code{\link{cgOneFactorFit}} object.
}
\item{\dots }{
Additional arguments. Only one is currently valid:
\describe{
\item{\code{model} }{For \code{cgOneFactorFit}
objects that have output derived from
classical least squares \code{\link{lm}} or resistant & robust
\code{\link{rlm}} fits, the following argument values are possible:
\describe{
\item{\code{"both"}}{Both the ordinary
classical least squares and resistant & robust model fit
summaries are printed.
This is the default when both fits are present in
the \code{cgOneFactorFit} object specified in the \code{object}
argument.
}
\item{\code{"olsonly"}}{Only the ordinary
classical least squares model fit summary is printed.
}
\item{\code{"rronly"}}{Only the resistant &
robust model fit summary is printed.
}
}
}
}
For other possible \code{cgOneFactorFit}
components such as
accelerated failure time or unequal variance model fits, the \code{model}
argument is not relevant, and the single model fit summary is printed
for these model types.
}
}
\details{
The object summary is printed using a mix of \code{cat} and \code{print}
calls. See \code{\link{cgOneFactorFit}}
for details of the \code{*fit} and other object slots.
This method simply echoes summary methods for individual fit classes,
such as \code{\link{lm}} and \code{\link{rlm}}.
}
\value{
\code{summary.cgOneFactorFit} returns
\code{\link{invisible}}. The main purpose is the side
effect of printing to the current output connection, which is
typically the console.
}
\author{
Bill Pikounis [aut, cre, cph], John Oleynick [aut], Eva Ye [ctb]
}
\note{
Contact \email{cg@billpikounis.net} for bug reports, questions,
concerns, and comments.
}
\seealso{
\code{\link{cgOneFactorFit}}
}
\examples{
data(canine)
canine.data <- prepareCGOneFactorData(canine, format="groupcolumns",
analysisname="Canine",
endptname="Prostate Volume",
endptunits=expression(plain(cm)^3),
digits=1, logscale=TRUE, refgrp="CC")
canine.fit <- fit(canine.data)
summary(canine.fit)
}
|
## The following two functions help in caching the potentially time-consuming computation of calculating the inverse of a matrix
## The first function, makeCacheMatrix creates a special "matrix", which is really a list containing a function to
#set the value of the matrix
#get the value of the matrix
#set the value of the inverse matrix
#get the value of the inverse matrix
makeCacheMatrix <- function(m = matrix()) {
        i <- NULL # cached inverse; NULL until cacheSolve() stores a value
        #set the matrix; replacing it drops any stale cached inverse
        set <- function(y) {
                m <<- y
                i <<- NULL
        }
        #get the matrix
        get <- function() m
        #set the inverse (called by cacheSolve after computing it)
        setInverse <- function(inverse) i <<- inverse
        #get the inverse (NULL when nothing has been cached yet)
        getInverse <- function() i
        #return the four accessor functions as a named list
        list(set = set, get = get,
             setInverse = setInverse,
             getInverse = getInverse)
}
## The following function calculates the inverse of the special "matrix" created with the above function.
# However, it first checks to see if the mean has already been calculated.
# If so, it gets the mean from the cache and skips the computation.
# Otherwise, it calculates the mean of the data and sets the value of the mean in the cache via the setmean function.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x', where 'x' is the
        ## accessor list produced by makeCacheMatrix. Extra arguments in
        ## '...' are forwarded to solve().
        i <- x$getInverse()
        #check if a cached inverse already exists
        if(!is.null(i)) {
                #if yes, then return the cached value without recomputing
                message("getting cached data")
                return(i)
        }
        #if not, compute the inverse and store it back in the cache
        data <- x$get()
        i <- solve(data, ...)
        x$setInverse(i)
        i
}
| /cachematrix.R | no_license | mshangiti/ProgrammingAssignment2 | R | false | false | 1,567 | r | ## The following two functions help in caching the potentially time-consuming computation of calculating the inverse of a matrix
## The first function, makeCacheMatrix creates a special "matrix", which is really a list containing a function to
#set the value of the matrix
#get the value of the matrix
#set the value of the inverse matrix
#get the value of the inverse matrix
makeCacheMatrix <- function(m = matrix()) {
i <- NULL #inverse
#set the matrix
set <- function(y) {
m <<- y
i <<- NULL
}
#get the matrix
get <- function() m
#set the inverse
setInverse <- function(inverse) i <<- inverse
#get the inverse
getInverse <- function() i
#return the functions
list(set = set, get = get,
setInverse = setInverse,
getInverse = getInverse)
}
## The following function calculates the inverse of the special "matrix" created with the above function.
# However, it first checks to see if the mean has already been calculated.
# If so, it gets the mean from the cache and skips the computation.
# Otherwise, it calculates the mean of the data and sets the value of the mean in the cache via the setmean function.
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getInverse()
#check if the value already exists?
if(!is.null(i)) {
#if yes, then return the cached value
message("getting cached data")
return(i)
}
#if not, then create it
data <- x$get()
i <- solve(data, ...)
x$setInverse(i)
i
}
|
# These tests need a working CmdStan installation; resolve its path up front.
if (not_on_cran()) {
  set_cmdstan_path()
}

test_that("profiling works if profiling data is present", {
  skip_on_cran()
  # Stan profiling sections require CmdStan >= 2.26.0.
  skip_if(cmdstan_version() < "2.26.0")
  mod <- testing_model("logistic_profiling")
  # capture.output() keeps the sampler's console chatter out of the test log.
  utils::capture.output(
    fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
  )
  # One profile CSV per chain (4 chains expected here).
  expect_equal(length(fit$profile_files()), 4)
  profiles <- fit$profiles()
  expect_equal(length(profiles), 4)
  # Each profile data frame: 3 profiled sections x 9 columns; the section
  # names must match those declared in the Stan program.
  expect_equal(dim(profiles[[1]]), c(3,9))
  expect_equal(profiles[[1]][,"name"], c("glm", "priors", "udf"))
})

test_that("profiling errors if no profiling files are present", {
  skip_on_cran()
  # This model has no profiling blocks, so profile accessors must error.
  mod <- testing_model("logistic")
  suppressWarnings(
    utils::capture.output(
      fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
    )
  )
  # fixed = TRUE: match the message literally (it contains a period).
  expect_error(
    fit$profile_files(),
    "No profile files found. The model that produced the fit did not use any profiling.",
    fixed = TRUE
  )
  expect_error(fit$profiles(),
               "No profile files found. The model that produced the fit did not use any profiling.")
  expect_error(fit$save_profile_files(),
               "No profile files found. The model that produced the fit did not use any profiling.")
})

test_that("saving profile csv output works", {
  skip_on_cran()
  skip_if(cmdstan_version() < "2.26.0")
  mod <- testing_model("logistic_profiling")
  utils::capture.output(
    fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
  )
  old_paths <- fit$profile_files()
  checkmate::expect_file_exists(old_paths, extension = "csv")
  # save_profile_files() should move (not copy) the CSVs and announce it.
  expect_message(
    paths <- fit$save_profile_files(tempdir(), basename = "testing-output"),
    paste("Moved", fit$num_procs(), "files and set internal paths")
  )
  checkmate::expect_file_exists(paths, extension = "csv")
  expect_true(all(file.size(paths) > 0))
  # The originals must be gone after the move.
  expect_false(any(file.exists(old_paths)))
})
| /tests/testthat/test-profiling.R | permissive | clamped-params/cmdstanr | R | false | false | 1,862 | r | if (not_on_cran()) {
set_cmdstan_path()
}
test_that("profiling works if profiling data is present", {
skip_on_cran()
skip_if(cmdstan_version() < "2.26.0")
mod <- testing_model("logistic_profiling")
utils::capture.output(
fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
)
expect_equal(length(fit$profile_files()), 4)
profiles <- fit$profiles()
expect_equal(length(profiles), 4)
expect_equal(dim(profiles[[1]]), c(3,9))
expect_equal(profiles[[1]][,"name"], c("glm", "priors", "udf"))
})
test_that("profiling errors if no profiling files are present", {
skip_on_cran()
mod <- testing_model("logistic")
suppressWarnings(
utils::capture.output(
fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
)
)
expect_error(
fit$profile_files(),
"No profile files found. The model that produced the fit did not use any profiling.",
fixed = TRUE
)
expect_error(fit$profiles(),
"No profile files found. The model that produced the fit did not use any profiling.")
expect_error(fit$save_profile_files(),
"No profile files found. The model that produced the fit did not use any profiling.")
})
test_that("saving profile csv output works", {
skip_on_cran()
skip_if(cmdstan_version() < "2.26.0")
mod <- testing_model("logistic_profiling")
utils::capture.output(
fit <- mod$sample(data = testing_data("logistic"), refresh = 0)
)
old_paths <- fit$profile_files()
checkmate::expect_file_exists(old_paths, extension = "csv")
expect_message(
paths <- fit$save_profile_files(tempdir(), basename = "testing-output"),
paste("Moved", fit$num_procs(), "files and set internal paths")
)
checkmate::expect_file_exists(paths, extension = "csv")
expect_true(all(file.size(paths) > 0))
expect_false(any(file.exists(old_paths)))
})
|
# Draw (and optionally save) a PAM-clustering heatmap for `clust` via
# make_heatmap(). Output handling:
#   shiny = TRUE -> delegate entirely to make_heatmap (no device handling here)
#   PDF = TRUE   -> write "<file>.pdf" into res.folder and return its path
#   PNG = TRUE   -> write "<file>.png"; its path is returned only when PDF is FALSE
#   neither      -> plot to the current device and return NULL
plot.PAM <-
function (clust, what, res.folder = ".", cols = "bwr", trim = -1, autoadj = TRUE,
    pdf.width = 13, pdf.height = 10, labelwidth = 0.6, labelheight = 0.25,
    reorder = c(TRUE, TRUE), r.cex = 0.5, c.cex = 1, PDF = TRUE, PNG = FALSE,
    main = NULL, file = main, shiny = FALSE)
{
    # Either derive plot dimensions/label sizes from the data (plotAdjust)
    # or pass the caller-supplied values through unchanged.
    if (autoadj) {
        adj.l <- plotAdjust(clust$dat)
    } else {
        adj.l <- list(pdf.width = pdf.width, pdf.height = pdf.height, labelwidth = labelwidth,
            labelheight = labelheight, r.cex = r.cex, c.cex = c.cex)
    }
    # Default output basename when no file name was given.
    if (is.null(file)) {
        filename.pam <- paste("PAM clustering of", what)
    } else {
        filename.pam <- file
    }
    if (shiny) {
        # Shiny manages its own graphics device; just render.
        make_heatmap(clust, what, cols = cols, trim = trim,
            pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
            labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
            reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
            folder.name = res.folder, main=main)
    } else {
        if (PDF) {
            pdf.name <- file.path(res.folder, paste(filename.pam, ".pdf", sep=""))
            pdf(pdf.name, width=adj.l$pdf.width, height=adj.l$pdf.height)
            invisible(make_heatmap(clust, what, cols = cols, trim = trim,
                labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
                reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex, main=main))
            dev.off()
        }
        if (PNG) {
            png.name <- file.path(res.folder, paste(filename.pam, ".png", sep=""))
            # NOTE(review): png(units = "in") normally also requires `res`,
            # and no dev.off() appears in this branch -- verify that
            # make_heatmap(PNG = TRUE) opens/closes the device itself,
            # otherwise this call errors or leaks an open device.
            png(png.name, width=adj.l$pdf.width, height=adj.l$pdf.height, units="in")
            make_heatmap(clust, what, cols = cols, trim = trim,
                pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
                labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
                reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
                folder.name = res.folder, PNG = TRUE, main=main)
            # Normalise an all-NULL result to a plain NULL (png.name appears
            # to be scalar here, so this reduces to is.null(png.name)).
            if (all(unlist(lapply(png.name, is.null)))) { png.name <- NULL }
            if (!PDF) {
                return(png.name)
            }
        }
        if (!PDF && !PNG) {
            # No file output requested: draw on the current device.
            make_heatmap(clust, what, cols = cols, trim = trim,
                pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
                labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
                reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
                folder.name = res.folder, main=main)
            return()
        }
        if (PDF) {
            return(pdf.name)
        } else {
            return(NULL)
        }
    }
}
| /R/plot.PAM.R | no_license | cancer-genetics-utu/PAMhm | R | false | false | 2,274 | r | plot.PAM <-
function (clust, what, res.folder = ".", cols = "bwr", trim = -1, autoadj = TRUE,
pdf.width = 13, pdf.height = 10, labelwidth = 0.6, labelheight = 0.25,
reorder = c(TRUE, TRUE), r.cex = 0.5, c.cex = 1, PDF = TRUE, PNG = FALSE,
main = NULL, file = main, shiny = FALSE)
{
if (autoadj) {
adj.l <- plotAdjust(clust$dat)
} else {
adj.l <- list(pdf.width = pdf.width, pdf.height = pdf.height, labelwidth = labelwidth,
labelheight = labelheight, r.cex = r.cex, c.cex = c.cex)
}
if (is.null(file)) {
filename.pam <- paste("PAM clustering of", what)
} else {
filename.pam <- file
}
if (shiny) {
make_heatmap(clust, what, cols = cols, trim = trim,
pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
folder.name = res.folder, main=main)
} else {
if (PDF) {
pdf.name <- file.path(res.folder, paste(filename.pam, ".pdf", sep=""))
pdf(pdf.name, width=adj.l$pdf.width, height=adj.l$pdf.height)
invisible(make_heatmap(clust, what, cols = cols, trim = trim,
labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex, main=main))
dev.off()
}
if (PNG) {
png.name <- file.path(res.folder, paste(filename.pam, ".png", sep=""))
png(png.name, width=adj.l$pdf.width, height=adj.l$pdf.height, units="in")
make_heatmap(clust, what, cols = cols, trim = trim,
pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
folder.name = res.folder, PNG = TRUE, main=main)
if (all(unlist(lapply(png.name, is.null)))) { png.name <- NULL }
if (!PDF) {
return(png.name)
}
}
if (!PDF && !PNG) {
make_heatmap(clust, what, cols = cols, trim = trim,
pdf.width = adj.l$pdf.width, pdf.height = adj.l$pdf.height,
labelwidth=adj.l$labelwidth, labelheight=adj.l$labelheight,
reorder=reorder, r.cex=adj.l$r.cex, c.cex=adj.l$c.cex,
folder.name = res.folder, main=main)
return()
}
if (PDF) {
return(pdf.name)
} else {
return(NULL)
}
}
}
|
# ChIP-seq peak annotation and coverage plots for the H3K36me3 / K562 dataset.
source('lib.R')  # presumably defines DATA_DIR and OUT_DIR -- confirm
###
library(ChIPseeker)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(clusterProfiler)
library(org.Hs.eg.db)
### Input selection: switch NAME to process the other replicate.
NAME <- 'H3K36me3_K562.ENCFF903ZMQ.hg19.filtered'
#NAME <- 'H3K36me3_K562.ENCFF901ACN.hg19.filtered'
BED_FN <- paste0(DATA_DIR, NAME, '.bed')
### Annotate peaks relative to hg19 genes; TSS region is +/- 3 kb.
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
peakAnno <- annotatePeak(BED_FN, tssRegion=c(-3000, 3000), TxDb=txdb, annoDb="org.Hs.eg.db")
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.plotAnnoPie.pdf'))
#png(paste0(OUT_DIR, 'chip_seeker.', NAME, '.plotAnnoPie.png'))
plotAnnoPie(peakAnno)
dev.off()
### Genome-wide peak coverage plot weighted by the BED score column (V5).
peak <- readPeakFile(BED_FN)
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.covplot.pdf'))
covplot(peak, weightCol="V5")
dev.off() | /src/chip_seeker.R | no_license | LOBER2814/hse21_H3K36me3_ZDNA_human | R | false | false | 717 | r |
source('lib.R')
###
library(ChIPseeker)
library(TxDb.Hsapiens.UCSC.hg19.knownGene)
library(clusterProfiler)
library(org.Hs.eg.db)
###
NAME <- 'H3K36me3_K562.ENCFF903ZMQ.hg19.filtered'
#NAME <- 'H3K36me3_K562.ENCFF901ACN.hg19.filtered'
BED_FN <- paste0(DATA_DIR, NAME, '.bed')
###
txdb <- TxDb.Hsapiens.UCSC.hg19.knownGene
peakAnno <- annotatePeak(BED_FN, tssRegion=c(-3000, 3000), TxDb=txdb, annoDb="org.Hs.eg.db")
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.plotAnnoPie.pdf'))
#png(paste0(OUT_DIR, 'chip_seeker.', NAME, '.plotAnnoPie.png'))
plotAnnoPie(peakAnno)
dev.off()
###
peak <- readPeakFile(BED_FN)
pdf(paste0(OUT_DIR, 'chip_seeker.', NAME, '.covplot.pdf'))
covplot(peak, weightCol="V5")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimalAUCX_functions_surv.R
\name{hessAUC}
\alias{hessAUC}
\title{function for Hessian matrix of AUC.}
\usage{
hessAUC(beta, Z, w = 1)
}
\arguments{
\item{beta}{coefficient estimates from betahat/betahatX function.}
\item{Z}{(m x n) x p data matrix as defined in betahat/betahatX function.}
\item{w}{inverse probability weighting for missing data (default is 1).}
}
\value{
Hessian matrix of AUC.
}
\description{
function for Hessian matrix of AUC.
}
| /man/hessAUC.Rd | no_license | xhuang4/optaucx | R | false | true | 532 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/optimalAUCX_functions_surv.R
\name{hessAUC}
\alias{hessAUC}
\title{function for Hessian matrix of AUC.}
\usage{
hessAUC(beta, Z, w = 1)
}
\arguments{
\item{beta}{coefficient estimates from betahat/betahatX function.}
\item{Z}{(m x n) x p data matrix as defined in betahat/betahatX function.}
\item{w}{inverse probability weighting for missing data (default is 1).}
}
\value{
Hessian matrix of AUC.
}
\description{
function for Hessian matrix of AUC.
}
|
# Auto-extracted usage example for rcompanion::pairwiseMedianTest.
library(rcompanion)
### Name: pairwiseMedianTest
### Title: Pairwise Mood's median tests
### Aliases: pairwiseMedianTest
### ** Examples
# Likert-scale responses from three speakers.
data(PoohPiglet)
# Fix the factor-level order so pairwise comparisons print predictably.
PoohPiglet$Speaker = factor(PoohPiglet$Speaker,
                      levels = c("Pooh", "Tigger", "Piglet"))
# Pairwise Mood's median tests with FDR-adjusted p-values.
PT = pairwiseMedianTest(Likert ~ Speaker,
                        data = PoohPiglet,
                        exact = NULL,
                        method = "fdr")
PT
# Compact letter display of significant groupings at alpha = 0.05.
cldList(comparison = PT$Comparison,
        p.value = PT$p.adjust,
        threshold = 0.05)
| /data/genthat_extracted_code/rcompanion/examples/pairwiseMedianTest.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 587 | r | library(rcompanion)
### Name: pairwiseMedianTest
### Title: Pairwise Mood's median tests
### Aliases: pairwiseMedianTest
### ** Examples
data(PoohPiglet)
PoohPiglet$Speaker = factor(PoohPiglet$Speaker,
levels = c("Pooh", "Tigger", "Piglet"))
PT = pairwiseMedianTest(Likert ~ Speaker,
data = PoohPiglet,
exact = NULL,
method = "fdr")
PT
cldList(comparison = PT$Comparison,
p.value = PT$p.adjust,
threshold = 0.05)
|
# Plot 3 (Coursera "Exploratory Data Analysis"): energy sub-metering for
# 1-2 Feb 2007, one line per sub-meter, copied to plot3.png.
setwd("C:/Users/dkurera001/Documents/Data Science/4. Exploratory Data Analysis/")
Dates <- c("1/2/2007", "2/2/2007")
# Semicolon-separated raw file; "?" marks missing values in this dataset.
# (A stray empty argument after sep has been removed.)
Data <- read.table(file = "household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?", stringsAsFactors = FALSE)
# Keep only the two target days.
Data2 <- Data[which(Data$Date %in% Dates), ]
# Combine Date + Time into a POSIX timestamp for the x-axis.
Data2$Time <- strptime(do.call(paste0, Data2[c(1, 2)]), "%d/%m/%Y%H:%M:%S")
Data2$Date <- as.Date(Data2$Date, "%d/%m/%Y")
# The original passed an axis() call as a third argument to with(), where it
# was silently discarded (with() ignores extra arguments), so it is dropped.
with(Data2, plot(Time, as.numeric(Sub_metering_1), type = "l",
                 ylab = "Energy Sub Metering", xlab = ""))
with(Data2, points(Time, as.numeric(Sub_metering_2), type = "l", col = "red"))
with(Data2, points(Time, as.numeric(Sub_metering_3), type = "l", col = "blue"))
# Bug fix: legend colours now match the plotted series
# (1 = black, 2 = red, 3 = blue); they were listed as black/blue/red.
legend("topright", pch = NA, lwd = 3, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, "plot3.png")
dev.off()
| /plot3.R | no_license | dckurera/ExData_Plotting1 | R | false | false | 881 | r | setwd("C:/Users/dkurera001/Documents/Data Science/4. Exploratory Data Analysis/")
# Plot 3 (Coursera "Exploratory Data Analysis"): energy sub-metering for
# 1-2 Feb 2007, one line per sub-meter, copied to plot3.png.
Dates <- c("1/2/2007", "2/2/2007")
# Semicolon-separated raw file; "?" marks missing values in this dataset.
# (A stray empty argument after sep has been removed.)
Data <- read.table(file = "household_power_consumption.txt", header = TRUE,
                   sep = ";", na.strings = "?", stringsAsFactors = FALSE)
# Keep only the two target days.
Data2 <- Data[which(Data$Date %in% Dates), ]
# Combine Date + Time into a POSIX timestamp for the x-axis.
Data2$Time <- strptime(do.call(paste0, Data2[c(1, 2)]), "%d/%m/%Y%H:%M:%S")
Data2$Date <- as.Date(Data2$Date, "%d/%m/%Y")
# The original passed an axis() call as a third argument to with(), where it
# was silently discarded (with() ignores extra arguments), so it is dropped.
with(Data2, plot(Time, as.numeric(Sub_metering_1), type = "l",
                 ylab = "Energy Sub Metering", xlab = ""))
with(Data2, points(Time, as.numeric(Sub_metering_2), type = "l", col = "red"))
with(Data2, points(Time, as.numeric(Sub_metering_3), type = "l", col = "blue"))
# Bug fix: legend colours now match the plotted series
# (1 = black, 2 = red, 3 = blue); they were listed as black/blue/red.
legend("topright", pch = NA, lwd = 3, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.copy(png, "plot3.png")
dev.off()
|
\name{dccspec-methods}
\docType{methods}
\alias{dccspec}
\alias{dccspec,ANY-method}
\alias{dccspec,uGARCHmultispec-method}
\title{function: DCC-GARCH Specification}
\description{
Method for creating a DCC-GARCH specification object prior to fitting.
}
\usage{
dccspec(uspec, VAR = FALSE, robust = FALSE, lag = 1, lag.max = NULL,
lag.criterion = c("AIC", "HQ", "SC", "FPE"), external.regressors = NULL,
robust.control = list("gamma" = 0.25, "delta" = 0.01, "nc" = 10, "ns" = 500),
dccOrder = c(1,1), model = c("DCC", "aDCC", "FDCC"), groups = rep(1, length(uspec@spec)),
distribution = c("mvnorm", "mvt", "mvlaplace"), start.pars = list(), fixed.pars = list())
}
\arguments{
\item{uspec}{
A \code{\linkS4class{uGARCHmultispec}} object created by calling
\code{\link{multispec}} on a list of univariate GARCH specifications.}
\item{VAR}{ Whether to fit a VAR model for the conditional mean.}
\item{robust}{ Whether to use the robust version of VAR.}
\item{lag}{ The VAR lag.}
\item{lag.max}{ The maximum VAR lag to search for best fit.}
\item{lag.criterion}{ The criterion to use for choosing the best lag when
lag.max is not NULL.}
\item{external.regressors}{ Allows for a matrix of common pre-lagged external
regressors for the VAR option.
}
\item{robust.control}{ The tuning parameters to the robust regression
including the proportion to trim (\dQuote{gamma}), the critical value for
re-weighted estimator (\dQuote{delta}), the number of subsets (\dQuote{ns})
and the number of C-steps (\dQuote{nc}).}
\item{dccOrder}{ The DCC autoregressive order.}
\item{model}{ The DCC model to use, with a choice of the symmetric DCC,
asymmetric (aDCC) and the Flexible DCC (FDCC). See notes for more details.}
\item{groups}{ The groups corresponding to each asset in the FDCC model, where
these are assumed and checked to be contiguous and increasing (unless only 1 group).}
\item{distribution}{ The multivariate distribution. Currently the multivariate
Normal, Student and Laplace are implemented, and only the Normal for the FDCC model.}
\item{start.pars}{ (optional) Starting values for the DCC parameters (starting
values for the univariate garch specification should be passed directly
via the \sQuote{uspec} object).}
\item{fixed.pars}{ (optional) Fixed DCC parameters. This is required in the
\code{\link{dccfilter}}, \code{\link{dccforecast}}, \code{\link{dccsim}} with
spec, and \code{\link{dccroll}}
methods.}
}
\value{
A \code{\linkS4class{DCCspec}} object containing details of the DCC-GARCH
specification.
}
\details{
The \code{robust} option allows for a robust version of VAR based on the
multivariate Least Trimmed Squares Estimator described in Croux and Joossens
(2008).
}
\note{
The FDCC model of Billio, Caporin and Gobbo (2006) allows different DCC
parameters to govern the dynamics of the correlation of distinct groups. The
drawback is a somewhat larger parameter set, and no correlation targeting.
Still, it remains a feasible model for not too large a number of groups, and
avoids the unrealistic assumption, particularly for large datasets, of one
parameter governing all the dynamics, as in the DCC model. Note that the group
indices must be increasing (unless all 1), which means that you should arrange
your dataset so that the assets are ordered by their groups.
}
\references{
Billio, M., Caporin, M., & Gobbo, M. 2006, Flexible dynamic conditional
correlation multivariate GARCH models for asset allocation, \emph{Applied
Financial Economics Letters}, \bold{2(02)}, 123--130.\cr
Croux, C. and Joossens, K. 2008, Robust estimation of the vector autoregressive
model by a least trimmed squares procedure, \emph{COMPSTAT}, 489--501.\cr
Cappiello, L., Engle, R.F. and Sheppard, K. 2006, Asymmetric dynamics in the
correlations of global equity and bond returns, \emph{Journal of Financial
Econometrics} \bold{4}, 537--572.\cr
Engle, R.F. and Sheppard, K. 2001, Theoretical and empirical properties of
dynamic conditional correlation multivariate GARCH, \emph{NBER Working Paper}.\cr
}
\author{Alexios Ghalanos}
\keyword{methods} | /fuzzedpackages/rmgarch/man/dccspec-methods.Rd | no_license | akhikolla/testpackages | R | false | false | 4,216 | rd | \name{dccspec-methods}
\docType{methods}
\alias{dccspec}
\alias{dccspec,ANY-method}
\alias{dccspec,uGARCHmultispec-method}
\title{function: DCC-GARCH Specification}
\description{
Method for creating a DCC-GARCH specification object prior to fitting.
}
\usage{
dccspec(uspec, VAR = FALSE, robust = FALSE, lag = 1, lag.max = NULL,
lag.criterion = c("AIC", "HQ", "SC", "FPE"), external.regressors = NULL,
robust.control = list("gamma" = 0.25, "delta" = 0.01, "nc" = 10, "ns" = 500),
dccOrder = c(1,1), model = c("DCC", "aDCC", "FDCC"), groups = rep(1, length(uspec@spec)),
distribution = c("mvnorm", "mvt", "mvlaplace"), start.pars = list(), fixed.pars = list())
}
\arguments{
\item{uspec}{
A \code{\linkS4class{uGARCHmultispec}} object created by calling
\code{\link{multispec}} on a list of univariate GARCH specifications.}
\item{VAR}{ Whether to fit a VAR model for the conditional mean.}
\item{robust}{ Whether to use the robust version of VAR.}
\item{lag}{ The VAR lag.}
\item{lag.max}{ The maximum VAR lag to search for best fit.}
\item{lag.criterion}{ The criterion to use for choosing the best lag when
lag.max is not NULL.}
\item{external.regressors}{ Allows for a matrix of common pre-lagged external
regressors for the VAR option.
}
\item{robust.control}{ The tuning parameters to the robust regression
including the proportion to trim (\dQuote{gamma}), the critical value for
re-weighted estimator (\dQuote{delta}), the number of subsets (\dQuote{ns})
and the number of C-steps (\dQuote{nc}).}
\item{dccOrder}{ The DCC autoregressive order.}
\item{model}{ The DCC model to use, with a choice of the symmetric DCC,
asymmetric (aDCC) and the Flexible DCC (FDCC). See notes for more details.}
\item{groups}{ The groups corresponding to each asset in the FDCC model, where
these are assumed and checked to be contiguous and increasing (unless only 1 group).}
\item{distribution}{ The multivariate distribution. Currently the multivariate
Normal, Student and Laplace are implemented, and only the Normal for the FDCC model.}
\item{start.pars}{ (optional) Starting values for the DCC parameters (starting
values for the univariate garch specification should be passed directly
via the \sQuote{uspec} object).}
\item{fixed.pars}{ (optional) Fixed DCC parameters. This is required in the
\code{\link{dccfilter}}, \code{\link{dccforecast}}, \code{\link{dccsim}} with
spec, and \code{\link{dccroll}}
methods.}
}
\value{
A \code{\linkS4class{DCCspec}} object containing details of the DCC-GARCH
specification.
}
\details{
The \code{robust} option allows for a robust version of VAR based on the
multivariate Least Trimmed Squares Estimator described in Croux and Joossens
(2008).
}
\note{
The FDCC model of Billio, Caporin and Gobbo (2006) allows different DCC
parameters to govern the dynamics of the correlation of distinct groups. The
drawback is a somewhat larger parameter set, and no correlation targeting.
Still, it remains a feasible model for not too large a number of groups, and
avoids the unrealistic assumption, particularly for large datasets, of one
parameter governing all the dynamics, as in the DCC model. Note that the group
indices must be increasing (unless all 1), which means that you should arrange
your dataset so that the assets are ordered by their groups.
}
\references{
Billio, M., Caporin, M., & Gobbo, M. 2006, Flexible dynamic conditional
correlation multivariate GARCH models for asset allocation, \emph{Applied
Financial Economics Letters}, \bold{2(02)}, 123--130.\cr
Croux, C. and Joossens, K. 2008, Robust estimation of the vector autoregressive
model by a least trimmed squares procedure, \emph{COMPSTAT}, 489--501.\cr
Cappiello, L., Engle, R.F. and Sheppard, K. 2006, Asymmetric dynamics in the
correlations of global equity and bond returns, \emph{Journal of Financial
Econometrics} \bold{4}, 537--572.\cr
Engle, R.F. and Sheppard, K. 2001, Theoretical and empirical properties of
dynamic conditional correlation multivariate GARCH, \emph{NBER Working Paper}.\cr
}
\author{Alexios Ghalanos}
\keyword{methods} |
#REally really messy count data: Too many options.
#load some packages
library(tidyverse)
library(pscl)
library(MASS)
library(visreg)
#The data set: Catch of Delta Smelt in the Fall Midwater Trawl from Montezuma Slough
#FMWT data is avaialable here: ftp://ftp.wildlife.ca.gov/TownetFallMidwaterTrawl/FMWT%20Data/
#I downloaded the data set and subset just the data on Delta Smelt.
#load the file:
FMWT_DS = read.csv("FMWT_DeltaSmelt.csv")[,-1]
#switch Station to a factor and Date to a date
FMWT_DS$Station = as.factor(FMWT_DS$Station)
FMWT_DS$Date = as.Date(FMWT_DS$Date)
#subset just the stations in Montezuma Slough prior to 2011
FMWT_DS2 = filter(FMWT_DS, (Station == "608" | Station == "605" | Station == "606") & Year <= 2011)
#if I wanted to investigate factors, such as salinity and temperature, that affect
#smelt catch, I might try a linear model. However, linear models have assumptions of normality and
#homogeneity of variance.
#Just see what a linear model looks like
dslm = lm(catch~ Station + Secchi + TopEC, data = FMWT_DS2)
summary(dslm)
#woohoo! Lots of significant p-values! But we are violating all sorts of assumptions
#check out the diagnostic plots
#if you need a review of what these plots mean: https://data.library.virginia.edu/diagnostic-plots/
plot(dslm)
#look at the histogram
ggplot(FMWT_DS2, aes(x=catch)) + geom_histogram()
#look at a Shapiro-wilk normality test
shapiro.test(FMWT_DS2$catch)
#Definitely not normal!
#traditional statistics would now have you try a log-transformation on the data
#except we need to at "1" to each value because of all the zeros.
FMWT_DS2$logcatch = log(FMWT_DS2$catch +1)
shapiro.test(FMWT_DS2$logcatch)
#but we are still not normal.
#Furthermore, a lot of statisticians say you shouldn't transform count data.
#instead we can use generalized linear models with different error distributions.
#https://www.r-bloggers.com/do-not-log-transform-count-data-bitches/
#https://besjournals.onlinelibrary.wiley.com/doi/10.1111/j.2041-210X.2010.00021.x
#But which distribution should we use? Poisson is the usual first choice for count data.
#Poisson distributions
dsglm = glm(catch~ Station + Secchi + TopEC, family = poisson, data = FMWT_DS2)
summary(dsglm)
#Wow! super significant. but how are we on those diagnostic plots?
plot(dsglm)
#Not good. We might have overdispersion. Poisson models are generally good at describing the mean,
#but they usually underestimate the variance.
#To check for that, we look at the difference
#between a posson and a quazipoisson distribution
#Quasipoisson
#A quassipoisson model uses the mean regression function and variance from the Poisson regression,
#but leaves the dispersion parameter unrestricted. (in a Poisson model dispersion = 1)
# Do not write in your report or paper that you used a quasi-Poisson distribution. Just say that
#you did a Poisson GLM, detected overdispersion, and corrected the standard errors
#using a quasi-GLM model where the variance is given by φ × μ, where μ is the
#mean and φ the dispersion parameter. Zuur et al. 2009
dsglm2 = glm(catch~ Station + Secchi + TopEC, family = quasipoisson, data = FMWT_DS2)
summary(dsglm2)$dispersion # dispersion coefficient
#test for differences between the two model specifications
pchisq(summary(dsglm2)$dispersion * dsglm$df.residual, dsglm$df.residual, lower = F)
#it's very overdispersed
#Negative binomial distributions account for overdisperssion, but variance is a quadratic
#function of the mean rather than a linear function of the mean (as in quazipoisson).
#There is lots of discussion as to which is better (e.g. Ver Hoef and Bveng 2007)
dsnb2 = glm.nb(catch~ Station + Secchi + TopEC, data = FMWT_DS2)
summary(dsnb2)
plot(dsnb2)
#but even this doesn't look great. The problem is, we have too many zeros, which isn't dealt with well
#in either quazipoisson or negative binomial models.
#To deal with excess zeros, we can use a zero-inflated model, or a zero-augmented model (hurdle model)
#to figure out which to use, we need to understand the potential sources of zeros (modefied from Zuur et al 2009):
#1. Structural zeros. Delta smelt are not caught because the habitat is not suitable (too hot, to clear, too salty).
#2. Design error. Delta smelt are not caught because the midwater trawl doesn't sample the top part of hte water
#well and smelt hang out near the surface.
#3. Observer error. Delta smelt were caught, but Rosie thought they were Wakasagi.
#4. Smelt error. The habitat was suitable but the smelt weren't using it.
#Structural zeros and "smelt error" are considered "true zeros",
#whereas design error and observer error are "false zeros"
#Zero-augmented models (two-part models or hurdle models) do not differentiate between types of zeros.
#zero-inflated models (mixture models) do differentiate so you can model zeros caused by the habitat
#being bad seperately from the zeros caused by observer or design error.
#the overdisperssion in our Poisson model may have just been caused by the extra zeros,
#so a zero inflated Poisson might work
dszip = zeroinfl(catch~ Station + TopEC + Secchi, dist = "poisson", data = FMWT_DS2)
summary(dszip)
#that was a weird error. Yuck.
#If there was also overdisspersion in the counts, then a zero inflated negative binomial will
#work better.
dsznb = zeroinfl(catch~ Station + TopEC + Secchi, dist = "negbin", data = FMWT_DS2)
summary(dsznb)
#Dang. More errors.
#I get a warning "In sqrt(diag(object$vcov)) : NaNs produced"
#this is usually because some of the covariates need to be standardized.
#the function "scale" will subtract the mean from the column and divide by its standard
#deviation to put all the variables on the same scale.
FMWT_DS2$TopEC2 = scale(FMWT_DS2$TopEC)
FMWT_DS2$Secchi2 = scale(FMWT_DS2$Secchi)
#try it again with the scaled covariates
dszip = zeroinfl(catch~ Station + TopEC2 + Secchi2, dist = "poisson", data = FMWT_DS2)
summary(dszip)
#If there was also overdisspersion in the counts, then a zero inflated negative binomial will
#work better.
dsznb = zeroinfl(catch~ Station + TopEC2 + Secchi2, dist = "negbin", data = FMWT_DS2)
summary(dsznb)
#much better!
#compare the zeroinflated models with a likelihood ratio test to see if we've delt with the overdispersion
library(lmtest)
lrtest(dszip, dsznb)
#it's telling me I'm still overdisperssed, I should use the negative binomial version
#look at the covariates again
summary(dsznb)
#look at the partial residual plots (also known as conditional plots)
visreg(dsznb)
#Model validation is based on plotting Pearson residuals against the fitted values
#and the Pearson residuals against each explanatory variable. You should not see any pattern (Zuur et al. 2009)
tests = data.frame(DSresid = residuals(dsznb, type = "pearson"),
DSfit = dsznb$fitted.values,
sec = dsznb$model["Secchi2"],
EC = dsznb$model["TopEC2"])
ggplot(data = tests, aes(x=DSresid, y = DSfit)) + geom_point()
ggplot(data = tests, aes(x=DSresid, y = Secchi2)) + geom_point()
ggplot(data = tests, aes(x=DSresid, y = TopEC2)) + geom_point()
#looks OK!
#you can now use AIC model selection, or whatever method you prefer, to decide which covariates are most important.
| /2019.08.08_countmodels presentation/countmodels.R | no_license | morgan-gilbert/DataScience | R | false | false | 7,264 | r | #REally really messy count data: Too many options.
#load some packages
library(tidyverse)
library(pscl)
library(MASS)
library(visreg)
#The data set: Catch of Delta Smelt in the Fall Midwater Trawl from Montezuma Slough
#FMWT data is avaialable here: ftp://ftp.wildlife.ca.gov/TownetFallMidwaterTrawl/FMWT%20Data/
#I downloaded the data set and subset just the data on Delta Smelt.
#load the file:
FMWT_DS = read.csv("FMWT_DeltaSmelt.csv")[,-1]
#switch Station to a factor and Date to a date
FMWT_DS$Station = as.factor(FMWT_DS$Station)
FMWT_DS$Date = as.Date(FMWT_DS$Date)
#subset just the stations in Montezuma Slough prior to 2011
FMWT_DS2 = filter(FMWT_DS, (Station == "608" | Station == "605" | Station == "606") & Year <= 2011)
#if I wanted to investigate factors, such as salinity and temperature, that affect
#smelt catch, I might try a linear model. However, linear models have assumptions of normality and
#homogeneity of variance.
#Just see what a linear model looks like
dslm = lm(catch~ Station + Secchi + TopEC, data = FMWT_DS2)
summary(dslm)
#woohoo! Lots of significant p-values! But we are violating all sorts of assumptions
#check out the diagnostic plots
#if you need a review of what these plots mean: https://data.library.virginia.edu/diagnostic-plots/
plot(dslm)
#look at the histogram
ggplot(FMWT_DS2, aes(x=catch)) + geom_histogram()
#look at a Shapiro-wilk normality test
shapiro.test(FMWT_DS2$catch)
#Definitely not normal!
#traditional statistics would now have you try a log-transformation on the data
#except we need to at "1" to each value because of all the zeros.
FMWT_DS2$logcatch = log(FMWT_DS2$catch +1)
shapiro.test(FMWT_DS2$logcatch)
#but we are still not normal.
#Furthermore, a lot of statisticians say you shouldn't transform count data.
#instead we can use generalized linear models with different error distributions.
#https://www.r-bloggers.com/do-not-log-transform-count-data-bitches/
#https://besjournals.onlinelibrary.wiley.com/doi/10.1111/j.2041-210X.2010.00021.x
#But which distribution should we use? Poisson is the usual first choice for count data.
#Poisson distributions
dsglm = glm(catch~ Station + Secchi + TopEC, family = poisson, data = FMWT_DS2)
summary(dsglm)
#Wow! super significant. but how are we on those diagnostic plots?
plot(dsglm)
#Not good. We might have overdispersion. Poisson models are generally good at describing the mean,
#but they usually underestimate the variance.
#To check for that, we look at the difference
#between a posson and a quazipoisson distribution
#Quasipoisson
#A quassipoisson model uses the mean regression function and variance from the Poisson regression,
#but leaves the dispersion parameter unrestricted. (in a Poisson model dispersion = 1)
# Do not write in your report or paper that you used a quasi-Poisson distribution. Just say that
#you did a Poisson GLM, detected overdispersion, and corrected the standard errors
#using a quasi-GLM model where the variance is given by φ × μ, where μ is the
#mean and φ the dispersion parameter. Zuur et al. 2009
dsglm2 = glm(catch~ Station + Secchi + TopEC, family = quasipoisson, data = FMWT_DS2)
summary(dsglm2)$dispersion # dispersion coefficient
#test for differences between the two model specifications
pchisq(summary(dsglm2)$dispersion * dsglm$df.residual, dsglm$df.residual, lower = F)
#it's very overdispersed
#Negative binomial distributions account for overdisperssion, but variance is a quadratic
#function of the mean rather than a linear function of the mean (as in quazipoisson).
#There is lots of discussion as to which is better (e.g. Ver Hoef and Bveng 2007)
dsnb2 = glm.nb(catch~ Station + Secchi + TopEC, data = FMWT_DS2)
summary(dsnb2)
plot(dsnb2)
#but even this doesn't look great. The problem is, we have too many zeros, which isn't dealt with well
#in either quazipoisson or negative binomial models.
#To deal with excess zeros, we can use a zero-inflated model, or a zero-augmented model (hurdle model)
#to figure out which to use, we need to understand the potential sources of zeros (modefied from Zuur et al 2009):
#1. Structural zeros. Delta smelt are not caught because the habitat is not suitable (too hot, to clear, too salty).
#2. Design error. Delta smelt are not caught because the midwater trawl doesn't sample the top part of hte water
#well and smelt hang out near the surface.
#3. Observer error. Delta smelt were caught, but Rosie thought they were Wakasagi.
#4. Smelt error. The habitat was suitable but the smelt weren't using it.
#Structural zeros and "smelt error" are considered "true zeros",
#whereas design error and observer error are "false zeros"
#Zero-augmented models (two-part models or hurdle models) do not differentiate between types of zeros.
#zero-inflated models (mixture models) do differentiate so you can model zeros caused by the habitat
#being bad seperately from the zeros caused by observer or design error.
#the overdisperssion in our Poisson model may have just been caused by the extra zeros,
#so a zero inflated Poisson might work
dszip = zeroinfl(catch~ Station + TopEC + Secchi, dist = "poisson", data = FMWT_DS2)
summary(dszip)
#that was a weird error. Yuck.
#If there was also overdisspersion in the counts, then a zero inflated negative binomial will
#work better.
dsznb = zeroinfl(catch~ Station + TopEC + Secchi, dist = "negbin", data = FMWT_DS2)
summary(dsznb)
#Dang. More errors.
#I get a warning "In sqrt(diag(object$vcov)) : NaNs produced"
#this is usually because some of the covariates need to be standardized.
#the function "scale" will subtract the mean from the column and divide by its standard
#deviation to put all the variables on the same scale.
FMWT_DS2$TopEC2 = scale(FMWT_DS2$TopEC)
FMWT_DS2$Secchi2 = scale(FMWT_DS2$Secchi)
#try it again with the scaled covariates
dszip = zeroinfl(catch~ Station + TopEC2 + Secchi2, dist = "poisson", data = FMWT_DS2)
summary(dszip)
#If there was also overdisspersion in the counts, then a zero inflated negative binomial will
#work better.
dsznb = zeroinfl(catch~ Station + TopEC2 + Secchi2, dist = "negbin", data = FMWT_DS2)
summary(dsznb)
#much better!
#compare the zeroinflated models with a likelihood ratio test to see if we've delt with the overdispersion
library(lmtest)
lrtest(dszip, dsznb)
#it's telling me I'm still overdisperssed, I should use the negative binomial version
#look at the covariates again
summary(dsznb)
#look at the partial residual plots (also known as conditional plots)
visreg(dsznb)
#Model validation is based on plotting Pearson residuals against the fitted values
#and the Pearson residuals against each explanatory variable. You should not see any pattern (Zuur et al. 2009)
tests = data.frame(DSresid = residuals(dsznb, type = "pearson"),
DSfit = dsznb$fitted.values,
sec = dsznb$model["Secchi2"],
EC = dsznb$model["TopEC2"])
ggplot(data = tests, aes(x=DSresid, y = DSfit)) + geom_point()
ggplot(data = tests, aes(x=DSresid, y = Secchi2)) + geom_point()
ggplot(data = tests, aes(x=DSresid, y = TopEC2)) + geom_point()
#looks OK!
#you can now use AIC model selection, or whatever method you prefer, to decide which covariates are most important.
|
#' Run the solver
#'
#' Runs the solver executable and extracts the quantity of interest (QOI)
#' and its Jacobian matrix (if available).
#'
#' @param solver solver object.
#' @param x numeric, the point (parameter set) at which QOI is evaluated.
#' @param ... additional arguments passed to other functions, note that
#' particular solvers can have some [required_args()] and they must be
#' provided.
#'
#' @return List with components:
#' \describe{
#' \item{`qoi`}{QOI value at `x`, default implementation returns `NA`;}
#' \item{`jacobian`}{QOI Jacobian matrix at `x`, default implementation
#' returns `NA`, `NA` is also returned if the solver does not provide Jacobian
#' info.}
#' }
#'
#' @export
#'
#' @examples
#' run(x = 10)
#' run(NULL, c(2, 4))
run <- function(solver, x, ...) {
  # S3 generic; concrete behavior lives in the run.* methods below.
  UseMethod("run")
}
#' @export
run.default <- function(solver = NULL, x = NULL, ...) {
  # Fallback for unrecognized solver classes: no QOI and no Jacobian.
  empty_result <- list(qoi = NA, jacobian = NA)
  empty_result
}
#' @describeIn run Checks if number of solver parameters (unless
#' `NULL`) equals length of given point and delegates to default method.
#'
#' @export
run.solver <- function(solver, x, ...) {
  # Expected parameter count; NULL means the solver does not constrain it.
  npars <- nparams(solver)
  assert_point_not_null(x)
  # Extra required arguments (beyond 'x') recorded on the solver object;
  # every one of them must be supplied (non-NULL) via '...'.
  other_args <- attr(solver, "required_args_nox")
  assert_all_args_not_null(other_args, list(...))
  if (!is.null(npars) && length(x) != npars) {
    stop("Dimension of 'x' must equal number of parameters in 'solver'",
         call. = FALSE)
  }
  # Count this invocation, then hand control to the next method in the chain.
  solver$add_run()
  NextMethod("run")
}
#' @describeIn run Computes output using provided functions.
#'
#' @export
run.r_solver <- function(solver, x, ...) {
  # Delegate up the class chain first (shared validation/run counting for
  # solver objects); the delegated result itself is intentionally unused.
  NextMethod("run")
  # Jacobian is optional: only computed when the solver supplies a function.
  jac <- if (provides_jacobian(solver)) solver$jacobian(x) else NA
  list(qoi = solver$qoi(x), jacobian = jac)
}
#' @describeIn run Runs solver executable and reads values from output
#' file(s). If solver process exits with non-zero status code, a warning is
#' issued and list of `NA`'s is returned.
#'
#' @param ignore.stdout logical, if not `NULL` overrides default setting in
#' `shell_solver` object
#' @param ignore.stderr logical, if not `NULL` overrides default setting in
#' `shell_solver` object
#' @param silent logical, suppress diagnostic messages (not warnings), `TRUE`
#' forces `ignore.stdout = TRUE` and `ignore.stderr = TRUE`
#'
#' @export
run.shell_solver <- function(solver, x, ignore.stdout = NULL,
                             ignore.stderr = NULL, silent = FALSE, ...) {
  # 'silent' implies discarding both of the solver's output streams.
  if (silent) {
    ignore.stdout <- TRUE
    ignore.stderr <- TRUE
  }
  # Shared solver-level validation and run counting via the class chain.
  NextMethod("run")
  # Full shell command: executable plus formatted arguments (x and extras).
  cmd <- paste(solver$cmd, solver$combine_args(x, ...))
  s_message("Solver command: ", cmd, silent = silent)
  # Per-call overrides take precedence over the solver's stored defaults.
  do_ignore_stdout <- solver$ignore.stdout
  if (!is.null(ignore.stdout)) {
    do_ignore_stdout <- ignore.stdout
  }
  do_ignore_stderr <- solver$ignore.stderr
  if (!is.null(ignore.stderr)) {
    do_ignore_stderr <- ignore.stderr
  }
  result <- do_run(solver, cmd, ignore.stdout = do_ignore_stdout,
                   ignore.stderr = do_ignore_stderr, silent = silent)
  if (result$status != 0) {
    warning("Solver exited with status ", result$status, call. = FALSE)
  } else {
    s_message("Solver exited normally", silent = silent)
  }
  # Drop the internal exit-status field; callers only see qoi/jacobian.
  result[["status"]] <- NULL
  result
}
# Execute 'cmd' (optionally inside the solver working directory) and collect
# the solver outputs. Returns a list with the process exit status plus the
# QOI value and Jacobian read from the output files (NA's on failure).
do_run <- function(solver, cmd, ignore.stdout, ignore.stderr, silent) {
  wd <- solver$wd
  if (!is.null(wd)) {
    s_message("Entering ", wd, silent = silent)
    # The directory change is reverted automatically when this function exits.
    withr::local_dir(wd)
  }
  exit_code <- system(cmd, ignore.stdout = ignore.stdout,
                      ignore.stderr = ignore.stderr)
  if (exit_code != 0) {
    # Non-zero exit: the output files cannot be trusted, so skip reading them.
    return(list(status = exit_code, qoi = NA, jacobian = NA))
  }
  list(status = 0, qoi = read_qoi(solver), jacobian = read_jacobian(solver))
}
# Read the solver's quantity-of-interest output file. Set
# 'make_path_absolute' when calling from outside the solver working
# directory so the path is resolved via output_file().
read_qoi <- function(solver, make_path_absolute = FALSE) {
  path <- if (make_path_absolute) {
    output_file(solver, "qoi")
  } else {
    solver$qoi_file
  }
  if (!file.exists(path)) {
    stop("Quantity-of-interest file does not exist: ", path, call. = FALSE)
  }
  solver$read_qoi(path)
}
# Read the solver's QOI Jacobian output file. Returns NA when the solver
# does not provide Jacobian information or when the file is missing. Set
# 'make_path_absolute' when calling from outside the solver working dir.
read_jacobian <- function(solver, make_path_absolute = FALSE) {
  if (!provides_jacobian(solver)) {
    return(NA)
  }
  path <- if (make_path_absolute) {
    output_file(solver, "jacobian")
  } else {
    solver$jacobian_file
  }
  if (!file.exists(path)) {
    return(NA)
  }
  solver$read_jacobian(path)
}
| /R/run.R | permissive | maciejsmolka/solvergater | R | false | false | 4,508 | r | #' Run the solver
#'
#' Runs the solver executable and extracts the quantity of interest (QOI)
#' and its Jacobian matrix (if available).
#'
#' @param solver solver object.
#' @param x numeric, the point (parameter set) at which QOI is evaluated.
#' @param ... additional arguments passed to other functions, note that
#' particular solvers can have some [required_args()] and they must be
#' provided.
#'
#' @return List with components:
#' \describe{
#' \item{`qoi`}{QOI value at `x`, default implementation returns `NA`;}
#' \item{`jacobian`}{QOI Jacobian matrix at `x`, default implementation
#' returns `NA`, `NA` is also returned if the solver does not provide Jacobian
#' info.}
#' }
#'
#' @export
#'
#' @examples
#' run(x = 10)
#' run(NULL, c(2, 4))
run <- function(solver, x, ...) {
  # S3 generic; concrete behavior lives in the run.* methods below.
  UseMethod("run")
}
#' @export
run.default <- function(solver = NULL, x = NULL, ...) {
  # Fallback for unrecognized solver classes: no QOI and no Jacobian.
  empty_result <- list(qoi = NA, jacobian = NA)
  empty_result
}
#' @describeIn run Checks if number of solver parameters (unless
#' `NULL`) equals length of given point and delegates to default method.
#'
#' @export
run.solver <- function(solver, x, ...) {
  # Expected parameter count; NULL means the solver does not constrain it.
  npars <- nparams(solver)
  assert_point_not_null(x)
  # Extra required arguments (beyond 'x') recorded on the solver object;
  # every one of them must be supplied (non-NULL) via '...'.
  other_args <- attr(solver, "required_args_nox")
  assert_all_args_not_null(other_args, list(...))
  if (!is.null(npars) && length(x) != npars) {
    stop("Dimension of 'x' must equal number of parameters in 'solver'",
         call. = FALSE)
  }
  # Count this invocation, then hand control to the next method in the chain.
  solver$add_run()
  NextMethod("run")
}
#' @describeIn run Computes output using provided functions.
#'
#' @export
run.r_solver <- function(solver, x, ...) {
  # Delegate up the class chain first (shared validation/run counting for
  # solver objects); the delegated result itself is intentionally unused.
  NextMethod("run")
  # Jacobian is optional: only computed when the solver supplies a function.
  jac <- if (provides_jacobian(solver)) solver$jacobian(x) else NA
  list(qoi = solver$qoi(x), jacobian = jac)
}
#' @describeIn run Runs solver executable and reads values from output
#' file(s). If solver process exits with non-zero status code, a warning is
#' issued and list of `NA`'s is returned.
#'
#' @param ignore.stdout logical, if not `NULL` overrides default setting in
#' `shell_solver` object
#' @param ignore.stderr logical, if not `NULL` overrides default setting in
#' `shell_solver` object
#' @param silent logical, suppress diagnostic messages (not warnings), `TRUE`
#' forces `ignore.stdout = TRUE` and `ignore.stderr = TRUE`
#'
#' @export
run.shell_solver <- function(solver, x, ignore.stdout = NULL,
                             ignore.stderr = NULL, silent = FALSE, ...) {
  # silent = TRUE forces suppression of both output streams, as documented.
  if (silent) {
    ignore.stdout <- TRUE
    ignore.stderr <- TRUE
  }
  # Delegate to the next run method for shared validation and run counting.
  NextMethod("run")
  # Build the full shell command: solver executable plus the solver-specific
  # encoding of the point x (and any extra arguments in ...).
  cmd <- paste(solver$cmd, solver$combine_args(x, ...))
  s_message("Solver command: ", cmd, silent = silent)
  # Per-call ignore.* arguments (when non-NULL) override the defaults that
  # were stored in the solver object.
  do_ignore_stdout <- solver$ignore.stdout
  if (!is.null(ignore.stdout)) {
    do_ignore_stdout <- ignore.stdout
  }
  do_ignore_stderr <- solver$ignore.stderr
  if (!is.null(ignore.stderr)) {
    do_ignore_stderr <- ignore.stderr
  }
  result <- do_run(solver, cmd, ignore.stdout = do_ignore_stdout,
                   ignore.stderr = do_ignore_stderr, silent = silent)
  # A non-zero exit code produces a warning, not an error; do_run has already
  # set qoi and jacobian to NA in that case.
  if (result$status != 0) {
    warning("Solver exited with status ", result$status, call. = FALSE)
  } else {
    s_message("Solver exited normally", silent = silent)
  }
  # Drop the internal status field so only qoi/jacobian are returned.
  result[["status"]] <- NULL
  result
}
# Execute `cmd` for `solver`, optionally inside the solver's working
# directory, and collect QOI/Jacobian values from the solver's output files.
# On a non-zero exit status, returns NA placeholders instead of reading files.
do_run <- function(solver, cmd, ignore.stdout, ignore.stderr, silent) {
  wd <- solver$wd
  if (!is.null(wd)) {
    s_message("Entering ", wd, silent = silent)
    withr::local_dir(wd)  # working directory is restored on function exit
  }
  exit_code <- system(cmd, ignore.stdout = ignore.stdout,
                      ignore.stderr = ignore.stderr)
  if (exit_code != 0) {
    return(list(status = exit_code, qoi = NA, jacobian = NA))
  }
  list(status = 0, qoi = read_qoi(solver), jacobian = read_jacobian(solver))
}
# Read the quantity-of-interest file produced by the solver. Set
# make_path_absolute = TRUE when calling from outside the solver's working
# directory so the path is resolved through output_file().
read_qoi <- function(solver, make_path_absolute = FALSE) {
  path <- if (make_path_absolute) {
    output_file(solver, "qoi")
  } else {
    solver$qoi_file
  }
  if (!file.exists(path)) {
    stop("Quantity-of-interest file does not exist: ", path, call. = FALSE)
  }
  solver$read_qoi(path)
}
# Read the QOI Jacobian file if the solver provides one. Set
# make_path_absolute = TRUE when calling from outside the solver's working
# directory. Returns NA when the solver has no Jacobian or the file is absent.
read_jacobian <- function(solver, make_path_absolute = FALSE) {
  if (!provides_jacobian(solver)) {
    return(NA)
  }
  path <- if (make_path_absolute) {
    output_file(solver, "jacobian")
  } else {
    solver$jacobian_file
  }
  if (!file.exists(path)) {
    return(NA)
  }
  solver$read_jacobian(path)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aesPC_wrapper.R
\name{AESPCA_pVals}
\alias{AESPCA_pVals}
\alias{AESPCA_pVals,OmicsPathway-method}
\title{Test pathway association with AES-PCA}
\usage{
AESPCA_pVals(
object,
numPCs = 1,
numReps = 0L,
parallel = FALSE,
numCores = NULL,
asPCA = FALSE,
adjustpValues = TRUE,
adjustment = c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD", "BH", "BY",
"ABH", "TSBH"),
...
)
\S4method{AESPCA_pVals}{OmicsPathway}(
object,
numPCs = 1,
numReps = 1000,
parallel = FALSE,
numCores = NULL,
asPCA = FALSE,
adjustpValues = TRUE,
adjustment = c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD", "BH", "BY",
"ABH", "TSBH"),
...
)
}
\arguments{
\item{object}{An object of class \code{OmicsPathway} with a response matrix
or vector.}
\item{numPCs}{The number of PCs to extract from each pathway. Defaults to 1.}
\item{numReps}{How many permutations to estimate the \eqn{p}-value? Defaults
to 0 (that is, to estimate the \eqn{p}-value parametrically). If
\code{numReps} > 0, then the non-parametric, permutation \eqn{p}-value
will be returned based on the number of random samples specified.}
\item{parallel}{Should the computation be completed in parallel? Defaults to
\code{FALSE}.}
\item{numCores}{If \code{parallel = TRUE}, how many cores should be used for
computation? Internally defaults to the number of available cores minus 1.}
\item{asPCA}{Should the computation return the eigenvectors and eigenvalues
instead of the adaptive, elastic-net, sparse principal components and their
corresponding loadings. Defaults to \code{FALSE}; this should be used for
diagnostic or comparative purposes only.}
\item{adjustpValues}{Should you adjust the \eqn{p}-values for multiple
comparisons? Defaults to TRUE.}
\item{adjustment}{Character vector of procedures. The returned data frame
will be sorted in ascending order by the first procedure in this vector,
with ties broken by the unadjusted \eqn{p}-value. If only one procedure is
selected, then it is necessarily the first procedure. See the documentation
for the \code{\link{ControlFDR}} function for the adjustment procedure
definitions and citations.}
\item{...}{Dots for additional internal arguments.}
}
\value{
A results list with class \code{aespcOut}. This list has three
components: a data frame of pathway details, pathway \eqn{p}-values, and
potential adjustments to those values (\code{pVals_df}); a list of the
first \code{numPCs} \emph{score} vectors for each pathway (\code{PCs_ls});
and a list of the first \code{numPCs} feature loading vectors for each
pathway (\code{loadings_ls}). The \eqn{p}-value data frame has columns:
\itemize{
\item{\code{pathways} : }{The names of the pathways in the \code{Omics*}
object (given in \code{object@trimPathwayCollection$pathways}).}
\item{\code{setsize} : }{The number of genes in each of the original
pathways (given in the \code{object@trimPathwayCollection$setsize}
object).}
\item{\code{n_tested} : }{The number of genes in each of the trimmed
pathways (given in the \code{object@trimPathwayCollection$n_tested}
object).}
\item{\code{terms} : }{The pathway description, as given in the
\code{object@trimPathwayCollection$TERMS} object.}
\item{\code{rawp} : }{The unadjusted \eqn{p}-values of each pathway.}
\item{\code{...} : }{Additional columns of adjusted \eqn{p}-values as
specified through the \code{adjustment} argument.}
}
The data frame will be sorted in ascending order by the method specified
first in the \code{adjustment} argument. If \code{adjustpValues = FALSE},
then the data frame will be sorted by the raw \eqn{p}-values. If you have
the suggested \code{tidyverse} package suite loaded, then this data frame
will print as a \code{\link[tibble]{tibble}}. Otherwise, it will print as
a data frame.
}
\description{
Given a supervised \code{OmicsPath} object (one of
\code{OmicsSurv}, \code{OmicsReg}, or \code{OmicsCateg}), extract the
first \eqn{k} adaptive, elastic-net, sparse principal components (PCs)
from each pathway-subset of the features in the -Omics assay design
matrix, test their association with the response matrix, and return a
data frame of the adjusted \eqn{p}-values for each pathway.
}
\details{
This is a wrapper function for the \code{\link{ExtractAESPCs}},
\code{\link{PermTestSurv}}, \code{\link{PermTestReg}}, and
\code{\link{PermTestCateg}} functions.
Please see our Quickstart Guide for this package:
\url{https://gabrielodom.github.io/pathwayPCA/articles/Supplement1-Quickstart_Guide.html}
}
\examples{
### Load the Example Data ###
data("colonSurv_df")
data("colon_pathwayCollection")
### Create an OmicsSurv Object ###
colon_Omics <- CreateOmics(
assayData_df = colonSurv_df[, -(2:3)],
pathwayCollection_ls = colon_pathwayCollection,
response = colonSurv_df[, 1:3],
respType = "surv"
)
### Calculate Pathway p-Values ###
colonSurv_aespc <- AESPCA_pVals(
object = colon_Omics,
numReps = 0,
parallel = TRUE,
numCores = 2,
adjustpValues = TRUE,
adjustment = c("Hoch", "SidakSD")
)
}
\seealso{
\code{\link{CreateOmics}}; \code{\link{ExtractAESPCs}};
\code{\link{PermTestSurv}}; \code{\link{PermTestReg}};
\code{\link{PermTestCateg}}; \code{\link{TabulatepValues}};
\code{\link[parallel]{clusterApply}}
}
| /man/AESPCA_pVals.Rd | no_license | gabrielodom/pathwayPCA | R | false | true | 5,457 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/aesPC_wrapper.R
\name{AESPCA_pVals}
\alias{AESPCA_pVals}
\alias{AESPCA_pVals,OmicsPathway-method}
\title{Test pathway association with AES-PCA}
\usage{
AESPCA_pVals(
object,
numPCs = 1,
numReps = 0L,
parallel = FALSE,
numCores = NULL,
asPCA = FALSE,
adjustpValues = TRUE,
adjustment = c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD", "BH", "BY",
"ABH", "TSBH"),
...
)
\S4method{AESPCA_pVals}{OmicsPathway}(
object,
numPCs = 1,
numReps = 1000,
parallel = FALSE,
numCores = NULL,
asPCA = FALSE,
adjustpValues = TRUE,
adjustment = c("Bonferroni", "Holm", "Hochberg", "SidakSS", "SidakSD", "BH", "BY",
"ABH", "TSBH"),
...
)
}
\arguments{
\item{object}{An object of class \code{OmicsPathway} with a response matrix
or vector.}
\item{numPCs}{The number of PCs to extract from each pathway. Defaults to 1.}
\item{numReps}{How many permutations to estimate the \eqn{p}-value? Defaults
to 0 (that is, to estimate the \eqn{p}-value parametrically). If
\code{numReps} > 0, then the non-parametric, permutation \eqn{p}-value
will be returned based on the number of random samples specified.}
\item{parallel}{Should the computation be completed in parallel? Defaults to
\code{FALSE}.}
\item{numCores}{If \code{parallel = TRUE}, how many cores should be used for
computation? Internally defaults to the number of available cores minus 1.}
\item{asPCA}{Should the computation return the eigenvectors and eigenvalues
instead of the adaptive, elastic-net, sparse principal components and their
corresponding loadings. Defaults to \code{FALSE}; this should be used for
diagnostic or comparative purposes only.}
\item{adjustpValues}{Should you adjust the \eqn{p}-values for multiple
comparisons? Defaults to TRUE.}
\item{adjustment}{Character vector of procedures. The returned data frame
will be sorted in ascending order by the first procedure in this vector,
with ties broken by the unadjusted \eqn{p}-value. If only one procedure is
selected, then it is necessarily the first procedure. See the documentation
for the \code{\link{ControlFDR}} function for the adjustment procedure
definitions and citations.}
\item{...}{Dots for additional internal arguments.}
}
\value{
A results list with class \code{aespcOut}. This list has three
components: a data frame of pathway details, pathway \eqn{p}-values, and
potential adjustments to those values (\code{pVals_df}); a list of the
first \code{numPCs} \emph{score} vectors for each pathway (\code{PCs_ls});
and a list of the first \code{numPCs} feature loading vectors for each
pathway (\code{loadings_ls}). The \eqn{p}-value data frame has columns:
\itemize{
\item{\code{pathways} : }{The names of the pathways in the \code{Omics*}
object (given in \code{object@trimPathwayCollection$pathways}).}
\item{\code{setsize} : }{The number of genes in each of the original
pathways (given in the \code{object@trimPathwayCollection$setsize}
object).}
\item{\code{n_tested} : }{The number of genes in each of the trimmed
pathways (given in the \code{object@trimPathwayCollection$n_tested}
object).}
\item{\code{terms} : }{The pathway description, as given in the
\code{object@trimPathwayCollection$TERMS} object.}
\item{\code{rawp} : }{The unadjusted \eqn{p}-values of each pathway.}
\item{\code{...} : }{Additional columns of adjusted \eqn{p}-values as
specified through the \code{adjustment} argument.}
}
The data frame will be sorted in ascending order by the method specified
first in the \code{adjustment} argument. If \code{adjustpValues = FALSE},
then the data frame will be sorted by the raw \eqn{p}-values. If you have
the suggested \code{tidyverse} package suite loaded, then this data frame
will print as a \code{\link[tibble]{tibble}}. Otherwise, it will print as
a data frame.
}
\description{
Given a supervised \code{OmicsPath} object (one of
\code{OmicsSurv}, \code{OmicsReg}, or \code{OmicsCateg}), extract the
first \eqn{k} adaptive, elastic-net, sparse principal components (PCs)
from each pathway-subset of the features in the -Omics assay design
matrix, test their association with the response matrix, and return a
data frame of the adjusted \eqn{p}-values for each pathway.
}
\details{
This is a wrapper function for the \code{\link{ExtractAESPCs}},
\code{\link{PermTestSurv}}, \code{\link{PermTestReg}}, and
\code{\link{PermTestCateg}} functions.
Please see our Quickstart Guide for this package:
\url{https://gabrielodom.github.io/pathwayPCA/articles/Supplement1-Quickstart_Guide.html}
}
\examples{
### Load the Example Data ###
data("colonSurv_df")
data("colon_pathwayCollection")
### Create an OmicsSurv Object ###
colon_Omics <- CreateOmics(
assayData_df = colonSurv_df[, -(2:3)],
pathwayCollection_ls = colon_pathwayCollection,
response = colonSurv_df[, 1:3],
respType = "surv"
)
### Calculate Pathway p-Values ###
colonSurv_aespc <- AESPCA_pVals(
object = colon_Omics,
numReps = 0,
parallel = TRUE,
numCores = 2,
adjustpValues = TRUE,
adjustment = c("Hoch", "SidakSD")
)
}
\seealso{
\code{\link{CreateOmics}}; \code{\link{ExtractAESPCs}};
\code{\link{PermTestSurv}}; \code{\link{PermTestReg}};
\code{\link{PermTestCateg}}; \code{\link{TabulatepValues}};
\code{\link[parallel]{clusterApply}}
}
|
### Getting data, data are stored in the directory data
file_path <- "./data/household_power_consumption.txt"
# Rows for 2007-02-01 and 2007-02-02: 2880 minute-level observations
# starting after line 66637 of the file; "?" encodes missing values.
power_dataset <- read.table(file_path, sep = ";", na.strings = "?",
                            skip = 66637, nrows = 2880)
# Recover the column names from the header row of the full file.
names(power_dataset) <- names(read.table(file_path, header = TRUE,
                                         sep = ";", nrows = 1))
str(power_dataset)
# Combine Date and Time into a single timestamp column.
power_dataset$DateTime <- strptime(paste(power_dataset$Date, power_dataset$Time),
                                   "%d/%m/%Y %H:%M:%S")
# Create plot. Open the PNG device BEFORE calling par(): graphics
# parameters apply to the currently active device, so the original order
# (par before png) targeted -- and could implicitly open -- a different device.
png(filename = "plot1.png", width = 480, height = 480)
par(bg = "white")
hist(power_dataset$Global_active_power, breaks = seq(0, 7.5, by = 0.5),
     col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
| /plot1.R | no_license | agdiiura/ExData_Plotting1 | R | false | false | 832 | r | ### Getting data, data are stored in the directory data
file_path = "./data/household_power_consumption.txt"
#start_date = "2007-02-01"
#stop_date = "2007-02-02"
power_dataset = read.table(file_path, sep = ";", na.strings = "?", skip = 66637, nrows = 2880)
names(power_dataset) = names(read.table(file_path, header = TRUE, sep = ";", nrows = 1))
#head(power_dataset)
str(power_dataset)
# collect the right data
# Create DateTime column
power_dataset$DateTime = strptime(paste(power_dataset$Date, power_dataset$Time), "%d/%m/%Y %H:%M:%S")
#head(plot_data)
# Create plot
par(bg = "white")
png(filename = "plot1.png", width = 480, height = 480)
hist(power_dataset$Global_active_power, breaks = seq(0, 7.5, by = 0.5), col = "red",
main = "Global Active Power",
xlab = "Global Active Power (kilowatts)")
dev.off()
|
## This file shows how to help Excel users migrate to R; further contributions are welcome.
## Data input and reading
数列<- scan()## paste data directly, then press Enter to confirm; scan() is extremely handy
fix(数列)## edit the data in a spreadsheet-like editor, just as in Excel
## Reading data from Excel
## Before reading an Excel file, it is recommended to save it as CSV first
读取的Excel数据<- read.csv('100.csv',sep=",")
数据框<-data.frame(matrix(数列,5,2))
## Data operations
升序<- sort(数列) ## ascending sort (1, 2, 3, ...) ## Excel's Sort feature
计数<- length(数列) ## counting ## Excel's COUNT() function
求和<- sum(数列) ## summation
累积求和<- cumsum(数列) ## cumulative sum
唯一数<- unique(数列) ## unique values in the vector
sapply(数据框,sum)## apply an operation to every column of a data frame: sapply(df, op)
## Common statistical formulas
均值<- mean(数列) ## mean ## Excel's AVERAGE() function
标准差<- sd(数列) ## standard deviation
中位数<- median(数列) ## median
频度统计<- table(数列) ## frequency table
plot(table(数列)) ## plot() draws figures for quick data visualization
range(数列) ## minimum and maximum
range(数列)[2]-range(数列)[1]## range width (max - min)
方差<- var(数列) ## sample variance
最小值<- min(数列) ## minimum
最大值<- max(数列) ## maximum
抽样<- sample(数列,10,replace=TRUE)## sampling with replacement, 10 draws
plot(抽样,main="Niu Bi")
## Formula fill-down (only works on matrix-like objects)
## The workhorse of data manipulation: apply()
逐行求和<- apply(数据框,1,sum)## 1 means rows
逐列求和<- apply(数据框,2,sum)## 2 means columns
## Inserting values
插值后数列 <- append(数列 , 11,after = 3) ## insertion: put the value 11 after the 3rd element
## new_vector <- append(original, values_to_add, after = position) ## adding elements
## Indexing
which(数列==插值后数列[-1] , arr.ind =TRUE)
## Conditional formulas
## Tabular display
## Curve fitting
## NOTE(review): x and y below are never defined in this script -- confirm intended data
plot(x,数列)
拟合曲线<- spline(x, 数列, n = 50)
lines(拟合曲线)
lm(y ~ x)
## Statistical graphics; improving Chinese text display will be covered next time
boxplot(数列)## box plot, for summarizing several variables
hist(数列)## histogram, for a single variable
scatter(数列)## scatter plot, e.g. population distribution (NOTE(review): base R has no scatter(); presumably plot() was meant -- confirm)
| /R&Excel/R&Excel.R | permissive | strategist922/R-Tutor | R | false | false | 2,151 | r | ##本文主要讲述如何让Excel使用者迁移到R中;内容还待诸位一起补充
##数据输入与读取
数列<- scan()##可以直接复制粘贴数据后回车确认;scan()真的超级好用
fix(数列)##像excel一样修改数据
##读取Excel中的数据
##读取Excel文件之前,建议把excel另存为csv格式
读取的Excel数据<- read.csv('100.csv',sep=",")
数据框<-data.frame(matrix(数列,5,2))
##数据操作
升序<- sort(数列) ##升序排列(1,2,3……)##Excel中的排序操作
计数<- length(数列) ##计数操作## Excel中的count()函数
求和<- sum(数列) ##求和操作
累积求和<- cumsum(数列) ##累积求和操作
唯一数<- unique(数列) ##找到数列中的唯一数
sapply(数据框,sum)##对数据框各列循环操作 sapply(数据框,操作)
##常见统计公式
均值<- mean(数列) ##均值## Excel中的average()函数
标准差<- sd(数列) ##标准差
中位数<- median(数列) ##中位数
频度统计<- table(数列) ##频度统计
plot(table(数列)) ##plot()画图函数,快速可视化数据
range(数列) ##极差
range(数列)[2]-range(数列)[1]##极距
方差<- var(数列) ##样本方差
最小值<- min(数列)
最大值<- max(数列)
抽样<- sample(数列,10,replace=TRUE)## 有放回的抽样 10次
plot(抽样,main="Niu Bi")
##公式刷(只对矩阵有效)
##数据操作之父 appl数列()
逐行求和<- apply(数据框,1,sum)##1 表示行
逐列求和<- apply(数据框,2,sum)##2 表示列
##插值
插值后数列 <- append(数列 , 11,after = 3) ##插值操作 在1:5中的第3个数后面 插入1:2
## 新的数列<-append(原来的数列,想要添加的数列,在第几个数之后插入)##添加元素
##索引操作
which(数列==插值后数列[-1] , arr.ind =TRUE)
##条件公式
##表格显示
##拟合曲线
plot(x,数列)
拟合曲线<- spline(x, 数列, n = 50)
lines(拟合曲线)
lm(y ~ x)
##统计图形 改善中文显示下次讲
boxplot(数列)##箱子图 用来统计多变量
hist(数列)##直方图 用来统计单变量
scatter(数列)##散点图 用来统计多变量 如人口分布
|
# Spatial-only spatialStructure run on the globe dataset with K = 4 clusters.
source("SpaceTimeStructureMix.R")
# load() restores `globe.data` from the dataset .Robj file in this directory
# (assumes exactly one file matches the pattern -- TODO confirm).
load(list.files(pattern="dataset.Robj"))
sample.cov <- globe.data$globe.sample.covariance
globe.coords <- globe.data$globe.coords
# Input data: spatial coordinates only; time coordinates are all zero
# because this is the purely spatial analysis.
sim.data <- list("geo.coords" = globe.coords,
"time.coords" = matrix(0,nrow=nrow(globe.coords),ncol=2),
"sample.covariance" = sample.cov,
"n.loci" = 10000)
# Model options: spherical (round-earth) distances, 4 clusters,
# no temporal sampling; no.st = FALSE keeps the space-time structure term.
model.options = list("round.earth" = TRUE,
"n.clusters" = 4,
"temporal.sampling"=FALSE,
"no.st" = FALSE)
# MCMC settings: 1e7 generations, sample every 1e4, print progress every 1e3,
# checkpoint output every 1e6 generations.
mcmc.options = list("ngen" = 1e7,
"samplefreq" = 1e4,
"printfreq" = 1e3,
"savefreq" = 1e6,
"output.file.name" = "k4_globe_output.Robj")
# Run the MCMC; initial.parameters = NULL -- presumably defaults are generated
# inside MCMC.gid; verify against its definition in SpaceTimeStructureMix.R.
MCMC.gid(sim.data,model.options,mcmc.options,initial.parameters=NULL)
| /datasets/globe/globe_analyses/spatial/k_4/exe.spatialStructure.R | no_license | gbradburd/spatialStructure | R | false | false | 684 | r | source("SpaceTimeStructureMix.R")
load(list.files(pattern="dataset.Robj"))
sample.cov <- globe.data$globe.sample.covariance
globe.coords <- globe.data$globe.coords
sim.data <- list("geo.coords" = globe.coords,
"time.coords" = matrix(0,nrow=nrow(globe.coords),ncol=2),
"sample.covariance" = sample.cov,
"n.loci" = 10000)
model.options = list("round.earth" = TRUE,
"n.clusters" = 4,
"temporal.sampling"=FALSE,
"no.st" = FALSE)
mcmc.options = list("ngen" = 1e7,
"samplefreq" = 1e4,
"printfreq" = 1e3,
"savefreq" = 1e6,
"output.file.name" = "k4_globe_output.Robj")
MCMC.gid(sim.data,model.options,mcmc.options,initial.parameters=NULL)
|
# Meta-analysis using the random-effects model
# https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/random.html
library(meta)
library(metafor)
library(metaplotr)
library(dplyr)

# Fetch data: one row per study, with effect sizes, standard errors and
# exposure/outcome timing columns.
data <- read.csv("datasets/results.csv")

# Work with effect magnitudes: reverse negative correlations
data$Identified.persistent.effect <- abs(data$Identified.persistent.effect)

# Effect-size pooling: random-effects model with the Hartung-Knapp
# adjustment (hakn = TRUE) and a prediction interval; sm = "MD" treats
# inputs as mean differences.
m.hksj <- metagen(data$Identified.persistent.effect,
                  data$Standard.error,
                  #data = madata,
                  #studlab = paste(Author),
                  comb.fixed = FALSE,
                  comb.random = TRUE,
                  #method.tau = "SJ",
                  hakn = TRUE,
                  prediction = TRUE,
                  sm = "MD")
print(m.hksj)

# Minimum and maximum persistence span covered by each study
data$min.span <- data$Start.of.outcome - data$End.of.exposure
data$max.span <- data$End.of.outcome - data$Start.of.exposure

# Midpoint of the [min.span, max.span] interval.
# FIX: the previous formula (max - min) / 2 computed the interval's
# half-width, not its centre, which swapped the plotted x value and its
# error bar below.
data$central.span <- (data$max.span + data$min.span) / 2

# Plot persistence span vs effect size; horizontal error bars are the
# half-width of each study's span interval, vertical bars the standard error.
x <- data$central.span
se.x <- data$max.span - data$central.span
y <- data$Identified.persistent.effect
se.y <- data$Standard.error
crosshairs(x, y, se.x, se.y, confint = .7,
           main_lab = "",
           x_lab = "Persistence span",
           #x_lim = c(0, 2000),
           y_lab = "Effect size",
           #y_lim = c(0, 1),
           mdrtr = data$Paper,
           mdrtr_lab = 'Paper',
           annotate = TRUE)
| /scripts/meta.R | no_license | pablo-vs/persistence | R | false | false | 1,466 | r | # Meta analysis using the random effects model
# https://bookdown.org/MathiasHarrer/Doing_Meta_Analysis_in_R/random.html
library(meta)
library(metafor)
library(metaplotr)
library(dplyr)
# Fetch data
data <- read.csv("datasets/results.csv")
# Reverse negative correlations
data$Identified.persistent.effect<-data$Identified.persistent.effect %>% abs()
# Effect size pooling
m.hksj <- metagen(data$Identified.persistent.effect,
data$Standard.error,
#data = madata,
#studlab = paste(Author),
comb.fixed = FALSE,
comb.random = TRUE,
#method.tau = "SJ",
hakn = TRUE,
prediction = TRUE,
sm = "MD")
print(m.hksj)
# Calculate min and max span
data$min.span = data$Start.of.outcome-data$End.of.exposure
data$max.span = data$End.of.outcome - data$Start.of.exposure
# Calculate central span
data$central.span = (data$max.span - data$min.span)/2
# Plot persistence span vs error bars
x = data$central.span
se.x = data$max.span - data$central.span
y = data$Identified.persistent.effect
se.y = data$Standard.error
crosshairs(x, y, se.x, se.y, confint = .7,
main_lab = "",
x_lab = "Persistence span",
#x_lim = c(0, 2000),
y_lab = "Effect size",
#y_lim = c(0, 1),
mdrtr = data$Paper,
mdrtr_lab = 'Paper',
annotate = TRUE)
|
## plot4
## NOTE(review): this script assumes a data frame `plotdata` (with DateTime,
## Global_active_power, Voltage, Sub_metering_1..3 and Global_reactive_power
## columns) already exists in the workspace -- presumably created by a
## separate loading step; confirm before running standalone.
png("plot4.png", width = 480, height = 480, units = "px")
# 2 x 2 grid of panels, filled row-wise
par(mfrow = c(2,2))
## top left plot
with(plotdata, plot(DateTime, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "" ))
## top right plot
with(plotdata, plot(DateTime, Voltage, type = "l", ylab = "Voltage", xlab = "datetime" ))
## bottom left plot: three sub-metering series overlaid, with a legend.
## FIX: the legend colour was " blue" (leading space), which is not a valid
## R colour name and makes legend() fail.
with(plotdata, {
plot(DateTime, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "" )
points(DateTime, Sub_metering_2, type = "l", col = "red" )
points(DateTime, Sub_metering_3, type = "l", col = "blue" )
legend("topright", lty = "solid", lwd =1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
## bottom right
with(plotdata, plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime"))
dev.off()
| /plot4.R | no_license | mingjenchang/ExData_Plotting1 | R | false | false | 896 | r | ## plot4
png("plot4.png", width = 480, height = 480, units = "px")
par(mfrow = c(2,2))
## top left plot
with(plotdata, plot(DateTime, Global_active_power, type = "l", ylab = "Global Active Power (kilowatts)", xlab = "" ))
## top right plot
with(plotdata, plot(DateTime, Voltage, type = "l", ylab = "Voltage", xlab = "datetime" ))
## bottom left plot
with(plotdata, {
plot(DateTime, Sub_metering_1, type = "l", ylab = "Energy sub metering", xlab = "" )
points(DateTime, Sub_metering_2, type = "l", col = "red" )
points(DateTime, Sub_metering_3, type = "l", col = "blue" )
legend("topright", lty = "solid", lwd =1, col = c("black", "red", " blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
})
## bottom right
with(plotdata, plot(DateTime, Global_reactive_power, type = "l", xlab = "datetime"))
## dev.copy(png, file = "plot2.png")
dev.off()
|
#' Integrating TF properties from sources
#' corresponding to (Garcia-Alonso et al. Cancer Research 2018)
#' described in: https://www.biorxiv.org/content/biorxiv/early/2018/06/18/337915.full.pdf
#'
#' For the census of human Transcription Factors from TFClass database (v2014)
#' involving 1,541 human TFs (Wingender et al. 2013),
#' this code loads and merges the following data:
#' - DNA-binding domain from TFClass database (v2014)
#' - mode of interaction with the chromatin (Pioneers, Settlers and Migrants; Ehsani et al. 2016)
#' - number of GTEx tissues (GTEx Consortium 2013) where the gene is expressed (i.e. average expression > 2 fpkm)
#' - DNA-binding mode (monomer, homomer or heteromer) that we
#' - manually curated from UniProt (The UniProt Consortium 2017) (version November 2017)
#' - or complemented with the annotation provided in (Lambert et al. 2018)
#' - DNA-binding specificity (Lambert et al. 2018).
#' - Mode of regulation, manually curated from UniProt "Function CC" field to classify TFs into activators, repressors, activators and repressors or unknown mode of regulation
#'
#' Output: a nested list `TFrole_genesets[[property]][[category]] = gene symbols`,
#' saved to data/TF_info/TFrole_genesets.rdata.
rm(list = ls())
home = '/Volumes/GoogleDrive/My Drive/projects/TFbenchmark/'
setwd(home)
# Project helpers: the load_* functions used below come from these files.
source('code/lib/utils.r')
source('code/lib/data.r')
TFrole_genesets = list()
# TF mode_of_regulation: activators / repressors / dual (both), from curation
activators = load_activators()
dualActRep = load_dualActRep()
repressors = load_repressors()
TFrole_genesets[['mode_of_regulation']][['activators']] = activators
TFrole_genesets[['mode_of_regulation']][['repressors']] = repressors
TFrole_genesets[['mode_of_regulation']][['dual']] = dualActRep
# TF DNA_binding_mode, first from the curated complexes annotation
complexes = load_complexes()
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = unique(subset(complexes, complex_type == 'heterodimer')$gene_symbol)
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = unique(subset(complexes, complex_type != 'heterodimer')$gene_symbol)
# ... then complemented from Lambert et al. Cell 2018 (Table S1)
complexes = read.csv('data/TF_info/regulation_type/lambert_cell2018/TableS1.csv', stringsAsFactors = F)
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = unique(c(TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']], subset(complexes, X.2 == '2 Obligate heteromer')$X))
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = unique(c(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] , subset(complexes, X.2 == '1 Monomer or homomultimer')$X))
# Genes annotated in BOTH categories (sources disagree) are removed from both.
contradictory = intersect(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']],
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']])
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = setdiff(TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']],
contradictory)
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = setdiff(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']],
contradictory)
# DNA_binding_specificity, from Lambert et al. 2018; high specificity is
# inferred for genes in either of the two DNA-binding-mode categories.
TFrole_genesets[['DNA_binding_specificity']][['Low_specificity_DNA-binding_protein']] = unique(subset(complexes, X.2 == '3 Low specificity DNA-binding protein')$X)
TFrole_genesets[['DNA_binding_specificity']][['Not_a_DNA_binding_protein']] = unique(subset(complexes, X.2 == '4 Not a DNA binding protein')$X)
TFrole_genesets[['DNA_binding_specificity']][['High_specificity_DNA-binding_protein']] = unique(subset(complexes, X.2 %in% c('1 Monomer or homomultimer', '2 Obligate heteromer'))$X)
#
# TF chromatin regulation mode (pioneer / settler / migrant); both migrant
# subtypes (negative and positive) are merged into one category.
CHrole = load_CHregulation()
TFrole_genesets[['chromatin_regulation_mode']][['settler']] = unique(subset(CHrole, Chromatin_Opening_Type == 'SETTLER')$Approved_symbol)
TFrole_genesets[['chromatin_regulation_mode']][['migrant']] = unique(subset(CHrole, Chromatin_Opening_Type %in% c('NEGATIVE_MIGRANT', 'POSITIVE_MIGRANT'))$Approved_symbol)
TFrole_genesets[['chromatin_regulation_mode']][['pioneer']] = unique(subset(CHrole, Chromatin_Opening_Type == 'PIONEER')$Approved_symbol)
# TF_class: one gene set per TFClass superclass and per class
TFclassification = load_TFclass_classes()
for (gr in unique(TFclassification$superclass_name) )
TFrole_genesets[['TF_superclass']][[gr]] = unique(subset(TFclassification, superclass_name == gr)$name)
for (gr in unique(TFclassification$class_name) )
TFrole_genesets[['TF_class']][[gr]] = unique(subset(TFclassification, class_name == gr)$name)
# Family-level grouping intentionally disabled:
# for (gr in unique(TFclassification$family_name) )
# TFrole_genesets[['TF_class']][[gr]] = unique(subset(TFclassification, family_name == gr)$name)
# TF tissue_of_expression: frequency = number of tissues a TF is expressed in,
# normalised by the maximum across TFs. Expressed in <10% of the maximum =
# tissue-specific; >90% = broadly expressed (no tissue-specific).
TF_x_tissue = load_TFtissues()
TF_x_tissue_freq = table(TF_x_tissue$V1) / max(table(TF_x_tissue$V1))
TFrole_genesets[['tissue_of_expression']][['tissue-specific']] = names(which(TF_x_tissue_freq<0.1))
TFrole_genesets[['tissue_of_expression']][['no_tissue-specific']] = names(which(TF_x_tissue_freq>0.9))
# Save the assembled annotation for downstream enrichment analyses
save(TFrole_genesets, file = 'data/TF_info/TFrole_genesets.rdata')
| /code/regulons_QC/TFproperties_enrichment/generate_TFproperties_annotation.r | no_license | Ran485/TFbenchmark | R | false | false | 5,083 | r | #' Integrating TF properties from sources
#' corresponding to (Garcia-Alonso et al. Cancer Research 2018)
#' described in: https://www.biorxiv.org/content/biorxiv/early/2018/06/18/337915.full.pdf
#'
#' For the census of human Transcription Factors from TFClass database (v2014)
#' involving 1,541 human TFs (Wingender et al. 2013),
#' this code loads and merges the following data:
#' - DNA-binding domain from TFClass database (v2014)
#' - mode of interaction with the chromatin (Pioneers, Settlers and Migrants; Ehsani et al. 2016)
#' - number of GTEx tissues (GTEx Consortium 2013) where the gene is expressed (i.e. average expression > 2 fpkm)
#' - DNA-binding mode (monomer, homomer or heteromer) that we
#' - manually curated from UniProt (The UniProt Consortium 2017) (version November 2017)
#' - or complemented with the annotation provided in(Lambert et al. 2018)
#'- DNA-binding specificity (Lambert et al. 2018).
#'- Mode of regulation, manually curated from UniProt “Function CC” field to classify TFs into activators, repressors, activators and repressors or unknown mode of regulation
rm(list = ls())
home = '/Volumes/GoogleDrive/My Drive/projects/TFbenchmark/'
setwd(home)
source('code/lib/utils.r')
source('code/lib/data.r')
TFrole_genesets = list()
# TF mode_of_regulation
activators = load_activators()
dualActRep = load_dualActRep()
repressors = load_repressors()
TFrole_genesets[['mode_of_regulation']][['activators']] = activators
TFrole_genesets[['mode_of_regulation']][['repressors']] = repressors
TFrole_genesets[['mode_of_regulation']][['dual']] = dualActRep
# TF DNA_binding_mode
complexes = load_complexes()
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = unique(subset(complexes, complex_type == 'heterodimer')$gene_symbol)
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = unique(subset(complexes, complex_type != 'heterodimer')$gene_symbol)
# from lambet2018
complexes = read.csv('data/TF_info/regulation_type/lambert_cell2018/TableS1.csv', stringsAsFactors = F)
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = unique(c(TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']], subset(complexes, X.2 == '2 Obligate heteromer')$X))
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = unique(c(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] , subset(complexes, X.2 == '1 Monomer or homomultimer')$X))
contradictory = intersect(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']],
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']])
# Drop genes with contradictory DNA-binding-mode annotations from both sets,
# so the two binding-mode gene sets are mutually exclusive.
TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']] = setdiff(TFrole_genesets[['DNA_binding_mode']][['Obligate_heteromer']],
                                                                       contradictory)
TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']] = setdiff(TFrole_genesets[['DNA_binding_mode']][['Monomer_or_homomultimer']],
                                                                            contradictory)
# DNA_binding_specificity from lambet2018
# Specificity classes come from column X.2 of `complexes`; gene symbols are in X.
TFrole_genesets[['DNA_binding_specificity']][['Low_specificity_DNA-binding_protein']] = unique(subset(complexes, X.2 == '3 Low specificity DNA-binding protein')$X)
TFrole_genesets[['DNA_binding_specificity']][['Not_a_DNA_binding_protein']] = unique(subset(complexes, X.2 == '4 Not a DNA binding protein')$X)
# Classes 1 and 2 (monomer/homomultimer and obligate heteromer) both count as high specificity.
TFrole_genesets[['DNA_binding_specificity']][['High_specificity_DNA-binding_protein']] = unique(subset(complexes, X.2 %in% c('1 Monomer or homomultimer', '2 Obligate heteromer'))$X)
#
# # TF chromatin regulation
CHrole = load_CHregulation()
# Map SETTLER / MIGRANT / PIONEER chromatin-opening annotations to gene sets.
TFrole_genesets[['chromatin_regulation_mode']][['settler']] = unique(subset(CHrole, Chromatin_Opening_Type == 'SETTLER')$Approved_symbol)
TFrole_genesets[['chromatin_regulation_mode']][['migrant']] = unique(subset(CHrole, Chromatin_Opening_Type %in% c('NEGATIVE_MIGRANT', 'POSITIVE_MIGRANT'))$Approved_symbol)
TFrole_genesets[['chromatin_regulation_mode']][['pioneer']] = unique(subset(CHrole, Chromatin_Opening_Type == 'PIONEER')$Approved_symbol)
# TF_class
# One gene set per TFclass superclass and per class (family level kept commented out).
TFclassification = load_TFclass_classes()
for (gr in unique(TFclassification$superclass_name) )
TFrole_genesets[['TF_superclass']][[gr]] = unique(subset(TFclassification, superclass_name == gr)$name)
for (gr in unique(TFclassification$class_name) )
TFrole_genesets[['TF_class']][[gr]] = unique(subset(TFclassification, class_name == gr)$name)
# for (gr in unique(TFclassification$family_name) )
# TFrole_genesets[['TF_class']][[gr]] = unique(subset(TFclassification, family_name == gr)$name)
# # TF tissue_of_expression
TF_x_tissue = load_TFtissues()
# Fraction of tissues each TF is expressed in, normalised by the most broadly expressed TF.
TF_x_tissue_freq = table(TF_x_tissue$V1) / max(table(TF_x_tissue$V1))
# <10% of tissues -> tissue-specific; >90% -> broadly expressed (not tissue-specific).
TFrole_genesets[['tissue_of_expression']][['tissue-specific']] = names(which(TF_x_tissue_freq<0.1))
TFrole_genesets[['tissue_of_expression']][['no_tissue-specific']] = names(which(TF_x_tissue_freq>0.9))
# Save
save(TFrole_genesets, file = 'data/TF_info/TFrole_genesets.rdata')
|
#' @title Observability Matrix
#'
#' @description
#' Builds the observability matrix of a state-space system.
#'
#' @param A State-space matrix, A
#' @param C State-space matrix, C
#'
#' @return \code{obsv(A, C)} returns the observability matrix, \code{obsvm},
#' where
#'         obsvm = | C CA CA^2 ... CA^(n-1) |
#'
#'@seealso \code{\link{ctrb}}
#'
#' @examples
#' A <- rbind(c(0,1), c(-25,-4))
#' C <- rbind(c(1,0), c(0,1))
#' obsv(A, C)
#'
#' @export
obsv <- function(A, C) {
  # By duality, the observability matrix of (A, C) is the transpose of the
  # controllability matrix of the dual system (A', C').
  t(ctrb(t(A), t(C)))
}
| /R/obsv.R | no_license | cran/control | R | false | false | 544 | r | #' @title Observability Matrix
#'
#' @description
#' This function creates the observability matrix.
#'
#' @param A State-space matrix, A
#' @param C State-space matrix, C
#'
#' @return \code{obsv(A, C)} returns the observability matrix, \code{obsvm}.
#' where
#' obsvm = | C CA CA^2 ... CA^(n-1) |
#'
#'@seealso \code{\link{ctrb}}
#'
#' @examples
#' A <- rbind(c(0,1), c(-25,-4))
#' C <- rbind(c(1,0), c(0,1))
#' obsv(A, C)
#'
#' @export
obsv <- function(A, C) {
  # Duality: observability of (A, C) equals controllability of (A', C'),
  # so build the controllability matrix of the dual system and transpose it.
  obsm <- t(ctrb (t(A), t(C)))
  return(obsm)
}
|
#' propensity_bipartite_matches
#'
#' Propensity match for bipartite matching.
#'
#' @inheritParams all_bipartite_matches
#' @param propensity_list See \code{gen_propensity_list}
#'
#' @export
propensity_bipartite_matches <- function(x_mat,
                                         treat_vec,
                                         match_method = c(
                                             "with_replacement",
                                             "optimal",
                                             "greedy"
                                         ),
                                         propensity_list =
                                             gen_propensity_list(),
                                         n_sinks = 0,
                                         caliper_list = gen_caliper_list(),
                                         sqrt_mahal = TRUE,
                                         tol_val = NULL) {
    ## coerce a logical treatment indicator to 0/1 integers
    treat_vec <- treat_vec * 1L

    ## score every unit with the propensity model
    scores <- propensity_score(
        x_mat = x_mat,
        treat_vec = treat_vec,
        propensity_list = propensity_list
    )

    ## pairwise |score_treated - score_control| distance matrix
    score_dist <- abs(outer(
        scores[treat_vec == 1],
        scores[treat_vec == 0],
        "-"
    ))

    ## optionally add a caliper penalty on top of the raw distances
    if (!is.null(caliper_list)) {
        score_dist <- score_dist +
            create_caliper(caliper_list, treat_vec = treat_vec)
    }

    bipartite_matches(
        dist_mat = score_dist,
        treat_vec = treat_vec,
        match_method = match_method,
        n_sinks = n_sinks,
        tol_val = tol_val
    )
}
| /R/propensity_bipartite_matches.R | permissive | rzgross/uRbanmatching | R | false | false | 1,600 | r | #' propensity_bipartite_matches
#'
#' Propensity match for bipartite matching.
#'
#' @inheritParams all_bipartite_matches
#' @param propensity_list See \code{gen_propensity_list}
#'
#' @export
propensity_bipartite_matches <- function(x_mat,
                                         treat_vec,
                                         match_method = c(
                                             "with_replacement",
                                             "optimal",
                                             "greedy"
                                         ),
                                         propensity_list =
                                             gen_propensity_list(),
                                         n_sinks = 0,
                                         caliper_list = gen_caliper_list(),
                                         sqrt_mahal = TRUE,
                                         tol_val = NULL) {
    ## in case of logical: coerce treatment indicator to 0/1 integers
    treat_vec <- treat_vec * 1L

    ## generate propensity score for every unit
    prop_score <- propensity_score(
        x_mat = x_mat,
        treat_vec = treat_vec,
        propensity_list = propensity_list
    )

    ## pairwise |score_treated - score_control| distance matrix
    ## (rows = treated units, columns = control units)
    prop_dist_mat <- abs(outer(
        prop_score[treat_vec == 1],
        prop_score[treat_vec == 0],
        "-"
    ))

    ## optionally add a caliper penalty on top of the raw distances
    if (!is.null(caliper_list)) {
        prop_dist_mat <- prop_dist_mat + create_caliper(caliper_list,
            treat_vec = treat_vec
        )
    }

    ## NOTE(review): `sqrt_mahal` is accepted but unused here — presumably kept
    ## for signature compatibility with sibling *_matches functions; confirm.
    bipartite_matches(
        dist_mat = prop_dist_mat,
        treat_vec = treat_vec,
        match_method = match_method,
        n_sinks = n_sinks,
        tol_val = tol_val
    )
}
|
# Auto-generated fuzzing fixture: argument list holding a 7x3 numeric matrix
# that mixes extreme magnitudes (~1e+306, ~1e-304) with zeros.
testlist <- list(x = structure(c(-2.28222627323242e+306, 6.37973176711185e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)))
# Invoke the internal function under test with the fuzzed arguments.
result <- do.call(borrowr:::matchesToCor,testlist)
str(result) | /borrowr/inst/testfiles/matchesToCor/libFuzzer_matchesToCor/matchesToCor_valgrind_files/1609958554-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 221 | r | testlist <- list(x = structure(c(-2.28222627323242e+306, 6.37973176711185e-304, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(7L, 3L)))
result <- do.call(borrowr:::matchesToCor,testlist)
str(result) |
#' Distance between simulated and desired target values
#'
#' For each target named in \code{attr(target_list, "target_names")}, computes
#' a scaled squared distance between the simulated values in \code{dt} and the
#' desired value in \code{target_list$targets}. With the default "chisquare"
#' metric the squared error is scaled by the squared desired value; when the
#' desired value is 0 (chi-square scaling undefined) or when
#' \code{dist == "stopping_range"}, half the width of the target's stopping
#' bounds is used as the scale instead.
#'
#' @param dt data.frame-like object with one column per target name.
#' @param target_list list with named elements \code{targets},
#'   \code{stopping_lower_bounds} and \code{stopping_upper_bounds}, plus
#'   attributes \code{target_names} and (for grouped targets)
#'   \code{target_groups}.
#' @param dist distance metric: "chisquare" (default) or "stopping_range".
#' @return matrix of distances (one column per target); when
#'   \code{target_list} inherits "grouped", columns are summed within groups.
target_distance <- function(dt, target_list, dist = getOption("imabc.target_eval_distance")) {
  # Fall back to the documented default when the option is unset: otherwise the
  # scalar comparison below would see NULL, `dist_opt == "stopping_range"`
  # would be logical(0), and `if ()` would error with "argument is of length zero".
  if (is.null(dist)) {
    dist <- "chisquare"
  }
  distance <- sapply(attr(target_list, which = "target_names"), FUN = function(x, dt, target_list, dist_opt) {
    # Get simulated target value
    sim <- dt[[x]]
    # Get desired target value
    obs <- target_list$targets[x]
    # Control for 0 targets and give alternate distance metric.
    # `||` (not `|`): both operands are scalars and we want short-circuiting.
    if (obs == 0 || dist_opt == "stopping_range") {
      # If the target value is 0, or the user requests it by setting:
      # options(imabc.target_eval_distance = "stopping_range")
      # The range of the stopping bounds times 0.5 is used to scale the distance for a given target
      new_scale <- target_list$stopping_upper_bounds[x] - target_list$stopping_lower_bounds[x]
      new_scale <- new_scale*0.5
      return(((obs - sim)^2)/(new_scale^2))
    } else {
      # Default of getOption("imabc.target_eval_distance") == "chisquare"
      return(((obs - sim)^2)/(obs^2))
    }
  }, dt = dt, target_list = target_list, dist_opt = dist)
  # If grouped targets exist, aggregate distances within each group
  if (inherits(target_list, "grouped")) {
    distance <- t(rowsum(t(distance), attr(target_list, which = "target_groups"), reorder = FALSE))
  }
  return(distance)
}
| /R/target_distance.R | no_license | cran/imabc | R | false | false | 1,252 | r | target_distance <- function(dt, target_list, dist = getOption("imabc.target_eval_distance")) {
distance <- sapply(attr(target_list, which = "target_names"), FUN = function(x, dt, target_list, dist_opt) {
# Get simulated target value
sim <- dt[[x]]
# Get desired target value
obs <- target_list$targets[x]
# Control for 0 targets and give alternate distance metric
if (obs == 0 | dist_opt == "stopping_range") {
# If the target value is 0, or the user requests it by setting:
# options(imabc.target_eval_distance = "stopping_range")
# The range of the stopping bounds times 0.5 is used to scale the distance for a given target
new_scale <- target_list$stopping_upper_bounds[x] - target_list$stopping_lower_bounds[x]
new_scale <- new_scale*0.5
return(((obs - sim)^2)/(new_scale^2))
} else {
# Default of getOption("imabc.target_eval_distance") == "chisquare"
return(((obs - sim)^2)/(obs^2))
}
}, dt = dt, target_list = target_list, dist_opt = dist)
# If grouped targets exist, aggregate distances
if (inherits(target_list, "grouped")) {
distance <- t(rowsum(t(distance), attr(target_list, which = "target_groups"), reorder = FALSE))
}
return(distance)
}
|
#
# Copyright 2013 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Druid Post-Aggregators
# S4 class wrapping a list so post-aggregator specs can carry arbitrary fields.
# NOTE(review): `representation="list"` / `S3methods=TRUE` is legacy setClass
# usage — presumably intended to allow S3 dispatch on this class; confirm.
setClass("druid.postaggregator", representation="list", S3methods=TRUE)
# Constructor: build a druid.postaggregator instance from its parts.
druid.postaggregator <- function(...) new("druid.postaggregator", ...)
#' Creates a Druid post-aggregator
#'
#' @param type post-aggregator type
#' @param ... additional post-aggregator parameters
#' @return a Druid post-aggregator object
#' @export
druid.build.postaggregator <- function(type, ...) {
  # Assemble the spec as a plain list, then tag it with the S3 class.
  spec <- list(type = type, ...)
  class(spec) <- "druid.postaggregator"
  spec
}
#' @method toString druid.postaggregator
#' @export
toString.druid.postaggregator <- function(x, ...) {
  # Render each aggregator type as a human-readable expression; unknown
  # types yield NULL (matching switch() with no default).
  if (x$type == "constant") {
    x$value
  } else if (x$type == "arithmetic") {
    # Recursively render both operands around the infix operator.
    paste("(", toString(x$fields[[1]]), x$fn, toString(x$fields[[2]]), ")")
  } else if (x$type == "fieldAccess") {
    x$name
  }
}
#' @method print druid.postaggregator
#' @export
print.druid.postaggregator <- function(x, ...) {
  # Render via the class's toString method, then emit one labelled line.
  rendered <- toString(x)
  cat("druid postAggregator: ", rendered, "\n", sep = "")
}
#' Creates an arithmetic post-aggregator
#'
#' @param fn operator (e.g. +, -, *, /)
#' @param a left hand side post-aggregator
#' @param b right hand side post-aggregator
#' @param name alias for this post-aggregator
druid.postagg.arithmetic <- function(fn, a, b, name = NULL) {
  # Default alias "(lhs fn rhs)" built from the operand aliases.
  if (is.null(name)) {
    name <- paste0("(", a$name, fn, b$name, ")")
  }
  druid.build.postaggregator(type = "arithmetic", fn = fn, name = name, fields = list(a, b))
}
#' Creates a constant post-aggregator
#'
#' @param x numeric constant
druid.postagg.constant <- function(x) {
  # The alias is simply the printed value of the constant.
  value <- as.numeric(x)
  druid.build.postaggregator(type = "constant", name = as.character(x), value = value)
}

#' Creates a fieldAccess post-aggregator
#'
#' @param name alias for this post-aggregator
#' @param fieldName underlying field this post-aggregator refers to (defaults to name)
druid.postagg.fieldAccess <- function(name, fieldName = name) {
  druid.build.postaggregator(type = "fieldAccess", name = name, fieldName = fieldName)
}

#' Creates a fieldAccess post-aggregator
#'
#' Defines a reference to a Druid field (e.g. an aggregator)
#'
#' @param x name of the field
#' @export
field <- function(x) {
  # Field references are fieldAccess post-aggregators named after the field.
  druid.postagg.fieldAccess(as.character(x))
}

#' helper function to wrap constants in formulas
#'
#' @param x numeric constant or post-aggregator
wrapConstant <- function(x) {
  # Bare numbers become constant post-aggregators; anything else passes through.
  if (!is.numeric(x)) {
    return(x)
  }
  druid.postagg.constant(x)
}
#' Sum of Druid post-aggregators
#'
#' Folds any number of post-aggregators into nested "+" arithmetic
#' post-aggregators.
#'
#' @param ... post-aggregators
#' @export
#' @method sum druid.postaggregator
sum.druid.postaggregator <- function(...) {
  arglist <- list(...)
  # Drop the base generic's na.rm argument if it was supplied. NOTE: when all
  # arguments are positional, names(arglist) is NULL, so the previous filter
  # `arglist[!names(arglist) %in% "na.rm"]` evaluated to arglist[logical(0)]
  # and silently dropped *every* argument; guard against NULL names.
  nms <- names(arglist)
  if (!is.null(nms)) {
    arglist <- arglist[nms != "na.rm"]
  }
  if (length(arglist) == 0) {
    stop("no post-aggregators to sum", call. = FALSE)
  }
  if (length(arglist) == 1) {
    return(arglist[[1]])
  }
  # Reduce right-associatively: a + sum(b, c, ...) via the "+" S3 method.
  arglist[[1]] + do.call("sum", arglist[-1])
}
#' Define a post-aggregator by multiplying aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a * b
#' @method * druid.postaggregator
#' @export
`*.druid.postaggregator` <- function(a, b) {
  # Lift numeric operands to constant post-aggregators before combining.
  lhs <- wrapConstant(a)
  rhs <- wrapConstant(b)
  druid.postagg.arithmetic("*", lhs, rhs)
}

#' Define a post-aggregator by dividing aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a / b
#' @method / druid.postaggregator
#' @export
`/.druid.postaggregator` <- function(a, b) {
  lhs <- wrapConstant(a)
  rhs <- wrapConstant(b)
  druid.postagg.arithmetic("/", lhs, rhs)
}

#' Define a post-aggregator by adding aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a + b
#' @method + druid.postaggregator
#' @export
`+.druid.postaggregator` <- function(a, b) {
  lhs <- wrapConstant(a)
  rhs <- wrapConstant(b)
  druid.postagg.arithmetic("+", lhs, rhs)
}

#' Define a post-aggregator by subtracting aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a - b
#' @method - druid.postaggregator
#' @export
`-.druid.postaggregator` <- function(a, b) {
  lhs <- wrapConstant(a)
  rhs <- wrapConstant(b)
  druid.postagg.arithmetic("-", lhs, rhs)
}
| /R/postaggregator.R | permissive | JesseKolb/RDruid | R | false | false | 4,854 | r | #
# Copyright 2013 Metamarkets Group Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Druid Post-Aggregators
# S4 class wrapping a list so post-aggregator specs can carry arbitrary fields.
setClass("druid.postaggregator", representation="list", S3methods=TRUE)
# Constructor: build a druid.postaggregator instance from its parts.
druid.postaggregator <- function(...) new("druid.postaggregator", ...)
#' Creates a Druid post-aggregator
#'
#' @param type post-aggregator type
#' @param ... additional post-aggregator parameters
#' @return a Druid post-aggregator object
#' @export
druid.build.postaggregator <- function(type, ...) {
  # Assemble the spec as a list tagged with the S3 class.
  structure(list(type = type, ...), class="druid.postaggregator")
}
#' @method toString druid.postaggregator
#' @export
toString.druid.postaggregator <- function(x, ...) {
  # Render each aggregator type as a human-readable expression; unknown
  # types fall through switch() and yield NULL.
  switch(x$type,
         constant    = x$value,
         arithmetic  = paste("(", toString(x$fields[[1]]), x$fn, toString(x$fields[[2]]), ")"),
         fieldAccess = x$name
  )
}
#' @method print druid.postaggregator
#' @export
print.druid.postaggregator <- function(x, ...) {
  # One labelled line rendered via the class's toString method.
  cat("druid postAggregator: ", toString(x), "\n", sep="")
}
#' Creates an arithmetic post-aggregator
#'
#' @param fn operator (e.g. +, -, *, /)
#' @param a left hand side post-aggregator
#' @param b right hand side post-aggregator
#' @param name alias for this post-aggregator
druid.postagg.arithmetic <- function(fn, a, b, name = NULL) {
  # Default alias "(lhs fn rhs)" built from the operand aliases.
  if(is.null(name)) {
    name <- paste("(", a$name, fn, b$name, ")", sep="")
  }
  druid.build.postaggregator(type="arithmetic", fn=fn, name=name, fields = list(a, b))
}
#' Creates a constant post-aggregator
#'
#' @param x numeric constant
druid.postagg.constant <- function(x) {
  # The alias is simply the printed value of the constant.
  druid.build.postaggregator(type="constant", name=as.character(x), value=as.numeric(x))
}
#' Creates a fieldAccess post-aggregator
#'
#' @param name alias for this post-aggregator
#' @param fieldName underlying field this post-aggregator refers to (defaults to name)
druid.postagg.fieldAccess <- function(name, fieldName = name) {
  druid.build.postaggregator(type="fieldAccess", name=name, fieldName=fieldName)
}
#' Creates a fieldAccess post-aggregator
#'
#' Defines a reference to a Druid field (e.g. an aggregator)
#'
#' @param x name of the field
#' @export
field <- function(x) {
  # Field references are fieldAccess post-aggregators named after the field.
  druid.postagg.fieldAccess(as.character(x))
}
#' helper function to wrap constants in formulas
#'
#' @param x numeric constant or post-aggregator
wrapConstant <- function(x) {
  # Bare numbers become constant post-aggregators; anything else passes through.
  if(is.numeric(x)) druid.postagg.constant(x)
  else x
}
#' Sum of Druid post-aggregators
#'
#' Folds any number of post-aggregators into nested "+" arithmetic
#' post-aggregators.
#'
#' @param ... post-aggregators
#' @export
#' @method sum druid.postaggregator
sum.druid.postaggregator <- function(...) {
  arglist <- list(...)
  # Drop the base generic's na.rm argument if it was supplied. NOTE: when all
  # arguments are positional, names(arglist) is NULL, so the previous filter
  # `arglist[!names(arglist) %in% "na.rm"]` evaluated to arglist[logical(0)]
  # and silently dropped *every* argument; guard against NULL names.
  nms <- names(arglist)
  if (!is.null(nms)) {
    arglist <- arglist[nms != "na.rm"]
  }
  if (length(arglist) == 0) {
    stop("no post-aggregators to sum", call. = FALSE)
  }
  if (length(arglist) == 1) {
    return(arglist[[1]])
  }
  # Reduce right-associatively: a + sum(b, c, ...) via the "+" S3 method.
  arglist[[1]] + do.call("sum", arglist[-1])
}
#' Define a post-aggregator by multiplying aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a * b
#' @method * druid.postaggregator
#' @export
`*.druid.postaggregator` <- function(a, b) {
  # Lift numeric operands to constant post-aggregators before combining.
  druid.postagg.arithmetic("*", wrapConstant(a), wrapConstant(b))
}
#' Define a post-aggregator by dividing aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a / b
#' @method / druid.postaggregator
#' @export
`/.druid.postaggregator` <- function(a, b) {
  # Lift numeric operands to constant post-aggregators before combining.
  druid.postagg.arithmetic("/", wrapConstant(a), wrapConstant(b))
}
#' Define a post-aggregator by adding aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a + b
#' @method + druid.postaggregator
#' @export
`+.druid.postaggregator` <- function(a, b) {
  # Lift numeric operands to constant post-aggregators before combining.
  druid.postagg.arithmetic("+", wrapConstant(a), wrapConstant(b))
}
#' Define a post-aggregator by subtracting aggregators, post-aggregators or constants
#'
#' @param a aggregator, post-aggregator, or constant
#' @param b a aggregator, post-aggregator, or constant
#' @return post-aggregator a - b
#' @method - druid.postaggregator
#' @export
`-.druid.postaggregator` <- function(a, b) {
  # Lift numeric operands to constant post-aggregators before combining.
  druid.postagg.arithmetic("-", wrapConstant(a), wrapConstant(b))
}
|
#' @title CRF Training data: download training data for doing Named Entity Recognition (NER)
#' @description Download training data for doing Named Entity Recognition (NER)
#' @param type a character string with the type of data to download. See the function usage for all possible values.
#' These data will be downloaded from either:
#' \itemize{
#' \item{NLTK-data forked repository: }{\url{https://github.com/bnosac-dev/nltk_data/blob/gh-pages/packages/corpora/conll2002.zip}}
#' \item{FOX forked repository of GermanNER: }{\url{https://github.com/bnosac-dev/FOX/tree/master/input/GermanNER}}
#' \item{FOX forked repository of WikiNER: }{\url{https://github.com/bnosac-dev/FOX/tree/master/input/Wikiner}}
#' }
#' Please visit the information on these repositories first before you use these data in any commercial product.
#' @param docs integer indicating how many documents to sample from the data (only used for data from the NLTK repository).
#' This is only used to reduce CRAN R CMD check training time in the examples of this R package.
#' @return a data.frame with training data for a Named Entity Recognition task or an object of try-error in case of failure of downloading the data
#' @export
#' @examples
#' \dontrun{
#' x <- ner_download_modeldata("conll2002-nl")
#' x <- ner_download_modeldata("conll2002-es")
#' x <- ner_download_modeldata("GermanNER")
#' x <- ner_download_modeldata("wikiner-en-wp2")
#' x <- ner_download_modeldata("wikiner-nl-wp3")
#' x <- ner_download_modeldata("wikiner-fr-wp3")
#' }
#' ## reduce number of docs
#' x <- ner_download_modeldata("conll2002-es", docs = 10)
ner_download_modeldata <- function(type = c("conll2002-nl", "conll2002-es", "GermanNER",
                                            "wikiner-de-wp2",
                                            "wikiner-de-wp3",
                                            "wikiner-en-wp2",
                                            "wikiner-en-wp3",
                                            "wikiner-es-wp2",
                                            "wikiner-es-wp3",
                                            "wikiner-fr-wp2",
                                            "wikiner-fr-wp3",
                                            "wikiner-it-wp2",
                                            "wikiner-it-wp3",
                                            "wikiner-nl-wp2",
                                            "wikiner-nl-wp3",
                                            "wikiner-pl-wp3",
                                            "wikiner-pt-wp3",
                                            "wikiner-ru-wp2",
                                            "wikiner-ru-wp3"),
                                   docs = -Inf){
  ## NULL out symbols used in data.table non-standard evaluation (R CMD check)
  .N <- sentence_id <- doc_id <- txt <- NULL
  ## Best-effort download/parse: any failure is captured and returned as a
  ## try-error object instead of raising (documented behaviour).
  msg <- suppressWarnings(try({
    type <- match.arg(type)
    temporary_file <- tempfile()
    if(type == "conll2002-nl"){
      url <- "https://raw.githubusercontent.com/bnosac-dev/nltk_data/gh-pages/packages/corpora/conll2002.zip"
      download.file(url, temporary_file)
      ## read the three CoNLL-2002 Dutch splits out of the zip archive
      rawdata <- list()
      f <- unz(temporary_file, filename = "conll2002/ned.train")
      rawdata$ned.train <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/ned.testa")
      rawdata$testa <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/ned.testb")
      rawdata$testb <- readLines(f, encoding = "UTF-8")
      close(f)
      rawdata <- lapply(rawdata, FUN=function(x){
        x <- data.frame(txt = x, stringsAsFactors = FALSE)
        ## documents are delimited by -DOCSTART- markers
        x$doc_id <- cumsum(x$txt == "-DOCSTART- -DOCSTART- O")
        ## && (not &): both operands are scalars and we want short-circuiting
        if(is.finite(docs) && docs > 0){
          d <- unique(x$doc_id)
          x <- x[x$doc_id %in% sample(d, size = min(docs, length(d))), ]
        }
        ## sentences are separated by blank lines
        x$sentence_id <- cumsum(x$txt == "") + 1L
        x <- x[x$txt != "" & x$txt != "-DOCSTART- -DOCSTART- O", ]
        ## each line is "token POS label"
        x$txt <- strsplit(x$txt, " ")
        x$token <- sapply(x$txt, FUN=function(x) x[1])
        x$pos <- sapply(x$txt, FUN=function(x) x[2])
        x$label <- sapply(x$txt, FUN=function(x) x[3])
        x[, c("doc_id", "sentence_id", "token", "pos", "label")]
      })
      rawdata <- data.table::rbindlist(rawdata, idcol = "data")
      ## make doc_id unique across the train/testa/testb splits
      rawdata$doc_id <- as.integer(factor(sprintf("%s-%s", rawdata$data, rawdata$doc_id)))
      file.remove(temporary_file)
    }else if(type == "conll2002-es"){
      url <- "https://raw.githubusercontent.com/bnosac-dev/nltk_data/gh-pages/packages/corpora/conll2002.zip"
      download.file(url, temporary_file)
      ## read the three CoNLL-2002 Spanish splits out of the zip archive
      rawdata <- list()
      f <- unz(temporary_file, filename = "conll2002/esp.train")
      rawdata$train <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/esp.testa")
      rawdata$testa <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/esp.testb")
      rawdata$testb <- readLines(f, encoding = "UTF-8")
      close(f)
      rawdata <- lapply(rawdata, FUN=function(x){
        x <- data.frame(txt = x, stringsAsFactors = FALSE)
        ## the Spanish files have no -DOCSTART- markers: blank lines delimit docs
        x$doc_id <- cumsum(x$txt == "") + 1L
        ## && (not &): both operands are scalars and we want short-circuiting
        if(is.finite(docs) && docs > 0){
          d <- unique(x$doc_id)
          x <- x[x$doc_id %in% sample(d, size = min(docs, length(d))), ]
        }
        x <- x[x$txt != "", ]
        x$txt <- strsplit(x$txt, " ")
        x$token <- sapply(x$txt, FUN=function(x) x[1])
        x$pos <- sapply(x$txt, FUN=function(x) x[2])
        x$label <- sapply(x$txt, FUN=function(x) x[3])
        x[, c("doc_id", "token", "pos", "label")]
      })
      rawdata <- data.table::rbindlist(rawdata, idcol = "data")
      rawdata$doc_id <- as.integer(factor(sprintf("%s-%s", rawdata$data, rawdata$doc_id)))
      file.remove(temporary_file)
    }else if(type == "GermanNER"){
      ## tab-separated "token<TAB>label" lines, blank line between documents
      rawdata <- readLines("https://raw.githubusercontent.com/bnosac-dev/FOX/master/input/GermanNER/full_train.tsv", encoding = "UTF-8")
      rawdata <- data.frame(txt = rawdata, stringsAsFactors = FALSE)
      rawdata$doc_id <- cumsum(rawdata$txt == "") + 1L
      rawdata <- rawdata[rawdata$txt != "", ]
      rawdata$txt <- strsplit(rawdata$txt, "\t")
      rawdata$token <- sapply(rawdata$txt, FUN=function(x) x[1])
      rawdata$label <- sapply(rawdata$txt, FUN=function(x) x[2])
      rawdata <- rawdata[, c("doc_id", "token", "label")]
    }else if(type %in% c("wikiner-de-wp2",
                         "wikiner-de-wp3",
                         "wikiner-en-wp2",
                         "wikiner-en-wp3",
                         "wikiner-es-wp2",
                         "wikiner-es-wp3",
                         "wikiner-fr-wp2",
                         "wikiner-fr-wp3",
                         "wikiner-it-wp2",
                         "wikiner-it-wp3",
                         "wikiner-nl-wp2",
                         "wikiner-nl-wp3",
                         "wikiner-pl-wp3",
                         "wikiner-pt-wp3",
                         "wikiner-ru-wp2",
                         "wikiner-ru-wp3")){
      ## bzip2-compressed file: one sentence per line, tokens "token|POS|label"
      url <- sprintf("https://raw.githubusercontent.com/bnosac-dev/FOX/master/input/Wikiner/aij-%s.bz2", type)
      download.file(url, temporary_file)
      rawdata <- data.frame(txt = readLines(temporary_file, encoding = "UTF-8"), stringsAsFactors = FALSE)
      rawdata$doc_id <- cumsum(rawdata$txt == "")
      rawdata <- rawdata[rawdata$txt != "", ]
      rawdata <- data.table::setDT(rawdata)
      rawdata <- rawdata[, sentence_id := 1:.N, by = list(doc_id)]
      rawdata <- rawdata[, list(txt = unlist(strsplit(txt, " "))), by = list(doc_id, sentence_id)]
      rawdata <- rawdata[, c("token", "pos", "label") := tstrsplit(txt, "\\|")]
      rawdata <- data.table::setDF(rawdata)
      rawdata <- rawdata[, c("doc_id", "sentence_id", "token", "pos", "label")]
      file.remove(temporary_file)
    }
    setDT(rawdata)
  }, silent = TRUE))
  if(inherits(msg, "try-error")){
    return(msg)
  }
  rawdata
}
#' @title Dutch reviews of AirBnB customers on Brussels address locations available at www.insideairbnb.com
#' @description The data contains 500 reviews in Dutch of people who visited an AirBnB appartment in Brussels. \cr
#' The data frame contains the fields
#' \itemize{
#' \item{doc_id: }{a unique identifier of the review}
#' \item{listing_id: }{the airbnb address identifier}
#' \item{text: }{text with the feedback of a customer on his visit in the AirBnB appartment}
#' }
#' @name airbnb
#' @docType data
#' @source \url{http://data.insideairbnb.com/belgium/bru/brussels/2015-10-03/visualisations/reviews.csv}, \url{http://insideairbnb.com/get-the-data.html}
#' @seealso \code{\link{airbnb_chunks}}
#' @examples
#' data(airbnb)
#' str(airbnb)
#' head(airbnb)
NULL
#' @title Dutch reviews of AirBnB customers on Brussels address locations manually tagged with entities
#' @description The \code{\link{airbnb}} dataset was manually annotated with the shiny app inside this R package.
#' The annotation shows chunks of data which have been flagged with the following categories: PERSON, LOCATION, DISTANCE.
#' The dataset is an object of class \code{chunkrange} and of type data.frame which contains the following fields:
#' \itemize{
#' \item{doc_id: }{a unique identifier of the review, which is also available in \code{\link{airbnb}}}
#' \item{listing_id: }{the airbnb address identifier}
#' \item{text: }{text with the feedback of a customer on his visit in the AirBnB appartment}
#' \item{chunk_id: }{a chunk identifier}
#' \item{chunk_entity: }{a chunk entity label}
#' \item{chunk: }{the text of the chunk which is a substring of \code{text}}
#' \item{start: }{the starting position in \code{text} where the \code{chunk} is found}
#' \item{end: }{the end position in \code{text} where the \code{chunk} is found}
#' }
#' @name airbnb_chunks
#' @docType data
#' @seealso \code{\link{airbnb_chunks}}
#' @examples
#' data(airbnb_chunks)
#' str(airbnb_chunks)
#' head(airbnb_chunks)
NULL
| /fuzzedpackages/crfsuite/R/data.R | permissive | akhikolla/testpackages | R | false | false | 10,030 | r | #' @title CRF Training data: download training data for doing Named Entity Recognition (NER)
#' @description Download training data for doing Named Entity Recognition (NER)
#' @param type a character string with the type of data to download. See the function usage for all possible values.
#' These data will be downloaded from either:
#' \itemize{
#' \item{NLTK-data forked repository: }{\url{https://github.com/bnosac-dev/nltk_data/blob/gh-pages/packages/corpora/conll2002.zip}}
#' \item{FOX forked repository of GermanNER: }{\url{https://github.com/bnosac-dev/FOX/tree/master/input/GermanNER}}
#' \item{FOX forked repository of WikiNER: }{\url{https://github.com/bnosac-dev/FOX/tree/master/input/Wikiner}}
#' }
#' Please visit the information on these repositories first before you use these data in any commercial product.
#' @param docs integer indicating how many documents to sample from the data (only used for data from the NLTK repository).
#' This is only used to reduce CRAN R CMD check training time in the examples of this R package.
#' @return a data.frame with training data for a Named Entity Recognition task or an object of try-error in case of failure of downloading the data
#' @export
#' @examples
#' \dontrun{
#' x <- ner_download_modeldata("conll2002-nl")
#' x <- ner_download_modeldata("conll2002-es")
#' x <- ner_download_modeldata("GermanNER")
#' x <- ner_download_modeldata("wikiner-en-wp2")
#' x <- ner_download_modeldata("wikiner-nl-wp3")
#' x <- ner_download_modeldata("wikiner-fr-wp3")
#' }
#' ## reduce number of docs
#' x <- ner_download_modeldata("conll2002-es", docs = 10)
ner_download_modeldata <- function(type = c("conll2002-nl", "conll2002-es", "GermanNER",
                                            "wikiner-de-wp2",
                                            "wikiner-de-wp3",
                                            "wikiner-en-wp2",
                                            "wikiner-en-wp3",
                                            "wikiner-es-wp2",
                                            "wikiner-es-wp3",
                                            "wikiner-fr-wp2",
                                            "wikiner-fr-wp3",
                                            "wikiner-it-wp2",
                                            "wikiner-it-wp3",
                                            "wikiner-nl-wp2",
                                            "wikiner-nl-wp3",
                                            "wikiner-pl-wp3",
                                            "wikiner-pt-wp3",
                                            "wikiner-ru-wp2",
                                            "wikiner-ru-wp3"),
                                   docs = -Inf){
  ## NULL out symbols used in data.table non-standard evaluation (R CMD check)
  .N <- sentence_id <- doc_id <- txt <- NULL
  ## Best-effort download/parse: any failure is captured and returned as a
  ## try-error object instead of raising (documented behaviour).
  msg <- suppressWarnings(try({
    type <- match.arg(type)
    temporary_file <- tempfile()
    if(type == "conll2002-nl"){
      url <- "https://raw.githubusercontent.com/bnosac-dev/nltk_data/gh-pages/packages/corpora/conll2002.zip"
      download.file(url, temporary_file)
      ## read the three CoNLL-2002 Dutch splits out of the zip archive
      rawdata <- list()
      f <- unz(temporary_file, filename = "conll2002/ned.train")
      rawdata$ned.train <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/ned.testa")
      rawdata$testa <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/ned.testb")
      rawdata$testb <- readLines(f, encoding = "UTF-8")
      close(f)
      rawdata <- lapply(rawdata, FUN=function(x){
        x <- data.frame(txt = x, stringsAsFactors = FALSE)
        ## documents are delimited by -DOCSTART- markers
        x$doc_id <- cumsum(x$txt == "-DOCSTART- -DOCSTART- O")
        if(is.finite(docs) & docs > 0){
          d <- unique(x$doc_id)
          x <- x[x$doc_id %in% sample(d, size = min(docs, length(d))), ]
        }
        ## sentences are separated by blank lines
        x$sentence_id <- cumsum(x$txt == "") + 1L
        x <- x[x$txt != "" & x$txt != "-DOCSTART- -DOCSTART- O", ]
        ## each line is "token POS label"
        x$txt <- strsplit(x$txt, " ")
        x$token <- sapply(x$txt, FUN=function(x) x[1])
        x$pos <- sapply(x$txt, FUN=function(x) x[2])
        x$label <- sapply(x$txt, FUN=function(x) x[3])
        x[, c("doc_id", "sentence_id", "token", "pos", "label")]
      })
      rawdata <- data.table::rbindlist(rawdata, idcol = "data")
      ## make doc_id unique across the train/testa/testb splits
      rawdata$doc_id <- as.integer(factor(sprintf("%s-%s", rawdata$data, rawdata$doc_id)))
      file.remove(temporary_file)
    }else if(type == "conll2002-es"){
      url <- "https://raw.githubusercontent.com/bnosac-dev/nltk_data/gh-pages/packages/corpora/conll2002.zip"
      download.file(url, temporary_file)
      ## read the three CoNLL-2002 Spanish splits out of the zip archive
      rawdata <- list()
      f <- unz(temporary_file, filename = "conll2002/esp.train")
      rawdata$train <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/esp.testa")
      rawdata$testa <- readLines(f, encoding = "UTF-8")
      close(f)
      f <- unz(temporary_file, filename = "conll2002/esp.testb")
      rawdata$testb <- readLines(f, encoding = "UTF-8")
      close(f)
      rawdata <- lapply(rawdata, FUN=function(x){
        x <- data.frame(txt = x, stringsAsFactors = FALSE)
        ## the Spanish files have no -DOCSTART- markers: blank lines delimit docs
        x$doc_id <- cumsum(x$txt == "") + 1L
        if(is.finite(docs) & docs > 0){
          d <- unique(x$doc_id)
          x <- x[x$doc_id %in% sample(d, size = min(docs, length(d))), ]
        }
        x <- x[x$txt != "", ]
        x$txt <- strsplit(x$txt, " ")
        x$token <- sapply(x$txt, FUN=function(x) x[1])
        x$pos <- sapply(x$txt, FUN=function(x) x[2])
        x$label <- sapply(x$txt, FUN=function(x) x[3])
        x[, c("doc_id", "token", "pos", "label")]
      })
      rawdata <- data.table::rbindlist(rawdata, idcol = "data")
      rawdata$doc_id <- as.integer(factor(sprintf("%s-%s", rawdata$data, rawdata$doc_id)))
      file.remove(temporary_file)
    }else if(type == "GermanNER"){
      ## tab-separated "token<TAB>label" lines, blank line between documents
      rawdata <- readLines("https://raw.githubusercontent.com/bnosac-dev/FOX/master/input/GermanNER/full_train.tsv", encoding = "UTF-8")
      rawdata <- data.frame(txt = rawdata, stringsAsFactors = FALSE)
      rawdata$doc_id <- cumsum(rawdata$txt == "") + 1L
      rawdata <- rawdata[rawdata$txt != "", ]
      rawdata$txt <- strsplit(rawdata$txt, "\t")
      rawdata$token <- sapply(rawdata$txt, FUN=function(x) x[1])
      rawdata$label <- sapply(rawdata$txt, FUN=function(x) x[2])
      rawdata <- rawdata[, c("doc_id", "token", "label")]
    }else if(type %in% c("wikiner-de-wp2",
                         "wikiner-de-wp3",
                         "wikiner-en-wp2",
                         "wikiner-en-wp3",
                         "wikiner-es-wp2",
                         "wikiner-es-wp3",
                         "wikiner-fr-wp2",
                         "wikiner-fr-wp3",
                         "wikiner-it-wp2",
                         "wikiner-it-wp3",
                         "wikiner-nl-wp2",
                         "wikiner-nl-wp3",
                         "wikiner-pl-wp3",
                         "wikiner-pt-wp3",
                         "wikiner-ru-wp2",
                         "wikiner-ru-wp3")){
      ## bzip2-compressed file: one sentence per line, tokens "token|POS|label"
      url <- sprintf("https://raw.githubusercontent.com/bnosac-dev/FOX/master/input/Wikiner/aij-%s.bz2", type)
      download.file(url, temporary_file)
      rawdata <- data.frame(txt = readLines(temporary_file, encoding = "UTF-8"), stringsAsFactors = FALSE)
      rawdata$doc_id <- cumsum(rawdata$txt == "")
      rawdata <- rawdata[rawdata$txt != "", ]
      rawdata <- data.table::setDT(rawdata)
      rawdata <- rawdata[, sentence_id := 1:.N, by = list(doc_id)]
      rawdata <- rawdata[, list(txt = unlist(strsplit(txt, " "))), by = list(doc_id, sentence_id)]
      rawdata <- rawdata[, c("token", "pos", "label") := tstrsplit(txt, "\\|")]
      rawdata <- data.table::setDF(rawdata)
      rawdata <- rawdata[, c("doc_id", "sentence_id", "token", "pos", "label")]
      file.remove(temporary_file)
    }
    setDT(rawdata)
  }, silent = TRUE))
  if(inherits(msg, "try-error")){
    return(msg)
  }
  rawdata
}
#' @title Dutch reviews of AirBnB customers on Brussels address locations available at www.insideairbnb.com
#' @description The data contains 500 reviews in Dutch of people who visited an AirBnB appartment in Brussels. \cr
#' The data frame contains the fields
#' \itemize{
#' \item{doc_id: }{a unique identifier of the review}
#' \item{listing_id: }{the airbnb address identifier}
#' \item{text: }{text with the feedback of a customer on his visit in the AirBnB appartment}
#' }
#' @name airbnb
#' @docType data
#' @source \url{http://data.insideairbnb.com/belgium/bru/brussels/2015-10-03/visualisations/reviews.csv}, \url{http://insideairbnb.com/get-the-data.html}
#' @seealso \code{\link{airbnb_chunks}}
#' @examples
#' data(airbnb)
#' str(airbnb)
#' head(airbnb)
NULL
#' @title Dutch reviews of AirBnB customers on Brussels address locations manually tagged with entities
#' @description The \code{\link{airbnb}} dataset was manually annotated with the shiny app inside this R package.
#' The annotation shows chunks of data which have been flagged with the following categories: PERSON, LOCATION, DISTANCE.
#' The dataset is an object of class \code{chunkrange} and of type data.frame which contains the following fields:
#' \itemize{
#' \item{doc_id: }{a unique identifier of the review, which is also available in \code{\link{airbnb}}}
#' \item{listing_id: }{the airbnb address identifier}
#' \item{text: }{text with the feedback of a customer on his visit in the AirBnB appartment}
#' \item{chunk_id: }{a chunk identifier}
#' \item{chunk_entity: }{a chunk entity label}
#' \item{chunk: }{the text of the chunk which is a substring of \code{text}}
#' \item{start: }{the starting position in \code{text} where the \code{chunk} is found}
#' \item{end: }{the end position in \code{text} where the \code{chunk} is found}
#' }
#' @name airbnb_chunks
#' @docType data
#' @seealso \code{\link{airbnb_chunks}}
#' @examples
#' data(airbnb_chunks)
#' str(airbnb_chunks)
#' head(airbnb_chunks)
NULL
|
# Take the first 100 rows of master.table.8 as a small test subset.
# NOTE(review): master.table.8 is assumed to exist in the workspace — confirm.
test.data.frame<-master.table.8[1:100,]
# Summarise each row: arithmetic mean alongside getMLE's estimate.
# NOTE(review): getMLE is project-defined; presumably returns one value per row — confirm.
result.data<-data.frame(mean=rowMeans(test.data.frame),mle=getMLE(df = test.data.frame))
| /stat/test.MLE.R | no_license | axrt/gBLASTer_research | R | false | false | 129 | r | test.data.frame<-master.table.8[1:100,]
result.data<-data.frame(mean=rowMeans(test.data.frame),mle=getMLE(df = test.data.frame))
|
# Shiny app: interactive scatterplot explorer for the movies dataset.
# install.packages("shiny")
library(shiny)
library(ggplot2)
load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))
# Define UI for application that plots features of movies
ui <- fluidPage(
  # Sidebar layout with input and output definitions
  sidebarLayout(
    # Inputs
    sidebarPanel(
      # Select variable for y-axis
      selectInput(inputId = "y",
                  label = "Y-axis:",
                  choices = c("imdb_rating", "imdb_num_votes", "critics_score", "audience_score", "runtime"),
                  selected = "audience_score"),
      # Select variable for x-axis
      selectInput(inputId = "x",
                  label = "X-axis:",
                  choices = c("imdb_rating", "imdb_num_votes", "critics_score", "audience_score", "runtime"),
                  selected = "critics_score"),
      # Select variable for color
      selectInput(inputId = "z",
                  label = "Color by:",
                  choices = c("title_type", "genre", "mpaa_rating", "critics_rating", "audience_rating"),
                  selected = "mpaa_rating")
    ),
    # Outputs
    mainPanel(
      plotOutput(outputId = "scatterplot")
    )
  )
)
# Define server function required to create the scatterplot
server <- function(input, output) {
  # Create the scatterplot object the plotOutput function is expecting.
  # aes_string() is deprecated since ggplot2 3.0; map the string inputs
  # with the .data pronoun instead.
  output$scatterplot <- renderPlot({
    ggplot(data = movies, aes(x = .data[[input$x]], y = .data[[input$y]],
                              color = .data[[input$z]])) +
      geom_point()
  })
}
# Create a Shiny app object
shinyApp(ui = ui, server = server)
| /app.R | no_license | eponkratova/shiny | R | false | false | 1,803 | r | #installing packages
# Shiny app: interactive scatterplot explorer for the movies dataset.
# install.packages("shiny")
library(shiny)
library(ggplot2)
load(url("http://s3.amazonaws.com/assets.datacamp.com/production/course_4850/datasets/movies.Rdata"))
# Define UI for application that plots features of movies
ui <- fluidPage(
  # Sidebar layout with input and output definitions
  sidebarLayout(
    # Inputs
    sidebarPanel(
      # Select variable for y-axis
      selectInput(inputId = "y",
                  label = "Y-axis:",
                  choices = c("imdb_rating", "imdb_num_votes", "critics_score", "audience_score", "runtime"),
                  selected = "audience_score"),
      # Select variable for x-axis
      selectInput(inputId = "x",
                  label = "X-axis:",
                  choices = c("imdb_rating", "imdb_num_votes", "critics_score", "audience_score", "runtime"),
                  selected = "critics_score"),
      # Select variable for color
      selectInput(inputId = "z",
                  label = "Color by:",
                  choices = c("title_type", "genre", "mpaa_rating", "critics_rating", "audience_rating"),
                  selected = "mpaa_rating")
    ),
    # Outputs
    mainPanel(
      plotOutput(outputId = "scatterplot")
    )
  )
)
# Define server function required to create the scatterplot
server <- function(input, output) {
  # Create the scatterplot object the plotOutput function is expecting.
  # aes_string() is deprecated since ggplot2 3.0; map the string inputs
  # with the .data pronoun instead.
  output$scatterplot <- renderPlot({
    ggplot(data = movies, aes(x = .data[[input$x]], y = .data[[input$y]],
                              color = .data[[input$z]])) +
      geom_point()
  })
}
# Create a Shiny app object
shinyApp(ui = ui, server = server)
|
# Relative-condition regime-shift heatmap for shelf-wide fish condition.
# Input: condNshelfSpp from RelConditionEPU.R (columns: Species, MeanCond, YEAR).
# Output: quintile heatmap with regression-tree regime-shift lines, saved as jpg.
library(ggplot2)
library(dplyr)
library(patchwork)
library(forcats)
library(viridis)
out.dir <- "output"
# Data from RelConditionEPU.R
# No data available for 2020 due to Covid-19
# Removed MAB values in 2017 due to low sampling coverage:
# for use in butterfish plots:
# annualCondition <- condNSpp
# Condition full shelf
annualCondition <- condNshelfSpp
# For SOE plots:
# annualCondition <- condGOM
# annualCondition <- condGB
# annualCondition <- condMAB %>%
#   dplyr::filter(!(EPU == "MAB" & YEAR == 2017)) %>%
#   dplyr::filter(!(YEAR == 2017))
# Change YEAR to continuous numeric for the plotting functions below:
annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
# Scale condition within each species so categories are comparable across species
speciesNames <- annualCondition %>%
  # dplyr::filter(sexMF == "F") %>%
  group_by(Species) %>%
  mutate(scaleCond = scale(MeanCond, scale = TRUE, center = TRUE))
# Quintile breakpoints across all species-years
xs <- quantile(speciesNames$scaleCond, seq(0, 1, length.out = 6), na.rm = TRUE)
speciesNames <- speciesNames %>%
  mutate(category = cut(scaleCond, breaks = xs,
                        labels = c("Poor Condition",
                                   "Below Average",
                                   "Neutral",
                                   "Above Average",
                                   "Good Condition"),
                        include.lowest = TRUE))
# Order species by their cumulative scaled condition through 2014
sortNames <- speciesNames %>%
  filter(YEAR <= 2014) %>%
  group_by(Species) %>%
  summarize(total = sum(scaleCond)) %>%
  arrange(total) %>%
  mutate(Species = factor(Species, levels = unique(Species))) %>%
  pull(Species)
speciesNames$Species <- factor(speciesNames$Species, levels = sortNames)
# Adding regime shift lines:
# Regime analysis: regression tree on mean condition by year
CondRegime <- speciesNames %>% dplyr::select(MeanCond, YEAR)
Regime <- rpart::rpart(MeanCond ~ YEAR, data = CondRegime)
# Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[, "xerror"]), "CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
# Output the pruning (complexity parameter) table:
saveRDS(Regime[["cptable"]], file = here::here("output", "Cond_Shelf_Regimes_2022.RDS"))
printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[, "xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index, "CP"]
Regime <- rpart::prune(Regime, cp = optimal_cp)
# Pull regime shift years into a data frame to add to the plot (use the simplest
# tree within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
AnnualCondRegime <- CondRegime
# Change YEAR to continuous numeric for the plotting function below:
AnnualCondRegime$YEAR <- as.numeric(as.character(AnnualCondRegime$YEAR))
ShelfCondRegime <- AnnualCondRegime
# Plot of condition with regime lines.
# See the 5 scale colors for viridis:
# scales::show_col(viridis::viridis_pal()(5))
vir <- viridis::viridis_pal()(5)
p2 <- ggplot(speciesNames, aes(x = YEAR, y = forcats::fct_rev(Species), fill = category)) +
  labs(fill = "Quintiles of Condition") +
  geom_tile() +
  coord_equal() +
  theme_bw() +
  scale_fill_manual(values = vir) +
  guides(fill = guide_legend(reverse = TRUE)) +
  # scale_x_discrete works if we don't need to pad the final year for missing
  # data; YEAR was converted to numeric above, so scale_x_continuous works:
  scale_x_continuous(breaks = round(seq(min(1990), max(speciesNames$YEAR), by = 5))) +
  theme(legend.position = "right", legend.box = "vertical", legend.title = element_text(size = 8),
        legend.text = element_text(size = 6),
        axis.title = element_blank(), axis.text.x = element_text(size = 6),
        axis.text.y = element_text(size = 6), panel.grid.major = element_blank(),
        panel.grid.minor = element_blank()) +
  # Regime-shift years, drawn once (the original also layered duplicate
  # thin lines at the same x positions)
  geom_vline(xintercept = SppSplit1, color = 'red', size = 1.2) +
  geom_vline(xintercept = SppSplit2, color = 'red', size = 1.2)
  # geom_vline(xintercept = SppSplit3, color = 'red', size = 1.2)
# Pass the plot explicitly: ggsave() otherwise saves the last *displayed*
# plot, and p2 is assigned above without ever being printed.
ggsave("Shelf_Condition_allsex_2022_regime.jpg", plot = p2, path = here::here(out.dir),
       width = 8, height = 3.75, units = "in", dpi = 300)
| /R/Condition_plot_viridis_final.R | no_license | Laurels1/Condition | R | false | false | 4,491 | r | library(ggplot2)
# Relative-condition regime-shift heatmap for shelf-wide fish condition.
# Input: condNshelfSpp from RelConditionEPU.R (columns: Species, MeanCond, YEAR).
# Output: quintile heatmap with regression-tree regime-shift lines, saved as jpg.
library(dplyr)
library(patchwork)
library(forcats)
library(viridis)
out.dir <- "output"
# Data from RelConditionEPU.R
# No data available for 2020 due to Covid-19
# Removed MAB values in 2017 due to low sampling coverage:
# for use in butterfish plots:
# annualCondition <- condNSpp
# Condition full shelf
annualCondition <- condNshelfSpp
# For SOE plots:
# annualCondition <- condGOM
# annualCondition <- condGB
# annualCondition <- condMAB %>%
#   dplyr::filter(!(EPU == "MAB" & YEAR == 2017)) %>%
#   dplyr::filter(!(YEAR == 2017))
# Change YEAR to continuous numeric for the plotting functions below:
annualCondition$YEAR <- as.numeric(as.character(annualCondition$YEAR))
# Scale condition within each species so categories are comparable across species
speciesNames <- annualCondition %>%
  # dplyr::filter(sexMF == "F") %>%
  group_by(Species) %>%
  mutate(scaleCond = scale(MeanCond, scale = TRUE, center = TRUE))
# Quintile breakpoints across all species-years
xs <- quantile(speciesNames$scaleCond, seq(0, 1, length.out = 6), na.rm = TRUE)
speciesNames <- speciesNames %>%
  mutate(category = cut(scaleCond, breaks = xs,
                        labels = c("Poor Condition",
                                   "Below Average",
                                   "Neutral",
                                   "Above Average",
                                   "Good Condition"),
                        include.lowest = TRUE))
# Order species by their cumulative scaled condition through 2014
sortNames <- speciesNames %>%
  filter(YEAR <= 2014) %>%
  group_by(Species) %>%
  summarize(total = sum(scaleCond)) %>%
  arrange(total) %>%
  mutate(Species = factor(Species, levels = unique(Species))) %>%
  pull(Species)
speciesNames$Species <- factor(speciesNames$Species, levels = sortNames)
# Adding regime shift lines:
# Regime analysis: regression tree on mean condition by year
CondRegime <- speciesNames %>% dplyr::select(MeanCond, YEAR)
Regime <- rpart::rpart(MeanCond ~ YEAR, data = CondRegime)
# Selecting best fit (gives optimal CP value associated with the minimum error):
# Regime$cptable[which.min(Regime$cptable[, "xerror"]), "CP"]
SppPlot <- rpart.plot::rpart.plot(Regime)
# Output the pruning (complexity parameter) table:
saveRDS(Regime[["cptable"]], file = here::here("output", "Cond_Shelf_Regimes_2022.RDS"))
printcp(Regime)
optimal_cp_index <- as.numeric(which.min(Regime$cptable[, "xerror"]))
optimal_cp <- Regime$cptable[optimal_cp_index, "CP"]
Regime <- rpart::prune(Regime, cp = optimal_cp)
# Pull regime shift years into a data frame to add to the plot (use the simplest
# tree within one standard error (xstd) of the best tree (lowest xerror)):
Results <- as.data.frame(Regime[["splits"]])
SppSplit1 <- Results$index[1]
SppSplit2 <- Results$index[2]
SppSplit3 <- Results$index[3]
SppSplit4 <- Results$index[4]
SppSplit5 <- Results$index[5]
AnnualCondRegime <- CondRegime
# Change YEAR to continuous numeric for the plotting function below:
AnnualCondRegime$YEAR <- as.numeric(as.character(AnnualCondRegime$YEAR))
ShelfCondRegime <- AnnualCondRegime
# Plot of condition with regime lines.
# See the 5 scale colors for viridis:
# scales::show_col(viridis::viridis_pal()(5))
vir <- viridis::viridis_pal()(5)
p2 <- ggplot(speciesNames, aes(x = YEAR, y = forcats::fct_rev(Species), fill = category)) +
  labs(fill = "Quintiles of Condition") +
  geom_tile() +
  coord_equal() +
  theme_bw() +
  scale_fill_manual(values = vir) +
  guides(fill = guide_legend(reverse = TRUE)) +
  # scale_x_discrete works if we don't need to pad the final year for missing
  # data; YEAR was converted to numeric above, so scale_x_continuous works:
  scale_x_continuous(breaks = round(seq(min(1990), max(speciesNames$YEAR), by = 5))) +
  theme(legend.position = "right", legend.box = "vertical", legend.title = element_text(size = 8),
        legend.text = element_text(size = 6),
        axis.title = element_blank(), axis.text.x = element_text(size = 6),
        axis.text.y = element_text(size = 6), panel.grid.major = element_blank(),
        panel.grid.minor = element_blank()) +
  # Regime-shift years, drawn once (the original also layered duplicate
  # thin lines at the same x positions)
  geom_vline(xintercept = SppSplit1, color = 'red', size = 1.2) +
  geom_vline(xintercept = SppSplit2, color = 'red', size = 1.2)
  # geom_vline(xintercept = SppSplit3, color = 'red', size = 1.2)
# Pass the plot explicitly: ggsave() otherwise saves the last *displayed*
# plot, and p2 is assigned above without ever being printed.
ggsave("Shelf_Condition_allsex_2022_regime.jpg", plot = p2, path = here::here(out.dir),
       width = 8, height = 3.75, units = "in", dpi = 300)
|
`intersection` <-
function(e1 = "", e2 = "", snames = "") {
    # Boolean intersection of two bivalent crisp-set expressions in
    # sum-of-products form (e.g. "A*b + C").
    # e1, e2 : expression strings; deMorgan objects are also accepted, in
    #          which case their component in [[1]][[2]] is used.
    # snames : optional comma-separated set names passed to translate().
    # Returns a single string: the union ("+") of all compatible products.
    # Ensure the QCA namespace is available for translate()/splitstr()
    if (!isNamespaceLoaded("QCA")) {
        requireNamespace("QCA", quietly = TRUE)
    }
    # Curly braces indicate multi-value notation, which is not supported here
    if (grepl("\\{", e1) | grepl("\\{", e2)) {
        cat("\n")
        stop(simpleError("This function accepts only bivalent crisp expressions.\n\n"))
    }
    if (identical(e1, "") | identical(e2, "")) {
        cat("\n")
        stop(simpleError("Two expressions are needed to intersect.\n\n"))
    }
    # Use "*" as the conjunction symbol only if either input already uses it
    collapse <- ifelse(any(grepl("\\*", c(e1, e2))), "*", "")
    # deMorgan objects: collapse the stored products into one SOP string
    if (is.deMorgan(e1)) {
        e1 <- paste(e1[[1]][[2]], collapse = " + ")
    }
    if (is.deMorgan(e2)) {
        e2 <- paste(e2[[1]][[2]], collapse = " + ")
    }
    # translate() returns a matrix-like object: one row per product term,
    # one column per condition; presumably 1 = present, 0 = negated and
    # negative values = condition unused -- TODO confirm against QCA docs
    e1 <- translate(e1, snames)
    e2 <- translate(e2, snames)
    result <- list()
    if (!identical(snames, "")) {
        snames <- QCA::splitstr(snames)
    }
    # Pairwise intersection of every product in e1 with every product in e2
    for (i in seq(nrow(e1))) {
        for (j in seq(nrow(e2))) {
            ee <- rbind(e1[i, ], e2[j, ])
            # Keep only the conditions used by at least one of the two products
            ee <- ee[ , apply(ee, 2, function(x) any(x >= 0)), drop = FALSE]
            # Compatible iff no condition is required both present and absent
            if (all(apply(ee, 2, function(x) length(unique(x[x >= 0])) == 1))) {
                ee <- apply(ee, 2, function(x) unique(x[x >= 0]))
                # Lower case denotes a negated (absent) condition
                names(ee)[ee == 0] <- tolower(names(ee)[ee == 0])
                result[[length(result) + 1]] <- paste(names(ee), collapse = collapse)
            }
        }
    }
    # Union of the unique compatible products
    return(paste(unique(unlist(result)), collapse=" + "))
}
| /QCAGUI/R/intersection.R | no_license | ingted/R-Examples | R | false | false | 1,548 | r | `intersection` <-
function(e1 = "", e2 = "", snames = "") {
    # Boolean intersection of two bivalent crisp-set expressions in
    # sum-of-products form (e.g. "A*b + C").
    # e1, e2 : expression strings; deMorgan objects are also accepted, in
    #          which case their component in [[1]][[2]] is used.
    # snames : optional comma-separated set names passed to translate().
    # Returns a single string: the union ("+") of all compatible products.
    # Ensure the QCA namespace is available for translate()/splitstr()
    if (!isNamespaceLoaded("QCA")) {
        requireNamespace("QCA", quietly = TRUE)
    }
    # Curly braces indicate multi-value notation, which is not supported here
    if (grepl("\\{", e1) | grepl("\\{", e2)) {
        cat("\n")
        stop(simpleError("This function accepts only bivalent crisp expressions.\n\n"))
    }
    if (identical(e1, "") | identical(e2, "")) {
        cat("\n")
        stop(simpleError("Two expressions are needed to intersect.\n\n"))
    }
    # Use "*" as the conjunction symbol only if either input already uses it
    collapse <- ifelse(any(grepl("\\*", c(e1, e2))), "*", "")
    # deMorgan objects: collapse the stored products into one SOP string
    if (is.deMorgan(e1)) {
        e1 <- paste(e1[[1]][[2]], collapse = " + ")
    }
    if (is.deMorgan(e2)) {
        e2 <- paste(e2[[1]][[2]], collapse = " + ")
    }
    # translate() returns a matrix-like object: one row per product term,
    # one column per condition; presumably 1 = present, 0 = negated and
    # negative values = condition unused -- TODO confirm against QCA docs
    e1 <- translate(e1, snames)
    e2 <- translate(e2, snames)
    result <- list()
    if (!identical(snames, "")) {
        snames <- QCA::splitstr(snames)
    }
    # Pairwise intersection of every product in e1 with every product in e2
    for (i in seq(nrow(e1))) {
        for (j in seq(nrow(e2))) {
            ee <- rbind(e1[i, ], e2[j, ])
            # Keep only the conditions used by at least one of the two products
            ee <- ee[ , apply(ee, 2, function(x) any(x >= 0)), drop = FALSE]
            # Compatible iff no condition is required both present and absent
            if (all(apply(ee, 2, function(x) length(unique(x[x >= 0])) == 1))) {
                ee <- apply(ee, 2, function(x) unique(x[x >= 0]))
                # Lower case denotes a negated (absent) condition
                names(ee)[ee == 0] <- tolower(names(ee)[ee == 0])
                result[[length(result) + 1]] <- paste(names(ee), collapse = collapse)
            }
        }
    }
    # Union of the unique compatible products
    return(paste(unique(unlist(result)), collapse=" + "))
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{portugal}
\alias{portugal}
\title{Numbers from Portugal's official statistics on governmental deficit, surplus and debt from 1999 to 2019}
\format{
A numeric vector of length 497.
}
\source{
\url{https://ec.europa.eu/eurostat/data/database}
}
\usage{
portugal
}
\description{
Compilation of all the numbers from Portugal's official statistics on governmental deficit, surplus, debt and associated accounts from 1999 to 2019.
}
\details{
Data was extracted from \href{https://ec.europa.eu/eurostat/data/database}{Eurostat's database} in August 2020 through the directory: Database by themes -> Economy and finance -> Government statistics -> Government finance statistics -> Government deficit and debt -> Government deficit/surplus, debt and associated data. After sub-setting by country and selecting only data from 1999 to 2019, all the numbers from all the tables in the aforementioned category were pooled together.
}
\keyword{datasets}
| /man/portugal.Rd | permissive | ptfonseca/daubl | R | false | true | 1,047 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{portugal}
\alias{portugal}
\title{Numbers from Portugal's official statistics on governmental deficit, surplus and debt from 1999 to 2019}
\format{
A numeric vector of length 497.
}
\source{
\url{https://ec.europa.eu/eurostat/data/database}
}
\usage{
portugal
}
\description{
Compilation of all the numbers from Portugal's official statistics on governmental deficit, surplus, debt and associated accounts from 1999 to 2019.
}
\details{
Data was extracted from \href{https://ec.europa.eu/eurostat/data/database}{Eurostat's database} in August 2020 through the directory: Database by themes -> Economy and finance -> Government statistics -> Government finance statistics -> Government deficit and debt -> Government deficit/surplus, debt and associated data. After sub-setting by country and selecting only data from 1999 to 2019, all the numbers from all the tables in the aforementioned category were pooled together.
}
\keyword{datasets}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/expectedInfo.R
\name{expectedInfo}
\alias{expectedInfo}
\title{Computerized Adaptive Testing Survey Expected Information Function}
\usage{
expectedInfo(cat, theta, items)
}
\arguments{
\item{cat}{an object of \code{CATsurv} class.}
\item{theta}{vector consisting of each respondent's position on the latent scale of interest.}
\item{items}{data frame of item-level parameters containing the discrimination parameter, guessing parameter, difficulty parameter, and answer for each item.}
}
\value{
A scalar value representing the expected Fisher's information.
}
\description{
This function calculates the expected Fisher's information for the given theta estimate over these \eqn{k-1} items.
}
\details{
The scalar value of expected Fisher's information calculated by van der Linden and Parsley sum formula where nominator is calculated by \deqn{(\frac{\partial p(\theta)}{\partial \theta})^2=Da_i(1-c_i)\frac{exp[Da_i(\theta-b_i)]}{(1+exp[Da_i(\theta-b_i)])^2}} and denominator is calculated by calling the three.pl function for \eqn{p_i(\theta_j)} and \eqn{q_i(\theta_j)}.
}
\author{
Josh W. Cutler and Jacob M. Montgomery
}
\seealso{
\code{\link{likelihood}},\code{\link{prior}}, \code{\link{estimateTheta}}, \code{\link{estimateSE}}, \code{\link{expectedPV}}, \code{\link{nextItem}}, \code{\link{storeAnswer}}, \code{\link{debugNextItem}}
}
| /catSurv/man/expectedInfo.Rd | no_license | drmiller1220/CATSurv | R | false | false | 1,420 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/expectedInfo.R
\name{expectedInfo}
\alias{expectedInfo}
\title{Computerized Adaptive Testing Survey Expected Information Function}
\usage{
expectedInfo(cat, theta, items)
}
\arguments{
\item{cat}{an object of \code{CATsurv} class.}
\item{theta}{vector consisting of each respondent's position on the latent scale of interest.}
\item{items}{data frame of item-level parameters containing the discrimination parameter, guessing parameter, difficulty parameter, and answer for each item.}
}
\value{
A scalar value representing the expected Fisher's information.
}
\description{
This function calculates the expected Fisher's information for the given theta estimate over these \eqn{k-1} items.
}
\details{
The scalar value of expected Fisher's information calculated by van der Linden and Parsley sum formula where nominator is calculated by \deqn{(\frac{\partial p(\theta)}{\partial \theta})^2=Da_i(1-c_i)\frac{exp[Da_i(\theta-b_i)]}{(1+exp[Da_i(\theta-b_i)])^2}} and denominator is calculated by calling the three.pl function for \eqn{p_i(\theta_j)} and \eqn{q_i(\theta_j)}.
}
\author{
Josh W. Cutler and Jacob M. Montgomery
}
\seealso{
\code{\link{likelihood}},\code{\link{prior}}, \code{\link{estimateTheta}}, \code{\link{estimateSE}}, \code{\link{expectedPV}}, \code{\link{nextItem}}, \code{\link{storeAnswer}}, \code{\link{debugNextItem}}
}
|
#
# Implementing indicators
# Special-purpose package for financial assets
library(quantmod)
# Package for technical indicators
library(TTR)
# Start and end of the study period
startDate <- as.Date("2018-09-11")
endDate <- as.Date("2019-02-05")
# Tickers fetched from the internet (Ibovespa index and Petrobras)
tickers <- c("^BVSP", "PETR4.SA")
# Data source
getSymbols(tickers, src = "yahoo", from = startDate, to = endDate)
# Candlestick chart
chartSeries(PETR4.SA, TA = NULL)
# Indicator
addMACD()
# Building the strategy: MACD(12, 26, 9) on closing prices
macd <- MACD(PETR4.SA$PETR4.SA.Close, nFast = 12, nSlow = 26,
             nSig = 9, maType = SMA, percent = FALSE)
View(macd)
# Starting the model; lag() shifts the signal so a trade only uses
# information available on the previous bar.
# -1 = enter short, 1 = enter long
tradesRulles <- lag(ifelse(macd$macd < macd$signal, -1, 1))
# Strategy returns: ROC() computes period returns, signed by the rule
retornos <- ROC(PETR4.SA$PETR4.SA.Close) * tradesRulles
# View retornos: table with return values
View(retornos)
# Restrict to the study window before plotting
retornos <- retornos["2018-09-11/2019-02-05"]
# Equity curve: cumulative compounded returns (-1 for absolute values)
carteira <- exp(cumsum(retornos$PETR4.SA.Close)) - 1
# The original call passed colors(T) as a stray positional argument, which
# is the likely reason the author reported the plot "didn't work";
# col alone is enough.
plot(carteira, col = "blue")
# ==========================================================================
# Strategy evaluation with PerformanceAnalytics
# (uses the variables from the MACD-indicator strategy above)
library(PerformanceAnalytics)
# Fetch the ten largest drawdowns
table.Drawdowns(retornos, top = 10)
# Assess the downside risks of the strategy
table.DownsideRisk(retornos)
# Plot the performance evolution
charts.PerformanceSummary(retornos)
| /avaliação performance analytics.R | permissive | Di82Rquant/avaliando-estrat-gia | R | false | false | 1,553 | r | #
# Implementing indicators
# Special-purpose package for financial assets
library(quantmod)
# Package for technical indicators
library(TTR)
# Start and end of the study period
startDate <- as.Date("2018-09-11")
endDate <- as.Date("2019-02-05")
# Tickers fetched from the internet (Ibovespa index and Petrobras)
tickers <- c("^BVSP", "PETR4.SA")
# Data source
getSymbols(tickers, src = "yahoo", from = startDate, to = endDate)
# Candlestick chart
chartSeries(PETR4.SA, TA = NULL)
# Indicator
addMACD()
# Building the strategy: MACD(12, 26, 9) on closing prices
macd <- MACD(PETR4.SA$PETR4.SA.Close, nFast = 12, nSlow = 26,
             nSig = 9, maType = SMA, percent = FALSE)
View(macd)
# Starting the model; lag() shifts the signal so a trade only uses
# information available on the previous bar.
# -1 = enter short, 1 = enter long
tradesRulles <- lag(ifelse(macd$macd < macd$signal, -1, 1))
# Strategy returns: ROC() computes period returns, signed by the rule
retornos <- ROC(PETR4.SA$PETR4.SA.Close) * tradesRulles
# View retornos: table with return values
View(retornos)
# Restrict to the study window before plotting
retornos <- retornos["2018-09-11/2019-02-05"]
# Equity curve: cumulative compounded returns (-1 for absolute values)
carteira <- exp(cumsum(retornos$PETR4.SA.Close)) - 1
# The original call passed colors(T) as a stray positional argument, which
# is the likely reason the author reported the plot "didn't work";
# col alone is enough.
plot(carteira, col = "blue")
# ==========================================================================
# Strategy evaluation with PerformanceAnalytics
# (uses the variables from the MACD-indicator strategy above)
library(PerformanceAnalytics)
# Fetch the ten largest drawdowns
table.Drawdowns(retornos, top = 10)
# Assess the downside risks of the strategy
table.DownsideRisk(retornos)
# Plot the performance evolution
charts.PerformanceSummary(retornos)
|
# Merge eye-tracking glance events into IVIS input logs, one file per proband.
# For each input CSV: align timestamps to the "Start" event, append
# glance_start/glance_end rows from the matching glance file (offset-corrected
# via correction_values.csv), sort by time and write to Daten/Merged/.
os <- "C:/Users/serfk/"
#os <- "/Users/se/"
files <- list.files(path=paste(os,"OneDrive/Thesis/Auswertung/Daten/Eye_Tracking/Corrected/output/error_corrected/categorized/",sep = ""), pattern="*.txt", full.names=T, recursive=FALSE)
### READ CORRECTION FILE ###
CORRECTION_FILE = paste(os,"OneDrive/Thesis/Auswertung/Data Analysis/correction_values.csv",sep="")
input_files <- list.files(path=paste(os,"OneDrive/Thesis/Auswertung/Daten/IVIS_Inputs/",sep = ""), pattern="*.csv", full.names=T, recursive=FALSE)
corr <- read.csv(CORRECTION_FILE, header=T, stringsAsFactors = FALSE, sep = ";") # load file
for(i in 1:length(input_files)) {
inputs <- read.csv(input_files[i], header=T, stringsAsFactors = FALSE, sep = ",")
# Re-base timestamps so the "Start" event sits at 9080 ms
inputs$Milliseconds <- inputs$Milliseconds - (inputs$Milliseconds[inputs$Event=="Start"]-9080)
# Proband id is taken from the 13th path component of the glance file --
# NOTE(review): fragile, depends on the exact directory depth; assumes
# files[i] corresponds to input_files[i] in sorted order -- TODO confirm
proband <- strsplit(files[i],"/")[[1]][13]
proband <- as.numeric(strsplit(proband, "\\.")[[1]][1])
glance_file <- paste(os,"OneDrive/Thesis/Auswertung/Daten/Eye_Tracking/Corrected/output/error_corrected/categorized/",proband,".txt",sep = "")
t <- read.table(glance_file, header=T, stringsAsFactors = FALSE) # load file
#filter out glances where proband didnt interact
# (only applied when interaction windows exist in the correction file)
if(!is.na(corr$start_1[corr$Proband==proband])) {
start_1 <- corr$start_1[corr$Proband==proband]
end_1 <- corr$end_1[corr$Proband==proband]
start_2 <- corr$start_2[corr$Proband==proband]
end_2 <- corr$end_2[corr$Proband==proband]
start_3 <- corr$start_3[corr$Proband==proband]
end_3 <- corr$end_3[corr$Proband==proband]
# Keep only glances falling entirely inside one of the three windows
t <- t[t$Start_Time > start_1 & t$End_Time < end_1
|t$Start_Time > start_2 & t$End_Time < end_2
|t$Start_Time > start_3 & t$End_Time < end_3,]
}
# Per-proband clock offset between glance data and input log
offset <- corr$Offset[corr$Proband==proband]
for(j in 1:nrow(t)) {
row <- t[j,]
# Append glance start/end as synthetic "Swipe"/"3" events
inputs[nrow(inputs) + 1,] = list("Swipe","3","","glance_start","",row$Start_Time - offset,"")
inputs[nrow(inputs) + 1,] = list("Swipe","3","","glance_end","",row$End_Time - offset,"")
}
# Chronological order before writing the merged file
inputs <- inputs[order(inputs$Milliseconds),]
write.csv(inputs, file = paste(os,"OneDrive/Thesis/Auswertung/Daten/Merged/",proband,".csv",sep=""), row.names = FALSE)
}
| /R Scripts/merging.R | no_license | SErfkamp/LCT_Data_Analysis | R | false | false | 2,223 | r | os <- "C:/Users/serfk/"
#os <- "/Users/se/"
files <- list.files(path=paste(os,"OneDrive/Thesis/Auswertung/Daten/Eye_Tracking/Corrected/output/error_corrected/categorized/",sep = ""), pattern="*.txt", full.names=T, recursive=FALSE)
### READ CORRECTION FILE ###
CORRECTION_FILE = paste(os,"OneDrive/Thesis/Auswertung/Data Analysis/correction_values.csv",sep="")
input_files <- list.files(path=paste(os,"OneDrive/Thesis/Auswertung/Daten/IVIS_Inputs/",sep = ""), pattern="*.csv", full.names=T, recursive=FALSE)
corr <- read.csv(CORRECTION_FILE, header=T, stringsAsFactors = FALSE, sep = ";") # load file
for(i in 1:length(input_files)) {
inputs <- read.csv(input_files[i], header=T, stringsAsFactors = FALSE, sep = ",")
inputs$Milliseconds <- inputs$Milliseconds - (inputs$Milliseconds[inputs$Event=="Start"]-9080)
proband <- strsplit(files[i],"/")[[1]][13]
proband <- as.numeric(strsplit(proband, "\\.")[[1]][1])
glance_file <- paste(os,"OneDrive/Thesis/Auswertung/Daten/Eye_Tracking/Corrected/output/error_corrected/categorized/",proband,".txt",sep = "")
t <- read.table(glance_file, header=T, stringsAsFactors = FALSE) # load file
#filter out glances where proband didnt interact
if(!is.na(corr$start_1[corr$Proband==proband])) {
start_1 <- corr$start_1[corr$Proband==proband]
end_1 <- corr$end_1[corr$Proband==proband]
start_2 <- corr$start_2[corr$Proband==proband]
end_2 <- corr$end_2[corr$Proband==proband]
start_3 <- corr$start_3[corr$Proband==proband]
end_3 <- corr$end_3[corr$Proband==proband]
t <- t[t$Start_Time > start_1 & t$End_Time < end_1
|t$Start_Time > start_2 & t$End_Time < end_2
|t$Start_Time > start_3 & t$End_Time < end_3,]
}
offset <- corr$Offset[corr$Proband==proband]
for(j in 1:nrow(t)) {
row <- t[j,]
inputs[nrow(inputs) + 1,] = list("Swipe","3","","glance_start","",row$Start_Time - offset,"")
inputs[nrow(inputs) + 1,] = list("Swipe","3","","glance_end","",row$End_Time - offset,"")
}
inputs <- inputs[order(inputs$Milliseconds),]
write.csv(inputs, file = paste(os,"OneDrive/Thesis/Auswertung/Daten/Merged/",proband,".csv",sep=""), row.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{push_to_month_end}
\alias{push_to_month_end}
\title{Convert date to the last day of the month}
\usage{
push_to_month_end(date)
}
\arguments{
\item{date}{Date on format from Lubridate package}
}
\value{
Returns the date of the last day of the month
}
\description{
\code{push_to_month_end} is a function that returns the date of the last
day of the month for a given date.
}
\details{
Used in scripts: {}
Robust function: FALSE
}
| /man/push_to_month_end.Rd | permissive | AndreSjuve/dretools | R | false | true | 521 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dates.R
\name{push_to_month_end}
\alias{push_to_month_end}
\title{Convert date to the last day of the month}
\usage{
push_to_month_end(date)
}
\arguments{
\item{date}{Date on format from Lubridate package}
}
\value{
Returns the date of the last day of the month
}
\description{
\code{push_to_month_end} is a function that returns the date of the last
day of the month for a given date.
}
\details{
Used in scripts: {}
Robust function: FALSE
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eventrank.R
\name{eventrank}
\alias{eventrank}
\title{Creates a vector of ranks from the left and right of a cutpoint}
\usage{
eventrank(x, cutpoint = 0)
}
\arguments{
\item{x}{a vector to be ranked}
\item{cutpoint}{a scalar number used as cut-point (zero by default)}
}
\value{
vector of ranks. If an element in x is equal to the cut-point, it is assigned '0'.
}
\description{
Creates a vector of ranks from the left and right of a cutpoint
}
\examples{
\dontrun{
x <- sample(0:10, 10, replace=TRUE)
data.frame(x=x, r=eventrank(x, 5) )
}
}
| /man/eventrank.Rd | no_license | sumtxt/itstools | R | false | true | 632 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eventrank.R
\name{eventrank}
\alias{eventrank}
\title{Creates a vector of ranks from the left and right of a cutpoint}
\usage{
eventrank(x, cutpoint = 0)
}
\arguments{
\item{x}{a vector to be ranked}
\item{cutpoint}{a scalar number used as cut-point (zero by default)}
}
\value{
vector of ranks. If an element in x is equal to the cut-point, it is assigned '0'.
}
\description{
Creates a vector of ranks from the left and right of a cutpoint
}
\examples{
\dontrun{
x <- sample(0:10, 10, replace=TRUE)
data.frame(x=x, r=eventrank(x, 5) )
}
}
|
# Declare non-standard-evaluation variables ("s", "calBP") so R CMD check
# does not flag them as undefined globals; utils::globalVariables() only
# exists from R 2.15.1, hence the version guard.
if(getRversion() >= "2.15.1") utils::globalVariables(c("s","calBP"))
#' @title Monte-Carlo simulation test for SPDs
#'
#' @description Comparison of an observed summed radiocarbon date distribution (aka SPD) with simulated outcomes from a theoretical model.
#'
#' @param x A \code{CalDates} object containing calibrated radiocarbon ages
#' @param errors A vector of errors corresponding to each radiocarbon age
#' @param nsim Number of simulations
#' @param bins A vector indicating which bin each radiocarbon date is assigned to.
#' @param runm A number indicating the window size of the moving average to smooth both observed and simulated SPDs. If set to \code{NA} no moving average is applied.Default is \code{NA}.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP. The fitting process is applied considering the SPD within the interval defined by this parameter. If no values are supplied the earliest and latest median calibrated dates of the observed data will be used.
#' @param backsight A single numeric value defining the distance in time between the focal year and the backsight year for computing the rate of change. Default is 10.
#' @param changexpr An expression for calculating the rate of change in SPD between the focal year and a backsight year. Available input options are t1 (the SPD for the focal year), t0 (the SPD for the backsight year), d (the distance between t0 and t1), and any other standard constants and mathematical operators. A sensible default is provided.
#' @param gridclip Whether the sampling of random dates is constrained to the observed range (TRUE) or not (FALSE). Default is TRUE.
#' @param raw A logical variable indicating whether all permuted SPDs should be returned or not. Default is FALSE.
#' @param model A vector indicating the model to be fitted. Currently the acceptable options are \code{'uniform'}, \code{'linear'}, \code{'exponential'} and \code{'custom'}. Default is \code{'exponential'}.
#' @param method Method for the creation of random dates from the fitted model. Either \code{'uncalsample'} or \code{'calsample'}. Default is \code{'uncalsample'}. See below for details.
#' @param predgrid A data.frame containing calendar years (column \code{calBP}) and associated summed probabilities (column \code{PrDens}). Required when \code{model} is set to \code{'custom'}.
#' @param normalised Whether the simulated dates should be normalised or not. Default based on whether x is normalised or not.
#' @param datenormalised Argument kept for backward compatibility with previous versions.
#' @param spdnormalised A logical variable indicating whether the total probability mass of the SPD is normalised to sum to unity for both observed and simulated data.
#' @param edgeSize Controls edge effect by expanding the fitted model beyond the range defined by \code{timeRange}.
#' @param ncores Number of cores used for for parallel execution. Default is 1.
#' @param fitonly A logical variable. If set to TRUE, only the the model fitting is executed and returned. Default is FALSE.
#' @param a Starter value for the exponential fit with the \code{\link{nls}} function using the formula \code{y ~ exp(a + b * x)} where \code{y} is the summed probability and \code{x} is the date. Default is 0.
#' @param b Starter value for the exponential fit with the \code{\link{nls}} function using the formula \code{y ~ exp(a + b * x)} where \code{y} is the summed probability and \code{x} is the date. Default is 0.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#'
#' @details The function implements a Monte-Carlo test for comparing a theoretical or fitted statistical model to an observed summed radiocarbon date distribution (aka SPD) and associated rates of changes. A variety of theoretical expectations can be compared to the observed distribution by setting the \code{model} argument, for example to fit basic \code{'uniform'} (the mean of the SPD), \code{'linear'} (fitted using the \code{\link{lm}} function) or \code{model='exponential'} models (fitted using the \code{\link{nls}} function). Models are fitted to the period spanned by \code{timeRange} although \code{x} can contain dates outside this range to mitigate possible edge effects (see also \code{bracket}). Alternatively, it is possible for the user to provide a model of their own by setting \code{model='custom'} and then supplying a two-column data.frame to \code{predgrid}. The function generates \code{nsim} theoretical SPDs from the fitted model via Monte-Carlo simulation, this is then used to define a 95\% critical envelope for each calendar year. The observed SPD is then compared against the simulation envelope; local departures from the model are defined as instances where the observed SPD is outside such an envelope, while an estimate of the global significance of the observed SPD is also computed by comparing the total areas of observed and simulated SPDs that fall outside the simulation envelope. The theoretical SPDs can be generated using two different sampling approaches defined by the parameter \code{method}. If \code{method} is set to \code{'uncalsample'} each date is drawn after the fitted model is backcalibrated as a whole and adjusted for a baseline expectation; if it is set to \code{'calsample'} samples are drawn from the fitted model in calendar year then individually back calibrated and recalibrated (the approach of Timpson et al. 2014). 
#' For each simulation, both approaches produce \eqn{n} samples, with \eqn{n} equal to the number of bins or number of dates (when bins are not defined). Differences between these two approaches are particularly evident at dates coincident with steeper portions of the calibration curve. If more than one type of calibration curve is associated with the observed dates, at each Monte-Carlo iteration, the function randomly assigns each bin to one of the calibration curves with probability based on the proportion of dates within the bin associated with the specific curves. For example, if a bin is composed of four dates and three are calibrated with 'intcal20' the probability of that particular bin being assigned to 'intcal20' is 0.75.
#' @note
#'\itemize{
#'\item {Windows users might receive a memory allocation error with larger time span of analysis (defined by the parameter \code{timeRange}). This can be avoided by increasing the memory limit with the \code{\link{memory.limit}} function.}
#'\item {Users experiencing a \code{Error: cannot allocate vector of size ...} error message can increase the memory size using the \code{Sys.setenv()}, for example: \code{Sys.setenv("R_MAX_VSIZE" = 16e9)}.}
#'\item {The function currently supports only dates calibrated with 'intcal20','intcal13','intcal13nhpine16','shcal20','shcal13','shcal13shkauri16', 'marine20', and 'marine13'.}
#'}
#'
#' @return An object of class \code{SpdModelTest} with the following elements
#' \itemize{
#' \item{\code{result}} {A four column data.frame containing the observed probability density (column \emph{PrDens}) and the lower and the upper values of the simulation envelope (columns \emph{lo} and \emph{hi}) for each calendar year (column \emph{calBP})}
#' \item{\code{result.roc}} {A four column data.frame containing the observed rates of change (column \emph{roc}) and the lower and the upper values of the simulation envelope (columns \emph{lo.roc} and \emph{hi.roc}) for the mid point between two chronological blocks \emph{calBP}}
#' \item{\code{sim}} {A matrix containing the simulation results of the summed probabilities. Available only when \code{raw} is set to TRUE}
#' \item{\code{sim.roc}} {A matrix containing the simulation results of the rate of change of summed probabilities. Available only when \code{raw} is set to TRUE}
#' \item{\code{pval}} {A numeric vector containing the p-value of the global significance test for the summed probabilities}
#' \item{\code{pval.roc}} {A numeric vector containing the p-value of the global significance test for the rates of change}
#' \item{\code{fit}} {A data.frame containing the probability densities of the fitted model for each calendar year within the time range of analysis}
#' \item{\code{fitobject}} {Fitted model. Not available when \code{model} is \code{'custom'}}
#' \item{\code{n}} {Number of radiocarbon dates.}
#' \item{\code{nbins}}{Number of bins.}
#' \item{\code{nsim}}{Number of Monte-Carlo simulations.}
#' \item{\code{backsight}}{Backsight size.}
#' }
#'
#' @references
#'
#' Timpson, A., Colledge, S., Crema, E., Edinborough, K., Kerig, T., Manning, K., Thomas, M.G., Shennan, S., (2014). Reconstructing regional population fluctuations in the European Neolithic using radiocarbon dates: a new case-study using an improved method. Journal of Archaeological Science, 52, 549-557. doi:10.1016/j.jas.2014.08.011
#'
#'
#' @examples
#' ## Example with Younger Dryas period Near East, including site bins
#' \dontrun{
#' data(emedyd)
#' caldates <- calibrate(x=emedyd$CRA, errors=emedyd$Error, normalised=FALSE)
#' bins <- binPrep(sites=emedyd$SiteName, ages=emedyd$CRA, h=50)
#' nsim=5 #toy example
#' expnull <- modelTest(caldates, errors=emedyd$Error, bins=bins, nsim=nsim, runm=50,
#' timeRange=c(16000,9000), model="exponential", datenormalised=FALSE)
#' plot(expnull, xlim=c(16000,9000))
#' round(expnull$pval,4) #p-value
#' summary(expnull)
#' }
#' @import utils
#' @import stats
#' @import doSNOW
#' @import snow
#' @import foreach
#' @import iterators
#' @export
modelTest <- function(x, errors, nsim, bins=NA, runm=NA, timeRange=NA,backsight=50,changexpr=expression((t1/t0)^(1/d)-1),gridclip=TRUE, raw=FALSE, model=c("exponential"),method=c("uncalsample"),predgrid=NA, normalised=NA,datenormalised=NA, spdnormalised=FALSE, ncores=1, fitonly=FALSE, a=0, b=0, edgeSize=500,verbose=TRUE){
  # Monte-Carlo test of an observed SPD against a fitted/theoretical model.
  # See the roxygen block above for the full contract; inline comments below
  # flag the non-obvious steps.

  # IntCal13-family curves only extend to 50k calBP; IntCal20-family to 55k.
  caltimeRange = c(55000,0)
  if (any(x$metadata$CalCurve %in% c("intcal13","shcal13","marine13","intcal13nhpine16","shcal13shkauri16")))
  {
    caltimeRange = c(50000,0)
  }
  # With fitonly=TRUE no simulation is run, so a single dummy iteration suffices.
  if (fitonly == TRUE) {nsim <- 1}
  # Parallel setup.
  # FIX: previously a SNOW cluster was created and doSNOW registered even when
  # ncores==1 (the scalar condition `ncores>1 & !requireNamespace(...)` is FALSE
  # for ncores==1, so execution fell into the cluster-creating else branch).
  # That wasted a 1-worker cluster in serial runs and made serial runs fail
  # outright when doSNOW was not installed. The cluster is now created only for
  # genuine multi-core runs.
  if (ncores > 1)
  {
    if (!requireNamespace("doSNOW", quietly=TRUE)){
      warning("the doSnow package is required for multi-core processing; ncores has been set to 1")
      ncores=1
    } else {
      cl <- snow::makeCluster(ncores)
      registerDoSNOW(cl)
      on.exit(snow::stopCluster(cl))
    }
  }
  if (!any(method%in%c("uncalsample","calsample")))
  {
    stop("The 'method' argument must be either 'uncalsample' or 'calsample'")
  }
  # Default analysis window: range spanned by the median calibrated dates.
  ccrange = c(max(medCal(x)),min(medCal(x)))
  if (anyNA(timeRange))
  {
    timeRange=ccrange
  }
  # Resolve the normalisation setting for the simulated dates, defaulting to
  # whatever normalisation was used when calibrating x.
  if (is.na(normalised))
  {
    normalised=FALSE
    if(x$metadata$Normalised[1]==TRUE)
    {
      normalised=TRUE
    }
  }
  if (normalised!=x$metadata$Normalised[1])
  {
    warning("The normalisation setting of x and normalised are different")
  }
  # 'datenormalised' is retained for backward compatibility and, when supplied,
  # takes precedence over 'normalised' (with warnings on any disagreement).
  if (!is.na(datenormalised))
  {
    if (datenormalised!=normalised)
    {
      warning("'datenormalised' is not equal to 'normalised'. The datenormalised setting will be used for the normalisation setting of the calibration of simulated dates")
      normalised=datenormalised
    }
    if (datenormalised!=x$metadata$Normalised[1])
    {
      if (x$metadata$Normalised[1])
      {
        warning("Input dates are normalised but datenormalised is set to FALSE. The datenormalised setting will be ignored")
        normalised=datenormalised
      }
      if (!x$metadata$Normalised[1])
      {
        warning("Input dates are not normalised but datenormalised is set to TRUE. The datenormalised setting will be ignored")
        normalised=datenormalised
      }
    }
  }
  calCurves = x$metadata$CalCurve
  if (!all(calCurves%in%c("intcal20","shcal20","marine20",'intcal13','intcal13nhpine16','shcal13','shcal13shkauri16','marine13')))
  {
    stop("modelTest() currently accepts only dates calibrated with the following calibration curves: 'intcal20','intcal13','intcal13nhpine16','shcal20','shcal13','shcal13shkauri16', 'marine20', and 'marine13'")
  }
  unique.calCurves = as.character(sort(unique(calCurves)))
  ncc = length(unique.calCurves) #count number of unique calibration curves
  if (verbose){ print("Aggregating observed dates...") }
  # Generate matrix of sample sizes for each curve
  # (rows = simulations, columns = calibration curves).
  if (is.na(bins[1])){
    samplesize <- t(matrix(table(calCurves),nrow=ncc,ncol=nsim))
    colnames(samplesize) = names(table(calCurves))
  } else {
    # With bins, the per-curve sample sizes vary across iterations because each
    # bin is randomly assigned to a curve with probability proportional to the
    # dates it contains (see roxygen details).
    samplesize <- curveSamples(bins=bins,calCurves=calCurves,nsim=nsim)
    if (ncc==1) {
      samplesize = matrix(samplesize,ncol=1,nrow=length(samplesize))
      colnames(samplesize) = names(table(calCurves))
    }
  }
  # Ensure column order matches unique.calCurves so samplesize[s,i] lines up
  # with cragrids[[i]] below.
  if (ncc>1) {samplesize=samplesize[,unique.calCurves]}
  # Create artificial bins in case bins are not supplied
  if (is.na(bins[1])){ bins <- as.character(1:nrow(x$metadata)) }
  observed <- spd(x=x, bins=bins, timeRange=timeRange, runm=runm, spdnormalised=spdnormalised, verbose=FALSE, edgeSize=edgeSize)
  finalSPD <- observed$grid$PrDens
  ## Simulation
  sim <- matrix(NA,nrow=length(finalSPD),ncol=nsim)
  if (verbose && !fitonly){
    print("Monte-Carlo test...")
    flush.console()
  }
  fit.time <- seq(timeRange[1],timeRange[2],-1)
  pred.time <- fit.time
  if (gridclip)
  {
    # Constrain model predictions to the observed date range (plus edge buffer).
    st = max(ccrange[1],timeRange[1])+edgeSize
    en = min(ccrange[2],timeRange[2])-edgeSize
    pred.time <- seq(st,en,-1)
  }
  # Fit the requested null model over fit.time; predgrid holds its predicted
  # density per calendar year.
  fit <- NA
  if (model=="exponential"){
    fit <- nls(y ~ exp(a + b * x), data=data.frame(x=fit.time, y=finalSPD), start=list(a=a, b=b))
    est <- predict(fit, list(x=pred.time))
    predgrid <- data.frame(calBP=pred.time, PrDens=est)
  } else if (model=="uniform"){
    predgrid <- data.frame(calBP=pred.time, PrDens=mean(finalSPD))
  } else if (model=="linear"){
    fit <- lm(y ~ x, data=data.frame(x=fit.time, y=finalSPD))
    est <- predict(fit, list(x=pred.time))
    predgrid <- data.frame(calBP=pred.time, PrDens=est)
  } else if (model=="custom"){
    if (length(predgrid)!=2){
      stop("If you choose a custom model, you must provide a proper predgrid argument (two-column data.frame of calBP and predicted densities).")
    }
    if (!all(colnames(predgrid)%in%c("calBP","PrDens")))
    {
      stop("Column names in the predgrid argument should be 'calBP' and 'PrDens'")
    }
  } else {
    stop("Specified model not one of current choices.")
  }
  if (fitonly){
    print("Done (SPD and fitted model only).")
    predgrid <- subset(predgrid,calBP<=timeRange[1]&calBP>=timeRange[2])
    # NOTE: element name 'osbSPD' (sic) is kept for backward compatibility with
    # existing callers of the fitonly return value.
    res <- list(result=NA, sim=NA, pval=NA, osbSPD=observed, fit=predgrid, fitobject=fit)
    return(res)
  }
  # Add Extra Edges with PrDens=0 to mitigate edge effects when sampling,
  # clipped to [0, caltimeRange] so no sample falls off the calibration curve.
  if (edgeSize>0)
  {
    predgrid = rbind.data.frame(data.frame(calBP=(max(predgrid$calBP)+edgeSize):c(predgrid$calBP[1]+1),PrDens=0),predgrid)
    predgrid = rbind.data.frame(predgrid,data.frame(calBP=min(predgrid$calBP):(min(predgrid$calBP)-edgeSize),PrDens=0))
    if (any(predgrid$calBP<=0|predgrid$calBP>=caltimeRange[1]))
    {
      warning("edgeSize reduced")
      predgrid = subset(predgrid, calBP<=caltimeRange[1]&calBP>=0)
    }
  }
  # predgrid$PrDens = predgrid$PrDens/sum(predgrid$PrDens)
  # Prepare Sampling Grid(s): back-calibrate the fitted model once per curve so
  # random CRAs can be drawn directly from the uncalibrated density.
  cragrids = vector("list",length=ncc)
  for (i in 1:ncc)
  {
    tmp.grid <- uncalibrate(as.CalGrid(predgrid), calCurves=unique.calCurves[i], compact=FALSE, verbose=FALSE)
    cragrids[[i]] <- tmp.grid
    # Clipping the uncalibrated grid to the observed CRA range
    if (gridclip)
    {
      cragrids[[i]] <- tmp.grid[tmp.grid$CRA <= max(x$metadata$CRA) & tmp.grid$CRA >= min(x$metadata$CRA),]
    }
  }
  # Actual Monte-Carlo simulation (serial or parallel)
  opts = NULL
  if (verbose)
  {
    if (ncores>1){ print(paste("Running in parallel on ",getDoParWorkers()," workers...",sep=""))}
    pb <- txtProgressBar(min=0, max=nsim, style=3)
    progress <- function(n) setTxtProgressBar(pb, n)
    opts <- list(progress = progress)
  }
  if (ncores==1)
  {
    for (s in 1:nsim){
      if (verbose){ setTxtProgressBar(pb, s) }
      # 'uncalsample' weights draws by the back-calibrated, baseline-adjusted
      # density (PrDens); 'calsample' weights by the raw density (Raw).
      if (method=="uncalsample")
      {
        randomDates <- vector("list",length=ncc)
        ccurve.tmp <- numeric()
        for (i in 1:ncc)
        {
          randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$PrDens)
          ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
        }
        randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
      }
      if (method=="calsample")
      {
        randomDates <- vector("list",length=ncc)
        ccurve.tmp <- numeric()
        for (i in 1:ncc)
        {
          randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$Raw)
          ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
        }
        randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
      }
      # Calibrate the simulated dates and sum them into a simulated SPD.
      tmp <- calibrate(x=unlist(randomDates),errors=randomSDs, timeRange=timeRange, calCurves=ccurve.tmp, normalised=normalised, ncores=1, verbose=FALSE, calMatrix=TRUE)
      simDateMatrix <- tmp$calmatrix
      sim[,s] <- apply(simDateMatrix,1,sum)
      # Rescale so each simulated SPD carries the same total mass as the fitted
      # model within the analysis window.
      sim[,s] <- (sim[,s]/sum(sim[,s])) * sum(predgrid$PrDens[predgrid$calBP <= timeRange[1] & predgrid$calBP >= timeRange[2]])
      if (spdnormalised){ sim[,s] <- (sim[,s]/sum(sim[,s])) }
      if (!is.na(runm)){ sim[,s] <- runMean(sim[,s], runm, edge="fill") }
    }
  }
  if (ncores>1)
  {
    # Same per-iteration logic as the serial branch, distributed with foreach.
    sim <- foreach (s = 1:nsim, .combine='cbind', .packages='rcarbon',.options.snow = opts) %dopar% {
      randomDates <- vector("list",length=ncc)
      ccurve.tmp <- numeric()
      if (method=="uncalsample")
      {
        for (i in 1:ncc)
        {
          randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$PrDens)
          ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
        }
      }
      if (method=="calsample")
      {
        for (i in 1:ncc)
        {
          randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$Raw)
          ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
        }
      }
      randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
      tmp <- calibrate(x=unlist(randomDates),errors=randomSDs, timeRange=timeRange, calCurves=ccurve.tmp, normalised=normalised, ncores=1, verbose=FALSE, calMatrix=TRUE)
      simDateMatrix <- tmp$calmatrix
      aux <- apply(simDateMatrix,1,sum)
      aux <- (aux/sum(aux)) * sum(predgrid$PrDens[predgrid$calBP <= timeRange[1] & predgrid$calBP >= timeRange[2]])
      if (spdnormalised){ aux <- (aux/sum(aux)) }
      if (!is.na(runm)){
        aux <- runMean(aux, runm, edge="fill")
      }
      aux
    }
  }
  if (verbose){ close(pb) }
  ## rate of change subroutine: evaluates changexpr between each year (t0) and
  ## the year 'backsight' years later (t1); NA where either density is zero.
  timeSequence = timeRange[1]:timeRange[2]
  foo = function(spd,backsight,timeSequence,changexpr)
  {
    obs=rep(NA,length(timeSequence))
    for (i in 1:c(length(obs)-backsight))
    {
      d=backsight
      t0 = spd[i]
      t1 = spd[i+backsight]
      obs[i+backsight] = eval(changexpr)
      if (t1==0|t0==0){obs[i+backsight]=NA}
    }
    return(obs)
  }
  obs.roc = foo(finalSPD,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
  sim.roc = apply(sim,2,foo,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
  ## Envelope, z-scores, global p-value: the 95% envelope is built per calendar
  ## year from the simulation quantiles, both on raw and z-scored values.
  lo <- apply(sim,1,quantile,prob=0.025,na.rm=TRUE)
  hi <- apply(sim,1,quantile,prob=0.975,na.rm=TRUE)
  lo.roc = apply(sim.roc,1,quantile,prob=0.025,na.rm=TRUE)
  hi.roc = apply(sim.roc,1,quantile,prob=0.975,na.rm=TRUE)
  Zsim <- t(apply(sim,1,scale))
  zLo <- apply(Zsim,1,quantile,prob=0.025,na.rm=TRUE)
  zHi <- apply(Zsim,1,quantile,prob=0.975,na.rm=TRUE)
  Zsim.roc <- t(apply(sim.roc,1,scale))
  zLo.roc <- apply(Zsim.roc,1,quantile,prob=0.025,na.rm=TRUE)
  zHi.roc <- apply(Zsim.roc,1,quantile,prob=0.975,na.rm=TRUE)
  Zscore_empirical <- (finalSPD - apply(sim, 1, mean))/apply(sim, 1, sd)
  Zscore_empirical.roc <- (obs.roc - apply(sim.roc, 1, mean))/apply(sim.roc, 1, sd)
  # Local departures: years where the observed statistic falls outside the
  # envelope (z-scored and raw variants).
  busts <- which(Zscore_empirical< zLo)
  booms <- which(Zscore_empirical> zHi)
  busts2 <- which(finalSPD< lo)
  booms2 <- which(finalSPD> hi)
  busts.roc <- which(Zscore_empirical.roc < zLo.roc)
  booms.roc <- which(Zscore_empirical.roc > zHi.roc)
  busts2.roc <- which(obs.roc < lo.roc)
  booms2.roc <- which(obs.roc > hi.roc)
  # Global test statistic: total area of the observed (and each simulated)
  # z-scored curve lying outside the envelope.
  observedStatistic <- sum(c(zLo[busts] - Zscore_empirical[busts]),c(Zscore_empirical[booms]-zHi[booms]))
  observedStatistic.roc <- sum(c(zLo.roc[busts.roc] - Zscore_empirical.roc[busts.roc]),c(Zscore_empirical.roc[booms.roc]-zHi.roc[booms.roc]))
  expectedstatistic <- abs(apply(Zsim,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=zLo)) + apply(Zsim,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=zHi)
  expectedstatistic.roc <- abs(apply(Zsim.roc,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=zLo.roc)) + apply(Zsim.roc,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=zHi.roc)
  # Monte-Carlo p-value with the usual +1 correction (never exactly zero).
  pvalue <- c(length(expectedstatistic[expectedstatistic > observedStatistic])+1)/c(nsim+1)
  pvalue.roc <- c(length(expectedstatistic.roc[expectedstatistic.roc > observedStatistic.roc])+1)/c(nsim+1)
  # Results
  result <- data.frame(calBP=observed$grid$calBP,PrDens=finalSPD,lo=lo,hi=hi)
  result.roc <- data.frame(calBP=timeSequence,roc=obs.roc,lo.roc=lo.roc,hi.roc=hi.roc) #time is midpoint of transition
  predgrid <- subset(predgrid,calBP<=timeRange[1]&calBP>=timeRange[2])
  if(raw==FALSE){ sim <- NA; sim.roc<-NA }
  res <- list(result=result,result.roc=result.roc, sim=sim, sim.roc=sim.roc, pval=pvalue, pval.roc=pvalue.roc, fit=predgrid, fitobject=fit,nbins=length(unique(bins)),n=nrow(x$metadata),nsim=nsim,backsight=backsight)
  class(res) <- "SpdModelTest"
  if (verbose){ print("Done.") }
  return(res)
}
#' @title Random mark permutation test for SPDs
#'
#' @description Global and local significance test for comparing shapes of multiple SPDs using random permutations.
#'
#' @param x A \code{CalDates} class object containing the calibrated radiocarbon dates.
#' @param marks A numerical or character vector containing the marks associated to each radiocarbon date.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP.
#' @param backsight A single numeric value defining the distance in time between the focal year and the backsight year for computing the rate of change. Default is 10.
#' @param changexpr An expression for calculating the rate of change in SPD between the focal year and a backsight year. Available input options are t1 (the SPD for the focal year), t0 (the SPD for the backsight year), d (the distance between t0 and t1), and any other standard constants and mathematical operators. A sensible default is provided.
#' @param bins A vector indicating which bin each radiocarbon date is assigned to.
#' @param nsim Number of random permutations
#' @param runm A number indicating the window size of the moving average to smooth the SPD. If set to \code{NA} no moving average is applied. Default is NA.
#' @param datenormalised If set to TRUE the total probability mass of each calibrated date will be made to sum to unity (the default in most radiocarbon calibration software). This argument will only have an effect if the dates in \code{x} were calibrated without normalisation (via normalised=FALSE in the \code{\link{calibrate}} function), in which case setting \code{datenormalised=TRUE} here will rescale each dates probability mass to sum to unity before aggregating the dates, while setting \code{datenormalised=FALSE} will ensure unnormalised dates are used for both observed and simulated SPDs. Default is FALSE.
#' @param spdnormalised A logical variable indicating whether the total probability mass of the SPD is normalised to sum to unity.
#' @param raw A logical variable indicating whether all permuted SPDs should be returned or not. Default is FALSE.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#'
#' @details The function generates a distribution of expected SPDs by randomly shuffling the marks assigned to each \emph{bin} (see \code{\link{spd}} for details on binning). The resulting distribution of probabilities for each \emph{mark} (i.e. group of dates) for each calendar year is z-transformed, and a 95\% simulation envelope is computed. Local significant departures are defined as instances where the observed SPD (which is also z-transformed) is outside such envelope. A global significance is also computed by comparing the total "area" outside the simulation envelope in the observed and simulated data.
#'
#' @return An object of class \code{SpdPermTest} with the following elements
#' \itemize{
#' \item{\code{observed}} {A list containing data.frames with the summed probability (column \emph{PrDens}) for each calendar year (column \emph{calBP}) for each mark/group}
#' \item{\code{envelope}} {A list containing matrices with the lower and upper bound values of the simulation envelope for each mark/group}
#' \item{\code{pValueList}} {A list of p-value associated with each mark/group}
#' }
#'
#' @references
#' Crema, E.R., Habu, J., Kobayashi, K., Madella, M., (2016). Summed Probability Distribution of 14 C Dates Suggests Regional Divergences in the Population Dynamics of the Jomon Period in Eastern Japan. PLOS ONE 11, e0154809. doi:10.1371/journal.pone.0154809
#'
#' @examples
#' ## compare demographic trajectories in Netherlands and Denmark
#' \dontrun{
#' data(euroevol)
#' nld.dnk = subset(euroevol,Country=="Netherlands"|Country=="Denmark")
#' bins = binPrep(nld.dnk$SiteID,nld.dnk$C14Age,h=200)
#' dates = calibrate(nld.dnk$C14Age,nld.dnk$C14SD,normalised=FALSE)
#' res = permTest(dates,marks=as.character(nld.dnk$Country),nsim=1000,
#' bins=bins,runm=200,timeRange=c(10000,4000))
#' round(res$pValueList,4) #extract p-values
#' summary(res)
#' par(mfrow=c(2,1))
#' plot(res,focalm="Netherlands",main="Netherlands")
#' plot(res,focalm="Denmark",main="Denmark")
#' }
#' @import utils
#' @import stats
#' @export
permTest <- function(x, marks, timeRange, backsight=10,changexpr=expression((t1/t0)^(1/d)-1),nsim, bins=NA, runm=NA, datenormalised=FALSE, spdnormalised=FALSE, raw=FALSE, verbose=TRUE){
if (is.na(bins[1])){ bins <- as.character(1:nrow(x$metadata)) }
marks <- as.character(marks)
binNames <- unique(bins)
calyears <- data.frame(calBP=seq(timeRange[1], timeRange[2],-1))
binnedMatrix <- matrix(nrow=nrow(calyears), ncol=length(binNames))
GroupList <- vector()
if (verbose & length(binNames)>1){
print("Summing observed groups...")
flush.console()
pb <- txtProgressBar(min=1, max=length(binNames), style=3)
}
caldateTR <- as.numeric(x$metadata[1,c("StartBP","EndBP")])
caldateyears <- seq(caldateTR[1],caldateTR[2],-1)
check <- caldateTR[1] >= timeRange[1] & caldateTR[2] <= timeRange[2]
## Observed SPDs
for (b in 1:length(binNames)){
if (verbose & length(binNames)>1){ setTxtProgressBar(pb, b) }
index <- which(bins==binNames[b])
if (length(x$calmatrix)>1){
if (!check){
stop("The time range of the calibrated dataset must be at least as large as the spd time range.")
} else {
tmp <- x$calmatrix[,index, drop=FALSE]
if (datenormalised){
tmp <- apply(tmp,2,FUN=function(x) x/sum(x))
}
spdtmp <- rowSums(tmp)
if (length(binNames)>1){
spdtmp <- spdtmp / length(index)
}
binnedMatrix[,b] <- spdtmp[caldateyears<=timeRange[1] & caldateyears>=timeRange[2]]
}
} else {
slist <- x$grids[index]
slist <- lapply(slist,FUN=function(x) merge(calyears,x, all.x=TRUE))
slist <- rapply(slist, f=function(x) ifelse(is.na(x),0,x), how="replace")
slist <- lapply(slist, FUN=function(x) x[with(x, order(-calBP)), ])
tmp <- lapply(slist,`[`,2)
if (datenormalised){
outofTR <- lapply(tmp,sum)==0 # date out of range
tmpc <- tmp[!outofTR]
if (length(tmpc)>0){
tmp <- lapply(tmpc,FUN=function(x) x/sum(x))
}
}
if (length(binNames)>1){
spdtmp <- Reduce("+", tmp) / length(index)
} else {
spdtmp <- Reduce("+", tmp)
}
binnedMatrix[,b] <- spdtmp[,1]
}
GroupList[b] <- marks[index][1]
}
if (verbose & length(binNames)>1){ close(pb) }
observedSPD <- vector("list",length=length(unique(GroupList)))
names(observedSPD) <- unique(GroupList)
for (d in 1:length(unique(GroupList))){
focus <- unique(GroupList)[d]
index <- which(GroupList==focus)
tmpSPD <- apply(binnedMatrix[,index,drop=FALSE], 1, sum)
if (!is.na(runm)){
tmpSPD <- runMean(tmpSPD, runm, edge="fill")
}
if (d==1){
dall <- tmpSPD
} else {
dall <- dall+tmpSPD
}
if (spdnormalised){ tmpSPD <- tmpSPD / sum(tmpSPD) }
observedSPD[[d]] <- data.frame(calBP=calyears, PrDens=tmpSPD)
}
## Permutations
simulatedSPD <- vector("list",length=length(unique(GroupList)))
for (d in 1:length(unique(GroupList))){
simulatedSPD[[d]] <- matrix(NA, nrow=nrow(calyears), ncol=nsim)
}
if (verbose){
print("Permuting the groups...")
flush.console()
pb <- txtProgressBar(min=1, max=nsim, style=3)
}
for (s in 1:nsim){
if (verbose){ setTxtProgressBar(pb, s) }
simGroupList <- sample(GroupList)
for (d in 1:length(unique(simGroupList))){
focus <- unique(GroupList)[d]
index <- which(simGroupList==focus)
tmpSPD <- apply(binnedMatrix[,index,drop=FALSE],1,sum)
if (!is.na(runm)){
tmpSPD <- runMean(tmpSPD, runm, edge="fill")
}
if (d==1){
dall <- tmpSPD
} else {
dall <- dall+tmpSPD
}
if (spdnormalised){ tmpSPD <- tmpSPD/sum(tmpSPD) }
simulatedSPD[[d]][,s] <- tmpSPD
}
}
names(simulatedSPD) <- unique(GroupList)
if (verbose){ close(pb) }
#compute rates of change
timeSequence = timeRange[1]:timeRange[2]
foo = function(spd,backsight,timeSequence,changexpr)
{
obs=rep(NA,length(timeSequence))
for (i in 1:c(length(obs)-backsight))
{
d=backsight
t0 = spd[i]
t1 = spd[i+backsight]
obs[i+backsight] = eval(changexpr)
if (t1==0|t0==0){obs[i+backsight]=NA}
}
return(obs)
}
observedROC = simulatedROC = vector("list",length=length(observedSPD))
for (i in 1:length(observedSPD))
{
tmp=foo(observedSPD[[i]][,2],backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
observedROC[[i]] = data.frame(calBP=timeSequence,roc=tmp)
names(observedROC)[[i]]=names(observedSPD)[[i]]
simulatedROC[[i]] = apply(simulatedSPD[[i]],2,foo,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
}
names(simulatedROC) <- unique(GroupList)
## Simulation Envelope
simulatedCIlist = simulatedCIlist.roc = vector("list",length=length(unique(GroupList)))
for (d in 1:length(unique(GroupList))){
simulatedCIlist[[d]] <- cbind(apply(simulatedSPD[[d]],1,quantile,prob=c(0.025),na.rm=TRUE), apply(simulatedSPD[[d]],1,quantile,prob=c(0.975),na.rm=TRUE))
simulatedCIlist.roc[[d]] <- cbind(apply(simulatedROC[[d]],1,quantile,prob=c(0.025),na.rm=TRUE), apply(simulatedROC[[d]],1,quantile,prob=c(0.975),na.rm=TRUE))
names(simulatedCIlist) <- unique(GroupList)
names(simulatedCIlist.roc) <- unique(GroupList)
}
## Compute Global P-value
pValueList = pValueList.roc = numeric(length=length(simulatedSPD))
for (a in 1:length(simulatedSPD)){
zscoreMean <- apply(simulatedSPD[[a]],1,mean)
zscoreSD <- apply(simulatedSPD[[a]],1,sd)
zscoreMean.roc <- apply(simulatedROC[[a]],1,mean,na.rm=TRUE)
zscoreSD.roc <- apply(simulatedROC[[a]],1,sd,na.rm=TRUE)
tmp.sim <- t(apply(simulatedSPD[[a]],1,function(x){ return((x - mean(x))/sd(x)) }))
tmp.sim.roc <- t(apply(simulatedROC[[a]],1,function(x){ return((x - mean(x))/sd(x)) }))
# tmp.sim[is.na(tmp.sim)] <- 0
tmp.obs <- observedSPD[[a]]
tmp.obs[,2] <- (tmp.obs[,2] - zscoreMean) / zscoreSD
# tmp.obs[is.na(tmp.obs[,2]),2] <- 0
tmp.obs.roc <- observedROC[[a]]
tmp.obs.roc[,2] <- (tmp.obs.roc[,2] - zscoreMean.roc) / zscoreSD.roc
tmp.ci <- t(apply(tmp.sim,1, quantile, prob=c(0.025,0.975),na.rm=TRUE))
tmp.ci.roc <- t(apply(tmp.sim.roc,1, quantile, prob=c(0.025,0.975),na.rm=TRUE))
expectedstatistic <- abs(apply(tmp.sim,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=tmp.ci[,1])) + apply(tmp.sim,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=tmp.ci[,2])
expectedstatistic.roc <- abs(apply(tmp.sim.roc,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=tmp.ci.roc[,1])) + apply(tmp.sim.roc,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=tmp.ci.roc[,2])
lower <- tmp.obs[,2] - tmp.ci[,1]
indexLow <- which(tmp.obs[,2] < tmp.ci[,1])
higher <- tmp.obs[,2] - tmp.ci[,2]
indexHi <- which(tmp.obs[,2] > tmp.ci[,2])
lower.roc <- tmp.obs.roc[,2] - tmp.ci.roc[,1]
indexLow.roc <- which(tmp.obs.roc[,2] < tmp.ci.roc[,1])
higher.roc <- tmp.obs.roc[,2] - tmp.ci.roc[,2]
indexHi.roc <- which(tmp.obs.roc[,2] > tmp.ci.roc[,2])
observedStatistic <- sum(abs(lower[indexLow]))+sum(higher[indexHi])
observedStatistic.roc <- sum(abs(lower.roc[indexLow.roc]))+sum(higher.roc[indexHi.roc])
pValueList[[a]] <- 1
pValueList.roc[[a]] <- 1
if (observedStatistic>0){
pValueList[[a]] <-c(length(expectedstatistic[expectedstatistic > observedStatistic])+1)/c(nsim+1)
}
if (observedStatistic.roc>0){
pValueList.roc[[a]] <-c(length(expectedstatistic.roc[expectedstatistic.roc > observedStatistic.roc])+1)/c(nsim+1)
}
names(pValueList) <- unique(GroupList)
}
res <- list(observed=observedSPD, envelope=simulatedCIlist, pValueList=pValueList)
res$observed.roc=observedROC
res$envelope.roc=simulatedCIlist.roc
res$pValueList.roc=pValueList.roc
metadata=vector("list",length=length(unique(GroupList)))
names(metadata) = unique(GroupList)
for (k in 1:length(unique(GroupList)))
{
i=unique(GroupList)[k]
tmp = c(sum(marks%in%i),length(unique(bins[which(marks%in%i)])))
metadata[[k]]=tmp
}
res$nsim = nsim
res$metadata = metadata
res$backsight = backsight
if (raw)
{
res$raw <- simulatedSPD
res$raw.row <- simulatedROC
}
class(res) <- "SpdPermTest"
if (verbose){ print("Done.") }
return(res)
}
#' @title Spatial Permutation Test of summed probability distributions.
#' @description This function is deprecated. Please use \code{\link{sptest}} instead.
#' @return A \code{spatialTest} class object
#' @name SPpermTest-deprecated
#' @usage SPpermTest(calDates, timeRange, bins, locations, breaks,
#' spatialweights,rate=expression((t2/t1)^(1/d)-1), nsim=1000, runm=NA,permute="locations",
#' ncores=1,datenormalised=FALSE,verbose=TRUE,raw=FALSE)
#' @seealso \code{\link{rcarbon-deprecated}}
#' @keywords internal
NULL
#' @rdname rcarbon-deprecated
#' @section \code{SPpermTest}:
#' For \code{SPpermTest}, use \code{\link{sptest}}.
#' @export
SPpermTest <- function(calDates, timeRange, bins, locations, breaks, spatialweights,
                       rate = expression((t2/t1)^(1/d)-1), nsim = 1000, runm = NA,
                       permute = "locations", ncores = 1, datenormalised = FALSE,
                       verbose = TRUE, raw = FALSE)
{
  # Deprecated shim: emit the standard deprecation warning, then forward every
  # argument unchanged to the replacement function sptest().
  .Deprecated("sptest")
  sptest(calDates = calDates,
         timeRange = timeRange,
         bins = bins,
         locations = locations,
         breaks = breaks,
         spatialweights = spatialweights,
         rate = rate,
         nsim = nsim,
         runm = runm,
         permute = permute,
         ncores = ncores,
         datenormalised = datenormalised,
         verbose = verbose,
         raw = raw)
}
#' @title Spatial Permutation Test of summed probability distributions.
#'
#' @description This function carries out local spatial permutation tests of summed radiocarbon probability distributions in order to detect local deviations in growth rates (Crema et al 2017).
#'
#' @param calDates A \code{CalDates} class object.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP
#' @param bins A vector indicating which bin each radiocarbon date is assigned to. Must have the same length as the number of radiocarbon dates. Can be created using the \code{\link{binPrep}} function. Bin names should follow the format "x_y", where x refers to a unique location (e.g. a site) and y is an integer value (e.g. "S023_1", "S023_2","S034_1", etc.).
#' @param locations A \code{SpatialPoints} or a \code{SpatialPointsDataFrame} class object. Rownames of each point should match the first part of the bin names supplied (e.g. "S023","S034")
#' @param breaks A vector of break points for defining the temporal slices.
#' @param spatialweights A \code{spatialweights} class object defining the spatial weights between the locations (cf. \code{\link{spweights}})
#' @param rate An expression defining how the rate of change is calculated, where \code{t1} is the summed probability for a focal block, \code{t2} is the summed probability for next block, and \code{d} is the duration of the blocks. Default is a geometric growth rate (i.e \code{expression((t2/t1)^(1/d)-1)}).
#' @param nsim The total number of simulations. Default is 1000.
#' @param runm The window size of the moving window average. Must be set to \code{NA} if the rates of change are calculated from the raw SPDs.
#' @param permute Indicates whether the permutations should be based on the \code{"bins"} or the \code{"locations"}. Default is \code{"locations"}.
#' @param ncores Number of cores used for for parallel execution. Default is 1.
#' @param datenormalised A logical variable indicating whether the probability mass of each date within \code{timeRange} is equal to 1. Default is FALSE.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#' @param raw A logical variable indicating whether permuted sets of geometric growth rates for each location should be returned. Default is FALSE.
#'
#' @details The function consists of the following seven steps: 1) for each location (e.g. a site) generate a local SPD of radiocarbon dates, weighting the contribution of dates from neighbouring sites using a weight scheme provided by the \code{spatialweights} class object; 2) define temporal slices (using \code{breaks} as break values), then compute the total probability mass within each slice; 3) compute the rate of change between abutting temporal slices by using the formula: \eqn{(SPD_{t}/SPD_{t+1}^{1/\Delta t}-1)}; 4) randomise the location of individual bins or the entire sequence of bins associated with a given location and carry out steps 1 to 3; 5) repeat step 4 \code{nsim} times and generate, for each location, a distribution of growth rates under the null hypothesis (i.e. spatial independence); 6) compare, for each location, the observed growth rate with the distribution under the null hypothesis and compute the p-values; and 7) compute the false-discovery rate for each location.
#'
#' @return A \code{spatialTest} class object
#'
#' @references
#' Crema, E.R., Bevan, A., Shennan, S. (2017). Spatio-temporal approaches to archaeological radiocarbon dates. Journal of Archaeological Science, 87, 1-9.
#'
#' @seealso \code{\link{permTest}} for a non-spatial permutation test; \code{\link{plot.spatialTest}} for plotting; \code{\link{spweights}} for computing spatial weights; \code{\link{spd2rc}} for computing geometric growth rates.
#'
#' @examples
#' ## Reproduce Crema et al 2017 ##
#'\dontrun{
#' data(euroevol) #load data
#'
#' ## Subset only for 8000 to 5000 Cal BP (c7200-4200 C14BP)
#' edge=800
#' timeRange=c(8000,5000)
#' euroevol2=subset(euroevol,C14Age<=c(timeRange[1]-edge)&C14Age>=c(timeRange[2]-edge))
#'
#' ## define chronological breaks
#' breaks=seq(8000,5000,-500)
#'
#' ## Create a SpatialPoints class object
#' library(sp)
#' sites = unique(data.frame(SiteID=euroevol2$SiteID,
#' Longitude=euroevol2$Longitude,Latitude=euroevol2$Latitude))
#' locations=data.frame(Longitude=sites$Longitude,Latitude=sites$Latitude)
#' rownames(locations)=sites$SiteID
#' coordinates(locations)<-c("Longitude","Latitude")
#' proj4string(locations)<- CRS("+proj=longlat +datum=WGS84")
#'
#' ## Compute Distance and Spatial Weights
#' distSamples=spDists(locations,locations,longlat = TRUE)
#' spatialweights=spweights(distSamples,h=100) #using a kernal bandwidth of 100km
#'
#' ## Calibration and binning
#' bins=binPrep(sites=euroevol2$SiteID,ages=euroevol2$C14Age,h=200)
#' calDates=calibrate(x=euroevol2$C14Age,errors=euroevol2$C14SD,
#' timeRange=timeRange,normalised=FALSE)
#'
#' ## Main Analysis (over 2 cores; requires doSnow package)
#' ## NOTE: the number of simulations should be ideally larger
#' ## to ensure a better resolution of the p/q-values.
#' res.locations=sptest(calDates,timeRange=timeRange,bins=bins,locations=locations,
#' spatialweights=spatialweights,breaks=breaks,ncores=2,nsim=100,
#' permute="locations",datenormalised=FALSE)
#'
#' ## Plot results
#' library(rworldmap)
#' base=getMap(resolution="low") #optionally add base map
#' #retrieve coordinate limits#
#' xrange=bbox(res.locations$locations)[1,]
#' yrange=bbox(res.locations$locations)[2,]
#'
#' par(mfrow=c(2,2))
#' par(mar=c(0.1,0.1,0,0.5))
#' plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
#' plot(res.locations,index=4,add=TRUE,legend=TRUE,option="raw",breakRange=c(-0.005,0.005))
#' par(mar=c(0.1,0.1,0,0.5))
#' plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
#' plot(res.locations,index=4,add=TRUE,legend=TRUE,option="test")
#' }
#' @import utils
#' @import stats
#' @import doSNOW
#' @import snow
#' @import foreach
#' @import iterators
#' @import sp
#' @export
sptest<-function(calDates, timeRange, bins, locations, breaks, spatialweights, rate=expression((t2/t1)^(1/d)-1),nsim=1000, runm=NA,permute="locations",ncores=1,datenormalised=FALSE,verbose=TRUE,raw=FALSE)
{
#######################
#### Load Packages ####
#######################
# doSNOW is only required for parallel runs; fall back to serial execution
# (rather than erroring) when it is not installed.
if (ncores>1 && !requireNamespace('doSNOW',quietly = TRUE)){	
warning("the doSNOW package is required for multi-core processing; ncores has been set to 1")
ncores=1
}	
##################################
#### Initial warning messages ####
##################################
if (!"CalDates" %in% class(calDates)){
stop("calDates must be an object of class 'CalDates'.")
}
if (length(bins)>1){
if (any(is.na(bins))){
stop("Cannot have NA values in bins.")
}
if (length(bins)!=length(calDates$grid)){
stop("bins (if provided) must be the same length as x.")
}
} else {
# No binning supplied: place every date in a single dummy bin "0_0".
bins <- rep("0_0",length(calDates$grid))
}
if (!("SpatialPoints" %in% class(locations)[1]|"SpatialPointsDataFrame" %in% class(locations)[1])){
stop("locations must be an object of class 'SpatialPoints' or 'SpatialPointsDataFrame'.")
}
# Rownames of the coordinates link locations to the "site" prefix of bin names.
locations.id=row.names(locations@coords)
if (is.null(locations.id))
{
stop("locations must have rownames")
}
if (!all(range(timeRange)==range(breaks)))
{
stop("Range of breaks values must match the temporal range defined by timeRange")
}
if (length(unique(abs(diff(breaks))))!=1)
{
stop("Unequal break intervals is not supported")
}
if (ncores>1 && raw==TRUE)
{
warning("raw==TRUE available only for ncores=1")
raw=FALSE
}
#############################
#### Create binnedMatrix ####
#############################
# One column per bin, one row per calendar year within timeRange.
binNames <- unique(bins)
calyears <- data.frame(calBP=seq(timeRange[1], timeRange[2],-1))
binnedMatrix <- matrix(NA, nrow=nrow(calyears), ncol=length(binNames))
if (verbose & length(binNames)>1){
print("Binning by site/phase...")
flush.console()
pb <- txtProgressBar(min=1, max=length(binNames), style=3, title="Binning by site/phase...")
}
for (b in 1:length(binNames)){
if (verbose & length(binNames)>1){ setTxtProgressBar(pb, b) }
index <- which(bins==binNames[b])
# Align each calibrated grid to the full calendar-year sequence, filling
# years with no probability mass with zeros.
slist <- calDates$grid[index]
slist <- lapply(slist,FUN=function(x) merge(calyears,x, all.x=TRUE))
slist <- rapply(slist, f=function(x) ifelse(is.na(x),0,x), how="replace")
slist <- lapply(slist, FUN=function(x) x[with(x, order(-calBP)), ])
tmp <- lapply(slist,`[`,2)
if (datenormalised){
# Rescale each date so its probability mass within timeRange sums to 1
# (dates entirely outside the range are left untouched to avoid 0/0).
tmp <- lapply(tmp,FUN=function(x) {if(sum(x)!=0){return(x/sum(x))}else{return(x)}})
}
if (length(binNames)>1){
# Bins with several dates contribute their mean probability.
spd.tmp <- Reduce("+", tmp) / length(index)
} else {
spd.tmp <- Reduce("+", tmp)
}
binnedMatrix[,b] <- spd.tmp[,1]
}
if (verbose & length(binNames)>1){ close(pb) }
################################
### Observed Data Subroutine ###
################################
## Aggregate by Locations ##
# Bin names follow the "site_phase" convention; the prefix before "_" links
# each bin to one of the supplied locations.
origins=unlist(lapply(strsplit(binNames,"_"),function(x){x[[1]]}))
if (!all(origins%in%locations.id))
{
stop("Missing bins or locations")
}
resMatrix=matrix(NA,nrow=length(unique(locations.id)),ncol=nrow(binnedMatrix))
for (x in 1:length(unique(locations.id)))
{
index=which(origins==unique(locations.id)[x])
if(length(index)>1)
{resMatrix[x,]=apply(binnedMatrix[,index],1,sum)}
if(length(index)==1)
{resMatrix[x,]=binnedMatrix[,index]}
}
## Aggregate by breaks ##
# Sum the per-location SPD within each temporal slice defined by breaks.
nBreaks=length(breaks)-1
obsMatrix=matrix(NA,nrow=length(unique(locations.id)),ncol=nBreaks)
timeSequence=timeRange[1]:timeRange[2]
for (x in 1:nBreaks)
{
index=which(timeSequence<=breaks[x]&timeSequence>breaks[x+1])
obsMatrix[,x]=apply(resMatrix[,index],1,sum)
}
## Apply SpatialWeights ##
obsGridVal=t(spatialweights$w)%*%obsMatrix
## Compute Rate of Change ##
rocaObs=t(apply(obsGridVal,1,function(x,d,rate){
L=length(x)
res=numeric(length=L-1)
for (i in 1:c(L-1))
{
t2 = x[i+1]
t1 = x[i]
res[i] = eval(rate)
# res[i]=(x[i+1]/x[i])^(1/d)-1
# If there is no spd for either period the rate is NA
if (x[i+1]==0|x[i]==0)
{
res[i]=NA
}
}
return(res)},
d=abs(breaks[2]-breaks[1]),
rate=rate))
#if single transition transpose matrix:
if (nBreaks==2){rocaObs=t(rocaObs)}
##############################
### Permutation Subroutine ###
##############################
opts = NULL
if (ncores>1)
{
cl <- snow::makeCluster(ncores)
registerDoSNOW(cl)
if (verbose)
{
print(paste("Running permutation test in parallel on ",getDoParWorkers()," workers...",sep=""))
# One tick per simulation (bug fix: was max=length(x), where x was a
# stale scalar left over from the slice-aggregation loop above).
pb <- txtProgressBar(min=0, max=nsim, style=3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
}
# Element-wise sum of the three tally matrices (hi, lo, eq) across workers.
sumcombine<-function(a,b)
{
list(a[[1]]+b[[1]],a[[2]]+b[[2]],a[[3]]+b[[3]])
}
resultHiLoEq<-foreach (x=1:nsim,.combine= sumcombine,.options.snow = opts) %dopar% {
simGridVal<-matrix(NA,nrow=nrow(spatialweights$w),ncol=nBreaks)
## Aggregate by Site ##
simResMatrix=matrix(0,nrow=length(unique(locations.id)),ncol=nrow(binnedMatrix))
## Randomly assign bins to locations.id ##
if (permute=="bins")
{
simOrigins=sample(origins)
for (x in 1:length(unique(locations.id)))
{
index=which(simOrigins==unique(locations.id)[x])
if(length(index)>1)
{simResMatrix[x,]=apply(binnedMatrix[,index],1,sum)}
if(length(index)==1)
{simResMatrix[x,]=binnedMatrix[,index]}
}
## Aggregate by breaks ##
aggMatrix=matrix(NA,nrow=length(unique(locations.id)),ncol=nBreaks)
for (x in 1:nBreaks)
{
index=which(timeSequence<=breaks[x]&timeSequence>breaks[x+1])
aggMatrix[,x]=apply(simResMatrix[,index],1,sum)
}
## Apply Weights ##
simGridVal=t(spatialweights$w)%*%aggMatrix
}
if (permute=="locations")
{
# Permute whole location sequences rather than individual bins.
simMatrix=obsMatrix[sample(nrow(obsMatrix)),]
simGridVal=t(spatialweights$w)%*%simMatrix
}
## Compute Rate of Change ##
rocaSim=t(apply(simGridVal,1,function(x,d,rate){
L=length(x)
res=numeric(length=L-1)
for (i in 1:c(L-1))
{
t2 = x[i+1]
t1 = x[i]
res[i] = eval(rate)
# res[i]=(x[i+1]/x[i])^(1/d)-1
if (x[i+1]==0|x[i]==0)
{
res[i]=NA
}
}
return(res)},
d=abs(breaks[2]-breaks[1]),
rate=rate))
# if single transition transpose matrix (bug fix: this transpose was
# present in the serial branch only, causing a dimension mismatch with
# the transposed rocaObs when nBreaks==2):
if (nBreaks==2){rocaSim=t(rocaSim)}
lo=rocaObs<rocaSim
hi=rocaObs>rocaSim
eq=rocaObs==rocaSim
return(list(hi,lo,eq))
}
snow::stopCluster(cl)
# Close the progress bar opened above (previously leaked).
if (verbose){close(pb)}
hi=resultHiLoEq[[1]]
lo=resultHiLoEq[[2]]
eq=resultHiLoEq[[3]]
} else {
hi=matrix(0,nrow=nrow(spatialweights$w),ncol=nBreaks-1)
lo=matrix(0,nrow=nrow(spatialweights$w),ncol=nBreaks-1)
eq=matrix(0,nrow=nrow(spatialweights$w),ncol=nBreaks-1)
if(verbose){
print("Permutation test...")
flush.console()
}
if(raw)
{
rocaSimAll = array(NA,dim=c(nsim,nrow(spatialweights$w),nBreaks-1))
}
if(verbose){pb <- txtProgressBar(min = 1, max = nsim, style=3)}
for (s in 1:nsim)
{
if(verbose){setTxtProgressBar(pb, s)}
simGridVal<-matrix(NA,nrow=nrow(spatialweights$w),ncol=nBreaks)
## Aggregate by Site ##
simResMatrix=matrix(0,nrow=length(unique(locations.id)),ncol=nrow(binnedMatrix))
## Randomly assign bins to locations
if (permute=="bins")
{
simOrigins=sample(origins)
for (x in 1:length(unique(locations.id)))
{
index=which(simOrigins==unique(locations.id)[x])
if(length(index)>1)
{simResMatrix[x,]=apply(binnedMatrix[,index],1,sum)}
if(length(index)==1)
{simResMatrix[x,]=binnedMatrix[,index]}
}
##Aggregate by breaks##
aggMatrix=matrix(NA,nrow=length(unique(locations.id)),ncol=nBreaks)
for (x in 1:nBreaks)
{
index=which(timeSequence<=breaks[x]&timeSequence>breaks[x+1])
aggMatrix[,x]=apply(simResMatrix[,index],1,sum)
}
##Apply Weights
simGridVal=t(spatialweights$w)%*%aggMatrix
}
if (permute=="locations")
{
simMatrix=obsMatrix[sample(nrow(obsMatrix)),]
simGridVal=t(spatialweights$w)%*%simMatrix
}
##Compute Rate of Change
rocaSim=t(apply(simGridVal,1,function(x,d,rate){
L=length(x)
res=numeric(length=L-1)
for (i in 1:c(L-1))
{
t2 = x[i+1]
t1 = x[i]
res[i] = eval(rate)
# res[i]=(x[i+1]/x[i])^(1/d)-1
if (x[i+1]==0|x[i]==0)
{
res[i]=NA
}
}
return(res)},
d=abs(breaks[2]-breaks[1]),
rate=rate))
# if single transition transpose matrix:
if (nBreaks==2) {rocaSim=t(rocaSim)}
hi=hi+(rocaObs>rocaSim)
lo=lo+(rocaObs<rocaSim)
eq=eq+(rocaObs==rocaSim)
if(raw){rocaSimAll[s,,]=rocaSim}
}
if(verbose){close(pb)}
}
############################
### Compute Significance ###
############################
# One-sided Monte-Carlo p-values with the conventional +1 correction.
pvalHi=(lo+eq+1)/c(nsim+1)
pvalLo=(hi+eq+1)/c(nsim+1)
# Two-sided p-value: twice the smaller tail, capped at 1.
pval=pvalHi
pval[which(pvalHi>pvalLo)]=pvalLo[which(pvalHi>pvalLo)]
pval=pval*2
if (max(pval,na.rm=TRUE)>1)
{
pval[which(pval>1)]=1
}
## Compute False Discovery Rate ##
qvalHi=apply(pvalHi,2,function(x){return(p.adjust(x,method="fdr"))})
qvalLo=apply(pvalLo,2,function(x){return(p.adjust(x,method="fdr"))})
qval=apply(pval,2,function(x){return(p.adjust(x,method="fdr"))})
metadata=data.frame(npoints=length(unique(locations.id)),ndates=nrow(calDates$metadata),nbins=length(binNames),nsim=nsim,permutationType=permute,datenormalised=datenormalised,breaks=nBreaks,timeRange=paste(timeRange[1],"-",timeRange[2],sep=""),weights.h=spatialweights$h,weights.kernel=spatialweights$kernel)
if(raw==FALSE){rocaSimAll=NA}
reslist=list(metadata=metadata,rocaSim=rocaSimAll,rocaObs=rocaObs,pval=pval,pvalHi=pvalHi,pvalLo=pvalLo,qval=qval,qvalLo=qvalLo,qvalHi=qvalHi,locations=locations)
class(reslist) <- append(class(reslist),"spatialTest")
return(reslist)
}
#' @title Point to point test of SPD
#'
#' @description Test for evaluating the difference in the summed probability values associated with two points in time.
#'
#' @param x result of \code{\link{modelTest}} with raw=TRUE.
#' @param p1 calendar year (in BP) of start point.
#' @param p2 calendar year (in BP) of end point.
#' @param interactive if set to TRUE enables an interactive selection of p1 and p2 from a graphical display of the SPD. Disabled when \code{p1} and \code{p2} are defined.
#' @param plot if set to TRUE the function plots the location of p1 and p2 on the SPD. Default is FALSE.
#'
#' @details The function compares observed differences in the summed probability values associated with two points in time against a distribution of expected values under the null hypothesis defined with the \code{\link{modelTest}} function. The two points can be specified manually (assigning BP dates to the arguments \code{p1} and \code{p2}) or interactively (clicking on a SPD plot). Note that \code{\link{modelTest}} should be executed setting the argument \code{raw} to \code{TRUE} (default is \code{FALSE}).
#'
#'
#' @return A list with: the BP dates for the two points and the p-value obtained from a two-sided test.
#'
#' @references
#' Edinborough, K., Porcic, M., Martindale, A., Brown, T.J., Supernant, K., Ames, K.M., (2017). Radiocarbon test for demographic events in written and oral history. PNAS 201713012. doi:10.1073/pnas.1713012114
#' @examples
#' ## Example with Younger Dryas period Near East, including site bins
#' \dontrun{
#' data(emedyd)
#' caldates <- calibrate(x=emedyd$CRA, errors=emedyd$Error, normalised=FALSE)
#' bins <- binPrep(sites=emedyd$SiteName, ages=emedyd$CRA, h=50)
#' nsim=10 #toy example
#' expnull <- modelTest(caldates, errors=emedyd$Error, bins=bins, nsim=nsim, runm=50,
#' timeRange=c(16000,9000), model="exponential", datenormalised=FALSE, raw=TRUE)
#' p2pTest(x=expnull,p1=13000,p2=12500) #non-interactive mode
#' p2pTest(x=expnull) #interactive mode
#' }
#' @seealso \code{\link{modelTest}}.
#' @import utils
#' @import stats
#' @export
p2pTest <- function(x,p1=NA,p2=NA,interactive=TRUE,plot=FALSE)
{
# x$sim (the matrix of simulated SPDs, one column per simulation) is only
# populated when modelTest() was run with raw=TRUE.
if (is.na(x$sim[1]))
{
stop("x should be an SpdModelTest class object produced using modelTest() with raw=TRUE")
}
# When both points are supplied, validate their order (calBP decreases
# towards the present, so the end point must be numerically smaller) and
# disable interactive selection.
if (!is.na(p1)&!is.na(p2))
{
if (p2>p1){stop("the end point should be more recent than the start point")}
interactive=FALSE
}
if (interactive)
{
# Interactive mode: the user clicks the two points on the SPD plot.
# Clicks are rounded to whole calendar years and matched against the
# calBP grid of the observed SPD.
plot(x)
print("select start point")
p1=round(locator(n=1)$x[1])
index1=match(p1,x$result$calBP)
p1.y = x$result[index1,2]
points(p1,p1.y,pch=20)
print("select end point")
p2=round(locator(n=1)$x[1])
if (p2>p1)
{
# Keep prompting until the end point is more recent than the start.
print("the end point should be more recent than the start point")
while(p2>p1)
{
print("select end point")
p2=round(locator(n=1)$x[1])
}
}
index2=match(p2,x$result$calBP)
p2.y = x$result[index2,2]
points(p2,p2.y,pch=20)
lines(x$result$calBP[index1:index2],x$result$PrDens[index1:index2],lwd=2)
}
if (!interactive)
{
# Non-interactive mode: look up the supplied years on the calBP grid.
# If a year is outside the grid, match() yields NA here and execution is
# halted by the range check below before the p-value is computed.
index1=match(p1,x$result$calBP)
p1.y = x$result[index1,2]
index2=match(p2,x$result$calBP)
p2.y = x$result[index2,2]
if (plot)
{
plot(x)
points(p1,p1.y,pch=20)
points(p2,p2.y,pch=20)
lines(x$result$calBP[index1:index2],x$result$PrDens[index1:index2],lwd=2)
}
}
if (!p1%in%x$result$calBP | !p2%in%x$result$calBP)
{
stop("p1 and p2 should be within the temporal range of the spd")
}
# Compare the observed difference in summed probability between the two
# points against the distribution of the same difference across the nsim
# simulated SPDs.
obs.diff = p1.y-p2.y
sim.diff = x$sim[index1,]-x$sim[index2,]
nsim = ncol(x$sim)
lo = sum(obs.diff < sim.diff)
hi = sum(obs.diff > sim.diff)
eq = sum(obs.diff == sim.diff)
# One-sided Monte-Carlo p-values with the +1 correction; the two-sided
# p-value is twice the smaller tail.
pvalHi=(lo+eq+1)/c(nsim+1)
pvalLo=(hi+eq+1)/c(nsim+1)
pval=ifelse(pvalHi<pvalLo,pvalHi,pvalLo)*2
return(list(p1=p1,p2=p2,pval=pval))
}
#' @title Summarise a \code{SpdModelTest} class object
#'
#' @description \code{summary} method for class "\code{SpdModelTest}"
#'
#' @param object A \code{SpdModelTest} class object produced using the \code{\link{modelTest}} function.
#' @param type Specifies whether the summary should be based on SPD ('spd') or associated rates of change ('roc'). Default is 'spd'.
#' @param ... Ignored
#'
#' @details The summary function returns metadata (number of radiocarbon dates, bins, and simulations), the p-value of the global significance test, and the chronological interval of local positive and negative deviations from the simulation envelope.
#' @seealso \code{\link{modelTest}}.
#' @import utils
#' @import stats
#' @method summary SpdModelTest
#' @export
summary.SpdModelTest<-function(object,type='spd',...) {
if (!type%in%c('spd','roc'))
{
stop("The argument 'type' should be either 'spd' or 'roc'.")
}
# Header: basic metadata and the global p-value of the requested statistic.
cat("'modelTest()' function summary:\n")
cat("\n")
cat(paste("Number of radiocarbon dates: ",object$n,"\n",sep=""))
cat(paste("Number of bins: ",object$nbins,"\n",sep=""))
if(type=='roc'){cat(paste("Backsight size: ",object$backsight,"\n",sep=""))}
cat("\n")
cat(paste("Statistical Significance computed using ",object$nsim," simulations. \n",sep=""))
if(type=='spd'){cat(paste("Global p-value: ",round(object$pval,5),".\n",sep=""))}
if(type=='roc'){cat(paste("Global p-value (rate of change): ",round(object$pval.roc,5),".\n",sep=""))}
cat("\n")
# Identify the rows where the observed statistic escapes the simulation
# envelope (column 2 of the envelope = upper bound, column 1 = lower bound).
if (type=='spd')
{
obs <- object$result[,1:2]
envelope <- object$result[,3:4]
booms <- which(obs$PrDens>envelope[,2])
busts <- which(obs$PrDens<envelope[,1])
}
if (type=='roc')
{
# Rates of change are NA for the first backsight years; drop those rows.
# Guard against the no-NA case: x[-integer(0),] would return an *empty*
# data.frame, silently discarding all rows (bug fix).
naindex = which(is.na(object$result.roc$roc))
if (length(naindex)>0)
{
obs <- object$result.roc[-naindex,1:2]
envelope <- object$result.roc[-naindex,3:4]
} else {
obs <- object$result.roc[,1:2]
envelope <- object$result.roc[,3:4]
}
booms <- which(obs$roc>envelope[,2])
busts <- which(obs$roc<envelope[,1])
}
# Report contiguous runs above the envelope as "start~end BP" intervals.
if (length(booms)>0)
{
cat(paste("Significant positive local deviations at:\n"))
i=1
while (i < length(obs$calBP))
{
if(!is.na(obs[i,2]))
{
if(obs[i,2]>envelope[i,2])
{
ss=obs$calBP[i]
# Advance to the end of the contiguous run above the envelope.
while(obs[i,2]>envelope[i,2])
{
ee=obs$calBP[i]
i=i+1
if (i>length(obs$calBP))
{
i = length(obs$calBP)
ee=obs$calBP[i]
break()
}
}
if (ss!=ee)
cat(paste(ss,"~",ee," BP \n",sep=""))
if (ss==ee)
cat(paste(ss," BP \n",sep=""))
}
}
i = i+1
}
}
if (length(booms)==0)
{
cat(paste("No significant positive local deviations"))
}
cat("\n")
# Same interval reporting for runs below the envelope.
if (length(busts)>0)
{
cat(paste("Significant negative local deviations at:\n"))
i=1
while (i < length(obs$calBP))
{
if(!is.na(obs[i,2]))
{
if(obs[i,2]<envelope[i,1])
{
ss=obs$calBP[i]
while(obs[i,2]<envelope[i,1])
{
ee=obs$calBP[i]
i=i+1
if (i>length(obs$calBP))
{
i = length(obs$calBP)
ee=obs$calBP[i]
break()
}
}
if (ss!=ee)
cat(paste(ss,"~",ee," BP \n",sep=""))
if (ss==ee)
cat(paste(ss," BP \n",sep=""))
}
}
i = i+1
}
}
if (length(busts)==0)
{
cat(paste("No significant negative local deviations"))
}
}
#' @title Summarise a \code{SpdPermTest} class object
#'
#' @description \code{summary} method for class "\code{SpdPermTest}"
#'
#' @param object A \code{SpdPermTest} class object produced using the \code{\link{permTest}} function.
#' @param type Specifies whether the summary should be based on SPD ('spd') or associated rates of change ('roc'). Default is 'spd'.
#' @param ... Ignored
#'
#' @details The summary function returns metadata (number of radiocarbon dates, bins, and simulations), the p-value of the global significance test, and the chronological interval of local positive and negative deviations from the simulation envelope for each set.
#' @seealso \code{\link{permTest}}.
#' @import utils
#' @import stats
#' @method summary SpdPermTest
#' @export
summary.SpdPermTest<-function(object,type='spd',...) {
if (!type%in%c('spd','roc'))
{
stop("The argument 'type' should be either 'spd' or 'roc'.")
}
# Header: number of sets and simulation count.
cat("'permTest()' function summary:\n")
cat("\n")
cat(paste("Number of sets: ",length(object$observed),"\n",sep=""))
if(type=='roc'){cat(paste("Backsight size: ",object$backsight,"\n",sep=""))}
cat(paste("Statistical Significance computed using ",object$nsim," simulations. \n",sep=""))
cat("\n")
# One report section per set (mark). i indexes sets; the interval-scanning
# loops below use a separate index j so they cannot clobber i.
for (i in 1:length(object$observed))
{
if (type=='spd'){cat(paste("--- ",names(object$observed)[i]," ---\n",sep=""))}
if (type=='roc'){cat(paste("--- ",names(object$observed.roc)[i]," ---\n",sep=""))}
cat(paste("Number of radiocarbon dates:",object$metadata[[i]][1],"\n",sep=""))
cat(paste("Number of bins:",object$metadata[[i]][2],"\n",sep=""))
cat("\n")
# obs: two-column table (calBP, statistic); envelope: matching lower/upper
# simulation bounds (column 1 = lower, column 2 = upper).
if(type=='spd'){
cat(paste("Global p-value: ",round(object$pValueList[i],5),"\n",sep=""))
obs <- object$observed[[i]]
envelope <-object$envelope[[i]]
booms <- which(obs[,2]>envelope[,2])
busts <- which(obs[,2]<envelope[,1])
}
if(type=='roc'){
cat(paste("Global p-value (rate of change): ",round(object$pValueList.roc[i],5),"\n",sep=""))
obs <- object$observed.roc[[i]]
envelope <-object$envelope.roc[[i]]
booms <- which(obs[,2]>envelope[,2])
busts <- which(obs[,2]<envelope[,1])
}
cat("\n")
# Report contiguous runs above the envelope as "start~end BP" intervals.
if (length(booms)>0)
{
cat(paste("Significant positive local deviations at:\n"))
j=1
while (j < length(obs[,1]))
{
if(!is.na(obs[j,2]))
{
if(obs[j,2]>envelope[j,2])
{
ss=obs[j,1]
# Advance to the end of the contiguous run above the envelope.
while(obs[j,2]>envelope[j,2])
{
ee=obs[j,1]
j=j+1
if (j>length(obs[,1]))
{
j = length(obs[,1])
ee=obs[j,1]
break()
}
}
if (ss!=ee)
cat(paste(ss,"~",ee," BP \n",sep=""))
if (ss==ee)
cat(paste(ss," BP \n",sep=""))
}
}
j = j+1
}
}
if (length(booms)==0)
{
cat(paste("No significant positive local deviations"))
}
cat("\n")
# Same interval reporting for runs below the envelope.
if (length(busts)>0)
{
cat(paste("Significant negative local deviations at:\n"))
j=1
while (j < length(obs[,1]))
{
if(!is.na(obs[j,2]))
{
if(obs[j,2]<envelope[j,1])
{
ss=obs[j,1]
while(obs[j,2]<envelope[j,1])
{
ee=obs[j,1]
j=j+1
if (j>length(obs[,1]))
{
j = length(obs[,1])
ee=obs[j,1]
break()
}
}
if (ss!=ee)
cat(paste(ss,"~",ee," BP \n",sep=""))
if (ss==ee)
cat(paste(ss," BP \n",sep=""))
}
}
j = j+1
}
}
if (length(busts)==0)
{
# Bug fix: this branch previously reported "No significant positive
# local deviations" for the *negative* case.
cat(paste("No significant negative local deviations"))
}
cat("\n")
}
}
| /R/tests.R | no_license | philriris/rcarbon | R | false | false | 63,161 | r | if(getRversion() >= "2.15.1") utils::globalVariables(c("s","calBP"))
#' @title Monte-Carlo simulation test for SPDs
#'
#' @description Comparison of an observed summed radiocarbon date distribution (aka SPD) with simulated outcomes from a theoretical model.
#'
#' @param x A \code{CalDates} object containing calibrated radiocarbon ages
#' @param errors A vector of errors corresponding to each radiocarbon age
#' @param nsim Number of simulations
#' @param bins A vector indicating which bin each radiocarbon date is assigned to.
#' @param runm A number indicating the window size of the moving average to smooth both observed and simulated SPDs. If set to \code{NA} no moving average is applied.Default is \code{NA}.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP. The fitting process is applied considering the SPD within the interval defined by this parameter. If no values are supplied the earliest and latest median calibrated dates of the observed data will be used.
#' @param backsight A single numeric value defining the distance in time between the focal year and the backsight year for computing the rate of change. Default is 10.
#' @param changexpr An expression for calculating the rate of change in SPD between the focal year and a backsight year. Available input options are t1 (the SPD for the focal year), t0 (the SPD for the backsight year), d (the distance between t0 and t1), and any other standard constants and mathematical operators. A sensible default is provided.
#' @param gridclip Whether the sampling of random dates is constrained to the observed range (TRUE) or not (FALSE). Default is TRUE.
#' @param raw A logical variable indicating whether all permuted SPDs should be returned or not. Default is FALSE.
#' @param model A vector indicating the model to be fitted. Currently the acceptable options are \code{'uniform'}, \code{'linear'}, \code{'exponential'} and \code{'custom'}. Default is \code{'exponential'}.
#' @param method Method for the creation of random dates from the fitted model. Either \code{'uncalsample'} or \code{'calsample'}. Default is \code{'uncalsample'}. See below for details.
#' @param predgrid A data.frame containing calendar years (column \code{calBP}) and associated summed probabilities (column \code{PrDens}). Required when \code{model} is set to \code{'custom'}.
#' @param normalised Whether the simulated dates should be normalised or not. Default based on whether x is normalised or not.
#' @param datenormalised Argument kept for backward compatibility with previous versions.
#' @param spdnormalised A logical variable indicating whether the total probability mass of the SPD is normalised to sum to unity for both observed and simulated data.
#' @param edgeSize Controls edge effect by expanding the fitted model beyond the range defined by \code{timeRange}.
#' @param ncores Number of cores used for parallel execution. Default is 1.
#' @param fitonly A logical variable. If set to TRUE, only the model fitting is executed and returned. Default is FALSE.
#' @param a Starter value for the exponential fit with the \code{\link{nls}} function using the formula \code{y ~ exp(a + b * x)} where \code{y} is the summed probability and \code{x} is the date. Default is 0.
#' @param b Starter value for the exponential fit with the \code{\link{nls}} function using the formula \code{y ~ exp(a + b * x)} where \code{y} is the summed probability and \code{x} is the date. Default is 0.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#'
#' @details The function implements a Monte-Carlo test for comparing a theoretical or fitted statistical model to an observed summed radiocarbon date distribution (aka SPD) and associated rates of changes. A variety of theoretical expectations can be compared to the observed distribution by setting the \code{model} argument, for example to fit basic \code{'uniform'} (the mean of the SPD), \code{'linear'} (fitted using the \code{\link{lm}} function) or \code{model='exponential'} models (fitted using the \code{\link{nls}} function). Models are fitted to the period spanned by \code{timeRange} although \code{x} can contain dates outside this range to mitigate possible edge effects (see also \code{bracket}). Alternatively, it is possible for the user to provide a model of their own by setting \code{model='custom'} and then supplying a two-column data.frame to \code{predgrid}. The function generates \code{nsim} theoretical SPDs from the fitted model via Monte-Carlo simulation, this is then used to define a 95\% critical envelope for each calendar year. The observed SPD is then compared against the simulation envelope; local departures from the model are defined as instances where the observed SPD is outside such an envelope, while an estimate of the global significance of the observed SPD is also computed by comparing the total areas of observed and simulated SPDs that fall outside the simulation envelope. The theoretical SPDs can be generated using two different sampling approaches defined by the parameter \code{method}. If \code{method} is set to \code{'uncalsample'} each date is drawn after the fitted model is backcalibrated as a whole and adjusted for a baseline expectation; if it is set to \code{'calsample'} samples are drawn from the fitted model in calendar year then individually back calibrated and recalibrated (the approach of Timpson et al. 2014). 
#' For each simulation, both approaches produce \eqn{n} samples, with \eqn{n} equal to the number of bins or number of dates (when bins are not defined). Differences between these two approaches are particularly evident at dates coincident with steeper portions of the calibration curve. If more than one type of calibration curve is associated with the observed dates, at each Monte-Carlo iteration, the function randomly assigns each bin to one of the calibration curves with probability based on the proportion of dates within the bin associated to the specific curves. For example, if a bin is composed of four dates and three are calibrated with 'intcal20' the probability of that particular bin being assigned to 'intcal20' is 0.75.
#' @note
#'\itemize{
#'\item {Windows users might receive a memory allocation error with larger time span of analysis (defined by the parameter \code{timeRange}). This can be avoided by increasing the memory limit with the \code{\link{memory.limit}} function.}
#'\item {Users experiencing a \code{Error: cannot allocate vector of size ...} error message can increase the memory size using the \code{Sys.setenv()}, for example: \code{Sys.setenv("R_MAX_VSIZE" = 16e9)}.}
#'\item {The function currently supports only dates calibrated with 'intcal20','intcal13','intcal13nhpine16','shcal20','shcal13','shcal13shkauri16', 'marine20', and 'marine13'.}
#'}
#'
#' @return An object of class \code{SpdModelTest} with the following elements
#' \itemize{
#' \item{\code{result}} {A four column data.frame containing the observed probability density (column \emph{PrDens}) and the lower and the upper values of the simulation envelope (columns \emph{lo} and \emph{hi}) for each calendar year (column \emph{calBP})}
#' \item{\code{result.roc}} {A four column data.frame containing the observed rates of change (column \emph{roc}) and the lower and the upper values of the simulation envelope (columns \emph{lo.roc} and \emph{hi.roc}) for the mid point between two chronological blocks \emph{calBP}}
#' \item{\code{sim}} {A matrix containing the simulation results of the summed probabilities. Available only when \code{raw} is set to TRUE}
#' \item{\code{sim.roc}} {A matrix containing the simulation results of the rate of change of summed probabilities. Available only when \code{raw} is set to TRUE}
#' \item{\code{pval}} {A numeric vector containing the p-value of the global significance test for the summed probabilities}
#' \item{\code{pval.roc}} {A numeric vector containing the p-value of the global significance test for the rates of change}
#' \item{\code{fit}} {A data.frame containing the probability densities of the fitted model for each calendar year within the time range of analysis}
#' \item{\code{fitobject}} {Fitted model. Not available when \code{model} is \code{'custom'}}
#' \item{\code{n}} {Number of radiocarbon dates.}
#' \item{\code{nbins}}{Number of bins.}
#' \item{\code{nsim}}{Number of Monte-Carlo simulations.}
#' \item{\code{backsight}}{Backsight size.}
#' }
#'
#' @references
#'
#' Timpson, A., Colledge, S., Crema, E., Edinborough, K., Kerig, T., Manning, K., Thomas, M.G., Shennan, S., (2014). Reconstructing regional population fluctuations in the European Neolithic using radiocarbon dates: a new case-study using an improved method. Journal of Archaeological Science, 52, 549-557. doi:10.1016/j.jas.2014.08.011
#'
#'
#' @examples
#' ## Example with Younger Dryas period Near East, including site bins
#' \dontrun{
#' data(emedyd)
#' caldates <- calibrate(x=emedyd$CRA, errors=emedyd$Error, normalised=FALSE)
#' bins <- binPrep(sites=emedyd$SiteName, ages=emedyd$CRA, h=50)
#' nsim=5 #toy example
#' expnull <- modelTest(caldates, errors=emedyd$Error, bins=bins, nsim=nsim, runm=50,
#' timeRange=c(16000,9000), model="exponential", datenormalised=FALSE)
#' plot(expnull, xlim=c(16000,9000))
#' round(expnull$pval,4) #p-value
#' summary(expnull)
#' }
#' @import utils
#' @import stats
#' @import doSNOW
#' @import snow
#' @import foreach
#' @import iterators
#' @export
modelTest <- function(x, errors, nsim, bins=NA, runm=NA, timeRange=NA,backsight=50,changexpr=expression((t1/t0)^(1/d)-1),gridclip=TRUE, raw=FALSE, model=c("exponential"),method=c("uncalsample"),predgrid=NA, normalised=NA,datenormalised=NA, spdnormalised=FALSE, ncores=1, fitonly=FALSE, a=0, b=0, edgeSize=500,verbose=TRUE){
# Monte-Carlo test comparing an observed SPD (and its rates of change)
# against nsim SPDs simulated from a fitted/theoretical null model.
# Upper limit of the usable calibration time range: 55000 cal BP for the
# *20-generation curves, reduced to 50000 when any *13-generation curve
# is present among the input dates.
caltimeRange =c(55000,0)
if (any(x$metadata$CalCurve %in% c("intcal13","shcal13","marine13","intcal13nhpine16","shcal13shkauri16")))
{
caltimeRange =c(50000,0)
}
# When only the fitted model is requested, a single simulation suffices
# (the simulation loop output is discarded by the early return below).
if (fitonly == TRUE) {nsim <- 1}
# Parallel setup: fall back to serial when doSNOW is unavailable.
# NOTE(review): the else branch also creates and registers a cluster when
# ncores==1 — confirm this is intended rather than guarding with ncores>1.
if (ncores>1&!requireNamespace("doSNOW", quietly=TRUE)){
warning("the doSnow package is required for multi-core processing; ncores has been set to 1")
ncores=1
} else {
cl <- snow::makeCluster(ncores)
registerDoSNOW(cl)
# Ensure the cluster is torn down however the function exits.
on.exit(snow::stopCluster(cl))
}
# Validate the sampling method used to generate simulated dates.
if (!any(method%in%c("uncalsample","calsample")))
{
stop("The 'method' argument must be either 'uncalsample' or 'calsample'")
}
# Range spanned by the median calibrated dates; used as the default
# timeRange and later for clipping the prediction grid.
ccrange = c(max(medCal(x)),min(medCal(x)))
if (anyNA(timeRange))
{
timeRange=ccrange
}
# Default normalisation of simulated dates follows the input dates.
if (is.na(normalised))
{
normalised=FALSE
if(x$metadata$Normalised[1]==TRUE)
{
normalised=TRUE
}
}
if (normalised!=x$metadata$Normalised[1])
{
warning("The normalisation setting of x and normalised are different")
}
# Backward compatibility: an explicit 'datenormalised' overrides 'normalised'.
if (!is.na(datenormalised))
{
if (datenormalised!=normalised)
{
warning("'datenormalised' is not equal to 'normalised'. The datenormalised setting will be used for the normalisation setting of the calibration of simulated dates")
normalised=datenormalised
}
if (datenormalised!=x$metadata$Normalised[1])
{
if (x$metadata$Normalised[1])
{
warning("Input dates are normalised but datenormalised is set to FALSE. The datenormalised setting will be ignored")
normalised=datenormalised
}
if (!x$metadata$Normalised[1])
{
warning("Input dates are not normalised but datenormalised is set to TRUE. The datenormalised setting will be ignored")
normalised=datenormalised
}
}
}
# Only the calibration curves listed below are supported.
calCurves = x$metadata$CalCurve
if (!all(calCurves%in%c("intcal20","shcal20","marine20",'intcal13','intcal13nhpine16','shcal13','shcal13shkauri16','marine13')))
{
stop("modelTest() currently accepts only dates calibrated with the following calibration curves: 'intcal20','intcal13','intcal13nhpine16','shcal20','shcal13','shcal13shkauri16', 'marine20', and 'marine13'")
}
unique.calCurves = as.character(sort(unique(calCurves)))
ncc = length(unique.calCurves) #count number of unique calibration curves
if (verbose){ print("Aggregating observed dates...") }
#Generate matrix of sample sizes for each curve
# (one row per simulation, one column per calibration curve)
if (is.na(bins[1])){
samplesize <- t(matrix(table(calCurves),nrow=ncc,ncol=nsim))
colnames(samplesize) = names(table(calCurves))
} else {
# With bins, the curve assigned to each bin is re-drawn each simulation.
samplesize <- curveSamples(bins=bins,calCurves=calCurves,nsim=nsim)
if (ncc==1) {
samplesize = matrix(samplesize,ncol=1,nrow=length(samplesize))
colnames(samplesize) = names(table(calCurves))
}
}
# Keep columns in the same (sorted) order as unique.calCurves.
if (ncc>1) {samplesize=samplesize[,unique.calCurves]}
# Create artificial bins in case bins are not supplied
if (is.na(bins[1])){ bins <- as.character(1:nrow(x$metadata)) }
# Observed SPD over the requested time range.
observed <- spd(x=x, bins=bins, timeRange=timeRange, runm=runm, spdnormalised=spdnormalised, verbose=FALSE, edgeSize=edgeSize)
finalSPD <- observed$grid$PrDens
## Simulation
sim <- matrix(NA,nrow=length(finalSPD),ncol=nsim)
if (verbose & !fitonly){
print("Monte-Carlo test...")
flush.console()
}
# Calendar grid used for model fitting; the prediction grid may be
# clipped relative to the observed median-date range (plus edgeSize).
fit.time <- seq(timeRange[1],timeRange[2],-1)
pred.time <- fit.time
if (gridclip)
{
st = max(ccrange[1],timeRange[1])+edgeSize
en = min(ccrange[2],timeRange[2])-edgeSize
pred.time <- seq(st,en,-1)
}
# Fit the requested null model and build 'predgrid' (calBP, PrDens).
fit <- NA
if (model=="exponential"){
fit <- nls(y ~ exp(a + b * x), data=data.frame(x=fit.time, y=finalSPD), start=list(a=a, b=b))
est <- predict(fit, list(x=pred.time))
predgrid <- data.frame(calBP=pred.time, PrDens=est)
} else if (model=="uniform"){
# Uniform null: constant density equal to the mean of the observed SPD.
predgrid <- data.frame(calBP=pred.time, PrDens=mean(finalSPD))
} else if (model=="linear"){
fit <- lm(y ~ x, data=data.frame(x=fit.time, y=finalSPD))
est <- predict(fit, list(x=pred.time))
predgrid <- data.frame(calBP=pred.time, PrDens=est)
} else if (model=="custom"){
# User-supplied model: predgrid must be a two-column data.frame.
if (length(predgrid)!=2){
stop("If you choose a custom model, you must provide a proper predgrid argument (two-column data.frame of calBP and predicted densities).")
}
if (!all(colnames(predgrid)%in%c("calBP","PrDens")))
{
stop("Column names in the predgrid argument should be 'calBP' and 'PrDens'")
}
} else {
stop("Specified model not one of current choices.")
}
# Early exit: return only the observed SPD and the fitted model.
# NOTE(review): 'osbSPD' looks like a typo for 'obsSPD'; renaming it would
# break downstream consumers, so it is documented rather than changed.
if (fitonly){
print("Done (SPD and fitted model only).")
predgrid <- subset(predgrid,calBP<=timeRange[1]&calBP>=timeRange[2])
res <- list(result=NA, sim=NA, pval=NA, osbSPD=observed, fit=predgrid, fitobject=fit)
return(res)
}
# Add Extra Edges with PrDens=0 on both sides of the prediction grid
# (edge-effect mitigation), then clamp to the supported calibration range.
if (edgeSize>0)
{
predgrid = rbind.data.frame(data.frame(calBP=(max(predgrid$calBP)+edgeSize):c(predgrid$calBP[1]+1),PrDens=0),predgrid)
predgrid = rbind.data.frame(predgrid,data.frame(calBP=min(predgrid$calBP):(min(predgrid$calBP)-edgeSize),PrDens=0))
if (any(predgrid$calBP<=0|predgrid$calBP>=caltimeRange[1]))
{
warning("edgeSize reduced")
predgrid = subset(predgrid, calBP<=caltimeRange[1]&calBP>=0)
}
}
# predgrid$PrDens = predgrid$PrDens/sum(predgrid$PrDens)
# Prepare Sampling Grid(s): back-calibrate the model once per curve.
cragrids = vector("list",length=ncc)
for (i in 1:ncc)
{
tmp.grid <- uncalibrate(as.CalGrid(predgrid), calCurves=unique.calCurves[i], compact=FALSE, verbose=FALSE)
cragrids[[i]] <- tmp.grid
# Clipping the uncalibrated grid to the observed CRA range
if (gridclip)
{
cragrids[[i]] <- tmp.grid[tmp.grid$CRA <= max(x$metadata$CRA) & tmp.grid$CRA >= min(x$metadata$CRA),]
}
}
# Actual Method
opts = NULL
if (verbose)
{
if (ncores>1){ print(paste("Running in parallel on ",getDoParWorkers()," workers...",sep=""))}
pb <- txtProgressBar(min=0, max=nsim, style=3)
progress <- function(n) setTxtProgressBar(pb, n)
opts <- list(progress = progress)
}
# Serial execution of the Monte-Carlo simulations.
if (ncores==1)
{
for (s in 1:nsim){
if (verbose){ setTxtProgressBar(pb, s) }
# 'uncalsample': draw CRAs weighted by the grid's PrDens column
# (back-calibrated model density; see roxygen details above).
if (method=="uncalsample")
{
randomDates <- vector("list",length=ncc)
ccurve.tmp <- numeric()
for (i in 1:ncc)
{
randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$PrDens)
ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
}
# Measurement errors are resampled (with replacement) from the observed ones.
randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
}
# 'calsample': draw CRAs weighted by the grid's Raw column
# (presumably the calendar-scale model density — see Timpson et al. 2014).
if (method=="calsample")
{
randomDates <- vector("list",length=ncc)
ccurve.tmp <- numeric()
for (i in 1:ncc)
{
randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$Raw)
ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
}
randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
}
# Calibrate the simulated dates and sum them into a simulated SPD,
# rescaled to the model's probability mass within timeRange.
tmp <- calibrate(x=unlist(randomDates),errors=randomSDs, timeRange=timeRange, calCurves=ccurve.tmp, normalised=normalised, ncores=1, verbose=FALSE, calMatrix=TRUE)
simDateMatrix <- tmp$calmatrix
sim[,s] <- apply(simDateMatrix,1,sum)
sim[,s] <- (sim[,s]/sum(sim[,s])) * sum(predgrid$PrDens[predgrid$calBP <= timeRange[1] & predgrid$calBP >= timeRange[2]])
if (spdnormalised){ sim[,s] <- (sim[,s]/sum(sim[,s])) }
if (!is.na(runm)){ sim[,s] <- runMean(sim[,s], runm, edge="fill") }
}
}
# Parallel execution: same per-simulation logic as above; each worker
# returns one SPD column, combined with cbind.
if (ncores>1)
{
sim <- foreach (s = 1:nsim, .combine='cbind', .packages='rcarbon',.options.snow = opts) %dopar% {
randomDates <- vector("list",length=ncc)
ccurve.tmp <- numeric()
if (method=="uncalsample")
{
for (i in 1:ncc)
{
randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$PrDens)
ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
}
}
if (method=="calsample")
{
for (i in 1:ncc)
{
randomDates[[i]] = sample(cragrids[[i]]$CRA,replace=TRUE,size=samplesize[s,i],prob=cragrids[[i]]$Raw)
ccurve.tmp = c(ccurve.tmp,rep(unique.calCurves[i],samplesize[s,i]))
}
}
randomSDs <- sample(size=length(unlist(randomDates)), errors, replace=TRUE)
tmp <- calibrate(x=unlist(randomDates),errors=randomSDs, timeRange=timeRange, calCurves=ccurve.tmp, normalised=normalised, ncores=1, verbose=FALSE, calMatrix=TRUE)
simDateMatrix <- tmp$calmatrix
aux <- apply(simDateMatrix,1,sum)
aux <- (aux/sum(aux)) * sum(predgrid$PrDens[predgrid$calBP <= timeRange[1] & predgrid$calBP >= timeRange[2]])
if (spdnormalised){ aux <- (aux/sum(aux)) }
if (!is.na(runm)){
aux <- runMean(aux, runm, edge="fill")
}
aux
}
}
if (verbose){ close(pb) }
## rate of change subroutine: evaluates 'changexpr' with t0 (backsight
## year SPD), t1 (focal year SPD) and d (backsight distance) in scope.
timeSequence = timeRange[1]:timeRange[2]
foo = function(spd,backsight,timeSequence,changexpr)
{
obs=rep(NA,length(timeSequence))
for (i in 1:c(length(obs)-backsight))
{
d=backsight
t0 = spd[i]
t1 = spd[i+backsight]
obs[i+backsight] = eval(changexpr)
# Rates are undefined when either endpoint has zero probability.
if (t1==0|t0==0){obs[i+backsight]=NA}
}
return(obs)
}
obs.roc = foo(finalSPD,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
sim.roc = apply(sim,2,foo,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
## Envelope, z-scores, global p-value
# 95% simulation envelopes on the raw scale (SPD and rate of change)...
lo <- apply(sim,1,quantile,prob=0.025,na.rm=TRUE)
hi <- apply(sim,1,quantile,prob=0.975,na.rm=TRUE)
lo.roc = apply(sim.roc,1,quantile,prob=0.025,na.rm=TRUE)
hi.roc = apply(sim.roc,1,quantile,prob=0.975,na.rm=TRUE)
# ...and on the z-score scale (each calendar year standardised across simulations).
Zsim <- t(apply(sim,1,scale))
zLo <- apply(Zsim,1,quantile,prob=0.025,na.rm=TRUE)
zHi <- apply(Zsim,1,quantile,prob=0.975,na.rm=TRUE)
Zsim.roc <- t(apply(sim.roc,1,scale))
zLo.roc <- apply(Zsim.roc,1,quantile,prob=0.025,na.rm=TRUE)
zHi.roc <- apply(Zsim.roc,1,quantile,prob=0.975,na.rm=TRUE)
# Observed values standardised against the simulated mean and sd per year.
Zscore_empirical <- (finalSPD - apply(sim, 1, mean))/apply(sim, 1, sd)
Zscore_empirical.roc <- (obs.roc - apply(sim.roc, 1, mean))/apply(sim.roc, 1, sd)
# Local departures: years where the observed curve leaves the envelope.
busts <- which(Zscore_empirical< zLo)
booms <- which(Zscore_empirical> zHi)
busts2 <- which(finalSPD< lo)
booms2 <- which(finalSPD> hi)
busts.roc <- which(Zscore_empirical.roc < zLo.roc)
booms.roc <- which(Zscore_empirical.roc > zHi.roc)
busts2.roc <- which(obs.roc < lo.roc)
booms2.roc <- which(obs.roc > hi.roc)
# Global statistic: total z-score "area" outside the envelope; the Monte-
# Carlo p-value compares it against the same statistic for each simulation.
observedStatistic <- sum(c(zLo[busts] - Zscore_empirical[busts]),c(Zscore_empirical[booms]-zHi[booms]))
observedStatistic.roc <- sum(c(zLo.roc[busts.roc] - Zscore_empirical.roc[busts.roc]),c(Zscore_empirical.roc[booms.roc]-zHi.roc[booms.roc]))
expectedstatistic <- abs(apply(Zsim,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=zLo)) + apply(Zsim,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=zHi)
expectedstatistic.roc <- abs(apply(Zsim.roc,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=zLo.roc)) + apply(Zsim.roc,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=zHi.roc)
# +1 corrections give the standard Monte-Carlo p-value (never exactly 0).
pvalue <- c(length(expectedstatistic[expectedstatistic > observedStatistic])+1)/c(nsim+1)
pvalue.roc <- c(length(expectedstatistic.roc[expectedstatistic.roc > observedStatistic.roc])+1)/c(nsim+1)
# Results
result <- data.frame(calBP=observed$grid$calBP,PrDens=finalSPD,lo=lo,hi=hi)
result.roc <- data.frame(calBP=timeSequence,roc=obs.roc,lo.roc=lo.roc,hi.roc=hi.roc) #time is midpoint of transition
predgrid <- subset(predgrid,calBP<=timeRange[1]&calBP>=timeRange[2])
# Drop the bulky simulation matrices unless the caller asked for them.
if(raw==FALSE){ sim <- NA; sim.roc<-NA }
res <- list(result=result,result.roc=result.roc, sim=sim, sim.roc=sim.roc, pval=pvalue, pval.roc=pvalue.roc, fit=predgrid, fitobject=fit,nbins=length(unique(bins)),n=nrow(x$metadata),nsim=nsim,backsight=backsight)
class(res) <- "SpdModelTest"
if (verbose){ print("Done.") }
return(res)
}
#' @title Random mark permutation test for SPDs
#'
#' @description Global and local significance test for comparing shapes of multiple SPDs using random permutations.
#'
#' @param x A \code{CalDates} class object containing the calibrated radiocarbon dates.
#' @param marks A numerical or character vector containing the marks associated to each radiocarbon date.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP.
#' @param backsight A single numeric value defining the distance in time between the focal year and the backsight year for computing the rate of change. Default is 10.
#' @param changexpr An expression for calculating the rate of change in SPD between the focal year and a backsight year. Available input options are t1 (the SPD for the focal year), t0 (the SPD for the backsight year), d (the distance between t0 and t1), and any other standard constants and mathematical operators. A sensible default is provided.
#' @param bins A vector indicating which bin each radiocarbon date is assigned to.
#' @param nsim Number of random permutations
#' @param runm A number indicating the window size of the moving average to smooth the SPD. If set to \code{NA} no moving average is applied. Default is NA.
#' @param datenormalised If set to TRUE the total probability mass of each calibrated date will be made to sum to unity (the default in most radiocarbon calibration software). This argument will only have an effect if the dates in \code{x} were calibrated without normalisation (via normalised=FALSE in the \code{\link{calibrate}} function), in which case setting \code{datenormalised=TRUE} here will rescale each dates probability mass to sum to unity before aggregating the dates, while setting \code{datenormalised=FALSE} will ensure unnormalised dates are used for both observed and simulated SPDs. Default is FALSE.
#' @param spdnormalised A logical variable indicating whether the total probability mass of the SPD is normalised to sum to unity.
#' @param raw A logical variable indicating whether all permuted SPDs should be returned or not. Default is FALSE.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#'
#' @details The function generates a distribution of expected SPDs by randomly shuffling the marks assigned to each \emph{bin} (see \code{\link{spd}} for details on binning). The resulting distribution of probabilities for each \emph{mark} (i.e. group of dates) for each calendar year is z-transformed, and a 95\% simulation envelope is computed. Local significant departures are defined as instances where the observed SPD (which is also z-transformed) is outside such envelope. A global significance is also computed by comparing the total "area" outside the simulation envelope in the observed and simulated data.
#'
#' @return An object of class \code{SpdPermTest} with the following elements
#' \itemize{
#' \item{\code{observed}} {A list containing data.frames with the summed probability (column \emph{PrDens}) for each calendar year (column \emph{calBP}) for each mark/group}
#' \item{\code{envelope}} {A list containing matrices with the lower and upper bound values of the simulation envelope for each mark/group}
#' \item{\code{pValueList}} {A list of p-value associated with each mark/group}
#' }
#'
#' @references
#' Crema, E.R., Habu, J., Kobayashi, K., Madella, M., (2016). Summed Probability Distribution of 14 C Dates Suggests Regional Divergences in the Population Dynamics of the Jomon Period in Eastern Japan. PLOS ONE 11, e0154809. doi:10.1371/journal.pone.0154809
#'
#' @examples
#' ## compare demographic trajectories in Netherlands and Denmark
#' \dontrun{
#' data(euroevol)
#' nld.dnk = subset(euroevol,Country=="Netherlands"|Country=="Denmark")
#' bins = binPrep(nld.dnk$SiteID,nld.dnk$C14Age,h=200)
#' dates = calibrate(nld.dnk$C14Age,nld.dnk$C14SD,normalised=FALSE)
#' res = permTest(dates,marks=as.character(nld.dnk$Country),nsim=1000,
#' bins=bins,runm=200,timeRange=c(10000,4000))
#' round(res$pValueList,4) #extract p-values
#' summary(res)
#' par(mfrow=c(2,1))
#' plot(res,focalm="Netherlands",main="Netherlands")
#' plot(res,focalm="Denmark",main="Denmark")
#' }
#' @import utils
#' @import stats
#' @export
permTest <- function(x, marks, timeRange, backsight=10,changexpr=expression((t1/t0)^(1/d)-1),nsim, bins=NA, runm=NA, datenormalised=FALSE, spdnormalised=FALSE, raw=FALSE, verbose=TRUE){
if (is.na(bins[1])){ bins <- as.character(1:nrow(x$metadata)) }
marks <- as.character(marks)
binNames <- unique(bins)
calyears <- data.frame(calBP=seq(timeRange[1], timeRange[2],-1))
binnedMatrix <- matrix(nrow=nrow(calyears), ncol=length(binNames))
GroupList <- vector()
if (verbose & length(binNames)>1){
print("Summing observed groups...")
flush.console()
pb <- txtProgressBar(min=1, max=length(binNames), style=3)
}
caldateTR <- as.numeric(x$metadata[1,c("StartBP","EndBP")])
caldateyears <- seq(caldateTR[1],caldateTR[2],-1)
check <- caldateTR[1] >= timeRange[1] & caldateTR[2] <= timeRange[2]
## Observed SPDs
for (b in 1:length(binNames)){
if (verbose & length(binNames)>1){ setTxtProgressBar(pb, b) }
index <- which(bins==binNames[b])
if (length(x$calmatrix)>1){
if (!check){
stop("The time range of the calibrated dataset must be at least as large as the spd time range.")
} else {
tmp <- x$calmatrix[,index, drop=FALSE]
if (datenormalised){
tmp <- apply(tmp,2,FUN=function(x) x/sum(x))
}
spdtmp <- rowSums(tmp)
if (length(binNames)>1){
spdtmp <- spdtmp / length(index)
}
binnedMatrix[,b] <- spdtmp[caldateyears<=timeRange[1] & caldateyears>=timeRange[2]]
}
} else {
slist <- x$grids[index]
slist <- lapply(slist,FUN=function(x) merge(calyears,x, all.x=TRUE))
slist <- rapply(slist, f=function(x) ifelse(is.na(x),0,x), how="replace")
slist <- lapply(slist, FUN=function(x) x[with(x, order(-calBP)), ])
tmp <- lapply(slist,`[`,2)
if (datenormalised){
outofTR <- lapply(tmp,sum)==0 # date out of range
tmpc <- tmp[!outofTR]
if (length(tmpc)>0){
tmp <- lapply(tmpc,FUN=function(x) x/sum(x))
}
}
if (length(binNames)>1){
spdtmp <- Reduce("+", tmp) / length(index)
} else {
spdtmp <- Reduce("+", tmp)
}
binnedMatrix[,b] <- spdtmp[,1]
}
GroupList[b] <- marks[index][1]
}
if (verbose & length(binNames)>1){ close(pb) }
observedSPD <- vector("list",length=length(unique(GroupList)))
names(observedSPD) <- unique(GroupList)
for (d in 1:length(unique(GroupList))){
focus <- unique(GroupList)[d]
index <- which(GroupList==focus)
tmpSPD <- apply(binnedMatrix[,index,drop=FALSE], 1, sum)
if (!is.na(runm)){
tmpSPD <- runMean(tmpSPD, runm, edge="fill")
}
if (d==1){
dall <- tmpSPD
} else {
dall <- dall+tmpSPD
}
if (spdnormalised){ tmpSPD <- tmpSPD / sum(tmpSPD) }
observedSPD[[d]] <- data.frame(calBP=calyears, PrDens=tmpSPD)
}
## Permutations
simulatedSPD <- vector("list",length=length(unique(GroupList)))
for (d in 1:length(unique(GroupList))){
simulatedSPD[[d]] <- matrix(NA, nrow=nrow(calyears), ncol=nsim)
}
if (verbose){
print("Permuting the groups...")
flush.console()
pb <- txtProgressBar(min=1, max=nsim, style=3)
}
for (s in 1:nsim){
if (verbose){ setTxtProgressBar(pb, s) }
simGroupList <- sample(GroupList)
for (d in 1:length(unique(simGroupList))){
focus <- unique(GroupList)[d]
index <- which(simGroupList==focus)
tmpSPD <- apply(binnedMatrix[,index,drop=FALSE],1,sum)
if (!is.na(runm)){
tmpSPD <- runMean(tmpSPD, runm, edge="fill")
}
if (d==1){
dall <- tmpSPD
} else {
dall <- dall+tmpSPD
}
if (spdnormalised){ tmpSPD <- tmpSPD/sum(tmpSPD) }
simulatedSPD[[d]][,s] <- tmpSPD
}
}
names(simulatedSPD) <- unique(GroupList)
if (verbose){ close(pb) }
#compute rates of change
timeSequence = timeRange[1]:timeRange[2]
foo = function(spd,backsight,timeSequence,changexpr)
{
obs=rep(NA,length(timeSequence))
for (i in 1:c(length(obs)-backsight))
{
d=backsight
t0 = spd[i]
t1 = spd[i+backsight]
obs[i+backsight] = eval(changexpr)
if (t1==0|t0==0){obs[i+backsight]=NA}
}
return(obs)
}
observedROC = simulatedROC = vector("list",length=length(observedSPD))
for (i in 1:length(observedSPD))
{
tmp=foo(observedSPD[[i]][,2],backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
observedROC[[i]] = data.frame(calBP=timeSequence,roc=tmp)
names(observedROC)[[i]]=names(observedSPD)[[i]]
simulatedROC[[i]] = apply(simulatedSPD[[i]],2,foo,backsight=backsight,timeSequence=timeSequence,changexpr=changexpr)
}
names(simulatedROC) <- unique(GroupList)
## Simulation Envelope
simulatedCIlist = simulatedCIlist.roc = vector("list",length=length(unique(GroupList)))
for (d in 1:length(unique(GroupList))){
simulatedCIlist[[d]] <- cbind(apply(simulatedSPD[[d]],1,quantile,prob=c(0.025),na.rm=TRUE), apply(simulatedSPD[[d]],1,quantile,prob=c(0.975),na.rm=TRUE))
simulatedCIlist.roc[[d]] <- cbind(apply(simulatedROC[[d]],1,quantile,prob=c(0.025),na.rm=TRUE), apply(simulatedROC[[d]],1,quantile,prob=c(0.975),na.rm=TRUE))
names(simulatedCIlist) <- unique(GroupList)
names(simulatedCIlist.roc) <- unique(GroupList)
}
## Compute Global P-value
pValueList = pValueList.roc = numeric(length=length(simulatedSPD))
for (a in 1:length(simulatedSPD)){
zscoreMean <- apply(simulatedSPD[[a]],1,mean)
zscoreSD <- apply(simulatedSPD[[a]],1,sd)
zscoreMean.roc <- apply(simulatedROC[[a]],1,mean,na.rm=TRUE)
zscoreSD.roc <- apply(simulatedROC[[a]],1,sd,na.rm=TRUE)
tmp.sim <- t(apply(simulatedSPD[[a]],1,function(x){ return((x - mean(x))/sd(x)) }))
tmp.sim.roc <- t(apply(simulatedROC[[a]],1,function(x){ return((x - mean(x))/sd(x)) }))
# tmp.sim[is.na(tmp.sim)] <- 0
tmp.obs <- observedSPD[[a]]
tmp.obs[,2] <- (tmp.obs[,2] - zscoreMean) / zscoreSD
# tmp.obs[is.na(tmp.obs[,2]),2] <- 0
tmp.obs.roc <- observedROC[[a]]
tmp.obs.roc[,2] <- (tmp.obs.roc[,2] - zscoreMean.roc) / zscoreSD.roc
tmp.ci <- t(apply(tmp.sim,1, quantile, prob=c(0.025,0.975),na.rm=TRUE))
tmp.ci.roc <- t(apply(tmp.sim.roc,1, quantile, prob=c(0.025,0.975),na.rm=TRUE))
expectedstatistic <- abs(apply(tmp.sim,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=tmp.ci[,1])) + apply(tmp.sim,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=tmp.ci[,2])
expectedstatistic.roc <- abs(apply(tmp.sim.roc,2,function(x,y){a=x-y;i=which(a<0);return(sum(a[i]))},y=tmp.ci.roc[,1])) + apply(tmp.sim.roc,2,function(x,y){a=x-y;i=which(a>0);return(sum(a[i]))},y=tmp.ci.roc[,2])
lower <- tmp.obs[,2] - tmp.ci[,1]
indexLow <- which(tmp.obs[,2] < tmp.ci[,1])
higher <- tmp.obs[,2] - tmp.ci[,2]
indexHi <- which(tmp.obs[,2] > tmp.ci[,2])
lower.roc <- tmp.obs.roc[,2] - tmp.ci.roc[,1]
indexLow.roc <- which(tmp.obs.roc[,2] < tmp.ci.roc[,1])
higher.roc <- tmp.obs.roc[,2] - tmp.ci.roc[,2]
indexHi.roc <- which(tmp.obs.roc[,2] > tmp.ci.roc[,2])
observedStatistic <- sum(abs(lower[indexLow]))+sum(higher[indexHi])
observedStatistic.roc <- sum(abs(lower.roc[indexLow.roc]))+sum(higher.roc[indexHi.roc])
pValueList[[a]] <- 1
pValueList.roc[[a]] <- 1
if (observedStatistic>0){
pValueList[[a]] <-c(length(expectedstatistic[expectedstatistic > observedStatistic])+1)/c(nsim+1)
}
if (observedStatistic.roc>0){
pValueList.roc[[a]] <-c(length(expectedstatistic.roc[expectedstatistic.roc > observedStatistic.roc])+1)/c(nsim+1)
}
names(pValueList) <- unique(GroupList)
}
res <- list(observed=observedSPD, envelope=simulatedCIlist, pValueList=pValueList)
res$observed.roc=observedROC
res$envelope.roc=simulatedCIlist.roc
res$pValueList.roc=pValueList.roc
metadata=vector("list",length=length(unique(GroupList)))
names(metadata) = unique(GroupList)
for (k in 1:length(unique(GroupList)))
{
i=unique(GroupList)[k]
tmp = c(sum(marks%in%i),length(unique(bins[which(marks%in%i)])))
metadata[[k]]=tmp
}
res$nsim = nsim
res$metadata = metadata
res$backsight = backsight
if (raw)
{
res$raw <- simulatedSPD
res$raw.row <- simulatedROC
}
class(res) <- "SpdPermTest"
if (verbose){ print("Done.") }
return(res)
}
#' @title Spatial Permutation Test of summed probability distributions.
#' @description This function is deprecated. Please use \code{\link{sptest}} instead.
#' @return A \code{spatialTest} class object
#' @name SPpermTest-deprecated
#' @usage SPpermTest(calDates, timeRange, bins, locations, breaks,
#' spatialweights,rate=expression((t2/t1)^(1/d)-1), nsim=1000, runm=NA,permute="locations",
#' ncores=1,datenormalised=FALSE,verbose=TRUE,raw=FALSE)
#' @seealso \code{\link{rcarbon-deprecated}}
#' @keywords internal
NULL
#' @rdname rcarbon-deprecated
#' @section \code{SPpermTest}:
#' For \code{SPpermTest}, use \code{\link{sptest}}.
#' @export
SPpermTest<-function(calDates, timeRange, bins, locations, breaks, spatialweights, rate=expression((t2/t1)^(1/d)-1),nsim=1000, runm=NA,permute="locations",ncores=1,datenormalised=FALSE,verbose=TRUE,raw=FALSE)
{
  # Deprecated alias kept for backward compatibility: emit the standard
  # deprecation warning, then hand every argument through to sptest() unchanged.
  .Deprecated("sptest")
  sptest(calDates = calDates,
         timeRange = timeRange,
         bins = bins,
         locations = locations,
         breaks = breaks,
         spatialweights = spatialweights,
         rate = rate,
         nsim = nsim,
         runm = runm,
         permute = permute,
         ncores = ncores,
         datenormalised = datenormalised,
         verbose = verbose,
         raw = raw)
}
#' @title Spatial Permutation Test of summed probability distributions.
#'
#' @description This function carries out local spatial permutation tests of summed radiocarbon probability distributions in order to detect local deviations in growth rates (Crema et al 2017).
#'
#' @param calDates A \code{CalDates} class object.
#' @param timeRange A vector of length 2 indicating the start and end date of the analysis in cal BP
#' @param bins A vector indicating which bin each radiocarbon date is assigned to. Must have the same length as the number of radiocarbon dates. Can be created using the \code{\link{binPrep}} function. Bin names should follow the format "x_y", where x refers to a unique location (e.g. a site) and y is an integer value (e.g. "S023_1", "S023_2","S034_1", etc.).
#' @param locations A \code{SpatialPoints} or a \code{SpatialPointsDataFrame} class object. Rownames of each point should match the first part of the bin names supplied (e.g. "S023","S034")
#' @param breaks A vector of break points for defining the temporal slices.
#' @param spatialweights A \code{spatialweights} class object defining the spatial weights between the locations (cf. \code{\link{spweights}})
#' @param rate An expression defining how the rate of change is calculated, where \code{t1} is the summed probability for a focal block, \code{t2} is the summed probability for next block, and \code{d} is the duration of the blocks. Default is a geometric growth rate (i.e \code{expression((t2/t1)^(1/d)-1)}).
#' @param nsim The total number of simulations. Default is 1000.
#' @param runm The window size of the moving window average. Must be set to \code{NA} if the rates of change are calculated from the raw SPDs.
#' @param permute Indicates whether the permutations should be based on the \code{"bins"} or the \code{"locations"}. Default is \code{"locations"}.
#' @param ncores Number of cores used for for parallel execution. Default is 1.
#' @param datenormalised A logical variable indicating whether the probability mass of each date within \code{timeRange} is equal to 1. Default is FALSE.
#' @param verbose A logical variable indicating whether extra information on progress should be reported. Default is TRUE.
#' @param raw A logical variable indicating whether permuted sets of geometric growth rates for each location should be returned. Default is FALSE.
#'
#' @details The function consists of the following seven steps: 1) for each location (e.g. a site) generate a local SPD of radiocarbon dates, weighting the contribution of dates from neighbouring sites using a weight scheme provided by the \code{spatialweights} class object; 2) define temporal slices (using \code{breaks} as break values), then compute the total probability mass within each slice; 3) compute the rate of change between abutting temporal slices by using the formula: \eqn{(SPD_{t}/SPD_{t+1}^{1/\Delta t}-1)}; 4) randomise the location of individual bins or the entire sequence of bins associated with a given location and carry out steps 1 to 3; 5) repeat step 4 \code{nsim} times and generate, for each location, a distribution of growth rates under the null hypothesis (i.e. spatial independence); 6) compare, for each location, the observed growth rate with the distribution under the null hypothesis and compute the p-values; and 7) compute the false-discovery rate for each location.
#'
#' @return A \code{spatialTest} class object
#'
#' @references
#' Crema, E.R., Bevan, A., Shennan, S. (2017). Spatio-temporal approaches to archaeological radiocarbon dates. Journal of Archaeological Science, 87, 1-9.
#'
#' @seealso \code{\link{permTest}} for a non-spatial permutation test; \code{\link{plot.spatialTest}} for plotting; \code{\link{spweights}} for computing spatial weights; \code{\link{spd2rc}} for computing geometric growth rates.
#'
#' @examples
#' ## Reproduce Crema et al 2017 ##
#'\dontrun{
#' data(euroevol) #load data
#'
#' ## Subset only for 8000 to 5000 Cal BP (c7200-4200 C14BP)
#' edge=800
#' timeRange=c(8000,5000)
#' euroevol2=subset(euroevol,C14Age<=c(timeRange[1]-edge)&C14Age>=c(timeRange[2]-edge))
#'
#' ## define chronological breaks
#' breaks=seq(8000,5000,-500)
#'
#' ## Create a SpatialPoints class object
#' library(sp)
#' sites = unique(data.frame(SiteID=euroevol2$SiteID,
#' Longitude=euroevol2$Longitude,Latitude=euroevol2$Latitude))
#' locations=data.frame(Longitude=sites$Longitude,Latitude=sites$Latitude)
#' rownames(locations)=sites$SiteID
#' coordinates(locations)<-c("Longitude","Latitude")
#' proj4string(locations)<- CRS("+proj=longlat +datum=WGS84")
#'
#' ## Compute Distance and Spatial Weights
#' distSamples=spDists(locations,locations,longlat = TRUE)
#' spatialweights=spweights(distSamples,h=100) #using a kernal bandwidth of 100km
#'
#' ## Calibration and binning
#' bins=binPrep(sites=euroevol2$SiteID,ages=euroevol2$C14Age,h=200)
#' calDates=calibrate(x=euroevol2$C14Age,errors=euroevol2$C14SD,
#' timeRange=timeRange,normalised=FALSE)
#'
#' ## Main Analysis (over 2 cores; requires doSnow package)
#' ## NOTE: the number of simulations should be ideally larger
#' ## to ensure a better resolution of the p/q-values.
#' res.locations=sptest(calDates,timeRange=timeRange,bins=bins,locations=locations,
#' spatialweights=spatialweights,breaks=breaks,ncores=2,nsim=100,
#' permute="locations",datenormalised=FALSE)
#'
#' ## Plot results
#' library(rworldmap)
#' base=getMap(resolution="low") #optionally add base map
#' #retrieve coordinate limits#
#' xrange=bbox(res.locations$locations)[1,]
#' yrange=bbox(res.locations$locations)[2,]
#'
#' par(mfrow=c(2,2))
#' par(mar=c(0.1,0.1,0,0.5))
#' plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
#' plot(res.locations,index=4,add=TRUE,legend=TRUE,option="raw",breakRange=c(-0.005,0.005))
#' par(mar=c(0.1,0.1,0,0.5))
#' plot(base,col="antiquewhite3",border="antiquewhite3",xlim=xrange,ylim=yrange)
#' plot(res.locations,index=4,add=TRUE,legend=TRUE,option="test")
#' }
#' @import utils
#' @import stats
#' @import doSNOW
#' @import snow
#' @import foreach
#' @import iterators
#' @import sp
#' @export
sptest<-function(calDates, timeRange, bins, locations, breaks, spatialweights, rate=expression((t2/t1)^(1/d)-1),nsim=1000, runm=NA,permute="locations",ncores=1,datenormalised=FALSE,verbose=TRUE,raw=FALSE)
{
  #######################
  #### Load Packages ####
  #######################
  # Fall back to single-core execution when doSNOW is not installed.
  if (ncores > 1 && !requireNamespace('doSNOW', quietly = TRUE)){
    warning("the doSNOW package is required for multi-core processing; ncores has been set to 1")
    ncores = 1
  }
  ##################################
  #### Initial warning messages ####
  ##################################
  if (!"CalDates" %in% class(calDates)){
    stop("calDates must be an object of class 'calDates'.")
  }
  if (length(bins) > 1){
    if (any(is.na(bins))){
      stop("Cannot have NA values in bins.")
    }
    if (length(bins) != length(calDates$grid)){
      stop("bins (if provided) must be the same length as x.")
    }
  } else {
    # no binning requested: place every date in a single dummy bin
    bins <- rep("0_0", length(calDates$grid))
  }
  if (!("SpatialPoints" %in% class(locations)[1] || "SpatialPointsDataFrame" %in% class(locations)[1])){
    stop("locations must be an object of class 'SpatialPoints' or 'SpatialPointsDataFrame'.")
  }
  locations.id = row.names(locations@coords)
  if (is.null(locations.id))
  {
    stop("locations must have rownames")
  }
  if (!all(range(timeRange) == range(breaks)))
  {
    # (fix) error message previously read "must much match"
    stop("Range of breaks values must match the temporal range defined by timeRange")
  }
  if (length(unique(abs(diff(breaks)))) != 1)
  {
    stop("Unequal break intervals is not supported")
  }
  if (ncores > 1 && raw == TRUE)
  {
    warning("raw==TRUE available only for ncores=1")
    raw = FALSE
  }
  #############################
  #### Create binnedMatrix ####
  #############################
  # One column per bin, one row per calendar year within timeRange; each
  # column holds the (optionally normalised) mean probability of its dates.
  binNames <- unique(bins)
  calyears <- data.frame(calBP = seq(timeRange[1], timeRange[2], -1))
  binnedMatrix <- matrix(NA, nrow = nrow(calyears), ncol = length(binNames))
  if (verbose && length(binNames) > 1){
    print("Binning by site/phase...")
    flush.console()
    pb <- txtProgressBar(min = 1, max = length(binNames), style = 3, title = "Binning by site/phase...")
  }
  for (b in 1:length(binNames)){
    if (verbose && length(binNames) > 1){ setTxtProgressBar(pb, b) }
    index <- which(bins == binNames[b])
    slist <- calDates$grid[index]
    # align every calibrated grid on the full calyears axis, zero-filling gaps
    slist <- lapply(slist, FUN = function(x) merge(calyears, x, all.x = TRUE))
    slist <- rapply(slist, f = function(x) ifelse(is.na(x), 0, x), how = "replace")
    slist <- lapply(slist, FUN = function(x) x[with(x, order(-calBP)), ])
    tmp <- lapply(slist, `[`, 2)
    if (datenormalised){
      tmp <- lapply(tmp, FUN = function(x) {if(sum(x)!=0){return(x/sum(x))}else{return(x)}})
    }
    if (length(binNames) > 1){
      spd.tmp <- Reduce("+", tmp) / length(index)
    } else {
      spd.tmp <- Reduce("+", tmp)
    }
    binnedMatrix[,b] <- spd.tmp[,1]
  }
  if (verbose && length(binNames) > 1){ close(pb) }
  ################################
  ### Observed Data Subroutine ###
  ################################
  ## Aggregate by Locations ##
  # bin names follow the "<site>_<n>" convention: the site id is the part
  # before the first underscore
  origins = unlist(lapply(strsplit(binNames, "_"), function(x){x[[1]]}))
  if (!all(origins %in% locations.id))
  {
    stop("Missing bins or locations")
  }
  resMatrix = matrix(NA, nrow = length(unique(locations.id)), ncol = nrow(binnedMatrix))
  for (x in 1:length(unique(locations.id)))
  {
    index = which(origins == unique(locations.id)[x])
    if(length(index) > 1)
    {resMatrix[x,] = apply(binnedMatrix[,index], 1, sum)}
    if(length(index) == 1)
    {resMatrix[x,] = binnedMatrix[,index]}
  }
  ## Aggregate by breaks ##
  nBreaks = length(breaks) - 1
  obsMatrix = matrix(NA, nrow = length(unique(locations.id)), ncol = nBreaks)
  timeSequence = timeRange[1]:timeRange[2]
  for (x in 1:nBreaks)
  {
    index = which(timeSequence <= breaks[x] & timeSequence > breaks[x+1])
    obsMatrix[,x] = apply(resMatrix[,index], 1, sum)
  }
  ## Apply SpatialWeights ##
  obsGridVal = t(spatialweights$w) %*% obsMatrix
  ## Compute Rate of Change ##
  rocaObs = t(apply(obsGridVal, 1, function(x, d, rate){
    L = length(x)
    res = numeric(length = L-1)
    for (i in 1:c(L-1))
    {
      t2 = x[i+1]
      t1 = x[i]
      res[i] = eval(rate)
      # res[i]=(x[i+1]/x[i])^(1/d)-1
      # If no spd for both period the rate is NA
      if (x[i+1] == 0 | x[i] == 0)
      {
        res[i] = NA
      }
    }
    return(res)},
    d = abs(breaks[2] - breaks[1]),
    rate = rate))
  # if single transition transpose matrix:
  if (nBreaks == 2){rocaObs = t(rocaObs)}
  ##############################
  ### Permutation Subroutine ###
  ##############################
  opts = NULL
  if (ncores > 1)
  {
    cl <- snow::makeCluster(ncores)
    registerDoSNOW(cl)
    if (verbose)
    {
      print(paste("Running permutation test in parallel on ", getDoParWorkers(), " workers...", sep=""))
      # (fix) the bar's upper bound was length(x); x is a stale loop variable
      # at this point -- the bar runs over the nsim permutations
      pb <- txtProgressBar(min = 0, max = nsim, style = 3)
      progress <- function(n) setTxtProgressBar(pb, n)
      opts <- list(progress = progress)
    }
    # elementwise sum of the three hi/lo/eq count matrices across workers
    sumcombine <- function(a, b)
    {
      list(a[[1]]+b[[1]], a[[2]]+b[[2]], a[[3]]+b[[3]])
    }
    resultHiLoEq <- foreach (x = 1:nsim, .combine = sumcombine, .options.snow = opts) %dopar% {
      simGridVal <- matrix(NA, nrow = nrow(spatialweights$w), ncol = nBreaks)
      ## Aggregate by Site ##
      simResMatrix = matrix(0, nrow = length(unique(locations.id)), ncol = nrow(binnedMatrix))
      ## Randomly assign bins to locations.id ##
      if (permute == "bins")
      {
        simOrigins = sample(origins)
        for (x in 1:length(unique(locations.id)))
        {
          index = which(simOrigins == unique(locations.id)[x])
          if(length(index) > 1)
          {simResMatrix[x,] = apply(binnedMatrix[,index], 1, sum)}
          if(length(index) == 1)
          {simResMatrix[x,] = binnedMatrix[,index]}
        }
        ## Aggregate by breaks ##
        aggMatrix = matrix(NA, nrow = length(unique(locations.id)), ncol = nBreaks)
        for (x in 1:nBreaks)
        {
          index = which(timeSequence <= breaks[x] & timeSequence > breaks[x+1])
          aggMatrix[,x] = apply(simResMatrix[,index], 1, sum)
        }
        ## Apply Weights ##
        simGridVal = t(spatialweights$w) %*% aggMatrix
      }
      if (permute == "locations")
      {
        simMatrix = obsMatrix[sample(nrow(obsMatrix)),]
        simGridVal = t(spatialweights$w) %*% simMatrix
      }
      ## Compute Rate of Change ##
      rocaSim = t(apply(simGridVal, 1, function(x, d, rate){
        L = length(x)
        res = numeric(length = L-1)
        for (i in 1:c(L-1))
        {
          t2 = x[i+1]
          t1 = x[i]
          res[i] = eval(rate)
          # res[i]=(x[i+1]/x[i])^(1/d)-1
          if (x[i+1] == 0 | x[i] == 0)
          {
            res[i] = NA
          }
        }
        return(res)},
        d = abs(breaks[2] - breaks[1]),
        rate = rate))
      lo = rocaObs < rocaSim
      hi = rocaObs > rocaSim
      eq = rocaObs == rocaSim
      return(list(hi, lo, eq))
    }
    snow::stopCluster(cl)
    hi = resultHiLoEq[[1]]
    lo = resultHiLoEq[[2]]
    eq = resultHiLoEq[[3]]
  } else {
    hi = matrix(0, nrow = nrow(spatialweights$w), ncol = nBreaks-1)
    lo = matrix(0, nrow = nrow(spatialweights$w), ncol = nBreaks-1)
    eq = matrix(0, nrow = nrow(spatialweights$w), ncol = nBreaks-1)
    if(verbose){
      print("Permutation test...")
      flush.console()
    }
    if(raw)
    {
      rocaSimAll = array(NA, dim = c(nsim, nrow(spatialweights$w), nBreaks-1))
    }
    if(verbose){pb <- txtProgressBar(min = 1, max = nsim, style = 3)}
    for (s in 1:nsim)
    {
      if(verbose){setTxtProgressBar(pb, s)}
      simGridVal <- matrix(NA, nrow = nrow(spatialweights$w), ncol = nBreaks)
      ## Aggregate by Site ##
      simResMatrix = matrix(0, nrow = length(unique(locations.id)), ncol = nrow(binnedMatrix))
      ## Randomly assign bins to locations
      if (permute == "bins")
      {
        simOrigins = sample(origins)
        for (x in 1:length(unique(locations.id)))
        {
          index = which(simOrigins == unique(locations.id)[x])
          if(length(index) > 1)
          {simResMatrix[x,] = apply(binnedMatrix[,index], 1, sum)}
          if(length(index) == 1)
          {simResMatrix[x,] = binnedMatrix[,index]}
        }
        ##Aggregate by breaks##
        aggMatrix = matrix(NA, nrow = length(unique(locations.id)), ncol = nBreaks)
        for (x in 1:nBreaks)
        {
          index = which(timeSequence <= breaks[x] & timeSequence > breaks[x+1])
          aggMatrix[,x] = apply(simResMatrix[,index], 1, sum)
        }
        ##Apply Weights
        simGridVal = t(spatialweights$w) %*% aggMatrix
      }
      if (permute == "locations")
      {
        simMatrix = obsMatrix[sample(nrow(obsMatrix)),]
        simGridVal = t(spatialweights$w) %*% simMatrix
      }
      ##Compute Rate of Change
      rocaSim = t(apply(simGridVal, 1, function(x, d, rate){
        L = length(x)
        res = numeric(length = L-1)
        for (i in 1:c(L-1))
        {
          t2 = x[i+1]
          t1 = x[i]
          res[i] = eval(rate)
          # res[i]=(x[i+1]/x[i])^(1/d)-1
          if (x[i+1] == 0 | x[i] == 0)
          {
            res[i] = NA
          }
        }
        return(res)},
        d = abs(breaks[2] - breaks[1]),
        rate = rate))
      # if single transition transpose matrix:
      if (nBreaks == 2) {rocaSim = t(rocaSim)}
      hi = hi + (rocaObs > rocaSim)
      lo = lo + (rocaObs < rocaSim)
      eq = eq + (rocaObs == rocaSim)
      if(raw){rocaSimAll[s,,] = rocaSim}
    }
    if(verbose){close(pb)}
  }
  ############################
  ### Compute Significance ###
  ############################
  # one-sided permutation p-values per location/transition, then a two-sided
  # p-value by doubling the smaller tail (capped at 1)
  pvalHi = (lo + eq + 1)/c(nsim + 1)
  pvalLo = (hi + eq + 1)/c(nsim + 1)
  pval = pvalHi
  pval[which(pvalHi > pvalLo)] = pvalLo[which(pvalHi > pvalLo)]
  pval = pval*2
  if (max(pval, na.rm = TRUE) > 1)
  {
    pval[which(pval > 1)] = 1
  }
  ## Compute False Discovery Rate ##
  qvalHi = apply(pvalHi, 2, function(x){return(p.adjust(x, method = "fdr"))})
  qvalLo = apply(pvalLo, 2, function(x){return(p.adjust(x, method = "fdr"))})
  qval = apply(pval, 2, function(x){return(p.adjust(x, method = "fdr"))})
  metadata = data.frame(npoints = length(unique(locations.id)), ndates = nrow(calDates$metadata), nbins = length(binNames), nsim = nsim, permutationType = permute, datenormalised = datenormalised, breaks = nBreaks, timeRange = paste(timeRange[1], "-", timeRange[2], sep = ""), weights.h = spatialweights$h, weights.kernel = spatialweights$kernel)
  if(raw == FALSE){rocaSimAll = NA}
  reslist = list(metadata = metadata, rocaSim = rocaSimAll, rocaObs = rocaObs, pval = pval, pvalHi = pvalHi, pvalLo = pvalLo, qval = qval, qvalLo = qvalLo, qvalHi = qvalHi, locations = locations)
  class(reslist) <- append(class(reslist), "spatialTest")
  return(reslist)
}
#' @title Point to point test of SPD
#'
#' @description Test for evaluating the difference in the summed probability values associated with two points in time.
#'
#' @param x result of \code{\link{modelTest}} with raw=TRUE.
#' @param p1 calendar year (in BP) of start point.
#' @param p2 calendar year (in BP) of end point.
#' @param interactive if set to TRUE enables an interactive selection of p1 and p2 from a graphical display of the SPD. Disabled when \code{p1} and \code{p2} are defined.
#' @param plot if set to TRUE the function plots the location of p1 and p2 on the SPD. Default is FALSE.
#'
#' @details The function compares observed differences in the summed probability values associated with two points in time against a distribution of expected values under the null hypothesis defined with the \code{\link{modelTest}} function. The two points can be specified manually (assigning BP dates to the arguments \code{p1} and \code{p2}) or interactively (clicking on a SPD plot). Note that \code{\link{modelTest}} should be executed setting the argument \code{raw} to \code{TRUE} (default is \code{FALSE}).
#'
#'
#' @return A list with: the BP dates for the two points and the p-value obtained from a two-sided test.
#'
#' @references
#' Edinborough, K., Porcic, M., Martindale, A., Brown, T.J., Supernant, K., Ames, K.M., (2017). Radiocarbon test for demographic events in written and oral history. PNAS 201713012. doi:10.1073/pnas.1713012114
#' @examples
#' ## Example with Younger Dryas period Near East, including site bins
#' \dontrun{
#' data(emedyd)
#' caldates <- calibrate(x=emedyd$CRA, errors=emedyd$Error, normalised=FALSE)
#' bins <- binPrep(sites=emedyd$SiteName, ages=emedyd$CRA, h=50)
#' nsim=10 #toy example
#' expnull <- modelTest(caldates, errors=emedyd$Error, bins=bins, nsim=nsim, runm=50,
#' timeRange=c(16000,9000), model="exponential", datenormalised=FALSE, raw=TRUE)
#' p2pTest(x=expnull,p1=13000,p2=12500) #non-interactive mode
#' p2pTest(x=expnull) #interactive mode
#' }
#' @seealso \code{\link{modelTest}}.
#' @import utils
#' @import stats
#' @export
p2pTest <- function(x,p1=NA,p2=NA,interactive=TRUE,plot=FALSE)
{
  # x must come from modelTest(..., raw=TRUE) so that x$sim holds the
  # simulated SPD matrix (one column per simulation).
  if (is.na(x$sim[1]))
  {
    stop("x should be an SpdModelTest class object produced using modelTest() with raw=TRUE")
  }
  if (!is.na(p1) && !is.na(p2))
  {
    if (p2 > p1){stop("the end point should be more recent than the start point")}
    # (fix) validate the supplied points before any indexing/plotting so the
    # user gets a clear error instead of a failed subscript further down
    if (!p1 %in% x$result$calBP || !p2 %in% x$result$calBP)
    {
      stop("p1 and p2 should be within the temporal range of the spd")
    }
    interactive = FALSE
  }
  if (interactive)
  {
    # let the user click the two points on a plot of the SPD
    plot(x)
    print("select start point")
    p1 = round(locator(n=1)$x[1])
    index1 = match(p1, x$result$calBP)
    p1.y = x$result[index1,2]
    points(p1, p1.y, pch=20)
    print("select end point")
    p2 = round(locator(n=1)$x[1])
    if (p2 > p1)
    {
      print("the end point should be more recent than the start point")
      while(p2 > p1)
      {
        print("select end point")
        p2 = round(locator(n=1)$x[1])
      }
    }
    index2 = match(p2, x$result$calBP)
    p2.y = x$result[index2,2]
    points(p2, p2.y, pch=20)
    lines(x$result$calBP[index1:index2], x$result$PrDens[index1:index2], lwd=2)
  }
  if (!interactive)
  {
    index1 = match(p1, x$result$calBP)
    p1.y = x$result[index1,2]
    index2 = match(p2, x$result$calBP)
    p2.y = x$result[index2,2]
    if (plot)
    {
      plot(x)
      points(p1, p1.y, pch=20)
      points(p2, p2.y, pch=20)
      lines(x$result$calBP[index1:index2], x$result$PrDens[index1:index2], lwd=2)
    }
  }
  # re-check here so interactively picked points are validated as well
  if (!p1 %in% x$result$calBP || !p2 %in% x$result$calBP)
  {
    stop("p1 and p2 should be within the temporal range of the spd")
  }
  # two-sided permutation p-value: rank of the observed p1-p2 difference
  # within the simulated differences
  obs.diff = p1.y - p2.y
  sim.diff = x$sim[index1,] - x$sim[index2,]
  nsim = ncol(x$sim)
  lo = sum(obs.diff < sim.diff)
  hi = sum(obs.diff > sim.diff)
  eq = sum(obs.diff == sim.diff)
  pvalHi = (lo+eq+1)/c(nsim+1)
  pvalLo = (hi+eq+1)/c(nsim+1)
  pval = ifelse(pvalHi < pvalLo, pvalHi, pvalLo)*2
  return(list(p1=p1, p2=p2, pval=pval))
}
#' @title Summarise a \code{SpdModelTest} class object
#'
#' @description \code{summary} method for class "\code{SpdModelTest}"
#'
#' @param object A \code{SpdModelTest} class object produced using the \code{\link{modelTest}} function.
#' @param type Specifies whether the summary should be based on SPD ('spd') or associated rates of change ('roc'). Default is 'spd'.
#' @param ... Ignored
#'
#' @details The summary function returns metadata (number of radiocarbon dates, bins, and simulations), the p-value of the global significance test, and the chronological interval of local positive and negative deviations from the simulation envelope.
#' @seealso \code{\link{modelTest}}.
#' @import utils
#' @import stats
#' @method summary SpdModelTest
#' @export
summary.SpdModelTest<-function(object,type='spd',...) {
  # Print consecutive runs of calBP points where `values` deviates beyond
  # `bounds` (above=TRUE: positive deviations; above=FALSE: negative).
  # Fixes two issues in the original scan: the last point is now examined
  # (the loop previously stopped at length-1) and NA values end a run
  # instead of crashing the inner while().
  report_runs <- function(calBP, values, bounds, above) {
    beyond <- function(k) {
      if (is.na(values[k])) return(FALSE)
      if (above) values[k] > bounds[k] else values[k] < bounds[k]
    }
    i <- 1
    n <- length(calBP)
    while (i <= n) {
      if (beyond(i)) {
        ss <- calBP[i]
        while (i <= n && beyond(i)) {
          ee <- calBP[i]
          i <- i + 1
        }
        if (ss != ee)
          cat(paste(ss,"~",ee," BP \n",sep=""))
        if (ss == ee)
          cat(paste(ss," BP \n",sep=""))
      } else {
        i <- i + 1
      }
    }
  }
  if (!type%in%c('spd','roc'))
  {
    stop("The argument 'type' should be either 'spd' or 'roc'.")
  }
  cat("'modelTest()' function summary:\n")
  cat("\n")
  cat(paste("Number of radiocarbon dates: ",object$n,"\n",sep=""))
  cat(paste("Number of bins: ",object$nbins,"\n",sep=""))
  if(type=='roc'){cat(paste("Backsight size: ",object$backsight,"\n",sep=""))}
  cat("\n")
  cat(paste("Statistical Significance computed using ",object$nsim," simulations. \n",sep=""))
  if(type=='spd'){cat(paste("Global p-value: ",round(object$pval,5),".\n",sep=""))}
  if(type=='roc'){cat(paste("Global p-value (rate of change): ",round(object$pval.roc,5),".\n",sep=""))}
  cat("\n")
  if (type=='spd')
  {
    obs <- object$result[,1:2]
    envelope <- object$result[,3:4]
    booms <- which(obs$PrDens>envelope[,2])
    busts <- which(obs$PrDens<envelope[,1])
  }
  if (type=='roc')
  {
    naindex = which(is.na(object$result.roc$roc))
    # (fix) guard the NA-row removal: x[-integer(0),] would drop every row
    # when the roc column happens to contain no NA values
    keep <- if (length(naindex) > 0) -naindex else seq_len(nrow(object$result.roc))
    obs <- object$result.roc[keep,1:2]
    envelope <- object$result.roc[keep,3:4]
    booms <- which(obs$roc>envelope[,2])
    busts <- which(obs$roc<envelope[,1])
  }
  if (length(booms)>0)
  {
    # (fix) "Signficant" typo corrected
    cat(paste("Significant positive local deviations at:\n"))
    report_runs(obs[,1], obs[,2], envelope[,2], above=TRUE)
  }
  if (length(booms)==0)
  {
    cat(paste("No significant positive local deviations"))
  }
  cat("\n")
  if (length(busts)>0)
  {
    cat(paste("Significant negative local deviations at:\n"))
    report_runs(obs[,1], obs[,2], envelope[,1], above=FALSE)
  }
  if (length(busts)==0)
  {
    cat(paste("No significant negative local deviations"))
  }
}
#' @title Summarise a \code{SpdPermTest} class object
#'
#' @description \code{summary} method for class "\code{SpdPermTest}"
#'
#' @param object A \code{SpdPermTest} class object produced using the \code{\link{permTest}} function.
#' @param type Specifies whether the summary should be based on SPD ('spd') or associated rates of change ('roc'). Default is 'spd'.
#' @param ... Ignored
#'
#' @details The summary function returns metadata (number of radiocarbon dates, bins, and simulations), the p-value of the global significance test, and the chronological interval of local positive and negative deviations from the simulation envelope for each set.
#' @seealso \code{\link{permTest}}.
#' @import utils
#' @import stats
#' @method summary SpdPermTest
#' @export
summary.SpdPermTest<-function(object,type='spd',...) {
  # Print consecutive runs of points (column 1 of obs) where column 2 of obs
  # deviates beyond `bounds` (above=TRUE: positive; above=FALSE: negative).
  # Fixes the original scan: the last point is now examined (the loop
  # previously stopped at length-1) and NA values end a run instead of
  # crashing the inner while().
  report_runs <- function(calBP, values, bounds, above) {
    beyond <- function(k) {
      if (is.na(values[k])) return(FALSE)
      if (above) values[k] > bounds[k] else values[k] < bounds[k]
    }
    j <- 1
    n <- length(calBP)
    while (j <= n) {
      if (beyond(j)) {
        ss <- calBP[j]
        while (j <= n && beyond(j)) {
          ee <- calBP[j]
          j <- j + 1
        }
        if (ss != ee)
          cat(paste(ss,"~",ee," BP \n",sep=""))
        if (ss == ee)
          cat(paste(ss," BP \n",sep=""))
      } else {
        j <- j + 1
      }
    }
  }
  if (!type%in%c('spd','roc'))
  {
    stop("The argument 'type' should be either 'spd' or 'roc'.")
  }
  cat("'permTest()' function summary:\n")
  cat("\n")
  cat(paste("Number of sets: ",length(object$observed),"\n",sep=""))
  if(type=='roc'){cat(paste("Backsight size: ",object$backsight,"\n",sep=""))}
  cat(paste("Statistical Significance computed using ",object$nsim," simulations. \n",sep=""))
  cat("\n")
  for (i in 1:length(object$observed))
  {
    if (type=='spd'){cat(paste("--- ",names(object$observed)[i]," ---\n",sep=""))}
    if (type=='roc'){cat(paste("--- ",names(object$observed.roc)[i]," ---\n",sep=""))}
    cat(paste("Number of radiocarbon dates:",object$metadata[[i]][1],"\n",sep=""))
    cat(paste("Number of bins:",object$metadata[[i]][2],"\n",sep=""))
    cat("\n")
    if(type=='spd'){
      cat(paste("Global p-value: ",round(object$pValueList[i],5),"\n",sep=""))
      obs <- object$observed[[i]]
      envelope <- object$envelope[[i]]
    }
    if(type=='roc'){
      cat(paste("Global p-value (rate of change): ",round(object$pValueList.roc[i],5),"\n",sep=""))
      obs <- object$observed.roc[[i]]
      envelope <- object$envelope.roc[[i]]
    }
    booms <- which(obs[,2]>envelope[,2])
    busts <- which(obs[,2]<envelope[,1])
    cat("\n")
    if (length(booms)>0)
    {
      cat(paste("Significant positive local deviations at:\n"))
      report_runs(obs[,1], obs[,2], envelope[,2], above=TRUE)
    }
    if (length(booms)==0)
    {
      cat(paste("No significant positive local deviations"))
    }
    cat("\n")
    if (length(busts)>0)
    {
      cat(paste("Significant negative local deviations at:\n"))
      report_runs(obs[,1], obs[,2], envelope[,1], above=FALSE)
    }
    if (length(busts)==0)
    {
      # (fix) this branch previously printed "No significant positive..."
      cat(paste("No significant negative local deviations"))
    }
    cat("\n")
  }
}
|
# relevel factors: set reference levels so model coefficients are contrasts
# against "Not fleeing" / "other" threat level / White ("W")
str(dat)
dat$flee <- relevel(dat$flee, ref = c('Not fleeing'))
dat$threat_level <- relevel(dat$threat_level, ref = c('other'))
dat$race <- relevel(dat$race, ref = c('W'))
# get cumulative sum of unarmed and armed deaths by race
# NA armed status is treated as "not unarmed" (0)
dat$unarmed_ind <- ifelse(is.na(dat$armed_fac), 0,
                          ifelse(dat$armed_fac == 'Unarmed', 1, 0))
# recode NA in race to unknown
# (fix) coerce the factor to character BEFORE ifelse(): ifelse() strips
# attributes, so the original returned the factor's integer codes (not the
# level labels) for non-missing rows.
# NOTE(review): converting to character drops the 'W' reference level set
# above; glm() will rebuild the factor with alphabetical levels -- confirm
# whether that ordering is intended.
race_chr <- as.character(dat$race)
dat$race <- ifelse(is.na(race_chr), 'unknown', race_chr)
# get cumulative sum of unarmed ind, accumulated separately within each race
dat$cum_sum_unarmed <- NA
unique_race <- unique(dat$race)
for (i in seq_along(unique_race)) {
  this_race <- unique_race[i]
  dat$cum_sum_unarmed[dat$race == this_race] <- cumsum(dat$unarmed_ind[dat$race == this_race])
}
# estimate unadjusted logistic regression: armed status ~ race
mod <- glm(armed_fac ~ race, data = dat, family = 'binomial')
exp(cbind(coef(mod), confint(mod)))  # odds ratios with 95% CIs
# estimate adjusted logistic regression
mod <- glm(armed_fac ~ manner_of_death + age + gender + race + signs_of_mental_illness + threat_level +
             flee + body_camera, data = dat, family = 'binomial')
exp(cbind(coef(mod), confint(mod)))
| /scripts/model_data.R | no_license | benmbrew/police_shootings | R | false | false | 1,094 | r | # relevel factors
# Inspect structure, then set factor reference levels so model coefficients
# are contrasts against "Not fleeing" / "other" threat level / White ("W").
str(dat)
dat$flee <- relevel(dat$flee, ref = c('Not fleeing'))
dat$threat_level <- relevel(dat$threat_level, ref = c('other'))
dat$race <- relevel(dat$race, ref = c('W'))
# get cumulative sum of unarmed and armed deaths by race
# NA armed status is treated as "not unarmed" (0)
dat$unarmed_ind <- ifelse(is.na(dat$armed_fac), 0,
                          ifelse(dat$armed_fac == 'Unarmed', 1, 0))
# recode NA in race to unknown
# (fix) coerce the factor to character BEFORE ifelse(): ifelse() strips
# attributes, so the original returned the factor's integer codes (not the
# level labels) for non-missing rows.
# NOTE(review): converting to character drops the 'W' reference level set
# above; glm() will rebuild the factor with alphabetical levels -- confirm
# whether that ordering is intended.
race_chr <- as.character(dat$race)
dat$race <- ifelse(is.na(race_chr), 'unknown', race_chr)
# get cumulative sum of unarmed ind, accumulated separately within each race
dat$cum_sum_unarmed <- NA
unique_race <- unique(dat$race)
for (i in seq_along(unique_race)) {
  this_race <- unique_race[i]
  dat$cum_sum_unarmed[dat$race == this_race] <- cumsum(dat$unarmed_ind[dat$race == this_race])
}
# estimate unadjusted logistic regression: armed status ~ race
mod <- glm(armed_fac ~ race, data = dat, family = 'binomial')
exp(cbind(coef(mod), confint(mod)))  # odds ratios with 95% CIs
# estimate adjusted logistic regression
mod <- glm(armed_fac ~ manner_of_death + age + gender + race + signs_of_mental_illness + threat_level +
             flee + body_camera, data = dat, family = 'binomial')
exp(cbind(coef(mod), confint(mod)))
|
#' Permutations
#'
#' Function to generate random permutations. For two groups, permutes labels independently at each time point. For one group time trend, permutes time points.
#' @param groups: number of groups (1 or 2)
#' @param n sample size per group
#' @param t.max number of time points
#' @param B number of permutations per significance test, maximum 2000. Recommend 200 for two group single time point, 100 for two group time course, 1000 for one group time course.
#' @return matrix of permutations
Permutations <- function(groups, n, t.max, B=200){
# Draws 2000 candidate permutations, drops duplicates, and returns (up to)
# the first B unique ones as the rows of a matrix.
if(groups==2 && t.max==1){
### two groups single time point: each row holds the n indices (out of the
### 2n subjects) assigned to the first group
### generate 2000 permutations
permutations <- t(replicate(2000, sample(2*n, n)))
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
if(dim(permutations)[1] > B) permutations <- permutations[1:B,]
} else if(groups==2 && t.max > 1){
### two groups time course: each row is a 0/1 group-assignment indicator of
### length t.max*2*n, shuffled independently within each time point
### does B unique permutations of group assignment indicator variable
### generate 2000 permutations of indicator variable
permutations <- matrix(
replicate(2000,
as.vector(replicate(t.max, sample(c(rep(0,n), rep(1,n)))))),
nrow=2000, ncol=t.max*2*n, byrow=TRUE)
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
### NOTE(review): unlike the other two branches this subset is not guarded
### by dim(permutations)[1] > B, so fewer than B unique permutations after
### de-duplication would yield NA rows here -- confirm whether the guard is
### intended.
permutations <- permutations[1:B,]
} else if(groups==1){
### one group time course: each row is a permutation of the t.max time points
### generate 2000 permutations
permutations <- t(replicate(2000, sample(t.max, t.max)))
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
if(dim(permutations)[1] > B) permutations <- permutations[1:B,]
}
return(permutations)
} | /R/Permutations.R | permissive | jcbeer/passtime | R | false | false | 1,888 | r | #' Permutations
#'
#' Function to generate random permutations. For two groups, permutes labels independently at each time point. For one group time trend, permutes time points.
#' @param groups: number of groups (1 or 2)
#' @param n sample size per group
#' @param t.max number of time points
#' @param B number of permutations per significance test, maximum 2000. Recommend 200 for two group single time point, 100 for two group time course, 1000 for one group time course.
#' @return matrix of permutations
Permutations <- function(groups, n, t.max, B=200){
if(groups==2 && t.max==1){
### two groups single time point
### generate 2000 permutations
permutations <- t(replicate(2000, sample(2*n, n)))
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
if(dim(permutations)[1] > B) permutations <- permutations[1:B,]
} else if(groups==2 && t.max > 1){
### two groups time course
### does B unique permutations of group assignment indicator variable
### generate 2000 permutations of indicator variable
permutations <- matrix(
replicate(2000,
as.vector(replicate(t.max, sample(c(rep(0,n), rep(1,n)))))),
nrow=2000, ncol=t.max*2*n, byrow=TRUE)
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
permutations <- permutations[1:B,]
} else if(groups==1){
### one group time course
### generate 2000 permutations
permutations <- t(replicate(2000, sample(t.max, t.max)))
### remove duplicates
permutations <- permutations[!duplicated(permutations),]
### take the first B unique permutations
if(dim(permutations)[1] > B) permutations <- permutations[1:B,]
}
return(permutations)
} |
# READ ME ----------------------------------------------------------------------
#input: 512x512 tiles
#split to 64x64 subtiles without writing
#Predict the probability of each class and save.
#Calculate different kinds of performance and draw their curves.
#Precision, Recall, F1 score curves.
#Sensitivity, specificity, AUC, ROC curve, best threshold.
# Library ----------------------------------------------------------------------
library(keras)
library(tiff) # use tiff package rather than rtiff
# install.packages("ROCR")
library (ROCR)
# install.packages("caret")
library(caret)
# install.packages("e1071")
# install.packages("pROC")
library(pROC)
# Data preparation -------------------------------------------------------------
# Paths are hard-coded to the local Windows data layout.
path="D:\\dl\\5x\\test\\HMNT0132_bis\\512_image_label"
limg=list.files(path, pattern = "\\.tif$")   # 512x512 RGB image tiles (.tif)
lroi=list.files(path, pattern = "\\.tiff$")  # matching label masks (.tiff)
# preallocate: one slice per tile
img=array(dim=c(length(limg),512,512,3))
roi=array(dim=c(length(limg),512,512))
# read every tile and its mask; limg and lroi are assumed to be parallel
# lists (same order and length) -- TODO confirm against the file naming
for (i in c(1:length(limg))){
  img[i,,,]=readTIFF(paste(path,"\\",limg[i],sep=""))
  roi[i,,]=readTIFF(paste(path,"\\",lroi[i],sep=""))
}
# subdivide the image into 64*64
# each 512x512 tile yields an 8x8 grid of 64x64 subtiles (64 per tile)
img_subtile = array(dim=c(length(limg)*64,64,64,3))
roi_subtile = array(dim=c(length(lroi)*64,64,64))
every_64 = seq(1,449,64)  # top-left offsets of the subtiles: 1, 65, ..., 449
num_sub = 0
for(i in c(1:length(limg))) {
  for(c in every_64)
  {
    for(r in every_64)
    {
      # running index over all subtiles of all tiles
      num_sub = num_sub + 1
      r_63 = r+63
      c_63 = c+63
      img_subtile[num_sub,,,] = img[i,r:r_63,c:c_63,]
      roi_subtile[num_sub,,] = roi[i,r:r_63,c:c_63]
    }
  }
}
# persist the workspace so the tiling does not need to be redone
save.image(file = "D:\\dl\\5x\\test\\HMNT0132_bis\\data_512_64.RData")
# Labels for evaluation --------------------------------------------------------
# Per-subtile one-vs-rest labels derived from the mask: a subtile gets a
# class label when more than 3072 of its 4096 pixels (75%) carry that
# class's mask value (2/255 -> normal, 3/255 -> tumoral; encoding assumed
# from the annotation masks -- TODO confirm).
# NOTE(review): the == comparison on floats relies on readTIFF returning
# exact k/255 values; it did in the original, so it is kept unchanged.
# Preallocate plain numeric vectors: the original c(dim=num_sub) idiom
# created a length-1 named vector that was silently grown on assignment.
y_tum <- numeric(num_sub)  # 1 for positive, 0 for negative
y_nor <- numeric(num_sub)
for(i in c(1:num_sub)) {
  if(sum(roi_subtile[i,,] == 2/255) > 3072) {
    # mostly normal tissue
    y_tum[i] <- 0
    y_nor[i] <- 1
  } else if(sum(roi_subtile[i,,] == 3/255) > 3072){
    # mostly tumoral tissue
    y_tum[i] <- 1
    y_nor[i] <- 0
  } else {
    # ambiguous subtile: negative for both classes
    y_tum[i] <- 0
    y_nor[i] <- 0
  }
}
# Load trained model ------------------------------------------------------------
# use a trained CNN model (VGG16 + dense + batch-norm head, per the path name)
model <- load_model_hdf5(filepath = "D:\\dl\\5x\\vgg16_dense_bn\\best_model", compile = TRUE)
# Prediction --------------------------------------------------------------------
#predict the class
# preds_classes <- model %>% predict_classes(img_subtile)
# write.table(preds_classes, "D:\\qinghe\\results\\HMNT0001\\preds_classes", sep=",",row.names=FALSE)
#predict the probability
# preds is a (num_sub x 2) matrix: column 1 = tumoral probability,
# column 2 = normal probability (see the column names written below)
preds <- model %>% predict(img_subtile)
write.table(preds, "D:\\dl\\5x\\test\\HMNT0132_bis\\results\\vgg_dense_bn_prob", sep=",",
            row.names = FALSE, col.names = c("tum", "nor"))
#not completed yet, see the link below:
#https://keras.io/getting-started/faq/#how-can-i-obtain-the-output-of-an-intermediate-layer
# print(get_output_at(get_layer(model,"activation_2"), 1))
# Evaluation ---------------------------------------------------------------------
#package ROCR: Recall-Precision curve, F1-score curve, ROC curve, AUC
# prediction() pairs the per-class probability with its 0/1 label vector
pred_tum <- prediction(preds[,1], y_tum);
pred_nor <- prediction(preds[,2], y_nor);
# Recall-Precision curve
RP.perf_tum <- performance(pred_tum, "prec", "rec");
RP.perf_nor <- performance(pred_nor, "prec", "rec");
plot (RP.perf_tum, col = "red", main= "Precision-Recall graphs");
plot (RP.perf_nor, col = "green", add = TRUE);
# F1-score curve
F1.perf_tum <- performance(pred_tum,"f")
F1.perf_nor <- performance(pred_nor,"f")
plot (F1.perf_tum, col = "red", main= "F1-score graphs");
plot (F1.perf_nor, col = "green", add = TRUE);
# ROC curve
ROC.perf_tum <- performance(pred_tum, "tpr", "fpr");
ROC.perf_nor <- performance(pred_nor, "tpr", "fpr");
plot (ROC.perf_tum, col = "red", main= "ROC graphs");
plot (ROC.perf_nor, col = "green", add = TRUE);
# ROC area under the curve
# (labels fixed: these values are AUCs, not accuracies)
auc.tmp_tum <- performance(pred_tum,"auc");
print("AUC of tumoral class:")
slot(auc.tmp_tum,"y.values")[[1]]
auc.tmp_nor <- performance(pred_nor,"auc");
print("AUC of normal class:")
slot(auc.tmp_nor,"y.values")[[1]]
#package pROC: ROC curve, AUC, the best threshold
roc_tum <- roc(y_tum, preds[,1])
roc_nor <- roc(y_nor, preds[,2])
plot(roc_tum, col="red", print.thres=TRUE, print.auc=TRUE)
plot.roc(roc_nor, add=TRUE, col="green", print.thres=TRUE)
roc_nor$auc
#package caret: At the BEST THRESHOLD, confusion matrix, accuracy, F1-score, sensitivity, specificity, etc
# Binarise the predicted probabilities at the best thresholds reported by
# the pROC plots above. The vectorised comparison replaces the element-wise
# loops and the c(dim=...) pseudo-preallocation; the resulting 0/1 vectors
# are identical to the originals.
data_tum <- as.numeric(preds[,1] >= 0.436) # best threshold for the tumoral class
data_nor <- as.numeric(preds[,2] >= 0.86)  # best threshold for the normal class
data_tum = factor(data_tum)
reference = factor(y_tum)
result_tum <- confusionMatrix(data = data_tum, reference = reference, positive = "1")
# View confusion matrix overall
result_tum
# F1 value
result_tum$byClass[7]
data_nor = factor(data_nor)
reference = factor(y_nor)
result_nor <- confusionMatrix(data = data_nor, reference = reference, positive = "1")
# View confusion matrix overall
result_nor
# F1 value
result_nor$byClass[7] | /5x/test/test_evaluation_2class_graph.R | no_license | qinghezeng/DetectHCC | R | false | false | 5,391 | r | # READ ME ----------------------------------------------------------------------
#input: 512x512 tiles
#split to 64x64 subtiles without writing
#Predict the probability of each class and save.
#Calculate different kinds of performance and draw their curves.
#Precision, Recall, F1 score curves.
#Sensitivity, specificity, AUC, ROC curve, best threshold.
# Library ----------------------------------------------------------------------
library(keras)
library(tiff) # use tiff package rather than rtiff
# install.packages("ROCR")
library (ROCR)
# install.packages("caret")
library(caret)
# install.packages("e1071")
# install.packages("pROC")
library(pROC)
# Data preparation -------------------------------------------------------------
path="D:\\dl\\5x\\test\\HMNT0132_bis\\512_image_label"
limg=list.files(path, pattern = "\\.tif$")
lroi=list.files(path, pattern = "\\.tiff$")
img=array(dim=c(length(limg),512,512,3))
roi=array(dim=c(length(limg),512,512))
for (i in c(1:length(limg))){
img[i,,,]=readTIFF(paste(path,"\\",limg[i],sep=""))
roi[i,,]=readTIFF(paste(path,"\\",lroi[i],sep=""))
}
# subdivide the image into 64*64
img_subtile = array(dim=c(length(limg)*64,64,64,3))
roi_subtile = array(dim=c(length(lroi)*64,64,64))
every_64 = seq(1,449,64)
num_sub = 0
for(i in c(1:length(limg))) {
for(c in every_64)
{
for(r in every_64)
{
num_sub = num_sub + 1
r_63 = r+63
c_63 = c+63
img_subtile[num_sub,,,] = img[i,r:r_63,c:c_63,]
roi_subtile[num_sub,,] = roi[i,r:r_63,c:c_63]
}
}
}
save.image(file = "D:\\dl\\5x\\test\\HMNT0132_bis\\data_512_64.RData")
# Labels for evalutaion --------------------------------------------------------
y_tum = c(dim=num_sub)
y_nor = c(dim=num_sub)
for(i in c(1:num_sub)) {
if(sum(roi_subtile[i,,] == 2/255) > 3072) {
y_tum[i] = 0 #1 for positive, 0 for negative
y_nor[i] = 1
} else if(sum(roi_subtile[i,,] == 3/255) > 3072){
y_tum[i] = 1
y_nor[i] = 0
} else {
y_tum[i] = 0
y_nor[i] = 0}
}
# Load trained model ------------------------------------------------------------
# use a trained CNN model
model <- load_model_hdf5(filepath = "D:\\dl\\5x\\vgg16_dense_bn\\best_model", compile = TRUE)
# Prediction --------------------------------------------------------------------
#predict the class
# preds_classes <- model %>% predict_classes(img_subtile)
# write.table(preds_classes, "D:\\qinghe\\results\\HMNT0001\\preds_classes", sep=",",row.names=FALSE)
#predict the probability
preds <- model %>% predict(img_subtile)
write.table(preds, "D:\\dl\\5x\\test\\HMNT0132_bis\\results\\vgg_dense_bn_prob", sep=",",
row.names = FALSE, col.names = c("tum", "nor"))
#not completed yet, see the link below:
#https://keras.io/getting-started/faq/#how-can-i-obtain-the-output-of-an-intermediate-layer
# print(get_output_at(get_layer(model,"activation_2"), 1))
# Evaluation ---------------------------------------------------------------------
#package ROCR: Recall-Precision curve, F1-score curve, ROC curve, accuracy
pred_tum <- prediction(preds[,1], y_tum);
pred_nor <- prediction(preds[,2], y_nor);
# Recall-Precision curve
RP.perf_tum <- performance(pred_tum, "prec", "rec");
RP.perf_nor <- performance(pred_nor, "prec", "rec");
plot (RP.perf_tum, col = "red", main= "Precision-Recall graphs");
plot (RP.perf_nor, col = "green", add = T);
# F1-score curve
F1.perf_tum <- performance(pred_tum,"f")
F1.perf_nor <- performance(pred_nor,"f")
plot (F1.perf_tum, col = "red", main= "F1-score graphs");
plot (F1.perf_nor, col = "green", add = T);
# ROC curve
ROC.perf_tum <- performance(pred_tum, "tpr", "fpr");
ROC.perf_nor <- performance(pred_nor, "tpr", "fpr");
plot (ROC.perf_tum, col = "red", main= "ROC graphs");
plot (ROC.perf_nor, col = "green", add = T);
# ROC area under the curve
auc.tmp_tum <- performance(pred_tum,"auc");
print("accuracy of tumoral class:")
slot(auc.tmp_tum,"y.values")[[1]]
auc.tmp_nor <- performance(pred_nor,"auc");
print("accuracy of normal class:")
slot(auc.tmp_nor,"y.values")[[1]]
#package pROC: ROC curve, accuracy, the best threshold
roc_tum <- roc(y_tum, preds[,1])
roc_nor <- roc(y_nor, preds[,2])
plot(roc_tum, col="red", print.thres=TRUE, print.auc=TRUE)
plot.roc(roc_nor, add=TRUE, col="green", print.thres=TRUE)
roc_nor$auc
#package caret: At the BEST THRESHOLD, confusion matrix, accuracy, F1-score, sensitivity, specificity, etc
data_tum = c(dim=(num_sub))
for(i in c(1:num_sub)) {
if(preds[i,1]>=0.436) { #set the best threshold printed above, for tumoral class
data_tum[i] = 1
} else {
data_tum[i] = 0
}
}
data_nor = c(dim=(num_sub))
for(i in c(1:num_sub)) {
if(preds[i,2]>=0.86) { #set the best threshold printed above, for normal class
data_nor[i] = 1
} else {
data_nor[i] = 0
}
}
data_tum = factor(data_tum)
reference = factor(y_tum)
result_tum <- confusionMatrix(data = data_tum, reference = reference, positive = "1")
# View confusion matrix overall
result_tum
# F1 value
result_tum$byClass[7]
data_nor = factor(data_nor)
reference = factor(y_nor)
result_nor <- confusionMatrix(data = data_nor, reference = reference, positive = "1")
# View confusion matrix overall
result_nor
# F1 value
result_nor$byClass[7] |
library(sf)
library(sp)
# Martinique municipalities example data shipped with cartography
mtq <- st_read(system.file("gpkg/mtq.gpkg", package="cartography"), quiet = TRUE)
mtq.borders <- getBorders(x = mtq)
plot(st_geometry(mtq))
# absolute discontinuities should plot silently
expect_silent(discLayer(x = mtq.borders, df = mtq,
                        var = "MED", col="red4", nclass=3,
                        method="equal", threshold = 0.4, sizemin = 0.5,
                        sizemax = 10, type = "abs",legend.values.rnd = 0,
                        legend.title.txt = "Discontinuities\n(absolute difference)",
                        legend.pos = "bottomleft", add=TRUE))
# relative discontinuities should plot silently
# (legend title fixed: this call uses type = "rel", the old title said "absolute")
expect_silent(discLayer(x = mtq.borders, df = mtq,
                        var = "MED", col="red4", nclass=3,
                        method="equal", threshold = 0.4, sizemin = 0.5,
                        sizemax = 10, type = "rel",legend.values.rnd = 0,
                        legend.title.txt = "Discontinuities\n(relative difference)",
                        legend.pos = "bottomleft", add=TRUE))
# the retired spdf interface must raise an error
expect_error(discLayer(spdf = mtq.borders))
| /inst/tinytest/test_discLayer.R | no_license | riatelab/cartography | R | false | false | 1,018 | r | library(sf)
library(sp)
mtq <- st_read(system.file("gpkg/mtq.gpkg", package="cartography"), quiet = TRUE)
mtq.borders <- getBorders(x = mtq)
plot(st_geometry(mtq))
expect_silent(discLayer(x = mtq.borders, df = mtq,
var = "MED", col="red4", nclass=3,
method="equal", threshold = 0.4, sizemin = 0.5,
sizemax = 10, type = "abs",legend.values.rnd = 0,
legend.title.txt = "Discontinuities\n(absolute difference)",
legend.pos = "bottomleft", add=TRUE))
expect_silent(discLayer(x = mtq.borders, df = mtq,
var = "MED", col="red4", nclass=3,
method="equal", threshold = 0.4, sizemin = 0.5,
sizemax = 10, type = "rel",legend.values.rnd = 0,
legend.title.txt = "Discontinuities\n(absolute difference)",
legend.pos = "bottomleft", add=TRUE))
expect_error(discLayer(spdf = mtq.borders))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strvalidator-package.r
\docType{data}
\name{set5}
\alias{set5}
\title{ESX17 example data for mixture analysis.}
\format{
A data frame with 1663 rows and 7 variables
}
\usage{
data(set5)
}
\description{
A slimmed dataset containing data from a
mixture experiment, for Mx analysis.
}
\keyword{datasets}
| /man/set5.Rd | no_license | OskarHansson/strvalidator | R | false | true | 376 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strvalidator-package.r
\docType{data}
\name{set5}
\alias{set5}
\title{ESX17 example data for mixture analysis.}
\format{
A data frame with 1663 rows and 7 variables
}
\usage{
data(set5)
}
\description{
A slimmed dataset containing data from
mixture experiment for Mx analysis.
}
\keyword{datasets}
|
## Rank hospitals in every state by 30-day mortality for a given outcome.
##
## outcome: one of "heart attack", "heart failure", "pneumonia"
## num:     rank to report per state -- "best", "worst", or a numeric rank
## Returns a data frame with columns `hospital` and `state` (abbreviated),
## one row per state; hospital is NA when the requested rank does not exist.
rankall <- function(outcome, num = "best"){
  ## Read outcome data
  outcome.data <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", stringsAsFactors = FALSE)
  ## Map the outcome to its 30-day mortality column; anything else is an error
  outcomeColIdx <- switch(outcome,
                          "heart attack" = 11,
                          "heart failure" = 17,
                          "pneumonia" = 23,
                          stop("invalid outcome"))
  ## Keep rows with both a state and a non-missing mortality rate
  suboutcomedata <- subset(outcome.data, !is.na(outcome.data$State) & !is.na(outcome.data[, outcomeColIdx]))
  outcomeColLists <- split(suboutcomedata, suboutcomedata$State)
  ## For each state, order hospitals by mortality (ties broken by name) and
  ## pick the hospital at rank `num`. Rows are built in a list and bound
  ## once, instead of growing a data frame with rbind() inside a loop.
  rows <- lapply(names(outcomeColLists), function(n) {
    state.data <- outcomeColLists[[n]]
    rankIdx <- order(state.data[, outcomeColIdx], state.data$Hospital.Name)
    h.name <- state.data$Hospital.Name[rankIdx]
    data.frame(hospital = getbynum(h.name, num), state = n,
               stringsAsFactors = FALSE)
  })
  ## Return a data frame with the hospital names and the
  ## (abbreviated) state name
  do.call(rbind, rows)
}
## Return the element of `data` at rank `num`.
## `num` may be "best" (first element), "worst" (last element), or a
## numeric rank. Any other character value, or a numeric rank outside
## 1..length(data), yields NA.
getbynum <- function(data, num){
  if(num == "best"){
    return(data[1])
  }
  if(num == "worst"){
    return(data[length(data)])
  }
  if(typeof(num) == "character"){
    ## any character rank other than "best"/"worst" is invalid
    return(NA)
  }
  ## scalar bounds check: use short-circuit || rather than elementwise |
  if(num > length(data) || num < 1){
    return(NA)
  }
  data[num]
}
## Read outcome data
outcome.data <- read.csv("outcome-of-care-measures.csv", na.strings = "Not Available", stringsAsFactors=FALSE)
## Check that outcome are valid
## check outcome to be one of the “heart attack”, “heart failure”, or “pneumonia”.
outcomeColIdx <- numeric(0)
if(outcome == "heart attack"){
outcomeColIdx <- 11
}else if(outcome == "heart failure"){
outcomeColIdx <- 17
}else if(outcome == "pneumonia"){
outcomeColIdx <- 23
}else{
stop("invalid outcome")
}
## For each state, find the hospital of the given rank
suboutcomedata <- subset(outcome.data, !is.na(outcome.data$State) & !is.na(outcome.data[,outcomeColIdx]))
outcomeColLists <- split(suboutcomedata,suboutcomedata$State)
allranks <- data.frame()
for(n in names(outcomeColLists)){
rankIdx <- order(outcomeColLists[[n]][,outcomeColIdx],outcomeColLists[[n]]$Hospital.Name)
h.name <- outcomeColLists[[n]]$Hospital.Name[rankIdx]
h.namebynum <- getbynum(h.name,num)
allranks <- rbind(allranks,cbind("hospital" = h.namebynum,"state" = n))
}
allranks
## Return a data frame with the hospital names and the
## (abbreviated) state name
}
getbynum <- function(data, num){
if(num == "best"){
return(data[1])
}
if(num == "worst"){
return(data[length(data)])
}
if(typeof(num) == "character"){
return(NA)
}
if(num > length(data) | num <1){
return(NA)
}
data[num]
} |
#' Simulate lambda process from stable distribution iteratively
#'
#' Simulate lambda process from stable distribution iteratively
#' until target length of result is reached. It uses multi-core capability to
#' run lamp.simulate1 in parallel.
#' If file slot is specified, simulation result will be persisted to it periodically.
#' A plot interface is provided to monitor the progress.
#' A CPU temperature interface is provided to control CPU from overheating.
#'
#' @param object an object of lamp class
#' @param use.mc numeric, number of cores for parallel simulations. Default is 4.
#' @param sim.length numeric, number of Z to simulate. Default is 1000.
#' @param reset.cache logical, to reset simulation cache or not prior the run. Default is FALSE.
#' @param keep.tau numeric, 0 to clean up, 1 to return unused tau, 2 to return all tau. Default is 1.
#' @param drop numeric, number of tau to discard at the end per iteration. Default is 10.
#' @param plot.util function, interface to plot simulation results. Default is lamp.plot_sim6.
#' @param cpu.temperature numeric, temperature above which the CPU is considered overheating. Default is 68.
#' @param cpu.temperature.util function, interface to get CPU temperature. Default is NULL.
#'
#' @return an object of lamp class with Z, B, N populated
#'
#' @keywords simulation
#'
#' @author Stephen H-T. Lihn
#'
#' @export
#'
### <======================================================================>
"lamp.simulate_iter" <- function(object, use.mc = 4,
                                 sim.length = 1000,
                                 reset.cache = FALSE,
                                 drop=10, keep.tau=1,
                                 plot.util = lamp.plot_sim6,
                                 cpu.temperature = 68,
                                 cpu.temperature.util = NULL)
{
    lp <- object # keep object original and small, add data to lp
    if (reset.cache) {
        # discard any previously accumulated simulation results
        lp@Z_i <- lp@tau_i <- 0
        lp@Z <- lp@B <- lp@N <- lp@tau <- numeric(0)
    }
    lambda <- lp@lambda
    T.inf <- lp@T.inf
    # helper: when a temperature utility is configured, sleep in s-second
    # steps while the CPU is above cpu.temperature. Returns 0 (no utility),
    # 1 (hot, loop=FALSE: checked once and bailed) or 2 (temperature OK).
    cpu <- function(s, loop=TRUE) {
        if (!is.null(cpu.temperature.util)) {
            while (cpu.temperature.util() > cpu.temperature) {
                print(paste("Temperature is high:", cpu.temperature.util()))
                Sys.sleep(s)
                if (!loop) return(1)
            }
            return(2)
        }
        return(0)
    }
    tm <- Sys.time()
    # keep launching batches of use.mc parallel simulations until the
    # requested number of Z samples has been accumulated
    while (lp@Z_i <= sim.length) {
        print(paste("simulate Z_i=", lp@Z_i, "T.inf=", lp@T.inf, "time=", tm))
        Sys.sleep(1)
        f <- function(i) lamp.simulate1(object, drop=drop, keep.tau=keep.tau)
        yy <- parallel::mclapply(seq(1,use.mc), f)
        # merge each worker's Z/N/B into the accumulator object
        for (y in yy) {
            lp@Z <- c(lp@Z, y@Z)
            lp@N <- c(lp@N, y@N)
            lp@B <- c(lp@B, y@B)
            lp@Z_i <- length(lp@Z)
            lp@tm <- Sys.time()
        }
        Sys.sleep(1)
        cpu(2, loop=FALSE)
        # ---------------------------------------------------
        # plotting only starts once enough samples have accumulated
        if (lp@Z_i > 400 & !is.null(plot.util)) plot.util(lp)
        # periodically persist the accumulated object when a file is set
        if (length(lp@file) > 0) {
            save(lp, file=lp@file)
            sz <- file.info(lp@file)$size/1024.0
            sz1 <- if (sz>=1024) sprintf("%.1fMB", sz/1024) else sprintf("%.1fkB", sz)
            print(paste(" -> data saved, size", sz1, lp@file))
        }
        tm <- Sys.time()
        # pause until the temperature drops below the threshold
        cpu(3, loop=TRUE)
    }
    return(lp)
}
### <---------------------------------------------------------------------->
| /R/lamp-simulate-iter-method.R | no_license | cran/ecd | R | false | false | 3,622 | r | #' Simulate lambda process from stable distribution iteratively
#'
#' Simulate lambda process from stable distribution iteratively
#' until target length of result is reached. It uses multi-core capability to
#' run lamp.simulate1 in parallel.
#' If file slot is specified, simulation result will be persisted to it periodically.
#' A plot interface is provided to monitor the progress.
#' A CPU temperature interface is provided to control CPU from overheating.
#'
#' @param object an object of lamp class
#' @param use.mc numeric, number of cores for parallel simulations. Default is 4.
#' @param sim.length numeric, number of Z to simulate. Default is 1000.
#' @param reset.cache logical, to reset simulation cache or not prior the run. Default is FALSE.
#' @param keep.tau numeric, 0 to clean up, 1 to return unused tau, 2 to return all tau. Default is 1.
#' @param drop numeric, number of tau to discard at the end per iteration. Default is 10.
#' @param plot.util function, interface to plot simulation results. Default is lamp.plot_sim4.
#' @param cpu.temperature numeric, temperature above which is overhead. Default is 68.
#' @param cpu.temperature.util function, interface to get CPU temperature. Default is NULL.
#'
#' @return an object of lamp class with Z, B, N populated
#'
#' @keywords simulation
#'
#' @author Stephen H-T. Lihn
#'
#' @export
#'
### <======================================================================>
"lamp.simulate_iter" <- function(object, use.mc = 4,
sim.length = 1000,
reset.cache = FALSE,
drop=10, keep.tau=1,
plot.util = lamp.plot_sim6,
cpu.temperature = 68,
cpu.temperature.util = NULL)
{
lp <- object # keep object original and small, add data to lp
if (reset.cache) {
lp@Z_i <- lp@tau_i <- 0
lp@Z <- lp@B <- lp@N <- lp@tau <- numeric(0)
}
lambda <- lp@lambda
T.inf <- lp@T.inf
cpu <- function(s, loop=TRUE) {
if (!is.null(cpu.temperature.util)) {
while (cpu.temperature.util() > cpu.temperature) {
print(paste("Temperature is high:", cpu.temperature.util()))
Sys.sleep(s)
if (!loop) return(1)
}
return(2)
}
return(0)
}
tm <- Sys.time()
while (lp@Z_i <= sim.length) {
print(paste("simulate Z_i=", lp@Z_i, "T.inf=", lp@T.inf, "time=", tm))
Sys.sleep(1)
f <- function(i) lamp.simulate1(object, drop=drop, keep.tau=keep.tau)
yy <- parallel::mclapply(seq(1,use.mc), f)
for (y in yy) {
lp@Z <- c(lp@Z, y@Z)
lp@N <- c(lp@N, y@N)
lp@B <- c(lp@B, y@B)
lp@Z_i <- length(lp@Z)
lp@tm <- Sys.time()
}
Sys.sleep(1)
cpu(2, loop=FALSE)
# ---------------------------------------------------
if (lp@Z_i > 400 & !is.null(plot.util)) plot.util(lp)
if (length(lp@file) > 0) {
save(lp, file=lp@file)
sz <- file.info(lp@file)$size/1024.0
sz1 <- if (sz>=1024) sprintf("%.1fMB", sz/1024) else sprintf("%.1fkB", sz)
print(paste(" -> data saved, size", sz1, lp@file))
}
tm <- Sys.time()
# pause if temperatture gets too high
cpu(3, loop=TRUE)
}
return(lp)
}
### <---------------------------------------------------------------------->
|
#' @title Create markdown embed for a Tweet
#' @description The Twitter API GET call returns the tweet in markdown embedded format
#' @param screen_name character, screen name of the user
#' @param status_id character, status id
#' @return character, the oEmbed HTML snippet for the tweet
#' @details Arguments to pass to the API call can be found \href{https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-oembed}{here}
#' @examples
#' name <- "kearneymw"
#' status <- "1087047171306856451"
#'
#' tweet_embed(screen_name = name, status_id = status)
#' @export
#' @importFrom httr GET content stop_for_status
#'
tweet_embed <- function(screen_name, status_id) {
  ## publish.twitter.com oEmbed endpoint; the tweet URL is passed URL-encoded
  core_url <- "https://publish.twitter.com/oembed?url=https%3A%2F%2Ftwitter.com%2F"
  status_url <- "%2Fstatus%2F"
  url <- paste0(core_url, screen_name, status_url, status_id)
  api_payload <- httr::GET(url)
  ## fail loudly on HTTP errors instead of silently returning NULL below
  httr::stop_for_status(api_payload)
  api_content <- httr::content(api_payload)
  api_content[["html"]]
}
| /R/tweet_embed.R | no_license | jthomasmock/tomtom | R | false | false | 927 | r | #' @title Create markdown embed for a Tweet
#' @description The Twitter API GET call returns the tweet in markdown embedded format
#' @param screen_name character, screen name of the user
#' @param status_id character, status id
#' @return character
#' @details Arguments to pass to the API call can be found \href{https://developer.twitter.com/en/docs/tweets/post-and-engage/api-reference/get-statuses-oembed}{here}
#' @examples
#' name <- "kearneymw"
#' status <- "1087047171306856451"
#'
#' tweet_embed(screen_name = name, status_id = status)
#' @export
#' @importFrom httr GET content
#'
tweet_embed <- function(screen_name, status_id) {
core_url <- "https://publish.twitter.com/oembed?url=https%3A%2F%2Ftwitter.com%2F"
status_url <- "%2Fstatus%2F"
url <- paste0(core_url, screen_name, status_url, status_id)
api_payload <- httr::GET(url)
api_content <- httr::content(api_payload)
api_content[["html"]]
}
|
require(acss)
require(stringr)
#source("BDM1D.R")
## Remove the numberOfAttackedBits symbols of origString whose individual
## deletion causes the smallest BDM (Block Decomposition Method) change,
## all at once, and return the remaining string.
##
## origString:           string over an alphabet of size `base`
## blockSize, offset:    BDM block decomposition parameters
## numberOfAttackedBits: how many symbols to delete simultaneously
## evaluateFromMedian:   TRUE  -> rank deletions against the median BDM of
##                                all deletion variants
##                       FALSE -> rank against the original string's BDM
simultaneousAttackOnString <- function(origString,
                                       blockSize,
                                       offset, base,
                                       numberOfAttackedBits,
                                       evaluateFromMedian) {
  ## BDM lookup table for the given alphabet size (from BDM1D.R)
  kValues <- getKValues(alphabetSize=base)
  origStringBDMValue <- evaluateBDM1D(origString, blockSize, offset, base, kValues)
  stringVector <- unlist(str_split(origString, pattern=""))
  ## all single-symbol deletion variants; vapply preallocates the result
  ## instead of growing a vector with c() inside a loop
  deletionStrings <- vapply(seq_along(stringVector), function(i) {
    paste(stringVector[-i], sep = "", collapse = "")
  }, character(1))
  ## BDM value of every deletion variant
  deletionStringsBDMValues <- unlist(
    lapply(deletionStrings,
           evaluateBDM1D, blockSize, offset, base, kValues))
  ## information-loss score of each deletion
  if(evaluateFromMedian){
    bdmDifferences <- (median(deletionStringsBDMValues) -
                         deletionStringsBDMValues)
  } else {
    bdmDifferences <- origStringBDMValue - deletionStringsBDMValues
  }
  bdmDf <- data.frame(deletionStrings = deletionStrings,
                      bdmDifferences = bdmDifferences,
                      stringsAsFactors = FALSE)
  bdmDf$diffRank <- rank(bdmDf$bdmDifferences, ties.method = "min")
  ## original symbol positions, sorted by how little their deletion matters
  indexRank <- as.numeric(rownames(bdmDf[order(bdmDf$diffRank), ]))
  removedBitsIdx <- indexRank[seq_len(numberOfAttackedBits)]
  ## drop the attacked symbols and reassemble the surviving string
  keep <- rep(TRUE, length(stringVector))
  keep[removedBitsIdx] <- FALSE
  removedBitsString <- paste(stringVector[keep], sep = "", collapse = "")
  return(removedBitsString)
}
## no block overlap
#start.time <- Sys.time()
# simultaneousAttackOnString("110001101010111101",
# 12, 12, 2, 10, evaluateFromMedian = TRUE)
# end.time <- Sys.time()
# time.taken <- end.time - start.time
# print(time.taken)
#
##max block overlap
# start.time <- Sys.time()
# simultaneousAttackOnString("110001101010111101",
# 12, 1, 2, 10, evaluateFromMedian = TRUE)
# end.time <- Sys.time()
# time.taken <-end.time - start.time
# print (time.taken)
| /scripts/simultaneousAttackOnStrings.R | no_license | andandandand/MinimalAlgorithmicInformationLoss | R | false | false | 2,432 | r |
require(acss)
require(stringr)
#source("BDM1D.R")
simultaneousAttackOnString <- function(origString,
blockSize,
offset, base,
numberOfAttackedBits,
evaluateFromMedian) {
kValues <- getKValues(alphabetSize=base)
origStringBDMValue <- evaluateBDM1D(origString, blockSize, offset, base, kValues)
stringVector <- unlist(str_split(origString, pattern=""))
deletionStrings <- c()
for (i in 1: length(stringVector)){
boolIndexVector <- !logical(length(stringVector))
boolIndexVector[i] <- FALSE
back <- paste(stringVector[boolIndexVector], sep = "",
collapse="")
deletionStrings <- c(deletionStrings, back)
}
deletionStringsBDMValues <- unlist(
lapply(deletionStrings,
evaluateBDM1D, blockSize, offset, base, kValues))
if(evaluateFromMedian){
bdmDifferences<- (median(deletionStringsBDMValues) -
deletionStringsBDMValues)
}
else{
bdmDifferences <- origStringBDMValue - deletionStringsBDMValues
}
bdmDf <- data.frame(deletionStrings= deletionStrings,
bdmDifferences = bdmDifferences,
stringsAsFactors=FALSE)
bdmDf$diffRank <- rank(
bdmDf$bdmDifferences, ties.method ="min"
)
sortByDiffRank <- order(bdmDf$diffRank)
indexRank <- as.numeric(rownames(bdmDf[sortByDiffRank,]))
removeddBitsIdx <- indexRank[c(1:numberOfAttackedBits)]
boolIndexVector <- !logical(length(stringVector))
boolIndexVector[removeddBitsIdx] <- FALSE
boolIndexVector
stringVector[boolIndexVector]
removedBitsString <- paste(stringVector[boolIndexVector],
sep="", collapse="")
return (removedBitsString)
}
## no block overlap
#start.time <- Sys.time()
# simultaneousAttackOnString("110001101010111101",
# 12, 12, 2, 10, evaluateFromMedian = TRUE)
# end.time <- Sys.time()
# time.taken <- end.time - start.time
# print(time.taken)
#
##max block overlap
# start.time <- Sys.time()
# simultaneousAttackOnString("110001101010111101",
# 12, 1, 2, 10, evaluateFromMedian = TRUE)
# end.time <- Sys.time()
# time.taken <-end.time - start.time
# print (time.taken)
|
#' Extract model information from different classes
#'
#' Generic dispatcher: converts a fitted model object (rms, lrm, ols, felm,
#' fixest, rdrobust, ...) into a common "model print object" list holding
#' standardised Statistics, a CoefValues array and related metadata, as
#' produced by the class-specific methods in this file.
#'
#' @param x a fitted model object
#'
#' @return a list (or the input object augmented) with the common fields
#' @export
ModelPrintObject <- function(x){
  UseMethod('ModelPrintObject', x)
}
#
#' ConvertNewToOldMPO
#'
#' Convert a version >= 2 model print object back to the legacy layout: the
#' per-coefficient statistics in x$Coefficients$Main are copied into the 3-d
#' array x$CoefValues (rows = coefficients, single column "a", slices =
#' statistic names) and the dependent variable is flattened. Objects without
#' mmpVers >= 2 are returned unchanged.
#'
#' @param x a model print object (list)
#'
#' @return x, augmented with CoefValues and DependentVariable
#' @export
ConvertNewToOldMPO <- function(x)
{
  if (!("mmpVers" %in% names(x) && x$mmpVers >= 2))
    return(x)
  x$CoefValues <- array(NA, dim = c(nrow(x$Coefficients$Main), 1, 6),
                        dimnames = list(rownames(x$Coefficients$Main), 'a',
                                        c("Coefficient", "SE", "TStat", "Normal", "PValue", "special")))
  x$CoefValues[, "a", "Coefficient"] <- x$Coefficients$Main$Coef
  if (length(x$Coefficients$Main$SE) > 0) x$CoefValues[, "a", "SE"] <- x$Coefficients$Main$SE
  if (length(x$Coefficients$Main$TStat) > 0) x$CoefValues[, "a", "TStat"] <- x$Coefficients$Main$TStat
  ## fixed: test the column that is actually copied (ZStat). The old code
  ## tested $Z and only worked through data.frame $ partial matching.
  if (length(x$Coefficients$Main$ZStat) > 0) x$CoefValues[, "a", "Normal"] <- x$Coefficients$Main$ZStat
  if (length(x$Coefficients$Main$PValue) > 0) x$CoefValues[, "a", "PValue"] <- x$Coefficients$Main$PValue
  ## NOTE(review): the source field is spelled "DependendVariables" across
  ## this codebase -- kept as-is for compatibility
  x$DependentVariable <- x$DependendVariables$Main
  return(x)
}
#' ModelPrintObject.default
#'
#' Fallback method: objects of classes without a dedicated
#' ModelPrintObject method are passed through unchanged.
#'
#' @param x any model object
#'
#' @return x, unmodified
#' @export
ModelPrintObject.default <- function(x) {
  x
}
#' ModelPrintObject.rms
#'
#' Method for rms fits: records common fit statistics, the standard-error
#' provenance (robcov or clustering, when present), builds the CoefValues
#' array from the coefficients and covariance matrix, and extracts the
#' dependent variable name.
#'
#' @param x an rms model object (e.g. from lrm/ols after their methods)
#'
#' @return the model object augmented with Statistics, CoefValues,
#'   DependentVariable
#' @export
ModelPrintObject.rms <- function(x)
{
  x$Statistics <- append(x$Statistics,
                         list(
                           R2 = x$stats['R2'],
                           LogLik = x$stats['Model L.R.'],
                           G = x$stats['g'],
                           Sigma = x$stats['Sigma']
                         ))
  # orig.var is set by rms::robcov(); note it as the SE source unless the
  # fit was clustered. NOTE(review): scalar condition uses elementwise &
  # (works here, but && would be the idiomatic choice).
  if (!is.null(x$orig.var) & is.null(x$clusterInfo))
  {
    x$Statistics <- append(x$Statistics,
                           list(
                             StandardErrorInfo = 'SE by robcov()'
                           ))
  }
  if (!is.null(x$clusterInfo))
  {
    x$Statistics <- append(x$Statistics,
                           list(
                             StandardErrorInfo = paste('Clustered on ', x$clusterInfo$name, '; n = ', x$clusterInfo$n, sep ='')
                           ))
  }
  # only build CoefValues if a class-specific method has not already done so
  if (is.null(x$CoefValues))
  {
    CoefTemp <- cbind(Coefficient = x$coefficients,
                      SE = sqrt(diag(x$var)), # Std. Error
                      TStat = NA,
                      Normal= x$coefficients/sqrt(diag(x$var)), # Normal
                      PValue = NA,
                      SpecialText = NA
    )
    # GetPValueFromNormal is presumably a project helper mapping z-statistics
    # to p-values (defined elsewhere) -- TODO confirm
    CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
    x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
  }
  # first variable in the model terms is the response
  x$DependentVariable <- all.vars(x$terms)[1]
  return(x)
}
#' ModelPrintObject.rdrobust
#'
#' Method for rdrobust objects: builds a version-2 model print object with
#' the point estimates (one row per estimate returned in x$coef) and the
#' sample-size/bandwidth statistics reported by rdrobust.
#'
#' @param x an rdrobust model object
#'
#' @return a list with Coefficients, Statistics, ModelType, Defaults, mmpVers
#' @export
ModelPrintObject.rdrobust <- function(x)
{
  # one row per estimate; rdrobust stores coef/se/z/pv as 1-column matrices
  y <- list(Coefficients = list(Main = data.frame(Coef = as.numeric(x$coef),
                                                  SE = as.numeric(x$se),
                                                  ZStat = as.numeric(x$z),
                                                  PValue = as.numeric(x$pv),
                                                  row.names = rownames(x$coef)
                                                  )))
  #y$DependentVariables <- list(Main = x$???)
  # list(x = ..., collapse = ...) entries store paste() arguments for the
  # left/right values -- presumably collapsed later by the printer; confirm
  y$Statistics <- list(NObs = sum(x$N),
                       NObsLR = list(x = x$N,collapse = ", "),
                       EffNObsLR = list(x = x$Nh, collapse = ", "),
                       Kernel = x$kernel,
                       BWEst = list(x = x$bws[1, ], collapse = ", "),
                       BWBias = list(x = x$bws[2, ], collapse = ", "),
                       Rho = list(x = c(x$bws[1, 1]/x$bws[2, 1], x$bws[1, 2]/x$bws[2, 2]), collapse = ", ")
                       )
  y$ModelType <- "local polynomial RD point estimators with robust bias-corrected confidence intervals"
  # to do: Defaults should be moved to a "GetModelDefaults" function
  y$Defaults <- list(Coefficients = c("Main"),
                     Statistics = c("NObs", "NObsLR", "EffNObsLR", "Kernel", "BWEst", "BWBias", "Rho"))
  y$mmpVers <- 2.0
  return(y)
}
#' ModelPrintObject.lrm
#'
#' Method for rms::lrm logistic regression fits: records the lrm fit
#' statistics and the model type, then delegates coefficient extraction and
#' the remaining common fields to the rms method via NextMethod().
#'
#' @param x an lrm model object
#'
#' @return the model object augmented with Statistics, ModelType and (via
#'   the rms method) CoefValues and DependentVariable
#' @export
ModelPrintObject.lrm <- function(x)
{
  # NOTE(review): Freq is assigned twice -- here and again inside the list
  # below; the duplication is harmless but one of the two is redundant.
  x$Statistics$Freq <- x$freq
  x$Statistics <- append(x$Statistics,
                         list(
                           NObs = x$stats['Obs'],
                           Brier = x$stats['Brier'],
                           Gamma = x$stats['Gamma'],
                           MaxDeriv = x$stats['Max Deriv'],
                           P = x$stats['P'],
                           C = x$stats['C'],
                           Dxy = x$stats['Dxy'],
                           Taua = x$stats['Tau-a'],
                           Gr = x$stats['gr'],
                           Gp = x$stats['gp'],
                           Freq = x$freq,
                           Weights = ifelse(is.null(x$weights),"no", "yes")
                         ))
  x$ModelType <- 'Logistic regression model'
  # fall through to ModelPrintObject.rms for CoefValues etc.
  NextMethod()
}
#' ModelPrintObject.ols
#'
#' Method for rms::ols fits: records the OLS fit statistics (including a
#' computed adjusted R-squared), builds the CoefValues array with t-based
#' p-values, and delegates remaining common fields via NextMethod().
#'
#' @param x an ols model object
#'
#' @return the model object augmented with Statistics, CoefValues,
#'   DependentVariable, ModelType
#' @export
ModelPrintObject.ols <- function(x)
{
  x$Statistics <- append(x$Statistics,
                         list(
                           NObs = x$stats['n'],
                           R2 = x$stats['R2'],
                           Sigma = x$stats['Sigma'],
                           DF = x$df.residual,
                           Weights = ifelse(is.null(x$weights),"no", "yes")
                         ))
  # adjusted R^2 = 1 - (1 - R^2) * (n - 1) / df_residual
  x$Statistics$AdjR2 <- 1 - (1-x$Statistics$R2)*(x$Statistics$NObs-1)/(x$Statistics$DF)
  CoefTemp <- cbind(Coefficient = x$coefficients,
                    SE = sqrt(diag(x$var)), # Std. Error
                    TStat = x$coefficients/sqrt(diag(x$var)),
                    Normal= NA, # Normal
                    PValue = NA,
                    SpecialText = NA
  )
  # GetPValueFromTStat is presumably a project helper mapping t-statistics
  # (with the residual DF) to p-values (defined elsewhere) -- TODO confirm
  CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'TStat'], GetPValueFromTStat, DF = x$Statistics$DF ))
  x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
  # first variable in the model terms is the response
  x$DependentVariable <- all.vars(x$terms)[1]
  x$ModelType <- 'Ordinary least squares regression model'
  NextMethod()
}
#' ModelPrintObject.felm
#'
#' Method for lfe::felm fits: records the summary statistics, builds the
#' CoefValues array from the summary coefficient table, appends NA
#' placeholder rows for the fixed effects ("FE>>" prefix), and notes
#' clustered standard errors when present.
#'
#' @param x a felm model object
#'
#' @return the model object augmented with Statistics, CoefValues,
#'   FixedEffects, DependentVariable, ModelType
#' @export
ModelPrintObject.felm <- function(x)
{
  xsum <- summary(x)
  x$Statistics <- append(x$Statistics,
                         list(
                           NObs =xsum$N,
                           R2 = xsum$r.squared,
                           Sigma = xsum$sigma,
                           DF = xsum$df[[1]],
                           AdjR2 = xsum$adj.r.squared,
                           Weights = ifelse(is.null(x$weights),"no", "yes")
                         ))
  CoefTemp <- cbind(Coefficient = xsum$coefficients[, 1],
                    SE = xsum$coefficients[, 2], # Std. Error
                    TStat = xsum$coefficients[, 3],
                    Normal= NA, # Normal
                    PValue = xsum$coefficients[, 4],
                    SpecialText = NA
  )
  rownames(CoefTemp) <- rownames(xsum$coefficients)
  if (!is.null(x$fe) && length(x$fe)>0)
  {
    x$FixedEffects <- names(x$fe)
    # fixed effects are represented as NA placeholder rows, "FE>>" prefixed
    CoefTemp <- rbind(CoefTemp, matrix(NA, nrow = length(x$FixedEffects), ncol = 6, dimnames = list(paste0('FE>>', x$FixedEffects), NULL)))
  }
  x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
  x$DependentVariable <- x$lhs
  if (!is.null(x$clustervar)) x$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(names(x$clustervar), collapse = ', '))
  x$ModelType <- 'Ordinary least squares regression model'
  NextMethod()
}
#' ModelPrintObject.fixest
#'
#' Method for fixest fits: maps the fixest estimate into the common model
#' print object layout (Statistics, CoefValues array, fixed-effect
#' placeholder rows, dependent variable, clustering note).
#'
#' @param x a fixest model object
#'
#' @return a list with Statistics, CoefValues, FixedEffects (when present),
#'   DependentVariable, ModelType
#' @export
ModelPrintObject.fixest <- function(x)
{
  y <- list()
  y$Statistics <-
    list(
      NObs = x$nobs,
      R2 = fitstat(x, type = "r2", simplify = TRUE),
      Sigma = x$sigma2,
      DF = NA,
      AdjR2 = fitstat(x, type = "ar2", simplify = TRUE),
      Weights = NA
    )
  CoefTemp <- cbind(Coefficient = x$coeftable[, 1],
                    SE = x$coeftable[, 2], # Std. Error
                    TStat = x$coeftable[, 3],
                    Normal = NA,
                    PValue = x$coeftable[, 4],
                    SpecialText = NA
  )
  rownames(CoefTemp) <- rownames(x$coeftable)
  if (!is.null(x$fixef_vars) && length(x$fixef_vars) > 0)
  {
    y$FixedEffects <- x$fixef_vars
    # fixed effects get NA placeholder rows, "FE>>" prefixed like the felm method
    CoefTemp <- rbind(CoefTemp,
                      matrix(NA, nrow = length(x$fixef_vars),
                             ncol = 6,
                             dimnames = list(paste0("FE>>",
                                                    x$fixef_vars),
                                             NULL)))
  }
  y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp), 1, 6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
  ## deparse the LHS symbol for consistency with the other methods, which
  ## store the dependent variable as a character string
  y$DependentVariable <- deparse(terms(x$fml_all$linear)[[2]])
  ## fixed: the clustering note was previously assigned to x (the input)
  ## and silently discarded, since this method returns y
  if (!is.null(x$summary_flags) && length(x$summary_flags$cluster) > 0)
    y$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(x$summary_flags$cluster, collapse = ', '))
  y$ModelType <- 'Ordinary least squares regression model'
  return(y)
}
#' ModelPrintObject.summary.felm
#'
#' Like ModelPrintObject.felm, but for an already-summarised felm object
#' (so the coefficient table may carry robust/clustered standard errors).
#'
#' @param x An object of class \code{summary.felm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.summary.felm <- function(x)
{
x$Statistics <- append(x$Statistics,
                       list(
                         NObs =x$N,
                         R2 = x$r.squared,
                         Sigma = x$sigma,
                         DF = x$df[[1]],
                         AdjR2 = x$adj.r.squared,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = x$coefficients[, 1],
                  SE = x$coefficients[, 2], # Std. Error
                  TStat = x$coefficients[, 3],
                  Normal= NA, # Normal
                  PValue = x$coefficients[, 4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(x$coefficients)
# Placeholder rows for absorbed fixed effects ("FE>>name").
if (!is.null(x$fe) && length(x$fe)>0)
{
x$FixedEffects <- names(x$fe)
CoefTemp <- rbind(CoefTemp, matrix(NA, nrow = length(x$FixedEffects), ncol = 6, dimnames = list(paste0('FE>>', x$FixedEffects), NULL)))
}
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- x$lhs
# "Robust" detection: lfe labels the SE column e.g. "Robust s.e" when
# robust errors were requested; a cluster note overrides it below.
if (length(grep('obust', colnames(x$coefficients)[2]))>0) x$Statistics$StandardErrorInfo <- 'Robust standard errors'
if (!is.null(x$clustervar)) x$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(names(x$clustervar), collapse = ', '))
x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.plm
#'
#' Extract statistics and coefficients from a plm panel-data fit.
#'
#' @param x A fitted model of class \code{plm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.plm <- function(x)
{
warning('plm objects are processed slowly and incompletely.')
xsum <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = nobs(x),
                         R2 = as.numeric(xsum$r.squared['rsq']),
                         DF = x$df.residual,
                         FStat = xsum$fstatistic,
                         AdjR2 = as.numeric(xsum$r.squared['adjrsq']),
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = xsum$coefficients[,1],
                  SE = xsum$coefficients[,2], # Std. Error
                  TStat = xsum$coefficients[,3],
                  Normal= NA, # Normal
                  PValue = xsum$coefficients[,4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(xsum$coefficients)
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$formula)[1]
# Describe the panel specification from the non-NULL plm() call arguments
# (model/effect/...), e.g. "Panel data:within; individual".
x$ModelType <- paste0('Panel data:', paste(x$args[unlist(lapply(x$args, function(x)(!is.null(x))))], collapse = '; '))
NextMethod()
}
#' ModelPrintObject.lm
#'
#' Extract statistics and coefficients from an lm fit. The coefficient
#' table is merged back onto the full coefficient vector so that aliased
#' variables (coefficient NA, absent from summary()) are not dropped.
#'
#' @param x A fitted model of class \code{lm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.lm <- function(x)
{
# summary() is not cheap; compute it once instead of seven times.
xsum <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = length(residuals(xsum)),
                         R2 = xsum$r.squared,
                         AdjR2 = xsum$adj.r.squared,
                         Sigma = xsum$sigma,
                         DF = xsum$df[2],
                         FStat = xsum$fstatistic[1],
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
# Merge SE and t-stat onto the full coefficient vector; variables with an
# NA coefficient are kept (all.x = TRUE) instead of being dropped.
CoefTemp <- as.matrix(x$coefficients)
CoefTemp <- merge(CoefTemp, xsum$coefficients[,c(2,3)], by = 'row.names', all.x = TRUE, sort = FALSE)
rownames(CoefTemp) <- CoefTemp$Row.names
CoefTemp$Row.names <- NULL
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = TRUE)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp <- cbind(CoefTemp, Normal = NA, PValue = NA, SpecialText = NA)
colnames(CoefTemp) <- c('Coefficient', 'SE', 'TStat', 'Normal', 'PValue', 'SpecialText')
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'TStat'], GetPValueFromTStat, DF = x$Statistics$DF ))
# CoefTemp is a data.frame here, hence unlist() before building the array.
x$CoefValues <- array(unlist(CoefTemp), dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$terms)[1]
# Keep a more specific label (e.g. set by the glm method) if one exists.
if (is.null(x$ModelType))
  x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.glm
#'
#' Minimal glm support: tag the object with a family/link ModelType label,
#' then defer to the lm method for statistics and coefficients.
#'
#' @param x A fitted model of class \code{glm}.
#'
#' @return Result of the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.glm <- function(x)
{
warning('Basic glm support: Not all model details are supported.')
# Removed a stray "x$qr" expression that evaluated the QR slot without
# using it (dead code, no side effects).
# NOTE(review): the label says "Ordinary least squares" even for a glm;
# left unchanged in case downstream formatting matches on this string.
x$ModelType <- paste('Ordinary least squares regression model: ', x$family$family, '/', x$family$link, sep ='')
NextMethod()
}
#' ModelPrintObject.selection
#'
#' Extract outcome- and selection-equation coefficients from a
#' sampleSelection (Heckit) fit. Outcome coefficients go to CoefValues,
#' selection coefficients to CoefValues.Selection.
#'
#' @param x A fitted model of class \code{selection}.
#'
#' @return \code{x} augmented with the common model-print fields.
#' @export
#' @examples
ModelPrintObject.selection <- function(x)
{
xx <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = x$param$nObs,
                         R2 = xx$rSquared$R2,
                         Sigma = x$sigma,
                         DF = x$param$df,
                         AdjR2 = xx$rSquared$R2adj,
                         Mills = x$param$index$Mills,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = xx$estimate[, 'Estimate'],
                  SE = xx$estimate[, 'Std. Error'],
                  TStat = xx$estimate[, 't value'],
                  Normal= NA, # Normal
                  PValue = xx$estimate[, 'Pr(>|t|)'],
                  SpecialText = NA
)
# Split the stacked estimate table into selection (betaS) and outcome
# (betaO) rows using the index vectors stored by sampleSelection.
CoefSelection <- CoefTemp[x$param$index$betaS, ]
CoefTemp <- CoefTemp[x$param$index$betaO, ]
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$CoefValues.Selection <- array(CoefSelection, dim = c(nrow(CoefSelection),1,6), dimnames = list(rownames(CoefSelection), 'a', colnames(CoefSelection)))
x$DependentVariable <- NA
x$ModelType <- 'Heckit'
return(x)
}
#' ModelPrintObject.glmerMod
#'
#' Extract the fixed-effects part of a generalized linear mixed model
#' (lme4). Random effects are not reported.
#'
#' @param x A fitted model of class \code{glmerMod}.
#'
#' @return A fresh list with CoefValues, DependentVariable and ModelType.
#' @export
#' @examples
ModelPrintObject.glmerMod <- function(x)
{
warning('glmerMod is only partially supported by mmp yet. Only fixed effects are printed.')
y <- list()
# Wald z statistic from the Hessian-based covariance of the fixed effects.
CoefTemp <- cbind(Coefficient = fixef(x),
                  SE = sqrt(diag(vcov(x, use.hessian = T))),
                  TStat = NA,
                  Normal= fixef(x)/sqrt(diag(vcov(x, use.hessian = T))),
                  PValue = NA,
                  SpecialText = NA
)
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = T)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
y$DependentVariable <- colnames(model.frame(x))[1]
y$ModelType <- paste('Generalized linear mixed-effects models; link:',family(x)$link, sep =' ')
return(y)
}
#' ModelPrintObject.lmerMod
#'
#' Extract the fixed-effects part of a linear mixed model (lme4). Random
#' effects are not reported. R2 is reported as "marginal, conditional"
#' via piecewiseSEM::rsquared().
#'
#' @param x A fitted model of class \code{lmerMod}.
#'
#' @return A fresh list with CoefValues, Statistics, DependentVariable and
#'   ModelType.
#' @export
#' @examples
ModelPrintObject.lmerMod <- function(x)
{
warning('lmerMod is only partially supported by mmp yet. Only fixed effects are printed.')
# NOTE(review): library() inside a function attaches piecewiseSEM globally
# as a side effect; piecewiseSEM::rsquared() would be cleaner.
library("piecewiseSEM")
y <- list()
# Wald z statistic from the fixed-effects covariance matrix.
CoefTemp <- cbind(Coefficient = fixef(x),
                  SE = sqrt(diag(vcov(x, use.hessian = F))),
                  TStat = NA,
                  Normal= fixef(x)/sqrt(diag(vcov(x, use.hessian = F))),
                  PValue = NA,
                  SpecialText = NA
)
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = T)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
y$Statistics <- append(y$Statistics,
                       list(
                         NObs = nobs(x),
                         R2 = paste0(round(rsquared(x)$Marginal, digits=2), ', ', round(rsquared(x)$Conditional, digits=2))
                       ))
y$DependentVariable <- colnames(model.frame(x))[1]
y$ModelType <- paste('Linear mixed-effects models; link:',family(x)$link, sep =' ')
return(y)
}
#' ModelPrintObject.boot
#'
#' Extract point estimates and bootstrap standard errors (column-wise SD
#' of the replicates) from a boot object.
#'
#' @param x An object of class \code{boot}, with point estimates \code{t0}
#'   and replicate matrix \code{t}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.boot <- function(x)
{
warning('Preliminary support for boot only.')
# Bootstrap SE = SD of the replicates per statistic; computed once instead
# of twice as before.
BootSE <- apply(x$t, 2, sd)
CoefTemp <- cbind(Coefficient = x$t0,
                  SE = BootSE,
                  TStat = NA,
                  Normal= x$t0/BootSE, # Wald z statistic
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$ModelType <-'boot object'
NextMethod()
}
#' ModelPrintObject.tobit
#'
#' Extract statistics and coefficients from an AER::tobit fit.
#'
#' @param x A fitted model of class \code{tobit}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.tobit <- function(x)
{
warning('Preliminary support for tobit only.')
x$Statistics <- append(x$Statistics,
                       list(
                         Iterations = x$iter,
                         LogLik = x$loglik[2],
                         Scale = x$scale,
                         DF =x$df
                       ))
# Drop the last diagonal entry of the covariance matrix -- presumably the
# scale parameter -- so SE aligns with x$coefficients. TODO confirm.
TempSE <- sqrt(diag(x$var))[-nrow(x$var)]
CoefTemp <- cbind(Coefficient = x$coefficients,
                  SE = TempSE,
                  TStat = NA,
                  Normal= x$coefficients/TempSE, # Wald z statistic
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$terms)[1]
x$ModelType = 'Tobit Regression'
NextMethod()
}
#' ModelPrintObject.DiscreteEffects
#'
#' Convert a DiscreteEffects table (columns Estimate, SE, pvalue) into the
#' common model-print structure.
#'
#' @param x An object of class \code{DiscreteEffects}.
#'
#' @return A fresh list with CoefValues and ModelType.
#' @export
#' @examples
ModelPrintObject.DiscreteEffects <- function(x)
{
CoefTemp <- cbind(Coefficient = x$Estimate,
                  SE = x$SE,
                  TStat = NA,
                  Normal= x$Estimate/x$SE, # Wald z statistic
                  PValue = x$pvalue,
                  SpecialText = NA
)
y <- list()
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
# Strip the "DE" prefix from row names; assumes x is a data.frame-like
# object whose rownames carry that prefix -- TODO confirm against producer.
rownames(y$CoefValues) <- sub('^DE', "", rownames(x) )
y$ModelType <- 'Discrete Effect'
return(y)
}
#' ModelPrintObject.CoefTest
#'
#' Convert a CoefTest object (fields Estimate, SE, pvalue) into the common
#' model-print structure. PValue is recomputed from the z statistic,
#' overwriting the supplied pvalue column.
#'
#' @param x An object of class \code{CoefTest}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#'
#' @examples
ModelPrintObject.CoefTest <- function(x)
{
warning('Untested ModelPrintObject. Probably requires output to be a list.')
CoefTemp <- cbind(Coefficient = x$Estimate,
                  SE = x$SE,
                  TStat = NA,
                  Normal= x$Estimate/x$SE, # Wald z statistic
                  PValue = x$pvalue,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
# NOTE(review): rownames(x) on a plain list is NULL; this only works if x
# is data.frame-like -- consistent with the warning above.
rownames(x$CoefValues) <- sub('^DE', "", rownames(x) )
x$ModelType <- 'Coef Test'
NextMethod()
}
#' ModelPrintObject.SummaryStat
#'
#' Wrap a vector of summary statistics as a coefficient-only model-print
#' object (no SE / test statistics).
#'
#' @param x A (named) vector of class \code{SummaryStat}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.SummaryStat <- function(x)
{
warning('Untested ModelPrintObject. Probably requires output to be a list.')
CoefTemp <- cbind(Coefficient = x,
                  SE = NA,
                  TStat = NA,
                  Normal= NA,
                  PValue = NA,
                  SpecialText = NA
)
# NOTE(review): this guard looks inverted -- when x is an unnamed vector
# rownames(x) is NULL, so the assignment is a no-op; confirm intent.
if (is.null(names(x))) rownames(CoefTemp) <- rownames(x)
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
rownames(x$CoefValues) <- sub('^DE', "", rownames(x) )
x$ModelType <- 'SummaryStat'
NextMethod()
}
#' ModelPrintObject.data.frame
#'
#' Data frames carry no model information to extract; defer directly to
#' the next method in the dispatch chain. (A column-based conversion once
#' lived here but was disabled.)
#'
#' @param x A data.frame.
#'
#' @return Result of the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.data.frame <- function(x)
{
NextMethod()
}
| /R/ModelPrintObjects.R | no_license | MarcoPortmann/mmp | R | false | false | 24,174 | r |
#' Extract model information from different classes
#'
#' Generic entry point: dispatches on class(x) to one of the
#' ModelPrintObject.* methods, each of which converts a fitted model into
#' the common printing structure (Statistics, CoefValues, ModelType, ...).
#'
#' @param x A fitted model object of any supported class.
#'
#' @return A model-print object for \code{x}.
#' @export
#' @examples
ModelPrintObject <- function(x){
UseMethod('ModelPrintObject', x)
}
#' ConvertNewToOldMPO
#'
#' Convert a version >= 2 model-print object (Coefficients$Main data frame)
#' back to the legacy layout based on the rows x 1 x 6 CoefValues array.
#' Objects without \code{mmpVers >= 2} are returned unchanged.
#'
#' @param x A model-print object.
#'
#' @return \code{x} in the legacy layout.
#' @export
#' @examples
ConvertNewToOldMPO <- function(x)
{
if (!("mmpVers" %in% names(x) && x$mmpVers >= 2))
  return(x)
x$CoefValues <- array(NA, dim = c(nrow(x$Coefficients$Main),1,6), dimnames = list(rownames(x$Coefficients$Main), 'a', c("Coefficient", "SE", "TStat", "Normal", "PValue", "special")))
x$CoefValues[,"a", "Coefficient"] <- x$Coefficients$Main$Coef
if (length(x$Coefficients$Main$SE) > 0) x$CoefValues[,"a", "SE"] <- x$Coefficients$Main$SE
if (length(x$Coefficients$Main$TStat) > 0) x$CoefValues[,"a", "TStat"] <- x$Coefficients$Main$TStat
# Bug fix: the guard used to test $Z while the assignment read $ZStat, so
# the Normal column was never populated (v2 producers such as the rdrobust
# method store the z statistic under ZStat).
if (length(x$Coefficients$Main$ZStat) > 0) x$CoefValues[,"a", "Normal"] <- x$Coefficients$Main$ZStat
if (length(x$Coefficients$Main$PValue) > 0) x$CoefValues[,"a", "PValue"] <- x$Coefficients$Main$PValue
# NOTE(review): field name "DependendVariables" (sic) matches what v2
# producers appear to write; do not "fix" the spelling here alone.
x$DependentVariable <- x$DependendVariables$Main
return(x)
}
#' ModelPrintObject.default
#'
#' Fallback method for classes without a dedicated extractor: the object is
#' passed through unchanged.
#'
#' @param x Any object.
#'
#' @return \code{x}, unmodified.
#' @export
#' @examples
ModelPrintObject.default <- function(x)
{
x
}
#' ModelPrintObject.rms
#'
#' Shared extractor for rms fits (called via NextMethod from the lrm/ols
#' methods): collects common statistics, notes how standard errors were
#' obtained, and builds CoefValues if a subclass has not done so already.
#'
#' @param x A fitted model inheriting from class \code{rms}.
#'
#' @return \code{x} augmented with the common model-print fields.
#' @export
#' @examples
ModelPrintObject.rms <- function(x)
{
x$Statistics <- append(x$Statistics,
                       list(
                         R2 = x$stats['R2'],
                         LogLik = x$stats['Model L.R.'],
                         G = x$stats['g'],
                         Sigma = x$stats['Sigma']
                       ))
# orig.var is set when robcov() replaced the covariance matrix.
# NOTE(review): scalar condition uses elementwise `&`; `&&` would be the
# idiomatic choice here.
if (!is.null(x$orig.var) & is.null(x$clusterInfo))
{
x$Statistics <- append(x$Statistics,
                       list(
                         StandardErrorInfo = 'SE by robcov()'
                       ))
}
if (!is.null(x$clusterInfo))
{
x$Statistics <- append(x$Statistics,
                       list(
                         StandardErrorInfo = paste('Clustered on ', x$clusterInfo$name, '; n = ', x$clusterInfo$n, sep ='')
                       ))
}
# Only build CoefValues if a more specific method has not already done so.
if (is.null(x$CoefValues))
{
CoefTemp <- cbind(Coefficient = x$coefficients,
                  SE = sqrt(diag(x$var)), # Std. Error
                  TStat = NA,
                  Normal= x$coefficients/sqrt(diag(x$var)), # Normal
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
}
x$DependentVariable <- all.vars(x$terms)[1]
return(x)
}
#' ModelPrintObject.rdrobust
#'
#' Map an rdrobust regression-discontinuity fit to the version-2
#' model-print layout (Coefficients$Main data frame plus Statistics list).
#'
#' @param x A fitted object of class \code{rdrobust}.
#'
#' @return A version-2 model-print list (mmpVers = 2.0).
#' @export
#' @examples
ModelPrintObject.rdrobust <- function(x)
{
# Coefficient table: conventional / bias-corrected / robust rows.
main <- data.frame(Coef = as.numeric(x$coef),
                   SE = as.numeric(x$se),
                   ZStat = as.numeric(x$z),
                   PValue = as.numeric(x$pv),
                   row.names = rownames(x$coef))
# Left/right quantities are kept as (values, separator) pairs so the
# printer can join them, e.g. "480, 520".
stats <- list(NObs = sum(x$N),
              NObsLR = list(x = x$N, collapse = ", "),
              EffNObsLR = list(x = x$Nh, collapse = ", "),
              Kernel = x$kernel,
              BWEst = list(x = x$bws[1, ], collapse = ", "),
              BWBias = list(x = x$bws[2, ], collapse = ", "),
              Rho = list(x = c(x$bws[1, 1]/x$bws[2, 1], x$bws[1, 2]/x$bws[2, 2]), collapse = ", "))
list(Coefficients = list(Main = main),
     Statistics = stats,
     ModelType = "local polynomial RD point estimators with robust bias-corrected confidence intervals",
     # Defaults tell the printer which pieces to show by default.
     # TODO: move to a "GetModelDefaults" function.
     Defaults = list(Coefficients = c("Main"),
                     Statistics = c("NObs", "NObsLR", "EffNObsLR", "Kernel", "BWEst", "BWBias", "Rho")),
     mmpVers = 2.0)
}
#' ModelPrintObject.lrm
#'
#' Collect logistic-regression statistics from an rms::lrm fit, then defer
#' to the rms method for coefficients.
#'
#' @param x A fitted model of class \code{lrm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method (ModelPrintObject.rms) in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.lrm <- function(x)
{
x$Statistics$Freq <- x$freq
# Discrimination / rank statistics reported by lrm (x$stats).
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = x$stats['Obs'],
                         Brier = x$stats['Brier'],
                         Gamma = x$stats['Gamma'],
                         MaxDeriv = x$stats['Max Deriv'],
                         P = x$stats['P'],
                         C = x$stats['C'],
                         Dxy = x$stats['Dxy'],
                         Taua = x$stats['Tau-a'],
                         Gr = x$stats['gr'],
                         Gp = x$stats['gp'],
                         Freq = x$freq,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
x$ModelType <- 'Logistic regression model'
NextMethod()
}
#' ModelPrintObject.ols
#'
#' Collect OLS statistics and coefficients from an rms::ols fit; adjusted
#' R2 is derived manually since ols does not report it.
#'
#' @param x A fitted model of class \code{ols}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method (ModelPrintObject.rms) in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.ols <- function(x)
{
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = x$stats['n'],
                         R2 = x$stats['R2'],
                         Sigma = x$stats['Sigma'],
                         DF = x$df.residual,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
# Standard adjusted-R2 formula: 1 - (1 - R2) * (n - 1) / df.residual.
x$Statistics$AdjR2 <- 1 - (1-x$Statistics$R2)*(x$Statistics$NObs-1)/(x$Statistics$DF)
CoefTemp <- cbind(Coefficient = x$coefficients,
                  SE = sqrt(diag(x$var)), # Std. Error
                  TStat = x$coefficients/sqrt(diag(x$var)),
                  Normal= NA, # Normal
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'TStat'], GetPValueFromTStat, DF = x$Statistics$DF ))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$terms)[1]
x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.felm
#'
#' Extract fit statistics, coefficients and fixed-effect names from an
#' lfe::felm fit into the common model-print structure.
#'
#' @param x A fitted model of class \code{felm}.
#'
#' @return \code{x} augmented with \code{Statistics}, \code{CoefValues},
#'   \code{FixedEffects} (if any), \code{DependentVariable} and
#'   \code{ModelType}; result of the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.felm <- function(x)
{
xsum <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs =xsum$N,
                         R2 = xsum$r.squared,
                         Sigma = xsum$sigma,
                         DF = xsum$df[[1]],
                         AdjR2 = xsum$adj.r.squared,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
# Columns of summary(felm)$coefficients: Estimate, Std. Error, t value, Pr(>|t|).
CoefTemp <- cbind(Coefficient = xsum$coefficients[, 1],
                  SE = xsum$coefficients[, 2], # Std. Error
                  TStat = xsum$coefficients[, 3],
                  Normal= NA, # Normal
                  PValue = xsum$coefficients[, 4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(xsum$coefficients)
# Append one all-NA placeholder row per absorbed fixed effect ("FE>>name")
# so the printer can show which FEs were included.
if (!is.null(x$fe) && length(x$fe)>0)
{
x$FixedEffects <- names(x$fe)
CoefTemp <- rbind(CoefTemp, matrix(NA, nrow = length(x$FixedEffects), ncol = 6, dimnames = list(paste0('FE>>', x$FixedEffects), NULL)))
}
# CoefValues layout used throughout this file: rows x 1 x 6 array.
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- x$lhs
if (!is.null(x$clustervar)) x$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(names(x$clustervar), collapse = ', '))
x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.fixest
#'
#' Extract coefficients, fit statistics and fixed-effect names from a
#' fixest estimation into the common model-print structure.
#'
#' @param x A fitted model of class \code{fixest}.
#'
#' @return A fresh list with \code{Statistics}, \code{CoefValues},
#'   \code{FixedEffects} (if any), \code{DependentVariable} and
#'   \code{ModelType}.
#' @export
#' @examples
ModelPrintObject.fixest <- function(x)
{
y <- list()
y$Statistics <-
  list(
    NObs =x$nobs,
    R2 = fitstat(x, type = "r2", simplify = TRUE),
    Sigma = x$sigma2,
    DF = NA,
    AdjR2 = fitstat(x, type = "ar2", simplify = TRUE),
    Weights = NA
  )
# Columns of x$coeftable: Estimate, Std. Error, t value, Pr(>|t|).
CoefTemp <- cbind(Coefficient = x$coeftable[, 1],
                  SE = x$coeftable[, 2], # Std. Error
                  TStat = x$coeftable[, 3],
                  Normal= NA, # Normal
                  PValue = x$coeftable[, 4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(x$coeftable)
# Append one all-NA placeholder row per fixed effect ("FE>>name").
if (!is.null(x$fixef_vars) && length(x$fixef_vars)>0)
{
y$FixedEffects <- x$fixef_vars
CoefTemp <- rbind(CoefTemp,
                  matrix(NA, nrow = length(x$fixef_vars),
                         ncol = 6,
                         dimnames = list(paste0("FE>>", x$fixef_vars),
                                         NULL)))
}
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
y$DependentVariable <- terms(x$fml_all$linear)[[2]]
# Bug fix: the cluster note must be stored on the returned object y, not on
# the input x -- the old assignment to x$Statistics was silently discarded
# because this method returns y.
if (!is.null(x$summary_flags) && length(x$summary_flags$cluster) > 0) y$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(x$summary_flags$cluster, collapse = ', '))
y$ModelType <- 'Ordinary least squares regression model'
return(y)
}
#' ModelPrintObject.summary.felm
#'
#' Like ModelPrintObject.felm, but for an already-summarised felm object
#' (so the coefficient table may carry robust/clustered standard errors).
#'
#' @param x An object of class \code{summary.felm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.summary.felm <- function(x)
{
x$Statistics <- append(x$Statistics,
                       list(
                         NObs =x$N,
                         R2 = x$r.squared,
                         Sigma = x$sigma,
                         DF = x$df[[1]],
                         AdjR2 = x$adj.r.squared,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = x$coefficients[, 1],
                  SE = x$coefficients[, 2], # Std. Error
                  TStat = x$coefficients[, 3],
                  Normal= NA, # Normal
                  PValue = x$coefficients[, 4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(x$coefficients)
# Placeholder rows for absorbed fixed effects ("FE>>name").
if (!is.null(x$fe) && length(x$fe)>0)
{
x$FixedEffects <- names(x$fe)
CoefTemp <- rbind(CoefTemp, matrix(NA, nrow = length(x$FixedEffects), ncol = 6, dimnames = list(paste0('FE>>', x$FixedEffects), NULL)))
}
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- x$lhs
# "Robust" detection: lfe labels the SE column e.g. "Robust s.e" when
# robust errors were requested; a cluster note overrides it below.
if (length(grep('obust', colnames(x$coefficients)[2]))>0) x$Statistics$StandardErrorInfo <- 'Robust standard errors'
if (!is.null(x$clustervar)) x$Statistics$StandardErrorInfo <- paste0('Clustered on: ', paste(names(x$clustervar), collapse = ', '))
x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.plm
#'
#' Extract statistics and coefficients from a plm panel-data fit.
#'
#' @param x A fitted model of class \code{plm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.plm <- function(x)
{
warning('plm objects are processed slowly and incompletely.')
xsum <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = nobs(x),
                         R2 = as.numeric(xsum$r.squared['rsq']),
                         DF = x$df.residual,
                         FStat = xsum$fstatistic,
                         AdjR2 = as.numeric(xsum$r.squared['adjrsq']),
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = xsum$coefficients[,1],
                  SE = xsum$coefficients[,2], # Std. Error
                  TStat = xsum$coefficients[,3],
                  Normal= NA, # Normal
                  PValue = xsum$coefficients[,4],
                  SpecialText = NA
)
rownames(CoefTemp) <- rownames(xsum$coefficients)
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$formula)[1]
# Describe the panel specification from the non-NULL plm() call arguments
# (model/effect/...), e.g. "Panel data:within; individual".
x$ModelType <- paste0('Panel data:', paste(x$args[unlist(lapply(x$args, function(x)(!is.null(x))))], collapse = '; '))
NextMethod()
}
#' ModelPrintObject.lm
#'
#' Extract statistics and coefficients from an lm fit. The coefficient
#' table is merged back onto the full coefficient vector so that aliased
#' variables (coefficient NA, absent from summary()) are not dropped.
#'
#' @param x A fitted model of class \code{lm}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.lm <- function(x)
{
# summary() is not cheap; compute it once instead of seven times.
xsum <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = length(residuals(xsum)),
                         R2 = xsum$r.squared,
                         AdjR2 = xsum$adj.r.squared,
                         Sigma = xsum$sigma,
                         DF = xsum$df[2],
                         FStat = xsum$fstatistic[1],
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
# Merge SE and t-stat onto the full coefficient vector; variables with an
# NA coefficient are kept (all.x = TRUE) instead of being dropped.
CoefTemp <- as.matrix(x$coefficients)
CoefTemp <- merge(CoefTemp, xsum$coefficients[,c(2,3)], by = 'row.names', all.x = TRUE, sort = FALSE)
rownames(CoefTemp) <- CoefTemp$Row.names
CoefTemp$Row.names <- NULL
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = TRUE)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp <- cbind(CoefTemp, Normal = NA, PValue = NA, SpecialText = NA)
colnames(CoefTemp) <- c('Coefficient', 'SE', 'TStat', 'Normal', 'PValue', 'SpecialText')
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'TStat'], GetPValueFromTStat, DF = x$Statistics$DF ))
# CoefTemp is a data.frame here, hence unlist() before building the array.
x$CoefValues <- array(unlist(CoefTemp), dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$terms)[1]
# Keep a more specific label (e.g. set by the glm method) if one exists.
if (is.null(x$ModelType))
  x$ModelType <- 'Ordinary least squares regression model'
NextMethod()
}
#' ModelPrintObject.glm
#'
#' Minimal glm support: tag the object with a family/link ModelType label,
#' then defer to the lm method for statistics and coefficients.
#'
#' @param x A fitted model of class \code{glm}.
#'
#' @return Result of the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.glm <- function(x)
{
warning('Basic glm support: Not all model details are supported.')
# Removed a stray "x$qr" expression that evaluated the QR slot without
# using it (dead code, no side effects).
# NOTE(review): the label says "Ordinary least squares" even for a glm;
# left unchanged in case downstream formatting matches on this string.
x$ModelType <- paste('Ordinary least squares regression model: ', x$family$family, '/', x$family$link, sep ='')
NextMethod()
}
#' ModelPrintObject.selection
#'
#' Extract outcome- and selection-equation coefficients from a
#' sampleSelection (Heckit) fit. Outcome coefficients go to CoefValues,
#' selection coefficients to CoefValues.Selection.
#'
#' @param x A fitted model of class \code{selection}.
#'
#' @return \code{x} augmented with the common model-print fields.
#' @export
#' @examples
ModelPrintObject.selection <- function(x)
{
xx <- summary(x)
x$Statistics <- append(x$Statistics,
                       list(
                         NObs = x$param$nObs,
                         R2 = xx$rSquared$R2,
                         Sigma = x$sigma,
                         DF = x$param$df,
                         AdjR2 = xx$rSquared$R2adj,
                         Mills = x$param$index$Mills,
                         Weights = ifelse(is.null(x$weights),"no", "yes")
                       ))
CoefTemp <- cbind(Coefficient = xx$estimate[, 'Estimate'],
                  SE = xx$estimate[, 'Std. Error'],
                  TStat = xx$estimate[, 't value'],
                  Normal= NA, # Normal
                  PValue = xx$estimate[, 'Pr(>|t|)'],
                  SpecialText = NA
)
# Split the stacked estimate table into selection (betaS) and outcome
# (betaO) rows using the index vectors stored by sampleSelection.
CoefSelection <- CoefTemp[x$param$index$betaS, ]
CoefTemp <- CoefTemp[x$param$index$betaO, ]
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$CoefValues.Selection <- array(CoefSelection, dim = c(nrow(CoefSelection),1,6), dimnames = list(rownames(CoefSelection), 'a', colnames(CoefSelection)))
x$DependentVariable <- NA
x$ModelType <- 'Heckit'
return(x)
}
#' ModelPrintObject.glmerMod
#'
#' Extract the fixed-effects part of a generalized linear mixed model
#' (lme4). Random effects are not reported.
#'
#' @param x A fitted model of class \code{glmerMod}.
#'
#' @return A fresh list with CoefValues, DependentVariable and ModelType.
#' @export
#' @examples
ModelPrintObject.glmerMod <- function(x)
{
warning('glmerMod is only partially supported by mmp yet. Only fixed effects are printed.')
y <- list()
# Wald z statistic from the Hessian-based covariance of the fixed effects.
CoefTemp <- cbind(Coefficient = fixef(x),
                  SE = sqrt(diag(vcov(x, use.hessian = T))),
                  TStat = NA,
                  Normal= fixef(x)/sqrt(diag(vcov(x, use.hessian = T))),
                  PValue = NA,
                  SpecialText = NA
)
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = T)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
y$DependentVariable <- colnames(model.frame(x))[1]
y$ModelType <- paste('Generalized linear mixed-effects models; link:',family(x)$link, sep =' ')
return(y)
}
#' ModelPrintObject.lmerMod
#'
#' Extract the fixed-effects part of a linear mixed model (lme4). Random
#' effects are not reported. R2 is reported as "marginal, conditional"
#' via piecewiseSEM::rsquared().
#'
#' @param x A fitted model of class \code{lmerMod}.
#'
#' @return A fresh list with CoefValues, Statistics, DependentVariable and
#'   ModelType.
#' @export
#' @examples
ModelPrintObject.lmerMod <- function(x)
{
warning('lmerMod is only partially supported by mmp yet. Only fixed effects are printed.')
# NOTE(review): library() inside a function attaches piecewiseSEM globally
# as a side effect; piecewiseSEM::rsquared() would be cleaner.
library("piecewiseSEM")
y <- list()
# Wald z statistic from the fixed-effects covariance matrix.
CoefTemp <- cbind(Coefficient = fixef(x),
                  SE = sqrt(diag(vcov(x, use.hessian = F))),
                  TStat = NA,
                  Normal= fixef(x)/sqrt(diag(vcov(x, use.hessian = F))),
                  PValue = NA,
                  SpecialText = NA
)
loc <- grep("^\\(Intercept\\)$", rownames(CoefTemp), perl = T)
if (length(loc)>0) rownames(CoefTemp)[loc] <- "Intercept"
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
y$Statistics <- append(y$Statistics,
                       list(
                         NObs = nobs(x),
                         R2 = paste0(round(rsquared(x)$Marginal, digits=2), ', ', round(rsquared(x)$Conditional, digits=2))
                       ))
y$DependentVariable <- colnames(model.frame(x))[1]
y$ModelType <- paste('Linear mixed-effects models; link:',family(x)$link, sep =' ')
return(y)
}
#' ModelPrintObject.boot
#'
#' Extract point estimates and bootstrap standard errors (column-wise SD
#' of the replicates) from a boot object.
#'
#' @param x An object of class \code{boot}, with point estimates \code{t0}
#'   and replicate matrix \code{t}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.boot <- function(x)
{
warning('Preliminary support for boot only.')
# Bootstrap SE = SD of the replicates per statistic; computed once instead
# of twice as before.
BootSE <- apply(x$t, 2, sd)
CoefTemp <- cbind(Coefficient = x$t0,
                  SE = BootSE,
                  TStat = NA,
                  Normal= x$t0/BootSE, # Wald z statistic
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$ModelType <-'boot object'
NextMethod()
}
#' ModelPrintObject.tobit
#'
#' Extract statistics and coefficients from an AER::tobit fit.
#'
#' @param x A fitted model of class \code{tobit}.
#'
#' @return \code{x} augmented with the common model-print fields; result of
#'   the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.tobit <- function(x)
{
warning('Preliminary support for tobit only.')
x$Statistics <- append(x$Statistics,
                       list(
                         Iterations = x$iter,
                         LogLik = x$loglik[2],
                         Scale = x$scale,
                         DF =x$df
                       ))
# Drop the last diagonal entry of the covariance matrix -- presumably the
# scale parameter -- so SE aligns with x$coefficients. TODO confirm.
TempSE <- sqrt(diag(x$var))[-nrow(x$var)]
CoefTemp <- cbind(Coefficient = x$coefficients,
                  SE = TempSE,
                  TStat = NA,
                  Normal= x$coefficients/TempSE, # Wald z statistic
                  PValue = NA,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
x$DependentVariable <- all.vars(x$terms)[1]
x$ModelType = 'Tobit Regression'
NextMethod()
}
#' ModelPrintObject.DiscreteEffects
#'
#' Convert a DiscreteEffects table (columns Estimate, SE, pvalue) into the
#' common model-print structure.
#'
#' @param x An object of class \code{DiscreteEffects}.
#'
#' @return A fresh list with CoefValues and ModelType.
#' @export
#' @examples
ModelPrintObject.DiscreteEffects <- function(x)
{
CoefTemp <- cbind(Coefficient = x$Estimate,
                  SE = x$SE,
                  TStat = NA,
                  Normal= x$Estimate/x$SE, # Wald z statistic
                  PValue = x$pvalue,
                  SpecialText = NA
)
y <- list()
y$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
# Strip the "DE" prefix from row names; assumes x is a data.frame-like
# object whose rownames carry that prefix -- TODO confirm against producer.
rownames(y$CoefValues) <- sub('^DE', "", rownames(x) )
y$ModelType <- 'Discrete Effect'
return(y)
}
#' ModelPrintObject.CoefTest
#'
#' Convert a CoefTest object (fields Estimate, SE, pvalue) into the common
#' model-print structure. PValue is recomputed from the z statistic,
#' overwriting the supplied pvalue column.
#'
#' @param x An object of class \code{CoefTest}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#'
#' @examples
ModelPrintObject.CoefTest <- function(x)
{
warning('Untested ModelPrintObject. Probably requires output to be a list.')
CoefTemp <- cbind(Coefficient = x$Estimate,
                  SE = x$SE,
                  TStat = NA,
                  Normal= x$Estimate/x$SE, # Wald z statistic
                  PValue = x$pvalue,
                  SpecialText = NA
)
CoefTemp[, 'PValue'] <- unlist(lapply(CoefTemp[, 'Normal'], GetPValueFromNormal))
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
# NOTE(review): rownames(x) on a plain list is NULL; this only works if x
# is data.frame-like -- consistent with the warning above.
rownames(x$CoefValues) <- sub('^DE', "", rownames(x) )
x$ModelType <- 'Coef Test'
NextMethod()
}
#' ModelPrintObject.SummaryStat
#'
#' Wrap a vector of summary statistics as a coefficient-only model-print
#' object (no SE / test statistics).
#'
#' @param x A (named) vector of class \code{SummaryStat}.
#'
#' @return \code{x} augmented with CoefValues and ModelType; result of the
#'   next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.SummaryStat <- function(x)
{
warning('Untested ModelPrintObject. Probably requires output to be a list.')
CoefTemp <- cbind(Coefficient = x,
                  SE = NA,
                  TStat = NA,
                  Normal= NA,
                  PValue = NA,
                  SpecialText = NA
)
# NOTE(review): this guard looks inverted -- when x is an unnamed vector
# rownames(x) is NULL, so the assignment is a no-op; confirm intent.
if (is.null(names(x))) rownames(CoefTemp) <- rownames(x)
x$CoefValues <- array(CoefTemp, dim = c(nrow(CoefTemp),1,6), dimnames = list(rownames(CoefTemp), 'a', colnames(CoefTemp)))
rownames(x$CoefValues) <- sub('^DE', "", rownames(x) )
x$ModelType <- 'SummaryStat'
NextMethod()
}
#' ModelPrintObject.data.frame
#'
#' Data frames carry no model information to extract; defer directly to
#' the next method in the dispatch chain. (A column-based conversion once
#' lived here but was disabled.)
#'
#' @param x A data.frame.
#'
#' @return Result of the next method in the dispatch chain.
#' @export
#' @examples
ModelPrintObject.data.frame <- function(x)
{
NextMethod()
}
|
# Compute the population Fisher Information Matrix (FIM) for a nonlinear
# mixed-effects model over several elementary designs (groups of sampling
# times), via first-order linearisation of the model around the fixed effects.
#
# Arguments:
#   equation     - model response, as a text expression in `parameters`, "t" (and `dose`)
#   parameters   - character vector of fixed-effect parameter names
#   beta         - numeric values of the fixed effects, same order as `parameters`
#   o            - variances of the inter-subject random effects (diagonal of omega)
#   sigma        - c(sig.inter, sig.slope): residual error sd model (sig.inter + sig.slope*f)
#   Trand        - random-effect model per parameter: 1 = additive, 2 = exponential
#   t_group      - list of sampling-time vectors, one per design group
#   d            - dose value per design group (visible to `equation` as `dose`)
#   PropSubjects - proportion of subjects allocated to each group
#   nbTot        - total number of subjects
#
# Returns: list(M_F, det(M_F)) when the last design has a single sampling time
# or when M_F cannot be inverted; otherwise
# list(M_F, determinant, D-optimality criterion, SE, RSE).
funFIMpop <- function(equation,parameters,beta,o,sigma,t_group,Trand,d,PropSubjects,nbTot){
#List of names of the fixed effects parameters and sampling times
ParametersInEquation<-c(parameters,"t")
# PSI = full parameter vector for differentiation: fixed effects + error-model params
PSI<-c(parameters,"sig.inter","sig.slope")
lpsi<-length(PSI)
lengthParameters<-length(parameters)
#(Diagonal Matrix of) variance for inter-subject random effects:
omega<-diag(o)
#Parameters of standard deviation model of residual error (sig.inter+sig.slope*f)^2:
sig.inter<-sigma[1]
sig.slope<-sigma[2]
#get derivatives of sigma
# Residual variance model as an expression so deriv() can differentiate it w.r.t. PSI
Vmodel<- parse(text = paste0("( sig.inter + sig.slope * fixed )^2"))
dv<-deriv(Vmodel[[1]],PSI)
#gather all groups of protocol
# M_f holds each group's weighted contribution; M_F accumulates the total FIM
M_f<-list()
M_F <- matrix(rep(0),nrow=lpsi+lengthParameters,ncol=lpsi+lengthParameters)
for(q in 1:length(t_group)){
#design of sampling times
t<-t_group[[q]]
#dose value
dose<-d[q]
#calculate matrix E for n individuals
equatf <- parse(text = equation, n=-1)
# NOTE(review): the argument of f is not actually used inside the body --
# eval() resolves the parameter names, t and dose from this function's
# environment (populated by assign() below). Confirm before refactoring.
f<-function(ParametersInEquation){eval(equatf[[1]])}
#Fixed effects parameters values
# Bind each parameter name to its beta value in the local environment so that
# eval()/deriv() below can resolve them by name.
for(i in 1:lengthParameters){
assign(parameters[i],beta[i])
}
#calculate the observations with personnal parameters
parameterValues <- c(beta,t)
fixed<-f(parameterValues)
#calculate variance Var
# Diagonal residual variance at the predicted observations
var<-diag((sig.inter+sig.slope*fixed)^2)
#get derivatives for fixed parameters
df<-deriv(equatf[[1]],PSI)
mdf<-attributes(eval(df))$gradient
#delete the last two columns (correspond to sig.inter and sig.slope)
mdfi <- mdf[,-c(lpsi-1,lpsi)]
#complete derivative for random effect model
#Random effect model (1) = additive (2) = exponential
#------------------------------------------------------------------
# For additive random effects the sensitivity is unscaled (factor 1); for
# exponential effects it is scaled by the parameter value beta.
beta0 <- beta
beta0[which(Trand==1)] <- 1
mdfie <- mdfi %*% diag(beta0)
#calculate variance Vi
# Vi = marginal variance of the observations under first-order linearisation
Vi <- mdfie %*% omega %*% t(mdfie) + var
#inverse of matrix Vi
SVi <- solve(Vi)
#compute derivatives of sigma
mdv<-attributes(eval(dv))$gradient
#calculate matrix part A
# Block A: information about the fixed effects
M_A <- t(mdfi) %*% SVi %*% mdfi
#complete the rest of the matrix with 0
M_A <- cbind(M_A,matrix(rep(0),ncol=lpsi,nrow=lengthParameters))
M_A <- rbind(M_A,matrix(rep(0),ncol=lpsi+lengthParameters,nrow=lpsi))
#calculate matrix part B
#initialize the matrix with 0
# Block B: information about the variance components (omega diagonal and sigma)
M_B <- matrix(rep(0),nrow=lpsi+lengthParameters,ncol=lpsi+lengthParameters)
#prepare a list of matrix of derivatives of sigma to simplify usage
# With a single sampling time the gradient collapses to a scalar, hence the branch
if(length(t)==1){
m <- lapply(c((lpsi-1),lpsi),function(i,mdv) mdv[i],mdv=mdv )
}else{
m <- lapply(c((lpsi-1),lpsi),function(i,mdv) diag(mdv[,i]),mdv=mdv )
}
#calculate first three rows of part B
# Rows of B corresponding to the random-effect variances: each entry is
# 1/2 * tr(dV/dtheta_i %*% V^-1 %*% dV/dtheta_j %*% V^-1)
for(i in 1:lengthParameters){
mdfiei <- (mdfie[,i] %*% t(mdfie[,i]))
M_B[lengthParameters+i,seq(lengthParameters+1,lengthParameters+lengthParameters)] <- sapply(
lapply(seq(1,lengthParameters), function(i,mdfie) mdfie[,i],mdfie=mdfie),
function(x) 1/2 * sum(diag((mdfiei) %*% SVi %*% (x %*% t(x)) %*% SVi)))
M_B[lengthParameters+i,c(lengthParameters+lpsi-1,lengthParameters+lpsi)] <- sapply(
m,function(x) 1/2 * sum(diag((mdfiei) %*% SVi %*% x %*% SVi)))
}
#calculate the last two rows of partB
# Rows of B corresponding to sig.inter and sig.slope
for(i in (lpsi-1):lpsi){
M_B[lengthParameters+i,seq(lengthParameters+1,lengthParameters+lengthParameters)] <- sapply(
lapply(seq(1,lengthParameters), function(i,mdfie) mdfie[,i],mdfie=mdfie),
function(x) 1/2 * sum(diag((m[[i-lengthParameters]]) %*% SVi %*% (x %*% t(x)) %*% SVi)))
M_B[lengthParameters+i,c(lengthParameters+lpsi-1,lengthParameters+lpsi)] <- sapply(
m,function(x) 1/2 * sum(diag(((m[[i-lengthParameters]])) %*% SVi %*% x %*% SVi)))
}
# Weight this group's contribution by its allocation proportion and accumulate
M_f[[q]] <- (M_A+M_B)*PropSubjects[q]
M_F <-M_F+M_f[[q]]
}
# Scale by the total number of subjects
M_F <- M_F *nbTot
#set names for vectors
# Row/column labels: u_<param> for fixed effects, w2_<param> for variances
fname <-c(sapply(1:lengthParameters, function(x) paste0("u_",parameters[x])),sapply(
1:lengthParameters, function(x) paste0("w2_",parameters[x])),"sig.inter","sig.slope")
rownames(M_F) <- fname
colnames(M_F) <- fname
# Drop rows/columns of variance components fixed to zero (not estimated),
# otherwise M_F would be singular.
if(0 %in% c(o,sigma)){
M_F<-M_F[-c(lengthParameters+which(c(o,sigma)==0)),-c(lengthParameters+which(c(o,sigma)==0))]
PSI<- PSI[-c(lengthParameters+which(c(o,sigma)==0))]
}
# NOTE(review): t here is the sampling-time vector of the LAST group processed
# by the loop above -- confirm this single-time shortcut is intended when
# several groups with different numbers of sampling times are supplied.
if(length(t)==1 ){
return(list(M_F,det(M_F)))
}else{
# If M_F is singular, solve() errors and we fall back to the matrix + determinant
tryCatch({
deterFim <- det(M_F)
# Standard errors from the inverse FIM; RSE in percent of the parameter values
SE <- sqrt(diag(solve(M_F)))
RSE <- 100 * SE / c(beta,o,sigma)[which(c(beta,o,sigma)!=0)]
# D-optimality criterion: det^(1/P) with P = number of estimated parameters
CritereDopt <- deterFim^(1/length(c(beta,o,sigma)[which(c(beta,o,sigma)!=0)]))
#write the output in console
# cat("******************* FISHER INFORMATION MATRIX *************************\n")
# print(M_F)
#
# cat("\n\n******************* DETERMINANT OF THE MATRIX *********************\n", deterFim,
# "\n\n******************* CRITERION *************************************\n",CritereDopt,
# "\n\n******************* STANDARD ERROR ********************************\n",SE,
# "\n\n******************* RELATIVE STANDARD ERROR ***********************\n",RSE,"\n\n")
return(list(M_F,deterFim,CritereDopt,SE,RSE))
},error=function(e){
return(list(M_F,det(M_F)))
})
# draw the graphical illustration of model
# t<-seq(min(unlist(t_group)),max(unlist(t_group)),(max(unlist(t_group))-min(unlist(t_group)))/100)
# x<-c(beta,t)
# fvalue<-f(x)
# lines(t,fvalue) #then plot the line of function
}
} | /FIMmixte.R | no_license | jeremyseurat/Multiplicative-algorithm-nonlinear-mixed-effect-models | R | false | false | 5,812 | r | funFIMpop <- function(equation,parameters,beta,o,sigma,t_group,Trand,d,PropSubjects,nbTot){
#List of names of the fixed effects parameters and sampling times
ParametersInEquation<-c(parameters,"t")
PSI<-c(parameters,"sig.inter","sig.slope")
lpsi<-length(PSI)
lengthParameters<-length(parameters)
#(Diagonal Matrix of) variance for inter-subject random effects:
omega<-diag(o)
#Parameters of standard deviation model of residual error (sig.inter+sig.slope*f)^2:
sig.inter<-sigma[1]
sig.slope<-sigma[2]
#get derivatives of sigma
Vmodel<- parse(text = paste0("( sig.inter + sig.slope * fixed )^2"))
dv<-deriv(Vmodel[[1]],PSI)
#gather all groups of protocol
M_f<-list()
M_F <- matrix(rep(0),nrow=lpsi+lengthParameters,ncol=lpsi+lengthParameters)
for(q in 1:length(t_group)){
#design of sampling times
t<-t_group[[q]]
#dose value
dose<-d[q]
#calculate matrix E for n individuals
equatf <- parse(text = equation, n=-1)
f<-function(ParametersInEquation){eval(equatf[[1]])}
#Fixed effects parameters values
for(i in 1:lengthParameters){
assign(parameters[i],beta[i])
}
#calculate the observations with personnal parameters
parameterValues <- c(beta,t)
fixed<-f(parameterValues)
#calculate variance Var
var<-diag((sig.inter+sig.slope*fixed)^2)
#get derivatives for fixed parameters
df<-deriv(equatf[[1]],PSI)
mdf<-attributes(eval(df))$gradient
#delete the last two columns (correspond to sig.inter and sig.slope)
mdfi <- mdf[,-c(lpsi-1,lpsi)]
#complete derivative for random effect model
#Random effect model (1) = additive (2) = exponential
#------------------------------------------------------------------
beta0 <- beta
beta0[which(Trand==1)] <- 1
mdfie <- mdfi %*% diag(beta0)
#calculate variance Vi
Vi <- mdfie %*% omega %*% t(mdfie) + var
#inverse of matrix Vi
SVi <- solve(Vi)
#compute derivatives of sigma
mdv<-attributes(eval(dv))$gradient
#calculate matrix part A
M_A <- t(mdfi) %*% SVi %*% mdfi
#complete the rest of the matrix with 0
M_A <- cbind(M_A,matrix(rep(0),ncol=lpsi,nrow=lengthParameters))
M_A <- rbind(M_A,matrix(rep(0),ncol=lpsi+lengthParameters,nrow=lpsi))
#calculate matrix part B
#initialize the matrix with 0
M_B <- matrix(rep(0),nrow=lpsi+lengthParameters,ncol=lpsi+lengthParameters)
#prepare a list of matrix of derivatives of sigma to simplify usage
if(length(t)==1){
m <- lapply(c((lpsi-1),lpsi),function(i,mdv) mdv[i],mdv=mdv )
}else{
m <- lapply(c((lpsi-1),lpsi),function(i,mdv) diag(mdv[,i]),mdv=mdv )
}
#calculate first three rows of part B
for(i in 1:lengthParameters){
mdfiei <- (mdfie[,i] %*% t(mdfie[,i]))
M_B[lengthParameters+i,seq(lengthParameters+1,lengthParameters+lengthParameters)] <- sapply(
lapply(seq(1,lengthParameters), function(i,mdfie) mdfie[,i],mdfie=mdfie),
function(x) 1/2 * sum(diag((mdfiei) %*% SVi %*% (x %*% t(x)) %*% SVi)))
M_B[lengthParameters+i,c(lengthParameters+lpsi-1,lengthParameters+lpsi)] <- sapply(
m,function(x) 1/2 * sum(diag((mdfiei) %*% SVi %*% x %*% SVi)))
}
#calculate the last two rows of partB
for(i in (lpsi-1):lpsi){
M_B[lengthParameters+i,seq(lengthParameters+1,lengthParameters+lengthParameters)] <- sapply(
lapply(seq(1,lengthParameters), function(i,mdfie) mdfie[,i],mdfie=mdfie),
function(x) 1/2 * sum(diag((m[[i-lengthParameters]]) %*% SVi %*% (x %*% t(x)) %*% SVi)))
M_B[lengthParameters+i,c(lengthParameters+lpsi-1,lengthParameters+lpsi)] <- sapply(
m,function(x) 1/2 * sum(diag(((m[[i-lengthParameters]])) %*% SVi %*% x %*% SVi)))
}
M_f[[q]] <- (M_A+M_B)*PropSubjects[q]
M_F <-M_F+M_f[[q]]
}
M_F <- M_F *nbTot
#set names for vectors
fname <-c(sapply(1:lengthParameters, function(x) paste0("u_",parameters[x])),sapply(
1:lengthParameters, function(x) paste0("w2_",parameters[x])),"sig.inter","sig.slope")
rownames(M_F) <- fname
colnames(M_F) <- fname
if(0 %in% c(o,sigma)){
M_F<-M_F[-c(lengthParameters+which(c(o,sigma)==0)),-c(lengthParameters+which(c(o,sigma)==0))]
PSI<- PSI[-c(lengthParameters+which(c(o,sigma)==0))]
}
if(length(t)==1 ){
return(list(M_F,det(M_F)))
}else{
tryCatch({
deterFim <- det(M_F)
SE <- sqrt(diag(solve(M_F)))
RSE <- 100 * SE / c(beta,o,sigma)[which(c(beta,o,sigma)!=0)]
CritereDopt <- deterFim^(1/length(c(beta,o,sigma)[which(c(beta,o,sigma)!=0)]))
#write the output in console
# cat("******************* FISHER INFORMATION MATRIX *************************\n")
# print(M_F)
#
# cat("\n\n******************* DETERMINANT OF THE MATRIX *********************\n", deterFim,
# "\n\n******************* CRITERION *************************************\n",CritereDopt,
# "\n\n******************* STANDARD ERROR ********************************\n",SE,
# "\n\n******************* RELATIVE STANDARD ERROR ***********************\n",RSE,"\n\n")
return(list(M_F,deterFim,CritereDopt,SE,RSE))
},error=function(e){
return(list(M_F,det(M_F)))
})
# draw the graphical illustration of model
# t<-seq(min(unlist(t_group)),max(unlist(t_group)),(max(unlist(t_group))-min(unlist(t_group)))/100)
# x<-c(beta,t)
# fvalue<-f(x)
# lines(t,fvalue) #then plot the line of function
}
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bpwpm2.R
\name{bpwpm2_gibbs}
\alias{bpwpm2_gibbs}
\title{Bayesian Piece-Wise Polynomial Model 2 Algorithm}
\usage{
bpwpm2_gibbs(Y, X, M, J, K, draws = 10^3, tau = NULL,
beta_init = NULL, mu_beta_0 = NULL, sigma_beta_0_inv = NULL,
precision_beta = 1, eps = 1e-15, verb = FALSE,
debug_verb = FALSE)
}
\arguments{
\item{Y}{Response vector of n binary observations (integers 0,1 - vector of
size n). Can be encoded as a factor or a numeric vector.}
\item{X}{Design matrix of n observations and d covariables (numeric - n*d)}
\item{M}{M minus 1 is the degree of the polynomial (integer - M > 0)}
\item{J}{Number of intervals in each dimension (integer - J > 1)}
\item{K}{Order of continuity in the derivatives (integer - 0 < K < M)}
\item{draws}{Number of samples to draw from the Gibbs Sampler (integer - draws
> 0)}
\item{tau}{the initial positions of the nodes selected by the user. Although
arbitrary, they need to match the dimensions. (numeric - (J-1)*d)}
\item{beta_init}{Initial value for the Gibbs Sampler Chain (numeric - vector of
size 1*(1 + Nd))}
\item{mu_beta_0}{Prior Mean of Beta (numeric - matrix of size N*d)}
\item{sigma_beta_0_inv}{sigma_w_0_inv:= Prior Inverse if the Variance-Covariance
Matrices of w (list - d elements, each element is a numeric matrix of size
N*N)}
\item{eps}{Numerical threshold}
\item{verb}{short for verbose; if TRUE, prints additional information (logical)}
\item{debug_verb}{If TRUE, prints even more information to help with debugging (logical)}
\item{precision_w}{If using the default sigmas for w, a diagonal matrix will
be used. Precision controls its magnitude (numeric - precision > 0)}
}
\value{
An object of the class "bpwpm" containing at the following
components:
\describe{
\item{beta: }{A data frame containing the Gibbs sampler simulation for beta}
\item{Psi: }{The PWP Expansion for input matrix X and nodes selected on
percentiles}
\item{tau: }{Nodes used for training}
\item{M: }{Initial parameters}
\item{J: }{Initial parameters}
\item{K: }{Initial parameters}
\item{d: }{Number of dimensions}
\item{indep_terms: }{Logical. If independent terms are kept}
\item{info}{A string that prints the basic information of the mode. Used for
the summary function.}
}
}
\description{
Second take at the bpwpm model, based solely on the Albert and Chib paper
}
\examples{
See the main document of thesis for a couple of full examples
with its corresponding analysis.
}
| /man/bpwpm2_gibbs.Rd | no_license | PaoloLuciano/bpwpm2 | R | false | true | 2,499 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bpwpm2.R
\name{bpwpm2_gibbs}
\alias{bpwpm2_gibbs}
\title{Bayesian Piece-Wise Polynomial Model 2 Algorithm}
\usage{
bpwpm2_gibbs(Y, X, M, J, K, draws = 10^3, tau = NULL,
beta_init = NULL, mu_beta_0 = NULL, sigma_beta_0_inv = NULL,
precision_beta = 1, eps = 1e-15, verb = FALSE,
debug_verb = FALSE)
}
\arguments{
\item{Y}{Response vector of n binary observatios (integers 0,1 - vector of
size n) Can be encoded as a factor a numeric vector.}
\item{X}{Design matrix of n observations and d covariables (numeric - n*d)}
\item{M}{M minus 1 is the degree of the polinomial (integer - M > 0)}
\item{J}{Number of intervals in each dimention (integer - J > 1)}
\item{K}{Order of continuity in the derivatives (integrer - 0 < K < M)}
\item{draws}{Númber of samples to draw from the Gibbs Sampler (integer - draw
> 0)}
\item{tau}{the initial position of nodes selected by the user. although·
arbitraty they need to match the dimentions. (numeric - (J-1)*d)}
\item{beta_init}{Inital value for the Gibbs Sampler Chain (numeric - vector of
size 1*(1 + Nd))}
\item{mu_beta_0}{Prior Mean of Beta (numeric - matrix of size N*d)}
\item{sigma_beta_0_inv}{sigma_w_0_inv:= Prior Inverse if the Variance-Covariance
Matrices of w (list - d elements, each element is a numeric matrix of size
N*N)}
\item{eps}{Numerical threshold}
\item{verb}{short for verbose, if TRUE, prints aditional information (logical)}
\item{debug_verb}{If TRUE, print even more info to help with debug_verbging (logical)}
\item{precision_w}{If using the default sigmas for w, a diagonal matrix will
be used. Precision controls its magnitude (numeric - precision > 0)}
}
\value{
An object of the class "bpwpm" containing at the following
components:
\describe{
\item{beta: }{A data frame containing the Gibbs sampler simulation for beta}
\item{Psi: }{The PWP Expansion for input matrix X and nodes selected on
percentiles}
\item{tau: }{Nodes used for training}
\item{M: }{Initial parameters}
\item{J: }{Initial parameters}
\item{K: }{Initial parameters}
\item{d: }{Number of dimentions}
\item{indep_terms: }{Logical. If independent terms are keept}
\item{info}{A string that prints the basic information of the mode. Used for
the summary function.}
}
}
\description{
Second take at the bpwpm model, based solely on the Albert + Chibb Paper
}
\examples{
See the main document of thesis for a couple of full examples
with its corresponding analysis.
}
|
#===========
#run_model.R
#===========
#This script demonstrates the VisionEval framework for a demonstration RPAT Module

#Load libraries
#--------------
library(visioneval)

# NOTE(review): planType appears unused in this script -- presumably read by
# the framework elsewhere; confirm before removing.
planType <- 'multiprocess'

# Start a timer so the total run time can be reported at the end
ptm <- proc.time()

#Initialize model
#----------------
initializeModel(
  ModelScriptFile = "run_model.R",
  ParamDir = "defs",
  RunParamFile = "run_parameters.json",
  GeoFile = "geo.csv",
  ModelParamFile = "model_parameters.json",
  LoadDatastore = FALSE,
  DatastoreName = NULL,
  SaveDatastore = TRUE
)

#Run all demo modules for all years
#----------------------------------
# All four modules share every argument except ModuleName, so run them in a
# loop instead of repeating the call. The order is preserved from the original
# script: build scenarios, run them, tabulate results, then view them.
ModuleNames <- c("BuildScenarios", "RunScenarios", "VERSPMResults", "ViewResults")
for (Year in getYears()) {
  for (ModuleName in ModuleNames) {
    runModule(
      ModuleName = ModuleName,
      PackageName = "VEScenario",
      RunFor = "AllYears",
      RunYear = Year
    )
  }
}

# Report elapsed time for the whole model run
proc.time() - ptm
| /sources/models/VERSPM_Scenarios/run_model.R | permissive | jslason/Archive-VisionEval | R | false | false | 1,139 | r | #===========
#run_model.R
#===========
#This script demonstrates the VisionEval framework for a demonstration RPAT Module
#Load libraries
#--------------
library(visioneval)
planType <- 'multiprocess'
ptm <- proc.time()
#Initialize model
#----------------
initializeModel(
ModelScriptFile = "run_model.R",
ParamDir = "defs",
RunParamFile = "run_parameters.json",
GeoFile = "geo.csv",
ModelParamFile = "model_parameters.json",
LoadDatastore = FALSE,
DatastoreName = NULL,
SaveDatastore = TRUE
)
#Run all demo module for all years
#---------------------------------
for(Year in getYears()) {
runModule(
ModuleName = "BuildScenarios",
PackageName = "VEScenario",
RunFor = "AllYears",
RunYear = Year
)
runModule(
ModuleName = "RunScenarios",
PackageName = "VEScenario",
RunFor = "AllYears",
RunYear = Year
)
runModule(
ModuleName = "VERSPMResults",
PackageName = "VEScenario",
RunFor = "AllYears",
RunYear = Year
)
runModule(
ModuleName = "ViewResults",
PackageName = "VEScenario",
RunFor = "AllYears",
RunYear = Year
)
}
proc.time() - ptm
|
# Exercise 1: Data Frame Practice

# Install devtools package: allows installations from GitHub
install.packages('devtools')

# Install "fueleconomy" package from GitHub
devtools::install_github("hadley/fueleconomy")

# Require/library the fueleconomy package
library(fueleconomy)

# You should have access to the `vehicles` data.frame
data("vehicles")

# Create a data.frame of vehicles from 1997
vehicles.1997 <- vehicles[vehicles$year == 1997, ]

# Use the `unique` function to verify that there is only 1 value in the `year`
# column of your new data.frame
unique(vehicles.1997$year)

# Create a data.frame of 2-Wheel Drive vehicles that get more than 20 miles/gallon in the city
twowheeldrive <- vehicles[vehicles$drive == '2-Wheel Drive' & vehicles$cty > 20, ]

# Of those vehicles, what is the vehicle ID of the vehicle with the worst hwy mpg?
# (which.min returns the row index of the minimum hwy value)
worst.hwy.id <- twowheeldrive[which.min(twowheeldrive$hwy), "id"]
worst.hwy.id

# Write a function that takes a `year` and a `make` as parameters, and returns
# the vehicle that gets the most hwy miles/gallon of vehicles of that make in that year
most.efficient <- function(year, make) {
  candidates <- vehicles[vehicles$year == year & vehicles$make == make, ]
  candidates[which.max(candidates$hwy), ]
}

# What was the most efficient honda model of 1995?
most.efficient(1995, "Honda")
| /exercise-1/exercise.R | permissive | samsturtevant/m9-dplyr | R | false | false | 1,077 | r | # Exercise 1: Data Frame Practice
# Install devtools package: allows installations from GitHub
install.packages('devtools')
# Install "fueleconomy" package from GitHub
devtools::install_github("hadley/fueleconomy")
# Require/library the fueleconomy package
library(fueleconomy)
# You should have have access to the `vehicles` data.frame
data("vehicles")
# Create a data.frame of vehicles from 1997
vehicles.1997 <- vehicles[vehicles$year == 1997,]
# Use the `unique` function to verify that there is only 1 value in the `year` column of your new data.frame
unique(vehicles.1997$year)
# Create a data.frame of 2-Wheel Drive vehicles that get more than 20 miles/gallon in the city
twowheeldrive <- vehicles[vehicles$drive == '2-Wheel Drive' & vehicles$cty > 20,]
# Of those vehicles, what is the vehicle ID of the vehicle with the worst hwy mpg?
# Write a function that takes a `year` and a `make` as parameters, and returns
# The vehicle that gets the most hwy miles/gallon of vehicles of that make in that year
# What was the most efficient honda model of 1995?
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\name{derive}
\alias{derive}
\title{derive}
\usage{
derive(x, y, step = 3)
}
\arguments{
\item{x}{x vector}
\item{y}{y vector}
\item{step}{value to be interpolated for calculation (3 or 5)}
}
\value{
gradient vector
}
\description{
Calculate the gradient of y with respect to x (dy/dx)
}
\examples{
#data(choc_ctest)
#ctest(choc_ctest)
}
| /man/derive.Rd | no_license | dan2cil/tecTools | R | false | false | 364 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{derive}
\alias{derive}
\title{derive}
\usage{
derive(x, y, step = 3)
}
\arguments{
\item{x}{x vector}
\item{y}{y vector}
\item{step}{value to be interpoleted for calculation (3 or 5)}
}
\value{
gradient vector
}
\description{
Calculate gradient y dx
}
\examples{
#data(choc_ctest)
#ctest(choc_ctest)
}
|
# Read the full power-consumption dataset. This file encodes missing values as
# "?", so map them to NA up front; otherwise as.numeric() below coerces them
# with warnings and the column may be mis-typed.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Keep only the two days of interest (Date is stored as d/m/yyyy text)
subSetData <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
str(subSetData)
globalActivePower <- as.numeric(subSetData$Global_active_power)
# Histogram of global active power for the two selected days
hist(globalActivePower, col = "red", main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
# Copy the screen device to a PNG file, then close the file device
dev.copy(png, "plot1.png")
dev.off()
| /plot1.R | no_license | ritasaliba12/ExData_Plotting1 | R | false | false | 409 | r | data <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSetData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
str(subSetData)
globalActivePower <- as.numeric(subSetData$Global_active_power)
hist(globalActivePower, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.copy(png,'plot1.png')
dev.off()
|
#' Sys.info() with consistent naming convention
#'
#' Thin wrapper around \code{Sys.info()} providing a snake_case name.
sys_info <- function() {
  Sys.info()
}
sys_info <- function() {
return(Sys.info())
} |
# Load the OCR bounding boxes and select the page for document UCD_Lehmann_2362.
# Each row is one recognised word with columns: left bottom right top text confidence.
data = readRDS("FullBoxes.rds")
doc.2362 = data$UCD_Lehmann_2362.jpg
#left bottom right top text confidence
# Strip OCR noise characters from the recognised text. The original code ran
# twenty separate gsub() calls (one per character); a single character class
# removes the same set in one pass:
# . : \ | ~ # , - (register/currency/symbol noise) } ! + = > ? and the
# non-ASCII artefacts R, L, Y, C, S signs.
doc.2362$text = gsub("[.:\\\\|~#,®}!£¥+=©>?§-]", "", doc.2362$text)
# Drop boxes whose text became empty after cleaning
doc.2362 = doc.2362[!(doc.2362$text ==""),]
# Statistical mode: the most frequent value in v (first one on ties).
stat.mode = function(v) {
  uniqv = unique(v)
  uniqv[which.max(tabulate(match(v, uniqv)))]
}

# Parse one physical column of the scanned price list.
#
# The original script repeated this ~73-line pipeline three times (left,
# middle, right columns); it is extracted here into a single function. The
# quirky index expressions from the original are kept verbatim where behaviour
# depends on them.
#
# col.data: OCR boxes belonging to one column (rows of doc.2362)
# gap:      vertical gap (pixels) between consecutive boxes that starts a new line
# Returns a data.frame with columns Name, price1, price2 (rows missing a
# second price are dropped by na.omit).
parse.column = function(col.data, gap) {
  # --- 1. group boxes into physical lines by vertical gaps ---
  # distance from the previous box's bottom edge (first box gets 0)
  col.data$bottom.diff = c(0, diff(col.data$bottom))
  line.id = integer(nrow(col.data))
  current = 1
  for (i in seq_len(nrow(col.data))) {
    # a large gap means this box starts a new table line
    if (col.data$bottom.diff[i] > gap) {
      current = current + 1
    }
    line.id[i] = current
  }
  # --- 2. collapse each line's boxes into one text string (in line order) ---
  lines.text = character(max(line.id))
  for (i in seq_along(lines.text)) {
    lines.text[i] = paste(col.data$text[line.id == i], collapse = " ")
  }
  # --- 3. tokenize; drop empty tokens and lines with too few tokens ---
  tokens = strsplit(lines.text, " ")
  tokens = lapply(tokens, function(x) x[x != ""])
  tokens = tokens[lengths(tokens) > 2]
  # --- 4. locate the numeric (price) tokens on each line ---
  # a token containing three consecutive digits is treated as a price fragment
  has.num = lapply(tokens, function(x) grepl("[0-9]{3}", x))
  n.tok = lengths(has.num)
  first.num = sapply(has.num, function(g) which(g)[1])
  # number of numeric tokens after the first one (computed BEFORE any drop,
  # matching the original script)
  trailing = n.tok - first.num
  # drop a trailing non-numeric token (OCR noise) from each line
  for (i in seq_along(tokens)) {
    if (!has.num[[i]][n.tok[i]]) {
      tokens[[i]] = tokens[[i]][seq_len(length(tokens[[i]]) - 1)]
    }
  }
  # typical count of trailing price tokens across the column
  n.prices = stat.mode(trailing)
  # --- 5. split each line into wine name and price tokens ---
  out = data.frame(Name = character(length(tokens)),
                   price1 = character(length(tokens)),
                   price2 = character(length(tokens)),
                   stringsAsFactors = FALSE)
  for (i in seq_along(tokens)) {
    n.tok.i = length(tokens[[i]])
    # 1:(n - n.prices) - 1 evaluates to 0:(n - n.prices - 1), i.e. the tokens
    # BEFORE the price block (index 0 is silently ignored by R)
    out$Name[i] = paste(tokens[[i]][1:(n.tok.i - n.prices) - 1], collapse = " ")
    prices = tokens[[i]][(n.tok.i - n.prices):n.tok.i]
    # re-insert the decimal point the OCR lost: last two digits are cents
    prices = sub("([[:digit:]]{2,2})$", ".\\1", prices)
    out$price1[i] = prices[1]
    out$price2[i] = prices[2]
  }
  # lines without a second price end up with NA and are removed here
  na.omit(out)
}

# The page is laid out in three columns below the table header (bottom > 4130).
# Horizontal cut points (1270, 2500) and line-gap thresholds (40, 40, 34) are
# the values used by the original per-column code.
database.left = parse.column(subset(doc.2362, bottom > 4130 & left < 1270), 40)
database.middle = parse.column(subset(doc.2362, bottom > 4130 & left > 1270 & left < 2500), 40)
database.right = parse.column(subset(doc.2362, bottom > 4130 & left > 2500), 34)

# Stack the three parsed columns into the full table, in page order
output = do.call("rbind", list(database.left, database.middle, database.right))
# Build a hand-corrected ground-truth copy of the parsed table. `train` starts
# as the raw OCR output and the assignments below fix, row by row, the entries
# the OCR got wrong (names first, then the two price columns). Row indices
# refer to positions in `output` after rbind-ing left/middle/right columns.
train = output
# --- manual name corrections ---
train$Name[1] = "BULLOCH & LADE"
train$Name[2] = "HARVEY'S"
train$Name[3] = "JOHN BEGG BLUE CAP"
train$Name[4] = "GILBEY'S SPEY ROYAL"
train$Name[5] = "KING WILLIAM"
train$Name[6] = "KING GEORGE"
train$Name[8] = "HANKY BANNISTER"
train$Name[9] = "DEERSTALKER"
train$Name[10] = "GROUSE"
train$Name[11] = "PETER DAWSON SPECIAL"
train$Name[12] = "CATTO'S GREY SEAL"
train$Name[13] = "GLEN GARRY (S. S. Pierce)"
train$Name[14] = "QUEEN ANNE"
train$Name[15] = "HUDSON'S BAY"
train$Name[20] = "GRAND MACNISH"
train$Name[22] = "MARTIN'S V.V.O"
train$Name[23] = "BALLANTINE"
train$Name[24] = "CUTTY SARK"
train$Name[25] = "WHITE HORSE"
train$Name[26] = "VAT 69"
train$Name[28] = "AMBASSADOR DELUXE"
train$Name[29] = "LONG JOHN"
train$Name[30] = "WHITE HEATHER"
train$Name[34] = "DEWAR'S WHITE LABEL"
train$Name[35] = "J & B"
train$Name[38] = "MACKIES ANCIENT"
train$Name[39] = "BARRISTER'S CHOICE (12 Year)"
train$Name[40] = "BERRY BROS. ST JAMES"
train$Name[42] = "S. S. PIERCE'S LIQUEUR (12 Year)"
train$Name[44] = "JOHNNIE WALKER BLACK LABEL"
train$Name[45] = "KINGS RANSOM"
train$Name[46] = "BUCHANAN DE LUXE"
train$Name[47] = "HIGHLAND NECTAR"
train$Name[48] = "BELL'S ROYAL VAT (12 Years)"
train$Name[49] = "CHIVAS REGAL (12 Years)"
train$Name[54] = "ROBERTSON'S B.E.B."
train$Name[55] = "GRANT'S (12 Years)"
train$Name[56] = "SMITH GLENLIVET (12 Years)"
train$Name[58] = "GRANTS (20 Years)"
train$Name[60] = "BALLANTINE (30 Years)"
# --- manual corrections for the first (per-bottle) price column ---
train$price1[1] = "5.75"
train$price1[2] = "5.77"
train$price1[3] = "5.79"
train$price1[4] = "5.79"
train$price1[5] = "5.80"
train$price1[6] = "5.85"
train$price1[12] = "6.20"
train$price1[13] = "6.25"
train$price1[14] = "6.29"
train$price1[15] = "6.29"
train$price1[22] = "6.45"
train$price1[23] = "6.47"
train$price1[24] = "6.47"
train$price1[25] = "6.47"
train$price1[26] = "6.49"
train$price1[28] = "6.55"
train$price1[29] = "6.55"
train$price1[34] = "6.65"
train$price1[35] = "6.66"
train$price1[36] = "7.59"
train$price1[37] = "6.99"
train$price1[38] = "7.73"
train$price1[39] = "7.75"
train$price1[41] = "7.95"
train$price1[42] = "7.99"
train$price1[43] = "7.99"
train$price1[44] = "8.22"
train$price1[46] = "8.49"
train$price1[47] = "8.50"
train$price1[48] = "8.60"
train$price1[49] = "8.60"
train$price1[55] = "8.91"
train$price1[56] = "10.79"
train$price1[58] = "12.95"
train$price1[60] = "15.45"
# --- manual corrections for the second (per-case) price column ---
train$price2[1] = "64.87"
train$price2[2] = "65.78"
train$price2[3] = "66.00"
train$price2[4] = "66.01"
train$price2[5] = "66.12"
train$price2[6] = "66.69"
train$price2[14] = "71.70"
train$price2[15] = "71.71"
train$price2[21] = "73.53"
train$price2[22] = "73.53"
train$price2[23] = "73.86"
train$price2[24] = "73.87"
train$price2[25] = "73.76"
train$price2[26] = "73.99"
train$price2[28] = "74.67"
train$price2[29] = "74.67"
train$price2[34] = "75.81"
train$price2[35] = "75.92"
train$price2[37] = "79.60"
train$price2[38] = "88.16"
train$price2[42] = "91.09"
train$price2[44] = "93.71"
train$price2[46] = "97.00"
train$price2[47] = "96.90"
train$price2[49] = "98.04"
train$price2[55] = "101.58"
train$price2[56] = "122.88"
train$price2[58] = "147.63"
train$price2[60] = "176.18"
# OCR accuracy check: for each column, compute the character-level edit
# distance (adist) between the hand-corrected ground truth (train) and the raw
# parsed output, followed by the total character count of the ground truth so
# an error rate can be derived. Printed to the console when run interactively.
adist(paste(train$Name, collapse = " "), paste(output$Name, collapse = " "))
length(strsplit(paste(train$Name, collapse = " "), "")[[1]])
adist(paste(train$price1, collapse = " "), paste(output$price1, collapse = " "))
length(strsplit(paste(train$price1, collapse = " "), "")[[1]])
adist(paste(train$price2, collapse = " "), paste(output$price2, collapse = " "))
length(strsplit(paste(train$price2, collapse = " "), "")[[1]])
| /luna-qiu-code/2362.R | no_license | MitchellRLayton/STA160-Wine-Project | R | false | false | 12,393 | r | data = readRDS("FullBoxes.rds")
# Pull the OCR boxes for page UCD_Lehmann_2362 and scrub punctuation and
# stray symbols that the OCR engine tends to hallucinate in the text column.
doc.2362 = data$UCD_Lehmann_2362.jpg
#left bottom right top text confidence
junk_patterns = c('\\.', '\\:', '\\\\', '\\|', '\\~', '\\#', '\\,', '\\-',
                  '\\®', '\\}', '\\!', '\\£', '\\¥', '\\+', '\\=', '\\©',
                  '\\>', '\\?', '\\>', '\\§')
for (pat in junk_patterns) {
  doc.2362$text = gsub(pat, '', doc.2362$text)
}
# Drop boxes whose text is empty after cleaning.
doc.2362 = doc.2362[!(doc.2362$text == ""),]
# Left-hand column of the price table: boxes below y = 4130 with x < 1270.
left = subset(doc.2362, bottom > 4130 & left < 1270)
# Vertical gap to the previous OCR box; the first row gets 0.
left$bottom.diff = ave(left$bottom, FUN = function(x) c(0, diff(x)))
# A gap of more than 40 px starts a new table line, so the line index is a
# running count of large gaps. This vectorised form replaces the original
# scalar loop (and its `t` counter) but produces identical index values.
left$index = cumsum(left$bottom.diff > 40) + 1
#split data by index. i.e. find each line in table
splitted = NULL
for (i in 1: max(left$index)) {
splitted[[i]] = left[left$index == i, ]
}
#find if name of wine is on multiple lines
diff = numeric(max(left$index))
for (i in 1: max(left$index)) {
diff[i] = splitted[[i]]$right[nrow(splitted[[i]])] - splitted[[i]]$left[1]
}
frame = NULL
for (i in 1:length(splitted)) {
#change bottom.diff in first row to 0 so that subset doesn't remove them
splitted[[i]]$bottom.diff[1] = 0
#gives back dataframe with each line containing info for each wine
frame[i] = paste(splitted[[i]]$text, collapse = ' ')
}
stringsplit = strsplit(frame, " ")
for(i in 1:length(stringsplit)) {
stringsplit[[i]] = stringsplit[[i]][stringsplit[[i]] != ""]
}
stringsplit = stringsplit[lapply(stringsplit,length)>2]
#source: https://stackoverflow.com/questions/19023446/remove-empty-elements-from-list-with-character0
#break up word: strplit
# For each table line, flag tokens containing a run of 3+ digits (price-like),
# then work out how many tokens sit after the first price token.
grepled = NULL
first_true = NULL
l = NULL
p = NULL
for(i in 1:length(stringsplit)) {
grepled[[i]] = grepl("[0-9]{3}", stringsplit[[i]])
# l = token count; first_true = position of first price-like token (NA if none)
l[i] = length(grepled[[i]])
first_true[i] = which(grepled[[i]] == TRUE)[1]
#may include an if statement so that first_true = ... when there's a number in the front
#and first_true = ... when there's no number in the front
# p = number of tokens trailing the first price token on this line
p[i] = l[i] - first_true[i]
}
# If the last token holds no digits it is trailing OCR noise; drop it.
# NOTE(review): `1:length(x)-1` parses as (1:n)-1 = 0:(n-1); R silently
# ignores the 0 index, so this does drop the final element — fragile but works.
for(i in 1:length(stringsplit)) {
if(grepled[[i]][l[i]] == FALSE) {
stringsplit[[i]] = stringsplit[[i]][1:length(stringsplit[[i]])-1]
}
}
mode = function(v) {
  # Most frequent value of v; ties resolve to the value encountered first.
  uv = unique(v)
  uv[which.max(tabulate(match(v, uv), nbins = length(uv)))]
}
mode(p)
name = NULL
price = NULL
for(i in 1:length(stringsplit)) {
name[[i]] = stringsplit[[i]][1:(length(stringsplit[[i]]) - mode(p)) - 1]
name[[i]] = paste(name[[i]], collapse = " ")
price[[i]] = stringsplit[[i]][(length(stringsplit[[i]]) - mode(p)):length(stringsplit[[i]])]
price[[i]] = sub("([[:digit:]]{2,2})$", ".\\1", price[[i]])
}
database.left = data.frame("Name" = numeric(length(name)), "price1" = numeric(length(price)), "price2" = numeric(length(price)))
database.left$Name = name
for(i in 1:length(stringsplit)) {
database.left$price1[i] = price[[i]][1]
database.left$price2[i] = price[[i]][2]
}
database.left = na.omit(database.left)
# Middle column of the table: 1270 < x < 2500, below the header band.
middle = subset(doc.2362, bottom > 4130 & left > 1270 & left < 2500)
# Vertical gap to the previous box (0 for the first row).
middle$bottom.diff = ave(middle$bottom, FUN = function(x) c(0, diff(x)))
# New table line whenever the gap exceeds 40 px; vectorised cumsum gives the
# same line indices as the original explicit loop.
middle$index = cumsum(middle$bottom.diff > 40) + 1
#split data by index. i.e. find each line in table
splitted = NULL
for (i in 1: max(middle$index)) {
splitted[[i]] = middle[middle$index == i, ]
}
#find if name of wine is on multiple lines
diff = numeric(max(middle$index))
for (i in 1: max(middle$index)) {
diff[i] = splitted[[i]]$right[nrow(splitted[[i]])] - splitted[[i]]$left[1]
}
frame = NULL
for (i in 1:length(splitted)) {
#change bottom.diff in first row to 0 so that subset doesn't remove them
splitted[[i]]$bottom.diff[1] = 0
#gives back dataframe with each line containing info for each wine
frame[i] = paste(splitted[[i]]$text, collapse = ' ')
}
stringsplit = strsplit(frame, " ")
for(i in 1:length(stringsplit)) {
stringsplit[[i]] = stringsplit[[i]][stringsplit[[i]] != ""]
}
stringsplit = stringsplit[lapply(stringsplit,length)>2]
#source: https://stackoverflow.com/questions/19023446/remove-empty-elements-from-list-with-character0
#break up word: strplit
grepled = NULL
first_true = NULL
l = NULL
p = NULL
for(i in 1:length(stringsplit)) {
grepled[[i]] = grepl("[0-9]{3}", stringsplit[[i]])
l[i] = length(grepled[[i]])
first_true[i] = which(grepled[[i]] == TRUE)[1]
#may include an if statement so that first_true = ... when there's a number in the front
#and first_true = ... when there's no number in the front
p[i] = l[i] - first_true[i]
}
for(i in 1:length(stringsplit)) {
if(grepled[[i]][l[i]] == FALSE) {
stringsplit[[i]] = stringsplit[[i]][1:length(stringsplit[[i]])-1]
}
}
mode = function(v) {
  # Modal value of v. Each observation is mapped to the index of its first
  # occurrence; the index with the highest count wins (first wins on ties).
  firsts = unique(v)
  hits = tabulate(match(v, firsts))
  firsts[which.max(hits)]
}
mode(p)
name = NULL
price = NULL
for(i in 1:length(stringsplit)) {
name[[i]] = stringsplit[[i]][1:(length(stringsplit[[i]]) - mode(p)) - 1]
name[[i]] = paste(name[[i]], collapse = " ")
price[[i]] = stringsplit[[i]][(length(stringsplit[[i]]) - mode(p)):length(stringsplit[[i]])]
price[[i]] = sub("([[:digit:]]{2,2})$", ".\\1", price[[i]])
}
database.middle = data.frame("Name" = numeric(length(name)), "price1" = numeric(length(price)), "price2" = numeric(length(price)))
database.middle$Name = name
for(i in 1:length(stringsplit)) {
database.middle$price1[i] = price[[i]][1]
database.middle$price2[i] = price[[i]][2]
}
database.middle = na.omit(database.middle)
# Right-hand column of the table: x > 2500, below the header band.
right = subset(doc.2362, bottom > 4130 & left > 2500)
# Vertical gap to the previous box (0 for the first row).
right$bottom.diff = ave(right$bottom, FUN = function(x) c(0, diff(x)))
# Threshold is 34 px here (tighter line spacing in this column) rather than
# the 40 px used for the other two columns. The cumsum form reproduces the
# original loop's line indices exactly.
right$index = cumsum(right$bottom.diff > 34) + 1
#split data by index. i.e. find each line in table
splitted = NULL
for (i in 1: max(right$index)) {
splitted[[i]] = right[right$index == i, ]
}
#find if name of wine is on multiple lines
diff = numeric(max(right$index))
for (i in 1: max(right$index)) {
diff[i] = splitted[[i]]$right[nrow(splitted[[i]])] - splitted[[i]]$left[1]
}
frame = NULL
for (i in 1:length(splitted)) {
#change bottom.diff in first row to 0 so that subset doesn't remove them
splitted[[i]]$bottom.diff[1] = 0
#gives back dataframe with each line containing info for each wine
frame[i] = paste(splitted[[i]]$text, collapse = ' ')
}
stringsplit = strsplit(frame, " ")
for(i in 1:length(stringsplit)) {
stringsplit[[i]] = stringsplit[[i]][stringsplit[[i]] != ""]
}
stringsplit = stringsplit[lapply(stringsplit,length)>2]
#source: https://stackoverflow.com/questions/19023446/remove-empty-elements-from-list-with-character0
#break up word: strplit
grepled = NULL
first_true = NULL
l = NULL
p = NULL
for(i in 1:length(stringsplit)) {
grepled[[i]] = grepl("[0-9]{3}", stringsplit[[i]])
l[i] = length(grepled[[i]])
first_true[i] = which(grepled[[i]] == TRUE)[1]
#may include an if statement so that first_true = ... when there's a number in the front
#and first_true = ... when there's no number in the front
p[i] = l[i] - first_true[i]
}
for(i in 1:length(stringsplit)) {
if(grepled[[i]][l[i]] == FALSE) {
stringsplit[[i]] = stringsplit[[i]][1:length(stringsplit[[i]])-1]
}
}
mode = function(v) {
  # Most common element of v; the first-seen value wins a tie.
  uv = unique(v)
  pos = match(v, uv)
  best = 1
  best_n = 0
  for (k in seq_along(uv)) {
    n_k = sum(pos == k)
    if (n_k > best_n) {
      best = k
      best_n = n_k
    }
  }
  uv[best]
}
mode(p)
name = NULL
price = NULL
for(i in 1:length(stringsplit)) {
name[[i]] = stringsplit[[i]][1:(length(stringsplit[[i]]) - mode(p)) - 1]
name[[i]] = paste(name[[i]], collapse = " ")
price[[i]] = stringsplit[[i]][(length(stringsplit[[i]]) - mode(p)):length(stringsplit[[i]])]
price[[i]] = sub("([[:digit:]]{2,2})$", ".\\1", price[[i]])
}
database.right = data.frame("Name" = numeric(length(name)), "price1" = numeric(length(price)), "price2" = numeric(length(price)))
database.right$Name = name
for(i in 1:length(stringsplit)) {
database.right$price1[i] = price[[i]][1]
database.right$price2[i] = price[[i]][2]
}
database.right = na.omit(database.right)
output = do.call("rbind", list(database.left, database.middle, database.right))
train = output
train$Name[1] = "BULLOCH & LADE"
train$Name[2] = "HARVEY'S"
train$Name[3] = "JOHN BEGG BLUE CAP"
train$Name[4] = "GILBEY'S SPEY ROYAL"
train$Name[5] = "KING WILLIAM"
train$Name[6] = "KING GEORGE"
train$Name[8] = "HANKY BANNISTER"
train$Name[9] = "DEERSTALKER"
train$Name[10] = "GROUSE"
train$Name[11] = "PETER DAWSON SPECIAL"
train$Name[12] = "CATTO'S GREY SEAL"
train$Name[13] = "GLEN GARRY (S. S. Pierce)"
train$Name[14] = "QUEEN ANNE"
train$Name[15] = "HUDSON'S BAY"
train$Name[20] = "GRAND MACNISH"
train$Name[22] = "MARTIN'S V.V.O"
train$Name[23] = "BALLANTINE"
train$Name[24] = "CUTTY SARK"
train$Name[25] = "WHITE HORSE"
train$Name[26] = "VAT 69"
train$Name[28] = "AMBASSADOR DELUXE"
train$Name[29] = "LONG JOHN"
train$Name[30] = "WHITE HEATHER"
train$Name[34] = "DEWAR'S WHITE LABEL"
train$Name[35] = "J & B"
train$Name[38] = "MACKIES ANCIENT"
train$Name[39] = "BARRISTER'S CHOICE (12 Year)"
train$Name[40] = "BERRY BROS. ST JAMES"
train$Name[42] = "S. S. PIERCE'S LIQUEUR (12 Year)"
train$Name[44] = "JOHNNIE WALKER BLACK LABEL"
train$Name[45] = "KINGS RANSOM"
train$Name[46] = "BUCHANAN DE LUXE"
train$Name[47] = "HIGHLAND NECTAR"
train$Name[48] = "BELL'S ROYAL VAT (12 Years)"
train$Name[49] = "CHIVAS REGAL (12 Years)"
train$Name[54] = "ROBERTSON'S B.E.B."
train$Name[55] = "GRANT'S (12 Years)"
train$Name[56] = "SMITH GLENLIVET (12 Years)"
train$Name[58] = "GRANTS (20 Years)"
train$Name[60] = "BALLANTINE (30 Years)"
train$price1[1] = "5.75"
train$price1[2] = "5.77"
train$price1[3] = "5.79"
train$price1[4] = "5.79"
train$price1[5] = "5.80"
train$price1[6] = "5.85"
train$price1[12] = "6.20"
train$price1[13] = "6.25"
train$price1[14] = "6.29"
train$price1[15] = "6.29"
train$price1[22] = "6.45"
train$price1[23] = "6.47"
train$price1[24] = "6.47"
train$price1[25] = "6.47"
train$price1[26] = "6.49"
train$price1[28] = "6.55"
train$price1[29] = "6.55"
train$price1[34] = "6.65"
train$price1[35] = "6.66"
train$price1[36] = "7.59"
train$price1[37] = "6.99"
train$price1[38] = "7.73"
train$price1[39] = "7.75"
train$price1[41] = "7.95"
train$price1[42] = "7.99"
train$price1[43] = "7.99"
train$price1[44] = "8.22"
train$price1[46] = "8.49"
train$price1[47] = "8.50"
train$price1[48] = "8.60"
train$price1[49] = "8.60"
train$price1[55] = "8.91"
train$price1[56] = "10.79"
train$price1[58] = "12.95"
train$price1[60] = "15.45"
train$price2[1] = "64.87"
train$price2[2] = "65.78"
train$price2[3] = "66.00"
train$price2[4] = "66.01"
train$price2[5] = "66.12"
train$price2[6] = "66.69"
train$price2[14] = "71.70"
train$price2[15] = "71.71"
train$price2[21] = "73.53"
train$price2[22] = "73.53"
train$price2[23] = "73.86"
train$price2[24] = "73.87"
train$price2[25] = "73.76"
train$price2[26] = "73.99"
train$price2[28] = "74.67"
train$price2[29] = "74.67"
train$price2[34] = "75.81"
train$price2[35] = "75.92"
train$price2[37] = "79.60"
train$price2[38] = "88.16"
train$price2[42] = "91.09"
train$price2[44] = "93.71"
train$price2[46] = "97.00"
train$price2[47] = "96.90"
train$price2[49] = "98.04"
train$price2[55] = "101.58"
train$price2[56] = "122.88"
train$price2[58] = "147.63"
train$price2[60] = "176.18"
adist(paste(train$Name, collapse = " "), paste(output$Name, collapse = " "))
length(strsplit(paste(train$Name, collapse = " "), "")[[1]])
adist(paste(train$price1, collapse = " "), paste(output$price1, collapse = " "))
length(strsplit(paste(train$price1, collapse = " "), "")[[1]])
adist(paste(train$price2, collapse = " "), paste(output$price2, collapse = " "))
length(strsplit(paste(train$price2, collapse = " "), "")[[1]])
|
#' ProductPackageService
#'
#' Provides methods for updating and retrieving ProductPackage objects.
#' A
#' ProductPackage represents a group of products which will be sold together.
#'
#' To use this service, you need to have the new sales management solution
#' enabled on your network. If you do not see a "Sales" tab in <a
#' href="https://www.google.com/dfp">DoubleClick for Publishers (DFP)</a>, you
#' will not be able to use this service.
#'
#' createProductPackages
#'
#' Creates new ProductPackage objects.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#createProductPackages}{Google Documentation for createProductPackages}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a createProductPackagesResponse
#' @examples
#' \dontrun{
#' res <- dfp_createProductPackages(request_data)
#' }
#' @export
dfp_createProductPackages <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Build the SOAP envelope, POST it, then unpack the response element.
  soap_body <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'createProductPackages',
    data = request_data
  )
  response <- execute_soap_request(request_body = soap_body, verbose = verbose)
  parse_soap_response(
    httr_response = response,
    resp_element = 'createProductPackagesResponse',
    as_df = as_df
  )
}
#'
#' getProductPackagesByStatement
#'
#' Gets a ProductPackagePage of ProductPackage objects that satisfy the filtering criteria specified by given Statement query. The following fields are supported for filtering:
#' \itemize{
#' \item{id}
#' \item{name}
#' \item{notes}
#' \item{status}
#' \item{isArchived}
#' \item{lastModifiedDateTime}
#' }
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#getProductPackagesByStatement}{Google Documentation for getProductPackagesByStatement}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a getProductPackagesByStatementResponse
#' @examples
#' \dontrun{
#' dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
#' res <- dfp_getProductPackagesByStatement(dat)
#' }
#' @export
dfp_getProductPackagesByStatement <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Form the filter-statement request, send it, and parse the page result.
  soap_body <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'getProductPackagesByStatement',
    data = request_data
  )
  response <- execute_soap_request(request_body = soap_body, verbose = verbose)
  parse_soap_response(
    httr_response = response,
    resp_element = 'getProductPackagesByStatementResponse',
    as_df = as_df
  )
}
#'
#' performProductPackageAction
#'
#' Performs actions on ProductPackage objects that match the given \{@@link Statement query\}.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#performProductPackageAction}{Google Documentation for performProductPackageAction}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a performProductPackageActionResponse
#' @examples
#' \dontrun{
#' res <- dfp_performProductPackageAction(request_data)
#' }
#' @export
dfp_performProductPackageAction <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Wrap the action request in a SOAP body, execute it, and parse the reply.
  soap_body <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'performProductPackageAction',
    data = request_data
  )
  response <- execute_soap_request(request_body = soap_body, verbose = verbose)
  parse_soap_response(
    httr_response = response,
    resp_element = 'performProductPackageActionResponse',
    as_df = as_df
  )
}
#'
#' updateProductPackages
#'
#' Updates the specified ProductPackage objects.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#updateProductPackages}{Google Documentation for updateProductPackages}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a updateProductPackagesResponse
#' @examples
#' \dontrun{
#' res <- dfp_updateProductPackages(request_data)
#' }
#' @export
dfp_updateProductPackages <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Serialise the update request, POST it, and return the parsed response.
  soap_body <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'updateProductPackages',
    data = request_data
  )
  response <- execute_soap_request(request_body = soap_body, verbose = verbose)
  parse_soap_response(
    httr_response = response,
    resp_element = 'updateProductPackagesResponse',
    as_df = as_df
  )
}
#'
| /R/ProductPackageService.R | no_license | StevenMMortimer/rdfp | R | false | false | 5,919 | r | #' ProductPackageService
#'
#' Provides methods for updating and retrieving ProductPackage objects.
#' A
#' ProductPackage represents a group of products which will be sold together.
#'
#' To use this service, you need to have the new sales management solution
#' enabled on your network. If you do not see a "Sales" tab in <a
#' href="https://www.google.com/dfp">DoubleClick for Publishers (DFP)</a>, you
#' will not be able to use this service.
#'
#' createProductPackages
#'
#' Creates new ProductPackage objects.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#createProductPackages}{Google Documentation for createProductPackages}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a createProductPackagesResponse
#' @examples
#' \dontrun{
#' res <- dfp_createProductPackages(request_data)
#' }
#' @export
dfp_createProductPackages <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the createProductPackages SOAP call and parse its response.
  body_xml <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'createProductPackages',
    data = request_data
  )
  resp <- execute_soap_request(request_body = body_xml, verbose = verbose)
  parse_soap_response(
    httr_response = resp,
    resp_element = 'createProductPackagesResponse',
    as_df = as_df
  )
}
#'
#' getProductPackagesByStatement
#'
#' Gets a ProductPackagePage of ProductPackage objects that satisfy the filtering criteria specified by given Statement query. The following fields are supported for filtering:
#' \itemize{
#' \item{id}
#' \item{name}
#' \item{notes}
#' \item{status}
#' \item{isArchived}
#' \item{lastModifiedDateTime}
#' }
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#getProductPackagesByStatement}{Google Documentation for getProductPackagesByStatement}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a getProductPackagesByStatementResponse
#' @examples
#' \dontrun{
#' dat <- list(filterStatement=list('query'="WHERE status='ACTIVE'"))
#' res <- dfp_getProductPackagesByStatement(dat)
#' }
#' @export
dfp_getProductPackagesByStatement <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the getProductPackagesByStatement SOAP call and parse its reply.
  body_xml <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'getProductPackagesByStatement',
    data = request_data
  )
  resp <- execute_soap_request(request_body = body_xml, verbose = verbose)
  parse_soap_response(
    httr_response = resp,
    resp_element = 'getProductPackagesByStatementResponse',
    as_df = as_df
  )
}
#'
#' performProductPackageAction
#'
#' Performs actions on ProductPackage objects that match the given \{@@link Statement query\}.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#performProductPackageAction}{Google Documentation for performProductPackageAction}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a performProductPackageActionResponse
#' @examples
#' \dontrun{
#' res <- dfp_performProductPackageAction(request_data)
#' }
#' @export
dfp_performProductPackageAction <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the performProductPackageAction SOAP call and parse its reply.
  body_xml <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'performProductPackageAction',
    data = request_data
  )
  resp <- execute_soap_request(request_body = body_xml, verbose = verbose)
  parse_soap_response(
    httr_response = resp,
    resp_element = 'performProductPackageActionResponse',
    as_df = as_df
  )
}
#'
#' updateProductPackages
#'
#' Updates the specified ProductPackage objects.
#'
#' @seealso \href{https://developers.google.com/ad-manager/api/reference/v201905/ProductPackageService#updateProductPackages}{Google Documentation for updateProductPackages}
#'
#' @param request_data a \code{list} or \code{data.frame} of data elements
#' to be formatted for a SOAP
#' request (XML format, but passed as character string)
#' @param as_df a boolean indicating whether to attempt to parse the result into
#' a \code{data.frame}
#' @param verbose a boolean indicating whether to print the service URL and POSTed XML
#' @return a \code{data.frame} or \code{list} containing all the elements of a updateProductPackagesResponse
#' @examples
#' \dontrun{
#' res <- dfp_updateProductPackages(request_data)
#' }
#' @export
dfp_updateProductPackages <- function(request_data, as_df=TRUE, verbose=FALSE){
  # Assemble the updateProductPackages SOAP call and parse its reply.
  body_xml <- form_request_body(
    service = 'ProductPackageService',
    root_name = 'updateProductPackages',
    data = request_data
  )
  resp <- execute_soap_request(request_body = body_xml, verbose = verbose)
  parse_soap_response(
    httr_response = resp,
    resp_element = 'updateProductPackagesResponse',
    as_df = as_df
  )
}
#'
|
with(aeb8c015ffd484d39bb40b9681d8b1faf, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/34bb7d35-e1c0-4de5-b378-034f770a7361';FRAME258804 <- FRAME258804[,host_name_ip_1:=NULL];}); | /34bb7d35-e1c0-4de5-b378-034f770a7361/R/Temp/ajZZnGoPeb7xK.R | no_license | ayanmanna8/test | R | false | false | 245 | r | with(aeb8c015ffd484d39bb40b9681d8b1faf, {ROOT <- 'D:/ATS2.0/SEMOSS/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/34bb7d35-e1c0-4de5-b378-034f770a7361';FRAME258804 <- FRAME258804[,host_name_ip_1:=NULL];}); |
## making boxplots of all bootstrapped parameters from 2011, 2012, and 2013 VonB parameter estimations
### 2011 Data ###
mydata11 <- read.csv("2011_Age_length.csv", header=TRUE,sep=",")
x11 = data.frame(mydata11$Fractional.Age)
y11 = data.frame(mydata11$Fork.Length..cm.)
small11= cbind(x11, y11)
small11 <- small11[complete.cases(small11),]
smalldf11= data.frame(small11)
colnames(smalldf11) <- c("x11", "y11")
# setting starting variables where P1= Linf, P2= K and P3= t0
p1_11 = 100
p2_11 = .18
p3_11= -5
fit2011 = nls(y11 ~ p1_11*(1-exp(-p2_11*(x11-p3_11))), start=list(p1_11=p1_11, p2_11=p2_11,p3_11=p3_11), data=smalldf11)
summary(fit2011)
# Pseudo-R^2 for the 2011 von Bertalanffy fit.
# NOTE(review): the original TSS line had an unmatched ")" (syntax error) and
# referenced an undefined `ydata`; the observed 2011 fork lengths live in
# smalldf11$y11 — confirm against the fitting data frame.
RSS <- sum(resid(fit2011)^2)                           # residual sum of squares
TSS <- sum((smalldf11$y11 - mean(smalldf11$y11))^2)    # total sum of squares
Rsq <- 1 - (RSS / TSS)  # caution: R^2 is not well defined for nonlinear fits
confint(fit2011)  # parametric (profile-likelihood) confidence intervals
library(nlstools)
#http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2011 <- nlsBoot(fit2011, niter=999) # method to get non parametric- bootstrapped- confidence intervals
#boot2011$bootCI #contains the bootstrap medians and the bootstrap 95% CI
boot2011Coef <- data.frame(boot2011$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2011Coef) <- c("p1_11", "p2_11", "p3_11")
### 2012 DATA ###
mydata12 <- read.csv("2012_Age_length.csv", header=TRUE,sep=",")
x12 = mydata12$Fractional.Age
y12 = mydata12$Fork.Length..cm
small12=cbind(x12, y12)
smalldf12= data.frame(small12)
p1_12 = 80
p2_12 = .18
p3_12= .55
fit2012 = nls(y12 ~ p1_12*(1-exp(-p2_12*(x12-p3_12))), start=list(p1_12=p1_12, p2_12=p2_12,p3_12=p3_12), data=smalldf12)
summary(fit2012)
# Pseudo-R^2 for the 2012 fit.
# NOTE(review): original TSS line had an extra ")" and used undefined `ydata`;
# replaced with the 2012 observed lengths y12 — confirm against the data.
RSS <- sum(resid(fit2012)^2)            # residual sum of squares
TSS <- sum((y12 - mean(y12))^2)         # total sum of squares
Rsq <- 1 - (RSS / TSS)
confint(fit2012)  # parametric confidence intervals for Linf, K, t0
# # library(nlstools)
# #http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2012 <- nlsBoot(fit2012, niter=999)
# boot2012$bootCI #contains the bootstrap medians and the bootstrap 95% CI
# plot(boot2012, type="boxplot")
# title("2012")
# boot2012$coefboot # contains the bootstrap parameter estimates
boot2012Coef <- data.frame(boot2012$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2012Coef) <- c("p1_12", "p2_12", "p3_12")
#### 2013 DATA #####
mydata13 <- read.csv("2013_Age_length.csv", header=TRUE,sep=",")
x13 = mydata13$Fractional.Age
y13 = mydata13$Fork.Length..cm
small13= cbind(x13, y13)
smalldf13= data.frame(small13)
plot(x13, y13)
# setting starting variables where P1= Linf, P2= K and P3= t0
p1_13= 80
p2_13 = .18
p3_13= .55
fit2013 = nls(y13 ~ p1_13*(1-exp(-p2_13*(x13-p3_13))), start=list(p1_13=p1_13, p2_13=p2_13, p3_13=p3_13), data=smalldf13)
summary(fit2013)
# Pseudo-R^2 for the 2013 fit.
# NOTE(review): original TSS line had an extra ")" and used undefined `ydata`;
# replaced with the 2013 observed lengths y13 — confirm against the data.
RSS <- sum(resid(fit2013)^2)            # residual sum of squares
TSS <- sum((y13 - mean(y13))^2)         # total sum of squares
Rsq <- 1 - (RSS / TSS)
# parameter confidence intervals
confint(fit2013)
#http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2013 <- nlsBoot(fit2013, niter=999)
# boot2013$bootCI #contains the bootstrap medians and the bootstrap 95% CI
# plot(boot2013, type="boxplot")
# boot2013$coefboot # contains the bootstrap parameter estimates
# plot(xdata,ydata)
# lines(xdata, predict(fit2013))
boot2013Coef <- data.frame(boot2013$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2013Coef) <- c("p1_13", "p2_13", "p3_13")
## NOW JOINING TOGETHER ###
#### LINF #####
#formatting correctly for boxplots
Linf2011 <- data.frame(boot2011Coef$p1)
colnames(Linf2011) <- c("Linf")
Linf2012<- data.frame(boot2012Coef$p1_12)
colnames(Linf2012) <- c("Linf")
Linf2013 <- data.frame(boot2013Coef$p1_13)
colnames(Linf2013) <- c("Linf")
LinfAll <- rbind(Linf2011, Linf2012, Linf2013)
YearLabel2011 <- data.frame(rep(2011, 999))
colnames(YearLabel2011) <- c("Year")
YearLabel2012<- data.frame(rep(2012, 997))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013,995)) # for some reason this ran only 997 times unlike the two ones above that ran 999
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_Linf <- data.frame(YearLabelAll, LinfAll)
colnames(working_Linf) <- c("Year", "Linf")
#http://www.cookbook-r.com/Graphs/Plotting_distributions_(ggplot2)/
library(ggplot2)
boxplotLinf <- ggplot(working_Linf, aes(x=as.factor(Year), y=Linf, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("Linf")+
scale_y_continuous(limits=c(50,250), breaks=seq(80, 250, 50))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped Linf Estimations")
### K #####
#formatting correctly for boxplots
K2011 <- data.frame(boot2011Coef$p2)
colnames(K2011) <- c("K")
K2012<- data.frame(boot2012Coef$p2_12)
colnames(K2012) <- c("K")
K2013 <- data.frame(boot2013Coef$p2_13)
colnames(K2013) <- c("K")
KAll <- rbind(K2011, K2012, K2013)
YearLabel2011 <- data.frame(rep(2011, 999))
colnames(YearLabel2011) <- c("Year")
YearLabel2012<- data.frame(rep(2012, 997))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013,995))
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_K <- data.frame(YearLabelAll, KAll)
colnames(working_K) <- c("Year", "K")
boxplotK <- ggplot(working_K, aes(x=as.factor(Year), y=K, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("k")+
scale_y_continuous(limits=c(0,.8), breaks=seq(0,.9,.1))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped k Estimation")
### t0#####
#formatting correctly for boxplots
t2011 <- data.frame(boot2011Coef$p3)
colnames(t2011) <- c("t")
t2012 <- data.frame(boot2012Coef$p3_12)
colnames(t2012) <- c("t")
t2013 <- data.frame(boot2013Coef$p3_13)
colnames(t2013) <- c("t")
tAll <- rbind(t2011, t2012, t2013)
YearLabel2011 <- data.frame(rep(2011, 999))
colnames(YearLabel2011) <- c("Year")
YearLabel2012<- data.frame(rep(2012, 997))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013,995))
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_t <- data.frame(YearLabelAll, tAll)
colnames(working_t) <- c("Year", "t")
boxplott0 <- ggplot(working_t, aes(x=as.factor(Year), y=t, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("t0")+
scale_y_continuous(breaks=seq(-8,3,1))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped t0 Estimation")
tiff("Boxplots_VonB_by_Year.tiff", width= 7, height=9, unit="in", res=300)
multiplot(boxplotLinf, boxplotK, boxplott0, cols=1)
dev.off()
## BANANA Plots of L infinity and K for 2011, 2012 and 2013 ###
banana_2011 <- data.frame(Linf2011, K2011)
colnames(banana_2011) <- c("Linf11", "K11")
banana_2012 <- data.frame(Linf2012, K2012)
colnames(banana_2012) <- c("Linf12", "K12")
banana_2013 <- data.frame(Linf2013, K2013)
colnames(banana_2013) <- c("Linf13", "K13")
bp_2011 <- ggplot(banana_2011, aes(x=K11, y=Linf11))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2011 bootstrapped Linf and k values")
bp_2012 <- ggplot(banana_2012, aes(x=K12, y=Linf12))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2012 bootstrapped Linf and k values")
bp_2013 <- ggplot(banana_2013, aes(x=K13, y=Linf13))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2013 bootstrapped Linf and k values")
tiff("Bananaplot_Years.tiff", width=7, height=7, res=300)
multiplot(bp_2011, bp_2012, bp_2013, cols=1)
dev.off()
| /Age Length/Boxplots_VonB_by Year& Banana plots-7:16:14.R | no_license | eherdter/r-work | R | false | false | 9,483 | r | ## making boxplots of all bootstrapped parameters from 2011, 2012, and 2013 VonB parameter estimations
### 2011 Data ###
mydata11 <- read.csv("2011_Age_length.csv", header=TRUE,sep=",")
x11 = data.frame(mydata11$Fractional.Age)
y11 = data.frame(mydata11$Fork.Length..cm.)
small11= cbind(x11, y11)
small11 <- small11[complete.cases(small11),]
smalldf11= data.frame(small11)
colnames(smalldf11) <- c("x11", "y11")
# setting starting variables where P1= Linf, P2= K and P3= t0
p1_11 = 100
p2_11 = .18
p3_11= -5
fit2011 = nls(y11 ~ p1_11*(1-exp(-p2_11*(x11-p3_11))), start=list(p1_11=p1_11, p2_11=p2_11,p3_11=p3_11), data=smalldf11)
summary(fit2011)
RSS <- sum(resid(fit2011)^2)
# Fix: the original TSS line had an unbalanced ")" (syntax error) and
# referenced an undefined `ydata`; total sum of squares must use the
# observed 2011 lengths actually passed to the fit.
TSS <- sum((smalldf11$y11 - mean(smalldf11$y11))^2)
Rsq <- 1 - (RSS / TSS)  # pseudo-R^2; interpret with caution for nonlinear models
confint(fit2011) # parametric Confidence Intervals
library(nlstools)
#http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2011 <- nlsBoot(fit2011, niter=999) # method to get non parametric- bootstrapped- confidence intervals
#boot2011$bootCI #contains the bootstrap medians and the bootstrap 95% CI
boot2011Coef <- data.frame(boot2011$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2011Coef) <- c("p1_11", "p2_11", "p3_11")
### 2012 DATA ###
mydata12 <- read.csv("2012_Age_length.csv", header=TRUE,sep=",")
x12 = mydata12$Fractional.Age
y12 = mydata12$Fork.Length..cm
small12=cbind(x12, y12)
smalldf12= data.frame(small12)
p1_12 = 80
p2_12 = .18
p3_12= .55
fit2012 = nls(y12 ~ p1_12*(1-exp(-p2_12*(x12-p3_12))), start=list(p1_12=p1_12, p2_12=p2_12,p3_12=p3_12), data=smalldf12)
summary(fit2012)
RSS <- sum(resid(fit2012)^2)
# Fix: removed the unbalanced ")" and the undefined `ydata`; TSS is computed
# from the observed 2012 lengths used in the fit.
TSS <- sum((smalldf12$y12 - mean(smalldf12$y12))^2)
Rsq <- 1 - (RSS / TSS)  # pseudo-R^2 for the 2012 nonlinear fit
confint(fit2012)
# # library(nlstools)
# #http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2012 <- nlsBoot(fit2012, niter=999)
# boot2012$bootCI #contains the bootstrap medians and the bootstrap 95% CI
# plot(boot2012, type="boxplot")
# title("2012")
# boot2012$coefboot # contains the bootstrap parameter estimates
boot2012Coef <- data.frame(boot2012$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2012Coef) <- c("p1_12", "p2_12", "p3_12")
#### 2013 DATA #####
mydata13 <- read.csv("2013_Age_length.csv", header=TRUE,sep=",")
x13 = mydata13$Fractional.Age
y13 = mydata13$Fork.Length..cm
small13= cbind(x13, y13)
smalldf13= data.frame(small13)
plot(x13, y13)
# setting starting variables where P1= Linf, P2= K and P3= t0
p1_13= 80
p2_13 = .18
p3_13= .55
fit2013 = nls(y13 ~ p1_13*(1-exp(-p2_13*(x13-p3_13))), start=list(p1_13=p1_13, p2_13=p2_13, p3_13=p3_13), data=smalldf13)
summary(fit2013)
#getting sum of squared residuals
RSS <- sum(resid(fit2013)^2)
# Fix: removed the unbalanced ")" and the undefined `ydata`; TSS is computed
# from the observed 2013 lengths used in the fit.
TSS <- sum((smalldf13$y13 - mean(smalldf13$y13))^2)
Rsq <- 1 - (RSS / TSS)  # pseudo-R^2 for the 2013 nonlinear fit
#parameter confidence intervals
confint(fit2013)
#http://cran.r-project.org/web/packages/nlstools/nlstools.pdf#
boot2013 <- nlsBoot(fit2013, niter=999)
# boot2013$bootCI #contains the bootstrap medians and the bootstrap 95% CI
# plot(boot2013, type="boxplot")
# boot2013$coefboot # contains the bootstrap parameter estimates
# plot(xdata,ydata)
# lines(xdata, predict(fit2013))
boot2013Coef <- data.frame(boot2013$coefboot) ## 999 Bootstrapped parameter estimates
colnames(boot2013Coef) <- c("p1_13", "p2_13", "p3_13")
## NOW JOINING TOGETHER ###
#### LINF #####
# Assemble the bootstrapped Linf estimates from all three years into a single
# long data frame (Year, Linf) for the grouped boxplot below.
Linf2011 <- data.frame(boot2011Coef$p1_11)  # full column name; `$p1` relied on fragile $ partial matching
colnames(Linf2011) <- c("Linf")
Linf2012 <- data.frame(boot2012Coef$p1_12)
colnames(Linf2012) <- c("Linf")
Linf2013 <- data.frame(boot2013Coef$p1_13)
colnames(Linf2013) <- c("Linf")
LinfAll <- rbind(Linf2011, Linf2012, Linf2013)
# Derive the year-label lengths from the data instead of hard-coding
# 999/997/995: nlsBoot may return fewer than `niter` rows when individual
# bootstrap refits fail, which is exactly why the original counts differed
# between years.
YearLabel2011 <- data.frame(rep(2011, nrow(Linf2011)))
colnames(YearLabel2011) <- c("Year")
YearLabel2012 <- data.frame(rep(2012, nrow(Linf2012)))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013, nrow(Linf2013)))
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_Linf <- data.frame(YearLabelAll, LinfAll)
colnames(working_Linf) <- c("Year", "Linf")
#http://www.cookbook-r.com/Graphs/Plotting_distributions_(ggplot2)/
library(ggplot2)
boxplotLinf <- ggplot(working_Linf, aes(x=as.factor(Year), y=Linf, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("Linf")+
scale_y_continuous(limits=c(50,250), breaks=seq(80, 250, 50))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped Linf Estimations")
### K #####
#formatting correctly for boxplots
K2011 <- data.frame(boot2011Coef$p2)
colnames(K2011) <- c("K")
K2012<- data.frame(boot2012Coef$p2_12)
colnames(K2012) <- c("K")
K2013 <- data.frame(boot2013Coef$p2_13)
colnames(K2013) <- c("K")
KAll <- rbind(K2011, K2012, K2013)
YearLabel2011 <- data.frame(rep(2011, 999))
colnames(YearLabel2011) <- c("Year")
YearLabel2012<- data.frame(rep(2012, 997))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013,995))
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_K <- data.frame(YearLabelAll, KAll)
colnames(working_K) <- c("Year", "K")
boxplotK <- ggplot(working_K, aes(x=as.factor(Year), y=K, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("k")+
scale_y_continuous(limits=c(0,.8), breaks=seq(0,.9,.1))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped k Estimation")
### t0#####
#formatting correctly for boxplots
t2011 <- data.frame(boot2011Coef$p3)
colnames(t2011) <- c("t")
t2012 <- data.frame(boot2012Coef$p3_12)
colnames(t2012) <- c("t")
t2013 <- data.frame(boot2013Coef$p3_13)
colnames(t2013) <- c("t")
tAll <- rbind(t2011, t2012, t2013)
YearLabel2011 <- data.frame(rep(2011, 999))
colnames(YearLabel2011) <- c("Year")
YearLabel2012<- data.frame(rep(2012, 997))
colnames(YearLabel2012) <- c("Year")
YearLabel2013 <- data.frame(rep(2013,995))
colnames(YearLabel2013) <- c("Year")
YearLabelAll <- rbind(YearLabel2011, YearLabel2012, YearLabel2013)
working_t <- data.frame(YearLabelAll, tAll)
colnames(working_t) <- c("Year", "t")
boxplott0 <- ggplot(working_t, aes(x=as.factor(Year), y=t, colour=as.factor(Year)))+
geom_boxplot()+
scale_colour_discrete(name="Year")+ #changing the title of the legend
xlab("Year")+
ylab("t0")+
scale_y_continuous(breaks=seq(-8,3,1))+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"))+
ggtitle("Bootstrapped t0 Estimation")
tiff("Boxplots_VonB_by_Year.tiff", width= 7, height=9, unit="in", res=300)
multiplot(boxplotLinf, boxplotK, boxplott0, cols=1)
dev.off()
## BANANA Plots of L infinity and K for 2011, 2012 and 2013 ###
banana_2011 <- data.frame(Linf2011, K2011)
colnames(banana_2011) <- c("Linf11", "K11")
banana_2012 <- data.frame(Linf2012, K2012)
colnames(banana_2012) <- c("Linf12", "K12")
banana_2013 <- data.frame(Linf2013, K2013)
colnames(banana_2013) <- c("Linf13", "K13")
bp_2011 <- ggplot(banana_2011, aes(x=K11, y=Linf11))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2011 bootstrapped Linf and k values")
bp_2012 <- ggplot(banana_2012, aes(x=K12, y=Linf12))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2012 bootstrapped Linf and k values")
bp_2013 <- ggplot(banana_2013, aes(x=K13, y=Linf13))+
geom_point()+
xlab("k")+
ylab("Linf")+
theme(panel.grid.minor=element_blank(), panel.grid.major=element_blank(), panel.background=element_rect(fill='white', colour='black'),
legend.position="none", axis.text.x=element_text(colour="black"),
axis.text.y=element_text(colour="black"),
plot.title=element_text(size=14))+
ggtitle("Plot of 2013 bootstrapped Linf and k values")
tiff("Bananaplot_Years.tiff", width=7, height=7, res=300)
multiplot(bp_2011, bp_2012, bp_2013, cols=1)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds-plots.R
\name{ds_plot_bar}
\alias{ds_plot_bar}
\title{Generate bar plots}
\usage{
ds_plot_bar(data, ..., fill = "blue", print_plot = TRUE)
}
\arguments{
\item{data}{A \code{data.frame} or \code{tibble}.}
\item{...}{Column(s) in \code{data}.}
\item{fill}{Color of the bars.}
\item{print_plot}{logical; if \code{TRUE}, prints the plot; otherwise, returns the plot object.}
}
\description{
Creates bar plots if the data has categorical variables.
}
\examples{
ds_plot_bar(mtcarz)
ds_plot_bar(mtcarz, cyl)
ds_plot_bar(mtcarz, cyl, gear)
}
| /man/ds_plot_bar.Rd | no_license | DuyDN/descriptr | R | false | true | 611 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ds-plots.R
\name{ds_plot_bar}
\alias{ds_plot_bar}
\title{Generate bar plots}
\usage{
ds_plot_bar(data, ..., fill = "blue", print_plot = TRUE)
}
\arguments{
\item{data}{A \code{data.frame} or \code{tibble}.}
\item{...}{Column(s) in \code{data}.}
\item{fill}{Color of the bars.}
\item{print_plot}{logical; if \code{TRUE}, prints the plot else returns a plot object.}
}
\description{
Creates bar plots if the data has categorical variables.
}
\examples{
ds_plot_bar(mtcarz)
ds_plot_bar(mtcarz, cyl)
ds_plot_bar(mtcarz, cyl, gear)
}
|
#install and import all the required packages
install.packages("RWeka", dependencies = TRUE)
install.packages("e1071", dependencies = TRUE)
install.packages("caret", dependencies = TRUE)
install.packages("dplyr", dependencies = TRUE)
install.packages("class", dependencies = TRUE)
install.packages("readxl", dependencies = TRUE)
library(readxl)
library("e1071")
library(class)
library(caret)
library(rJava)
library(RWeka)
#Dataset division into training and test by sampling
# Split `data` into an 80/20 train/test partition, reproducibly via `seed`.
# Returns a list of four elements:
#   [[1]] training rows (feature columns), [[2]] test rows (feature columns),
#   [[3]] training Continent labels,       [[4]] test Continent labels.
divideDataset <- function(data, seed) {
  n_rows <- nrow(data)
  n_train <- floor(0.80 * n_rows)
  # Fix the RNG state so the same seed always yields the same partition.
  set.seed(seed)
  idx_train <- sample(seq_len(n_rows), size = n_train)
  feature_cols <- c("Overall Life", "Male Life", "Female Life", "Continent")
  list(
    data[idx_train, feature_cols],
    data[-idx_train, feature_cols],
    data$Continent[idx_train],
    data$Continent[-idx_train]
  )
}
#SVM
# Train a linear-kernel SVM (cost = 1, unscaled inputs) for Continent on an
# 80/20 split of `data` (split controlled by `seed`) and return the
# test-set accuracy as a single number in [0, 1].
mySvm<-function(data,seed){
#print("SVM")
train_test<-divideDataset(data,seed)
let_train<-train_test[[1]]
let_test<-train_test[[2]]
#Fit model
svmfit <- svm(Continent ~., data = let_train, kernel = "linear", cost = 1, scale = FALSE)
svmfit
#Tune to check best performance
# NOTE(review): the tuning result is only summarised, never used -- the
# predictions below come from the fixed cost = 1 model, not from
# tuned$best.model. Confirm whether that is intentional.
tuned <- tune(svm, Continent ~., data = let_train, kernel = "linear", ranges = list(cost=c(0.001,0.01,.1,1,10,100)))
summary(tuned)
#Make predictions
p <- predict(svmfit, let_test, type="class")
length(let_test$Continent)
table(p, let_test$Continent)
#Analyse results
#Confusion matrix
confusionMatrix(p, let_test$Continent )
#Accuracy
#print(mean(p== let_test$Continent))
svmoutput<-mean(p== let_test$Continent)
return(svmoutput)
}
#KNN
# Train a k-nearest-neighbours classifier for Continent on an 80/20 split of
# `data` (split controlled by `seed`) and return the test-set accuracy.
# caret::train picks the best k over 20 candidate values via repeated CV
# (3 repeats), centering and scaling the predictors internally.
myKnn<-function(data,seed){
#print("KNN")
#Slicing
knn_train_test<-divideDataset(data,seed)
let_train<-knn_train_test[[1]]
let_test<-knn_train_test[[2]]
#Preprocessing and training
trainX <- let_train[,names(let_train) != "Continent"]
# NOTE(review): preProcValues is computed but never applied; the actual
# centering/scaling happens inside train() via its preProcess argument.
preProcValues <- preProcess(x = trainX,method = c("center", "scale"))
preProcValues
#Fit Model- Using Caret's train model to find best k
# repeatedcv folds are drawn from the RNG, so results depend on RNG state.
ctrl <- trainControl(method="repeatedcv",repeats = 3) #,classProbs=TRUE,summaryFunction = twoClassSummary)
knnFit <- train(Continent~., data = let_train, method = "knn", trControl = ctrl,preProcess = c("center","scale"), tuneLength = 20)
#knnFit
plot(knnFit)
#Make predictions
knnPredict <- predict(knnFit,newdata = let_test )
knnPredict
#Summarize accuracy
#Confusion Matrix
confusionMatrix(knnPredict, let_test$Continent )
#Accuracy
#print(mean(knnPredict == let_test$Continent))
knnoutput<-mean(knnPredict== let_test$Continent)
return(knnoutput)
}
#RIPPER
# Train and evaluate a RIPPER (JRip) rule learner for Continent on an 80/20
# split of `data`; `seed` controls the partition. Returns the test-set
# accuracy as a single number in [0, 1].
myRipper <- function(data, seed) {
  split_sets <- divideDataset(data, seed)
  train_set <- split_sets[[1]]
  test_set <- split_sets[[2]]
  # Fit the rule set; Weka_control(F = 50) sets the number of folds used
  # for JRip's reduced-error pruning.
  model <- JRip(Continent ~ ., data = train_set, control = Weka_control(F = 50))
  summary(model)
  preds <- predict(model, test_set)
  tb <- table(preds, test_set$Continent)
  confusionMatrix(preds, test_set$Continent)
  mean(preds == test_set$Continent)
}
#c45
# Train and evaluate a C4.5 (J48) decision tree for Continent on an 80/20
# split of `data`; `seed` controls the partition. Returns the test-set
# accuracy as a single number in [0, 1].
myc45 <- function(data, seed) {
  split_sets <- divideDataset(data, seed)
  train_set <- split_sets[[1]]
  test_set <- split_sets[[2]]
  # J48 with reduced-error pruning (R = TRUE) and a minimum of 9 instances
  # per leaf (M = 9).
  model <- J48(Continent ~ ., data = train_set, control = Weka_control(R = TRUE, M = 9))
  summary(model)
  preds <- predict(model, test_set)
  tb <- table(preds, test_set$Continent)
  confusionMatrix(preds, test_set$Continent)
  mean(preds == test_set$Continent)
}
#MAIN
#MAIN
# Orchestrates the experiment: loads the life-expectancy dataset, runs each
# of the four classifiers (KNN, C4.5, RIPPER, SVM) over five fixed random
# seeds, then prints each method's mean accuracy and accuracy standard
# deviation across the five partitions.
main <- function() {
  # reading from dataset address location
  Life_expectancy_dataset <- read_excel("Life_expectancy_dataset.xlsx")
  View(Life_expectancy_dataset)
  # Converting Continent to factor (required by the classifiers)
  Life_expectancy_dataset[c("Continent")] <- lapply(Life_expectancy_dataset[c("Continent")], factor)
  data <- Life_expectancy_dataset
  randomSeeds <- list(2018, 2166, 2289, 2322, 2408)
  lp <- length(randomSeeds)
  results <- list(kknn = c(), kc45 = c(), krip = c(), ksvn = c())
  method_names <- c("KNN", "C4.5", "RIPPER", "SVM")
  # One accuracy per (seed, method). Fix: the original always ran myKnn
  # first and then overwrote `res` for j > 1, so KNN was trained four times
  # per seed with three of the runs discarded.
  for (i in seq_len(lp)) {
    seed <- randomSeeds[[i]]
    for (j in 1:4) {
      if (j == 1) {
        res <- myKnn(data, seed)
      } else if (j == 2) {
        res <- myc45(data, seed)
      } else if (j == 3) {
        res <- myRipper(data, seed)
      } else {
        res <- mySvm(data, seed)
      }
      results[[j]] <- c(results[[j]], res)
    }
  }
  # Average accuracy and standard deviation per method
  avg <- c()
  msd <- c()
  for (i in 1:4) {
    accs <- unlist(results[i], use.names = FALSE)
    avg <- append(avg, mean(accs))
    msd <- append(msd, sd(accs))
  }
  # present the results
  for (i in 1:4) {
    print(sprintf("method name: %s; averaged accuracy: %.2f; accuracy standard deviation: %.3f",
                  method_names[i], avg[i], msd[i]))
  }
}
#Calling main executes the entire program
main()
| /my_data_mining.R | no_license | vagishatyagi/Data-Mining-Classification-techniques | R | false | false | 5,763 | r |
#install and import all the required packages
install.packages("RWeka", dependencies = TRUE)
install.packages("e1071", dependencies = TRUE)
install.packages("caret", dependencies = TRUE)
install.packages("dplyr", dependencies = TRUE)
install.packages("class", dependencies = TRUE)
install.packages("readxl", dependencies = TRUE)
library(readxl)
library("e1071")
library(class)
library(caret)
library(rJava)
library(RWeka)
#Dataset division into training and test by sampling
divideDataset<-function(data,seed){
## 80% of the sample size
smp_size <- floor(0.80 * nrow(data))
## set the seed to make your partition reproductible
set.seed(seed)
train_ind <- sample(seq_len(nrow(data)), size = smp_size)
colums<-c("Overall Life","Male Life", "Female Life", "Continent")
data_train <- data[train_ind,colums ]
data_test <- data[-train_ind,colums ]
t_train <- data$Continent[train_ind]
t_test <- data$Continent[-train_ind]
output<-list(data_train,data_test,t_train,t_test)
return(output)
}
#SVM
mySvm<-function(data,seed){
#print("SVM")
train_test<-divideDataset(data,seed)
let_train<-train_test[[1]]
let_test<-train_test[[2]]
#Fit model
svmfit <- svm(Continent ~., data = let_train, kernel = "linear", cost = 1, scale = FALSE)
svmfit
#Tune to check best performance
tuned <- tune(svm, Continent ~., data = let_train, kernel = "linear", ranges = list(cost=c(0.001,0.01,.1,1,10,100)))
summary(tuned)
#Make predictions
p <- predict(svmfit, let_test, type="class")
length(let_test$Continent)
table(p, let_test$Continent)
#Analyse results
#Confusion matrix
confusionMatrix(p, let_test$Continent )
#Accuracy
#print(mean(p== let_test$Continent))
svmoutput<-mean(p== let_test$Continent)
return(svmoutput)
}
#KNN
myKnn<-function(data,seed){
#print("KNN")
#Slicing
knn_train_test<-divideDataset(data,seed)
let_train<-knn_train_test[[1]]
let_test<-knn_train_test[[2]]
#Preprocessing and training
trainX <- let_train[,names(let_train) != "Continent"]
preProcValues <- preProcess(x = trainX,method = c("center", "scale"))
preProcValues
#Fit Model- Using Caret's train model to find best k
ctrl <- trainControl(method="repeatedcv",repeats = 3) #,classProbs=TRUE,summaryFunction = twoClassSummary)
knnFit <- train(Continent~., data = let_train, method = "knn", trControl = ctrl,preProcess = c("center","scale"), tuneLength = 20)
#knnFit
plot(knnFit)
#Make predictions
knnPredict <- predict(knnFit,newdata = let_test )
knnPredict
#Summarize accuracy
#Confusion Matrix
confusionMatrix(knnPredict, let_test$Continent )
#Accuracy
#print(mean(knnPredict == let_test$Continent))
knnoutput<-mean(knnPredict== let_test$Continent)
return(knnoutput)
}
#RIPPER
myRipper<-function(data,seed){
#print("RIPPER")
train_test<-divideDataset(data,seed)
let_train<-train_test[[1]]
let_test<-train_test[[2]]
# fit model-Using Weka Control function of JRip to tune
fit <- JRip(Continent~., data=let_train, control = Weka_control( F =50))
# summarize the fit
summary(fit)
# make predictions
predictions <- predict(fit, let_test)
# summarize accuracy
tb<-table(predictions, let_test$Continent)
#Confusion Matrix
confusionMatrix(predictions, let_test$Continent )
#Accuracy
#print(mean(predictions== let_test$Continent))
ripsoutput<-mean(predictions== let_test$Continent)
return(ripsoutput)
}
#c45
myc45<-function(data,seed){
#print("C45")
train_test<-divideDataset(data,seed)
let_train<-train_test[[1]]
let_test<-train_test[[2]]
# fit model-Using Weka Control function of J48 to tune
fit <- J48(Continent~., data=let_train, control = Weka_control(R = TRUE, M = 9))
# summarize the fit
summary(fit)
# make predictions
c45predictions <- predict(fit, let_test)
# summarize accuracy
tb<-table(c45predictions, let_test$Continent)
#Confusion Matrix
confusionMatrix(c45predictions, let_test$Continent )
#Accuracy
#print(mean(c45predictions== let_test$Continent))
c45output<-mean(c45predictions== let_test$Continent)
return(c45output)
}
#MAIN
main<-function(){
#reading from dataset address location
Life_expectancy_dataset <- read_excel("Life_expectancy_dataset.xlsx")
View(Life_expectancy_dataset)
#Converting Continent to factor
Life_expectancy_dataset[c("Continent")]<- lapply(Life_expectancy_dataset[c("Continent")], factor)
data=Life_expectancy_dataset
randomSeeds<-list(2018,2166,2289,2322,2408)
lp<-length(randomSeeds)
results <- list(kknn=c(), kc45=c(), krip=c(), ksvn=c())
#Reporting results for 5 groups for each algorithm with help of seeds
for (i in 1:lp)
{
for(j in 1:4){
res <- myKnn(data,randomSeeds[[i]])
if(j==2){
res <- myc45(data,randomSeeds[[i]])
}
if(j==3){
res <- myRipper(data,randomSeeds[[i]])
}
if(j==4){
res <- mySvm(data,randomSeeds[[i]])
}
results[[j]] <- c(results[[j]], res)
}
}
#Calculating average accuracy and average standard deviation
avg <- c()
msd <- c()
for (i in 1:4){
temp <- c()
for (each in results[i]){
temp <- append(temp, each)
}
avg <- append(avg, mean(temp))
msd <- append(msd, sd(temp))
}
#present the results
for(i in 1:4){
umethod <- "KNN"
if(i==2){
umethod <- "C4.5"
}
if(i==3){
umethod <- "RIPPER"
}
if(i==4){
umethod <- "SVM"
}
print(sprintf("method name: %s; averaged accuracy: %.2f; accuracy standard deviation: %.3f", umethod, avg[i], msd[i]))
}
}
#Calling main executes the entire program
main()
|
library(dynwrap)
library(jsonlite)
library(readr)
library(dplyr)
library(purrr)
library(rstan)
library(coda)
library(MCMCglmm)
library(dyndimred)
# ____________________________________________________________________________
# Load data ####
data <- read_rds('/input/data.rds')
params <- jsonlite::read_json('/input/params.json')
# ____________________________________________________________________________
# Infer trajectory ####
# Infer a probabilistic pseudotime trajectory with pseudogp.
#
# Arguments:
#   expression       cell x gene expression matrix (cells in rows).
#   dimreds          logical flags selecting which dyndimred methods to run;
#                    each selected method yields one 2-D embedding.
#   chains, iter     Stan sampling settings forwarded to fitPseudotime.
#   smoothing_alpha, smoothing_beta, pseudotime_mean, pseudotime_var,
#   initialise_from  pseudogp model hyperparameters.
# Returns a dynwrap prediction model with a linear trajectory and timings.
run_fun <- function(
  expression,
  # Fix: this default was corrupted in the source into an invalid
  # c("dimreds = c(...", "dimreds = )") expression (a syntax error);
  # restored as a plain logical vector selecting the first two methods.
  dimreds = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE),
  chains = 3,
  iter = 100,
  smoothing_alpha = 10,
  smoothing_beta = 3,
  pseudotime_mean = 0.5,
  pseudotime_var = 1,
  initialise_from = "random"
) {
  requireNamespace("pseudogp")
  requireNamespace("rstan")
  requireNamespace("coda")
  requireNamespace("MCMCglmm")

  # perform dimreds
  dimred_names <- names(dyndimred::list_dimred_methods())[as.logical(dimreds)]
  spaces <- map(dimred_names, ~ dimred(expression, method = ., ndim = 2)) # only 2 dimensions per dimred are allowed

  # TIMING: done with preproc
  tl <- add_timing_checkpoint(NULL, "method_afterpreproc")

  # fit probabilistic pseudotime model
  fit <- pseudogp::fitPseudotime(
    X = spaces,
    smoothing_alpha = smoothing_alpha,
    smoothing_beta = smoothing_beta,
    iter = iter,
    chains = chains,
    initialise_from = initialise_from,
    pseudotime_var = pseudotime_var,
    pseudotime_mean = pseudotime_mean
  )

  # TIMING: done with method
  tl <- tl %>% add_timing_checkpoint("method_aftermethod")

  # extract pseudotime: per-cell posterior mode of the sampled pseudotimes
  pst <- rstan::extract(fit, pars = "t")$t
  tmcmc <- coda::mcmc(pst)
  pseudotime <- MCMCglmm::posterior.mode(tmcmc) %>%
    setNames(rownames(expression))

  # collect data for visualisation purposes
  # code is adapted from pseudogp::posteriorCurvePlot
  pst <- rstan::extract(fit, pars = "t", permute = FALSE)
  lambda <- rstan::extract(fit, pars = "lambda", permute = FALSE)
  sigma <- rstan::extract(fit, pars = "sigma", permute = FALSE)

  # return output
  wrap_prediction_model(
    cell_ids = rownames(expression)
  ) %>% add_linear_trajectory(
    pseudotime = pseudotime,
    spaces = spaces,
    chains = chains,
    pst = pst,
    lambda = lambda,
    sigma = sigma
  ) %>% add_timings(
    timings = tl %>% add_timing_checkpoint("method_afterpostproc")
  )
}
args <- params[intersect(names(params), names(formals(run_fun)))]
model <- do.call(run_fun, c(args, data))
# ____________________________________________________________________________
# Save output ####
write_rds(model, '/output/output.rds') | /containers/pseudogp/run.R | no_license | ManuSetty/dynmethods | R | false | false | 2,791 | r | library(dynwrap)
library(jsonlite)
library(readr)
library(dplyr)
library(purrr)
library(rstan)
library(coda)
library(MCMCglmm)
library(dyndimred)
# ____________________________________________________________________________
# Load data ####
data <- read_rds('/input/data.rds')
params <- jsonlite::read_json('/input/params.json')
# ____________________________________________________________________________
# Infer trajectory ####
run_fun <- function(
expression,
c("dimreds = c(TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE", "dimreds = )"),
chains = 3,
iter = 100,
smoothing_alpha = 10,
smoothing_beta = 3,
pseudotime_mean = 0.5,
pseudotime_var = 1,
initialise_from = "random"
) {
requireNamespace("pseudogp")
requireNamespace("rstan")
requireNamespace("coda")
requireNamespace("MCMCglmm")
# perform dimreds
dimred_names <- names(dyndimred::list_dimred_methods())[as.logical(dimreds)]
spaces <- map(dimred_names, ~ dimred(expression, method = ., ndim = 2)) # only 2 dimensions per dimred are allowed
# TIMING: done with preproc
tl <- add_timing_checkpoint(NULL, "method_afterpreproc")
# fit probabilistic pseudotime model
fit <- pseudogp::fitPseudotime(
X = spaces,
smoothing_alpha = smoothing_alpha,
smoothing_beta = smoothing_beta,
iter = iter,
chains = chains,
initialise_from = initialise_from,
pseudotime_var = pseudotime_var,
pseudotime_mean = pseudotime_mean
)
# TIMING: done with method
tl <- tl %>% add_timing_checkpoint("method_aftermethod")
# extract pseudotime
pst <- rstan::extract(fit, pars = "t")$t
tmcmc <- coda::mcmc(pst)
pseudotime <- MCMCglmm::posterior.mode(tmcmc) %>%
setNames(rownames(expression))
# collect data for visualisation purposes
# code is adapted from pseudogp::posteriorCurvePlot
pst <- rstan::extract(fit, pars = "t", permute = FALSE)
lambda <- rstan::extract(fit, pars = "lambda", permute = FALSE)
sigma <- rstan::extract(fit, pars = "sigma", permute = FALSE)
# return output
wrap_prediction_model(
cell_ids = rownames(expression)
) %>% add_linear_trajectory(
pseudotime = pseudotime,
spaces = spaces,
chains = chains,
pst = pst,
lambda = lambda,
sigma = sigma
) %>% add_timings(
timings = tl %>% add_timing_checkpoint("method_afterpostproc")
)
}
args <- params[intersect(names(params), names(formals(run_fun)))]
model <- do.call(run_fun, c(args, data))
# ____________________________________________________________________________
# Save output ####
write_rds(model, '/output/output.rds') |
\name{VptreeIndex}
\alias{VptreeIndex-class}
\alias{VptreeIndex}
\alias{VptreeIndex_nodes}
\title{The VptreeIndex class}
\description{A class to hold the vantage point tree for exact nearest neighbor identification.}
\usage{
VptreeIndex(data, nodes, order, NAMES=NULL, distance="Euclidean")
VptreeIndex_nodes(x)
}
\arguments{
\item{data}{A numeric matrix with data points in columns and dimensions in rows.}
\item{nodes}{A list of vectors specifying the structure of the VP tree.}
\item{order}{An integer vector of length equal to \code{ncol(data)}, specifying the order of observations.}
\item{NAMES}{A character vector of sample names or \code{NULL}.}
\item{distance}{A string specifying the distance metric to use.}
\item{x}{A VptreeIndex object.}
}
\details{
The VptreeIndex class holds the indexing structure required to run the VP tree algorithm.
Users should never need to call the constructor explicitly, but should generate instances of VptreeIndex classes with \code{\link{buildVptree}}.
}
\value{
The \code{VptreeIndex} constructor will return an instance of the VptreeIndex class.
\code{VptreeIndex_nodes} will return the corresponding slots of \code{x}.
}
\author{
Aaron Lun
}
\seealso{
\code{\link{buildVptree}}
}
\examples{
example(buildVptree)
str(VptreeIndex_nodes(out))
}
| /man/VptreeIndex.Rd | no_license | lorenzonegro/BiocNeighbors | R | false | false | 1,326 | rd | \name{VptreeIndex}
\alias{VptreeIndex-class}
\alias{VptreeIndex}
\alias{VptreeIndex_nodes}
\title{The VptreeIndex class}
\description{A class to hold the vantage point tree for exact nearest neighbor identification.}
\usage{
VptreeIndex(data, nodes, order, NAMES=NULL, distance="Euclidean")
VptreeIndex_nodes(x)
}
\arguments{
\item{data}{A numeric matrix with data points in columns and dimensions in rows.}
\item{nodes}{A list of vectors specifying the structure of the VP tree.}
\item{order}{An integer vector of length equal to \code{ncol(data)}, specifying the order of observations.}
\item{NAMES}{A character vector of sample names or \code{NULL}.}
\item{distance}{A string specifying the distance metric to use.}
\item{x}{A VptreeIndex object.}
}
\details{
The VptreeIndex class holds the indexing structure required to run the VP tree algorithm.
Users should never need to call the constructor explicitly, but should generate instances of VptreeIndex classes with \code{\link{buildVptree}}.
}
\value{
The \code{VptreeIndex} constructor will return an instance of the VptreeIndex class.
\code{VptreeIndex_nodes} will return the corresponding slots of \code{x}.
}
\author{
Aaron Lun
}
\seealso{
\code{\link{buildVptree}}
}
\examples{
example(buildVptree)
str(VptreeIndex_nodes(out))
}
|
source("~/NavarraBiomed/analysis/parkinson/code/experiment.r")
dataDir <- "~/NavarraBiomed/analysis/parkinson/data/1_no_outliers"
workDir <- "~/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL"
infoDir <- "~/NavarraBiomed/analysis/parkinson/sampleInfo/"
setwd(workDir)
load("raw.Rdata")
load("BS.Rdata")
load("oxBS.Rdata")
load("~/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL/5hmC.Rdata")
targets <- read.metharray.sheet(infoDir, pattern="BS.csv$")
rownames(targets) <- targets$Sample_Name
## remove sample
cols <- cols[1:9]
hmC_naive <- hmC_naive[,cols]
targets <- targets[cols,]
colnames(hmC_naive) <- cols
clases <- targets$Sample_Group
# Differential 5hmC positions, PD vs CTRL. adjPVal = 1 keeps every probe so
# that thresholds can be applied manually below.
myDMP <- champ.DMP(beta = hmC_naive, pheno=targets$Sample_Group, adjPVal = 1)
# manual selection thresholds
p <- 0.01
adjP <- 0.4
deltaBeta <- 0.1
dmpSel <- myDMP$PD_to_CTRL[(abs(myDMP$PD_to_CTRL$deltaBeta) > deltaBeta) & (myDMP$PD_to_CTRL$P.Value < p) ,]
# NOTE(review): this line discards the threshold-based selection above and
# keeps the first 100 rows instead (presumably the champ.DMP output is
# ordered by significance -- confirm). Keep only one of the two assignments.
dmpSel <- myDMP$PD_to_CTRL[1:100,]
sel_dmp <- rownames(dmpSel)
lista_2 <- sel_dmp
plot.new()
# NOTE(review): lista_1 is not defined in this script section; it must come
# from an earlier (interactive?) step producing the "BS" probe list -- verify.
venn_diagram2(lista_1, lista_2, "BS", "5hmC")
# `p` is reused here as a plot object, shadowing the p-value threshold above.
p <- innerheatmap(hmC_naive[sel_dmp, order(clases)])
p
# Export hg19 BED coordinates and CpG id lists, both for all probes and for
# the selected PD-vs-CTRL probes.
write.table(EPIC.hg19[rownames(myDMP$PD_to_CTRL),c('seqnames','start','end')], file='selected.h5mC.hg37.bed', quote=F, sep="\t", row.names=F, col.names=F)
write.table(EPIC.hg19[sel_dmp,c('seqnames','start','end')], file='selected.h5mC.PDvsCTRL.hg37.bed', quote=F, sep="\t", row.names=F, col.names=F)
write.table(rownames(myDMP$PD_to_CTRL), file="selected.5hmC.cpgs.txt", row.names=F, col.names=F)
write.table(sel_dmp, file="selected.5hmC.PDvsCTRL.cpgs.txt", row.names=F, col.names=F)
###########################################################################################################
###########################################################################################################
targetsOX <- read.metharray.sheet(dataDir, pattern="OX.csv$")
rownames(targetsOX) <- targetsOX$Sample_Name
cols <- targetsBS[colnames(beta.BS),'Sample_Name']
common <- rownames(beta.BS)[ rownames(beta.BS) %in% rownames(beta.oxBS) ]
hmC_naive <- beta.BS[common, ] - beta.oxBS[common, targetsOX[cols, 'Index'] ]
C_naive <- 1-beta.BS
mC_naive <- beta.oxBS
## remove sample
cols <- cols[1:9]
hmC_naive <- hmC_naive[,cols]
targets <- targets[cols,]
colnames(hmC_naive) <- cols
dmp <- champ.DMP(beta = hmC_naive, pheno=targets$Sample_Group, adjPVal = 1)
plot(dmp$PD_to_CTRL$deltaBeta,-log10(dmp$PD_to_CTRL$adj.P.Val), pch=20)
multidensity(hmC_naive)
hist(dmp$PD_to_CTRL$adj.P.Val)
DMP.GUI(DMP=dmp$PD_to_CTRL, beta=hmC_naive, pheno=targets$Sample_Group )
p <- 0.00001
adjP <- 0.4
deltaBeta <- 0.1
dmpSel <- dmp$PD_to_CTRL[(abs(dmp$PD_to_CTRL$deltaBeta) > deltaBeta) & (dmp$PD_to_CTRL$adj.P.Val < adjP) ,]
cpgSel <- rownames(dmpSel)
## GREAT
write.table(EPIC.manifest.hg38[cpgSel,c('seqnames','start','end')], file='selected.cpgsPDvsCTRL.bed', quote=F, sep="\t", row.names=F, col.names=F)
cols_no <- cols[1:9]
clases <- as.factor(c(rep("BS",9), rep("oxBS",9)))
colnames(M.BS) <- targetsBS$Sample_Name
colnames(M.oxBS) <- targetsOX$Sample_Name
colnames(beta.BS) <- targetsBS$Sample_Name
colnames(beta.oxBS) <- targetsOX$Sample_Name
Mtotal <- cbind(M.BS[common, cols_no], M.oxBS[common, cols_no])
beta <- cbind(beta.BS[common, cols_no], beta.oxBS[common, cols_no])
myDMP <- champ.DMP(beta = beta, pheno=clases, adjPVal = 1)
plot(myDMP[[1]]$logFC,-log10(myDMP[[1]]$adj.P.Val), pch=20)
DMP.GUI(DMP=myDMP[[1]], beta=beta, pheno=clases)
save(hmC_naive, myDMP, file = "5hmC.Rdata")
##### mCSEA
# Region-level enrichment with mCSEA: rank probes for the PD-vs-CTRL contrast
# with Sex as covariate and Age as continuous covariate, then test promoters.
#infoDir <- "/home/alabarga/NavarraBiomed/analysis/parkinson/sampleInfo"
targetsBS <- read.metharray.sheet(infoDir, pattern="BS.csv$")
pheno <- targetsBS[,c('Sample_Group', 'Age','Sex')]
#colnames(pheno) <- c('expla', 'cov1', 'cov2')
rownames(pheno) <- targetsBS[,'Sample_Name']
pheno$Age <- as.numeric(pheno$Age)
# hmC_naive is passed as M-values here (typeInput = "M"); converted to betas below
myRank <- rankProbes(hmC_naive, pheno, refGroup = "CTRL", typeInput = "M", covariates = c('Sex'), continuous=c('Age'))
hmC_beta = 2^hmC_naive/(2^hmC_naive + 1)
myResults <- mCSEATest(myRank, hmC_beta, pheno, regionsTypes = "promoters", platform = "EPIC")
# drop the 7th column for compact printing (presumably the per-region CpG list
# -- TODO confirm against mCSEATest output structure)
proms <- myResults[["promoters"]][-7]
# NOTE(review): attach()/detach() is discouraged; the bare `padj`/`pval`/`size`
# names below resolve through the attached `proms` data frame
attach(proms)
head(proms[order(padj),],20)
subset(proms, pval <0.001 & size > 10)
# plot every promoter DMR passing the p-value/size filter
for (dmrName in rownames(subset(proms, pval <0.001 & size > 10))) {
  mCSEAPlot(myResults, regionType = "promoters",
            dmrName = dmrName, genes=TRUE,
            transcriptAnnotation = "symbol", makePDF = FALSE)
}
detach(proms)
head(myResults[["promoters"]][,-7],20)
mCSEAPlotGSEA(myRank, myResults, regionType = "promoters", dmrName = "ACER3")
#####
# Re-run ChAMP on the subset of probes with positive BS-vs-oxBS signal
# (i.e. probes showing evidence of 5hmC), now contrasting PD vs CTRL.
cols <-colnames(hmC_naive)
cols_no <- cols[1:9]
selDMP <- myDMP[[1]][(myDMP[[1]]$logFC > 0) & (myDMP[[1]]$adj.P.Val < 0.2), ]
beta <- hmC_naive[rownames(selDMP), cols_no]
myDMP <- champ.DMP(beta = beta, pheno=targetsOX[cols_no, 'Sample_Group'], adjPVal = 1, arraytype = "EPIC")
DMP.GUI(DMP=myDMP[[1]], beta=beta, pheno=targetsOX[cols_no, 'Sample_Group'])
myDMPsel <- myDMP[[1]][myDMP[[1]]$P.Value < 0.05,]
# DMR and GSEA analyses on the 5hmC probe subset
myDMR <- champ.DMR(beta = beta, pheno=targetsOX[cols_no, 'Sample_Group'], method = "Bumphunter", B=100, minProbes=3, arraytype="EPIC")
myGSEA <- champ.GSEA(beta = hmC_naive[rownames(selDMP),], DMP=myDMPsel, DMR=NULL, arraytype = "EPIC")
myDMR <- champ.DMR(beta = hmC_naive, pheno=targetsOX[cols, 'Sample_Group'], method = "Bumphunter", B=100, adjPvalDmr=0.1, adjPvalProbe=0.8, arraytype="EPIC")
# NOTE(review): `myDMP.2` is not defined anywhere in this script -- confirm the
# intended object (possibly `myDMP`); also note the negated beta matrix here
DMP.GUI(DMP=myDMP.2[[1]], beta=-hmC_naive[rownames(selDMP),], pheno=targetsOX[cols, 'Sample_Group'])
# functional normalization of the red/green channels, done per treatment arm
rgBS <- preprocessFunnormRedGreen(RGsetBS, sex= 1*(targetsBS$Sex == "M"))
rgOxBS <- preprocessFunnormRedGreen(RGsetOX, sex= 1*(targetsOX$Sex == "M"))
qc <- QCinfo(RGsetEx)
## BS vs OX
# Build methylated/unmethylated intensity matrices from `mdat` (created earlier,
# outside this excerpt), after dropping one flagged outlier array.
pD <- pData(mdat)
remove_samples <- c('201096090140_R07C01')
mdat <- mdat[,!(pD$Index %in% remove_samples)]
pD <- pData(mdat)
BS <- pD$Index[pD$Methylation == "5mC"]
oxBS <- pD$Index[pD$Methylation == "5hmC"]
M.BS <- minfi::getMeth(mdat)[,BS]
U.BS <- getUnmeth(mdat)[,BS]
M.oxBS<- getMeth(mdat)[,oxBS]
U.oxBS <- getUnmeth(mdat)[,oxBS]
# total intensities, used as coverage weights by the MLE/MLML estimators below
N.BS <- M.BS + U.BS
N.oxBS <- M.oxBS + U.oxBS
beta.BS <- beta[, BS]
beta.oxBS <- beta[, oxBS]
colnames(beta.BS) <- pD$Sample_Name[pD$Type == 'BS']
colnames(beta.oxBS) <- pD$Sample_Name[pD$Type == 'OX']
colnames(N.BS) <- pD$Sample_Name[pD$Type == 'BS']
colnames(N.oxBS) <- pD$Sample_Name[pD$Type == 'OX']
## Naive estimates
## The naive approach to obtain 5-hmC levels is $\beta_{BS} - \beta_{OxBS}$. This approach results in negative values for the 5-hmC levels.
hmC_naive <- beta.BS - beta.oxBS  # 5-hmC: BS beta minus oxBS beta (may be negative)
C_naive <- 1 - beta.BS            # unmethylated-C fraction
# BUG FIX: was `beta_oxBS` (underscore), an object that is never defined; the
# oxBS beta matrix is called `beta.oxBS` throughout this script (see the earlier
# `mC_naive <- beta.oxBS` assignment).
mC_naive <- beta.oxBS
naive_estimation <- 0
# Maximum-likelihood 5mC/5hmC estimation from the paired BS/oxBS betas and coverages
mle_estimation <- oxBS.MLE(beta.BS, beta.oxBS, N.BS, N.oxBS)
## MLML: consistent simultaneous estimates of DNA methylation and hydroxymethylation
## https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3789553/
mlml_estimation <- MLML(T = M.BS , U = U.BS, L = U.oxBS, M = M.oxBS, exact=TRUE)
# side-by-side density plots of the three MLML component estimates
par(mfrow =c(1,3))
multidensity(mlml_estimation$mC, main="5-mC using MLML")
multidensity(mlml_estimation$hmC, main="5-hmC using MLML")
multidensity(mlml_estimation$C, main="5-C using MLML")
hmc <- mlml_estimation$hmC
# NOTE(review): exploratory overwrites below -- `hmc` and `mvalues.limma` are
# reassigned several times and only the LAST assignment of each takes effect
# (hmc = the M.BS - M.oxBS difference; mvalues.limma = the clipped betas).
hmC_naive <- M.BS - M.oxBS
pd.hmc <- pd[colnames(hmc),]
hmc <- hmC_naive
mvalues.limma <- logit2(hmc)
mvalues.limma <- hmC_naive
# map the differences onto the (0,1) beta scale and clip the extremes
betas <- 2^hmC_naive / (2^hmC_naive + 1)
betas[betas <= 0] <- 10^-10
betas[betas >= 1] <- 1- 10^-10
mvalues.limma <- betas
# limma model: cell-means coding for Status plus Sex and Age covariates
mod.limma = model.matrix( ~ -1 + pd.hmc$Status + pd.hmc$Sex + pd.hmc$Age)
colnames(mod.limma) <- c('CTRL','PD','Sex','Age')
cont.matrix <- makeContrasts(
  PDvsCTRL=PD-CTRL,
  levels=mod.limma)
fit.2 <- lmFit(mvalues.limma, mod.limma)
fit.2 <- contrasts.fit(fit.2, cont.matrix)
fit.2 <- eBayes(fit.2)
top <- topTable(fit.2,coef="PDvsCTRL",sort.by="p",number=nrow(mvalues.limma))
head(top)
# keep the 1000 most significant probes and draw a class-ordered heatmap
sel_limma <- rownames(top)[1:1000]
p <- innerheatmap(hmC_naive[, order(clases)])
p
###########################################################################3
# Bookkeeping: map a previously exported BED of selected CpGs back to EPIC probe
# names via the hg19 manifest, then re-export coordinate files.
bedfile <- read.table('/home/alabarga/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL/selected.cpgs.4hmC.PDvsCTRL.hg19.bed')
my.granges <- GRanges(seqnames = bedfile$V1, ranges = IRanges(start = bedfile$V2, end = bedfile$V3))
o <- findOverlaps(my.granges,EPIC.hg19.manifest)
original.selected.cpgs <- names(EPIC.hg19.manifest)[subjectHits(o)]
write.table(EPIC.hg19[original.selected.cpgs,c('seqnames','start','end','cpgname')], file='original.cpgs.5hmC.PDvsCTRL.hg19.bed', quote=F, sep="\t", row.names=F, col.names=F)
cpgs.1062 <- read.table('/home/alabarga/NavarraBiomed/analysis/parkinson/results/selected.cpgs.5hmC.PDvsCTRL.txt')
write.table(EPIC.hg19[cpgs.1062$V1,c('seqnames','start','end','cpgname')], file='selected.cpgs.5hmC.PDvsCTRL.1062.bed', quote=F, sep="\t", row.names=F, col.names=F)
# NOTE(review): stray string literal below has no effect (evaluated and discarded)
'original.cpgs.5hmC.PDvsCTRL.hg19.bed'
# ---- dataset record: /hydroxymethylation/3_hmC_PD_vs_CTRL.R (alabarga/methylation-recipes, no license) ----
source("~/NavarraBiomed/analysis/parkinson/code/experiment.r")
# Paths and precomputed objects for the 5hmC PD-vs-CTRL analysis
dataDir <- "~/NavarraBiomed/analysis/parkinson/data/1_no_outliers"
workDir <- "~/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL"
infoDir <- "~/NavarraBiomed/analysis/parkinson/sampleInfo/"
setwd(workDir)
# load previously computed raw/BS/oxBS matrices and the saved 5hmC results
load("raw.Rdata")
load("BS.Rdata")
load("oxBS.Rdata")
load("~/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL/5hmC.Rdata")
targets <- read.metharray.sheet(infoDir, pattern="BS.csv$")
rownames(targets) <- targets$Sample_Name
## remove sample
# keep the first 9 samples; `cols` comes from the loaded .Rdata objects
cols <- cols[1:9]
hmC_naive <- hmC_naive[,cols]
targets <- targets[cols,]
colnames(hmC_naive) <- cols
clases <- targets$Sample_Group
# DMP analysis PD vs CTRL on the naive 5hmC estimates (all probes returned)
myDMP <- champ.DMP(beta = hmC_naive, pheno=targets$Sample_Group, adjPVal = 1)
p <- 0.01
adjP <- 0.4
deltaBeta <- 0.1
# NOTE(review): the threshold-based selection is immediately overwritten by the
# top-100 selection on the next line -- only the top-100 list is used downstream
dmpSel <- myDMP$PD_to_CTRL[(abs(myDMP$PD_to_CTRL$deltaBeta) > deltaBeta) & (myDMP$PD_to_CTRL$P.Value < p) ,]
dmpSel <- myDMP$PD_to_CTRL[1:100,]
sel_dmp <- rownames(dmpSel)
lista_2 <- sel_dmp
# Venn overlap vs `lista_1` (presumably the BS-based CpG list computed elsewhere
# -- not defined in this excerpt; confirm before running)
plot.new()
venn_diagram2(lista_1, lista_2, "BS", "5hmC")
p <- innerheatmap(hmC_naive[sel_dmp, order(clases)])
p
# export the selected CpGs as hg19 ("hg37") BED coordinate files and plain lists
write.table(EPIC.hg19[rownames(myDMP$PD_to_CTRL),c('seqnames','start','end')], file='selected.h5mC.hg37.bed', quote=F, sep="\t", row.names=F, col.names=F)
write.table(EPIC.hg19[sel_dmp,c('seqnames','start','end')], file='selected.h5mC.PDvsCTRL.hg37.bed', quote=F, sep="\t", row.names=F, col.names=F)
write.table(rownames(myDMP$PD_to_CTRL), file="selected.5hmC.cpgs.txt", row.names=F, col.names=F)
write.table(sel_dmp, file="selected.5hmC.PDvsCTRL.cpgs.txt", row.names=F, col.names=F)
###########################################################################################################
###########################################################################################################
targetsOX <- read.metharray.sheet(dataDir, pattern="OX.csv$")
rownames(targetsOX) <- targetsOX$Sample_Name
cols <- targetsBS[colnames(beta.BS),'Sample_Name']
common <- rownames(beta.BS)[ rownames(beta.BS) %in% rownames(beta.oxBS) ]
hmC_naive <- beta.BS[common, ] - beta.oxBS[common, targetsOX[cols, 'Index'] ]
C_naive <- 1-beta.BS
mC_naive <- beta.oxBS
## remove sample
cols <- cols[1:9]
hmC_naive <- hmC_naive[,cols]
targets <- targets[cols,]
colnames(hmC_naive) <- cols
dmp <- champ.DMP(beta = hmC_naive, pheno=targets$Sample_Group, adjPVal = 1)
plot(dmp$PD_to_CTRL$deltaBeta,-log10(dmp$PD_to_CTRL$adj.P.Val), pch=20)
multidensity(hmC_naive)
hist(dmp$PD_to_CTRL$adj.P.Val)
DMP.GUI(DMP=dmp$PD_to_CTRL, beta=hmC_naive, pheno=targets$Sample_Group )
p <- 0.00001
adjP <- 0.4
deltaBeta <- 0.1
dmpSel <- dmp$PD_to_CTRL[(abs(dmp$PD_to_CTRL$deltaBeta) > deltaBeta) & (dmp$PD_to_CTRL$adj.P.Val < adjP) ,]
cpgSel <- rownames(dmpSel)
## GREAT
write.table(EPIC.manifest.hg38[cpgSel,c('seqnames','start','end')], file='selected.cpgsPDvsCTRL.bed', quote=F, sep="\t", row.names=F, col.names=F)
cols_no <- cols[1:9]
clases <- as.factor(c(rep("BS",9), rep("oxBS",9)))
colnames(M.BS) <- targetsBS$Sample_Name
colnames(M.oxBS) <- targetsOX$Sample_Name
colnames(beta.BS) <- targetsBS$Sample_Name
colnames(beta.oxBS) <- targetsOX$Sample_Name
Mtotal <- cbind(M.BS[common, cols_no], M.oxBS[common, cols_no])
beta <- cbind(beta.BS[common, cols_no], beta.oxBS[common, cols_no])
myDMP <- champ.DMP(beta = beta, pheno=clases, adjPVal = 1)
plot(myDMP[[1]]$logFC,-log10(myDMP[[1]]$adj.P.Val), pch=20)
DMP.GUI(DMP=myDMP[[1]], beta=beta, pheno=clases)
save(hmC_naive, myDMP, file = "5hmC.Rdata")
##### mCSEA
#infoDir <- "/home/alabarga/NavarraBiomed/analysis/parkinson/sampleInfo"
targetsBS <- read.metharray.sheet(infoDir, pattern="BS.csv$")
pheno <- targetsBS[,c('Sample_Group', 'Age','Sex')]
#colnames(pheno) <- c('expla', 'cov1', 'cov2')
rownames(pheno) <- targetsBS[,'Sample_Name']
pheno$Age <- as.numeric(pheno$Age)
myRank <- rankProbes(hmC_naive, pheno, refGroup = "CTRL", typeInput = "M", covariates = c('Sex'), continuous=c('Age'))
hmC_beta = 2^hmC_naive/(2^hmC_naive + 1)
myResults <- mCSEATest(myRank, hmC_beta, pheno, regionsTypes = "promoters", platform = "EPIC")
proms <- myResults[["promoters"]][-7]
attach(proms)
head(proms[order(padj),],20)
subset(proms, pval <0.001 & size > 10)
for (dmrName in rownames(subset(proms, pval <0.001 & size > 10))) {
mCSEAPlot(myResults, regionType = "promoters",
dmrName = dmrName, genes=TRUE,
transcriptAnnotation = "symbol", makePDF = FALSE)
}
detach(proms)
head(myResults[["promoters"]][,-7],20)
mCSEAPlotGSEA(myRank, myResults, regionType = "promoters", dmrName = "ACER3")
#####
cols <-colnames(hmC_naive)
cols_no <- cols[1:9]
selDMP <- myDMP[[1]][(myDMP[[1]]$logFC > 0) & (myDMP[[1]]$adj.P.Val < 0.2), ]
beta <- hmC_naive[rownames(selDMP), cols_no]
myDMP <- champ.DMP(beta = beta, pheno=targetsOX[cols_no, 'Sample_Group'], adjPVal = 1, arraytype = "EPIC")
DMP.GUI(DMP=myDMP[[1]], beta=beta, pheno=targetsOX[cols_no, 'Sample_Group'])
myDMPsel <- myDMP[[1]][myDMP[[1]]$P.Value < 0.05,]
myDMR <- champ.DMR(beta = beta, pheno=targetsOX[cols_no, 'Sample_Group'], method = "Bumphunter", B=100, minProbes=3, arraytype="EPIC")
myGSEA <- champ.GSEA(beta = hmC_naive[rownames(selDMP),], DMP=myDMPsel, DMR=NULL, arraytype = "EPIC")
myDMR <- champ.DMR(beta = hmC_naive, pheno=targetsOX[cols, 'Sample_Group'], method = "Bumphunter", B=100, adjPvalDmr=0.1, adjPvalProbe=0.8, arraytype="EPIC")
DMP.GUI(DMP=myDMP.2[[1]], beta=-hmC_naive[rownames(selDMP),], pheno=targetsOX[cols, 'Sample_Group'])
rgBS <- preprocessFunnormRedGreen(RGsetBS, sex= 1*(targetsBS$Sex == "M"))
rgOxBS <- preprocessFunnormRedGreen(RGsetOX, sex= 1*(targetsOX$Sex == "M"))
qc <- QCinfo(RGsetEx)
## BS vs OX
pD <- pData(mdat)
remove_samples <- c('201096090140_R07C01')
mdat <- mdat[,!(pD$Index %in% remove_samples)]
pD <- pData(mdat)
BS <- pD$Index[pD$Methylation == "5mC"]
oxBS <- pD$Index[pD$Methylation == "5hmC"]
M.BS <- minfi::getMeth(mdat)[,BS]
U.BS <- getUnmeth(mdat)[,BS]
M.oxBS<- getMeth(mdat)[,oxBS]
U.oxBS <- getUnmeth(mdat)[,oxBS]
N.BS <- M.BS + U.BS
N.oxBS <- M.oxBS + U.oxBS
beta.BS <- beta[, BS]
beta.oxBS <- beta[, oxBS]
colnames(beta.BS) <- pD$Sample_Name[pD$Type == 'BS']
colnames(beta.oxBS) <- pD$Sample_Name[pD$Type == 'OX']
colnames(N.BS) <- pD$Sample_Name[pD$Type == 'BS']
colnames(N.oxBS) <- pD$Sample_Name[pD$Type == 'OX']
## Naive estimates
## The naive approach to obtain 5-hmC levels is $\beta_{BS} - \beta_{OxBS}$. This approach results in negative values for the 5-hmC levels.
hmC_naive <- beta.BS - beta.oxBS  # 5-hmC: BS beta minus oxBS beta (may be negative)
C_naive <- 1 - beta.BS            # unmethylated-C fraction
# BUG FIX: was `beta_oxBS` (underscore), an object that is never defined; the
# oxBS beta matrix is called `beta.oxBS` throughout this script (see the earlier
# `mC_naive <- beta.oxBS` assignment).
mC_naive <- beta.oxBS
naive_estimation <- 0
mle_estimation <- oxBS.MLE(beta.BS, beta.oxBS, N.BS, N.oxBS)
## MLML: consistent simultaneous estimates of DNA methylation and hydroxymethylation
## https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3789553/
mlml_estimation <- MLML(T = M.BS , U = U.BS, L = U.oxBS, M = M.oxBS, exact=TRUE)
par(mfrow =c(1,3))
multidensity(mlml_estimation$mC, main="5-mC using MLML")
multidensity(mlml_estimation$hmC, main="5-hmC using MLML")
multidensity(mlml_estimation$C, main="5-C using MLML")
hmc <- mlml_estimation$hmC
hmC_naive <- M.BS - M.oxBS
pd.hmc <- pd[colnames(hmc),]
hmc <- hmC_naive
mvalues.limma <- logit2(hmc)
mvalues.limma <- hmC_naive
betas <- 2^hmC_naive / (2^hmC_naive + 1)
betas[betas <= 0] <- 10^-10
betas[betas >= 1] <- 1- 10^-10
mvalues.limma <- betas
mod.limma = model.matrix( ~ -1 + pd.hmc$Status + pd.hmc$Sex + pd.hmc$Age)
colnames(mod.limma) <- c('CTRL','PD','Sex','Age')
cont.matrix <- makeContrasts(
PDvsCTRL=PD-CTRL,
levels=mod.limma)
fit.2 <- lmFit(mvalues.limma, mod.limma)
fit.2 <- contrasts.fit(fit.2, cont.matrix)
fit.2 <- eBayes(fit.2)
top <- topTable(fit.2,coef="PDvsCTRL",sort.by="p",number=nrow(mvalues.limma))
head(top)
sel_limma <- rownames(top)[1:1000]
p <- innerheatmap(hmC_naive[, order(clases)])
p
###########################################################################3
bedfile <- read.table('/home/alabarga/NavarraBiomed/analysis/parkinson/results/3_hmC_PD_vs_CTRL/selected.cpgs.4hmC.PDvsCTRL.hg19.bed')
my.granges <- GRanges(seqnames = bedfile$V1, ranges = IRanges(start = bedfile$V2, end = bedfile$V3))
o <- findOverlaps(my.granges,EPIC.hg19.manifest)
original.selected.cpgs <- names(EPIC.hg19.manifest)[subjectHits(o)]
write.table(EPIC.hg19[original.selected.cpgs,c('seqnames','start','end','cpgname')], file='original.cpgs.5hmC.PDvsCTRL.hg19.bed', quote=F, sep="\t", row.names=F, col.names=F)
cpgs.1062 <- read.table('/home/alabarga/NavarraBiomed/analysis/parkinson/results/selected.cpgs.5hmC.PDvsCTRL.txt')
write.table(EPIC.hg19[cpgs.1062$V1,c('seqnames','start','end','cpgname')], file='selected.cpgs.5hmC.PDvsCTRL.1062.bed', quote=F, sep="\t", row.names=F, col.names=F)
'original.cpgs.5hmC.PDvsCTRL.hg19.bed'
# ---- end of dataset record (duplicate `text` column) ----
#' Parameter of a Bivariate Copula for a given Kendall's Tau Value
#'
#' This function computes the parameter of a (one parameter) bivariate copula
#' for a given value of Kendall's tau.
#'
#'
#' @param family integer; single number or vector of size `n`; defines the
#' bivariate copula family: \cr `0` = independence copula \cr `1` =
#' Gaussian copula \cr `2` = Student t copula (Here only the first
#' parameter can be computed) \cr `3` = Clayton copula \cr `4` =
#' Gumbel copula \cr `5` = Frank copula \cr `6` = Joe copula \cr
#' `13` = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
#' `14` = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
#' `16` = rotated Joe copula (180 degrees; ``survival Joe'') \cr `23`
#' = rotated Clayton copula (90 degrees) \cr `24` = rotated Gumbel copula
#' (90 degrees) \cr `26` = rotated Joe copula (90 degrees) \cr `33` =
#' rotated Clayton copula (270 degrees) \cr `34` = rotated Gumbel copula
#' (270 degrees) \cr `36` = rotated Joe copula (270 degrees)\cr Note that
#' (with exception of the t-copula) two parameter bivariate copula families
#' cannot be used.
#' @param tau numeric; single number or vector of size `n`; Kendall's tau
#' value (vector with elements in \eqn{[-1,1]}).
#' @param check.taus logical; default is `TRUE`; if `FALSE`, checks
#' for family/tau-consistency are omitted (should only be used with care).
#'
#' @return Parameter (vector) corresponding to the bivariate copula family and
#' the value(s) of Kendall's tau (\eqn{\tau}). \tabular{ll}{ No.
#' (`family`) \tab Parameter (`par`) \cr `1, 2` \tab
#' \eqn{\sin(\tau \frac{\pi}{2})}{sin(\tau \pi/2)} \cr `3, 13` \tab
#' \eqn{2\frac{\tau}{1-\tau}}{2\tau/(1-\tau)} \cr `4, 14` \tab
#' \eqn{\frac{1}{1-\tau}}{1/(1-\tau)} \cr `5` \tab no closed form
#' expression (numerical inversion) \cr `6, 16` \tab no closed form
#' expression (numerical inversion) \cr `23, 33` \tab
#' \eqn{2\frac{\tau}{1+\tau}}{2\tau/(1+\tau)} \cr `24, 34` \tab
#' \eqn{-\frac{1}{1+\tau}}{-1/(1+\tau)} \cr `26, 36` \tab no closed form
#' expression (numerical inversion) }
#'
#' @note The number `n` can be chosen arbitrarily, but must agree across
#' arguments.
#'
#' @author Jakob Stoeber, Eike Brechmann, Tobias Erhardt
#'
#' @seealso [BiCopPar2Tau()]
#'
#' @references Joe, H. (1997). Multivariate Models and Dependence Concepts.
#' Chapman and Hall, London.
#'
#' Czado, C., U. Schepsmeier, and A. Min (2012). Maximum likelihood estimation
#' of mixed C-vines with application to exchange rates. Statistical Modelling,
#' 12(3), 229-255.
#'
#' @examples
#' ## Example 1: Gaussian copula
#' tau0 <- 0.5
#' rho <- BiCopTau2Par(family = 1, tau = tau0)
#' BiCop(1, tau = tau0)$par # alternative
#'
#' ## Example 2:
#' vtau <- seq(from = 0.1, to = 0.8, length.out = 100)
#' thetaC <- BiCopTau2Par(family = 3, tau = vtau)
#' thetaG <- BiCopTau2Par(family = 4, tau = vtau)
#' thetaF <- BiCopTau2Par(family = 5, tau = vtau)
#' thetaJ <- BiCopTau2Par(family = 6, tau = vtau)
#' plot(thetaC ~ vtau, type = "l", ylim = range(thetaF))
#' lines(thetaG ~ vtau, col = 2)
#' lines(thetaF ~ vtau, col = 3)
#' lines(thetaJ ~ vtau, col = 4)
#'
#' ## Example 3: different copula families
#' theta <- BiCopTau2Par(family = c(3,4,6), tau = c(0.4, 0.5, 0.6))
#' BiCopPar2Tau(family = c(3,4,6), par = theta)
#'
#' \dontshow{
#' # Test BiCopTau2Par
#' BiCopTau2Par(family = 0, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 1, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 2, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 3, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 4, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 5, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 6, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 13, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 14, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 16, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 23, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 24, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 26, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 33, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 34, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 36, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 41, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 51, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 61, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 71, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 41, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 51, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 61, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 71, tau = -c(0.4,0.5,0.6))
#' }
#'
BiCopTau2Par <- function(family, tau, check.taus = TRUE) {
    ## Convert Kendall's tau to the copula parameter for one-parameter families.
    ## `family` and `tau` may be scalars or vectors; a scalar input is recycled
    ## to the common length. Returns the parameter vector, reshaped to the
    ## original input dimensions when the input was an array.

    ## sanity check: two-parameter families (other than t) have no unique inverse
    if (any(family %in% setdiff(allfams[twopar], 2)))
        stop("For two parameter copulas (except t) Kendall's tau cannot be inverted.")
    if (check.taus && any(abs(tau) > 0.99999))
        stop("some tau is too close to -1 or 1")
    # fix for SemiParBIVProbit package: remember the input dimensions
    dims <- set_dims(family, tau = tau)
    ## adjust length for input vectors; stop if not matching
    family <- c(family)
    tau <- c(tau)
    n <- max(length(family), length(tau))
    if (length(family) == 1)
        family <- rep(family, n)
    if (length(tau) == 1)
        tau <- rep(tau, n)  # BUG FIX: was `par <- rep(tau, n)`, leaving `tau` unexpanded
    if (!all(c(length(family), length(tau)) %in% c(1, n)))
        stop("Input lengths don't match")
    ## check for family/tau consistency
    if (check.taus)
        BiCopCheckTaus(family, tau)
    ## calculate the parameter element-wise
    par <- vapply(seq_along(tau),
                  function(i) calcPar(family[i], tau[i]),
                  numeric(1))
    ## post-process each parameter via adjustPars() (defined elsewhere in the package)
    par <- vapply(seq_along(par),
                  function(i) adjustPars(family[i], par[i], 0)[1],
                  numeric(1))
    ## return result with the original dimensions restored
    if (length(dims) > 1)
        par <- array(par, dim = dims)
    par
}
calcPar <- function(family, tau) {
    ## Map a single Kendall's tau to the copula parameter of `family`.
    ## Closed-form inversion where available; numerical inversion otherwise.
    if (family == 0) {
        # independence copula: parameter is identically zero
        par <- rep(0, times = length(tau))
    } else if (family %in% 1:2) {
        # Gaussian / Student t: rho = sin(pi * tau / 2)
        par <- sin(pi * tau/2)
    } else if (family %in% c(3, 13)) {
        # (survival) Clayton
        par <- 2 * tau/(1 - tau)
    } else if (family %in% c(4, 14)) {
        # (survival) Gumbel
        par <- 1/(1 - tau)
    } else if (family == 5) {
        # Frank: no closed form, numerical inversion
        par <- if (tau == 0) 0 else Frank.itau.JJ(tau)
    } else if (family %in% c(6, 16)) {
        # (survival) Joe: numerical inversion
        par <- Joe.itau.JJ(tau)
    } else if (family %in% c(23, 33)) {
        # 90/270-degree rotated Clayton
        par <- 2 * tau/(1 + tau)
    } else if (family %in% c(24, 34)) {
        # 90/270-degree rotated Gumbel
        par <- -(1/(1 + tau))
    } else if (family %in% c(26, 36)) {
        # 90/270-degree rotated Joe
        par <- -Joe.itau.JJ(-tau)
    } else if (family %in% c(41, 51)) {
        par <- ipsA.tau2cpar(tau)
    } else if (family %in% c(61, 71)) {
        par <- -ipsA.tau2cpar(-tau)
    } else {
        # ROBUSTNESS: previously an unhandled family fell through and produced
        # the obscure error "object 'par' not found"; fail with a clear message.
        stop("family not implemented: ", family)
    }
    ## return result
    par
}
Frank.itau.JJ <- function(tau) {
    ## Invert Kendall's tau for the Frank copula by root finding on frankTau()
    ## (defined elsewhere in the package). The sign is treated separately
    ## because the Frank parameter is an odd function of tau.
    if (abs(tau) > 0.99999) return(Inf)
    sgn <- if (tau < 0) -1 else 1
    tau_abs <- abs(tau)
    tol <- .Machine$double.eps^0.5
    root <- uniroot(function(x) tau_abs - frankTau(x),
                    lower = tol, upper = 100,
                    tol = tol)$root
    return(sgn * root)
}
Joe.itau.JJ <- function(tau) {
    ## Invert Kendall's tau for the Joe copula.
    ## For tau >= 0 the parameter is found by root search on the closed-form
    ## tau(theta); negative tau is not attainable by the Joe copula, so a value
    ## just above the lower bound (theta = 1) is returned instead.
    if (abs(tau) > 0.99999) return(Inf)
    if (tau < 0) {
        return(1.000001)
    }
    # Kendall's tau of the Joe copula as a function of its parameter theta;
    # the removable singularity at theta == 2 has the limit 1 - trigamma(2).
    theta_to_tau <- function(theta) {
        shifted <- 2/theta + 1
        gap <- digamma(2) - digamma(shifted)
        out <- 1 + gap * 2/(2 - theta)
        out[theta == 2] <- 1 - trigamma(2)
        out
    }
    uniroot(function(theta) tau - theta_to_tau(theta),
            lower = 1, upper = 5e5,
            tol = .Machine$double.eps^0.5)$root
}
ipsA.tau2cpar <- function(tau, mxiter = 20, eps = 1e-06, dstart = 0, iprint = FALSE) {
    ## Newton-Raphson inversion of Kendall's tau for the "ipsA" Archimedean
    ## copula (families 41/51): solves
    ##   log((1 - tau) * sqrt(pi) / 2) + lgamma(1 + de) - lgamma(de + 0.5) = 0
    ## for the copula parameter `de`.
    ##
    ## tau    : Kendall's tau value(s)
    ## mxiter : maximum number of Newton iterations
    ## eps    : convergence tolerance on the Newton step
    ## dstart : starting value (values <= 0 mean "use tau + 1")
    ## iprint : print the iteration trace?
    con <- log((1 - tau) * sqrt(pi)/2)
    de <- dstart
    if (dstart <= 0)
        de <- tau + 1
    iter <- 0
    diff <- 1
    # `&&` (scalar) instead of `&`: both operands are length-1 conditions
    while (iter < mxiter && max(abs(diff)) > eps) {
        g <- con + lgamma(1 + de) - lgamma(de + 0.5)
        gp <- digamma(1 + de) - digamma(de + 0.5)
        iter <- iter + 1
        diff <- g/gp
        de <- de - diff
        # step-halving keeps the iterate inside the valid domain de > 0
        while (min(de) <= 0) {
            diff <- diff/2
            de <- de + diff
        }
        if (iprint)
            cat(iter, " ", de, " ", diff, "\n")
    }
    if (iter >= mxiter)
        warning("ipsA.tau2cpar did not converge", call. = FALSE)  # was cat() to stdout
    de
}
# ---- dataset record: /R/BiCopTau2Par.R (BenGraeler/VineCopula, no license) ----
#' Parameter of a Bivariate Copula for a given Kendall's Tau Value
#'
#' This function computes the parameter of a (one parameter) bivariate copula
#' for a given value of Kendall's tau.
#'
#'
#' @param family integer; single number or vector of size `n`; defines the
#' bivariate copula family: \cr `0` = independence copula \cr `1` =
#' Gaussian copula \cr `2` = Student t copula (Here only the first
#' parameter can be computed) \cr `3` = Clayton copula \cr `4` =
#' Gumbel copula \cr `5` = Frank copula \cr `6` = Joe copula \cr
#' `13` = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
#' `14` = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
#' `16` = rotated Joe copula (180 degrees; ``survival Joe'') \cr `23`
#' = rotated Clayton copula (90 degrees) \cr `24` = rotated Gumbel copula
#' (90 degrees) \cr `26` = rotated Joe copula (90 degrees) \cr `33` =
#' rotated Clayton copula (270 degrees) \cr `34` = rotated Gumbel copula
#' (270 degrees) \cr `36` = rotated Joe copula (270 degrees)\cr Note that
#' (with exception of the t-copula) two parameter bivariate copula families
#' cannot be used.
#' @param tau numeric; single number or vector of size `n`; Kendall's tau
#' value (vector with elements in \eqn{[-1,1]}).
#' @param check.taus logical; default is `TRUE`; if `FALSE`, checks
#' for family/tau-consistency are omitted (should only be used with care).
#'
#' @return Parameter (vector) corresponding to the bivariate copula family and
#' the value(s) of Kendall's tau (\eqn{\tau}). \tabular{ll}{ No.
#' (`family`) \tab Parameter (`par`) \cr `1, 2` \tab
#' \eqn{\sin(\tau \frac{\pi}{2})}{sin(\tau \pi/2)} \cr `3, 13` \tab
#' \eqn{2\frac{\tau}{1-\tau}}{2\tau/(1-\tau)} \cr `4, 14` \tab
#' \eqn{\frac{1}{1-\tau}}{1/(1-\tau)} \cr `5` \tab no closed form
#' expression (numerical inversion) \cr `6, 16` \tab no closed form
#' expression (numerical inversion) \cr `23, 33` \tab
#' \eqn{2\frac{\tau}{1+\tau}}{2\tau/(1+\tau)} \cr `24, 34` \tab
#' \eqn{-\frac{1}{1+\tau}}{-1/(1+\tau)} \cr `26, 36` \tab no closed form
#' expression (numerical inversion) }
#'
#' @note The number `n` can be chosen arbitrarily, but must agree across
#' arguments.
#'
#' @author Jakob Stoeber, Eike Brechmann, Tobias Erhardt
#'
#' @seealso [BiCopPar2Tau()]
#'
#' @references Joe, H. (1997). Multivariate Models and Dependence Concepts.
#' Chapman and Hall, London.
#'
#' Czado, C., U. Schepsmeier, and A. Min (2012). Maximum likelihood estimation
#' of mixed C-vines with application to exchange rates. Statistical Modelling,
#' 12(3), 229-255.
#'
#' @examples
#' ## Example 1: Gaussian copula
#' tau0 <- 0.5
#' rho <- BiCopTau2Par(family = 1, tau = tau0)
#' BiCop(1, tau = tau0)$par # alternative
#'
#' ## Example 2:
#' vtau <- seq(from = 0.1, to = 0.8, length.out = 100)
#' thetaC <- BiCopTau2Par(family = 3, tau = vtau)
#' thetaG <- BiCopTau2Par(family = 4, tau = vtau)
#' thetaF <- BiCopTau2Par(family = 5, tau = vtau)
#' thetaJ <- BiCopTau2Par(family = 6, tau = vtau)
#' plot(thetaC ~ vtau, type = "l", ylim = range(thetaF))
#' lines(thetaG ~ vtau, col = 2)
#' lines(thetaF ~ vtau, col = 3)
#' lines(thetaJ ~ vtau, col = 4)
#'
#' ## Example 3: different copula families
#' theta <- BiCopTau2Par(family = c(3,4,6), tau = c(0.4, 0.5, 0.6))
#' BiCopPar2Tau(family = c(3,4,6), par = theta)
#'
#' \dontshow{
#' # Test BiCopTau2Par
#' BiCopTau2Par(family = 0, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 1, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 2, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 3, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 4, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 5, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 6, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 13, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 14, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 16, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 23, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 24, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 26, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 33, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 34, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 36, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 41, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 51, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 61, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 71, tau = c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 41, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 51, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 61, tau = -c(0.4,0.5,0.6))
#' BiCopTau2Par(family = 71, tau = -c(0.4,0.5,0.6))
#' }
#'
BiCopTau2Par <- function(family, tau, check.taus = TRUE) {
    ## Convert Kendall's tau to the copula parameter for one-parameter families.
    ## `family` and `tau` may be scalars or vectors; a scalar input is recycled
    ## to the common length. Returns the parameter vector, reshaped to the
    ## original input dimensions when the input was an array.

    ## sanity check: two-parameter families (other than t) have no unique inverse
    if (any(family %in% setdiff(allfams[twopar], 2)))
        stop("For two parameter copulas (except t) Kendall's tau cannot be inverted.")
    if (check.taus && any(abs(tau) > 0.99999))
        stop("some tau is too close to -1 or 1")
    # fix for SemiParBIVProbit package: remember the input dimensions
    dims <- set_dims(family, tau = tau)
    ## adjust length for input vectors; stop if not matching
    family <- c(family)
    tau <- c(tau)
    n <- max(length(family), length(tau))
    if (length(family) == 1)
        family <- rep(family, n)
    if (length(tau) == 1)
        tau <- rep(tau, n)  # BUG FIX: was `par <- rep(tau, n)`, leaving `tau` unexpanded
    if (!all(c(length(family), length(tau)) %in% c(1, n)))
        stop("Input lengths don't match")
    ## check for family/tau consistency
    if (check.taus)
        BiCopCheckTaus(family, tau)
    ## calculate the parameter element-wise
    par <- vapply(seq_along(tau),
                  function(i) calcPar(family[i], tau[i]),
                  numeric(1))
    ## post-process each parameter via adjustPars() (defined elsewhere in the package)
    par <- vapply(seq_along(par),
                  function(i) adjustPars(family[i], par[i], 0)[1],
                  numeric(1))
    ## return result with the original dimensions restored
    if (length(dims) > 1)
        par <- array(par, dim = dims)
    par
}
calcPar <- function(family, tau) {
    ## Map a single Kendall's tau to the copula parameter of `family`.
    ## Closed-form inversion where available; numerical inversion otherwise.
    if (family == 0) {
        # independence copula: parameter is identically zero
        par <- rep(0, times = length(tau))
    } else if (family %in% 1:2) {
        # Gaussian / Student t: rho = sin(pi * tau / 2)
        par <- sin(pi * tau/2)
    } else if (family %in% c(3, 13)) {
        # (survival) Clayton
        par <- 2 * tau/(1 - tau)
    } else if (family %in% c(4, 14)) {
        # (survival) Gumbel
        par <- 1/(1 - tau)
    } else if (family == 5) {
        # Frank: no closed form, numerical inversion
        par <- if (tau == 0) 0 else Frank.itau.JJ(tau)
    } else if (family %in% c(6, 16)) {
        # (survival) Joe: numerical inversion
        par <- Joe.itau.JJ(tau)
    } else if (family %in% c(23, 33)) {
        # 90/270-degree rotated Clayton
        par <- 2 * tau/(1 + tau)
    } else if (family %in% c(24, 34)) {
        # 90/270-degree rotated Gumbel
        par <- -(1/(1 + tau))
    } else if (family %in% c(26, 36)) {
        # 90/270-degree rotated Joe
        par <- -Joe.itau.JJ(-tau)
    } else if (family %in% c(41, 51)) {
        par <- ipsA.tau2cpar(tau)
    } else if (family %in% c(61, 71)) {
        par <- -ipsA.tau2cpar(-tau)
    } else {
        # ROBUSTNESS: previously an unhandled family fell through and produced
        # the obscure error "object 'par' not found"; fail with a clear message.
        stop("family not implemented: ", family)
    }
    ## return result
    par
}
Frank.itau.JJ <- function(tau) {
    ## Invert Kendall's tau for the Frank copula by root finding on frankTau()
    ## (defined elsewhere in the package). The sign is treated separately
    ## because the Frank parameter is an odd function of tau.
    if (abs(tau) > 0.99999) return(Inf)
    sgn <- if (tau < 0) -1 else 1
    tau_abs <- abs(tau)
    tol <- .Machine$double.eps^0.5
    root <- uniroot(function(x) tau_abs - frankTau(x),
                    lower = tol, upper = 100,
                    tol = tol)$root
    return(sgn * root)
}
Joe.itau.JJ <- function(tau) {
    ## Invert Kendall's tau for the Joe copula.
    ## For tau >= 0 the parameter is found by root search on the closed-form
    ## tau(theta); negative tau is not attainable by the Joe copula, so a value
    ## just above the lower bound (theta = 1) is returned instead.
    if (abs(tau) > 0.99999) return(Inf)
    if (tau < 0) {
        return(1.000001)
    }
    # Kendall's tau of the Joe copula as a function of its parameter theta;
    # the removable singularity at theta == 2 has the limit 1 - trigamma(2).
    theta_to_tau <- function(theta) {
        shifted <- 2/theta + 1
        gap <- digamma(2) - digamma(shifted)
        out <- 1 + gap * 2/(2 - theta)
        out[theta == 2] <- 1 - trigamma(2)
        out
    }
    uniroot(function(theta) tau - theta_to_tau(theta),
            lower = 1, upper = 5e5,
            tol = .Machine$double.eps^0.5)$root
}
ipsA.tau2cpar <- function(tau, mxiter = 20, eps = 1e-06, dstart = 0, iprint = FALSE) {
    ## Newton-Raphson inversion of Kendall's tau for the "ipsA" Archimedean
    ## copula (families 41/51): solves
    ##   log((1 - tau) * sqrt(pi) / 2) + lgamma(1 + de) - lgamma(de + 0.5) = 0
    ## for the copula parameter `de`.
    ##
    ## tau    : Kendall's tau value(s)
    ## mxiter : maximum number of Newton iterations
    ## eps    : convergence tolerance on the Newton step
    ## dstart : starting value (values <= 0 mean "use tau + 1")
    ## iprint : print the iteration trace?
    con <- log((1 - tau) * sqrt(pi)/2)
    de <- dstart
    if (dstart <= 0)
        de <- tau + 1
    iter <- 0
    diff <- 1
    # `&&` (scalar) instead of `&`: both operands are length-1 conditions
    while (iter < mxiter && max(abs(diff)) > eps) {
        g <- con + lgamma(1 + de) - lgamma(de + 0.5)
        gp <- digamma(1 + de) - digamma(de + 0.5)
        iter <- iter + 1
        diff <- g/gp
        de <- de - diff
        # step-halving keeps the iterate inside the valid domain de > 0
        while (min(de) <= 0) {
            diff <- diff/2
            de <- de + diff
        }
        if (iprint)
            cat(iter, " ", de, " ", diff, "\n")
    }
    if (iter >= mxiter)
        warning("ipsA.tau2cpar did not converge", call. = FALSE)  # was cat() to stdout
    de
}
# ---- end of dataset record (duplicate `text` column) ----
# Standard testthat entry point (tests/testthat.R): runs every test under
# tests/testthat/ for the GeneIdentifierUtilsR package, e.g. during R CMD check.
library(testthat)
library(GeneIdentifierUtilsR)
test_check("GeneIdentifierUtilsR")
# ---- dataset record: /tests/testthat.R (selkamand/GeneIdentifierUtilsR, permissive) ----
library(testthat)
library(GeneIdentifierUtilsR)
test_check("GeneIdentifierUtilsR")
# ---- end of dataset record (duplicate `text` column) ----
########################################
## PREDICT 422
## Charity Project - Part 4 (The Mailing List Problem)
##
## SaveYourModels.R
########################################
# In order to use your chosen models from Part 2 and Part 3 of the project to predict
# values for the Validation data and Test data for Part 4 of the project, you need a
# means to bring your chosen models into the R Environment. There are several ways to
# accomplish this (a few possibilities listed here). Note that you may want to
# alter the model naming convention to distinguish between Part 2 (Regression Problem)
# models and Part 3 (Classification Problem) models.
# - Run code from Part 2 and Part 3 in the current R session. If you leave the R
# session running for your Part 4 code, then those models will still be in memory
# as your work on Part 4.
# - Save your chosen models from Part 2 and Part 3 to .RData files. This is a means
# of saving R objects to a file that is able to be opened again in R.
#
# I'm going with the second approach.
# Save Part 2 Model:
# 0. For the sake of discussion, let's use modelC1 from SampleCodePart2.R for this
# part. While modelC1 wasn't the top model on the Part 2 test data, it was close,
# and it will be more illustrative for me to use a Lasso model here than a
# regression model.
# 1. Go to your Part 2 code and execute the commands that you need so that your
# chosen model is in memory (or you can execute ALL of your code from Part 2).
# You want the model to be trained on the Part 2 training set, as before, so you
# don't need to make any changes to your Part 2 code.
# 2. While your chosen Part 2 model is in memory, execute the following commands
# (modified for your particular situation):
# outPath = file.path("/Users","JLW","Documents","Northwestern MSPA","PREDICT 422","Project - Charity Mailing","Project Data Files")
# modelPart2 = modelC1
# save(modelPart2,file=file.path(outPath,"modelPart2.RData"))
#
# - The first command defines the file path where you want to save the file. It
# can be set the same as your inPath or not. You need to define a valid file
# path for your file system.
# - The second command renames my modelC1 to be called modelPart2. I do this only
# for the reason that my chosen Part 3 model might also be modelC1. Renaming
# the model distinguishes my Part 2 model from my Part 3 model. Also, note that
# you would specify your chosen model here in place of "modelC1".
# - The third command performs the saving action, specifying which R objects to
# save (modelPart2 in this case) and the file name (with file path) to save to.
# You can save more than one object in this command (as you will see in the
# Part 3 example). For example, the Lasso model for modelC1 was built for the
# single value cvLasso$lambda.min. If modelC1 had been trained over a range of
# lambda values, then we would want to save the chosen optimal value of
# cvLasso$lambda.min along with the model.
# 3. You now have your chosen Part 2 model saved to your computer. You can close or
# terminate your R session, and you will still be able to come back later and
# load the model from the .RData file. You can go to the file location that you
# specified above, and you should see a file named modelPart2.RData.
# Save Part 3 Model:
# 0. I'll use modelA1 from SampleCodePart3.R for this part. Based on the results
# obtained in Part 3, modelA1 had the highest TP rate on the Test set.
# While the "accuracy" for modelA1 wasn't as high as for some of the other models,
# it is really the TP rate that will drive the mailing list selection.
# 1. Go to your Part 3 code and execute the commands that you need so that your
# chosen model is in memory. It is a good idea to start with a fresh R session
# here. That way you don't have any unintentional carry-over from your Part 2
# code.
# 2. While your chosen Part 3 model is in memory, execute the following commands
# (modified for your particular situation):
# outPath = file.path("/Users","JLW","Documents","Northwestern MSPA","PREDICT 422","Project - Charity Mailing","Project Data Files")
# modelPart3 = modelA1
# optThreshPart3 = threshA1
# save(modelPart3,optThreshPart3,file=file.path(outPath,"modelPart3.RData"))
#
# - When saving to a .RData file, you can list as many objects and values as
# you want. Follow those objects with the file specification. R will bundle
# everything up and save it to one .RData file. For example, you could save
# multiple models and the data used to train the models as follows:
# save(classData2,modelA1,optThreshA1,modelB1,optThreshB1,file=file.path(outPath,"exampleSave.RData"))
# 3. You now have your chosen Part 3 model saved to your computer. You can close or
# terminate your R session, and you will still be able to come back later and
# load the model from the .RData file. You can go to the file location that you
# specified above, and you should see a file named modelPart3.RData.
# Once you have saved your Part 2 and Part 3 models, I recommend you start with a
# fresh R session again. To load the models from Part 2 and Part 3, we have the
# following commands:
# Specify the file path where your .RData files are located
# Directory that holds the .RData files saved from Parts 2 and 3.
# NOTE(review): absolute, user-specific path -- edit for your own machine.
modelPath <- file.path("/Users", "JLW", "Documents", "Northwestern MSPA", "PREDICT 422",
                       "Project - Charity Mailing", "Project Data Files")
# Load the saved objects (modelPart2; modelPart3 plus any saved hyper-parameters)
# into the global environment. Check existence first so a missing file fails
# with a clear message rather than load()'s cryptic "cannot open the connection".
for (rdata_file in c("modelPart2.RData", "modelPart3.RData")) {
  rdata_path <- file.path(modelPath, rdata_file)
  if (!file.exists(rdata_path)) {
    stop("Saved model file not found: ", rdata_path, call. = FALSE)
  }
  load(rdata_path, envir = globalenv())
}
# You should now see modelPart2 and modelPart3 (plus any hyper-parameters that you
# saved) in your R Environment (i.e. in memory). | /SampleCode SaveYourModels.R | no_license | bingdong1/Machine-Learning-in-R | R | false | false | 5,961 | r | ########################################
## PREDICT 422
## Charity Project - Part 4 (The Mailing List Problem)
##
## SaveYourModels.R
########################################
# In order to use your chosen models from Part 2 and Part 3 of the project to predict
# values for the Validation data and Test data for Part 4 of the project, you need a
# means to bring your chosen models into the R Environment. There are several ways to
# accomplish this (a few possibilities listed here). Note that you may want to
# alter the model naming convention to distinguish between Part 2 (Regression Problem)
# models and Part 3 (Classification Problem) models.
# - Run code from Part 2 and Part 3 in the current R session. If you leave the R
# session running for your Part 4 code, then those models will still be in memory
# as your work on Part 4.
# - Save your chosen models from Part 2 and Part 3 to .RData files. This is a means
# of saving R objects to a file that is able to be opened again in R.
#
# I'm going with the second approach.
# Save Part 2 Model:
# 0. For the sake of discussion, let's use modelC1 from SampleCodePart2.R for this
# part. While modelC1 wasn't the top model on the Part 2 test data, it was close,
# and it will be more illustrative for me to use a Lasso model here than a
# regression model.
# 1. Go to your Part 2 code and execute the commands that you need so that your
# chosen model is in memory (or you can execute ALL of your code from Part 2).
# You want the model to be trained on the Part 2 training set, as before, so you
# don't need to make any changes to your Part 2 code.
# 2. While your chosen Part 2 model is in memory, execute the following commands
# (modified for your particular situation):
# outPath = file.path("/Users","JLW","Documents","Northwestern MSPA","PREDICT 422","Project - Charity Mailing","Project Data Files")
# modelPart2 = modelC1
# save(modelPart2,file=file.path(outPath,"modelPart2.RData"))
#
# - The first command defines the file path where you want to save the file. It
# can be set the same as your inPath or not. You need to define a valid file
# path for your file system.
# - The second command renames my modelC1 to be called modelPart2. I do this only
# for the reason that my chosen Part 3 model might also be modelC1. Renaming
# the model distinguishes my Part 2 model from my Part 3 model. Also, note that
# you would specify your chosen model here in place of "modelC1".
# - The third command performs the saving action, specifying which R objects to
# save (modelPart2 in this case) and the file name (with file path) to save to.
# You can save more than one object in this command (as you will see in the
# Part 3 example). For example, the Lasso model for modelC1 was built for the
# single value cvLasso$lambda.min. If modelC1 had been trained over a range of
# lambda values, then we would want to save the chosen optimal value of
# cvLasso$lambda.min along with the model.
# 3. You now have your chosen Part 2 model saved to your computer. You can close or
# terminate your R session, and you will still be able to come back later and
# load the model from the .RData file. You can go to the file location that you
# specified above, and you should see a file named modelPart2.RData.
# Save Part 3 Model:
# 0. I'll use modelA1 from SampleCodePart3.R for this part. Based on the results
# obtained in Part 3, modelA1 had the highest TP rate on the Test set.
# While the "accuracy" for modelA1 wasn't as high as for some of the other models,
# it is really the TP rate that will drive the mailing list selection.
# 1. Go to your Part 3 code and execute the commands that you need so that your
# chosen model is in memory. It is a good idea to start with a fresh R session
# here. That way you don't have any unintentional carry-over from your Part 2
# code.
# 2. While your chosen Part 3 model is in memory, execute the following commands
# (modified for your particular situation):
# outPath = file.path("/Users","JLW","Documents","Northwestern MSPA","PREDICT 422","Project - Charity Mailing","Project Data Files")
# modelPart3 = modelA1
# optThreshPart3 = threshA1
# save(modelPart3,optThreshPart3,file=file.path(outPath,"modelPart3.RData"))
#
# - When saving to a .RData file, you can list as many objects and values as
# you want. Follow those objects with the file specification. R will bundle
# everything up and save it to one .RData file. For example, you could save
# multiple models and the data used to train the models as follows:
# save(classData2,modelA1,optThreshA1,modelB1,optThreshB1,file=file.path(outPath,"exampleSave.RData"))
# 3. You now have your chosen Part 3 model saved to your computer. You can close or
# terminate your R session, and you will still be able to come back later and
# load the model from the .RData file. You can go to the file location that you
# specified above, and you should see a file named modelPart3.RData.
# Once you have saved your Part 2 and Part 3 models, I recommend you start with a
# fresh R session again. To load the models from Part 2 and Part 3, we have the
# following commands:
# Specify the file path where your .RData files are located
# Directory that holds the .RData files saved from Parts 2 and 3.
# NOTE(review): absolute, user-specific path -- edit for your own machine.
modelPath <- file.path("/Users", "JLW", "Documents", "Northwestern MSPA", "PREDICT 422",
                       "Project - Charity Mailing", "Project Data Files")
# Load the saved objects (modelPart2; modelPart3 plus any saved hyper-parameters)
# into the global environment. Check existence first so a missing file fails
# with a clear message rather than load()'s cryptic "cannot open the connection".
for (rdata_file in c("modelPart2.RData", "modelPart3.RData")) {
  rdata_path <- file.path(modelPath, rdata_file)
  if (!file.exists(rdata_path)) {
    stop("Saved model file not found: ", rdata_path, call. = FALSE)
  }
  load(rdata_path, envir = globalenv())
}
# You should now see modelPart2 and modelPart3 (plus any hyper-parameters that you
# saved) in your R Environment (i.e. in memory). |
library(Biobase)
library(affy)
#load("data/minn2007.RData")
#load("data/nki2002.RData")
#load("data/sotiriou2006.RData")
#load("data/miller2005.RData")
#load("data/transbig2006affy.RData")
#load("data/schmidt2008.RData")
##> intersect(colnames(pData(vdx)),intersect(colnames(pData(upp)),intersect(colnames(pData(unt)),intersect(colnames(pData(transbig)),intersect(colnames(pData(mainz)),colnames(pData(nki)))))))
## [1] "samplename" "series" "dataset" "filename"
## [5] "id" "e.dmfs" "t.dmfs" "node"
## [9] "t.rfs" "e.rfs" "er" "size"
##[13] "age" "grade" "treatment" "tissue"
##[17] "t.os" "e.os" "pgr" "her2"
##[21] "brca.mutation"
## set the rank of columnnames with the names from the intersection of all 6 eSets
columnRank <- c("samplename","dataset","series","id","filename","size","age","er","grade","pgr","her2","brca.mutation","e.dmfs","t.dmfs","node","t.rfs","e.rfs","treatment","tissue","t.os","e.os")
## creation of the first eSet
# --- VDX ExpressionSet (Wang 2005 / Minn 2007 cohort, Affymetrix HGU133A) ---
# load() injects 'demo' (clinical table), 'annot' (probe annotations) and
# 'data' (expression matrix; apparently samples x probes given the t() below)
# into the workspace -- TODO confirm object names/shapes against the .RData.
load("data/minn2007.RData")
# Copy tumour size into 'T', then blank 'size' so the column set matches the
# other cohorts. NOTE(review): 'T' is not in columnRank, so this copy is
# discarded by the subset below -- verify that is intended.
demo$T <- demo$size
demo <- demo[,!is.element(colnames(demo), "size.cat")]
demo <- demo[,!is.element(colnames(demo), "primary.bc.unt")]
demo$size <- c(rep(NA,length(demo$size)))
# Select/reorder clinical columns to the shared layout.
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME) describing the study.
experimentD <- new("MIAME",
name = "VDX",
lab = "Veridex LLC, a Johnson & Johnson Company, San Diego, CA, USA. Department of Radiaton and Cellular Oncology, Center for Molecular Oncology, and Ludwig Center for Metastasis Research, University of Chicago, Chicago, IL, USA",
contact = "Dr John Foekens, Erasmus MC Josephine Nefkens Institute, Netherlands: <j.foekens@erasmusmc.nl>, Joan Massague, <j-massagie@ski.mskcc.org>",
title = "Gene-expression profiles to predict distant metastasis of kymph-node-negative primary breast cancer. Lung metastasis genes coule breast tumor size and metastatic spread.",
abstract = "Wang et al. 2005 and Minn et al. 2007. The association between large tumor size and metastatic risk in a majority of clinical cancers has led to questions as to whether these observations are causally related or whether one is simply a marker for the other. This is partly due to an uncertainty about how metastasis-promoting gene expression changes can arise in primary tumors. We investigated this question through the analysis of a previously defined 'lung metastasis gene-expression signa- ture'(LMS) that mediates experimental breast cancer metastasis selectively to the lung and is expressed by primary human breast cancer with a high risk for developing lung metastasis. Experimentally, we demonstrate that the LMS promotes primary tumor growth that enriches for LMS cells, and it allows for intravasation after reaching a critical tumor size. Clinically, this corresponds to LMS tumors being larger at diagnosis compared with LMS tumors and to a marked rise in the incidence of metastasis after LMS tumors reach 2 cm. Patients with LMS-expressing primary tumors selectively fail in the lung compared with the bone or other visceral sites and have a worse overall survival. The mechanistic linkage between metastasis gene expression, accelerated tumor growth, and likelihood of metastatic recurrence provided by the LMS may help to explain observations of prognostic gene signatures in primary cancer and how tumor growth can both lead to metastasis and be a marker for cells destined to metastasize.",
url = "GEO accession number: GSE2034 & GSE5327 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE2034>, <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE5327>",
pubMedIds = "17420468")
arrayT <- "hgu133a"
# array platform identifier (a string)
vdx <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the second eSet
# --- NKI ExpressionSet (van de Vijver / van't Veer cohort, Rosetta array) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/nki2002.RData")
# Drop the dataset-specific 'gg' column, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "NKI",
lab = "Divisions of Diagnostic Oncology, Radiotherapy and Molecular Carvinogenesis and Center for Biomedical Genetics, The Netherland Cancer Institute, Amsterdam, The Netherlands.",
contact = "Stephen H. Friend <stephen_friend@merck.com>",
title = "Gene expression profiling predicts clinical outcome of breast cancer. A gene-expresion signature as a predictor of survival in breast cancer.",
abstract = "Van de Vijver et al. 2002 and Laura J. van't Veer 2002. Series of 295 concescutive patients with primary breast carcinomas as having a gene-expression signature associated with either a poor prognosis or a good prognosis. All patients had stage I or II breast cancer and were younger than 53 years old; 151 had lymph-node-negaitve disease, and 144 had lymph-node-positive disease.",
url = "http://www.rii.com/publications/2002/vantveer.html")
arrayT <- "rosetta"
# array platform identifier (a string)
nki <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the third eSet
# --- UNT ExpressionSet (Sotiriou 2006 grade-index cohort, HGU133A+B) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/sotiriou2006.RData")
# Drop the dataset-specific 'gg' column, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "UNT",
lab = "Functional Genomics and Translational Research Unit, Jules Bordet Institute, Universite Libre de Bruxelles, Brussels, Belgium.",
contact = "Christos Sotiriou <christos.sotiriou@bordet.be>",
title = "Gene expression profiling in breast cancer: understanding the molecular basis of histologic grade to improve prognosis.",
abstract = "Sotiriou et al. 2006. Background: Histologic grade in breast cancer provides clinically important prognostic information. However, 30%-60% of tumors are classified as histologic grade 2. This grade is associated with an intermediate risk of recurrence and is thus not informative for clinical decision making. We examined whether histologic grade was associated with gene expression profiles of breast cancers and whether such profiles could be used to improve histologic grading. Methods: We analyzed microarray data from 189 invasive breast carcinomas and from three published gene expression datasets from breast carcinomas. We identified differentially expressed genes in a training set of 64 estrogen receptor (ER)-positive tumor samples by comparing expression profiles between histologic grade 3 tumors and histologic grade 1 tumors and used the expression of these genes to define the gene expression grade index. Data from 597 independent tumors were used to evaluate the association between relapse-free survival and the gene expression grade index in a Kaplan-Meier analysis. All statistical tests were two-sided. Results: We identified 97 genes in our training set that were associated with histologic grade; most of these genes were involved in cell cycle regulation and proliferation. In validation datasets, the gene expression grade index was strongly associated with histologic grade 1 and 3 status; however, among histologic grade 2 tumors, the index spanned the values for histologic grade 1-3 tumors. Among patients with histologic grade 2 tumors, a high gene expression grade index was associated with a higher risk of recurrence than a low gene expression grade index (hazard ratio = 3.61, 95% confidence interval = 2.25 to 5.78; P < .001, log-rank test). Conclusions: Gene expression grade index appeared to reclassify patients with histologic grade 2 tumors into two groups with high versus low risks of recurrence. 
This approach may improve the accuracy of tumor grading and thus its prognostic value.",
pubMedIds = "16478745",
url = "GEO accession number: GEO GSE2990 & GSE6532 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE2990>, <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE6532>")
arrayT <- "hgu133ab"
# array platform identifier (a string)
unt <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the forth eSet
# --- UPP ExpressionSet (Miller 2005 p53 cohort, Affymetrix HGU133A+B) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/miller2005.RData")
# Drop dataset-specific columns, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,!is.element(colnames(demo), "treatment2")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "UPP",
lab = "Genome Institute of Singapore, Singapore and Department of Oncology and Pathology, Radiumhemmet, Karolinksa Institue and Hospital, Stockholm, Sweden",
contact = "Lance D. Miller <millerl@gis.a-star.edu.sg> and Edison T. Liu <luie@gis.a-star.edu.sg>",
title = "An expression signature for p53 status in human breast caner predicts mutation status, transcriptional effects, and patient survival.",
abstract = "Miller et al. 2005. Perturbations of the p53 pathway are associated with more aggressive and therapeutically refractory tumors. However, molecular assessment of p53 status, by using sequence analysis and immunohistochemistry, are incomplete assessors of p53 functional effects. We posited that the transcriptional fingerprint is a more definitive downstream indicator of p53 function. Herein, we analyzed transcript profiles of 251 p53-sequenced primary breast tumors and identified a clinically embedded 32-gene expression signature that distinguishes p53-mutant and wild-type tumors of different histologies and outperforms sequence-based assessments of p53 in predicting prognosis and therapeutic response. Moreover, the p53 signature identified a subset of aggressive tumors absent of sequence mutations in p53 yet exhibiting expression characteristics consistent with p53 deficiency because of attenuated p53 transcript levels. Our results show the primary importance of p53 functional status in predicting clinical breast cancer behavior.",
url = "GEO accession number: GSE3494 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE3494>",
pubMedIds = "16141321")
# array platform identifier (a string)
arrayT <- "hgu133ab"
## create eSet
upp <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the fifth eSet
# --- TRANSBIG ExpressionSet (Desmedt 2007 validation series, HGU133A) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/transbig2006affy.RData")
# Drop dataset-specific/identifying columns, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,!is.element(colnames(demo), "veridex_score")]
demo <- demo[,!is.element(colnames(demo), "agendia_score")]
demo <- demo[,!is.element(colnames(demo), "Date_of_Birth")]
demo <- demo[,!is.element(colnames(demo), "Date_of_Diagnosis")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "TRANSBIG",
lab = "Institute Jules Bordet, Universite Libre de Bruxelles, Brussels, Belgium",
contact = "TRANSBIG Consortium <transbig@bordet.be>",
title = "Strong Time dependence of the 76-gene prognostic signature for node-negative breast cancer patients int the TRANSBIG multicenter independent validation series",
abstract = "Desmedt et al. 2007. Recently a 76-gene prognostic signature able to predict distant metastases in lymph node-negative (N-) breast cancer patients was reported. The aims of this study conducted by TRANSBIG were to independently validate these results and to compare the outcome with clinical risk assessment. Materials and Methods: Gene expression profiling of frozen samples from 198 N- systemically untreated patients was performed at the Bordet Institute, blinded to clinical data and independent of Veridex. Genomic risk was defined by Veridex, blinded to clinical data. Survival analyses, done by an independent statistician, were performed with the genomic risk and adjusted for the clinical risk, defined by Adjuvant!Online. Results: The actual 5- and 10-year time to distant metastasis (TDM) were 98% (88%-100%) and 94% (83%-98%) respectively for the good profile group and 76% (68%- 82%) and 73% (65%-79%) for the poor profile group. The actual 5- and 10-year overall survival (OS) were 98% (88%-100%) and 87% (73%-94%) respectively for the good profile group and 84% (77%-89%) and 72% (63%-78%) for the poor profile group. We observed a strong time-dependency of this signature, leading to an adjusted HR of 13.58 (1.85-99.63) and 8.20 (1.10-60.90) at 5 years, and 5.11 (1.57-16.67) and 2.55 (1.07-6.10) at 10 years for TDM and OS respectively. Conclusion: This independent validation confirmed the performance of the 76-gene signature and adds to the growing evidence that gene expression signatures are of clinical relevance, especially for identifying patients at high risk of early distant metastases.",
url = "GEO accession number: GSE7390 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE7390>",
pubMedIds = "17545524")
# array platform identifier (a string)
arrayT <- "hgu133a"
## create eSet
transbig <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the sixth eSet
# --- MAINZ ExpressionSet (Schmidt 2008 node-negative cohort, HGU133A) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/schmidt2008.RData")
# Align clinical columns to the shared layout.
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "MAINZ",
lab = "Department of Obstetrics and Gynecology, Medical School, Johannes Gutenberg University, Mainz, Germany",
contact = "Mathias Gehrmann <mathias.gehrmann@siemens.com>",
title = "The humoral immune system has a key prognostic impact in node-negative breast cancer.",
abstract = "Schmidt et al. 2008. Background: Estrogen receptor (ER) expression and proliferative activity are established prognostic factors in breast cancer. In a search for additional prognostic motifs, we analyzed the gene expression patterns of 200 tumors of patients who were not treated by systemic therapy after surgery using a discovery approach. After performing hierarchical cluster analysis, we identified coregulated genes related to the biological process of proliferation, steroid hormone receptor expression, as well as B-cell and T-cell infiltration. We calculated metagenes as a surrogate for all genes contained within a particular cluster and visualized the relative expression in relation to time to metastasis with principal component analysis. Distinct pat- terns led to the hypothesis of a prognostic role of the immune system in tumors with high expression of proliferation- associated genes. In multivariate Cox regression analysis, the proliferation metagene showed a significant association with metastasis-free survival of the whole discovery cohort [hazard ratio (HR), 2.20; 95% confidence interval (95% CI), 1.40-3.46]. The B-cell metagene showed additional indepen- dent prognostic information in carcinomas with high prolif- erative activity (HR, 0.66; 95% CI, 0.46-0.97). A prognostic influence of the B-cell metagene was independently confirmed by multivariate analysis in a first validation cohort enriched for high-grade tumors (n = 286; HR, 0.78; 95% CI, 0.62-0.98) and a second validation cohort enriched for younger patients (n = 302; HR, 0.83; 95% CI, 0.7-0.97). Thus, we could show in three cohorts of untreated, node-negative breast cancer patients that the humoral immune system plays a pivotal role in metastasis-free survival of carcinomas of the breast.",
url = "GEO accession number: GSE11121 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE11121>",
pubMedIds = "18593943")
# array platform identifier (a string)
arrayT <- "hgu133a"
## create eSet
mainz <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
##package.skeleton("datasets")
| /R/make-6-eSets.r | no_license | mschroed/oncosurf | R | false | false | 17,878 | r | library(Biobase)
library(affy)
#load("data/minn2007.RData")
#load("data/nki2002.RData")
#load("data/sotiriou2006.RData")
#load("data/miller2005.RData")
#load("data/transbig2006affy.RData")
#load("data/schmidt2008.RData")
##> intersect(colnames(pData(vdx)),intersect(colnames(pData(upp)),intersect(colnames(pData(unt)),intersect(colnames(pData(transbig)),intersect(colnames(pData(mainz)),colnames(pData(nki)))))))
## [1] "samplename" "series" "dataset" "filename"
## [5] "id" "e.dmfs" "t.dmfs" "node"
## [9] "t.rfs" "e.rfs" "er" "size"
##[13] "age" "grade" "treatment" "tissue"
##[17] "t.os" "e.os" "pgr" "her2"
##[21] "brca.mutation"
## set the rank of columnnames with the names from the intersection of all 6 eSets
columnRank <- c("samplename","dataset","series","id","filename","size","age","er","grade","pgr","her2","brca.mutation","e.dmfs","t.dmfs","node","t.rfs","e.rfs","treatment","tissue","t.os","e.os")
## creation of the first eSet
# --- VDX ExpressionSet (Wang 2005 / Minn 2007 cohort, Affymetrix HGU133A) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/minn2007.RData")
# Copy tumour size into 'T', then blank 'size'. NOTE(review): 'T' is not in
# columnRank, so this copy is discarded by the subset below -- verify intended.
demo$T <- demo$size
demo <- demo[,!is.element(colnames(demo), "size.cat")]
demo <- demo[,!is.element(colnames(demo), "primary.bc.unt")]
demo$size <- c(rep(NA,length(demo$size)))
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "VDX",
lab = "Veridex LLC, a Johnson & Johnson Company, San Diego, CA, USA. Department of Radiaton and Cellular Oncology, Center for Molecular Oncology, and Ludwig Center for Metastasis Research, University of Chicago, Chicago, IL, USA",
contact = "Dr John Foekens, Erasmus MC Josephine Nefkens Institute, Netherlands: <j.foekens@erasmusmc.nl>, Joan Massague, <j-massagie@ski.mskcc.org>",
title = "Gene-expression profiles to predict distant metastasis of kymph-node-negative primary breast cancer. Lung metastasis genes coule breast tumor size and metastatic spread.",
abstract = "Wang et al. 2005 and Minn et al. 2007. The association between large tumor size and metastatic risk in a majority of clinical cancers has led to questions as to whether these observations are causally related or whether one is simply a marker for the other. This is partly due to an uncertainty about how metastasis-promoting gene expression changes can arise in primary tumors. We investigated this question through the analysis of a previously defined 'lung metastasis gene-expression signa- ture'(LMS) that mediates experimental breast cancer metastasis selectively to the lung and is expressed by primary human breast cancer with a high risk for developing lung metastasis. Experimentally, we demonstrate that the LMS promotes primary tumor growth that enriches for LMS cells, and it allows for intravasation after reaching a critical tumor size. Clinically, this corresponds to LMS tumors being larger at diagnosis compared with LMS tumors and to a marked rise in the incidence of metastasis after LMS tumors reach 2 cm. Patients with LMS-expressing primary tumors selectively fail in the lung compared with the bone or other visceral sites and have a worse overall survival. The mechanistic linkage between metastasis gene expression, accelerated tumor growth, and likelihood of metastatic recurrence provided by the LMS may help to explain observations of prognostic gene signatures in primary cancer and how tumor growth can both lead to metastasis and be a marker for cells destined to metastasize.",
url = "GEO accession number: GSE2034 & GSE5327 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE2034>, <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE5327>",
pubMedIds = "17420468")
arrayT <- "hgu133a"
# array platform identifier (a string)
vdx <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the second eSet
# --- NKI ExpressionSet (van de Vijver / van't Veer cohort, Rosetta array) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/nki2002.RData")
# Drop the dataset-specific 'gg' column, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "NKI",
lab = "Divisions of Diagnostic Oncology, Radiotherapy and Molecular Carvinogenesis and Center for Biomedical Genetics, The Netherland Cancer Institute, Amsterdam, The Netherlands.",
contact = "Stephen H. Friend <stephen_friend@merck.com>",
title = "Gene expression profiling predicts clinical outcome of breast cancer. A gene-expresion signature as a predictor of survival in breast cancer.",
abstract = "Van de Vijver et al. 2002 and Laura J. van't Veer 2002. Series of 295 concescutive patients with primary breast carcinomas as having a gene-expression signature associated with either a poor prognosis or a good prognosis. All patients had stage I or II breast cancer and were younger than 53 years old; 151 had lymph-node-negaitve disease, and 144 had lymph-node-positive disease.",
url = "http://www.rii.com/publications/2002/vantveer.html")
arrayT <- "rosetta"
# array platform identifier (a string)
nki <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## creation of the third eSet
# --- UNT ExpressionSet (Sotiriou 2006 grade-index cohort, HGU133A+B) ---
# load() injects 'demo', 'annot' and 'data' -- TODO confirm against the .RData.
load("data/sotiriou2006.RData")
# Drop the dataset-specific 'gg' column, then align to the shared layout.
demo <- demo[,!is.element(colnames(demo), "gg")]
demo <- demo[,columnRank]
# phenotype data (sample annotations)
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## probe annotations
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment-level metadata (MIAME).
experimentD <- new("MIAME",
name = "UNT",
lab = "Functional Genomics and Translational Research Unit, Jules Bordet Institute, Universite Libre de Bruxelles, Brussels, Belgium.",
contact = "Christos Sotiriou <christos.sotiriou@bordet.be>",
title = "Gene expression profiling in breast cancer: understanding the molecular basis of histologic grade to improve prognosis.",
abstract = "Sotiriou et al. 2006. Background: Histologic grade in breast cancer provides clinically important prognostic information. However, 30%-60% of tumors are classified as histologic grade 2. This grade is associated with an intermediate risk of recurrence and is thus not informative for clinical decision making. We examined whether histologic grade was associated with gene expression profiles of breast cancers and whether such profiles could be used to improve histologic grading. Methods: We analyzed microarray data from 189 invasive breast carcinomas and from three published gene expression datasets from breast carcinomas. We identified differentially expressed genes in a training set of 64 estrogen receptor (ER)-positive tumor samples by comparing expression profiles between histologic grade 3 tumors and histologic grade 1 tumors and used the expression of these genes to define the gene expression grade index. Data from 597 independent tumors were used to evaluate the association between relapse-free survival and the gene expression grade index in a Kaplan-Meier analysis. All statistical tests were two-sided. Results: We identified 97 genes in our training set that were associated with histologic grade; most of these genes were involved in cell cycle regulation and proliferation. In validation datasets, the gene expression grade index was strongly associated with histologic grade 1 and 3 status; however, among histologic grade 2 tumors, the index spanned the values for histologic grade 1-3 tumors. Among patients with histologic grade 2 tumors, a high gene expression grade index was associated with a higher risk of recurrence than a low gene expression grade index (hazard ratio = 3.61, 95% confidence interval = 2.25 to 5.78; P < .001, log-rank test). Conclusions: Gene expression grade index appeared to reclassify patients with histologic grade 2 tumors into two groups with high versus low risks of recurrence. 
This approach may improve the accuracy of tumor grading and thus its prognostic value.",
pubMedIds = "16478745",
url = "GEO accession number: GEO GSE2990 & GSE6532 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE2990>, <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE6532>")
arrayT <- "hgu133ab"
# array platform identifier (a string)
unt <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## Creation of the fourth eSet (Miller et al. 2005, "UPP" cohort).
## The .RData file loads `demo` (sample annotations), `annot` (probe
## annotations) and `data` (expression matrix) into the workspace.
load("data/miller2005.RData")
# Drop unused clinical columns in one subsetting step (instead of one
# `!is.element()` pass per column), then put the remaining columns in the
# common order used for every study (`columnRank` is defined earlier).
demo <- demo[, !(colnames(demo) %in% c("gg", "treatment2"))]
demo <- demo[,columnRank]
# Phenotype (sample-level) annotations.
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## Probe annotations.
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment metadata for the UPP study.
# FIX: corrected "breast caner" -> "breast cancer" in the paper title
# (PubMed 16141321).  NOTE(review): the lab string still contains
# "Karolinksa Institue" (sic, likely "Karolinska Institute") -- left
# unchanged pending confirmation against the original record.
experimentD <- new("MIAME",
name = "UPP",
lab = "Genome Institute of Singapore, Singapore and Department of Oncology and Pathology, Radiumhemmet, Karolinksa Institue and Hospital, Stockholm, Sweden",
contact = "Lance D. Miller <millerl@gis.a-star.edu.sg> and Edison T. Liu <luie@gis.a-star.edu.sg>",
title = "An expression signature for p53 status in human breast cancer predicts mutation status, transcriptional effects, and patient survival.",
abstract = "Miller et al. 2005. Perturbations of the p53 pathway are associated with more aggressive and therapeutically refractory tumors. However, molecular assessment of p53 status, by using sequence analysis and immunohistochemistry, are incomplete assessors of p53 functional effects. We posited that the transcriptional fingerprint is a more definitive downstream indicator of p53 function. Herein, we analyzed transcript profiles of 251 p53-sequenced primary breast tumors and identified a clinically embedded 32-gene expression signature that distinguishes p53-mutant and wild-type tumors of different histologies and outperforms sequence-based assessments of p53 in predicting prognosis and therapeutic response. Moreover, the p53 signature identified a subset of aggressive tumors absent of sequence mutations in p53 yet exhibiting expression characteristics consistent with p53 deficiency because of attenuated p53 transcript levels. Our results show the primary importance of p53 functional status in predicting clinical breast cancer behavior.",
url = "GEO accession number: GSE3494 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE3494>",
pubMedIds = "16141321")
# Affymetrix platform identifier for this study.
arrayT <- "hgu133ab"
## Assemble the eSet from the pieces built above (globals, see note on `unt`).
upp <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## Creation of the fifth eSet (Desmedt et al. 2007, "TRANSBIG" cohort).
## The .RData file loads `demo`, `annot` and `data` into the workspace.
load("data/transbig2006affy.RData")
# Drop the unused clinical / score / date columns in a single `%in%`
# subsetting step (instead of five sequential `!is.element()` passes), then
# apply the common column order shared by all studies.
demo <- demo[, !(colnames(demo) %in% c("gg", "veridex_score", "agendia_score",
"Date_of_Birth", "Date_of_Diagnosis"))]
demo <- demo[,columnRank]
# Phenotype (sample-level) annotations.
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## Probe annotations.
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment metadata for the TRANSBIG study.
# FIX: corrected "int the" -> "in the" in the paper title (PubMed 17545524)
# and removed a stray space in the "(68%-82%)" confidence interval, both
# copy/paste artifacts.
experimentD <- new("MIAME",
name = "TRANSBIG",
lab = "Institute Jules Bordet, Universite Libre de Bruxelles, Brussels, Belgium",
contact = "TRANSBIG Consortium <transbig@bordet.be>",
title = "Strong Time dependence of the 76-gene prognostic signature for node-negative breast cancer patients in the TRANSBIG multicenter independent validation series",
abstract = "Desmedt et al. 2007. Recently a 76-gene prognostic signature able to predict distant metastases in lymph node-negative (N-) breast cancer patients was reported. The aims of this study conducted by TRANSBIG were to independently validate these results and to compare the outcome with clinical risk assessment. Materials and Methods: Gene expression profiling of frozen samples from 198 N- systemically untreated patients was performed at the Bordet Institute, blinded to clinical data and independent of Veridex. Genomic risk was defined by Veridex, blinded to clinical data. Survival analyses, done by an independent statistician, were performed with the genomic risk and adjusted for the clinical risk, defined by Adjuvant!Online. Results: The actual 5- and 10-year time to distant metastasis (TDM) were 98% (88%-100%) and 94% (83%-98%) respectively for the good profile group and 76% (68%-82%) and 73% (65%-79%) for the poor profile group. The actual 5- and 10-year overall survival (OS) were 98% (88%-100%) and 87% (73%-94%) respectively for the good profile group and 84% (77%-89%) and 72% (63%-78%) for the poor profile group. We observed a strong time-dependency of this signature, leading to an adjusted HR of 13.58 (1.85-99.63) and 8.20 (1.10-60.90) at 5 years, and 5.11 (1.57-16.67) and 2.55 (1.07-6.10) at 10 years for TDM and OS respectively. Conclusion: This independent validation confirmed the performance of the 76-gene signature and adds to the growing evidence that gene expression signatures are of clinical relevance, especially for identifying patients at high risk of early distant metastases.",
url = "GEO accession number: GSE7390 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE7390>",
pubMedIds = "17545524")
# Affymetrix platform identifier for this study.
arrayT <- "hgu133a"
## Assemble the eSet from the pieces built above (globals, see note on `unt`).
transbig <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
## Creation of the sixth eSet (Schmidt et al. 2008, "MAINZ" cohort).
## The .RData file loads `demo`, `annot` and `data` into the workspace;
## no columns need dropping here, only the common reordering.
load("data/schmidt2008.RData")
demo <- demo[,columnRank]
# Phenotype (sample-level) annotations.
metadata<-data.frame(labelDescription=colnames(demo), row.names=colnames(demo))
phenoD<-new("AnnotatedDataFrame", data=demo, varMetadata=metadata)
## Probe annotations.
metadata<-data.frame(labelDescription=colnames(annot), row.names=colnames(annot))
featureD <- new("AnnotatedDataFrame", data=annot, varMetadata=metadata)
# Experiment metadata for the MAINZ study.
# FIX: repaired PDF copy/paste hyphenation artifacts in the abstract string
# ("pat- terns", "proliferation- associated", "indepen- dent",
# "prolif- erative"); see PubMed 18593943.
experimentD <- new("MIAME",
name = "MAINZ",
lab = "Department of Obstetrics and Gynecology, Medical School, Johannes Gutenberg University, Mainz, Germany",
contact = "Mathias Gehrmann <mathias.gehrmann@siemens.com>",
title = "The humoral immune system has a key prognostic impact in node-negative breast cancer.",
abstract = "Schmidt et al. 2008. Background: Estrogen receptor (ER) expression and proliferative activity are established prognostic factors in breast cancer. In a search for additional prognostic motifs, we analyzed the gene expression patterns of 200 tumors of patients who were not treated by systemic therapy after surgery using a discovery approach. After performing hierarchical cluster analysis, we identified coregulated genes related to the biological process of proliferation, steroid hormone receptor expression, as well as B-cell and T-cell infiltration. We calculated metagenes as a surrogate for all genes contained within a particular cluster and visualized the relative expression in relation to time to metastasis with principal component analysis. Distinct patterns led to the hypothesis of a prognostic role of the immune system in tumors with high expression of proliferation-associated genes. In multivariate Cox regression analysis, the proliferation metagene showed a significant association with metastasis-free survival of the whole discovery cohort [hazard ratio (HR), 2.20; 95% confidence interval (95% CI), 1.40-3.46]. The B-cell metagene showed additional independent prognostic information in carcinomas with high proliferative activity (HR, 0.66; 95% CI, 0.46-0.97). A prognostic influence of the B-cell metagene was independently confirmed by multivariate analysis in a first validation cohort enriched for high-grade tumors (n = 286; HR, 0.78; 95% CI, 0.62-0.98) and a second validation cohort enriched for younger patients (n = 302; HR, 0.83; 95% CI, 0.7-0.97). Thus, we could show in three cohorts of untreated, node-negative breast cancer patients that the humoral immune system plays a pivotal role in metastasis-free survival of carcinomas of the breast.",
url = "GEO accession number: GSE11121 <http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE11121>",
pubMedIds = "18593943")
# Affymetrix platform identifier for this study.
arrayT <- "hgu133a"
## Assemble the eSet from the pieces built above (globals, see note on `unt`).
mainz <- new("ExpressionSet",
exprs=t(data),
phenoData=phenoD,
featureData=featureD,
annotation=arrayT,
experimentData=experimentD)
##package.skeleton("datasets")
|
# add_try <- function(x,y){
# x+y
# }
# above10 <-function(x){
# use <- x>10
# x[use]
# }
# x<- 1:10
# if(x>5){
# x<-0
# }
# print(x)
# f <- function(x){
# g<- function(y){
# y+z
# }
# z <- 4
# x +g(x)
# }
# `if` is an expression in R, so its value can be assigned directly.
x <- 5
y <- if (x < 3) NA else 10
print(y) | /try_function.R | no_license | niranjan272/R-Codes | R | false | false | 277 | r | # add_try <- function(x,y){
# x+y
# }
# above10 <-function(x){
# use <- x>10
# x[use]
# }
# x<- 1:10
# if(x>5){
# x<-0
# }
# print(x)
# f <- function(x){
# g<- function(y){
# y+z
# }
# z <- 4
# x +g(x)
# }
# `if` is an expression in R: assign its value instead of mutating a
# variable inside the branches.
x <- 5
y <- if (x < 3) NA else 10
print(y)
#########################################
## Multiblock analysis Body Mass Index ##
#########################################
# NOTE(review): clearing the workspace and hard-coding the working directory
# make this script single-machine; flagged but kept (later relative paths
# depend on this setwd()).
rm(list=ls()) ; gc()
# set working directory
setwd("/neurospin/brainomics/2013_imagen_bmi/data")
# Project helpers: scale2, sgcca.predict, sgcca.cca.cv, clusters.parallel, ...
source("../scripts/functions.R")
# install newest version of RGCCA ("RGCCA_0.2.tar.gz")
# install.packages("scripts/RGCCA_2.0.tar.gz",repos=NULL,type="source")
#... and load package
require(RGCCA)
# Load the list `A` of data blocks (SNPs, images, outcome) -- presumably one
# matrix per block with subjects as rows; TODO confirm against the .RData.
load("multiblock_dataset_residualized_smoothed_images_BUG.RData")
#load("multiblock_dataset_residualized_images.RData")
# A$IMGs <- matrix(rnorm(prod(dim(A$IMGs))),nrow(A$IMGs),ncol(A$IMGs))
# check dimensions : sapply(A,dim)
# To test RGCCA with random datasets:
# n <- 100
# img <- matrix(rnorm(n*40),n,40)
# snp <- matrix(rnorm(n*15),n,15)
# y <- matrix(rnorm(n*3),n,3)
# A <- list(SNPS=snp,IMGs=img,Y=y)
#
## Too much memory required for the computation of the optimal tau
## system.time(res <- rgcca(A,tau="optimal",verbose=TRUE))
## Trying with one possible value for the regularization parameters
# Single 64% train / 36% test split on subjects.  NOTE(review): 0.64*n may be
# non-integer; sample() truncates the size -- confirm that is intended.
n <- nrow(A[[1]])
ind <- sample(n,0.64*n)
Asc <- lapply(A,scale2)
# system.time(res <- sgcca(lapply(Asc, function(mm)mm[ ind,]), c1 = c(0.6,0.6,1),verbose=TRUE,scale=F))
# system.time(res2 <- sgcca(lapply(Asc, function(mm)mm[ ind,]), c1 = c(0.1,0.1,1),verbose=TRUE,scale=F))
# cortest1 <- sgcca.predict(Asc, Asc, res)
# cortest2 <- sgcca.predict(Asc, Asc, res2)
# Train blocks are scaled; test blocks are left raw (sgcca.predict is expected
# to handle scaling -- TODO confirm in functions.R).
A.train <- lapply(A, function(mm)mm[ ind,,drop=FALSE])
A.train.scaled <- lapply(A.train, scale2)
A.test <- lapply(A, function(mm) mm[-ind,,drop=FALSE])
# Complete design matrix: every pair of the 3 blocks is connected.
CC <- 1-diag(3)
# Two sparse GCCA fits differing only in the sparsity penalties on the first
# two blocks (c1 close to 0 = sparse, close to 1 = dense).
model.train1 <- sgcca(A=A.train.scaled,C=CC,c1=c(0.1,0.1,1),scale=F)
model.train2 <- sgcca(A=A.train.scaled,C=CC,c1=c(0.9,0.9,1),scale=F)
## Building the classifier on the first components
## ... and Compute the test correlation
mscortest1 <- sgcca.predict(A.train.scaled, A.test, model.train1)
mscortest2 <- sgcca.predict(A.train.scaled, A.test, model.train2)
# Count the zeroed weights per block -- a quick look at how sparse each fit is.
sapply(model.train1$a,function(u) sum(u==0))
sapply(model.train2$a,function(u) sum(u==0))
## Cross validation setup.  `cl` stays NULL when no parallel cluster could be
## created (see the guard before stopCluster below).
cl <- tryCatch(clusters.parallel(), error = function(e) NULL)
set.seed(456)
# 5-fold CV partition; GenerateLearningsets is presumably from the CMA
# package loaded via functions.R -- TODO confirm.
trainmat <- GenerateLearningsets(n,method="CV",fold=5)@learnmatrix
# Grid of candidate sparsity parameters (c13 fixed at 1 for the outcome block).
params <- expand.grid(c11 = 1:4/5, c12 = 1:4/5, c13 = 1)
# 1 --> complete design ; 2 --> hierarchical design
C1 <- 1 - diag(3)
C2 <- cbind(c(0,0,1),c(0,0,1),c(1,1,0))
# Preallocate per-fold results.
paropt1 <- paropt2 <- matrix(NA,nrow(trainmat),3)
model.train1 <- model.train2 <- NULL
cortest1 <- cortest2 <- rep(NA,nrow(trainmat))
# ### Cross-validation loop: for each fold, tune the sparsity parameters on
# the training part (for both designs), refit, and score on the held-out part.
for (i in seq_len(nrow(trainmat))){
cat("itération : ", i, "\n")
## Get train and test datasets for this iteration
ind <- trainmat[i,]
A.train <- lapply(A, function(mm)mm[ ind,,drop=FALSE])
A.train.scaled <- lapply(A.train, scale2)
A.test <- lapply(A, function(mm) mm[-ind,,drop=FALSE])
## Estimating the optimal parameters on the training set (inner CV),
## optionally in parallel via `cl`.
paropt1[i,] <- sgcca.cca.cv(A.train,C=C1,scale=TRUE,params=params,nfold=5, cl=cl)
paropt2[i,] <- sgcca.cca.cv(A.train,C=C2,scale=TRUE,params=params,nfold=5, cl=cl)
model.train1[[i]] <- sgcca(A=A.train.scaled,C=C1,c1=paropt1[i,],scale=F)
model.train2[[i]] <- sgcca(A=A.train.scaled,C=C2,c1=paropt2[i,],scale=F)
## Building the classifier on the first components
## ... and Compute the test correlation
cortest1[i] <- sgcca.predict(A.train.scaled, A.test, model.train1[[i]])
cortest2[i] <- sgcca.predict(A.train.scaled, A.test, model.train2[[i]])
}
### Boxplot of the per-fold test performance of the two designs.
par(mar=c(9,4,4,2)+0.1)
boxplot(data.frame(cortest1,cortest2),
names=c("Complete design","Hierarchical design"),las=2,
main=paste("R squared of the linear model on the test data") ,ylim=0:1)
# Stop the worker cluster.  FIX: `cl` is NULL when clusters.parallel() failed
# in the tryCatch above, and stopCluster(NULL) raises an error -- guard it.
if (!is.null(cl)) stopCluster(cl)
# 0/1 selection indicators: which SNP / voxel weights are non-zero in each
# fold's fitted model, one column per fold.
snp_sel1 <- sapply(model.train1,function(m) (m$astar$SNPs !=0)+0 )
snp_sel2 <- sapply(model.train2,function(m) (m$astar$SNPs !=0)+0 )
img_sel1 <- sapply(model.train1,function(m) (m$astar$IMGs !=0)+0 )
img_sel2 <- sapply(model.train2,function(m) (m$astar$IMGs !=0)+0 )
# Fleiss' kappa across folds = stability of the selected features.
require(irr)
kappam.fleiss(snp_sel1)
kappam.fleiss(snp_sel2)
kappam.fleiss(img_sel1)
kappam.fleiss(img_sel2)
##### Final models refit on ALL subjects with the CV-averaged weights ####
opt_weights1 <- colMeans(paropt1)
opt_weights2 <- colMeans(paropt2)
opt_model1 <- sgcca(A=A,C=C1,c1=opt_weights1,scale=T)
opt_model2 <- sgcca(A=A,C=C2,c1=opt_weights2,scale=T)
# Feature signatures: SNP names / voxel indices with non-zero final weights.
snp_signature1 <- colnames(A$SNPs)[ opt_model1$a$SNPs != 0 ]
snp_signature2 <- colnames(A$SNPs)[ opt_model2$a$SNPs != 0 ]
img_signature1 <- which( opt_model1$a$IMGs != 0 )
img_signature2 <- which( opt_model2$a$IMGs != 0 )
require(oro.nifti)
mask <- readNIfTI("/neurospin/brainomics/2013_imagen_bmi/data/rmask.nii")@.Data
#### Write the two voxel weight maps back into mask space as NIfTI files ####
# (non-mask voxels stay 0; in-mask voxels get the model weights, assuming the
# image block columns follow mask order -- TODO confirm).
img_opt_model1 <- array(0, dim(mask))
img_opt_model1[mask!=0] = opt_model1$a$IMGs
writeNIfTI(img_opt_model1, 'img_opt_model1')
img_opt_model2 <- array(0, dim(mask))
img_opt_model2[mask!=0] = opt_model2$a$IMGs
writeNIfTI(img_opt_model2, 'img_opt_model2')
# Annotate the selected SNPs: rsID -> Ensembl gene -> HGNC symbol (biomaRt,
# needs network access).
library(biomaRt)
snpmart <- useMart("snp", dataset="hsapiens_snp")
ensembl <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
a1 <- getBM( attributes = c("refsnp_id","ensembl_gene_stable_id","consequence_type_tv","chrom_start"),
filter = "snp_filter",snp_signature1,mart=snpmart)
a2 <- getBM( attributes = c("refsnp_id","ensembl_gene_stable_id","consequence_type_tv","chrom_start"),
filter = "snp_filter",snp_signature2,mart=snpmart)
b1 <- getBM(attributes = c( "ensembl_gene_id","hgnc_symbol","chromosome_name", "band"), filters = "ensembl_gene_id",
values =a1[,"ensembl_gene_stable_id"], mart = ensembl)
b2 <- getBM(attributes = c( "ensembl_gene_id","hgnc_symbol","chromosome_name", "band"), filters = "ensembl_gene_id",
values =a2[,"ensembl_gene_stable_id"], mart = ensembl)
ab1 <- merge(a1, b1, by.x="ensembl_gene_stable_id", by.y="ensembl_gene_id", all=TRUE)
ab2 <- merge(a2, b2, by.x="ensembl_gene_stable_id", by.y="ensembl_gene_id", all=TRUE)
#### Venn diagram of the gene symbols selected by the two designs ####
library(venneuler)
# NOTE(review): the [-1] assumes the first sorted unique symbol is the empty
# string ""; if no empty symbol is present this silently drops a real gene.
xx <- sort(unique(ab1$hgnc_symbol))[-1]
yy <- sort(unique(ab2$hgnc_symbol))[-1]
AB <- intersect(xx,yy)
AA <- setdiff(xx,AB)
BB <- setdiff(yy,AB)
ABtot <- union(xx,yy)
## Total number of distinct symbols across both designs.
counts <- length(ABtot)
## Convert set sizes to proportions for venneuler()
ps <- c(length(AA),length(BB),length(AB))/counts
## Calculate the Venn diagram
vd <- venneuler(c(a=ps[1], b = ps[2], "a&b" = ps[3]))
## Plot it!
plot(vd)
require(gplots)
pdf("venn.pdf")
venn(list("Complete"=xx,"Hierarchical"=yy))
dev.off()
# Export the two gene lists (one symbol per line, no header).
write.table(xx,"snpslist_complete.txt",quote=F, row.names = F, col.names = F)
write.table(yy,"snpslist_hierarchical.txt",quote=F, row.names = F, col.names = F)
| /2013_imagen_bmi/scripts/R_code/multiblock_analysis_with_optimal_final_models.R | no_license | neurospin/scripts | R | false | false | 6,738 | r | #########################################
## Multiblock analysis Body Mass Index ##
#########################################
rm(list=ls()) ; gc()
# set working directory
setwd("/neurospin/brainomics/2013_imagen_bmi/data")
source("../scripts/functions.R")
# install newest version of RGCCA ("RGCCA_0.2.tar.gz")
# install.packages("scripts/RGCCA_2.0.tar.gz",repos=NULL,type="source")
#... and load package
require(RGCCA)
# load data
load("multiblock_dataset_residualized_smoothed_images_BUG.RData")
#load("multiblock_dataset_residualized_images.RData")
# A$IMGs <- matrix(rnorm(prod(dim(A$IMGs))),nrow(A$IMGs),ncol(A$IMGs))
# check dimensions : sapply(A,dim)
# To test RGCCA with random datasets:
# n <- 100
# img <- matrix(rnorm(n*40),n,40)
# snp <- matrix(rnorm(n*15),n,15)
# y <- matrix(rnorm(n*3),n,3)
# A <- list(SNPS=snp,IMGs=img,Y=y)
#
## To much memory required for the computation of the optimal tau
## system.time(res <- rgcca(A,tau="optimal",verbose=TRUE))
## Trying with one possible value for the regularization parameters
n <- nrow(A[[1]])
ind <- sample(n,0.64*n)
Asc <- lapply(A,scale2)
# system.time(res <- sgcca(lapply(Asc, function(mm)mm[ ind,]), c1 = c(0.6,0.6,1),verbose=TRUE,scale=F))
# system.time(res2 <- sgcca(lapply(Asc, function(mm)mm[ ind,]), c1 = c(0.1,0.1,1),verbose=TRUE,scale=F))
# cortest1 <- sgcca.predict(Asc, Asc, res)
# cortest2 <- sgcca.predict(Asc, Asc, res2)
A.train <- lapply(A, function(mm)mm[ ind,,drop=FALSE])
A.train.scaled <- lapply(A.train, scale2)
A.test <- lapply(A, function(mm) mm[-ind,,drop=FALSE])
CC <- 1-diag(3)
model.train1 <- sgcca(A=A.train.scaled,C=CC,c1=c(0.1,0.1,1),scale=F)
model.train2 <- sgcca(A=A.train.scaled,C=CC,c1=c(0.9,0.9,1),scale=F)
## Building the classifier on the first components
## ... and Compute the test correlation
mscortest1 <- sgcca.predict(A.train.scaled, A.test, model.train1)
mscortest2 <- sgcca.predict(A.train.scaled, A.test, model.train2)
sapply(model.train1$a,function(u) sum(u==0))
sapply(model.train2$a,function(u) sum(u==0))
## Cross validation
cl <- tryCatch(clusters.parallel(), error = function(e) NULL)
set.seed(456)
trainmat <- GenerateLearningsets(n,method="CV",fold=5)@learnmatrix
params <- expand.grid(c11 = 1:4/5, c12 = 1:4/5, c13 = 1)
# 1 --> complete design ; 2 --> hierarchical design
C1 <- 1 - diag(3)
C2 <- cbind(c(0,0,1),c(0,0,1),c(1,1,0))
paropt1 <- paropt2 <- matrix(NA,nrow(trainmat),3)
model.train1 <- model.train2 <- NULL
cortest1 <- cortest2 <- rep(NA,nrow(trainmat))
# ### Cross validation loop
for (i in 1:nrow(trainmat)){
cat("itération : ", i, "\n")
## Get train and test datasets for this iteration
ind <- trainmat[i,]
A.train <- lapply(A, function(mm)mm[ ind,,drop=FALSE])
A.train.scaled <- lapply(A.train, scale2)
A.test <- lapply(A, function(mm) mm[-ind,,drop=FALSE])
## Estimating the optimal parameters on the training set
paropt1[i,] <- sgcca.cca.cv(A.train,C=C1,scale=T,params=params,nfold=5, cl=cl)
paropt2[i,] <- sgcca.cca.cv(A.train,C=C2,scale=T,params=params,nfold=5, cl=cl)
model.train1[[i]] <- sgcca(A=A.train.scaled,C=C1,c1=paropt1[i,],scale=F)
model.train2[[i]] <- sgcca(A=A.train.scaled,C=C2,c1=paropt2[i,],scale=F)
## Building the classifier on the first components
## ... and Compute the test correlation
cortest1[i] <- sgcca.predict(A.train.scaled, A.test, model.train1[[i]])
cortest2[i] <- sgcca.predict(A.train.scaled, A.test, model.train2[[i]])
}
### Finally a boxplot to represent
par(mar=c(9,4,4,2)+0.1)
boxplot(data.frame(cortest1,cortest2),
names=c("Complete design","Hierarchical design"),las=2,
main=paste("R squared of the linear model on the test data") ,ylim=0:1)
# Stop the worker cluster.  FIX: `cl` is NULL when clusters.parallel() failed
# in the tryCatch above, and stopCluster(NULL) raises an error -- guard it.
if (!is.null(cl)) stopCluster(cl)
snp_sel1 <- sapply(model.train1,function(m) (m$astar$SNPs !=0)+0 )
snp_sel2 <- sapply(model.train2,function(m) (m$astar$SNPs !=0)+0 )
img_sel1 <- sapply(model.train1,function(m) (m$astar$IMGs !=0)+0 )
img_sel2 <- sapply(model.train2,function(m) (m$astar$IMGs !=0)+0 )
require(irr)
kappam.fleiss(snp_sel1)
kappam.fleiss(snp_sel2)
kappam.fleiss(img_sel1)
kappam.fleiss(img_sel2)
##### generate two models with the optimal weights ####
opt_weights1 <- colMeans(paropt1)
opt_weights2 <- colMeans(paropt2)
opt_model1 <- sgcca(A=A,C=C1,c1=opt_weights1,scale=T)
opt_model2 <- sgcca(A=A,C=C2,c1=opt_weights2,scale=T)
snp_signature1 <- colnames(A$SNPs)[ opt_model1$a$SNPs != 0 ]
snp_signature2 <- colnames(A$SNPs)[ opt_model2$a$SNPs != 0 ]
img_signature1 <- which( opt_model1$a$IMGs != 0 )
img_signature2 <- which( opt_model2$a$IMGs != 0 )
require(oro.nifti)
mask <- readNIfTI("/neurospin/brainomics/2013_imagen_bmi/data/rmask.nii")@.Data
#### Convert the two signatures into two NIfTI files ####
img_opt_model1 <- array(0, dim(mask))
img_opt_model1[mask!=0] = opt_model1$a$IMGs
writeNIfTI(img_opt_model1, 'img_opt_model1')
img_opt_model2 <- array(0, dim(mask))
img_opt_model2[mask!=0] = opt_model2$a$IMGs
writeNIfTI(img_opt_model2, 'img_opt_model2')
library(biomaRt)
snpmart <- useMart("snp", dataset="hsapiens_snp")
ensembl <- useMart("ensembl", dataset = "hsapiens_gene_ensembl")
a1 <- getBM( attributes = c("refsnp_id","ensembl_gene_stable_id","consequence_type_tv","chrom_start"),
filter = "snp_filter",snp_signature1,mart=snpmart)
a2 <- getBM( attributes = c("refsnp_id","ensembl_gene_stable_id","consequence_type_tv","chrom_start"),
filter = "snp_filter",snp_signature2,mart=snpmart)
b1 <- getBM(attributes = c( "ensembl_gene_id","hgnc_symbol","chromosome_name", "band"), filters = "ensembl_gene_id",
values =a1[,"ensembl_gene_stable_id"], mart = ensembl)
b2 <- getBM(attributes = c( "ensembl_gene_id","hgnc_symbol","chromosome_name", "band"), filters = "ensembl_gene_id",
values =a2[,"ensembl_gene_stable_id"], mart = ensembl)
ab1 <- merge(a1, b1, by.x="ensembl_gene_stable_id", by.y="ensembl_gene_id", all=TRUE)
ab2 <- merge(a2, b2, by.x="ensembl_gene_stable_id", by.y="ensembl_gene_id", all=TRUE)
#### represent with a Venn diagram ####
library(venneuler)
xx <- sort(unique(ab1$hgnc_symbol))[-1]
yy <- sort(unique(ab2$hgnc_symbol))[-1]
AB <- intersect(xx,yy)
AA <- setdiff(xx,AB)
BB <- setdiff(yy,AB)
ABtot <- union(xx,yy)
## Count how many clusters contain each combination of letters
counts <- length(ABtot)
## Convert to proportions for venneuler()
ps <- c(length(AA),length(BB),length(AB))/counts
## Calculate the Venn diagram
vd <- venneuler(c(a=ps[1], b = ps[2], "a&b" = ps[3]))
## Plot it!
plot(vd)
require(gplots)
pdf("venn.pdf")
venn(list("Complete"=xx,"Hierarchical"=yy))
dev.off()
write.table(xx,"snpslist_complete.txt",quote=F, row.names = F, col.names = F)
write.table(yy,"snpslist_hierarchical.txt",quote=F, row.names = F, col.names = F)
|
context("utils")
test_that("Testing the file utils.R - .round.choose", {
# Round 41 to the nearest multiple of 5: third argument 1 rounds up,
# 0 rounds down -- presumably a direction flag; confirm in utils.R.
expect_equal(.round.choose(41, 5, 1), 45)
expect_equal(.round.choose(41, 5, 0), 40)
})
test_that("Testing the file utils.R - .background_map_fun", {
# Draw a world-extent box, then overlay the background map; the helper is
# expected to draw via base graphics, so the result carries no ggplot layers.
example_box <- as(raster::extent(-180, +180, -90, +90), "SpatialPolygons")
p <- {
raster::plot(example_box)
.background_map_fun()
}
expect_equal(length(p$layers), 0)
})
test_that("Testing the file utils.R - .quant_function", {
# Expected 10x5 matrix of percentiles (75/85/90/95/98%) for the first ten
# layers of the shared fixture brick `b` (defined in the test helpers).
x <- structure(c(65.4, 84.4, 78, 89.1, 78.8, 77.9, 52.4, 65.3, 58.3,
53, 68.2, 84.9, 82.6, 89.6, 82.4, 84.5, 68.5, 72.8,
66.3, 54.3, 69.6, 85.1, 84.9, 89.8, 84.1, 87.8, 76.6,
76.6, 70.3, 55, 71, 85.4, 87.2, 90.1, 85.9, 91.1, 84.7,
80.3, 74.3, 55.6, 71.8, 85.5, 88.6, 90.2, 87, 93.1, 89.5,
82.6, 76.7, 56), .Dim = c(10L, 5L),
.Dimnames = list(c("X1993.01.01", "X1993.01.02", "X1993.01.03",
"X1993.01.04", "X1993.01.05", "X1993.01.06",
"X1993.01.07", "X1993.01.08", "X1993.01.09",
"X1993.01.10"),
c("75%", "85%", "90%", "95%", "98%")))
# b[[1:10]] selects layers 1-10; round to 1 decimal to match the fixture.
expect_equal(x, round(.quant_function(b[[1:10]]), 1))
})
test_that("Testing the file utils.R - .convert_long_from_180_to_360", {
# -10 degrees in [-180, 180] maps to 350 in [0, 360).
x <- .convert_long_from_180_to_360(-10)
expect_equal(x, 350)
})
test_that("Testing the file utils.R - .transform_raster", {
# Transforming must preserve the total of the cell values for both the
# generic (FWI) and the special-cased (BasisRegions) variable names.
x <- .transform_raster(raster_in = r, variable_name = "FWI")
expect_equal(raster::cellStats(x, "sum"), raster::cellStats(r, "sum"))
x <- .transform_raster(raster_in = r, variable_name = "BasisRegions")
expect_equal(raster::cellStats(x, "sum"), raster::cellStats(r, "sum"))
})
test_that("Testing the file utils.R - .create_rat", {
# Expected raster attribute table: ID/Class factor columns pairing
# ids 1:3 with classes A:C.
x2compare <- structure(list(ID = structure(1:3, .Label = c("1", "2", "3"),
class = "factor"),
Class = structure(1:3, .Label = c("A", "B", "C"),
class = "factor")),
row.names = c(NA, 3L), class = "data.frame")
x <- .create_rat(ids = c(1, 2, 3), classes = c("A", "B", "C"))
expect_equal(x, x2compare)
})
test_that("Testing the file utils.R - .get_layers_for_clima", {
# Select the climatology layers of fixture brick `b` matching 1 January.
x <- .get_layers_for_clima(b = b, raster_date = as.Date("2016-01-01"),
expand = FALSE)
# FIX: use raster::nlayers for consistency -- every other raster call in
# this file is explicitly namespaced, and this also works when the raster
# package is loaded but not attached.
expect_equal(raster::nlayers(x), 3)
})
test_that("Testing the file utils.R - .classify_hazard", {
# Feed the cell-value vectors of the six fixture layers (b[[i]][] extracts
# values) and check the classified total.
x <- .classify_hazard(b[[1]][], b[[2]][], b[[3]][],
b[[4]][], b[[5]][], b[[6]][])
# Check the function does not generate a dummy layer
expect_equal(sum(x), 6)
})
test_that("Testing the file utils.R - .utci_classification", {
# Classify the fixture raster `r`; the expected cell-value total is 120.
x <- .utci_classification(rtp = r)
# Check the function does not generate a dummy layer
expect_equal(raster::cellStats(x, sum)[[1]], 120)
})
| /tests/testthat/test-utils.R | permissive | ecmwf/caliver | R | false | false | 3,222 | r | context("utils")
test_that("Testing the file utils.R - .round.choose", {
# Check whether the results are correct
expect_equal(.round.choose(41, 5, 1), 45)
expect_equal(.round.choose(41, 5, 0), 40)
})
test_that("Testing the file utils.R - .background_map_fun", {
# Check whether the result is correct
example_box <- as(raster::extent(-180, +180, -90, +90), "SpatialPolygons")
p <- {
raster::plot(example_box)
.background_map_fun()
}
expect_equal(length(p$layers), 0)
})
test_that("Testing the file utils.R - .quant_function", {
# Check whether the result is correct
x <- structure(c(65.4, 84.4, 78, 89.1, 78.8, 77.9, 52.4, 65.3, 58.3,
53, 68.2, 84.9, 82.6, 89.6, 82.4, 84.5, 68.5, 72.8,
66.3, 54.3, 69.6, 85.1, 84.9, 89.8, 84.1, 87.8, 76.6,
76.6, 70.3, 55, 71, 85.4, 87.2, 90.1, 85.9, 91.1, 84.7,
80.3, 74.3, 55.6, 71.8, 85.5, 88.6, 90.2, 87, 93.1, 89.5,
82.6, 76.7, 56), .Dim = c(10L, 5L),
.Dimnames = list(c("X1993.01.01", "X1993.01.02", "X1993.01.03",
"X1993.01.04", "X1993.01.05", "X1993.01.06",
"X1993.01.07", "X1993.01.08", "X1993.01.09",
"X1993.01.10"),
c("75%", "85%", "90%", "95%", "98%")))
expect_equal(x, round(.quant_function(b[[1:10]]), 1))
})
test_that("Testing the file utils.R - .convert_long_from_180_to_360", {
x <- .convert_long_from_180_to_360(-10)
expect_equal(x, 350)
})
test_that("Testing the file utils.R - .transform_raster", {
x <- .transform_raster(raster_in = r, variable_name = "FWI")
expect_equal(raster::cellStats(x, "sum"), raster::cellStats(r, "sum"))
x <- .transform_raster(raster_in = r, variable_name = "BasisRegions")
expect_equal(raster::cellStats(x, "sum"), raster::cellStats(r, "sum"))
})
test_that("Testing the file utils.R - .create_rat", {
x2compare <- structure(list(ID = structure(1:3, .Label = c("1", "2", "3"),
class = "factor"),
Class = structure(1:3, .Label = c("A", "B", "C"),
class = "factor")),
row.names = c(NA, 3L), class = "data.frame")
x <- .create_rat(ids = c(1, 2, 3), classes = c("A", "B", "C"))
expect_equal(x, x2compare)
})
test_that("Testing the file utils.R - .get_layers_for_clima", {
# Select the climatology layers of fixture brick `b` matching 1 January.
x <- .get_layers_for_clima(b = b, raster_date = as.Date("2016-01-01"),
expand = FALSE)
# FIX: use raster::nlayers for consistency -- every other raster call in
# this file is explicitly namespaced, and this also works when the raster
# package is loaded but not attached.
expect_equal(raster::nlayers(x), 3)
})
test_that("Testing the file utils.R - .classify_hazard", {
# Check whether the result is correct
x <- .classify_hazard(b[[1]][], b[[2]][], b[[3]][],
b[[4]][], b[[5]][], b[[6]][])
# Check the function does not generate a dummy layer
expect_equal(sum(x), 6)
})
test_that("Testing the file utils.R - .utci_classification", {
# Check whether the result is correct
x <- .utci_classification(rtp = r)
# Check the function does not generate a dummy layer
expect_equal(raster::cellStats(x, sum)[[1]], 120)
})
|
## Cache the inverse of a matrix.
## Matrix inversion is costly, so makeCacheMatrix wraps a matrix in an
## object that can remember its inverse, and cacheSolve computes the
## inverse at most once, serving the memoised value on later calls.
## (FIX: filled in the assignment's placeholder comments.)
## Create a caching wrapper around matrix `x`.  Returns a list of closures
## sharing state: set/get for the matrix itself, setInverse/getInverse for
## the cached inverse.  Storing a new matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()){
inv <- NULL
set <- function(y){
x <<- y
inv <<- NULL
}
get <- function() {x}
setInverse <- function(inverse) {inv <<- inverse}
getInverse <- function() {inv}
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of the matrix held in the cache object `x` (as built by
## makeCacheMatrix).  The first call computes it with solve() and stores it;
## subsequent calls return the cached copy (announced via message()).
## Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
mat <- x$get()
inv <- solve(mat, ...)
x$setInverse(inv)
inv
}
| /cachematrix.R | no_license | Dantigu/ProgrammingAssignment2 | R | false | false | 769 | r | ## Put comments here that give an overall description of what your
## functions do: together they implement a matrix whose inverse is computed
## at most once and then served from a cache.
## Build a cache object for matrix `x`: a list of closures sharing state.
makeCacheMatrix <- function(x = matrix()) {
cached_inverse <- NULL
set <- function(y) {
x <<- y
cached_inverse <<- NULL
}
get <- function() x
setInverse <- function(inverse) cached_inverse <<- inverse
getInverse <- function() cached_inverse
list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Return the inverse of the matrix held in cache object `x`, computing it
## with solve(...) only on a cache miss.
cacheSolve <- function(x, ...) {
cached <- x$getInverse()
if (is.null(cached)) {
cached <- solve(x$get(), ...)
x$setInverse(cached)
} else {
message("getting cached data")
}
cached
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfuncEstim.R
\name{dfuncEstim}
\alias{dfuncEstim}
\title{Estimate a detection function from distance-sampling data}
\usage{
dfuncEstim(
formula,
detectionData,
siteData,
likelihood = "halfnorm",
pointSurvey = FALSE,
w.lo = 0,
w.hi = NULL,
expansions = 0,
series = "cosine",
x.scl = 0,
g.x.scl = 1,
observer = "both",
warn = TRUE,
transectID = NULL,
pointID = "point",
length = "length",
control = RdistanceControls()
)
}
\arguments{
\item{formula}{A standard formula object (e.g., \code{dist ~ 1},
\code{dist ~ covar1 + covar2}). The left-hand side (before \code{~})
is the name of the vector containing distances (off-transect or
radial). The right-hand side (after \code{~})
contains the names of covariate vectors to fit in the detection
function. If covariates do not appear in \code{data}, they must
be found in the parent frame (similar to \code{lm}, \code{glm}, etc.)}
\item{detectionData}{A data frame containing detection distances
(either perpendicular for line-transect or radial for point-transect
designs), with one row per detected object or group.
This data frame must contain at least the following
information:
\itemize{
\item Detection Distances: A single column containing
detection distances must be specified on the left-hand
side of \code{formula}.
\item Site IDs: The ID of the transect or point
(i.e., the 'site') where each object or group was detected.
The site ID column(s) (see arguments \code{transectID} and
\code{pointID}) must
specify the site (transect or point) so that this
data frame can be merged with \code{siteData}.
}
Optionally, this data frame can also contain the following
information:
\itemize{
\item Group Sizes: The number of individuals in the group
associated with each detection. This column is not
required to estimate a distance function. This column
is required to estimate abundance (i.e., in function `abundEstim`).
\item In a later release, \code{Rdistance} will allow detection-level
covariates. When that happens, detection-level
covariates will appear in this data frame.
}
See example data set \code{\link{sparrowDetectionData}}.
See also \bold{Input data frames} below
for information on when \code{detectionData} and
\code{siteData} are required inputs.}
\item{siteData}{A data.frame containing site (transect or point)
IDs and any
\emph{site level} covariates to include in the detection function.
Every unique surveyed site (transect or point) is represented on
one row of this data set, whether or not targets were sighted
at the site. See arguments \code{transectID} and
\code{pointID} for an explanation of site and transect ID's.
If sites are transects,
this data frame must also contain transect length. By
default, transect length is assumed to be in column 'length'
but can be specified using argument \code{length}.
The total number of sites surveyed is \code{nrow(siteData)}.
Duplicate site-level IDs are not allowed in \code{siteData}.
See \bold{Input data frames}
for when \code{detectionData} and \code{siteData} are required inputs.}
\item{likelihood}{String specifying the likelihood to fit. Built-in
likelihoods at present are "uniform", "halfnorm",
"hazrate", "negexp", and "Gamma". See vignette for a way to use
user-defined likelihoods.}
\item{pointSurvey}{A logical scalar specifying whether input data come
from point-transect surveys (TRUE),
or line-transect surveys (FALSE).}
\item{w.lo}{Lower or left-truncation limit of the distances in distance data.
This is the minimum possible off-transect distance. Default is 0.}
\item{w.hi}{Upper or right-truncation limit of the distances
in \code{dist}. This is the maximum off-transect distance that
could be observed. If left unspecified (i.e., at the default of
NULL), right-truncation is set to the maximum of the observed
distances.}
\item{expansions}{A scalar specifying the number of terms
in \code{series} to compute. Depending on the series,
this could be 0 through 5. The default of 0 equates
to no expansion terms of any type. No expansion terms
are allowed (i.e., \code{expansions} is forced to 0) if
covariates are present in the detection function
(i.e., right-hand side of \code{formula} includes
something other than \code{1}).}
\item{series}{If \code{expansions} > 0, this string
specifies the type of expansion to use. Valid values at
present are 'simple', 'hermite', and 'cosine'.}
\item{x.scl}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{g.x.scl}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{observer}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{warn}{A logical scalar specifying whether to issue
an R warning if the estimation did not converge or if one
or more parameter estimates are at their boundaries.
For estimation, \code{warn} should generally be left at
its default value of \code{TRUE}. When computing bootstrap
confidence intervals, setting \code{warn = FALSE}
turns off annoying warnings when an iteration does
not converge. Regardless of \code{warn}, messages about
convergence and boundary conditions are printed
by \code{print.dfunc}, \code{print.abund}, and
\code{plot.dfunc}, so there should be little harm in
setting \code{warn = FALSE}.}
\item{transectID}{A character vector naming the transect ID column(s) in
\code{detectionData} and \code{siteData}. \code{Rdistance}
accommodates two kinds of transects: continuous and point.
When continuous transects are used, detections can occur at
any point along the route and these are generally called
line-transects. When point transects are used,
detections can only occur at a series of stops (points)
along the route and are generally called point-transects.
Transects themselves are the
basic sampling unit when \code{pointSurvey}=FALSE and
are synonymous with sites in this case. Transects
may contain multiple sampling
units (i.e., points) when \code{pointSurvey}=TRUE.
For line-transects, the \code{transectID} column(s) alone is
sufficient to specify unique sample sites.
For point-transects, the combination of \code{transectID} and
\code{pointID} specify unique sampling sites.
See \bold{Input data frames} below.}
\item{pointID}{When point-transects are used, this is the
ID of points on a transect. When \code{pointSurvey}=TRUE,
the combination of \code{transectID} and
\code{pointID} specify unique sampling sites.
See \bold{Input data frames}.
If single points are surveyed,
meaning surveyed points were not grouped into transects, each 'transect' consists
of one point. In this case, set \code{transectID} equal to
the point's ID and set \code{pointID} equal to 1 for all points.}
\item{length}{Character string specifying the (single) column in
\code{siteData} that contains transect length. This is ignored if
\code{pointSurvey} = TRUE.}
\item{control}{A list containing optimization control parameters such
as the maximum number of iterations, tolerance, the optimizer to use,
etc. See the
\code{\link{RdistanceControls}} function for explanation of each value,
the defaults, and the requirements for this list.
See examples below for how to change controls.}
}
\value{
An object of class 'dfunc'. Objects of class 'dfunc'
are lists containing the following components:
\item{parameters}{The vector of estimated parameter values.
Length of this vector for built-in likelihoods is one
(for the function's parameter) plus the
number of expansion terms plus one if the likelihood is
either 'hazrate' or 'uniform' (hazrate and uniform have
two parameters). }
\item{varcovar}{The variance-covariance matrix for coefficients
of the distance function, estimated by the inverse of the Hessian
of the fit evaluated at the estimates. There is no guarantee this
matrix is positive-definite and should be viewed with caution.
Error estimates derived from bootstrapping are generally more reliable.}
\item{loglik}{The maximized value of the log likelihood
(more specifically, the minimized value of the negative
log likelihood).}
\item{convergence}{The convergence code. This code
is returned by \code{optim}. Values other than 0 indicate suspect
convergence.}
\item{like.form}{The name of the likelihood. This is
the value of the argument \code{likelihood}. }
\item{w.lo}{Left-truncation value used during the fit.}
\item{w.hi}{Right-truncation value used during the fit.}
\item{dist}{The input vector of observed distances.}
\item{covars}{A \code{model.matrix} containing the covariates
used in the fit. }
\item{expansions}{The number of expansion terms used during estimation.}
\item{series}{The type of expansion used during estimation.}
\item{call}{The original call of this function.}
\item{call.x.scl}{The distance at which the distance function
is scaled. This is the x at which g(x) = \code{g.x.scl}.
Normally, \code{call.x.scl} = 0.}
\item{call.g.x.scl}{The value of the distance function at distance
\code{call.x.scl}. Normally, \code{call.g.x.scl} = 1.}
\item{call.observer}{The value of input parameter \code{observer}.}
\item{fit}{The fitted object returned by \code{optim}.
See documentation for \code{optim}.}
\item{factor.names}{The names of any factors in \code{formula}}
\item{pointSurvey}{The input value of \code{pointSurvey}.
This is TRUE if distances are radial from a point. FALSE
if distances are perpendicular off-transect. }
\item{formula}{The formula specified for the detection function.}
}
\description{
Fit a specific detection function to off-transect
or off-point (radial) distances.
}
\section{Input data frames}{
To save space and to easily specify
sites without detections,
all site ID's, regardless of whether a detection occurred there,
and \emph{site level} covariates are stored in
the \code{siteData} data frame. Detection distances and group
sizes are measured at the \emph{detection level} and
are stored in the
\code{detectionData} data frame.
\subsection{Data frame requirements}{The following explains
conditions under which various combinations of the input data frames
are required.
\enumerate{
\item \bold{Detection data and site data both required:}\cr
Both \code{detectionData} and \code{siteData}
are required if \emph{site level} covariates are
specified on the right-hand side of \code{formula}.
\emph{Detection level} covariates are not currently allowed.
\item \bold{Detection data only required:}\cr
The \code{detectionData} data frame alone can be
specified if no covariates
are included in the distance function (i.e., right-hand side of
\code{formula} is "~1"). Note that this routine (\code{dfuncEstim})
does not need to know about sites where zero targets were detected, hence
\code{siteData} can be missing when no covariates are involved.
\item \bold{Neither detection data nor site data required}\cr
Neither \code{detectionData} nor \code{siteData}
are required if all variables specified in \code{formula}
are within the scope of this routine (e.g., in the global working
environment). Scoping rules here work the same as for other modeling
routines in R such as \code{lm} and \code{glm}. Like other modeling
routines, it is possible to mix and match the location of variables in
the model. Some variables can be in the \code{.GlobalEnv} while others
are in either \code{detectionData} or \code{siteData}.
}
}
\subsection{Relationship between data frames (transect and point ID's)}{
The input data frames, \code{detectionData} and \code{siteData},
must be merge-able on unique sites. For line-transects,
site ID's specify transects or routes and are unique values of
the \code{transectID} column in \code{siteData}. In this case,
the following merge must work:
\code{merge(detectionData,siteData,by=transectID)}.
For point-transects,
site ID's specify individual points and are unique values
of the combination \code{paste(transectID,pointID)}.
In this case, the following merge must work:
\code{merge(detectionData,siteData,by=c(transectID, pointID))}.
By default, \code{transectID} and \code{pointID} are NULL and
the merge is done on all common columns.
That is, when \code{transectID} is NULL, this routine assumes unique
\emph{transects} are specified by unique combinations of the
common variables (i.e., unique values of
\code{intersect(names(detectionData), names(siteData))}).
An error occurs if there are no common column names between
\code{detectionData} and \code{siteData}.
Duplicate site IDs are not allowed in \code{siteData}.
If the same site is surveyed in
multiple years, specify another transect ID column (e.g., \code{transectID =
c("year","transectID")}). Duplicate site ID's are allowed in
\code{detectionData}.
To help envision the relationship between data frames, bear in
mind that during bootstrap estimation of variance
in \code{\link{abundEstim}},
unique \emph{transects} (i.e., unique values of
the transect ID column(s)), not \emph{detections} or
\emph{points}, are resampled with replacement.
}
}
\section{Likelihood functions}{
Given a specified sighting function (e.g., "halfnorm"),
maximum likelihood is used to estimate the parameter(s) of
the function (e.g., the standard deviation) that best fit the distance data.
When plotted (see Examples), histogram bins are plotted
behind the detection
function for visualization; however, the function is fit to
the actual data, not to the bins.
}
\examples{
# Load example sparrow data (line transect survey type)
data(sparrowDetectionData)
data(sparrowSiteData)
# Fit half-normal detection function
dfunc <- dfuncEstim(formula=dist~1,
detectionData=sparrowDetectionData,
likelihood="halfnorm", w.hi=100)
# Fit a second half-normal detection function, now including
# a categorical covariate for observer who surveyed the site (factor, 5 levels)
# Increase maximum iterations
dfuncObs <- dfuncEstim(formula=dist~observer,
detectionData=sparrowDetectionData,
siteData=sparrowSiteData,
likelihood="halfnorm", w.hi=100, pointSurvey=FALSE,
control=RdistanceControls(maxIter=1000))
# Print results
# And plot the detection function for each observer
dfuncObs
plot(dfuncObs,
newdata=data.frame(observer=levels(sparrowSiteData$observer)))
# Show some plotting options
plot(dfuncObs,
newdata=data.frame(observer=levels(sparrowSiteData$observer)),
vertLines = FALSE, lty=c(1,1),
col.dfunc=heat.colors(length(levels(sparrowSiteData$observer))),
col=c("grey","lightgrey"), border=NA,
xlab="Distance (m)",
main="Showing plot options")
}
\references{
Buckland, S.T., D.R. Anderson, K.P. Burnham, J.L. Laake, D.L. Borchers,
and L. Thomas. (2001) \emph{Introduction to distance sampling: estimating
abundance of biological populations}. Oxford University Press, Oxford, UK.
}
\seealso{
\code{\link{abundEstim}}, \code{\link{autoDistSamp}}.
See likelihood-specific help files (e.g., \code{\link{halfnorm.like}}) for
details on each built-in likelihood. See package vignettes for information on custom,
user-defined likelihoods.
}
\author{
Trent McDonald, WEST Inc., \email{tmcdonald@west-inc.com}\cr
Jason Carlisle, University of Wyoming and WEST Inc., \email{jcarlisle@west-inc.com}\cr
Aidan McDonald, WEST Inc., \email{aidan@mcdcentral.org}
}
\keyword{model}
| /man/dfuncEstim.Rd | no_license | wmcdonald1/Rdistance | R | false | true | 15,982 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfuncEstim.R
\name{dfuncEstim}
\alias{dfuncEstim}
\title{Estimate a detection function from distance-sampling data}
\usage{
dfuncEstim(
formula,
detectionData,
siteData,
likelihood = "halfnorm",
pointSurvey = FALSE,
w.lo = 0,
w.hi = NULL,
expansions = 0,
series = "cosine",
x.scl = 0,
g.x.scl = 1,
observer = "both",
warn = TRUE,
transectID = NULL,
pointID = "point",
length = "length",
control = RdistanceControls()
)
}
\arguments{
\item{formula}{A standard formula object (e.g., \code{dist ~ 1},
\code{dist ~ covar1 + covar2}). The left-hand side (before \code{~})
is the name of the vector containing distances (off-transect or
radial). The right-hand side (after \code{~})
contains the names of covariate vectors to fit in the detection
function. If covariates do not appear in \code{data}, they must
be found in the parent frame (similar to \code{lm}, \code{glm}, etc.)}
\item{detectionData}{A data frame containing detection distances
(either perpendicular for line-transect or radial for point-transect
designs), with one row per detected object or group.
This data frame must contain at least the following
information:
\itemize{
\item Detection Distances: A single column containing
detection distances must be specified on the left-hand
side of \code{formula}.
\item Site IDs: The ID of the transect or point
(i.e., the 'site') where each object or group was detected.
The site ID column(s) (see arguments \code{transectID} and
\code{pointID}) must
specify the site (transect or point) so that this
data frame can be merged with \code{siteData}.
}
Optionally, this data frame can also contain the following
information:
\itemize{
\item Group Sizes: The number of individuals in the group
associated with each detection. This column is not
required to estimate a distance function. This column
is required to estimate abundance (i.e., in function `abundEstim`).
\item In a later release, \code{Rdistance} will allow detection-level
covariates. When that happens, detection-level
covariates will appear in this data frame.
}
See example data set \code{\link{sparrowDetectionData}}.
See also \bold{Input data frames} below
for information on when \code{detectionData} and
\code{siteData} are required inputs.}
\item{siteData}{A data.frame containing site (transect or point)
IDs and any
\emph{site level} covariates to include in the detection function.
Every unique surveyed site (transect or point) is represented on
one row of this data set, whether or not targets were sighted
at the site. See arguments \code{transectID} and
\code{pointID} for an explanation of site and transect ID's.
If sites are transects,
this data frame must also contain transect length. By
default, transect length is assumed to be in column 'length'
but can be specified using argument \code{length}.
The total number of sites surveyed is \code{nrow(siteData)}.
Duplicate site-level IDs are not allowed in \code{siteData}.
See \bold{Input data frames}
for when \code{detectionData} and \code{siteData} are required inputs.}
\item{likelihood}{String specifying the likelihood to fit. Built-in
likelihoods at present are "uniform", "halfnorm",
"hazrate", "negexp", and "Gamma". See vignette for a way to use
user-defined likelihoods.}
\item{pointSurvey}{A logical scalar specifying whether input data come
from point-transect surveys (TRUE),
or line-transect surveys (FALSE).}
\item{w.lo}{Lower or left-truncation limit of the distances in distance data.
This is the minimum possible off-transect distance. Default is 0.}
\item{w.hi}{Upper or right-truncation limit of the distances
in \code{dist}. This is the maximum off-transect distance that
could be observed. If left unspecified (i.e., at the default of
NULL), right-truncation is set to the maximum of the observed
distances.}
\item{expansions}{A scalar specifying the number of terms
in \code{series} to compute. Depending on the series,
this could be 0 through 5. The default of 0 equates
to no expansion terms of any type. No expansion terms
are allowed (i.e., \code{expansions} is forced to 0) if
covariates are present in the detection function
(i.e., right-hand side of \code{formula} includes
something other than \code{1}).}
\item{series}{If \code{expansions} > 0, this string
specifies the type of expansion to use. Valid values at
present are 'simple', 'hermite', and 'cosine'.}
\item{x.scl}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{g.x.scl}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{observer}{This parameter is passed to \code{F.gx.estim}.
See \code{F.gx.estim} documentation for definition.}
\item{warn}{A logical scalar specifying whether to issue
an R warning if the estimation did not converge or if one
or more parameter estimates are at their boundaries.
For estimation, \code{warn} should generally be left at
its default value of \code{TRUE}. When computing bootstrap
confidence intervals, setting \code{warn = FALSE}
turns off annoying warnings when an iteration does
not converge. Regardless of \code{warn}, messages about
convergence and boundary conditions are printed
by \code{print.dfunc}, \code{print.abund}, and
\code{plot.dfunc}, so there should be little harm in
setting \code{warn = FALSE}.}
\item{transectID}{A character vector naming the transect ID column(s) in
\code{detectionData} and \code{siteData}. \code{Rdistance}
accommodates two kinds of transects: continuous and point.
When continuous transects are used, detections can occur at
any point along the route and these are generally called
line-transects. When point transects are used,
detections can only occur at a series of stops (points)
along the route and are generally called point-transects.
Transects themselves are the
basic sampling unit when \code{pointSurvey}=FALSE and
are synonymous with sites in this case. Transects
may contain multiple sampling
units (i.e., points) when \code{pointSurvey}=TRUE.
For line-transects, the \code{transectID} column(s) alone is
sufficient to specify unique sample sites.
For point-transects, the combination of \code{transectID} and
\code{pointID} specify unique sampling sites.
See \bold{Input data frames} below.}
\item{pointID}{When point-transects are used, this is the
ID of points on a transect. When \code{pointSurvey}=TRUE,
the combination of \code{transectID} and
\code{pointID} specify unique sampling sites.
See \bold{Input data frames}.
If single points are surveyed,
meaning surveyed points were not grouped into transects, each 'transect' consists
of one point. In this case, set \code{transectID} equal to
the point's ID and set \code{pointID} equal to 1 for all points.}
\item{length}{Character string specifying the (single) column in
\code{siteData} that contains transect length. This is ignored if
\code{pointSurvey} = TRUE.}
\item{control}{A list containing optimization control parameters such
as the maximum number of iterations, tolerance, the optimizer to use,
etc. See the
\code{\link{RdistanceControls}} function for explanation of each value,
the defaults, and the requirements for this list.
See examples below for how to change controls.}
}
\value{
An object of class 'dfunc'. Objects of class 'dfunc'
are lists containing the following components:
\item{parameters}{The vector of estimated parameter values.
Length of this vector for built-in likelihoods is one
(for the function's parameter) plus the
number of expansion terms plus one if the likelihood is
either 'hazrate' or 'uniform' (hazrate and uniform have
two parameters). }
\item{varcovar}{The variance-covariance matrix for coefficients
of the distance function, estimated by the inverse of the Hessian
of the fit evaluated at the estimates. There is no guarantee this
matrix is positive-definite and should be viewed with caution.
Error estimates derived from bootstrapping are generally more reliable.}
\item{loglik}{The maximized value of the log likelihood
(more specifically, the minimized value of the negative
log likelihood).}
\item{convergence}{The convergence code. This code
is returned by \code{optim}. Values other than 0 indicate suspect
convergence.}
\item{like.form}{The name of the likelihood. This is
the value of the argument \code{likelihood}. }
\item{w.lo}{Left-truncation value used during the fit.}
\item{w.hi}{Right-truncation value used during the fit.}
\item{dist}{The input vector of observed distances.}
\item{covars}{A \code{model.matrix} containing the covariates
used in the fit. }
\item{expansions}{The number of expansion terms used during estimation.}
\item{series}{The type of expansion used during estimation.}
\item{call}{The original call of this function.}
\item{call.x.scl}{The distance at which the distance function
is scaled. This is the x at which g(x) = \code{g.x.scl}.
Normally, \code{call.x.scl} = 0.}
\item{call.g.x.scl}{The value of the distance function at distance
\code{call.x.scl}. Normally, \code{call.g.x.scl} = 1.}
\item{call.observer}{The value of input parameter \code{observer}.}
\item{fit}{The fitted object returned by \code{optim}.
See documentation for \code{optim}.}
\item{factor.names}{The names of any factors in \code{formula}}
\item{pointSurvey}{The input value of \code{pointSurvey}.
This is TRUE if distances are radial from a point. FALSE
if distances are perpendicular off-transect. }
\item{formula}{The formula specified for the detection function.}
}
\description{
Fit a specific detection function to off-transect
or off-point (radial) distances.
}
\section{Input data frames}{
To save space and to easily specify
sites without detections,
all site ID's, regardless of whether a detection occurred there,
and \emph{site level} covariates are stored in
the \code{siteData} data frame. Detection distances and group
sizes are measured at the \emph{detection level} and
are stored in the
\code{detectionData} data frame.
\subsection{Data frame requirements}{The following explains
conditions under which various combinations of the input data frames
are required.
\enumerate{
\item \bold{Detection data and site data both required:}\cr
Both \code{detectionData} and \code{siteData}
are required if \emph{site level} covariates are
specified on the right-hand side of \code{formula}.
\emph{Detection level} covariates are not currently allowed.
\item \bold{Detection data only required:}\cr
The \code{detectionData} data frame alone can be
specified if no covariates
are included in the distance function (i.e., right-hand side of
\code{formula} is "~1"). Note that this routine (\code{dfuncEstim})
does not need to know about sites where zero targets were detected, hence
\code{siteData} can be missing when no covariates are involved.
\item \bold{Neither detection data nor site data required}\cr
Neither \code{detectionData} nor \code{siteData}
are required if all variables specified in \code{formula}
are within the scope of this routine (e.g., in the global working
environment). Scoping rules here work the same as for other modeling
routines in R such as \code{lm} and \code{glm}. Like other modeling
routines, it is possible to mix and match the location of variables in
the model. Some variables can be in the \code{.GlobalEnv} while others
are in either \code{detectionData} or \code{siteData}.
}
}
\subsection{Relationship between data frames (transect and point ID's)}{
The input data frames, \code{detectionData} and \code{siteData},
must be merge-able on unique sites. For line-transects,
site ID's specify transects or routes and are unique values of
the \code{transectID} column in \code{siteData}. In this case,
the following merge must work:
\code{merge(detectionData,siteData,by=transectID)}.
For point-transects,
site ID's specify individual points and are unique values
of the combination \code{paste(transectID,pointID)}.
In this case, the following merge must work:
\code{merge(detectionData,siteData,by=c(transectID, pointID))}.
By default, \code{transectID} and \code{pointID} are NULL and
the merge is done on all common columns.
That is, when \code{transectID} is NULL, this routine assumes unique
\emph{transects} are specified by unique combinations of the
common variables (i.e., unique values of
\code{intersect(names(detectionData), names(siteData))}).
An error occurs if there are no common column names between
\code{detectionData} and \code{siteData}.
Duplicate site IDs are not allowed in \code{siteData}.
If the same site is surveyed in
multiple years, specify another transect ID column (e.g., \code{transectID =
c("year","transectID")}). Duplicate site ID's are allowed in
\code{detectionData}.
To help envision the relationship between data frames, bear in
mind that during bootstrap estimation of variance
in \code{\link{abundEstim}},
unique \emph{transects} (i.e., unique values of
the transect ID column(s)), not \emph{detections} or
\emph{points}, are resampled with replacement.
}
}
\section{Likelihood functions}{
Given a specified sighting function (e.g., "halfnorm"),
maximum likelihood is used to estimate the parameter(s) of
the function (e.g., the standard deviation) that best fit the distance data.
When plotted (see Examples), histogram bins are plotted
behind the detection
function for visualization; however, the function is fit to
the actual data, not to the bins.
}
\examples{
# Load example sparrow data (line transect survey type)
data(sparrowDetectionData)
data(sparrowSiteData)
# Fit half-normal detection function
dfunc <- dfuncEstim(formula=dist~1,
detectionData=sparrowDetectionData,
likelihood="halfnorm", w.hi=100)
# Fit a second half-normal detection function, now including
# a categorical covariate for observer who surveyed the site (factor, 5 levels)
# Increase maximum iterations
dfuncObs <- dfuncEstim(formula=dist~observer,
detectionData=sparrowDetectionData,
siteData=sparrowSiteData,
likelihood="halfnorm", w.hi=100, pointSurvey=FALSE,
control=RdistanceControls(maxIter=1000))
# Print results
# And plot the detection function for each observer
dfuncObs
plot(dfuncObs,
newdata=data.frame(observer=levels(sparrowSiteData$observer)))
# Show some plotting options
plot(dfuncObs,
newdata=data.frame(observer=levels(sparrowSiteData$observer)),
vertLines = FALSE, lty=c(1,1),
col.dfunc=heat.colors(length(levels(sparrowSiteData$observer))),
col=c("grey","lightgrey"), border=NA,
xlab="Distance (m)",
main="Showing plot options")
}
\references{
Buckland, S.T., D.R. Anderson, K.P. Burnham, J.L. Laake, D.L. Borchers,
and L. Thomas. (2001) \emph{Introduction to distance sampling: estimating
abundance of biological populations}. Oxford University Press, Oxford, UK.
}
\seealso{
\code{\link{abundEstim}}, \code{\link{autoDistSamp}}.
See likelihood-specific help files (e.g., \code{\link{halfnorm.like}}) for
details on each built-in likelihood. See package vignettes for information on custom,
user-defined likelihoods.
}
\author{
Trent McDonald, WEST Inc., \email{tmcdonald@west-inc.com}\cr
Jason Carlisle, University of Wyoming and WEST Inc., \email{jcarlisle@west-inc.com}\cr
Aidan McDonald, WEST Inc., \email{aidan@mcdcentral.org}
}
\keyword{model}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Create a special "matrix" object that can cache its inverse.
  #
  # The returned value is a list of four closures sharing this function's
  # environment:
  #   set(y)          - replace the stored matrix and invalidate the cache
  #   get()           - return the stored matrix
  #   setinverse(inv) - store a computed inverse in the cache
  #   getinverse()    - return the cached inverse, or NULL if not yet set
  #
  # Fix: the original body opened a stray extra `{` that was never closed,
  # so the function did not parse (3 opening vs 2 closing braces).
  inve <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inve <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) inve <<- inverse
  getinverse <- function() inve
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'.
  ##
  ## x   : cache object produced by makeCacheMatrix()
  ## ... : further arguments passed on to solve()
  ##
  ## If the inverse is already cached it is returned directly; otherwise it
  ## is computed with solve(), stored in the cache, and returned.
  inve <- x$getinverse()
  if (!is.null(inve)) {
    message(" reaching cached data ")
    return(inve)
  }
  data <- x$get()
  inve <- solve(data, ...)
  x$setinverse(inve)
  # Fix: return the inverse explicitly. The original fell off the end,
  # returning whatever x$setinverse() returned -- an invisible assignment
  # value -- so the freshly computed inverse was returned invisibly.
  inve
}
| /cachematrix.R | no_license | bsafaker/ProgrammingAssignment2 | R | false | false | 736 | r |
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  # Create a special "matrix" object that can cache its inverse.
  #
  # The returned value is a list of four closures sharing this function's
  # environment:
  #   set(y)          - replace the stored matrix and invalidate the cache
  #   get()           - return the stored matrix
  #   setinverse(inv) - store a computed inverse in the cache
  #   getinverse()    - return the cached inverse, or NULL if not yet set
  #
  # Fix: the original body opened a stray extra `{` that was never closed,
  # so the function did not parse (3 opening vs 2 closing braces).
  inve <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    inve <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() x
  setinverse <- function(inverse) inve <<- inverse
  getinverse <- function() inve
  list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'.
  ##
  ## x   : cache object produced by makeCacheMatrix()
  ## ... : further arguments passed on to solve()
  ##
  ## If the inverse is already cached it is returned directly; otherwise it
  ## is computed with solve(), stored in the cache, and returned.
  inve <- x$getinverse()
  if (!is.null(inve)) {
    message(" reaching cached data ")
    return(inve)
  }
  data <- x$get()
  inve <- solve(data, ...)
  x$setinverse(inve)
  # Fix: return the inverse explicitly. The original fell off the end,
  # returning whatever x$setinverse() returned -- an invisible assignment
  # value -- so the freshly computed inverse was returned invisibly.
  inve
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/price-roundness.R
\name{plot.bid.patterns}
\alias{plot.bid.patterns}
\title{Plot round numbers in bid prices.}
\usage{
\method{plot}{bid.patterns}(bids)
}
\arguments{
\item{bids}{Data frame of bids, with the columns "currency" and "contract.number"}
}
\value{
A ggplot plot
}
\description{
Unusually round bid prices raise suspicion of bid rigging.
Given a data frame of bids, this function plots the number of
round bids and the number of total bids for each contract.
}
| /man/plot.bid.patterns.Rd | no_license | tlevine/red-flags | R | false | false | 560 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/price-roundness.R
\name{plot.bid.patterns}
\alias{plot.bid.patterns}
\title{Plot round numbers in bid prices.}
\usage{
\method{plot}{bid.patterns}(bids)
}
\arguments{
\item{bids}{Data frame of bids, with the columns "currency" and "contract.number"}
}
\value{
A ggplot plot
}
\description{
Unusually round bid prices raise suspicioun of bid rigging.
Given a data frame of bids, this function plots the number of
round bids and the number of total bids for each contract.
}
|
# Integration tests for delivering mail through SMTP transports.
# NOTE(review): SMTP_USERNAME, TXTPATH, JPGPATH, REGEX_BARE_LINEFEED and the
# transports smtp / smtp_insecure / smtp_gmail are not defined in this file --
# presumably they come from the testthat setup/helper files; confirm there.
#
# Shared fixtures.
# msg: a minimal valid message carrying both a sender and a recipient.
msg <- envelope() %>%
from(SMTP_USERNAME) %>%
to(SMTP_USERNAME)
# msg_no_recipient: sender only; used to exercise the missing-recipient error.
msg_no_recipient <- envelope() %>%
from(SMTP_USERNAME)
# msg_no_sender: recipient only; used to exercise the missing-sender error.
msg_no_sender <- envelope() %>%
to(SMTP_USERNAME)
# The transport object is itself a function (closure) that sends a message.
test_that("server type", {
expect_type(smtp, "closure")
})
# Sending without a sender must fail with an informative error.
test_that("error if sender missing", {
skip_on_cran()
expect_error(smtp(msg_no_sender), "Must specify who the email is from.")
})
# Sending without any recipient must fail with an informative error.
test_that("error if recipient missing", {
skip_on_cran()
expect_error(smtp(msg_no_recipient), "Must specify at least one email recipient.")
})
# In the tests below, expect_error(..., NA) asserts that NO error is raised.
test_that("sends text message", {
msg <- msg %>%
subject("Text body") %>%
text("Hello, World!")
skip_on_cran()
expect_error(smtp(msg), NA)
})
test_that("sends message with insecure = TRUE", {
skip_on_cran()
expect_error(smtp_insecure(msg), NA)
})
# SSL delivery is skipped on CI as well as on CRAN.
test_that("sends with SSL", {
skip_on_cran()
skip_on_ci()
expect_error(smtp_gmail(msg %>% subject("{emayili} test")), NA)
})
test_that("sends HTML message", {
msg <- msg %>%
subject("HTML body") %>%
html("<p><strong>Hello</strong>, <em>World</em>! You can also <u>underline</u> text.</p>")
skip_on_cran()
expect_error(smtp(msg), NA)
})
test_that("sends message with text attachment", {
msg <- msg %>%
attachment(TXTPATH)
skip_on_cran()
expect_error(smtp(msg %>% subject("{emayili} test: Text attachment")), NA)
})
test_that("sends message with image attachment", {
msg <- msg %>%
attachment(JPGPATH)
skip_on_cran()
expect_error(smtp(msg %>% subject("{emayili} test: Image attachment")), NA)
})
# The attachment is referenced from the HTML body via its Content-ID (CID).
test_that("sends message with image attachment (using CID)", {
msg <- msg %>%
html('<img src="cid:r-logo"/>') %>%
attachment(JPGPATH, cid = "r-logo", type = "image/jpg")
skip_on_cran()
expect_error(smtp(msg %>% subject("{emayili} test: Image attachment (using CID)")), NA)
})
# verbose = TRUE should surface the SMTP conversation; the default is silent.
test_that("verbose output", {
skip_on_cran()
expect_match(
capture.output(smtp(msg, verbose = TRUE), type = "message") %>%
paste(collapse = "\n"),
"250 Message accepted",
fixed = TRUE
)
expect_length(capture.output(smtp(msg), type = "message"), 0)
})
# A rendered message must contain no bare line feeds (see REGEX_BARE_LINEFEED).
test_that("replace bare line feeds", {
msg <- envelope() %>% render("Hello!")
expect_false(as.character(msg) %>% str_detect(REGEX_BARE_LINEFEED))
})
# Message fixtures shared by the tests below.
# (Dataset-extraction metadata that was fused onto this line has been removed;
# it was not part of the original source file.)

# A complete message: sender and recipient both set, so it should send cleanly.
msg <- envelope() %>%
  from(SMTP_USERNAME) %>%
  to(SMTP_USERNAME)

# Sender only — used to exercise smtp()'s missing-recipient validation.
msg_no_recipient <- envelope() %>%
  from(SMTP_USERNAME)

# Recipient only — used to exercise smtp()'s missing-sender validation.
msg_no_sender <- envelope() %>%
  to(SMTP_USERNAME)
# smtp is expected to be a function (server constructors return closures).
test_that("server type", {
  expect_type(smtp, "closure")
})

# smtp() must refuse to send a message that has no sender.
test_that("error if sender missing", {
  skip_on_cran()
  expect_error(smtp(msg_no_sender), regexp = "Must specify who the email is from.")
})

# smtp() must refuse to send a message that has no recipient.
test_that("error if recipient missing", {
  skip_on_cran()
  expect_error(smtp(msg_no_recipient), regexp = "Must specify at least one email recipient.")
})
# A plain-text message should be delivered without signalling an error
# (expect_error(..., NA) asserts that NO error is raised).
test_that("sends text message", {
  msg_text <- text(subject(msg, "Text body"), "Hello, World!")
  skip_on_cran()
  expect_error(smtp(msg_text), NA)
})

# The insecure server variant should also deliver without error.
test_that("sends message with insecure = TRUE", {
  skip_on_cran()
  # regexp = NA means "expect no error".
  expect_error(smtp_insecure(msg), regexp = NA)
})

# SSL delivery via the Gmail server fixture; skipped on CI as well as CRAN.
test_that("sends with SSL", {
  skip_on_cran()
  skip_on_ci()
  expect_error(smtp_gmail(subject(msg, "{emayili} test")), NA)
})
# An HTML-bodied message should be delivered without error.
test_that("sends HTML message", {
  body_html <- "<p><strong>Hello</strong>, <em>World</em>! You can also <u>underline</u> text.</p>"
  msg_html <- html(subject(msg, "HTML body"), body_html)
  skip_on_cran()
  expect_error(smtp(msg_html), NA)
})
# Attaching a text file must not break delivery.
test_that("sends message with text attachment", {
  msg_txt <- attachment(msg, TXTPATH)
  skip_on_cran()
  expect_error(smtp(subject(msg_txt, "{emayili} test: Text attachment")), NA)
})

# Attaching a binary (image) file must not break delivery.
test_that("sends message with image attachment", {
  msg_img <- attachment(msg, JPGPATH)
  skip_on_cran()
  expect_error(smtp(subject(msg_img, "{emayili} test: Image attachment")), NA)
})

# An attachment referenced inline from the HTML body via its Content-ID.
test_that("sends message with image attachment (using CID)", {
  msg_cid <- attachment(
    html(msg, '<img src="cid:r-logo"/>'),
    JPGPATH,
    cid = "r-logo",
    type = "image/jpg"
  )
  skip_on_cran()
  expect_error(smtp(subject(msg_cid, "{emayili} test: Image attachment (using CID)")), NA)
})
# verbose = TRUE should emit the SMTP transcript on the message stream;
# without it, nothing should be written there.
test_that("verbose output", {
  skip_on_cran()
  transcript <- paste(
    capture.output(smtp(msg, verbose = TRUE), type = "message"),
    collapse = "\n"
  )
  expect_match(transcript, "250 Message accepted", fixed = TRUE)
  expect_length(capture.output(smtp(msg), type = "message"), 0)
})
# A rendered message must not contain bare line feeds (LF without CR),
# which are forbidden in SMTP payloads.
test_that("replace bare line feeds", {
  rendered <- render(envelope(), "Hello!")
  expect_false(str_detect(as.character(rendered), REGEX_BARE_LINEFEED))
})
# (Removed: dataset-viewer residue — "Subsets and Splits" boilerplate — that was
# appended by the extraction pipeline and is not part of the original test file.)