content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Choropleth maps of COVID-19 statistics across Indian states.
# Reads a shapefile of Indian administrative boundaries plus a CSV of
# per-state statistics (both chosen interactively), merges them on the
# state id, and draws four maps arranged on one page.
library(ggplot2)
library(RColorBrewer)
library(ggmap)
library(maps)
library(rgdal)
library(scales)
library(maptools)
library(gridExtra)
library(rgeos)

# Load the shapefile interactively and inspect its attribute columns.
states_shape = readOGR(file.choose())
class(states_shape)
names(states_shape)
print(states_shape$ID_1)
print(states_shape$NAME_1)
plot(states_shape, main = "Administrative Map of India")

# CSV with per-state statistics; it must contain an 'id' column whose
# values match ID_1 in the shapefile.
dat = read.csv(file.choose())

# Flatten the spatial polygons to a plain data frame keyed by state id,
# join in the statistics, and restore the polygon drawing order so the
# state outlines render correctly.
fortify_shape = fortify(states_shape, region = "ID_1")
class(fortify_shape)
Merged_data = merge(fortify_shape, dat, by = "id", all.x = TRUE)
Map_plot = Merged_data[order(Merged_data$order), ]

plot1 = ggplot() +
  geom_polygon(data = Map_plot,
               aes(x = long, y = lat, group = group, fill = Total.Cases),
               color = "red", size = 0.2) +
  coord_map() +
  scale_fill_distiller(name = "Total.Cases", palette = "Reds",
                       breaks = pretty_breaks(n = 9)) +
  theme_nothing(legend = TRUE) +
  labs(title = " Total Cases in India")
plot1

# BUG FIX: the legend titles of the following three plots previously all
# read "Total.Cases"; each legend now names the variable it actually maps.
plot2 = ggplot() +
  geom_polygon(data = Map_plot,
               aes(x = long, y = lat, group = group, fill = Active.Cases),
               color = "blue", size = 0.2) +
  coord_map() +
  scale_fill_distiller(name = "Active.Cases", palette = "Blues",
                       breaks = pretty_breaks(n = 9)) +
  theme_nothing(legend = TRUE) +
  labs(title = " Active Cases in India")
plot2

plot3 = ggplot() +
  geom_polygon(data = Map_plot,
               aes(x = long, y = lat, group = group, fill = Deaths.),
               color = "red", size = 0.2) +
  coord_map() +
  scale_fill_distiller(name = "Deaths", palette = "YlOrRd",
                       breaks = pretty_breaks(n = 9)) +
  theme_nothing(legend = TRUE) +
  labs(title = " Total Deaths in India")
plot3

plot4 = ggplot() +
  geom_polygon(data = Map_plot,
               aes(x = long, y = lat, group = group, fill = Cured...Migrated),
               color = "black", size = .2) +
  coord_map() +
  scale_fill_distiller(name = "Cured+Migrated", palette = "Greens",
                       breaks = pretty_breaks(n = 9)) +
  theme_nothing(legend = TRUE) +
  labs(title = " Cured+Migrated")
plot4

# Arrange the four maps in a 2x2 grid. gridExtra is already attached at the
# top of the script; the duplicate library(gridExtra) call was removed.
grid.arrange(plot1, plot2, plot3, plot4)
| /covidanalysis_india.R | no_license | beastleohfs/choropleth | R | false | false | 2,112 | r | library(ggplot2)
library(RColorBrewer)
library(ggmap)
library(maps)
library(rgdal)
library(scales)
library(maptools)
library(gridExtra)
library(rgeos)
states_shape = readOGR(file.choose())
class(states_shape)
names(states_shape)
print(states_shape$ID_1)
print(states_shape$NAME_1)
plot(states_shape, main = "Administrative Map of India")
dat=read.csv(file.choose())
fortify_shape = fortify(states_shape, region = "ID_1")
class(fortify_shape)
Merged_data = merge(fortify_shape, dat,by="id", all.x=TRUE)
Map_plot = Merged_data[order(Merged_data$order), ]
plot1=ggplot() +
geom_polygon(data = Map_plot,
aes(x = long, y = lat, group = group, fill = Total.Cases),
color = "red", size = 0.2) +
coord_map() + scale_fill_distiller(name="Total.Cases", palette = "Reds" , breaks = pretty_breaks(n = 9))+
theme_nothing(legend = TRUE)+
labs(title=" Total Cases in India")
plot1
plot2=ggplot() +
geom_polygon(data = Map_plot,
aes(x = long, y = lat, group = group, fill = Active.Cases),
color = "blue", size = 0.2) +
coord_map() + scale_fill_distiller(name="Total.Cases", palette = "Blues" , breaks = pretty_breaks(n = 9))+
theme_nothing(legend = TRUE)+
labs(title=" Active Cases in India")
plot2
plot3=ggplot() +
geom_polygon(data = Map_plot,
aes(x = long, y = lat, group = group, fill = Deaths.),
color = "red", size = 0.2) +
coord_map() + scale_fill_distiller(name="Total.Cases", palette = "YlOrRd" , breaks = pretty_breaks(n = 9))+
theme_nothing(legend = TRUE)+
labs(title=" Total Deaths in India")
plot3
plot4=ggplot() +
geom_polygon(data = Map_plot,
aes(x = long, y = lat, group = group, fill = Cured...Migrated),
color = "black", size = .2) +
coord_map() + scale_fill_distiller(name="Total.Cases", palette = "Greens" , breaks = pretty_breaks(n = 9))+
theme_nothing(legend = TRUE)+
labs(title=" Cured+Migrated")
plot4
library(gridExtra)
grid.arrange(plot1, plot2, plot3, plot4)
|
#!/usr/bin/env Rscript
# Normalise a ChIP BedGraph track against an input (control) track and write
# the ratio as a BedGraph to stdout. Both tracks are loaded as per-base
# coverage Rle vectors; the input is smoothed before division.
sys = modules::import('klmr/sys')
sys$run({
library(methods)
# Command-line interface: a fasta index (for chromosome lengths) plus the
# ChIP and input BedGraph files.
args = sys$cmd$parse(opt('i', 'faidx', 'Fasta index file'),
arg('chip', 'The ChIP BedGraph file'),
arg('input', 'The input BedGraph file'))
fx = modules::import('klmr/functional')
# Chromosome lengths from the .fai (column 2). Setting seqlengths on both
# tracks ensures their Rle objects have the same lengths and can be
# divided at the end.
seqlengths = read.delim(args$faidx, header = FALSE)$V2
# Per-base ChIP coverage, weighted by the BedGraph score column.
chip_coverage = args$chip %>%
rtracklayer::import.bedGraph() %>%
GenomeInfoDb::`seqlengths<-`(seqlengths) %>%
GenomicRanges::coverage(weight = GenomicRanges::score(.))
# Per-base input coverage, then a 50 bp rolling mean per chromosome to
# smooth the denominator ('extend' keeps the vector length unchanged).
input_coverage = args$input %>%
rtracklayer::import.bedGraph() %>%
GenomeInfoDb::`seqlengths<-`(seqlengths) %>%
GenomicRanges::coverage(weight = GenomicRanges::score(.)) %>%
lapply(as.numeric %|>% fx$p(zoo::rollmean, k = 50, fill = 'extend')) %>%
IRanges::RleList()
# Replace non-positive input positions with the smallest positive value so
# no division by zero happens below. Interpolation would arguably be a
# better strategy.
run_nulls = input_coverage <= 0
input_coverage[run_nulls] = as.list(min(input_coverage[! run_nulls]))
gc()
# Element-wise ratio ChIP / input, converted back to ranges for export.
normalized_coverage = GenomicRanges::GRanges(chip_coverage / input_coverage)
# Free the large coverage vectors before serialising the result.
rm(chip_coverage, input_coverage)
gc()
rtracklayer::export.bedGraph(normalized_coverage, stdout())
})
# vim: ft=r
| /scripts/normalize_to_input | no_license | klmr/pichip | R | false | false | 1,454 | #!/usr/bin/env Rscript
sys = modules::import('klmr/sys')
sys$run({
library(methods)
args = sys$cmd$parse(opt('i', 'faidx', 'Fasta index file'),
arg('chip', 'The ChIP BedGraph file'),
arg('input', 'The input BedGraph file'))
fx = modules::import('klmr/functional')
# Use these to ensure that Rle objects have the same lengths and can be
# divided at the end.
seqlengths = read.delim(args$faidx, header = FALSE)$V2
chip_coverage = args$chip %>%
rtracklayer::import.bedGraph() %>%
GenomeInfoDb::`seqlengths<-`(seqlengths) %>%
GenomicRanges::coverage(weight = GenomicRanges::score(.))
input_coverage = args$input %>%
rtracklayer::import.bedGraph() %>%
GenomeInfoDb::`seqlengths<-`(seqlengths) %>%
GenomicRanges::coverage(weight = GenomicRanges::score(.)) %>%
lapply(as.numeric %|>% fx$p(zoo::rollmean, k = 50, fill = 'extend')) %>%
IRanges::RleList()
# This ensures that no division by zero happens. Interpolation would
# arguably be a better strategy; I’m not sure.
run_nulls = input_coverage <= 0
input_coverage[run_nulls] = as.list(min(input_coverage[! run_nulls]))
gc()
normalized_coverage = GenomicRanges::GRanges(chip_coverage / input_coverage)
rm(chip_coverage, input_coverage)
gc()
rtracklayer::export.bedGraph(normalized_coverage, stdout())
})
# vim: ft=r
| |
# Combine every CSV whose name matches 'r.csv' in the working directory into
# one wide table by repeatedly merging on the shared 'ExperimentName' column,
# then transpose so each experiment becomes a column, and write onefile.csv.
files <- list.files(pattern = 'r.csv')  # NOTE(review): pattern is a regex; 'r\\.csv$' would be stricter.
if (length(files) == 0) {
  stop("No files matching 'r.csv' found in the working directory")
}
# Denominator for progress reporting, rounded down to a multiple of 10.
# BUG FIX: with fewer than 10 files the original denominator became 0 and
# the progress percentage divided by zero; clamp it to at least 1.
total_files <- max(length(files) - length(files) %% 10, 1)

ONEFILE <- read.csv(files[1])
# BUG FIX: iterate by index instead of which(files == FILE), which returned
# multiple positions (and a wrong percentage) for duplicated file names.
for (i in seq_along(files)[-1]) {
  df <- read.csv(files[i])
  # Inner join on ExperimentName: only experiments present in every file
  # survive the accumulated merge.
  ONEFILE <- merge(ONEFILE, df, by = 'ExperimentName')
  pct <- i / total_files * 100
  if (pct %% 5 == 0) {
    print(paste0(pct, '% Completed'))
  }
}

# Use the experiment names as row names, drop the key column, and transpose
# so rows become variables before writing out.
rownames(ONEFILE) <- ONEFILE[, 1]
ONEFILE <- as.data.frame(t(ONEFILE[, -1]))
write.csv(ONEFILE, 'onefile.csv')
| /create_one_file.R | no_license | ardimirzaei/HISB_Mapping_Databases | R | false | false | 642 | r |
files <- list.files(pattern = 'r.csv')
total_files <- length(files)
total_files <- total_files - total_files %% 10
# print(total_files)
ONEFILE <- read.csv(files[1])
for (FILE in files[-1]){
df <- read.csv(FILE)
ONEFILE <- merge(ONEFILE, df, by='ExperimentName')
pct <- which(files==FILE)
if ((((pct/total_files)*100) %%5) ==0){
print(paste0(pct/total_files* 100,'% Completed'))
}
# print(FILE)
# print(nrow(df))
# cat("\n")
# cat(print(nrow(ONEFILE)))
}
rownames(ONEFILE)<-ONEFILE[,1]
# ONEFILE <- ONEFILE[,-1]
# write.csv(ONEFILE, 'onefile1.csv')
ONEFILE <- as.data.frame(t(ONEFILE[,-1]))
write.csv(ONEFILE, 'onefile.csv')
|
# author: Jack Huey
# Load CIS-BP mouse motifs, attach gene-symbol names from TF_Information.txt,
# and add them as a motif annotation + chromVAR deviations matrix to an
# ArchR project.
library(ArchR)
library(ggrepel)
library(universalmotif)
library(TFBSTools)
## Read motifs
# One PWM file per motif; keep full paths for reading below.
files <- list.files(path="Mus_musculus_2021_05_06_12_37_pm/pwms_all_motifs", full.names=TRUE)
# Parse each PWM file into a TFBSTools PWMatrix named by its CIS-BP id.
# NOTE(review): gsub(".txt", ...) treats "." as a regex wildcard; "\\.txt$"
# would be safer if ids can contain "Xtxt"-like substrings — confirm.
motifs.raw = lapply(files, function(f) {
id = gsub(".txt", "", basename(f))
tryCatch({
motif = universalmotif::read_cisbp(f)
motif@name = id
convert_motifs(motif, "TFBSTools-PWMatrix")
}, error=function(e){
# Unparseable files yield NULL (error handler returns invisibly).
#print(paste0(id, " ", name))
})
})
# Drop the entries that failed to parse, then name the list by motif id.
motifs.raw <- motifs.raw[!sapply(motifs.raw,is.null)]
names(motifs.raw) = sapply(motifs.raw, function(f) { f@name })
# Re-key the motifs by TF gene symbol using the CIS-BP metadata table;
# duplicate symbols get a numeric suffix ("_2", "_3", ...).
all.motifs = list()
con = file("Mus_musculus_2021_05_06_12_37_pm/TF_Information.txt", "r")
# Skip the header row.
readLines(con, n = 1)
while ( TRUE ) {
line = readLines(con, n = 1)
if (length(line) == 0) {
break
}
cols = strsplit(line, "\t")
# Column 4: motif id; "." means the TF has no associated motif.
id = cols[[1]][4]
if (id == ".") {
next
}
# Column 7: TF gene symbol.
name = cols[[1]][7]
motif = motifs.raw[[id]]
if (is.null(motif)) {
# Motif referenced in the metadata but missing/unparsed above.
print(name)
next
}
# NOTE(review): grep() treats `name` as a regex, so partial matches (e.g.
# "Foxa1" inside "Foxa10") can inflate the suffix counter — verify that
# exact matching (fixed = TRUE, anchored) is not required here.
idx = length(grep(name, names(all.motifs))) + 1
if (idx > 1) {
name = paste0(name, "_", idx)
}
motif@name = name
all.motifs[name] = motif
}
close(con)
#all.motifs["Foxn1_2"] = convert_motifs(read_meme("MA1684.1.meme"), "TFBSTools-PWMatrix") this is to add a custom foxn1 motif from meme, didnt use in E11_E12
motifsList = do.call(PWMatrixList, all.motifs)
## E11_E12
# Annotate the E11/E12 ArchR project with the CIS-BP motif set and compute
# per-cell chromVAR deviations, then save to a new project directory.
proj_e11_e12_cisbp2 = loadArchRProject("ML_0414_ABSOLUTELY_FINAL_object_whole_analysis")
proj_e11_e12_cisbp2 <- addMotifAnnotations(ArchRProj = proj_e11_e12_cisbp2, motifPWMs = motifsList, name = "MotifCisbp2_only")
proj_e11_e12_cisbp2 <- addDeviationsMatrix(ArchRProj = proj_e11_e12_cisbp2, peakAnnotation = "MotifCisbp2_only")
saveArchRProject(proj_e11_e12_cisbp2, "E11_E12_cisbp2_deviations")
| /scATAC_qc_analysis/scATAC_analysis/add_cisbp2_motifs.R | no_license | maehrlab/pharyngeal_endoderm_development | R | false | false | 1,824 | r | # author: Jack Huey
library(ArchR)
library(ggrepel)
library(universalmotif)
library(TFBSTools)
## Read motifs
files <- list.files(path="Mus_musculus_2021_05_06_12_37_pm/pwms_all_motifs", full.names=TRUE)
motifs.raw = lapply(files, function(f) {
id = gsub(".txt", "", basename(f))
tryCatch({
motif = universalmotif::read_cisbp(f)
motif@name = id
convert_motifs(motif, "TFBSTools-PWMatrix")
}, error=function(e){
#print(paste0(id, " ", name))
})
})
motifs.raw <- motifs.raw[!sapply(motifs.raw,is.null)]
names(motifs.raw) = sapply(motifs.raw, function(f) { f@name })
all.motifs = list()
con = file("Mus_musculus_2021_05_06_12_37_pm/TF_Information.txt", "r")
readLines(con, n = 1)
while ( TRUE ) {
line = readLines(con, n = 1)
if (length(line) == 0) {
break
}
cols = strsplit(line, "\t")
id = cols[[1]][4]
if (id == ".") {
next
}
name = cols[[1]][7]
motif = motifs.raw[[id]]
if (is.null(motif)) {
print(name)
next
}
idx = length(grep(name, names(all.motifs))) + 1
if (idx > 1) {
name = paste0(name, "_", idx)
}
motif@name = name
all.motifs[name] = motif
}
close(con)
#all.motifs["Foxn1_2"] = convert_motifs(read_meme("MA1684.1.meme"), "TFBSTools-PWMatrix") this is to add a custom foxn1 motif from meme, didnt use in E11_E12
motifsList = do.call(PWMatrixList, all.motifs)
## E11_E12
proj_e11_e12_cisbp2 = loadArchRProject("ML_0414_ABSOLUTELY_FINAL_object_whole_analysis")
proj_e11_e12_cisbp2 <- addMotifAnnotations(ArchRProj = proj_e11_e12_cisbp2, motifPWMs = motifsList, name = "MotifCisbp2_only")
proj_e11_e12_cisbp2 <- addDeviationsMatrix(ArchRProj = proj_e11_e12_cisbp2, peakAnnotation = "MotifCisbp2_only")
saveArchRProject(proj_e11_e12_cisbp2, "E11_E12_cisbp2_deviations")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operator.R
\name{\%<+\%}
\alias{\%<+\%}
\title{\%<+\%}
\usage{
pg \%<+\% data
}
\arguments{
\item{pg}{ggplot2 object}
\item{data}{annotation data}
}
\value{
ggplot object with annotation data added
}
\description{
add annotation data to a tree
}
\examples{
nwk <- system.file("extdata", "sample.nwk", package="treeio")
tree <- read.tree(nwk)
p <- ggtree(tree)
dd <- data.frame(taxa=LETTERS[1:13],
place=c(rep("GZ", 5), rep("HK", 3), rep("CZ", 4), NA),
value=round(abs(rnorm(13, mean=70, sd=10)), digits=1))
row.names(dd) <- NULL
p \%<+\% dd + geom_text(aes(color=place, label=label), hjust=-0.5)
}
\author{
Guangchuang Yu
}
| /man/add-TREEDATA.Rd | no_license | Sungsu/ggtree | R | false | true | 722 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/operator.R
\name{\%<+\%}
\alias{\%<+\%}
\title{\%<+\%}
\usage{
pg \%<+\% data
}
\arguments{
\item{pg}{ggplot2 object}
\item{data}{annotation data}
}
\value{
ggplot object with annotation data added
}
\description{
add annotation data to a tree
}
\examples{
nwk <- system.file("extdata", "sample.nwk", package="treeio")
tree <- read.tree(nwk)
p <- ggtree(tree)
dd <- data.frame(taxa=LETTERS[1:13],
place=c(rep("GZ", 5), rep("HK", 3), rep("CZ", 4), NA),
value=round(abs(rnorm(13, mean=70, sd=10)), digits=1))
row.names(dd) <- NULL
p \%<+\% dd + geom_text(aes(color=place, label=label), hjust=-0.5)
}
\author{
Guangchuang Yu
}
|
# Tutorial: basic data manipulation with dplyr, using the iris data set.
library(tidyr)
library(dplyr)

# BUG FIX: iris has no ID column, so the select() example below errored.
# Create a simple row-number ID first (this modifies a local copy of iris).
iris$ID <- seq_len(nrow(iris))

# Use 'select' to get some variables (columns) from a data frame:
# here Sepal.Length, Sepal.Width, Species and ID.
select(iris, Sepal.Length, Sepal.Width, Species, ID)

# Use 'filter' to keep only observations (rows) matching a condition:
# samples whose Species is setosa.
filter(iris, Species == 'setosa')

# 'mutate' creates a new variable (column) from existing ones:
# Sepal.Ratio is the ratio of Sepal.Length to Sepal.Width.
mutate(iris, Sepal.Ratio = Sepal.Length / Sepal.Width)

# The special operator %>% (the pipe) feeds the result of the previous
# step in as the input of the next step.
iris %>% # start from the iris data frame
select(Sepal.Length, Sepal.Width, Species, ID) %>%
mutate(Sepal.Ratio = Sepal.Length / Sepal.Width) %>%
filter(Sepal.Ratio > 1.5)
# The chain above starts from iris,
# selects the columns Sepal.Length, Sepal.Width, Species and ID,
# adds a Sepal.Ratio column,
# and keeps only the rows whose Sepal.Ratio exceeds 1.5.
# ** Note: %>% always uses the data frame produced by the previous step
# as the input data frame of the following step.
| /src/data_manipulation.R | permissive | foxience/training | R | false | false | 1,395 | r | library(tidyr)
library(dplyr)
# Use 'select' to get some variables (columns) from data frame
# Lấy data frame từ data frame iris bao gồm các cột
# Sepal.Length, Sepal.Width, Species và ID
select(iris, Sepal.Length, Sepal.Width, Species, ID)
# Use 'filter' to filter out observations (rows) from data frame
# Lọc ra những mẫu nào có Species là setosa
filter(iris, Species == 'setosa')
# 'mutate' is used to create a new variable (column) based on other variables
# Tạo 1 cột mới có tên là Sepal.Ratio được tính bằng tỉ lệ Sepal.Length va Sepal.Width
mutate(iris, Sepal.Ratio = Sepal.Length / Sepal.Width)
# special operator %>%
# %>% là ký tự đặc biệt cho phép mình dùng kết quả của phép tính trước
# làm đầu vào của phép tính sau
iris %>% # dùng data frame iris cho các phép biến đổi sau
select(Sepal.Length, Sepal.Width, Species, ID) %>%
mutate(Sepal.Ratio = Sepal.Length / Sepal.Width) %>%
filter(Sepal.Ratio > 1.5)
# chuỗi biến đổi trên sẽ đi từ data frame iris
# select lấy ra các cột Sepal.Length, Sepal.Width, Species, ID
# mutate tạo thêm 1 cột Sepal.Ratio
# filter lọc ra những mẫu nào có Sepal.Ratio trên 1.5
# ** Lưu ý %>% dùng data frame kết quả của phép biến đổi trước
# để làm data frame đầu vào cho phép biến đổi sau
|
#!/usr/bin/env Rscript
# Spell-check the project's R Markdown files and README with hunspell.
# Prose is extracted from the pandoc AST so that code, math, raw blocks,
# citations, and URLs are excluded from checking.
suppressPackageStartupMessages({
library("R6")
library("pandocfilters")
library("dplyr")
})
# Run pandoc on `file` (default input format: markdown) and return the
# document's JSON AST parsed into nested R lists, with no simplification.
pandoc_to_json <- function(file, from = "markdown") {
    cmd_args <- sprintf("-f %s -t json %s", from, file)
    raw_json <- system2("pandoc", cmd_args, stdout = TRUE)
    jsonlite::fromJSON(
        raw_json,
        simplifyVector = FALSE,
        simplifyDataFrame = FALSE,
        simplifyMatrix = FALSE
    )
}
# https://stackoverflow.com/questions/2436688/append-an-object-to-a-list-in-r-in-amortized-constant-time-o1
# https://stackoverflow.com/questions/29461530/efficient-linked-list-ordered-set-in-r/29482211#29482211
# Append-only collection with amortised O(1) insertion, backed by an
# environment keyed by insertion index (growing a plain list with c()
# would copy it on every append, i.e. O(n^2) overall).
ExpandingList <- R6Class("ExpandingList",
public = list(
# Create the backing environment that stores the elements.
initialize = function() {
private$data <- rlang::new_environment()
},
# Append `val`, keyed by its (1-based) insertion position.
# Returns self invisibly so calls can be chained.
add = function(val) {
n <- length(private$data)
private$data[[as.character(n + 1L)]] <- val
invisible(self)
},
# Return the stored elements as a list, restored to insertion order
# (environments are unordered, so sort by the numeric keys).
as.list = function() {
x <- as.list(private$data, sorted = FALSE)
x[order(as.numeric(names(x)))]
}
),
private = list(
# Environment holding the elements; populated lazily by initialize().
data = NULL
)
)
# TRUE for strings beginning with an http(s) or doi scheme (case-insensitive).
is_url <- function(x) {
    url_scheme <- stringr::regex("^(https?|doi):", ignore_case = TRUE)
    stringr::str_detect(x, url_scheme)
}
# Walk a pandoc AST and collect the plain prose words, skipping URLs and any
# inline code, math, raw inline content, and citations.
#
# @param x    Pandoc document AST (nested lists, as from pandoc_to_json()).
# @param meta Unused; kept for call-signature compatibility (callers in this
#             file invoke stringify(x) with a single argument).
# @return Character vector of words in document order.
stringify <- function(x, meta) {
results <- ExpandingList$new()
# astrapply() callback: invoked for every AST node with its type key and
# payload value.
go <- function(key, value, ...) {
if (key %in% c("Str", "MetaString")) {
# For Str/MetaString nodes `value` is the literal string itself.
if (!is_url(value)) {
results$add(value)
}
} else if (key %in% c("Code", "Math", "RawInline", "Cite")) {
# Returning an empty list removes these nodes (and their contents)
# from the walk, so they are never spell-checked.
list()
}
}
x <- astrapply(x, go)
purrr::flatten_chr(results$as.list())
}
# Extract all prose words from a document as one space-separated string.
parse_text_md <- function(path, from = "markdown") {
    ast <- pandoc_to_json(path, from = from)
    words <- stringify(ast)
    stringr::str_c(words, collapse = " ")
}
# Normalise a language tag to hunspell's "ll_CC" form (e.g. "en_US").
#
# Falls back to "en_US" when the input is missing or empty, and expands
# ambiguous two-letter / "en(g)" codes with a message.
#
# @param lang Language code such as "en", "en-US", "pt_br", or NULL.
# @return A string like "en_US": lower-case language part, upper-case
#   region part(s), joined by "_".
normalize_lang <- function(lang = NULL){
  if (!length(lang) || !nchar(lang)) {
    # BUG FIX: the original called str_c() here, but stringr is never
    # attached by this script, so the missing-language path errored.
    # paste0() is the base-R equivalent; the message text is unchanged.
    message(paste0("DESCRIPTION does not contain 'Language' field. ",
                   "Defaulting to 'en-US'."))
    lang <- "en-US"
  }
  if (tolower(lang) == "en" || tolower(lang) == "eng") {
    message("Found ambiguous language 'en'. Defaulting to 'en-US")
    lang <- "en-US"
  }
  if (nchar(lang) == 2) {
    # Bare two-letter code: duplicate it as the region, e.g. "fr" -> "fr_FR".
    oldlang <- lang
    lang <- paste(tolower(lang), toupper(lang), sep = "_")
    message(sprintf("Found ambiguous language '%s'. Defaulting to '%s'",
                    oldlang, lang))
  }
  # Accept both "en-US" and "en_US"; canonicalise separators to "_".
  lang <- gsub("-", "_", lang, fixed = TRUE)
  parts <- strsplit(lang, "_", fixed = TRUE)[[1]]
  parts[1] <- tolower(parts[1])
  parts[-1] <- toupper(parts[-1])
  paste(parts, collapse = "_")
}
# Spell-check one file: extract its prose, run hunspell against `dict`,
# and return a tibble with one row per misspelled word and its count.
# The file path column is only attached when misspellings were found, so
# clean files contribute zero rows when the results are row-bound.
spell_check_pandoc_one <- function(path, dict) {
text <- parse_text_md(path)
# hunspell() returns a list of character vectors (one per input string);
# flatten to a single vector of misspelled tokens.
bad_words <- purrr::flatten_chr(hunspell::hunspell(text, dict = dict))
out <- tibble::tibble(words = bad_words) %>%
count(words) %>%
rename(count = n)
if (nrow(out) > 0) {
out[["path"]] <- path
}
out
}
# Spell-check one or more files and return a tibble (path, words, count)
# sorted by file name and word.
#
# @param path   Character vector of file paths (must exist).
# @param ignore Extra words to accept (project word list).
# @param lang   Dictionary language; normalised to hunspell's "ll_CC" form.
spell_check_pandoc <- function(path, ignore = character(), lang = "en_US") {
stopifnot(is.character(ignore))
lang <- normalize_lang(lang)
dict <- hunspell::dictionary(lang, add_words = ignore)
# Resolve to absolute paths up front; errors early on missing files.
path <- normalizePath(path, mustWork = TRUE)
# Check each file and combine, then sum counts per (file, word) pair.
purrr::map_df(sort(path), spell_check_pandoc_one, dict = dict) %>%
group_by(path, words) %>%
summarise(count = sum(count)) %>%
arrange(path, words) %>%
ungroup() %>%
mutate(path = basename(path))
}
# Files to check: R Markdown files in the project root plus README.md.
# NOTE(review): dir() without full.names = TRUE returns bare file names, so
# normalizePath() inside spell_check_pandoc resolves them against the current
# working directory — this only works when run from the project root; confirm.
files <- c(dir(here::here(), pattern = "\\.(Rmd)"),
here::here("README.md"))
# Project-specific words that should not be reported as misspellings.
ignore <- readLines(here::here("WORDLIST"))
# Print up to 100 rows of the misspelling table.
print(spell_check_pandoc(files, ignore = ignore), n = 100)
| /_spelling.R | no_license | Dr-Dong/bayesian_notes | R | false | false | 3,411 | r | #!/usr/bin/env Rscript
suppressPackageStartupMessages({
library("R6")
library("pandocfilters")
library("dplyr")
})
pandoc_to_json <- function(file, from = "markdown") {
args <- sprintf("-f %s -t json %s", from, file)
out <- system2("pandoc", args, stdout = TRUE)
jsonlite::fromJSON(out, simplifyVector = FALSE, simplifyDataFrame = FALSE,
simplifyMatrix = FALSE)
}
# https://stackoverflow.com/questions/2436688/append-an-object-to-a-list-in-r-in-amortized-constant-time-o1
# https://stackoverflow.com/questions/29461530/efficient-linked-list-ordered-set-in-r/29482211#29482211
ExpandingList <- R6Class("ExpandingList",
public = list(
initialize = function() {
private$data <- rlang::new_environment()
},
add = function(val) {
n <- length(private$data)
private$data[[as.character(n + 1L)]] <- val
invisible(self)
},
as.list = function() {
x <- as.list(private$data, sorted = FALSE)
x[order(as.numeric(names(x)))]
}
),
private = list(
data = NULL
)
)
is_url <- function(x) {
stringr::str_detect(x, stringr::regex("^(https?|doi):", ignore_case = TRUE))
}
stringify <- function(x, meta) {
results <- ExpandingList$new()
go <- function(key, value, ...) {
if (key %in% c("Str", "MetaString")) {
if (!is_url(value)) {
results$add(value)
}
} else if (key %in% c("Code", "Math", "RawInline", "Cite")) {
list()
}
}
x <- astrapply(x, go)
purrr::flatten_chr(results$as.list())
}
parse_text_md <- function(path, from = "markdown") {
x <- pandoc_to_json(path, from = from)
stringr::str_c(stringify(x), collapse = " ")
}
normalize_lang <- function(lang = NULL){
if (!length(lang) || !nchar(lang)) {
message(str_c("DESCRIPTION does not contain 'Language' field. ",
"Defaulting to 'en-US'."))
lang <- "en-US"
}
if (tolower(lang) == "en" || tolower(lang) == "eng") {
message("Found ambiguous language 'en'. Defaulting to 'en-US")
lang <- "en-US"
}
if (nchar(lang) == 2) {
oldlang <- lang
lang <- paste(tolower(lang), toupper(lang), sep = "_")
message(sprintf("Found ambiguous language '%s'. Defaulting to '%s'",
oldlang, lang))
}
lang <- gsub("-", "_", lang, fixed = TRUE)
parts <- strsplit(lang, "_", fixed = TRUE)[[1]]
parts[1] <- tolower(parts[1])
parts[-1] <- toupper(parts[-1])
paste(parts, collapse = "_")
}
spell_check_pandoc_one <- function(path, dict) {
text <- parse_text_md(path)
bad_words <- purrr::flatten_chr(hunspell::hunspell(text, dict = dict))
out <- tibble::tibble(words = bad_words) %>%
count(words) %>%
rename(count = n)
if (nrow(out) > 0) {
out[["path"]] <- path
}
out
}
spell_check_pandoc <- function(path, ignore = character(), lang = "en_US") {
stopifnot(is.character(ignore))
lang <- normalize_lang(lang)
dict <- hunspell::dictionary(lang, add_words = ignore)
path <- normalizePath(path, mustWork = TRUE)
purrr::map_df(sort(path), spell_check_pandoc_one, dict = dict) %>%
group_by(path, words) %>%
summarise(count = sum(count)) %>%
arrange(path, words) %>%
ungroup() %>%
mutate(path = basename(path))
}
files <- c(dir(here::here(), pattern = "\\.(Rmd)"),
here::here("README.md"))
ignore <- readLines(here::here("WORDLIST"))
print(spell_check_pandoc(files, ignore = ignore), n = 100)
|
#!/usr/bin/env Rscript
#Normalises a SingleCellExperiment object
# Command-line wrapper: reads a SingleCellLoomExperiment from a Loom file,
# runs scater's normalize() on it, and writes the result to a new Loom file.
# Load optparse we need to check inputs
library(optparse)
library(workflowscriptscommon)
library(LoomExperiment)
library(scater)
# parse options: input and output Loom file paths, both mandatory.
option_list = list(
make_option(
c("-i", "--input-loom"),
action = "store",
default = NA,
type = 'character',
help = "A SingleCellExperiment object file in Loom format."
),
make_option(
c("-o", "--output-loom"),
action = "store",
default = NA,
type = 'character',
help = "File name in which to store the SingleCellExperiment object in Loom format."
)
)
opt <- wsc_parse_args(option_list, mandatory = c('input_loom', 'output_loom'))
# Check parameter values: fail fast when the input file is missing.
if ( ! file.exists(opt$input_loom)){
stop((paste('File', opt$input_loom, 'does not exist')))
}
# Input from Loom format
scle <- import(opt$input_loom, format='loom', type='SingleCellLoomExperiment')
print(paste("Normalising...."))
#Normalise using the first assay as the expression values.
# NOTE(review): scater::normalize() may be deprecated in newer scater
# releases in favour of logNormCounts() — confirm against the pinned version.
scle <- normalize(scle, exprs_values = 1)
print(paste("Finished normalising"))
# Output to a Loom file; the exporter requires the target not to exist,
# so remove any stale file first.
if (file.exists(opt$output_loom)) {
file.remove(opt$output_loom)
}
export(scle, opt$output_loom, format='loom')
| /tools/scater/scater-normalize.R | permissive | thobalose/tools-iuc | R | false | false | 1,186 | r | #!/usr/bin/env Rscript
#Normalises a SingleCellExperiment object
# Load optparse we need to check inputs
library(optparse)
library(workflowscriptscommon)
library(LoomExperiment)
library(scater)
# parse options
option_list = list(
make_option(
c("-i", "--input-loom"),
action = "store",
default = NA,
type = 'character',
help = "A SingleCellExperiment object file in Loom format."
),
make_option(
c("-o", "--output-loom"),
action = "store",
default = NA,
type = 'character',
help = "File name in which to store the SingleCellExperiment object in Loom format."
)
)
opt <- wsc_parse_args(option_list, mandatory = c('input_loom', 'output_loom'))
# Check parameter values
if ( ! file.exists(opt$input_loom)){
stop((paste('File', opt$input_loom, 'does not exist')))
}
# Input from Loom format
scle <- import(opt$input_loom, format='loom', type='SingleCellLoomExperiment')
print(paste("Normalising...."))
#Normalise
scle <- normalize(scle, exprs_values = 1)
print(paste("Finished normalising"))
# Output to a Loom file
if (file.exists(opt$output_loom)) {
file.remove(opt$output_loom)
}
export(scle, opt$output_loom, format='loom')
|
## ---------------------------------------------------------------------------------------------------------------------
# Load the Hungarian/Finnish PISA 2018 extract and impute missing values.
# NOTE(review): the absolute Windows path makes this non-portable; and mice()
# is stochastic, so without set.seed() the imputation differs between runs —
# confirm whether reproducibility matters here.
library(readxl)
HUN_FIN_2018_R <- read_excel("C:/Users/bokan/Desktop/Egyetem/Szakdoga/Szakdolgozat/Adatbazis/HUN_FIN_2018_R.xlsx")
# Drop the row-number column ("Sorszám" = serial number).
HUN_FIN_2018_R$Sorszám <- NULL
library("mice")
library("dplyr")
library("car")
library("HH")
library("lmtest")
library("sandwich")
library("corrplot")
## ---- message=FALSE---------------------------------------------------------------------------------------------------
# Single imputation (m = 1) with predictive mean matching.
imp_single18 <- mice(HUN_FIN_2018_R, m = 1, method = "pmm")
PISA2018IMP <- complete(imp_single18)
# Country as a 0/1 treatment indicator: HUN = 0 (control), FIN = 1 (treated).
PISA2018IMP$CNT = factor(PISA2018IMP$CNT,levels = c('HUN', 'FIN'),labels = c(0, 1))
# Fix column names that the Excel import mangled (positions 13, 47 and 50).
names(PISA2018IMP)[13] <- "KO_Koz"
names(PISA2018IMP)[47] <- "WLE_TeacherInterest"
names(PISA2018IMP)[50] <- "WLE_SubjectiveWellBeing"
## ---------------------------------------------------------------------------------------------------------------------
# Build the analysis data frame: keep the country indicator and collapse the
# imputed dummy columns into single labelled categorical columns.
# IMPROVEMENT: the original recoded each variable with a row-by-row for-loop
# over 1:length(PISA2018$CNT); these are replaced with vectorised
# dplyr::case_when() calls. Each case_when lists its conditions in exactly
# the same order as the original if/else cascade, so the labels produced are
# identical for every row.
library(dplyr)
PISA2018IMP %>% dplyr::select(CNT) -> PISA2018
# Gender dummy: 0 -> 'Nő' (female), 1 -> 'Férfi' (male).
PISA2018$Nem = factor(PISA2018IMP$Nem,levels = c(0, 1),labels = c('Nő', 'Férfi'))

# OF: baseline 'Rossz'; dummies mark 'Jó' and 'Kiváló'.
PISA2018$OF <- case_when(
  PISA2018IMP$OF_Jo == 0 & PISA2018IMP$OF_Kiv == 0 ~ 'Rossz',
  PISA2018IMP$OF_Jo == 1 ~ 'Jó',
  TRUE ~ 'Kiváló'
)

# Number of books at home: three dummies collapsed into four bands.
PISA2018$Könyvek <- case_when(
  PISA2018IMP$K_25_100 == 0 & PISA2018IMP$K_100_200 == 0 & PISA2018IMP$K_200_500 == 0 ~ '0-25',
  PISA2018IMP$K_25_100 == 1 ~ '25-100',
  PISA2018IMP$K_100_200 == 1 ~ '100-200',
  TRUE ~ '200-500'
)

# TH: baseline 'Rossz'; dummies mark 'Közepes' and 'Kiváló'.
PISA2018$TH <- case_when(
  PISA2018IMP$TH_Koz == 0 & PISA2018IMP$TH_Kiv == 0 ~ 'Rossz',
  PISA2018IMP$TH_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Kiváló'
)

# TT: four levels from three dummies.
PISA2018$TT <- case_when(
  PISA2018IMP$TT_Koz == 0 & PISA2018IMP$TT_Jo == 0 & PISA2018IMP$TT_Kiv == 0 ~ 'Rossz',
  PISA2018IMP$TT_Koz == 1 ~ 'Közepes',
  PISA2018IMP$TT_Jo == 1 ~ 'Jó',
  TRUE ~ 'Kiváló'
)

# KO: baseline 'Nincs'; dummies mark 'Közepes' and 'Kiváló'.
PISA2018$KO <- case_when(
  PISA2018IMP$KO_Koz == 0 & PISA2018IMP$KO_Kiv == 0 ~ 'Nincs',
  PISA2018IMP$KO_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Kiváló'
)

# OI: baseline 'Kevés'; dummies mark 'Közepes' and 'Sok'.
PISA2018$OI <- case_when(
  PISA2018IMP$OI_Koz == 0 & PISA2018IMP$OI_Sok == 0 ~ 'Kevés',
  PISA2018IMP$OI_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Sok'
)

# IK: four levels from three dummies.
PISA2018$IK <- case_when(
  PISA2018IMP$IK_Koz == 0 & PISA2018IMP$IK_Jo == 0 & PISA2018IMP$IK_Kiv == 0 ~ 'Rossz',
  PISA2018IMP$IK_Koz == 1 ~ 'Közepes',
  PISA2018IMP$IK_Jo == 1 ~ 'Jó',
  TRUE ~ 'Kiváló'
)

# SzT: baseline 'Rossz'; dummies mark 'Közepes' and 'Kiváló'.
PISA2018$SzT <- case_when(
  PISA2018IMP$SzT_Koz == 0 & PISA2018IMP$SzT_Kiv == 0 ~ 'Rossz',
  PISA2018IMP$SzT_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Kiváló'
)

# Ver: baseline 'Kicsi'; dummies mark 'Közepes' and 'Nagy'.
PISA2018$Ver <- case_when(
  PISA2018IMP$Ver_Koz == 0 & PISA2018IMP$Ver_Nagy == 0 ~ 'Kicsi',
  PISA2018IMP$Ver_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Nagy'
)

# Bull: baseline 'Gyakran'; dummies mark 'Néha' and 'Soha'.
PISA2018$Bull <- case_when(
  PISA2018IMP$Bull_Soh == 0 & PISA2018IMP$Bull_Neh == 0 ~ 'Gyakran',
  PISA2018IMP$Bull_Neh == 1 ~ 'Néha',
  TRUE ~ 'Soha'
)

# OED: baseline 'Kevés'; dummies mark 'Közepes' and 'Sok'.
PISA2018$OED <- case_when(
  PISA2018IMP$OED_Koz == 0 & PISA2018IMP$OED_Sok == 0 ~ 'Kevés',
  PISA2018IMP$OED_Koz == 1 ~ 'Közepes',
  TRUE ~ 'Sok'
)

# DE: baseline 'Nincs'; dummies mark 'Kevés' and 'Sok'.
PISA2018$DE <- case_when(
  PISA2018IMP$DE_Van == 0 & PISA2018IMP$DE_Kev == 0 ~ 'Nincs',
  PISA2018IMP$DE_Kev == 1 ~ 'Kevés',
  TRUE ~ 'Sok'
)
# Carry the continuous / score variables over from the imputed data frame
# unchanged. The source and target column names are identical, and the
# vector below lists them in the same order as the original one-by-one
# assignments, so the resulting column order is unchanged.
numeric_cols <- c(
  "Satisfact", "WeeklyClass", "MISCED", "FISCED", "BMMJ_Mother",
  "BFMJ_Father", "MINS_Maths", "MINS_Language", "ESCS", "ICT_Home",
  "ICT_School", "WLE_Cultural", "WLE_Home", "WLE_Wealth",
  "WLE_TeacherSupport", "WLE_TeacherInstruction", "WLE_Emotional",
  "WLE_TeacherInterest", "WLE_Attitude", "WLE_Competitiveness",
  "WLE_SubjectiveWellBeing", "WLE_GoalOrientation", "WLE_SchoolWellBeing",
  "PERCOOP", "GFOFAIL", "PERFEED", "RESILIENCE",
  "PV1READ", "PV1MATH", "PV1SCIE"
)
for (col in numeric_cols) {
  PISA2018[[col]] <- PISA2018IMP[[col]]
}

# Convert the recoded category label columns from character to factor.
factor_cols <- c("OF", "Könyvek", "TH", "TT", "KO", "OI",
                 "IK", "SzT", "Ver", "Bull", "OED", "DE")
for (col in factor_cols) {
  PISA2018[[col]] <- as.factor(PISA2018[[col]])
}
## ---------------------------------------------------------------------------------------------------------------------
# Descriptive comparison of the two countries on the plausible-value scores
# (treatment group: Finland, CNT == 1; control group: Hungary, CNT == 0).
TreatsFIN <- subset(PISA2018IMP, PISA2018IMP$CNT == 1, select = -c(CNT))
ControlHUN <- subset(PISA2018IMP, PISA2018IMP$CNT == 0, select = -c(CNT))
summary(TreatsFIN$PV1MATH)
summary(ControlHUN$PV1MATH)
summary(TreatsFIN$PV1READ)
summary(ControlHUN$PV1READ)
summary(TreatsFIN$PV1SCIE)
summary(ControlHUN$PV1SCIE)
## ---------------------------------------------------------------------------------------------------------------------
# Two-sample t-tests of the outcome scores, and of selected background
# covariates, between the two countries.
with(PISA2018IMP, t.test(PV1MATH ~ CNT))
with(PISA2018IMP, t.test(PV1READ ~ CNT))
with(PISA2018IMP, t.test(PV1SCIE ~ CNT))
PISA2018IMP_cov <- c("FISCED", "MISCED", "ESCS")
lapply(PISA2018IMP_cov, function(v) {t.test(PISA2018IMP[, v] ~ PISA2018IMP[, 'CNT'])})
## ---------------------------------------------------------------------------------------------------------------------
# Propensity-score model: logistic regression of country membership on all
# covariates except the three outcome scores.
pscores.model <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ,family = binomial("logit"),data = PISA2018)
summary(pscores.model)
## ---------------------------------------------------------------------------------------------------------------------
# Remove outliers: drop observations with |studentized residual| > 3
# (the removed rows are kept in `kilogok` for inspection).
PISA2018[abs(rstudent(pscores.model))>3,] -> kilogok
PISA2018 <- PISA2018[abs(rstudent(pscores.model))<=3, ]
## ---------------------------------------------------------------------------------------------------------------------
# Multicollinearity diagnostics; the numeric index vectors select the
# continuous covariate columns by position.
# NOTE(review): these positional indices are fragile — they assume the exact
# column order produced earlier in the script; confirm after any change.
vif(pscores.model)
corr <- PISA2018[, c(15, 16, 17,18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)]
cor(corr)
# Subset of highly correlated columns (the ones later dropped from model2).
corr2 <- PISA2018[, c(17,18, 19, 20, 23, 24, 26, 27, 28, 31)]
cor(corr2)
## ---------------------------------------------------------------------------------------------------------------------
# Principal components of the standardised collinear covariates.
fokomp1 <- princomp(scale(PISA2018[ , c(17,18, 19, 20, 23, 24, 26, 27, 28, 31)]))
fokomp1$sdev^2
summary(fokomp1)
# Based on the Kaiser criterion, keep the first two components.
PISA2018 <- cbind(PISA2018, fokomp1$scores[ ,1:2])
## ---------------------------------------------------------------------------------------------------------------------
fokomp1$loadings
## ---------------------------------------------------------------------------------------------------------------------
# Name the two appended component-score columns (positions 45:46).
colnames(PISA2018)[45:46] <- c("SocioecStat", "ParentalEduc")
## ---------------------------------------------------------------------------------------------------------------------
# Model 2: replace the collinear raw covariates with the two components.
pscores.model2 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model2)
vif(pscores.model2)
## ---------------------------------------------------------------------------------------------------------------------
# Models 3 and 4 reuse the model-2 formula; what changes between them is the
# data: factor levels are collapsed in place before each refit.
# Collapse TT level 'Kiváló' (excellent) into 'Jó' (good).
PISA2018$TT <- as.character(PISA2018$TT)
PISA2018$TT[PISA2018$TT=="Kiváló"] <- "Jó"
PISA2018$TT <- as.factor(PISA2018$TT)
pscores.model3 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model3)
# Collapse book-count bands '100-200' and '200-500' into '100-500'.
PISA2018$Könyvek <- as.character(PISA2018$Könyvek)
PISA2018$Könyvek[PISA2018$Könyvek=="100-200"] <- "100-500"
PISA2018$Könyvek[PISA2018$Könyvek=="200-500"] <- "100-500"
PISA2018$Könyvek <- as.factor(PISA2018$Könyvek)
pscores.model4 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model4)
vif(pscores.model4)
## ---------------------------------------------------------------------------------------------------------------------
# Specification diagnostics for the final model: RESET test plus
# component-plus-residual plots for selected predictors.
resettest(pscores.model4)
crPlots(pscores.model4, ~Nem)
crPlots(pscores.model4, ~OF)
crPlots(pscores.model4, ~Könyvek)
crPlots(pscores.model4, ~TH)
crPlots(pscores.model4, ~TT)
crPlots(pscores.model4, ~KO)
crPlots(pscores.model4, ~OI)
crPlots(pscores.model4, ~IK)
crPlots(pscores.model4, ~SzT)
crPlots(pscores.model4, ~Ver)
crPlots(pscores.model4, ~Bull)
crPlots(pscores.model4, ~OED)
crPlots(pscores.model4, ~DE)
crPlots(pscores.model4, ~Satisfact)
crPlots(pscores.model4, ~WeeklyClass)
crPlots(pscores.model4, ~MINS_Maths)
crPlots(pscores.model4, ~MINS_Language)
crPlots(pscores.model4, ~ICT_School)
crPlots(pscores.model4, ~WLE_TeacherSupport)
crPlots(pscores.model4, ~WLE_TeacherInstruction)
crPlots(pscores.model4, ~WLE_TeacherInterest)
crPlots(pscores.model4, ~WLE_Attitude)
crPlots(pscores.model4, ~WLE_Competitiveness)
crPlots(pscores.model4, ~WLE_SubjectiveWellBeing)
crPlots(pscores.model4, ~WLE_GoalOrientation)
crPlots(pscores.model4, ~WLE_SchoolWellBeing)
crPlots(pscores.model4, ~PERCOOP)
crPlots(pscores.model4, ~GFOFAIL)
crPlots(pscores.model4, ~PERFEED)
crPlots(pscores.model4, ~RESILIENCE)
crPlots(pscores.model4, ~SocioecStat)
crPlots(pscores.model4, ~ParentalEduc)
## ---------------------------------------------------------------------------------------------------------------------
pscores.model5 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc+pmax(MINS_Maths-875, 0),family = binomial("logit"),data = PISA2018)
summary(pscores.model5)
BIC(pscores.model5)
AIC(pscores.model5)
resettest(pscores.model5)
## ---------------------------------------------------------------------------------------------------------------------
pscores.model4 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model4)
AIC(pscores.model4)
BIC(pscores.model4)
vif(pscores.model4)
pscores.model6 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT,family = binomial("logit"),data = PISA2018)
summary(pscores.model6)
AIC(pscores.model6)
BIC(pscores.model6)
vif(pscores.model6)
pscores.model7 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED,family = binomial("logit"),data = PISA2018)
summary(pscores.model7)
AIC(pscores.model7)
BIC(pscores.model7)
vif(pscores.model7)
pscores.model8 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness,family = binomial("logit"),data = PISA2018)
summary(pscores.model8)
AIC(pscores.model8)
BIC(pscores.model8)
vif(pscores.model8)
pscores.model9 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness-PERCOOP,family = binomial("logit"),data = PISA2018)
summary(pscores.model9)
AIC(pscores.model9)
BIC(pscores.model9)
vif(pscores.model9)
PropScores <- fitted(glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness-PERCOOP,family = binomial("logit"),data = PISA2018))
## ---------------------------------------------------------------------------------------------------------------------
becsles <- predict(pscores.model9, PISA2018, type="response")
PISA2018$becsult <- ifelse(becsles>0.5, 1,0)
xtabs(~CNT+becsult, PISA2018)
sum(PISA2018$CNT==PISA2018$becsult)/nrow(PISA2018)
library("pROC")
ROCgorbe <- roc(PISA2018$CNT~becsles)
plot(ROCgorbe)
2*auc(ROCgorbe)-1
## ---------------------------------------------------------------------------------------------------------------------
PISA2018$PScores <- pscores.model6$fitted.values
hist(PISA2018$PScores[PISA2018$CNT==1],main = "Pscores of Response = 1")
hist(PISA2018$PScores[PISA2018$CNT==0],main = "Pscores of Response = 0")
## ---------------------------------------------------------------------------------------------------------------------
xvars <- c("Nem","OF","Könyvek","TH","TT","KO","OI","IK","Ver","Bull","OED","DE","Satisfact","WeeklyClass","MINS_Maths","MINS_Language","ICT_School", "WLE_TeacherSupport", "WLE_TeacherInstruction", "WLE_TeacherInterest", "WLE_Attitude", "WLE_SubjectiveWellBeing", "WLE_GoalOrientation", "WLE_SchoolWellBeing", "GFOFAIL", "RESILIENCE", "SocioecStat", "ParentalEduc")
library(tableone)
table1 <- CreateTableOne(vars = xvars,strata = "CNT",data = PISA2018, test = FALSE)
print(table1, smd = TRUE)
## ---------------------------------------------------------------------------------------------------------------------
PISA2018$becsult <- NULL
library(MatchIt)
match <- matchit(CNT~.-PV1MATH-PV1READ-PV1SCIE-PScores-PERCOOP, data = PISA2018, distance=PropScores, method = "nearest")
plot(match, type="jitter")
plot(match, type="hist")
summary(match, standardized=T)
summary(match, standardized=T) -> BalanceNN
TableNN <- BalanceNN$sum.matched
## ---------------------------------------------------------------------------------------------------------------------
matchdata <- match.data(match)
table_match2 <- CreateTableOne(vars = xvars,strata = "CNT",data = matchdata,test = FALSE)
print(table_match2, smd = TRUE)
## ---------------------------------------------------------------------------------------------------------------------
with(matchdata, t.test(PV1MATH ~ CNT))
lm_treat1 <- lm(PV1MATH ~ CNT, data = matchdata)
summary(lm_treat1)
## ---------------------------------------------------------------------------------------------------------------------
mean(matchdata$PV1MATH[matchdata$CNT == 1]) - mean(matchdata$PV1MATH[matchdata$CNT == 0])
## ---------------------------------------------------------------------------------------------------------------------
library(cobalt)
m.sum <- summary(match)
plot(m.sum, var.order = "unmatched", cex=0.75)
love.plot(match, binary = "std")
| /PISA_2018_NN.R | no_license | bokanyimonika/Szakdolgozat | R | false | false | 18,468 | r | ## ---------------------------------------------------------------------------------------------------------------------
library(readxl)
HUN_FIN_2018_R <- read_excel("C:/Users/bokan/Desktop/Egyetem/Szakdoga/Szakdolgozat/Adatbazis/HUN_FIN_2018_R.xlsx")
HUN_FIN_2018_R$Sorszám <- NULL
library("mice")
library("dplyr")
library("car")
library("HH")
library("lmtest")
library("sandwich")
library("corrplot")
## ---- message=FALSE---------------------------------------------------------------------------------------------------
imp_single18 <- mice(HUN_FIN_2018_R, m = 1, method = "pmm")
PISA2018IMP <- complete(imp_single18)
PISA2018IMP$CNT = factor(PISA2018IMP$CNT,levels = c('HUN', 'FIN'),labels = c(0, 1))
names(PISA2018IMP)[13] <- "KO_Koz"
names(PISA2018IMP)[47] <- "WLE_TeacherInterest"
names(PISA2018IMP)[50] <- "WLE_SubjectiveWellBeing"
## ---------------------------------------------------------------------------------------------------------------------
library(dplyr)
PISA2018IMP %>% dplyr::select(CNT) -> PISA2018
PISA2018$Nem = factor(PISA2018IMP$Nem,levels = c(0, 1),labels = c('Nő', 'Férfi'))
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$OF_Jo[i] ==0 && PISA2018IMP$OF_Kiv[i] == 0)
{
PISA2018$OF[i] = 'Rossz'
}
else
{
if (PISA2018IMP$OF_Jo[i] ==1)
{
PISA2018$OF[i] = 'Jó'
}
else
{
PISA2018$OF[i] = 'Kiváló'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$K_25_100[i] ==0 && PISA2018IMP$K_100_200[i] == 0 && PISA2018IMP$K_200_500[i] == 0)
{
PISA2018$Könyvek[i] = '0-25'
}
else
{
if (PISA2018IMP$K_25_100[i] ==1)
{
PISA2018$Könyvek[i] = '25-100'
}
else
{
if (PISA2018IMP$K_100_200[i] ==1)
{
PISA2018$Könyvek[i] = '100-200'
}
else
{
PISA2018$Könyvek[i] = '200-500'
}
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$TH_Koz[i] ==0 && PISA2018IMP$TH_Kiv[i] == 0)
{
PISA2018$TH[i] = 'Rossz'
}
else
{
if (PISA2018IMP$TH_Koz[i] ==1)
{
PISA2018$TH[i] = 'Közepes'
}
else
{
PISA2018$TH[i] = 'Kiváló'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$TT_Koz[i] ==0 && PISA2018IMP$TT_Jo[i] == 0 && PISA2018IMP$TT_Kiv[i] == 0)
{
PISA2018$TT[i] = 'Rossz'
}
else
{
if (PISA2018IMP$TT_Koz[i] ==1)
{
PISA2018$TT[i] = 'Közepes'
}
else
{
if (PISA2018IMP$TT_Jo[i] ==1)
{
PISA2018$TT[i] = 'Jó'
}
else
{
PISA2018$TT[i] = 'Kiváló'
}
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$KO_Koz[i] ==0 && PISA2018IMP$KO_Kiv[i] == 0)
{
PISA2018$KO[i] = 'Nincs'
}
else
{
if (PISA2018IMP$KO_Koz[i] ==1)
{
PISA2018$KO[i] = 'Közepes'
}
else
{
PISA2018$KO[i] = 'Kiváló'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$OI_Koz[i] ==0 && PISA2018IMP$OI_Sok[i] == 0)
{
PISA2018$OI[i] = 'Kevés'
}
else
{
if (PISA2018IMP$OI_Koz[i] ==1)
{
PISA2018$OI[i] = 'Közepes'
}
else
{
PISA2018$OI[i] = 'Sok'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$IK_Koz[i] ==0 && PISA2018IMP$IK_Jo[i] == 0 && PISA2018IMP$IK_Kiv[i] == 0)
{
PISA2018$IK[i] = 'Rossz'
}
else
{
if (PISA2018IMP$IK_Koz[i] ==1)
{
PISA2018$IK[i] = 'Közepes'
}
else
{
if (PISA2018IMP$IK_Jo[i] ==1)
{
PISA2018$IK[i] = 'Jó'
}
else
{
PISA2018$IK[i] = 'Kiváló'
}
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$SzT_Koz[i] ==0 && PISA2018IMP$SzT_Kiv[i] == 0)
{
PISA2018$SzT[i] = 'Rossz'
}
else
{
if (PISA2018IMP$SzT_Koz[i] ==1)
{
PISA2018$SzT[i] = 'Közepes'
}
else
{
PISA2018$SzT[i] = 'Kiváló'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$Ver_Koz[i] ==0 && PISA2018IMP$Ver_Nagy[i] == 0)
{
PISA2018$Ver[i] = 'Kicsi'
}
else
{
if (PISA2018IMP$Ver_Koz[i] ==1)
{
PISA2018$Ver[i] = 'Közepes'
}
else
{
PISA2018$Ver[i] = 'Nagy'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$Bull_Soh[i] ==0 && PISA2018IMP$Bull_Neh[i] == 0)
{
PISA2018$Bull[i] = 'Gyakran'
}
else
{
if (PISA2018IMP$Bull_Neh[i] ==1)
{
PISA2018$Bull[i] = 'Néha'
}
else
{
PISA2018$Bull[i] = 'Soha'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$OED_Koz[i] ==0 && PISA2018IMP$OED_Sok[i] == 0)
{
PISA2018$OED[i] = 'Kevés'
}
else
{
if (PISA2018IMP$OED_Koz[i] ==1)
{
PISA2018$OED[i] = 'Közepes'
}
else
{
PISA2018$OED[i] = 'Sok'
}
}
}
for (i in 1:length(PISA2018$CNT))
{
if (PISA2018IMP$DE_Van[i] ==0 && PISA2018IMP$DE_Kev[i] == 0)
{
PISA2018$DE[i] = 'Nincs'
}
else
{
if (PISA2018IMP$DE_Kev[i] ==1)
{
PISA2018$DE[i] = 'Kevés'
}
else
{
PISA2018$DE[i] = 'Sok'
}
}
}
PISA2018$Satisfact <- PISA2018IMP$Satisfact
PISA2018$WeeklyClass <- PISA2018IMP$WeeklyClass
PISA2018$MISCED <- PISA2018IMP$MISCED
PISA2018$FISCED <- PISA2018IMP$FISCED
PISA2018$BMMJ_Mother <- PISA2018IMP$BMMJ_Mother
PISA2018$BFMJ_Father <- PISA2018IMP$BFMJ_Father
PISA2018$MINS_Maths <- PISA2018IMP$MINS_Maths
PISA2018$MINS_Language <- PISA2018IMP$MINS_Language
PISA2018$ESCS <- PISA2018IMP$ESCS
PISA2018$ICT_Home <- PISA2018IMP$ICT_Home
PISA2018$ICT_School <- PISA2018IMP$ICT_School
PISA2018$WLE_Cultural <- PISA2018IMP$WLE_Cultural
PISA2018$WLE_Home <- PISA2018IMP$WLE_Home
PISA2018$WLE_Wealth <- PISA2018IMP$WLE_Wealth
PISA2018$WLE_TeacherSupport <- PISA2018IMP$WLE_TeacherSupport
PISA2018$WLE_TeacherInstruction <- PISA2018IMP$WLE_TeacherInstruction
PISA2018$WLE_Emotional <- PISA2018IMP$WLE_Emotional
PISA2018$WLE_TeacherInterest <- PISA2018IMP$WLE_TeacherInterest
PISA2018$WLE_Attitude <- PISA2018IMP$WLE_Attitude
PISA2018$WLE_Competitiveness <- PISA2018IMP$WLE_Competitiveness
PISA2018$WLE_SubjectiveWellBeing <- PISA2018IMP$WLE_SubjectiveWellBeing
PISA2018$WLE_GoalOrientation <- PISA2018IMP$WLE_GoalOrientation
PISA2018$WLE_SchoolWellBeing <- PISA2018IMP$WLE_SchoolWellBeing
PISA2018$PERCOOP <- PISA2018IMP$PERCOOP
PISA2018$GFOFAIL <- PISA2018IMP$GFOFAIL
PISA2018$PERFEED <- PISA2018IMP$PERFEED
PISA2018$RESILIENCE <- PISA2018IMP$RESILIENCE
PISA2018$PV1READ <- PISA2018IMP$PV1READ
PISA2018$PV1MATH <- PISA2018IMP$PV1MATH
PISA2018$PV1SCIE <- PISA2018IMP$PV1SCIE
PISA2018$OF <- as.factor(PISA2018$OF)
PISA2018$Könyvek <- as.factor(PISA2018$Könyvek)
PISA2018$TH <- as.factor(PISA2018$TH)
PISA2018$TT <- as.factor(PISA2018$TT)
PISA2018$KO <- as.factor(PISA2018$KO)
PISA2018$OI <- as.factor(PISA2018$OI)
PISA2018$IK <- as.factor(PISA2018$IK)
PISA2018$SzT <- as.factor(PISA2018$SzT)
PISA2018$Ver <- as.factor(PISA2018$Ver)
PISA2018$Bull <- as.factor(PISA2018$Bull)
PISA2018$OED <- as.factor(PISA2018$OED)
PISA2018$DE <- as.factor(PISA2018$DE)
## ---------------------------------------------------------------------------------------------------------------------
TreatsFIN <- subset(PISA2018IMP, PISA2018IMP$CNT == 1, select = -c(CNT))
ControlHUN <- subset(PISA2018IMP, PISA2018IMP$CNT == 0, select = -c(CNT))
summary(TreatsFIN$PV1MATH)
summary(ControlHUN$PV1MATH)
summary(TreatsFIN$PV1READ)
summary(ControlHUN$PV1READ)
summary(TreatsFIN$PV1SCIE)
summary(ControlHUN$PV1SCIE)
## ---------------------------------------------------------------------------------------------------------------------
with(PISA2018IMP, t.test(PV1MATH ~ CNT))
with(PISA2018IMP, t.test(PV1READ ~ CNT))
with(PISA2018IMP, t.test(PV1SCIE ~ CNT))
PISA2018IMP_cov <- c("FISCED", "MISCED", "ESCS")
lapply(PISA2018IMP_cov, function(v) {t.test(PISA2018IMP[, v] ~ PISA2018IMP[, 'CNT'])})
## ---------------------------------------------------------------------------------------------------------------------
pscores.model <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ,family = binomial("logit"),data = PISA2018)
summary(pscores.model)
## ---------------------------------------------------------------------------------------------------------------------
PISA2018[abs(rstudent(pscores.model))>3,] -> kilogok
PISA2018 <- PISA2018[abs(rstudent(pscores.model))<=3, ]
## ---------------------------------------------------------------------------------------------------------------------
vif(pscores.model)
corr <- PISA2018[, c(15, 16, 17,18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37)]
cor(corr)
corr2 <- PISA2018[, c(17,18, 19, 20, 23, 24, 26, 27, 28, 31)]
cor(corr2)
## ---------------------------------------------------------------------------------------------------------------------
fokomp1 <- princomp(scale(PISA2018[ , c(17,18, 19, 20, 23, 24, 26, 27, 28, 31)]))
fokomp1$sdev^2
summary(fokomp1)
#Kaiser-kritérium alapján az első 2-t hagyjuk meg
PISA2018 <- cbind(PISA2018, fokomp1$scores[ ,1:2])
## ---------------------------------------------------------------------------------------------------------------------
fokomp1$loadings
## ---------------------------------------------------------------------------------------------------------------------
colnames(PISA2018)[45:46] <- c("SocioecStat", "ParentalEduc")
## ---------------------------------------------------------------------------------------------------------------------
pscores.model2 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model2)
vif(pscores.model2)
## ---------------------------------------------------------------------------------------------------------------------
PISA2018$TT <- as.character(PISA2018$TT)
PISA2018$TT[PISA2018$TT=="Kiváló"] <- "Jó"
PISA2018$TT <- as.factor(PISA2018$TT)
pscores.model3 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model3)
PISA2018$Könyvek <- as.character(PISA2018$Könyvek)
PISA2018$Könyvek[PISA2018$Könyvek=="100-200"] <- "100-500"
PISA2018$Könyvek[PISA2018$Könyvek=="200-500"] <- "100-500"
PISA2018$Könyvek <- as.factor(PISA2018$Könyvek)
pscores.model4 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model4)
vif(pscores.model4)
## ---------------------------------------------------------------------------------------------------------------------
resettest(pscores.model4)
crPlots(pscores.model4, ~Nem)
crPlots(pscores.model4, ~OF)
crPlots(pscores.model4, ~Könyvek)
crPlots(pscores.model4, ~TH)
crPlots(pscores.model4, ~TT)
crPlots(pscores.model4, ~KO)
crPlots(pscores.model4, ~OI)
crPlots(pscores.model4, ~IK)
crPlots(pscores.model4, ~SzT)
crPlots(pscores.model4, ~Ver)
crPlots(pscores.model4, ~Bull)
crPlots(pscores.model4, ~OED)
crPlots(pscores.model4, ~DE)
crPlots(pscores.model4, ~Satisfact)
crPlots(pscores.model4, ~WeeklyClass)
crPlots(pscores.model4, ~MINS_Maths)
crPlots(pscores.model4, ~MINS_Language)
crPlots(pscores.model4, ~ICT_School)
crPlots(pscores.model4, ~WLE_TeacherSupport)
crPlots(pscores.model4, ~WLE_TeacherInstruction)
crPlots(pscores.model4, ~WLE_TeacherInterest)
crPlots(pscores.model4, ~WLE_Attitude)
crPlots(pscores.model4, ~WLE_Competitiveness)
crPlots(pscores.model4, ~WLE_SubjectiveWellBeing)
crPlots(pscores.model4, ~WLE_GoalOrientation)
crPlots(pscores.model4, ~WLE_SchoolWellBeing)
crPlots(pscores.model4, ~PERCOOP)
crPlots(pscores.model4, ~GFOFAIL)
crPlots(pscores.model4, ~PERFEED)
crPlots(pscores.model4, ~RESILIENCE)
crPlots(pscores.model4, ~SocioecStat)
crPlots(pscores.model4, ~ParentalEduc)
## ---------------------------------------------------------------------------------------------------------------------
pscores.model5 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc+pmax(MINS_Maths-875, 0),family = binomial("logit"),data = PISA2018)
summary(pscores.model5)
BIC(pscores.model5)
AIC(pscores.model5)
resettest(pscores.model5)
## ---------------------------------------------------------------------------------------------------------------------
pscores.model4 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc,family = binomial("logit"),data = PISA2018)
summary(pscores.model4)
AIC(pscores.model4)
BIC(pscores.model4)
vif(pscores.model4)
pscores.model6 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT,family = binomial("logit"),data = PISA2018)
summary(pscores.model6)
AIC(pscores.model6)
BIC(pscores.model6)
vif(pscores.model6)
pscores.model7 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED,family = binomial("logit"),data = PISA2018)
summary(pscores.model7)
AIC(pscores.model7)
BIC(pscores.model7)
vif(pscores.model7)
pscores.model8 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness,family = binomial("logit"),data = PISA2018)
summary(pscores.model8)
AIC(pscores.model8)
BIC(pscores.model8)
vif(pscores.model8)
pscores.model9 <- glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness-PERCOOP,family = binomial("logit"),data = PISA2018)
summary(pscores.model9)
AIC(pscores.model9)
BIC(pscores.model9)
vif(pscores.model9)
PropScores <- fitted(glm(as.factor(CNT) ~.-PV1MATH-PV1SCIE-PV1READ-MISCED-FISCED-BMMJ_Mother-BFMJ_Father-ESCS-ICT_Home-WLE_Cultural-WLE_Home-WLE_Emotional-WLE_Wealth+SocioecStat+ParentalEduc-SzT-PERFEED-WLE_Competitiveness-PERCOOP,family = binomial("logit"),data = PISA2018))
## ---------------------------------------------------------------------------------------------------------------------
becsles <- predict(pscores.model9, PISA2018, type="response")
PISA2018$becsult <- ifelse(becsles>0.5, 1,0)
xtabs(~CNT+becsult, PISA2018)
sum(PISA2018$CNT==PISA2018$becsult)/nrow(PISA2018)
library("pROC")
ROCgorbe <- roc(PISA2018$CNT~becsles)
plot(ROCgorbe)
2*auc(ROCgorbe)-1
## ---------------------------------------------------------------------------------------------------------------------
PISA2018$PScores <- pscores.model6$fitted.values
hist(PISA2018$PScores[PISA2018$CNT==1],main = "Pscores of Response = 1")
hist(PISA2018$PScores[PISA2018$CNT==0],main = "Pscores of Response = 0")
## ---------------------------------------------------------------------------------------------------------------------
xvars <- c("Nem","OF","Könyvek","TH","TT","KO","OI","IK","Ver","Bull","OED","DE","Satisfact","WeeklyClass","MINS_Maths","MINS_Language","ICT_School", "WLE_TeacherSupport", "WLE_TeacherInstruction", "WLE_TeacherInterest", "WLE_Attitude", "WLE_SubjectiveWellBeing", "WLE_GoalOrientation", "WLE_SchoolWellBeing", "GFOFAIL", "RESILIENCE", "SocioecStat", "ParentalEduc")
library(tableone)
table1 <- CreateTableOne(vars = xvars,strata = "CNT",data = PISA2018, test = FALSE)
print(table1, smd = TRUE)
## ---------------------------------------------------------------------------------------------------------------------
PISA2018$becsult <- NULL
library(MatchIt)
match <- matchit(CNT~.-PV1MATH-PV1READ-PV1SCIE-PScores-PERCOOP, data = PISA2018, distance=PropScores, method = "nearest")
plot(match, type="jitter")
plot(match, type="hist")
summary(match, standardized=T)
summary(match, standardized=T) -> BalanceNN
TableNN <- BalanceNN$sum.matched
## ---------------------------------------------------------------------------------------------------------------------
matchdata <- match.data(match)
table_match2 <- CreateTableOne(vars = xvars,strata = "CNT",data = matchdata,test = FALSE)
print(table_match2, smd = TRUE)
## ---------------------------------------------------------------------------------------------------------------------
with(matchdata, t.test(PV1MATH ~ CNT))
lm_treat1 <- lm(PV1MATH ~ CNT, data = matchdata)
summary(lm_treat1)
## ---------------------------------------------------------------------------------------------------------------------
mean(matchdata$PV1MATH[matchdata$CNT == 1]) - mean(matchdata$PV1MATH[matchdata$CNT == 0])
## ---------------------------------------------------------------------------------------------------------------------
library(cobalt)
m.sum <- summary(match)
plot(m.sum, var.order = "unmatched", cex=0.75)
love.plot(match, binary = "std")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_default_decisions.R
\name{add_default_decisions}
\alias{add_default_decisions}
\title{Add default decisions}
\usage{
add_default_decisions(x)
}
\arguments{
\item{x}{\code{\link{ConservationProblem-class}} object.}
}
\description{
This function adds the default decision types to a conservation planning
\code{\link{problem}}. The default types are binary and are added using
the \code{\link{add_binary_decisions}} function.
}
\seealso{
\code{\link{decisions}}.
}
| /man/add_default_decisions.Rd | no_license | IsaakBM/prioritizr | R | false | true | 545 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_default_decisions.R
\name{add_default_decisions}
\alias{add_default_decisions}
\title{Add default decisions}
\usage{
add_default_decisions(x)
}
\arguments{
\item{x}{\code{\link{ConservationProblem-class}} object.}
}
\description{
This function adds the default decision types to a conservation planning
\code{\link{problem}}. The default types are binary and are added using
the \code{\link{add_binary_decisions}} function.
}
\seealso{
\code{\link{decisions}}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SaveLoadPlp.R
\name{savePlpModel}
\alias{savePlpModel}
\title{Saves the plp model}
\usage{
savePlpModel(plpModel, dirPath)
}
\arguments{
\item{plpModel}{A trained classifier returned by running \code{runPlp()$model}}
\item{dirPath}{A location to save the model to}
}
\description{
Saves the plp model
}
\details{
Saves the plp model to a user-specified folder
}
| /man/savePlpModel.Rd | permissive | OHDSI/PatientLevelPrediction | R | false | true | 442 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SaveLoadPlp.R
\name{savePlpModel}
\alias{savePlpModel}
\title{Saves the plp model}
\usage{
savePlpModel(plpModel, dirPath)
}
\arguments{
\item{plpModel}{A trained classifier returned by running \code{runPlp()$model}}
\item{dirPath}{A location to save the model to}
}
\description{
Saves the plp model
}
\details{
Saves the plp model to a user specificed folder
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/team-types.R
\name{recode_team_type}
\alias{recode_team_type}
\title{Recode team type for labels and contrast coding.}
\usage{
recode_team_type(frame)
}
\description{
If no data is provided, the team type map is returned.
}
| /ratchets/man/recode_team_type.Rd | no_license | pedmiston/ratchets | R | false | true | 302 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/team-types.R
\name{recode_team_type}
\alias{recode_team_type}
\title{Recode team type for labels and contrast coding.}
\usage{
recode_team_type(frame)
}
\description{
If no data is provided, the team type map is returned.
}
|
#Load libraries
library(xts)
library(quantmod)
library(forecast)
library(tidyr)
library(lubridate)
library(dplyr)
library(ggplot2)
library(fpp2)
library(tsbox)
library(TSstudio)
library(tseries)
library(forecastHybrid)
library(moments)
library(opera)
library(dtw)
library(liqueueR)
library(collections)
library(calculus)
devAskNewPage(ask = FALSE)
# Get S&P500 data
start_date <- as.Date("2009-01-01")
spy <- Ad(getSymbols("SPY", auto.assign = FALSE, from=start_date, warning=FALSE))
names(spy) <- "adjusted"
spy_ts <- ts_ts(spy)
spy_ts <- na.remove(spy_ts)
# Steps
# 1 - take diff logs for stationarity
# 2 - walk forward find the top (n_similar) similar periods with appropriate
# distancing to ensure actually different periods
# - return n_similar most similar periods, with their distance measures
# 3 - prediction options: (need to convert back to untransformed scale)
# - take some weighting using the ranks of the periods (recession paper) and
# get likelihood of passing some threshold
# - approach as regression problem - generate time series forecasts from the
# original periods and combine them
# Find the historical windows of `dl_ts` most similar -- by dynamic time
# warping distance -- to the most recent `interval_length` observations.
#
# dl_ts          : differenced-log series to scan (default: diff-log S&P 500).
# interval_length: length of the reference window and of each candidate window.
# skip_val       : step between candidate start points; equal to
#                  interval_length here, so candidates do not overlap each
#                  other (the later script notes they may still need to be
#                  made non-overlapping with the reference period).
# n_top_similar  : how many of the closest candidates to return.
#
# Returns list(top_similar_intervals, min_distances, ref):
#   [[1]] the n_top_similar most similar candidate windows, closest first,
#   [[2]] their DTW normalized distances in the same order,
#   [[3]] the reference window itself.
get_similar <- function(dl_ts=diff(log(spy_ts)), interval_length=45, skip_val=45, n_top_similar=20) {
# Reference window = the last interval_length points of the series.
s_date_ref <- length(dl_ts) - interval_length + 1
ref <- subset(dl_ts, start=s_date_ref)
# Candidate starts end two window-lengths before the reference begins, so
# candidates stay clear of the period being matched.
right_start_range <- s_date_ref - 2*interval_length
# Two parallel priority queues: one holds the windows, the other their
# distances. Pushing with priority = -dist makes the smallest distance
# come out first.
# NOTE(review): both `liqueueR` and `collections` are attached earlier in
# this script; which package's priority_queue() binds here depends on
# attach order -- confirm the intended implementation.
q <- priority_queue()
q_dist <- priority_queue()
for (i in seq(1, right_start_range, skip_val)) {
end <- i + interval_length - 1
query <- subset(dl_ts, start = i, end = end)
# DTW alignment of candidate vs. reference; normalizedDistance is the
# path-length-normalized cost, comparable across windows.
alignment <- dtw(query, ref, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(query, priority = -dist)
q_dist$push(dist, priority = -dist)
}
# Keep the n_top_similar closest windows and their distances.
# NOTE(review): assumes at least n_top_similar candidates were pushed;
# with fewer, the [1:n_top_similar] subset pads the result with empty/NA
# entries -- verify for short series.
top_similar_intervals <- q$as_list()[1:n_top_similar]
min_distances <- unlist(q_dist$as_list()[1:n_top_similar], use.names = FALSE)
return(list(top_similar_intervals, min_distances, ref))
}
# Exploratory scratch: inspect the out-of-sample window after the top
# similar interval.  NOTE(review): `ints` is only assigned further down
# (ints <- ls[[1]]); this file reads as saved console history and does not
# run cleanly top-to-bottom.
int <- ints[[1]]
end_int <- end(int)
window_length <- 22
dl_ts <- diff(log(spy_ts))
freq <- frequency(int)
# end() yields a c(year, period) pair; the scalar offset is added to both
# components (convention used throughout this script).
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
window_after %>% autoplot()
threshold<- 0.01
# Count observations outside [-threshold, threshold].
outside_thresh <- sum(!(window_after <= threshold & window_after >= -threshold))
outside_thresh
# modify get_similar() to make periods non overlapping
# define a diff log threshold
# define a time window
# label the intervals with 0 and 1 depending on whether or not the threshold was crossed within the next time_window days
# use rank ordered centroids for the weights
# find probabilities with dot product
# Rank-order centroid (ROC) weights for n ranked items: the i-th weight is
# mean(1/i, 1/(i+1), ..., 1/n).  Weights sum to 1 and decrease with rank,
# so the most similar interval gets the largest weight.
roc <- function(n) {
  recip <- 1 / seq_len(n)
  # Reverse cumulative sum yields sum(recip[i:n]) for every i in one pass.
  rev(cumsum(rev(recip))) / n
}
# Similarity-weighted probability that |return| exceeds `threshold` within
# the window_length observations following each similar interval: ROC
# weights dotted with per-interval 0/1 exceed indicators, giving a value
# in [0, 1].  Depends on the sibling roc() defined above.
prob_exceeding <- function(similar_intervals, dl_ts=diff(log(spy_ts)), threshold=0.05, window_length=22) {
# One 0/1 indicator per interval.
exceed <- numeric(length(similar_intervals))
for (i in 1:length(similar_intervals)) {
interval <- similar_intervals[[i]]
end_int <- end(interval)
freq <- frequency(interval)
# end() is a c(year, period) pair; scalar offsets are added to both
# components (same convention used throughout this script).
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
# Number of following observations outside [-threshold, threshold].
outside_thresh <- sum(!(window_after <= threshold & window_after >= -threshold))
if (outside_thresh > 0) exceed[i] <- 1
}
# Note: this local `roc` vector shadows the roc() function from here on.
roc <- roc(length(similar_intervals))
prob_exceeds <- sum(roc * exceed)
return(prob_exceeds)
}
# Run the similarity search and overlay the matched intervals on the price
# series ("Current" = the reference window).
ls <- get_similar()
ints <- ls[[1]]
ref <- ls[[3]]
p <- autoplot(spy_ts)
for (i in 1:length(ints)) {
interval <- ints[[i]]
start <- start(interval)
end <- end(interval)
window <- window(spy_ts, start=start, end=end)
p <- p + autolayer(window, colour = TRUE, series = paste(i))
}
p <- p + autolayer(window(spy_ts, start=start(ref), end=end(ref)), colour = TRUE, series="Current")
print(p)
top_int <- ints[[1]]
top_int
s <- start(top_int)
e <- end(top_int)
# dollar progress (during similar ints)
# Compound each interval's diff-log values as if they were simple returns
# and print the terminal value of $1.
for (interval in ints) {
curr <- numeric(length(interval) + 1)
curr[1] <- 1
for (i in 1:length(interval)) {
curr[i + 1] <- curr[i] * (1 + interval[i])
}
print(curr[length(curr)])
}
# Cumulative "growth of $1" over the window_length observations immediately
# following each similar interval.
#
# Args:
#   similar_intervals - list of ts windows (most- to least-similar order)
#   dl_ts             - return series the windows index into.  NOTE(review):
#                       the default is diff-log values, but they are
#                       compounded below as simple returns (small-return
#                       approximation) -- confirm intended.
#   threshold         - unused; kept for interface compatibility
#   window_length     - number of out-of-sample observations per interval
#
# Returns: (window_length + 1) x length(similar_intervals) matrix; row 1
# is the initial $1, column j tracks the j-th interval's following window.
dollar_progress <- function(similar_intervals, dl_ts=diff(log(spy_ts)), threshold=0.05, window_length=22) {
  dollar_performance <- matrix(nrow=(window_length+1), ncol=0)
  for (i in seq_along(similar_intervals)) {
    interval <- similar_intervals[[i]]
    end_int <- end(interval)
    freq <- frequency(interval)
    # end() is c(year, period); scalar offsets are added to both components
    # (convention used throughout this script).
    start <- end_int + (1/freq)
    end <- end_int + (window_length/freq)
    window_after <- window(dl_ts, start=start, end = end)
    curr <- numeric(length(window_after) + 1)
    curr[1] <- 1
    # Distinct inner index: the original reused `i`, shadowing the outer
    # loop variable within each iteration.
    for (j in seq_along(window_after)) {
      curr[j + 1] <- curr[j] * (1 + window_after[j])
    }
    dollar_performance <- cbind(dollar_performance, curr)
  }
  # BUG FIX: the original hard-coded `colnames(...) <- 1:20`, which errors
  # whenever the number of intervals differs from 20.
  colnames(dollar_performance) <- seq_along(similar_intervals)
  return(dollar_performance)
}
# Out-of-sample dollar paths for the matched intervals, plus summary stats
# of the terminal values (last matrix row).
returns_mat <- dollar_progress(ints)
matplot(returns_mat, type="l")
ints
mean(returns_mat[nrow(returns_mat), ])
sd(returns_mat[nrow(returns_mat), ])
# forecast with similar
# for each of the top similar intervals - run opera for the duration of the period,
# come up with weights - weight the weights with roc?
spy_ts
# Similarity-weighted expert weights: for each similar interval, fit
# HW/ETS/ARIMA models, run an opera MLpol mixture against the observations
# that followed the interval, and accumulate the resulting expert-weight
# paths using ROC weights.  Depends on the sibling roc() and the forecast
# and opera packages.
p_m_forecast <- function(similar_intervals, curr_interval, test_len=22, num_models=3, ts=spy_ts) {
roc <- roc(length(similar_intervals))
# One row per test step, one column per expert (HW, ETS, ARIMA).
weight_mat <- matrix(0, nrow=(test_len), ncol=num_models)
for (i in 1:length(similar_intervals)) {
interval <- similar_intervals[[i]]
end_int = end(interval)
freq <- frequency(interval)
# Out-of-sample window following the interval (end() is c(year, period)).
start <- end_int+(1/freq)
end <- end_int + (test_len/freq)
test <- window(ts, start=start, end = end)
h <- length(test)
# NOTE(review): the models are fit on the global `train` (assigned later
# in this script), not on data derived from `interval`, so every iteration
# fits identical models -- confirm whether `train` should be the interval.
HW <- forecast(HoltWinters(train, gamma=FALSE), h=h)
ETS <- forecast(ets(train), h=h)
ARIMA <- forecast(auto.arima(train), h=h)
X <- cbind(HW=HW$mean, ETS=ETS$mean, ARIMA=ARIMA$mean)
# `df` is built but never used below; `curr_interval` is also unused.
df <- cbind(spy_ts, X)
colnames(df) <- c("SPY" ,"HW", "ETS", "ARIMA")
MLpol0 <- mixture(model="MLpol", loss.type="square")
weights <- predict(MLpol0, X, test, type="weights")
# Accumulate this interval's weight path, scaled by its ROC weight.
weight_mat <- weight_mat + weights * roc[i]
}
return(weight_mat)
}
# Fit three models on the most recent train_len observations and blend
# their h-step forecasts with the similarity-derived weights.
train_len <- 45
h <- 22
train <- subset(spy_ts, start = length(spy_ts) - train_len + 1)
arima_fc <- forecast(auto.arima(train, biasadj=TRUE),h=h)$mean
hw_fc <- forecast(HoltWinters(train, gamma=FALSE), h=h)$mean
ets_fc <- forecast(ets(train), h=h)$mean
fc_mat <- cbind(hw_fc, ets_fc, arima_fc)
w <- p_m_forecast(ints)
View(fc_mat)
View(w)
ncol(w)
nrow(fc_mat)
ncol(fc_mat)
weighted_fc <- numeric(nrow(fc_mat))
fc_mat
w
# Row-wise dot product of model forecasts with their weights.
for (i in 1:nrow(fc_mat)) {
weighted_fc[i] <- sum(w[i,]*fc_mat[i,])
}
plot(weighted_fc)
weighted_fc
# do this for rolling x-day windows treated as "recent" - make sure all have
# past out sample, only use past observations for each
# get N top similar past intervals
# get NEXT_WINDOW-length periods, for each of these periods
# for ALL intervals (recent, top similar, next-windows following top similar), get
# dollar performance
# variance of returns
# skewness and kurtosis of return distribution
#
#
#
#
# NOTE(review): the six lines below look like a stray console leftover of
# the dollar_progress() body; they reference `interval`/`window_length`
# left over from earlier scratch work and have no effect on later code.
end_int <- end(interval)
freq <- frequency(interval)
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
curr <- numeric(length(window_after) + 1)
# Split [range_start, range_end] of a ts into consecutive windows of
# window_length observations, separated by dist_between_windows
# observations.
#
# Args:
#   range_start, range_end - c(year, period) pairs on time_series' calendar
#   window_length          - window span, in observations
#   dist_between_windows   - gap between consecutive windows, in observations
#
# Returns: list of ts windows, in order.
#
# NOTE(review): the ts convention for decimal time is year + (period-1)/freq;
# this uses period/freq, shifting every window one observation late --
# confirm intended.
get_rolling_windows <- function(time_series, range_start, range_end, window_length, dist_between_windows) {
  rolling_windows <- list()
  freq <- frequency(time_series)
  # Convert c(year, period) to decimal time.
  start_int <- range_start[1] + range_start[2]/freq
  end_int <- start_int + window_length/freq
  rolling_windows[[1]] <- window(time_series, start=start_int, end=end_int)
  range_end_index <- range_end[1] + range_end[2]/freq
  i <- 1
  # Keep appending windows until one starts past the requested range end.
  # (The original also built an unused `new_int` copy of each window;
  # removed here.)
  while (end_int < range_end_index) {
    i <- i + 1
    start_int <- end_int + dist_between_windows/freq
    end_int <- start_int + window_length/freq
    rolling_windows[[i]] <- window(time_series, start=start_int, end=end_int)
  }
  return(rolling_windows)
}
# Scratch experiments with window()/list semantics (console history).
k <- get_rolling_windows(spy_ts, c(2016, 1), c(2020, 5), 22, 22)
x <- 1
while (x < 5) {
x <- x + 1
print("ed")
}
l <- list(numeric)
l[[1]] <- 5
l <- append(l, 2)
l
s <- floor(start(spy_ts))
f <- frequency(spy_ts)
s <- c(2009, 2)
w <- window(spy_ts, start=s, end=s+2)
w
# NOTE(review): `freq` here is whatever global was left behind by earlier
# scratch code, not frequency(spy_ts).
k <- window(spy_ts, start=(2009+1/freq))
length(k)
length(w)
start(spy_ts)
class(w)
# call as
# return
# list of window indices - correspond to (2016, 1, 1)-(2016, 2, 1), etc
# Admissible historical search range for a query window: from the start of
# the series up to two query-window lengths before the query begins, so
# candidate intervals cannot overlap the query.
#
# Returns: c(range_start, range_end) as scalar decimal times.
get_rolling_range <- function(time_series, query_window) {
  # BUG FIX: start()/end() return c(year, period) vectors, so the original
  # returned a length-4 vector whose callers (vec[1], vec[2]) then read the
  # start *year* and start *period* instead of the start/end times.  tsp()
  # gives scalar decimal times directly.
  start <- tsp(time_series)[1]
  end <- tsp(query_window)[1] - 2 * length(query_window) / frequency(time_series)
  return(c(start, end))
}
# First rolling window and its admissible historical search range.
ex_rolling <- k[[1]]
length(ex_rolling)
vec <- get_rolling_range(spy_ts, ex_rolling)
# Walk the diff-log series over [start_range, end_range] and return the
# num_top_intervals windows most similar to query_window by DTW normalized
# distance.  Returns list(top_similar_intervals, min_distances,
# query_window).  Relies on dtw() and a non-stdlib priority_queue() --
# NOTE(review): as below in get_similar(), this assumes as_list() yields
# elements in priority order.
get_top_similar_intervals <- function(time_series_original, query_window, start_range, end_range, num_top_intervals) {
# Stationarize the raw series before distance matching.
time_series <- diff(log(time_series_original))
interval_length <- length(query_window)
q <- priority_queue()
q_dist <- priority_queue()
freq <- frequency(time_series)
# Score the first candidate window at the start of the range.
start_int <- start_range
end_int <- start_int + interval_length/freq
curr_window <- window(time_series, start=start_int, end=end_int)
alignment <- dtw(curr_window, query_window, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(curr_window, priority = -dist)
q_dist$push(dist, priority = -dist)
# NOTE(review): each step advances the *start* by a full window beyond the
# previous *end*, leaving a one-window gap between consecutive candidates
# -- confirm this spacing is intended rather than back-to-back windows.
while (end_int < end_range) {
start_int <- end_int + interval_length/freq
end_int <- start_int + interval_length/freq
curr_window <- window(time_series, start=start_int, end=end_int)
alignment <- dtw(curr_window, query_window, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(curr_window, priority = -dist)
q_dist$push(dist, priority = -dist)
}
top_similar_intervals <- q$as_list()[1:num_top_intervals]
min_distances <- unlist(q_dist$as_list()[1:num_top_intervals], use.names = FALSE)
return(list(top_similar_intervals, min_distances, query_window))
}
# Similarity search for the example rolling window.
sim <- get_top_similar_intervals(spy_ts, ex_rolling, vec[1], vec[2], 20)
sim[[1]]
# For each interval, extract the same-length window of time_series that
# immediately follows it.
#
# Args:
#   time_series - the series to slice
#   intervals   - list of ts windows taken from time_series
#
# Returns: list of ts windows, one per input interval, in order.
get_following_windows <- function(time_series, intervals) {
  following_windows <- vector("list", length(intervals))
  # BUG FIX: the original read `freq` from the global environment.
  freq <- frequency(time_series)
  for (j in seq_along(intervals)) {
    interval <- intervals[[j]]
    # end() returns c(year, period); adding scalar offsets to it mirrors
    # the convention used elsewhere in this script.
    start <- end(interval) + 1/freq
    end <- start + length(interval)/freq
    # BUG FIX: the original called append() and discarded its result
    # (append does not modify in place), so an empty list was returned.
    following_windows[[j]] <- window(time_series, start = start, end = end)
  }
  return(following_windows)
}
# TODO: unimplemented stub -- compute the return series for a window
# (currently returns NULL).
get_returns <- function(window) {
}
# TODO: unimplemented stub -- summarize a return series (variance,
# skewness, kurtosis per the plan above; currently returns NULL).
compute_summary_statistics <- function(returns) {
}
# pseudocode
# what is interesting
# can direction be predicted? (if 15/20 ended up at positive, will this one end up as positive too)
# can dispersion be predicted (if past similar were volatile, will returns be volatile right after this one too)
# NOTE(review): everything below is planning pseudocode, not runnable R --
# `c(start_range, end_range) <- ...` is not a valid assignment, and
# get_following()/windows() are not defined anywhere in this file.
rolling_windows <- get_rolling_windows()
for (qry_window in rolling_windows) {
following_window_actual <- get_following(qry_window)
c(start_range, end_range) <- get_rolling_range(qry_window)
similar_ints <- get_top_similar_intervals(qry_window, start_range, end_range)
following_windows_similar <- get_following(windows(similar_ints))
# 1 direction
# dollar progress, weighted by similarity, of "following windows similar"
# dollar progress of following window actual
# save both in list to graph/visualize later
# save single values as data points to plot, calculate correlation, etc
# 2 volatility
# same as above with volatility instead of dollar progress
}
| /pattern_matching.R | no_license | edufheinsen/forecasting | R | false | false | 11,494 | r | #Load libraries
library(xts)
library(quantmod)
library(forecast)
library(tidyr)
library(lubridate)
library(dplyr)
library(ggplot2)
library(fpp2)
library(tsbox)
library(TSstudio)
library(tseries)
library(forecastHybrid)
library(moments)
library(opera)
library(dtw)
library(liqueueR)
library(collections)
library(calculus)
devAskNewPage(ask = FALSE)
# Get S&P500 data
start_date <- as.Date("2009-01-01")
spy <- Ad(getSymbols("SPY", auto.assign = FALSE, from=start_date, warning=FALSE))
names(spy) <- "adjusted"
spy_ts <- ts_ts(spy)
spy_ts <- na.remove(spy_ts)
# Steps
# 1 - take diff logs for stationarity
# 2 - walk forward find the top (n_similar) similar periods with appropriate
# distancing to ensure actually different periods
# - return n_similar most similar periods, with their distance measures
# 3 - prediction options: (need to convert back to untransformed scale)
# - take some weighting using the ranks of the periods (recession paper) and
# get likelihood of passing some threshold
# - approach as regression problem - generate time series forecasts from the
# original periods and combine them
# Find the n_top_similar historical windows of dl_ts most similar, by DTW
# normalized distance, to the most recent interval_length observations.
# Returns list(top_similar_intervals, min_distances, ref) ordered from most
# to least similar.  Relies on dtw() and on a priority_queue() constructor
# from one of the attached packages (liqueueR/collections) --
# NOTE(review): confirm which package's priority_queue is dispatched and
# that as_list() returns elements in priority order; the head-taking below
# assumes it does.
get_similar <- function(dl_ts=diff(log(spy_ts)), interval_length=45, skip_val=45, n_top_similar=20) {
# Reference window: the last interval_length observations of the series.
s_date_ref <- length(dl_ts) - interval_length + 1
ref <- subset(dl_ts, start=s_date_ref)
# Last admissible candidate start, kept two window-lengths clear of the
# reference so candidates cannot overlap it.
right_start_range <- s_date_ref - 2*interval_length
# Priority is negated distance, so smaller distances rank first.
q <- priority_queue()
q_dist <- priority_queue()
for (i in seq(1, right_start_range, skip_val)) {
end <- i + interval_length - 1
query <- subset(dl_ts, start = i, end = end)
alignment <- dtw(query, ref, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(query, priority = -dist)
q_dist$push(dist, priority = -dist)
}
top_similar_intervals <- q$as_list()[1:n_top_similar]
min_distances <- unlist(q_dist$as_list()[1:n_top_similar], use.names = FALSE)
return(list(top_similar_intervals, min_distances, ref))
}
int <- ints[[1]]
end_int <- end(int)
window_length <- 22
dl_ts <- diff(log(spy_ts))
freq <- frequency(int)
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
window_after %>% autoplot()
threshold<- 0.01
outside_thresh <- sum(!(window_after <= threshold & window_after >= -threshold))
outside_thresh
# modify get_similar() to make periods non overlapping
# define a diff log threshold
# define a time window
# label the intervals with 0 and 1 depending on whether or not the threshold was crossed within the next time_window days
# use rank ordered centroids for the weights
# find probabilities with dot product
# Rank-order centroid (ROC) weights for n ranked items: the i-th weight is
# mean(1/i, 1/(i+1), ..., 1/n).  Weights sum to 1 and decrease with rank,
# so the most similar interval gets the largest weight.
roc <- function(n) {
  recip <- 1 / seq_len(n)
  # Reverse cumulative sum yields sum(recip[i:n]) for every i in one pass.
  rev(cumsum(rev(recip))) / n
}
# Similarity-weighted probability that |return| exceeds `threshold` within
# the window_length observations following each similar interval: ROC
# weights dotted with per-interval 0/1 exceed indicators, giving a value
# in [0, 1].  Depends on the sibling roc() defined above.
prob_exceeding <- function(similar_intervals, dl_ts=diff(log(spy_ts)), threshold=0.05, window_length=22) {
# One 0/1 indicator per interval.
exceed <- numeric(length(similar_intervals))
for (i in 1:length(similar_intervals)) {
interval <- similar_intervals[[i]]
end_int <- end(interval)
freq <- frequency(interval)
# end() is a c(year, period) pair; scalar offsets are added to both
# components (same convention used throughout this script).
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
# Number of following observations outside [-threshold, threshold].
outside_thresh <- sum(!(window_after <= threshold & window_after >= -threshold))
if (outside_thresh > 0) exceed[i] <- 1
}
# Note: this local `roc` vector shadows the roc() function from here on.
roc <- roc(length(similar_intervals))
prob_exceeds <- sum(roc * exceed)
return(prob_exceeds)
}
ls <- get_similar()
ints <- ls[[1]]
ref <- ls[[3]]
p <- autoplot(spy_ts)
for (i in 1:length(ints)) {
interval <- ints[[i]]
start <- start(interval)
end <- end(interval)
window <- window(spy_ts, start=start, end=end)
p <- p + autolayer(window, colour = TRUE, series = paste(i))
}
p <- p + autolayer(window(spy_ts, start=start(ref), end=end(ref)), colour = TRUE, series="Current")
print(p)
top_int <- ints[[1]]
top_int
s <- start(top_int)
e <- end(top_int)
# dollar progress (during similar ints)
for (interval in ints) {
curr <- numeric(length(interval) + 1)
curr[1] <- 1
for (i in 1:length(interval)) {
curr[i + 1] <- curr[i] * (1 + interval[i])
}
print(curr[length(curr)])
}
# dollar progress (out of sample)
# Cumulative "growth of $1" over the window_length observations immediately
# following each similar interval.
#
# Args:
#   similar_intervals - list of ts windows (most- to least-similar order)
#   dl_ts             - return series the windows index into.  NOTE(review):
#                       the default is diff-log values, but they are
#                       compounded below as simple returns (small-return
#                       approximation) -- confirm intended.
#   threshold         - unused; kept for interface compatibility
#   window_length     - number of out-of-sample observations per interval
#
# Returns: (window_length + 1) x length(similar_intervals) matrix; row 1
# is the initial $1, column j tracks the j-th interval's following window.
dollar_progress <- function(similar_intervals, dl_ts=diff(log(spy_ts)), threshold=0.05, window_length=22) {
  dollar_performance <- matrix(nrow=(window_length+1), ncol=0)
  for (i in seq_along(similar_intervals)) {
    interval <- similar_intervals[[i]]
    end_int <- end(interval)
    freq <- frequency(interval)
    # end() is c(year, period); scalar offsets are added to both components
    # (convention used throughout this script).
    start <- end_int + (1/freq)
    end <- end_int + (window_length/freq)
    window_after <- window(dl_ts, start=start, end = end)
    curr <- numeric(length(window_after) + 1)
    curr[1] <- 1
    # Distinct inner index: the original reused `i`, shadowing the outer
    # loop variable within each iteration.
    for (j in seq_along(window_after)) {
      curr[j + 1] <- curr[j] * (1 + window_after[j])
    }
    dollar_performance <- cbind(dollar_performance, curr)
  }
  # BUG FIX: the original hard-coded `colnames(...) <- 1:20`, which errors
  # whenever the number of intervals differs from 20.
  colnames(dollar_performance) <- seq_along(similar_intervals)
  return(dollar_performance)
}
returns_mat <- dollar_progress(ints)
matplot(returns_mat, type="l")
ints
mean(returns_mat[nrow(returns_mat), ])
sd(returns_mat[nrow(returns_mat), ])
# forecast with similar
# for each of the top similar intervals - run opera for the duration of the period,
# come up with weights - weight the weights with roc?
spy_ts
# Similarity-weighted expert weights: for each similar interval, fit
# HW/ETS/ARIMA models, run an opera MLpol mixture against the observations
# that followed the interval, and accumulate the resulting expert-weight
# paths using ROC weights.  Depends on the sibling roc() and the forecast
# and opera packages.
p_m_forecast <- function(similar_intervals, curr_interval, test_len=22, num_models=3, ts=spy_ts) {
roc <- roc(length(similar_intervals))
# One row per test step, one column per expert (HW, ETS, ARIMA).
weight_mat <- matrix(0, nrow=(test_len), ncol=num_models)
for (i in 1:length(similar_intervals)) {
interval <- similar_intervals[[i]]
end_int = end(interval)
freq <- frequency(interval)
# Out-of-sample window following the interval (end() is c(year, period)).
start <- end_int+(1/freq)
end <- end_int + (test_len/freq)
test <- window(ts, start=start, end = end)
h <- length(test)
# NOTE(review): the models are fit on the global `train` (assigned later
# in this script), not on data derived from `interval`, so every iteration
# fits identical models -- confirm whether `train` should be the interval.
HW <- forecast(HoltWinters(train, gamma=FALSE), h=h)
ETS <- forecast(ets(train), h=h)
ARIMA <- forecast(auto.arima(train), h=h)
X <- cbind(HW=HW$mean, ETS=ETS$mean, ARIMA=ARIMA$mean)
# `df` is built but never used below; `curr_interval` is also unused.
df <- cbind(spy_ts, X)
colnames(df) <- c("SPY" ,"HW", "ETS", "ARIMA")
MLpol0 <- mixture(model="MLpol", loss.type="square")
weights <- predict(MLpol0, X, test, type="weights")
# Accumulate this interval's weight path, scaled by its ROC weight.
weight_mat <- weight_mat + weights * roc[i]
}
return(weight_mat)
}
train_len <- 45
h <- 22
train <- subset(spy_ts, start = length(spy_ts) - train_len + 1)
arima_fc <- forecast(auto.arima(train, biasadj=TRUE),h=h)$mean
hw_fc <- forecast(HoltWinters(train, gamma=FALSE), h=h)$mean
ets_fc <- forecast(ets(train), h=h)$mean
fc_mat <- cbind(hw_fc, ets_fc, arima_fc)
w <- p_m_forecast(ints)
View(fc_mat)
View(w)
ncol(w)
nrow(fc_mat)
ncol(fc_mat)
weighted_fc <- numeric(nrow(fc_mat))
fc_mat
w
for (i in 1:nrow(fc_mat)) {
weighted_fc[i] <- sum(w[i,]*fc_mat[i,])
}
plot(weighted_fc)
weighted_fc
# do this for rolling x-day windows treated as "recent" - make sure all have
# past out sample, only use past observations for each
# get N top similar past intervals
# get NEXT_WINDOW-length periods, for each of these periods
# for ALL intervals (recent, top similar, next-windows following top similar), get
# dollar performance
# variance of returns
# skewness and kurtosis of return distribution
#
#
#
#
end_int <- end(interval)
freq <- frequency(interval)
start <- end_int+(1/freq)
end <- end_int + (window_length/freq)
window_after <- window(dl_ts, start=start, end = end)
curr <- numeric(length(window_after) + 1)
# Split [range_start, range_end] of a ts into consecutive windows of
# window_length observations, separated by dist_between_windows
# observations.
#
# Args:
#   range_start, range_end - c(year, period) pairs on time_series' calendar
#   window_length          - window span, in observations
#   dist_between_windows   - gap between consecutive windows, in observations
#
# Returns: list of ts windows, in order.
#
# NOTE(review): the ts convention for decimal time is year + (period-1)/freq;
# this uses period/freq, shifting every window one observation late --
# confirm intended.
get_rolling_windows <- function(time_series, range_start, range_end, window_length, dist_between_windows) {
  rolling_windows <- list()
  freq <- frequency(time_series)
  # Convert c(year, period) to decimal time.
  start_int <- range_start[1] + range_start[2]/freq
  end_int <- start_int + window_length/freq
  rolling_windows[[1]] <- window(time_series, start=start_int, end=end_int)
  range_end_index <- range_end[1] + range_end[2]/freq
  i <- 1
  # Keep appending windows until one starts past the requested range end.
  # (The original also built an unused `new_int` copy of each window;
  # removed here.)
  while (end_int < range_end_index) {
    i <- i + 1
    start_int <- end_int + dist_between_windows/freq
    end_int <- start_int + window_length/freq
    rolling_windows[[i]] <- window(time_series, start=start_int, end=end_int)
  }
  return(rolling_windows)
}
k <- get_rolling_windows(spy_ts, c(2016, 1), c(2020, 5), 22, 22)
x <- 1
while (x < 5) {
x <- x + 1
print("ed")
}
l <- list(numeric)
l[[1]] <- 5
l <- append(l, 2)
l
s <- floor(start(spy_ts))
f <- frequency(spy_ts)
s <- c(2009, 2)
w <- window(spy_ts, start=s, end=s+2)
w
k <- window(spy_ts, start=(2009+1/freq))
length(k)
length(w)
start(spy_ts)
class(w)
# call as
# return
# list of window indices - correspond to (2016, 1, 1)-(2016, 2, 1), etc
# Admissible historical search range for a query window: from the start of
# the series up to two query-window lengths before the query begins, so
# candidate intervals cannot overlap the query.
#
# Returns: c(range_start, range_end) as scalar decimal times.
get_rolling_range <- function(time_series, query_window) {
  # BUG FIX: start()/end() return c(year, period) vectors, so the original
  # returned a length-4 vector whose callers (vec[1], vec[2]) then read the
  # start *year* and start *period* instead of the start/end times.  tsp()
  # gives scalar decimal times directly.
  start <- tsp(time_series)[1]
  end <- tsp(query_window)[1] - 2 * length(query_window) / frequency(time_series)
  return(c(start, end))
}
ex_rolling <- k[[1]]
length(ex_rolling)
vec <- get_rolling_range(spy_ts, ex_rolling)
# Walk the diff-log series over [start_range, end_range] and return the
# num_top_intervals windows most similar to query_window by DTW normalized
# distance.  Returns list(top_similar_intervals, min_distances,
# query_window).  Relies on dtw() and a non-stdlib priority_queue() --
# NOTE(review): as below in get_similar(), this assumes as_list() yields
# elements in priority order.
get_top_similar_intervals <- function(time_series_original, query_window, start_range, end_range, num_top_intervals) {
# Stationarize the raw series before distance matching.
time_series <- diff(log(time_series_original))
interval_length <- length(query_window)
q <- priority_queue()
q_dist <- priority_queue()
freq <- frequency(time_series)
# Score the first candidate window at the start of the range.
start_int <- start_range
end_int <- start_int + interval_length/freq
curr_window <- window(time_series, start=start_int, end=end_int)
alignment <- dtw(curr_window, query_window, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(curr_window, priority = -dist)
q_dist$push(dist, priority = -dist)
# NOTE(review): each step advances the *start* by a full window beyond the
# previous *end*, leaving a one-window gap between consecutive candidates
# -- confirm this spacing is intended rather than back-to-back windows.
while (end_int < end_range) {
start_int <- end_int + interval_length/freq
end_int <- start_int + interval_length/freq
curr_window <- window(time_series, start=start_int, end=end_int)
alignment <- dtw(curr_window, query_window, keep=TRUE)
dist <- alignment$normalizedDistance
q$push(curr_window, priority = -dist)
q_dist$push(dist, priority = -dist)
}
top_similar_intervals <- q$as_list()[1:num_top_intervals]
min_distances <- unlist(q_dist$as_list()[1:num_top_intervals], use.names = FALSE)
return(list(top_similar_intervals, min_distances, query_window))
}
sim <- get_top_similar_intervals(spy_ts, ex_rolling, vec[1], vec[2], 20)
sim[[1]]
# For each interval, extract the same-length window of time_series that
# immediately follows it.
#
# Args:
#   time_series - the series to slice
#   intervals   - list of ts windows taken from time_series
#
# Returns: list of ts windows, one per input interval, in order.
get_following_windows <- function(time_series, intervals) {
  following_windows <- vector("list", length(intervals))
  # BUG FIX: the original read `freq` from the global environment.
  freq <- frequency(time_series)
  for (j in seq_along(intervals)) {
    interval <- intervals[[j]]
    # end() returns c(year, period); adding scalar offsets to it mirrors
    # the convention used elsewhere in this script.
    start <- end(interval) + 1/freq
    end <- start + length(interval)/freq
    # BUG FIX: the original called append() and discarded its result
    # (append does not modify in place), so an empty list was returned.
    following_windows[[j]] <- window(time_series, start = start, end = end)
  }
  return(following_windows)
}
# TODO: unimplemented stub -- compute the return series for a window
# (currently returns NULL).
get_returns <- function(window) {
}
# TODO: unimplemented stub -- summarize a return series (variance,
# skewness, kurtosis per the plan above; currently returns NULL).
compute_summary_statistics <- function(returns) {
}
# pseudocode
# what is interesting
# can direction be predicted? (if 15/20 ended up at positive, will this one end up as positive too)
# can dispersion be predicted (if past similar were volatile, will returns be volatile right after this one too)
rolling_windows <- get_rolling_windows()
for (qry_window in rolling_windows) {
following_window_actual <- get_following(qry_window)
c(start_range, end_range) <- get_rolling_range(qry_window)
similar_ints <- get_top_similar_intervals(qry_window, start_range, end_range)
following_windows_similar <- get_following(windows(similar_ints))
# 1 direction
# dollar progress, weighted by similarity, of "following windows similar"
# dollar progress of following window actual
# save both in list to graph/visualize later
# save single values as data points to plot, calculate correlation, etc
# 2 volatility
# same as above with volatility instead of dollar progress
}
|
#######################################
#
# High-dimensional clustering methods test
#
# Author: Luis B. Rei
# Created: 29/12/2017
#
#######################################
# Clear workspace
# NOTE(review): rm(list=ls()) in a shared script is an anti-pattern -- it
# wipes the caller's environment; prefer restarting R.  require() is also
# used where library() (which errors on a missing package) is safer.
rm(list=ls())
graphics.off()
require(microbenchmark)
library(subspace)
library(haven)
##############################
# SYNTHENTIC DATA
##############################
set.seed(12)
data(subspace_dataset)
#Load the true clustering for this dataset
path_to_clustering <- paste(path.package("subspace"),"/extdata/subspace_dataset.true",sep="")
clust0 <- clustering_from_file(file_path=path_to_clustering)
p <- ggvis::prop(property="shape",x="cross")
#plot(clust0,subspace_dataset,props=p)
# Run each subspace-clustering algorithm on the synthetic benchmark data.
clust1 <- ProClus(subspace_dataset,k=10,d=2.5)
#plot(clust1,subspace_dataset)
clust2 <- CLIQUE(subspace_dataset, xi = 40, tau = 0.07)
#plot(clust2,subspace_dataset)
clust3 <- SubClu(subspace_dataset,epsilon=1,minSupport=5)
#plot(clust3,subspace_dataset)
clust4 <- FIRES(subspace_dataset)
#plot(clust4,subspace_dataset)
clust5 <- P3C(subspace_dataset,PoissonThreshold=3)
#plot(clust5,subspace_dataset)
##############################
# BREAST CANCER DATA
##############################
# Load TCCGA data
# Interactive file picker: the user supplies an SPSS (.sav) file.
db = file.choose()
data = read_sav(db)
data <- na.omit(data) # listwise deletion of missing
# Columns 2-10 are treated as features; column 11 as the class label.
data.res <- data[,2:10]
data.class <- data[,11]
clust6 <- ProClus(data.res,k=2,d=2.5)
#plot(clust6,data.res)
# NOTE(review): tau differs from the synthetic run (0.5 vs 0.07), and the
# res2 benchmark below uses 0.07 again -- confirm which is intended.
clust7 <- CLIQUE(data.res, xi = 40, tau = 0.5)
#plot(clust7,data.res)
clust8 <- SubClu(data.res,epsilon=1,minSupport=5)
#plot(clust8,data.res)
clust9 <- FIRES(data.res)
#plot(clust9,data.res)
clust10 <- P3C(data.res,PoissonThreshold=3)
#plot(clust10,data.res)
#######################
# BENCHMARKS
#######################
# Time 25 repetitions of every algorithm on both datasets.
res1 <- microbenchmark(ProClus(subspace_dataset,k=10,d=2.5), CLIQUE(subspace_dataset, xi = 40, tau = 0.07), SubClu(subspace_dataset,epsilon=1,minSupport=5), FIRES(subspace_dataset), P3C(subspace_dataset,PoissonThreshold=3), times=25)
res2 <- microbenchmark(ProClus(data.res,k=2,d=2.5), CLIQUE(data.res, xi = 40, tau = 0.07), SubClu(data.res,epsilon=1,minSupport=5), FIRES(data.res), P3C(data.res,PoissonThreshold=3), times=25)
| /Preliminary_results/subspace_high_dim_clust.R | no_license | luisrei/explore-IIEEC | R | false | false | 2,298 | r | #######################################
#
# High-dimensional clustering methods test
#
# Author: Luis B. Rei
# Created: 29/12/2017
#
#######################################
# Clear workspace
rm(list=ls())
graphics.off()
require(microbenchmark)
library(subspace)
library(haven)
##############################
# SYNTHENTIC DATA
##############################
set.seed(12)
data(subspace_dataset)
#Load the true clustering for this dataset
path_to_clustering <- paste(path.package("subspace"),"/extdata/subspace_dataset.true",sep="")
clust0 <- clustering_from_file(file_path=path_to_clustering)
p <- ggvis::prop(property="shape",x="cross")
#plot(clust0,subspace_dataset,props=p)
clust1 <- ProClus(subspace_dataset,k=10,d=2.5)
#plot(clust1,subspace_dataset)
clust2 <- CLIQUE(subspace_dataset, xi = 40, tau = 0.07)
#plot(clust2,subspace_dataset)
clust3 <- SubClu(subspace_dataset,epsilon=1,minSupport=5)
#plot(clust3,subspace_dataset)
clust4 <- FIRES(subspace_dataset)
#plot(clust4,subspace_dataset)
clust5 <- P3C(subspace_dataset,PoissonThreshold=3)
#plot(clust5,subspace_dataset)
##############################
# BREAST CANCER DATA
##############################
# Load TCCGA data
db = file.choose()
data = read_sav(db)
data <- na.omit(data) # listwise deletion of missing
data.res <- data[,2:10]
data.class <- data[,11]
clust6 <- ProClus(data.res,k=2,d=2.5)
#plot(clust6,data.res)
clust7 <- CLIQUE(data.res, xi = 40, tau = 0.5)
#plot(clust7,data.res)
clust8 <- SubClu(data.res,epsilon=1,minSupport=5)
#plot(clust8,data.res)
clust9 <- FIRES(data.res)
#plot(clust9,data.res)
clust10 <- P3C(data.res,PoissonThreshold=3)
#plot(clust10,data.res)
#######################
# BENCHMARKS
#######################
res1 <- microbenchmark(ProClus(subspace_dataset,k=10,d=2.5), CLIQUE(subspace_dataset, xi = 40, tau = 0.07), SubClu(subspace_dataset,epsilon=1,minSupport=5), FIRES(subspace_dataset), P3C(subspace_dataset,PoissonThreshold=3), times=25)
res2 <- microbenchmark(ProClus(data.res,k=2,d=2.5), CLIQUE(data.res, xi = 40, tau = 0.07), SubClu(data.res,epsilon=1,minSupport=5), FIRES(data.res), P3C(data.res,PoissonThreshold=3), times=25)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDescriptionStatsBy.R
\name{getDescriptionStatsBy}
\alias{getDescriptionStatsBy}
\title{Creating description statistics}
\usage{
getDescriptionStatsBy(x, by, digits = 1, html = TRUE,
numbers_first = TRUE, statistics = FALSE, statistics.sig_lim = 10^-4,
statistics.two_dec_lim = 10^-2, useNA = c("ifany", "no", "always"),
useNA.digits = digits, continuous_fn = describeMean,
prop_fn = describeProp, factor_fn = describeFactors,
show_all_values = FALSE, hrzl_prop = FALSE, add_total_col,
total_col_show_perc = TRUE, use_units = FALSE, default_ref,
NEJMstyle = FALSE, percentage_sign = TRUE, header_count, ...)
}
\arguments{
\item{x}{The variable that you want the statistics for}
\item{by}{The variable that you want to split into different
columns}
\item{digits}{The number of decimals used}
\item{html}{If HTML compatible output should be used. If \code{FALSE}
it outputs LaTeX formatting}
\item{numbers_first}{If the number should be given or if the percentage
should be presented first. The second is encapsulated in parentheses ().}
\item{statistics}{Add statistics, fisher test for proportions and Wilcoxon
for continuous variables. See details below for more customization.}
\item{statistics.sig_lim}{The significance limit for < sign, i.e. p-value 0.0000312
should be < 0.0001 with the default setting.}
\item{statistics.two_dec_lim}{The limit for showing two decimals. E.g.
the p-value may be 0.056 and we may want to keep the two decimals in order
to emphasize the proximity to the all-mighty 0.05 p-value and set this to
\eqn{10^-2}. This allows that a value of 0.0056 is rounded to 0.006 and this
makes intuitive sense as the 0.0056 level as this is well below
the 0.05 value and thus not as interesting to know the exact proximity to
0.05. \emph{Disclaimer:} The 0.05-limit is really silly and debated, unfortunately
it remains a standard and this package tries to adapt to the current standards in order
to limit publication associated issues.}
\item{useNA}{This indicates if missing should be added as a separate
row below all other. See \code{\link[base]{table}} for \code{useNA}-options.
\emph{Note:} defaults to ifany and not "no" as \code{\link[base]{table}} does.}
\item{useNA.digits}{The number of digits to use for the
missing percentage, defaults to the overall \code{digits}.}
\item{continuous_fn}{The method to describe continuous variables. The
default is \code{\link{describeMean}}.}
\item{prop_fn}{The method used to describe proportions, see \code{\link{describeProp}}.}
\item{factor_fn}{The method used to describe factors, see \code{\link{describeFactors}}.}
\item{show_all_values}{This is by default false as for instance if there is
no missing and there is only one variable then it is most sane to only show
one option as the other one will just be a complement to the first. For instance
sex - if you know gender then automatically you know the distribution of the
other sex as it's 100 \% - other \%. To choose which one you want to show then
set the \code{default_ref} parameter.}
\item{hrzl_prop}{This is default FALSE and indicates
that the proportions are to be interpreted in a vertical manner.
If we want the data to be horizontal, i.e. the total should be shown
and then how these differ in the different groups then set this to TRUE.}
\item{add_total_col}{This adds a total column to the resulting table.
You can also specify if you want the total column "first" or "last"
in the column order.}
\item{total_col_show_perc}{This is by default true but if
requested the percentages are suppressed as this sometimes may be confusing.}
\item{use_units}{If the Hmisc package's units() function has been employed
it may be interesting to have a column at the far right that indicates the
unit measurement. If this column is specified then the total column will
appear before the units (if specified as last). You can also set the value to
\code{"name"} and the units will be added to the name as a parenthesis,
e.g. Age (years).}
\item{default_ref}{The default reference, either first,
the level name or a number within the levels. If left out
it defaults to the first value.}
\item{NEJMstyle}{Adds - no (\%) at the end to proportions}
\item{percentage_sign}{If you want to suppress the percentage sign you
can set this variable to FALSE. You can also choose something other than
the default \% if you so wish by setting this variable.}
\item{header_count}{Set to \code{TRUE} if you want to add a header count,
e.g. Smoking; No. 25 observations, where there is a new line after the
factor name. If you want a different text for the second line you can
specify it using the \code{\link[base]{sprintf}} formatting, e.g. "No. \%s patients".}
\item{...}{Currently only used for generating warnings of deprecated call
parameters.}
}
\value{
Returns a vector if vars wasn't specified and it's a
continuous or binary statistic. If vars was a matrix then it
appends the result to the end of that matrix. If the x variable
is a factor then it does not append and you get a warning.
}
\description{
A function that returns a description statistic that can be used
for creating a publication "table 1" when you want it by groups.
The function identifies if the variable is a continuous, binary
or a factored variable. The format is inspired by NEJM, Lancet &
BMJ.
}
\section{Customizing statistics}{
You can specify what function that you want for statistic by providing a function
that takes two arguments \code{x} and \code{by} and returns a p-value. There are
a few functions already prepared for this see \code{\link{getPvalAnova}},
\code{\link{getPvalChiSq}}
\code{\link{getPvalFisher}}
\code{\link{getPvalKruskal}}
\code{\link{getPvalWilcox}}.
The default functions used are \code{getPvalFisher} and \code{getPvalWilcox} (unless the by
argument has more than three unique levels where it defaults to \code{getPvalAnova}).
If you want the function to select functions depending on the type of input
you can provide a list with the names \code{'continuous'}, \code{'proportion'}, \code{'factor'} and
the function will choose accordingly. If you fail to define a certain category
it will default to the above.
}
\examples{
data(mtcars)
# For labelling we use the label()
# function from the Hmisc package
library(Hmisc)
label(mtcars$mpg) <- "Gas"
units(mtcars$mpg) <- "Miles/(US) gallon"
label(mtcars$wt) <- "Weight"
units(mtcars$wt) <- "10<sup>3</sup> kg" # not sure the unit is correct
mtcars$am <- factor(mtcars$am, levels=0:1, labels=c("Automatic", "Manual"))
label(mtcars$am) <- "Transmission"
mtcars$gear <- factor(mtcars$gear)
label(mtcars$gear) <- "Gears"
# Make up some data for making it slightly more interesting
mtcars$col <- factor(sample(c("red", "black", "silver"),
size=NROW(mtcars), replace=TRUE))
label(mtcars$col) <- "Car color"
mergeDesc(getDescriptionStatsBy(mtcars$mpg, mtcars$am,
header_count = TRUE,
use_units = TRUE),
getDescriptionStatsBy(mtcars$wt, mtcars$am,
header_count = TRUE,
use_units = TRUE),
htmlTable_args = list(caption = "Basic continuous stats from the mtcars dataset"))
tll <- list()
tll[["Gear (3 to 5)"]] <- getDescriptionStatsBy(mtcars$gear, mtcars$am)
tll <- c(tll,
list(getDescriptionStatsBy(mtcars$col, mtcars$am)))
mergeDesc(tll,
htmlTable_args = list(caption = "Factored variables"))
tl_no_units <- list()
tl_no_units[["Gas (mile/gallons)"]] <-
getDescriptionStatsBy(mtcars$mpg, mtcars$am,
header_count = TRUE)
tl_no_units[["Weight (10<sup>3</sup> kg)"]] <-
getDescriptionStatsBy(mtcars$wt, mtcars$am,
header_count = TRUE)
mergeDesc(tl_no_units, tll,
# Remove the formatting for the groups
htmlTable_args = list(css.rgroup = ""))
# A little more advanced
mtcars$mpg[sample(1:NROW(mtcars), size=5)] <- NA
getDescriptionStatsBy(mtcars$mpg, mtcars$am, statistics=TRUE)
# Do the horizontal version
getDescriptionStatsBy(mtcars$col, mtcars$am,
statistics=TRUE, hrzl_prop = TRUE)
mtcars$wt_with_missing <- mtcars$wt
mtcars$wt_with_missing[sample(1:NROW(mtcars), size=8)] <- NA
getDescriptionStatsBy(mtcars$wt_with_missing, mtcars$am, statistics=TRUE,
hrzl_prop = TRUE, total_col_show_perc = FALSE)
mtcars$col_with_missing <- mtcars$col
mtcars$col_with_missing[sample(1:NROW(mtcars), size=5)] <- NA
getDescriptionStatsBy(mtcars$col_with_missing, mtcars$am, statistics=TRUE,
hrzl_prop = TRUE, total_col_show_perc = FALSE)
\dontrun{
## There is also a LaTeX wrapper
tll <- list(
getDescriptionStatsBy(mtcars$gear, mtcars$am),
getDescriptionStatsBy(mtcars$col, mtcars$am))
latex(mergeDesc(tll),
caption = "Factored variables",
file="")
}
}
\seealso{
Other descriptive functions: \code{\link{describeFactors}},
\code{\link{describeMean}}, \code{\link{describeMedian}},
\code{\link{describeProp}}, \code{\link{getPvalWilcox}}
}
| /man/getDescriptionStatsBy.Rd | no_license | aeron15/Gmisc | R | false | true | 9,191 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getDescriptionStatsBy.R
\name{getDescriptionStatsBy}
\alias{getDescriptionStatsBy}
\title{Creating description statistics}
\usage{
getDescriptionStatsBy(x, by, digits = 1, html = TRUE,
numbers_first = TRUE, statistics = FALSE, statistics.sig_lim = 10^-4,
statistics.two_dec_lim = 10^-2, useNA = c("ifany", "no", "always"),
useNA.digits = digits, continuous_fn = describeMean,
prop_fn = describeProp, factor_fn = describeFactors,
show_all_values = FALSE, hrzl_prop = FALSE, add_total_col,
total_col_show_perc = TRUE, use_units = FALSE, default_ref,
NEJMstyle = FALSE, percentage_sign = TRUE, header_count, ...)
}
\arguments{
\item{x}{The variable that you want the statistics for}
\item{by}{The variable that you want to split into different
columns}
\item{digits}{The number of decimals used}
\item{html}{If HTML compatible output should be used. If \code{FALSE}
it outputs LaTeX formatting}
\item{numbers_first}{If the number should be given or if the percentage
should be presented first. The second is encapsulated in parentheses ().}
\item{statistics}{Add statistics, fisher test for proportions and Wilcoxon
for continuous variables. See details below for more customization.}
\item{statistics.sig_lim}{The significance limit for < sign, i.e. p-value 0.0000312
should be < 0.0001 with the default setting.}
\item{statistics.two_dec_lim}{The limit for showing two decimals. E.g.
the p-value may be 0.056 and we may want to keep the two decimals in order
to emphasize the proximity to the all-mighty 0.05 p-value and set this to
\eqn{10^-2}. This allows that a value of 0.0056 is rounded to 0.006 and this
makes intuitive sense as the 0.0056 level as this is well below
the 0.05 value and thus not as interesting to know the exact proximity to
0.05. \emph{Disclaimer:} The 0.05-limit is really silly and debated, unfortunately
it remains a standard and this package tries to adapt to the current standards in order
to limit publication associated issues.}
\item{useNA}{This indicates if missing should be added as a separate
row below all other. See \code{\link[base]{table}} for \code{useNA}-options.
\emph{Note:} defaults to ifany and not "no" as \code{\link[base]{table}} does.}
\item{useNA.digits}{The number of digits to use for the
missing percentage, defaults to the overall \code{digits}.}
\item{continuous_fn}{The method to describe continuous variables. The
default is \code{\link{describeMean}}.}
\item{prop_fn}{The method used to describe proportions, see \code{\link{describeProp}}.}
\item{factor_fn}{The method used to describe factors, see \code{\link{describeFactors}}.}
\item{show_all_values}{This is by default false as for instance if there is
no missing and there is only one variable then it is most sane to only show
one option as the other one will just be a complement to the first. For instance
sex - if you know gender then automatically you know the distribution of the
other sex as it's 100 \% - other \%. To choose which one you want to show then
set the \code{default_ref} parameter.}
\item{hrzl_prop}{This is default FALSE and indicates
that the proportions are to be interpreted in a vertical manner.
If we want the data to be horizontal, i.e. the total should be shown
and then how these differ in the different groups then set this to TRUE.}
\item{add_total_col}{This adds a total column to the resulting table.
You can also specify if you want the total column "first" or "last"
in the column order.}
\item{total_col_show_perc}{This is by default true but if
requested the percentages are suppressed as this sometimes may be confusing.}
\item{use_units}{If the Hmisc package's units() function has been employed
it may be interesting to have a column at the far right that indicates the
unit measurement. If this column is specified then the total column will
appear before the units (if specified as last). You can also set the value to
\code{"name"} and the units will be added to the name as a parenthesis,
e.g. Age (years).}
\item{default_ref}{The default reference, either first,
the level name or a number within the levels. If left out
it defaults to the first value.}
\item{NEJMstyle}{Adds - no (\%) at the end to proportions}
\item{percentage_sign}{If you want to suppress the percentage sign you
can set this variable to FALSE. You can also choose something other than
the default \% if you so wish by setting this variable.}
\item{header_count}{Set to \code{TRUE} if you want to add a header count,
e.g. Smoking; No. 25 observations, where there is a new line after the
factor name. If you want a different text for the second line you can
specify it using the \code{\link[base]{sprintf}} formatting, e.g. "No. \%s patients".}
\item{...}{Currently only used for generating warnings of deprecated call
parameters.}
}
\value{
Returns a vector if vars wasn't specified and it's a
continuous or binary statistic. If vars was a matrix then it
appends the result to the end of that matrix. If the x variable
is a factor then it does not append and you get a warning.
}
\description{
A function that returns a description statistic that can be used
for creating a publication "table 1" when you want it by groups.
The function identifies if the variable is a continuous, binary
or a factored variable. The format is inspired by NEJM, Lancet &
BMJ.
}
\section{Customizing statistics}{
You can specify what function that you want for statistic by providing a function
that takes two arguments \code{x} and \code{by} and returns a p-value. There are
a few functions already prepared for this see \code{\link{getPvalAnova}},
\code{\link{getPvalChiSq}}
\code{\link{getPvalFisher}}
\code{\link{getPvalKruskal}}
\code{\link{getPvalWilcox}}.
The default functions used are \code{getPvalFisher} and \code{getPvalWilcox} (unless the by
argument has more than three unique levels where it defaults to \code{getPvalAnova}).
If you want the function to select functions depending on the type of input
you can provide a list with the names \code{'continuous'}, \code{'proportion'}, \code{'factor'} and
the function will choose accordingly. If you fail to define a certain category
it will default to the above.
}
\examples{
data(mtcars)
# For labelling we use the label()
# function from the Hmisc package
library(Hmisc)
label(mtcars$mpg) <- "Gas"
units(mtcars$mpg) <- "Miles/(US) gallon"
label(mtcars$wt) <- "Weight"
units(mtcars$wt) <- "10<sup>3</sup> kg" # not sure the unit is correct
mtcars$am <- factor(mtcars$am, levels=0:1, labels=c("Automatic", "Manual"))
label(mtcars$am) <- "Transmission"
mtcars$gear <- factor(mtcars$gear)
label(mtcars$gear) <- "Gears"
# Make up some data for making it slightly more interesting
mtcars$col <- factor(sample(c("red", "black", "silver"),
size=NROW(mtcars), replace=TRUE))
label(mtcars$col) <- "Car color"
mergeDesc(getDescriptionStatsBy(mtcars$mpg, mtcars$am,
header_count = TRUE,
use_units = TRUE),
getDescriptionStatsBy(mtcars$wt, mtcars$am,
header_count = TRUE,
use_units = TRUE),
htmlTable_args = list(caption = "Basic continuous stats from the mtcars dataset"))
tll <- list()
tll[["Gear (3 to 5)"]] <- getDescriptionStatsBy(mtcars$gear, mtcars$am)
tll <- c(tll,
list(getDescriptionStatsBy(mtcars$col, mtcars$am)))
mergeDesc(tll,
htmlTable_args = list(caption = "Factored variables"))
tl_no_units <- list()
tl_no_units[["Gas (mile/gallons)"]] <-
getDescriptionStatsBy(mtcars$mpg, mtcars$am,
header_count = TRUE)
tl_no_units[["Weight (10<sup>3</sup> kg)"]] <-
getDescriptionStatsBy(mtcars$wt, mtcars$am,
header_count = TRUE)
mergeDesc(tl_no_units, tll,
# Remove the formatting for the groups
htmlTable_args = list(css.rgroup = ""))
# A little more advanced
mtcars$mpg[sample(1:NROW(mtcars), size=5)] <- NA
getDescriptionStatsBy(mtcars$mpg, mtcars$am, statistics=TRUE)
# Do the horizontal version
getDescriptionStatsBy(mtcars$col, mtcars$am,
statistics=TRUE, hrzl_prop = TRUE)
mtcars$wt_with_missing <- mtcars$wt
mtcars$wt_with_missing[sample(1:NROW(mtcars), size=8)] <- NA
getDescriptionStatsBy(mtcars$wt_with_missing, mtcars$am, statistics=TRUE,
hrzl_prop = TRUE, total_col_show_perc = FALSE)
mtcars$col_with_missing <- mtcars$col
mtcars$col_with_missing[sample(1:NROW(mtcars), size=5)] <- NA
getDescriptionStatsBy(mtcars$col_with_missing, mtcars$am, statistics=TRUE,
hrzl_prop = TRUE, total_col_show_perc = FALSE)
\dontrun{
## There is also a LaTeX wrapper
tll <- list(
getDescriptionStatsBy(mtcars$gear, mtcars$am),
getDescriptionStatsBy(mtcars$col, mtcars$am))
latex(mergeDesc(tll),
caption = "Factored variables",
file="")
}
}
\seealso{
Other descriptive functions: \code{\link{describeFactors}},
\code{\link{describeMean}}, \code{\link{describeMedian}},
\code{\link{describeProp}}, \code{\link{getPvalWilcox}}
}
|
# Project setup: load the in-development rmaize package plus tree/plot/
# embedding dependencies, and define project-wide paths.
# library() is preferred over require() for hard dependencies: require()
# only warns (and returns FALSE) when a package is missing, so a failure
# would otherwise surface later with a confusing error.
library(devtools)
load_all("~/git/rmaize")  # dev build of rmaize (provides read_gspread_master, rnaseq_cpm, otheme, ...)
library(ape)
library(ggtree)
library(ggforce)
library(Rtsne)
# Project directory layout
dirp = '~/projects/rnaseq'                 # project root
dird = file.path(dirp, 'data')             # data directory
dirc = '/scratch.global/zhoux379/rnaseq'   # scratch space for large intermediates
# Master sample configuration, fetched from a Google spreadsheet
t_cfg = read_gspread_master(lib='rnaseq')
#f_yml = file.path(dird, '10.cfg.yaml')
#Sys.setenv("R_CONFIG_FILE" = f_yml)
# Load CPM-normalized expression for a single RNA-seq study and tidy its
# sample table.
#
# Args:
#   yid: study identifier (e.g. 'rn10a'), forwarded to rnaseq_cpm()
# Returns:
#   list(th = sample metadata table, tm = expression table); SampleIDs in
#   both tables are prefixed with the study id ("<yid>_<SampleID>").
read_rnaseq <- function(yid) {
#{{{
# rnaseq_cpm() comes from the rmaize package loaded at the top of this file;
# presumably returns list(th = sample table, tm = CPM table) -- TODO confirm
res = rnaseq_cpm(yid)
th = res$th; tm = res$tm
# normalize metadata columns: fill missing values and force character type
th = th %>% replace_na(list(Tissue='',Genotype='B73',Treatment='')) %>%
mutate(Tissue=as.character(Tissue)) %>%
mutate(Genotype=as.character(Genotype)) %>%
mutate(Treatment=as.character(Treatment))
# studies whose Treatment column encodes a tissue/stage label rather than
# an experimental treatment
yids_dev = c('rn10a','rn11a','rn13b','rn14b','rn14c','rn14e',"rn16b","rn16c","rn18g")
# study-specific sample filtering / relabeling
if(yid == 'rn12a') {
th = th %>% filter(Treatment == 'WT')
} else if(yid == 'rn17c') {
th = th %>% filter(Treatment == 'con')
} else if(yid %in% c(yids_dev,'rn19c')) {
if(yid == 'rn13b') th = th %>% filter(!str_detect(Treatment, "^ET"))
if(yid == 'rn18g') th = th %>% filter(Genotype == 'B73')
# fold the treatment label into the Tissue name; keep the study id as Treatment
th = th %>% mutate(Tissue=str_c(Tissue,Treatment, sep="_")) %>%
mutate(Treatment=yid)
}
# prefix sample IDs with the study id and keep a fixed column set
th = th %>% mutate(study = yid) %>%
mutate(SampleID = str_c(study, SampleID, sep='_')) %>%
replace_na(list(Treatment='')) %>%
select(SampleID, Tissue, Genotype, Treatment, Replicate, study)
# apply the same ID prefix to the expression table and drop filtered samples
tm = tm %>% mutate(SampleID = str_c(yid, SampleID, sep='_')) %>%
filter(SampleID %in% th$SampleID)
list(th=th, tm=tm)
#}}}
}
# Parse a MultiQC Trimmomatic summary table into per-sample read counts.
#
# Args:
#   fi:     path to the MultiQC trimmomatic TSV
#   paired: TRUE for paired-end runs, FALSE for single-end, or 'both' when
#           the table mixes single- and paired-end samples
# Returns:
#   tibble with SampleID, total, surviving, surviving_f, surviving_r, dropped
read_multiqc_trimmomatic <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        # single-end sanity check: input_reads must equal surviving + dropped
        nd = ti %>% mutate(nd = input_reads - surviving - dropped) %>%
            summarise(nd = sum(nd)) %>% pull(nd)
        stopifnot(nd == 0)
        to = ti %>% mutate(SampleID = Sample, total = input_reads,
                           surviving_f = 0, surviving_r = 0)
    } else if (isTRUE(paired) || paired == 'both') {
        # paired-end: sample names carry a "_<suffix>" (read-pair tag) to strip
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf) %>%
            mutate(surviving_f = forward_only_surviving,
                   surviving_r = reverse_only_surviving)
        if (paired == 'both')
            # mixed SE/PE table: fall back to input_reads when input_read_pairs is missing
            ti2 = ti2 %>%
                replace_na(list('input_reads' = 0, 'input_read_pairs' = 0,
                                'surviving_f' = 0, 'surviving_r' = 0)) %>%
                mutate(input_read_pairs =
                       ifelse(input_read_pairs == 0, input_reads, input_read_pairs))
        # sanity check: every input read pair must be accounted for
        nd = ti2 %>% mutate(nd = input_read_pairs - surviving -
                            surviving_f - surviving_r - dropped) %>%
            summarise(nd = sum(nd)) %>% pull(nd)
        stopifnot(nd == 0)
        to = ti2 %>% mutate(total = input_read_pairs)
    } else {
        stop(sprintf("unsupported option: %s", paired))
    }
    to %>%
        select(SampleID, total, surviving, surviving_f, surviving_r, dropped)
    #}}}
}
# Parse a MultiQC STAR summary table into per-sample alignment counts.
#
# Args:
#   fi:     path to the MultiQC STAR TSV
#   paired: TRUE if sample names carry a "_<suffix>" read-pair/lane tag to strip
# Returns:
#   tibble with SampleID, uniquely_mapped, multimapped, unmapped (rows of the
#   same sample collapsed by summation)
read_multiqc_star <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>% mutate(SampleID = Sample) %>% select(-Sample)
    } else {
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf)
    }
    ti2 = ti2 %>%
        transmute(SampleID = SampleID, total = total_reads,
                  uniquely_mapped = uniquely_mapped,
                  multimapped = multimapped + multimapped_toomany,
                  unmapped = unmapped_mismatches + unmapped_tooshort + unmapped_other,
                  nd = total - uniquely_mapped - multimapped - unmapped)
    # nd is the (small) rounding discrepancy between the category sums and the
    # reported total; as.numeric() guards against integer overflow when summing
    # (consistent with read_multiqc_featurecounts)
    stopifnot(sum(as.numeric(ti2$nd)) < 1000)
    # collapse multiple rows (lanes / read pairs) of the same sample
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC HISAT2 summary table into per-sample alignment counts.
#
# Args:
#   fi:     path to the MultiQC HISAT2 TSV
#   paired: TRUE for paired-end libraries (counts taken from paired_* columns)
# Returns:
#   tibble with SampleID, uniquely_mapped, multimapped, unmapped
read_multiqc_hisat2 <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = unpaired_total,
                      uniquely_mapped = unpaired_aligned_one,
                      multimapped = unpaired_aligned_multi,
                      unmapped = unpaired_aligned_none)
    } else {
        # NOTE: unpaired mates of paired-end runs are deliberately excluded;
        # only read pairs are counted (see the commented alternative below).
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      #total = paired_total + unpaired_total,
                      #uniquely_mapped = paired_aligned_one+paired_aligned_discord_one+unpaired_aligned_one,
                      #multimapped = paired_aligned_multi+unpaired_aligned_multi,
                      #unmapped = paired_aligned_none+unpaired_aligned_none)
                      total = paired_total,
                      uniquely_mapped = paired_aligned_one + paired_aligned_discord_one,
                      multimapped = paired_aligned_multi,
                      unmapped = paired_aligned_none)
    }
    # nd is the discrepancy between category sums and the reported total;
    # as.numeric() guards against integer overflow when summing (consistent
    # with read_multiqc_featurecounts). The leftover debug cat() was removed.
    ti2 = ti2 %>% mutate(nd = total - uniquely_mapped - multimapped - unmapped)
    stopifnot(sum(as.numeric(ti2$nd)) < 1000)
    # collapse multiple rows of the same sample
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC featureCounts summary table into per-sample assignment counts.
#
# Args:
#   fi: path to the MultiQC featureCounts TSV
# Returns:
#   tibble with SampleID, Assigned, Unassigned_MultiMapping,
#   Unassigned_NoFeatures, Unassigned_Ambiguity, Unassigned_Unmapped
read_multiqc_featurecounts <- function(fi) {
    #{{{
    ti = read_tsv(fi)
    ti2 = ti %>% mutate(SampleID = Sample) %>%
        select(SampleID, Total, Assigned, Unassigned_Unmapped,
               Unassigned_MultiMapping,
               Unassigned_NoFeatures, Unassigned_Ambiguity) %>%
        mutate(nd = Total - Assigned - Unassigned_Unmapped -
               Unassigned_MultiMapping - Unassigned_NoFeatures - Unassigned_Ambiguity)
    # every read must fall into exactly one category;
    # as.numeric() avoids integer overflow on large totals
    stopifnot(sum(as.numeric(ti2$nd)) == 0)
    #
    ti2 %>% select(SampleID, Assigned, Unassigned_MultiMapping,
                   Unassigned_NoFeatures, Unassigned_Ambiguity,
                   Unassigned_Unmapped)
    #}}}
}
# Draw a PCA scatter plot (PC1 vs PC2) of samples and save it to file.
#
# Args:
#   tp:      data frame with columns PC1, PC2 plus whichever metadata
#            columns the chosen `opt` maps (Tissue, Genotype, Treatment,
#            Replicate, sid)
#   fo:      output image path (passed to ggsave)
#   opt:     aesthetic preset string of the form 'col=<v>,sha=<v>,lab=<v>'
#            (col = color, sha = shape, lab = text label)
#   labsize: text size for geom_text_repel labels
#   wd, ht:  output width / height in inches (ggsave units)
#
# NOTE(review): xlab, ylab and shapes are free variables -- presumably set
# in the caller's environment before this function runs; TODO confirm.
# The branches differ only in the aes() mapping plus a few scale/guide
# quirks; the heavy duplication is a refactoring candidate.
plot_pca0 <- function(tp, fo, opt = 'col=tis', labsize = 2.5, wd = 8, ht = 8) {
#{{{
if(opt == 'col=tis,sha=rep') {
#{{{
# color by tissue, shape by replicate
p1 = ggplot(tp, aes(x = PC1, y = PC2, color = Tissue, shape = Replicate)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=tis,sha=rep') {
#{{{
# text label by tissue, shape by replicate
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Tissue, shape = Replicate)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=tre,sha=rep') {
#{{{
# text label by treatment, shape by replicate
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Treatment, shape = Replicate)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=tre') {
#{{{
# text label by treatment only
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Treatment)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=gen,sha=tis') {
#{{{
# text label by genotype, shape by tissue (note: no scale_color_d3 here)
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Genotype, shape = Tissue)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=tis,sha=gen') {
#{{{
# text label by tissue, shape by genotype
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Tissue, shape = Genotype)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'lab=tis,col=sid,sha=sid') {
#{{{
# text label by tissue; color and shape both by sample/study id
p1 = ggplot(tp, aes(x = PC1, y = PC2, label = Tissue, color = sid, shape = sid)) +
geom_point(size = 1.5) +
geom_text_repel(size = labsize) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'col=tis,sha=tre') {
#{{{
# color by tissue, shape by treatment
# (note: this branch sets `fill =` rather than `color =` in guides())
p1 = ggplot(tp, aes(x = PC1, y = PC2, color = Tissue, shape = Treatment)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', fill = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'col=tis') {
#{{{
# color by tissue only (no shape scale)
p1 = ggplot(tp, aes(x = PC1, y = PC2, color = Tissue)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'col=tis,sha=tis') {
#{{{
# color and shape both by tissue
p1 = ggplot(tp, aes(x = PC1, y = PC2, color = Tissue, shape = Tissue)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'col=tre,sha=rep') {
#{{{
# color by treatment, shape by replicate
p1 = ggplot(tp, aes(x = PC1, y = PC2, color = Treatment, shape = Replicate)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else if(opt == 'sha=rep') {
#{{{
# shape by replicate only
p1 = ggplot(tp, aes(x = PC1, y = PC2, shape = Replicate)) +
geom_point(size = 1.5) +
scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
scale_color_d3() +
scale_shape_manual(values = shapes) +
guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
guides(shape = guide_legend(ncol = 1, byrow = T)) +
otheme(legend.pos = 'top.left', xgrid = T, ygrid = T, xtitle = T, ytitle = T, xtext = T, ytext = T)
#}}}
} else {
stop(sprintf("unknown opt: %s", opt))
}
ggsave(p1, filename = fo, width = wd, height = ht)
#}}}
}
| /src/functions.R | permissive | 1010stone/rnaseq | R | false | false | 14,000 | r | require(devtools)
# Project setup: load the in-development rmaize package plus tree/plot/
# embedding dependencies, and define project-wide paths.
# library() is preferred over require() for hard dependencies: require()
# only warns (and returns FALSE) when a package is missing.
load_all("~/git/rmaize")  # dev build of rmaize (provides read_gspread_master, rnaseq_cpm, otheme, ...)
library(ape)
library(ggtree)
library(ggforce)
library(Rtsne)
# Project directory layout
dirp = '~/projects/rnaseq'                 # project root
dird = file.path(dirp, 'data')             # data directory
dirc = '/scratch.global/zhoux379/rnaseq'   # scratch space for large intermediates
# Master sample configuration, fetched from a Google spreadsheet
t_cfg = read_gspread_master(lib='rnaseq')
#f_yml = file.path(dird, '10.cfg.yaml')
#Sys.setenv("R_CONFIG_FILE" = f_yml)
# Load CPM-normalized expression for a single RNA-seq study and tidy its
# sample table.
#
# Args:
#   yid: study identifier (e.g. 'rn10a'), forwarded to rnaseq_cpm()
# Returns:
#   list(th = sample metadata table, tm = expression table); SampleIDs in
#   both tables are prefixed with the study id ("<yid>_<SampleID>").
read_rnaseq <- function(yid) {
#{{{
# rnaseq_cpm() comes from the rmaize package loaded at the top of this file;
# presumably returns list(th = sample table, tm = CPM table) -- TODO confirm
res = rnaseq_cpm(yid)
th = res$th; tm = res$tm
# normalize metadata columns: fill missing values and force character type
th = th %>% replace_na(list(Tissue='',Genotype='B73',Treatment='')) %>%
mutate(Tissue=as.character(Tissue)) %>%
mutate(Genotype=as.character(Genotype)) %>%
mutate(Treatment=as.character(Treatment))
# studies whose Treatment column encodes a tissue/stage label rather than
# an experimental treatment
yids_dev = c('rn10a','rn11a','rn13b','rn14b','rn14c','rn14e',"rn16b","rn16c","rn18g")
# study-specific sample filtering / relabeling
if(yid == 'rn12a') {
th = th %>% filter(Treatment == 'WT')
} else if(yid == 'rn17c') {
th = th %>% filter(Treatment == 'con')
} else if(yid %in% c(yids_dev,'rn19c')) {
if(yid == 'rn13b') th = th %>% filter(!str_detect(Treatment, "^ET"))
if(yid == 'rn18g') th = th %>% filter(Genotype == 'B73')
# fold the treatment label into the Tissue name; keep the study id as Treatment
th = th %>% mutate(Tissue=str_c(Tissue,Treatment, sep="_")) %>%
mutate(Treatment=yid)
}
# prefix sample IDs with the study id and keep a fixed column set
th = th %>% mutate(study = yid) %>%
mutate(SampleID = str_c(study, SampleID, sep='_')) %>%
replace_na(list(Treatment='')) %>%
select(SampleID, Tissue, Genotype, Treatment, Replicate, study)
# apply the same ID prefix to the expression table and drop filtered samples
tm = tm %>% mutate(SampleID = str_c(yid, SampleID, sep='_')) %>%
filter(SampleID %in% th$SampleID)
list(th=th, tm=tm)
#}}}
}
# Parse a MultiQC Trimmomatic summary table into per-sample read counts.
#
# Args:
#   fi:     path to the MultiQC trimmomatic TSV
#   paired: TRUE for paired-end runs, FALSE for single-end, or 'both' when
#           the table mixes single- and paired-end samples
# Returns:
#   tibble with SampleID, total, surviving, surviving_f, surviving_r, dropped
read_multiqc_trimmomatic <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        # single-end sanity check: input_reads must equal surviving + dropped
        nd = ti %>% mutate(nd = input_reads - surviving - dropped) %>%
            summarise(nd = sum(nd)) %>% pull(nd)
        stopifnot(nd == 0)
        to = ti %>% mutate(SampleID = Sample, total = input_reads,
                           surviving_f = 0, surviving_r = 0)
    } else if (isTRUE(paired) || paired == 'both') {
        # paired-end: sample names carry a "_<suffix>" (read-pair tag) to strip
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf) %>%
            mutate(surviving_f = forward_only_surviving,
                   surviving_r = reverse_only_surviving)
        if (paired == 'both')
            # mixed SE/PE table: fall back to input_reads when input_read_pairs is missing
            ti2 = ti2 %>%
                replace_na(list('input_reads' = 0, 'input_read_pairs' = 0,
                                'surviving_f' = 0, 'surviving_r' = 0)) %>%
                mutate(input_read_pairs =
                       ifelse(input_read_pairs == 0, input_reads, input_read_pairs))
        # sanity check: every input read pair must be accounted for
        nd = ti2 %>% mutate(nd = input_read_pairs - surviving -
                            surviving_f - surviving_r - dropped) %>%
            summarise(nd = sum(nd)) %>% pull(nd)
        stopifnot(nd == 0)
        to = ti2 %>% mutate(total = input_read_pairs)
    } else {
        stop(sprintf("unsupported option: %s", paired))
    }
    to %>%
        select(SampleID, total, surviving, surviving_f, surviving_r, dropped)
    #}}}
}
# Parse a MultiQC STAR summary table into per-sample alignment counts.
#
# Args:
#   fi:     path to the MultiQC STAR TSV
#   paired: TRUE if sample names carry a "_<suffix>" read-pair/lane tag to strip
# Returns:
#   tibble with SampleID, uniquely_mapped, multimapped, unmapped (rows of the
#   same sample collapsed by summation)
read_multiqc_star <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>% mutate(SampleID = Sample) %>% select(-Sample)
    } else {
        ti2 = ti %>%
            separate(Sample, c("SampleID", 'suf'), sep = "_", fill = 'right', extra = 'merge') %>%
            select(-suf)
    }
    ti2 = ti2 %>%
        transmute(SampleID = SampleID, total = total_reads,
                  uniquely_mapped = uniquely_mapped,
                  multimapped = multimapped + multimapped_toomany,
                  unmapped = unmapped_mismatches + unmapped_tooshort + unmapped_other,
                  nd = total - uniquely_mapped - multimapped - unmapped)
    # nd is the (small) rounding discrepancy between the category sums and the
    # reported total; as.numeric() guards against integer overflow when summing
    # (consistent with read_multiqc_featurecounts)
    stopifnot(sum(as.numeric(ti2$nd)) < 1000)
    # collapse multiple rows (lanes / read pairs) of the same sample
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
# Parse a MultiQC HISAT2 summary table into per-sample alignment counts.
#
# Args:
#   fi:     path to the MultiQC HISAT2 TSV
#   paired: TRUE for paired-end libraries (counts taken from paired_* columns)
# Returns:
#   tibble with SampleID, uniquely_mapped, multimapped, unmapped
read_multiqc_hisat2 <- function(fi, paired = TRUE) {
    #{{{
    ti = read_tsv(fi)
    if (isFALSE(paired)) {
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      total = unpaired_total,
                      uniquely_mapped = unpaired_aligned_one,
                      multimapped = unpaired_aligned_multi,
                      unmapped = unpaired_aligned_none)
    } else {
        # NOTE: unpaired mates of paired-end runs are deliberately excluded;
        # only read pairs are counted (see the commented alternative below).
        ti2 = ti %>%
            transmute(SampleID = Sample,
                      #total = paired_total + unpaired_total,
                      #uniquely_mapped = paired_aligned_one+paired_aligned_discord_one+unpaired_aligned_one,
                      #multimapped = paired_aligned_multi+unpaired_aligned_multi,
                      #unmapped = paired_aligned_none+unpaired_aligned_none)
                      total = paired_total,
                      uniquely_mapped = paired_aligned_one + paired_aligned_discord_one,
                      multimapped = paired_aligned_multi,
                      unmapped = paired_aligned_none)
    }
    # nd is the discrepancy between category sums and the reported total;
    # as.numeric() guards against integer overflow when summing (consistent
    # with read_multiqc_featurecounts). The leftover debug cat() was removed.
    ti2 = ti2 %>% mutate(nd = total - uniquely_mapped - multimapped - unmapped)
    stopifnot(sum(as.numeric(ti2$nd)) < 1000)
    # collapse multiple rows of the same sample
    ti2 %>% group_by(SampleID) %>%
        summarise(uniquely_mapped = sum(uniquely_mapped),
                  multimapped = sum(multimapped),
                  unmapped = sum(unmapped))
    #}}}
}
read_multiqc_featurecounts <- function(fi) {
    #{{{
    # Parse a multiqc featureCounts summary table; return per-sample counts
    # of assigned reads plus the four main unassigned categories.
    #   fi: path to the multiqc featureCounts .tsv file
    ti = read_tsv(fi)
    ti2 = ti %>% mutate(SampleID = Sample) %>%
        select(SampleID, Total, Assigned, Unassigned_Unmapped,
               Unassigned_MultiMapping,
               Unassigned_NoFeatures, Unassigned_Ambiguity) %>%
        mutate(nd = Total - Assigned - Unassigned_Unmapped -
                   Unassigned_MultiMapping - Unassigned_NoFeatures -
                   Unassigned_Ambiguity)
    # the five categories must account for every read exactly;
    # as.numeric() guards the sum against integer overflow on large totals
    stopifnot(sum(as.numeric(ti2$nd)) == 0)
    to = ti2 %>% select(SampleID, Assigned, Unassigned_MultiMapping,
                        Unassigned_NoFeatures, Unassigned_Ambiguity,
                        Unassigned_Unmapped)
    to
    #}}}
}
plot_pca0 <- function(tp, fo, opt = 'col=tis', labsize = 2.5, wd = 8, ht = 8) {
    #{{{
    # Draw a PC1-vs-PC2 scatter plot of `tp` and save it to file `fo`.
    #
    # `opt` is a comma-separated list of "aesthetic=variable" codes, e.g.
    # 'col=tis,sha=rep' colors points by Tissue and shapes them by Replicate.
    #   aesthetic codes: col -> color, lab -> text label, sha -> shape
    #   variable codes:  tis -> Tissue, rep -> Replicate, tre -> Treatment,
    #                    gen -> Genotype, sid -> sid
    # This table-driven form replaces ten near-identical copy-pasted
    # branches, accepts any key=value combination (a backward-compatible
    # generalization), and fixes the old 'col=tis,sha=tre' branch which
    # configured a `fill` guide although no fill aesthetic was mapped.
    #
    # NOTE(review): xlab, ylab and shapes are free variables resolved from
    # the calling environment (axis titles and the shape palette) -- confirm
    # they are defined wherever this function is called.
    keymap = c(col = 'color', lab = 'label', sha = 'shape')
    varmap = c(tis = 'Tissue', rep = 'Replicate', tre = 'Treatment',
               gen = 'Genotype', sid = 'sid')
    kvs = strsplit(unlist(strsplit(opt, ',')), '=')
    keys = sapply(kvs, '[', 1); vals = sapply(kvs, '[', 2)
    if (any(sapply(kvs, length) != 2) ||
        !all(keys %in% names(keymap)) || !all(vals %in% names(varmap)))
        stop(sprintf("unknown opt: %s", opt))
    # build the aes() mapping from column-name strings; aes_string() is
    # soft-deprecated in recent ggplot2 but kept to match the file's vintage
    mapping = do.call(aes_string,
                      c(list(x = 'PC1', y = 'PC2'),
                        setNames(as.list(varmap[vals]), keymap[keys])))
    p1 = ggplot(tp, mapping) +
        geom_point(size = 1.5)
    # text labels only when a 'lab=' aesthetic was requested
    if ('lab' %in% keys)
        p1 = p1 + geom_text_repel(size = labsize)
    # unused scales (e.g. shape scale with no shape mapping) are silently
    # ignored by ggplot2, matching several of the original branches
    p1 = p1 +
        scale_x_continuous(name = xlab) + scale_y_continuous(name = ylab) +
        scale_color_d3() +
        scale_shape_manual(values = shapes) +
        guides(direction = 'vertical', color = guide_legend(ncol = 1)) +
        guides(shape = guide_legend(ncol = 1, byrow = TRUE)) +
        otheme(legend.pos = 'top.left', xgrid = TRUE, ygrid = TRUE,
               xtitle = TRUE, ytitle = TRUE, xtext = TRUE, ytext = TRUE)
    ggsave(p1, filename = fo, width = wd, height = ht)
    #}}}
}
|
# Question 2
# Are there differences between males and females with respect to body image?
# ----
# Reflection on the question:
# Perhaps a certain societal prejudice exists that pays more attention to
# female body image than to male body image; however, in current times it
# matters less. Either way, the insights from the data will be
# interesting to see.
# ----
# Variables selected: Gender, WtFeel (both categorical)
# Gender is the explanatory variable ('x'); WtFeel is the response ('y').
# Exploratory Data Analysis: a count table plus conditional (row)
# percentages show how WtFeel is distributed within each gender.
tbl <- table(data.frame(data$Gender, data$WtFeel)) # count table
100 * prop.table(tbl, margin = 1) # conditional (row) percentages
# The percentages for About Right, Overweight, and Underweight
# for females are 73.79%, 22.07%, and 4.138%, respectively.
# The same percentages for males are: 66.67%, 17.86%, 15.48%. | /02-Lab exercise - Body Image and Academic Performance of College Students/02_Question2.R | no_license | kayomotunde/probability-and-statistics | R | false | false | 947 | r | # Question 2
# Are there differences between males and females with respect to body image?
# ----
# Reflection on the question:
# Perhaps a certain societal prejudice exists that pays more attention to
# female body image than to male body image; however, in current times it
# matters less. Either way, the insights from the data will be
# interesting to see.
# ----
# Variables selected: Gender, WtFeel (both categorical)
# Gender is the explanatory variable ('x'); WtFeel is the response ('y').
# Exploratory Data Analysis: a count table plus conditional (row)
# percentages show how WtFeel is distributed within each gender.
tbl <- table(data.frame(data$Gender, data$WtFeel)) # count table
100 * prop.table(tbl, margin = 1) # conditional (row) percentages
# The percentages for About Right, Overweight, and Underweight
# for females are 73.79%, 22.07%, and 4.138%, respectively.
# The same percentages for males are: 66.67%, 17.86%, 15.48%.
## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(m = matrix()) {
    ## Wrap a matrix in an object that can cache its inverse.
    ## Returns a list of four accessor closures sharing this environment.
    inv <- NULL # cached inverse; NULL means "not computed yet"
    ## Replace the stored matrix, invalidating any cached inverse
    set <- function(matrix) {
        m <<- matrix
        inv <<- NULL
    }
    ## Return the stored matrix
    get <- function() m
    ## Store a freshly computed inverse
    setInverse <- function(inverse) inv <<- inverse
    ## Return the cached inverse (NULL if not yet computed)
    getInverse <- function() inv
    ## Expose the four accessors as a named list
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return the inverse of the matrix held by 'x' (a makeCacheMatrix
    ## object). The inverse is computed at most once and cached; extra
    ## arguments are passed through to solve().
    m <- x$getInverse()
    ## Serve the cached inverse if one is available
    if( !is.null(m) ) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## BUG FIX: the original computed solve(data) %*% data -- which is the
    ## identity matrix, not the inverse -- and then cached that wrong value.
    m <- solve(data, ...)
    ## Cache the inverse for future calls
    x$setInverse(m)
    ## Return the inverse
    m
} | /cachematrix.R | no_license | tejasvithani/ProgrammingAssignment2 | R | false | false | 1,573 | r | ## A pair of functions that cache the inverse of a matrix
## Creates a special matrix object that can cache its inverse
makeCacheMatrix <- function(m = matrix()) {
    ## Wrap a matrix in an object that can cache its inverse.
    ## Returns a list of four accessor closures sharing this environment.
    inv <- NULL # cached inverse; NULL means "not computed yet"
    ## Replace the stored matrix, invalidating any cached inverse
    set <- function(matrix) {
        m <<- matrix
        inv <<- NULL
    }
    ## Return the stored matrix
    get <- function() m
    ## Store a freshly computed inverse
    setInverse <- function(inverse) inv <<- inverse
    ## Return the cached inverse (NULL if not yet computed)
    getInverse <- function() inv
    ## Expose the four accessors as a named list
    list(set = set, get = get,
         setInverse = setInverse,
         getInverse = getInverse)
}
## Compute the inverse of the special matrix returned by "makeCacheMatrix"
## above. If the inverse has already been calculated (and the matrix has not
## changed), then the "cachesolve" should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
    ## Return the inverse of the matrix held by 'x' (a makeCacheMatrix
    ## object). The inverse is computed at most once and cached; extra
    ## arguments are passed through to solve().
    m <- x$getInverse()
    ## Serve the cached inverse if one is available
    if( !is.null(m) ) {
        message("getting cached data")
        return(m)
    }
    ## Get the matrix from our object
    data <- x$get()
    ## BUG FIX: the original computed solve(data) %*% data -- which is the
    ## identity matrix, not the inverse -- and then cached that wrong value.
    m <- solve(data, ...)
    ## Cache the inverse for future calls
    x$setInverse(m)
    ## Return the inverse
    m
}
# http://www.dcc.fc.up.pt/~ltorgo/DM1_1718/Rclustering.html
#############
## Code of Slides: Clustering in R
#############
#
####### Section: Distance Functions
##
data(iris)
# Euclidean distance on the 4 numeric columns only
dm <- dist(iris[,-5]) # excluding the nominal target
as.matrix(dm)[1,4] # because dm is of class "dist"
##
library(cluster)
data(iris)
# daisy() copes with mixed column types (Species is a factor here)
dm <- daisy(iris)
as.matrix(dm)[1,4] # because dm is of class "dist"
####### Section: Partitional Methods
##
data(iris)
# fix the RNG so the random k-means starting centroids are reproducible
set.seed(50)
k3 <- kmeans(iris[,-5],centers=3,iter.max=200)
####### Section: Clustering Validation
##
# cross-tabulate cluster labels against the true species
table(k3$cluster,iris$Species)
library(cluster)
s <- silhouette(k3$cluster,
dist(iris[,-5]))
##
plot(s)
##
# average silhouette width for k = 2..6 to pick the number of clusters
# NOTE(review): avgS is grown inside the loop; numeric(5) preallocation
# would be the idiomatic form
set.seed(50)
d <- dist(iris[,-5])
avgS <- c()
for(k in 2:6) {
cl <- kmeans(iris[,-5],centers=k,iter.max=200)
s <- silhouette(cl$cluster,d)
# column 3 of a silhouette object holds the per-point silhouette widths
avgS <- c(avgS,mean(s[,3]))
}
##
library(ggplot2)
ggplot(data.frame(nClus=2:6,Silh=avgS),
aes(x=nClus,y=Silh)) +
geom_point(size=3,color="red") + geom_line() +
xlab("Nr.Clusters") + ylab("Silh.Coef.")
####### Section: Partitional Methods
##
library(cluster)
# PAM: k-medoids, more robust to outliers than k-means
pc <- pam(iris[,-5],k=3)
table(pc$clustering,iris$Species)
s <- silhouette(pc$clustering,
dist(iris[,-5]))
##
plot(s)
clusplot(pc)
##
library(cluster)
# CLARA: PAM applied to repeated samples, for larger data sets
cl <- clara(iris[,-5],3)
table(cl$clustering,iris$Species)
##
clusplot(cl)
#install.packages("fpc")
##
library(fpc)
# density-based clustering on standardized data; 0.9 is the eps radius
# (cluster 0 in the result denotes noise points)
d <- scale(iris[,-5])
db <- dbscan(d,0.9)
db
table(db$cluster,iris$Species)
##
plot(db,d)
##
library(cluster)
# fuzzy clustering: each observation gets a membership degree per cluster
f <- fanny(iris[,-5],3,metric='euclidean',stand=T)
head(f$membership)
table(f$clustering,iris$Species)
##
clusplot(f)
####### Section: Hierarchical Methods
##
data(iris)
d <- dist(scale(iris[,-5]))
# agglomerative clustering (default linkage: complete)
h <- hclust(d)
##
# cut the dendrogram into 3 groups
cls <- cutree(h,3)
table(cls,iris$Species)
##
plot(h)
##
library(cluster)
d <- dist(scale(iris[,-5]))
# compare linkage methods by average silhouette width for k = 2..6
methds <- c('complete','single','average')
avgS <- matrix(NA,ncol=3,nrow=5,
dimnames=list(2:6,methds))
for(k in 2:6)
for(m in seq_along(methds)) {
h <- hclust(d,meth=methds[m])
c <- cutree(h,k)
s <- silhouette(c,d)
avgS[k-1,m] <- mean(s[,3])
}
# reshape wide matrix -> long data frame for plotting
library(reshape2)
dt <- melt(avgS)
colnames(dt) <- c("NClusts","Meth","AvgS")
library(ggplot2)
ggplot(dt,aes(x=NClusts,y=AvgS,color=Meth)) +
geom_line()
##
library(cluster)
# DIANA: divisive (top-down) hierarchical clustering
di <- diana(iris[,-5],
metric='euclidean',
stand=TRUE)
table(cutree(di,3),iris$Species)
##
pltree(di)
| /Cluster with Validation - Silhouette/Rclustering.R | no_license | alciomarhollanda/clustering | R | false | false | 2,407 | r | # http://www.dcc.fc.up.pt/~ltorgo/DM1_1718/Rclustering.html
#############
## Code of Slides: Clustering in R
#############
#
####### Section: Distance Functions
##
data(iris)
# Euclidean distance on the 4 numeric columns only
dm <- dist(iris[,-5]) # excluding the nominal target
as.matrix(dm)[1,4] # because dm is of class "dist"
##
library(cluster)
data(iris)
# daisy() copes with mixed column types (Species is a factor here)
dm <- daisy(iris)
as.matrix(dm)[1,4] # because dm is of class "dist"
####### Section: Partitional Methods
##
data(iris)
# fix the RNG so the random k-means starting centroids are reproducible
set.seed(50)
k3 <- kmeans(iris[,-5],centers=3,iter.max=200)
####### Section: Clustering Validation
##
# cross-tabulate cluster labels against the true species
table(k3$cluster,iris$Species)
library(cluster)
s <- silhouette(k3$cluster,
dist(iris[,-5]))
##
plot(s)
##
# average silhouette width for k = 2..6 to pick the number of clusters
# NOTE(review): avgS is grown inside the loop; numeric(5) preallocation
# would be the idiomatic form
set.seed(50)
d <- dist(iris[,-5])
avgS <- c()
for(k in 2:6) {
cl <- kmeans(iris[,-5],centers=k,iter.max=200)
s <- silhouette(cl$cluster,d)
# column 3 of a silhouette object holds the per-point silhouette widths
avgS <- c(avgS,mean(s[,3]))
}
##
library(ggplot2)
ggplot(data.frame(nClus=2:6,Silh=avgS),
aes(x=nClus,y=Silh)) +
geom_point(size=3,color="red") + geom_line() +
xlab("Nr.Clusters") + ylab("Silh.Coef.")
####### Section: Partitional Methods
##
library(cluster)
# PAM: k-medoids, more robust to outliers than k-means
pc <- pam(iris[,-5],k=3)
table(pc$clustering,iris$Species)
s <- silhouette(pc$clustering,
dist(iris[,-5]))
##
plot(s)
clusplot(pc)
##
library(cluster)
# CLARA: PAM applied to repeated samples, for larger data sets
cl <- clara(iris[,-5],3)
table(cl$clustering,iris$Species)
##
clusplot(cl)
#install.packages("fpc")
##
library(fpc)
# density-based clustering on standardized data; 0.9 is the eps radius
# (cluster 0 in the result denotes noise points)
d <- scale(iris[,-5])
db <- dbscan(d,0.9)
db
table(db$cluster,iris$Species)
##
plot(db,d)
##
library(cluster)
# fuzzy clustering: each observation gets a membership degree per cluster
f <- fanny(iris[,-5],3,metric='euclidean',stand=T)
head(f$membership)
table(f$clustering,iris$Species)
##
clusplot(f)
####### Section: Hierarchical Methods
##
data(iris)
d <- dist(scale(iris[,-5]))
# agglomerative clustering (default linkage: complete)
h <- hclust(d)
##
# cut the dendrogram into 3 groups
cls <- cutree(h,3)
table(cls,iris$Species)
##
plot(h)
##
library(cluster)
d <- dist(scale(iris[,-5]))
# compare linkage methods by average silhouette width for k = 2..6
methds <- c('complete','single','average')
avgS <- matrix(NA,ncol=3,nrow=5,
dimnames=list(2:6,methds))
for(k in 2:6)
for(m in seq_along(methds)) {
h <- hclust(d,meth=methds[m])
c <- cutree(h,k)
s <- silhouette(c,d)
avgS[k-1,m] <- mean(s[,3])
}
# reshape wide matrix -> long data frame for plotting
library(reshape2)
dt <- melt(avgS)
colnames(dt) <- c("NClusts","Meth","AvgS")
library(ggplot2)
ggplot(dt,aes(x=NClusts,y=AvgS,color=Meth)) +
geom_line()
##
library(cluster)
# DIANA: divisive (top-down) hierarchical clustering
di <- diana(iris[,-5],
metric='euclidean',
stand=TRUE)
table(cutree(di,3),iris$Species)
##
pltree(di)
# Downloading Files
# Do this in R, so it can be included in Data Preparation Script
getwd() # current working directory
# setwd() sets the current directory to something of your choice
# Be Aware of Relative versus Absolute Paths
# In Windows, use backslashes instead of forward slashes
# Checking for and creating directories
# file.exists("directoryName") will check to see if the directory exists
file.exists("C:/MOOCs/Coursera/Getting_cleaning_Data/Week1") # returns TRUE
# dir.create("directoryName") will create a directory if it doesn't exist
dir.create("C:/MOOCs/Coursera/Getting_cleaning_Data/Week1/test_dir") # create test_dir
# Here is an example checking for a "data" directory and creating it if it doesn't exist
# if (!file.exists("data")){
# dir.create("data")
# }
#
# Getting data from the internet - download.file()
# Downloads a file from the internet; helps with reproducibiity
# Important parameters are url (location on web), destfile (destination file), method
# Useful for csv, tab delimited, etc. Agnostic to file type
fileURL <- "http://data.baltimorecity.gov/api/views/k78j-azhn/rows.csv?accessType=DOWNLOAD"
download.file(fileURL, destfile="./towing.csv")
list.files("./")
# Note above, this is slightly different than code from slides
# My code above is inspired by: http://stackoverflow.com/questions/17300582/download-file-in-r-has-non-zero-exit-status
# I removed method="curl". In Windows, it by default works... On MAC, set to curl.
# Also, the original URL was https... But I changed it to http
# Take track of time - sometimes datasets change depending on the time...
dateDownloaded <- date()
dateDownloaded
# Reading Local Files (4:55)
# This is also covered in R Programming Course
# read.table() is main function for reading data into R
# Flexible and robust, but requires more parameters
# Don't use if working with big data
# Important parameters - file, header, sep, row.names, nrows
# Related: read.csv(), read.csv2()
fileUrl <- "http://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl, destfile="./cameras.csv")
dateDownloaded <- date()
cameraData <- read.table("./cameras.csv")
head(cameraData)
cameraData <- read.table("./cameras.csv", sep=",", header=TRUE)
head(cameraData)
cameraData <- read.csv("./cameras.csv") # best if *.csv file
head(cameraData)
# Some more important parameters
# quote - tell R whether there are any quoted values
# na.strings - set characters that represents missing value
# nrows - how many rows to read of the file
# skip - number of lines to skip before starting to read
# Reading Excel Files
# Now, let's download the Excel version of this spreadsheet
fileUrl <- "http://data.baltimorecity.gov/api/views/dz54-2aru/rows.xlsx?accessType=DOWNLOAD"
download.file(fileUrl, destfile="./cameras.xlsx", mode='wb')
# Note: for xlsx package, must include mode='wb'
# Explanation is here: http://stackoverflow.com/questions/28325744/r-xlsx-package-error
dateDownloaded <- date()
install.packages("xlsx")
library("xlsx")
cameraData <- read.xlsx("./cameras.xlsx", sheetIndex=1, header=TRUE)
head(cameraData)
# Reading specific rows and columns
colIndex <- 2:3
rowIndex <- 1:4
cameraDataSubset <- read.xlsx("./cameras.xlsx", sheetIndex=1, colIndex=colIndex, rowIndex=rowIndex)
cameraDataSubset
# Reading XML
# XML - Extensible Markup Language
# XML is the bsis for most web scraping
# Components - Markup (labels that give the text structure) and Content (actual text)
# Tags correspond to general labels: start tags <section> and end tags </section>
# Empty tags <line-break />
# Elements are specific examples of tags: <Greeting> Hello, world </Greeting>
# Attributes are components of the label:
# <img src="jeff.jpg" alt="instructor"/>
# Read the (XML) file into R
install.packages("XML")
library("XML")
fileUrl <- "http://www.w3schools.com/xml/simple.xml"
doc <- xmlTreeParse(fileUrl, useInternal=TRUE) # Loads doc into R memory
rootNode <- xmlRoot(doc)
xmlName(rootNode) # rootNode - wrapper element for entire document
names(rootNode)
# Directly access parts of the XML document
rootNode[[1]] # returns first food element
rootNode[[1]][[1]]
xmlSApply(rootNode, xmlValue) # loops through rootNode and gets xmlValue
# XPath
# /node top level node of each element
# //node node at any level
# node[@attr-name] Node with an attribute name
# node[@attr-name='bob'] Node with attribute name attr-name='bob'
xpathSApply(rootNode, "//name", xmlValue) # returns all elements with "name" tag
xpathSApply(rootNode, "//price", xmlValue) #
# Extract Content by Attributes
fileUrl <- "http://espn.go.com/nfl/team/_/name/bal/baltimore-ravens"
doc <- htmlTreeParse(fileUrl, useInternal=TRUE)
scores <- xpathSApply(doc, "//li[@class='score']", xmlValue)
teams <- xpathSApply(doc, "//li[@class='team-name']", xmlValue)
scores # returns nothing... I believe the XML tags have changed...
teams
# They provide XML tutorials from website...
# Reading JSON
# Javascript Object Notation - common data format for API's
# I think BASIS data is JSON.
# Reading JSON data jsonlite package
install.packages("jsonlite")
install.packages('curl')
library(jsonlite)
jsonData <- fromJSON("https://api.github.com/users/jtleek/repos")
names(jsonData)
names(jsonData$owner)
jsonData$owner$login
# Writing data frames to JSON
myjson <- toJSON(iris, pretty=TRUE)
cat(myjson)
# Convert back to JSON
iris2 <- fromJSON(myjson)
head(iris2)
# This might be the best way to work with BASIS JSON data...
# there are online tutorials for working with this...
# The data.table Package
# All functions that accept data.frame can work on data.table
# Written in C so it is much faster
# Create data tables just like data frames
library(data.table)
DF=data.frame(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DF, 3)
DT = data.table(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DT,3)
# See all the data tables in memory
tables()
# Subsetting Rows
DT[2,]
DT[DT$y=="a",]
DT[c(2,3)]
# Subsetting Columns - not the same as a data frame... - don't yet understand
# Calculating Values for Variables with Expressions
DT[, list(mean(x), sum(z))]
# Adding new columns
DT[, w:=z^2]
DT
DT2 <- DT
DT[, y:=2]
head(DT, n=3)
head(DT2, n=3)
# Need to explicitly make a copy of data table instead of just assigning it.
# Multiple Operations
DT[,m:= {tmp <- (x+z); log2(tmp+5)}]
# plyr like operations
DT[,a:=x>0]
DT
# plyr is extremely useful for my migraine dataset...
DT[,b:= mean(x+w), by=a]
DT
# Special Variables
# .N is an integer, length 1, containing the number of times a group appears
set.seed(123);
DT <- data.table(x=sample(letters[1:3], 1E5, TRUE))
DT[, .N, by=x]
# Keys
DT <- data.table(x=rep(c("a","b","c"), each=100), y=rnorm(300))
setkey(DT, x)
DT['a']
#Joins
DT1 <- data.table(x=c('a', 'a', 'b', 'dt1'), y=1:4)
DT2 <- data.table(x=c('a', 'b', 'dt2'), z=5:7)
setkey(DT1, x); setkey(DT2, x)
merge(DT1, DT2)
# data.table is a lot quicker in reading...
# Quiz 1
# Question 1
# Download the 2006 microdata survey about housing for the state of Idaho
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileURL, destfile="./ACS.csv")
list.files("./")
# Load data into Dataframe
acsData <- read.csv("ACS.csv")
# Look at names and data
names(acsData)
# How many properties are worth $1,000,000 or more?
# Which variable represents the property value?
# I think it is VAL
summary(acsData$VAL)
table(acsData$VAL)
# Since 1,000,000 or more properties are with code = 24, the answer is 53...
# Question 2
# Use the data you loaded from Question 1. Consider the variable FES in the code
# book. Which of the "tidy data" principles does this variable violate?
# Question 3
# Download the Excel spreadsheet on Natural Gas Aquisition Program here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(fileUrl, destfile="./NGAP.xlsx", mode='wb')
# Read rows 18-23 and columns 7-15 into R and assign the result to a variable called
# dat
library(xlsx)
colIndex <- 7:15
rowIndex <- 18:23
dat <- read.xlsx("NGAP.xlsx", sheetIndex=1, header=TRUE, colIndex=colIndex, rowIndex=rowIndex)
# What is the value of:
sum(dat$Zip*dat$Ext,na.rm=T)
# Question 4
# Read the XML data on Baltimore restaurants from here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml
library("XML")
fileUrl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
doc <- xmlTreeParse(fileUrl, useInternal=TRUE) # Loads doc into R memory
rootNode <- xmlRoot(doc)
xmlName(rootNode) # rootNode - wrapper element for entire document
names(rootNode)
# How many restaurants have zipcode 21231?
xmlSApply(rootNode, xmlValue)
xpathSApply(rootNode, "//zipcode", xmlValue) # returns all elements with "zipcode" tag
isZip <- xpathSApply(rootNode, "//zipcode", xmlValue) == 21231
table(isZip)
# Answer is 127
# Question 5
# The American Community Survey distributes downloadable data about United States
# communities. Download the 2006 microdata survey about housing for the state of
# Idaho using download.file() from here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
download.file(fileURL, destfile="./ACS_housing.csv")
list.files("./")
# using the fread() command load the data into an R object
DT <- fread("ACS_housing.csv")
summary(DT)
# proc.time for calculating elapsed time for multiple statements
ptm <- proc.time()
rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]
proc.time() - ptm
ptm <- proc.time()
mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)
proc.time() - ptm
# system.time is best for single statements
system.time(sapply(split(DT$pwgtp15,DT$SEX),mean))
system.time(DT[,mean(pwgtp15),by=SEX])
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
system.time(mean(DT$pwgtp15,by=DT$SEX))
# My answer for Number 5 was wrong. Need to figure out what went wrong...
| /Week1.R | no_license | bwbelljr/Getting_Cleaning_Data | R | false | false | 10,466 | r | # Downloading Files
# Do this in R, so it can be included in Data Preparation Script
getwd() # current working directory
# setwd() sets the current directory to something of your choice
# Be Aware of Relative versus Absolute Paths
# In Windows, use backslashes instead of forward slashes
# Checking for and creating directories
# file.exists("directoryName") will check to see if the directory exists
file.exists("C:/MOOCs/Coursera/Getting_cleaning_Data/Week1") # returns TRUE
# dir.create("directoryName") will create a directory if it doesn't exist
dir.create("C:/MOOCs/Coursera/Getting_cleaning_Data/Week1/test_dir") # create test_dir
# Here is an example checking for a "data" directory and creating it if it doesn't exist
# if (!file.exists("data")){
# dir.create("data")
# }
#
# Getting data from the internet - download.file()
# Downloads a file from the internet; helps with reproducibiity
# Important parameters are url (location on web), destfile (destination file), method
# Useful for csv, tab delimited, etc. Agnostic to file type
fileURL <- "http://data.baltimorecity.gov/api/views/k78j-azhn/rows.csv?accessType=DOWNLOAD"
download.file(fileURL, destfile="./towing.csv")
list.files("./")
# Note above, this is slightly different than code from slides
# My code above is inspired by: http://stackoverflow.com/questions/17300582/download-file-in-r-has-non-zero-exit-status
# I removed method="curl". In Windows, it by default works... On MAC, set to curl.
# Also, the original URL was https... But I changed it to http
# Take track of time - sometimes datasets change depending on the time...
dateDownloaded <- date()
dateDownloaded
# Reading Local Files (4:55)
# This is also covered in R Programming Course
# read.table() is main function for reading data into R
# Flexible and robust, but requires more parameters
# Don't use if working with big data
# Important parameters - file, header, sep, row.names, nrows
# Related: read.csv(), read.csv2()
fileUrl <- "http://data.baltimorecity.gov/api/views/dz54-2aru/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl, destfile="./cameras.csv")
dateDownloaded <- date()
cameraData <- read.table("./cameras.csv")
head(cameraData)
cameraData <- read.table("./cameras.csv", sep=",", header=TRUE)
head(cameraData)
cameraData <- read.csv("./cameras.csv") # best if *.csv file
head(cameraData)
# Some more important parameters
# quote - tell R whether there are any quoted values
# na.strings - set characters that represents missing value
# nrows - how many rows to read of the file
# skip - number of lines to skip before starting to read
# Reading Excel Files
# Now, let's download the Excel version of this spreadsheet
fileUrl <- "http://data.baltimorecity.gov/api/views/dz54-2aru/rows.xlsx?accessType=DOWNLOAD"
download.file(fileUrl, destfile="./cameras.xlsx", mode='wb')
# Note: for xlsx package, must include mode='wb'
# Explanation is here: http://stackoverflow.com/questions/28325744/r-xlsx-package-error
dateDownloaded <- date()
install.packages("xlsx")
library("xlsx")
cameraData <- read.xlsx("./cameras.xlsx", sheetIndex=1, header=TRUE)
head(cameraData)
# Reading specific rows and columns
colIndex <- 2:3
rowIndex <- 1:4
cameraDataSubset <- read.xlsx("./cameras.xlsx", sheetIndex=1, colIndex=colIndex, rowIndex=rowIndex)
cameraDataSubset
# Reading XML
# XML - Extensible Markup Language
# XML is the bsis for most web scraping
# Components - Markup (labels that give the text structure) and Content (actual text)
# Tags correspond to general labels: start tags <section> and end tags </section>
# Empty tags <line-break />
# Elements are specific examples of tags: <Greeting> Hello, world </Greeting>
# Attributes are components of the label:
# <img src="jeff.jpg" alt="instructor"/>
# Read the (XML) file into R
install.packages("XML")
library("XML")
fileUrl <- "http://www.w3schools.com/xml/simple.xml"
doc <- xmlTreeParse(fileUrl, useInternal=TRUE) # Loads doc into R memory
rootNode <- xmlRoot(doc)
xmlName(rootNode) # rootNode - wrapper element for entire document
names(rootNode)
# Directly access parts of the XML document
rootNode[[1]] # returns first food element
rootNode[[1]][[1]]
xmlSApply(rootNode, xmlValue) # loops through rootNode and gets xmlValue
# XPath
# /node top level node of each element
# //node node at any level
# node[@attr-name] Node with an attribute name
# node[@attr-name='bob'] Node with attribute name attr-name='bob'
xpathSApply(rootNode, "//name", xmlValue) # returns all elements with "name" tag
xpathSApply(rootNode, "//price", xmlValue) #
# Extract Content by Attributes
fileUrl <- "http://espn.go.com/nfl/team/_/name/bal/baltimore-ravens"
doc <- htmlTreeParse(fileUrl, useInternal=TRUE)
scores <- xpathSApply(doc, "//li[@class='score']", xmlValue)
teams <- xpathSApply(doc, "//li[@class='team-name']", xmlValue)
scores # returns nothing... I believe the XML tags have changed...
teams
# They provide XML tutorials from website...
# Reading JSON
# Javascript Object Notation - common data format for API's
# I think BASIS data is JSON.
# Reading JSON data jsonlite package
install.packages("jsonlite")
install.packages('curl')
library(jsonlite)
jsonData <- fromJSON("https://api.github.com/users/jtleek/repos")
names(jsonData)
names(jsonData$owner)
jsonData$owner$login
# Writing data frames to JSON
myjson <- toJSON(iris, pretty=TRUE)
cat(myjson)
# Convert back to JSON
iris2 <- fromJSON(myjson)
head(iris2)
# This might be the best way to work with BASIS JSON data...
# there are online tutorials for working with this...
# The data.table Package
# All functions that accept data.frame can work on data.table
# Written in C so it is much faster
# Create data tables just like data frames
library(data.table)
DF=data.frame(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DF, 3)
DT = data.table(x=rnorm(9), y=rep(c("a","b","c"), each=3), z=rnorm(9))
head(DT,3)
# See all the data tables in memory
tables()
# Subsetting Rows
DT[2,]
DT[DT$y=="a",]
DT[c(2,3)]
# Subsetting Columns - not the same as a data frame... - don't yet understand
# Calculating Values for Variables with Expressions
DT[, list(mean(x), sum(z))]
# Adding new columns
DT[, w:=z^2]
DT
DT2 <- DT
DT[, y:=2]
head(DT, n=3)
head(DT2, n=3)
# Need to explicitly make a copy of data table instead of just assigning it.
# Multiple Operations
DT[,m:= {tmp <- (x+z); log2(tmp+5)}]
# plyr like operations
DT[,a:=x>0]
DT
# plyr is extremely useful for my migraine dataset...
DT[,b:= mean(x+w), by=a]
DT
# Special Variables
# .N is an integer, length 1, containing the number of times a group appears
set.seed(123);
DT <- data.table(x=sample(letters[1:3], 1E5, TRUE))
DT[, .N, by=x]
# Keys
DT <- data.table(x=rep(c("a","b","c"), each=100), y=rnorm(300))
setkey(DT, x)
DT['a']
#Joins
DT1 <- data.table(x=c('a', 'a', 'b', 'dt1'), y=1:4)
DT2 <- data.table(x=c('a', 'b', 'dt2'), z=5:7)
setkey(DT1, x); setkey(DT2, x)
merge(DT1, DT2)
# data.table is a lot quicker in reading...
# Quiz 1
# Question 1
# Download the 2006 microdata survey about housing for the state of Idaho
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv"
download.file(fileURL, destfile="./ACS.csv")
list.files("./")
# Load data into Dataframe
acsData <- read.csv("ACS.csv")
# Look at names and data
names(acsData)
# How many properties are worth $1,000,000 or more?
# Which variable represents the property value?
# I think it is VAL
summary(acsData$VAL)
table(acsData$VAL)
# Since 1,000,000 or more properties are with code = 24, the answer is 53...
# Question 2
# Use the data you loaded from Question 1. Consider the variable FES in the code
# book. Which of the "tidy data" principles does this variable violate?
# Question 3
# Download the Excel spreadsheet on Natural Gas Aquisition Program here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FDATA.gov_NGAP.xlsx"
download.file(fileUrl, destfile="./NGAP.xlsx", mode='wb')
# Read rows 18-23 and columns 7-15 into R and assign the result to a variable called
# dat
library(xlsx)
colIndex <- 7:15
rowIndex <- 18:23
dat <- read.xlsx("NGAP.xlsx", sheetIndex=1, header=TRUE, colIndex=colIndex, rowIndex=rowIndex)
# What is the value of:
sum(dat$Zip*dat$Ext,na.rm=T)
# Question 4
# Read the XML data on Baltimore restaurants from here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml
library("XML")
fileUrl <- "http://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Frestaurants.xml"
doc <- xmlTreeParse(fileUrl, useInternal=TRUE) # Loads doc into R memory
rootNode <- xmlRoot(doc)
xmlName(rootNode) # rootNode - wrapper element for entire document
names(rootNode)
# How many restaurants have zipcode 21231?
xmlSApply(rootNode, xmlValue)
xpathSApply(rootNode, "//zipcode", xmlValue) # returns all elements with "zipcode" tag
isZip <- xpathSApply(rootNode, "//zipcode", xmlValue) == 21231
table(isZip)
# Answer is 127
# Question 5
# The American Community Survey distributes downloadable data about United States
# communities. Download the 2006 microdata survey about housing for the state of
# Idaho using download.file() from here:
# https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv
fileURL <- "https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06pid.csv"
download.file(fileURL, destfile="./ACS_housing.csv")
list.files("./")
# using the fread() command load the data into an R object
DT <- fread("ACS_housing.csv")
summary(DT)
# proc.time for calculating elapsed time for multiple statements
ptm <- proc.time()
rowMeans(DT)[DT$SEX==1]; rowMeans(DT)[DT$SEX==2]
proc.time() - ptm
ptm <- proc.time()
mean(DT[DT$SEX==1,]$pwgtp15); mean(DT[DT$SEX==2,]$pwgtp15)
proc.time() - ptm
# system.time is best for single statements
system.time(sapply(split(DT$pwgtp15,DT$SEX),mean))
system.time(DT[,mean(pwgtp15),by=SEX])
system.time(tapply(DT$pwgtp15,DT$SEX,mean))
system.time(mean(DT$pwgtp15,by=DT$SEX))
# My answer for Number 5 was wrong. Need to figure out what went wrong...
|
# Repeated-measures analysis of attitude ratings (3 drinks x 3 imagery
# conditions) using nested multilevel models compared by likelihood-ratio
# tests (anova on ML-fitted lme models).
library(reshape)
library(nlme)  # provides lme(); was missing, so the model fits below errored

# Wide format: one row per participant, nine attitude columns.
attitudeData <- read.delim("Attitude.dat", header = TRUE)

# Reshape to long format: one row per participant x condition.
# (reshape::melt has no `measured` argument; the unknown argument is
# ignored and all non-id columns -- exactly these nine -- are melted.)
longAttitude <- melt(attitudeData, id = "participant",
                     measured = c("beerpos", "beerneg", "beerneut",
                                  "winepos", "wineneg", "wineneut",
                                  "waterpos", "waterneg", "waterneut"))
names(longAttitude) <- c("participant", "groups", "attitude")

# Factor coding assumes 20 participants: 180 rows = 3 drinks x 3 imagery
# levels x 20 participants, in melt's column order.
longAttitude$drink <- gl(3, 60, labels = c("Beer", "Wine", "Water"))
longAttitude$imagery <- gl(3, 20, 180, labels = c("Positive", "Negative", "Neutral"))

# Build up the model: intercept-only baseline with random intercepts
# nested participant/drink/imagery, then add main effects and the
# interaction, refitting by ML so the anova() comparison is valid.
baseline <- lme(attitude ~ 1, random = ~1|participant/drink/imagery,
                data = longAttitude, method = "ML")
drinkModel <- update(baseline, .~. + drink)
imageryModel <- update(drinkModel, .~. + imagery)
attitudeModel <- update(imageryModel, .~. + drink:imagery)

anova(baseline, drinkModel, imageryModel, attitudeModel)
summary(attitudeModel)
| /Misc/Attitude.R | no_license | alexkayal/R-stuff | R | false | false | 880 | r | library(reshape)
attitudeData<-read.delim("Attitude.dat", header = TRUE)
longAttitude <-melt(attitudeData, id = "participant",
measured = c( "beerpos", "beerneg", "beerneut",
"winepos", "wineneg", "wineneut",
"waterpos", "waterneg", "waterneut"))
names(longAttitude)<-c("participant", "groups", "attitude")
longAttitude$drink<-gl(3, 60, labels = c("Beer", "Wine", "Water"))
longAttitude$imagery<-gl(3, 20, 180, labels = c("Positive", "Negative", "Neutral"))
baseline<-lme(attitude ~ 1, random = ~1|participant/drink/imagery, data = longAttitude, method = "ML")
drinkModel<-update(baseline, .~. + drink)
imageryModel<-update(drinkModel, .~. + imagery)
attitudeModel<-update(imageryModel, .~. + drink:imagery)
anova(baseline, drinkModel, imageryModel, attitudeModel)
summary(attitudeModel)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kuenm_feval.R
\name{kuenm_feval}
\alias{kuenm_feval}
\title{Evaluation of final Maxent models with independent data}
\usage{
kuenm_feval(path, occ.joint, occ.ind, replicates, out.eval, threshold = 5,
rand.percent = 50, iterations = 500, parallel.proc = FALSE)
}
\arguments{
\item{path}{(character) directory in which folders containing final models are being created or
were created.}
\item{occ.joint}{(character) the csv file with training and testing occurrences combined,
or the file containing occurrences used to create final models; columns must be: species,
longitude, latitude.}
\item{occ.ind}{(character) the name of the csv file with independent occurrences for model
evaluation; these occurrences were not used when creating final models; columns as in occ.joint.}
\item{replicates}{(logical) whether or not final models were created performing replicates.}
\item{out.eval}{(character) name of the folder where evaluation results will be written.}
\item{threshold}{(numeric) the percentage of omission error allowed (E), default = 5.}
\item{rand.percent}{(numeric) the percentage of data to be used for the bootstrapping process
when calculating partial ROCs; default = 50.}
\item{iterations}{(numeric) the number of times that the bootstrap is going to be repeated;
default = 500.}
\item{parallel.proc}{(logical) if TRUE, pROC calculations will be performed in parallel using the available
cores of the computer. This will demand more RAM and almost full use of the CPU; hence, its use
is more recommended in high-performance computers. Using this option will speed up the analyses
only if models are large RasterLayers or if \code{iterations} are more than 5000. Default = FALSE.}
}
\value{
A list with two dataframes containing results from the evaluation process, and
a folder, in the working directory, containing a csv file with the final models evaluation
results.
}
\description{
kuenm_feval evaluates final Maxent models in terms of statistical
significance (partial ROC) and omission rates with a user-defined threshold (E).
}
\details{
This function is used after or during the creation of final models.
}
| /man/kuenm_feval.Rd | no_license | mromerosanchez/kuenm | R | false | true | 2,226 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kuenm_feval.R
\name{kuenm_feval}
\alias{kuenm_feval}
\title{Evaluation of final Maxent models with independent data}
\usage{
kuenm_feval(path, occ.joint, occ.ind, replicates, out.eval, threshold = 5,
rand.percent = 50, iterations = 500, parallel.proc = FALSE)
}
\arguments{
\item{path}{(character) directory in which folders containing final models are being created or
were created.}
\item{occ.joint}{(character) the csv file with training and testing occurrences combined,
or the file containing occurrences used to create final models; columns must be: species,
longitude, latitude.}
\item{occ.ind}{(character) the name of the csv file with independent occurrences for model
evaluation; these occurrences were not used when creating final models; columns as in occ.joint.}
\item{replicates}{(logical) whether or not final models were created performing replicates.}
\item{out.eval}{(character) name of the folder where evaluation results will be written.}
\item{threshold}{(numeric) the percentage of omission error allowed (E), default = 5.}
\item{rand.percent}{(numeric) the percentage of data to be used for the bootstrapping process
when calculating partial ROCs; default = 50.}
\item{iterations}{(numeric) the number of times that the bootstrap is going to be repeated;
default = 500.}
\item{parallel.proc}{(logical) if TRUE, pROC calculations will be performed in parallel using the available
cores of the computer. This will demand more RAM and almost full use of the CPU; hence, its use
is more recommended in high-performance computers. Using this option will speed up the analyses
only if models are large RasterLayers or if \code{iterations} are more than 5000. Default = FALSE.}
}
\value{
A list with two dataframes containing results from the evaluation process, and
a folder, in the working directory, containing a csv file with the final models evaluation
results.
}
\description{
kuenm_feval evaluates final Maxent models in terms of statistical
significance (partial ROC) and omission rates with a user-defined threshold (E).
}
\details{
This function is used after or during the creation of final models.
}
|
library(dplyr)
library(ggplot2)
library(ggmap)
library(tidyr)

# Seattle PD 911 incident responses; keep only bike-related events.
data <- read.csv("/home/aaron/policedata/Seattle_Police_Department_911_Incident_Response.csv")
bikedata <- data %>% filter(data$Event.Clearance.Group == "BIKE")
map_seattle <- get_map(location = c(lon = mean(bikedata$Longitude), lat = mean(bikedata$Latitude)),
                       zoom = 11,
                       maptype="terrain",
                       scale = 2)

# One-off preprocessing that produced biketheft.csv: split the clearance
# timestamp into MM/DD/YYYY columns and keep 2015 onwards. Kept commented
# out because the result is read back from disk below; the write.csv that
# depended on `bikedata2` is commented out too so the script runs end to end.
# head(bikedata, n = 1)
# bikedata2 <- bikedata %>% separate(Event.Clearance.Date, c("DATE","TIME"), " ") %>%
#   separate(DATE,c("MM","DD","YYYY"),"/") %>% filter(YYYY >= 2015) %>%
#   droplevels()
# write.csv(bikedata2, "/home/aaron/policedata/biketheft.csv", row.names = FALSE)
bikedata <- read.csv("/home/aaron/policedata/biketheft.csv")
map_seattle <- get_map(location = c(lon = mean(bikedata$Longitude), lat = mean(bikedata$Latitude)),
                       zoom = 13,
                       maptype="terrain",
                       scale = 2)

# July 2015 incidents. (Fixed: filters referenced `bikedata2`, which only
# exists if the commented-out preprocessing is run; the CSV read back into
# `bikedata` already carries YYYY/MM. read.csv coerces "07" to integer 7,
# so compare months numerically rather than against the string "07".)
ggmap(map_seattle) +
  geom_point(data=bikedata[which(bikedata$YYYY == 2015 &
                                   as.numeric(bikedata$MM) == 7),],
             aes(x = Longitude, y = Latitude),
             fill = "red",
             alpha = 0.5,
             size = 4,
             shape = 21) +
  scale_fill_brewer(palette = "Paired")

# All 2015 incidents, coloured by month.
ggmap(map_seattle) +
  geom_point(data=bikedata[which(bikedata$YYYY == 2015),],
             aes(x = Longitude, y = Latitude, fill = MM),
             alpha = 0.5,
             size = 4,
             shape = 21)

zoning <- read.csv("/home/aaron/policedata/census10.csv")
ggplot(zoning, aes(x = INTPTLON10, y = INTPTLAT10, fill = as.factor(TRACT))) +
  geom_polygon() +
  coord_map()

library(maptools)
kingcounty.shp <- maptools::readShapeSpatial("/home/aaron/policedata/WGS84/City_of_Seattle_Zoning.shp")
library(broom)
# Flatten the shapefile to a plain data frame, then join the attribute
# table back on the polygon IDs stored in the sp object.
kingcounty <- broom::tidy(kingcounty.shp)
kingcounty.data <- kingcounty.shp@data
kingcounty.data$id <- "0"
for (i in c(1:nrow(kingcounty.data))) {
  kingcounty.data$id[i] <- kingcounty.shp@polygons[[i]]@ID
}
kingcounty.all <- left_join(kingcounty,kingcounty.data, by = c("id" = "id"))

# Collapse the detailed zoning descriptions into broad categories.
# NOTE(review): order matters -- a description matching several keywords
# keeps the label assigned last.
kingcounty.all$ZONEING <- NA
kingcounty.all$ZONEING[grep(pattern = "downtown", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Downtown"
kingcounty.all$ZONEING[grep(pattern = "commercial", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Commercial"
kingcounty.all$ZONEING[grep(pattern = "industrial", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Industrial"
kingcounty.all$ZONEING[grep(pattern = "rise", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Rise"
kingcounty.all$ZONEING[grep(pattern = "neighborhood", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Neighborhood"
kingcounty.all$ZONEING[grep(pattern = "residential", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Residential"
kingcounty.all$ZONEING[grep(pattern = "mixed", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Mixed"

library(gridExtra)
# Side by side: raw zoning codes vs. the collapsed categories.
a <- ggplot() +
  geom_polygon(data = kingcounty.all, aes(x = long, y = lat, group = group, fill = ZONELUT_DE), color = "black") +
  coord_map()
b <- ggplot() +
  geom_polygon(data = kingcounty.all, aes(x = long, y = lat, group = group, fill = ZONEING), color = "black") +
  coord_map() +
  scale_fill_discrete(na.value = NA)
grid.arrange(a,b,ncol=2)

ggplot() +
  geom_polygon(data = kingcounty, aes(x = long, y = lat, group = group, fill = as.factor(id)), color = "black") +
  coord_map() +
  theme(legend.position = "none")

# 2015 thefts overlaid on the zoning outlines.
ggplot() +
  geom_polygon(data = kingcounty, aes(x = long, y = lat, group = group), fill = NA, color = "black") +
  geom_point(data=bikedata[which(bikedata$YYYY == 2015),],
             aes(x = Longitude, y = Latitude, fill = as.factor(MM)),
             alpha = 0.5,
             size = 1,
             shape = 21) +
  coord_map() + theme(legend.position = "none")

library(mgcv)
## ?in.out
# Terminate each polygon ring with an NA row so mgcv::in.out can test
# which theft locations fall inside which zone. The result is computed
# but not yet stored.
kingcounty.bnd <- do.call(rbind, lapply(split(kingcounty.all, kingcounty.all$id),
                                        function(x) rbind(x,
                                                          within(x[nrow(x),], {lat <- NA; long <- NA}))))[,c("lat","long","id","ZONEING")]
in.out(as.matrix(kingcounty.bnd[,c("lat","long")]), as.matrix(bikedata[,c("Latitude","Longitude")]))

# Promote both tables to sp objects for the over() spatial join.
# NOTE(review): the two proj4string() calls below assign an object's CRS
# to itself and are no-ops; a real CRS (e.g. proj4string(kingcounty.shp))
# was presumably intended -- confirm before relying on over().
coordinates(bikedata) <- ~ Longitude + Latitude
proj4string(bikedata) <- proj4string(bikedata)
coordinates(kingcounty.all) <- ~ long + lat
proj4string(kingcounty.all) <- proj4string(kingcounty.all)
over.table <- over(kingcounty.all,SpatialPoints(bikedata))
bikedata <- cbind(bikedata, over(kingcounty.all,bikedata))
| /policedatachallenge.R | no_license | ClassicSours/policedatachallenge | R | false | false | 4,650 | r | library(dplyr)
library(ggplot2)
library(ggmap)
library(tidyr)
data <- read.csv("/home/aaron/policedata/Seattle_Police_Department_911_Incident_Response.csv")
bikedata <- data %>% filter(data$Event.Clearance.Group == "BIKE")
map_seattle <- get_map(location = c(lon = mean(bikedata$Longitude), lat = mean(bikedata$Latitude)),
zoom = 11,
maptype="terrain",
scale = 2)
# head(bikedata, n = 1)
# bikedata2 <- bikedata %>% separate(Event.Clearance.Date, c("DATE","TIME"), " ") %>%
# separate(DATE,c("MM","DD","YYYY"),"/") %>% filter(YYYY >= 2015) %>%
# droplevels()
write.csv(bikedata2, "/home/aaron/policedata/biketheft.csv", row.names = FALSE)
bikedata <- read.csv("/home/aaron/policedata/biketheft.csv")
map_seattle <- get_map(location = c(lon = mean(bikedata$Longitude), lat = mean(bikedata$Latitude)),
zoom = 13,
maptype="terrain",
scale = 2)
ggmap(map_seattle) +
geom_point(data=bikedata[which(bikedata2$YYYY == 2015 &
bikedata2$MM == "07"),],
aes(x = Longitude, y = Latitude),
fill = "red",
alpha = 0.5,
size = 4,
shape = 21) +
scale_fill_brewer(palette = "Paired")
ggmap(map_seattle) +
geom_point(data=bikedata[which(bikedata2$YYYY == 2015),],
aes(x = Longitude, y = Latitude, fill = MM),
alpha = 0.5,
size = 4,
shape = 21)
zoning <- read.csv("/home/aaron/policedata/census10.csv")
ggplot(zoning, aes(x = INTPTLON10, y = INTPTLAT10, fill = as.factor(TRACT))) +
geom_polygon() +
coord_map()
library(maptools)
kingcounty.shp <- maptools::readShapeSpatial("/home/aaron/policedata/WGS84/City_of_Seattle_Zoning.shp")
library(broom)
kingcounty <- broom::tidy(kingcounty.shp)
kingcounty.data <- kingcounty.shp@data
kingcounty.data$id <- "0"
for (i in c(1:nrow(kingcounty.data))) {
kingcounty.data$id[i] <- kingcounty.shp@polygons[[i]]@ID
}
kingcounty.all <- left_join(kingcounty,kingcounty.data, by = c("id" = "id"))
kingcounty.all$ZONEING <- NA
kingcounty.all$ZONEING[grep(pattern = "downtown", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Downtown"
kingcounty.all$ZONEING[grep(pattern = "commercial", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Commercial"
kingcounty.all$ZONEING[grep(pattern = "industrial", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Industrial"
kingcounty.all$ZONEING[grep(pattern = "rise", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Rise"
kingcounty.all$ZONEING[grep(pattern = "neighborhood", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Neighborhood"
kingcounty.all$ZONEING[grep(pattern = "residential", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Residential"
kingcounty.all$ZONEING[grep(pattern = "mixed", ignore.case = TRUE, x = kingcounty.all$ZONELUT_DE )] <- "Mixed"
library(gridExtra)
a <- ggplot() +
geom_polygon(data = kingcounty.all, aes(x = long, y = lat, group = group, fill = ZONELUT_DE), color = "black") +
coord_map()
b <- ggplot() +
geom_polygon(data = kingcounty.all, aes(x = long, y = lat, group = group, fill = ZONEING), color = "black") +
coord_map() +
scale_fill_discrete(na.value = NA)
grid.arrange(a,b,ncol=2)
ggplot() +
geom_polygon(data = kingcounty, aes(x = long, y = lat, group = group, fill = as.factor(id)), color = "black") +
coord_map() +
theme(legend.position = "none")
ggplot() +
geom_polygon(data = kingcounty, aes(x = long, y = lat, group = group), fill = NA, color = "black") +
geom_point(data=bikedata[which(bikedata$YYYY == 2015),],
aes(x = Longitude, y = Latitude, fill = as.factor(MM)),
alpha = 0.5,
size = 1,
shape = 21) +
coord_map() + theme(legend.position = "none")
library(mgcv)
## ?in.out
kingcounty.bnd <- do.call(rbind, lapply(split(kingcounty.all, kingcounty.all$id),
function(x) rbind(x,
within(x[nrow(x),], {lat <- NA; long <- NA}))))[,c("lat","long","id","ZONEING")]
in.out(as.matrix(kingcounty.bnd[,c("lat","long")]), as.matrix(bikedata[,c("Latitude","Longitude")]))
coordinates(bikedata) <- ~ Longitude + Latitude
proj4string(bikedata) <- proj4string(bikedata)
coordinates(kingcounty.all) <- ~ long + lat
proj4string(kingcounty.all) <- proj4string(kingcounty.all)
over.table <- over(kingcounty.all,SpatialPoints(bikedata))
bikedata <- cbind(bikedata, over(kingcounty.all,bikedata))
|
library(tidyverse)
library(lubridate)
library(ggthemes)
library(cowplot)
library(scales)

# Read and clean data -----------------------------------------------------

# Sleep Cycle export: one row per night. Each night is attributed to the
# date it STARTED (end date minus one day).
# NOTE(review): `bedtime`/`rise` are POSIXct values built from fractional
# hours (seconds since epoch), which the bedtime plots then shift
# numerically via `bedtime - 24 * (bedtime > 12)` -- it works but is
# fragile, hence the "fix bedtime plot" note below.
sleep <- read.csv("./data/sleepdata.csv", sep = ",") %>%
  as_tibble() %>%
  rename(start = Start,
         end = End,
         quality = Sleep.Quality,
         duration = Time.asleep..seconds.) %>%
  mutate(date = as_date(ymd_hms(end)) - 1,
         day = wday(date, label = TRUE),
         weekend = ifelse(grepl("Sat|Sun", day),"Weekend","Weekday") %>%
           as.factor(),
         bedtime = as_datetime(hour(start) + minute(start) / 60),
         rise = as_datetime(hour(end) + minute(end) / 60),
         quality = as.numeric(str_replace(quality, "\\%", "")),  # "87%" -> 87
         duration = dseconds(duration) / 3600) %>%               # seconds -> hours
  select(date,
         day,
         weekend,
         bedtime,
         rise,
         duration,
         quality) %>%
  filter(duration != 0)  # drop nights with no recorded sleep

# Shared plot components ---------------------------------------------------

# Line + point styling shared by every "last 7 days" plot.
point_layers <- function() {
  list(geom_line(size = 1, colour = "cadetblue2"),
       geom_point(shape = 16, size = 3, colour = "cadetblue2"),
       scale_x_date(date_breaks = "1 day", date_labels = "%b %d"),
       theme_solarized_2(light = FALSE))
}

# Raw line + smoothed area/trend styling shared by every trend plot.
trend_layers <- function() {
  list(geom_line(size = 0.2, colour = "lightsteelblue1"),
       stat_smooth(geom = "area",
                   span = 0.4,
                   alpha = 0.2,
                   fill = "lightsteelblue3"),
       geom_smooth(span = 0.4,
                   size = 1,
                   colour = "cadetblue2",
                   se = FALSE),
       theme_solarized_2(light = FALSE))
}

# Last 7 days --------------------------------------------------------------

# Duration
duration_7 <- sleep %>%
  filter(date >= today() - days(7)) %>%
  ggplot(aes(x = date, y = duration)) +
  point_layers() +
  labs(x = "",
       y = "Duration (hours)",
       title = "Sleep Duration",
       caption = "Data recorded with Sleep Cycle",
       colour = "Sleep Quality\n") +
  scale_y_continuous(breaks = breaks_extended(4))

# Quality
quality_7 <- sleep %>%
  filter(date >= today() - days(7)) %>%
  ggplot(aes(x = date, y = quality)) +
  point_layers() +
  labs(x = "",
       y = "Quality (%)",
       title = "Sleep Quality",
       caption = "Data recorded with Sleep Cycle") +
  scale_y_continuous(limits = c(0, 100),
                     breaks = seq(0, 100, 25))

# Bedtime (times after noon are shifted back 24 h so pre- and
# post-midnight bedtimes sit on one continuous scale around 0)
bedtime_7 <- sleep %>%
  filter(date >= today() - days(7)) %>%
  ggplot(aes(x = date, y = bedtime - 24 * (bedtime > 12))) +
  point_layers() +
  labs(x = "",
       y = "Bedtime",
       title = "Bedtime",
       caption = "Data recorded with Sleep Cycle") +
  scale_y_continuous(breaks = breaks_extended(3))

# Plot together
plot_grid(duration_7, quality_7, bedtime_7,
          nrow = 3,
          align = "v"
)

# Last 30 days ------------------------------------------------------------

# Duration
duration_30 <- sleep %>%
  filter(date >= today() - days(30)) %>%
  ggplot(aes(x = date, y = duration)) +
  trend_layers() +
  labs(x = "",
       y = "Duration (hours)",
       title = "Sleep Duration",
       caption = "Data recorded with Sleep Cycle") +
  scale_y_continuous(breaks = seq(0, 14, 2))

# Quality
quality_30 <- sleep %>%
  filter(date >= today() - days(30)) %>%
  ggplot(aes(x = date, y = quality)) +
  trend_layers() +
  labs(x = "",
       y = "Quality (%)",
       title = "Sleep Quality",
       caption = "Data recorded with Sleep Cycle") +
  scale_y_continuous()

# Bedtime
bedtime_30 <- sleep %>%
  filter(date >= today() - days(30)) %>%
  ggplot(aes(x = date, y = bedtime - 24 * (bedtime > 12))) +
  trend_layers() +
  labs(x = "",
       y = "Bedtime",
       title = "Bedtime",
       caption = "Data recorded with Sleep Cycle") +
  scale_y_continuous(breaks = breaks_extended(3))

# Plot together
plot_grid(duration_30, quality_30, bedtime_30,
          nrow = 3,
          align = "v"
)

# NOTES: Need to fix bedtime plot.

# Long term sleep duration data ------------------------------------------

# Smoothed duration trend for one calendar year (x axis labelled by month).
# Replaces three near-identical copy-pasted plot definitions.
plot_duration_year <- function(data, year) {
  data %>%
    filter(date >= as.Date(paste0(year, "-01-01")) &
             date <= as.Date(paste0(year + 1, "-01-01"))) %>%
    ggplot(aes(x = date, y = duration)) +
    trend_layers() +
    labs(x = as.character(year),
         y = "Duration (hours)",
         title = paste("Sleep Duration", year),
         caption = "Data recorded with Sleep Cycle",
         colour = "Sleep Quality\n") +
    scale_y_continuous(expand = c(0,0),
                       limits = c(0, 14),
                       breaks = seq(0, 14, 2)) +
    scale_x_date(expand = c(0,0),
                 date_breaks = "1 month", date_labels = "%b")
}

# Sleep duration - all data
sleep %>%
  ggplot(aes(x = date, y = duration)) +
  trend_layers() +
  labs(x = "Date",
       y = "Duration (hours)",
       title = "Sleep Duration",
       caption = "Data recorded with Sleep Cycle",
       colour = "Sleep Quality\n") +
  scale_y_continuous(expand = c(0,0),
                     limits = c(0, 14),
                     breaks = seq(0, 14, 2)) +
  scale_x_date(expand = c(0,0),
               date_breaks = "4 months", date_labels = "%b %y")

# Per-year duration plots (objects kept for the grids below)
duration_2020 <- plot_duration_year(sleep, 2020)
duration_2019 <- plot_duration_year(sleep, 2019)
duration_2018 <- plot_duration_year(sleep, 2018)

# Plot together
plot_grid(duration_2020, duration_2019,
          nrow = 2,
          align = "v"
)
plot_grid(duration_2020, duration_2019, duration_2018,
          nrow = 3,
          align = "v"
)
duration_2020

# Long term sleep quality data --------------------------------------------

quality_2020 <- sleep %>%
  filter(date >= as.Date("2020-01-01") & date <= as.Date("2021-01-01")) %>%
  ggplot(aes(x = date, y = quality)) +
  trend_layers() +
  labs(x = "2020",
       y = "Quality (%)",  # fixed: was mislabelled "Duration (hours)"
       title = "Sleep Quality 2020",
       caption = "Data recorded with Sleep Cycle",
       colour = "Sleep Quality\n") +
  scale_y_continuous(expand = c(0, 0),
                     limits = c(0, 105),
                     breaks = seq(0, 100, 25)) +
  scale_x_date(expand = c(0,0),
               date_breaks = "1 month", date_labels = "%b")

# Plot together
plot_grid(duration_2020, quality_2020,
          nrow = 2,
          align = "v"
)
| /sleep-cycle.R | no_license | robynfsj/sleep | R | false | false | 9,632 | r |
library(tidyverse)
library(lubridate)
library(ggthemes)
library(cowplot)
library(scales)
# Read and clean data -----------------------------------------------------
sleep <- read.csv("./data/sleepdata.csv", sep = ",") %>%
as_tibble() %>%
rename(start = Start,
end = End,
quality = Sleep.Quality,
duration = Time.asleep..seconds.) %>%
mutate(date = as_date(ymd_hms(end)) - 1,
day = wday(date, label = TRUE),
weekend = ifelse(grepl("Sat|Sun", day),"Weekend","Weekday") %>%
as.factor(),
bedtime = as_datetime(hour(start) + minute(start) / 60),
rise = as_datetime(hour(end) + minute(end) / 60),
quality = as.numeric(str_replace(quality, "\\%", "")),
duration = dseconds(duration) / 3600) %>%
select(date,
day,
weekend,
bedtime,
rise,
duration,
quality) %>%
filter(duration != 0)
# Last 7 days -------------------------------------------------------------
# Duration
duration_7 <- sleep %>%
filter(date >= today() - days(7)) %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 1, colour = "cadetblue2") +
geom_point(shape = 16, size = 3, colour = "cadetblue2") +
labs(x = "",
y = "Duration (hours)",
title = "Sleep Duration",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(breaks = breaks_extended(4)) +
scale_x_date(date_breaks = "1 day", date_labels = "%b %d") +
theme_solarized_2(light = FALSE)
# Quality
quality_7 <- sleep %>%
filter(date >= today() - days(7)) %>%
ggplot(aes(x = date, y = quality)) +
geom_line(size = 1, colour = "cadetblue2") +
geom_point(shape = 16, size = 3, colour = "cadetblue2") +
labs(x = "",
y = "Quality (%)",
title = "Sleep Quality",
caption = "Data recorded with Sleep Cycle") +
scale_y_continuous(limits = c(0, 100),
breaks = seq(0, 100, 25)) +
scale_x_date(date_breaks = "1 day", date_labels = "%b %d") +
theme_solarized_2(light = FALSE)
# Bedtime
bedtime_7 <- sleep %>%
filter(date >= today() - days(7)) %>%
ggplot(aes(x = date, y = bedtime - 24 * (bedtime > 12))) +
geom_line(size = 1, colour = "cadetblue2") +
geom_point(shape = 16, size = 3, colour = "cadetblue2") +
labs(x = "",
y = "Bedtime",
title = "Bedtime",
caption = "Data recorded with Sleep Cycle") +
scale_y_continuous(breaks = breaks_extended(3)) +
scale_x_date(date_breaks = "1 day", date_labels = "%b %d") +
theme_solarized_2(light = FALSE)
# Plot together
plot_grid(duration_7, quality_7, bedtime_7,
nrow = 3,
align = "v"
)
# Last 30 days ------------------------------------------------------------
# Duration
duration_30 <- sleep %>%
filter(date >= today() - days(30)) %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "",
y = "Duration (hours)",
title = "Sleep Duration",
caption = "Data recorded with Sleep Cycle") +
scale_y_continuous(breaks = seq(0, 14, 2)) +
theme_solarized_2(light = FALSE)
# Quality
quality_30 <- sleep %>%
filter(date >= today() - days(30)) %>%
ggplot(aes(x = date, y = quality)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "",
y = "Quality (%)",
title = "Sleep Quality",
caption = "Data recorded with Sleep Cycle") +
scale_y_continuous() +
theme_solarized_2(light = FALSE)
# Bedtime
bedtime_30 <- sleep %>%
filter(date >= today() - days(30)) %>%
ggplot(aes(x = date, y = bedtime - 24 * (bedtime > 12))) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "",
y = "Bedtime",
title = "Bedtime",
caption = "Data recorded with Sleep Cycle") +
scale_y_continuous(breaks = breaks_extended(3)) +
theme_solarized_2(light = FALSE)
# Plot together
plot_grid(duration_30, quality_30, bedtime_30,
nrow = 3,
align = "v"
)
# NOTES: Need to fix bedtime plot.
# Long term sleep duration data ------------------------------------------
# Sleep duration - all data
sleep %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "Date",
y = "Duration (hours)",
title = "Sleep Duration",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(expand = c(0,0),
limits = c(0, 14),
breaks = seq(0, 14, 2)) +
scale_x_date(expand = c(0,0),
date_breaks = "4 months", date_labels = "%b %y") +
theme_solarized_2(light = FALSE)
# Sleep duration - 2020
duration_2020 <- sleep %>%
filter(date >= as.Date("2020-01-01") & date <= as.Date("2021-01-01")) %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "2020",
y = "Duration (hours)",
title = "Sleep Duration 2020",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(expand = c(0,0),
limits = c(0, 14),
breaks = seq(0, 14, 2)) +
scale_x_date(expand = c(0,0),
date_breaks = "1 month", date_labels = "%b") +
theme_solarized_2(light = FALSE)
# Sleep duration - 2019
duration_2019 <- sleep %>%
filter(date >= as.Date("2019-01-01") & date <= as.Date("2020-01-01")) %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "2019",
y = "Duration (hours)",
title = "Sleep Duration 2019",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(expand = c(0,0),
limits = c(0, 14),
breaks = seq(0, 14, 2)) +
scale_x_date(expand = c(0,0),
date_breaks = "1 month", date_labels = "%b") +
theme_solarized_2(light = FALSE)
# Sleep duration - 2018
duration_2018 <- sleep %>%
filter(date >= as.Date("2018-01-01") & date <= as.Date("2019-01-01")) %>%
ggplot(aes(x = date, y = duration)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "2018",
y = "Duration (hours)",
title = "Sleep Duration 2018",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(expand = c(0,0),
limits = c(0, 14),
breaks = seq(0, 14, 2)) +
scale_x_date(expand = c(0,0),
date_breaks = "1 month", date_labels = "%b") +
theme_solarized_2(light = FALSE)
# Plot together
plot_grid(duration_2020, duration_2019,
nrow = 2,
align = "v"
)
plot_grid(duration_2020, duration_2019, duration_2018,
nrow = 3,
align = "v"
)
duration_2020
# Long term sleep quality data --------------------------------------------
quality_2020 <- sleep %>%
filter(date >= as.Date("2020-01-01") & date <= as.Date("2021-01-01")) %>%
ggplot(aes(x = date, y = quality)) +
geom_line(size = 0.2, colour = "lightsteelblue1") +
stat_smooth(geom = "area",
span = 0.4,
alpha = 0.2,
fill = "lightsteelblue3") +
geom_smooth(span = 0.4,
size = 1,
colour = "cadetblue2",
se = FALSE) +
labs(x = "2020",
y = "Duration (hours)",
title = "Sleep Quality 2020",
caption = "Data recorded with Sleep Cycle",
colour = "Sleep Quality\n") +
scale_y_continuous(expand = c(0, 0),
limits = c(0, 105),
breaks = seq(0, 100, 25)) +
scale_x_date(expand = c(0,0),
date_breaks = "1 month", date_labels = "%b") +
theme_solarized_2(light = FALSE)
# Plot together
plot_grid(duration_2020, quality_2020,
nrow = 2,
align = "v"
)
|
# GetAssignment / GetAssignments (also aliased as 'assignment'/'assignments'):
# retrieve completed assignments from the MTurk Requester API, either by
# AssignmentId ('assignment'), by HITId ('hit'), or by HITTypeId ('hit.type').
# Exactly one of those three selectors must be supplied.  Invisibly returns a
# data.frame of assignments (or NA/NULL placeholders, see 'batch' below).
assignment <-
assignments <-
GetAssignment <-
GetAssignments <-
function (assignment = NULL, hit = NULL, hit.type = NULL, status = NULL,
    return.all = FALSE, pagenumber = "1", pagesize = "10", sortproperty = "SubmitTime",
    sortdirection = "Ascending", response.group = NULL, keypair = credentials(),
    print = getOption('MTurkR.print'), browser = getOption('MTurkR.browser'),
    log.requests = getOption('MTurkR.log'), sandbox = getOption('MTurkR.sandbox'),
    return.assignment.dataframe = TRUE, validation.test = getOption('MTurkR.test')) {
    ## ---- credentials and argument validation ------------------------------
    if(!is.null(keypair)) {
        keyid <- keypair[1]
        secret <- keypair[2]
    }
    else
        stop("No keypair provided or 'credentials' object not stored")
    if(!sortproperty %in% c("AcceptTime", "SubmitTime", "AssignmentStatus"))
        stop("'sortproperty' must be 'AcceptTime' | 'SubmitTime' | 'AssignmentStatus'")
    if(!sortdirection %in% c("Ascending", "Descending"))
        stop("'sortdirection' must be 'Ascending' | 'Descending'")
    if(as.numeric(pagesize) < 1 || as.numeric(pagesize) > 100)
        stop("'pagesize' must be in range (1,100)")
    if(as.numeric(pagenumber) < 1)
        ## FIX: the check rejects values below 1, so the message must read
        ## '>= 1' (the original message claimed '> 1', contradicting the test).
        stop("'pagenumber' must be >= 1")
    ## ---- optional ResponseGroup query-string fragment ---------------------
    GETresponsegroup <- ""
    if(!is.null(response.group)) {
        ## 'response.group' may be a vector (a multi-element case is handled
        ## below), so wrap the membership test in all(): a length > 1
        ## condition in if() is an error as of R 4.2.
        if(!is.null(assignment)) {
            if(!all(response.group %in% c("Request", "Minimal",
               "AssignmentFeedback", "HITDetail", "HITQuestion")))
                stop("ResponseGroup must be in c(Request,Minimal,AssignmentFeedback,HITDetail,HITQuestion)")
        }
        else {
            if(!all(response.group %in% c("Request", "Minimal", "AssignmentFeedback")))
                stop("ResponseGroup must be in c(Request,Minimal,AssignmentFeedback)")
        }
        if(length(response.group) == 1)
            GETresponsegroup <- paste("&ResponseGroup=", response.group, sep = "")
        else {
            ## FIX: accumulate one '&ResponseGroupN=' fragment per element.
            ## The original assignment discarded the running value on every
            ## pass, so only the last response group survived the loop.
            for(i in seq_along(response.group)){
                GETresponsegroup <- paste(GETresponsegroup, "&ResponseGroup",
                    i-1, "=", response.group[i], sep = "")
            }
        }
    }
    if (!is.null(assignment)) {
        ## ---- branch 1: fetch specific assignments by AssignmentId ---------
        operation <- "GetAssignment"
        for(i in seq_along(assignment)) {
            GETparameters <- paste("&AssignmentId=", assignment[i], GETresponsegroup, sep = "")
            auth <- authenticate(operation, secret)
            if(browser == TRUE) {
                request <- request(keyid, auth$operation, auth$signature,
                            auth$timestamp, GETparameters, browser = browser,
                            sandbox = sandbox, log.requests = log.requests,
                            validation.test = validation.test)
                ## NOTE(review): invisible() here is not a return; execution
                ## continues.  Presumably an early return was intended for
                ## validation mode -- verify against the rest of the package.
                if(validation.test)
                    invisible(request)
            }
            else {
                request <- request(keyid, auth$operation, auth$signature,
                            auth$timestamp, GETparameters, log.requests = log.requests,
                            sandbox = sandbox, validation.test = validation.test)
                if(validation.test)
                    invisible(request)
                if(request$valid == TRUE) {
                    a <- AssignmentsToDataFrame(xml = request$xml)$assignments
                    a$Answer <- NULL
                    ## merge each single-assignment data.frame into the result
                    if(i == 1)
                        Assignments <- a
                    else
                        Assignments <- merge(Assignments, a, all=TRUE)
                    if(print == TRUE)
                        message(i, ": Assignment ", assignment[i], " Retrieved")
                }
            }
        }
        invisible(Assignments)
    }
    else {
        ## ---- branch 2: fetch all assignments for one or more HITs ---------
        operation <- "GetAssignmentsForHIT"
        if((is.null(hit) & is.null(hit.type)) | (!is.null(hit) & !is.null(hit.type)))
            stop("Must provide 'assignment' xor 'hit' xor 'hit.type'")
        else if(!is.null(hit)){
            if(is.factor(hit))
                hit <- as.character(hit)
            hitlist <- hit
        }
        else if(!is.null(hit.type)) {
            ## resolve a HITTypeId to the list of its HITIds via SearchHITs
            if(is.factor(hit.type))
                hit.type <- as.character(hit.type)
            hitsearch <- SearchHITs(keypair = keypair, print = FALSE,
                            log.requests = log.requests, sandbox = sandbox,
                            return.qual.dataframe = FALSE)
            hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$HITTypeId %in% hit.type]
            if(length(hitlist) == 0)
                stop("No HITs found for HITType")
        }
        if(return.all == TRUE | length(hitlist) > 1) {
            ## force deterministic full paging when everything is requested
            sortproperty <- "SubmitTime"
            sortdirection <- "Ascending"
            pagesize <- "100"
            pagenumber <- "1"
        }
        ## fetch one page of assignments for one HIT; returns the raw request
        ## augmented with $total, $batch.total, and (optionally) $assignments
        batch <- function(batchhit, pagenumber) {
            GETiteration <- ""
            if(!is.null(status)) {
                if(all(status %in% c("Approved", "Rejected", "Submitted")))
                    GETiteration <- paste(GETiteration, "&AssignmentStatus=",
                                    paste(status,collapse=","),
                                    GETresponsegroup, sep = "")
                else
                    status <- NULL
            }
            GETiteration <- paste("&HITId=", batchhit, "&PageNumber=",
                            pagenumber, "&PageSize=", pagesize, "&SortProperty=",
                            sortproperty, "&SortDirection=", sortdirection,
                            GETiteration, sep = "")
            auth <- authenticate(operation, secret)
            batch <- request( keyid, auth$operation, auth$signature,
                        auth$timestamp, GETiteration, log.requests = log.requests,
                        sandbox = sandbox, validation.test = validation.test)
            if(validation.test)
                invisible(batch)
            ## total result count, scraped from the response XML
            batch$total <- as.numeric(strsplit(strsplit(batch$xml,
                "<TotalNumResults>")[[1]][2], "</TotalNumResults>")[[1]][1])
            batch$batch.total <- length(xpathApply(xmlParse(batch$xml), "//Assignment"))
            if(batch$batch.total > 0 & return.assignment.dataframe == TRUE) {
                batch$assignments <- AssignmentsToDataFrame(xml = batch$xml)$assignments
                batch$assignments$Answer <- NULL
            }
            else if(batch$batch.total > 0 & return.assignment.dataframe == FALSE)
                batch$assignments <- NULL
            else
                batch$assignments <- NA
            return(batch)
        }
        cumulative <- 0
        for(i in seq_along(hitlist)) {
            if(i == 1){
                request <- batch(hitlist[i], pagenumber)
                if(validation.test)
                    invisible(request)
                runningtotal <- request$batch.total
            }
            else{
                pagenumber <- 1
                nextrequest <- batch(hitlist[i], pagenumber)
                if(validation.test)
                    invisible(nextrequest)
                request$total <- request$total + nextrequest$total
                if (return.assignment.dataframe == TRUE)
                    request$assignments <- merge(request$assignments,
                        nextrequest$assignments, all=TRUE)
                request$pages.returned <- pagenumber
                runningtotal <- nextrequest$batch.total
            }
            if(return.all == TRUE) {
                ## keep paging until the reported total has been collected
                pagenumber <- 2
                while (request$total > runningtotal) {
                    nextbatch <- batch(hitlist[i], pagenumber)
                    if(validation.test)
                        invisible(nextbatch)
                    if(return.assignment.dataframe == TRUE)
                        request$assignments <- merge(request$assignments,
                            nextbatch$assignments, all=TRUE)
                    request$pages.returned <- pagenumber
                    runningtotal <- runningtotal + nextbatch$batch.total
                    pagenumber <- pagenumber + 1
                }
            }
            cumulative <- cumulative + runningtotal
            request$batch.total <- NULL
            if(!is.null(hit.type))
                request$assignments["HITTypeId"] <- hit.type
        }
        if(print == TRUE)
            message(cumulative, " of ", request$total, " Assignments Retrieved")
        invisible(request$assignments)
    }
}
| /R/GetAssignment.R | no_license | imclab/MTurkR | R | false | false | 8,638 | r | assignment <-
assignments <-
GetAssignment <-
GetAssignments <-
function (assignment = NULL, hit = NULL, hit.type = NULL, status = NULL,
return.all = FALSE, pagenumber = "1", pagesize = "10", sortproperty = "SubmitTime",
sortdirection = "Ascending", response.group = NULL, keypair = credentials(),
print = getOption('MTurkR.print'), browser = getOption('MTurkR.browser'),
log.requests = getOption('MTurkR.log'), sandbox = getOption('MTurkR.sandbox'),
return.assignment.dataframe = TRUE, validation.test = getOption('MTurkR.test')) {
if(!is.null(keypair)) {
keyid <- keypair[1]
secret <- keypair[2]
}
else
stop("No keypair provided or 'credentials' object not stored")
if(!sortproperty %in% c("AcceptTime", "SubmitTime", "AssignmentStatus"))
stop("'sortproperty' must be 'AcceptTime' | 'SubmitTime' | 'AssignmentStatus'")
if(!sortdirection %in% c("Ascending", "Descending"))
stop("'sortdirection' must be 'Ascending' | 'Descending'")
if(as.numeric(pagesize) < 1 || as.numeric(pagesize) > 100)
stop("'pagesize' must be in range (1,100)")
if(as.numeric(pagenumber) < 1)
stop("'pagenumber' must be > 1")
GETresponsegroup <- ""
if(!is.null(response.group)) {
if(!is.null(assignment)) {
if(!response.group %in% c("Request", "Minimal",
"AssignmentFeedback", "HITDetail", "HITQuestion"))
stop("ResponseGroup must be in c(Request,Minimal,AssignmentFeedback,HITDetail,HITQuestion)")
}
else {
if(!response.group %in% c("Request", "Minimal", "AssignmentFeedback"))
stop("ResponseGroup must be in c(Request,Minimal,AssignmentFeedback)")
}
if(length(response.group) == 1)
GETresponsegroup <- paste("&ResponseGroup=", response.group, sep = "")
else {
for(i in 1:length(response.group)){
GETresponsegroup <- paste( "&ResponseGroup", i-1,
"=", response.group[i], sep = "")
}
}
}
if (!is.null(assignment)) {
operation <- "GetAssignment"
for(i in 1:length(assignment)) {
GETparameters <- paste("&AssignmentId=", assignment[i], GETresponsegroup, sep = "")
auth <- authenticate(operation, secret)
if(browser == TRUE) {
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, browser = browser,
sandbox = sandbox, log.requests = log.requests,
validation.test = validation.test)
if(validation.test)
invisible(request)
}
else {
request <- request(keyid, auth$operation, auth$signature,
auth$timestamp, GETparameters, log.requests = log.requests,
sandbox = sandbox, validation.test = validation.test)
if(validation.test)
invisible(request)
QualificationRequirements <- list()
if(request$valid == TRUE) {
a <- AssignmentsToDataFrame(xml = request$xml)$assignments
a$Answer <- NULL
if(i == 1)
Assignments <- a
else
Assignments <- merge(Assignments, a, all=TRUE)
if(print == TRUE)
message(i, ": Assignment ", assignment[i], " Retrieved")
}
}
}
invisible(Assignments)#, HITs = HITs,
#QualificationRequirements = QualificationRequirements))
}
else {
operation <- "GetAssignmentsForHIT"
if((is.null(hit) & is.null(hit.type)) | (!is.null(hit) & !is.null(hit.type)))
stop("Must provide 'assignment' xor 'hit' xor 'hit.type'")
else if(!is.null(hit)){
if(is.factor(hit))
hit <- as.character(hit)
hitlist <- hit
}
else if(!is.null(hit.type)) {
if(is.factor(hit.type))
hit.type <- as.character(hit.type)
hitsearch <- SearchHITs(keypair = keypair, print = FALSE,
log.requests = log.requests, sandbox = sandbox,
return.qual.dataframe = FALSE)
hitlist <- hitsearch$HITs$HITId[hitsearch$HITs$HITTypeId %in% hit.type]
if(length(hitlist) == 0)
stop("No HITs found for HITType")
}
if(return.all == TRUE | length(hitlist)>1) {
sortproperty <- "SubmitTime"
sortdirection <- "Ascending"
pagesize <- "100"
pagenumber <- "1"
}
batch <- function(batchhit, pagenumber) {
GETiteration <- ""
if(!is.null(status)) {
if(all(status %in% c("Approved", "Rejected", "Submitted")))
GETiteration <- paste(GETiteration, "&AssignmentStatus=",
paste(status,collapse=","),
GETresponsegroup, sep = "")
else
status <- NULL
}
GETiteration <- paste("&HITId=", batchhit, "&PageNumber=",
pagenumber, "&PageSize=", pagesize, "&SortProperty=",
sortproperty, "&SortDirection=", sortdirection,
GETiteration, sep = "")
auth <- authenticate(operation, secret)
batch <- request( keyid, auth$operation, auth$signature,
auth$timestamp, GETiteration, log.requests = log.requests,
sandbox = sandbox, validation.test = validation.test)
if(validation.test)
invisible(batch)
batch$total <- as.numeric(strsplit(strsplit(batch$xml,
"<TotalNumResults>")[[1]][2], "</TotalNumResults>")[[1]][1])
batch$batch.total <- length(xpathApply(xmlParse(batch$xml), "//Assignment"))
if(batch$batch.total > 0 & return.assignment.dataframe == TRUE) {
batch$assignments <- AssignmentsToDataFrame(xml = batch$xml)$assignments
batch$assignments$Answer <- NULL
}
else if(batch$batch.total > 0 & return.assignment.dataframe == FALSE)
batch$assignments <- NULL
else
batch$assignments <- NA
return(batch)
}
cumulative <- 0
for(i in 1:length(hitlist)) {
if(i == 1){
request <- batch(hitlist[i], pagenumber)
if(validation.test)
invisible(request)
runningtotal <- request$batch.total
}
else{
pagenumber <- 1
nextrequest <- batch(hitlist[i], pagenumber)
if(validation.test)
invisible(nextrequest)
request$total <- request$total + nextrequest$total
if (return.assignment.dataframe == TRUE)
request$assignments <- merge(request$assignments,
nextrequest$assignments, all=TRUE)
request$pages.returned <- pagenumber
runningtotal <- nextrequest$batch.total
}
if(return.all == TRUE) {
pagenumber <- 2
while (request$total > runningtotal) {
nextbatch <- batch(hitlist[i], pagenumber)
if(validation.test)
invisible(nextbatch)
if(return.assignment.dataframe == TRUE)
request$assignments <- merge(request$assignments,
nextbatch$assignments, all=TRUE)
request$pages.returned <- pagenumber
runningtotal <- runningtotal + nextbatch$batch.total
pagenumber <- pagenumber + 1
}
}
cumulative <- cumulative + runningtotal
request$batch.total <- NULL
if(!is.null(hit.type))
request$assignments["HITTypeId"] <- hit.type
}
if(print == TRUE)
message(cumulative, " of ", request$total, " Assignments Retrieved")
invisible(request$assignments)
}
}
|
% Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{UFO}
\alias{UFO}
\alias{UFO-package}
\title{R package: UFO}
\description{
Find the latest reported UFO sightings in different states, provinces and territories across Canada and the USA
}
\details{
The functions in the UFO package help you find the most recent sightings by area and
the types (i.e., shapes) of UFOs reported
}
\seealso{
\code{\link{latest.sightings}}, \code{\link{sightings.by.shape}}
}
| /man/ufo.Rd | no_license | kurtis14/UFO | R | false | false | 482 | rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{UFO}
\alias{UFO}
\alias{UFO-package}
\title{R package: UFO}
\description{
Find the latest reported UFO sightings in different states, provinces and territories across Canada and the USA
}
\details{
The functions in the UFO package help you find the most recent sightings by area and
the types (i.e., shapes) of UFOs reported
}
\seealso{
\code{\link{latest.sightings}}, \code{\link{sightings.by.shape}}
}
|
# Wrapper around efficiency() that never throws: returns the requested
# fields (type, "eff", "fluo") as a flat vector, or NaNs when the fit fails.
safe_efficiency <- function(fit, type) {
  res <- try(efficiency(fit, type = type, plot = FALSE)[c(type, "eff", "fluo")],
             silent = TRUE)
  # inherits() is the documented way to detect try() failures; comparing
  # class(res) with '==' is fragile whenever the class attribute has
  # length > 1 (a vector condition in if() errors as of R 4.2).
  if (inherits(res, "try-error")) {
    res <- rep(NaN, length(c(type, "eff", "fluo")))
  } else {
    # an ambiguous (multi-valued) efficiency estimate is reported as NaN
    if (length(res[["eff"]]) > 1)
      res$eff <- NaN
  }
  unlist(res)
} | /R/safe_efficiency.R | no_license | gaoce/dpcR | R | false | false | 324 | r | safe_efficiency <- function(fit, type) {
res <- try(efficiency(fit, type = type, plot = FALSE)[c(type, "eff", "fluo")],
silent = TRUE)
if (class(res) == "try-error") {
res <- rep(NaN, length(c(type, "eff", "fluo")))
} else {
if (length(res[["eff"]]) > 1)
res$eff <- NaN
}
unlist(res)
} |
# plot4: 2x2 panel of household power-consumption measurements for
# 2007-02-01..2007-02-02, written to plot4.png.
# NOTE(review): rm(list = ls()) wipes the user's entire workspace; this is
# discouraged in shared scripts -- confirm it is intentional here.
rm(list = ls())
##setwd("/Users/soniasingh/Desktop/Coursera/Data Science Specialization")
## Read all data ('?' marks missing values in this dataset)
data_all <- read.csv("/Users/soniasingh/Desktop/Coursera/Data Science Specialization/household_power_consumption 2.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
# parse the day/month/year date strings into Date objects
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data: keep only the two target days
newdata <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_all) # free the full dataset; only the two-day slice is needed
## Convert date & time into a single POSIXct timestamp per observation
datetime <- paste(as.Date(newdata$Date), newdata$Time)
newdata$Datetime <- as.POSIXct(datetime)
##Create plot-4: 2x2 grid of base-graphics panels
par(mfrow=c(2,2), mar=c(5,5,2,1), oma=c(0,0,0,2)) #set panel layout and margins
with(newdata, {
{plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage", xlab="datetime")
# third panel: the three sub-metering series overlaid on one plot
plot(Sub_metering_1~Datetime, type="l", # l is for line plot
ylab="Energy sub-metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
}
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",#suppress legend box
legend=c("Submetering1", "Submetering2", "Submetering3"))
#extra curly brackets make output neater
{plot(Global_reactive_power~Datetime, type="l",
ylab="Global_reactive_power",xlab="datetime")
}
})
## Create .png file: copy the on-screen device to a 480x480 PNG
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() | /plot4.R | no_license | soniaclairesingh/ExData_Plotting1 | R | false | false | 1,653 | r | rm(list = ls())
##setwd("/Users/soniasingh/Desktop/Coursera/Data Science Specialization")
##Read all data
data_all <- read.csv("/Users/soniasingh/Desktop/Coursera/Data Science Specialization/household_power_consumption 2.txt", header=T, sep=';', na.strings="?",
nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data_all$Date <- as.Date(data_all$Date, format="%d/%m/%Y")
## Subset data
newdata <- subset(data_all, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
rm(data_all)
## Convert date & time
datetime <- paste(as.Date(newdata$Date), newdata$Time)
newdata$Datetime <- as.POSIXct(datetime)
##Create plot-4
par(mfrow=c(2,2), mar=c(5,5,2,1), oma=c(0,0,0,2)) #used par to query graphical parameters
with(newdata, {
{plot(Global_active_power~Datetime, type="l",
ylab="Global Active Power", xlab="")
plot(Voltage~Datetime, type="l",
ylab="Voltage", xlab="datetime")
plot(Sub_metering_1~Datetime, type="l", # l is for line plot
ylab="Energy sub-metering", xlab="")
lines(Sub_metering_2~Datetime,col='Red')
lines(Sub_metering_3~Datetime,col='Blue')
}
legend("topright", col=c("black", "red", "blue"), lty=1, lwd=2, bty="n",#supress legend box
legend=c("Submetering1", "Submetering2", "Submetering3"))
#extra curly brackets make output neater
{plot(Global_reactive_power~Datetime, type="l",
ylab="Global_reactive_power",xlab="datetime")
}
})
## Create .png file
dev.copy(png, file="plot4.png", height=480, width=480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{REIDS_Analysis}
\alias{REIDS_Analysis}
\title{REIDS_Analysis}
\usage{
REIDS_Analysis(geneIDs, Indices, DataFile, nsim = 5000,
informativeCalls = FALSE, Summarize = TRUE, rho = 0.5,
Exonthreshold = 0.5, significancelevel = 0.05, Groups, paired = FALSE,
Low_AllSamples = c(), Low_GSamples = c(), Juninfo = "User",
JAnnotI = NULL, JAnnot = NULL, EandTrAnnotI = NULL,
EandTrAnnot = NULL, PartiallyAnnotated = FALSE, positionData = NULL,
transcriptData = NULL, Location = "Output", Name = "REIDSAnalysis")
}
\arguments{
\item{geneIDs}{A vector with the geneIDs to analyze.}
\item{Indices}{The .csv file created by Line_Indexer.py which contains indices for every gene.}
\item{DataFile}{The .csv file created by PivotTransformation.}
\item{nsim}{The number of iterations to perform. Defaults to 1000.}
\item{informativeCalls}{Logical. Should the I/NI calls method be performed before applying the REIDS model?}
\item{Summarize}{A character vector specifying which summarization method is to be performed. The choices are "EqualAll", "WeightedAll", "EqualConst", "WeightedConst". The former two use all probe sets while the latter two use only the constitutive probe sets. Summarization on the constitutive probe sets will only be performed if ASPSR is specified.}
\item{rho}{The threshold for filtering in the I/NI calls method. Probesets with scores higher than rho are kept.}
\item{Exonthreshold}{The exon score threshold to be maintained. If not NULL, probesets with an exon score lower than this value are not considered further and the p-values will be adjusted for multiplicity after testing. If NULL, all probesets are considered and a multiplicity correction is not performed.}
\item{significancelevel}{The significance level to be maintained on the p-values. The filtering on the significance is conducted only if an Exonthreshold is specified and the p-value are adjusted for multiplicity.}
\item{Groups}{A list with elements specifying the columns of the data in each group.}
\item{paired}{Logical. Are the groups paired? If TRUE the mean paired differences are calculated and tested whether these are significantly different from zero or not.}
\item{Low_AllSamples}{A character vector containing the probe sets which are not DABG in all samples.}
\item{Low_GSamples}{A list with a character vector per group containing the probe sets which are not DABG in that group.}
\item{Juninfo}{A parameter specifying whether the annotations are user or Ensembl defined. If JunInfo is "User" (default) the annotations provided in EandTrAnnot are used. If JunInfo is "Ensembl" the annotations in EandTrAnnot are used to set up the junction associations but the gene name and position in transcriptData and positionData are used to connect with the Ensembl data base and retrieve corresponding information.}
\item{JAnnotI}{The file name with line indices for the junction associations.}
\item{JAnnot}{The file name with the junction associations.}
\item{EandTrAnnotI}{The file name with line indices for the exon and isoform annotations.}
\item{EandTrAnnot}{The file name with the exon and isoform annotations.}
\item{PartiallyAnnotated}{Logical. Should the exon annotations with partially annotated probe sets still be included? If FALSE, these are excluded. If TRUE, these are included. Default is FALSE.}
\item{positionData}{The file with the chromosome start and ends for the probe sets. Only needed in JunInfo=Ensembl.}
\item{transcriptData}{The file with gene name of the transcripts. Only needed in JunInfo=Ensembl.}
\item{Location}{A character string indicating the place where the outputs are saved. Defaults to Output.}
\item{Name}{A character string with the name of the output file. Defaults to "REIDSAnalysis".}
}
\value{
The output will be written to each of the corresponding .txt files of the called upon functions.
}
\description{
The REIDS_Analysis is a wrapper function for the REIDSFunction, the ASExon function, the REIDS_JunctionAssesment function and the REIDS_IsoformAssesment function.
}
\examples{
\dontrun{
data(TC1500264)
PivotTransformData(Data=TC1500264,GeneID=NULL,ExonID=NULL,
REMAPSplitFile="TC1500264_Gene_SplitFile.txt",Location="Output/",Name="TC1500264_Pivot")
REIDS_Analysis(Indices="Output/TC1500264_LineIndex.csv",DataFile="Output/TC1500264_Pivot.csv",
nsim=100,informativeCalls=FALSE,Summarize=c("WeightedAll","EqualAll","WeightedConst","EqualConst"),
rho=0.5,Exonthreshold=0.5,significancelevel=0.05,Groups=Groups,paired=FALSE,Low_AllSamples=c()
,Low_GSamples=c(),Juninfo="User",JAnnotI=NULL,JAnnot=NULL,EandTrAnnotI="Output/REMAP_Indices.txt",
EandTrAnnot="Output/HJAY_REMAP.txt",positionData=NULL,transcriptData=NULL,
Location="OutputREIDSAnalysis",Name="TC1500264")
}
}
| /man/REIDS_Analysis.Rd | no_license | cran/REIDS | R | false | true | 4,923 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Functions.R
\name{REIDS_Analysis}
\alias{REIDS_Analysis}
\title{REIDS_Analysis}
\usage{
REIDS_Analysis(geneIDs, Indices, DataFile, nsim = 5000,
informativeCalls = FALSE, Summarize = TRUE, rho = 0.5,
Exonthreshold = 0.5, significancelevel = 0.05, Groups, paired = FALSE,
Low_AllSamples = c(), Low_GSamples = c(), Juninfo = "User",
JAnnotI = NULL, JAnnot = NULL, EandTrAnnotI = NULL,
EandTrAnnot = NULL, PartiallyAnnotated = FALSE, positionData = NULL,
transcriptData = NULL, Location = "Output", Name = "REIDSAnalysis")
}
\arguments{
\item{geneIDs}{A vector with the geneIDs to analyze.}
\item{Indices}{The .csv file created by Line_Indexer.py which contains indices for every gene.}
\item{DataFile}{The .csv file created by PivotTransformation.}
\item{nsim}{The number of iterations to perform. Defaults to 1000.}
\item{informativeCalls}{Logical. Should the I/NI calls method be perform before applying the REIDS model?}
\item{Summarize}{A character vector specifying the wich summarization method to be performed. The choices are using "EqualAll", "WeightedAll", "EqualConst", "WeightedConst". The former two use all probe sets while the latter to use only the consituitive probe sets. Summarization on the consistuitive probe sets will only be performed if ASPSR is specified.}
\item{rho}{The threshold for filtering in the I/NI calls method. Probesets with scores higher than rho are kept.}
\item{Exonthreshold}{The exon score threshold to be maintained. If not NULL, probesets with an exon score lower than this value are not considered further and the p-values will be adjusted for multiplicity after testing. If NULL, all probesets are considered and a multiplicity correction is not performed.}
\item{significancelevel}{The significance level to be maintained on the p-values. The filtering on the significance is conducted only if an Exonthreshold is specified and the p-value are adjusted for multiplicity.}
\item{Groups}{A list with elements specifying the columns of the data in each group.}
\item{paired}{Logical. Are the groups paired? If TRUE the mean paired differences are calculated and tested whether these are significantly different from zero or not.}
\item{Low_AllSamples}{A character vector containing the probe sets which are not DABG in all samples.}
\item{Low_GSamples}{A list with a character vector per group containing the probe sets which are not DABG in that group.}
\item{Juninfo}{A parameter specifying wether the annotations are user of Ensembl defined. If JunInfo is "User" (default) the annotations provided in EandTrAnnot are used. If JunInfo is "Ensembl" the annotations in EandTrAnnot are used to set up tje junction associations but the gene name and position in transcriptData and positionData are used to connect with the Ensembl data base and retrieve corresponding information.}
\item{JAnnotI}{The file name with line indices for the junction associations.}
\item{JAnnot}{The file name with the junction associations.}
\item{EandTrAnnotI}{The file name with line indices for the exon and isoform annotations.}
\item{EandTrAnnot}{The file name with the exon and isoform annotations.}
\item{PartiallyAnnotated}{Logical. Should the exon annotations with partially annotated probe sets still be included? If FALSE, these are excluded. If TRUE, these are included. Default is FALSE.}
\item{positionData}{The file with the chromosome start and ends for the probe sets. Only needed in JunInfo=Ensembl.}
\item{transcriptData}{The file with gene name of the transcripts. Only needed in JunInfo=Ensembl.}
\item{Location}{A character string indication the place where the outputs are saved. Defaults to Output.}
\item{Name}{A character string with the name of the ouput file. Defaults to "REIDSAnalysis".}
}
\value{
The output will be written to each of the corresponding .txt files of the called upon functions.
}
\description{
The REIDS_Analysis is a wrapper function for the REIDSFunction, the ASExon function, the REIDS_JunctionAssesment function and the REIDS_IsoformAssesment function.
}
\examples{
\dontrun{
data(TC1500264)
PivotTransformData(Data=TC1500264,GeneID=NULL,ExonID=NULL,
REMAPSplitFile="TC1500264_Gene_SplitFile.txt",Location="Output/",Name="TC1500264_Pivot")
REIDS_Analysis(Indices="Output/TC1500264_LineIndex.csv",DataFile="Output/TC1500264_Pivot.csv",
nsim=100,informativeCalls=FALSE,Summarize=c("WeightedAll","EqualAll","WeightedConst","EqualConst"),
rho=0.5,Exonthreshold=0.5,significancelevel=0.05,Groups=Groups,paired=FALSE,Low_AllSamples=c()
,Low_GSamples=c(),Juninfo="User",JAnnotI=NULL,JAnnot=NULL,EandTrAnnotI="Output/REMAP_Indices.txt",
EandTrAnnot="Output/HJAY_REMAP.txt",positionData=NULL,transcriptData=NULL,
Location="OutputREIDSAnalysis",Name="TC1500264")
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/martingaleDifference.R
\name{CVT_PtildePower}
\alias{CVT_PtildePower}
\title{Power MD-lag function}
\usage{
CVT_PtildePower(param = 4)
}
\arguments{
\item{param}{Power to which \eqn{|2P - 1|} is raised.}
}
\value{
\eqn{h(P)} MD-lag function
}
\description{
h(P) scaled to have mean 0, variance 1 when P uniform
}
| /man/CVT_PtildePower.Rd | no_license | ajmcneil/spectralBacktest | R | false | true | 392 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/martingaleDifference.R
\name{CVT_PtildePower}
\alias{CVT_PtildePower}
\title{Power MD-lag function}
\usage{
CVT_PtildePower(param = 4)
}
\arguments{
\item{param}{power to which \eqn{\abs(2P-1)} is raised}
}
\value{
\eqn{h(P)} MD-lag function
}
\description{
h(P) scaled to have mean 0, variance 1 when P uniform
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest_tokens.R
\name{unnest_tokens}
\alias{unnest_tokens}
\title{Split a column into tokens using the tokenizers package}
\usage{
unnest_tokens(
tbl,
output,
input,
token = "words",
format = c("text", "man", "latex", "html", "xml"),
to_lower = TRUE,
drop = TRUE,
collapse = NULL,
...
)
}
\arguments{
\item{tbl}{A data frame}
\item{output}{Output column to be created as string or symbol.}
\item{input}{Input column that gets split as string or symbol.
The output/input arguments are passed by expression and support
\link[rlang]{quasiquotation}; you can unquote strings and symbols.}
\item{token}{Unit for tokenizing, or a custom tokenizing function. Built-in
options are "words" (default), "characters", "character_shingles", "ngrams",
"skip_ngrams", "sentences", "lines", "paragraphs", "regex", "tweets"
(tokenization by word that preserves usernames, hashtags, and URLs), and
"ptb" (Penn Treebank). If a function, should take a character vector and
return a list of character vectors of the same length.}
\item{format}{Either "text", "man", "latex", "html", or "xml". If not text,
this uses the hunspell tokenizer, and can tokenize only by "word"}
\item{to_lower}{Whether to convert tokens to lowercase. If tokens include
URLS (such as with \code{token = "tweets"}), such converted URLs may no
longer be correct.}
\item{drop}{Whether original input column should get dropped. Ignored
if the original input and new output column have the same name.}
\item{collapse}{Whether to combine text with newlines first in case tokens
(such as sentences or paragraphs) span multiple lines. If NULL, collapses
when token method is "ngrams", "skip_ngrams", "sentences", "lines",
"paragraphs", or "regex".}
\item{...}{Extra arguments passed on to \link[tokenizers]{tokenizers}, such
as \code{strip_punct} for "words" and "tweets", \code{n} and \code{k} for
"ngrams" and "skip_ngrams", \code{strip_url} for "tweets", and
\code{pattern} for "regex".}
}
\description{
Split a column into tokens using the tokenizers package, splitting the table
into one-token-per-row. This function supports non-standard evaluation
through the tidyeval framework.
}
\details{
If the unit for tokenizing is ngrams, skip_ngrams, sentences, lines,
paragraphs, or regex, the entire input will be collapsed together before
tokenizing unless \code{collapse = FALSE}.
If format is anything other than "text", this uses the
\code{\link[hunspell]{hunspell_parse}} tokenizer instead of the tokenizers package.
This does not yet have support for tokenizing by any unit other than words.
}
\examples{
library(dplyr)
library(janeaustenr)
d <- tibble(txt = prideprejudice)
d
d \%>\%
unnest_tokens(word, txt)
d \%>\%
unnest_tokens(sentence, txt, token = "sentences")
d \%>\%
unnest_tokens(ngram, txt, token = "ngrams", n = 2)
d \%>\%
unnest_tokens(chapter, txt, token = "regex", pattern = "Chapter [\\\\\\\\d]")
d \%>\%
unnest_tokens(shingle, txt, token = "character_shingles", n = 4)
# custom function
d \%>\%
unnest_tokens(word, txt, token = stringr::str_split, pattern = " ")
# tokenize HTML
h <- tibble(row = 1:2,
text = c("<h1>Text <b>is</b>", "<a href='example.com'>here</a>"))
h \%>\%
unnest_tokens(word, text, format = "html")
}
| /man/unnest_tokens.Rd | permissive | donume/tidytext | R | false | true | 3,341 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unnest_tokens.R
\name{unnest_tokens}
\alias{unnest_tokens}
\title{Split a column into tokens using the tokenizers package}
\usage{
unnest_tokens(
tbl,
output,
input,
token = "words",
format = c("text", "man", "latex", "html", "xml"),
to_lower = TRUE,
drop = TRUE,
collapse = NULL,
...
)
}
\arguments{
\item{tbl}{A data frame}
\item{output}{Output column to be created as string or symbol.}
\item{input}{Input column that gets split as string or symbol.
The output/input arguments are passed by expression and support
\link[rlang]{quasiquotation}; you can unquote strings and symbols.}
\item{token}{Unit for tokenizing, or a custom tokenizing function. Built-in
options are "words" (default), "characters", "character_shingles", "ngrams",
"skip_ngrams", "sentences", "lines", "paragraphs", "regex", "tweets"
(tokenization by word that preserves usernames, hashtags, and URLS ), and
"ptb" (Penn Treebank). If a function, should take a character vector and
return a list of character vectors of the same length.}
\item{format}{Either "text", "man", "latex", "html", or "xml". If not text,
this uses the hunspell tokenizer, and can tokenize only by "word"}
\item{to_lower}{Whether to convert tokens to lowercase. If tokens include
URLS (such as with \code{token = "tweets"}), such converted URLs may no
longer be correct.}
\item{drop}{Whether original input column should get dropped. Ignored
if the original input and new output column have the same name.}
\item{collapse}{Whether to combine text with newlines first in case tokens
(such as sentences or paragraphs) span multiple lines. If NULL, collapses
when token method is "ngrams", "skip_ngrams", "sentences", "lines",
"paragraphs", or "regex".}
\item{...}{Extra arguments passed on to \link[tokenizers]{tokenizers}, such
as \code{strip_punct} for "words" and "tweets", \code{n} and \code{k} for
"ngrams" and "skip_ngrams", \code{strip_url} for "tweets", and
\code{pattern} for "regex".}
}
\description{
Split a column into tokens using the tokenizers package, splitting the table
into one-token-per-row. This function supports non-standard evaluation
through the tidyeval framework.
}
\details{
If the unit for tokenizing is ngrams, skip_ngrams, sentences, lines,
paragraphs, or regex, the entire input will be collapsed together before
tokenizing unless \code{collapse = FALSE}.
If format is anything other than "text", this uses the
\code{\link[hunspell]{hunspell_parse}} tokenizer instead of the tokenizers package.
This does not yet have support for tokenizing by any unit other than words.
}
\examples{
library(dplyr)
library(janeaustenr)
d <- tibble(txt = prideprejudice)
d
d \%>\%
unnest_tokens(word, txt)
d \%>\%
unnest_tokens(sentence, txt, token = "sentences")
d \%>\%
unnest_tokens(ngram, txt, token = "ngrams", n = 2)
d \%>\%
unnest_tokens(chapter, txt, token = "regex", pattern = "Chapter [\\\\\\\\d]")
d \%>\%
unnest_tokens(shingle, txt, token = "character_shingles", n = 4)
# custom function
d \%>\%
unnest_tokens(word, txt, token = stringr::str_split, pattern = " ")
# tokenize HTML
h <- tibble(row = 1:2,
text = c("<h1>Text <b>is</b>", "<a href='example.com'>here</a>"))
h \%>\%
unnest_tokens(word, text, format = "html")
}
|
####
# This simulation is testing whether the first version of Insertion method has unbiased property.
# With the external validation data
#### 0 Simulation set up####
library(parallel)
library(scales)
## 0.1 Generate the seed for the simulation ####
# Fix a master seed, then draw one sub-seed per replicate so each of the
# 1000 simulation runs is individually reproducible.
set.seed(2018)
seed_i <- sample(1000000,1000)
## 0.2 Global Parameters ####
ncores <- 30    # number of cores intended for parallel execution
ProjectName <- "Simulation_INS6"
nsample <- 1000 # default total sample size (overridden later in the script)
## 0.3 Functions ####
## 0.3.1 Original Functions ####
GEE_UI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                   gamma1, gamma, alpha1, alpha0, sigma_e){
  # Stacked GEE estimating function U(theta) for the insertion method,
  # evaluated at theta = (beta1[1:3], beta2[1:3], sigma, xi).
  # BUG FIX: the original body ignored every data argument and read the
  # global `data.mismeasure` instead, so callers passing their own data got
  # results for whatever happened to be in the global environment. The
  # passed arguments are now used, matching the declared interface.
  return(GEE_UfuncIns(Y1star = Y1star,
                      Y2star = Y2star,
                      DesignMatrix1 = DesignMatrix1,
                      DesignMatrix2 = DesignMatrix2,
                      CovMis1 = CovMis1,
                      CovMis2 = CovMis2,
                      beta1 = theta[1:3], beta2 = theta[4:6],
                      sigma = theta[7], xi = theta[8],
                      gamma1 = gamma1, gamma = gamma,
                      alpha1 = alpha1, alpha0 = alpha0, sigma_e = sigma_e))
}
GEE_SIGMA <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                      gamma1, gamma, alpha1, alpha0, sigma_e){
  # Middle ("meat") matrix of the sandwich covariance: unpack the stacked
  # parameter vector theta = (beta1, beta2, sigma, xi) and forward to the
  # compiled routine GEE_SIGMAIns.
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_SIGMAIns(Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
               beta1, beta2, sigma = theta[7], xi = theta[8],
               gamma1, gamma, alpha1, alpha0, sigma_e)
}
GEE_GAMMA <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2){
  # "Bread" matrix of the sandwich covariance. CovMis1/CovMis2 are accepted
  # only for a uniform call signature; GEE_GAMMAIns does not need them.
  GEE_GAMMAIns(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
               beta1 = theta[1:3], beta2 = theta[4:6],
               xi = theta[8], sigma = theta[7])
}
GEE_GAMMA.inv <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2){
  # Inverse of the bread matrix.
  # CONSISTENCY FIX: reuse GEE_GAMMA instead of duplicating its call to
  # GEE_GAMMAIns, so the two functions cannot drift apart.
  GAMMA <- GEE_GAMMA(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2)
  # Tiny tol lets solve() proceed on near-singular matrices, as elsewhere
  # in this script.
  solve(GAMMA, tol = 1e-200)
}
GEE_cov <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                    gamma1, gamma, alpha1, alpha0, sigma_e){
  # Sandwich covariance GAMMA^{-1} %*% SIGMA %*% GAMMA^{-T}.
  # FIX: dropped the dead call to GEE_GAMMA -- the original computed GAMMA,
  # never used it, and then recomputed it inside GEE_GAMMA.inv.
  GAMMA.inv <- GEE_GAMMA.inv(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2)
  SIGMA <- GEE_SIGMA(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                     gamma1, gamma, alpha1, alpha0, sigma_e)
  GAMMA.inv %*% SIGMA %*% t(as.matrix(GAMMA.inv))
}
## 0.3.2 Functions with External Validation ####
GEE_UI_EV <- function(theta, data.mismeasure,
                      gamma1, gamma, alpha1, alpha0, sigma_e){
  # Estimating function evaluated on the mismeasured (non-validation) data.
  # Per the data frame built in INS_int, columns 3:5 hold the design matrix
  # (intercept, X, W), columns 6:7 the measurement-error covariates, and
  # column 8 the misclassification covariate.
  dm  <- as.matrix(data.mismeasure[, 3:5])
  cm1 <- as.matrix(data.mismeasure[, 6:7])
  cm2 <- as.matrix(data.mismeasure[, 8])
  GEE_UfuncIns(Y1star = data.mismeasure$Y1star,
               Y2star = data.mismeasure$Y2star,
               DesignMatrix1 = dm,
               DesignMatrix2 = dm,
               CovMis1 = cm1,
               CovMis2 = cm2,
               beta1 = theta[1:3], beta2 = theta[4:6],
               sigma = theta[7], xi = theta[8],
               gamma1 = gamma1, gamma = gamma,
               alpha1 = alpha1, alpha0 = alpha0, sigma_e = sigma_e)
}
GEE_GAMMA_EV0 <- function(theta, Y1star, Y2star, Y1, Y2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Validation-sample contribution to the bread matrix. `theta` is accepted
  # for interface symmetry with the other wrappers but is not forwarded.
  # ncov1/ncov2 = 3 matches the three-column design matrix (intercept, X, W)
  # used throughout this simulation.
  GEE_GAMMAInsEV0(Y1star, Y2star, Y1, Y2,
                  CovMis1, CovMis2, ncov1 = 3, ncov2 = 3,
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_GAMMA_EVI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Main-study (mismeasured data) contribution to the bread matrix,
  # evaluated at theta = (beta1, beta2, sigma, xi).
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_GAMMAInsEVI(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
                  CovMis1, CovMis2,
                  beta1 = beta1, beta2 = beta2, xi = theta[8], sigma = theta[7],
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_SIGMA_EV0 <- function(theta, Y1star, Y2star, Y1, Y2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Validation-sample contribution to the meat matrix. `theta` is accepted
  # for interface symmetry but not forwarded; ncov1/ncov2 = 3 matches the
  # three-column design matrix used in this simulation.
  GEE_SIGMAInsEV0(Y1star, Y2star, Y1, Y2,
                  CovMis1, CovMis2, ncov1 = 3, ncov2 = 3,
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_SIGMA_EVI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Main-study (mismeasured data) contribution to the meat matrix,
  # evaluated at theta = (beta1, beta2, sigma, xi).
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_SIGMAInsEVI(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
                  CovMis1, CovMis2,
                  beta1 = beta1, beta2 = beta2, xi = theta[8], sigma = theta[7],
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
# Sandwich covariance estimator with external validation data.
# Bread (GAMMA_EV) and meat (SIGMA_EV) are each the sum of two pieces:
#   *_EV0: contribution of the validation sample (carries the true Y1, Y2);
#   *_EVI: contribution of the mismeasured main-study sample.
# Column positions follow the data frames built in INS_int:
#   data.validation: Y1, Y2, Y1star, Y2star, CovMis1 (cols 5:6), CovMis2 (col 7)
#   data.mismeasure: Y1star, Y2star, design matrix (cols 3:5),
#                    CovMis1 (cols 6:7), CovMis2 (col 8)
GEE_covEV <- function(theta, data.validation, data.mismeasure,
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Sample sizes (computed for reference; not used below -- TODO confirm).
  nvalidation <- dim(data.validation)[1]
  nsample <- dim(data.mismeasure)[1] + nvalidation
  # Bread: validation piece (M0) + main-study piece (M1).
  M0 <- GEE_GAMMA_EV0(theta,
                      Y1star = data.validation$Y1star,
                      Y2star = data.validation$Y2star,
                      Y1 = data.validation$Y1,
                      Y2 = data.validation$Y2,
                      CovMis1 = as.matrix(data.validation[,5:6]),
                      CovMis2 = as.matrix(data.validation[,7]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  M1 <- GEE_GAMMA_EVI(theta,
                      Y1star = data.mismeasure$Y1star,
                      Y2star = data.mismeasure$Y2star,
                      DesignMatrix1 = as.matrix(data.mismeasure[,3:5]),
                      DesignMatrix2 = as.matrix(data.mismeasure[,3:5]),
                      CovMis1 = as.matrix(data.mismeasure[,6:7]),
                      CovMis2 = as.matrix(data.mismeasure[,8]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  GAMMA_EV <- M1 + M0
  # Meat: validation piece (B0) + main-study piece (B1).
  B0 <- GEE_SIGMA_EV0(theta,
                      Y1star = data.validation$Y1star,
                      Y2star = data.validation$Y2star,
                      Y1 = data.validation$Y1,
                      Y2 = data.validation$Y2,
                      CovMis1 = as.matrix(data.validation[,5:6]),
                      CovMis2 = as.matrix(data.validation[,7]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  B1 <- GEE_SIGMA_EVI(theta,
                      Y1star = data.mismeasure$Y1star,
                      Y2star = data.mismeasure$Y2star,
                      DesignMatrix1 = as.matrix(data.mismeasure[,3:5]),
                      DesignMatrix2 = as.matrix(data.mismeasure[,3:5]),
                      CovMis1 = as.matrix(data.mismeasure[,6:7]),
                      CovMis2 = as.matrix(data.mismeasure[,8]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  SIGMA_EV <- B1 + B0
  # Sandwich: GAMMA^{-1} SIGMA GAMMA^{-T}; tiny tol allows near-singular bread.
  GAMMA.inv <- solve(GAMMA_EV, tol = 1e-200)
  covmatrix <- GAMMA.inv %*% SIGMA_EV %*% t(as.matrix(GAMMA.inv))
  return(covmatrix)
  # return(list(M1=M1,M0=M0,B1=B1,B0=B0,GAMMA_IV=GAMMA_IV,SIGMA_IV=SIGMA_IV,covmatrix))
}
##### 1 Implementation Function ####
### An example for debugging: these assignments let the body of INS_int be
### stepped through interactively; inside real calls they are shadowed by
### the function arguments.
i <- 1
nsample <- 1500
nvalidation <- 500
INS_int <- function(i, nsample, nvalidation){
  ## Run one replicate of the insertion-method simulation with external
  ## validation data.
  ##
  ## Args:
  ##   i           - replicate index in 1..1000; selects the replicate seed.
  ##   nsample     - total sample size (main study + validation).
  ##   nvalidation - number of subjects in the external validation sample.
  ##
  ## Returns a list of naive / measurement-error-only / misclassification-only
  ## / proposed estimates with standard errors, or NULL if estimation errors.
  ## 1.1 Set up ####
  # Re-derive the replicate seed locally so the function is self-contained
  # when dispatched to parallel workers.
  set.seed(2019)
  seed_i <- sample(1000000,1000)
  set.seed(seed_i[i])
  library(GeneErrorMis)
  library(nleqslv)
  ## 1.2 Data Generation ####
  ## true parameters
  beta1 <- c(0.7,1.5,-1)
  beta2 <- c(0.7,-1.5,1)
  sigma <- 1
  rho <- 0
  sigma_e <- 0.1    # true measurement-error SD
  gamma <- 0.8      # true coefficient of Y2 in the measurement-error model
  alpha <- -2.197   # logit of the true misclassification probability
  ### Generate the true data sets
  X <- runif(nsample,-3,4)
  W <- rnorm(nsample,0,sd=1)
  mu1 <- beta1[1] + beta1[2] * X + beta1[3] * W
  mu2 <- beta2[1] + beta2[2] * X + beta2[3] * W
  expit <- function(x){
    value <- exp(x)/(1+exp(x))
    # exp(x) overflows for large x, yielding NaN; the limiting value is 1.
    ifelse(is.na(value),1,value)
  }
  ## Responses: continuous Y1, binary Y2
  epsilon <- rnorm(nsample,0,1)
  U <- runif(nsample,0,1)
  mu2expit <- expit(mu2)
  Y1 <- mu1 + epsilon
  Y2 <- ifelse(U < mu2expit,1,0)
  ## record the realized correlation between the two response residuals
  rho <- cor(Y1-mu1,Y2-mu2expit)
  ## measurement error (Y1star) and misclassification (Y2star)
  e <- rnorm(nsample,0,sigma_e)
  U2 <- runif(nsample,0,1)
  Y1star <- Y1 + gamma * Y2 + e
  Y2star <- ifelse(U2 > expit(alpha),Y2,1-Y2)
  ## Naive models fit to the error-prone responses; the "true" fits are kept
  ## for interactive reference only and are not returned.
  naive.model1 <- lm(Y1star ~ X + W)
  true.model1 <- lm(Y1 ~ X + W)
  naive.model2 <- glm(Y2star ~ X + W, family = binomial(link = logit))
  true.model2 <- glm(Y2 ~ X + W, family = binomial(link = logit))
  ## 1.3 Implementation ###
  ## 1.3.1 Preparation: design matrix and error-model covariates ###
  DesignMatrix1 <- DesignMatrix2 <- cbind(rep(1,length(X)),X,W)
  CovMis1 <- cbind(rep(0,length(X)),rep(1,length(X)))
  CovMis2 <- c(rep(1,length(X)))
  DesignMatrix1 <- as.matrix(DesignMatrix1)
  DesignMatrix2 <- as.matrix(DesignMatrix2)
  CovMis1 <- as.matrix(CovMis1)
  CovMis2 <- as.matrix(CovMis2)
  ## The first (nsample - nvalidation) subjects form the mismeasured main
  ## study; the remaining nvalidation subjects form the validation sample.
  nmain <- nsample - nvalidation
  data.mismeasure <- data.frame(Y1star = Y1star[1:nmain],
                                Y2star = Y2star[1:nmain],
                                DesignMatrix1[1:nmain,],
                                CovMis1[1:nmain,],
                                CovMis2[1:nmain,])
  data.validation <- data.frame(Y1 = Y1[(nmain+1):nsample],
                                Y2 = Y2[(nmain+1):nsample],
                                Y1star = Y1star[(nmain+1):nsample],
                                Y2star = Y2star[(nmain+1):nsample],
                                CovMis1[(nmain+1):nsample,],
                                CovMis2[(nmain+1):nsample,])
  ## 1.3.2 Initial values: naive estimates, sigma = 1, xi = 0 ###
  ## (an alternative moment-based start vector in the original was unused
  ## and has been removed)
  intial4 <- c(naive.model1$coefficients,naive.model2$coefficients,1,0)
  ## 1.4 Estimating Procedure
  ## 1.4.1 Measurement-error and misclassification parameters, estimated
  ##       from the validation sample.
  model.measure <- lm(Y1star ~ -1 + offset(Y1) + Y2,data = data.validation)
  model.class1 <- glm((1-Y2star) ~ 1, data = data.validation[data.validation$Y2==1,],family = binomial(link="logit"))
  model.class0 <- glm(Y2star ~ 1, data = data.validation[data.validation$Y2==0,],family = binomial(link="logit"))
  gamma2 <- model.measure$coefficients   # estimated gamma
  sigma_e <- sigma(model.measure)        # estimated measurement-error SD
  alpha1 <- model.class1$coefficients    # logit P(Y2star = 0 | Y2 = 1)
  alpha0 <- model.class0$coefficients    # logit P(Y2star = 1 | Y2 = 0)
  tryCatch({
    # 1.4.2 The proposed method ####
    NR <- nleqslv(intial4, GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=T, control=list(maxit=2000),
                  gamma1 = 1, gamma=c(0,gamma2), alpha1= alpha1, alpha0= alpha0, sigma_e = sigma_e)
    # Guard against divergent roots: any |estimate| >= 10 is set to NA.
    betahat <- ifelse(abs(NR$x)<10,NR$x,NA)
    ### variance estimation with validation data
    if (!any(is.na(betahat))) {
      cov <- GEE_covEV(betahat, data.validation, data.mismeasure,
                       gamma1=1, gamma = c(0,gamma2), alpha1, alpha0, sigma_e,
                       fixgamma1=1, fixgamma=c(1,0), fixsigma_e=0, fixalpha1=0, fixalpha0=0)
      sd <- sqrt(diag(cov))
    } else {
      sd <- rep(NA,length(betahat))
    }
    # 1.4.3 Naive model accounting only for the measurement error ###
    measonly <- nleqslv(intial4,GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=T, control=list(maxit=2000),
                        gamma1 = 1, gamma=c(0,gamma2), alpha1= -Inf, alpha0= -Inf, sigma_e = sigma_e)
    betahat_measonly <- ifelse(abs(measonly$x)<10,measonly$x,NA)
    if (!any(is.na(betahat_measonly))) {
      # FIX: use the estimated coefficient gamma2 here; the original passed
      # the true-value variable `gamma`, inconsistent with the point
      # estimation call just above (which uses gamma2).
      cov <- GEE_covEV(betahat_measonly, data.validation, data.mismeasure,
                       gamma1=1, gamma = c(0,gamma2), alpha1= -Inf, alpha0= -Inf, sigma_e = sigma_e,
                       fixgamma1=1, fixgamma=c(1,0), fixsigma_e=0, fixalpha1=1, fixalpha0=1)
      sd_measonly <- sqrt(diag(cov))
    } else {
      sd_measonly <- rep(NA,length(betahat_measonly))
    }
    # 1.4.4 Naive model accounting only for the misclassification ###
    misconly <- nleqslv(intial4, GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=T, control=list(maxit=2000),
                        gamma1 = 1, gamma=c(0,0), alpha1= alpha1, alpha0= alpha0, sigma_e = 0)
    betahat_misconly <- ifelse(abs(misconly$x)<10,misconly$x,NA)
    if (!any(is.na(betahat_misconly))) {
      # FIX: evaluate the covariance at betahat_misconly; the original
      # passed betahat_measonly here (copy-paste error).
      cov <- GEE_covEV(betahat_misconly, data.validation, data.mismeasure,
                       gamma1 = 1, gamma=c(0,0), alpha1= alpha1, alpha0= alpha0, sigma_e = 0,
                       fixgamma1=1, fixgamma=c(1,1), fixsigma_e=1, fixalpha1=0, fixalpha0=0)
      sd_misconly <- sqrt(diag(cov))
    } else {
      sd_misconly <- rep(NA,length(betahat_misconly))
    }
    return(list(seed = seed_i[i],
                naive1coef = naive.model1$coefficients,
                naive1vcov = vcov(naive.model1),
                naive2coef = naive.model2$coefficients,
                naive2vcov = vcov(naive.model2),
                betameasonly = c(betahat_measonly,gamma2,sigma_e,0,0),
                sdmeasonly = c(sd_measonly,0,0),
                betamisconly = c(betahat_misconly,0,0,alpha1,alpha0),
                sdmisconly = c(sd_misconly[1:8],0,0,sd_misconly[9:10]),
                betahat = c(betahat,gamma2,sigma_e,alpha1,alpha0),
                sd = sd))
  }, error = function(e) return(NULL))
}
### 4.1 Simulation 1: varying the total sample size ####
# NOTE(review): the original header said "under different degree of
# measurement error", but the code varies nsample (750/1500/3000) with a
# fixed 1/3 validation fraction; sigma_e_range below is defined but not
# referenced in this section -- confirm intent.
results_1 <- lapply(c(750,1500,3000), FUN= function(x){
  results_x <- lapply(1:1000, FUN = INS_int,
                      nsample = x, nvalidation = x*1/3)
  return(results_x)
})
# re1 <- INS_int(1,nsample=nsample, nvalidation=nvalidation, omega_j= omega_j)
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0)
sigma_e_range <- c(0.1,0.5,0.7)
# truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,1,0.1,0.1,-1.39,-1.39)
save(results_1,file="SSEV_R1.RData")
nsample <- c(750,1500,3000)   # sample sizes, indexed by k in the summary loop
Results <- NULL
# Summarize simulation 1: for each setting k, aggregate the 1000 replicates
# into bias / empirical SD / model-based SD / 95% CI coverage for the naive,
# measurement-error-only, misclassification-only, and proposed estimators.
for (k in 1:3) {
  results <- results_1[[k]]
  # Full true parameter vector:
  # (beta1[3], beta2[3], sigma, xi, gamma, sigma_e, alpha1, alpha0)
  truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,0.8,0.1,-2.197,-2.197)
  naive1coef <- NULL
  naive1sd <- NULL
  naive2coef <- NULL
  naive2sd <- NULL
  CI1naive <- NULL
  CI2naive <- NULL
  measonlycoef <- NULL
  measonlysd <- NULL
  CImeasonly <- NULL
  misconlycoef <- NULL
  misconlysd <- NULL
  CImisconly <- NULL
  betas <- NULL
  sds <- NULL
  CIs <- NULL
  for (i in 1:1000){
    # Replicates that errored in INS_int return NULL and are skipped.
    if (is.null(results[[i]])) {
      next}
    naive1coef <- rbind(naive1coef, results[[i]]$naive1coef)
    naive1sd <- rbind(naive1sd,sqrt(diag( results[[i]]$naive1vcov)))
    naive2coef <- rbind(naive2coef, results[[i]]$naive2coef)
    naive2sd <- rbind(naive2sd,sqrt(diag( results[[i]]$naive2vcov)))
    if ((!is.null(results[[i]]$betameasonly)) & (!is.null(results[[i]]$sdmeasonly))) {
      measonlycoef <- rbind(measonlycoef,as.vector(results[[i]]$betameasonly))
      measonlysd_i <- results[[i]]$sdmeasonly
      # Treat implausibly large SEs (>= 10) as failed variance estimates.
      measonlysd_i <- ifelse(abs(measonlysd_i)<10,measonlysd_i,NA)
      measonlysd <- rbind(measonlysd,measonlysd_i)
      CILBmeasonly <- results[[i]]$betameasonly - 1.96 *(measonlysd_i)
      CIUBmeasonly <- results[[i]]$betameasonly + 1.96 *(measonlysd_i)
      CImeasonly <- rbind(CImeasonly,ifelse((truebeta<as.vector(CIUBmeasonly)) & (truebeta>as.vector(CILBmeasonly)),1,0))
    }
    if ((!is.null(results[[i]]$betamisconly)) & (!is.null(results[[i]]$sdmisconly))) {
      misconlycoef <- rbind(misconlycoef,as.vector(results[[i]]$betamisconly))
      misconlysd_i <- (results[[i]]$sdmisconly)
      misconlysd_i <- ifelse(abs(misconlysd_i)<10,misconlysd_i,NA)
      misconlysd <- rbind(misconlysd, misconlysd_i)
      CILBmisconly <- results[[i]]$betamisconly - 1.96 *(misconlysd_i)
      CIUBmisconly <- results[[i]]$betamisconly + 1.96 *(misconlysd_i)
      CImisconly <- rbind(CImisconly,ifelse((truebeta<as.vector(CIUBmisconly)) & (truebeta>as.vector(CILBmisconly)),1,0))
    }
    betahat0 <- results[[i]]$betahat
    sd0 <- results[[i]]$sd
    sd0 <- ifelse(abs(sd0)<10,sd0,NA)
    betas <- rbind(betas, betahat0)
    sds <- rbind(sds, sd0)
    # Wald 95% CIs for the naive fits (regression parameters only).
    CILBnaive1 <- results[[i]]$naive1coef - 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
    CIUBnaive1 <- results[[i]]$naive1coef + 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
    CI1naive <- rbind(CI1naive,ifelse((truebeta[1:3]<CIUBnaive1) & (truebeta[1:3]>CILBnaive1),1,0))
    CILBnaive2 <- results[[i]]$naive2coef - 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
    CIUBnaive2 <- results[[i]]$naive2coef + 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
    CI2naive <- rbind(CI2naive,ifelse((truebeta[4:6]<CIUBnaive2) & (truebeta[4:6]>CILBnaive2),1,0))
    CILB <- betahat0 - 1.96 *(sd0)
    CIUB <- betahat0 + 1.96 *(sd0)
    CIs <- rbind(CIs,ifelse((truebeta<as.vector(CIUB)) & (truebeta>as.vector(CILB)),1,0))
  }
  # Column-wise summaries; error-model entries are padded with 0 for the
  # naive fits, which do not estimate them.
  biasnaive1 <- colMeans(naive1coef,na.rm=T)-truebeta[1:3]
  biasnaive2 <- colMeans(naive2coef,na.rm=T)-truebeta[4:6]
  naive_esd <- apply(cbind(naive1coef,naive2coef), MARGIN = 2 , FUN=sd, na.rm=T)
  sdnaive1 <- colMeans(naive1sd,na.rm=T)
  sdnaive2 <- colMeans(naive2sd,na.rm=T)
  CInaive1 <- colMeans(CI1naive,na.rm=T)
  CInaive2 <- colMeans(CI2naive,na.rm=T)
  naivebias <- c(biasnaive1,biasnaive2,rep(0,6))
  naive_esd <- c(naive_esd,rep(0,6))
  naivesd <- c(sdnaive1,sdnaive2,rep(0,6))
  naiveCI <- c(CInaive1,CInaive2,rep(0,6))
  bias_measonly <- colMeans(na.omit(measonlycoef),na.rm = T) - truebeta
  sd_emp_measonly <- apply(na.omit(measonlycoef),MARGIN = 2, FUN = sd)
  sd_mod_measonly <- colMeans(na.omit(measonlysd),na.rm = T)
  CI_measonly <- colMeans(na.omit(CImeasonly),na.rm = T)
  bias_misconly <- colMeans(na.omit(misconlycoef),na.rm = T) - truebeta
  sd_emp_misconly <- apply(na.omit(misconlycoef),MARGIN = 2, FUN = sd)
  sd_mod_misconly <- colMeans(na.omit(misconlysd),na.rm = T)
  CI_misconly <- colMeans(na.omit(CImisconly),na.rm = T)
  bias1 <- colMeans(na.omit(betas),na.rm = T) - truebeta
  sd_emp <- apply(na.omit(betas),MARGIN = 2, FUN = sd)
  sd_mod <- colMeans(na.omit(sds),na.rm = T)
  CIrate <- colMeans(na.omit(CIs),na.rm = T)
  Results0 <- data.frame(nsample = nsample[k],
                         naivebias=round(naivebias,3),naive_esd=round(naive_esd,3),naivesd=round(naivesd,3),naiveCI=percent(round(naiveCI,3)),
                         biasmeas=round(bias_measonly,3),esdmeas=round(sd_emp_measonly,3),sdmeas=round(sd_mod_measonly,3),CImeas=percent(round(CI_measonly,3)),
                         biasmisc=round(bias_misconly,3),esdmisc=round(sd_emp_misconly,3),sdmisc=round(sd_mod_misconly,3),CImisc=percent(round(CI_misconly,3)),
                         biasprop=round(bias1,3),propose_esd=round(sd_emp,3),sdpropose=round(sd_mod,3),CI_propose=percent(round(CIrate,3)))
  Results <- rbind(Results,Results0)
}
save(Results,file="SSEV_RTable1.RData")
library(xtable)
xtable(Results,digits = 3)
### 4.2 Simulation 2: varying the validation-sample fraction ####
# NOTE(review): the original header said "different degree of
# misclassification rates", but the code varies the validation fraction
# (1/3, 1/2, 2/3 of n = 1500); alpha_range below is defined but not
# referenced in this section -- confirm intent.
results_2 <- lapply(c(1/3,1/2,2/3), FUN= function(x){
  results_x <- lapply(1:1000, FUN = INS_int,
                      nsample = 1500, nvalidation = round(x*1500))
  return(results_x)
})
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,0.005,0)   # overwritten inside the summary loop
alpha_range <- c(-4.595,-2.197,-1.386)
nrate <- round(c(1/3,1/2,2/3),3)               # validation fractions, indexed by k
save(results_2,file="SSEV_R2.RData")
Results <- NULL
# Summarize simulation 2 (same aggregation as for simulation 1, but indexed
# by validation fraction nrate[k] and saved to a second table file).
for (k in 1:3) {
  results <- results_2[[k]]
  # Full true parameter vector:
  # (beta1[3], beta2[3], sigma, xi, gamma, sigma_e, alpha1, alpha0)
  truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,0.8,0.1,-2.197,-2.197)
  naive1coef <- NULL
  naive1sd <- NULL
  naive2coef <- NULL
  naive2sd <- NULL
  CI1naive <- NULL
  CI2naive <- NULL
  measonlycoef <- NULL
  measonlysd <- NULL
  CImeasonly <- NULL
  misconlycoef <- NULL
  misconlysd <- NULL
  CImisconly <- NULL
  betas <- NULL
  sds <- NULL
  CIs <- NULL
  for (i in 1:1000){
    # Replicates that errored in INS_int return NULL and are skipped.
    if (is.null(results[[i]])) {
      next}
    naive1coef <- rbind(naive1coef, results[[i]]$naive1coef)
    naive1sd <- rbind(naive1sd,sqrt(diag( results[[i]]$naive1vcov)))
    naive2coef <- rbind(naive2coef, results[[i]]$naive2coef)
    naive2sd <- rbind(naive2sd,sqrt(diag( results[[i]]$naive2vcov)))
    if ((!is.null(results[[i]]$betameasonly)) & (!is.null(results[[i]]$sdmeasonly))) {
      measonlycoef <- rbind(measonlycoef,as.vector(results[[i]]$betameasonly))
      measonlysd_i <- results[[i]]$sdmeasonly
      # Treat implausibly large SEs (>= 10) as failed variance estimates.
      measonlysd_i <- ifelse(abs(measonlysd_i)<10,measonlysd_i,NA)
      measonlysd <- rbind(measonlysd,measonlysd_i)
      CILBmeasonly <- results[[i]]$betameasonly - 1.96 *(measonlysd_i)
      CIUBmeasonly <- results[[i]]$betameasonly + 1.96 *(measonlysd_i)
      CImeasonly <- rbind(CImeasonly,ifelse((truebeta<as.vector(CIUBmeasonly)) & (truebeta>as.vector(CILBmeasonly)),1,0))
    }
    if ((!is.null(results[[i]]$betamisconly)) & (!is.null(results[[i]]$sdmisconly))) {
      misconlycoef <- rbind(misconlycoef,as.vector(results[[i]]$betamisconly))
      misconlysd_i <- (results[[i]]$sdmisconly)
      misconlysd_i <- ifelse(abs(misconlysd_i)<10,misconlysd_i,NA)
      misconlysd <- rbind(misconlysd, misconlysd_i)
      CILBmisconly <- results[[i]]$betamisconly - 1.96 *(misconlysd_i)
      CIUBmisconly <- results[[i]]$betamisconly + 1.96 *(misconlysd_i)
      CImisconly <- rbind(CImisconly,ifelse((truebeta<as.vector(CIUBmisconly)) & (truebeta>as.vector(CILBmisconly)),1,0))
    }
    betahat0 <- results[[i]]$betahat
    sd0 <- results[[i]]$sd
    sd0 <- ifelse(abs(sd0)<10,sd0,NA)
    betas <- rbind(betas, betahat0)
    sds <- rbind(sds, sd0)
    # Wald 95% CIs for the naive fits (regression parameters only).
    CILBnaive1 <- results[[i]]$naive1coef - 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
    CIUBnaive1 <- results[[i]]$naive1coef + 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
    CI1naive <- rbind(CI1naive,ifelse((truebeta[1:3]<CIUBnaive1) & (truebeta[1:3]>CILBnaive1),1,0))
    CILBnaive2 <- results[[i]]$naive2coef - 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
    CIUBnaive2 <- results[[i]]$naive2coef + 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
    CI2naive <- rbind(CI2naive,ifelse((truebeta[4:6]<CIUBnaive2) & (truebeta[4:6]>CILBnaive2),1,0))
    CILB <- betahat0 - 1.96 *(sd0)
    CIUB <- betahat0 + 1.96 *(sd0)
    CIs <- rbind(CIs,ifelse((truebeta<as.vector(CIUB)) & (truebeta>as.vector(CILB)),1,0))
  }
  # Column-wise summaries; error-model entries are padded with 0 for the
  # naive fits, which do not estimate them.
  biasnaive1 <- colMeans(naive1coef,na.rm=T)-truebeta[1:3]
  biasnaive2 <- colMeans(naive2coef,na.rm=T)-truebeta[4:6]
  naive_esd <- apply(cbind(naive1coef,naive2coef), MARGIN = 2 , FUN=sd, na.rm=T)
  sdnaive1 <- colMeans(naive1sd,na.rm=T)
  sdnaive2 <- colMeans(naive2sd,na.rm=T)
  CInaive1 <- colMeans(CI1naive,na.rm=T)
  CInaive2 <- colMeans(CI2naive,na.rm=T)
  naivebias <- c(biasnaive1,biasnaive2,rep(0,6))
  naive_esd <- c(naive_esd,rep(0,6))
  naivesd <- c(sdnaive1,sdnaive2,rep(0,6))
  naiveCI <- c(CInaive1,CInaive2,rep(0,6))
  bias_measonly <- colMeans(na.omit(measonlycoef),na.rm = T) - truebeta
  sd_emp_measonly <- apply(na.omit(measonlycoef),MARGIN = 2, FUN = sd)
  sd_mod_measonly <- colMeans(na.omit(measonlysd),na.rm = T)
  CI_measonly <- colMeans(na.omit(CImeasonly),na.rm = T)
  bias_misconly <- colMeans(na.omit(misconlycoef),na.rm = T) - truebeta
  sd_emp_misconly <- apply(na.omit(misconlycoef),MARGIN = 2, FUN = sd)
  sd_mod_misconly <- colMeans(na.omit(misconlysd),na.rm = T)
  CI_misconly <- colMeans(na.omit(CImisconly),na.rm = T)
  bias1 <- colMeans(na.omit(betas),na.rm = T) - truebeta
  sd_emp <- apply(na.omit(betas),MARGIN = 2, FUN = sd)
  sd_mod <- colMeans(na.omit(sds),na.rm = T)
  CIrate <- colMeans(na.omit(CIs),na.rm = T)
  Results0 <- data.frame(nrate = nrate[k],
                         naivebias=round(naivebias,3),naive_esd=round(naive_esd,3),naivesd=round(naivesd,3),naiveCI=percent(round(naiveCI,3)),
                         biasmeas=round(bias_measonly,3),esdmeas=round(sd_emp_measonly,3),sdmeas=round(sd_mod_measonly,3),CImeas=percent(round(CI_measonly,3)),
                         biasmisc=round(bias_misconly,3),esdmisc=round(sd_emp_misconly,3),sdmisc=round(sd_mod_misconly,3),CImisc=percent(round(CI_misconly,3)),
                         biasprop=round(bias1,3),propose_esd=round(sd_emp,3),sdpropose=round(sd_mod,3),CI_propose=percent(round(CIrate,3)))
  Results <- rbind(Results,Results0)
}
save(Results,file="SSEV_RTable2.RData")
library(xtable)
xtable(Results,digits = 3)
# This simulation is testing whether the first version of Insertion method has unbiased property.
# With the external validation data
#### 0 Simulation set up####
library(parallel)
library(scales)
## 0.1 Generate the seed for the simulation ####
# Fix a master seed, then draw one sub-seed per replicate so each of the
# 1000 simulation runs is individually reproducible.
set.seed(2018)
seed_i <- sample(1000000,1000)
## 0.2 Global Parameters ####
ncores <- 30    # number of cores intended for parallel execution
ProjectName <- "Simulation_INS6"
nsample <- 1000 # default total sample size (overridden later in the script)
## 0.3 Functions ####
## 0.3.1 Original Functions ####
GEE_UI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                   gamma1, gamma, alpha1, alpha0, sigma_e){
  # Stacked GEE estimating function U(theta) for the insertion method,
  # evaluated at theta = (beta1[1:3], beta2[1:3], sigma, xi).
  # BUG FIX: the original body ignored every data argument and read the
  # global `data.mismeasure` instead, so callers passing their own data got
  # results for whatever happened to be in the global environment. The
  # passed arguments are now used, matching the declared interface.
  return(GEE_UfuncIns(Y1star = Y1star,
                      Y2star = Y2star,
                      DesignMatrix1 = DesignMatrix1,
                      DesignMatrix2 = DesignMatrix2,
                      CovMis1 = CovMis1,
                      CovMis2 = CovMis2,
                      beta1 = theta[1:3], beta2 = theta[4:6],
                      sigma = theta[7], xi = theta[8],
                      gamma1 = gamma1, gamma = gamma,
                      alpha1 = alpha1, alpha0 = alpha0, sigma_e = sigma_e))
}
GEE_SIGMA <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                      gamma1, gamma, alpha1, alpha0, sigma_e){
  # Middle ("meat") matrix of the sandwich covariance: unpack the stacked
  # parameter vector theta = (beta1, beta2, sigma, xi) and forward to the
  # compiled routine GEE_SIGMAIns.
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_SIGMAIns(Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
               beta1, beta2, sigma = theta[7], xi = theta[8],
               gamma1, gamma, alpha1, alpha0, sigma_e)
}
GEE_GAMMA <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2){
  # "Bread" matrix of the sandwich covariance. CovMis1/CovMis2 are accepted
  # only for a uniform call signature; GEE_GAMMAIns does not need them.
  GEE_GAMMAIns(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
               beta1 = theta[1:3], beta2 = theta[4:6],
               xi = theta[8], sigma = theta[7])
}
GEE_GAMMA.inv <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2){
  # Inverse of the bread matrix.
  # CONSISTENCY FIX: reuse GEE_GAMMA instead of duplicating its call to
  # GEE_GAMMAIns, so the two functions cannot drift apart.
  GAMMA <- GEE_GAMMA(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2)
  # Tiny tol lets solve() proceed on near-singular matrices, as elsewhere
  # in this script.
  solve(GAMMA, tol = 1e-200)
}
GEE_cov <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                    gamma1, gamma, alpha1, alpha0, sigma_e){
  # Sandwich covariance GAMMA^{-1} %*% SIGMA %*% GAMMA^{-T}.
  # FIX: dropped the dead call to GEE_GAMMA -- the original computed GAMMA,
  # never used it, and then recomputed it inside GEE_GAMMA.inv.
  GAMMA.inv <- GEE_GAMMA.inv(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2)
  SIGMA <- GEE_SIGMA(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                     gamma1, gamma, alpha1, alpha0, sigma_e)
  GAMMA.inv %*% SIGMA %*% t(as.matrix(GAMMA.inv))
}
## 0.3.2 Functions with External Validation ####
GEE_UI_EV <- function(theta, data.mismeasure,
                      gamma1, gamma, alpha1, alpha0, sigma_e){
  # Estimating function evaluated on the mismeasured (non-validation) data.
  # Per the data frame built in INS_int, columns 3:5 hold the design matrix
  # (intercept, X, W), columns 6:7 the measurement-error covariates, and
  # column 8 the misclassification covariate.
  dm  <- as.matrix(data.mismeasure[, 3:5])
  cm1 <- as.matrix(data.mismeasure[, 6:7])
  cm2 <- as.matrix(data.mismeasure[, 8])
  GEE_UfuncIns(Y1star = data.mismeasure$Y1star,
               Y2star = data.mismeasure$Y2star,
               DesignMatrix1 = dm,
               DesignMatrix2 = dm,
               CovMis1 = cm1,
               CovMis2 = cm2,
               beta1 = theta[1:3], beta2 = theta[4:6],
               sigma = theta[7], xi = theta[8],
               gamma1 = gamma1, gamma = gamma,
               alpha1 = alpha1, alpha0 = alpha0, sigma_e = sigma_e)
}
GEE_GAMMA_EV0 <- function(theta, Y1star, Y2star, Y1, Y2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Validation-sample contribution to the bread matrix. `theta` is accepted
  # for interface symmetry with the other wrappers but is not forwarded.
  # ncov1/ncov2 = 3 matches the three-column design matrix (intercept, X, W)
  # used throughout this simulation.
  GEE_GAMMAInsEV0(Y1star, Y2star, Y1, Y2,
                  CovMis1, CovMis2, ncov1 = 3, ncov2 = 3,
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_GAMMA_EVI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Main-study (mismeasured data) contribution to the bread matrix,
  # evaluated at theta = (beta1, beta2, sigma, xi).
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_GAMMAInsEVI(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
                  CovMis1, CovMis2,
                  beta1 = beta1, beta2 = beta2, xi = theta[8], sigma = theta[7],
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_SIGMA_EV0 <- function(theta, Y1star, Y2star, Y1, Y2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Validation-sample contribution to the meat matrix. `theta` is accepted
  # for interface symmetry but not forwarded; ncov1/ncov2 = 3 matches the
  # three-column design matrix used in this simulation.
  GEE_SIGMAInsEV0(Y1star, Y2star, Y1, Y2,
                  CovMis1, CovMis2, ncov1 = 3, ncov2 = 3,
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
GEE_SIGMA_EVI <- function(theta, Y1star, Y2star, DesignMatrix1, DesignMatrix2, CovMis1, CovMis2,
                          gamma1, gamma, alpha1, alpha0, sigma_e,
                          fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Main-study (mismeasured data) contribution to the meat matrix,
  # evaluated at theta = (beta1, beta2, sigma, xi).
  beta1 <- theta[1:3]
  beta2 <- theta[4:6]
  GEE_SIGMAInsEVI(Y1star, Y2star, DesignMatrix1, DesignMatrix2,
                  CovMis1, CovMis2,
                  beta1 = beta1, beta2 = beta2, xi = theta[8], sigma = theta[7],
                  gamma1, gamma, alpha1, alpha0, sigma_e,
                  fixgamma1 = fixgamma1, fixgamma = fixgamma,
                  fixsigma_e = fixsigma_e, fixalpha1 = fixalpha1,
                  fixalpha0 = fixalpha0)
}
# Sandwich covariance estimator with external validation data.
# Bread (GAMMA_EV) and meat (SIGMA_EV) are each the sum of two pieces:
#   *_EV0: contribution of the validation sample (carries the true Y1, Y2);
#   *_EVI: contribution of the mismeasured main-study sample.
# Column positions follow the data frames built in INS_int:
#   data.validation: Y1, Y2, Y1star, Y2star, CovMis1 (cols 5:6), CovMis2 (col 7)
#   data.mismeasure: Y1star, Y2star, design matrix (cols 3:5),
#                    CovMis1 (cols 6:7), CovMis2 (col 8)
GEE_covEV <- function(theta, data.validation, data.mismeasure,
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0){
  # Sample sizes (computed for reference; not used below -- TODO confirm).
  nvalidation <- dim(data.validation)[1]
  nsample <- dim(data.mismeasure)[1] + nvalidation
  # Bread: validation piece (M0) + main-study piece (M1).
  M0 <- GEE_GAMMA_EV0(theta,
                      Y1star = data.validation$Y1star,
                      Y2star = data.validation$Y2star,
                      Y1 = data.validation$Y1,
                      Y2 = data.validation$Y2,
                      CovMis1 = as.matrix(data.validation[,5:6]),
                      CovMis2 = as.matrix(data.validation[,7]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  M1 <- GEE_GAMMA_EVI(theta,
                      Y1star = data.mismeasure$Y1star,
                      Y2star = data.mismeasure$Y2star,
                      DesignMatrix1 = as.matrix(data.mismeasure[,3:5]),
                      DesignMatrix2 = as.matrix(data.mismeasure[,3:5]),
                      CovMis1 = as.matrix(data.mismeasure[,6:7]),
                      CovMis2 = as.matrix(data.mismeasure[,8]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  GAMMA_EV <- M1 + M0
  # Meat: validation piece (B0) + main-study piece (B1).
  B0 <- GEE_SIGMA_EV0(theta,
                      Y1star = data.validation$Y1star,
                      Y2star = data.validation$Y2star,
                      Y1 = data.validation$Y1,
                      Y2 = data.validation$Y2,
                      CovMis1 = as.matrix(data.validation[,5:6]),
                      CovMis2 = as.matrix(data.validation[,7]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  B1 <- GEE_SIGMA_EVI(theta,
                      Y1star = data.mismeasure$Y1star,
                      Y2star = data.mismeasure$Y2star,
                      DesignMatrix1 = as.matrix(data.mismeasure[,3:5]),
                      DesignMatrix2 = as.matrix(data.mismeasure[,3:5]),
                      CovMis1 = as.matrix(data.mismeasure[,6:7]),
                      CovMis2 = as.matrix(data.mismeasure[,8]),
                      gamma1, gamma, alpha1, alpha0, sigma_e,
                      fixgamma1, fixgamma, fixsigma_e, fixalpha1, fixalpha0)
  SIGMA_EV <- B1 + B0
  # Sandwich: GAMMA^{-1} SIGMA GAMMA^{-T}; tiny tol allows near-singular bread.
  GAMMA.inv <- solve(GAMMA_EV, tol = 1e-200)
  covmatrix <- GAMMA.inv %*% SIGMA_EV %*% t(as.matrix(GAMMA.inv))
  return(covmatrix)
  # return(list(M1=M1,M0=M0,B1=B1,B0=B0,GAMMA_IV=GAMMA_IV,SIGMA_IV=SIGMA_IV,covmatrix))
}
##### 1 Implementation Function ####
### An example for debugging: these assignments let the body of INS_int be
### stepped through interactively; inside real calls they are shadowed by
### the function arguments.
i <- 1
nsample <- 1500
nvalidation <- 500
INS_int <- function(i, nsample, nvalidation){
  ## Run one replicate of the measurement-error / misclassification simulation.
  ##   i           - replicate index; selects the i-th entry of a fixed seed pool,
  ##                 so each replicate is reproducible independently of run order
  ##   nsample     - total number of generated subjects
  ##   nvalidation - number of subjects reserved as the validation subsample
  ## Returns a list with the naive, measurement-error-only,
  ## misclassification-only and proposed estimates plus standard errors,
  ## or NULL if the estimating equations fail to solve.
  ## Depends on GEE_UI_EV() / GEE_covEV() from package GeneErrorMis.
  ## 1.1 Set up ####
  set.seed(2019)
  seed_i <- sample(1000000,1000)   # fixed pool of per-replicate seeds
  set.seed(seed_i[i])
  library(GeneErrorMis)
  library(nleqslv)
  ## 1.2 Data Generation ####
  ## true parameters
  beta1 <- c(0.7,1.5,-1)    # coefficients of the continuous response model
  beta2 <- c(0.7,-1.5,1)    # coefficients of the binary response model
  sigma <- 1
  rho <- 0
  sigma_e <- 0.1            # true measurement-error SD (re-estimated below)
  gamma <- 0.8              # contamination of Y1* by the true Y2
  alpha <- -2.197           # logit of the misclassification probability
  ### Generate the true data sets
  X <- runif(nsample,-3,4)
  W <- rnorm(nsample,0,sd=1)
  mu1 <- beta1[1] + beta1[2] * X + beta1[3] * W
  mu2 <- beta2[1] + beta2[2] * X + beta2[3] * W
  expit <- function(x){
    ## inverse logit; exp() overflow gives NA, which is mapped to probability 1
    value <- exp(x)/(1+exp(x))
    ifelse(is.na(value),1,value)
  }
  ## Responses: continuous Y1 and binary Y2
  epsilon <- rnorm(nsample,0,1)
  U <- runif(nsample,0,1)
  mu2expit <- expit(mu2)
  Y1 <- mu1 + epsilon
  Y2 <- ifelse(U < mu2expit,1,0)
  ## realised correlation between the two response residuals
  rho <- cor(Y1-mu1,Y2-mu2expit)
  ## mismeasured surrogates: Y1star has additive error, Y2star is misclassified
  e <- rnorm(nsample,0,sigma_e)
  U2 <- runif(nsample,0,1)
  Y1star <- Y1 + gamma * Y2 + e
  Y2star <- ifelse(U2 > expit(alpha),Y2,1-Y2)
  ## Naive fits (surrogates) and oracle fits (true responses, for reference)
  naive.model1 <- lm(Y1star ~ X + W)
  true.model1 <- lm(Y1 ~ X + W)
  naive.model2 <- glm(Y2star ~ X + W, family = binomial(link = logit))
  true.model2 <- glm(Y2 ~ X + W, family = binomial(link = logit))
  ## 1.3 Implementation Generation ###
  ## 1.3.1 Preparation ###
  DesignMatrix1 <- DesignMatrix2 <- cbind(rep(1,length(X)),X,W)
  CovMis1 <- cbind(rep(0,length(X)),rep(1,length(X)))
  CovMis2 <- c(rep(1,length(X)))
  DesignMatrix1 <- as.matrix(DesignMatrix1)
  DesignMatrix2 <- as.matrix(DesignMatrix2)
  CovMis1 <- as.matrix(CovMis1)
  CovMis2 <- as.matrix(CovMis2)
  ## Split: first (nsample - nvalidation) subjects form the mismeasured data,
  ## the remaining nvalidation subjects form the validation data
  data.mismeasure <- data.frame(Y1star=Y1star[1:(nsample - nvalidation)],Y2star=Y2star[1:(nsample - nvalidation)], DesignMatrix1[1:(nsample - nvalidation),],CovMis1[1:(nsample - nvalidation),],CovMis2[1:(nsample - nvalidation),])
  data.validation <- data.frame(Y1=Y1[(nsample - nvalidation+1):nsample],Y2=Y2[(nsample - nvalidation+1):nsample], Y1star=Y1star[(nsample - nvalidation+1):nsample],Y2star=Y2star[(nsample - nvalidation+1):nsample],
                                CovMis1[(nsample - nvalidation+1):nsample,],CovMis2[(nsample - nvalidation+1):nsample,])
  ## 1.3.2 Initial values for the solver ###
  beta_Y1_0 <- mean(Y1star)
  beta_Y2_0 <- log(mean(Y2star)/(1-mean(Y2star)))
  intial3 <- c(beta_Y1_0,0,0,beta_Y2_0,0,0,1,0.001)   # alternative start, kept for debugging
  intial4 <- c(naive.model1$coefficients,naive.model2$coefficients,1,0)
  ## 1.4 Estimating Procedure
  ## 1.4.1 Measurement-error and misclassification parameters from validation data
  model.measure <- lm(Y1star ~ -1 + offset(Y1) + Y2,data = data.validation)
  model.class1 <- glm((1-Y2star) ~ 1, data = data.validation[data.validation$Y2==1,],family = binomial(link="logit"))
  model.class0 <- glm(Y2star ~ 1, data = data.validation[data.validation$Y2==0,],family = binomial(link="logit"))
  gamma2 <- model.measure$coefficients   # estimated gamma
  sigma_e <- sigma(model.measure)        # estimated measurement-error SD
  alpha1 <- model.class1$coefficients    # logit P(Y2star = 0 | Y2 = 1)
  alpha0 <- model.class0$coefficients    # logit P(Y2star = 1 | Y2 = 0)
  tryCatch({
    # 1.4.2 The proposed method ####
    NR <- nleqslv(intial4, GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=TRUE, control=list(maxit=2000),
                  gamma1 = 1, gamma=c(0,gamma2), alpha1= alpha1, alpha0= alpha0, sigma_e = sigma_e)
    betahat <- ifelse(abs(NR$x)<10,NR$x,NA)   # flag diverged solutions as NA
    ### variance estimation with validation data
    if (!any(is.na(betahat))) {
      cov <- GEE_covEV (betahat, data.validation, data.mismeasure,
                        gamma1=1, gamma = c(0,gamma2), alpha1, alpha0, sigma_e,
                        fixgamma1=1, fixgamma=c(1,0), fixsigma_e=0, fixalpha1=0, fixalpha0=0)
      sd <- sqrt(diag(cov))} else {
      sd <- rep(NA,length(betahat))
    }
    # 1.4.3 Naive model accounting only for the measurement error ###
    measonly <- nleqslv(intial4,GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=TRUE, control=list(maxit=2000),
                        gamma1 = 1, gamma=c(0,gamma2), alpha1= -Inf, alpha0= -Inf, sigma_e = sigma_e)
    betahat_measonly <- ifelse(abs(measonly$x)<10,measonly$x,NA)
    if (!any(is.na(betahat_measonly))) {
      # BUG FIX: pass the estimated gamma2 here, matching the point-estimation
      # call above; the original passed the true value `gamma`, so the variance
      # was evaluated under a different error model than the estimator used
      cov <- GEE_covEV(betahat_measonly, data.validation, data.mismeasure,
                       gamma1=1, gamma = c(0,gamma2), alpha1= -Inf, alpha0= -Inf, sigma_e = sigma_e,
                       fixgamma1=1, fixgamma=c(1,0), fixsigma_e=0, fixalpha1=1, fixalpha0=1)
      sd_measonly <- sqrt(diag(cov))} else {
      sd_measonly <- rep(NA,length(betahat_measonly))
    }
    # 1.4.4 Naive model accounting only for the misclassification ###
    misconly <- nleqslv(intial4, GEE_UI_EV, data.mismeasure = data.mismeasure, jacobian=TRUE, control=list(maxit=2000),
                        gamma1 = 1, gamma=c(0,0), alpha1= alpha1, alpha0= alpha0, sigma_e = 0)
    betahat_misconly <- ifelse(abs(misconly$x)<10,misconly$x,NA)
    if (!any(is.na(betahat_misconly))) {
      # BUG FIX: evaluate the covariance at betahat_misconly; the original
      # passed betahat_measonly (copy-paste error from the previous branch)
      cov <- GEE_covEV(betahat_misconly, data.validation, data.mismeasure,
                       gamma1 = 1, gamma=c(0,0), alpha1= alpha1, alpha0= alpha0, sigma_e = 0,
                       fixgamma1=1, fixgamma=c(1,1), fixsigma_e=1, fixalpha1=0, fixalpha0=0)
      sd_misconly <- sqrt(diag(cov))} else {
      sd_misconly <- rep(NA,length(betahat_misconly))
    }
    return(list(seed = seed_i[i],
                naive1coef = naive.model1$coefficients,
                naive1vcov = vcov(naive.model1),
                naive2coef = naive.model2$coefficients,
                naive2vcov = vcov(naive.model2),
                betameasonly = c(betahat_measonly,gamma2,sigma_e,0,0),
                sdmeasonly = c(sd_measonly,0,0),
                betamisconly = c(betahat_misconly,0,0,alpha1,alpha0),
                sdmisconly = c(sd_misconly[1:8],0,0,sd_misconly[9:10]),
                betahat = c(betahat,gamma2,sigma_e,alpha1,alpha0),
                sd = sd))
  }, error = function(e) return(NULL))   # any failure in estimation -> NULL replicate
}
### 4.1 Simulation 1: under different degree of measurement error ####
# Run 1000 replicates of INS_int() at three total sample sizes; one third of
# each sample is reserved as the validation subsample.
results_1 <- lapply(c(750,1500,3000), FUN= function(x){
results_x <- lapply(1:1000, FUN = INS_int,
nsample = x, nvalidation = x*1/3)
return(results_x)
})
# re1 <- INS_int(1,nsample=nsample, nvalidation=nvalidation, omega_j= omega_j)
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0)
sigma_e_range <- c(0.1,0.5,0.7)
# truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,1,0.1,0.1,-1.39,-1.39)
save(results_1,file="SSEV_R1.RData")
nsample <- c(750,1500,3000)
Results <- NULL
# Aggregate the replicates for each sample size: bias, empirical SD,
# model-based SD and 95% CI coverage for the naive, measurement-error-only,
# misclassification-only and proposed estimators.
for (k in 1:3) {
results <- results_1[[k]]
# full true parameter vector (beta1, beta2, sigma, rho, gamma, sigma_e,
# alpha1, alpha0); overrides the shorter truebeta defined above
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,0.8,0.1,-2.197,-2.197)
# per-replicate accumulators, grown row-by-row below
naive1coef <- NULL
naive1sd <- NULL
naive2coef <- NULL
naive2sd <- NULL
CI1naive <- NULL
CI2naive <- NULL
measonlycoef <- NULL
measonlysd <- NULL
CImeasonly <- NULL
misconlycoef <- NULL
misconlysd <- NULL
CImisconly <- NULL
betas <- NULL
sds <- NULL
CIs <- NULL
for (i in 1:1000){
# replicates where estimation failed return NULL and are skipped
if (is.null(results[[i]])) {
next}
naive1coef <- rbind(naive1coef, results[[i]]$naive1coef)
naive1sd <- rbind(naive1sd,sqrt(diag( results[[i]]$naive1vcov)))
naive2coef <- rbind(naive2coef, results[[i]]$naive2coef)
naive2sd <- rbind(naive2sd,sqrt(diag( results[[i]]$naive2vcov)))
if ((!is.null(results[[i]]$betameasonly)) & (!is.null(results[[i]]$sdmeasonly))) {
measonlycoef <- rbind(measonlycoef,as.vector(results[[i]]$betameasonly))
measonlysd_i <- results[[i]]$sdmeasonly
# SDs with absolute value >= 10 are treated as divergent and set to NA
measonlysd_i <- ifelse(abs(measonlysd_i)<10,measonlysd_i,NA)
measonlysd <- rbind(measonlysd,measonlysd_i)
# 95% Wald interval; the coverage indicator is 1 when truebeta lies inside
CILBmeasonly <- results[[i]]$betameasonly - 1.96 *(measonlysd_i)
CIUBmeasonly <- results[[i]]$betameasonly + 1.96 *(measonlysd_i)
CImeasonly <- rbind(CImeasonly,ifelse((truebeta<as.vector(CIUBmeasonly)) & (truebeta>as.vector(CILBmeasonly)),1,0))
}
if ((!is.null(results[[i]]$betamisconly)) & (!is.null(results[[i]]$sdmisconly))) {
misconlycoef <- rbind(misconlycoef,as.vector(results[[i]]$betamisconly))
misconlysd_i <- (results[[i]]$sdmisconly)
misconlysd_i <- ifelse(abs(misconlysd_i)<10,misconlysd_i,NA)
misconlysd <- rbind(misconlysd, misconlysd_i)
CILBmisconly <- results[[i]]$betamisconly - 1.96 *(misconlysd_i)
CIUBmisconly <- results[[i]]$betamisconly + 1.96 *(misconlysd_i)
CImisconly <- rbind(CImisconly,ifelse((truebeta<as.vector(CIUBmisconly)) & (truebeta>as.vector(CILBmisconly)),1,0))
}
betahat0 <- results[[i]]$betahat
sd0 <- results[[i]]$sd
sd0 <- ifelse(abs(sd0)<10,sd0,NA)
betas <- rbind(betas, betahat0)
sds <- rbind(sds, sd0)
# naive-model CIs only cover the regression coefficients (truebeta[1:6])
CILBnaive1 <- results[[i]]$naive1coef - 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
CIUBnaive1 <- results[[i]]$naive1coef + 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
CI1naive <- rbind(CI1naive,ifelse((truebeta[1:3]<CIUBnaive1) & (truebeta[1:3]>CILBnaive1),1,0))
CILBnaive2 <- results[[i]]$naive2coef - 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
CIUBnaive2 <- results[[i]]$naive2coef + 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
CI2naive <- rbind(CI2naive,ifelse((truebeta[4:6]<CIUBnaive2) & (truebeta[4:6]>CILBnaive2),1,0))
CILB <- betahat0 - 1.96 *(sd0)
CIUB <- betahat0 + 1.96 *(sd0)
CIs <- rbind(CIs,ifelse((truebeta<as.vector(CIUB)) & (truebeta>as.vector(CILB)),1,0))
}
# summarise bias / empirical SD / model SD / coverage across replicates
biasnaive1 <- colMeans(naive1coef,na.rm=T)-truebeta[1:3]
biasnaive2 <- colMeans(naive2coef,na.rm=T)-truebeta[4:6]
naive_esd <- apply(cbind(naive1coef,naive2coef), MARGIN = 2 , FUN=sd, na.rm=T)
sdnaive1 <- colMeans(naive1sd,na.rm=T)
sdnaive2 <- colMeans(naive2sd,na.rm=T)
CInaive1 <- colMeans(CI1naive,na.rm=T)
CInaive2 <- colMeans(CI2naive,na.rm=T)
# pad naive results with zeros for the error-model parameters they do not estimate
naivebias <- c(biasnaive1,biasnaive2,rep(0,6))
naive_esd <- c(naive_esd,rep(0,6))
naivesd <- c(sdnaive1,sdnaive2,rep(0,6))
naiveCI <- c(CInaive1,CInaive2,rep(0,6))
bias_measonly <- colMeans(na.omit(measonlycoef),na.rm = T) - truebeta
sd_emp_measonly <- apply(na.omit(measonlycoef),MARGIN = 2, FUN = sd)
sd_mod_measonly <- colMeans(na.omit(measonlysd),na.rm = T)
CI_measonly <- colMeans(na.omit(CImeasonly),na.rm = T)
bias_misconly <- colMeans(na.omit(misconlycoef),na.rm = T) - truebeta
sd_emp_misconly <- apply(na.omit(misconlycoef),MARGIN = 2, FUN = sd)
sd_mod_misconly <- colMeans(na.omit(misconlysd),na.rm = T)
CI_misconly <- colMeans(na.omit(CImisconly),na.rm = T)
bias1 <- colMeans(na.omit(betas),na.rm = T) - truebeta
sd_emp <- apply(na.omit(betas),MARGIN = 2, FUN = sd)
sd_mod <- colMeans(na.omit(sds),na.rm = T)
CIrate <- colMeans(na.omit(CIs),na.rm = T)
# one row-block of the final summary table for this sample size
# (percent() comes from the scales package)
Results0 <- data.frame(nsample = nsample[k],
naivebias=round(naivebias,3),naive_esd=round(naive_esd,3),naivesd=round(naivesd,3),naiveCI=percent(round(naiveCI,3)),
biasmeas=round(bias_measonly,3),esdmeas=round(sd_emp_measonly,3),sdmeas=round(sd_mod_measonly,3),CImeas=percent(round(CI_measonly,3)),
biasmisc=round(bias_misconly,3),esdmisc=round(sd_emp_misconly,3),sdmisc=round(sd_mod_misconly,3),CImisc=percent(round(CI_misconly,3)),
biasprop=round(bias1,3),propose_esd=round(sd_emp,3),sdpropose=round(sd_mod,3),CI_propose=percent(round(CIrate,3)))
Results <- rbind(Results,Results0)
}
save(Results,file="SSEV_RTable1.RData")
library(xtable)
# LaTeX rendering of the summary table
xtable(Results,digits = 3)
### 4.2 Simulation 2: under different degree of misclassification rates ####
# Same design as Simulation 1, but the total sample size is fixed at 1500 and
# the validation fraction varies (1/3, 1/2, 2/3 of the sample).
results_2 <- lapply(c(1/3,1/2,2/3), FUN= function(x){
results_x <- lapply(1:1000, FUN = INS_int,
nsample = 1500, nvalidation = round(x*1500))
return(results_x)
})
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,0.005,0)
alpha_range <- c(-4.595,-2.197,-1.386)
nrate <- round(c(1/3,1/2,2/3),3)
save(results_2,file="SSEV_R2.RData")
Results <- NULL
# Aggregation loop: identical structure to Simulation 1 (see comments there),
# but indexed by the validation fraction nrate[k] instead of sample size.
for (k in 1:3) {
results <- results_2[[k]]
# full true parameter vector; overrides the shorter truebeta defined above
truebeta <- c(0.7,1.5,-1,0.7,-1.5,1,1,0,0.8,0.1,-2.197,-2.197)
naive1coef <- NULL
naive1sd <- NULL
naive2coef <- NULL
naive2sd <- NULL
CI1naive <- NULL
CI2naive <- NULL
measonlycoef <- NULL
measonlysd <- NULL
CImeasonly <- NULL
misconlycoef <- NULL
misconlysd <- NULL
CImisconly <- NULL
betas <- NULL
sds <- NULL
CIs <- NULL
for (i in 1:1000){
# failed replicates are NULL and skipped
if (is.null(results[[i]])) {
next}
naive1coef <- rbind(naive1coef, results[[i]]$naive1coef)
naive1sd <- rbind(naive1sd,sqrt(diag( results[[i]]$naive1vcov)))
naive2coef <- rbind(naive2coef, results[[i]]$naive2coef)
naive2sd <- rbind(naive2sd,sqrt(diag( results[[i]]$naive2vcov)))
if ((!is.null(results[[i]]$betameasonly)) & (!is.null(results[[i]]$sdmeasonly))) {
measonlycoef <- rbind(measonlycoef,as.vector(results[[i]]$betameasonly))
measonlysd_i <- results[[i]]$sdmeasonly
# SDs >= 10 in absolute value are treated as divergent
measonlysd_i <- ifelse(abs(measonlysd_i)<10,measonlysd_i,NA)
measonlysd <- rbind(measonlysd,measonlysd_i)
CILBmeasonly <- results[[i]]$betameasonly - 1.96 *(measonlysd_i)
CIUBmeasonly <- results[[i]]$betameasonly + 1.96 *(measonlysd_i)
CImeasonly <- rbind(CImeasonly,ifelse((truebeta<as.vector(CIUBmeasonly)) & (truebeta>as.vector(CILBmeasonly)),1,0))
}
if ((!is.null(results[[i]]$betamisconly)) & (!is.null(results[[i]]$sdmisconly))) {
misconlycoef <- rbind(misconlycoef,as.vector(results[[i]]$betamisconly))
misconlysd_i <- (results[[i]]$sdmisconly)
misconlysd_i <- ifelse(abs(misconlysd_i)<10,misconlysd_i,NA)
misconlysd <- rbind(misconlysd, misconlysd_i)
CILBmisconly <- results[[i]]$betamisconly - 1.96 *(misconlysd_i)
CIUBmisconly <- results[[i]]$betamisconly + 1.96 *(misconlysd_i)
CImisconly <- rbind(CImisconly,ifelse((truebeta<as.vector(CIUBmisconly)) & (truebeta>as.vector(CILBmisconly)),1,0))
}
betahat0 <- results[[i]]$betahat
sd0 <- results[[i]]$sd
sd0 <- ifelse(abs(sd0)<10,sd0,NA)
betas <- rbind(betas, betahat0)
sds <- rbind(sds, sd0)
# 95% Wald intervals; coverage indicator is 1 when truebeta falls inside
CILBnaive1 <- results[[i]]$naive1coef - 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
CIUBnaive1 <- results[[i]]$naive1coef + 1.96 *(sqrt(diag(results[[i]]$naive1vcov)))
CI1naive <- rbind(CI1naive,ifelse((truebeta[1:3]<CIUBnaive1) & (truebeta[1:3]>CILBnaive1),1,0))
CILBnaive2 <- results[[i]]$naive2coef - 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
CIUBnaive2 <- results[[i]]$naive2coef + 1.96 *(sqrt(diag(results[[i]]$naive2vcov)))
CI2naive <- rbind(CI2naive,ifelse((truebeta[4:6]<CIUBnaive2) & (truebeta[4:6]>CILBnaive2),1,0))
CILB <- betahat0 - 1.96 *(sd0)
CIUB <- betahat0 + 1.96 *(sd0)
CIs <- rbind(CIs,ifelse((truebeta<as.vector(CIUB)) & (truebeta>as.vector(CILB)),1,0))
}
# summarise bias / empirical SD / model SD / coverage across replicates
biasnaive1 <- colMeans(naive1coef,na.rm=T)-truebeta[1:3]
biasnaive2 <- colMeans(naive2coef,na.rm=T)-truebeta[4:6]
naive_esd <- apply(cbind(naive1coef,naive2coef), MARGIN = 2 , FUN=sd, na.rm=T)
sdnaive1 <- colMeans(naive1sd,na.rm=T)
sdnaive2 <- colMeans(naive2sd,na.rm=T)
CInaive1 <- colMeans(CI1naive,na.rm=T)
CInaive2 <- colMeans(CI2naive,na.rm=T)
# pad naive results with zeros for the error-model parameters they do not estimate
naivebias <- c(biasnaive1,biasnaive2,rep(0,6))
naive_esd <- c(naive_esd,rep(0,6))
naivesd <- c(sdnaive1,sdnaive2,rep(0,6))
naiveCI <- c(CInaive1,CInaive2,rep(0,6))
bias_measonly <- colMeans(na.omit(measonlycoef),na.rm = T) - truebeta
sd_emp_measonly <- apply(na.omit(measonlycoef),MARGIN = 2, FUN = sd)
sd_mod_measonly <- colMeans(na.omit(measonlysd),na.rm = T)
CI_measonly <- colMeans(na.omit(CImeasonly),na.rm = T)
bias_misconly <- colMeans(na.omit(misconlycoef),na.rm = T) - truebeta
sd_emp_misconly <- apply(na.omit(misconlycoef),MARGIN = 2, FUN = sd)
sd_mod_misconly <- colMeans(na.omit(misconlysd),na.rm = T)
CI_misconly <- colMeans(na.omit(CImisconly),na.rm = T)
bias1 <- colMeans(na.omit(betas),na.rm = T) - truebeta
sd_emp <- apply(na.omit(betas),MARGIN = 2, FUN = sd)
sd_mod <- colMeans(na.omit(sds),na.rm = T)
CIrate <- colMeans(na.omit(CIs),na.rm = T)
# one row-block of the final summary table for this validation fraction
Results0 <- data.frame(nrate = nrate[k],
naivebias=round(naivebias,3),naive_esd=round(naive_esd,3),naivesd=round(naivesd,3),naiveCI=percent(round(naiveCI,3)),
biasmeas=round(bias_measonly,3),esdmeas=round(sd_emp_measonly,3),sdmeas=round(sd_mod_measonly,3),CImeas=percent(round(CI_measonly,3)),
biasmisc=round(bias_misconly,3),esdmisc=round(sd_emp_misconly,3),sdmisc=round(sd_mod_misconly,3),CImisc=percent(round(CI_misconly,3)),
biasprop=round(bias1,3),propose_esd=round(sd_emp,3),sdpropose=round(sd_mod,3),CI_propose=percent(round(CIrate,3)))
Results <- rbind(Results,Results0)
}
save(Results,file="SSEV_RTable2.RData")
library(xtable)
xtable(Results,digits = 3) |
# Katherine Raney
# Coursera Exploratory Data Analysis
# December 2014
# Course Project 2, Plot 3
# setwd("C:\\raneykat_git\\ExploratoryDataAnalysis\\ExploratoryAnalysis\\CourseProject")
# Download the NEI data archive and unzip it into the working directory
library(downloader)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(url, dest = "dataset.zip", mode = "wb")
unzip("dataset.zip", exdir = ".")
# read in the datasets
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have seen
# decreases in emissions from 1999-2008 for Baltimore City?
# Which have seen increases in emissions from 1999-2008?
# Use the ggplot2 plotting system to make a plot answer this question.
library(dplyr)
# FIX: the original used mutate(TotalEmissions = sum(Emissions)) followed by
# select(), which keeps one (identical) row per raw record and over-plots the
# same line many times; summarize() collapses to one row per (year, type).
nei2 <- NEI %>%
  filter(fips == "24510") %>%   # 24510 = Baltimore City, Maryland
  group_by(year, type) %>%
  summarize(TotalEmissions = sum(Emissions))
library(ggplot2)
# send plot to a png file
png(file = "plot3.png", bg = "transparent", width = 800, height = 800)
qplot(year, TotalEmissions, data = nei2, facets = . ~ type,
      ylab = "Total PM2.5 Emissions", xlab = "Year",
      main = "Baltimore City PM2.5 Emissions by Type", geom = "line")
dev.off()
| /CourseProjectv2/plot3.R | no_license | raneykat/ExploratoryAnalysis | R | false | false | 1,399 | r | # Katherine Raney
# Coursera Exploratory Data Analysis
# December 2014
# Course Project 2, Plot 3
# setwd("C:\\raneykat_git\\ExploratoryDataAnalysis\\ExploratoryAnalysis\\CourseProject")
#get the data file we need and unzip it
library(downloader)
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download(url,dest="dataset.zip", mode = "wb")
unzip ("dataset.zip",exdir = ".")
# read in the datasets
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Of the four types of sources indicated by the type (point, nonpoint,
# onroad, nonroad) variable, which of these four sources have seen
# decreases in emissions from 1999–2008 for Baltimore City?
# Which have seen increases in emissions from 1999–2008?
# Use the ggplot2 plotting system to make a plot answer this question.
# plot emissions by year and type for the given city - GIVEN CITY FOR BOTH QUESTIONS?
library(dplyr)
nei2 <- NEI %>%
filter(fips == "24510") %>%
group_by(year, type) %>%
mutate(TotalEmissions=sum(Emissions)) %>%
select(year, type, TotalEmissions)
library(ggplot2)
# send plot to a png file
png(file="plot3.png",bg="transparent",width = 800, height = 800)
qplot(year, TotalEmissions, data = nei2, facets =.~ type,ylab="Total PM2.5 Emissions", xlab="Year"
,main="Baltimore City PM2.5 Emissions by Type",geom="line")
dev.off()
|
library(shiny)
# Shiny server logic: renders one reactive bar chart of monthly bills.
# NOTE(review): bills_month_wide and ggplot2 are not defined/loaded in this
# file; presumably they come from global.R or a sourced script -- confirm they
# are in scope before runApp().
shinyServer(function(input, output) {
# The expression is wrapped in renderPlot so that:
#
# 1) it is "reactive" and re-executes automatically when input$year changes
# 2) its output is registered as a plot
# (despite the original comment, this draws a stacked bar chart, not a histogram)
output$Q1 <- renderPlot({
# Stacked bars of Bill by Month for the selected Year, filled by SubID;
# print() forces rendering of the ggplot object inside renderPlot
print(ggplot(bills_month_wide[bills_month_wide$Year == input$year,], aes(x=Month,y=Bill ,fill=SubID)) + geom_bar(stat="identity"))
})
})
| /CASH_Q01/server.R | no_license | thinkcache/Financial_Analysis | R | false | false | 536 | r | library(shiny)
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
# Expression that generates a histogram. The expression is
# wrapped in a call to renderPlot to indicate that:
#
# 1) It is "reactive" and therefore should be automatically
# re-executed when inputs change
# 2) Its output type is a plot
output$Q1 <- renderPlot({
print(ggplot(bills_month_wide[bills_month_wide$Year == input$year,], aes(x=Month,y=Bill ,fill=SubID)) + geom_bar(stat="identity"))
})
})
|
# ML-Prediction-Assignment-Writeup
# Train and compare a decision tree and a random forest on the Weight Lifting
# Exercise data, then predict the 20 held-out test cases.
pml_training<-read.csv("C:/Ebooks/R/coursera/Machine learning/Raw data/pml-training.csv")
pml_testing<-read.csv("C:/Ebooks/R/coursera/Machine learning/Raw data/pml-testing.csv")
library(caret)
library(kernlab)
# FIX: rpart() and randomForest() are called below but their packages were
# never attached; caret does not attach them for direct calls
library(rpart)
library(randomForest)
# NOTE(review): no set.seed() before createDataPartition(), so the 75/25 split
# (and all downstream results) differ between runs -- consider seeding
intrain<-createDataPartition(pml_training$classe,p=.75,list=FALSE)
training<-pml_training[intrain,]
test<-pml_training[-intrain,]
############Data cleaning#############
###remove column x which is the just the index###
train_cln1<-training[,-1]
###Removing variables with more than 60% NAs####
# Vectorized replacement for the original element-growing loop:
# colMeans(is.na(.)) gives the NA fraction per column in one pass
na_frac <- colMeans(is.na(train_cln1))
remove_var <- names(train_cln1)[na_frac >= .6]
varNA <-names(train_cln1) %in% remove_var
train_cln2 <-train_cln1[!varNA]
####remove Near Zero Variance variables##########
dataNZV <- nearZeroVar(train_cln2, saveMetrics=TRUE)
NZVvar <-as.vector(row.names(dataNZV[dataNZV$nzv=="TRUE",]))
NZVvar_bin <-names(train_cln2) %in% NZVvar
train_cln3 <-train_cln2[!NZVvar_bin]
#########cleaning both validation and test data sets########
cln1 <-names(train_cln3)
cln2 <-names(train_cln3[-58])   # drop the outcome column for the test set
test <-test[cln1]
pml_testing1 <-pml_testing[cln2]
# align factor levels so predict() does not fail on unseen timestamp levels
levels(pml_testing1$cvtd_timestamp)<-levels(train_cln3$cvtd_timestamp)
##############Prediction using Decision Tree############
dtree_fit<-rpart(classe~.,method="class",data=train_cln3)
library(rattle)
fancyRpartPlot(dtree_fit)
dtree_predict<-predict(dtree_fit,test,type="class")
confusionMatrix(dtree_predict,test$classe)
###########Prediction using random forest###########
rf_fit<-randomForest(classe~.,data=train_cln3)
rf_predict<-predict(rf_fit,test,type="class")
confusionMatrix(rf_predict,test$classe)
#######since random forest method gives better accuracy using that on test data####
rf_predict1<-predict(rf_fit,pml_testing1,type="class")
#####function to write output files###
# Write each element of x to its own file "problem_id_<i>.txt" in the current
# working directory (one prediction per file, as the course submission requires).
#   x - vector of answers; each value is written without quotes or names
pml_write_files <- function(x) {
  # FIX: seq_along() instead of 1:length(x) -- with empty input the original
  # 1:0 loop ran for indices 1 and 0 and wrote a bogus NA file
  for (i in seq_along(x)) {
    filename <- paste0("problem_id_", i, ".txt")
    write.table(x[i], file = filename,
                quote = FALSE, row.names = FALSE, col.names = FALSE)
  }
}
getwd()   # confirm where the answer files will be written
pml_write_files(rf_predict1)   # one problem_id_<i>.txt per test-case prediction
| /R_markdown.R | no_license | desikazone/ML-Prediction-Assignment-Writeup | R | false | false | 2,307 | r | # ML-Prediction-Assignment-Writeup
pml_training<-read.csv("C:/Ebooks/R/coursera/Machine learning/Raw data/pml-training.csv")
pml_testing<-read.csv("C:/Ebooks/R/coursera/Machine learning/Raw data/pml-testing.csv")
library(caret)
library(kernlab)
intrain<-createDataPartition(pml_training$classe,p=.75,list=FALSE)
training<-pml_training[intrain,]
test<-pml_training[-intrain,]
############Data cleaning#############
###remove column x which is the just the index###
train_cln1<-training[,-1]
###Removing variables with more than 60% NAs####
remove_var<-rep(NA,1)
temp <-vector('character')
for (i in 1:length(train_cln1))
{
if (sum(is.na(train_cln1[i]))/nrow(train_cln1[i]) >=.6) temp<-colnames(train_cln1[i])
if (length(temp)==1) remove_var<-unique(rbind(remove_var,temp))
}
remove_var <-as.vector(remove_var)
varNA <-names(train_cln1) %in% remove_var
train_cln2 <-train_cln1[!varNA]
####remove Near Zero Variance variables##########
dataNZV <- nearZeroVar(train_cln2, saveMetrics=TRUE)
NZVvar <-as.vector(row.names(dataNZV[dataNZV$nzv=="TRUE",]))
NZVvar_bin <-names(train_cln2) %in% NZVvar
train_cln3 <-train_cln2[!NZVvar_bin]
#########cleaning both validation and test data sets########
cln1 <-names(train_cln3)
cln2 <-names(train_cln3[-58])
test <-test[cln1]
pml_testing1 <-pml_testing[cln2]
levels(pml_testing1$cvtd_timestamp)<-levels(train_cln3$cvtd_timestamp)
##############Prediction using Decision Tree############
dtree_fit<-rpart(classe~.,method="class",data=train_cln3)
library(rattle)
fancyRpartPlot(dtree_fit)
dtree_predict<-predict(dtree_fit,test,type="class")
confusionMatrix(dtree_predict,test$classe)
###########Predictino using random forest###########
rf_fit<-randomForest(classe~.,data=train_cln3)
rf_predict<-predict(rf_fit,test,type="class")
confusionMatrix(rf_predict,test$classe)
#######since random forest method gives better accuracy using that on test data####
rf_predict1<-predict(rf_fit,pml_testing1,type="class")
#####function to write output files###
pml_write_files = function(x){
n = length(x)
for(i in 1:n){
filename = paste0("problem_id_",i,".txt")
write.table(x[i],file=filename,quote=FALSE,row.names=FALSE,col.names=FALSE)
}
}
getwd()
pml_write_files(rf_predict1)
|
# Data summary, visualization and GIS mapping of Chlamydotheca species from India
#libraries used
library(vegan)
library(tidyverse)
library(readxl)
library(magrittr)
library(leaflet.providers)
# Data file path
# NOTE(review): the comment claimed the file sits in the working directory, but
# the path below is absolute -- adjust data_path for your machine
data_path<-"C:/Users/samee/Downloads/Chlamydotheca/chlamydotheca.xlsx"
# Chlamydotheca data (second sheet of the workbook)
chlamydo<-read_excel(data_path,
sheet=2)
head(chlamydo,3)
# Descriptive statistics (mean, sd, range, ...) for all numeric columns
library(psych)
data_summ_chlamydo<-psych::describe(chlamydo%>%select_if(is.numeric),
na.rm = T)
View(data_summ_chlamydo)
# Converting from wide to long form: one (env_var, value) row per measurement,
# with columns renamed to presentation-friendly labels first
chlamydo_long<-chlamydo%>%
dplyr::select(pH,
temp,
sal,
total_sp_community)%>%
dplyr::rename('Temperature'='temp',
'Salinity'='sal',
'Total_species'='total_sp_community')%>%
tidyr::gather(env_var,values,
pH:Temperature,
Salinity,
Total_species)
# Boxplots of each environmental variable, one free-scaled facet per variable
chlamydo_long%>%
ggplot(aes(x=env_var,
y=values))+
geom_boxplot(col='black',
fill='forestgreen',
lwd=1)+
theme_bw(base_size = 19)+
facet_wrap(~env_var,
scales = 'free')+
labs(x="Environmental variables",
y="Value")
# Local distribution of Chlamydotheca using leaflet
require(leaflet)
# coordinates plus site attributes used for the interactive map
locality_data<-chlamydo%>%
dplyr::select(long,
lat,
Locality,
type,
Alt,
veg,
temp,
cond,
sal)
# Color palette for the map: one fixed colour per Locality level
pal <- colorFactor(
palette = c('steelblue',
'forestgreen',
'black',
'purple',
'orange',
'grey60'),
domain = locality_data$Locality)
# Interactive map: one circle marker per record, coloured by Locality,
# with the locality name shown on hover
chlamydo_map<-leaflet(locality_data) %>%
addProviderTiles("Stamen.Terrain") %>%
addCircleMarkers(
color = ~pal(Locality),
opacity = 1,
stroke = TRUE,
lng = ~long,
lat = ~lat,
label = ~as.character(Locality),
radius = 4)
# Add a scale bar, a street-map base layer and an inset mini-map overview
addScaleBar(chlamydo_map,
position = 'topright')%>%
addProviderTiles(providers$Esri.WorldStreetMap) %>%
addMiniMap(
tiles = providers$Esri.WorldStreetMap,
toggleDisplay = TRUE)
| /ongoing_studies/Chlamydotheca faunistics.R | no_license | sameerpadhye/Personal-projects | R | false | false | 2,612 | r | # Data summary, visualization and GIS mapping of Chlamydotheca species from India
#libraries used
library(vegan)
library(tidyverse)
library(readxl)
library(magrittr)
library(leaflet.providers)
# Data file path (It is assumed that the sample data is saved in the working directory and saved as an excel file)
data_path<-"C:/Users/samee/Downloads/Chlamydotheca/chlamydotheca.xlsx"
# Chlamydotheca data
chlamydo<-read_excel(data_path,
sheet=2)
head(chlamydo,3)
# Data summary of the dataset
library(psych)
data_summ_chlamydo<-psych::describe(chlamydo%>%select_if(is.numeric),
na.rm = T)
View(data_summ_chlamydo)
# Converting from wide to long form
chlamydo_long<-chlamydo%>%
dplyr::select(pH,
temp,
sal,
total_sp_community)%>%
dplyr::rename('Temperature'='temp',
'Salinity'='sal',
'Total_species'='total_sp_community')%>%
tidyr::gather(env_var,values,
pH:Temperature,
Salinity,
Total_species)
# plot for env var
chlamydo_long%>%
ggplot(aes(x=env_var,
y=values))+
geom_boxplot(col='black',
fill='forestgreen',
lwd=1)+
theme_bw(base_size = 19)+
facet_wrap(~env_var,
scales = 'free')+
labs(x="Environmental variables",
y="Value")
# Local distribution of Chlamydotheca using leaflet
require(leaflet)
# getting the data from the main dataset
locality_data<-chlamydo%>%
dplyr::select(long,
lat,
Locality,
type,
Alt,
veg,
temp,
cond,
sal)
# Color palette for the map
pal <- colorFactor(
palette = c('steelblue',
'forestgreen',
'black',
'purple',
'orange',
'grey60'),
domain = locality_data$Locality)
# Map
chlamydo_map<-leaflet(locality_data) %>%
addProviderTiles("Stamen.Terrain") %>%
addCircleMarkers(
color = ~pal(Locality),
opacity = 1,
stroke = TRUE,
lng = ~long,
lat = ~lat,
label = ~as.character(Locality),
radius = 4)
# Adding scale bar
addScaleBar(chlamydo_map,
position = 'topright')%>%
addProviderTiles(providers$Esri.WorldStreetMap) %>%
addMiniMap(
tiles = providers$Esri.WorldStreetMap,
toggleDisplay = TRUE)
|
#' @title Plot of object mfa
#' @description Plots factor scores, partial factor scores and partial loadings
#' @param mfa an object of class \code{"mfa"}
#' @param type integer selecting the plot. 1: common factor scores; 2: partial
#'   factor scores with variable loadings for one table; 3: partial factor
#'   scores and loadings for all tables; 4: partial factor scores only;
#'   5: partial loadings only
#' @param d1 component plotted on the x-axis
#' @param d2 component plotted on the y-axis
#' @param X index of the data table to plot (used by types 2, 4 and 5)
#' @param loading_labels optional character labels for the partial loadings
#' @param cex scaling factor for title, legends, labels, and points
#' @param \dots arguments to be passed to/from other methods
#' @examples
#'
#' wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
#' sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
#' scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
#' mymfa <- mfa(wines, sets, ncomps = 2, T, scaling_vec)
#'
#' plot(mymfa, type = 1)
#' plot(mymfa, type = 2, X = 1)
#' plot(mymfa, type = 2, X = 1, loading_labels = c("Cat Pee", "Passion Fruit", "Green Pepper", "Mineral", "Smoky", "Citrus"))
#' plot(mymfa, type = 3, loading_labels = NULL)
#' plot(mymfa, type = 4, X = 1)
#' plot(mymfa, type = 5, X = 1)
#'
#' @export
plot.mfa <- function(mfa, type, d1 = 1, d2 = 2, X = 1, loading_labels = NULL, cex = 1, ...) {
  # FIX: the original chained scalar comparisons with the elementwise `&`
  # (`type != 1 & type != 2 & ...`), which misbehaves/errors for non-scalar
  # input; a single %in% test with an explicit length check is both safer and
  # clearer. Also fixes the "pecify" typo in the error message.
  if (length(type) != 1 || !(type %in% 1:5)) {
    stop("invalid type input: specify from 1, 2, 3, 4 or 5")
  }
  # dispatch to the matching auxiliary plotting function
  if (type == 1) {
    plot_factor_scores(mymfa = mfa, d1 = d1, d2 = d2, cex = cex)
  } else if (type == 2) {
    plot_pfs_vl(mymfa = mfa, X = X, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
  } else if (type == 3) {
    plot_pfs_vl_all(mymfa = mfa, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
  } else if (type == 4) {
    plot_pfs(mymfa = mfa, X = X, d1 = d1, d2 = d2, cex = cex)
  } else {
    plot_vl(mymfa = mfa, X = X, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
  }
}
#all below are auxiliary functions for plot.mfa() method
#plot for factor scores
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
# Auxiliary: scatter plot of the common factor scores on components d1 (x)
# and d2 (y), one coloured point per observation with a matching legend.
# mymfa: the mfa object; cex: global size scaling for points/text.
plot_factor_scores <- function(mymfa, d1 = 1, d2 = 2, cex = 1) {
if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
stop("invalid dimension input: common factor score does not have enough dimension")
}
else {
#generated random color for each data point (rainbow spreads hues evenly)
data <- data.frame(x = mymfa$common_factor_scores, objects = mymfa$observation_names, cl = rainbow(nrow(mymfa$common_factor_scores)))
# symmetric plot limits: largest absolute score on either displayed axis
margin <- max(max(abs(data[,d1])),max(abs(data[,d2])))
plot(data[, d1], data[, d2], col= data$cl, pch=16, axes = FALSE,
panel.first = grid(),
xlim = c(-1*margin-0.5,margin+0.5),
ylim = c(-1*margin-0.5,margin+0.5),
xlab = NA,
ylab = NA, main = "Factor Scores", cex = cex, cex.main = cex*1.5)
legend("bottomleft", cex=0.7 *cex ,legend = data$objects ,col=data$cl ,pch=16)
# draw the two component axes through the origin as arrows
arrows(x0 = -1*margin-0.2, y0 = 0, x1 = margin+0.2, y1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
arrows(y0 = -1*margin-0.2, x0 = 0, y1 = margin+0.2, x1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
# label each axis arrow with its component number
d1_string <- paste(d1)
d2_string <- paste(d2)
text(0, margin+0.3, d2_string, cex= cex)
text(margin+0.3, 0, d1_string, cex= cex)
}
}
#plot for partial factor scores
# Auxiliary: scatter plot of the partial factor scores of table X on
# components d1 (x) and d2 (y), one coloured point per observation.
plot_pfs <- function (mymfa, X=1, d1 = 1, d2 = 2, cex = 1) {
if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
stop("invalid dimension input: partial factor score does not have enough dimension")
}
else {
# scores for table X plus observation labels and one colour per observation
data <- data.frame(mymfa$partial_factor_scores[[X]],objects = mymfa$observation_names, cl = rainbow(nrow(mymfa$common_factor_scores)))
# symmetric plot limits from the largest absolute score on either axis
margin <- max(max(abs(data[,d1])),max(abs(data[,d2])))
plot(data[, d1], data[, d2], col= data$cl, pch=16, axes = FALSE,
panel.first = grid(),
xlim = c(-1*margin-0.5,margin+0.5),
ylim = c(-1*margin-0.5,margin+0.5),
xlab = NA,
ylab = NA,
main ="Partial Factor Scores" , cex = cex, cex.main = 1.5 * cex)
legend("bottomleft", cex= 0.7 * cex ,legend = data$objects ,col=data$cl ,pch=16)
# component axes drawn as arrows through the origin
arrows(x0 = -1*margin-0.2, y0 = 0, x1 = margin+0.2, y1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
arrows(y0 = -1*margin-0.2, x0 = 0, y1 = margin+0.2, x1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
# label the axis arrows with their component numbers
d1_string <- paste(d1)
d2_string <- paste(d2)
text(0, margin+0.3, d2_string, cex= cex)
text(margin+0.3, 0, d1_string, cex= cex)
}
}
#plot for variable loadings
# Auxiliary: plot of the (rescaled) partial variable loadings of table X on
# components d1 (x) and d2 (y). Points are labelled in place when
# loading_labels is supplied, otherwise named "loading<i>" in a legend.
plot_vl <- function (mymfa, X=1, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
stop("invalid dimension input: variable loading do not have enough dimension")
}
else {
loadingdata <- data.frame(mymfa$partial_loadings[[X]] )
#talked to Gaston, he said the rescale in the paper has some issues
#Gaston confirmed that we can rescale using the factors as we want
# ad-hoc rescaling: each axis standardised by its SD, then shrunk (0.8/0.4)
# so the loadings fit visually alongside the factor-score scale
rescaled_loadings <- data.frame(x = 0.8*loadingdata[,d1]/sd(loadingdata[,d1]), y = 0.4*loadingdata[,d2]/sd(loadingdata[,d2]))
# default labels "loading1", "loading2", ... when none are supplied
if(is.null(loading_labels)){
v <- NULL
for (i in 1:nrow(loadingdata)){
v <- c(v, paste("loading",i,sep = ""))
}
}else{
v <- loading_labels
}
data <- data.frame(rescaled_loadings, objects = v, cl = rainbow(length(v)))
# symmetric plot limits from the largest absolute rescaled loading
margin <- max(max(abs(data[,1])),max(abs(data[,2])))
plot(data[, 1], data[, 2], col= data$cl, pch=16, axes = FALSE,
panel.first = grid(),
xlim = c(-1*margin-0.5,margin+0.5),
ylim = c(-1*margin-0.5,margin+0.5),
xlab = NA,
ylab = NA,
main = "Variable Loadings" , cex = cex, cex.main = 1.5 * cex)
# user-supplied labels are written beside the points; otherwise a legend
if (!is.null(loading_labels)) {
#decided to let user input data labels themselves since it is not included in csv file
text(rescaled_loadings[,d1], rescaled_loadings[,d2], labels=loading_labels[1:nrow(rescaled_loadings)], cex= 0.7*cex, pos=4)
}
else {
legend("bottomleft", cex= 0.7 * cex,legend = data$objects ,col=data$cl ,pch=16)
}
# component axes drawn as arrows through the origin, labelled below
arrows(x0 = -1*margin-0.2, y0 = 0, x1 = margin+0.2, y1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
arrows(y0 = -1*margin-0.2, x0 = 0, y1 = margin+0.2, x1 = 0, length=0.05,angle=20,
code = 2, lwd = 2)
d1_string <- paste(d1)
d2_string <- paste(d2)
text(0, margin+0.3, d2_string, cex= cex)
text(margin+0.3, 0, d1_string, cex= cex)
}
}
#plot for partial factor scores and variable loadings
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
#X: which data table you would like to plot for
#loading_labels: the label that user could input themselves
# Overlay plot of the partial factor scores (coloured points) and the
# rescaled variable loadings (grey squares) of data table X.
# mymfa: the mfa object
# X: which data table you would like to plot for
# d1, d2: dimensions mapped to the horizontal / vertical axis
# loading_labels: optional labels drawn next to the loadings
# cex: scaling factor for points, title, legend and axis labels
plot_pfs_vl <- function (mymfa, X = 1, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
  if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
    stop("invalid dimension input: factor score and loadings do not have enough dimension")
  }
  else {
    data <- data.frame(mymfa$partial_factor_scores[[X]],
                       objects = mymfa$observation_names,
                       cl = rainbow(nrow(mymfa$common_factor_scores)))
    loadingdata <- data.frame(mymfa$partial_loadings[[X]])
    # talked to Gaston, he said the rescale in the paper has some issues;
    # Gaston confirmed that we can rescale using the factors as we want.
    # NOTE: rescaled_loadings always has exactly two columns
    # (x = dimension d1, y = dimension d2); bug fix below indexes it with 1/2
    # instead of d1/d2, which failed whenever d1 or d2 > 2.
    rescaled_loadings <- data.frame(x = 0.8 * loadingdata[, d1] / sd(loadingdata[, d1]),
                                    y = 0.4 * loadingdata[, d2] / sd(loadingdata[, d2]))
    margin <- max(abs(data[, d1]), abs(data[, d2]),
                  abs(rescaled_loadings[, 1]), abs(rescaled_loadings[, 2]))
    plot(data[, d1], data[, d2], col = data$cl, pch = 16, axes = FALSE,
         panel.first = grid(),
         xlim = c(-1 * margin - 0.5, margin + 0.5),
         ylim = c(-1 * margin - 0.5, margin + 0.5),
         xlab = NA,
         ylab = NA,
         main = "Partial Factor Score \nand Variable Loadings", cex = cex, cex.main = 1 * cex)
    legend("bottomleft", cex = 0.7 * cex, legend = data$objects, col = data$cl, pch = 16)
    # axis arrows through the origin
    arrows(x0 = -1 * margin - 0.2, y0 = 0, x1 = margin + 0.2, y1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    arrows(y0 = -1 * margin - 0.2, x0 = 0, y1 = margin + 0.2, x1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    # dimension numbers used as axis labels
    text(0, margin + 0.3, paste(d2), cex = cex)
    text(margin + 0.3, 0, paste(d1), cex = cex)
    # loadings drawn as grey squares on top of the score plot
    points(rescaled_loadings[, 1], rescaled_loadings[, 2], col = "grey", pch = 12, cex = cex)
    if (!is.null(loading_labels)) {
      # decided to let user input data labels themselves since they are not
      # included in the csv file
      text(rescaled_loadings[, 1], rescaled_loadings[, 2],
           labels = loading_labels[seq_len(nrow(rescaled_loadings))],
           cex = 0.7 * cex, pos = 4)
    }
  }
}
#printing all 10 plots
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
#loading_labels: the label that user could input themselves
# Produce the partial factor score / variable loading overlay for every
# data table of the mfa object, one plot per table.
# mymfa: the mfa object
# d1, d2: dimensions mapped to the horizontal / vertical axis
# loading_labels: optional matrix of labels, one column per data table
# cex: scaling factor passed through to plot_pfs_vl()
plot_pfs_vl_all <- function (mymfa, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
  total <- length(mymfa$partial_factor_scores)
  # The plots are intentionally emitted one by one rather than arranged on a
  # single page: the number of data tables (and hence plots) is unknown in
  # general, so a fixed page layout would be fragile (per discussion with
  # Gaston, the original author's advisor).
  # seq_len() (not 1:total) correctly yields an empty loop when total == 0.
  for (i in seq_len(total)) {
    # when a label matrix is supplied, column i holds the labels for table i
    labels_i <- if (is.null(loading_labels)) NULL else loading_labels[, i]
    plot_pfs_vl(mymfa, i, d1, d2, loading_labels = labels_i, cex = cex)
  }
}
| /plot.mfa.R | no_license | BeaGir/JLNXB_243 | R | false | false | 10,313 | r | #' @title Plot of object mfa
#' @description Plots factor score, partial factor score and partial loadings
#' @param mfa an object of class \code{"mfa"}
#' @param type indicating which plot to output. 1: factor score plot; 2:factor score and partial loadings; 3: produce factor score and partial loadings on all components; 4: partial factor score; 5: partial loadings
#' @param d1 indicating the x-axis
#' @param d2 indicating the y-axis
#' @param X indicating the target table to produce a plot
#' @param loading_labels the labels for partial loadings
#' @param cex parameter for controlling the size of title, lengends, labels, and points
#' @param \dots arguments to be passed to/from other methods
#' @examples
#'
#' wines <- read.csv("https://raw.githubusercontent.com/ucb-stat243/stat243-fall-2016/master/problem-sets/final-project/data/wines.csv", stringsAsFactors = FALSE)
#' sets <- list(2:7, 8:13, 14:19, 20:24, 25:30, 31:35, 36:39, 40:45, 46:50, 51:54)
#' scaling_vec <- apply(subset(wines, select = unlist(sets)), 2, function(x) sqrt(sum((x - mean(x))^2)))
#' mymfa <- mfa(wines, sets, ncomps = 2, T, scaling_vec)
#'
#' plot(mymfa, type = 1)
#' plot(mymfa, type = 2, X = 1)
#' plot(mymfa, type = 2, X = 1, loading_labels = c("Cat Pee", "Passion Fruit", "Green Pepper", "Mineral", "Smoky", "Citrus"))
#' plot(mymfa, type = 3, loading_labels = NULL)
#' plot(mymfa, type = 4, X = 1)
#' plot(mymfa, type = 5, X = 1)
#'
#' @export
# S3 plot method for objects of class "mfa"; dispatches on 'type' to one of
# the auxiliary plotting functions defined below (see roxygen block above
# for the full parameter documentation).
plot.mfa <- function(mfa, type, d1 = 1, d2 = 2, X = 1, loading_labels = NULL, cex = 1, ...) {
  # validate 'type' with a membership test instead of a chain of scalar
  # comparisons; also fixes the typo in the error message ("pecify")
  if (!(type %in% c(1, 2, 3, 4, 5))) {
    stop("invalid type input: specify from 1, 2, 3, 4 or 5")
  }
  else {
    # type = 1, plot_factor_scores
    if (type == 1) {
      plot_factor_scores(mymfa = mfa, d1 = d1, d2 = d2, cex = cex)
    }
    # type = 2, plot_pfs_vl
    else if (type == 2) {
      plot_pfs_vl(mymfa = mfa, X = X, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
    }
    # type = 4, plot_pfs
    else if (type == 4) {
      plot_pfs(mymfa = mfa, X = X, d1 = d1, d2 = d2, cex = cex)
    }
    # type = 5, plot_vl
    else if (type == 5) {
      plot_vl(mymfa = mfa, X = X, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
    }
    # type = 3, plot_pfs_vl_all
    else {
      plot_pfs_vl_all(mymfa = mfa, d1 = d1, d2 = d2, loading_labels = loading_labels, cex = cex)
    }
  }
}
#all below are auxiliary functions for plot.mfa() method
#plot for factor scores
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
# Scatter plot of the common factor scores of an mfa object on two chosen
# dimensions, one colour per observation, with a legend and axis arrows.
# mymfa: the mfa object
# d1: dimension on the horizontal axis
# d2: dimension on the vertical axis
# cex: scaling factor for points, title, legend and axis labels
plot_factor_scores <- function(mymfa, d1 = 1, d2 = 2, cex = 1) {
  if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
    stop("invalid dimension input: common factor score does not have enough dimension")
  }
  # one random colour per observation
  scores <- data.frame(x = mymfa$common_factor_scores,
                       objects = mymfa$observation_names,
                       cl = rainbow(nrow(mymfa$common_factor_scores)))
  lim <- max(abs(scores[, d1]), abs(scores[, d2]))
  plot(scores[, d1], scores[, d2], col = scores$cl, pch = 16, axes = FALSE,
       panel.first = grid(),
       xlim = c(-lim - 0.5, lim + 0.5),
       ylim = c(-lim - 0.5, lim + 0.5),
       xlab = NA, ylab = NA,
       main = "Factor Scores", cex = cex, cex.main = cex * 1.5)
  legend("bottomleft", cex = 0.7 * cex, legend = scores$objects,
         col = scores$cl, pch = 16)
  # draw the two axes as arrows through the origin
  arrows(x0 = -lim - 0.2, y0 = 0, x1 = lim + 0.2, y1 = 0,
         length = 0.05, angle = 20, code = 2, lwd = 2)
  arrows(x0 = 0, y0 = -lim - 0.2, x1 = 0, y1 = lim + 0.2,
         length = 0.05, angle = 20, code = 2, lwd = 2)
  # label each axis with its dimension number
  text(0, lim + 0.3, paste(d2), cex = cex)
  text(lim + 0.3, 0, paste(d1), cex = cex)
}
#plot for partial factor scores
# Scatter plot of the partial factor scores of data table X on two chosen
# dimensions, one colour per observation, with a legend and axis arrows.
# mymfa: the mfa object
# X: index of the data table to plot
# d1, d2: dimensions mapped to the horizontal / vertical axis
# cex: scaling factor for points, title, legend and axis labels
plot_pfs <- function (mymfa, X = 1, d1 = 1, d2 = 2, cex = 1) {
  if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
    stop("invalid dimension input: partial factor score does not have enough dimension")
  }
  # one colour per observation, same palette as the common factor score plot
  pfs <- data.frame(mymfa$partial_factor_scores[[X]],
                    objects = mymfa$observation_names,
                    cl = rainbow(nrow(mymfa$common_factor_scores)))
  lim <- max(abs(pfs[, d1]), abs(pfs[, d2]))
  plot(pfs[, d1], pfs[, d2], col = pfs$cl, pch = 16, axes = FALSE,
       panel.first = grid(),
       xlim = c(-lim - 0.5, lim + 0.5),
       ylim = c(-lim - 0.5, lim + 0.5),
       xlab = NA, ylab = NA,
       main = "Partial Factor Scores", cex = cex, cex.main = 1.5 * cex)
  legend("bottomleft", cex = 0.7 * cex, legend = pfs$objects,
         col = pfs$cl, pch = 16)
  # axis arrows through the origin
  arrows(x0 = -lim - 0.2, y0 = 0, x1 = lim + 0.2, y1 = 0,
         length = 0.05, angle = 20, code = 2, lwd = 2)
  arrows(x0 = 0, y0 = -lim - 0.2, x1 = 0, y1 = lim + 0.2,
         length = 0.05, angle = 20, code = 2, lwd = 2)
  # dimension numbers used as axis labels
  text(0, lim + 0.3, paste(d2), cex = cex)
  text(lim + 0.3, 0, paste(d1), cex = cex)
}
#plot for variable loadings
# Plot the rescaled variable loadings of data table X of an mfa object.
# mymfa: the mfa object
# X: index of the data table whose loadings are plotted
# d1, d2: dimensions mapped to the horizontal / vertical axis
# loading_labels: optional labels drawn next to each loading; when NULL a
#                 legend with generated names ("loading1", ...) is shown
# cex: scaling factor for points, title, legend and axis labels
plot_vl <- function (mymfa, X = 1, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
  if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
    stop("invalid dimension input: variable loading do not have enough dimension")
  }
  else {
    loadingdata <- data.frame(mymfa$partial_loadings[[X]])
    # talked to Gaston, he said the rescale in the paper has some issues;
    # Gaston confirmed that we can rescale using the factors as we want.
    # NOTE: rescaled_loadings always has exactly two columns
    # (x = dimension d1, y = dimension d2), so it is indexed with 1/2 below.
    rescaled_loadings <- data.frame(x = 0.8 * loadingdata[, d1] / sd(loadingdata[, d1]),
                                    y = 0.4 * loadingdata[, d2] / sd(loadingdata[, d2]))
    if (is.null(loading_labels)) {
      # generate default names vectorised instead of growing a vector in a loop
      v <- paste0("loading", seq_len(nrow(loadingdata)))
    } else {
      v <- loading_labels
    }
    data <- data.frame(rescaled_loadings, objects = v, cl = rainbow(length(v)))
    margin <- max(abs(data[, 1]), abs(data[, 2]))
    plot(data[, 1], data[, 2], col = data$cl, pch = 16, axes = FALSE,
         panel.first = grid(),
         xlim = c(-1 * margin - 0.5, margin + 0.5),
         ylim = c(-1 * margin - 0.5, margin + 0.5),
         xlab = NA,
         ylab = NA,
         main = "Variable Loadings", cex = cex, cex.main = 1.5 * cex)
    if (!is.null(loading_labels)) {
      # decided to let user input data labels themselves since they are not
      # included in the csv file.
      # bug fix: index columns 1/2, not d1/d2 -- rescaled_loadings only has
      # two columns, so d1/d2 > 2 used to fail or pick the wrong column.
      text(rescaled_loadings[, 1], rescaled_loadings[, 2],
           labels = loading_labels[seq_len(nrow(rescaled_loadings))],
           cex = 0.7 * cex, pos = 4)
    }
    else {
      legend("bottomleft", cex = 0.7 * cex, legend = data$objects,
             col = data$cl, pch = 16)
    }
    # axis arrows through the origin
    arrows(x0 = -1 * margin - 0.2, y0 = 0, x1 = margin + 0.2, y1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    arrows(y0 = -1 * margin - 0.2, x0 = 0, y1 = margin + 0.2, x1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    # dimension numbers used as axis labels
    text(0, margin + 0.3, paste(d2), cex = cex)
    text(margin + 0.3, 0, paste(d1), cex = cex)
  }
}
#plot for partial factor scores and variable loadings
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
#X: which data table you would like to plot for
#loading_labels: the label that user could input themselves
# Overlay plot of the partial factor scores (coloured points) and the
# rescaled variable loadings (grey squares) of data table X.
# mymfa: the mfa object
# X: which data table you would like to plot for
# d1, d2: dimensions mapped to the horizontal / vertical axis
# loading_labels: optional labels drawn next to the loadings
# cex: scaling factor for points, title, legend and axis labels
plot_pfs_vl <- function (mymfa, X = 1, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
  if (ncol(mymfa$common_factor_scores) < max(d1, d2)) {
    stop("invalid dimension input: factor score and loadings do not have enough dimension")
  }
  else {
    data <- data.frame(mymfa$partial_factor_scores[[X]],
                       objects = mymfa$observation_names,
                       cl = rainbow(nrow(mymfa$common_factor_scores)))
    loadingdata <- data.frame(mymfa$partial_loadings[[X]])
    # talked to Gaston, he said the rescale in the paper has some issues;
    # Gaston confirmed that we can rescale using the factors as we want.
    # NOTE: rescaled_loadings always has exactly two columns
    # (x = dimension d1, y = dimension d2); bug fix below indexes it with 1/2
    # instead of d1/d2, which failed whenever d1 or d2 > 2.
    rescaled_loadings <- data.frame(x = 0.8 * loadingdata[, d1] / sd(loadingdata[, d1]),
                                    y = 0.4 * loadingdata[, d2] / sd(loadingdata[, d2]))
    margin <- max(abs(data[, d1]), abs(data[, d2]),
                  abs(rescaled_loadings[, 1]), abs(rescaled_loadings[, 2]))
    plot(data[, d1], data[, d2], col = data$cl, pch = 16, axes = FALSE,
         panel.first = grid(),
         xlim = c(-1 * margin - 0.5, margin + 0.5),
         ylim = c(-1 * margin - 0.5, margin + 0.5),
         xlab = NA,
         ylab = NA,
         main = "Partial Factor Score \nand Variable Loadings", cex = cex, cex.main = 1 * cex)
    legend("bottomleft", cex = 0.7 * cex, legend = data$objects, col = data$cl, pch = 16)
    # axis arrows through the origin
    arrows(x0 = -1 * margin - 0.2, y0 = 0, x1 = margin + 0.2, y1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    arrows(y0 = -1 * margin - 0.2, x0 = 0, y1 = margin + 0.2, x1 = 0,
           length = 0.05, angle = 20, code = 2, lwd = 2)
    # dimension numbers used as axis labels
    text(0, margin + 0.3, paste(d2), cex = cex)
    text(margin + 0.3, 0, paste(d1), cex = cex)
    # loadings drawn as grey squares on top of the score plot
    points(rescaled_loadings[, 1], rescaled_loadings[, 2], col = "grey", pch = 12, cex = cex)
    if (!is.null(loading_labels)) {
      # decided to let user input data labels themselves since they are not
      # included in the csv file
      text(rescaled_loadings[, 1], rescaled_loadings[, 2],
           labels = loading_labels[seq_len(nrow(rescaled_loadings))],
           cex = 0.7 * cex, pos = 4)
    }
  }
}
#printing all 10 plots
#mymfa: the mfa object
#d1: horizontal axes
#d2: vertical axes
#loading_labels: the label that user could input themselves
# Produce the partial factor score / variable loading overlay for every
# data table of the mfa object, one plot per table.
# mymfa: the mfa object
# d1, d2: dimensions mapped to the horizontal / vertical axis
# loading_labels: optional matrix of labels, one column per data table
# cex: scaling factor passed through to plot_pfs_vl()
plot_pfs_vl_all <- function (mymfa, d1 = 1, d2 = 2, loading_labels = NULL, cex = 1) {
  total <- length(mymfa$partial_factor_scores)
  # The plots are intentionally emitted one by one rather than arranged on a
  # single page: the number of data tables (and hence plots) is unknown in
  # general, so a fixed page layout would be fragile (per discussion with
  # Gaston, the original author's advisor).
  # seq_len() (not 1:total) correctly yields an empty loop when total == 0.
  for (i in seq_len(total)) {
    # when a label matrix is supplied, column i holds the labels for table i
    labels_i <- if (is.null(loading_labels)) NULL else loading_labels[, i]
    plot_pfs_vl(mymfa, i, d1, d2, loading_labels = labels_i, cex = cex)
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/out-of-bag-pipes.r
\name{blocked_prequential}
\alias{blocked_prequential}
\title{Prequential Procedure in Blocks}
\usage{
blocked_prequential(x, nfolds, FUN, .rbind = TRUE, ...)
}
\arguments{
\item{x}{data to split into \code{nfolds} blocks;}
\item{nfolds}{number of blocks to split data into;}
\item{FUN}{to apply to train/test;}
\item{.rbind}{logical. If TRUE, the results from
FUN are \strong{rbind}ed;}
\item{...}{further parameters to FUN}
}
\description{
Prequential Procedure in Blocks
}
\seealso{
\code{\link{intraining_estimations}}
function to use as \strong{FUN} parameter.
}
\keyword{internal}
| /man/blocked_prequential.Rd | no_license | vcerqueira/tsensembler | R | false | true | 688 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/out-of-bag-pipes.r
\name{blocked_prequential}
\alias{blocked_prequential}
\title{Prequential Procedure in Blocks}
\usage{
blocked_prequential(x, nfolds, FUN, .rbind = TRUE, ...)
}
\arguments{
\item{x}{data to split into \code{nfolds} blocks;}
\item{nfolds}{number of blocks to split data into;}
\item{FUN}{to apply to train/test;}
\item{.rbind}{logical. If TRUE, the results from
FUN are \strong{rbind}ed;}
\item{...}{further parameters to FUN}
}
\description{
Prequential Procedure in Blocks
}
\seealso{
\code{\link{intraining_estimations}}
function to use as \strong{FUN} parameter.
}
\keyword{internal}
|
# Auto-generated fuzzing fixture (AFL/valgrind harness): builds a fixed
# argument list, invokes the package-internal C++ routine via do.call(),
# and prints the result structure so crashes or memory errors surface.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 1.22810536108214e+146, 1.83028393277323e-232, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result)
result <- do.call(CNull:::communities_individual_based_sampling_beta,testlist)
str(result) |
## Fit statistical model to smooth the raw cell level biomass.
## The model fits in two parts - first the proportion of points occupied by trees
## (this is much more important for the taxon-level fitting)
## then the average biomass for occupied points (called potential biomass).
## Estimated biomass is the product of occupancy and potential.
## Globals expected from the calling environment: interim_results_dir, use_agb,
## n_cores, taxa, mw, grid, pred_grid_west, fit_scale_biomass, n_stat_samples,
## k_occ_taxon_biomass, k_pot_taxon_biomass, and the functions
## calc_biomass_taxon() and fit().
library(dplyr)
library(assertthat)
load(file.path(interim_results_dir, paste0('cell_with_biomass_grid',
                                           ifelse(use_agb, '_agb', ''), '.Rda')))
## Allow for parallelization across taxa, including on Berkeley Statistics cluster with SLURM scheduler
library(doParallel)
if(n_cores == 0) {
    if(Sys.getenv("SLURM_JOB_ID") != "") {
        ## bug fix: Sys.getenv() returns a character string; coerce it to an
        ## integer so registerDoParallel() receives a numeric core count
        n_cores <- as.integer(Sys.getenv("SLURM_CPUS_PER_TASK"))
    } else n_cores <- detectCores()
}
registerDoParallel(cores = n_cores)
## smoothing constants must be set by the caller before sourcing this script
if(!exists('k_pot_taxon_biomass'))
    stop("Must specify 'k_pot_taxon_biomass'")
if(!exists('k_occ_taxon_biomass'))
    stop("Must specify 'k_occ_taxon_biomass'")
taxa_to_fit <- taxa
print(taxa_to_fit)
## fit each taxon independently (one parallel task per taxon)
biomass_taxon <- foreach(taxonIdx = seq_along(taxa_to_fit)) %dopar% {
    taxon <- taxa_to_fit[taxonIdx]
    ## add taxon-specific point-level biomass to dataset
    tmp <- mw %>% mutate(biomass_focal = calc_biomass_taxon(num_trees, biomass1, biomass2, density_for_biomass, L3s_tree1, L3s_tree2, taxon))
    assert_that(sum(is.na(tmp$biomass_focal)) == 0,
                msg = paste0("Found missing biomass values for taxon ", taxon))
    ## total number of survey points per grid cell
    cell_full_taxon <- tmp %>% group_by(cell) %>% summarize(points_total = n())
    ## biomass stats averaged over occupied points
    cell_occ <- tmp %>% filter(biomass_focal > 0) %>%
        group_by(cell) %>%
        summarize(avg = mean(biomass_focal),
                  geom_avg = mean(log(biomass_focal)),
                  points_occ = n())
    ## combine total points, occupied points and biomass stats; cells with no
    ## occupied points get points_occ = 0
    cell_full_taxon <- cell_full_taxon %>% left_join(cell_occ, by = c("cell" = "cell")) %>%
        left_join(grid, by = c("cell" = "cell")) %>%
        mutate(points_occ = ifelse(is.na(points_occ), 0 , points_occ))
    ## fit stats model (try() so one failing taxon does not abort the others)
    try(fit(cell_full_taxon, newdata = pred_grid_west, k_occ = k_occ_taxon_biomass, k_pot = k_pot_taxon_biomass, unc = TRUE, return_model = FALSE, type_pot = fit_scale_biomass, num_draws = n_stat_samples, save_draws = TRUE, use_bam = TRUE, bound_draws_low = TRUE))
}
names(biomass_taxon) <- taxa_to_fit
save(biomass_taxon, file = file.path(interim_results_dir, paste0('fitted_taxon_biomass', ifelse(use_agb, '_agb',''), '.Rda')))
| /R/stat_modeling/3_fit_taxon_biomass.R | no_license | ian-shuman/PLS_products | R | false | false | 2,678 | r | ## Fit statistical model to smooth the raw cell level biomass.
## The model fits in two parts - first the proportion of points occupied by trees
## (this is much more important for the taxon-level fitting)
## then the average biomass for occupied points (called potential biomass).
## Estimated biomass is the product of occupancy and potential.
## Globals expected from the calling environment: interim_results_dir, use_agb,
## n_cores, taxa, mw, grid, pred_grid_west, fit_scale_biomass, n_stat_samples,
## k_occ_taxon_biomass, k_pot_taxon_biomass, and the functions
## calc_biomass_taxon() and fit().
library(dplyr)
library(assertthat)
load(file.path(interim_results_dir, paste0('cell_with_biomass_grid',
                                           ifelse(use_agb, '_agb', ''), '.Rda')))
## Allow for parallelization across taxa, including on Berkeley Statistics cluster with SLURM scheduler
library(doParallel)
if(n_cores == 0) {
    if(Sys.getenv("SLURM_JOB_ID") != "") {
        ## bug fix: Sys.getenv() returns a character string; coerce it to an
        ## integer so registerDoParallel() receives a numeric core count
        n_cores <- as.integer(Sys.getenv("SLURM_CPUS_PER_TASK"))
    } else n_cores <- detectCores()
}
registerDoParallel(cores = n_cores)
## smoothing constants must be set by the caller before sourcing this script
if(!exists('k_pot_taxon_biomass'))
    stop("Must specify 'k_pot_taxon_biomass'")
if(!exists('k_occ_taxon_biomass'))
    stop("Must specify 'k_occ_taxon_biomass'")
taxa_to_fit <- taxa
print(taxa_to_fit)
## fit each taxon independently (one parallel task per taxon)
biomass_taxon <- foreach(taxonIdx = seq_along(taxa_to_fit)) %dopar% {
    taxon <- taxa_to_fit[taxonIdx]
    ## add taxon-specific point-level biomass to dataset
    tmp <- mw %>% mutate(biomass_focal = calc_biomass_taxon(num_trees, biomass1, biomass2, density_for_biomass, L3s_tree1, L3s_tree2, taxon))
    assert_that(sum(is.na(tmp$biomass_focal)) == 0,
                msg = paste0("Found missing biomass values for taxon ", taxon))
    ## total number of survey points per grid cell
    cell_full_taxon <- tmp %>% group_by(cell) %>% summarize(points_total = n())
    ## biomass stats averaged over occupied points
    cell_occ <- tmp %>% filter(biomass_focal > 0) %>%
        group_by(cell) %>%
        summarize(avg = mean(biomass_focal),
                  geom_avg = mean(log(biomass_focal)),
                  points_occ = n())
    ## combine total points, occupied points and biomass stats; cells with no
    ## occupied points get points_occ = 0
    cell_full_taxon <- cell_full_taxon %>% left_join(cell_occ, by = c("cell" = "cell")) %>%
        left_join(grid, by = c("cell" = "cell")) %>%
        mutate(points_occ = ifelse(is.na(points_occ), 0 , points_occ))
    ## fit stats model (try() so one failing taxon does not abort the others)
    try(fit(cell_full_taxon, newdata = pred_grid_west, k_occ = k_occ_taxon_biomass, k_pot = k_pot_taxon_biomass, unc = TRUE, return_model = FALSE, type_pot = fit_scale_biomass, num_draws = n_stat_samples, save_draws = TRUE, use_bam = TRUE, bound_draws_low = TRUE))
}
names(biomass_taxon) <- taxa_to_fit
save(biomass_taxon, file = file.path(interim_results_dir, paste0('fitted_taxon_biomass', ifelse(use_agb, '_agb',''), '.Rda')))
# Fit a simple linear regression on the queue data, then a multivariate
# regression on the candy data, and predict for two held-out candies plus
# one hypothetical new candy.
queue <- read.csv('/home/zeio/ml-in-r/linear-regression/queue.csv', header = TRUE, sep = "\t")
print("Mean of the column values from the read queue dataset:")
colMeans(queue[, c("x", "y")])
simple_fit <- lm(y ~ x, data = queue)
print("Summary of the fitted linear regression model:")
print(summary(simple_fit))
candies <- read.csv("/home/zeio/ml-in-r/linear-regression/candies.csv", header = TRUE, sep = ",")
# hold out two known candies from the training set
held_out <- c("Haribo Twin Snakes", "Hersheys Krackel")
filtered_candies <- candies[!(candies$competitorname %in% held_out), ]
multi_fit <- lm(winpercent ~ chocolate + fruity + caramel + peanutyalmondy + nougat +
                  crispedricewafer + hard + bar + pluribus + sugarpercent + pricepercent,
                data = filtered_candies)
# test set: the two held-out candies plus one hypothetical new candy
test_existing_candies <- candies[candies$competitorname %in% held_out, ]
predictor_cols <- c("chocolate", "fruity", "caramel", "peanutyalmondy", "nougat",
                    "crispedricewafer", "hard", "bar", "pluribus", "sugarpercent", "pricepercent")
test_new_candies <- data.frame(chocolate = 0, fruity = 1, caramel = 1, peanutyalmondy = 1,
                               nougat = 0, crispedricewafer = 0, hard = 1, bar = 0,
                               pluribus = 0, sugarpercent = 0.32, pricepercent = 0.219)
test_candies <- rbind(test_existing_candies[, predictor_cols], test_new_candies)
predictions <- predict(multi_fit, newdata = test_candies)
print("Predictions from the multivariate linear regression model: ")
print(predictions)
| /linear-regression/fit-test.r | no_license | zeionara/ml-in-r | R | false | false | 1,463 | r | queue <- read.csv('/home/zeio/ml-in-r/linear-regression/queue.csv', header=T, sep="\t")
# Report column means of the queue data loaded above, fit a simple linear
# regression, then a multivariate regression on the candy data with two
# held-out candies plus one hypothetical new candy for prediction.
print("Mean of the column values from the read queue dataset:")
colMeans(queue[, c("x", "y")])
simple_fit <- lm(y ~ x, data = queue)
print("Summary of the fitted linear regression model:")
print(summary(simple_fit))
candies <- read.csv("/home/zeio/ml-in-r/linear-regression/candies.csv", header = TRUE, sep = ",")
# hold out two known candies from the training set
held_out <- c("Haribo Twin Snakes", "Hersheys Krackel")
filtered_candies <- candies[!(candies$competitorname %in% held_out), ]
multi_fit <- lm(winpercent ~ chocolate + fruity + caramel + peanutyalmondy + nougat +
                  crispedricewafer + hard + bar + pluribus + sugarpercent + pricepercent,
                data = filtered_candies)
# test set: the two held-out candies plus one hypothetical new candy
test_existing_candies <- candies[candies$competitorname %in% held_out, ]
predictor_cols <- c("chocolate", "fruity", "caramel", "peanutyalmondy", "nougat",
                    "crispedricewafer", "hard", "bar", "pluribus", "sugarpercent", "pricepercent")
test_new_candies <- data.frame(chocolate = 0, fruity = 1, caramel = 1, peanutyalmondy = 1,
                               nougat = 0, crispedricewafer = 0, hard = 1, bar = 0,
                               pluribus = 0, sugarpercent = 0.32, pricepercent = 0.219)
test_candies <- rbind(test_existing_candies[, predictor_cols], test_new_candies)
predictions <- predict(multi_fit, newdata = test_candies)
print("Predictions from the multivariate linear regression model: ")
print(predictions)
# Auto-generated fuzzing fixture (AFL/valgrind harness): builds a fixed
# argument list, invokes the package-internal C++ routine via do.call(),
# and prints the result structure so crashes or memory errors surface.
testlist <- list(AgeVector = c(-4.73074171454048e-167, 2.2262381097027e-76, -9.12990429452974e-204, 5.97087417427845e-79, 4.7390525269307e-300, 6.58361441690132e-121, 3.58611068565168e-154, -2.94504776827523e-186, 2.62380314702636e-116, -6.78950518864266e+23, 6.99695749856012e-167, 86485.676793021, 1.11271562183704e+230, 1.94114173595984e-186, 1.44833381226225e-178, -6.75217876587581e-69, 1.17166524186752e-15, -4.66902120197297e-64, -1.96807327384856e+304, 4.43806122192432e-53, 9.29588680224717e-276, -6.49633240047463e-239, -1.22140819059424e-138, 5.03155164774999e-80, -6.36956558303921e-38, 7.15714506860012e-155, -1.05546603899445e-274, -3.66720914317747e-169, -6.94681701552128e+38, 2.93126040859825e-33, 2.03804078100055e-84, 3.62794352816579e+190, 3.84224576683191e+202, 2.90661893502594e+44, -5.43046915655589e-132, -1.22315376742253e-152), ExpressionMatrix = structure(c(4.80597147865938e+96, 6.97343932706536e+155, 1.3267342810479e+281, 1.34663897260867e+171, 1.76430141680543e+158, 1.20021255064002e-241, 1.72046093489436e+274, 4.64807629890539e-66, 3.23566990107388e-38, 3.70896378162114e-42, 1.09474740380531e+92, 7.49155705745727e-308, 3.26639180474928e+224, 3.21841801500177e-79, 4.26435540037564e-295, 1.40002857639358e+82, 47573397570345336, 2.00517157311369e-187, 2.74035572944044e+70, 2.89262435086883e-308, 6.65942057982148e-198, 1.10979548758712e-208, 1.40208057226312e-220, 6.25978904299555e-111, 1.06191688875219e+167, 1.1857452172049, 7.01135380962132e-157, 4.49610615342627e-308, 8.04053421408348e+261, 6.23220855980985e+275, 1.91601752509744e+141, 2.27737212344351e-244, 1.6315101795754e+126, 3.83196182917788e+160, 1.53445011275161e-192), .Dim = c(5L, 7L)), permutations = 415362983L)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result)
result <- do.call(myTAI:::cpp_bootMatrix,testlist)
str(result) |
# Figure S4: total wild chum run over time (sockeye - pink figure for appendix)
dat <- read.csv("data/salmon data/data for analysis/PWS_Wild_Chum_CSV.csv")
pdf("figures/Fig S4 Unakwik.pdf")
plot(dat$Year, dat$Total_Rtn / 1000,
     xlab = "Year", ylab = "Total run (thousands)",
     type = "b", lwd = 2, pch = 16)
dev.off()
| /figures/Figure S4.R | no_license | NCEAS/pfx-covariation-pws | R | false | false | 273 | r | #Figure S4
# sockeye - pink figure for appendix: total wild chum run over time
dat <- read.csv("data/salmon data/data for analysis/PWS_Wild_Chum_CSV.csv")
pdf("figures/Fig S4 Unakwik.pdf")
plot(dat$Year, dat$Total_Rtn / 1000,
     xlab = "Year", ylab = "Total run (thousands)",
     type = "b", lwd = 2, pch = 16)
dev.off()
## Difference-in-differences analysis of drug vs. placebo attitudes ====
options(scipen = 999)  # suppress scientific notation in printed output

## Load and label the data ====
did <- read.csv("C:/Users/Owner/Desktop/Victoria Data - Long.csv")
did$Treatment <- factor(did$Treatment, levels = c(2, 1),
                        labels = c("Placebo", "Drug"))
did$Time <- factor(did$Time, levels = c(1, 2),
                   labels = c("Time 1", "Time 2"))

## Difference-in-differences model: the Treatment:Time interaction
## coefficient is the DiD estimate
did_model <- lm(Attitude ~ Treatment * Time, data = did)
summary(did_model)

## Plot group means over time ====
## looks like a difference even if it doesn't reach statistical significance
library(tidyverse)
did_summary <- did %>%
  group_by(Treatment, Time) %>%
  na.omit() %>%
  summarise(Attitude = mean(Attitude))
ggplot(did_summary, aes(x = Time, y = Attitude, color = Treatment)) +
  geom_point() +
  geom_line(aes(group = Treatment))
| /Difference-in-Differences-Example.R | no_license | Suggestions-Only/Difference-in-Differences-Analysis | R | false | false | 852 | r | ##useful numbers====
options(scipen = 999)  # suppress scientific notation in printed output

## Load and label the data ====
did <- read.csv("C:/Users/Owner/Desktop/Victoria Data - Long.csv")
did$Treatment <- factor(did$Treatment, levels = c(2, 1),
                        labels = c("Placebo", "Drug"))
did$Time <- factor(did$Time, levels = c(1, 2),
                   labels = c("Time 1", "Time 2"))

## Difference-in-differences model: the Treatment:Time interaction
## coefficient is the DiD estimate
did_model <- lm(Attitude ~ Treatment * Time, data = did)
summary(did_model)

## Plot group means over time ====
## looks like a difference even if it doesn't reach statistical significance
library(tidyverse)
did_summary <- did %>%
  group_by(Treatment, Time) %>%
  na.omit() %>%
  summarise(Attitude = mean(Attitude))
ggplot(did_summary, aes(x = Time, y = Attitude, color = Treatment)) +
  geom_point() +
  geom_line(aes(group = Treatment))
|
## Move into the organoid-layer GSEA analysis directory (machine-specific path)
setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190429_GSEA_scRNA_organoid_layers")
## Load the precomputed GSEA results object shipped with the package
data(pGSEA)
## Writing the per-location GSEA tables is currently disabled;
## uncomment to regenerate the output files.
#write.gsea(location='CENTRAL', gsea.list=gsea_CENTRAL)
#write.gsea(location='MIDDLE', gsea.list=gsea_MIDDLE)
#write.gsea(location='SURFACE', gsea.list=gsea_SURFACE)
| /docs/Code/pdx_biopsy_analysis/GSEA_biopsy_clusters_042320/Codes/03GSEA_writeGSEA.R | no_license | davidbmorse/SEEP | R | false | false | 320 | r | setwd("/Users/morsedb/Documents/Projects/PSS paint sort sequence/Analysis/organoidsComb_190226/R_scripts/190429_GSEA_scRNA_organoid_layers")
## Load the precomputed GSEA results object shipped with the package
data(pGSEA)
## Writing the per-location GSEA tables is currently disabled;
## uncomment to regenerate the output files.
#write.gsea(location='CENTRAL', gsea.list=gsea_CENTRAL)
#write.gsea(location='MIDDLE', gsea.list=gsea_MIDDLE)
#write.gsea(location='SURFACE', gsea.list=gsea_SURFACE)
|
#' Infer interest rates.
#'
#' Infers the interest rates for a given data set.
#'
#'@param topred Must be a data.frame in the same format as the one obtained from \link{postprocess}.
#'@param model If \code{NULL} the model result already included in the package will be used. Otherwise this option is meant to be used when running \link{reproduceAnalysis}.
#'
#'@return A data.frame with two columns: Estimate and SE. The names are self explanatory.
#'
#'@export
inferInterestRate <- function(topred, model=NULL) {
  ## randomForest must be loadable so predict() can dispatch to
  ## predict.randomForest(). requireNamespace() replaces require(), whose
  ## FALSE return value was previously ignored.
  if (!requireNamespace("randomForest", quietly = TRUE)) {
    stop("Package 'randomForest' is required but is not installed.",
         call. = FALSE)
  }
  if (is.null(model)) {
    ## Load the model shipped with the package; system.file() returns ""
    ## when the file is missing, which load() would otherwise report
    ## cryptically.
    toload <- system.file("lcollado754run/VarSelect/model.RData",
                          package = "lcollado754")
    if (!nzchar(toload)) {
      stop("Bundled model file 'lcollado754run/VarSelect/model.RData' was not found.",
           call. = FALSE)
    }
    ## load() defines `model` in the function environment.
    load(toload)
  }
  ## Per-tree predictions: `aggregate` is the forest average, `individual`
  ## holds one column per tree.
  predictions <- predict(object = model, newdata = topred,
                         type = "response", predict.all = TRUE)
  ## SE = spread of the individual tree predictions for each observation.
  se <- apply(predictions$individual, 1, sd)
  res <- data.frame(Estimate = predictions$aggregate, SE = se)
  rownames(res) <- rownames(topred)
  ## Done
  res
}
| /final/lcollado754/R/inferInterestRate.R | no_license | lcolladotor/lcollado754 | R | false | false | 985 | r | #' Infer interest rates.
#'
#' Infers the interest rates for a given data set.
#'
#'@param topred Must be a data.frame in the same format as the one obtained from \link{postprocess}.
#'@param model If \code{NULL} the model result already included in the package will be used. Otherwise this option is meant to be used when running \link{reproduceAnalysis}.
#'
#'@return A data.frame with two columns: Estimate and SE. The names are self explanatory.
#'
#'@export
inferInterestRate <- function(topred, model=NULL) {
  ## randomForest must be loadable so predict() can dispatch to
  ## predict.randomForest(). requireNamespace() replaces require(), whose
  ## FALSE return value was previously ignored.
  if (!requireNamespace("randomForest", quietly = TRUE)) {
    stop("Package 'randomForest' is required but is not installed.",
         call. = FALSE)
  }
  if (is.null(model)) {
    ## Load the model shipped with the package; system.file() returns ""
    ## when the file is missing, which load() would otherwise report
    ## cryptically.
    toload <- system.file("lcollado754run/VarSelect/model.RData",
                          package = "lcollado754")
    if (!nzchar(toload)) {
      stop("Bundled model file 'lcollado754run/VarSelect/model.RData' was not found.",
           call. = FALSE)
    }
    ## load() defines `model` in the function environment.
    load(toload)
  }
  ## Per-tree predictions: `aggregate` is the forest average, `individual`
  ## holds one column per tree.
  predictions <- predict(object = model, newdata = topred,
                         type = "response", predict.all = TRUE)
  ## SE = spread of the individual tree predictions for each observation.
  se <- apply(predictions$individual, 1, sd)
  res <- data.frame(Estimate = predictions$aggregate, SE = se)
  rownames(res) <- rownames(topred)
  ## Done
  res
}
|
#' @title
#' Write/Import records to a REDCap project
#'
#' @description
#' This function uses REDCap's API to select and return data.
#'
#' @param ds The [base::data.frame()] or [tibble::tibble()]
#' to be imported into the REDCap project.
#' Required.
#' @param redcap_uri The
#' [uri](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier)/url
#' of the REDCap server
#' typically formatted as "https://server.org/apps/redcap/api/".
#' Required.
#' @param token The user-specific string that serves as the password for a
#' project. Required.
#' @param overwrite_with_blanks A boolean value indicating if
#' blank/`NA` values in the R data frame
#' will overwrite data on the server.
#' This is the default behavior for REDCapR,
#' which essentially deletes the cell's value
#' If `FALSE`, blank/`NA` values in the data frame
#' will be ignored. Optional.
#' @param convert_logical_to_integer If `TRUE`, all [base::logical] columns
#' in `ds` are cast to an integer before uploading to REDCap.
#' Boolean values are typically represented as 0/1 in REDCap radio buttons.
#' Optional.
#' @param verbose A boolean value indicating if `message`s should be printed
#' to the R console during the operation. The verbose output might contain
#' sensitive information (*e.g.* PHI), so turn this off if the output might
#' be visible somewhere public. Optional.
#' @param config_options A list of options passed to [httr::POST()].
#' See details at [httr::httr_options()]. Optional.
#' @param handle_httr The value passed to the `handle` parameter of
#' [httr::POST()].
#' This is useful for only unconventional authentication approaches. It
#' should be `NULL` for most institutions. Optional.
#'
#' @return
#' Currently, a list is returned with the following elements:
#' * `success`: A boolean value indicating if the operation was apparently
#' successful.
#' * `status_code`: The
#' [http status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes)
#' of the operation.
#' * `outcome_message`: A human readable string indicating the operation's
#' outcome.
#' * `records_affected_count`: The number of records inserted or updated.
#' * `affected_ids`: The subject IDs of the inserted or updated records.
#' * `elapsed_seconds`: The duration of the function.
#' * `raw_text`: If an operation is NOT successful, the text returned by
#' REDCap. If an operation is successful, the `raw_text` is returned as an
#' empty string to save RAM.
#'
#' @details
#' Currently, the function doesn't modify any variable types to conform to
#' REDCap's supported variables. See [validate_for_write()] for a helper
#' function that checks for some common important conflicts.
#'
#' @author
#' Will Beasley
#'
#' @references
#' The official documentation can be found on the 'API Help Page'
#' and 'API Examples' pages on the REDCap wiki (*i.e.*,
#' https://community.projectredcap.org/articles/456/api-documentation.html and
#' https://community.projectredcap.org/articles/462/api-examples.html).
#' If you do not have an account for the wiki, please ask your campus REDCap
#' administrator to send you the static material.
#'
#' @examples
#' if (FALSE) {
#' # Define some constants
#' uri <- "https://bbmc.ouhsc.edu/redcap/api/"
#' token <- "D70F9ACD1EDD6F151C6EA78683944E98"
#'
#' # Read the dataset for the first time.
#' result_read1 <- REDCapR::redcap_read_oneshot(redcap_uri=uri, token=token)
#' ds1 <- result_read1$data
#' ds1$telephone
#'
#' # Manipulate a field in the dataset in a VALID way
#' ds1$telephone <- paste0("(405) 321-000", seq_len(nrow(ds1)))
#'
#' ds1 <- ds1[1:3, ]
#' ds1$age <- NULL; ds1$bmi <- NULL # Drop the calculated fields before writing.
#' result_write <- REDCapR::redcap_write_oneshot(ds=ds1, redcap_uri=uri, token=token)
#'
#' # Read the dataset for the second time.
#' result_read2 <- REDCapR::redcap_read_oneshot(redcap_uri=uri, token=token)
#' ds2 <- result_read2$data
#' ds2$telephone
#'
#' # Manipulate a field in the dataset in an INVALID way. A US exchange can't be '111'.
#' ds1$telephone <- paste0("(405) 111-000", seq_len(nrow(ds1)))
#'
#' # This next line will throw an error.
#' result_write <- REDCapR::redcap_write_oneshot(ds=ds1, redcap_uri=uri, token=token)
#' result_write$raw_text
#' }
#' @export
redcap_write_oneshot <- function(
  ds,
  redcap_uri,
  token,
  overwrite_with_blanks       = TRUE,
  convert_logical_to_integer  = FALSE,
  verbose                     = TRUE,
  config_options              = NULL,
  handle_httr                 = NULL
) {
  # This prevents the R CHECK NOTE: 'No visible binding for global variable Note in R CMD check';
  # Also see if( getRversion() >= "2.15.1" ) utils::globalVariables(names=c("csv_elements"))
  # https://stackoverflow.com/questions/8096313/; https://stackoverflow.com/questions/9439256
  csv_elements <- NULL

  # Validate inputs early so callers get a clear message instead of a
  # downstream failure inside write.csv() or the http call.  The `ds` and
  # logical-flag assertions were previously missing.
  checkmate::assert_data_frame(ds)
  checkmate::assert_character(redcap_uri, any.missing=FALSE, len=1, pattern="^.{1,}$")
  checkmate::assert_character(token     , any.missing=FALSE, len=1, pattern="^.{1,}$")
  checkmate::assert_logical(overwrite_with_blanks     , any.missing=FALSE, len=1)
  checkmate::assert_logical(convert_logical_to_integer, any.missing=FALSE, len=1)

  token   <- sanitize_token(token)
  verbose <- verbose_prepare(verbose)

  # Translate the boolean into the API's overwriteBehavior keyword.
  overwrite_with_blanks <- dplyr::if_else(overwrite_with_blanks, "overwrite", "normal")

  if (convert_logical_to_integer) {
    # REDCap radio buttons store booleans as 0/1.
    # NOTE(review): mutate_if() is superseded; migrate to
    # mutate(across(where(is.logical), as.integer)) once the package's
    # minimum dplyr version guarantees `where()` is available.
    ds <-
      ds %>%
      dplyr::mutate_if(is.logical, as.integer)
  }

  # Serialize the data frame to a single CSV string entirely in memory;
  # the text connection writes into the local `csv_elements` vector.
  con <- base::textConnection(
    object = "csv_elements",
    open   = "w",
    local  = TRUE
  )
  utils::write.csv(ds, con, row.names = FALSE, na = "")
  close(con)
  csv <- paste(csv_elements, collapse = "\n")
  rm(csv_elements, con)

  post_body <- list(
    token             = token,
    content           = "record",
    format            = "csv",
    type              = "flat",
    # These next values separate the import from the export API call
    # overwriteBehavior:
    #   *normal* - blank/empty values will be ignored [default];
    #   *overwrite* - blank/empty values are valid and will overwrite data
    data              = csv,
    overwriteBehavior = overwrite_with_blanks,
    returnContent     = "ids",
    returnFormat      = "csv"
  )

  # This is the important call that communicates with the REDCap server.
  kernel <-
    kernel_api(
      redcap_uri     = redcap_uri,
      post_body      = post_body,
      config_options = config_options,
      handle_httr    = handle_httr
    )

  if (kernel$success) {
    # On success REDCap returns a small CSV: a header line followed by the
    # affected record ids, one per line.
    elements               <- unlist(strsplit(kernel$raw_text, split="\\n"))
    affected_ids           <- as.character(elements[-1])
    records_affected_count <- length(affected_ids)
    outcome_message        <- sprintf(
      "%s records were written to REDCap in %0.1f seconds.",
      format(records_affected_count, big.mark = ",", scientific = FALSE, trim = TRUE),
      kernel$elapsed_seconds
    )

    # If an operation is successful, the `raw_text` is no longer returned to save RAM. The content is not really necessary with httr's status message exposed.
    kernel$raw_text <- ""
  } else { # If the returned content wasn't recognized as valid IDs, then
    affected_ids           <- character(0) # Return an empty array
    records_affected_count <- NA_integer_
    outcome_message        <- sprintf(
      "The REDCapR write/import operation was not successful. The error message was:\n%s",
      kernel$raw_text
    )
  }

  if (verbose)
    message(outcome_message)

  list(
    success                = kernel$success,
    status_code            = kernel$status_code,
    outcome_message        = outcome_message,
    records_affected_count = records_affected_count,
    affected_ids           = affected_ids,
    elapsed_seconds        = kernel$elapsed_seconds,
    raw_text               = kernel$raw_text
  )
}
| /R/redcap-write-oneshot.R | permissive | the-mad-statter/REDCapR | R | false | false | 7,731 | r | #' @title
#' Write/Import records to a REDCap project
#'
#' @description
#' This function uses REDCap's API to select and return data.
#'
#' @param ds The [base::data.frame()] or [tibble::tibble()]
#' to be imported into the REDCap project.
#' Required.
#' @param redcap_uri The
#' [uri](https://en.wikipedia.org/wiki/Uniform_Resource_Identifier)/url
#' of the REDCap server
#' typically formatted as "https://server.org/apps/redcap/api/".
#' Required.
#' @param token The user-specific string that serves as the password for a
#' project. Required.
#' @param overwrite_with_blanks A boolean value indicating if
#' blank/`NA` values in the R data frame
#' will overwrite data on the server.
#' This is the default behavior for REDCapR,
#' which essentially deletes the cell's value
#' If `FALSE`, blank/`NA` values in the data frame
#' will be ignored. Optional.
#' @param convert_logical_to_integer If `TRUE`, all [base::logical] columns
#' in `ds` are cast to an integer before uploading to REDCap.
#' Boolean values are typically represented as 0/1 in REDCap radio buttons.
#' Optional.
#' @param verbose A boolean value indicating if `message`s should be printed
#' to the R console during the operation. The verbose output might contain
#' sensitive information (*e.g.* PHI), so turn this off if the output might
#' be visible somewhere public. Optional.
#' @param config_options A list of options passed to [httr::POST()].
#' See details at [httr::httr_options()]. Optional.
#' @param handle_httr The value passed to the `handle` parameter of
#' [httr::POST()].
#' This is useful for only unconventional authentication approaches. It
#' should be `NULL` for most institutions. Optional.
#'
#' @return
#' Currently, a list is returned with the following elements:
#' * `success`: A boolean value indicating if the operation was apparently
#' successful.
#' * `status_code`: The
#' [http status code](https://en.wikipedia.org/wiki/List_of_HTTP_status_codes)
#' of the operation.
#' * `outcome_message`: A human readable string indicating the operation's
#' outcome.
#' * `records_affected_count`: The number of records inserted or updated.
#' * `affected_ids`: The subject IDs of the inserted or updated records.
#' * `elapsed_seconds`: The duration of the function.
#' * `raw_text`: If an operation is NOT successful, the text returned by
#' REDCap. If an operation is successful, the `raw_text` is returned as an
#' empty string to save RAM.
#'
#' @details
#' Currently, the function doesn't modify any variable types to conform to
#' REDCap's supported variables. See [validate_for_write()] for a helper
#' function that checks for some common important conflicts.
#'
#' @author
#' Will Beasley
#'
#' @references
#' The official documentation can be found on the 'API Help Page'
#' and 'API Examples' pages on the REDCap wiki (*i.e.*,
#' https://community.projectredcap.org/articles/456/api-documentation.html and
#' https://community.projectredcap.org/articles/462/api-examples.html).
#' If you do not have an account for the wiki, please ask your campus REDCap
#' administrator to send you the static material.
#'
#' @examples
#' if (FALSE) {
#' # Define some constants
#' uri <- "https://bbmc.ouhsc.edu/redcap/api/"
#' token <- "D70F9ACD1EDD6F151C6EA78683944E98"
#'
#' # Read the dataset for the first time.
#' result_read1 <- REDCapR::redcap_read_oneshot(redcap_uri=uri, token=token)
#' ds1 <- result_read1$data
#' ds1$telephone
#'
#' # Manipulate a field in the dataset in a VALID way
#' ds1$telephone <- paste0("(405) 321-000", seq_len(nrow(ds1)))
#'
#' ds1 <- ds1[1:3, ]
#' ds1$age <- NULL; ds1$bmi <- NULL # Drop the calculated fields before writing.
#' result_write <- REDCapR::redcap_write_oneshot(ds=ds1, redcap_uri=uri, token=token)
#'
#' # Read the dataset for the second time.
#' result_read2 <- REDCapR::redcap_read_oneshot(redcap_uri=uri, token=token)
#' ds2 <- result_read2$data
#' ds2$telephone
#'
#' # Manipulate a field in the dataset in an INVALID way. A US exchange can't be '111'.
#' ds1$telephone <- paste0("(405) 111-000", seq_len(nrow(ds1)))
#'
#' # This next line will throw an error.
#' result_write <- REDCapR::redcap_write_oneshot(ds=ds1, redcap_uri=uri, token=token)
#' result_write$raw_text
#' }
#' @export
redcap_write_oneshot <- function(
  ds,
  redcap_uri,
  token,
  overwrite_with_blanks       = TRUE,
  convert_logical_to_integer  = FALSE,
  verbose                     = TRUE,
  config_options              = NULL,
  handle_httr                 = NULL
) {
  # This prevents the R CHECK NOTE: 'No visible binding for global variable Note in R CMD check';
  # Also see if( getRversion() >= "2.15.1" ) utils::globalVariables(names=c("csv_elements"))
  # https://stackoverflow.com/questions/8096313/; https://stackoverflow.com/questions/9439256
  csv_elements <- NULL

  # Validate inputs early so callers get a clear message instead of a
  # downstream failure inside write.csv() or the http call.  The `ds` and
  # logical-flag assertions were previously missing.
  checkmate::assert_data_frame(ds)
  checkmate::assert_character(redcap_uri, any.missing=FALSE, len=1, pattern="^.{1,}$")
  checkmate::assert_character(token     , any.missing=FALSE, len=1, pattern="^.{1,}$")
  checkmate::assert_logical(overwrite_with_blanks     , any.missing=FALSE, len=1)
  checkmate::assert_logical(convert_logical_to_integer, any.missing=FALSE, len=1)

  token   <- sanitize_token(token)
  verbose <- verbose_prepare(verbose)

  # Translate the boolean into the API's overwriteBehavior keyword.
  overwrite_with_blanks <- dplyr::if_else(overwrite_with_blanks, "overwrite", "normal")

  if (convert_logical_to_integer) {
    # REDCap radio buttons store booleans as 0/1.
    # NOTE(review): mutate_if() is superseded; migrate to
    # mutate(across(where(is.logical), as.integer)) once the package's
    # minimum dplyr version guarantees `where()` is available.
    ds <-
      ds %>%
      dplyr::mutate_if(is.logical, as.integer)
  }

  # Serialize the data frame to a single CSV string entirely in memory;
  # the text connection writes into the local `csv_elements` vector.
  con <- base::textConnection(
    object = "csv_elements",
    open   = "w",
    local  = TRUE
  )
  utils::write.csv(ds, con, row.names = FALSE, na = "")
  close(con)
  csv <- paste(csv_elements, collapse = "\n")
  rm(csv_elements, con)

  post_body <- list(
    token             = token,
    content           = "record",
    format            = "csv",
    type              = "flat",
    # These next values separate the import from the export API call
    # overwriteBehavior:
    #   *normal* - blank/empty values will be ignored [default];
    #   *overwrite* - blank/empty values are valid and will overwrite data
    data              = csv,
    overwriteBehavior = overwrite_with_blanks,
    returnContent     = "ids",
    returnFormat      = "csv"
  )

  # This is the important call that communicates with the REDCap server.
  kernel <-
    kernel_api(
      redcap_uri     = redcap_uri,
      post_body      = post_body,
      config_options = config_options,
      handle_httr    = handle_httr
    )

  if (kernel$success) {
    # On success REDCap returns a small CSV: a header line followed by the
    # affected record ids, one per line.
    elements               <- unlist(strsplit(kernel$raw_text, split="\\n"))
    affected_ids           <- as.character(elements[-1])
    records_affected_count <- length(affected_ids)
    outcome_message        <- sprintf(
      "%s records were written to REDCap in %0.1f seconds.",
      format(records_affected_count, big.mark = ",", scientific = FALSE, trim = TRUE),
      kernel$elapsed_seconds
    )

    # If an operation is successful, the `raw_text` is no longer returned to save RAM. The content is not really necessary with httr's status message exposed.
    kernel$raw_text <- ""
  } else { # If the returned content wasn't recognized as valid IDs, then
    affected_ids           <- character(0) # Return an empty array
    records_affected_count <- NA_integer_
    outcome_message        <- sprintf(
      "The REDCapR write/import operation was not successful. The error message was:\n%s",
      kernel$raw_text
    )
  }

  if (verbose)
    message(outcome_message)

  list(
    success                = kernel$success,
    status_code            = kernel$status_code,
    outcome_message        = outcome_message,
    records_affected_count = records_affected_count,
    affected_ids           = affected_ids,
    elapsed_seconds        = kernel$elapsed_seconds,
    raw_text               = kernel$raw_text
  )
}
library(matrixStats)
library(abind)
library(ineq)
###############################
## Set up species/coef names ##
###############################
## NOTE(review): assumes PA (site x species presence/absence matrix), Env
## (site x covariate matrix) and JSDM (a fitted model object with
## $parameterTables, $chains and $modelList) already exist in the
## workspace -- confirm against the calling script.
c.name <- colnames(PA) # species
coef.name <- c("intercept", colnames(Env))
##########
##########
## BETA ##
##########
##########
beta.test <- JSDM$parameterTables$betaMu
beta.chain <- JSDM$chains$bgibbs # store beta chain data
beta.chain <- beta.chain[-(1:JSDM$modelList$burnin),] # remove burnin
beta.chain <- beta.chain[seq(1,nrow(beta.chain), 50),] # implement thinning (keep every 50th sample)
#######################################
### Standardise posteriors post-hoc ###
#######################################
## Divide each beta column by the original covariate SD (the intercept is
## divided by 1).  NOTE(review): the recycling in t(t(beta.chain)/dataset_sd)
## relies on beta.chain columns cycling through coefficients within each
## species block -- TODO confirm the column layout of JSDM$chains$bgibbs.
dataset_sd <- read.csv("Fungi_sd.csv") # load in original data sd
dataset_sd <- dataset_sd[,2]
dataset_sd <- c(1, dataset_sd) # add intercept
beta.chain_standardised <- t(t(beta.chain)/dataset_sd)
# for(i in seq(length(full_standardised$trace$B))){ # species ## EASIER TO DO DURING EXTRACTON FOR BORAL
# for(j in seq(length(dataset_sd))){ #covariate
# full_standardised$trace$B[[i]][,j] <- (full_standardised$trace$B[[i]][,j])/dataset_sd[j]
# }
# }
# Non-standardised extraction setup
mean.beta <- colMeans(beta.chain) # mean beta
sd.beta <- colSds(beta.chain) # sd beta
lower.beta <- colQuantiles(beta.chain, probs = 0.025) # lower CI beta
upper.beta <- colQuantiles(beta.chain, probs = 0.975) # upper CI beta
## Coefficient of variation of a sample: standard deviation over mean.
coefVar <- function(vector){
  sd(vector) / mean(vector)
}
## Coefficient of variation for every column (posterior) of beta.chain.
## vapply() with a preallocated result replaces the previous grow-with-c()
## loop, which copied the whole vector on every iteration.
cv.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) coefVar(beta.chain[, j]),
  numeric(1)
)
## Quartile coefficient of dispersion: (Q3 - Q1) / (Q3 + Q1), computed
## from the 25% and 75% sample quantiles.
qcd <- function(vector){
  qs <- quantile(vector, probs = c(0.25, 0.75))
  (qs[2] - qs[1]) / (qs[2] + qs[1])
}
## Quartile coefficient of dispersion per posterior column.  unname()
## drops the "75%" label quantile() attaches; downstream code only
## indexes these values by position.  vapply() replaces the previous
## grow-with-c() loop.
qcd.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) unname(qcd(beta.chain[, j])),
  numeric(1)
)
## Dispersion over the central 95% interval:
## (Q97.5 - Q2.5) / (Q97.5 + Q2.5).
qcd2 <- function(vector){
  qs <- quantile(vector, probs = c(0.025, 0.975))
  (qs[2] - qs[1]) / (qs[2] + qs[1])
}
## 95%-interval dispersion per posterior column; see qcd.beta above for
## why unname()/vapply() are used instead of the grow-with-c() loop.
qcd2.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) unname(qcd2(beta.chain[, j])),
  numeric(1)
)
## Gini coefficient of each posterior column (ineq::ineq).  vapply() with a
## preallocated result replaces the previous grow-with-c() loop.
gini.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) ineq(beta.chain[, j], type = "Gini"),
  numeric(1)
)
# Standardised extraction setup
# (summary statistics over the post-hoc standardised beta chain)
mean.beta_standardised <- colMeans(beta.chain_standardised) # mean beta
sd.beta_standardised <- colSds(beta.chain_standardised) # sd beta
lower.beta_standardised <- colQuantiles(beta.chain_standardised, probs = 0.025) # lower CI beta
upper.beta_standardised <- colQuantiles(beta.chain_standardised, probs = 0.975) # upper CI beta
## Column-wise dispersion statistics.  vapply() over seq_len() with a
## preallocated result replaces the previous four grow-with-c() loops,
## which copied the vectors on every iteration.  unname() drops the
## quantile labels; downstream code indexes by position only.
cv.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) coefVar(beta.chain_standardised[, j]),
  numeric(1)
)
qcd.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) unname(qcd(beta.chain_standardised[, j])),
  numeric(1)
)
qcd2.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) unname(qcd2(beta.chain_standardised[, j])),
  numeric(1)
)
gini.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) ineq(beta.chain_standardised[, j], type = "Gini"),
  numeric(1)
)
############################
## Create blank dataframe ## # Non-standardised
############################
df <- data.frame(coefficient = numeric(0), posterior.mean = numeric(0), lower = numeric(0),
upper = numeric(0), sd = numeric(0), coefVar = numeric(0), qcd = numeric(0),
qcd2 = numeric(0), gini = numeric(0), model = numeric(0), species = numeric(0))
##########################
## Extract to dataframe ## # Non-standardised
##########################
## Species i is assumed to occupy the consecutive column block
## (i-1)*length(coef.name)+1 ... i*length(coef.name) of the beta vectors.
## NOTE(review): this ordering is assumed, not checked -- confirm it matches
## how JSDM$chains$bgibbs lays out its columns.
## NOTE(review): rbind() inside the loop grows df quadratically; acceptable
## at this size, but bind a list of pieces once if it ever becomes slow.
for(i in 1:ncol(PA)){
dfr <- cbind(coef.name,
mean.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
lower.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
upper.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
sd.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
cv.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd2.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
gini.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
rep("Clark", (length(coef.name))), rep(c.name[i], length(coef.name)))
## cbind() of mixed types coerces everything to character.
colnames(dfr) <- c("coefficient", "posterior.mean", "lower", "upper", "sd",
"coefVar", "qcd", "qcd2", "gini", "model", "species")
dfr <- as.data.frame(dfr)
df <- rbind(df, dfr)
}
############################
## Create blank dataframe ## # Standardised
############################
df_standardised <- data.frame(coefficient = numeric(0), posterior.mean = numeric(0), lower = numeric(0),
upper = numeric(0), sd = numeric(0), coefVar = numeric(0), qcd = numeric(0),
qcd2 = numeric(0), gini = numeric(0), model = numeric(0), species = numeric(0))
##########################
## Extract to dataframe ## # Standardised
##########################
## Same species-block column indexing as the non-standardised extraction.
for(i in 1:ncol(PA)){
dfr <- cbind(coef.name,
mean.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
lower.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
upper.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
sd.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
cv.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd2.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
gini.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
rep("Clark", (length(coef.name))), rep(c.name[i], length(coef.name)))
colnames(dfr) <- c("coefficient", "posterior.mean", "lower", "upper", "sd",
"coefVar", "qcd", "qcd2", "gini", "model", "species")
dfr <- as.data.frame(dfr)
df_standardised <- rbind(df_standardised, dfr)
}
# Combine required parts of standardised/non-standardised:
# point estimates/CIs (cols 1:4) stay on the raw scale; the dispersion
# columns 5:9 (sd..gini) come from the standardised chain.
df_merge <- df
df_merge[,5:9] <- df_standardised[,5:9]
rownames(df_merge) <- NULL
#########
#########
## RHO ##
#########
#########
rho.test <- JSDM$parameterTables$corMu
corr.chain <- JSDM$chains$sgibbs # store correlation chain data
corr.chain <- corr.chain[-(1:JSDM$modelList$burnin),] # remove burnin
corr.chain <- corr.chain[seq(1, nrow(corr.chain), 50),] # implement thinning
## Rebuild one correlation matrix per retained MCMC sample.  The array is
## preallocated instead of grown with abind() inside the loop (which was
## O(n^2) in copies and required a dummy NA slice to be stripped after).
n.sp <- ncol(PA)
corr.array <- array(NA_real_, dim = c(n.sp, n.sp, nrow(corr.chain)))
for (i in seq_len(nrow(corr.chain))) {
  ## Each corr.chain row holds the lower triangle (incl. diagonal) of a
  ## covariance matrix.
  a <- diag(nrow = n.sp)
  a[lower.tri(a, diag = TRUE)] <- corr.chain[i, ]
  a <- t(a)                       # transpose so the filled triangle is upper
  corr.array[, , i] <- cov2cor(a) # convert covariance to correlation
}
## now can apply functions over each cell in array
## Flatten each sample's upper triangle (incl. diagonal) into one row:
## rows = samples, cols = correlation elements.
upper.corr.array <- t(apply(corr.array, 3, '[', upper.tri(corr.array[,,1], diag = T))) # returns matrix of col = each element from upper (diag=T), rows = #samples, diag = T to track future shape changes
dim(upper.corr.array) # check dimension
## Posterior mean per pairwise correlation, re-packed into an upper
## triangular matrix (lower triangle keeps the identity pattern).
mean.upper.corr.array <- colMeans(upper.corr.array) # take mean value for each correlation element
mean.corr <- diag(nrow = ncol(PA)) # blank identity matrix to fill
mean.corr[upper.tri(mean.corr, diag=T)] <- mean.upper.corr.array # fill with mean value
mean.corr
## 2.5% posterior quantile per correlation.
lower.upper.corr.array <- colQuantiles(upper.corr.array, probs = 0.025) # take lower quantile value for each correlation element
lower.corr <- diag(nrow = ncol(PA)) # blank identity matrix to fill
lower.corr[upper.tri(lower.corr, diag=T)] <- lower.upper.corr.array
lower.corr
## 97.5% posterior quantile per correlation.
upper.upper.corr.array <- colQuantiles(upper.corr.array, probs = 0.975) # take upper quantile value for each correlation element
upper.corr <- diag(nrow = ncol(PA)) # blank identity matrix to fill
upper.corr[upper.tri(upper.corr, diag=T)] <- upper.upper.corr.array
upper.corr
## Posterior SD per correlation.
sd.upper.corr.array <- colSds(upper.corr.array) # take sd value for each correlation element
sd.corr <- diag(nrow = ncol(PA)) # blank identity matrix to fill
sd.corr[upper.tri(sd.corr, diag=T)] <- sd.upper.corr.array
sd.corr
####################
#### Write CSVs ####
####################
## Final outputs: merged beta summary plus the four correlation matrices.
write.csv(df_merge, "Beta_Fungi_Clark.csv")
write.csv(mean.corr, "Rho_mean_Fungi_Clark.csv")
write.csv(lower.corr, "Rho_lower_Fungi_Clark.csv")
write.csv(upper.corr, "Rho_upper_Fungi_Clark.csv")
write.csv(sd.corr, "Rho_sd_Fungi_Clark.csv")
| /Clark - Fungi/data_extraction_Clark.R | no_license | dansmi-hub/JSDM_Inference | R | false | false | 9,324 | r | library(matrixStats)
library(abind)
library(ineq)
###############################
## Set up species/coef names ##
###############################
## NOTE(review): assumes PA (site x species presence/absence matrix), Env
## (site x covariate matrix) and JSDM (a fitted model object with
## $parameterTables, $chains and $modelList) already exist in the
## workspace -- confirm against the calling script.
c.name <- colnames(PA) # species
coef.name <- c("intercept", colnames(Env))
##########
##########
## BETA ##
##########
##########
beta.test <- JSDM$parameterTables$betaMu
beta.chain <- JSDM$chains$bgibbs # store beta chain data
beta.chain <- beta.chain[-(1:JSDM$modelList$burnin),] # remove burnin
beta.chain <- beta.chain[seq(1,nrow(beta.chain), 50),] # implement thinning (keep every 50th sample)
#######################################
### Standardise posteriors post-hoc ###
#######################################
## Divide each beta column by the original covariate SD (the intercept is
## divided by 1).  NOTE(review): the recycling in t(t(beta.chain)/dataset_sd)
## relies on beta.chain columns cycling through coefficients within each
## species block -- TODO confirm the column layout of JSDM$chains$bgibbs.
dataset_sd <- read.csv("Fungi_sd.csv") # load in original data sd
dataset_sd <- dataset_sd[,2]
dataset_sd <- c(1, dataset_sd) # add intercept
beta.chain_standardised <- t(t(beta.chain)/dataset_sd)
# for(i in seq(length(full_standardised$trace$B))){ # species ## EASIER TO DO DURING EXTRACTON FOR BORAL
# for(j in seq(length(dataset_sd))){ #covariate
# full_standardised$trace$B[[i]][,j] <- (full_standardised$trace$B[[i]][,j])/dataset_sd[j]
# }
# }
# Non-standardised extraction setup
mean.beta <- colMeans(beta.chain) # mean beta
sd.beta <- colSds(beta.chain) # sd beta
lower.beta <- colQuantiles(beta.chain, probs = 0.025) # lower CI beta
upper.beta <- colQuantiles(beta.chain, probs = 0.975) # upper CI beta
## Coefficient of variation of a posterior sample: sd over mean.
coefVar <- function(vector){
  sd(vector) / mean(vector)
}
## CV for every column (coefficient posterior) of beta.chain.
## vapply() with a preallocated result replaces the previous grow-with-c()
## loop, which copied the vector on every iteration.
cv.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) coefVar(beta.chain[, j]),
  numeric(1)
)
## Quartile coefficient of dispersion: (Q3 - Q1) / (Q3 + Q1).
qcd <- function(vector){
  q1 <- quantile(vector, probs = 0.25)
  q3 <- quantile(vector, probs = 0.75)
  (q3 - q1) / (q3 + q1)
}
## QCD per posterior column; unname() drops the "75%" label quantile()
## attaches (downstream code only indexes these values by position).
qcd.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) unname(qcd(beta.chain[, j])),
  numeric(1)
)
## As qcd() but over the central 95% interval (2.5% / 97.5% quantiles).
qcd2 <- function(vector){
  q1 <- quantile(vector, probs = 0.025)
  q3 <- quantile(vector, probs = 0.975)
  (q3 - q1) / (q3 + q1)
}
## 95%-interval QCD and Gini coefficient per posterior column.
## vapply() with preallocated results replaces the previous grow-with-c()
## loops.
qcd2.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) unname(qcd2(beta.chain[, j])),
  numeric(1)
)
gini.beta <- vapply(
  seq_len(ncol(beta.chain)),
  function(j) ineq(beta.chain[, j], type = "Gini"),
  numeric(1)
)
# Standardised extraction setup
# (summary statistics over the post-hoc standardised beta chain)
mean.beta_standardised <- colMeans(beta.chain_standardised) # mean beta
sd.beta_standardised <- colSds(beta.chain_standardised) # sd beta
lower.beta_standardised <- colQuantiles(beta.chain_standardised, probs = 0.025) # lower CI beta
upper.beta_standardised <- colQuantiles(beta.chain_standardised, probs = 0.975) # upper CI beta
## Column-wise dispersion statistics.  vapply() over seq_len() with a
## preallocated result replaces the previous four grow-with-c() loops,
## which copied the vectors on every iteration.  unname() drops the
## quantile labels; downstream code indexes by position only.
cv.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) coefVar(beta.chain_standardised[, j]),
  numeric(1)
)
qcd.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) unname(qcd(beta.chain_standardised[, j])),
  numeric(1)
)
qcd2.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) unname(qcd2(beta.chain_standardised[, j])),
  numeric(1)
)
gini.beta_standardised <- vapply(
  seq_len(ncol(beta.chain_standardised)),
  function(j) ineq(beta.chain_standardised[, j], type = "Gini"),
  numeric(1)
)
############################
## Create blank dataframe ## # Non-standardised
############################
df <- data.frame(coefficient = numeric(0), posterior.mean = numeric(0), lower = numeric(0),
upper = numeric(0), sd = numeric(0), coefVar = numeric(0), qcd = numeric(0),
qcd2 = numeric(0), gini = numeric(0), model = numeric(0), species = numeric(0))
##########################
## Extract to dataframe ## # Non-standardised
##########################
## Species i is assumed to occupy the consecutive column block
## (i-1)*length(coef.name)+1 ... i*length(coef.name) of the beta vectors.
## NOTE(review): this ordering is assumed, not checked -- confirm it matches
## how JSDM$chains$bgibbs lays out its columns.
## NOTE(review): rbind() inside the loop grows df quadratically; acceptable
## at this size, but bind a list of pieces once if it ever becomes slow.
for(i in 1:ncol(PA)){
dfr <- cbind(coef.name,
mean.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
lower.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
upper.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
sd.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
cv.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd2.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
gini.beta[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
rep("Clark", (length(coef.name))), rep(c.name[i], length(coef.name)))
## cbind() of mixed types coerces everything to character.
colnames(dfr) <- c("coefficient", "posterior.mean", "lower", "upper", "sd",
"coefVar", "qcd", "qcd2", "gini", "model", "species")
dfr <- as.data.frame(dfr)
df <- rbind(df, dfr)
}
############################
## Create blank dataframe ## # Standardised
############################
df_standardised <- data.frame(coefficient = numeric(0), posterior.mean = numeric(0), lower = numeric(0),
upper = numeric(0), sd = numeric(0), coefVar = numeric(0), qcd = numeric(0),
qcd2 = numeric(0), gini = numeric(0), model = numeric(0), species = numeric(0))
##########################
## Extract to dataframe ## # Standardised
##########################
## Same species-block column indexing as the non-standardised extraction.
for(i in 1:ncol(PA)){
dfr <- cbind(coef.name,
mean.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
lower.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
upper.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
sd.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
cv.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
qcd2.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
gini.beta_standardised[(i*length(coef.name)-(length(coef.name)-1)):(i*length(coef.name))],
rep("Clark", (length(coef.name))), rep(c.name[i], length(coef.name)))
colnames(dfr) <- c("coefficient", "posterior.mean", "lower", "upper", "sd",
"coefVar", "qcd", "qcd2", "gini", "model", "species")
dfr <- as.data.frame(dfr)
df_standardised <- rbind(df_standardised, dfr)
}
# Combine required parts of standardised/non-standardised:
# point estimates/CIs (cols 1:4) stay on the raw scale; the dispersion
# columns 5:9 (sd..gini) come from the standardised chain.
df_merge <- df
df_merge[,5:9] <- df_standardised[,5:9]
rownames(df_merge) <- NULL
#########
#########
## RHO ##
#########
#########
rho.test <- JSDM$parameterTables$corMu
corr.chain <- JSDM$chains$sgibbs # store correlation chain data
corr.chain <- corr.chain[-(1:JSDM$modelList$burnin),] # remove burnin
corr.chain <- corr.chain[seq(1, nrow(corr.chain), 50),] # implement thinning
## Rebuild one correlation matrix per retained MCMC sample.  The array is
## preallocated instead of grown with abind() inside the loop (which was
## O(n^2) in copies and required a dummy NA slice to be stripped after).
n.sp <- ncol(PA)
corr.array <- array(NA_real_, dim = c(n.sp, n.sp, nrow(corr.chain)))
for (i in seq_len(nrow(corr.chain))) {
  ## Each corr.chain row holds the lower triangle (incl. diagonal) of a
  ## covariance matrix.
  a <- diag(nrow = n.sp)
  a[lower.tri(a, diag = TRUE)] <- corr.chain[i, ]
  a <- t(a)                       # transpose so the filled triangle is upper
  corr.array[, , i] <- cov2cor(a) # convert covariance to correlation
}
## now can apply functions over each cell in array
upper.corr.array <- t(apply(corr.array, 3, '[', upper.tri(corr.array[,,1], diag = T))) # returns matrix of col = each element from upper (diag=T), rows = #samples, diag = T to track future shape changes
dim(upper.corr.array) # check dimension
mean.upper.corr.array <- colMeans(upper.corr.array) # take mean value for each correlation element
mean.corr <- diag(nrow = ncol(PA)) # blank data.frame
mean.corr[upper.tri(mean.corr, diag=T)] <- mean.upper.corr.array # fill with mean value
mean.corr
lower.upper.corr.array <- colQuantiles(upper.corr.array, probs = 0.025) # take lower quantile value for each correlation element
lower.corr <- diag(nrow = ncol(PA)) # blank data.frame
lower.corr[upper.tri(lower.corr, diag=T)] <- lower.upper.corr.array
lower.corr
upper.upper.corr.array <- colQuantiles(upper.corr.array, probs = 0.975) # take upper quantile value for each correlation element
upper.corr <- diag(nrow = ncol(PA)) # blank data.frame
upper.corr[upper.tri(upper.corr, diag=T)] <- upper.upper.corr.array
upper.corr
sd.upper.corr.array <- colSds(upper.corr.array) # take sd value for each correlation element
sd.corr <- diag(nrow = ncol(PA)) # blank data.frame
sd.corr[upper.tri(sd.corr, diag=T)] <- sd.upper.corr.array
sd.corr
####################
#### Write CSVs ####
####################
write.csv(df_merge, "Beta_Fungi_Clark.csv")
write.csv(mean.corr, "Rho_mean_Fungi_Clark.csv")
write.csv(lower.corr, "Rho_lower_Fungi_Clark.csv")
write.csv(upper.corr, "Rho_upper_Fungi_Clark.csv")
write.csv(sd.corr, "Rho_sd_Fungi_Clark.csv")
|
library(shiny)
library(networkD3)
data(MisLinks)
data(MisNodes)
#### Server ####
server <- function(input, output) {
output$simple <- renderSimpleNetwork({
src <- c("A", "A", "A", "A", "B", "B", "C", "C", "D")
target <- c("B", "C", "D", "J", "E", "F", "G", "H", "I")
networkData <- data.frame(src, target)
simpleNetwork(networkData, opacity = input$opacity)
})
output$force <- renderForceNetwork({
forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
Group = "group", opacity = input$opacity, arrows = TRUE, zoom = TRUE)
})
}
#### UI ####
ui <- shinyUI(fluidPage(
titlePanel("Shiny networkD3 "),
sidebarLayout(
sidebarPanel(
sliderInput("opacity", "Opacity (not for Sankey)", 0.6, min = 0.1,
max = 1, step = .1)
),
mainPanel(
tabsetPanel(
tabPanel("Simple Network", simpleNetworkOutput("simple")),
tabPanel("Force Network", forceNetworkOutput("force"))
)
)
)
))
#### Run ####
shinyApp(ui = ui, server = server) | /RStudioProjects/local2global/shiny/app.R | no_license | kelvinyangli/PhDProjects | R | false | false | 1,134 | r | library(shiny)
library(networkD3)
data(MisLinks)
data(MisNodes)
#### Server ####
server <- function(input, output) {
output$simple <- renderSimpleNetwork({
src <- c("A", "A", "A", "A", "B", "B", "C", "C", "D")
target <- c("B", "C", "D", "J", "E", "F", "G", "H", "I")
networkData <- data.frame(src, target)
simpleNetwork(networkData, opacity = input$opacity)
})
output$force <- renderForceNetwork({
forceNetwork(Links = MisLinks, Nodes = MisNodes, Source = "source",
Target = "target", Value = "value", NodeID = "name",
Group = "group", opacity = input$opacity, arrows = TRUE, zoom = TRUE)
})
}
#### UI ####
ui <- shinyUI(fluidPage(
titlePanel("Shiny networkD3 "),
sidebarLayout(
sidebarPanel(
sliderInput("opacity", "Opacity (not for Sankey)", 0.6, min = 0.1,
max = 1, step = .1)
),
mainPanel(
tabsetPanel(
tabPanel("Simple Network", simpleNetworkOutput("simple")),
tabPanel("Force Network", forceNetworkOutput("force"))
)
)
)
))
#### Run ####
shinyApp(ui = ui, server = server) |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% TabularTextFile.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{hasColumnHeader.TabularTextFile}
\alias{hasColumnHeader.TabularTextFile}
\alias{TabularTextFile.hasColumnHeader}
\alias{hasColumnHeader,TabularTextFile-method}
\title{Checks if there are column names in the header}
\description{
Checks if there are column names in the header.
}
\usage{
\method{hasColumnHeader}{TabularTextFile}(this, ...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{logical}}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{TabularTextFile}}.
}
\keyword{internal}
\keyword{methods}
\keyword{IO}
\keyword{programming}
| /man/hasColumnHeader.TabularTextFile.Rd | no_license | HenrikBengtsson/R.filesets | R | false | false | 911 | rd | %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% TabularTextFile.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{hasColumnHeader.TabularTextFile}
\alias{hasColumnHeader.TabularTextFile}
\alias{TabularTextFile.hasColumnHeader}
\alias{hasColumnHeader,TabularTextFile-method}
\title{Checks if there are column names in the header}
\description{
Checks if there are column names in the header.
}
\usage{
\method{hasColumnHeader}{TabularTextFile}(this, ...)
}
\arguments{
\item{...}{Not used.}
}
\value{
Returns a \code{\link[base]{logical}}.
}
\author{Henrik Bengtsson}
\seealso{
For more information see \code{\link{TabularTextFile}}.
}
\keyword{internal}
\keyword{methods}
\keyword{IO}
\keyword{programming}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{blblm}
\alias{blblm}
\title{Computes Linear Regression with Little Bag of Bootstraps}
\usage{
blblm(formula, data, m = 10, B = 5000, parallel = FALSE)
}
\arguments{
\item{formula}{linear regression formula}
\item{data}{dataframe}
\item{m}{an integer giving the number of subsets for the data}
\item{B}{an integer giving the number of bootstraps}
\item{parallel}{logical value indicating TRUE or FALSE for parallelization}
}
\value{
list of estimates and formula
}
\description{
Give a formula, data, value for m, and value for B. The user should
run plan(multisession, workers = 4) if they want to use parallelization
for example, parallel = TRUE, and they can change the number of workers to
any numeric value they wish to use.
}
\examples{
blblm(mpg ~ wt * hp, data = mtcars, m = 3, B = 100)
}
| /man/blblm.Rd | permissive | mgood338/blblm | R | false | true | 893 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/blblm.R
\name{blblm}
\alias{blblm}
\title{Computes Linear Regression with Little Bag of Bootstraps}
\usage{
blblm(formula, data, m = 10, B = 5000, parallel = FALSE)
}
\arguments{
\item{formula}{linear regression formula}
\item{data}{dataframe}
\item{m}{an integer giving the number of subsets for the data}
\item{B}{an integer giving the number of bootstraps}
\item{parallel}{logical value indicating TRUE or FALSE for parallelization}
}
\value{
list of estimates and formula
}
\description{
Give a formula, data, value for m, and value for B. The user should
run plan(multisession, workers = 4) if they want to use parallelization
for example, parallel = TRUE, and they can change the number of workers to
any numeric value they wish to use.
}
\examples{
blblm(mpg ~ wt * hp, data = mtcars, m = 3, B = 100)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{MarketplaceDeal}
\alias{MarketplaceDeal}
\title{MarketplaceDeal Object}
\usage{
MarketplaceDeal(buyerPrivateData = NULL, creationTimeMs = NULL,
creativePreApprovalPolicy = NULL, creativeSafeFrameCompatibility = NULL,
dealId = NULL, dealServingMetadata = NULL, deliveryControl = NULL,
externalDealId = NULL, flightEndTimeMs = NULL, flightStartTimeMs = NULL,
inventoryDescription = NULL, isRfpTemplate = NULL,
lastUpdateTimeMs = NULL, name = NULL, productId = NULL,
productRevisionNumber = NULL, programmaticCreativeSource = NULL,
proposalId = NULL, sellerContacts = NULL, sharedTargetings = NULL,
syndicationProduct = NULL, terms = NULL, webPropertyCode = NULL)
}
\arguments{
\item{buyerPrivateData}{Buyer private data (hidden from seller)}
\item{creationTimeMs}{The time (ms since epoch) of the deal creation}
\item{creativePreApprovalPolicy}{Specifies the creative pre-approval policy (buyer-readonly)}
\item{creativeSafeFrameCompatibility}{Specifies whether the creative is safeFrame compatible (buyer-readonly)}
\item{dealId}{A unique deal-id for the deal (readonly)}
\item{dealServingMetadata}{Metadata about the serving status of this deal (readonly, writes via custom actions)}
\item{deliveryControl}{The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable}
\item{externalDealId}{The external deal id assigned to this deal once the deal is finalized}
\item{flightEndTimeMs}{Proposed flight end time of the deal (ms since epoch) This will generally be stored in a granularity of a second}
\item{flightStartTimeMs}{Proposed flight start time of the deal (ms since epoch) This will generally be stored in a granularity of a second}
\item{inventoryDescription}{Description for the deal terms}
\item{isRfpTemplate}{Indicates whether the current deal is a RFP template}
\item{lastUpdateTimeMs}{The time (ms since epoch) when the deal was last updated}
\item{name}{The name of the deal}
\item{productId}{The product-id from which this deal was created}
\item{productRevisionNumber}{The revision number of the product that the deal was created from (readonly, except on create)}
\item{programmaticCreativeSource}{Specifies the creative source for programmatic deals, PUBLISHER means creative is provided by seller and ADVERTISR means creative is provided by buyer}
\item{proposalId}{No description}
\item{sellerContacts}{Optional Seller contact information for the deal (buyer-readonly)}
\item{sharedTargetings}{The shared targeting visible to buyers and sellers}
\item{syndicationProduct}{The syndication product associated with the deal}
\item{terms}{The negotiable terms of the deal}
\item{webPropertyCode}{No description}
}
\value{
MarketplaceDeal object
}
\description{
MarketplaceDeal Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A proposal can contain multiple deals. A deal contains the terms and targeting information that is used for serving.
}
| /googleadexchangebuyerv14.auto/man/MarketplaceDeal.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 3,092 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/adexchangebuyer_objects.R
\name{MarketplaceDeal}
\alias{MarketplaceDeal}
\title{MarketplaceDeal Object}
\usage{
MarketplaceDeal(buyerPrivateData = NULL, creationTimeMs = NULL,
creativePreApprovalPolicy = NULL, creativeSafeFrameCompatibility = NULL,
dealId = NULL, dealServingMetadata = NULL, deliveryControl = NULL,
externalDealId = NULL, flightEndTimeMs = NULL, flightStartTimeMs = NULL,
inventoryDescription = NULL, isRfpTemplate = NULL,
lastUpdateTimeMs = NULL, name = NULL, productId = NULL,
productRevisionNumber = NULL, programmaticCreativeSource = NULL,
proposalId = NULL, sellerContacts = NULL, sharedTargetings = NULL,
syndicationProduct = NULL, terms = NULL, webPropertyCode = NULL)
}
\arguments{
\item{buyerPrivateData}{Buyer private data (hidden from seller)}
\item{creationTimeMs}{The time (ms since epoch) of the deal creation}
\item{creativePreApprovalPolicy}{Specifies the creative pre-approval policy (buyer-readonly)}
\item{creativeSafeFrameCompatibility}{Specifies whether the creative is safeFrame compatible (buyer-readonly)}
\item{dealId}{A unique deal-id for the deal (readonly)}
\item{dealServingMetadata}{Metadata about the serving status of this deal (readonly, writes via custom actions)}
\item{deliveryControl}{The set of fields around delivery control that are interesting for a buyer to see but are non-negotiable}
\item{externalDealId}{The external deal id assigned to this deal once the deal is finalized}
\item{flightEndTimeMs}{Proposed flight end time of the deal (ms since epoch) This will generally be stored in a granularity of a second}
\item{flightStartTimeMs}{Proposed flight start time of the deal (ms since epoch) This will generally be stored in a granularity of a second}
\item{inventoryDescription}{Description for the deal terms}
\item{isRfpTemplate}{Indicates whether the current deal is a RFP template}
\item{lastUpdateTimeMs}{The time (ms since epoch) when the deal was last updated}
\item{name}{The name of the deal}
\item{productId}{The product-id from which this deal was created}
\item{productRevisionNumber}{The revision number of the product that the deal was created from (readonly, except on create)}
\item{programmaticCreativeSource}{Specifies the creative source for programmatic deals, PUBLISHER means creative is provided by seller and ADVERTISR means creative is provided by buyer}
\item{proposalId}{No description}
\item{sellerContacts}{Optional Seller contact information for the deal (buyer-readonly)}
\item{sharedTargetings}{The shared targeting visible to buyers and sellers}
\item{syndicationProduct}{The syndication product associated with the deal}
\item{terms}{The negotiable terms of the deal}
\item{webPropertyCode}{No description}
}
\value{
MarketplaceDeal object
}
\description{
MarketplaceDeal Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A proposal can contain multiple deals. A deal contains the terms and targeting information that is used for serving.
}
|
library(RatingScaleReduction)
### Name: SHSData
### Title: Somerville Happiness Survey
### Aliases: SHSData
### Keywords: datasets
### ** Examples
data(SHSData)
## maybe str(SHSData) ; plot(SHSData) ...
| /data/genthat_extracted_code/RatingScaleReduction/examples/SHSData.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 210 | r | library(RatingScaleReduction)
### Name: SHSData
### Title: Somerville Happiness Survey
### Aliases: SHSData
### Keywords: datasets
### ** Examples
data(SHSData)
## maybe str(SHSData) ; plot(SHSData) ...
|
# identifyPatientsAndGenes.R
# for this example data package, we need a small number of patients and genes.
# the data is drawn from the TCGA GBM data used in oncoscape 1.1 and 1.2
# the genes contain at least a few mutations
# and their expression exhibits some variability
# the patients were picked so that half of them are short survivors, half of them long
#
# NA for missing values
# sample names for rownames
# gene symbols for colnames
# policies yet to be worked out for gene isoforms and multiple measurements for each sample
#
#----------------------------------------------------------------------------------------------------
library(RUnit)
# hand-chosen sample ids which are found in mrna, mut, and copy number data
long.survivors <- c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028", "TCGA.02.0080", "TCGA.02.0114",
"TCGA.06.0409", "TCGA.08.0344", "TCGA.12.0656", "TCGA.06.0125", "TCGA.02.0432")
short.survivors <- c("TCGA.12.0657", "TCGA.06.0140", "TCGA.06.0402", "TCGA.06.0201", "TCGA.06.0747",
"TCGA.06.0749", "TCGA.02.0033", "TCGA.06.0413", "TCGA.02.0037", "TCGA.06.0182")
# find all common genes, all common samples
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/mrnaGBM-304patients-1375genes.RData"))
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/mutGBM-574patients-1582genes.RData"))
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/cnvGBM-563patients-1582genes.RData"))
goi <- sort(intersect(colnames(tbl.mrna), intersect(colnames(tbl.mut), colnames(tbl.cn))))
checkEquals(length(goi), 1290)
soi <- sort(intersect(rownames(tbl.mrna), intersect(rownames(tbl.mut), rownames(tbl.cn))))
checkEquals(length(soi), 297)
checkTrue(all(short.survivors %in% soi))
checkTrue(all(long.survivors %in% soi))
patients <- sort(c(short.survivors, long.survivors))
mtx.mrna <- as.matrix(tbl.mrna[patients,])
checkEquals(dim(mtx.mrna), c(20, 1375))
# all good values
checkEquals(length(which(is.na(mtx.mrna))), 0)
checkEquals(fivenum(mtx.mrna), c(-4.1935652, -0.7241125, -0.0369353, 0.6967585, 7.8256909))
# identify the most variable genes
sd <- apply(mtx.mrna[, goi], 2, function(row) sd(row, na.rm=TRUE))
# get the 54 most variable genes
genes <- sort(names(sort(sd, decreasing=TRUE))[1:54])
mtx.mut <- as.matrix(tbl.mut)
most.mutated.genes <- names(head(sort(apply(mtx.mut, 2, function(col) length(which(!is.na(col)))), decreasing=TRUE), n=80))
genes <- sort(intersect(most.mutated.genes, goi)[1:64])
checkEquals(length(genes), 64)
save(patients, genes, file="sharedPatientsAndGenes.RData")
printf("saved patients (%d), genes (%d)", length(patients), length(genes))
| /r_modules/dataPackages/DEMOdz/inst/import/identifyPatientsAndGenes.R | permissive | pablopunk/Oncoscape | R | false | false | 2,756 | r | # identifyPatientsAndGenes.R
# for this example data package, we need a small number of patients and genes.
# the data is drawn from the TCGA GBM data used in oncoscape 1.1 and 1.2
# the genes contain at least a few mutations
# and their expression exhibits some variability
# the patients were picked so that half of them are short survivors, half of them long
#
# NA for missing values
# sample names for rownames
# gene symbols for colnames
# policies yet to be worked out for gene isoforms and multiple measurements for each sample
#
#----------------------------------------------------------------------------------------------------
library(RUnit)
# hand-chosen sample ids which are found in mrna, mut, and copy number data
long.survivors <- c("TCGA.02.0014", "TCGA.02.0021", "TCGA.02.0028", "TCGA.02.0080", "TCGA.02.0114",
"TCGA.06.0409", "TCGA.08.0344", "TCGA.12.0656", "TCGA.06.0125", "TCGA.02.0432")
short.survivors <- c("TCGA.12.0657", "TCGA.06.0140", "TCGA.06.0402", "TCGA.06.0201", "TCGA.06.0747",
"TCGA.06.0749", "TCGA.02.0033", "TCGA.06.0413", "TCGA.02.0037", "TCGA.06.0182")
# find all common genes, all common samples
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/mrnaGBM-304patients-1375genes.RData"))
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/mutGBM-574patients-1582genes.RData"))
print(load("~/oncodev/hbolouri/oncoDev12/Oncoscape/inst/extdata/tcgaGBM/cnvGBM-563patients-1582genes.RData"))
goi <- sort(intersect(colnames(tbl.mrna), intersect(colnames(tbl.mut), colnames(tbl.cn))))
checkEquals(length(goi), 1290)
soi <- sort(intersect(rownames(tbl.mrna), intersect(rownames(tbl.mut), rownames(tbl.cn))))
checkEquals(length(soi), 297)
checkTrue(all(short.survivors %in% soi))
checkTrue(all(long.survivors %in% soi))
patients <- sort(c(short.survivors, long.survivors))
mtx.mrna <- as.matrix(tbl.mrna[patients,])
checkEquals(dim(mtx.mrna), c(20, 1375))
# all good values
checkEquals(length(which(is.na(mtx.mrna))), 0)
checkEquals(fivenum(mtx.mrna), c(-4.1935652, -0.7241125, -0.0369353, 0.6967585, 7.8256909))
# identify the most variable genes
sd <- apply(mtx.mrna[, goi], 2, function(row) sd(row, na.rm=TRUE))
# get the 54 most variable genes
genes <- sort(names(sort(sd, decreasing=TRUE))[1:54])
mtx.mut <- as.matrix(tbl.mut)
most.mutated.genes <- names(head(sort(apply(mtx.mut, 2, function(col) length(which(!is.na(col)))), decreasing=TRUE), n=80))
genes <- sort(intersect(most.mutated.genes, goi)[1:64])
checkEquals(length(genes), 64)
save(patients, genes, file="sharedPatientsAndGenes.RData")
printf("saved patients (%d), genes (%d)", length(patients), length(genes))
|
#### PROJECT: Mimulus cardinalis demography 2010-2014
#### PURPOSE: Sample unique individuals from each site with replacement to create bootstrap datasets
############# From each bootstrapped dataset, vital rate models are created and IPMs are run to obtain bootstrapped lambdas
############# Replicate bootstrap datasets will be used to obtain confidence intervals around lambda estimates for each site
#### AUTHOR: Seema Sheth
#### DATE LAST MODIFIED: 20171110
# remove objects and clear workspace
rm(list = ls(all=TRUE))
# require packages
require(plyr)
require(dplyr)
# set working directory
setwd("/Users/ssheth/Google Drive/demography_PNAS_November2017")
#*******************************************************************************
#### 1. bring in M. cardinalis demography data from 2010-2014 ###
#*******************************************************************************
# Variables are:
# Site: population
# ID: unique identifier for each individual
# Region: latitudinal region that population is nested within
# Latitude: latitude of population
# Longitude: longitude of population
# Elevation: elevation of population
# Class: stage class (juvenile, adult, or NA) of plant at time = t
# Fec1: Total number of fruits per individual
# logSize: total stem length of the individual
# ClassNext: stage class (juvenile, adult, dead, or NA) of plant at time = t+1
# logSizeNext: same as "logSize" above, for t+1
# Surv: survival (1) or not (0) of individuals between time = t and time = t+1
# Year: annual transition of the long-term data at time = t (2010-2013)
# Fec0: Probability of flowering (1 if Class=="A" for adult, 0 if Class=="J" for juvenile)
# RegionRank: ordinal rank of regions from south to north
# SeedCt: mean seed count, rounded to the nearest integer, for each site
# read in data and sort by latitude
data=read.csv("Data/Mcard_demog_data_2010-2013.csv") %>% arrange(-Latitude)
# convert Site, Year and ID columns to factors
data$Site=factor(data$Site)
data$Year=factor(data$Year)
data$ID=factor(data$ID)
data$NotARecruit=factor(data$NotARecruit)
data$NotAnIndividual=factor(data$NotAnIndividual)
# Remove plants that should not have been recorded as new recruits
#### NOTE: these are plants that A. Angert noted as "wrong, definitely exclude (reasons include new plot, site not visited in prior year, ID within prior years' ranges, coordinates well outside of prior year's search)"
data=subset(data,NotARecruit!=1|is.na(NotARecruit))
unique(data$NotARecruit)
length(data$Site) # 16971 rows; NOTE: there are 8 rows in which NotAnIndividual=1 & NotARecruit=1
# obtain seed counts per fruit per site
seeds.per.site=tapply(data$SeedCt,data$Site,FUN=mean,na.rm=T) # obtain seed counts per fruit per site
seeds.per.site=data.frame(seeds.per.site,rownames(seeds.per.site)) # make into a data frame
colnames(seeds.per.site)=c("seed.ct","Site") # define column names for merging
#*******************************************************************************
#### 2. Create nested loop to obtain replicate bootstrap datasets for each site, sampling with replacement ###
#*******************************************************************************
# Obtain a data frame of unique IDs from each Site
ID.by.Site=unique(data[,1:2])
# Create a vector of unique Site names for subsetting; note this is sorted by decreasing latitude
site=unique(data$Site)
# Create empty list to be filled in loop
data.boot.rep=list()
id.boot=list()
# Set seed for random sampling to obtain reproducible results
seed=123
# Set number of bootstrap replicate datasets
n.boot=2000
# Create loop to obtain replicate bootstrap datasets
for (i in 1:length(site)) {
data.site=subset(data,Site==site[i]) # select data from site i
id.site=subset(ID.by.Site,Site==site[i]) # select list of unique individual IDs from site i
id.boot <- lapply(1:n.boot, function(j) {
set.seed(j+seed)
sample_n(id.site,size=nrow(id.site), replace = T)}) %>% ldply() # resample rows of site i's data with replacement and size=number of unique individuals in original dataset for each site and convert list to data frame
id.boot$Replicate=rep(seq(1:n.boot),each=nrow(id.site)) # create a column in data frame that corresponds to bootstrap replicate
data.boot=join(id.boot,data.site,type="left",match="all") # merge bootstrapped list of unique IDs to full dataset
data.boot.rep[[i]]=data.boot # add each site's dataframe of n.boot bootstrap replicates to list
}
# Convert list to data frame
bootstrapped.data <- do.call(rbind, data.boot.rep)
# Write bootstrapped datasets to .rds file
saveRDS(bootstrapped.data,"R_output/Mcard_demog_INDIV_BOOTSTRAP_data_2010-2013.rds")
| /PNAS_Seema_Scripts/Sheth_Angert_PNAS_2018_Rscripts/06_Bootstrapping_data.R | no_license | anstettd/a_Resurrection | R | false | false | 4,698 | r | #### PROJECT: Mimulus cardinalis demography 2010-2014
#### PURPOSE: Sample unique individuals from each site with replacement to create bootstrap datasets
############# From each bootstrapped dataset, vital rate models are created and IPMs are run to obtain bootstrapped lambdas
############# Replicate bootstrap datasets will be used to obtain confidence intervals around lambda estimates for each site
#### AUTHOR: Seema Sheth
#### DATE LAST MODIFIED: 20171110
# remove objects and clear workspace
rm(list = ls(all=TRUE))
# require packages
require(plyr)
require(dplyr)
# set working directory
setwd("/Users/ssheth/Google Drive/demography_PNAS_November2017")
#*******************************************************************************
#### 1. bring in M. cardinalis demography data from 2010-2014 ###
#*******************************************************************************
# Variables are:
# Site: population
# ID: unique identifier for each individual
# Region: latitudinal region that population is nested within
# Latitude: latitude of population
# Longitude: longitude of population
# Elevation: elevation of population
# Class: stage class (juvenile, adult, or NA) of plant at time = t
# Fec1: Total number of fruits per individual
# logSize: total stem length of the individual
# ClassNext: stage class (juvenile, adult, dead, or NA) of plant at time = t+1
# logSizeNext: same as "logSize" above, for t+1
# Surv: survival (1) or not (0) of individuals between time = t and time = t+1
# Year: annual transition of the long-term data at time = t (2010-2013)
# Fec0: Probability of flowering (1 if Class=="A" for adult, 0 if Class=="J" for juvenile)
# RegionRank: ordinal rank of regions from south to north
# SeedCt: mean seed count, rounded to the nearest integer, for each site
# read in data and sort by latitude
data=read.csv("Data/Mcard_demog_data_2010-2013.csv") %>% arrange(-Latitude)
# convert Site, Year and ID columns to factors
data$Site=factor(data$Site)
data$Year=factor(data$Year)
data$ID=factor(data$ID)
data$NotARecruit=factor(data$NotARecruit)
data$NotAnIndividual=factor(data$NotAnIndividual)
# Remove plants that should not have been recorded as new recruits
#### NOTE: these are plants that A. Angert noted as "wrong, definitely exclude (reasons include new plot, site not visited in prior year, ID within prior years' ranges, coordinates well outside of prior year's search)"
data=subset(data,NotARecruit!=1|is.na(NotARecruit))
unique(data$NotARecruit)
length(data$Site) # 16971 rows; NOTE: there are 8 rows in which NotAnIndividual=1 & NotARecruit=1
# obtain seed counts per fruit per site
seeds.per.site=tapply(data$SeedCt,data$Site,FUN=mean,na.rm=T) # obtain seed counts per fruit per site
seeds.per.site=data.frame(seeds.per.site,rownames(seeds.per.site)) # make into a data frame
colnames(seeds.per.site)=c("seed.ct","Site") # define column names for merging
#*******************************************************************************
#### 2. Create nested loop to obtain replicate bootstrap datasets for each site, sampling with replacement ###
#*******************************************************************************
# Obtain a data frame of unique IDs from each Site
ID.by.Site=unique(data[,1:2])
# Create a vector of unique Site names for subsetting; note this is sorted by decreasing latitude
site=unique(data$Site)
# Create empty list to be filled in loop
data.boot.rep=list()
id.boot=list()
# Set seed for random sampling to obtain reproducible results
seed=123
# Set number of bootstrap replicate datasets
n.boot=2000
# Create loop to obtain replicate bootstrap datasets
for (i in 1:length(site)) {
data.site=subset(data,Site==site[i]) # select data from site i
id.site=subset(ID.by.Site,Site==site[i]) # select list of unique individual IDs from site i
id.boot <- lapply(1:n.boot, function(j) {
set.seed(j+seed)
sample_n(id.site,size=nrow(id.site), replace = T)}) %>% ldply() # resample rows of site i's data with replacement and size=number of unique individuals in original dataset for each site and convert list to data frame
id.boot$Replicate=rep(seq(1:n.boot),each=nrow(id.site)) # create a column in data frame that corresponds to bootstrap replicate
data.boot=join(id.boot,data.site,type="left",match="all") # merge bootstrapped list of unique IDs to full dataset
data.boot.rep[[i]]=data.boot # add each site's dataframe of n.boot bootstrap replicates to list
}
# Convert list to data frame
bootstrapped.data <- do.call(rbind, data.boot.rep)
# Write bootstrapped datasets to .rds file
saveRDS(bootstrapped.data,"R_output/Mcard_demog_INDIV_BOOTSTRAP_data_2010-2013.rds")
|
library(testthat)
library(pkgtest)
test_check("pkgtest")
| /tests/testthat.R | permissive | kismet303/pkgtest | R | false | false | 58 | r | library(testthat)
library(pkgtest)
test_check("pkgtest")
|
#14th August 2015 | /GraphsGreece.R | no_license | cvives/test2 | R | false | false | 17 | r | #14th August 2015 |
\name{ri2rt}
\alias{ri2rt}
\title{Retention Time Index to Retention Time conversion}
\description{
Convert retention time indices to retention times based on the observed FAME RIs
and their standard values.
}
\usage{
ri2rt(riTime, rt.observed, ri.standard)
}
\arguments{
 \item{riTime}{ An RI vector or matrix to convert to Retention Time.}
\item{rt.observed}{The observed FAME RT's. It could be a vector or a matrix.}
\item{ri.standard}{ The standard RI for each FAME }
}
\details{
This function is the inverse of \code{\link{rt2ri}}.
}
\value{
The converted RT
}
\author{Alvaro Cuadros-Inostroza, Matthew Hannah, Henning Redestig }
\seealso{ \code{\link{RIcorrect}}, \code{\link{FAMEoutliers}} }
\examples{
# RI standards
standard <- c(100, 200, 300, 400, 500)
# observed standard retention times
observed <- c(10.4, 19.3, 32.4, 40.2, 50.3)
# a random set of retention times
RI <- runif(100,90,600)
# the converted retention times
RT <- ri2rt(RI, observed, standard)
}
| /man/ri2rt.Rd | no_license | acinostroza/TargetSearch | R | false | false | 1,004 | rd | \name{ri2rt}
\alias{ri2rt}
\title{Retention Time Index to Retention Time convertion}
\description{
Convert retention time indices to retention times indices based on observed FAME RI
and their standard values.
}
\usage{
ri2rt(riTime, rt.observed, ri.standard)
}
\arguments{
\item{riTime}{ And RI vector or matrix to convert to Retention Time.}
\item{rt.observed}{The observed FAME RT's. It could be a vector or a matrix.}
\item{ri.standard}{ The standard RI for each FAME }
}
\details{
This function is the inverse of \code{\link{rt2ri}}.
}
\value{
The converted RT
}
\author{Alvaro Cuadros-Inostroza, Matthew Hannah, Henning Redestig }
\seealso{ \code{\link{RIcorrect}}, \code{\link{FAMEoutliers}} }
\examples{
# RI standards
standard <- c(100, 200, 300, 400, 500)
# observed standard retention times
observed <- c(10.4, 19.3, 32.4, 40.2, 50.3)
# a random set of retention times
RI <- runif(100,90,600)
# the corrected RIs
RT <- ri2rt(RI, observed, standard)
}
|
# Two-dimensional genome scan (R/qtl scantwo) permutation run for an F2
# cross, parallelised across 16 CPUs with snowfall.
library(qtl)
library(snowfall)
# Cross object with crossover-cleaned genotypes for 43 traits.
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
# Impute genotypes (32 draws) and compute genotype probabilities at 1 cM steps,
# as required by scantwo with method="hk".
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
# Start a 16-worker cluster and ship the cross object plus the qtl package
# to every worker.
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
# NOTE(review): pheno.col=21:22 scans only two phenotype columns despite the
# comment above -- confirm which traits are intended.
set.seed(5)
system.time(
scantwo.perm.imp.10.5 <-
scantwo(LG.f2.after.crossover,pheno.col=21:22,method="hk",n.perm=10,n.cluster = 16)
)
# Shut the cluster down before writing results.
sfStop()
# save output
save(scantwo.perm.imp.10.5, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.10.5.Rdata")
| /F2/scantwo/scantwo_perm_10.5_new.R | no_license | leejimmy93/KIAT_cabernet | R | false | false | 680 | r | library(qtl)
library(snowfall)
load("/share/malooflab/Ruijuan/F2/QTL_analysis/data/LG.f2.after.crossover.43traits.Rdata")
LG.f2.after.crossover <- sim.geno(LG.f2.after.crossover,step=1,n.draws=32)
LG.f2.after.crossover <- calc.genoprob(LG.f2.after.crossover,step=1)
sfInit(parallel = TRUE, cpus = 16)
sfExport("LG.f2.after.crossover")
sfLibrary(qtl)
# run scantwo for all traits at once
set.seed(5)
system.time(
scantwo.perm.imp.10.5 <-
scantwo(LG.f2.after.crossover,pheno.col=21:22,method="hk",n.perm=10,n.cluster = 16)
)
sfStop()
# save output
save(scantwo.perm.imp.10.5, file = "/share/malooflab/Ruijuan/F2/QTL_analysis/output/scantwo/scantwo.perm.imp.10.5.Rdata")
|
applyFilters <- function(x, filter="all", custom.filter)
{
  # Remove unwanted Illumina 450k probes (rows) from a beta-value matrix.
  #
  # Args:
  #   x: numeric matrix of beta values whose rownames are standard
  #      Illumina 450k probe names (e.g. "cg...").
  #   filter: character vector of filter names; valid values are "none",
  #      "noncg", "sex", "snp", "rpt", "all" and "custom". The middle four
  #      may be combined; "all" overrides any others.
  #   custom.filter: character vector of probe names to drop; required
  #      when filter == "custom".
  #
  # Returns: the input matrix with the filtered probes removed
  #   (columns untouched).
  if(missing(x) || !is(x, "matrix")) stop("Please provide a valid matrix as input \n")
  if(!any(grepl("^cg", rownames(x)))) stop("Probe names for the beta values matrix are not standard Illumina 450k probe names \n")
  if(!(all(filter %in% c("none", "noncg", "sex", "snp", "rpt", "all", "custom")))) stop("Please provide a valid filter. See '?applyFilters' for valid filters \n")
  # Helper: drop the given probe names from a matrix. A logical mask is
  # used instead of x[-which(...), ] because when which() matches nothing
  # it returns integer(0), and x[-integer(0), ] silently yields a matrix
  # with ZERO rows -- the opposite of "drop nothing".
  drop_probes <- function(mat, probes) {
    mat[!(rownames(mat) %in% probes), , drop = FALSE]
  }
  if("none" %in% filter){
    message("Not performing any filtering as Filter : none was specified \n\n")
    return(x)
  }
  if("all" %in% filter){
    data(filters)
    if(length(filter) > 1) warning("Multiple filter types specified along with type 'all'. Overriding the other filters and applying only the 'all' filter \n")
    message("Applying Filter : all \n\n")
    return(drop_probes(x, filters$filter.all))
  }
  if("custom" %in% filter){
    if(missing(custom.filter)) stop("Please provide a custom filter\n")
    if(!is(custom.filter, "character")) stop("Custom filter should be a chracter vector containing probe names that are to be filtered out \n")
    if(!any(grepl("^cg", custom.filter)) && !any(grepl("^rs", custom.filter)) && !any(grepl("^ch", custom.filter))) {
      stop("Custom filter contains non-standard Illumina 450k probe names \n")
    }
    message("Applying provided custom filter \n\n")
    return(drop_probes(x, custom.filter))
  }
  # Remaining case: some combination of "noncg", "sex", "snp", "rpt".
  data(filters)
  f <- paste("filter", filter, sep=".")
  message(paste("Applying filters : ", paste(filter, collapse = ", "), "\n\n", sep=""))
  # BUGFIX: the original closure ignored its argument and evaluated
  # filters[f] (the full vector) on every iteration; index each selected
  # filter individually instead.
  filter.apply <- unique(unlist(lapply(f, function(fn){filters[[fn]]})))
  drop_probes(x, filter.apply)
}
percentMFilter <- function(x, percent = 10)
{
  # Keep only probes methylated in more than `percent` percent of tumor
  # samples; the retained matrix is stored under x$FILTERED.
  #
  # Args:
  #   x: a SimpleList holding x$DICHOTOMIZED$Tumor.Dichotomized, a 0/1
  #      probe-by-sample matrix.
  #   percent: methylation-prevalence threshold (default 10).
  #
  # Returns: x with an added FILTERED slot containing "Tumor.Filtered".
  if(!is(x, "SimpleList")){
    stop("Data should be a SimpleList object \n")
  }
  message(paste("Retaining probes methylated in", percent, "percent of Tumor samples\n", sep=" "))
  dichotomized <- x$DICHOTOMIZED$Tumor.Dichotomized
  # Percentage of tumor samples (columns) methylated for each probe (row).
  pct_methylated <- (rowSums(dichotomized) / ncol(dichotomized)) * 100
  retained <- dichotomized[which(pct_methylated > percent), ]
  x$FILTERED <- SimpleList("Tumor.Filtered" = retained)
  return(x)
}
| /R/filter.R | no_license | mbootwalla/MethylHose | R | false | false | 2,437 | r | applyFilters <- function(x, filter="all", custom.filter)
{
if(missing(x) || !is(x, "matrix")) stop("Please provide a valid matrix as input \n")
if(!any(grepl("^cg", rownames(x)))) stop("Probe names for the beta values matrix are not standard Illumina 450k probe names \n")
if(!(all(filter %in% c("none", "noncg", "sex", "snp", "rpt", "all", "custom")))) stop("Please provide a valid filter. See '?applyFilters' for valid filters \n")
if("none" %in% filter){
message("Not performing any filtering as Filter : none was specified \n\n")
return(x)
}
if("all" %in% filter){
data(filters)
if(length(filter) > 1) warning("Multiple filter types specified along with type 'all'. Overriding the other filters and applying only the 'all' filter \n")
message("Applying Filter : all \n\n")
filter.all <- filters$filter.all
x <- x[-which(rownames(x) %in% filter.all), , drop=FALSE]
return(x)
}
if("custom" %in% filter){
if(missing(custom.filter)) stop("Please provide a custom filter\n")
if(!is(custom.filter, "character")) stop("Custom filter should be a chracter vector containing probe names that are to be filtered out \n")
if(!any(grepl("^cg", custom.filter)) && !any(grepl("^rs", custom.filter)) && !any(grepl("^ch", custom.filter))) {
stop("Custom filter contains non-standard Illumina 450k probe names \n")
}
message("Applying provided custom filter \n\n")
x <- x[-which(rownames(x) %in% custom.filter), , drop=FALSE]
return(x)
}
if(!("none" %in% filter) && !("all" %in% filter) && !("custom" %in% filter)){
data(filters)
f <- paste("filter", filter, sep=".")
message(paste("Applying filters : ", paste(filter, collapse = ", "), "\n\n", sep=""))
filter.apply <- unique(unlist(lapply(f, function(x){filters[f]})))
x <- x[-which(rownames(x) %in% filter.apply), , drop=FALSE]
return(x)
}
}
percentMFilter <- function(x, percent = 10)
{
if(!is(x, "SimpleList")){
stop("Data should be a SimpleList object \n")
}
message(paste("Retaining probes methylated in", percent, "percent of Tumor samples\n", sep=" "))
datT <- x$DICHOTOMIZED$Tumor.Dichotomized
#datN <- x$DICHOTOMIZED$Normal.Dichotomized
datT.percentM <- datT[which(((rowSums(datT)/ncol(datT))*100) > percent),]
#keep.probes.T <- match(rownames(datT.percentM), rownames(datT))
#keep.probes.N <- match(rownames(datT.percentM), rownames(datN))
x$FILTERED <- SimpleList("Tumor.Filtered" = datT.percentM)
return(x)
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/update_bib.R
\name{update_bib}
\alias{update_bib}
\title{Update bib File}
\usage{
update_bib(report = getwd(), bib.loc = getOption("bib.loc"))
}
\arguments{
\item{report}{Path to the report project.}
\item{bib.loc}{Optional path to a .bib resource.}
}
\value{
Updates bib from master/global .bib file.
}
\description{
Updates the report directory .bib file with a global/master .bib file.
}
| /man/update_bib.Rd | no_license | 2ndFloorStuff/reports | R | false | false | 479 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/update_bib.R
\name{update_bib}
\alias{update_bib}
\title{Update bib File}
\usage{
update_bib(report = getwd(), bib.loc = getOption("bib.loc"))
}
\arguments{
\item{report}{Path to the report project.}
\item{bib.loc}{Optional path to a .bib resource.}
}
\value{
Updates bib from master/global .bib file.
}
\description{
Updates the report directory .bib file with a global/master .bib file.
}
|
# Your assignment is to write a pair of functions that cache the inverse of a matrix.
#
# Write the following functions:
# makeCacheMatrix: This function creates a special "matrix"
# object that can cache its inverse.
# cacheSolve: This function computes the inverse of the special "matrix"
# returned by makeCacheMatrix above. If the inverse has already been
# calculated (and the matrix has not changed), then the cachesolve should retrieve
# the inverse from the cache.
#The first function creates a list of four closure functions that share the variables x and inv.
#An invertible matrix is passed through as an argument x,
# creating the new object/function makeCacheMatrix. This doesn't create the inverted matrix,
# that is done in cacheSolve(), but it gives the user functions to be used with cacheSolve()
# so that the inverse matrix doesn't need to be calculated more than once for a given invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of accessor closures that can cache its inverse.
  # The cached inverse lives in this function's environment and is
  # invalidated whenever a new matrix is stored via set().
  cached_inverse <- NULL
  store <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL  # new data invalidates any previously cached inverse
  }
  fetch <- function() {
    x
  }
  store_inverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  fetch_inverse <- function() {
    cached_inverse
  }
  # The element names form the public interface consumed by cacheSolve().
  list(set = store,
       get = fetch,
       setinverse = store_inverse,
       getinverse = fetch_inverse)
}
## Return a matrix that is the inverse of 'x',
# x being an object/variable created by the makeCacheMatrix function.
#If an inverted matrix isn't able to be found in x, cacheSolve 'solves' it and stores it in x.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` created by
  ## makeCacheMatrix(): compute it with solve() on the first call,
  ## store it in the cache, and serve the cached copy afterwards.
  inverse <- x$getinverse()
  if (is.null(inverse)) {
    inverse <- solve(x$get(), ...)
    x$setinverse(inverse)
  } else {
    message("getting cached inverse matrix")
  }
  inverse
}
| /cachematrix.R | no_license | krocagator/ProgrammingAssignment2 | R | false | false | 1,863 | r | # Your assignment is to write a pair of functions that cache the inverse of a matrix.
#
# Write the following functions:
# makeCacheMatrix: This function creates a special "matrix"
# object that can cache its inverse.
# cacheSolve: This function computes the inverse of the special "matrix"
# returned by makeCacheMatrix above. If the inverse has already been
# calculated (and the matrix has not changed), then the cachesolve should retrieve
# the inverse from the cache.
#First function creates an object(?) made of four functions, as well as variables inv and y.
#An invertible matrix is passed through as an argument x,
# creating the new object/function makeCacheMatrix. This doesn't create the inverted matrix,
# that is done in cacheSolve(), but it gives the user functions to be used with cacheSolve()
# so that the inverse matrix doesn't need to be calculated more than once for a given invertible matrix.
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Return a matrix that is the inverse of 'x',
# x being an object/variable created by the makeCacheMatrix function.
#If an inverted matrix isn't able to be found in x, cacheSolve 'solves' it and stores it in x.
cacheSolve <- function(x, ...) {
k <- x$getinverse()
if(!is.null(k)) {
message("getting cached inverse matrix")
return(k)
}
data <- x$get()
k <- solve(data, ...)
x$setinverse(k)
k
}
|
# Last modified 5/4/2016 by Andrew Lee
# Modifications include comments and suggestions to Ruimin.
# Overview: The code below uses an R package called "caret" to perform a generalized linear model ("glm"). While I used
# caret, an alternatie method (and probably simplier to use) would have been to just call glm(y ~ x, data = data) function
# directly and evaluate the results that way.
#
# GLM's are used for logistic regressions.
#
#
# The line commented out below is an example of me using glm for the logistic regression.
#fit <- glm(y ~ syscall + stratNo+ progType, data =dat.clean, family = binomial(link = "logit"))
# This loads the package "caret"
library(caret)
# Caret requires the use of a training set and a test set b/c of machine learning. The code below
# creates a training and test set for full dataset IO + CPU
set.seed(5207) #set a for data splitting
# trainIndex <- createDataPartition(dat.clean$y, p = .8,
# list = FALSE,
# times = 1)
# training <- dat.clean[trainIndex,]
# test <- dat.clean[-trainIndex,]
trainIndex <- createDataPartition(dat50$y, p = .8,
list = FALSE,
times = 1)
training <- dat50[trainIndex,]
test <- dat50[-trainIndex,]
# Doing the same thing except for IO dataset now
# set.seed(8931)
# trainIndexIO <- createDataPartition(datIO.clean$y, p = .8,
# list = FALSE,
# times = 1)
# trainingIO <- datIO.clean[trainIndexIO,]
# testIO <- datIO.clean[-trainIndexIO,]
# Same thing but for CPU dataset
# set.seed(1512)
# trainIndexCPU <- createDataPartition(datCPU.clean$y, p = .8,
# list = FALSE,
# times = 1)
# trainingCPU <- datCPU.clean[trainIndexCPU,]
# testCPU <- datCPU.clean[-trainIndexCPU,]
# A special function necessary for the caret package. Here we define the sampling method
# to be cross validation with 10 folds with 5 repetitions.
Control <- trainControl(method = "repeatedcv", number = 3, repeats = 2, verboseIter = T)
# set.seed here actually does nothing
set.seed(2741)
# Instead of using the glm function (see line 17), I use the caret function "train" which runs multiple glm models
# all with different sample sizes
lr <- train(y ~ syscall + stratNo , data = training, method = "glm", trControl = Control )
lr1 <- train(y ~ stratNo , data = training, method = "glm", trControl = Control )
lr2 <- train(y ~ syscall , data = training, method = "glm", trControl = Control )
# Data changes
# set.seed(3522)
# lrIO <- train(y ~ syscall + stratNo , data = trainingIO, method = "glm", trControl = Control )
#
# set.seed(8133)
# lrCPU <- train(y ~ syscall + stratNo , data = trainingCPU, method = "glm", trControl = Control )
######
# Now that the models are done running, I computed and analyzed the results.
sink("~/bear/Res/Res50_lr.txt")
summary(lr)
confusionMatrix(lr)
varImp(lr)
exp(confint(lr$finalModel))
exp(lr$finalModel$coefficients)
sink()
sink("~/bear/Res/Res50_lr1.txt")
summary(lr1)
confusionMatrix(lr1)
varImp(lr1)
exp(confint(lr1$finalModel))
exp(lr1$finalModel$coefficients)
sink()
sink("~/bear/Res/Res50_lr2.txt")
summary(lr2)
confusionMatrix(lr2)
varImp(lr2)
exp(confint(lr2$finalModel))
exp(lr2$finalModel$coefficients)
sink()
library(xlsx)
cos <- exp(lr$finalModel$coefficients)
write.xlsx(cos, "~/bear/Res/coefs_50.xlsx")
library(xlsx)
cos1 <- exp(lr1$finalModel$coefficients)
write.xlsx(cos1, "~/bear/Res/coefs1_50.xlsx")
cos2 <- exp(lr2$finalModel$coefficients)
write.xlsx(cos2, "~/bear/Res/coefs2_50.xlsx")
# sink() outputs the console data into a text file
sink("~/bear/Res/Res50_lr1.txt")
# Summary of the variables. I wanted to see which variables are significant, not significant.
summary(lr1)
# Confusion matrix tells me how well the predictions were.
confusionMatrix(lr1)
# Function in caret that tells me what variables are important and how important with respect to
# the other variables. Type "?varImp" for manual
varImp(lr1)
# This creates the odds ratios confdience intervals.
exp(confint(lr1$finalModel))
# This creates a single odds ratio.
exp(lr1$finalModel$coefficients)
# Stops sink function
sink()
#glmnet <- train(y ~ progType + syscall + stratNo , data = training, method = "glmnet", trControl = Control )
#rf <- train(y ~ progType + syscall + stratNo , data = training, method = "rf", trControl = Control )
| /Rcode/models50.R | no_license | gracesrm/Bear-ISSRE17 | R | false | false | 4,549 | r | # Last modified 5/4/2016 by Andrew Lee
# Modifications include comments and suggestions to Ruimin.
# Overview: The code below uses an R package called "caret" to perform a generalized linear model ("glm"). While I used
# caret, an alternatie method (and probably simplier to use) would have been to just call glm(y ~ x, data = data) function
# directly and evaluate the results that way.
#
# GLM's are used for logistic regressions.
#
#
# The line commented out below is an example of me using glm for the logistic regression.
#fit <- glm(y ~ syscall + stratNo+ progType, data =dat.clean, family = binomial(link = "logit"))
# This loads the package "caret"
library(caret)
# Caret requires the use of a training set and a test set b/c of machine learning. The code below
# creates a training and test set for full dataset IO + CPU
set.seed(5207) #set a for data splitting
# trainIndex <- createDataPartition(dat.clean$y, p = .8,
# list = FALSE,
# times = 1)
# training <- dat.clean[trainIndex,]
# test <- dat.clean[-trainIndex,]
trainIndex <- createDataPartition(dat50$y, p = .8,
list = FALSE,
times = 1)
training <- dat50[trainIndex,]
test <- dat50[-trainIndex,]
# Doing the same thing except for IO dataset now
# set.seed(8931)
# trainIndexIO <- createDataPartition(datIO.clean$y, p = .8,
# list = FALSE,
# times = 1)
# trainingIO <- datIO.clean[trainIndexIO,]
# testIO <- datIO.clean[-trainIndexIO,]
# Same thing but for CPU dataset
# set.seed(1512)
# trainIndexCPU <- createDataPartition(datCPU.clean$y, p = .8,
# list = FALSE,
# times = 1)
# trainingCPU <- datCPU.clean[trainIndexCPU,]
# testCPU <- datCPU.clean[-trainIndexCPU,]
# A special function necessary for the caret package. Here we define the sampling method
# to be cross validation with 10 folds with 5 repetitions.
Control <- trainControl(method = "repeatedcv", number = 3, repeats = 2, verboseIter = T)
# set.seed here actually does nothing
set.seed(2741)
# Instead of using the glm function (see line 17), I use the caret function "train" which runs multiple glm models
# all with different sample sizes
lr <- train(y ~ syscall + stratNo , data = training, method = "glm", trControl = Control )
lr1 <- train(y ~ stratNo , data = training, method = "glm", trControl = Control )
lr2 <- train(y ~ syscall , data = training, method = "glm", trControl = Control )
# Data changes
# set.seed(3522)
# lrIO <- train(y ~ syscall + stratNo , data = trainingIO, method = "glm", trControl = Control )
#
# set.seed(8133)
# lrCPU <- train(y ~ syscall + stratNo , data = trainingCPU, method = "glm", trControl = Control )
######
# Now that the models are done running, I computed and analyzed the results.
sink("~/bear/Res/Res50_lr.txt")
summary(lr)
confusionMatrix(lr)
varImp(lr)
exp(confint(lr$finalModel))
exp(lr$finalModel$coefficients)
sink()
sink("~/bear/Res/Res50_lr1.txt")
summary(lr1)
confusionMatrix(lr1)
varImp(lr1)
exp(confint(lr1$finalModel))
exp(lr1$finalModel$coefficients)
sink()
sink("~/bear/Res/Res50_lr2.txt")
summary(lr2)
confusionMatrix(lr2)
varImp(lr2)
exp(confint(lr2$finalModel))
exp(lr2$finalModel$coefficients)
sink()
library(xlsx)
cos <- exp(lr$finalModel$coefficients)
write.xlsx(cos, "~/bear/Res/coefs_50.xlsx")
library(xlsx)
cos1 <- exp(lr1$finalModel$coefficients)
write.xlsx(cos1, "~/bear/Res/coefs1_50.xlsx")
cos2 <- exp(lr2$finalModel$coefficients)
write.xlsx(cos2, "~/bear/Res/coefs2_50.xlsx")
# sink() outputs the console data into a text file
sink("~/bear/Res/Res50_lr1.txt")
# Summary of the variables. I wanted to see which variables are significant, not significant.
summary(lr1)
# Confusion matrix tells me how well the predictions were.
confusionMatrix(lr1)
# Function in caret that tells me what variables are important and how important with respect to
# the other variables. Type "?varImp" for manual
varImp(lr1)
# This creates the odds ratios confdience intervals.
exp(confint(lr1$finalModel))
# This creates a single odds ratio.
exp(lr1$finalModel$coefficients)
# Stops sink function
sink()
#glmnet <- train(y ~ progType + syscall + stratNo , data = training, method = "glmnet", trControl = Control )
#rf <- train(y ~ progType + syscall + stratNo , data = training, method = "rf", trControl = Control )
|
library(dplyr)
library(ggplot2)
set.seed(10)
# Functions -------------------------------------------------------------------
# Defines age categories that group corresponds to
labels = c("(15-20]", "(20-25]", "(25-30]", "(30-35]", "(35-40]",
"(40-45]", "(45-50]", "(50-55]", "(55-60]", "(60-65]", "(65-70]",
"(70-75]", "(75-80]", "80+")
# Age groups
# Map an age in years to a 5-year age-band index:
#   1 = [15,20), 2 = [20,25), ..., 13 = [75,80), 14 = 80 and over.
# Replaces the original 14-branch if/else chain (which also used the
# elementwise `&` where the scalar `&&` is idiomatic) with equivalent
# band arithmetic; behaviour is unchanged.
grouping <- function(age){
  if (age >= 15 && age < 80) {
    # Bands are 5 years wide starting at 15, so this is a direct index.
    return(floor((age - 15) / 5) + 1)
  }
  # NOTE(review): ages below 15 also fall through to band 14 ("80+"),
  # exactly as in the original chain -- confirm that is intended.
  14
}
age_spent <- function(row, y){
  # How many of the 60 months (5 years) preceding period y did this man
  # spend in each of 15 five-year age bands?
  #
  # Args:
  #   row: character vector produced by apply() over the men data frame;
  #        row[2] holds the man's id and row[9] his age in months.
  #   y:   period index used to shift the age back in time.
  #
  # Returns: numeric vector c(id, months in band 1, ..., months in band 15).
  #   Band 1 covers ages up to 180 months (15 years), bands 2..14 are
  #   successive 60-month intervals up to 960 months (80 years), and
  #   band 15 is the open-ended "over 960 months" band.
  #
  # This replaces the original ~70-line copy-pasted threshold chain with
  # equivalent closed-form band arithmetic; outputs are identical.
  id <- as.numeric(row[2])
  age_month <- as.numeric(row[9])
  # NOTE(review): this subtracts 5 *months* per period; if y counts 5-year
  # periods the intended shift is presumably 60*(y-1) months. Kept as-is
  # to preserve the original behaviour -- confirm with the author.
  age <- age_month - 5*(y-1)
  time_age_cat <- vector(mode = "numeric", length = 15)
  if (age > 960 + 60) {
    # Entire 60-month window spent in the open-ended top band.
    time_age_cat[15] <- 60
  } else if (age > 960) {
    # Window straddles the 960-month (80-year) boundary.
    time_age_cat[15] <- age - 960
    time_age_cat[14] <- 60 - time_age_cat[15]
  } else if (age > 180) {
    # Band k covers (120 + 60*(k-1), 120 + 60*k] months, k = 2..14; the
    # 60-month window splits between band k and the band just below it.
    k <- ceiling((age - 120) / 60)
    in_band <- age - (120 + 60 * (k - 1))
    time_age_cat[k] <- in_band
    time_age_cat[k - 1] <- 60 - in_band
  } else {
    # Aged 180 months (15 years) or less: whole window in the bottom band.
    time_age_cat[1] <- 60
  }
  # Sanity check: the allocated months must always total exactly 60.
  if(sum(time_age_cat) != 60){
    print(id)
    print(time_age_cat)
    stop()
  }
  return (c(id, time_age_cat))
}
#-----------------------------------------------------------------------------
# Read in DHS data
all_data <- readRDS("data/DHS/za_fathers_16.RDS")
all_data <- select(all_data, - father_wi, -father_bmi, -father_smoking, -father_anemia)
# Survey year
year <- floor(all_data$survey_date[1])
# Add one to year to make labelling easier later
year_tmp <- year + 1
# Separate out men who are fathers
father_data <- all_data[which(!is.na(all_data$age_child_now)),]
# Separate out full data
full <- father_data[which(father_data$age_father_now > 0 & father_data$age_mother_now >0 & father_data$father_status == 1),]
not_nec_mothers <- father_data[which(father_data$age_father_now > 0),]
all_fertility <- NULL
for (y in 1:3){
# Makes a null fertility vector
fertility <- NULL
# Repeat bootstrapping 10 times
for (j in 1:10){
# Assigns fathers to missing children
for (i in 1:length(father_data$id_household)){
# Only adjust those with missing father data
if (is.na(father_data$age_father_now[i])){
# What is age of this child
age_child <- father_data$age_child_now[i]
# What is age of mother
age_mother <- father_data$age_mother_now[i]
# If father status is missing or unknown - work out if father should be alive
if (father_data$father_status[i] > 1){
other_children <- father_data[which(father_data$age_child_now == age_child),]
if (!is.na(age_mother)){
if (age_mother != 0){
other_children <- other_children[which(other_children$age_mother_now == age_mother),]
}
}
prop_live = sum(other_children$father_status == 1) / length(other_children$father_status)
p <- runif(1, 0, 1)
if (p < prop_live){
father_data$father_status[i] = 1
} else {
father_data$father_status[i] = 0
}
}
# Find other children with data that matches
if (age_mother == 0 | is.na(age_mother)){
others <- not_nec_mothers[which(not_nec_mothers$age_child_now == age_child),]
} else {
others <- full[which(full$age_child_now == age_child & full$age_mother_now),]
}
# Randomly select a child from the others list
new_father <- others[sample(1:length(others$id_household), 1),]
father_data$age_father_now[i] <- new_father$age_father_now
father_data$new_id[i] <- new_father$id
}
}
# Removes children with dead father
father_data <- father_data[which(father_data$father_status == 1),]
# Data where every child has age of father
father_data$age_father_birth <- father_data$age_father_now - father_data$age_child_now
# select children born in last 5 year
if (y == 1){
selected_children <- father_data[which(father_data$age_child_now < 5),]
} else {
selected_children <- father_data[which(father_data$age_child_now >= 5*(y-1) & father_data$age_child_now < 5*y),]
}
# Summarise number of children in each age
summ <- selected_children %>%
group_by(age_father_birth) %>%
summarize(count = n())
# Plots a histogram
#p <- ggplot(summ, aes(age_father_birth, count)) + geom_col() + theme_bw()
#print(p)
# Calculate number of children in each age category
summ$group <- sapply(summ$age_father_birth, grouping)
band_children <- summ %>%
group_by(group) %>%
summarise("births" = sum(count))
band_children$label <- labels[1:length(band_children$group)]
# Work out exposure
# Removes all lines where father doesn't have an age as we match them to other fathers
exposure <- all_data[!is.na(all_data$age_father_now),]
# Subset so only have one line per man
unique_men <- exposure %>% distinct(id_man, .keep_all= TRUE)
# Randomly assign each man a birth month
unique_men$man_age_month <- sample(0:11, length(unique_men$id_man), replace = TRUE)
# Work out mens age in months
unique_men$age_months <- unique_men$age_father_now*12 + unique_men$man_age_month
# Remove men (boys) under the age of 5*years
unique_men <- unique_men[which(unique_men$age_father_now > 5*y),]
# Calculate number of years spent in each age category
exposure_months <- t(apply(unique_men, 1, age_spent, y=y))
exposure_years <- data.frame("exposure_years" = colSums( exposure_months[,2:16])/12)
exposure_years$label <- c("under 15", labels)
# Calculate fertility
data <- left_join(exposure_years, band_children)
data$fertility <- data$births/data$exposure_years
fertility <- cbind(fertility, data$fertility)
}
fertility_rate = rowMeans(fertility)
for (i in 1:5){
year_tmp <- year_tmp - 1
all_fertility <- cbind(all_fertility, fertility_rate)
}
#p <- ggplot(all_fertility %>% filter(!is.na(fertility_rate))) +
# geom_point(aes(age, fertility_rate)) + theme_bw() +
# ylab("Male fertility rate") + xlab("Age category")
#print(p)
}
df_fertility <- data.frame(all_fertility)
names(df_fertility) <- paste0("y", year - 0:14)
df_fertility$ages <- factor(exposure_years$label, levels = c("under 15", labels))
saveRDS(df_fertility, file = "data/SouthAfrica/male_fertility.RDS")
p <- ggplot(df_fertility) +
geom_point(aes(ages, y2016, col = "2016")) +
geom_point(aes(ages, y2011, col = "2011")) +
geom_point(aes(ages, y2006, col = "2006")) +
scale_color_manual(values = c("2016" = "black", "2011" = "blue", "2006" = "red")) +
theme_bw()
print(p)
ggsave("za_male.png", p) | /TheLancet_global_minimum_estimates_2021/R/DHS/za_impute_fathers.R | permissive | ImperialCollegeLondon/covid19_orphans | R | false | false | 9,094 | r | library(dplyr)
library(ggplot2)
set.seed(10)
# Functions -------------------------------------------------------------------
# Defines age categories that group corresponds to
labels = c("(15-20]", "(20-25]", "(25-30]", "(30-35]", "(35-40]",
"(40-45]", "(45-50]", "(50-55]", "(55-60]", "(60-65]", "(65-70]",
"(70-75]", "(75-80]", "80+")
# Age groups
grouping <- function(age){
if (age >= 15 & age < 20){
return (1)
} else if (age >= 20 & age < 25){
return (2)
} else if (age >=25 & age < 30){
return (3)
} else if (age >= 30 & age < 35){
return (4)
} else if (age >= 35 & age < 40){
return (5)
} else if (age >= 40 & age < 45){
return (6)
} else if (age >= 45 & age < 50){
return (7)
} else if (age >= 50 & age < 55){
return (8)
} else if (age >= 55 & age < 60){
return (9)
} else if (age >= 60 & age < 65){
return (10)
} else if (age >= 65 & age < 70){
return (11)
} else if (age >= 70 & age < 75){
return (12)
} else if (age >= 75 & age < 80){
return(13)
} else {
return (14)
}
}
age_spent <- function(row, y){
id <- as.numeric(row[2])
age_month <- as.numeric(row[9])
age <- age_month - 5*(y-1)
time_age_cat <- vector(mode = "numeric", length = 15)
if (age > 960){
if (age > 960 + 60){
time_age_cat[15] = 60
} else {
age_in_category <- age - 960
time_age_cat[15] <- age_in_category
time_age_cat[14] <- 5*12 - age_in_category
}
} else if (age > 900){
age_in_category <- age - 900
time_age_cat[14] <- age_in_category
time_age_cat[13] <- 5*12 - age_in_category
} else if (age > 840){
age_in_category <- age - 840
time_age_cat[13] <- age_in_category
time_age_cat[12] <- 5*12 - age_in_category
} else if (age > 780){
age_in_category <- age - 780
time_age_cat[12] <- age_in_category
time_age_cat[11] <- 5*12 - age_in_category
} else if (age > 720){
age_in_category <- age - 720
time_age_cat[11] <- age_in_category
time_age_cat[10] <- 5*12 - age_in_category
} else if (age > 660){
age_in_category <- age - 660
time_age_cat[10] <- age_in_category
time_age_cat[9] <- 5*12 - age_in_category
} else if (age > 600){
age_in_category <- age - 600
time_age_cat[9] <- age_in_category
time_age_cat[8] <- 5*12 - age_in_category
} else if (age > 540){
age_in_category <- age - 540
time_age_cat[8] <- age_in_category
time_age_cat[7] <- 5*12 - age_in_category
} else if (age > 480){
age_in_category <- age - 480
time_age_cat[7] <- age_in_category
time_age_cat[6] <- 5*12 - age_in_category
} else if (age > 420){
age_in_category <- age - 420
time_age_cat[6] <- age_in_category
time_age_cat[5] <- 5*12 - age_in_category
} else if (age > 360){
age_in_category <- age - 360
time_age_cat[5] <- age_in_category
time_age_cat[4] <- 5*12 - age_in_category
} else if (age > 300){
age_in_category <- age - 300
time_age_cat[4] <- age_in_category
time_age_cat[3] <- 5*12 - age_in_category
} else if (age > 240){
age_in_category <- age - 240
time_age_cat[3] <- age_in_category
time_age_cat[2] <- 5*12 - age_in_category
} else if (age > 180){
age_in_category <- age - 180
time_age_cat[2] <- age_in_category
time_age_cat[1] <- 5*12 - age_in_category
} else {
time_age_cat[1] <- 60
}
if(sum(time_age_cat) != 60){
print(id)
print((time_age_cat))
stop()
}
return (c(id, time_age_cat))
}
#-----------------------------------------------------------------------------
# Read in DHS data
all_data <- readRDS("data/DHS/za_fathers_16.RDS")
all_data <- select(all_data, - father_wi, -father_bmi, -father_smoking, -father_anemia)
# Survey year
year <- floor(all_data$survey_date[1])
# Add one to year to make labelling easier later
year_tmp <- year + 1
# Separate out men who are fathers
father_data <- all_data[which(!is.na(all_data$age_child_now)),]
# Separate out full data
full <- father_data[which(father_data$age_father_now > 0 & father_data$age_mother_now >0 & father_data$father_status == 1),]
not_nec_mothers <- father_data[which(father_data$age_father_now > 0),]
all_fertility <- NULL
for (y in 1:3){
# Makes a null fertility vector
fertility <- NULL
# Repeat the bootstrap 10 times; each pass contributes one column of
# age-band fertility rates to `fertility`.
for (j in 1:10){
  # Assign a father to every child whose father's age is missing by sampling
  # from children with matching age (and, where known, mother's age).
  for (i in seq_along(father_data$id_household)){
    # Only adjust those with missing father data
    if (is.na(father_data$age_father_now[i])){
      # Age of this child and (possibly missing) age of its mother
      age_child <- father_data$age_child_now[i]
      age_mother <- father_data$age_mother_now[i]
      # If father status is missing/unknown (coded > 1), impute alive/dead by
      # drawing against the proportion alive among comparable children.
      if (father_data$father_status[i] > 1){
        other_children <- father_data[which(father_data$age_child_now == age_child),]
        if (!is.na(age_mother)){
          if (age_mother != 0){
            other_children <- other_children[which(other_children$age_mother_now == age_mother),]
          }
        }
        # NOTE(review): if no comparable children exist this is 0/0 = NaN and
        # the draw below always marks the father dead — confirm intended.
        prop_live <- sum(other_children$father_status == 1) / length(other_children$father_status)
        p <- runif(1, 0, 1)
        if (p < prop_live){
          father_data$father_status[i] <- 1
        } else {
          father_data$father_status[i] <- 0
        }
      }
      # Find other children with data that matches this child
      if (is.na(age_mother) || age_mother == 0){
        others <- not_nec_mothers[which(not_nec_mothers$age_child_now == age_child),]
      } else {
        # BUG FIX: this previously read "& full$age_mother_now" with no
        # comparison, coercing the ages to logical instead of matching the
        # mother's age (compare the equivalent filter on other_children above).
        others <- full[which(full$age_child_now == age_child & full$age_mother_now == age_mother),]
      }
      # Randomly select a child from the matches and copy its father's data.
      # NOTE(review): if `others` is empty, sample(1:0, 1) silently yields a
      # bogus index — confirm matches always exist for observed child ages.
      new_father <- others[sample(1:length(others$id_household), 1),]
      father_data$age_father_now[i] <- new_father$age_father_now
      father_data$new_id[i] <- new_father$id
    }
  }
  # Removes children with dead father
  father_data <- father_data[which(father_data$father_status == 1),]
  # Every remaining child has a father's age; derive his age at the birth
  father_data$age_father_birth <- father_data$age_father_now - father_data$age_child_now
  # Select children born in the 5-year window indexed by y (y = 1 is the
  # most recent window)
  if (y == 1){
    selected_children <- father_data[which(father_data$age_child_now < 5),]
  } else {
    selected_children <- father_data[which(father_data$age_child_now >= 5*(y-1) & father_data$age_child_now < 5*y),]
  }
  # Number of children at each paternal age at birth
  summ <- selected_children %>%
    group_by(age_father_birth) %>%
    summarize(count = n())
  # Plots a histogram
  #p <- ggplot(summ, aes(age_father_birth, count)) + geom_col() + theme_bw()
  #print(p)
  # Aggregate the counts into age bands via the project-level grouping()
  summ$group <- sapply(summ$age_father_birth, grouping)
  band_children <- summ %>%
    group_by(group) %>%
    summarise("births" = sum(count))
  band_children$label <- labels[1:length(band_children$group)]
  # Work out exposure (person-years men spent in each age band).
  # Remove lines where the father has no age, as those were matched above.
  exposure <- all_data[!is.na(all_data$age_father_now),]
  # Subset so only have one line per man
  unique_men <- exposure %>% distinct(id_man, .keep_all= TRUE)
  # Randomly assign each man a birth month, then convert his age to months
  unique_men$man_age_month <- sample(0:11, length(unique_men$id_man), replace = TRUE)
  unique_men$age_months <- unique_men$age_father_now*12 + unique_men$man_age_month
  # Remove men (boys) under the age of 5*y
  unique_men <- unique_men[which(unique_men$age_father_now > 5*y),]
  # Years each man spent in each age category (age_spent is project-level)
  exposure_months <- t(apply(unique_men, 1, age_spent, y=y))
  exposure_years <- data.frame("exposure_years" = colSums( exposure_months[,2:16])/12)
  exposure_years$label <- c("under 15", labels)
  # Fertility = births in band / person-years of exposure in band
  data <- left_join(exposure_years, band_children)
  data$fertility <- data$births/data$exposure_years
  fertility <- cbind(fertility, data$fertility)
}
# Average the 10 bootstrap replicate columns into one rate per age band
fertility_rate = rowMeans(fertility)
# Replicate this period's rates across each of the 5 calendar years it
# covers; year_tmp and all_fertility carry state from before this excerpt
for (i in 1:5){
year_tmp <- year_tmp - 1
all_fertility <- cbind(all_fertility, fertility_rate)
}
#p <- ggplot(all_fertility %>% filter(!is.na(fertility_rate))) +
# geom_point(aes(age, fertility_rate)) + theme_bw() +
# ylab("Male fertility rate") + xlab("Age category")
#print(p)
# Closes the enclosing loop over 5-year periods (opened above this excerpt)
}
# Assemble the bootstrapped rates into one data frame: one column per
# calendar year (newest first), one row per paternal age band.
df_fertility <- data.frame(all_fertility)
names(df_fertility) <- paste0("y", year - 0:14)
# Order the age bands explicitly so plots run youngest-to-oldest
age_levels <- c("under 15", labels)
df_fertility$ages <- factor(exposure_years$label, levels = age_levels)
saveRDS(df_fertility, file = "data/SouthAfrica/male_fertility.RDS")
# Compare the three census years on a single panel
p <- ggplot(df_fertility) +
  geom_point(aes(ages, y2016, col = "2016")) +
  geom_point(aes(ages, y2011, col = "2011")) +
  geom_point(aes(ages, y2006, col = "2006")) +
  scale_color_manual(values = c("2016" = "black", "2011" = "blue", "2006" = "red")) +
  theme_bw()
print(p)
ggsave("za_male.png", p) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Challenger}
\alias{Challenger}
\title{The Challenger Space Shuttle O-Ring Dataset}
\format{A dataframe with 5 variables:
\describe{
\item{oring_tot}{The number of O-rings at risk on a given flight}
\item{oring_dt}{The number experiencing thermal distress}
\item{temp}{The launch temperature (degrees F)}
\item{psi}{The leak-check pressure (psi)}
\item{flight}{The temporal order of flight}
}}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring}
Original dataset from: Draper,D. (1993). Assessment and propagation of model uncertainty. In Proceedings of the Fourth International Workshop on Artificial Intelligence and Statistics (pp. 497--509). Ft. Lauderdale, FL: Unpublished.
}
\usage{
Challenger
}
\description{
Edited from (Draper, 1993): The motivation for collecting this database was
the explosion of the USA Space Shuttle Challenger on 28 January, 1986. An
investigation ensued into the reliability of the shuttle's propulsion system.
The explosion was eventually traced to the failure of one of the three field
joints on one of the two solid booster rockets. Each of these six field
joints includes two O-rings, designated as primary and secondary, which fail
when phenomena called erosion and blowby both occur. The night before the
launch a decision had to be made regarding launch safety. The discussion
among engineers and managers leading to this decision included concern that
the probability of failure of the O-rings depended on the temperature t at
launch, which was forecast to be 31 degrees F. There are strong engineering
reasons based on the composition of O-rings to support the judgment that
failure probability may rise monotonically as temperature drops. One other
variable, the pressure at which safety testing for field joint leaks was
performed, was available, but its relevance to the failure process was
unclear.
}
\keyword{datasets}
| /man/Challenger.Rd | no_license | courtiol/BeginR | R | false | true | 2,035 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{Challenger}
\alias{Challenger}
\title{The Challenger Space Shuttle O-Ring Dataset}
\format{A dataframe with 5 variables:
\describe{
\item{oring_tot}{The number of O-rings at risk on a given flight}
\item{oring_dt}{The number experiencing thermal distress}
\item{temp}{The launch temperature (degrees F)}
\item{psi}{The leak-check pressure (psi)}
\item{flight}{The temporal order of flight}
}}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Challenger+USA+Space+Shuttle+O-Ring}
Original dataset from: Draper,D. (1993). Assessment and propagation of model uncertainty. In Proceedings of the Fourth International Workshop on Artificial Intelligence and Statistics (pp. 497--509). Ft. Lauderdale, FL: Unpublished.
}
\usage{
Challenger
}
\description{
Edited from (Draper, 1993): The motivation for collecting this database was
the explosion of the USA Space Shuttle Challenger on 28 January, 1986. An
investigation ensued into the reliability of the shuttle's propulsion system.
The explosion was eventually traced to the failure of one of the three field
joints on one of the two solid booster rockets. Each of these six field
joints includes two O-rings, designated as primary and secondary, which fail
when phenomena called erosion and blowby both occur. The night before the
launch a decision had to be made regarding launch safety. The discussion
among engineers and managers leading to this decision included concern that
the probability of failure of the O-rings depended on the temperature t at
launch, which was forecast to be 31 degrees F. There are strong engineering
reasons based on the composition of O-rings to support the judgment that
failure probability may rise monotonically as temperature drops. One other
variable, the pressure at which safety testing for field joint leaks was
performed, was available, but its relevance to the failure process was
unclear.
}
\keyword{datasets}
|
#' @docType methods
#' @rdname datatype-methods
#' @title Extract Image datatype attribute
#' @name datatype-methods
#' @aliases datatype,character-method
#' @import oro.nifti
#' @export
#' @description Reads the \code{datatype} header field of a NIfTI image on
#'   disk by querying FSL through \link{fslval}.
#' @param object is a filename to pass to \link{fslval}
#'
setMethod("datatype", "character", function(object) {
  # Resolve "~" and friends so FSL receives a usable path, and fail fast
  # if the file does not exist.
  fname <- path.expand(object)
  stopifnot(file.exists(fname))
  # fslval returns the header value as a character string; hand back the
  # numeric datatype code.
  raw_value <- fslval(fname, keyword = "datatype", verbose = FALSE)
  as.numeric(raw_value)
})
| /R/datatype.R | no_license | muschellij2/fslr | R | false | false | 522 | r | #' @docType methods
#' @rdname datatype-methods
#' @title Extract Image datatype attribute
#' @name datatype-methods
#' @aliases datatype,character-method
#' @import oro.nifti
#' @export
#' @description datatype method for character types: reads the
#'   \code{datatype} header field of a NIfTI image on disk via FSL.
#' @param object is a filename to pass to \link{fslval}
#' @return Numeric datatype code read from the image header.
#'
setMethod("datatype", "character", function(object) {
# Resolve "~" etc. so FSL receives a usable path; fail fast if missing
object = path.expand(object)
stopifnot(file.exists(object))
# fslval returns the header value as a character string
res = fslval(object, keyword = "datatype", verbose = FALSE)
res = as.numeric(res)
return(res)
})
|
# Clean up R environment - This is not necessary, but I like to do it.
rm(list = ls())
# LOAD FUNCTIONS FROM myFLIC.R
setwd("/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC")
source("myFLIC.R")
############################################################
############################################################
############################################################
############################################################
# FAST RUN:
# Change the Threshold appropiately (e.g. change 0 to other number such as 1)
# 1. Set the path:
# 1.a. Manually:
path_to_files="/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC/60707m1/"
# 1.b. Using the select folder option:
path_to_files=select_folder()
# 2. Get the PI's:
# 2.a. Based on number of events (e.g. counts)
sum_PI(path_to_files,0,1,1)
# 2.b. Based on the duration of the events
sum_PI(path_to_files,0,2,1)
# 2.c. Get duration and counts for each well on each DFM
sum_PI(path_to_files,0,3,1)
sum_PI(path_to_files,0,3,4)
# STEP BY STEP - looks into individual DFM's
# NOT ALL FUNCTIONS ARE COMPLETE
# 1. Set the path to the directory you want:
setwd("/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC/60414m1")
# 2. Open the DFM file of interest
DFM="DFM_3.csv"
raw_df = read_data_flic(DFM)
# 3. Remove the baseline.
# 3.a. Using average of all data:
#baseLine=baseLineCal(raw_df)
#df=dfm_base(raw_df,baseLine)
# 3.b. Using moving average
# Set to ~5 mins
df=dfm_base_ma(raw_df, 0)
# Select only the first 30 mins of the run (a.k.a. 1800 sec)
df1=df%>%arrange(ntime)%>%filter(ntime-ntime[1]<1800)
# Calculate Area, amplitude and time of all events in the DFM
# This data frame can be used to calculate the PI's
graphEvent= allEvents(df)
initialScatPlot(graphEvent)
# Saving the data.frame graphEvent into a file (change the name accordingly)
#write.csv(graphEvent, file = "suc340-suc340E15-stv5h_60502_ma.csv")
# You can further filter the data
# Using quatiles:
prob = c(0.01,0.99)
l_T=quantile(as.numeric(graphEvent$diff_time), prob = prob, na.rm=T)
l_A=quantile(graphEvent$area, prob = prob, na.rm=T)
l_Am=quantile(graphEvent$amplitude, prob = prob, na.rm=T)
graphEvent2=graphEvent%>%filter(diff_time<=l_T[[2]], area<=l_A[[2]], amplitude>l_Am[[1]])
initialScatPlot(graphEvent2)
# OR a manual filter:
# Exploring the whole data set and deciding the limits:
head(as.data.frame(graphEvent%>%arrange(desc(as.numeric(diff_time)))),20)
head(as.data.frame(graphEvent%>%arrange(desc(as.numeric(area)))),10)
head(as.data.frame(graphEvent%>%arrange((as.numeric(amplitude)))),50)
# Change values here:
graphEvent2=graphEvent%>%filter(diff_time<100)
graphEvent2=graphEvent2%>%filter(as.numeric(area)<200)
graphEvent2=graphEvent2%>%filter(amplitude>0.87)
# Determine PI based on duration by separating the events into licks (area > 80), tastes (areas between 10 and 80)
# and touches (area less than 10 and amplitude less than 80)
# Unfiltered data
PI_fil_area(graphEvent)
# Filtered data
PI_fil_area(graphEvent2)
# Repeat for other DFM's.
###################################################################################
#### WORK IN PROGRESS!!!!
###################################
# Determine the total time of the test for each observation:
# use the df created with dfm_base_ma or dfm_base
df$time=as.numeric(difftime(df$ntime,df$ntime[1], units="mins"))
# Bin the data in four to correspond for each 30 mins of the test
df$bin=cut(df$time, 4, labels=c("0-0.5","0.5-1","1-1.5","1.5-2"))
# Sum the number observations with higher values than the thershold - not equal to events.
df%>%group_by(bin)%>%summarise_each(funs(Sum=sum(.>0)),-position,-ntime, -time)%>%select(-bin)
# Sum the previous data by columns
colSums(df%>%group_by(bin)%>%summarise_each(funs(Sum=sum(.>0)),-position,-ntime, -time)%>%select(-bin))
# The following two do sort of the same but had some other features.
df%>%group_by(bin)%>%select(W1)%>%summarize(total = n(), larger = sum(W1>0), ratio = larger / total)
# For each well column (names starting with "W"), print the column name and
# the number of readings above zero (raw observations, not merged events).
# seq_along() replaces 1:length() so zero matching columns means zero
# iterations instead of the bogus indices c(1, 0); the column selection is
# hoisted out of the loop since df does not change inside it.
well_cols <- df %>% select(matches('^W'))
for (i in seq_along(names(well_cols))){
  a <- well_cols %>% select(i) %>% filter(.!=0)
  print(names(a))
  print(count(a)[[1]])
}
###################################
# 3D plots of the data distribution
scatterplot3d(x=graphEvent$amplitude, y = graphEvent$diff_time, z= graphEvent$area)
# Create columns for color and pch codes
graphEvent2$colorCode <- ifelse(graphEvent2$amplitude>60,"green",
ifelse(graphEvent2$amplitude<20, "red",
"blue"))
graphEvent2$pch <- ifelse(graphEvent2$amplitude>60,1,
ifelse(graphEvent2$amplitude<20, 2,
3))
# Make the 3d plots:
graph_3d(graphEvent2)
###################################
## PLOTING INDIVIDUAL EVENTS EACH ON A SINGLE GRAPH
# Use the df data frame, and add the well to be analyzed.
df1=new_df(df,1)
df2=new_df(df,2)
df3=new_df(df,3)
df4=new_df(df,4)
df5=new_df(df,5)
df6=new_df(df,6)
df7=new_df(df,7)
df8=new_df(df,8)
df9=new_df(df,9)
df10=new_df(df,10)
df11=new_df(df,11)
df12=new_df(df,12)
# No sense plot with all events one after each other
limY=c(0,200)
h= 10 # approximate limits between touches and tastes, these are amplitudes and the limit is set in area
i= 80 # approximate limits between licks and tastes
par(mfrow=c(6,2), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df1$Events,df1$W1, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df2$Events,df2$W2, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df3$Events,df3$W3, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df4$Events,df4$W4, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df5$Events,df5$W5, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df6$Events,df6$W6, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df7$Events,df7$W7, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df8$Events,df8$W8, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df9$Events,df9$W9, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df10$Events,df10$W10, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df11$Events,df11$W11, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df12$Events,df12$W12, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
# count the number of "observations" on each event
df1 %>% group_by(Events)%>%summarise(obs.=n())
df2 %>% group_by(Events)%>%summarise(obs.=n())
df3 %>% group_by(Events)%>%summarise(obs.=n())
df4 %>% group_by(Events)%>%summarise(obs.=n())
df5 %>% group_by(Events)%>%summarise(obs.=n())
df6 %>% group_by(Events)%>%summarise(obs.=n())
df7 %>% group_by(Events)%>%summarise(obs.=n())
df8 %>% group_by(Events)%>%summarise(obs.=n())
df9 %>% group_by(Events)%>%summarise(obs.=n())
df10 %>% group_by(Events)%>%summarise(obs.=n())
df11 %>% group_by(Events)%>%summarise(obs.=n())
df12 %>% group_by(Events)%>%summarise(obs.=n())
# Calculate the time of each observation from the first one on each event, on events that have 2 or more observations.
rfa1=df3%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa2=df4%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa11=df11%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa12=df12%>%group_by(Events)%>%filter(n()>1)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
unique(rfa1$Events)
unique(rfa2$Events)
head(as.data.frame(rfa1%>%filter(Events==1)%>%mutate(totaltime=ntime-raw_df$ntime[1])), 2000)
head(as.data.frame(rfa2%>%filter(Events==4)%>%mutate(totaltime=ntime-raw_df$ntime[1])), 2000)
# Change the ylim with the max # of rows
max(rfa1$W3)
# PLOT EACH EVENT
par(mfrow=c(4,2), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
for (i in (unique(rfa1$Events))){
rfa2=(rfa1%>%filter(Events==i))
plot(rfa2$time_from_uno,rfa2$W3, type="l", col="black", ann = T, axes=T, ylim=c(0,205), labels = T, tck=0)
}
par(mfrow=c(2,3), cex=0.4, cex.axis=0.9)
e_df11=df11%>%filter(Events<=7)
for (i in (unique(e_df11$Events))){
nombre=paste("W11(Suc)_Event",i,sep="_")
rfa2=(e_df11%>%filter(Events==i))%>%mutate(time=ntime-ntime[1])
plot(rfa2$time,rfa2$W11, type="l", col="black", ann = T, axes=T, ylim=c(0,200), labels = T, tck=0, main=nombre)
}
par(mfrow=c(4,3), cex=0.4, cex.axis=0.9)
e_df12=df12%>%filter(Events<=12)
for (i in (unique(e_df12$Events))){
nombre=paste("W12(EtOH)_Event",i,sep="_")
rfa2=(e_df12%>%filter(Events==i))%>%mutate(time=ntime-ntime[1])
plot(rfa2$time,rfa2$W12, type="l", col="black", ann = T, axes=T, ylim=c(0,200), labels = T, tck=0,
main=nombre)
}
# Ploting a single event. Just change the number after the "==", and the df (rfa1)
sinEve=(rfa1%>%filter(Events==1))
plot(sinEve$time_from_uno,sinEve$W1, type="l", col="black", ann = T, axes=T, ylim=c(0,205), labels = T, tck=0)
###################################
### PI USING BREAKPOINTS
### Determine breakpoints in the data
###
# Generate graphs with the distributions to get a visual
# Sort the data by value
y= graphEvent
par(mfrow=c(3,2), cex=0.5, cex.lab=0.8, cex.axis=0.8)
# by Area
area_dist=y%>%arrange((area))%>%mutate(pos=row_number())
plot(area_dist$pos,(area_dist$area))
plot(area_dist$pos,log((area_dist$area)))
# by Amplitude
ampl_dist=y%>%arrange(amplitude)%>%mutate(pos=row_number())
plot(ampl_dist$pos,ampl_dist$amplitude)
plot(ampl_dist$pos,log(ampl_dist$amplitude))
# by Time
t_dist=y%>%arrange(diff_time)%>%mutate(pos=row_number())
plot(t_dist$pos,t_dist$diff_time)
plot(t_dist$pos,log(t_dist$diff_time))
# Detemrmine the optimal breakpoints in each distribution
# break=2 can be change to other values if desired
area.ts<- ts((area_dist$area))
vv_area=breakpoints(area.ts~1,breaks=2)$breakpoints
amplitude.ts<- ts((ampl_dist$amplitude))
vv_amplitude=breakpoints(amplitude.ts~1,breaks=2)$breakpoints
time.ts<- ts((t_dist$diff_time))
vv_time=breakpoints(time.ts~1,breaks=2)$breakpoints
auno=time.ts[vv_area[[1]]]
ados=time.ts[vv_area[[2]]]
par(mfrow=c(3,1), cex=0.5, cex.lab=0.8, cex.axis=0.8)
plot(t_dist$pos[t_dist$diff_time<auno], t_dist$diff_time[t_dist$diff_time<auno], ylim=c(0,8))
plot(t_dist$pos[t_dist$diff_time>auno & t_dist$diff_time<ados], t_dist$diff_time[t_dist$diff_time>auno & t_dist$diff_time<ados], ylim=c(0,8))
plot(t_dist$pos[t_dist$diff_time>ados], t_dist$diff_time[t_dist$diff_time>ados], ylim=c(0,8))
# These printouts will give an idea of the actual values.
breakPoints_data(area.ts,vv_area)
breakPoints_data(amplitude.ts,vv_amplitude)
breakPoints_data(time.ts,vv_time)
# Test the breakpoints
# May or may not match the point selected above
fs.test <- Fstats(area_dist$area ~1)
ff=breakpoints(fs.test)
ff$breakpoints
ff
plot(fs.test)
head(y)
lines(breakpoints(fs.test))
arrange(y,diff_time)
# PI by area
PI_area=all_PI(y,1)
# PI by amplitude
PI_amplitude=all_PI(graphEvent2,2)
# PI by time
PI_time=all_PI(graphEvent2,3)
PI_area[[1]]
PI_amplitude[[1]]
PI_time[[1]]
# Create columns for color and pch codes
graphEvent2$colorCode <- ifelse(graphEvent2$well=="W1" | graphEvent2$well=="W2" | graphEvent2$well=="W3" | graphEvent2$well=="W4" | graphEvent2$well=="W5" | graphEvent2$well=="W6","green",
"blue")
graphEvent2$pch <- ifelse(graphEvent2$well=="W1" | graphEvent2$well=="W2" | graphEvent2$well=="W3" | graphEvent2$well=="W4" | graphEvent2$well=="W5" | graphEvent2$well=="W6",19,
15)
# Make the 3d plots:
graph_3d(graphEvent2,1,"h")
### Additional options can be added here:
with(graphEvent2, {
s3d<-scatterplot3d(diff_time, area, amplitude, # x axis, y axis and z axis
color = colorCode, pch=pch, type = "h",
xlim=c(0,1000), ylim=c(0,1000), zlim=c(0,1000)
#main="3-D DFM"
)
s3d.coords <- s3d$xyz.convert(diff_time, area, amplitude) # convert 3D coords to 2D projection
text(s3d.coords$x, s3d.coords$y, # x and y coordinates
labels=row.names(graphEvent2), # text to plot
cex=.5, pos=4) # shrink text 50% and place to right of points)
})
## SINGLE 3dPLot
rf <- colorRampPalette(rev(brewer.pal(11,'RdYlBu')))
r <- rf(nrow(graphEvent2))
with(graphEvent2, {
scatterplot3d(area, diff_time, amplitude, # x axis, y axis and z axis
color = r ,pch=19,
main="3-D DFM")
})
# Preference index (PI) per well using event duration combined with area.
#
# Classification (per the stated rules):
#   * licks  : duration >= 600 msec (0.6 s), or 400-600 msec with area > 15
#   * tastes : duration  < 400 msec, or 400-600 msec with area < 15
# Events of 200 msec or less are discarded as noise before classifying.
#
# @param df1 event data frame with columns `well`, `diff_time` (sec), `area`
# @return list(PIs = one-row data frame of lick and taste PIs via PI_calc(),
#              Events_count = per-well lick/taste counts via mergeDF())
PI_Adrian <- function(df1){
  df <- df1 %>% filter(diff_time > 0.2)
  # BUG FIX: ">=" (was ">") so events lasting exactly 600 msec are licks,
  # matching the rule "more or equal to 600msec"; previously such events
  # fell into neither class and were silently dropped.
  licks <- df %>%
    filter(diff_time >= 0.6 | (diff_time < 0.6 & diff_time > 0.4 & area > 15)) %>%
    group_by(well) %>%
    summarize(licks = n())
  # NOTE(review): events of exactly 0.4 s, or 0.4-0.6 s with area exactly 15,
  # still match neither class — confirm whether that is intended.
  tastes <- df %>%
    filter((diff_time < 0.6 & diff_time > 0.4 & area < 15 | diff_time < 0.4)) %>%
    group_by(well) %>%
    summarize(tastes = n())
  mylist <- list(licks, tastes)
  resPI <- list(PIs = cbind.data.frame(PI_calc(licks), PI_calc(tastes)),
                Events_count = mergeDF(mylist))
  # paste() with a single argument and sep is a no-op; kept for compatibility
  names(resPI[[1]]) <- paste(colnames(resPI[[1]]), sep = "_")
  names(resPI[[2]]) <- paste(colnames(resPI[[2]]), sep = "_")
  resPI
}
| /FLIC/my_FLIC.R | no_license | jpinzonc/Sleep_in_Drosophila | R | false | false | 14,190 | r | # Clean up R environment - This is not necesary, but I like to do it.
rm(list = ls())
# LOAD FUNCTIONS FROM myFLIC.R
setwd("/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC")
source("myFLIC.R")
############################################################
############################################################
############################################################
############################################################
# FAST RUN:
# Change the Threshold appropiately (e.g. change 0 to other number such as 1)
# 1. Set the path:
# 1.a. Manually:
path_to_files="/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC/60707m1/"
# 1.b. Using the select folder option:
path_to_files=select_folder()
# 2. Get the PI's:
# 2.a. Based on number of events (e.g. counts)
sum_PI(path_to_files,0,1,1)
# 2.b. Based on the duration of the events
sum_PI(path_to_files,0,2,1)
# 2.c. Get duration and counts for each well on each DFM
sum_PI(path_to_files,0,3,1)
sum_PI(path_to_files,0,3,4)
# STEP BY STEP - looks into individual DFM's
# NOT ALL FUNCTIONS ARE COMPLETE
# 1. Set the path to the directory you want:
setwd("/Users/jpinzon/Documents/UTSW/Projects/Jmjc Project/03 Feeding preference/00 FLIC/60414m1")
# 2. Open the DFM file of interest
DFM="DFM_3.csv"
raw_df = read_data_flic(DFM)
# 3. Remove the baseline.
# 3.a. Using average of all data:
#baseLine=baseLineCal(raw_df)
#df=dfm_base(raw_df,baseLine)
# 3.b. Using moving average
# Set to ~5 mins
df=dfm_base_ma(raw_df, 0)
# Select only the first 30 mins of the run (a.k.a. 1800 sec)
df1=df%>%arrange(ntime)%>%filter(ntime-ntime[1]<1800)
# Calculate Area, amplitude and time of all events in the DFM
# This data frame can be used to calculate the PI's
graphEvent= allEvents(df)
initialScatPlot(graphEvent)
# Saving the data.frame graphEvent into a file (change the name accordingly)
#write.csv(graphEvent, file = "suc340-suc340E15-stv5h_60502_ma.csv")
# You can further filter the data
# Using quatiles:
prob = c(0.01,0.99)
l_T=quantile(as.numeric(graphEvent$diff_time), prob = prob, na.rm=T)
l_A=quantile(graphEvent$area, prob = prob, na.rm=T)
l_Am=quantile(graphEvent$amplitude, prob = prob, na.rm=T)
graphEvent2=graphEvent%>%filter(diff_time<=l_T[[2]], area<=l_A[[2]], amplitude>l_Am[[1]])
initialScatPlot(graphEvent2)
# OR a manual filter:
# Exploring the whole data set and deciding the limits:
head(as.data.frame(graphEvent%>%arrange(desc(as.numeric(diff_time)))),20)
head(as.data.frame(graphEvent%>%arrange(desc(as.numeric(area)))),10)
head(as.data.frame(graphEvent%>%arrange((as.numeric(amplitude)))),50)
# Change values here:
graphEvent2=graphEvent%>%filter(diff_time<100)
graphEvent2=graphEvent2%>%filter(as.numeric(area)<200)
graphEvent2=graphEvent2%>%filter(amplitude>0.87)
# Determine PI based on duration by separating the events into licks (area > 80), tastes (areas between 10 and 80)
# and touches (area less than 10 and amplitude less than 80)
# Unfiltered data
PI_fil_area(graphEvent)
# Filtered data
PI_fil_area(graphEvent2)
# Repeat for other DFM's.
###################################################################################
#### WORK IN PROGRESS!!!!
###################################
# Determine the total time of the test for each observation:
# use the df created with dfm_base_ma or dfm_base
df$time=as.numeric(difftime(df$ntime,df$ntime[1], units="mins"))
# Bin the data in four to correspond for each 30 mins of the test
df$bin=cut(df$time, 4, labels=c("0-0.5","0.5-1","1-1.5","1.5-2"))
# Sum the number observations with higher values than the thershold - not equal to events.
df%>%group_by(bin)%>%summarise_each(funs(Sum=sum(.>0)),-position,-ntime, -time)%>%select(-bin)
# Sum the previous data by columns
colSums(df%>%group_by(bin)%>%summarise_each(funs(Sum=sum(.>0)),-position,-ntime, -time)%>%select(-bin))
# The following two do sort of the same but had some other features.
df%>%group_by(bin)%>%select(W1)%>%summarize(total = n(), larger = sum(W1>0), ratio = larger / total)
# For each well column (names starting with "W"), print the column name and
# the number of readings above zero (raw observations, not merged events).
# seq_along() replaces 1:length() so zero matching columns means zero
# iterations instead of the bogus indices c(1, 0); the column selection is
# hoisted out of the loop since df does not change inside it.
well_cols <- df %>% select(matches('^W'))
for (i in seq_along(names(well_cols))){
  a <- well_cols %>% select(i) %>% filter(.!=0)
  print(names(a))
  print(count(a)[[1]])
}
###################################
# 3D plots of the data distribution
scatterplot3d(x=graphEvent$amplitude, y = graphEvent$diff_time, z= graphEvent$area)
# Create columns for color and pch codes
graphEvent2$colorCode <- ifelse(graphEvent2$amplitude>60,"green",
ifelse(graphEvent2$amplitude<20, "red",
"blue"))
graphEvent2$pch <- ifelse(graphEvent2$amplitude>60,1,
ifelse(graphEvent2$amplitude<20, 2,
3))
# Make the 3d plots:
graph_3d(graphEvent2)
###################################
## PLOTING INDIVIDUAL EVENTS EACH ON A SINGLE GRAPH
# Use the df data frame, and add the well to be analyzed.
df1=new_df(df,1)
df2=new_df(df,2)
df3=new_df(df,3)
df4=new_df(df,4)
df5=new_df(df,5)
df6=new_df(df,6)
df7=new_df(df,7)
df8=new_df(df,8)
df9=new_df(df,9)
df10=new_df(df,10)
df11=new_df(df,11)
df12=new_df(df,12)
# No sense plot with all events one after each other
limY=c(0,200)
h= 10 # approximate limits between touches and tastes, these are amplitudes and the limit is set in area
i= 80 # approximate limits between licks and tastes
par(mfrow=c(6,2), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df1$Events,df1$W1, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df2$Events,df2$W2, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df3$Events,df3$W3, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df4$Events,df4$W4, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df5$Events,df5$W5, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df6$Events,df6$W6, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df7$Events,df7$W7, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df8$Events,df8$W8, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df9$Events,df9$W9, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df10$Events,df10$W10, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
#par(mfrow=c(2,1), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
plot(df11$Events,df11$W11, type="h", main="A", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
plot(df12$Events,df12$W12, type="h", main="B", ylim=limY)
abline(h=h, col="red")
abline(h=i, col="blue")
# count the number of "observations" on each event
df1 %>% group_by(Events)%>%summarise(obs.=n())
df2 %>% group_by(Events)%>%summarise(obs.=n())
df3 %>% group_by(Events)%>%summarise(obs.=n())
df4 %>% group_by(Events)%>%summarise(obs.=n())
df5 %>% group_by(Events)%>%summarise(obs.=n())
df6 %>% group_by(Events)%>%summarise(obs.=n())
df7 %>% group_by(Events)%>%summarise(obs.=n())
df8 %>% group_by(Events)%>%summarise(obs.=n())
df9 %>% group_by(Events)%>%summarise(obs.=n())
df10 %>% group_by(Events)%>%summarise(obs.=n())
df11 %>% group_by(Events)%>%summarise(obs.=n())
df12 %>% group_by(Events)%>%summarise(obs.=n())
# Calculate the time of each observation from the first one on each event, on events that have 2 or more observations.
rfa1=df3%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa2=df4%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa11=df11%>%group_by(Events)%>%filter(n()>2)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
rfa12=df12%>%group_by(Events)%>%filter(n()>1)%>%arrange(ntime)%>%
mutate(time_from_uno = ntime-min(ntime))
unique(rfa1$Events)
unique(rfa2$Events)
head(as.data.frame(rfa1%>%filter(Events==1)%>%mutate(totaltime=ntime-raw_df$ntime[1])), 2000)
head(as.data.frame(rfa2%>%filter(Events==4)%>%mutate(totaltime=ntime-raw_df$ntime[1])), 2000)
# Change the ylim with the max # of rows
max(rfa1$W3)
# PLOT EACH EVENT
par(mfrow=c(4,2), cex=0.5, cex.axis=0.9, cex.lab=0.9, bg="white", tcl=-0.1)
for (i in (unique(rfa1$Events))){
rfa2=(rfa1%>%filter(Events==i))
plot(rfa2$time_from_uno,rfa2$W3, type="l", col="black", ann = T, axes=T, ylim=c(0,205), labels = T, tck=0)
}
par(mfrow=c(2,3), cex=0.4, cex.axis=0.9)
e_df11=df11%>%filter(Events<=7)
for (i in (unique(e_df11$Events))){
nombre=paste("W11(Suc)_Event",i,sep="_")
rfa2=(e_df11%>%filter(Events==i))%>%mutate(time=ntime-ntime[1])
plot(rfa2$time,rfa2$W11, type="l", col="black", ann = T, axes=T, ylim=c(0,200), labels = T, tck=0, main=nombre)
}
par(mfrow=c(4,3), cex=0.4, cex.axis=0.9)
e_df12=df12%>%filter(Events<=12)
for (i in (unique(e_df12$Events))){
nombre=paste("W12(EtOH)_Event",i,sep="_")
rfa2=(e_df12%>%filter(Events==i))%>%mutate(time=ntime-ntime[1])
plot(rfa2$time,rfa2$W12, type="l", col="black", ann = T, axes=T, ylim=c(0,200), labels = T, tck=0,
main=nombre)
}
# Ploting a single event. Just change the number after the "==", and the df (rfa1)
sinEve=(rfa1%>%filter(Events==1))
plot(sinEve$time_from_uno,sinEve$W1, type="l", col="black", ann = T, axes=T, ylim=c(0,205), labels = T, tck=0)
###################################
### PI USING BREAKPOINTS
### Determine breakpoints in the data
###
# Generate graphs with the distributions to get a visual
# Sort the data by value
y= graphEvent
par(mfrow=c(3,2), cex=0.5, cex.lab=0.8, cex.axis=0.8)
# by Area
area_dist=y%>%arrange((area))%>%mutate(pos=row_number())
plot(area_dist$pos,(area_dist$area))
plot(area_dist$pos,log((area_dist$area)))
# by Amplitude
ampl_dist=y%>%arrange(amplitude)%>%mutate(pos=row_number())
plot(ampl_dist$pos,ampl_dist$amplitude)
plot(ampl_dist$pos,log(ampl_dist$amplitude))
# by Time
t_dist=y%>%arrange(diff_time)%>%mutate(pos=row_number())
plot(t_dist$pos,t_dist$diff_time)
plot(t_dist$pos,log(t_dist$diff_time))
# Detemrmine the optimal breakpoints in each distribution
# break=2 can be change to other values if desired
area.ts<- ts((area_dist$area))
vv_area=breakpoints(area.ts~1,breaks=2)$breakpoints
amplitude.ts<- ts((ampl_dist$amplitude))
vv_amplitude=breakpoints(amplitude.ts~1,breaks=2)$breakpoints
time.ts<- ts((t_dist$diff_time))
vv_time=breakpoints(time.ts~1,breaks=2)$breakpoints
auno=time.ts[vv_area[[1]]]
ados=time.ts[vv_area[[2]]]
par(mfrow=c(3,1), cex=0.5, cex.lab=0.8, cex.axis=0.8)
plot(t_dist$pos[t_dist$diff_time<auno], t_dist$diff_time[t_dist$diff_time<auno], ylim=c(0,8))
plot(t_dist$pos[t_dist$diff_time>auno & t_dist$diff_time<ados], t_dist$diff_time[t_dist$diff_time>auno & t_dist$diff_time<ados], ylim=c(0,8))
plot(t_dist$pos[t_dist$diff_time>ados], t_dist$diff_time[t_dist$diff_time>ados], ylim=c(0,8))
# These printouts will give an idea of the actual values.
breakPoints_data(area.ts,vv_area)
breakPoints_data(amplitude.ts,vv_amplitude)
breakPoints_data(time.ts,vv_time)
# Test the breakpoints
# May or may not match the point selected above
fs.test <- Fstats(area_dist$area ~1)
ff=breakpoints(fs.test)
ff$breakpoints
ff
plot(fs.test)
head(y)
lines(breakpoints(fs.test))
arrange(y,diff_time)
# Preference-index (PI) calculations via the project helper all_PI();
# its second argument apparently selects the measure (1=area, 2=amplitude,
# 3=time) -- confirm against the helper's definition.
# PI by area
PI_area=all_PI(y,1)
# PI by amplitude
PI_amplitude=all_PI(graphEvent2,2)
# PI by time
PI_time=all_PI(graphEvent2,3)
# NOTE(review): PI_area is computed from `y` while the other two use
# graphEvent2 -- confirm this asymmetry is intentional.
PI_area[[1]]
PI_amplitude[[1]]
PI_time[[1]]
# Create columns for color and pch codes:
# wells W1-W6 are drawn as green filled circles (pch 19), all other wells
# as blue filled squares (pch 15).
graphEvent2$colorCode <- ifelse(graphEvent2$well=="W1" | graphEvent2$well=="W2" | graphEvent2$well=="W3" | graphEvent2$well=="W4" | graphEvent2$well=="W5" | graphEvent2$well=="W6","green",
"blue")
graphEvent2$pch <- ifelse(graphEvent2$well=="W1" | graphEvent2$well=="W2" | graphEvent2$well=="W3" | graphEvent2$well=="W4" | graphEvent2$well=="W5" | graphEvent2$well=="W6",19,
15)
# Make the 3d plots (graph_3d is a project helper defined elsewhere):
graph_3d(graphEvent2,1,"h")
### Additional options can be added here:
with(graphEvent2, {
s3d<-scatterplot3d(diff_time, area, amplitude, # x axis, y axis and z axis
color = colorCode, pch=pch, type = "h",
xlim=c(0,1000), ylim=c(0,1000), zlim=c(0,1000)
#main="3-D DFM"
)
s3d.coords <- s3d$xyz.convert(diff_time, area, amplitude) # convert 3D coords to 2D projection
text(s3d.coords$x, s3d.coords$y, # x and y coordinates
labels=row.names(graphEvent2), # text to plot
cex=.5, pos=4) # shrink text 50% and place to right of points)
})
## SINGLE 3dPLot
# Color points along a reversed RdYlBu ramp, one color per row of graphEvent2.
rf <- colorRampPalette(rev(brewer.pal(11,'RdYlBu')))
r <- rf(nrow(graphEvent2))
with(graphEvent2, {
scatterplot3d(area, diff_time, amplitude, # x axis, y axis and z axis
color = r ,pch=19,
main="3-D DFM")
})
PI_Adrian <- function(df1) {
  # Preference index (PI) using event duration combined with event area.
  # Classification rules (per the original analysis notes):
  #  - licks:  duration >= 0.6 s, or 0.4 s <= duration < 0.6 s with area > 15
  #  - tastes: duration < 0.4 s, or 0.4 s <= duration < 0.6 s with area <= 15
  # Events with duration <= 0.2 s are discarded as noise.
  # Fixes vs. the original: boundaries now match the stated rules -- events
  # with duration exactly 0.4 s or 0.6 s, or area exactly 15, were previously
  # classified as neither lick nor taste.
  # Relies on PI_calc() and mergeDF(), defined elsewhere in this project.
  # Returns a list with elements PIs (data frame) and Events_count.
  df <- df1 %>% filter(diff_time > 0.2)
  licks <- df %>%
    filter(diff_time >= 0.6 | (diff_time >= 0.4 & diff_time < 0.6 & area > 15)) %>%
    group_by(well) %>%
    summarize(licks = n())
  tastes <- df %>%
    filter(diff_time < 0.4 | (diff_time >= 0.4 & diff_time < 0.6 & area <= 15)) %>%
    group_by(well) %>%
    summarize(tastes = n())
  mylist <- list(licks, tastes)
  resPI <- list(PIs = cbind.data.frame(PI_calc(licks), PI_calc(tastes)),
                Events_count = mergeDF(mylist))
  # NOTE(review): paste(x, sep = "_") on a single argument is a no-op; kept
  # for backward compatibility -- possibly a suffix argument was intended.
  names(resPI[[1]]) <- paste(colnames(resPI[[1]]), sep = "_")
  names(resPI[[2]]) <- paste(colnames(resPI[[2]]), sep = "_")
  resPI
}
|
# Download Electric power consumption data from the link provided by the
# instructor, unzip the file, and save household_power_consumption.txt under
# folder data. This script plots Global Active Power vs. time for two days.
data <- read.csv2("./data/household_power_consumption.txt", as.is = TRUE,
                  na.strings = "?")
# Keep only the dates 2007-02-01 and 2007-02-02.
data$Date <- as.Date(data$Date, "%d/%m/%Y")
data <- data[data$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# Columns 3-9 are measurements read as character; coerce them to numeric.
for (i in 3:9) data[, i] <- as.numeric(data[, i])
# Plot Global Active Power ~ Time and save it as plot2.png.
# (Fixed axis-label typo: "Kilomatts" -> "kilowatts".)
png("plot2.png", width = 480, height = 480)
with(data, plot(strptime(paste(Date, Time), "%Y-%m-%d %H:%M:%S"),
                Global_active_power, type = "l", xlab = "",
                ylab = "Global Active Power (kilowatts)"))
dev.off()
| /plot2.R | no_license | LuLuDS/ExData_Plotting1 | R | false | false | 832 | r | # Download Electric power consumption data from the link provided by the
# instructor-provided dataset: household_power_consumption.txt under ./data
# (see the course download link). This script draws Global Active Power
# against time for the two selected days.
power <- read.csv2("./data/household_power_consumption.txt", as.is = TRUE,
                   na.strings = "?")
# Restrict the rows to 2007-02-01 and 2007-02-02.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
keep <- power$Date %in% as.Date(c("2007-02-01", "2007-02-02"))
power <- power[keep, ]
# Measurement columns 3 through 9 arrive as character; make them numeric.
for (col in 3:9) power[, col] <- as.numeric(power[, col])
# Build full timestamps, then render and save the line plot.
timestamps <- strptime(paste(power$Date, power$Time), "%Y-%m-%d %H:%M:%S")
png("plot2.png", width = 480, height = 480)
plot(timestamps, power$Global_active_power, type = "l", xlab = "",
     ylab = "Global Active Power (Kilomatts)")
dev.off()
|
# Load and preprocess PBMC4K data:
# download the 10x PBMC4K dataset, remove ambient-RNA contamination with
# decontX, select variable features with Seurat, and save the annotated
# SingleCellExperiment to disk.
library(TENxPBMCData)
library(celda)
library(Seurat)
pbmc4k <- TENxPBMCData(dataset = "pbmc4k")
# Densify the sparse count matrix for downstream tools.
counts(pbmc4k) <- as.matrix(counts(pbmc4k))
colnames(pbmc4k) <- colData(pbmc4k)$Sequence
# Estimate and remove ambient-RNA contamination (celda::decontX).
pbmc4kdec <- decontX(pbmc4k)
decontXcounts(pbmc4kdec) <- as.matrix(decontXcounts(pbmc4kdec))
# Run Seurat on the decontaminated counts to identify variable features.
pbmc4kseurat <- CreateSeuratObject(counts = decontXcounts(pbmc4kdec),
project = "pbmc4kdec", min.cells = 3, min.features = 200)
pbmc4kseurat <- NormalizeData(pbmc4kseurat)
pbmc4kseurat <- FindVariableFeatures(pbmc4kseurat)
vf1 <- VariableFeatures(pbmc4kseurat)
# Control experiment (kept for reference): the unfiltered object yields the
# identical variable-feature set.
# pbmc4kseurat2 <- CreateSeuratObject(counts = decontXcounts(pbmc4kdec),
# project = "pbmc4kdec", min.cells = 0, min.features = 0)
# pbmc4kseurat2 <- FindVariableFeatures(pbmc4kseurat2)
# vf2 <- VariableFeatures(pbmc4kseurat)
# all(vf1 == vf2)
#[1] TRUE
# Make gene symbols unique so they can be used as rownames
# (duplicated symbols get a ".1" suffix).
rd <- rowData(pbmc4kdec)
rd$runique <- rd$Symbol_TENx
rd[duplicated(rd$Symbol_TENx), "runique"] <-
paste0(rd[duplicated(rd$Symbol_TENx), "runique"], ".1")
rowData(pbmc4kdec) <- rd
rownames(pbmc4kdec) <- rowData(pbmc4kdec)$runique
# Store the variable-feature subset as an alternative experiment, then save.
pbmc4kf <- pbmc4kdec[rowData(pbmc4kdec)$ENSEMBL_ID %in% vf1, ]
rownames(pbmc4kf) <- rowData(pbmc4kf)$Symbol_TENx
altExp(pbmc4kdec, "featureSubset") <- pbmc4kf
saveRDS(pbmc4kdec, file = "../data/pbmc4kdec.rds")
| /Celda/R/pbmc4kdec.R | permissive | campbio/Manuscripts | R | false | false | 1,320 | r | # Load and preprocess PBMC4K data
library(TENxPBMCData)
library(celda)
library(Seurat)
# Download the PBMC4K dataset and densify its sparse count matrix.
pbmc4k <- TENxPBMCData(dataset = "pbmc4k")
counts(pbmc4k) <- as.matrix(counts(pbmc4k))
colnames(pbmc4k) <- colData(pbmc4k)$Sequence
# Remove ambient-RNA contamination with celda::decontX.
pbmc4kdec <- decontX(pbmc4k)
decontXcounts(pbmc4kdec) <- as.matrix(decontXcounts(pbmc4kdec))
# Run Seurat on the decontaminated counts to select variable features.
seurat_obj <- CreateSeuratObject(counts = decontXcounts(pbmc4kdec),
                                 project = "pbmc4kdec",
                                 min.cells = 3, min.features = 200)
seurat_obj <- NormalizeData(seurat_obj)
seurat_obj <- FindVariableFeatures(seurat_obj)
var_feats <- VariableFeatures(seurat_obj)
# (A control run with min.cells = 0 / min.features = 0 produced the same
# variable-feature set, so only the filtered object is kept.)
# Make gene symbols unique so they can serve as rownames.
row_dat <- rowData(pbmc4kdec)
row_dat$runique <- row_dat$Symbol_TENx
dup_mask <- duplicated(row_dat$Symbol_TENx)
row_dat[dup_mask, "runique"] <- paste0(row_dat[dup_mask, "runique"], ".1")
rowData(pbmc4kdec) <- row_dat
rownames(pbmc4kdec) <- rowData(pbmc4kdec)$runique
# Keep the variable-feature subset as an alternative experiment and save.
feat_subset <- pbmc4kdec[rowData(pbmc4kdec)$ENSEMBL_ID %in% var_feats, ]
rownames(feat_subset) <- rowData(feat_subset)$Symbol_TENx
altExp(pbmc4kdec, "featureSubset") <- feat_subset
saveRDS(pbmc4kdec, file = "../data/pbmc4kdec.rds")
## plot 3
# Uncomment to set the source directory:
#setwd("D:/Temporal/coursera/")
# English locale so weekday labels on the time axis render in English.
Sys.setlocale("LC_ALL", "English")
raw <- read.csv2("./household_power_consumption.txt", header = TRUE,
                 sep = ";", dec = ".", na.strings = "?")
# Keep only the two dates of interest.
two_days <- raw[raw$Date %in% c("1/2/2007", "2/2/2007"), ]
# Build full timestamps from the separate date and time columns.
stamps <- strptime(paste(two_days$Date, two_days$Time, sep = " "),
                   "%d/%m/%Y %H:%M:%S")
png("plot3.png", width = 480, height = 480)
# Draw the three sub-metering series on one set of axes.
plot(stamps, two_days$Sub_metering_1, type = "l", xlab = "",
     ylab = "Energy sub metering")
lines(stamps, two_days$Sub_metering_2, type = "l", col = "red")
lines(stamps, two_days$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, lwd = 2, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off() | /plot3.R | no_license | JavierPy/ExData_Plotting1 | R | false | false | 848 | r | ## plot 3
# uncomment to set the source directory
#setwd("D:/Temporal/coursera/")
Sys.setlocale("LC_ALL", "English")
datos <- read.csv2("./household_power_consumption.txt",header = TRUE,
sep=";", dec = ".", na.strings = "?")
## only two dates
o2d <-datos[datos$Date %in% c("1/2/2007","2/2/2007") ,]
# concatenate date and time
datetime <- strptime(paste(o2d$Date, o2d$Time, sep = " "),"%d/%m/%Y %H:%M:%S")
png("plot3.png", width=480, height=480)
# plot 1
plot(datetime, o2d$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
#plot 2
lines(datetime, o2d$Sub_metering_2,type = "l", col='red')
#plot 3
lines(datetime, o2d$Sub_metering_3,type = "l", col='blue')
# labels
legend("topright", lty = 1, lwd=2, col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3") )
dev.off() |
#'
#'@title Calculate abundance and biomass size comps by year, east/west of 166W, and other factors from a size comps-by-stratum data frame or csv file.
#'
#'@description This function estimates size comps for abundance and biomass by year, east/west of 166W, from a size comps-by-stratum data frame or csv file.
#'
#'@param tbl : data frame with size comps by stratum info from call to [calcSizeComps.ByStratum], or a csv filename with size comps by stratum info, or NULL (user is prompted for a file)
#'@param strata_toEW166 : data frame w/ conversion from original strata to EW166 strata
#'@param export : boolean flag to write results to csv file
#'@param out.csv : output file name
#'@param out.dir : output file directory
#'@param verbosity : integer flag indicating level of printed output (0=off,1=minimal,2=full)
#'
#'@return Dataframe w/ estimates of abundance and biomass size comps by year, strata as east/west of 166W, and other factors. Columns are \cr
#'\itemize{
#'\item YEAR = survey year
#'\item STRATUM = 'EAST' or 'WEST' of 166W
#'\item STRATUM_AREA = area of stratum
#'\item other user-defined factors (e.g., sex, shell_condition)
#'\item SIZE
#'\item numStations = number of stations included
#'\item numHauls = number of hauls included
#'\item numNonZeroHauls = number of non-zero hauls included
#'\item numIndivs = number of individuals sampled
#'\item totABUNDANCE = estimated abundance
#'\item totBIOMASS = estimated biomass
#'}
#'
#'@details Note: if tbl is NULL (neither a data frame nor a filename), the user is prompted to select a csv file with size comps by stratum info. \cr
#'Notes: \cr
#'\itemize{
#'\item Area is in square nautical miles
#'\item Abundance is in 10^6 individuals
#'\item Biomass is in 10^3 mt
#'}
#'
#' @importFrom sqldf sqldf
#' @importFrom wtsUtilities selectFile
#'
#'@export
#'
calcSizeComps.EW166<-function(tbl=NULL,
                              strata_toEW166=tcsamSurveyData::Codes.TrawlSurvey()[["strata.EW166"]],
                              export=FALSE,
                              out.csv='SurveySizeComps.EW166.csv',
                              out.dir=NULL,
                              verbosity=0){
  if (verbosity>1) cat("starting calcSizeComps.EW166\n");
  in.csv<-NULL;
  #--resolve the input: data frame, filename, or interactive file selection
  if (!is.data.frame(tbl)){
    if (!is.character(tbl)) {
      in.csv<-selectFile(ext="csv",caption="Select csv file with size comps-by-stratum info");
      if (is.null(in.csv)||(in.csv=='')) return(NULL);#--scalar '||' (was vectorized '|') in scalar context
    } else {
      in.csv<-tbl;#tbl is a filename
    }
    if (verbosity>1) cat("Reading csv file for size comps-by-stratum info.\n",sep='')
    tbl<-read.csv(in.csv,stringsAsFactors=FALSE);
    if (verbosity>1) cat("Done reading input csv file.\n")
  }
  #--default the output folder to the input file's folder (or '.')
  if (is.null(out.dir)) {
    out.dir<-dirname(file.path('.'));
    if (!is.null(in.csv)) {out.dir<-dirname(file.path(in.csv));}
  }
  if (verbosity>0) cat("Output directory for calcSizeComps.EW166 will be '",out.dir,"'\n",sep='');
  #determine columns of size comps by stratum table
  nc0f<-9;#number of columns if SIZE is the only 'factor' in the table
  cols<-names(tbl);
  nc<-length(cols);
  if (nc==nc0f){cols<-'';} else
  {cols<-cols[4:(3+nc-nc0f)];}#extract factor columns (including SIZE)
  #--map original strata to their EW166 strata (keeps one row per original stratum)
  qry<-"select
t.YEAR,
s.revd as newSTRATUM,
t.STRATUM as oldSTRATUM,
STRATUM_AREA&&cols,
numStations,
numHauls,
numNonZeroHauls,
numIndivs,
totABUNDANCE,
totBIOMASS
from
tbl as t,
strata_toEW166 as s
where
t.STRATUM=s.orig
order by
t.YEAR,s.revd,t.STRATUM&&cols;"
  if (nc==nc0f) {#--was a hard-coded 9; use nc0f for consistency
    qry<-gsub("&&cols",'',qry);
  } else {
    qry<-gsub("&&cols",paste(',t.',cols,collapse="",sep=''),qry);
  }
  if (verbosity>1) cat("\nquery is:\n",qry,"\n");
  tbl2<-sqldf::sqldf(qry);
  #--aggregate over the original strata within each EW166 stratum
  qry<-"select
YEAR,
newSTRATUM as STRATUM,
sum(STRATUM_AREA) as STRATUM_AREA&&cols,
sum(numStations) as numStations,
sum(numHauls) as numHauls,
sum(numNonZeroHauls) as numNonZeroHauls,
sum(numIndivs) as numIndivs,
sum(totABUNDANCE) as totABUNDANCE,
sum(totBIOMASS) as totBIOMASS
from
tbl2
group by
YEAR,newSTRATUM&&cols
order by
YEAR,newSTRATUM&&cols;"
  if (nc==nc0f) {
    qry<-gsub("&&cols",'',qry);
  } else {
    qry<-gsub("&&cols",paste(',',cols,collapse="",sep=''),qry);
  }
  if (verbosity>1) cat("\nquery is:\n",qry,"\n");
  tbl1<-sqldf::sqldf(qry);
  #--NOTE(review): negative counts appear to flag missing data; summing them
  #--can produce values < -1, so they are clamped back to -1 here -- confirm.
  idx = tbl1$numNonZeroHauls<0;
  tbl1$numNonZeroHauls[idx] = -1;
  #--optionally export the result, creating the output folder if necessary
  if (export){
    if (!is.null(out.dir)){
      if (verbosity>1) cat("\nTesting existence of folder '",out.dir,"'\n",sep='')
      if (!file.exists(out.dir)){
        if (verbosity>0) cat("Creating folder '",out.dir,"' for output.\n",sep='')
        dir.create(out.dir);
      } else {
        if (verbosity>0) cat("Using folder '",out.dir,"' for output.\n",sep='')
      }
      out.csv<-file.path(out.dir,out.csv)
    }
    write.csv(tbl1,out.csv,na='',row.names=FALSE);
  }
  if (verbosity>1) cat("finished calcSizeComps.EW166\n");
  return(tbl1)
}
| /R/calcSizeComps.EW166.R | permissive | wStockhausen/tcsamSurveyData | R | false | false | 5,454 | r | #'
#'
#'@title Calculate abundance and biomass size comps by year, east/west of 166W, and other factors from a size comps-by-stratum data frame or csv file.
#'
#'@description This function estimates size comps for abundance and biomass by year, east/west of 166W, from a size comps-by-stratum data frame or csv file.
#'
#'@param tbl : data frame with size comps by stratum info from call to [calcSizeComps.ByStratum], or a csv filename with size comps by stratum info, or NULL (user is prompted for a file)
#'@param strata_toEW166 : data frame w/ conversion from original strata to EW166 strata
#'@param export : boolean flag to write results to csv file
#'@param out.csv : output file name
#'@param out.dir : output file directory
#'@param verbosity : integer flag indicating level of printed output (0=off,1=minimal,2=full)
#'
#'@return Dataframe w/ estimates of abundance and biomass size comps by year, strata as east/west of 166W, and other factors. Columns are \cr
#'\itemize{
#'\item YEAR = survey year
#'\item STRATUM = 'EAST' or 'WEST' of 166W
#'\item STRATUM_AREA = area of stratum
#'\item other user-defined factors (e.g., sex, shell_condition)
#'\item SIZE
#'\item numStations = number of stations included
#'\item numHauls = number of hauls included
#'\item numNonZeroHauls = number of non-zero hauls included
#'\item numIndivs = number of individuals sampled
#'\item totABUNDANCE = estimated abundance
#'\item totBIOMASS = estimated biomass
#'}
#'
#'@details Note: if tbl is NULL (neither a data frame nor a filename), the user is prompted to select a csv file with size comps by stratum info. \cr
#'Notes: \cr
#'\itemize{
#'\item Area is in square nautical miles
#'\item Abundance is in 10^6 individuals
#'\item Biomass is in 10^3 mt
#'}
#'
#' @importFrom sqldf sqldf
#' @importFrom wtsUtilities selectFile
#'
#'@export
#'
calcSizeComps.EW166<-function(tbl=NULL,
                              strata_toEW166=tcsamSurveyData::Codes.TrawlSurvey()[["strata.EW166"]],
                              export=FALSE,
                              out.csv='SurveySizeComps.EW166.csv',
                              out.dir=NULL,
                              verbosity=0){
  if (verbosity>1) cat("starting calcSizeComps.EW166\n");
  in.csv<-NULL;
  #--resolve the input: data frame, filename, or interactive file selection
  if (!is.data.frame(tbl)){
    if (!is.character(tbl)) {
      in.csv<-selectFile(ext="csv",caption="Select csv file with size comps-by-stratum info");
      if (is.null(in.csv)||(in.csv=='')) return(NULL);#--scalar '||' (was vectorized '|') in scalar context
    } else {
      in.csv<-tbl;#tbl is a filename
    }
    if (verbosity>1) cat("Reading csv file for size comps-by-stratum info.\n",sep='')
    tbl<-read.csv(in.csv,stringsAsFactors=FALSE);
    if (verbosity>1) cat("Done reading input csv file.\n")
  }
  #--default the output folder to the input file's folder (or '.')
  if (is.null(out.dir)) {
    out.dir<-dirname(file.path('.'));
    if (!is.null(in.csv)) {out.dir<-dirname(file.path(in.csv));}
  }
  if (verbosity>0) cat("Output directory for calcSizeComps.EW166 will be '",out.dir,"'\n",sep='');
  #determine columns of size comps by stratum table
  nc0f<-9;#number of columns if SIZE is the only 'factor' in the table
  cols<-names(tbl);
  nc<-length(cols);
  if (nc==nc0f){cols<-'';} else
  {cols<-cols[4:(3+nc-nc0f)];}#extract factor columns (including SIZE)
  #--map original strata to their EW166 strata (keeps one row per original stratum)
  qry<-"select
t.YEAR,
s.revd as newSTRATUM,
t.STRATUM as oldSTRATUM,
STRATUM_AREA&&cols,
numStations,
numHauls,
numNonZeroHauls,
numIndivs,
totABUNDANCE,
totBIOMASS
from
tbl as t,
strata_toEW166 as s
where
t.STRATUM=s.orig
order by
t.YEAR,s.revd,t.STRATUM&&cols;"
  if (nc==nc0f) {#--was a hard-coded 9; use nc0f for consistency
    qry<-gsub("&&cols",'',qry);
  } else {
    qry<-gsub("&&cols",paste(',t.',cols,collapse="",sep=''),qry);
  }
  if (verbosity>1) cat("\nquery is:\n",qry,"\n");
  tbl2<-sqldf::sqldf(qry);
  #--aggregate over the original strata within each EW166 stratum
  qry<-"select
YEAR,
newSTRATUM as STRATUM,
sum(STRATUM_AREA) as STRATUM_AREA&&cols,
sum(numStations) as numStations,
sum(numHauls) as numHauls,
sum(numNonZeroHauls) as numNonZeroHauls,
sum(numIndivs) as numIndivs,
sum(totABUNDANCE) as totABUNDANCE,
sum(totBIOMASS) as totBIOMASS
from
tbl2
group by
YEAR,newSTRATUM&&cols
order by
YEAR,newSTRATUM&&cols;"
  if (nc==nc0f) {
    qry<-gsub("&&cols",'',qry);
  } else {
    qry<-gsub("&&cols",paste(',',cols,collapse="",sep=''),qry);
  }
  if (verbosity>1) cat("\nquery is:\n",qry,"\n");
  tbl1<-sqldf::sqldf(qry);
  #--NOTE(review): negative counts appear to flag missing data; summing them
  #--can produce values < -1, so they are clamped back to -1 here -- confirm.
  idx = tbl1$numNonZeroHauls<0;
  tbl1$numNonZeroHauls[idx] = -1;
  #--optionally export the result, creating the output folder if necessary
  if (export){
    if (!is.null(out.dir)){
      if (verbosity>1) cat("\nTesting existence of folder '",out.dir,"'\n",sep='')
      if (!file.exists(out.dir)){
        if (verbosity>0) cat("Creating folder '",out.dir,"' for output.\n",sep='')
        dir.create(out.dir);
      } else {
        if (verbosity>0) cat("Using folder '",out.dir,"' for output.\n",sep='')
      }
      out.csv<-file.path(out.dir,out.csv)
    }
    write.csv(tbl1,out.csv,na='',row.names=FALSE);
  }
  if (verbosity>1) cat("finished calcSizeComps.EW166\n");
  return(tbl1)
}
library(tidyverse)
library("ggpubr")
library(tidyverse)
require(ggthemr)
require(interactions)
require(mgcv)
library(formattable)
library(sparkline)
# Global ggplot theme for all figures in this script.
ggthemr("flat", text_size=16)
# Set a few color variables to make our tables more visually appealing.
blueDark = "#035AA6"
blueLight = "#4192D9"
greenDarkest = "#026873"
greenDark = "#038C8C"
greenLight = "#03A696"
# Description:
# From the options pipeline, find the most optimum value for each option.
DATASET <- "~/Documents/BIST/Major Project/data/options/22_06_20.csv"
OUTPUT <- "~/Documents/BIST/Major Project/results/options_support/"
# NOTE(review): setwd() changes global state for the whole session; all
# relative output paths below resolve against OUTPUT.
setwd(OUTPUT)
# Save a ggplot object to a retina-resolution PNG (dimensions in cm).
save_plot <- function(plot, filename, width=20, height=20) {
ggsave(filename, plot=plot, width=width, height=height, units="cm", device="png", dpi="retina")
}
import_data <- function(path) {
  # Read an options-sweep results CSV and annotate it.
  # Columns: dataset, tolerance, support, fuzz, sensitivity, precision.
  # Adds `sequencer` ("PacBio" for dataset names starting with "pacBio",
  # otherwise "ONT") and `s_and_p` (mean of sensitivity and precision).
  # Fix vs. original: the pipeline was assigned to an unused local variable,
  # which made the function return its result invisibly; returning the
  # pipeline directly drops the dead assignment.
  read_csv(
    path,
    col_names = c("dataset", "tolerance", "support", "fuzz", "sensitivity", "precision"),
    col_types = cols(dataset = col_character(), tolerance = col_double(), support = col_double(), fuzz = col_double(), sensitivity = col_double(), precision = col_double())
  ) %>%
    filter(str_starts(dataset, "ont") | str_starts(dataset, "pacBio")) %>% # Remove any malformed rows
    mutate(sequencer = ifelse(str_detect(dataset, "^pacBio"), "PacBio", "ONT")) %>%
    mutate(s_and_p = (sensitivity + precision) / 2)
}
# Step 1. Import data and wrangle
# ===============================
# Find the most optimum values for each option.
data_raw <- import_data(DATASET)
# Print the dataset counts for each sequencer; rows with NA metrics are
# presumably datasets without SIRV ground truth -- the "SIRVs" rows count
# only complete cases.
formattable(tribble(
~Type, ~N,
"ONT", data_raw %>% filter(sequencer == "ONT") %>% distinct(dataset) %>% count() %>% pull(n),
"ONT SIRVs", data_raw %>% filter(sequencer == "ONT") %>% drop_na() %>% distinct(dataset) %>% count() %>% pull(n),
"PacBio", data_raw %>% filter(sequencer == "PacBio") %>% distinct(dataset) %>% count() %>% pull(n),
"PacBio SIRVs", data_raw %>% filter(sequencer == "PacBio") %>% drop_na() %>% distinct(dataset) %>% count() %>% pull(n)
))
# Keep only complete cases, grouped per dataset for the analyses below.
data_raw <- data_raw %>%
drop_na() %>%
group_by(dataset)
data <- data_raw %>% filter(sequencer == "ONT") # Only do for ONT as not enough datasets from PacBio
# Plot sensitivity and precision (smoothed across datasets) against one
# option's value. `option_name` must be one of "tolerance", "support", "fuzz".
# Returns a ggplot object; callers add axis labels/scales as needed.
plot_option <- function(data, option_name) {
data %>%
pivot_longer(c(tolerance, support, fuzz), names_to="option", values_to="option_value") %>%
filter(option==option_name) %>%
ungroup() %>% group_by(option_value) %>%
pivot_longer(c(sensitivity, precision), names_to="y_type", values_to="y_value") %>%
ggplot(aes(x=option_value, y=y_value, linetype=y_type, color=y_type, fill=y_type)) +
ylab("%") + xlab("Option Value") + ylim(0, 100) +
theme(legend.title=element_blank(), plot.margin = margin(1,0.2,1,0.2, "cm")) +
geom_smooth()
}
# Step 2. Plot each option changing while holding the others fixed.
# TODO: get these into one graph
# NOTE(review): the held-constant values differ between panels (support==4
# for the fuzz panel vs support==1 for the tolerance panel) -- confirm
# this is intentional.
# ======================================================
p_fuzz <- data %>%
filter(support==4,tolerance==0) %>%
plot_option(option_name="fuzz") + xlab("end_fuzz (nt)")
p_support <- data %>%
filter(fuzz==0,tolerance==0) %>%
plot_option(option_name="support") +
scale_x_continuous(breaks = c(2,4,6,8,10)) + xlab("min_read_support")
p_tolerance <- data %>%
filter(support==1,fuzz==0) %>%
plot_option(option_name="tolerance") + xlab("exon_overhang_tolerance (nt)")
# Save the individual panels, then a combined 2x2 figure.
save_plot(p_fuzz, filename="fuzz.png")
save_plot(p_support, filename="support.png")
save_plot(p_tolerance, filename="tolerance.png")
ggexport(
ggarrange(
p_fuzz, p_support, p_tolerance,
labels=c("end_fuzz", "min_read_support", "tolerance"),
font.label = list(size = 11, color = "black", face = "bold", family = NULL),
vjust = 2,
common.legend = TRUE, legend = "right",
nrow=2, ncol=2
),
filename="options.png"
)
# Step 3: Find interactions between options using GAMs.
# From https://m-clark.github.io/generalized-additive-models/application.html#multiple-predictors
# and http://environmentalcomputing.net/intro-to-gams/
# =========================
# Make a grid of every tolerance/fuzz/support combination to predict over.
poss_tolerances <- seq(0,20)
poss_supports <- seq(1,10)
poss_fuzzs <- seq(0,5)
grid_support <- expand.grid(tolerance = poss_tolerances, fuzz = poss_fuzzs, support = poss_supports)
# Model support
# ================
# Tolerance affects precision but not sensitivity, so the sensitivity model
# omits the tolerance smooth.
mod_sensitivity = gam(sensitivity ~ s(support, k = 6) + s(fuzz, k=5), data=data)
mod_precision = gam(precision ~ s(tolerance) + s(support, k = 6) + s(fuzz, k=5), data=data)
# Predict both metrics over the full grid and derive their mean (s_and_p).
predict_sensitivity <- predict(mod_sensitivity, grid_support)
predict_precision <- predict(mod_precision, grid_support)
predict_df <- grid_support %>%
mutate(sensitivity = predict_sensitivity) %>%
mutate(precision = predict_precision) %>%
mutate(s_and_p = (sensitivity + precision) / 2)
# Make a histogram of predicted sensitivity and precision to decide a cutoff.
save_plot(
predict_df %>%
pivot_longer(c(sensitivity, precision), names_to="key", values_to="value") %>%
ggplot(aes(x=value, fill=key)) +
geom_histogram(alpha=0.5, position="identity") +
xlab("%") +
scale_x_continuous(breaks = seq(0, 100, by = 10)) +
theme(legend.title=element_blank()),
filename = "ont_predicted_support_hist.png",
width = 30
)
# Keep only parameter combinations where both predicted metrics clear the
# 40% cutoff chosen from the histogram above, then report the combination
# with the best mean of sensitivity and precision.
# Fix vs. original: the filter read `sensitivity > 40 & sensitivity > 40`;
# the duplicated term was evidently a typo for `precision > 40`.
pred_df <- predict_df %>%
filter(sensitivity > 40 & precision > 40)
print(pred_df %>% filter(s_and_p == max(s_and_p)))
formattable(pred_df %>% filter(s_and_p == max(s_and_p)))
# Contour of mean sensitivity/precision over tolerance x support.
# NOTE(review): the slice is fuzz == 2 but the filename says "fuzz-0" --
# one of the two is likely stale; confirm which was intended.
save_plot(
predict_df %>% filter(fuzz == 2) %>%
ggplot() +
aes(x = tolerance, y = support, z = s_and_p) +
xlab("tolerance") + ylab("min_read_support") +
geom_contour_filled(),
filename="contour_support_fuzz-0.png")
# Contour over fuzz x support at tolerance == 0.
save_plot(
predict_df %>% filter(tolerance == 0) %>%
ggplot() +
aes(x = fuzz, y = support, z = s_and_p) +
xlab("end_fuzz") + ylab("min_read_support") +
geom_contour_filled(),
filename="contour_support_tolerance-0.png")
# Step 5. Analysis for PacBio.
# ===========================
# NOTE(review): data_raw already had drop_na() applied in Step 1, so the
# drop_na() here is redundant but harmless.
data <- data_raw %>% filter(sequencer == "PacBio") %>% drop_na() %>%
group_by(dataset)
# Make a histogram of sensitivity and precision to decide a cutoff for sensitivity.
save_plot(
data %>%
pivot_longer(c(sensitivity, precision), names_to="key", values_to="value") %>%
ggplot(aes(x=value, fill=key)) +
geom_histogram(alpha=0.5, position="identity") +
scale_x_continuous(breaks = seq(0, 100, by = 10)) +
theme(legend.title=element_blank()),
filename = "pacbio_hist.png"
)
# Summarise a per-dataset table of option values and metrics into one row per
# group. The first pipeline is a validation pass only: its result is
# discarded, but it stop()s if any option column takes more than one value
# within a group (min() would otherwise silently pick one).
make_summary <- function(data) {
data %>% select(tolerance, support, fuzz) %>%
group_map(
~ .x %>% map(~ if(length(unique(.x)) > 1) { stop("Can only make summary if one option per group") })
)
data %>% summarise(
exon_overhang_tolerance = min(tolerance),
fuzz = min(fuzz),
min_read_support = min(support),
precision = max(precision),
sensitivity = max(sensitivity)
)
}
# Render the summary as a formattable table: columns 7-8 tinted green and the
# precision column tinted blue, using the palette variables defined above.
make_table <- function(data) {
  formatters <- list(
    area(col = 7:8) ~ color_tile(greenLight, greenDark),
    `precision` = color_tile(blueLight, blueDark)
  )
  formattable(data, formatters)
}
# Keep rows with sensitivity above 60%, then the best mean of sensitivity and
# precision, breaking ties by the smallest tolerance.
filter_highest_s_and_p <- function(data) { data %>% filter(sensitivity > 60) %>% filter(s_and_p == max(s_and_p)) %>% filter(tolerance == min(tolerance))}
# NOTE(review): this comment described cutoffs of 80% precision / 20%
# sensitivity (per the histogram), but the code above filters on
# sensitivity > 60 only -- confirm which is current.
data %>% filter_highest_s_and_p() %>% mutate(method = "min_read_support") %>% make_summary() %>% make_table()
| /options_support.R | no_license | jacobwindsor/tmerge-analysis | R | false | false | 7,351 | r | library(tidyverse)
library("ggpubr")
library(tidyverse)
require(ggthemr)
require(interactions)
require(mgcv)
library(formattable)
library(sparkline)
# Global ggplot theme for all figures in this script.
ggthemr("flat", text_size=16)
# Set a few color variables to make our tables more visually appealing.
blueDark = "#035AA6"
blueLight = "#4192D9"
greenDarkest = "#026873"
greenDark = "#038C8C"
greenLight = "#03A696"
# Description:
# From the options pipeline, find the most optimum value for each option.
DATASET <- "~/Documents/BIST/Major Project/data/options/22_06_20.csv"
OUTPUT <- "~/Documents/BIST/Major Project/results/options_support/"
# NOTE(review): setwd() changes global state for the whole session; all
# relative output paths below resolve against OUTPUT.
setwd(OUTPUT)
# Save a ggplot object to a retina-resolution PNG (dimensions in cm).
save_plot <- function(plot, filename, width=20, height=20) {
ggsave(filename, plot=plot, width=width, height=height, units="cm", device="png", dpi="retina")
}
import_data <- function(path) {
  # Read an options-sweep results CSV and annotate it.
  # Columns: dataset, tolerance, support, fuzz, sensitivity, precision.
  # Adds `sequencer` ("PacBio" for dataset names starting with "pacBio",
  # otherwise "ONT") and `s_and_p` (mean of sensitivity and precision).
  # Fix vs. original: the pipeline was assigned to an unused local variable,
  # which made the function return its result invisibly; returning the
  # pipeline directly drops the dead assignment.
  read_csv(
    path,
    col_names = c("dataset", "tolerance", "support", "fuzz", "sensitivity", "precision"),
    col_types = cols(dataset = col_character(), tolerance = col_double(), support = col_double(), fuzz = col_double(), sensitivity = col_double(), precision = col_double())
  ) %>%
    filter(str_starts(dataset, "ont") | str_starts(dataset, "pacBio")) %>% # Remove any malformed rows
    mutate(sequencer = ifelse(str_detect(dataset, "^pacBio"), "PacBio", "ONT")) %>%
    mutate(s_and_p = (sensitivity + precision) / 2)
}
# Step 1. Import data and wrangle
# ===============================
# Find the most optimum values for each option.
data_raw <- import_data(DATASET)
# Print the dataset counts for each sequencer; rows with NA metrics are
# presumably datasets without SIRV ground truth -- the "SIRVs" rows count
# only complete cases.
formattable(tribble(
~Type, ~N,
"ONT", data_raw %>% filter(sequencer == "ONT") %>% distinct(dataset) %>% count() %>% pull(n),
"ONT SIRVs", data_raw %>% filter(sequencer == "ONT") %>% drop_na() %>% distinct(dataset) %>% count() %>% pull(n),
"PacBio", data_raw %>% filter(sequencer == "PacBio") %>% distinct(dataset) %>% count() %>% pull(n),
"PacBio SIRVs", data_raw %>% filter(sequencer == "PacBio") %>% drop_na() %>% distinct(dataset) %>% count() %>% pull(n)
))
# Keep only complete cases, grouped per dataset for the analyses below.
data_raw <- data_raw %>%
drop_na() %>%
group_by(dataset)
data <- data_raw %>% filter(sequencer == "ONT") # Only do for ONT as not enough datasets from PacBio
# Plot sensitivity and precision (smoothed across datasets) against one
# option's value. `option_name` must be one of "tolerance", "support", "fuzz".
# Returns a ggplot object; callers add axis labels/scales as needed.
plot_option <- function(data, option_name) {
data %>%
pivot_longer(c(tolerance, support, fuzz), names_to="option", values_to="option_value") %>%
filter(option==option_name) %>%
ungroup() %>% group_by(option_value) %>%
pivot_longer(c(sensitivity, precision), names_to="y_type", values_to="y_value") %>%
ggplot(aes(x=option_value, y=y_value, linetype=y_type, color=y_type, fill=y_type)) +
ylab("%") + xlab("Option Value") + ylim(0, 100) +
theme(legend.title=element_blank(), plot.margin = margin(1,0.2,1,0.2, "cm")) +
geom_smooth()
}
# Step 2. Plot each option changing while holding the others fixed.
# TODO: get these into one graph
# NOTE(review): the held-constant values differ between panels (support==4
# for the fuzz panel vs support==1 for the tolerance panel) -- confirm
# this is intentional.
# ======================================================
p_fuzz <- data %>%
filter(support==4,tolerance==0) %>%
plot_option(option_name="fuzz") + xlab("end_fuzz (nt)")
p_support <- data %>%
filter(fuzz==0,tolerance==0) %>%
plot_option(option_name="support") +
scale_x_continuous(breaks = c(2,4,6,8,10)) + xlab("min_read_support")
p_tolerance <- data %>%
filter(support==1,fuzz==0) %>%
plot_option(option_name="tolerance") + xlab("exon_overhang_tolerance (nt)")
# Save the individual panels, then a combined 2x2 figure.
save_plot(p_fuzz, filename="fuzz.png")
save_plot(p_support, filename="support.png")
save_plot(p_tolerance, filename="tolerance.png")
ggexport(
ggarrange(
p_fuzz, p_support, p_tolerance,
labels=c("end_fuzz", "min_read_support", "tolerance"),
font.label = list(size = 11, color = "black", face = "bold", family = NULL),
vjust = 2,
common.legend = TRUE, legend = "right",
nrow=2, ncol=2
),
filename="options.png"
)
# Step 3: Find interactions between options using GAMs.
# From https://m-clark.github.io/generalized-additive-models/application.html#multiple-predictors
# and http://environmentalcomputing.net/intro-to-gams/
# =========================
# Make a grid of every tolerance/fuzz/support combination to predict over.
poss_tolerances <- seq(0,20)
poss_supports <- seq(1,10)
poss_fuzzs <- seq(0,5)
grid_support <- expand.grid(tolerance = poss_tolerances, fuzz = poss_fuzzs, support = poss_supports)
# Model support
# ================
# Tolerance affects precision but not sensitivity, so the sensitivity model
# omits the tolerance smooth.
mod_sensitivity = gam(sensitivity ~ s(support, k = 6) + s(fuzz, k=5), data=data)
mod_precision = gam(precision ~ s(tolerance) + s(support, k = 6) + s(fuzz, k=5), data=data)
# Predict both metrics over the full grid and derive their mean (s_and_p).
predict_sensitivity <- predict(mod_sensitivity, grid_support)
predict_precision <- predict(mod_precision, grid_support)
predict_df <- grid_support %>%
mutate(sensitivity = predict_sensitivity) %>%
mutate(precision = predict_precision) %>%
mutate(s_and_p = (sensitivity + precision) / 2)
# Make a histogram of predicted sensitivity and precision to decide a cutoff.
save_plot(
predict_df %>%
pivot_longer(c(sensitivity, precision), names_to="key", values_to="value") %>%
ggplot(aes(x=value, fill=key)) +
geom_histogram(alpha=0.5, position="identity") +
xlab("%") +
scale_x_continuous(breaks = seq(0, 100, by = 10)) +
theme(legend.title=element_blank()),
filename = "ont_predicted_support_hist.png",
width = 30
)
# Keep only parameter combinations where both predicted metrics clear the
# 40% cutoff chosen from the histogram above, then report the combination
# with the best mean of sensitivity and precision.
# Fix vs. original: the filter read `sensitivity > 40 & sensitivity > 40`;
# the duplicated term was evidently a typo for `precision > 40`.
pred_df <- predict_df %>%
filter(sensitivity > 40 & precision > 40)
print(pred_df %>% filter(s_and_p == max(s_and_p)))
formattable(pred_df %>% filter(s_and_p == max(s_and_p)))
# Contour of mean sensitivity/precision over tolerance x support.
# NOTE(review): the slice is fuzz == 2 but the filename says "fuzz-0" --
# one of the two is likely stale; confirm which was intended.
save_plot(
predict_df %>% filter(fuzz == 2) %>%
ggplot() +
aes(x = tolerance, y = support, z = s_and_p) +
xlab("tolerance") + ylab("min_read_support") +
geom_contour_filled(),
filename="contour_support_fuzz-0.png")
# Contour over fuzz x support at tolerance == 0.
save_plot(
predict_df %>% filter(tolerance == 0) %>%
ggplot() +
aes(x = fuzz, y = support, z = s_and_p) +
xlab("end_fuzz") + ylab("min_read_support") +
geom_contour_filled(),
filename="contour_support_tolerance-0.png")
# Step 5. Analysis for PacBio.
# ===========================
# NOTE(review): data_raw already had drop_na() applied in Step 1, so the
# drop_na() here is redundant but harmless.
data <- data_raw %>% filter(sequencer == "PacBio") %>% drop_na() %>%
group_by(dataset)
# Make a histogram of sensitivity and precision to decide a cutoff for sensitivity.
save_plot(
data %>%
pivot_longer(c(sensitivity, precision), names_to="key", values_to="value") %>%
ggplot(aes(x=value, fill=key)) +
geom_histogram(alpha=0.5, position="identity") +
scale_x_continuous(breaks = seq(0, 100, by = 10)) +
theme(legend.title=element_blank()),
filename = "pacbio_hist.png"
)
# Summarise a per-dataset table of option values and metrics into one row per
# group. The first pipeline is a validation pass only: its result is
# discarded, but it stop()s if any option column takes more than one value
# within a group (min() would otherwise silently pick one).
make_summary <- function(data) {
data %>% select(tolerance, support, fuzz) %>%
group_map(
~ .x %>% map(~ if(length(unique(.x)) > 1) { stop("Can only make summary if one option per group") })
)
data %>% summarise(
exon_overhang_tolerance = min(tolerance),
fuzz = min(fuzz),
min_read_support = min(support),
precision = max(precision),
sensitivity = max(sensitivity)
)
}
# Render the summary as a formattable table: columns 7-8 tinted green and the
# precision column tinted blue, using the palette variables defined above.
make_table <- function(data) {
  formatters <- list(
    area(col = 7:8) ~ color_tile(greenLight, greenDark),
    `precision` = color_tile(blueLight, blueDark)
  )
  formattable(data, formatters)
}
# Keep rows with sensitivity above 60%, then the best mean of sensitivity and
# precision, breaking ties by the smallest tolerance.
filter_highest_s_and_p <- function(data) { data %>% filter(sensitivity > 60) %>% filter(s_and_p == max(s_and_p)) %>% filter(tolerance == min(tolerance))}
# NOTE(review): this comment described cutoffs of 80% precision / 20%
# sensitivity (per the histogram), but the code above filters on
# sensitivity > 60 only -- confirm which is current.
data %>% filter_highest_s_and_p() %>% mutate(method = "min_read_support") %>% make_summary() %>% make_table()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1-preprocess.R
\name{to_categorical_invert}
\alias{to_categorical_invert}
\title{Make Categorical Mask}
\usage{
to_categorical_invert(y, mask = unique(y))
}
\arguments{
\item{y}{The outcome. An integer vector.}
\item{mask}{The values to mask. If missing, all values are masked.}
}
\value{
A matrix like \code{keras::to_categorical}.
}
\description{
This function one-hot encodes an integer vector as a matrix,
except that it masks certain outcomes. For example, consider
the three outcomes \{0, 1, 2\} where 2 is masked. The value 0
is coded as \{1, 0, 0\}. The value 1 is coded as \{0, 1, 0\}.
The value 2 is coded as \{.5, .5, 0\}. This blinds the
classifier to the value 2.
}
| /man/to_categorical_invert.Rd | no_license | tpq/caress | R | false | true | 755 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1-preprocess.R
\name{to_categorical_invert}
\alias{to_categorical_invert}
\title{Make Categorical Mask}
\usage{
to_categorical_invert(y, mask = unique(y))
}
\arguments{
\item{y}{The outcome. An integer vector.}
\item{mask}{The values to mask. If missing, all values are masked.}
}
\value{
A matrix like \code{keras::to_categorical}.
}
\description{
This function one-hot encodes an integer vector as a matrix,
except that it masks certain outcomes. For example, consider
the three outcomes \{0, 1, 2\} where 2 is masked. The value 0
is coded as \{1, 0, 0\}. The value 1 is coded as \{0, 1, 0\}.
The value 2 is coded as \{.5, .5, 0\}. This blinds the
classifier to the value 2.
}
|
# Read raw data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Question
# Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
# Required libraries (library() errors loudly if a package is missing,
# unlike require(), which only returns FALSE)
library(dplyr)
library(ggplot2)
# Compute aggregations to use with plot.
# FIX: the defunct dplyr chain operator `%.%` was removed from dplyr;
# `%>%` is the supported pipe.
emission_per_year_vehicle_baltimore_la <- NEI %>%
  inner_join(SCC) %>%
  filter(grepl("Vehicles", EI.Sector) & (fips == "24510" | fips == "06037")) %>%
  group_by(year, fips) %>%
  summarise(total_emissions = sum(Emissions))
# Replace fips codes with readable facet labels. Refer to the column by name
# rather than by position (column 2), which breaks silently if the column
# order ever changes.
emission_per_year_vehicle_baltimore_la$fips[emission_per_year_vehicle_baltimore_la$fips == "24510"] <- "Baltimore City"
emission_per_year_vehicle_baltimore_la$fips[emission_per_year_vehicle_baltimore_la$fips == "06037"] <- "Los Angeles County"
# Plot 6
png('plot6.png', width=960)
qplot(year, total_emissions, data = emission_per_year_vehicle_baltimore_la, facets = . ~ fips, geom = "line", main = "Plot 6 - Total emissions per year by Vehicles in Baltimore City and Los Angeles County")
dev.off()
# Answer
# Baltimore City has seen the greater changes from 1999-2008 in motor vehicle emissions.
# Los Angeles County, after a spike in 2005, returned to 1999 levels by 2008.
| /plot6.R | no_license | maurotrb/ExData_PeerAssessment2 | R | false | false | 1,346 | r | # Read raw data
# Read raw data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Question
# Compare emissions from motor vehicle sources in Baltimore City with emissions
# from motor vehicle sources in Los Angeles County, California (fips == "06037").
# Which city has seen greater changes over time in motor vehicle emissions?
# Required libraries (library() errors loudly if a package is missing,
# unlike require(), which only returns FALSE)
library(dplyr)
library(ggplot2)
# Compute aggregations to use with plot.
# FIX: the defunct dplyr chain operator `%.%` was removed from dplyr;
# `%>%` is the supported pipe.
emission_per_year_vehicle_baltimore_la <- NEI %>%
  inner_join(SCC) %>%
  filter(grepl("Vehicles", EI.Sector) & (fips == "24510" | fips == "06037")) %>%
  group_by(year, fips) %>%
  summarise(total_emissions = sum(Emissions))
# Replace fips codes with readable facet labels. Refer to the column by name
# rather than by position (column 2), which breaks silently if the column
# order ever changes.
emission_per_year_vehicle_baltimore_la$fips[emission_per_year_vehicle_baltimore_la$fips == "24510"] <- "Baltimore City"
emission_per_year_vehicle_baltimore_la$fips[emission_per_year_vehicle_baltimore_la$fips == "06037"] <- "Los Angeles County"
# Plot 6
png('plot6.png', width=960)
qplot(year, total_emissions, data = emission_per_year_vehicle_baltimore_la, facets = . ~ fips, geom = "line", main = "Plot 6 - Total emissions per year by Vehicles in Baltimore City and Los Angeles County")
dev.off()
# Answer
# Baltimore City has seen the greater changes from 1999-2008 in motor vehicle emissions.
# Los Angeles County, after a spike in 2005, returned to 1999 levels by 2008.
|
################################################################################*
# Dataset 246, Channel Island fish in the Kelp Forest Monitoring Program
#
# Data from http://esapubs.org/archive/ecol/E094/245/metadata.php
#
# NOTE: These data are from the Roving Diver Fish Count dataset (RDFC data.csv)
# which attempted to monitor all fish species present,
# and NOT the Fish density data which only monitored 13 species.
#
# Formatted by Allen Hurlbert
#
#-------------------------------------------------------------------------------*
# ---- SET-UP ----
#===============================================================================*
# This script is best viewed in RStudio. I like to reduced the size of my window
# to roughly the width of the section lines (as above). Additionally, ensure
# that your global options are set to soft-wrap by selecting:
# Tools/Global Options .../Code Editing/Soft-wrap R source files
# Load libraries:
library(stringr)
library(plyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(MASS)
# Source the functions file:
getwd()
source('scripts/R-scripts/core-transient_functions.R')
# Get data. First specify the dataset number ('datasetID') you are working with.
#####
datasetID = 246
list.files('data/raw_datasets')
dataset = read.csv(paste('data/raw_datasets/dataset_', datasetID, '.csv', sep = ''))
dataFormattingTable = read.csv('data_formatting_table.csv')
dataFormattingTable[,'Raw_datafile_name'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name',
#--! PROVIDE INFO !--#
'RDFC data.csv')
########################################################
# ANALYSIS CRITERIA #
########################################################
# Min number of time samples required
minNTime = 6
# Min number of species required
minSpRich = 10
# Ultimately, the largest number of spatial and
# temporal subsamples will be chosen to characterize
# an assemblage such that at least this fraction
# of site-years will be represented.
topFractionSites = 0.5
#######################################################
#-------------------------------------------------------------------------------*
# ---- EXPLORE THE DATASET ----
#===============================================================================*
# Here, you are predominantly interested in getting to know the dataset, and determine what the fields represent and which fields are relevant.
# View field names:
names(dataset)
# View how many records and fields:
dim(dataset)
# View the structure of the dataset:
# View first 6 rows of the dataset:
head(dataset)
# Here, we can see that there are some fields that we won't use. Let's remove them, note that I've given a new name here "dataset1", this is to ensure that we don't have to go back to square 1 if we've miscoded anything.
# If all fields will be used, then set unusedFields = 9999.
names(dataset)
#####
unusedFieldNames = c('record_id','observernumber','commonname', 'year')
unusedFields = which(names(dataset) %in% unusedFieldNames)
dataset1 = dataset[,-unusedFields]
# You also might want to change the names of the identified species field [to 'species'] and/or the identified site field [to 'site']. Just make sure you make specific comments on what the field name was before you made the change, as seen above.
# Explore, if everything looks okay, you're ready to move forward. If not, retrace your steps to look for and fix errors.
head(dataset1, 10)
# I've found it helpful to explore more than just the first 6 data points given with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a better snapshot of what the data looks like. Do this periodically throughout the formatting process
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Are the ONLY site identifiers the latitude and longitude of the observation or
# sample? (I.e., there are no site names or site IDs or other designations) Y/N
dataFormattingTable[,'LatLong_sites'] =
dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', # Fill value in below
#####
'N')
#-------------------------------------------------------------------------------*
# ---- FORMAT TIME DATA ----
#===============================================================================*
# Here, we need to extract the sampling dates.
# What is the name of the field that has information on sampling date?
# If date info is in separate columns (e.g., 'day', 'month', and 'year' cols),
# then write these field names as a vector from largest to smallest temporal grain.
#####
dateFieldName = c('record_date')
# If necessary, paste together date info from multiple columns into single field
# When the date is split across multiple columns, paste them into one field.
# BUG FIX: the loop previously read the extra columns from `dataset` (the raw
# data) instead of `dataset1` (the working copy with unused fields dropped);
# all pieces must come from the same object so rows stay aligned.
if (length(dateFieldName) > 1) {
  newDateField = dataset1[, dateFieldName[1]]
  for (i in dateFieldName[2:length(dateFieldName)]) {
    newDateField = paste(newDateField, dataset1[, i], sep = "-")
  }
  dataset1$date = newDateField
  datefield = 'date'
} else {
  datefield = dateFieldName
}
# What is the format in which date data is recorded? For example, if it is
# recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would
# be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting.
#####
dateformat = '%d%b%Y'
# If date is only listed in years:
# dateformat = '%Y'
# If the date is just a year, then make sure it is of class numeric
# and not a factor. Otherwise change to a true date object.
# If the date is just a year, keep it numeric; otherwise parse a true date
# object. `dateformat` is a single string, so use the scalar short-circuit
# `||` rather than the vectorized `|` inside `if`.
if (dateformat == '%Y' || dateformat == '%y') {
  date = as.numeric(as.character(dataset1[, datefield]))
} else {
  date = as.POSIXct(strptime(dataset1[, datefield], dateformat))
}
# A check on the structure lets you know that date field is now a date object:
class(date)
# Give a double-check, if everything looks okay replace the column:
head(dataset1[, datefield])
head(date)
dataset2 = dataset1
# Delete the old date field
dataset2 = dataset2[, -which(names(dataset2) %in% dateFieldName)]
# Assign the new date values in a field called 'date'
dataset2$date = date
# Check the results:
head(dataset2)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Notes_timeFormat. Provide a thorough description of any modifications that were made to the time field.
dataFormattingTable[,'Notes_timeFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', # Fill value in below
#####
'Temporal data provided as dates. The only modification to this field involved converting to a date object.')
# subannualTgrain. After exploring the time data, was this dataset sampled at a sub-annual temporal grain? Y/N
dataFormattingTable[,'subannualTgrain'] =
dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', # Fill value in below
#####
'Y')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SITE DATA ----
#===============================================================================*
# From the previous head command, we can see that sites are broken up into (potentially) 5 fields. Find the metadata link in the data formatting table and use that link to determine how sites are characterized.
# -- If sampling is nested (e.g., site, block, plot, quad as in this study), use each of the identifying fields and separate each field with an underscore. For nested samples be sure the order of concatenated columns goes from coarser to finer scales (e.g. "km_m_cm")
# -- If sites are listed as lats and longs, use the finest available grain and separate lat and long fields with an underscore.
# -- If the site definition is clear, make a new site column as necessary.
# -- If the dataset is for just a single site, and there is no site column, then add one.
# Here, we will concatenate all of the potential fields that describe the site
# in hierarchical order from largest to smallest grain. Based on the dataset,
# fill in the fields that specify nested spatial grains below.
#####
site_grain_names = c("site")
# We will now create the site field with these codes concatenated if there
# are multiple grain fields. Otherwise, site will just be the single grain field.
# Build the site identifier by underscore-concatenating every grain column,
# from coarsest to finest. With a single grain column, the site is simply
# that column's values.
num_grains = length(site_grain_names)
site = dataset2[, site_grain_names[1]]
if (num_grains > 1) {
  for (grain_col in site_grain_names[2:num_grains]) {
    site = paste(site, dataset2[, grain_col], sep = "_")
  }
}
# What is the spatial grain of the finest sampling scale? For example, this might be
# a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample.
dataFormattingTable[,'Raw_spatial_grain'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain',
#--! PROVIDE INFO !--#
2000)
dataFormattingTable[,'Raw_spatial_grain_unit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit',
#--! PROVIDE INFO !--#
'm2')
# BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime for sites at the coarsest possilbe spatial grain.
siteCoarse = dataset2[, site_grain_names[1]]
if (dateformat == '%Y' | dateformat == '%y') {
dateYear = dataset2$date
} else {
dateYear = format(dataset2$date, '%Y')
}
datasetYearTest = data.frame(siteCoarse, dateYear)
ddply(datasetYearTest, .(siteCoarse), summarise,
lengthYears = length(unique(dateYear)))
# If the dataset has less than minNTime years per site, do not continue processing.
# Do some quality control by comparing the site fields in the dataset with the new vector of sites:
head(site)
# Check how evenly represented all of the sites are in the dataset. If this is the
# type of dataset where every site was sampled on a regular schedule, then you
# expect to see similar values here across sites. Sites that only show up a small
# percent of the time may reflect typos.
data.frame(table(site))
# All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, start by renaming the dataset to dataset2:
dataset3 = dataset2
dataset3$site = factor(site)
# Check the new dataset (are the columns as they should be?):
head(dataset3)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED!
# !DATA FORMATTING TABLE UPDATE!
# Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this one, it was coded as "site_block_plot_quad"). Alternatively, if the site were concatenated from latitude and longitude fields, the encoding would be "lat_long".
dataFormattingTable[,'Raw_siteUnit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', # Fill value below in quotes
#####
'site')
# spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or decimal lat longs that could be scaled up)? Y/N
dataFormattingTable[,'spatial_scale_variable'] =
dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable',
#####
'N') # Fill value here in quotes
# Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the site field during formatting.
dataFormattingTable[,'Notes_siteFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', # Fill value below in quotes
#####
'Site is a 2000 m2 (20 x 100 m transect) area surveyed over 30 minutes')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT COUNT DATA ----
#===============================================================================*
# Next, we need to explore the count records. For filling out the data formatting table, we need to change the name of the field which represents counts, densities, percent cover, etc to "count". Then we will clean up unnecessary values.
names(dataset3)
summary(dataset3)
# This dataset has two fields with abundance info, 'abundance' which is categorical,
# and 'count' which is numeric. Prior to 2003, no numeric values are provided.
# When 'abundance' is NA, "this is eqivalent to saying the fish was not observed,
# and later in the dataset is typically paired with a count of "0"."
# Here, we replace all NA's in the count field with numeric values based on the
# reported definition of abundance categories in the metadata:
# Abundance category Equivalent count Replaced value
# Single 1 1
# Few 2 - 10 6
# Common 11 - 100 55
# Many 100+ 100
# Fill missing numeric counts from the categorical abundance field using the
# category midpoints tabulated above. Each line touches only rows whose count
# is NA AND whose abundance matches; rows where abundance is itself NA are
# handled explicitly by the final line (NA positions in a logical subscript
# are left untouched by a length-1 replacement — verify if in doubt).
dataset3$count[is.na(dataset3$count) & dataset3$abundance == "Single"] = 1
dataset3$count[is.na(dataset3$count) & dataset3$abundance == "Few"] = 6
dataset3$count[is.na(dataset3$count) & dataset3$abundance == "Common"] = 55
dataset3$count[is.na(dataset3$count) & dataset3$abundance == "Many"] = 100
dataset3$count[is.na(dataset3$count) & is.na(dataset3$abundance)] = 0
# Now drop the categorical abundance field: all abundance information is
# captured in the numeric count column from here on
dataset3 = dataset3[, -which(names(dataset3) == 'abundance')]
# Fill in the original field name here
#####
countfield = 'count'
# Renaming it
names(dataset3)[which(names(dataset3) == countfield)] = 'count'
# Now we will remove zero counts and NA's:
summary(dataset3)
# Can usually tell if there are any zeros or NAs from that summary(). If there aren't any showing, still run these functions or continue with the update of dataset# so that you are consistent with this template.
# Subset to records > 0 (if applicable):
dataset4 = subset(dataset3, count > 0)
summary(dataset4)
# Check to make sure that by removing 0's that you haven't completely removed
# any sampling events in which nothing was observed. Compare the number of
# unique site-dates in dataset3 and dataset4.
# If there are no sampling events lost, then we can go ahead and use the
# smaller dataset4 which could save some time in subsequent analyses.
# If there are sampling events lost, then we'll keep the 0's (use dataset3).
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')]))
numEventsd4 = nrow(unique(dataset4[, c('site', 'date')]))
# If dropping zero counts removed entire sampling events, revert to the
# version that retains zeros (dataset3); otherwise dataset4 stands as-is,
# so no else branch is needed.
if (numEventsd3 > numEventsd4) {
  dataset4 = dataset3
}
# Remove NA's:
dataset5 = na.omit(dataset4)
head(dataset5)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Possible values for countFormat field are density, cover, presence and count.
dataFormattingTable[,'countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'countFormat', # Fill value below in quotes
#####
'count')
dataFormattingTable[,'Notes_countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', # Fill value below in quotes
#####
'Data prior to 2003 include abundance categories rather than numeric counts that were converted to numeric values based on category midpoints.')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SPECIES DATA ----
#===============================================================================*
# Here, your primary goal is to ensure that all of your species are valid. To do so, you need to look at the list of unique species very carefully. Avoid being too liberal in interpretation, if you notice an entry that MIGHT be a problem, but you can't say with certainty, create an issue on GitHub.
# First, what is the field name in which species or taxonomic data are stored?
# It will get converted to 'species'
#####
speciesField = 'scientificname'
dataset5$species = dataset5[, speciesField]
dataset5 = dataset5[, -which(names(dataset5) == speciesField)]
# Look at the individual species present and how frequently they occur: This way you can more easily scan the species names (listed alphabetically) and identify potential misspellings, extra characters or blank space, or other issues.
data.frame(table(dataset5$species))
# If there are entries that only specify the genus while there are others that specify the species in addition to that same genus, they need to be regrouped in order to avoid ambiguity. For example, if there are entries of 'Cygnus', 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either species, but the observer could not identify it. This causes ambiguity in the data, and must be fixed by either 1. deleting the genus-only entry altogether, or 2. renaming the genus-species entries to just the genus-only entry.
# This decision can be fairly subjective, but generally if less than 25% of the entries are genus-only, then they can be deleted (using bad_sp). If more than 25% of the entries for that genus are only specified to the genus, then the genus-species entries should be renamed to be genus-only (using typo_name).
table(dataset5$species)
# If species names are coded (not scientific names) go back to study's metadata to learn what species should and shouldn't be in the data.
# In this example, a quick look at the metadata is not informative, unfortunately. Because of this, you should really stop here and post an issue on GitHub. With some more thorough digging, however, I've found the names represent "Kartez codes". Several species can be removed (double-checked with USDA plant codes at plants.usda.gov and another Sevilleta study (dataset 254) that provides species names for some codes). Some codes were identified with this pdf from White Sands: https://nhnm.unm.edu/sites/default/files/nonsensitive/publications/nhnm/U00MUL02NMUS.pdf
#####
bad_sp = c('baitfish unidentified, all','Embiotocidae spp., adult', 'Embiotocidae spp., all',
'Embiotocidae spp., juvenile', 'Gobiidae spp.', 'larval fish spp., all',
'Neoclinus spp., all','Sebastes atrovirens/carnatus/caurinus/chrysomelas, juvenile',
'Sebastes spp., adult', 'Sebastes spp., all', 'Sebastes spp., juvenile')
dataset6 = dataset5[!dataset5$species %in% bad_sp,]
# It may be useful to count the number of times each name occurs, as misspellings or typos will likely
# only show up one time.
table(dataset6$species)
# If you find any potential typos, try to confirm that the "mispelling" isn't actually a valid name.
# If not, then list the typos in typo_name, and the correct spellings in good_name,
# and then replace them using the for loop below:
# In this case, we are lumping juvenile and adult life stages into single taxonomic entities
#####
typo_name = c('Aulorhynchus flavidus, adult',
'Aulorhynchus flavidus, juvenile',
'Bathymasteridae spp., all',
'Caulolatilus princeps, adult',
'Chromis punctipinnis, adult',
'Chromis punctipinnis, juvenile',
'Citharichthys spp., all',
'Embiotoca jacksoni, adult',
'Embiotoca jacksoni, juvenile',
'Embiotoca lateralis, adult',
'Embiotoca lateralis, juvenile',
'Gibbonsia elegans, all',
'Gibbonsia montereyensis, all',
'Girella nigricans, adult',
'Girella nigricans, juvenile',
'Halichoeres semicinctus, female',
'Halichoeres semicinctus, male',
'Halichoeres semicinctus, juvenile',
'Heterostichus rostratus, adult',
'Heterostichus rostratus, juvenile',
'Hypsypops rubicundus, adult',
'Hypsypops rubicundus, juvenile',
'Medialuna californiensis, adult',
'Ophiodon elongatus, adult',
'Oxyjulis californica, adult',
'Oxyjulis californica, juvenile',
'Paralabrax clathratus, adult',
'Paralabrax clathratus, juvenile',
'Porichthys notatus, all',
'Rhacochilus vacca, adult',
'Rhacochilus vacca, juvenile',
'Scorpaena guttata, adult',
'Scorpaenichthys marmoratus, adult',
'Scorpaenichthys marmoratus, juvenile',
'Sebastes atrovirens, adult',
'Sebastes atrovirens, juvenile',
'Sebastes auriculatus, adult',
'Sebastes auriculatus, juvenile',
'Sebastes auriculatus , juvenile',
'Sebastes carnatus, adult',
'Sebastes carnatus, juvenile',
'Sebastes caurinus, adult',
'Sebastes caurinus, juvenile',
'Sebastes chrysomelas, adult',
'Sebastes chrysomelas/carnatus, juvenile',
'Sebastes miniatus, adult',
'Sebastes miniatus, juvenile',
'Sebastes mystinus, adult',
'Sebastes mystinus, juvenile',
'Sebastes paucispinis, adult',
'Sebastes paucispinis, juvenile',
'Sebastes rastrelliger, adult',
'Sebastes saxicola, adult',
'Sebastes saxicola, juvenile',
'Sebastes serranoides, adult',
'Sebastes serranoides/flavidus, juvenile', #only 2 flavidus adults in dataset
'Sebastes serriceps, adult',
'Sebastes serriceps, juvenile',
'Semicossyphus pulcher, male',
'Semicossyphus pulcher, female',
'Semicossyphus pulcher, juvenile',
'Stereolepis gigas, adult'
)
#####
# Corrected names, aligned element-for-element with typo_name above.
# BUG FIX: entries 6 and 7 were swapped relative to typo_name, so
# 'Chromis punctipinnis, juvenile' was renamed to 'Citharichthys stigmaeus,
# all' and 'Citharichthys spp., all' to 'Chromis punctipinnis, all'. The two
# entries are now in typo_name order so each taxon maps to itself.
good_name = c('Aulorhynchus flavidus, all',
'Aulorhynchus flavidus, all',
'Bathymasteridae, all',
'Caulolatilus princeps, all',
'Chromis punctipinnis, all',
'Chromis punctipinnis, all',
'Citharichthys stigmaeus, all',
'Embiotoca jacksoni, all',
'Embiotoca jacksoni, all',
'Embiotoca lateralis, all',
'Embiotoca lateralis, all',
'Gibbonsia spp., all',
'Gibbonsia spp., all',
'Girella nigricans, all',
'Girella nigricans, all',
'Halichoeres semicinctus, all',
'Halichoeres semicinctus, all',
'Halichoeres semicinctus, all',
'Heterostichus rostratus, all',
'Heterostichus rostratus, all',
'Hypsypops rubicundus, all',
'Hypsypops rubicundus, all',
'Medialuna californiensis, all',
'Ophiodon elongatus, all',
'Oxyjulis californica, all',
'Oxyjulis californica, all',
'Paralabrax clathratus, all',
'Paralabrax clathratus, all',
'Porichthys spp., all',
'Rhacochilus vacca, all',
'Rhacochilus vacca, all',
'Scorpaena guttata, all',
'Scorpaenichthys marmoratus, all',
'Scorpaenichthys marmoratus, all',
'Sebastes atrovirens, all',
'Sebastes atrovirens, all',
'Sebastes auriculatus, all',
'Sebastes auriculatus, all',
'Sebastes auriculatus, all',
'Sebastes carnatus, all',
'Sebastes carnatus, all',
'Sebastes caurinus, all',
'Sebastes caurinus, all',
'Sebastes chrysomelas, all',
'Sebastes chrysomelas, all',
'Sebastes miniatus, all',
'Sebastes miniatus, all',
'Sebastes mystinus, all',
'Sebastes mystinus, all',
'Sebastes paucispinis, all',
'Sebastes paucispinis, all',
'Sebastes rastrelliger, all',
'Sebastes saxicola, all',
'Sebastes saxicola, all',
'Sebastes serranoides, all',
'Sebastes serranoides, all',
'Sebastes serriceps, all',
'Sebastes serriceps, all',
'Semicossyphus pulcher, all',
'Semicossyphus pulcher, all',
'Semicossyphus pulcher, all',
'Stereolepis gigas, all'
)
# Map each typo/lumped name onto its corrected counterpart in good_name.
# seq_along() yields an empty sequence for an empty typo_name (unlike
# 1:length(x), which would produce c(1, 0)), so no explicit length guard
# is required.
for (n in seq_along(typo_name)) {
  dataset6$species[dataset6$species == typo_name[n]] = good_name[n]
}
# Reset the factor levels:
dataset6$species = factor(dataset6$species)
# Let's look at how the removal of bad species and altered the length of the dataset:
nrow(dataset5)
nrow(dataset6)
# Look at the head of the dataset to ensure everything is correct:
head(dataset6)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Column M. Notes_spFormat. Provide a THOROUGH description of any changes made
# to the species field, including why any species were removed.
dataFormattingTable[,'Notes_spFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', # Fill value below in quotes
#####
'Several unidentified taxa removed. Juvenile and adult (and occasionally male and female) classes of the same species were lumped into single entities.')
#-------------------------------------------------------------------------------*
# ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ----
#===============================================================================*
# Now we will make the final formatted dataset, add a datasetID field, check for errors, and remove records that cant be used for our purposes.
# First, lets add the datasetID:
dataset6$datasetID = datasetID
# Now make the compiled dataframe:
dataset7 = ddply(dataset6,.(datasetID, site, date, species),
summarize, count = sum(count))
# Explore the data frame:
dim(dataset7)
head(dataset7, 15)
summary(dataset7)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#-------------------------------------------------------------------------------*
# ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ----
#===============================================================================*
# Update the data formatting table (this may take a moment to process). Note that the inputs for this are 'datasetID', the datasetID and the dataset form that you consider to be fully formatted.
dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7)
# Take a final look at the dataset:
head(dataset7)
summary (dataset7)
# If everything is looks okay we're ready to write formatted data frame:
write.csv(dataset7, paste("data/formatted_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F)
# !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER!
# As we've now successfully created the formatted dataset, we will now update the format flag field.
dataFormattingTable[,'format_flag'] =
dataFormattingTableFieldUpdate(datasetID, 'format_flag', # Fill value below
#####
1)
# Flag codes are as follows:
# 0 = not currently worked on
# 1 = formatting complete
# 2 = formatting in process
# 3 = formatting halted, issue
# 4 = data unavailable
# 5 = data insufficient for generating occupancy data
# !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE!
###################################################################################*
# ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ----
###################################################################################*
# We have now formatted the dataset to the finest possible spatial and temporal grain, removed bad species, and added the dataset ID. It's now time to make some scale decisions and determine the proportional occupancies.
# Load additional required libraries and dataset:
library(dplyr)
library(tidyr)
# Read in formatted dataset if skipping above formatting code (lines 1-450).
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_",
# datasetID, ".csv", sep =''))
# Have a look at the dimensions of the dataset and number of sites:
dim(dataset7)
length(unique(dataset7$site))
length(unique(dataset7$date))
head(dataset7)
# Get the data formatting table for that dataset:
dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,]
# or read it in from the saved data_formatting_table.csv if skipping lines 1-450.
#dataDescription = subset(read.csv("data_formatting_table.csv"),
# dataset_ID == datasetID)
# Check relevant table values (metadata recorded during the formatting phase above):
dataDescription$LatLong_sites
dataDescription$spatial_scale_variable
dataDescription$Raw_siteUnit
dataDescription$subannualTgrain
# Before proceeding, we need to make decisions about the spatial and temporal grains at
# which we will conduct our analyses. Except in unusual circumstances, the temporal
# grain will almost always be 'year', but the spatial grain that best represents the
# scale of a "community" will vary based on the sampling design and the taxonomic
# group. Justify your spatial scale below with a comment.
#####
tGrain = 'year'
# Refresh your memory about the spatial grain names if this is NOT a lat-long-only
# based dataset. Set sGrain = to the hierarchical scale for analysis.
# HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal
# a numerical value specifying the block size in degrees latitude for analysis.
site_grain_names
#####
sGrain = 'site'
# This is a reasonable choice of spatial grain because ...
# ...this is the only spatial resolution of the study. Sites are typically separated
# by tens to 100s of km, so it does not make sense to aggregate them in most cases.
# The function "richnessYearSubsetFun" below will subset the data to sites with an
# adequate number of years of sampling and species richness. If there are no
# adequate years, the function will return a custom error message and you can
# try resetting sGrain above to something coarser. Keep trying until this
# runs without an error. If a particular sGrain value led to an error in this
# function, you can make a note of that in the spatial grain justification comment
# above. If this function fails for ALL spatial grains, then this dataset will
# not be suitable for analysis and you can STOP HERE.
# NOTE(review): richnessYearSubsetFun is presumably defined in the sourced
# core-transient_functions.R; its definition is not visible in this chunk.
richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime,
minSpRich = minSpRich,
dataDescription)
head(richnessYearsTest)
dim(richnessYearsTest) ; dim(dataset7)
# Number of unique sites meeting the minimum years/richness criteria:
goodSites = unique(richnessYearsTest$analysisSite)
length(goodSites)
# Now subset dataset7 to just those goodSites as defined. This is tricky though
# because assuming sGrain is not the finest resolution, we will need to
# match site names that begin with the string in goodSites.
# The reason to do this is that sites which don't meet the criteria (e.g. not
# enough years of data) may also have low sampling intensity that constrains
# the subsampling level of the well sampled sites.
uniqueSites = unique(dataset7$site)
# Keep only site names that BEGIN with a goodSite string followed by "_".
# startsWith() performs a fixed-string prefix match; the original grepl()
# matched its pattern anywhere in the name and interpreted it as a regex,
# which could wrongly include sites merely *containing* a goodSite string,
# or misfire if site names contain regex metacharacters (e.g. '.').
# unlist(lapply(...)) also avoids growing the result vector inside a loop.
fullGoodSites = unlist(lapply(goodSites, function(s) {
  prefix = paste(s, "_", sep = "")
  as.character(uniqueSites[startsWith(paste(uniqueSites, "_", sep = ""), prefix)])
}))
dataset8 = subset(dataset7, site %in% fullGoodSites)
# Once we've settled on spatial and temporal grains that pass our test above,
# we then need to 1) figure out what levels of spatial and temporal subsampling
# we should use to characterize that analysis grain, and 2) subset the
# formatted dataset down to that standardized level of subsampling.
# For example, if some sites had 20 spatial subsamples (e.g. quads) per year while
# others had only 16, or 10, we would identify the level of subsampling that
# at least 'topFractionSites' of sites met (with a default of 50%). We would
# discard "poorly subsampled" sites (based on this criterion) from further analysis.
# For the "well-sampled" sites, the function below randomly samples the
# appropriate number of subsamples for each year or site,
# and bases the characterization of the community in that site-year based on
# the aggregate of those standardized subsamples.
dataSubset = subsetDataFun(dataset8,
datasetID,
spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime, minSpRich = minSpRich,
proportionalThreshold = topFractionSites,
dataDescription)
subsettedData = dataSubset$data
# Take a look at the propOcc (proportional occupancy per species-site):
head(propOccFun(subsettedData))
hist(propOccFun(subsettedData)$propOcc)
# Take a look at the site summary frame:
siteSummaryFun(subsettedData)
# If everything looks good, write the files:
writePropOccSiteSummary(subsettedData)
# Save the spatial and temporal subsampling values to the data formatting table:
dataFormattingTable[,'Spatial_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w)
dataFormattingTable[,'Temporal_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z)
# Update Data Formatting Table with summary stats of the formatted,
# properly subsetted dataset
dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData)
# And write the final data formatting table:
write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F)
# Remove all objects except for functions from the environment.
# NOTE(review): clearing the workspace this way is generally discouraged in
# scripts, but it is the established convention of this project's template.
rm(list = setdiff(ls(), lsf.str()))
| /scripts/R-scripts/data_cleaning_scripts/dwork_246_ahh.R | no_license | ethanwhite/core-transient | R | false | false | 34,220 | r | ################################################################################*
# Dataset 246, Channel Island fish in the Kelp Forest Monitoring Programming
#
# Data from http://esapubs.org/archive/ecol/E094/245/metadata.php
#
# NOTE: These data are from the Roving Diver Fish Count dataset (RDFC data.csv)
# which attempted to monitor all fish species present,
# and NOT the Fish density data which only monitored 13 species.
#
# Formatted by Allen Hurlbert
#
#-------------------------------------------------------------------------------*
# ---- SET-UP ----
#===============================================================================*
# This script is best viewed in RStudio. I like to reduced the size of my window
# to roughly the width of the section lines (as above). Additionally, ensure
# that your global options are set to soft-wrap by selecting:
# Tools/Global Options .../Code Editing/Soft-wrap R source files
# Load libraries:
library(stringr)
library(plyr)
library(ggplot2)
library(grid)
library(gridExtra)
library(MASS)
# Source the project's shared formatting/analysis functions:
getwd()
source('scripts/R-scripts/core-transient_functions.R')
# Get data. First specify the dataset number ('datasetID') you are working with.
#####
datasetID = 246
list.files('data/raw_datasets')
dataset = read.csv(paste('data/raw_datasets/dataset_', datasetID, '.csv', sep = ''))
dataFormattingTable = read.csv('data_formatting_table.csv')
dataFormattingTable[,'Raw_datafile_name'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_datafile_name',
#--! PROVIDE INFO !--#
'RDFC data.csv')
########################################################
# ANALYSIS CRITERIA #
########################################################
# Min number of time samples required
minNTime = 6
# Min number of species required
minSpRich = 10
# Ultimately, the largest number of spatial and
# temporal subsamples will be chosen to characterize
# an assemblage such that at least this fraction
# of site-years will be represented.
topFractionSites = 0.5
#######################################################
#-------------------------------------------------------------------------------*
# ---- EXPLORE THE DATASET ----
#===============================================================================*
# Here, you are predominantly interested in getting to know the dataset, and determine what the fields represent and which fields are relevant.
# View field names:
names(dataset)
# View how many records and fields:
dim(dataset)
# View the structure of the dataset:
# View first 6 rows of the dataset:
head(dataset)
# Here, we can see that there are some fields that we won't use. Let's remove them; note that I've given a new name here "dataset1", this is to ensure that we don't have to go back to square 1 if we've miscoded anything.
# If all fields will be used, then set unusedFields = 9999.
names(dataset)
#####
unusedFieldNames = c('record_id','observernumber','commonname', 'year')
unusedFields = which(names(dataset) %in% unusedFieldNames)
dataset1 = dataset[,-unusedFields]
# You also might want to change the names of the identified species field [to 'species'] and/or the identified site field [to 'site']. Just make sure you make specific comments on what the field name was before you made the change, as seen above.
# Explore, if everything looks okay, you're ready to move forward. If not, retrace your steps to look for and fix errors.
head(dataset1, 10)
# I've found it helpful to explore more than just the first 6 data points given with just a head(), so I used head(dataset#, 10) or even 20 to 50 to get a better snapshot of what the data looks like. Do this periodically throughout the formatting process
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Are the ONLY site identifiers the latitude and longitude of the observation or
# sample? (I.e., there are no site names or site IDs or other designations) Y/N
dataFormattingTable[,'LatLong_sites'] =
dataFormattingTableFieldUpdate(datasetID, 'LatLong_sites', # Fill value in below
#####
'N')
#-------------------------------------------------------------------------------*
# ---- FORMAT TIME DATA ----
#===============================================================================*
# Here, we need to extract the sampling dates.
# What is the name of the field that has information on sampling date?
# If date info is in separate columns (e.g., 'day', 'month', and 'year' cols),
# then write these field names as a vector from largest to smallest temporal grain.
#####
dateFieldName = c('record_date')
# If necessary, paste together date info from multiple columns into a single field.
# All components are pulled from dataset1 (the trimmed dataset); the original
# template mistakenly referenced the raw 'dataset' inside the loop, which would
# silently mix data frames if the raw and trimmed data ever differed in row
# order or content. For this dataset (single field 'record_date') only the
# else-branch executes.
if (length(dateFieldName) > 1) {
  newDateField = dataset1[, dateFieldName[1]]
  for (i in dateFieldName[2:length(dateFieldName)]) {
    newDateField = paste(newDateField, dataset1[, i], sep = "-")
  }
  dataset1$date = newDateField
  datefield = 'date'
} else {
  datefield = dateFieldName
}
# What is the format in which date data is recorded? For example, if it is
# recorded as 5/30/94, then this would be '%m/%d/%y', while 1994-5-30 would
# be '%Y-%m-%d'. Type "?strptime" for other examples of date formatting.
#####
dateformat = '%d%b%Y'
# If date is only listed in years:
# dateformat = '%Y'
# If the date is just a year, then make sure it is of class numeric
# and not a factor. Otherwise change to a true date object.
if (dateformat == '%Y' | dateformat == '%y') {
date = as.numeric(as.character(dataset1[, datefield]))
} else {
date = as.POSIXct(strptime(dataset1[, datefield], dateformat))
}
# A check on the structure lets you know that date field is now a date object:
class(date)
# Give a double-check, if everything looks okay replace the column:
head(dataset1[, datefield])
head(date)
dataset2 = dataset1
# Delete the old date field
dataset2 = dataset2[, -which(names(dataset2) %in% dateFieldName)]
# Assign the new date values in a field called 'date'
dataset2$date = date
# Check the results:
head(dataset2)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATE DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Notes_timeFormat. Provide a thorough description of any modifications that were made to the time field.
dataFormattingTable[,'Notes_timeFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_timeFormat', # Fill value in below
#####
'Temporal data provided as dates. The only modification to this field involved converting to a date object.')
# subannualTgrain. After exploring the time data, was this dataset sampled at a sub-annual temporal grain? Y/N
dataFormattingTable[,'subannualTgrain'] =
dataFormattingTableFieldUpdate(datasetID, 'subannualTgrain', # Fill value in below
#####
'Y')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SITE DATA ----
#===============================================================================*
# From the previous head command, we can see that sites are broken up into (potentially) 5 fields. Find the metadata link in the data formatting table and use that link to determine how sites are characterized.
# -- If sampling is nested (e.g., site, block, plot, quad as in this study), use each of the identifying fields and separate each field with an underscore. For nested samples be sure the order of concatenated columns goes from coarser to finer scales (e.g. "km_m_cm")
# -- If sites are listed as lats and longs, use the finest available grain and separate lat and long fields with an underscore.
# -- If the site definition is clear, make a new site column as necessary.
# -- If the dataset is for just a single site, and there is no site column, then add one.
# Here, we will concatenate all of the potential fields that describe the site
# in hierarchical order from largest to smallest grain. Based on the dataset,
# fill in the fields that specify nested spatial grains below.
#####
site_grain_names = c("site")
# We will now create the site field with these codes concatenated if there
# are multiple grain fields. Otherwise, site will just be the single grain field.
num_grains = length(site_grain_names)
site = dataset2[, site_grain_names[1]]
if (num_grains > 1) {
for (i in 2:num_grains) {
site = paste(site, dataset2[, site_grain_names[i]], sep = "_")
}
}
# What is the spatial grain of the finest sampling scale? For example, this might be
# a 0.25 m2 quadrat, or a 5 m transect, or a 50 ml water sample.
dataFormattingTable[,'Raw_spatial_grain'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain',
#--! PROVIDE INFO !--#
2000)
dataFormattingTable[,'Raw_spatial_grain_unit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_spatial_grain_unit',
#--! PROVIDE INFO !--#
'm2')
# BEFORE YOU CONTINUE. We need to make sure that there are at least minNTime samples for sites at the coarsest possible spatial grain.
siteCoarse = dataset2[, site_grain_names[1]]
if (dateformat == '%Y' | dateformat == '%y') {
dateYear = dataset2$date
} else {
dateYear = format(dataset2$date, '%Y')
}
datasetYearTest = data.frame(siteCoarse, dateYear)
ddply(datasetYearTest, .(siteCoarse), summarise,
lengthYears = length(unique(dateYear)))
# If the dataset has less than minNTime years per site, do not continue processing.
# Do some quality control by comparing the site fields in the dataset with the new vector of sites:
head(site)
# Check how evenly represented all of the sites are in the dataset. If this is the
# type of dataset where every site was sampled on a regular schedule, then you
# expect to see similar values here across sites. Sites that only show up a small
# percent of the time may reflect typos.
data.frame(table(site))
# All looks correct, so replace the site column in the dataset (as a factor) and remove the unnecessary fields, start by renaming the dataset to dataset3:
dataset3 = dataset2
dataset3$site = factor(site)
# Check the new dataset (are the columns as they should be?):
head(dataset3)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SITE DATA WERE MODIFIED!
# !DATA FORMATTING TABLE UPDATE!
# Raw_siteUnit. How a site is coded (i.e. if the field was concatenated such as this one, it was coded as "site_block_plot_quad"). Alternatively, if the site were concatenated from latitude and longitude fields, the encoding would be "lat_long".
dataFormattingTable[,'Raw_siteUnit'] =
dataFormattingTableFieldUpdate(datasetID, 'Raw_siteUnit', # Fill value below in quotes
#####
'site')
# spatial_scale_variable. Is a site potentially nested (e.g., plot within a quad or decimal lat longs that could be scaled up)? Y/N
dataFormattingTable[,'spatial_scale_variable'] =
dataFormattingTableFieldUpdate(datasetID, 'spatial_scale_variable',
#####
'N') # Fill value here in quotes
# Notes_siteFormat. Use this field to THOROUGHLY describe any changes made to the site field during formatting.
dataFormattingTable[,'Notes_siteFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_siteFormat', # Fill value below in quotes
#####
'Site is a 2000 m2 (20 x 100 m transect) area surveyed over 30 minutes')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT COUNT DATA ----
#===============================================================================*
# Next, we need to explore the count records. For filling out the data formatting table, we need to change the name of the field which represents counts, densities, percent cover, etc to "count". Then we will clean up unnecessary values.
names(dataset3)
summary(dataset3)
# This dataset has two fields with abundance info, 'abundance' which is categorical,
# and 'count' which is numeric. Prior to 2003, no numeric values are provided.
# When 'abundance' is NA, "this is equivalent to saying the fish was not observed,
# and later in the dataset is typically paired with a count of "0"."
# Here, we replace all NA's in the count field with numeric values based on the
# reported definition of abundance categories in the metadata:
# Abundance category Equivalent count Replaced value
# Single 1 1
# Few 2 - 10 6
# Common 11 - 100 55
# Many 100+ 100
# Fill in missing counts from the categorical abundance field using the
# category midpoints/representative values documented above.
# %in% (unlike ==) returns FALSE rather than NA for NA abundance values, so
# every logical index below is NA-free; the lookup vector also keeps the
# category -> value mapping in one visible place.
abundanceMidpoints = c(Single = 1, Few = 6, Common = 55, Many = 100)
for (categ in names(abundanceMidpoints)) {
  rowsToFill = is.na(dataset3$count) & dataset3$abundance %in% categ
  dataset3$count[rowsToFill] = abundanceMidpoints[[categ]]
}
# Records with neither a numeric count nor an abundance category are treated
# as true absences (count 0), per the metadata quoted above.
dataset3$count[is.na(dataset3$count) & is.na(dataset3$abundance)] = 0
# Now drop the categorical abundance field
dataset3 = dataset3[, -which(names(dataset3) == 'abundance')]
# Fill in the original field name here
#####
countfield = 'count'
# Renaming it (a no-op here since the field is already called 'count'):
names(dataset3)[which(names(dataset3) == countfield)] = 'count'
# Now we will remove zero counts and NA's:
summary(dataset3)
# Can usually tell if there are any zeros or NAs from that summary(). If there aren't any showing, still run these functions or continue with the update of dataset# so that you are consistent with this template.
# Subset to records > 0 (if applicable):
dataset4 = subset(dataset3, count > 0)
summary(dataset4)
# Check to make sure that by removing 0's that you haven't completely removed
# any sampling events in which nothing was observed. Compare the number of
# unique site-dates in dataset3 and dataset4.
# If there are no sampling events lost, then we can go ahead and use the
# smaller dataset4 which could save some time in subsequent analyses.
# If there are sampling events lost, then we'll keep the 0's (use dataset3).
numEventsd3 = nrow(unique(dataset3[, c('site', 'date')]))
numEventsd4 = nrow(unique(dataset4[, c('site', 'date')]))
if(numEventsd3 > numEventsd4) {
dataset4 = dataset3
} else {
dataset4 = dataset4
}
# Remove NA's:
dataset5 = na.omit(dataset4)
head(dataset5)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE COUNT DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Possible values for countFormat field are density, cover, presence and count.
dataFormattingTable[,'countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'countFormat', # Fill value below in quotes
#####
'count')
dataFormattingTable[,'Notes_countFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_countFormat', # Fill value below in quotes
#####
'Data prior to 2003 include abundance categories rather than numeric counts that were converted to numeric values based on category midpoints.')
#-------------------------------------------------------------------------------*
# ---- EXPLORE AND FORMAT SPECIES DATA ----
#===============================================================================*
# Here, your primary goal is to ensure that all of your species are valid. To do so, you need to look at the list of unique species very carefully. Avoid being too liberal in interpretation, if you notice an entry that MIGHT be a problem, but you can't say with certainty, create an issue on GitHub.
# First, what is the field name in which species or taxonomic data are stored?
# It will get converted to 'species'
#####
speciesField = 'scientificname'
dataset5$species = dataset5[, speciesField]
dataset5 = dataset5[, -which(names(dataset5) == speciesField)]
# Look at the individual species present and how frequently they occur: This way you can more easily scan the species names (listed alphabetically) and identify potential misspellings, extra characters or blank space, or other issues.
data.frame(table(dataset5$species))
# If there are entries that only specify the genus while there are others that specify the species in addition to that same genus, they need to be regrouped in order to avoid ambiguity. For example, if there are entries of 'Cygnus', 'Cygnus_columbianus', and 'Cygnus_cygnus', 'Cygnus' could refer to either species, but the observer could not identify it. This causes ambiguity in the data, and must be fixed by either 1. deleting the genus-only entry altogether, or 2. renaming the genus-species entries to just the genus-only entry.
# This decision can be fairly subjective, but generally if less than 25% of the entries are genus-only, then they can be deleted (using bad_sp). If more than 25% of the entries for that genus are only specified to the genus, then the genus-species entries should be renamed to be genus-only (using typo_name).
table(dataset5$species)
# If species names are coded (not scientific names) go back to study's metadata to learn what species should and shouldn't be in the data.
# In this example, a quick look at the metadata is not informative, unfortunately. Because of this, you should really stop here and post an issue on GitHub. With some more thorough digging, however, I've found the names represent "Kartez codes". Several species can be removed (double-checked with USDA plant codes at plants.usda.gov and another Sevilleta study (dataset 254) that provides species names for some codes). Some codes were identified with this pdf from White Sands: https://nhnm.unm.edu/sites/default/files/nonsensitive/publications/nhnm/U00MUL02NMUS.pdf
#####
# Unidentifiable / ambiguous taxa to drop entirely:
bad_sp = c('baitfish unidentified, all','Embiotocidae spp., adult', 'Embiotocidae spp., all',
'Embiotocidae spp., juvenile', 'Gobiidae spp.', 'larval fish spp., all',
'Neoclinus spp., all','Sebastes atrovirens/carnatus/caurinus/chrysomelas, juvenile',
'Sebastes spp., adult', 'Sebastes spp., all', 'Sebastes spp., juvenile')
dataset6 = dataset5[!dataset5$species %in% bad_sp,]
# It may be useful to count the number of times each name occurs, as misspellings or typos will likely
# only show up one time.
table(dataset6$species)
# If you find any potential typos, try to confirm that the "misspelling" isn't actually a valid name.
# If not, then list the typos in typo_name, and the correct spellings in good_name,
# and then replace them using the for loop below:
# In this case, we are lumping juvenile and adult life stages into single taxonomic entities
#####
# IMPORTANT: typo_name must align index-for-index with the good_name vector
# defined below — typo_name[n] is replaced by good_name[n].
typo_name = c('Aulorhynchus flavidus, adult',
'Aulorhynchus flavidus, juvenile',
'Bathymasteridae spp., all',
'Caulolatilus princeps, adult',
'Chromis punctipinnis, adult',
'Chromis punctipinnis, juvenile',
'Citharichthys spp., all',
'Embiotoca jacksoni, adult',
'Embiotoca jacksoni, juvenile',
'Embiotoca lateralis, adult',
'Embiotoca lateralis, juvenile',
'Gibbonsia elegans, all',
'Gibbonsia montereyensis, all',
'Girella nigricans, adult',
'Girella nigricans, juvenile',
'Halichoeres semicinctus, female',
'Halichoeres semicinctus, male',
'Halichoeres semicinctus, juvenile',
'Heterostichus rostratus, adult',
'Heterostichus rostratus, juvenile',
'Hypsypops rubicundus, adult',
'Hypsypops rubicundus, juvenile',
'Medialuna californiensis, adult',
'Ophiodon elongatus, adult',
'Oxyjulis californica, adult',
'Oxyjulis californica, juvenile',
'Paralabrax clathratus, adult',
'Paralabrax clathratus, juvenile',
'Porichthys notatus, all',
'Rhacochilus vacca, adult',
'Rhacochilus vacca, juvenile',
'Scorpaena guttata, adult',
'Scorpaenichthys marmoratus, adult',
'Scorpaenichthys marmoratus, juvenile',
'Sebastes atrovirens, adult',
'Sebastes atrovirens, juvenile',
'Sebastes auriculatus, adult',
'Sebastes auriculatus, juvenile',
'Sebastes auriculatus , juvenile',
'Sebastes carnatus, adult',
'Sebastes carnatus, juvenile',
'Sebastes caurinus, adult',
'Sebastes caurinus, juvenile',
'Sebastes chrysomelas, adult',
'Sebastes chrysomelas/carnatus, juvenile',
'Sebastes miniatus, adult',
'Sebastes miniatus, juvenile',
'Sebastes mystinus, adult',
'Sebastes mystinus, juvenile',
'Sebastes paucispinis, adult',
'Sebastes paucispinis, juvenile',
'Sebastes rastrelliger, adult',
'Sebastes saxicola, adult',
'Sebastes saxicola, juvenile',
'Sebastes serranoides, adult',
'Sebastes serranoides/flavidus, juvenile', #only 2 flavidus adults in dataset
'Sebastes serriceps, adult',
'Sebastes serriceps, juvenile',
'Semicossyphus pulcher, male',
'Semicossyphus pulcher, female',
'Semicossyphus pulcher, juvenile',
'Stereolepis gigas, adult'
)
#####
# Replacement names, aligned index-for-index with typo_name above.
# FIX(review): in the original, entries 6 and 7 were swapped relative to
# typo_name, so 'Chromis punctipinnis, juvenile' was renamed to
# 'Citharichthys stigmaeus, all' and 'Citharichthys spp., all' to
# 'Chromis punctipinnis, all'. They are restored to the intended mapping here;
# all other 60 pairs were verified to align.
good_name = c('Aulorhynchus flavidus, all',
'Aulorhynchus flavidus, all',
'Bathymasteridae, all',
'Caulolatilus princeps, all',
'Chromis punctipinnis, all',
'Chromis punctipinnis, all', # for 'Chromis punctipinnis, juvenile'
'Citharichthys stigmaeus, all', # for 'Citharichthys spp., all'
'Embiotoca jacksoni, all',
'Embiotoca jacksoni, all',
'Embiotoca lateralis, all',
'Embiotoca lateralis, all',
'Gibbonsia spp., all',
'Gibbonsia spp., all',
'Girella nigricans, all',
'Girella nigricans, all',
'Halichoeres semicinctus, all',
'Halichoeres semicinctus, all',
'Halichoeres semicinctus, all',
'Heterostichus rostratus, all',
'Heterostichus rostratus, all',
'Hypsypops rubicundus, all',
'Hypsypops rubicundus, all',
'Medialuna californiensis, all',
'Ophiodon elongatus, all',
'Oxyjulis californica, all',
'Oxyjulis californica, all',
'Paralabrax clathratus, all',
'Paralabrax clathratus, all',
'Porichthys spp., all',
'Rhacochilus vacca, all',
'Rhacochilus vacca, all',
'Scorpaena guttata, all',
'Scorpaenichthys marmoratus, all',
'Scorpaenichthys marmoratus, all',
'Sebastes atrovirens, all',
'Sebastes atrovirens, all',
'Sebastes auriculatus, all',
'Sebastes auriculatus, all',
'Sebastes auriculatus, all',
'Sebastes carnatus, all',
'Sebastes carnatus, all',
'Sebastes caurinus, all',
'Sebastes caurinus, all',
'Sebastes chrysomelas, all',
'Sebastes chrysomelas, all',
'Sebastes miniatus, all',
'Sebastes miniatus, all',
'Sebastes mystinus, all',
'Sebastes mystinus, all',
'Sebastes paucispinis, all',
'Sebastes paucispinis, all',
'Sebastes rastrelliger, all',
'Sebastes saxicola, all',
'Sebastes saxicola, all',
'Sebastes serranoides, all',
'Sebastes serranoides, all',
'Sebastes serriceps, all',
'Sebastes serriceps, all',
'Semicossyphus pulcher, all',
'Semicossyphus pulcher, all',
'Semicossyphus pulcher, all',
'Stereolepis gigas, all'
)
# Apply each typo/lumping correction in order: typo_name[n] -> good_name[n].
# seq_along() iterates zero times for an empty vector, so the explicit
# length guard of the original template (`if (length(typo_name) > 0)`) is
# unnecessary; behavior is otherwise identical.
for (n in seq_along(typo_name)) {
  dataset6$species[dataset6$species == typo_name[n]] = good_name[n]
}
# Reset the factor levels (drops levels removed/renamed above):
dataset6$species = factor(dataset6$species)
# Let's look at how the removal of bad species altered the length of the dataset:
nrow(dataset5)
nrow(dataset6)
# Look at the head of the dataset to ensure everything is correct:
head(dataset6)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE SPECIES DATA WERE MODIFIED!
#!DATA FORMATTING TABLE UPDATE!
# Column M. Notes_spFormat. Provide a THOROUGH description of any changes made
# to the species field, including why any species were removed.
dataFormattingTable[,'Notes_spFormat'] =
dataFormattingTableFieldUpdate(datasetID, 'Notes_spFormat', # Fill value below in quotes
#####
'Several unidentified taxa removed. Juvenile and adult (and occasionally male and female) classes of the same species were lumped into single entities.')
#-------------------------------------------------------------------------------*
# ---- MAKE DATA FRAME OF COUNT BY SITES, SPECIES, AND YEAR ----
#===============================================================================*
# Now we will make the final formatted dataset, add a datasetID field, check for errors, and remove records that cant be used for our purposes.
# First, lets add the datasetID:
dataset6$datasetID = datasetID
# Now make the compiled dataframe (summing counts within datasetID-site-date-species):
dataset7 = ddply(dataset6,.(datasetID, site, date, species),
summarize, count = sum(count))
# Explore the data frame:
dim(dataset7)
head(dataset7, 15)
summary(dataset7)
# !GIT-ADD-COMMIT-PUSH AND DESCRIBE HOW THE DATA WERE MODIFIED!
#-------------------------------------------------------------------------------*
# ---- UPDATE THE DATA FORMATTING TABLE AND WRITE OUTPUT DATA FRAMES ----
#===============================================================================*
# Update the data formatting table (this may take a moment to process). Note that the inputs for this are 'datasetID', the datasetID and the dataset form that you consider to be fully formatted.
dataFormattingTable = dataFormattingTableUpdate(datasetID, dataset7)
# Take a final look at the dataset:
head(dataset7)
summary (dataset7)
# If everything looks okay we're ready to write the formatted data frame:
write.csv(dataset7, paste("data/formatted_datasets/dataset_", datasetID, ".csv", sep = ""), row.names = F)
# !GIT-ADD-COMMIT-PUSH THE FORMATTED DATASET IN THE DATA FILE, THEN GIT-ADD-COMMIT-PUSH THE UPDATED DATA FOLDER!
# As we've now successfully created the formatted dataset, we will now update the format flag field.
dataFormattingTable[,'format_flag'] =
dataFormattingTableFieldUpdate(datasetID, 'format_flag', # Fill value below
#####
1)
# Flag codes are as follows:
# 0 = not currently worked on
# 1 = formatting complete
# 2 = formatting in process
# 3 = formatting halted, issue
# 4 = data unavailable
# 5 = data insufficient for generating occupancy data
# !GIT-ADD-COMMIT-PUSH THE DATA FORMATTING TABLE!
###################################################################################*
# ---- END DATA FORMATTING. START PROPOCC AND DATA SUMMARY ----
###################################################################################*
# We have now formatted the dataset to the finest possible spatial and temporal grain, removed bad species, and added the dataset ID. It's now time to make some scale decisions and determine the proportional occupancies.
# Load additional required libraries and dataset:
library(dplyr)
library(tidyr)
# Read in formatted dataset if skipping above formatting code (lines 1-450).
#dataset7 = read.csv(paste("data/formatted_datasets/dataset_",
# datasetID, ".csv", sep =''))
# Have a look at the dimensions of the dataset and number of sites:
dim(dataset7)
length(unique(dataset7$site))
length(unique(dataset7$date))
head(dataset7)
# Get the data formatting table for that dataset:
dataDescription = dataFormattingTable[dataFormattingTable$dataset_ID == datasetID,]
# or read it in from the saved data_formatting_table.csv if skipping lines 1-450.
#dataDescription = subset(read.csv("data_formatting_table.csv"),
# dataset_ID == datasetID)
# Check relevant table values:
dataDescription$LatLong_sites
dataDescription$spatial_scale_variable
dataDescription$Raw_siteUnit
dataDescription$subannualTgrain
# Before proceeding, we need to make decisions about the spatial and temporal grains at
# which we will conduct our analyses. Except in unusual circumstances, the temporal
# grain will almost always be 'year', but the spatial grain that best represents the
# scale of a "community" will vary based on the sampling design and the taxonomic
# group. Justify your spatial scale below with a comment.
#####
tGrain = 'year'
# Refresh your memory about the spatial grain names if this is NOT a lat-long-only
# based dataset. Set sGrain = to the hierarchical scale for analysis.
# HOWEVER, if the sites are purely defined by lat-longs, then sGrain should equal
# a numerical value specifying the block size in degrees latitude for analysis.
site_grain_names
#####
sGrain = 'site'
# This is a reasonable choice of spatial grain because ...
# ...this is the only spatial resolution of the study. Sites are typically separated
# by tens to 100s of km, so it does not make sense to aggregate them in most cases.
# The function "richnessYearSubsetFun" below will subset the data to sites with an
# adequate number of years of sampling and species richness. If there are no
# adequate years, the function will return a custom error message and you can
# try resetting sGrain above to something coarser. Keep trying until this
# runs without an error. If a particular sGrain value led to an error in this
# function, you can make a note of that in the spatial grain justification comment
# above. If this function fails for ALL spatial grains, then this dataset will
# not be suitable for analysis and you can STOP HERE.
richnessYearsTest = richnessYearSubsetFun(dataset7, spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime,
minSpRich = minSpRich,
dataDescription)
head(richnessYearsTest)
dim(richnessYearsTest) ; dim(dataset7)
# Number of unique sites meeting the minimum years/richness criteria:
goodSites = unique(richnessYearsTest$analysisSite)
length(goodSites)
# Now subset dataset7 to just those goodSites as defined. This is tricky though
# because assuming sGrain is not the finest resolution, we will need to
# match site names that begin with the string in goodSites.
# The reason to do this is that sites which don't meet the criteria (e.g. not
# enough years of data) may also have low sampling intensity that constrains
# the subsampling level of the well sampled sites.
uniqueSites = unique(dataset7$site)
fullGoodSites = c()
for (s in goodSites) {
tmp = as.character(uniqueSites[grepl(paste(s, "_", sep = ""), paste(uniqueSites, "_", sep = ""))])
fullGoodSites = c(fullGoodSites, tmp)
}
dataset8 = subset(dataset7, site %in% fullGoodSites)
# Once we've settled on spatial and temporal grains that pass our test above,
# we then need to 1) figure out what levels of spatial and temporal subsampling
# we should use to characterize that analysis grain, and 2) subset the
# formatted dataset down to that standardized level of subsampling.
# For example, if some sites had 20 spatial subsamples (e.g. quads) per year while
# others had only 16, or 10, we would identify the level of subsampling that
# at least 'topFractionSites' of sites met (with a default of 50%). We would
# discard "poorly subsampled" sites (based on this criterion) from further analysis.
# For the "well-sampled" sites, the function below randomly samples the
# appropriate number of subsamples for each year or site,
# and bases the characterization of the community in that site-year based on
# the aggregate of those standardized subsamples.
dataSubset = subsetDataFun(dataset8,
datasetID,
spatialGrain = sGrain,
temporalGrain = tGrain,
minNTime = minNTime, minSpRich = minSpRich,
proportionalThreshold = topFractionSites,
dataDescription)
subsettedData = dataSubset$data
# Take a look at the propOcc:
head(propOccFun(subsettedData))
hist(propOccFun(subsettedData)$propOcc)
# Take a look at the site summary frame:
siteSummaryFun(subsettedData)
# If everything looks good, write the files:
writePropOccSiteSummary(subsettedData)
# Save the spatial and temporal subsampling values to the data formatting table:
dataFormattingTable[,'Spatial_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Spatial_subsamples', dataSubset$w)
dataFormattingTable[,'Temporal_subsamples'] =
dataFormattingTableFieldUpdate(datasetID, 'Temporal_subsamples', dataSubset$z)
# Update Data Formatting Table with summary stats of the formatted,
# properly subsetted dataset
dataFormattingTable = dataFormattingTableUpdateFinished(datasetID, subsettedData)
# And write the final data formatting table:
write.csv(dataFormattingTable, 'data_formatting_table.csv', row.names = F)
# Remove all objects except for functions from the environment:
rm(list = setdiff(ls(), lsf.str()))
|
# Dataframe
# Build a small weather data frame and practice indexing, subsetting and
# sorting on it, then round-trip a second data frame through CSV.
days <- c('Mon','Tue','Wed','Thur','Fri','Sat','Sun')
temp <- c(28,30.5,32,31.2,29.3,27.9,26.4)
# snowed is stored as the characters 'T'/'F', NOT as logical TRUE/FALSE.
snowed <- c('T','T','F','F','T','T','F')
help('data.frame')
RPI_Weather_Week <- data.frame(days, temp, snowed)
RPI_Weather_Week
head(RPI_Weather_Week)
str(RPI_Weather_Week)
summary(RPI_Weather_Week)
RPI_Weather_Week[1,]
RPI_Weather_Week[,1]
RPI_Weather_Week[,'snowed']
RPI_Weather_Week[,'days']
RPI_Weather_Week[,'temp']
RPI_Weather_Week[1:5,c('days','temp')]
RPI_Weather_Week[1:5,'temp']
RPI_Weather_Week$temp
# FIX: snowed holds the characters 'T'/'F'; comparing against logical TRUE
# coerces TRUE to the string "TRUE", which matches nothing, so the original
# filter (snowed == TRUE) always returned zero rows. Compare against 'T'.
subset(RPI_Weather_Week, subset = snowed == 'T')
# FIX: order() expects atomic vectors; RPI_Weather_Week['snowed'] is a
# one-column data frame. Extract the column with $ instead.
sorted.snowed <- order(RPI_Weather_Week$snowed)
sorted.snowed
RPI_Weather_Week[sorted.snowed,]
# Negating a numeric column gives a decreasing sort order.
dec.snow <- order(-RPI_Weather_Week$temp)
dec.snow
RPI_Weather_Week[dec.snow,]
empty.Dataframe <- data.frame()
v1 <- 1:10
v1
v2 <- letters[1:10]
df <- data.frame(v1,v2)
df
help(write.csv)
# Round-trip df through CSV. Note: write.csv() also writes row names as an
# extra leading column, so df2 gains an 'X' column relative to df.
write.csv(df,file = 'saved_df1.csv')
df2 <- read.csv('saved_df1.csv')
df2
# EPI data
# Exploratory analysis of the 2010 Environmental Performance Index data:
# distribution summaries, histogram/density, ECDF, Q-Q plots, and a
# conditional filter. Requires the CSV to exist in the working directory below.
setwd('/Users/apple/Desktop/Data Analytics/R script')
EPI_data <- read.csv("2010EPI_data.csv", skip = 1, header = T)
head(EPI_data)
dim(EPI_data)
summary(EPI_data)
# NOTE(review): attach() puts the columns (EPI, DALY, Landlock, ...) on the
# search path; any same-named global will mask them. fix() opens an
# interactive data editor and blocks until it is closed — this script is
# meant to be run interactively, not via Rscript.
attach(EPI_data)
fix(EPI_data)
EPI
# Drop missing EPI values (E is the complete-cases vector).
tf <- is.na(EPI)
E <- EPI[!tf]
#distribution
summary(EPI)
fivenum(EPI)
stem(EPI)
# Histogram on a fixed 1-unit grid over [30, 95], as a density so the
# kernel-density line below can be overlaid.
hist(EPI,seq(30.,95.,1.0),prob = TRUE)
hist(EPI)
help(lines)
lines(density(EPI,na.rm = TRUE, bw = 1.0))
rug(EPI)
#cumulative density function
plot(ecdf(EPI), do.points = FALSE, verticals = TRUE)
par(pty = 's')
qqnorm(EPI)
qqline(EPI)
#Simulated data from t-distribution:
x <- rt(250, df = 5)
qqnorm(x); qqline(x)
#Make a Q-Q plot against the generating distribution by: x<-seq(30,95,1)
qqplot(qt(ppoints(250), df = 5), x, xlab = "Q-Q plot for t dsn")
qqline(x)
boxplot(EPI,DALY)
#conditional filter
View(EPI)
# NOTE(review): Landlock comes from the attached EPI_data; presumably a 0/1
# indicator, so !Landlock keeps the non-landlocked rows — confirm with the
# data dictionary.
EPILand<-EPI[!Landlock]
Eland <- EPILand[!is.na(EPILand)]
Eland
hist(Eland)
| /Lab 1.R | no_license | Xinzhujia/DataAnalytics2020_Xinzhu_Jia | R | false | false | 1,812 | r | # Dataframe
# Build a small weather data frame and practice indexing, subsetting and
# sorting on it, then round-trip a second data frame through CSV.
days <- c('Mon','Tue','Wed','Thur','Fri','Sat','Sun')
temp <- c(28,30.5,32,31.2,29.3,27.9,26.4)
# snowed is stored as the characters 'T'/'F', NOT as logical TRUE/FALSE.
snowed <- c('T','T','F','F','T','T','F')
help('data.frame')
RPI_Weather_Week <- data.frame(days, temp, snowed)
RPI_Weather_Week
head(RPI_Weather_Week)
str(RPI_Weather_Week)
summary(RPI_Weather_Week)
RPI_Weather_Week[1,]
RPI_Weather_Week[,1]
RPI_Weather_Week[,'snowed']
RPI_Weather_Week[,'days']
RPI_Weather_Week[,'temp']
RPI_Weather_Week[1:5,c('days','temp')]
RPI_Weather_Week[1:5,'temp']
RPI_Weather_Week$temp
# FIX: snowed holds the characters 'T'/'F'; comparing against logical TRUE
# coerces TRUE to the string "TRUE", which matches nothing, so the original
# filter (snowed == TRUE) always returned zero rows. Compare against 'T'.
subset(RPI_Weather_Week, subset = snowed == 'T')
# FIX: order() expects atomic vectors; RPI_Weather_Week['snowed'] is a
# one-column data frame. Extract the column with $ instead.
sorted.snowed <- order(RPI_Weather_Week$snowed)
sorted.snowed
RPI_Weather_Week[sorted.snowed,]
# Negating a numeric column gives a decreasing sort order.
dec.snow <- order(-RPI_Weather_Week$temp)
dec.snow
RPI_Weather_Week[dec.snow,]
empty.Dataframe <- data.frame()
v1 <- 1:10
v1
v2 <- letters[1:10]
df <- data.frame(v1,v2)
df
help(write.csv)
# Round-trip df through CSV. Note: write.csv() also writes row names as an
# extra leading column, so df2 gains an 'X' column relative to df.
write.csv(df,file = 'saved_df1.csv')
df2 <- read.csv('saved_df1.csv')
df2
# EPI data
# Exploratory analysis of the 2010 Environmental Performance Index data:
# distribution summaries, histogram/density, ECDF, Q-Q plots, and a
# conditional filter. Requires the CSV to exist in the working directory below.
setwd('/Users/apple/Desktop/Data Analytics/R script')
EPI_data <- read.csv("2010EPI_data.csv", skip = 1, header = T)
head(EPI_data)
dim(EPI_data)
summary(EPI_data)
# NOTE(review): attach() puts the columns (EPI, DALY, Landlock, ...) on the
# search path; any same-named global will mask them. fix() opens an
# interactive data editor and blocks until it is closed — this script is
# meant to be run interactively, not via Rscript.
attach(EPI_data)
fix(EPI_data)
EPI
# Drop missing EPI values (E is the complete-cases vector).
tf <- is.na(EPI)
E <- EPI[!tf]
#distribution
summary(EPI)
fivenum(EPI)
stem(EPI)
# Histogram on a fixed 1-unit grid over [30, 95], as a density so the
# kernel-density line below can be overlaid.
hist(EPI,seq(30.,95.,1.0),prob = TRUE)
hist(EPI)
help(lines)
lines(density(EPI,na.rm = TRUE, bw = 1.0))
rug(EPI)
#cumulative density function
plot(ecdf(EPI), do.points = FALSE, verticals = TRUE)
par(pty = 's')
qqnorm(EPI)
qqline(EPI)
#Simulated data from t-distribution:
x <- rt(250, df = 5)
qqnorm(x); qqline(x)
#Make a Q-Q plot against the generating distribution by: x<-seq(30,95,1)
qqplot(qt(ppoints(250), df = 5), x, xlab = "Q-Q plot for t dsn")
qqline(x)
boxplot(EPI,DALY)
#conditional filter
View(EPI)
# NOTE(review): Landlock comes from the attached EPI_data; presumably a 0/1
# indicator, so !Landlock keeps the non-landlocked rows — confirm with the
# data dictionary.
EPILand<-EPI[!Landlock]
Eland <- EPILand[!is.na(EPILand)]
Eland
hist(Eland)
|
library(ggplot2)
library(gridExtra)
library(ProbBayes)
library(VGAM)
library(tidyverse)
library(coda)
library(reshape2)
library(ggridges)
library(runjags)
library(bayesplot)
#library(CalledStrike)
# Shared plot color used throughout the chapter's figures.
crcblue <- "#2905a1"
# Section 9.1 Introduction
# Conjugate normal update for Buffalo January snowfall (20 seasons), then a
# prior/likelihood/posterior figure and a normal-vs-Cauchy prior comparison.
# NOTE(review): normal_update() and many_normal_plots() come from the
# ProbBayes package (each takes c(mean, sd) parameter pairs).
## --------------------------------------------------------
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
## --------------------------------------------------------
ybar <- mean(data$JAN)
se <- sd(data$JAN) / sqrt(20)
(post1 <- normal_update(c(10, 3), c(ybar, se)))
## --------------------------------------------------------
many_normal_plots(list(c(10, 3),
c(ybar, se),
post1)) +
theme(legend.position = "none") +
theme(text=element_text(size=18)) +
xlab(expression(mu)) +
annotate(geom = "text",
x = 10, y = 0.15,
label = "Prior", size = 7) +
annotate(geom = "text",
x = 35, y = 0.10,
label = "Likelihood", size = 7) +
annotate(geom = "text",
x = 25, y = 0.15,
label = "Posterior", size = 7) +
ylab("Density")
## --------------------------------------------------------
ggplot(data.frame(x = c(0, 20)), aes(x)) +
stat_function(fun = dnorm, size = 1.5,
args = list(mean = 10, sd = 3)) +
stat_function(fun = dcauchy, size = 1.5,
args = list(location = 10, scale = 2),
linetype = "dashed") +
xlab(expression(mu)) +
annotate(geom = "text",
x = 5.5, y = 0.10,
label = "Normal", size = 7) +
annotate(geom = "text",
x = 13.5, y = 0.14,
label = "Cauchy", size = 7) +
theme(text=element_text(size=18))
# Section 9.2 Markov Chains
# A 6-state random walk: p is the starting distribution (mass on state 3),
# P the transition matrix (rows sum to 1).
## --------------------------------------------------------
p <- c(0, 0, 1, 0, 0, 0)
P <- matrix(c(.5, .5, 0, 0, 0, 0,
.25, .5, .25, 0, 0, 0,
0, .25, .5, .25, 0, 0,
0, 0, .25, .5, .25, 0,
0, 0, 0, .25, .5, .25,
0, 0, 0, 0, .5, .5),
nrow=6, ncol=6, byrow=TRUE)
## --------------------------------------------------------
print(p %*% P, digits = 5)
## --------------------------------------------------------
print(p %*% P %*% P %*% P %*% P, digits = 5)
## --------------------------------------------------------
# P^100 — rows converge to the stationary distribution.
Pm <- diag(rep(1, 6))
for(j in 1:100){
Pm <- Pm %*% P
}
print(Pm, digits = 5)
## --------------------------------------------------------
# Simulate 10,000 steps of the chain starting in state 3.
set.seed(123)
s <- vector("numeric", 10000)
s[1] <- 3
for (j in 2:10000){
s[j] <- sample(1:6, size=1, prob=P[s[j - 1], ])
}
S <- data.frame(Iteration = 1:10000,
Location = s)
## --------------------------------------------------------
# Running relative frequency of each state, to visualize convergence.
S %>% mutate(L1 = (Location == 1),
L2 = (Location == 2),
L3 = (Location == 3),
L4 = (Location == 4),
L5 = (Location == 5),
L6 = (Location == 6)) %>%
mutate(Proportion_1 = cumsum(L1) / Iteration,
Proportion_2 = cumsum(L2) / Iteration,
Proportion_3 = cumsum(L3) / Iteration,
Proportion_4 = cumsum(L4) / Iteration,
Proportion_5 = cumsum(L5) / Iteration,
Proportion_6 = cumsum(L6) / Iteration) %>%
select(Iteration, Proportion_1, Proportion_2, Proportion_3,
Proportion_4, Proportion_5, Proportion_6) -> S1
gather(S1, Outcome, Probability, -Iteration) -> S2
ggplot(S2, aes(Iteration, Probability)) +
geom_line() +
facet_wrap(~ Outcome, ncol = 3) +
ylim(0, .4) +
ylab("Relative Frequency") +
theme(text=element_text(size=18)) +
scale_x_continuous(breaks = c(0, 3000, 6000, 9000))
## --------------------------------------------------------
# w is stationary for P: w %*% P returns w.
w <- matrix(c(.1,.2,.2,.2,.2,.1), nrow=1, ncol=6)
w %*% P
# Section 9.3 The Metropolis Algorithm
## --------------------------------------------------------
# Unnormalized target probability mass function on the states 1..8.
#
# Args:
#   x: state value(s); vectorized over x.
# Returns: the unnormalized weight of each state, and 0 for anything
#   outside 1..8 (so proposals off the support are always rejected).
pd <- function(x){
  values <- c(5, 10, 4, 4, 20, 20, 12, 5)
  # seq_along() instead of 1:length() — the safe-sequence idiom (correct
  # even for an empty weight vector).
  ifelse(x %in% seq_along(values), values[x], 0)
}
# Tabulate the (unnormalized) target distribution and plot it.
# NOTE(review): prob_plot() is from the ProbBayes package.
prob_dist <- data.frame(x = 1:8,
prob = pd(1:8))
## --------------------------------------------------------
### TO UPDATE
prob_plot(prob_dist, Color = crcblue) +
theme(text=element_text(size=18)) +
ylab("Probability")
## --------------------------------------------------------
# Metropolis random walk on a discrete state space.
#
# Args:
#   pd: (unnormalized) target probability mass function.
#   start: initial state.
#   num_steps: number of steps to simulate.
# Returns: numeric vector of the visited states, one per step.
random_walk <- function(pd, start, num_steps){
  path <- numeric(num_steps)
  state <- start
  for (step in seq_len(num_steps)) {
    # Propose a unit move left or right, accept with prob pd(new)/pd(old).
    proposal <- state + sample(c(-1, 1), 1)
    if (runif(1) < pd(proposal) / pd(state)) {
      state <- proposal
    }
    path[step] <- state
  }
  path
}
## --------------------------------------------------------
# Run the walk for 10,000 steps and compare the empirical state frequencies
# with the exact (normalized) target distribution.
set.seed(123)
out <- random_walk(pd, 4, 10000)
data.frame(out) %>% group_by(out) %>%
summarize(N = n(), Prob = N / 10000) -> S
## --------------------------------------------------------
prob_dist %>% mutate(Prob = prob / sum(prob)) ->
prob_dist
df <- rbind(data.frame(x = prob_dist$x,
Prob = prob_dist$Prob,
Type = "Actual"),
data.frame(x = S$out, Prob = S$Prob,
Type = "Simulated")
)
ggplot(df, aes(x, Prob, fill=Type)) +
geom_bar(stat = "identity", color = "black",
position = position_dodge()) +
ylab("Probability") +
scale_fill_manual(values=c("gray", crcblue)) +
theme(text=element_text(size=18))
## --------------------------------------------------------
## Figure 8.6
## --------------------------------------------------------
# Random-walk Metropolis sampler with a uniform proposal on
# (current - C, current + C).
#
# Args:
#   logpost: function(theta, ...) returning the log posterior density.
#   current: starting value of the chain.
#   C: half-width of the uniform proposal window.
#   iter: number of iterations.
#   ...: extra arguments forwarded to logpost.
# Returns: list with S (the simulated chain, length iter) and
#   accept_rate (proportion of accepted proposals).
metropolis <- function(logpost, current, C, iter, ...){
  S <- rep(0, iter)
  n_accept <- 0
  # Cache the log posterior at the current state so logpost is evaluated
  # once per iteration instead of twice (the original recomputed it).
  lp_current <- logpost(current, ...)
  for(j in seq_len(iter)){
    candidate <- runif(1, min = current - C,
                       max = current + C)
    lp_candidate <- logpost(candidate, ...)
    # Acceptance probability on the log scale; RNG call order matches the
    # original (one runif for the proposal, one for the accept step).
    prob <- exp(lp_candidate - lp_current)
    if (runif(1) < prob) {
      current <- candidate
      lp_current <- lp_candidate
      n_accept <- n_accept + 1
    }
    S[j] <- current
  }
  list(S = S, accept_rate = n_accept / iter)
}
# Section 9.4 Cauchy-Normal Problem
## --------------------------------------------------------
# Log posterior for the Cauchy-prior / normal-likelihood model.
#
# Args:
#   theta: value of the mean parameter.
#   s: list with loc, scale (Cauchy prior), ybar, se (sampling model).
# Returns: log prior + log likelihood evaluated at theta.
lpost <- function(theta, s){
  log_prior <- dcauchy(theta, s$loc, s$scale, log = TRUE)
  log_like <- dnorm(s$ybar, theta, s$se, log = TRUE)
  log_prior + log_like
}
## --------------------------------------------------------
# Section 9.4 script: run the Metropolis sampler on the Buffalo snowfall
# model, compare trace plots for several proposal widths C, then overlay
# prior, likelihood and (kernel-smoothed) posterior.
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
s <- list(loc = 10, scale = 2,
ybar = mean(data$JAN),
se = sd(data$JAN) / sqrt(20))
## --------------------------------------------------------
out <- metropolis(lpost, 5, 20, 10000, s)
out$accept_rate
## --------------------------------------------------------
# Trace plots for four proposal widths; each facet title records C and the
# realized acceptance rate.
manyC <- c(0.3, 3, 30, 200)
M <- NULL
set.seed(1223)
for(j in 1:4){
out <- metropolis(lpost, 0, manyC[j], 5000, s)
M <- rbind(M, data.frame(X = out$S,
Iteration = 1:5000,
C = paste("C = ",manyC[j],
", Accept = ", out$accept_rate)))
}
## --------------------------------------------------------
ggplot(M, aes(Iteration, X)) +
geom_line(color = crcblue) +
facet_wrap(~ C, nrow=2) +
theme(text=element_text(size=18)) +
scale_x_continuous(breaks = c(0, 2000, 4000))
## --------------------------------------------------------
# NOTE(review): 'out' here is the last (C = 200) chain from the loop above.
mu <- seq(0, 40, length.out = 200)
prior <- dcauchy(mu, 10, 2)
likelihood <- dnorm(mu, s$ybar, s$se)
posterior <- density(out$S, bw = 0.6)
P1 <- data.frame(MU = mu,
Density = prior,
Type = "Prior")
P2 <- data.frame(MU = mu,
Density = likelihood,
Type = "Likelihood")
P3 <- data.frame(MU = posterior$x,
Density = posterior$y,
Type = "Posterior")
P <- rbind(P1, P2, P3)
ggplot(P, aes(MU, Density, linetype = Type)) +
geom_line(size = 1.3, color = crcblue) +
xlab(expression(mu)) +
annotate(geom = "text",
x = 4.5, y = 0.10,
label = "Prior", size = 7) +
annotate(geom = "text",
x = 35, y = 0.10,
label = "Likelihood", size = 7) +
annotate(geom = "text",
x = 18, y = 0.10,
label = "Posterior", size = 7) +
theme(text=element_text(size=18)) +
theme(legend.position = "none")
# Section 9.5 Gibbs Sampling
## --------------------------------------------------------
# Joint distribution of (X, Y) on a 4x4 grid (entries sum to 1); rows and
# columns are labeled 1..4 for the conditional-distribution demos below.
p <- matrix(c(4, 3, 2, 1,
3, 4, 3, 2,
2, 3, 4, 3,
1, 2, 3, 4) / 40, 4, 4, byrow = TRUE)
dimnames(p)[[1]] <- 1:4
dimnames(p)[[2]] <- 1:4
p
## --------------------------------------------------------
# Column 1: joint values used to form the conditional of X given Y = 1.
p[, 1]
## --------------------------------------------------------
# Row 2: joint values used to form the conditional of Y given X = 2.
p[2, ]
## --------------------------------------------------------
# Gibbs sampler for a discrete bivariate distribution.
#
# Args:
#   p: joint probability matrix (rows index X, columns index Y).
#   i: starting value for X.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix of sampled (X, Y) pairs.
gibbs_discrete <- function(p, i = 1, iter = 1000){
  draws <- matrix(0, iter, 2)
  n_x <- dim(p)[1]
  n_y <- dim(p)[2]
  for (k in seq_len(iter)) {
    # Draw Y | X = i from row i, then X | Y = j from column j.
    j <- sample(seq_len(n_y), 1, prob = p[i, ])
    i <- sample(seq_len(n_x), 1, prob = p[, j])
    draws[k, ] <- c(i, j)
  }
  draws
}
## --------------------------------------------------------
# Empirical joint frequencies from the Gibbs output; should approximate p.
# NOTE(review): the divisor 1000 hard-codes gibbs_discrete's default iter.
sp <- data.frame(gibbs_discrete(p))
names(sp) <- c("X", "Y")
table(sp) / 1000
## --------------------------------------------------------
# Gibbs sampler for the beta-binomial model:
#   y | p ~ Binomial(n, p),  p ~ Beta(a, b).
#
# Args:
#   n: binomial sample size.
#   a, b: beta prior parameters.
#   p: starting probability.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix; column 1 holds y draws, column 2 p draws.
gibbs_betabin <- function(n, a, b, p = 0.5, iter = 1000){
  y_draws <- numeric(iter)
  p_draws <- numeric(iter)
  for (k in seq_len(iter)) {
    # y | p, then p | y from the conjugate beta full conditional.
    y <- rbinom(1, size = n, prob = p)
    p <- rbeta(1, y + a, n - y + b)
    y_draws[k] <- y
    p_draws[k] <- p
  }
  matrix(c(y_draws, p_draws), ncol = 2)
}
## --------------------------------------------------------
# Run the beta-binomial Gibbs sampler and plot the marginal of y.
set.seed(123)
sp <- data.frame(gibbs_betabin(20, 5, 5))
ggplot(data.frame(Y=sp$X1), aes(Y)) +
geom_bar(width=0.5, fill=crcblue) +
ylab("Frequency") +
theme(text=element_text(size=18))
## --------------------------------------------------------
# Gibbs sampler for the normal model with unknown mean and precision:
#   y_i ~ N(mu, 1/phi),  mu ~ N(mu0, 1/phi0),  phi ~ Gamma(a, b).
#
# Args:
#   s: list with the data y and prior parameters mu0, phi0, a, b.
#   phi: starting value for the precision.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix; column 1 holds mu draws, column 2 phi draws.
gibbs_normal <- function(s, phi = 0.002, iter = 1000){
  y <- s$y
  n <- length(y)
  ybar <- mean(y)
  draws <- matrix(0, iter, 2)
  for (k in seq_len(iter)) {
    # Full conditional for mu given phi: normal with precision-weighted mean.
    precision <- s$phi0 + n * phi
    post_mean <- (s$phi0 * s$mu0 + n * phi * ybar) / precision
    mu <- rnorm(1, mean = post_mean, sd = sqrt(1 / precision))
    # Full conditional for phi given mu: gamma.
    shape <- n / 2 + s$a
    rate <- sum((y - mu) ^ 2) / 2 + s$b
    phi <- rgamma(1, shape = shape, rate = rate)
    draws[k, ] <- c(mu, phi)
  }
  draws
}
## --------------------------------------------------------
# Gibbs sampling for the Buffalo snowfall data, then Section 9.6 diagnostics
# (trace plot, autocorrelation) and the Section 9.7 JAGS model setup.
s <- list(y = data$JAN, mu0 = 10, phi0 = 1/3^2, a = 1, b = 1)
## --------------------------------------------------------
set.seed(123)
out <- gibbs_normal(s, iter=10000)
df <- data.frame(out)
names(df) <- c("mean", "precision")
# Convert the precision draws to standard deviations for plotting.
df$standard_deviation <- sqrt(1 / df$precision)
## --------------------------------------------------------
ggplot(df, aes(mean, standard_deviation)) +
geom_point(size = 0.2, color = crcblue) +
theme(text=element_text(size=18)) +
xlab(expression(mu)) +
ylab(expression(sigma))
# Section 9.6 MCMC Inputs and Diagnostics
## --------------------------------------------------------
# (Re)define the Cauchy-normal log posterior and data inputs used below.
lpost <- function(theta, s){
dcauchy(theta, s$loc, s$scale, log = TRUE) +
dnorm(s$ybar, theta, s$se, log = TRUE)
}
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
s <- list(loc = 10, scale = 2,
ybar = mean(data$JAN),
se = sd(data$JAN) / sqrt(20))
## --------------------------------------------------------
buffalo_metrop <- metropolis(lpost, 10,
20, 5000, s)
buffalo_metrop$accept_rate
## --------------------------------------------------------
# Trace plot of the simulated chain.
# NOTE(review): increasefont() is a helper from the ProbBayes ecosystem, not
# base ggplot2.
df <- data.frame(mean = buffalo_metrop$S)
df$Iteration <- 1:5000
ggplot(df, aes(Iteration, mean)) +
geom_line(color = crcblue) +
increasefont() +
ylab(expression(mu))
## --------------------------------------------------------
# Autocorrelation of the chain via coda::mcmc / coda::autocorr.
df_mcmc <- mcmc(df)
ac_data <- data.frame(Lag = 0:20,
Autocorrelation =
autocorr(df_mcmc[, "mean"],
lags = 0:20)[,,1])
ggplot(ac_data, aes(Lag, Autocorrelation)) +
geom_col(fill = crcblue, width = 0.5) +
theme(text=element_text(size=18)) +
ylim(-1, 1) +
geom_hline(yintercept = 0)
# Section 9.7 Using JAGS
## --------------------------------------------------------
# JAGS model: normal sampling with normal prior on mu and gamma prior on the
# precision phi (the string below is JAGS code, not R).
modelString = "
model{
## sampling
for (i in 1:N) {
y[i] ~ dnorm(mu, phi)
}
## priors
mu ~ dnorm(mu0, phi0)
phi ~ dgamma(a, b)
sigma <- sqrt(pow(phi, -1))
}
"
## --------------------------------------------------------
buffalo <- read.csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
y <- data$JAN
N <- length(y)
the_data <- list("y" = y, "N" = N,
"mu0"=10, "phi0"=1/3^2,
"a"=1,"b"=1)
## --------------------------------------------------------
# Per-chain initialization for run.jags: fixes each chain's RNG seed and
# generator so the JAGS runs are reproducible.
#
# Args:
#   chain: chain index (1 or 2).
# Returns: list with .RNG.seed and .RNG.name for that chain.
initsfunction <- function(chain){
  seeds <- c(1, 2)
  generators <- c("base::Super-Duper",
                  "base::Wichmann-Hill")
  list(.RNG.seed = seeds[chain],
       .RNG.name = generators[chain])
}
## --------------------------------------------------------
# Fit the normal model in JAGS (1000 adaptation, 5000 burn-in, 5000 kept
# draws), then inspect traces, summaries, and a two-chain run with
# overdispersed starting values.
posterior <- run.jags(modelString,
n.chains = 1,
data = the_data,
monitor = c("mu", "sigma"),
adapt = 1000,
burnin = 5000,
sample = 5000,
inits = initsfunction)
## --------------------------------------------------------
plot(posterior, vars = "mu")
## --------------------------------------------------------
plot(posterior, vars = "sigma")
## --------------------------------------------------------
print(posterior, digits = 3)
## --------------------------------------------------------
# Overdispersed starting values for a two-chain convergence check.
InitialValues <- list(
list(mu = 2, phi = 1 / 4),
list(mu = 30, phi = 1 / 900)
)
## --------------------------------------------------------
posterior <- run.jags(modelString,
n.chains = 2,
data = the_data,
monitor = c("mu", "sigma"),
adapt = 1000,
burnin = 5000,
sample = 5000,
inits = InitialValues)
## --------------------------------------------------------
summary(posterior$mcmc[[1]], digits = 3)
## --------------------------------------------------------
summary(posterior$mcmc[[2]], digits = 3)
## --------------------------------------------------------
# Keep the first chain's draws as a data frame for posterior prediction.
post <- data.frame(posterior$mcmc[[1]])
## --------------------------------------------------------
# Simulate one posterior-predictive replicate of the 20 January snowfall
# observations, using the posterior draw stored in row j of the global
# data frame `post` (columns mu and sigma).
#
# Args:
#   j: row index into `post`.
# Returns: numeric vector of 20 simulated observations.
postpred_sim <- function(j){
  draw <- post[j, ]
  rnorm(20, mean = draw$mu, sd = draw$sigma)
}
## --------------------------------------------------------
# Posterior-predictive checks for the snowfall model, then a second JAGS
# example comparing female/male proportions on the logit scale.
set.seed(123)
print(postpred_sim(1), digits = 3)
## --------------------------------------------------------
# One replicated data set per posterior draw (5000 x 20 matrix).
set.seed(123)
ypred <- t(sapply(1:5000, postpred_sim))
## --------------------------------------------------------
# Compare eight replicated samples with the observed data.
df <- NULL
for(j in 1:8){
dfnew <- data.frame(Type = paste("Sample", j), Snowfall = ypred[j, ])
df <- rbind(df, dfnew)
}
df <- rbind(df, data.frame(Type = "Observed", Snowfall = y))
## --------------------------------------------------------
ggplot(df, aes(Snowfall)) +
geom_histogram(bins = 10, fill = crcblue, color = "white") +
facet_wrap(~ Type, ncol = 3) +
theme(text=element_text(size=18))
## --------------------------------------------------------
# Predictive distribution of the sample maximum, vs. the observed maximum.
postpred_max <- apply(ypred, 1, max)
## --------------------------------------------------------
ggplot(data.frame(Maximum = postpred_max), aes(Maximum)) +
geom_histogram(bins = 20,
fill = crcblue,
color = "white") +
geom_vline(xintercept = max(y), size = 1.5) +
annotate(geom = "text", x = 78, y = 750, label="Observed
Maximum", size = 6) +
theme(text=element_text(size=18))
## --------------------------------------------------------
# JAGS model: two binomial proportions parameterized by a common logit mean
# theta and a difference lambda (the string below is JAGS code, not R).
modelString = "
model{
## sampling
yF ~ dbin(pF, nF)
yM ~ dbin(pM, nM)
logit(pF) <- theta - lambda / 2
logit(pM) <- theta + lambda / 2
## priors
theta ~ dnorm(mu0, phi0)
lambda ~ dnorm(0, phi)
}
"
## --------------------------------------------------------
the_data <- list("yF" = 75, "nF" = 151,
"yM" = 39, "nM" = 93,
"mu0" = 0, "phi0" = 0.001, "phi" = 2)
## --------------------------------------------------------
# Reproducible per-chain RNG settings for run.jags.
initsfunction <- function(chain){
.RNG.seed <- c(1,2)[chain]
.RNG.name <- c("base::Super-Duper",
"base::Wichmann-Hill")[chain]
return(list(.RNG.seed=.RNG.seed,
.RNG.name=.RNG.name))
}
## --------------------------------------------------------
posterior <- run.jags(modelString,
data = the_data,
n.chains = 1,
monitor = c("pF", "pM", "lambda"),
adapt = 1000,
burnin = 5000,
sample = 5000)
## --------------------------------------------------------
post <- data.frame(posterior$mcmc[[1]])
## --------------------------------------------------------
ggplot(post, aes(lambda)) +
geom_density(size = 1.5, color = crcblue) +
geom_vline(xintercept = 0, size = 1.5) +
theme(text=element_text(size=18)) +
xlab(expression(lambda))
## --------------------------------------------------------
# Posterior probability that the female proportion exceeds the male one.
post %>%
summarize(Prob = mean(lambda < 0))
| /R Code/chapter 9/scripts/Chapter9_Script.R | no_license | monika76five/ProbBayes | R | false | false | 17,134 | r |
library(ggplot2)
library(gridExtra)
library(ProbBayes)
library(VGAM)
library(tidyverse)
library(coda)
library(reshape2)
library(ggridges)
library(runjags)
library(bayesplot)
#library(CalledStrike)
# Shared plot color used throughout the chapter's figures.
crcblue <- "#2905a1"
# Section 9.1 Introduction
# Conjugate normal update for Buffalo January snowfall (20 seasons), then a
# prior/likelihood/posterior figure and a normal-vs-Cauchy prior comparison.
# NOTE(review): normal_update() and many_normal_plots() come from the
# ProbBayes package (each takes c(mean, sd) parameter pairs).
## --------------------------------------------------------
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
## --------------------------------------------------------
ybar <- mean(data$JAN)
se <- sd(data$JAN) / sqrt(20)
(post1 <- normal_update(c(10, 3), c(ybar, se)))
## --------------------------------------------------------
many_normal_plots(list(c(10, 3),
c(ybar, se),
post1)) +
theme(legend.position = "none") +
theme(text=element_text(size=18)) +
xlab(expression(mu)) +
annotate(geom = "text",
x = 10, y = 0.15,
label = "Prior", size = 7) +
annotate(geom = "text",
x = 35, y = 0.10,
label = "Likelihood", size = 7) +
annotate(geom = "text",
x = 25, y = 0.15,
label = "Posterior", size = 7) +
ylab("Density")
## --------------------------------------------------------
ggplot(data.frame(x = c(0, 20)), aes(x)) +
stat_function(fun = dnorm, size = 1.5,
args = list(mean = 10, sd = 3)) +
stat_function(fun = dcauchy, size = 1.5,
args = list(location = 10, scale = 2),
linetype = "dashed") +
xlab(expression(mu)) +
annotate(geom = "text",
x = 5.5, y = 0.10,
label = "Normal", size = 7) +
annotate(geom = "text",
x = 13.5, y = 0.14,
label = "Cauchy", size = 7) +
theme(text=element_text(size=18))
# Section 9.2 Markov Chains
# A 6-state random walk: p is the starting distribution (mass on state 3),
# P the transition matrix (rows sum to 1).
## --------------------------------------------------------
p <- c(0, 0, 1, 0, 0, 0)
P <- matrix(c(.5, .5, 0, 0, 0, 0,
.25, .5, .25, 0, 0, 0,
0, .25, .5, .25, 0, 0,
0, 0, .25, .5, .25, 0,
0, 0, 0, .25, .5, .25,
0, 0, 0, 0, .5, .5),
nrow=6, ncol=6, byrow=TRUE)
## --------------------------------------------------------
print(p %*% P, digits = 5)
## --------------------------------------------------------
print(p %*% P %*% P %*% P %*% P, digits = 5)
## --------------------------------------------------------
# P^100 — rows converge to the stationary distribution.
Pm <- diag(rep(1, 6))
for(j in 1:100){
Pm <- Pm %*% P
}
print(Pm, digits = 5)
## --------------------------------------------------------
# Simulate 10,000 steps of the chain starting in state 3.
set.seed(123)
s <- vector("numeric", 10000)
s[1] <- 3
for (j in 2:10000){
s[j] <- sample(1:6, size=1, prob=P[s[j - 1], ])
}
S <- data.frame(Iteration = 1:10000,
Location = s)
## --------------------------------------------------------
# Running relative frequency of each state, to visualize convergence.
S %>% mutate(L1 = (Location == 1),
L2 = (Location == 2),
L3 = (Location == 3),
L4 = (Location == 4),
L5 = (Location == 5),
L6 = (Location == 6)) %>%
mutate(Proportion_1 = cumsum(L1) / Iteration,
Proportion_2 = cumsum(L2) / Iteration,
Proportion_3 = cumsum(L3) / Iteration,
Proportion_4 = cumsum(L4) / Iteration,
Proportion_5 = cumsum(L5) / Iteration,
Proportion_6 = cumsum(L6) / Iteration) %>%
select(Iteration, Proportion_1, Proportion_2, Proportion_3,
Proportion_4, Proportion_5, Proportion_6) -> S1
gather(S1, Outcome, Probability, -Iteration) -> S2
ggplot(S2, aes(Iteration, Probability)) +
geom_line() +
facet_wrap(~ Outcome, ncol = 3) +
ylim(0, .4) +
ylab("Relative Frequency") +
theme(text=element_text(size=18)) +
scale_x_continuous(breaks = c(0, 3000, 6000, 9000))
## --------------------------------------------------------
# w is stationary for P: w %*% P returns w.
w <- matrix(c(.1,.2,.2,.2,.2,.1), nrow=1, ncol=6)
w %*% P
# Section 9.3 The Metropolis Algorithm
## --------------------------------------------------------
# Unnormalized target probability mass function on the states 1..8.
#
# Args:
#   x: state value(s); vectorized over x.
# Returns: the unnormalized weight of each state, and 0 for anything
#   outside 1..8 (so proposals off the support are always rejected).
pd <- function(x){
  values <- c(5, 10, 4, 4, 20, 20, 12, 5)
  # seq_along() instead of 1:length() — the safe-sequence idiom (correct
  # even for an empty weight vector).
  ifelse(x %in% seq_along(values), values[x], 0)
}
# Tabulate the (unnormalized) target distribution and plot it.
# NOTE(review): prob_plot() is from the ProbBayes package.
prob_dist <- data.frame(x = 1:8,
prob = pd(1:8))
## --------------------------------------------------------
### TO UPDATE
prob_plot(prob_dist, Color = crcblue) +
theme(text=element_text(size=18)) +
ylab("Probability")
## --------------------------------------------------------
# Metropolis random walk on a discrete state space.
#
# Args:
#   pd: (unnormalized) target probability mass function.
#   start: initial state.
#   num_steps: number of steps to simulate.
# Returns: numeric vector of the visited states, one per step.
random_walk <- function(pd, start, num_steps){
  path <- numeric(num_steps)
  state <- start
  for (step in seq_len(num_steps)) {
    # Propose a unit move left or right, accept with prob pd(new)/pd(old).
    proposal <- state + sample(c(-1, 1), 1)
    if (runif(1) < pd(proposal) / pd(state)) {
      state <- proposal
    }
    path[step] <- state
  }
  path
}
## --------------------------------------------------------
# Run the walk for 10,000 steps and compare the empirical state frequencies
# with the exact (normalized) target distribution.
set.seed(123)
out <- random_walk(pd, 4, 10000)
data.frame(out) %>% group_by(out) %>%
summarize(N = n(), Prob = N / 10000) -> S
## --------------------------------------------------------
prob_dist %>% mutate(Prob = prob / sum(prob)) ->
prob_dist
df <- rbind(data.frame(x = prob_dist$x,
Prob = prob_dist$Prob,
Type = "Actual"),
data.frame(x = S$out, Prob = S$Prob,
Type = "Simulated")
)
ggplot(df, aes(x, Prob, fill=Type)) +
geom_bar(stat = "identity", color = "black",
position = position_dodge()) +
ylab("Probability") +
scale_fill_manual(values=c("gray", crcblue)) +
theme(text=element_text(size=18))
## --------------------------------------------------------
## Figure 8.6
## --------------------------------------------------------
# Random-walk Metropolis sampler with a uniform proposal on
# (current - C, current + C).
#
# Args:
#   logpost: function(theta, ...) returning the log posterior density.
#   current: starting value of the chain.
#   C: half-width of the uniform proposal window.
#   iter: number of iterations.
#   ...: extra arguments forwarded to logpost.
# Returns: list with S (the simulated chain, length iter) and
#   accept_rate (proportion of accepted proposals).
metropolis <- function(logpost, current, C, iter, ...){
  S <- rep(0, iter)
  n_accept <- 0
  # Cache the log posterior at the current state so logpost is evaluated
  # once per iteration instead of twice (the original recomputed it).
  lp_current <- logpost(current, ...)
  for(j in seq_len(iter)){
    candidate <- runif(1, min = current - C,
                       max = current + C)
    lp_candidate <- logpost(candidate, ...)
    # Acceptance probability on the log scale; RNG call order matches the
    # original (one runif for the proposal, one for the accept step).
    prob <- exp(lp_candidate - lp_current)
    if (runif(1) < prob) {
      current <- candidate
      lp_current <- lp_candidate
      n_accept <- n_accept + 1
    }
    S[j] <- current
  }
  list(S = S, accept_rate = n_accept / iter)
}
# Section 9.4 Cauchy-Normal Problem
## --------------------------------------------------------
# Log posterior for the Cauchy-prior / normal-likelihood model.
#
# Args:
#   theta: value of the mean parameter.
#   s: list with loc, scale (Cauchy prior), ybar, se (sampling model).
# Returns: log prior + log likelihood evaluated at theta.
lpost <- function(theta, s){
  log_prior <- dcauchy(theta, s$loc, s$scale, log = TRUE)
  log_like <- dnorm(s$ybar, theta, s$se, log = TRUE)
  log_prior + log_like
}
## --------------------------------------------------------
# Section 9.4 script: run the Metropolis sampler on the Buffalo snowfall
# model, compare trace plots for several proposal widths C, then overlay
# prior, likelihood and (kernel-smoothed) posterior.
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
s <- list(loc = 10, scale = 2,
ybar = mean(data$JAN),
se = sd(data$JAN) / sqrt(20))
## --------------------------------------------------------
out <- metropolis(lpost, 5, 20, 10000, s)
out$accept_rate
## --------------------------------------------------------
# Trace plots for four proposal widths; each facet title records C and the
# realized acceptance rate.
manyC <- c(0.3, 3, 30, 200)
M <- NULL
set.seed(1223)
for(j in 1:4){
out <- metropolis(lpost, 0, manyC[j], 5000, s)
M <- rbind(M, data.frame(X = out$S,
Iteration = 1:5000,
C = paste("C = ",manyC[j],
", Accept = ", out$accept_rate)))
}
## --------------------------------------------------------
ggplot(M, aes(Iteration, X)) +
geom_line(color = crcblue) +
facet_wrap(~ C, nrow=2) +
theme(text=element_text(size=18)) +
scale_x_continuous(breaks = c(0, 2000, 4000))
## --------------------------------------------------------
# NOTE(review): 'out' here is the last (C = 200) chain from the loop above.
mu <- seq(0, 40, length.out = 200)
prior <- dcauchy(mu, 10, 2)
likelihood <- dnorm(mu, s$ybar, s$se)
posterior <- density(out$S, bw = 0.6)
P1 <- data.frame(MU = mu,
Density = prior,
Type = "Prior")
P2 <- data.frame(MU = mu,
Density = likelihood,
Type = "Likelihood")
P3 <- data.frame(MU = posterior$x,
Density = posterior$y,
Type = "Posterior")
P <- rbind(P1, P2, P3)
ggplot(P, aes(MU, Density, linetype = Type)) +
geom_line(size = 1.3, color = crcblue) +
xlab(expression(mu)) +
annotate(geom = "text",
x = 4.5, y = 0.10,
label = "Prior", size = 7) +
annotate(geom = "text",
x = 35, y = 0.10,
label = "Likelihood", size = 7) +
annotate(geom = "text",
x = 18, y = 0.10,
label = "Posterior", size = 7) +
theme(text=element_text(size=18)) +
theme(legend.position = "none")
# Section 9.5 Gibbs Sampling
## --------------------------------------------------------
# Joint distribution of (X, Y) on a 4x4 grid (entries sum to 1); rows and
# columns are labeled 1..4 for the conditional-distribution demos below.
p <- matrix(c(4, 3, 2, 1,
3, 4, 3, 2,
2, 3, 4, 3,
1, 2, 3, 4) / 40, 4, 4, byrow = TRUE)
dimnames(p)[[1]] <- 1:4
dimnames(p)[[2]] <- 1:4
p
## --------------------------------------------------------
# Column 1: joint values used to form the conditional of X given Y = 1.
p[, 1]
## --------------------------------------------------------
# Row 2: joint values used to form the conditional of Y given X = 2.
p[2, ]
## --------------------------------------------------------
# Gibbs sampler for a discrete bivariate distribution.
#
# Args:
#   p: joint probability matrix (rows index X, columns index Y).
#   i: starting value for X.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix of sampled (X, Y) pairs.
gibbs_discrete <- function(p, i = 1, iter = 1000){
  draws <- matrix(0, iter, 2)
  n_x <- dim(p)[1]
  n_y <- dim(p)[2]
  for (k in seq_len(iter)) {
    # Draw Y | X = i from row i, then X | Y = j from column j.
    j <- sample(seq_len(n_y), 1, prob = p[i, ])
    i <- sample(seq_len(n_x), 1, prob = p[, j])
    draws[k, ] <- c(i, j)
  }
  draws
}
## --------------------------------------------------------
# Empirical joint frequencies from the Gibbs output; should approximate p.
# NOTE(review): the divisor 1000 hard-codes gibbs_discrete's default iter.
sp <- data.frame(gibbs_discrete(p))
names(sp) <- c("X", "Y")
table(sp) / 1000
## --------------------------------------------------------
# Gibbs sampler for the beta-binomial model:
#   y | p ~ Binomial(n, p),  p ~ Beta(a, b).
#
# Args:
#   n: binomial sample size.
#   a, b: beta prior parameters.
#   p: starting probability.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix; column 1 holds y draws, column 2 p draws.
gibbs_betabin <- function(n, a, b, p = 0.5, iter = 1000){
  y_draws <- numeric(iter)
  p_draws <- numeric(iter)
  for (k in seq_len(iter)) {
    # y | p, then p | y from the conjugate beta full conditional.
    y <- rbinom(1, size = n, prob = p)
    p <- rbeta(1, y + a, n - y + b)
    y_draws[k] <- y
    p_draws[k] <- p
  }
  matrix(c(y_draws, p_draws), ncol = 2)
}
## --------------------------------------------------------
# Run the beta-binomial Gibbs sampler and plot the marginal of y.
set.seed(123)
sp <- data.frame(gibbs_betabin(20, 5, 5))
ggplot(data.frame(Y=sp$X1), aes(Y)) +
geom_bar(width=0.5, fill=crcblue) +
ylab("Frequency") +
theme(text=element_text(size=18))
## --------------------------------------------------------
# Gibbs sampler for the normal model with unknown mean and precision:
#   y_i ~ N(mu, 1/phi),  mu ~ N(mu0, 1/phi0),  phi ~ Gamma(a, b).
#
# Args:
#   s: list with the data y and prior parameters mu0, phi0, a, b.
#   phi: starting value for the precision.
#   iter: number of Gibbs cycles.
# Returns: an iter x 2 matrix; column 1 holds mu draws, column 2 phi draws.
gibbs_normal <- function(s, phi = 0.002, iter = 1000){
  y <- s$y
  n <- length(y)
  ybar <- mean(y)
  draws <- matrix(0, iter, 2)
  for (k in seq_len(iter)) {
    # Full conditional for mu given phi: normal with precision-weighted mean.
    precision <- s$phi0 + n * phi
    post_mean <- (s$phi0 * s$mu0 + n * phi * ybar) / precision
    mu <- rnorm(1, mean = post_mean, sd = sqrt(1 / precision))
    # Full conditional for phi given mu: gamma.
    shape <- n / 2 + s$a
    rate <- sum((y - mu) ^ 2) / 2 + s$b
    phi <- rgamma(1, shape = shape, rate = rate)
    draws[k, ] <- c(mu, phi)
  }
  draws
}
## --------------------------------------------------------
s <- list(y = data$JAN, mu0 = 10, phi0 = 1/3^2, a = 1, b = 1)
## --------------------------------------------------------
set.seed(123)
out <- gibbs_normal(s, iter=10000)
df <- data.frame(out)
names(df) <- c("mean", "precision")
df$standard_deviation <- sqrt(1 / df$precision)
## --------------------------------------------------------
ggplot(df, aes(mean, standard_deviation)) +
geom_point(size = 0.2, color = crcblue) +
theme(text=element_text(size=18)) +
xlab(expression(mu)) +
ylab(expression(sigma))
# Section 9.6 MCMC Inputs and Diagnostics
## --------------------------------------------------------
lpost <- function(theta, s){
dcauchy(theta, s$loc, s$scale, log = TRUE) +
dnorm(s$ybar, theta, s$se, log = TRUE)
}
# Buffalo January snowfall: seasons 59-78 of the CSV.
buffalo <- read_csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
# Inputs for lpost(): Cauchy prior location/scale plus the sample summary.
s <- list(loc = 10, scale = 2,
ybar = mean(data$JAN),
se = sd(data$JAN) / sqrt(20))
## --------------------------------------------------------
# Metropolis sampling of the posterior.  NOTE(review): `metropolis()` is
# defined elsewhere -- confirm its argument order (here: logpost, start,
# proposal scale, iterations, data).
buffalo_metrop <- metropolis(lpost, 10,
20, 5000, s)
buffalo_metrop$accept_rate
## --------------------------------------------------------
# Trace plot of the 5000 simulated draws.
df <- data.frame(mean = buffalo_metrop$S)
df$Iteration <- 1:5000
ggplot(df, aes(Iteration, mean)) +
geom_line(color = crcblue) +
increasefont() +
ylab(expression(mu))
## --------------------------------------------------------
# Autocorrelation of the chain at lags 0..20 (coda's mcmc/autocorr).
df_mcmc <- mcmc(df)
ac_data <- data.frame(Lag = 0:20,
Autocorrelation =
autocorr(df_mcmc[, "mean"],
lags = 0:20)[,,1])
ggplot(ac_data, aes(Lag, Autocorrelation)) +
geom_col(fill = crcblue, width = 0.5) +
theme(text=element_text(size=18)) +
ylim(-1, 1) +
geom_hline(yintercept = 0)
# Section 9.7 Using JAGS
## --------------------------------------------------------
# JAGS model: normal sampling with unknown mean and precision, a normal
# prior on mu and a gamma prior on phi; sigma is derived from phi.
modelString = "
model{
## sampling
for (i in 1:N) {
y[i] ~ dnorm(mu, phi)
}
## priors
mu ~ dnorm(mu0, phi0)
phi ~ dgamma(a, b)
sigma <- sqrt(pow(phi, -1))
}
"
## --------------------------------------------------------
# Data list handed to JAGS: the 20 January observations plus the fixed
# prior hyperparameters referenced inside the model string.
buffalo <- read.csv("../data/buffalo_snowfall.csv")
data <- buffalo[59:78, c("SEASON", "JAN")]
y <- data$JAN
N <- length(y)
the_data <- list("y" = y, "N" = N,
"mu0"=10, "phi0"=1/3^2,
"a"=1,"b"=1)
## --------------------------------------------------------
# Deterministic per-chain RNG settings so JAGS runs are reproducible:
# chain 1 uses Super-Duper with seed 1, chain 2 uses Wichmann-Hill with
# seed 2.  Returns the list of .RNG.* values run.jags expects.
initsfunction <- function(chain) {
  seeds <- c(1, 2)
  rngs  <- c("base::Super-Duper", "base::Wichmann-Hill")
  list(.RNG.seed = seeds[chain], .RNG.name = rngs[chain])
}
## --------------------------------------------------------
# Single-chain JAGS run: 1000 adaptation, 5000 burn-in, 5000 kept draws.
posterior <- run.jags(modelString,
n.chains = 1,
data = the_data,
monitor = c("mu", "sigma"),
adapt = 1000,
burnin = 5000,
sample = 5000,
inits = initsfunction)
## --------------------------------------------------------
# Diagnostic plots for each monitored parameter.
plot(posterior, vars = "mu")
## --------------------------------------------------------
plot(posterior, vars = "sigma")
## --------------------------------------------------------
print(posterior, digits = 3)
## --------------------------------------------------------
# Two overdispersed starting points to compare chains.
InitialValues <- list(
list(mu = 2, phi = 1 / 4),
list(mu = 30, phi = 1 / 900)
)
## --------------------------------------------------------
posterior <- run.jags(modelString,
n.chains = 2,
data = the_data,
monitor = c("mu", "sigma"),
adapt = 1000,
burnin = 5000,
sample = 5000,
inits = InitialValues)
## --------------------------------------------------------
# Per-chain summaries should agree if the chains have converged.
summary(posterior$mcmc[[1]], digits = 3)
## --------------------------------------------------------
summary(posterior$mcmc[[2]], digits = 3)
## --------------------------------------------------------
# Draws from the first chain, used by the posterior predictive code below.
post <- data.frame(posterior$mcmc[[1]])
## --------------------------------------------------------
# Draw one replicated data set of 20 values from posterior draw j.
# Reads the global data frame `post` (columns "mu" and "sigma") built
# from the JAGS output above.
postpred_sim <- function(j) {
  pars <- post[j, ]
  rnorm(20, mean = pars$mu, sd = pars$sigma)
}
## --------------------------------------------------------
set.seed(123)
print(postpred_sim(1), digits = 3)
## --------------------------------------------------------
# One replicated data set (20 values) per posterior draw: a 5000 x 20 matrix.
set.seed(123)
ypred <- t(sapply(1:5000, postpred_sim))
## --------------------------------------------------------
# Stack eight replicated samples plus the observed data for comparison.
df <- NULL
for(j in 1:8){
dfnew <- data.frame(Type = paste("Sample", j), Snowfall = ypred[j, ])
df <- rbind(df, dfnew)
}
df <- rbind(df, data.frame(Type = "Observed", Snowfall = y))
## --------------------------------------------------------
ggplot(df, aes(Snowfall)) +
geom_histogram(bins = 10, fill = crcblue, color = "white") +
facet_wrap(~ Type, ncol = 3) +
theme(text=element_text(size=18))
## --------------------------------------------------------
# Posterior predictive check on the maximum: distribution of the max of
# each replicated sample versus the observed maximum.
postpred_max <- apply(ypred, 1, max)
## --------------------------------------------------------
ggplot(data.frame(Maximum = postpred_max), aes(Maximum)) +
geom_histogram(bins = 20,
fill = crcblue,
color = "white") +
geom_vline(xintercept = max(y), size = 1.5) +
annotate(geom = "text", x = 78, y = 750, label="Observed
Maximum", size = 6) +
theme(text=element_text(size=18))
## --------------------------------------------------------
# JAGS model for two binomial proportions on the logit scale: theta is
# the common level and lambda the male-minus-female difference
# (logit(pM) - logit(pF) = lambda).
modelString = "
model{
## sampling
yF ~ dbin(pF, nF)
yM ~ dbin(pM, nM)
logit(pF) <- theta - lambda / 2
logit(pM) <- theta + lambda / 2
## priors
theta ~ dnorm(mu0, phi0)
lambda ~ dnorm(0, phi)
}
"
## --------------------------------------------------------
# Observed successes/trials for each group plus prior precisions.
the_data <- list("yF" = 75, "nF" = 151,
"yM" = 39, "nM" = 93,
"mu0" = 0, "phi0" = 0.001, "phi" = 2)
## --------------------------------------------------------
# Reproducible per-chain RNG settings (same helper as defined earlier).
initsfunction <- function(chain){
.RNG.seed <- c(1,2)[chain]
.RNG.name <- c("base::Super-Duper",
"base::Wichmann-Hill")[chain]
return(list(.RNG.seed=.RNG.seed,
.RNG.name=.RNG.name))
}
## --------------------------------------------------------
posterior <- run.jags(modelString,
data = the_data,
n.chains = 1,
monitor = c("pF", "pM", "lambda"),
adapt = 1000,
burnin = 5000,
sample = 5000)
## --------------------------------------------------------
post <- data.frame(posterior$mcmc[[1]])
## --------------------------------------------------------
# Posterior density of the difference lambda, with a reference line at 0.
ggplot(post, aes(lambda)) +
geom_density(size = 1.5, color = crcblue) +
geom_vline(xintercept = 0, size = 1.5) +
theme(text=element_text(size=18)) +
xlab(expression(lambda))
## --------------------------------------------------------
# Posterior probability that lambda < 0, i.e. that pM < pF.
post %>%
summarize(Prob = mean(lambda < 0))
|
# Unit tests (testthat) for the FARSAssignment package's FARS helpers:
# reading accident data, filename construction, per-year reads and yearly
# summaries.  fars_map_state is not covered (see TODO at the bottom).
library(FARSAssignment)
context("functions")
# Reading a bundled data file should yield a data frame.
test_that("fars_read returns a data frame", {
filename <- system.file("extdata", "accident_2013.csv.bz2", package = "FARSAssignment")
df <- fars_read(filename)
expect_is(df, "data.frame")
})
# A nonexistent path should raise this specific error message.
test_that("fars_read generates an error if the file doesn't exist", {
expect_error(fars_read("bogus"), "file 'bogus' does not exist")
})
test_that("make_filename returns a proper filename", {
expect_match(make_filename(2015), "accident_2015.csv.bz2")
})
test_that("fars_read_years returns a list of data frames", {
result <- fars_read_years(2013:2014)
expect_is(result, "list")
expect_is(result[[1]], "data.frame")
})
# A year outside the bundled data should only warn, not error.
test_that("fars_read_years generates a warning for invalid year", {
expect_warning(fars_read_years(2000), "invalid year: 2000")
})
test_that("fars_summarize_years returns a data frame", {
df <- fars_summarize_years(2013:2014)
expect_is(df, "data.frame")
})
# TODO: figure out how to test fars_map_state
| /tests/testthat/test_fars_functions.R | permissive | cdv04/FARSAssignment | R | false | false | 1,008 | r | library(FARSAssignment)
# Duplicate copy (dataset dump) of the FARSAssignment test file; the
# leading library() call is fused into the preceding metadata row.
context("functions")
# Reading a bundled data file should yield a data frame.
test_that("fars_read returns a data frame", {
filename <- system.file("extdata", "accident_2013.csv.bz2", package = "FARSAssignment")
df <- fars_read(filename)
expect_is(df, "data.frame")
})
# A nonexistent path should raise this specific error message.
test_that("fars_read generates an error if the file doesn't exist", {
expect_error(fars_read("bogus"), "file 'bogus' does not exist")
})
test_that("make_filename returns a proper filename", {
expect_match(make_filename(2015), "accident_2015.csv.bz2")
})
test_that("fars_read_years returns a list of data frames", {
result <- fars_read_years(2013:2014)
expect_is(result, "list")
expect_is(result[[1]], "data.frame")
})
# A year outside the bundled data should only warn, not error.
test_that("fars_read_years generates a warning for invalid year", {
expect_warning(fars_read_years(2000), "invalid year: 2000")
})
test_that("fars_summarize_years returns a data frame", {
df <- fars_summarize_years(2013:2014)
expect_is(df, "data.frame")
})
# TODO: figure out how to test fars_map_state
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outcome_multi.R
\name{fit_gsynth_multi}
\alias{fit_gsynth_multi}
\title{Use gsynth to fit a factor model}
\usage{
fit_gsynth_multi(X, trt, r = 0, r.end = 5, force = 3, CV = 1)
}
\arguments{
\item{X}{Matrix of outcomes}
\item{trt}{Vector of treatment status for each unit}
\item{r}{Number of factors to use (or start with if CV==1)}
\item{r.end}{Max number of factors to consider if CV==1}
\item{CV}{Whether to do CV (0=no CV, 1=yes CV)}
\item{force}{Fixed effects to include: 0 = none, 1 = unit, 2 = time, 3 = two-way}
}
\value{
\itemize{
\item{y0hat }{Predicted outcome under control}
\item{params }{Regression parameters}}
}
\description{
Use gsynth to fit a factor model.
}
| /man/fit_gsynth_multi.Rd | permissive | jgrennan/augsynth | R | false | true | 781 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/outcome_multi.R
\name{fit_gsynth_multi}
\alias{fit_gsynth_multi}
\title{Use gsynth to fit a factor model}
\usage{
fit_gsynth_multi(X, trt, r = 0, r.end = 5, force = 3, CV = 1)
}
\arguments{
\item{X}{Matrix of outcomes}
\item{trt}{Vector of treatment status for each unit}
\item{r}{Number of factors to use (or start with if CV==1)}
\item{r.end}{Max number of factors to consider if CV==1}
\item{CV}{Whether to do CV (0=no CV, 1=yes CV)}
\item{force}{Fixed effects to include: 0 = none, 1 = unit, 2 = time, 3 = two-way}
}
\value{
\itemize{
\item{y0hat }{Predicted outcome under control}
\item{params }{Regression parameters}}
}
\description{
Use gsynth to fit a factor model.
}
|
library(tidytree)
### Name: child
### Title: child
### Aliases: child child.tbl_tree
### ** Examples
# Build a random 4-tip tree, convert it to tidytree's tabular form, and
# list the child nodes of node 4.
library(ape)
tree <- rtree(4)
x <- as_tibble(tree)
child(x, 4)
| /data/genthat_extracted_code/tidytree/examples/child.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 171 | r | library(tidytree)
### Name: child
### Title: child
### Aliases: child child.tbl_tree
### ** Examples
# Random 4-tip tree converted to tibble form; query the children of node 4.
library(ape)
tree <- rtree(4)
x <- as_tibble(tree)
child(x, 4)
|
#' Compute the objective function on a grid of params and show simulated moments
#'
#' Will vary each parameter independently in a chosen range and report the value
#' of the resulting simulated moments in relation to the moments in the data.
#' Can be used to construct a heuristic identification argument. Basically it can
#' be seen which parameter affects which dimension of the model output, i.e. which
#' simulated moment.
#' @param mcf object of class mopt
#' @param ns number of points in each dimension to evaluate
#' @param pad from bounds of parameter ranges. e.g. p in [0,1], avoid 0 and 1 with pad>0.
#' @param file \code{/path/to/your/file}
#' @return list with info and a data.frame \code{slices} summarizing all information of the exercise: parameter values,
#' simulated moments, data moments. Input to \code{\link{plot.slices(slices)}}.
#' @export
#' @example examples/example-slices.r
compute.slices <- function(mcf, ns = 30, pad = 0.1, file = "est.slices.RData") {
  # Evaluate the objective once at the starting point; the value is
  # returned to the caller as v.start.
  p2 <- mcf$initial_value
  cat('evaluate objective function at starting point\n')
  maxres <- MOPT_OBJ_FUNC(p2)
  cat(sprintf('%d nodes, %d parameters, %d points per grid \n',
              mcf$N, length(mcf$params_to_sample), ns))
  rr <- data.frame()  # accumulates one row per successful evaluation
  nn <- 1             # progress counter over parameters
  for (pp in mcf$params_to_sample) {
    # Grid over the parameter's [lb, ub] range, padded away from the bounds.
    lb <- mcf$pdesc[pp, 'lb']
    ub <- mcf$pdesc[pp, 'ub']
    prange <- seq(lb + (ub - lb) * pad / 2,
                  lb + (ub - lb) * (1 - pad / 2), l = ns)
    # One full parameter list per grid value; only `pp` varies.
    ptmp <- p2
    ps <- list()
    for (j in seq_along(prange)) {
      ptmp[[pp]] <- prange[j]
      ptmp$param_value <- prange[j]
      ptmp$chain <- j
      ps[[j]] <- ptmp
    }
    cat('sending evaluations for ', pp, ' in (', lb + (ub - lb) * pad / 2,
        ',', lb + (ub - lb) * (1 - pad / 2), ')\n')
    # Dispatch the evaluations according to the configured execution mode.
    if (mcf$mode == 'mpi') {
      rs <- mpi.parLapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc)
    } else if (mcf$mode == 'multicore') {
      rs <- mclapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc,
                     mc.cores = mcf$N)
    } else {
      rs <- lapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc)
    }
    # Collect the successful evaluations for this parameter.
    rr1 <- data.frame()
    for (jj in seq_along(rs)) {   # seq_along is safe for empty rs (was 1:length)
      if (is.atomic(rs[[jj]])) next
      if (is.null(rs[[jj]])) next
      if (rs[[jj]]$status == -1) next
      sm <- rs[[jj]]$sm
      sm$names <- paste('sm', sm$names, sep = '.')
      sm <- cast(sm, ~names)[-1]  # convert long to wide
      # Everything but the submoments, plus the submoments in wide form.
      rr1.tmp <- data.frame(
        parvalue = prange[jj],
        as.data.frame(ps[[jj]]),
        as.data.frame(rs[[jj]][setdiff(names(rs[[jj]]), c('sm', 'p', 'chain'))]),
        sm)
      rr1 <- rbind(rr1, rr1.tmp)
    }
    cat('got ', nrow(rr1), ' values\n')
    if (nrow(rr1) > 0) {
      rr1$param <- pp
      rr <- rbind(rr, rr1)
    }
    cat('done with ', pp, '(', nn, '/', length(mcf$params_to_sample), ')\n')
    nn <- nn + 1
  }
  # Persist the raw results so plot.slices() can pick them up later.
  save(rr, mcf, file = file)
  res <- list()
  res$p.start <- p2
  res$v.start <- maxres
  res$slices <- rr
  return(res)
}
#' plot.slices
#'
#' generates plots for each moments/parameter combinations
#' depends on output of function compute.slices(), which saves
#' a dataframe with plotting data and the used mopt config into a
#' a file \code{est.slices.RData}
#' @param file path/to/est.slices.RData if not in getwd()
#' @param outpath path to directory where to save plots
#' @param type string indicating file type to save plot as. currently png and pdf only.
#' @export
plot.slices <- function(file = NULL, outpath = '', type = "png") {
  # load() brings `rr` (slice evaluations) and `mcf` (the mopt config)
  # into this function's environment; both are saved by compute.slices().
  if (is.null(file)) {
    load('est.slices.RData')
  } else {
    load(file)
  }
  # Keep the objective value plus all submoments, in long form.
  rr$conv <- as.numeric(rr$status) > 0
  rr.m <- melt(rr, id = c('param', 'param_value', 'conv'))
  rr.m <- subset(rr.m, rr.m$variable == 'value' | str_detect(rr.m$variable, 'sm'))
  # Strip the literal "sm." prefix added by compute.slices().
  # Bug fix: without fixed = TRUE the "." matched any character.
  rr.m$variable <- gsub('sm.', '', rr.m$variable, fixed = TRUE)
  rr.m$from <- 'model'
  rr.m$value <- as.numeric(rr.m$value)
  rr.m <- data.table(rr.m)
  # Initial parameter values, restricted to the sampled parameters; shown
  # as the red dashed vertical line in every panel.
  init.param.data <- data.frame(value = unlist(mcf$initial_value),
                                param = names(mcf$initial_value))
  init.param.data <- subset(init.param.data, param %in% unique(rr.m$param))
  # One figure per simulated moment.
  for (pp in unique(rr.m$variable)) {
    if (pp == 'value') next
    gp <- ggplot(subset(rr.m, variable == pp & from == 'model')) +
      geom_point(aes(x = param_value, y = value, color = conv), size = 1) +
      geom_line(aes(x = param_value, y = value, group = 'param'), size = 0.3) +
      geom_hline(aes(yintercept = value),
                 data = subset(mcf$data.moments, moment == pp), linetype = 2) +
      geom_vline(aes(xintercept = value),
                 data = init.param.data, linetype = 2, color = 'red') +
      facet_wrap(~param, scales = 'free_x', ncol = 3) +
      scale_y_continuous(paste('value of', pp)) +
      scale_x_continuous('varying parameter') + theme_bw()
    # Bug fix: pass the plot explicitly -- ggsave() would otherwise save
    # the last *printed* plot, and gp is never printed here.
    if (type == "png") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.png'),
                              plot = gp, width = 10.6, height = 5.93)
    if (type == "pdf") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.pdf'),
                              plot = gp, width = 10.6, height = 5.93)
  }
  # Finally the objective function itself.
  pp <- 'value'
  # Skip the plot when every objective value is NA or infinite.
  if (rr.m[variable == pp, (all(!is.finite(value)) | all(is.na(value)))]) {
    warning('ALL values in the objective function are NA or Inf!\n')
  } else {
    gp <- ggplot(subset(rr.m, variable == pp & from == 'model')) +
      geom_point(aes(x = param_value, y = value, color = conv), size = 1) +
      geom_line(aes(x = param_value, y = value, group = 'param'), size = 0.3) +
      geom_vline(aes(xintercept = value),
                 data = init.param.data, linetype = 2, color = 'red') +
      facet_wrap(~param, scales = 'free_x', ncol = 3) +
      scale_y_continuous('objective function') +
      scale_x_continuous('varying parameter') + theme_bw()
    if (type == "png") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.png'),
                              plot = gp, width = 10.6, height = 5.93)
    if (type == "pdf") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.pdf'),
                              plot = gp, width = 10.6, height = 5.93)
  }
}
#' Compute the objective function on a grid of params and show custom model output
#'
#' Essentially the same as \code{\link{compute.slices}}, but does not report simulated
#' moments but other model output. Useful for model output that is multidimensional.
#' It's a simplified version of \code{\link{compute.slices}} in that it does not further
#' process the model output: it return a list with nests "parameter name", "value of parameter",
#' "model output".
#' For example instead of reporting the mean of a certain statistic, this function can
#' return a matrix or a higher dimensional array. Say you want to return the life-cycle
#' profile of a certain model variable x. This will be a vector of length N, where N is
#' the number of periods in the model. The user has to design the MOPT_OBJ_FUN in such a way
#' that it returns the required output. There are 2 requirements for what \code{MOPT_OBJ_FUN} has to return.
#' First it has to be a list, second, the list needs components "status" (indicating whether a particular evaluation
#' is valid in some sense) and "output", which contains your custom model output.
#' @param mcf object of class mopt
#' @param ns number of points in each dimension to evaluate
#' @param pad from bounds of parameter ranges. e.g. p in [0,1], avoid 0 and 1 with pad>0.
#' @param file \code{/path/to/your/file.RData}
#' @return list by paramter name, parameter value index, containing the value of the parameter vector and a list \code{data} containing
#' your custom model output.
#' @export
#' @example examples/example-slices2.r
compute.slices2 <- function(mcf, ns = 30, pad = 0.1, file = "est.slices.RData") {
  # Evaluate the objective once at the starting point (kept for parity
  # with compute.slices; its side effects may matter to the objective).
  p2 <- mcf$initial_value
  cat('evaluate objective function at starting point\n')
  maxres <- MOPT_OBJ_FUNC(p2)
  cat(sprintf('%d nodes, %d parameters, %d points per grid \n',
              mcf$N, length(mcf$params_to_sample), ns))
  nn <- 1  # progress counter over parameters
  # One list entry per sampled parameter, each holding the custom model
  # output at every grid value.
  out <- vector("list", length(mcf$params_to_sample))
  names(out) <- mcf$params_to_sample
  for (pp in mcf$params_to_sample) {
    # Grid over the parameter's [lb, ub] range, padded away from the bounds.
    lb <- mcf$pdesc[pp, 'lb']
    ub <- mcf$pdesc[pp, 'ub']
    prange <- seq(lb + (ub - lb) * pad / 2,
                  lb + (ub - lb) * (1 - pad / 2), l = ns)
    # One full parameter list per grid value; only `pp` varies.
    ptmp <- p2
    ps <- list()
    for (j in seq_along(prange)) {
      ptmp[[pp]] <- prange[j]
      ptmp$param_value <- prange[j]
      ptmp$chain <- j
      ps[[j]] <- ptmp
    }
    cat('sending evaluations for ', pp, ' in (', lb + (ub - lb) * pad / 2,
        ',', lb + (ub - lb) * (1 - pad / 2), ')\n')
    # Dispatch the evaluations according to the configured execution mode.
    if (mcf$mode == 'mpi') {
      rs <- clusterApplyLB(cl = mcf$cl, x = ps,
                           fun = mopt_obj_wrapper_custom, objfunc = mcf$objfunc)
    } else {
      rs <- mcf$mylbapply(ps, mopt_obj_wrapper_custom, objfunc = mcf$objfunc)
    }
    # Keep the model output of each successful evaluation.
    out[[pp]] <- list()
    for (jj in seq_along(rs)) {   # seq_along is safe for empty rs (was 1:length)
      if (is.null(rs[[jj]])) next
      if (rs[[jj]]$status == -1) next
      out[[pp]][[jj]] <- list(pars = ps[[jj]], data = rs[[jj]]$output)
    }
    cat('got ', length(out[[pp]]), ' values\n')
    cat('done with ', pp, '(', nn, '/', length(mcf$params_to_sample), ')\n')
    nn <- nn + 1
  }
  save(out, mcf, file = file)
  return(out)
}
| /R/fun.slices.r | no_license | lionup/mopt | R | false | false | 10,038 | r |
#' Compute the objective function on a grid of params and show simulated moments
#'
#' Will vary each parameter independently in a chosen range and report the value
#' of the resulting simulated moments in relation to the moments in the data.
#' Can be used to construct a heuristic identification argument. Basically it can
#' be seen which parameter affects which dimension of the model output, i.e. which
#' simulated moment.
#' @param mcf object of class mopt
#' @param ns number of points in each dimension to evaluate
#' @param pad from bounds of parameter ranges. e.g. p in [0,1], avoid 0 and 1 with pad>0.
#' @param file \code{/path/to/your/file}
#' @return list with info and a data.frame \code{slices} summarizing all information of the exercise: parameter values,
#' simulated moments, data moments. Input to \code{\link{plot.slices(slices)}}.
#' @export
#' @example examples/example-slices.r
compute.slices <- function(mcf, ns = 30, pad = 0.1, file = "est.slices.RData") {
  # Evaluate the objective once at the starting point; the value is
  # returned to the caller as v.start.
  p2 <- mcf$initial_value
  cat('evaluate objective function at starting point\n')
  maxres <- MOPT_OBJ_FUNC(p2)
  cat(sprintf('%d nodes, %d parameters, %d points per grid \n',
              mcf$N, length(mcf$params_to_sample), ns))
  rr <- data.frame()  # accumulates one row per successful evaluation
  nn <- 1             # progress counter over parameters
  for (pp in mcf$params_to_sample) {
    # Grid over the parameter's [lb, ub] range, padded away from the bounds.
    lb <- mcf$pdesc[pp, 'lb']
    ub <- mcf$pdesc[pp, 'ub']
    prange <- seq(lb + (ub - lb) * pad / 2,
                  lb + (ub - lb) * (1 - pad / 2), l = ns)
    # One full parameter list per grid value; only `pp` varies.
    ptmp <- p2
    ps <- list()
    for (j in seq_along(prange)) {
      ptmp[[pp]] <- prange[j]
      ptmp$param_value <- prange[j]
      ptmp$chain <- j
      ps[[j]] <- ptmp
    }
    cat('sending evaluations for ', pp, ' in (', lb + (ub - lb) * pad / 2,
        ',', lb + (ub - lb) * (1 - pad / 2), ')\n')
    # Dispatch the evaluations according to the configured execution mode.
    if (mcf$mode == 'mpi') {
      rs <- mpi.parLapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc)
    } else if (mcf$mode == 'multicore') {
      rs <- mclapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc,
                     mc.cores = mcf$N)
    } else {
      rs <- lapply(ps, mopt_obj_wrapper, objfunc = mcf$objfunc)
    }
    # Collect the successful evaluations for this parameter.
    rr1 <- data.frame()
    for (jj in seq_along(rs)) {   # seq_along is safe for empty rs (was 1:length)
      if (is.atomic(rs[[jj]])) next
      if (is.null(rs[[jj]])) next
      if (rs[[jj]]$status == -1) next
      sm <- rs[[jj]]$sm
      sm$names <- paste('sm', sm$names, sep = '.')
      sm <- cast(sm, ~names)[-1]  # convert long to wide
      # Everything but the submoments, plus the submoments in wide form.
      rr1.tmp <- data.frame(
        parvalue = prange[jj],
        as.data.frame(ps[[jj]]),
        as.data.frame(rs[[jj]][setdiff(names(rs[[jj]]), c('sm', 'p', 'chain'))]),
        sm)
      rr1 <- rbind(rr1, rr1.tmp)
    }
    cat('got ', nrow(rr1), ' values\n')
    if (nrow(rr1) > 0) {
      rr1$param <- pp
      rr <- rbind(rr, rr1)
    }
    cat('done with ', pp, '(', nn, '/', length(mcf$params_to_sample), ')\n')
    nn <- nn + 1
  }
  # Persist the raw results so plot.slices() can pick them up later.
  save(rr, mcf, file = file)
  res <- list()
  res$p.start <- p2
  res$v.start <- maxres
  res$slices <- rr
  return(res)
}
#' plot.slices
#'
#' generates plots for each moments/parameter combinations
#' depends on output of function compute.slices(), which saves
#' a dataframe with plotting data and the used mopt config into a
#' a file \code{est.slices.RData}
#' @param file path/to/est.slices.RData if not in getwd()
#' @param outpath path to directory where to save plots
#' @param type string indicating file type to save plot as. currently png and pdf only.
#' @export
plot.slices <- function(file = NULL, outpath = '', type = "png") {
  # load() brings `rr` (slice evaluations) and `mcf` (the mopt config)
  # into this function's environment; both are saved by compute.slices().
  if (is.null(file)) {
    load('est.slices.RData')
  } else {
    load(file)
  }
  # Keep the objective value plus all submoments, in long form.
  rr$conv <- as.numeric(rr$status) > 0
  rr.m <- melt(rr, id = c('param', 'param_value', 'conv'))
  rr.m <- subset(rr.m, rr.m$variable == 'value' | str_detect(rr.m$variable, 'sm'))
  # Strip the literal "sm." prefix added by compute.slices().
  # Bug fix: without fixed = TRUE the "." matched any character.
  rr.m$variable <- gsub('sm.', '', rr.m$variable, fixed = TRUE)
  rr.m$from <- 'model'
  rr.m$value <- as.numeric(rr.m$value)
  rr.m <- data.table(rr.m)
  # Initial parameter values, restricted to the sampled parameters; shown
  # as the red dashed vertical line in every panel.
  init.param.data <- data.frame(value = unlist(mcf$initial_value),
                                param = names(mcf$initial_value))
  init.param.data <- subset(init.param.data, param %in% unique(rr.m$param))
  # One figure per simulated moment.
  for (pp in unique(rr.m$variable)) {
    if (pp == 'value') next
    gp <- ggplot(subset(rr.m, variable == pp & from == 'model')) +
      geom_point(aes(x = param_value, y = value, color = conv), size = 1) +
      geom_line(aes(x = param_value, y = value, group = 'param'), size = 0.3) +
      geom_hline(aes(yintercept = value),
                 data = subset(mcf$data.moments, moment == pp), linetype = 2) +
      geom_vline(aes(xintercept = value),
                 data = init.param.data, linetype = 2, color = 'red') +
      facet_wrap(~param, scales = 'free_x', ncol = 3) +
      scale_y_continuous(paste('value of', pp)) +
      scale_x_continuous('varying parameter') + theme_bw()
    # Bug fix: pass the plot explicitly -- ggsave() would otherwise save
    # the last *printed* plot, and gp is never printed here.
    if (type == "png") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.png'),
                              plot = gp, width = 10.6, height = 5.93)
    if (type == "pdf") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.pdf'),
                              plot = gp, width = 10.6, height = 5.93)
  }
  # Finally the objective function itself.
  pp <- 'value'
  # Skip the plot when every objective value is NA or infinite.
  if (rr.m[variable == pp, (all(!is.finite(value)) | all(is.na(value)))]) {
    warning('ALL values in the objective function are NA or Inf!\n')
  } else {
    gp <- ggplot(subset(rr.m, variable == pp & from == 'model')) +
      geom_point(aes(x = param_value, y = value, color = conv), size = 1) +
      geom_line(aes(x = param_value, y = value, group = 'param'), size = 0.3) +
      geom_vline(aes(xintercept = value),
                 data = init.param.data, linetype = 2, color = 'red') +
      facet_wrap(~param, scales = 'free_x', ncol = 3) +
      scale_y_continuous('objective function') +
      scale_x_continuous('varying parameter') + theme_bw()
    if (type == "png") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.png'),
                              plot = gp, width = 10.6, height = 5.93)
    if (type == "pdf") ggsave(paste0(outpath, 'plot_ParamVsMoment_', pp, '.pdf'),
                              plot = gp, width = 10.6, height = 5.93)
  }
}
#' Compute the objective function on a grid of params and show custom model output
#'
#' Essentially the same as \code{\link{compute.slices}}, but does not report simulated
#' moments but other model output. Useful for model output that is multidimensional.
#' It's a simplified version of \code{\link{compute.slices}} in that it does not further
#' process the model output: it return a list with nests "parameter name", "value of parameter",
#' "model output".
#' For example instead of reporting the mean of a certain statistic, this function can
#' return a matrix or a higher dimensional array. Say you want to return the life-cycle
#' profile of a certain model variable x. This will be a vector of length N, where N is
#' the number of periods in the model. The user has to design the MOPT_OBJ_FUN in such a way
#' that it returns the required output. There are 2 requirements for what \code{MOPT_OBJ_FUN} has to return.
#' First it has to be a list, second, the list needs components "status" (indicating whether a particular evaluation
#' is valid in some sense) and "output", which contains your custom model output.
#' @param mcf object of class mopt
#' @param ns number of points in each dimension to evaluate
#' @param pad from bounds of parameter ranges. e.g. p in [0,1], avoid 0 and 1 with pad>0.
#' @param file \code{/path/to/your/file.RData}
#' @return list by paramter name, parameter value index, containing the value of the parameter vector and a list \code{data} containing
#' your custom model output.
#' @export
#' @example examples/example-slices2.r
compute.slices2 <- function(mcf, ns = 30, pad = 0.1, file = "est.slices.RData") {
  # Evaluate the objective once at the starting point (kept for parity
  # with compute.slices; its side effects may matter to the objective).
  p2 <- mcf$initial_value
  cat('evaluate objective function at starting point\n')
  maxres <- MOPT_OBJ_FUNC(p2)
  cat(sprintf('%d nodes, %d parameters, %d points per grid \n',
              mcf$N, length(mcf$params_to_sample), ns))
  nn <- 1  # progress counter over parameters
  # One list entry per sampled parameter, each holding the custom model
  # output at every grid value.
  out <- vector("list", length(mcf$params_to_sample))
  names(out) <- mcf$params_to_sample
  for (pp in mcf$params_to_sample) {
    # Grid over the parameter's [lb, ub] range, padded away from the bounds.
    lb <- mcf$pdesc[pp, 'lb']
    ub <- mcf$pdesc[pp, 'ub']
    prange <- seq(lb + (ub - lb) * pad / 2,
                  lb + (ub - lb) * (1 - pad / 2), l = ns)
    # One full parameter list per grid value; only `pp` varies.
    ptmp <- p2
    ps <- list()
    for (j in seq_along(prange)) {
      ptmp[[pp]] <- prange[j]
      ptmp$param_value <- prange[j]
      ptmp$chain <- j
      ps[[j]] <- ptmp
    }
    cat('sending evaluations for ', pp, ' in (', lb + (ub - lb) * pad / 2,
        ',', lb + (ub - lb) * (1 - pad / 2), ')\n')
    # Dispatch the evaluations according to the configured execution mode.
    if (mcf$mode == 'mpi') {
      rs <- clusterApplyLB(cl = mcf$cl, x = ps,
                           fun = mopt_obj_wrapper_custom, objfunc = mcf$objfunc)
    } else {
      rs <- mcf$mylbapply(ps, mopt_obj_wrapper_custom, objfunc = mcf$objfunc)
    }
    # Keep the model output of each successful evaluation.
    out[[pp]] <- list()
    for (jj in seq_along(rs)) {   # seq_along is safe for empty rs (was 1:length)
      if (is.null(rs[[jj]])) next
      if (rs[[jj]]$status == -1) next
      out[[pp]][[jj]] <- list(pars = ps[[jj]], data = rs[[jj]]$output)
    }
    cat('got ', length(out[[pp]]), ' values\n')
    cat('done with ', pp, '(', nn, '/', length(mcf$params_to_sample), ')\n')
    nn <- nn + 1
  }
  save(out, mcf, file = file)
  return(out)
}
|
library(whisker)
# Mustache template and partial used to render each gallery index.Rmd.
whisker_template <- readLines("tools/gallery-template.Rmd")
code_partial <- readLines("tools/code-partial.Rmd")
# One subdirectory per gallery example; the list feeds the template's
# navigation section.
gallery_dirs <- list.dirs("vignettes/gallery", recursive = FALSE)
gallery_dirs_list <- iteratelist(basename(gallery_dirs), value="dir")
# Render one gallery widget headlessly and screenshot it as a PNG thumbnail.
#
# data: list with `name` (gallery directory name) and `preview_args`
# (arguments for r2d3::r2d3 taken from the script's "!preview" line).
# Side effects: writes images/<name>_thumbnail.png; the working directory
# is restored on exit.
create_thumbnail <- function(data) {
  current_dir <- getwd()
  on.exit(setwd(current_dir))
  setwd(file.path("vignettes/gallery", data$name))
  # Build an r2d3::r2d3() call from the preview argument text and evaluate
  # it.  (parse/eval is inherent here: preview_args is R code as text.)
  render_script <- paste0(data$name, ".js")
  render_command <- paste0(
    "r2d3::r2d3(",
    data$preview_args,
    ", script = \"",
    render_script,
    "\", sizing = htmlwidgets::sizingPolicy(browser.fill = TRUE, padding = 0)",
    ")"
  )
  widget <- eval(parse(text = render_command))
  # Save the widget to a temporary directory and screenshot it.
  render_dir <- tempfile()
  dir.create(render_dir)
  render_file <- file.path(normalizePath(render_dir), "index.html")
  htmlwidgets::saveWidget(widget, render_file)
  webshot::webshot(
    paste0("file://", render_file),
    paste0("../../images/", data$name, "_thumbnail.png"),
    vwidth = 692,
    vheight = 474,
    delay = 3
  )
}
# For every gallery example: read its "!preview" header, collect its
# js/css sources, render index.Rmd from the whisker template, and
# produce a PNG thumbnail.
for (dir in gallery_dirs) {
message("Processing: ", basename(dir))
# base name
name <- basename(dir)
# preview args: first line of <name>.js is "!preview r2d3 <args>"
d3_script <- file.path(dir, paste0(name, ".js"))
script_preview <- readLines(d3_script)[[1]]
preview_args <- strsplit(script_preview, "!preview\\s+r2d3\\s+")[[1]][[2]]
# code files
# list_files: files of one extension; `mask` filters out e.g. minified
# js, and the example's main js file is moved to the front of the list.
list_files <- function(lang, mask = NULL) {
files <- list.files(dir, pattern = glob2rx(paste0("*.", lang)))
if (!is.null(mask))
files <- files[!grepl(mask, files)]
main_js <- paste0(name, ".js")
if (main_js %in% files)
files <- unique(c(main_js, files))
lapply(files, function(file) list(lang = lang, file = file))
}
code_files <- c(
list_files("js", mask = glob2rx("*.min.js")),
list_files("css")
)
# prime data
data <- list(
name = name,
dirs = gallery_dirs_list,
preview_args = preview_args,
code_files = code_files
)
# render template
gallery_rmd <- file.path(dir, "index.Rmd")
output <- whisker.render(whisker_template, data = data, partials = list(
code_partial = code_partial
))
cat(output, file = gallery_rmd)
# create thumbnail
create_thumbnail(data)
}
| /tools/gallery.R | permissive | thiyangt/r2d3 | R | false | false | 2,380 | r |
library(whisker)
# Mustache template and partial used to render each gallery index.Rmd.
whisker_template <- readLines("tools/gallery-template.Rmd")
code_partial <- readLines("tools/code-partial.Rmd")
# One subdirectory per gallery example; feeds the template's navigation.
gallery_dirs <- list.dirs("vignettes/gallery", recursive = FALSE)
gallery_dirs_list <- iteratelist(basename(gallery_dirs), value="dir")
# Render one gallery widget headlessly and screenshot it as a PNG thumbnail.
#
# data: list with `name` (gallery directory name) and `preview_args`
# (arguments for r2d3::r2d3 taken from the script's "!preview" line).
# Side effects: writes images/<name>_thumbnail.png; the working directory
# is restored on exit.
create_thumbnail <- function(data) {
  current_dir <- getwd()
  on.exit(setwd(current_dir))
  setwd(file.path("vignettes/gallery", data$name))
  # Build an r2d3::r2d3() call from the preview argument text and evaluate
  # it.  (parse/eval is inherent here: preview_args is R code as text.)
  render_script <- paste0(data$name, ".js")
  render_command <- paste0(
    "r2d3::r2d3(",
    data$preview_args,
    ", script = \"",
    render_script,
    "\", sizing = htmlwidgets::sizingPolicy(browser.fill = TRUE, padding = 0)",
    ")"
  )
  widget <- eval(parse(text = render_command))
  # Save the widget to a temporary directory and screenshot it.
  render_dir <- tempfile()
  dir.create(render_dir)
  render_file <- file.path(normalizePath(render_dir), "index.html")
  htmlwidgets::saveWidget(widget, render_file)
  webshot::webshot(
    paste0("file://", render_file),
    paste0("../../images/", data$name, "_thumbnail.png"),
    vwidth = 692,
    vheight = 474,
    delay = 3
  )
}
# For every gallery example: read its "!preview" header, collect its
# js/css sources, render index.Rmd from the whisker template, and
# produce a PNG thumbnail.
for (dir in gallery_dirs) {
message("Processing: ", basename(dir))
# base name
name <- basename(dir)
# preview args: first line of <name>.js is "!preview r2d3 <args>"
d3_script <- file.path(dir, paste0(name, ".js"))
script_preview <- readLines(d3_script)[[1]]
preview_args <- strsplit(script_preview, "!preview\\s+r2d3\\s+")[[1]][[2]]
# code files
# list_files: files of one extension; `mask` filters out e.g. minified
# js, and the example's main js file is moved to the front of the list.
list_files <- function(lang, mask = NULL) {
files <- list.files(dir, pattern = glob2rx(paste0("*.", lang)))
if (!is.null(mask))
files <- files[!grepl(mask, files)]
main_js <- paste0(name, ".js")
if (main_js %in% files)
files <- unique(c(main_js, files))
lapply(files, function(file) list(lang = lang, file = file))
}
code_files <- c(
list_files("js", mask = glob2rx("*.min.js")),
list_files("css")
)
# prime data
data <- list(
name = name,
dirs = gallery_dirs_list,
preview_args = preview_args,
code_files = code_files
)
# render template
gallery_rmd <- file.path(dir, "index.Rmd")
output <- whisker.render(whisker_template, data = data, partials = list(
code_partial = code_partial
))
cat(output, file = gallery_rmd)
# create thumbnail
create_thumbnail(data)
}
|
## makeCacheMatrix: wrap a matrix in a list of accessor closures that can
## also cache its inverse.  The cache is invalidated whenever set()
## replaces the matrix.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(mat) {
      x <<- mat
      cached_inverse <<- NULL
    },
    get = function() x,
    setinverse = function(inverse) cached_inverse <<- inverse,
    getinverse = function() cached_inverse
  )
}
# cacheSolve computes the inverse of the matrix wrapped by the
# makeCacheMatrix object `x`. If the inverse has already been calculated
# (and the matrix has not changed, which clears the cache via set()),
# the cached value is returned instead of recomputing it.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  data <- x$get()
  # BUGFIX: forward ... to solve() -- the original called solve(data) and
  # silently ignored any extra arguments (e.g. tol) passed by the caller.
  inverse <- solve(data, ...)
  x$setinverse(inverse)
  inverse
}
| /cachematrix.R | no_license | Prashantchand/ProgrammingAssignment2 | R | false | false | 835 | r | ## makeCacheMatrix creates a matrix object that can cache its inverse
## Wrap a matrix in a list of closures that can cache its inverse.
## The cache is cleared whenever a new matrix is stored via set().
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(mat) {
    x <<- mat
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }
  getinverse <- function() {
    cached_inverse
  }
  # Expose the four accessors by name.
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# cacheSolve computes the inverse of the matrix wrapped by the
# makeCacheMatrix object `x`. If the inverse has already been calculated
# (and the matrix has not changed, which clears the cache via set()),
# the cached value is returned instead of recomputing it.
cacheSolve <- function(x, ...) {
  inverse <- x$getinverse()
  if (!is.null(inverse)) {
    message("getting cached data")
    return(inverse)
  }
  data <- x$get()
  # BUGFIX: forward ... to solve() -- the original called solve(data) and
  # silently ignored any extra arguments (e.g. tol) passed by the caller.
  inverse <- solve(data, ...)
  x$setinverse(inverse)
  inverse
}
|
#!/usr/bin/env Rscript
# Author: Zaka Yuen , JCSMR, ANU
# Created on July 2020
# Modified on Mar 2021
# This script creates a per-read prediction output formatted for use in
# METEORE for the subsequent consensus prediction.
library(dplyr)
# args[1]: megalodon per-read modified-base calls (TSV with header)
# args[2]: path for the reformatted output TSV
args = commandArgs(trailingOnly=TRUE)
df <- read.table(args[1], header = TRUE, sep = "\t", stringsAsFactors = TRUE)
# Reorder columns by position (assumes the input column order is
# read_id, chrom, strand, pos, mod_log_prob, can_log_prob -- TODO confirm
# against the megalodon version in use).
df <- df %>% select(1,2,4,3,5,6)
# Score = log-likelihood difference between modified and canonical base.
df$Score <- df$mod_log_prob - df$can_log_prob
df <- df %>% select(1,2,3,4,6)
colnames(df) <- c("ID", "Chr", "Pos", "Strand", "Score")
# Encode strand as "+"/"-" (assumes megalodon reports 1 / -1 -- verify).
df <- df %>% mutate(Strand = if_else(Strand == 1, "+", "-"))
# Shift 0-based positions to 1-based coordinates.
df[,"Pos"] <- df[,"Pos"]+1
# Avoid scientific notation for large positions in the written file.
df$Pos <- format(df$Pos,scientific=FALSE)
write.table(df, file=args[2], sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
| /script/format_megalodon.R | permissive | mmmads/METEORE | R | false | false | 769 | r | #!/usr/bin/env Rscript
# Author: Zaka Yuen , JCSMR, ANU
# Created on July 2020
# Modified on Mar 2021
# This script creates a per-read prediction output formatted for use in
# METEORE for the subsequent consensus prediction.
library(dplyr)
# args[1]: megalodon per-read modified-base calls (TSV with header)
# args[2]: path for the reformatted output TSV
args = commandArgs(trailingOnly=TRUE)
df <- read.table(args[1], header = TRUE, sep = "\t", stringsAsFactors = TRUE)
# Reorder columns by position (assumes the input column order is
# read_id, chrom, strand, pos, mod_log_prob, can_log_prob -- TODO confirm
# against the megalodon version in use).
df <- df %>% select(1,2,4,3,5,6)
# Score = log-likelihood difference between modified and canonical base.
df$Score <- df$mod_log_prob - df$can_log_prob
df <- df %>% select(1,2,3,4,6)
colnames(df) <- c("ID", "Chr", "Pos", "Strand", "Score")
# Encode strand as "+"/"-" (assumes megalodon reports 1 / -1 -- verify).
df <- df %>% mutate(Strand = if_else(Strand == 1, "+", "-"))
# Shift 0-based positions to 1-based coordinates.
df[,"Pos"] <- df[,"Pos"]+1
# Avoid scientific notation for large positions in the written file.
df$Pos <- format(df$Pos,scientific=FALSE)
write.table(df, file=args[2], sep = "\t", row.names = FALSE, col.names = TRUE, quote = FALSE)
|
getAbundanceThreshold <- function(sites, numGenes){
  # Return the estimated abundance proportion at which the `numGenes`-th
  # distinct nearest gene first appears, scanning sites in decreasing
  # abundance order. Sites above this cutoff span the top `numGenes` genes.
  # NOTE(review): single-bracket subsetting without a comma implies `sites`
  # is a GRanges-like object rather than a data.frame -- confirm with callers.
  orderedAbundances <- sites[order(-sites$estAbundProp)]
  # 'unique' removes duplicates while preserving first-seen order, so the
  # numGenes-th entry is the numGenes-th distinct gene encountered.
  abundCutoff <- orderedAbundances$nearest_refSeq_gene==unique(orderedAbundances$nearest_refSeq_gene)[numGenes]
  # Abundance of the first site annotated with that gene.
  abundCutoff <- orderedAbundances[which(abundCutoff)[1]]$estAbundProp
  abundCutoff
}
filterLowAbund <- function(sites, abundCutoff){
  # Mask the nearest-gene label of low-abundance sites with "LowAbund".
  # The comparison is strict (>), so sites exactly at the cutoff are masked.
  sites$maskedRefGeneName <- ifelse(sites$estAbundProp > abundCutoff,
                                    sites$nearest_refSeq_gene,
                                    "LowAbund")
  sites
}
getAbundanceSums <- function(sites, splitBy){
  # Sum estimated abundance proportions per masked gene name, separately for
  # each combination of the metadata columns named in `splitBy`.
  # NOTE(review): mcols() implies a GRanges input; each split group is assumed
  # to carry a uniform Timepoint/CellType (only the first element is kept).
  splitBy <- mcols(sites)[,splitBy]
  # Split on the concatenated values of the splitBy columns.
  splitSites <- split(sites, apply(as.data.frame(splitBy), 1, paste, collapse=""))
  do.call(rbind, lapply(splitSites, function(sites){
    res <- aggregate(estAbundProp~maskedRefGeneName, mcols(sites), sum)
    res$Timepoint <- sites[1]$Timepoint
    res$CellType <- sites[1]$CellType
    res
  }))
}
| /abundanceFilteringUtils.R | no_license | esherm/geneTherapyPatientReportMaker | R | false | false | 1,026 | r | getAbundanceThreshold <- function(sites, numGenes){
orderedAbundances <- sites[order(-sites$estAbundProp)]
#'unique' functionally removes anything that's duplicated, thus preserving order
abundCutoff <- orderedAbundances$nearest_refSeq_gene==unique(orderedAbundances$nearest_refSeq_gene)[numGenes]
abundCutoff <- orderedAbundances[which(abundCutoff)[1]]$estAbundProp
abundCutoff
}
filterLowAbund <- function(sites, abundCutoff){
  # Mask the nearest-gene label of low-abundance sites with "LowAbund".
  # The comparison is strict (>), so sites exactly at the cutoff are masked.
  sites$maskedRefGeneName <- ifelse(sites$estAbundProp > abundCutoff,
                                    sites$nearest_refSeq_gene,
                                    "LowAbund")
  sites
}
getAbundanceSums <- function(sites, splitBy){
  # Sum estimated abundance proportions per masked gene name, separately for
  # each combination of the metadata columns named in `splitBy`.
  # NOTE(review): mcols() implies a GRanges input; each split group is assumed
  # to carry a uniform Timepoint/CellType (only the first element is kept).
  splitBy <- mcols(sites)[,splitBy]
  # Split on the concatenated values of the splitBy columns.
  splitSites <- split(sites, apply(as.data.frame(splitBy), 1, paste, collapse=""))
  do.call(rbind, lapply(splitSites, function(sites){
    res <- aggregate(estAbundProp~maskedRefGeneName, mcols(sites), sum)
    res$Timepoint <- sites[1]$Timepoint
    res$CellType <- sites[1]$CellType
    res
  }))
}
|
# Three functions to deal with detrending. The estimation of the
# trend is done on monthly quantities. After detrending, the load
# is leveled to the first historical month (not to the last). It
# should not matter because it is always done in pairs remove/add.trend.
#
# Given a dataframe with columns:
# data = [year month day hour x] column "hour" is optional
#
# Written by Adrian Dragulescu on 13-Sep-2004
#---------------------------------------------------------
remove.trend <- function(data, save, options){
  ## Detrend historical load data: fit a linear regression of monthly load
  ## totals on the month index, then subtract each month's share of the
  ## fitted slope evenly across that month's rows.
  ##
  ## Args:
  ##   data:    data frame [year month day (hour) x]
  ##   save:    output flags/paths; save$Analysis$all triggers the trend plot
  ##   options: analysis options passed through to summarize.monthly()
  ## Returns: list(reg.DT, x.detrended) -- the lm fit and the detrended series.
  ## Depends on summarize.monthly() and plot.hist.trend() defined elsewhere.
  R <- nrow(data)
  aux1 <- paste(data$year[1],data$month[1],data$day[1],sep="-")
  aux2 <- paste(data$year[R],data$month[R],data$day[R],sep="-")
  # Full calendar month range spanned by the data (first row to last row).
  rangeMths <- seq(as.Date(aux1), as.Date(aux2), "month")
  aux <- as.Date(paste(data$year,data$month,"1", sep="-"))
  histMths <- unique(aux)  # months actually present in the data
  ind <- which(is.element(rangeMths,histMths)==FALSE)
  rangeInd <- 1:length(rangeMths)
  if (length(ind)!=0){rangeInd <- rangeInd[-ind]} # drop indices of missing months
  options$one.column <- 1
  mth.Load <- summarize.monthly(data, options)
  options$one.column <- 0
  reg.DT <- lm(mth.Load$x ~ rangeInd) # detrending regression
  no.Months <- length(histMths)
  slope <- as.numeric(coefficients(reg.DT)[2])  # trend per month
  x.detrended <- data$x
  aux <- as.character(aux)
  # Subtract each month's trend contribution, split evenly over its rows.
  for (mth in 1:no.Months){
    str <- paste(substr(as.character(histMths[mth]),1,7),"-01",sep="")
    ind <- which(aux==str)
    x.detrended[ind] <- data$x[ind]-slope*rangeInd[mth]/length(ind)
  }
  if (save$Analysis$all){plot.hist.trend(reg.DT,save,options)}
  return(list(reg.DT, x.detrended))
}
#---------------------------------------------------------
add.trend <- function(data, reg.DT, options){
  ## Add a load-growth trend back to a weather-normalized forecasted load.
  ##
  ## Args:
  ##   data:    data frame [year month day (hour) x] for the forecast period
  ##   reg.DT:  detrending regression from remove.trend(); its slope is used
  ##            when options$forecast$loadgrowth == "historical"
  ##   options: list with historical$startDate, forecast$startDate,
  ##            forecast$endDate and forecast$loadgrowth (annual rate or
  ##            the string "historical")
  ## Returns: `data` with column x shifted by the monthly trend and an
  ##          extra `adder` column holding the per-row adjustment.
  rangeMths <- seq(as.Date(options$historical$startDate),
                   as.Date(options$forecast$endDate), by="month")
  forecastMths <- seq(as.Date(options$forecast$startDate),
                      as.Date(options$forecast$endDate), by="month")
  rangeInd <- seq_along(rangeMths)
  ind <- which(is.element(rangeMths, forecastMths))
  rangeInd <- rangeInd[ind]  # keep only the forecast months
  no.Months <- length(forecastMths)
  if (tolower(options$forecast$loadgrowth)=="historical"){
    slope <- as.numeric(coefficients(reg.DT)[2])
  } else {
    slope <- options$forecast$loadgrowth/12  # annual rate -> monthly
  }
  # BUGFIX: the original used array(0, dim=nrow(data$x)); data$x is a
  # data-frame column (a vector), so nrow() is NULL and array() errors out.
  adder <- numeric(nrow(data))
  aux <- as.Date(paste(data$year, data$month, "1", sep="-"))
  aux <- as.character(aux)
  for (mth in 1:no.Months){
    str <- paste(substr(as.character(forecastMths[mth]),1,7), "-01", sep="")
    ind <- which(aux==str)
    # Calendar days in the month (robust to missing rows in data).
    eom <- seq(forecastMths[mth], by="month", length=2)[2]
    no.days.in.mth <- as.numeric(eom - forecastMths[mth])
    if (length(intersect(colnames(data),"hour"))==1){
      no.days.in.mth <- 24*no.days.in.mth}  # hourly series: spread over hours
    adder[ind] <- slope*rangeInd[mth]/no.days.in.mth
  }
  data$x <- data$x + adder
  data <- cbind(data, adder)
  return(data)
}
#---------------------------------------------------------
plot.hist.trend <- function(reg.DT, save, options){
  ## Plot monthly historical load with its fitted trend line and write a
  ## LaTeX summary table of the detrending regression.
  ##
  ## Args:
  ##   reg.DT:  lm fit from remove.trend(); its model frame supplies the
  ##            monthly loads and month indices
  ##   save:    list with dir$plots, dir$Rtables and the Analysis$all flag
  ##   options: list with historical start/end dates, loadName, detrend flag
  ## Side effects only (writes a pdf and a .tex file); no return value.
  mth.Load <- reg.DT$model$mth.Load
  rangeInd <- reg.DT$model$rangeInd
  dateMth <- seq(as.Date(options$historical$startDate),
                 as.Date(options$historical$endDate), "month")
  dateMthPOS <- strptime(as.character(dateMth), format="%Y-%m-%d")
  L <- length(as.character(dateMthPOS))
  main=paste("Historical ",options$loadName,sep="")
  fileName <- paste(save$dir$plots,"histload_trend.pdf", sep="")
  # FIX: spell the argument out -- the original wrote heigh=3.5 and relied
  # on partial matching against pdf()'s `height` argument.
  if (save$Analysis$all){pdf(fileName, width=7.25, height=3.5)}
  plot(dateMthPOS, mth.Load, xlab="", xaxt="n", col="blue",
       ylab="Monthly Load, MWh")
  title(main)
  # Label every other month on the x axis.
  axis.POSIXct(1, dateMthPOS, at=dateMthPOS[seq(1,L, by=2)],
               format="%b%y", las=2)
  lines(dateMthPOS, predict(reg.DT, data.frame(rangeInd=rangeInd)), col="gray")
  if (save$Analysis$all){dev.off()}
  # Build the LaTeX caption: significance of the slope and annualized growth.
  caption <- "Linear regression of monthly load data vs.\\ index of month: "
  formula <- "lm(formula = mth.Load\\$x \$\\sim \$ rangeInd)"
  caption <- paste(caption, formula,". The load growth ", sep="")
  aux <- summary(reg.DT)$coefficients; aux <- aux["rangeInd","Pr(>|t|)"]
  if (aux>0.05){yesno <- "is not"}else{yesno <- "is"}
  growthRate <- 100*12*coefficients(reg.DT)[2]/coefficients(reg.DT)[1]
  growthRate <- signif(as.numeric(growthRate),2)
  caption <- paste(caption, yesno, " statistically significant at the 95\\% significance level. The annualized load growth rate is ", growthRate, "\\%.", sep="")
  if (!options$detrend){yesno <- "not"}else{yesno <- ""}
  caption <- paste(caption, "For RMG analysis, historical data has", yesno, "been detrended.")
  fileName <- paste(save$dir$Rtables,"trendReg.tex", sep="")
  print.xtable(xtable(reg.DT, caption=caption), file=fileName)
}
| /R Extension/RMG/Models/Load/trend.functions.R | no_license | uhasan1/QLExtension-backup | R | false | false | 4,855 | r | # Three functions to deal with detrending. The estimation of the
# trend is done on monthly quantities. After detrending, the load
# is leveled to the first historical month (not to the last). It
# should not matter because it is always done in pairs remove/add.trend.
#
# Given a dataframe with columns:
# data = [year month day hour x] column "hour" is optional
#
# Written by Adrian Dragulescu on 13-Sep-2004
#---------------------------------------------------------
remove.trend <- function(data, save, options){
  ## Detrend historical load data: fit a linear regression of monthly load
  ## totals on the month index, then subtract each month's share of the
  ## fitted slope evenly across that month's rows.
  ##
  ## Args:
  ##   data:    data frame [year month day (hour) x]
  ##   save:    output flags/paths; save$Analysis$all triggers the trend plot
  ##   options: analysis options passed through to summarize.monthly()
  ## Returns: list(reg.DT, x.detrended) -- the lm fit and the detrended series.
  ## Depends on summarize.monthly() and plot.hist.trend() defined elsewhere.
  R <- nrow(data)
  aux1 <- paste(data$year[1],data$month[1],data$day[1],sep="-")
  aux2 <- paste(data$year[R],data$month[R],data$day[R],sep="-")
  # Full calendar month range spanned by the data (first row to last row).
  rangeMths <- seq(as.Date(aux1), as.Date(aux2), "month")
  aux <- as.Date(paste(data$year,data$month,"1", sep="-"))
  histMths <- unique(aux)  # months actually present in the data
  ind <- which(is.element(rangeMths,histMths)==FALSE)
  rangeInd <- 1:length(rangeMths)
  if (length(ind)!=0){rangeInd <- rangeInd[-ind]} # drop indices of missing months
  options$one.column <- 1
  mth.Load <- summarize.monthly(data, options)
  options$one.column <- 0
  reg.DT <- lm(mth.Load$x ~ rangeInd) # detrending regression
  no.Months <- length(histMths)
  slope <- as.numeric(coefficients(reg.DT)[2])  # trend per month
  x.detrended <- data$x
  aux <- as.character(aux)
  # Subtract each month's trend contribution, split evenly over its rows.
  for (mth in 1:no.Months){
    str <- paste(substr(as.character(histMths[mth]),1,7),"-01",sep="")
    ind <- which(aux==str)
    x.detrended[ind] <- data$x[ind]-slope*rangeInd[mth]/length(ind)
  }
  if (save$Analysis$all){plot.hist.trend(reg.DT,save,options)}
  return(list(reg.DT, x.detrended))
}
#---------------------------------------------------------
add.trend <- function(data, reg.DT, options){
  ## Add a load-growth trend back to a weather-normalized forecasted load.
  ##
  ## Args:
  ##   data:    data frame [year month day (hour) x] for the forecast period
  ##   reg.DT:  detrending regression from remove.trend(); its slope is used
  ##            when options$forecast$loadgrowth == "historical"
  ##   options: list with historical$startDate, forecast$startDate,
  ##            forecast$endDate and forecast$loadgrowth (annual rate or
  ##            the string "historical")
  ## Returns: `data` with column x shifted by the monthly trend and an
  ##          extra `adder` column holding the per-row adjustment.
  rangeMths <- seq(as.Date(options$historical$startDate),
                   as.Date(options$forecast$endDate), by="month")
  forecastMths <- seq(as.Date(options$forecast$startDate),
                      as.Date(options$forecast$endDate), by="month")
  rangeInd <- seq_along(rangeMths)
  ind <- which(is.element(rangeMths, forecastMths))
  rangeInd <- rangeInd[ind]  # keep only the forecast months
  no.Months <- length(forecastMths)
  if (tolower(options$forecast$loadgrowth)=="historical"){
    slope <- as.numeric(coefficients(reg.DT)[2])
  } else {
    slope <- options$forecast$loadgrowth/12  # annual rate -> monthly
  }
  # BUGFIX: the original used array(0, dim=nrow(data$x)); data$x is a
  # data-frame column (a vector), so nrow() is NULL and array() errors out.
  adder <- numeric(nrow(data))
  aux <- as.Date(paste(data$year, data$month, "1", sep="-"))
  aux <- as.character(aux)
  for (mth in 1:no.Months){
    str <- paste(substr(as.character(forecastMths[mth]),1,7), "-01", sep="")
    ind <- which(aux==str)
    # Calendar days in the month (robust to missing rows in data).
    eom <- seq(forecastMths[mth], by="month", length=2)[2]
    no.days.in.mth <- as.numeric(eom - forecastMths[mth])
    if (length(intersect(colnames(data),"hour"))==1){
      no.days.in.mth <- 24*no.days.in.mth}  # hourly series: spread over hours
    adder[ind] <- slope*rangeInd[mth]/no.days.in.mth
  }
  data$x <- data$x + adder
  data <- cbind(data, adder)
  return(data)
}
#---------------------------------------------------------
plot.hist.trend <- function(reg.DT, save, options){
  ## Plot monthly historical load with its fitted trend line and write a
  ## LaTeX summary table of the detrending regression.
  ##
  ## Args:
  ##   reg.DT:  lm fit from remove.trend(); its model frame supplies the
  ##            monthly loads and month indices
  ##   save:    list with dir$plots, dir$Rtables and the Analysis$all flag
  ##   options: list with historical start/end dates, loadName, detrend flag
  ## Side effects only (writes a pdf and a .tex file); no return value.
  mth.Load <- reg.DT$model$mth.Load
  rangeInd <- reg.DT$model$rangeInd
  dateMth <- seq(as.Date(options$historical$startDate),
                 as.Date(options$historical$endDate), "month")
  dateMthPOS <- strptime(as.character(dateMth), format="%Y-%m-%d")
  L <- length(as.character(dateMthPOS))
  main=paste("Historical ",options$loadName,sep="")
  fileName <- paste(save$dir$plots,"histload_trend.pdf", sep="")
  # FIX: spell the argument out -- the original wrote heigh=3.5 and relied
  # on partial matching against pdf()'s `height` argument.
  if (save$Analysis$all){pdf(fileName, width=7.25, height=3.5)}
  plot(dateMthPOS, mth.Load, xlab="", xaxt="n", col="blue",
       ylab="Monthly Load, MWh")
  title(main)
  # Label every other month on the x axis.
  axis.POSIXct(1, dateMthPOS, at=dateMthPOS[seq(1,L, by=2)],
               format="%b%y", las=2)
  lines(dateMthPOS, predict(reg.DT, data.frame(rangeInd=rangeInd)), col="gray")
  if (save$Analysis$all){dev.off()}
  # Build the LaTeX caption: significance of the slope and annualized growth.
  caption <- "Linear regression of monthly load data vs.\\ index of month: "
  formula <- "lm(formula = mth.Load\\$x \$\\sim \$ rangeInd)"
  caption <- paste(caption, formula,". The load growth ", sep="")
  aux <- summary(reg.DT)$coefficients; aux <- aux["rangeInd","Pr(>|t|)"]
  if (aux>0.05){yesno <- "is not"}else{yesno <- "is"}
  growthRate <- 100*12*coefficients(reg.DT)[2]/coefficients(reg.DT)[1]
  growthRate <- signif(as.numeric(growthRate),2)
  caption <- paste(caption, yesno, " statistically significant at the 95\\% significance level. The annualized load growth rate is ", growthRate, "\\%.", sep="")
  if (!options$detrend){yesno <- "not"}else{yesno <- ""}
  caption <- paste(caption, "For RMG analysis, historical data has", yesno, "been detrended.")
  fileName <- paste(save$dir$Rtables,"trendReg.tex", sep="")
  print.xtable(xtable(reg.DT, caption=caption), file=fileName)
}
|
estfun.mlm <- function(x, ...) {
  ## Empirical estimating functions (scores) for an "mlm" fit: each
  ## observation's responses minus the fitted coefficients.
  ## NOTE: the vectorized subtraction recycles as.vector(coef(x)) down the
  ## columns of t(Y), which is exact for intercept-only fits (coef is 1 x k).
  resp <- model.response(model.frame(x))
  scores <- t(t(resp) - as.vector(coef(x)))
  colnames(scores) <- colnames(coef(x))
  return(scores)
}
| /R/estfun.mlm.R | no_license | cran/CADFtest | R | false | false | 165 | r | estfun.mlm <-
function(x, ...) {
psi <- t(t(model.response(model.frame(x))) - as.vector(coef(x)))
colnames(psi) <- colnames(coef(x))
return(psi)
}
|
# Code extracted (purled) from an rstanarm vignette on prior distributions.
# The "## ---- ..." markers carry the original knitr chunk options.
params <-
  list(EVAL = TRUE)
## ---- SETTINGS-knitr, include=FALSE--------------------------------------
stopifnot(require(knitr))
# Global chunk options: quiet output, PNG figures with golden-ratio aspect.
opts_chunk$set(
  comment=NA,
  message = FALSE,
  warning = FALSE,
  eval = params$EVAL,
  dev = "png",
  dpi = 150,
  fig.asp = 0.618,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
## ---- SETTINGS-gg, include=FALSE-----------------------------------------
library(ggplot2)
theme_set(bayesplot::theme_default())
## ---- SETTINGS-rstan, include=FALSE--------------------------------------
# Short MCMC settings so the vignette builds quickly.
ITER <- 500L
CHAINS <- 2L
CORES <- 2L
SEED <- 12345
## ---- SETTINGS-loo, include=FALSE----------------------------------------
loo.cores <- if (exists("CORES")) CORES else 1L
options(mc.cores = loo.cores)
## ---- default-prior-1, results="hide"------------------------------------
library("rstanarm")
# Fit with rstanarm's default (autoscaled) priors.
default_prior_test <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1)
## ---- default-prior-summary----------------------------------------------
prior_summary(default_prior_test)
## ---- echo=FALSE---------------------------------------------------------
priors <- prior_summary(default_prior_test)
fr2 <- function(x) format(round(x, 2), nsmall = 2)
## ---- no-autoscale, results="hide"---------------------------------------
# Refit with explicit, non-autoscaled priors for comparison.
test_no_autoscale <-
  update(
    default_prior_test,
    prior = normal(0, 5, autoscale = FALSE),
    prior_intercept = student_t(4, 0, 10, autoscale = FALSE),
    prior_aux = cauchy(0, 3, autoscale = FALSE)
  )
## ---- no-autoscale-prior-summary-----------------------------------------
prior_summary(test_no_autoscale)
## ------------------------------------------------------------------------
# Exact prior mass inside (-250, 250) under Normal(0, 500).
p <- 1 - 2 * pnorm(-250, mean = 0, sd = 500)
print(paste("Pr(-250 < theta < 250) =", round(p, 2)))
## ---- fig.cap="_There is much more probability mass outside the interval (-250, 250)._"----
# Monte Carlo check of the same probability, plus a histogram.
theta <- rnorm(1e5, mean = 0, sd = 500)
p_approx <- mean(abs(theta) < 250)
print(paste("Pr(-250 < theta < 250) =", round(p_approx, 2)))
d <- data.frame(theta, clr = abs(theta) > 250)
ggplot(d, aes(x = theta, fill = clr)) +
  geom_histogram(binwidth = 5, show.legend = FALSE) +
  scale_y_continuous(name = "", labels = NULL, expand = c(0,0)) +
  scale_x_continuous(name = expression(theta), breaks = c(-1000, -250, 250, 1000))
## ---- flat-prior-1, echo=FALSE, results="hide"---------------------------
flat_prior_test <- stan_glm(mpg ~ wt, data = mtcars, prior = NULL, iter = 10, chains = 1)
## ---- flat-prior-2, eval=FALSE-------------------------------------------
# flat_prior_test <- stan_glm(mpg ~ wt, data = mtcars, prior = NULL)
## ---- flat-prior-summary-------------------------------------------------
prior_summary(flat_prior_test)
## ---- eval=FALSE---------------------------------------------------------
# my_prior <- normal(location = c(-10, 0), scale = c(5, 2), autoscale = FALSE)
# stan_glm(y ~ x1 + x2, data = dat, prior = my_prior)
| /data/genthat_extracted_code/rstanarm/vignettes/priors.R | no_license | surayaaramli/typeRrh | R | false | false | 2,940 | r | params <-
list(EVAL = TRUE)
# Code extracted (purled) from an rstanarm vignette on prior distributions.
# The "## ---- ..." markers carry the original knitr chunk options.
## ---- SETTINGS-knitr, include=FALSE--------------------------------------
stopifnot(require(knitr))
# Global chunk options: quiet output, PNG figures with golden-ratio aspect.
opts_chunk$set(
  comment=NA,
  message = FALSE,
  warning = FALSE,
  eval = params$EVAL,
  dev = "png",
  dpi = 150,
  fig.asp = 0.618,
  fig.width = 5,
  out.width = "60%",
  fig.align = "center"
)
## ---- SETTINGS-gg, include=FALSE-----------------------------------------
library(ggplot2)
theme_set(bayesplot::theme_default())
## ---- SETTINGS-rstan, include=FALSE--------------------------------------
# Short MCMC settings so the vignette builds quickly.
ITER <- 500L
CHAINS <- 2L
CORES <- 2L
SEED <- 12345
## ---- SETTINGS-loo, include=FALSE----------------------------------------
loo.cores <- if (exists("CORES")) CORES else 1L
options(mc.cores = loo.cores)
## ---- default-prior-1, results="hide"------------------------------------
library("rstanarm")
# Fit with rstanarm's default (autoscaled) priors.
default_prior_test <- stan_glm(mpg ~ wt + am, data = mtcars, chains = 1)
## ---- default-prior-summary----------------------------------------------
prior_summary(default_prior_test)
## ---- echo=FALSE---------------------------------------------------------
priors <- prior_summary(default_prior_test)
fr2 <- function(x) format(round(x, 2), nsmall = 2)
## ---- no-autoscale, results="hide"---------------------------------------
# Refit with explicit, non-autoscaled priors for comparison.
test_no_autoscale <-
  update(
    default_prior_test,
    prior = normal(0, 5, autoscale = FALSE),
    prior_intercept = student_t(4, 0, 10, autoscale = FALSE),
    prior_aux = cauchy(0, 3, autoscale = FALSE)
  )
## ---- no-autoscale-prior-summary-----------------------------------------
prior_summary(test_no_autoscale)
## ------------------------------------------------------------------------
# Exact prior mass inside (-250, 250) under Normal(0, 500).
p <- 1 - 2 * pnorm(-250, mean = 0, sd = 500)
print(paste("Pr(-250 < theta < 250) =", round(p, 2)))
## ---- fig.cap="_There is much more probability mass outside the interval (-250, 250)._"----
# Monte Carlo check of the same probability, plus a histogram.
theta <- rnorm(1e5, mean = 0, sd = 500)
p_approx <- mean(abs(theta) < 250)
print(paste("Pr(-250 < theta < 250) =", round(p_approx, 2)))
d <- data.frame(theta, clr = abs(theta) > 250)
ggplot(d, aes(x = theta, fill = clr)) +
  geom_histogram(binwidth = 5, show.legend = FALSE) +
  scale_y_continuous(name = "", labels = NULL, expand = c(0,0)) +
  scale_x_continuous(name = expression(theta), breaks = c(-1000, -250, 250, 1000))
## ---- flat-prior-1, echo=FALSE, results="hide"---------------------------
flat_prior_test <- stan_glm(mpg ~ wt, data = mtcars, prior = NULL, iter = 10, chains = 1)
## ---- flat-prior-2, eval=FALSE-------------------------------------------
# flat_prior_test <- stan_glm(mpg ~ wt, data = mtcars, prior = NULL)
## ---- flat-prior-summary-------------------------------------------------
prior_summary(flat_prior_test)
## ---- eval=FALSE---------------------------------------------------------
# my_prior <- normal(location = c(-10, 0), scale = c(5, 2), autoscale = FALSE)
# stan_glm(y ~ x1 + x2, data = dat, prior = my_prior)
|
## makeCacheMatrix creates a special "vector" (a list of closures) around a
## matrix that can cache the result of solve(). It exposes:
##   1. set(y)       -- store a new matrix and clear the cache
##   2. get()        -- return the stored matrix
##   3. setsolve(s)  -- cache an inverse
##   4. getsolve()   -- return the cached inverse (NULL when empty)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached <<- solve
  }
  getsolve <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Returns the inverse of the matrix wrapped by a makeCacheMatrix object,
## taking it from the cache when available and computing (then caching)
## it otherwise. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setsolve(fresh)
  fresh
}
| /cachematrix.R | no_license | jehokang/ProgrammingAssignment2 | R | false | false | 1,014 | r | ## Put comments here that give an overall description of what your
## makeCacheMatrix creates a special "vector" (a list of closures) around a
## matrix that can cache the result of solve(). It exposes:
##   1. set(y)       -- store a new matrix and clear the cache
##   2. get()        -- return the stored matrix
##   3. setsolve(s)  -- cache an inverse
##   4. getsolve()   -- return the cached inverse (NULL when empty)
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL
  set <- function(y) {
    x <<- y
    cached <<- NULL
  }
  get <- function() {
    x
  }
  setsolve <- function(solve) {
    cached <<- solve
  }
  getsolve <- function() {
    cached
  }
  list(
    set = set,
    get = get,
    setsolve = setsolve,
    getsolve = getsolve
  )
}
## Returns the inverse of the matrix wrapped by a makeCacheMatrix object,
## taking it from the cache when available and computing (then caching)
## it otherwise. Extra arguments are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getsolve()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setsolve(fresh)
  fresh
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Shelter.R
\name{profileDet}
\alias{profileDet}
\title{Finds the deterministic vertical wind profile for a community}
\usage{
profileDet(base.params, slices = 10)
}
\arguments{
\item{base.params}{A parameter file}
\item{slices}{The number of vertical slices to measure}
}
\value{
table
}
\description{
Finds the deterministic vertical wind profile for a community
}
| /man/profileDet.Rd | permissive | pzylstra/Impact | R | false | true | 444 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Shelter.R
\name{profileDet}
\alias{profileDet}
\title{Finds the deterministic vertical wind profile for a community}
\usage{
profileDet(base.params, slices = 10)
}
\arguments{
\item{base.params}{A parameter file}
\item{slices}{The number of vertical slices to measure}
}
\value{
table
}
\description{
Finds the deterministic vertical wind profile for a community
}
|
#!/usr/bin/env Rscript
#
# Copyright 2016-2018 Yong-Xin Liu <metagenome@126.com>
# If used this script, please cited:
# Zhang, J., Zhang, N., Liu, Y.X., Zhang, X., Hu, B., Qin, Y., Xu, H., Wang, H., Guo, X., Qian, J., et al. (2018).
# Root microbiota shift in rice correlates with resident time in the field and developmental stage. Sci China Life Sci 61,
# https://doi.org/10.1007/s11427-018-9284-4
# 手动运行脚本请,需要设置工作目录,使用 Ctrl+Shift+H 或 Session - Set Work Directory - Choose Directory / To Source File Location 设置工作目录
# 1. 程序功能描述和主要步骤
# 程序功能:Beta多样性主坐标轴分析及组间统计
# Functions: PCoA analysis of samples and groups comparing
# Main steps:
# - Reads distance matrix input.txt
# - Calculate orrdinate by PCoA and show in scatter plot
# - Adonis calculate significant between groups distance and group inner distance
# 程序使用示例
# USAGE
# # 展示样品间距离分布,统计组间是否显著,也用于异常样品筛选
#
# Rscript ./script/beta_pcoa.r -h
#
# # 默认基于bray_curtis距离
# Rscript ./script/beta_pcoa.r
#
# # 完整默认参数
# Rscript ./script/beta_pcoa.r -i beta/bray_curtis.txt -t bray_curtis \
# -d doc/design.txt -n group \
# -o beta/pcoa_bray_curtis \
# -w 4 -e 2.5
#
# # 基于unifrac距离
# Rscript ./script/beta_pcoa.r -t unifrac
options(warn = -1)
# 1.2 Parse the command line
# Use the Tsinghua CRAN mirror to speed up downloads
site="https://mirrors.tuna.tsinghua.edu.cn/CRAN"
# Install optparse on demand, then load it.
# BUGFIX: the original called install.packages(p, ...) but `p` is only
# defined later by the dependency loops -- install "optparse" explicitly.
if (!suppressWarnings(suppressMessages(require("optparse", character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))) {
  install.packages("optparse", repos=site)
  require("optparse", character.only = TRUE)
}
# Parse the command line (comments translated from the original Chinese)
if (TRUE){
  option_list = list(
    make_option(c("-t", "--type"), type="character", default="bray_curtis",
                help="Distance type; 距离类型, 可选bray_curtis, bray_curtis_binary, euclidean, jaccard, jaccard_binary, manhatten, unifrac, unifrac_binary [default %default]"),
    make_option(c("-i", "--input"), type="character", default="",
                help="Input beta distance; 距离矩阵,默认beta目录下与t同名,可指定 [default %default]"),
    make_option(c("-d", "--design"), type="character", default="../design.txt",
                help="design file; 实验设计文件 [default %default]"),
    make_option(c("-n", "--group"), type="character", default="group",
                help="name of group type; 分组列名 [default %default]"),
    make_option(c("-w", "--width"), type="numeric", default=4,
                help="Width of figure; 图片宽 [default %default]"),
    make_option(c("-e", "--height"), type="numeric", default=2.5,
                help="Height of figure; 图片高 [default %default]"),
    make_option(c("-o", "--output"), type="character", default="",
                help="output directory or prefix; 输出文件前缀, 有txt和矢量图pdf [default %default]")
  )
  opts = parse_args(OptionParser(option_list=option_list))
  # If input/output were not given, derive defaults from the distance type
  if (opts$input==""){opts$input=paste("",opts$type, ".txt", sep = "")}
  if (opts$output==""){opts$output=paste("pcoa_",opts$type, sep = "")}
  # Echo the resolved settings so the user can confirm them
  print(paste("The distance matrix file is ", opts$input, sep = ""))
  print(paste("Type of distance type is ", opts$type, sep = ""))
  print(paste("The design file is ", opts$design, sep = ""))
  print(paste("The group name is ", opts$group, sep = ""))
  print(paste("The output file prefix is ", opts$output, sep = ""))
}
# 2. Dependency check, installation and loading
# 2.1 Commonly used CRAN packages
site="https://mirrors.tuna.tsinghua.edu.cn/CRAN"
# Dependency list: data reshaping, plotting and ordination/statistics
package_list = c("reshape2","ggplot2","vegan")
# Try to load each package; install it first if loading fails
for(p in package_list){
  if(!suppressWarnings(suppressMessages(require(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    install.packages(p, repos=site)
    suppressWarnings(suppressMessages(library(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 2.2 Commonly used Bioconductor packages
# NOTE(review): biocLite() is the legacy Bioconductor installer; newer R
# releases use BiocManager::install() instead -- confirm the target R version.
package_list = c("digest","ggrepel")
for(p in package_list){
  if(!suppressWarnings(suppressMessages(require(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    source("https://bioconductor.org/biocLite.R")
    biocLite(p)
    suppressWarnings(suppressMessages(library(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 2.3 Commonly used GitHub packages
# NOTE(review): install_github() needs devtools/remotes attached, which this
# script does not load -- this branch only works if one is already loaded.
package_list = c("kassambara/ggpubr")
for(p in package_list){
  q=unlist(strsplit(p,split = "/"))[2]
  if(!suppressWarnings(suppressMessages(require(q, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    install_github(p)
    suppressWarnings(suppressMessages(library(q, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 3. Read input files
# Distance matrix (samples x samples, tab-separated, first column = row names)
dis = read.table(opts$input, header=T, row.names= 1, sep="\t", comment.char="")
# Experimental design table
design = read.table(opts$design, header=T, row.names= 1, sep="\t", comment.char="")
# Extract the grouping column (default "group", overridable via -n)
sampFile = as.data.frame(design[,opts$group],row.names = row.names(design))
colnames(sampFile)[1] = "group"
# 4. Statistics and plotting
# vegan/stats cmdscale: principal coordinates of the distance matrix, first 3 axes
pcoa = cmdscale(dis, k=3, eig=T) # k is dimension, 3 is recommended; eig is eigenvalues
points = as.data.frame(pcoa$points) # coordinates as a data frame
eig = pcoa$eig
points = cbind(points, sampFile[rownames(points),])
colnames(points) = c("x", "y", "z","group")
# Scatter plot of PCo 1 vs PCo 2, with 68% confidence ellipses per group;
# axis labels carry the percentage of variance explained.
p = ggplot(points, aes(x=x, y=y, color=group)) + geom_point(alpha=.7, size=2) +
  labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
       y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
       title=paste(opts$type," PCoA",sep="")) +
  stat_ellipse(level=0.68) + theme_classic()
p
# Save as editable vector PDF (PNG line kept for reference)
ggsave(paste(opts$output, ".pdf", sep=""), p, width = opts$width, height = opts$height)
# ggsave(paste(opts$output, ".png", sep=""), p, width = opts$width, height = opts$height)
print(paste(opts$output, ".pdf finished.", sep = ""))
# Add sample labels (useful for spotting outlier samples)
p=p+geom_text_repel(label=paste(rownames(points)),colour="black",size=3)
p
# Save the labelled variant as well
ggsave(paste(opts$output, "_label.pdf", sep=""), p, width = opts$width, height = opts$height)
# ggsave(paste(opts$output, "_label.png", sep=""), p, width = opts$width, height = opts$height)
print(paste(opts$output, "_label.pdf finished.", sep = ""))
# Compare beta diversity between one pair of groups with vegan::adonis
# (PERMANOVA). Reads script-level globals: sampFile, dis_table, opts.
da_adonis = function(sampleV){
  # sampleV: one-row data frame with columns sampA and sampB (group names)
  sampleA = as.matrix(sampleV$sampA)
  sampleB = as.matrix(sampleV$sampB)
  design2 = subset(sampFile, group %in% c(sampleA,sampleB))
  # Skip degenerate comparisons where only one group has samples
  if (length(unique(design2$group))>1) {
    # Subset the distance matrix to the two groups' samples
    sub_dis_table = dis_table[rownames(design2),rownames(design2)]
    sub_dis_table = as.dist(sub_dis_table, diag = FALSE, upper = FALSE)
    adonis_table = adonis(sub_dis_table~group, data=design2, permutations = 10000)
    adonis_pvalue = adonis_table$aov.tab$`Pr(>F)`[1]
    print(paste("In ",opts$type," pvalue between", sampleA, "and", sampleB, "is", adonis_pvalue, sep=" "))
    # Append the result to <output>.txt as a tab-separated row
    adonis_pvalue = paste(opts$type, sampleA, sampleB, adonis_pvalue, sep="\t")
    write.table(adonis_pvalue, file=paste(opts$output, ".txt", sep=""), append = TRUE, sep="\t", quote=F, row.names=F, col.names=F)
  }
}
# Run da_adonis() for every pair of groups.
dis_table = as.matrix(dis)
# NOTE(review): the condition is hard-coded TRUE, so the else branch (reading
# explicit pairs from doc/compare.txt) is dead code -- it appears to be a
# manual toggle left in place.
if (TRUE) {
  compare_data = as.vector(unique(design[[opts$group]]))
  len_compare_data = length(compare_data)
  # All unordered pairs i < j of group names
  for(i in 1:(len_compare_data-1)) {
    for(j in (i+1):len_compare_data) {
      tmp_compare = as.data.frame(cbind(sampA=compare_data[i],sampB=compare_data[j]))
      print(tmp_compare)
      da_adonis(tmp_compare)
    }
  }
}else {
  compare_data = read.table("doc/compare.txt", sep="\t", check.names=F, quote='', comment.char="")
  colnames(compare_data) = c("sampA", "sampB")
  for(i in 1:dim(compare_data)[1]){da_adonis(compare_data[i,])}
}
# (Typo "Adnois" is in the runtime message and is left as-is here.)
print(paste("Adnois statistics result in",opts$output, ".txt is finished.", sep = ""))
# 5. Save figures/tables
# Announce completion
print(paste("Output in ", opts$output, ".txt/pdf finished.", sep = ""))
| /fig.4LP_diversity/beta_pcoa.R | no_license | YJL900223/LP_air | R | false | false | 8,671 | r | #!/usr/bin/env Rscript
#
# Copyright 2016-2018 Yong-Xin Liu <metagenome@126.com>
# If used this script, please cited:
# Zhang, J., Zhang, N., Liu, Y.X., Zhang, X., Hu, B., Qin, Y., Xu, H., Wang, H., Guo, X., Qian, J., et al. (2018).
# Root microbiota shift in rice correlates with resident time in the field and developmental stage. Sci China Life Sci 61,
# https://doi.org/10.1007/s11427-018-9284-4
# 手动运行脚本请,需要设置工作目录,使用 Ctrl+Shift+H 或 Session - Set Work Directory - Choose Directory / To Source File Location 设置工作目录
# 1. 程序功能描述和主要步骤
# 程序功能:Beta多样性主坐标轴分析及组间统计
# Functions: PCoA analysis of samples and groups comparing
# Main steps:
# - Reads distance matrix input.txt
# - Calculate orrdinate by PCoA and show in scatter plot
# - Adonis calculate significant between groups distance and group inner distance
# 程序使用示例
# USAGE
# # 展示样品间距离分布,统计组间是否显著,也用于异常样品筛选
#
# Rscript ./script/beta_pcoa.r -h
#
# # 默认基于bray_curtis距离
# Rscript ./script/beta_pcoa.r
#
# # 完整默认参数
# Rscript ./script/beta_pcoa.r -i beta/bray_curtis.txt -t bray_curtis \
# -d doc/design.txt -n group \
# -o beta/pcoa_bray_curtis \
# -w 4 -e 2.5
#
# # 基于unifrac距离
# Rscript ./script/beta_pcoa.r -t unifrac
options(warn = -1)
# 1.2 Parse the command line
# Use the Tsinghua CRAN mirror to speed up package downloads
site="https://mirrors.tuna.tsinghua.edu.cn/CRAN"
# If optparse is not installed yet, install it, then load it
if (!suppressWarnings(suppressMessages(require("optparse", character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))) {
  # BUG FIX: this previously called install.packages(p, repos=site), but `p`
  # is not defined at this point in the script (it is only created by the
  # dependency loops further down), so a fresh session errored here.
  install.packages("optparse", repos = site)
  require("optparse", character.only = T)
}
# Parse the command line into `opts`
if (TRUE){
  option_list = list(
    make_option(c("-t", "--type"), type="character", default="bray_curtis",
                help="Distance type; 距离类型, 可选bray_curtis, bray_curtis_binary, euclidean, jaccard, jaccard_binary, manhatten, unifrac, unifrac_binary [default %default]"),
    make_option(c("-i", "--input"), type="character", default="",
                help="Input beta distance; 距离矩阵,默认beta目录下与t同名,可指定 [default %default]"),
    make_option(c("-d", "--design"), type="character", default="../design.txt",
                help="design file; 实验设计文件 [default %default]"),
    make_option(c("-n", "--group"), type="character", default="group",
                help="name of group type; 分组列名 [default %default]"),
    make_option(c("-w", "--width"), type="numeric", default=4,
                help="Width of figure; 图片宽 [default %default]"),
    make_option(c("-e", "--height"), type="numeric", default=2.5,
                help="Height of figure; 图片高 [default %default]"),
    make_option(c("-o", "--output"), type="character", default="",
                help="output directory or prefix; 输出文件前缀, 有txt和矢量图pdf [default %default]")
  )
  opts = parse_args(OptionParser(option_list=option_list))
  # If input/output were not supplied, derive defaults from the distance type.
  # NOTE(review): the input default resolves to "<type>.txt" in the current
  # directory; the help text mentions a beta/ directory -- confirm which is intended.
  if (opts$input==""){opts$input=paste("",opts$type, ".txt", sep = "")}
  if (opts$output==""){opts$output=paste("pcoa_",opts$type, sep = "")}
  # Echo the resolved inputs/outputs so the user can confirm they are correct
  print(paste("The distance matrix file is ", opts$input, sep = ""))
  print(paste("Type of distance type is ", opts$type, sep = ""))
  print(paste("The design file is ", opts$design, sep = ""))
  print(paste("The group name is ", opts$group, sep = ""))
  print(paste("The output file prefix is ", opts$output, sep = ""))
}
# 2. Check, install and load dependencies
# 2.1 Install commonly used CRAN packages
site="https://mirrors.tuna.tsinghua.edu.cn/CRAN"
# Dependency list: data reshaping, plotting, and ordination/PERMANOVA
package_list = c("reshape2","ggplot2","vegan")
# Try to load each package; install from CRAN first if loading fails
for(p in package_list){
  if(!suppressWarnings(suppressMessages(require(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    install.packages(p, repos=site)
    suppressWarnings(suppressMessages(library(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 2.2 Install commonly used Bioconductor packages
# NOTE(review): biocLite.R is the legacy Bioconductor installer (pre-3.8);
# current R/Bioconductor uses BiocManager::install -- confirm the target R
# version before relying on this branch.
package_list = c("digest","ggrepel")
for(p in package_list){
  if(!suppressWarnings(suppressMessages(require(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    source("https://bioconductor.org/biocLite.R")
    biocLite(p)
    suppressWarnings(suppressMessages(library(p, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 2.3 Install commonly used GitHub packages
package_list = c("kassambara/ggpubr")
for(p in package_list){
  q=unlist(strsplit(p,split = "/"))[2]
  if(!suppressWarnings(suppressMessages(require(q, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))){
    # BUG FIX: install_github() was called without devtools being loaded
    # anywhere in this script, so this branch failed with
    # "could not find function install_github". Bootstrap devtools from CRAN
    # if needed and call it via its namespace.
    if (!requireNamespace("devtools", quietly = TRUE)) install.packages("devtools", repos = site)
    devtools::install_github(p)
    suppressWarnings(suppressMessages(library(q, character.only = TRUE, quietly = TRUE, warn.conflicts = FALSE)))
  }
}
# 3. Read input files
# Read the distance matrix (samples x samples, tab-separated)
dis = read.table(opts$input, header=T, row.names= 1, sep="\t", comment.char="")
# Read the experimental design (one row per sample)
design = read.table(opts$design, header=T, row.names= 1, sep="\t", comment.char="")
# Extract the per-sample group column (column name given by --group)
sampFile = as.data.frame(design[,opts$group],row.names = row.names(design))
colnames(sampFile)[1] = "group"
# 4. Statistics and plotting
# Classical MDS / PCoA on the distance matrix, keeping the first 3 axes
pcoa = cmdscale(dis, k=3, eig=T) # k is dimension, 3 is recommended; eig is eigenvalues
points = as.data.frame(pcoa$points) # coordinates as a data.frame
eig = pcoa$eig
# Attach each sample's group label, matched by sample name
points = cbind(points, sampFile[rownames(points),])
colnames(points) = c("x", "y", "z","group")
# Plot PCo 1 vs PCo 2; axis labels carry the % variance explained by each axis
p = ggplot(points, aes(x=x, y=y, color=group)) + geom_point(alpha=.7, size=2) +
  labs(x=paste("PCoA 1 (", format(100 * eig[1] / sum(eig), digits=4), "%)", sep=""),
       y=paste("PCoA 2 (", format(100 * eig[2] / sum(eig), digits=4), "%)", sep=""),
       title=paste(opts$type," PCoA",sep="")) +
  stat_ellipse(level=0.68) + theme_classic()
p
# Save as PDF (PNG variant kept for convenience, commented out)
ggsave(paste(opts$output, ".pdf", sep=""), p, width = opts$width, height = opts$height)
# ggsave(paste(opts$output, ".png", sep=""), p, width = opts$width, height = opts$height)
print(paste(opts$output, ".pdf finished.", sep = ""))
# Add per-sample text labels (useful for spotting outlier samples)
p=p+geom_text_repel(label=paste(rownames(points)),colour="black",size=3)
p
# Save the labelled version
ggsave(paste(opts$output, "_label.pdf", sep=""), p, width = opts$width, height = opts$height)
# ggsave(paste(opts$output, "_label.png", sep=""), p, width = opts$width, height = opts$height)
print(paste(opts$output, "_label.pdf finished.", sep = ""))
# da_adonis: pairwise PERMANOVA between two groups using vegan's adonis().
#
# sampleV: one-row data.frame with columns sampA and sampB naming the two
#          groups to compare.
# Reads globals: sampFile (sample -> group mapping), dis_table (full distance
# matrix as a plain matrix), opts (type/output). Side effect: appends one
# result line "<type>\t<A>\t<B>\t<pvalue>" to <opts$output>.txt. The test is
# skipped when fewer than two distinct groups remain after subsetting.
# NOTE(review): vegan::adonis (with its $aov.tab slot) is deprecated in favour
# of adonis2() in recent vegan releases -- confirm the installed vegan version.
da_adonis = function(sampleV){
  sampleA = as.matrix(sampleV$sampA)
  sampleB = as.matrix(sampleV$sampB)
  # keep only the samples belonging to the two groups of interest
  design2 = subset(sampFile, group %in% c(sampleA,sampleB))
  if (length(unique(design2$group))>1) {
    # subset the distance matrix to those samples and convert to a dist object
    sub_dis_table = dis_table[rownames(design2),rownames(design2)]
    sub_dis_table = as.dist(sub_dis_table, diag = FALSE, upper = FALSE)
    # PERMANOVA with 10000 permutations; first Pr(>F) row is the group effect
    adonis_table = adonis(sub_dis_table~group, data=design2, permutations = 10000)
    adonis_pvalue = adonis_table$aov.tab$`Pr(>F)`[1]
    print(paste("In ",opts$type," pvalue between", sampleA, "and", sampleB, "is", adonis_pvalue, sep=" "))
    adonis_pvalue = paste(opts$type, sampleA, sampleB, adonis_pvalue, sep="\t")
    write.table(adonis_pvalue, file=paste(opts$output, ".txt", sep=""), append = TRUE, sep="\t", quote=F, row.names=F, col.names=F)
  }
}
# Loop over every pair of groups and test group separation with adonis
dis_table = as.matrix(dis)
if (TRUE) {
  # Default mode: build all pairwise combinations of the groups in the design
  compare_data = as.vector(unique(design[[opts$group]]))
  len_compare_data = length(compare_data)
  for(i in 1:(len_compare_data-1)) {
    for(j in (i+1):len_compare_data) {
      tmp_compare = as.data.frame(cbind(sampA=compare_data[i],sampB=compare_data[j]))
      print(tmp_compare)
      da_adonis(tmp_compare)
    }
  }
}else {
  # Alternative mode: read the pairs to compare from doc/compare.txt
  compare_data = read.table("doc/compare.txt", sep="\t", check.names=F, quote='', comment.char="")
  colnames(compare_data) = c("sampA", "sampB")
  for(i in 1:dim(compare_data)[1]){da_adonis(compare_data[i,])}
}
# BUG FIX: message previously read "Adnois statistics result in<file>" --
# "Adonis" was misspelled and, because sep="", the file name was glued to "in".
print(paste("Adonis statistics result in ", opts$output, ".txt is finished.", sep = ""))
# 5. Final status message confirming all outputs were written
print(paste("Output in ", opts$output, ".txt/pdf finished.", sep = ""))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exec.R
\name{exec}
\alias{exec}
\title{Execute a function}
\usage{
exec(f, ..., .env = caller_env())
}
\arguments{
\item{f}{A function, or function name as a string.}
\item{...}{Arguments to function.
These dots support \link{tidy-dots} features.}
\item{.env}{Environment in which to evaluate the call. This will be
most useful if \code{f} is a string, or the function has side-effects.}
}
\description{
This function constructs and evaluates a call to \code{f}.
It has two primary uses:
\itemize{
\item To call a function with arguments stored in a list (if the function
doesn't support \link{tidy-dots})
\item To call every function stored in a list (in conjunction with \code{map()}/
\code{\link[=lapply]{lapply()}})
}
}
\examples{
args <- list(x = c(1:10, 100, NA), na.rm = TRUE)
exec("mean", !!!args)
exec("mean", !!!args, trim = 0.2)
fs <- list(a = function() "a", b = function() "b")
lapply(fs, exec)
# Compare to do.call it will not automatically inline expressions
# into the evaluated call.
x <- 10
args <- exprs(x1 = x + 1, x2 = x * 2)
exec(list, !!!args)
do.call(list, args)
# exec() is not designed to generate pretty function calls. This is
# most easily seen if you call a function that captures the call:
f <- disp ~ cyl
exec("lm", f, data = mtcars)
# If you need finer control over the generated call, you'll need to
# construct it yourself. This may require creating a new environment
# with carefully constructed bindings
data_env <- env(data = mtcars)
eval(expr(lm(!!f, data)), data_env)
}
| /man/exec.Rd | no_license | makarevichy/rlang | R | false | true | 1,595 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exec.R
\name{exec}
\alias{exec}
\title{Execute a function}
\usage{
exec(f, ..., .env = caller_env())
}
\arguments{
\item{f}{A function, or function name as a string.}
\item{...}{Arguments to function.
These dots support \link{tidy-dots} features.}
\item{.env}{Environment in which to evaluate the call. This will be
most useful if \code{f} is a string, or the function has side-effects.}
}
\description{
This function constructs and evaluates a call to \code{f}.
It has two primary uses:
\itemize{
\item To call a function with arguments stored in a list (if the function
doesn't support \link{tidy-dots})
\item To call every function stored in a list (in conjunction with \code{map()}/
\code{\link[=lapply]{lapply()}})
}
}
\examples{
args <- list(x = c(1:10, 100, NA), na.rm = TRUE)
exec("mean", !!!args)
exec("mean", !!!args, trim = 0.2)
fs <- list(a = function() "a", b = function() "b")
lapply(fs, exec)
# Compare to do.call it will not automatically inline expressions
# into the evaluated call.
x <- 10
args <- exprs(x1 = x + 1, x2 = x * 2)
exec(list, !!!args)
do.call(list, args)
# exec() is not designed to generate pretty function calls. This is
# most easily seen if you call a function that captures the call:
f <- disp ~ cyl
exec("lm", f, data = mtcars)
# If you need finer control over the generated call, you'll need to
# construct it yourself. This may require creating a new environment
# with carefully constructed bindings
data_env <- env(data = mtcars)
eval(expr(lm(!!f, data)), data_env)
}
|
predPreyModel <- function(gens = 1000, initPrey = 100,
                          initPred = 10, a = 0.01, r = 0.2,
                          m = 0.05, cc = 0.1) {
  # Discrete-time Lotka-Volterra predator-prey model.
  #   gens     = total number of time steps (step 1 holds the initial state)
  #   initPrey = initial prey abundance
  #   initPred = initial predator abundance
  #   a        = attack rate
  #   r        = intrinsic prey growth rate
  #   m        = predator mortality rate
  #   cc       = conversion constant (prey consumed -> predator births)
  # Returns a gens x 2 matrix with columns "prey" and "pred".
  prey <- rep(initPrey, gens) # pre-allocate results for prey abundance
  pred <- rep(initPred, gens) # pre-allocate results for predator abundance
  # BUG FIX: the original `for (i in 2:gens)` evaluates to c(2, 1) when
  # gens == 1 and would index past the vectors; seq_len(gens - 1) + 1
  # iterates 2..gens and is empty when gens == 1.
  for ( i in seq_len(gens - 1) + 1 ) {
    # one step of population dynamics
    prey[i] <- prey[i-1] + r * prey[i-1] - a*pred[i-1]*prey[i-1]
    if ( prey[i] < 0 ) { # clamp at zero: prey cannot go extinct below 0
      prey[i] <- 0
    }
    # predator update uses the PREVIOUS step's prey abundance (prey[i-1])
    pred[i] <- pred[i-1] + cc*a*pred[i-1]*prey[i-1] - m*pred[i-1]
    if ( pred[i] < 0 ) { # clamp at zero: predators cannot go extinct below 0
      pred[i] <- 0
    }
  }
  return(cbind(prey,pred))
}
# A parameter study of the attack rate.
# Fixed parameters:
gens <- 1000
initPrey <- 100
initPred <- 10
r <- 0.2
m <- 0.05
cc <- 0.1
# The parameter that will be varied (100 attack-rate values):
aRange <- seq(from = 0.001, to = 0.1, by = 0.001)
nReps <- length(aRange)
timeSteps <- 1000
# One column per run, one row per time step
preyData <- matrix(data = 0, nrow = timeSteps, ncol = nReps) # preallocate
predData <- preyData # preallocate
for ( i in 1:nReps ) {
  aval <- aRange[i] # working value of attack rate
  results <- predPreyModel(gens = gens, initPrey = initPrey,
                           initPred = initPred, a = aval, r = r, m = m,
                           cc = cc) # run the model
  preyData[,i] <- results[,"prey"] # store results
  predData[,i] <- results[,"pred"]
}
# Write the data from the parameter study to .csv files.
# Besides a, there are parameters that are not obvious from the data alone:
# initPrey/initPred appear at time step 1 and gens is the last step, but
# r, m and cc must be stored explicitly alongside each run.
rcol <- rep(r, nReps) # vector of r values (one per run)
mcol <- rep(m, nReps) # vector of m values
cccol <- rep(cc, nReps) # vector of cc values
runIDs <- 1:nReps # vector of unique run identifiers
paramCols <- 5 # number of parameter columns; avoiding magic numbers
# Pre-allocate result matrices: parameter columns followed by the time series
preyResultsMatrix <- matrix(data = 0, nrow = nReps, ncol = (paramCols + gens))
preyResultsMatrix[,1:paramCols] <- cbind(runIDs, rcol, mcol, cccol, aRange)
predResultsMatrix <- preyResultsMatrix
# Transpose because the time series were stored as COLUMNS of preyData /
# predData, but each time series must become one ROW of the results matrix
preyResultsMatrix[, (paramCols+1):(paramCols + gens)] <- t(preyData)
predResultsMatrix[, (paramCols+1):(paramCols + gens)] <- t(predData)
# Column names for the .csv: parameter names, then one column per time step
myColNames <- c("runID", "r", "m", "cc", "a", paste(1:gens))
colnames(preyResultsMatrix) <- myColNames
colnames(predResultsMatrix) <- myColNames
# Write the data to .csv.
# NOTE(review): hard-coded setwd() paths make this script machine-specific --
# consider relative paths instead.
setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/")
# make a sub-directory if it doesn't already exist:
if ( !dir.exists("AttackRateStudy") ) {
  dir.create("AttackRateStudy")
}
# actually write the data:
setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/AttackRateStudy/")
source("../../../ExampleScripts/SafeFileWrite.R") # provides mySafeWriteCSV() (and getUnusedFilename(), used below)
mySafeWriteCSV(data = preyResultsMatrix, namebase = "PreyDataAttackRateStudy.csv")
mySafeWriteCSV(data = predResultsMatrix, namebase = "PredatorDataAttackRateStudy.csv")
# Meta-scripting: write a source-able R script recording the parameter values.
myParamFile <- getUnusedFilename("parameters.R") # make sure the name is unused
cat(paste("Writing parameters to: '", myParamFile, "'\n", sep = ""))
sink(myParamFile) # redirect cat() output to the file
# Add useful contextual information:
cat("# Source-able script of parameters used in Predator-Prey attack rate study\n")
cat(paste("# Produced on ", date(), "\n"))
# now write each of the parameters:
cat(paste("gens <- ", gens, "\n", sep = ""))
cat(paste("initPrey <- ", initPrey, "\n", sep = ""))
cat(paste("initPred <- ", initPred, "\n", sep = ""))
cat(paste("r <- ", r, "\n", sep = ""))
cat(paste("m <- ", m, "\n", sep = ""))
cat(paste("cc <- ", cc, "\n", sep = ""))
cat("aRange <- seq(from = 0.001, to = 0.1, by = 0.001)\n")
sink() # close the file
# Alternate meta-script with more contextual info; note this re-opens the
# SAME file, so it overwrites the version written just above.
sink(myParamFile) # open the file for writing
# Add useful contextual information:
cat("# Source-able script of parameters used in Predator-Prey attack rate study\n")
cat(paste("# Produced on ", date(), "\n", sep = ""))
# now write each of the parameters, each with an explanatory comment:
cat(paste("gens <- ", gens, " # total generations\n", sep = ""))
cat(paste("initPrey <- ", initPrey, " # initial prey abundance at time step 1\n", sep = ""))
cat(paste("initPred <- ", initPred, " # initial predator abundance at time step 1\n", sep = ""))
cat(paste("r <- ", r, " # intrinsic growth rate of prey\n", sep = ""))
cat(paste("m <- ", m, " # intrinsic mortality rate of predators\n", sep = ""))
cat(paste("cc <- ", cc, " # conversion constant for prey consumed into predator births\n", sep = ""))
cat("aRange <- seq(from = 0.001, to = 0.1, by = 0.001) # range of attack rate values used\n")
sink() # close the file
############################################################################
# # how about a second parameter study, looking at variation in two parameters?
# # first parameter: growth rate:
# rvals <- seq(from = 0.01, to = 0.1, by = 0.01)
# numRvals <- length(rvals)
# #second parameter: predator mortality:
# mvals <- seq(from = 0.01, to = 0.1, by = 0.01)
# numMvals <- length(mvals)
# # other parameters:
# a <- 0.001
# cc <- 0.1
# initPrey <- 150
# initPred <- 50
# gens <- 1000
# # preallocate data structures:
# totalNumRuns <- numRvals * numMvals
# preyData <- matrix(data = 0, ncol = totalNumRuns, nrow = gens)
# predData <- preyData
# preyHeaders <- rep("", totalNumRuns)
# predHeaders <- preyHeaders
# # two-parameter study calls for nested for loops:
# count <- 1
# for ( i in 1:numRvals ){
# rval <- rvals[i]
# for ( j in 1:numMvals ) {
# mval <- mvals[j]
# results <- predPreyModel(r = rval, m = mval) # run the model
# preyData[,count] <- results[,"prey"] # store results
# predData[,count] <- results[,"pred"]
#
# # make data headers (column names):
# preyHeaders[count] <- paste("prey.r.", rval, ".m.", mval, sep = "")
# predHeaders[count] <- paste("pred.r.", rval, ".m.", mval, sep = "")
#
# # increment counter variable that keeps place in results arrays:
# count <- count + 1
# }
# }
# # assign column names:
# colnames(preyData) <- preyHeaders
# colnames(predData) <- predHeaders
# # create data object for writing to .csv:
# time <- 1:gens
# allData <- cbind(time, preyData, predData)
# setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/")
# mySafeWriteCSV(data = allData, namebase = "PredPreyStudyrAndm")
| /Datasets/L-V_Pred-PreyModel/predPreyModelParameterStudy.R | no_license | elmc2755/CompBio_on_git | R | false | false | 7,012 | r | predPreyModel <- function(gens = 1000, initPrey = 100,
initPred = 10, a = 0.01, r = 0.2,
m = 0.05, cc = 0.1) {
  # Discrete-time Lotka-Volterra predator-prey model
  # gens = total number of time steps (step 1 holds the initial state)
  # initPrey and initPred are initial population sizes
  # a = attack rate
  # r = intrinsic prey growth rate
  # m = predator mortality rate
  # cc = conversion constant (converting prey consumed into predator births)
  # Returns a gens x 2 matrix with columns "prey" and "pred".
  # NOTE(review): 2:gens runs backwards (c(2, 1)) when gens == 1 -- confirm
  # callers always pass gens >= 2.
  prey <- rep(initPrey, gens) # pre-allocate array for results on prey abundance
  pred <- rep(initPred, gens) # results on predator abundance
  for ( i in 2:gens ) {
    # main loop to iterate population dynamics one step at a time
    prey[i] <- prey[i-1] + r * prey[i-1] - a*pred[i-1]*prey[i-1]
    if ( prey[i] < 0 ) { # clamp at zero: prey are extinct
      prey[i] <- 0
    }
    # predator update uses the PREVIOUS step's prey abundance (prey[i-1])
    pred[i] <- pred[i-1] + cc*a*pred[i-1]*prey[i-1] - m*pred[i-1]
    if ( pred[i] < 0 ) { # clamp at zero: predators are extinct
      pred[i] <- 0
    }
  }
  return(cbind(prey,pred))
}
# A parameter study of the attack rate.
# Fixed parameters:
gens <- 1000
initPrey <- 100
initPred <- 10
r <- 0.2
m <- 0.05
cc <- 0.1
# The parameter that will be varied (100 attack-rate values):
aRange <- seq(from = 0.001, to = 0.1, by = 0.001)
nReps <- length(aRange)
timeSteps <- 1000
# One column per run, one row per time step
preyData <- matrix(data = 0, nrow = timeSteps, ncol = nReps) # preallocate
predData <- preyData # preallocate
for ( i in 1:nReps ) {
  aval <- aRange[i] # working value of attack rate
  results <- predPreyModel(gens = gens, initPrey = initPrey,
                           initPred = initPred, a = aval, r = r, m = m,
                           cc = cc) # run the model
  preyData[,i] <- results[,"prey"] # store results
  predData[,i] <- results[,"pred"]
}
# Write the data from the parameter study to .csv files.
# Besides a, there are parameters that are not obvious from the data alone:
# initPrey/initPred appear at time step 1 and gens is the last step, but
# r, m and cc must be stored explicitly alongside each run.
rcol <- rep(r, nReps) # vector of r values (one per run)
mcol <- rep(m, nReps) # vector of m values
cccol <- rep(cc, nReps) # vector of cc values
runIDs <- 1:nReps # vector of unique run identifiers
paramCols <- 5 # number of parameter columns; avoiding magic numbers
# Pre-allocate result matrices: parameter columns followed by the time series
preyResultsMatrix <- matrix(data = 0, nrow = nReps, ncol = (paramCols + gens))
preyResultsMatrix[,1:paramCols] <- cbind(runIDs, rcol, mcol, cccol, aRange)
predResultsMatrix <- preyResultsMatrix
# Transpose because the time series were stored as COLUMNS of preyData /
# predData, but each time series must become one ROW of the results matrix
preyResultsMatrix[, (paramCols+1):(paramCols + gens)] <- t(preyData)
predResultsMatrix[, (paramCols+1):(paramCols + gens)] <- t(predData)
# Column names for the .csv: parameter names, then one column per time step
myColNames <- c("runID", "r", "m", "cc", "a", paste(1:gens))
colnames(preyResultsMatrix) <- myColNames
colnames(predResultsMatrix) <- myColNames
# Write the data to .csv.
# NOTE(review): hard-coded setwd() paths make this script machine-specific --
# consider relative paths instead.
setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/")
# make a sub-directory if it doesn't already exist:
if ( !dir.exists("AttackRateStudy") ) {
  dir.create("AttackRateStudy")
}
# actually write the data:
setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/AttackRateStudy/")
source("../../../ExampleScripts/SafeFileWrite.R") # provides mySafeWriteCSV() (and getUnusedFilename(), used below)
mySafeWriteCSV(data = preyResultsMatrix, namebase = "PreyDataAttackRateStudy.csv")
mySafeWriteCSV(data = predResultsMatrix, namebase = "PredatorDataAttackRateStudy.csv")
# Meta-scripting: write a source-able R script recording the parameter values.
myParamFile <- getUnusedFilename("parameters.R") # make sure the name is unused
cat(paste("Writing parameters to: '", myParamFile, "'\n", sep = ""))
sink(myParamFile) # redirect cat() output to the file
# Add useful contextual information:
cat("# Source-able script of parameters used in Predator-Prey attack rate study\n")
cat(paste("# Produced on ", date(), "\n"))
# now write each of the parameters:
cat(paste("gens <- ", gens, "\n", sep = ""))
cat(paste("initPrey <- ", initPrey, "\n", sep = ""))
cat(paste("initPred <- ", initPred, "\n", sep = ""))
cat(paste("r <- ", r, "\n", sep = ""))
cat(paste("m <- ", m, "\n", sep = ""))
cat(paste("cc <- ", cc, "\n", sep = ""))
cat("aRange <- seq(from = 0.001, to = 0.1, by = 0.001)\n")
sink() # close the file
# Alternate meta-script with more contextual info; note this re-opens the
# SAME file, so it overwrites the version written just above.
sink(myParamFile) # open the file for writing
# Add useful contextual information:
cat("# Source-able script of parameters used in Predator-Prey attack rate study\n")
cat(paste("# Produced on ", date(), "\n", sep = ""))
# now write each of the parameters, each with an explanatory comment:
cat(paste("gens <- ", gens, " # total generations\n", sep = ""))
cat(paste("initPrey <- ", initPrey, " # initial prey abundance at time step 1\n", sep = ""))
cat(paste("initPred <- ", initPred, " # initial predator abundance at time step 1\n", sep = ""))
cat(paste("r <- ", r, " # intrinsic growth rate of prey\n", sep = ""))
cat(paste("m <- ", m, " # intrinsic mortality rate of predators\n", sep = ""))
cat(paste("cc <- ", cc, " # conversion constant for prey consumed into predator births\n", sep = ""))
cat("aRange <- seq(from = 0.001, to = 0.1, by = 0.001) # range of attack rate values used\n")
sink() # close the file
############################################################################
# # how about a second parameter study, looking at variation in two parameters?
# # first parameter: growth rate:
# rvals <- seq(from = 0.01, to = 0.1, by = 0.01)
# numRvals <- length(rvals)
# #second parameter: predator mortality:
# mvals <- seq(from = 0.01, to = 0.1, by = 0.01)
# numMvals <- length(mvals)
# # other parameters:
# a <- 0.001
# cc <- 0.1
# initPrey <- 150
# initPred <- 50
# gens <- 1000
# # preallocate data structures:
# totalNumRuns <- numRvals * numMvals
# preyData <- matrix(data = 0, ncol = totalNumRuns, nrow = gens)
# predData <- preyData
# preyHeaders <- rep("", totalNumRuns)
# predHeaders <- preyHeaders
# # two-parameter study calls for nested for loops:
# count <- 1
# for ( i in 1:numRvals ){
# rval <- rvals[i]
# for ( j in 1:numMvals ) {
# mval <- mvals[j]
# results <- predPreyModel(r = rval, m = mval) # run the model
# preyData[,count] <- results[,"prey"] # store results
# predData[,count] <- results[,"pred"]
#
# # make data headers (column names):
# preyHeaders[count] <- paste("prey.r.", rval, ".m.", mval, sep = "")
# predHeaders[count] <- paste("pred.r.", rval, ".m.", mval, sep = "")
#
# # increment counter variable that keeps place in results arrays:
# count <- count + 1
# }
# }
# # assign column names:
# colnames(preyData) <- preyHeaders
# colnames(predData) <- predHeaders
# # create data object for writing to .csv:
# time <- 1:gens
# allData <- cbind(time, preyData, predData)
# setwd("~/compbio/CompBio_on_git/Models/L-V_Pred-PreyModel/")
# mySafeWriteCSV(data = allData, namebase = "PredPreyStudyrAndm")
|
# s.GBM3.R
# ::rtemis::
# 2015-8 Efstathios D. Gennatas egenn.github.io
#
# Notes: gbm.more currently fails with distribution "multinomial" due to a bug in gbm.
#' Gradient Boosting Machine [C, R, S]
#'
#' Train a GBM model using `gbm-developers/gbm3`
#'
#' Early stopping is implemented by fitting `n.trees` initially, checking the (smoothed) validation
#' error curve, and adding `n.new.trees` if needed, until error does not reduce or `max.trees` is
#' reached.
#' [gS] in the argument description indicates that multiple values can be passed, in
#' which case tuning will be performed using grid search. gS is supported for:
#' interaction.depth, shrinkage, bag.fraction, mFeatures, and n.minobsinnode
#' This function includes a workaround for when `gbm.fit` fails.
#' If an error is detected, `gbm.fit` is rerun until successful and the procedure continues normally
#' @inheritParams s.GLM
#' @inheritParams s.CART
#' @param n.trees Integer: Initial number of trees to fit
#' @param interaction.depth [gS] Integer: Interaction depth
#' @param shrinkage [gS] Float: Shrinkage (learning rate)
#' @param n.minobsinnode [gS] Integer: Minimum number of observation allowed in node
#' @param bag.fraction [gS] Float (0, 1): Fraction of cases to use to train each tree.
#' Helps avoid overfitting. Default = .75
#' @param mFeatures [gS] Integer: Number of features to randomly choose from all available features to train at each
#' step. Default = NULL which results in using all features.
#' @param save.res.mod Logical: If TRUE, save gbm model for each grid run. For diagnostic purposes only:
#' Object size adds up quickly
#' @param stratify.var If resampling is stratified, stratify against this variable. Defaults to outcome
#' @param outdir String: If defined, save log, 'plot.all' plots (see above) and RDS file of complete output
#' @param save.rds Logical: If outdir is defined, should all data be saved in RDS file? s.SVDnetGBM will save
#' mod.gbm, so no need to save again.
#' @param relInf Logical: If TRUE (Default), estimate variables' relative influence.
#' @param varImp Logical: If TRUE, estimate variable importance by permutation (as in random forests;
#' noted as experimental in gbm). Takes longer than (default) relative influence.
#' The two measures are highly correlated.
#' @author Efstathios D. Gennatas
#' @seealso [elevate] for external cross-validation
#' @family Supervised Learning
#' @family Tree-based methods
#' @family Ensembles
#' @export
s.GBM3 <- function(x, y = NULL,
x.test = NULL, y.test = NULL,
weights = NULL,
ipw = TRUE,
ipw.type = 2,
upsample = FALSE,
upsample.seed = NULL,
distribution = NULL,
interaction.depth = 2,
shrinkage = .01,
bag.fraction = 0.9,
mFeatures = NULL,
n.minobsinnode = 5,
n.trees = 2000,
max.trees = 5000,
force.n.trees = NULL,
n.tree.window = 0,
gbm.select.smooth = TRUE,
smoother = c("loess", "supsmu"),
n.new.trees = 500,
min.trees = 50,
failsafe.trees = 1000,
imetrics = FALSE,
.gs = FALSE,
grid.resample.rtset = rtset.resample("kfold", 5),
grid.search.type = c("exhaustive", "randomized"),
grid.randomized.p = .1,
metric = NULL,
maximize = NULL,
plot.tune.error = FALSE,
exclude.test.lt.train = FALSE,
exclude.lt.min.trees = FALSE,
res.fail.thres = .99,
n.extra.trees = 0,
n.cores = rtCores,
gbm.cores = 1,
relInf = TRUE,
varImp = FALSE,
offset = NULL,
var.monotone = NULL,
keep.data = TRUE,
var.names = NULL,
response.name = "y",
group = NULL,
plot.perf = FALSE,
plot.res = ifelse(!is.null(outdir), TRUE, FALSE),
plot.fitted = NULL,
plot.predicted = NULL,
plotRelInf = FALSE,
plotVarImp = FALSE,
print.plot = TRUE,
plot.theme = getOption("rt.fit.theme", "lightgrid"),
x.name = NULL, y.name = NULL,
question = NULL,
verbose = TRUE,
trace = 0,
grid.verbose = TRUE,
gbm.fit.verbose = FALSE,
outdir = NULL,
save.gridrun = FALSE,
save.error.diagnostics = FALSE,
save.rds = TRUE,
save.res = FALSE,
save.res.mod = FALSE,
save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ...) {
# [ INTRO ] ====
if (missing(x)) {
print(args(s.GBM3))
return(invisible(9))
}
if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
logFile <- if (!is.null(outdir)) {
paste0(outdir, "/", sys.calls()[[1]][[1]], ".", format(Sys.time(), "%Y%m%d.%H%M%S"), ".log")
} else {
NULL
}
start.time <- intro(verbose = verbose, logFile = logFile)
mod.name <- "GBM3"
if (is.null(force.n.trees) && n.trees < min.trees) {
warning("You requested n.trees = ", n.trees, ", but specified min.trees = ", min.trees,
"\n I'll go ahead and specify n.trees = ", min.trees)
n.trees <- min.trees
}
# [ DEPENDENCIES ] ====
if (!depCheck("gbm3", verbose = FALSE)) {
cat("\n"); stop("Please install dependencies and try again")
}
# [ ARGUMENTS ] ====
if (save.res.mod) save.res <- TRUE
if (is.null(x.name)) x.name <- getName(x, "x")
if (is.null(y.name)) y.name <- getName(y, "y")
if (!verbose) print.plot <- FALSE
verbose <- verbose | !is.null(logFile)
if (save.mod & is.null(outdir)) outdir <- paste0("./s.", mod.name)
if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
# if (plot.res) {
# plot.res.outdir <- paste0(outdir, "/plotRes/")
# if (!dir.exists(plot.res.outdir)) dir.create(plot.res.outdir)
# }
if (n.trees > max.trees) {
if (verbose) msg("n.trees specified is greater than max.trees, setting n.trees to", max.trees)
n.trees <- max.trees
}
grid.search.type <- match.arg(grid.search.type)
smoother <- match.arg(smoother)
# [ DATA ] ====
dt <- dataPrepare(x, y, x.test, y.test,
ipw = ipw, ipw.type = ipw.type,
upsample = upsample, upsample.seed = upsample.seed,
verbose = verbose)
x <- dt$x
y <- dt$y
x.test <- dt$x.test
y.test <- dt$y.test
xnames <- dt$xnames
type <- dt$type
.weights <- if (is.null(weights) & ipw) dt$weights else weights
x0 <- if (upsample) dt$x0 else x
y0 <- if (upsample) dt$y0 else y
n.classes <- length(levels(y0))
if (type == "Classificationn" && n.classes != 2) stop("GBM3 only supports binary classification")
if (verbose) dataSummary(x, y, x.test, y.test, type)
if (print.plot) {
if (is.null(plot.fitted)) plot.fitted <- if (is.null(y.test)) TRUE else FALSE
if (is.null(plot.predicted)) plot.predicted <- if (!is.null(y.test)) TRUE else FALSE
} else {
plot.fitted <- plot.predicted <- FALSE
}
if (type == "Classification") nlevels <- length(levels(y))
if (is.null(distribution)) {
if (type == "Classification") {
distribution <- ifelse(length(levels(y)) > 2, "multinomial", "bernoulli")
} else if (type == "Survival") {
distribution <- "coxph"
} else {
distribution <- "gaussian"
}
}
# Keep original inputs (after dataPrepare)
.x <- x
.y <- y
.x.test <- x.test
.y.test <- y.test
# Name of loss function - in order to get the correct name for quantile regression
loss <- ifelse(length(distribution) == 1, distribution, as.character(distribution$name))
# For Bernoulli, convert to {0, 1}
if (loss == "bernoulli") {
.y <- as.integer(.y) - 1
.y.test <- as.integer(.y.test) - 1
}
if (verbose) msg("Running Gradient Boosting", type, "with a", loss[[1]], "loss function...", newline = TRUE)
# [ GRID SEARCH ] ====
if (is.null(metric)) {
if (type == "Classification") {
metric <- "Balanced Accuracy"
if (is.null(maximize)) maximize <- TRUE
} else if (type == "Regression") {
metric <- "MSE"
if (is.null(maximize)) maximize <- FALSE
}
}
.final <- FALSE
gc <- gridCheck(interaction.depth, shrinkage, bag.fraction, mFeatures, n.minobsinnode)
if (!.gs && (gc | is.null(force.n.trees))) {
gs <- gridSearchLearn(x = x0, y = y0,
mod = mod.name,
resample.rtset = grid.resample.rtset,
grid.params = list(interaction.depth = interaction.depth,
shrinkage = shrinkage,
bag.fraction = bag.fraction,
mFeatures = mFeatures,
n.minobsinnode = n.minobsinnode),
fixed.params = list(n.trees = n.trees,
max.trees = max.trees,
n.tree.window = n.tree.window,
gbm.select.smooth = gbm.select.smooth,
n.new.trees = n.new.trees,
min.trees = min.trees,
failsafe.trees = failsafe.trees,
ipw = ipw,
ipw.type = ipw.type,
upsample = upsample,
upsample.seed = upsample.seed,
relInf = FALSE,
plot.tune.error = plot.tune.error,
.gs = TRUE),
search.type = grid.search.type,
weights = weights,
metric = metric,
maximize = maximize,
save.mod = save.gridrun,
verbose = verbose,
grid.verbose = grid.verbose,
n.cores = n.cores)
interaction.depth <- gs$best.tune$interaction.depth
shrinkage <- gs$best.tune$shrinkage
bag.fraction <- gs$best.tune$bag.fraction
mFeatures <- gs$best.tune$mFeatures
n.minobsinnode <- gs$best.tune$n.minobsinnode
n.trees <- gs$best.tune$n.trees
if (n.trees == -1) {
warning("Tuning failed to find n.trees, defaulting to failsafe.trees = ", failsafe.trees)
n.trees <- failsafe.trees
}
if (n.trees < min.trees) {
warning("Tuning returned ", n.trees, " trees; using min.trees = ", min.trees, " instead")
n.trees <- min.trees
}
# Now ready to train final full model
.final <- TRUE
.gs <- FALSE
} else {
gs <- NULL
}
if (!is.null(force.n.trees)) n.trees <- force.n.trees
parameters <- list(n.trees = n.trees,
interaction.depth = interaction.depth,
shrinkage = shrinkage,
bag.fraction = bag.fraction,
mFeatures = mFeatures,
n.minobsinnode = n.minobsinnode,
weights = .weights)
if (verbose) {
parameterSummary(n.trees,
interaction.depth,
shrinkage,
bag.fraction,
mFeatures,
n.minobsinnode,
weights)
}
# [ GBM3 ] ====
if (!is.null(logFile)) sink() # pause writing to log
# If we are in .gs, rbind train and test to get perf to tune n.trees
# .xtrain and .ytrain to allow diff b/n .gs and full model
if (.gs) {
.x.train <- rbind(.x, .x.test) # will be split to train/test by nTrain
.y.train <- c(.y, .y.test)
} else {
### Fit the final model on the whole internal set using the optimal n of trees estimated above
# inlc.hack to check model is good: add small valid set to see if valid.error gets estimated
.x.train <- rbind(.x, .x[1, , drop = FALSE])
.y.train <- c(.y, .y[1])
if (verbose) msg("Training GBM3 on full training set...", newline = TRUE)
}
mod <- gbm3::gbm.fit(x = .x.train, y = .y.train,
offset = offset,
distribution = distribution,
w = .weights,
var.monotone = var.monotone,
n.trees = n.trees,
interaction.depth = interaction.depth,
n.minobsinnode = n.minobsinnode,
shrinkage = shrinkage,
bag.fraction = bag.fraction,
mFeatures = mFeatures,
nTrain = NROW(.x),
keep.data = keep.data,
verbose = gbm.fit.verbose,
var.names = var.names,
response.name = response.name,
group = group)
if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
while (all(is.na(mod$valid.error))) {
msg("### Caught gbm.fit error; retrying... ###")
if (save.error.diagnostics) {
saveRDS(list(mod = mod,
x = .x.train, y = .y.train,
offset = offset,
distribution = distribution,
w = .weights,
var.monotone = var.monotone,
n.trees = n.trees,
interaction.depth = interaction.depth,
n.minobsinnode = n.minobsinnode,
shrinkage = shrinkage,
bag.fraction = bag.fraction,
mFeatures = mFeatures,
nTrain = NROW(.x),
keep.data = keep.data,
verbose = gbm.fit.verbose,
var.names = var.names,
response.name = response.name,
group = group),
paste0("~/Desktop/s.GBM3.panic.rds"))
}
warning("Caught gbm.fit error: retraining last model and continuing")
if (!is.null(logFile)) sink() # pause logging
mod <- gbm3::gbm.fit(x = .x.train, y = .y.train,
offset = offset,
distribution = distribution,
w = .weights,
var.monotone = var.monotone,
n.trees = n.trees,
interaction.depth = interaction.depth,
n.minobsinnode = n.minobsinnode,
shrinkage = shrinkage,
bag.fraction = bag.fraction,
mFeatures = mFeatures,
nTrain = NROW(.x),
keep.data = keep.data,
verbose = gbm.fit.verbose,
var.names = var.names,
response.name = response.name,
group = group)
if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
}
# If we are in .gs, use the best n.trees to get fitted and predicted values,
# error.train, and error.test.
if (.gs) {
gst <- gbm3.select.trees(mod,
smooth = gbm.select.smooth,
plot = plot.tune.error,
verbose = verbose)
n.trees <- gst$n.trees
valid.error.smooth <- gst$valid.error.smooth
if (plot.tune.error) mplot3.xy(seq(valid.error.smooth),
list(Training = mod$train.error,
Validation = mod$valid.error,
`Smoothed Validation` = valid.error.smooth),
type = 'l', group.adj = .95,
line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
vline = c(which.min(mod$valid.error), which.min(valid.error.smooth)),
vline.col = c(ucsfCol$red, ucsfCol$purple),
xlab = "N trees", ylab = "Loss")
if (trace > 0) msg("### n.trees is", n.trees)
while (n.trees >= (mod$params$num_trees - n.tree.window) & mod$params$num_trees < max.trees) {
n.new.trees <- min(n.new.trees, max.trees - mod$params$num_trees)
if (verbose) msg("Adding", n.new.trees, "more trees to trained GBM model...",
"\n * current mod$params$num_trees =", mod$params$num_trees,
"\n * best n.trees = ", n.trees,
"\n * max.trees =", max.trees)
mod <- gbm3::gbm_more(mod, num_new_trees = n.new.trees, is_verbose = gbm.fit.verbose)
# CHECK: does this need to be checked the same way as mod above?
gst <- gbm3.select.trees(mod,
smooth = gbm.select.smooth,
smoother = smoother,
plot = plot.tune.error,
verbose = verbose)
n.trees <- gst$n.trees
valid.error.smooth <- gst$valid.error.smooth
if (plot.tune.error) mplot3.xy(seq(valid.error.smooth),
list(Training = mod$train.error,
Validation = mod$valid.error,
`Smoothed Validation` = valid.error.smooth),
type = 'l', group.adj = .95,
line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
vline = c(which.min(mod$valid.error), which.min(valid.error.smooth)),
vline.col = c(ucsfCol$red, ucsfCol$purple),
xlab = "N trees", ylab = "Loss")
}
if (n.trees == max.trees & verbose) msg("Reached max.trees =", max.trees)
}
# [ FITTED ] ====
fitted.prob <- NULL
if (type == "Regression" | type == "Survival") {
if (distribution == "poisson") {
fitted <- predict(mod, .x, n.trees = n.trees, type = "response")
} else {
fitted <- predict(mod, .x, n.trees = n.trees)
}
} else {
if (distribution == "multinomial") {
# Get probabilities per class
fitted.prob <- fitted <- predict(mod, .x, n.trees = n.trees, type = "response")
fitted <- apply(fitted, 1, function(x) levels(y)[which.max(x)])
} else {
# Bernoulli: convert {0, 1} back to factor
fitted.prob <- 1 - predict(mod, .x, n.trees = n.trees, type = "response")
fitted <- factor(ifelse(fitted.prob >= .5, 1, 0), levels = c(1, 0))
levels(fitted) <- levels(y)
}
}
error.train <- modError(y, fitted, fitted.prob)
if (verbose) errorSummary(error.train, mod.name)
### Relative influence & variable importance
# Estimating rel inf takes time, var imp even more so.
# Do not estimate unless you need them.
mod.summary.rel <- NULL
if (relInf) {
if (verbose) msg("Calculating relative influence of variables...")
mod.summary.rel <- summary(mod, plot_it = plotRelInf,
order = FALSE, method = gbm3::relative_influence)
if (plotRelInf) mtext(paste0(y.name, " ~ ", x.name, " GBM relative influence"), padj = -2)
}
mod.summary.perm <- NULL
if (varImp) {
if (verbose) msg("Calculating variable importance by permutation testing...")
# similar to random forests (stated as experimental)
mod.summary.perm <- summary(mod, plotit = plotVarImp,
order = FALSE, method = gbm3::permutation_relative_influence)
if (plotVarImp) mtext(paste0(y.name, " ~ ", x.name,
" GBM permutation-based variable importance"), padj = -2)
}
# [ PREDICTED ] ====
predicted.prob <- predicted <- error.test <- NULL
if (!is.null(.x.test)) {
if (type == "Regression" | type == "Survival") {
if (distribution == "poisson") {
if (trace > 0) msg("Using predict for Poisson Regression with type = response")
predicted <- predict(mod, x.test, n.trees = n.trees, type = "response")
} else {
if (verbose) msg("Using predict for", type, "with type = link")
predicted <- predict(mod, x.test, n.trees = n.trees)
}
} else {
if (distribution == "multinomial") {
if (trace > 0) msg("Using predict for multinomial classification with type = response")
# Get probabilities per class
predicted.prob <- predicted <- predict(mod, x.test, n.trees = n.trees, type = "response")
# Now get the predicted classes
predicted <- apply(predicted, 1, function(x) levels(y.test)[which.max(x)])
} else {
# Bernoulli: convert {0, 1} back to factor
predicted.prob <- 1 - predict(mod, x.test, n.trees = n.trees, type = "response")
predicted <- factor(ifelse(predicted.prob >= .5, 1, 0), levels = c(1, 0))
levels(predicted) <- levels(y)
}
}
if (!is.null(y.test)) {
error.test <- modError(y.test, predicted, predicted.prob)
if (verbose) errorSummary(error.test, mod.name)
}
}
# [ OUTRO ] ====
extra <- list(gridSearch = gs,
mod.summary.rel = mod.summary.rel,
mod.summary.perm = mod.summary.perm)
if (imetrics) {
extra$imetrics <- list(n.trees = n.trees,
depth = interaction.depth,
n.nodes = (2 ^ interaction.depth) * n.trees)
}
rt <- rtModSet(rtclass = "rtMod",
mod = mod,
mod.name = mod.name,
type = type,
parameters = parameters,
y.train = y,
y.test = y.test,
x.name = x.name,
y.name = y.name,
xnames = xnames,
fitted = fitted,
fitted.prob = fitted.prob,
se.fit = NULL,
error.train = error.train,
predicted = predicted,
predicted.prob = predicted.prob,
se.prediction = NULL,
error.test = error.test,
varimp = if (!is.null(mod.summary.rel)) mod.summary.rel[, 2, drop = FALSE] else NULL,
question = question,
extra = extra)
rtMod.out(rt,
print.plot,
plot.fitted,
plot.predicted,
y.test,
mod.name,
outdir,
save.mod,
verbose,
plot.theme)
outro(start.time, verbose = verbose, sinkOff = ifelse(is.null(logFile), FALSE, TRUE))
rt
} # rtemis::s.GBM
# Select the best number of trees from a trained gbm3 model.
#
# Locates the number of trees that minimizes the validation error,
# optionally after smoothing the validation error curve so that a
# spurious local dip is not chosen.
#
# object   : trained gbm3 model; must carry params$num_trees, train.error,
#            and valid.error components.
# smooth   : Logical: if TRUE (default), smooth valid.error before taking
#            its minimum.
# smoother : "loess" (default) or "supsmu"; smoother used when smooth = TRUE.
# plot     : Logical: if TRUE, plot training/validation/smoothed curves.
# verbose  : currently unused in the body; kept for interface stability.
# Returns a list with n.trees (index of the minimum validation error) and
# valid.error.smooth (the smoothed curve, or NULL when smooth = FALSE).
gbm3.select.trees <- function(object,
                              smooth = TRUE,
                              smoother = "loess",
                              plot = FALSE,
                              verbose = FALSE) {

  n.trees <- object$params$num_trees
  # Smooth the validation error curve, if requested.
  # NOTE: loess() silently drops NA cases, so the fitted vector may be
  # shorter than n.trees if valid.error contains NAs (behavior carried
  # over from the original implementation).
  valid.error.smooth <- if (!smooth) {
    NULL
  } else if (smoother == "loess") {
    loess(object$valid.error ~ seq_len(n.trees))$fitted
  } else {
    supsmu(seq_len(n.trees), object$valid.error)$y
  }
  # Use the smoothed curve when available, otherwise the raw validation error
  valid.error <- if (smooth) valid.error.smooth else object$valid.error
  if (plot) mplot3.xy(seq(n.trees), list(Training = object$train.error,
                                         Validation = object$valid.error,
                                         `Smoothed Validation` = valid.error.smooth),
                      type = 'l', group.adj = .95,
                      line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
                      vline = c(which.min(object$valid.error), which.min(valid.error.smooth)),
                      vline.col = c(ucsfCol$red, ucsfCol$purple),
                      xlab = "N trees", ylab = "Loss")
  list(n.trees = which.min(valid.error),
       valid.error.smooth = valid.error.smooth)
} # rtemis::gbm3.select.trees
| /R/s.GBM3.R | no_license | muschellij2/rtemis | R | false | false | 24,962 | r | # s.GBM3.R
# ::rtemis::
# 2015-8 Efstathios D. Gennatas egenn.github.io
#
# Notes: gbm.more currently fails with distribution "multinomial" due to a bug in gbm.
#' Gradient Boosting Machine [C, R, S]
#'
#' Train a GBM model using `gbm-developers/gbm3`
#'
#' Early stopping is implemented by fitting `n.trees` initially, checking the (smoothed) validation
#' error curve, and adding `n.new.trees` if needed, until error does not reduce or `max.trees` is
#' reached.
#' [gS] in the argument description indicates that multiple values can be passed, in
#' which case tuning will be performed using grid search. gS is supported for:
#' interaction.depth, shrinkage, bag.fraction, mFeatures, and n.minobsinnode
#' This function includes a workaround for when `gbm.fit` fails.
#' If an error is detected, `gbm.fit` is rerun until successful and the procedure continues normally
#' @inheritParams s.GLM
#' @inheritParams s.CART
#' @param n.trees Integer: Initial number of trees to fit
#' @param interaction.depth [gS] Integer: Interaction depth
#' @param shrinkage [gS] Float: Shrinkage (learning rate)
#' @param n.minobsinnode [gS] Integer: Minimum number of observation allowed in node
#' @param bag.fraction [gS] Float (0, 1): Fraction of cases to use to train each tree.
#' Helps avoid overfitting. Default = .75
#' @param mFeatures [gS] Integer: Number of features to randomly choose from all available features to train at each
#' step. Default = NULL which results in using all features.
#' @param save.res.mod Logical: If TRUE, save gbm model for each grid run. For diagnostic purposes only:
#' Object size adds up quickly
#' @param stratify.var If resampling is stratified, stratify against this variable. Defaults to outcome
#' @param outdir String: If defined, save the log, any requested plots, and an RDS file of the complete output to this directory
#' @param save.rds Logical: If outdir is defined, should all data be saved in RDS file? s.SVDnetGBM will save
#' mod.gbm, so no need to save again.
#' @param relInf Logical: If TRUE (Default), estimate variables' relative influence.
#' @param varImp Logical: If TRUE, estimate variable importance by permutation (as in random forests;
#' noted as experimental in gbm). Takes longer than (default) relative influence.
#' The two measures are highly correlated.
#' @author Efstathios D. Gennatas
#' @seealso [elevate] for external cross-validation
#' @family Supervised Learning
#' @family Tree-based methods
#' @family Ensembles
#' @export
s.GBM3 <- function(x, y = NULL,
                   x.test = NULL, y.test = NULL,
                   weights = NULL,
                   ipw = TRUE,
                   ipw.type = 2,
                   upsample = FALSE,
                   upsample.seed = NULL,
                   distribution = NULL,
                   interaction.depth = 2,
                   shrinkage = .01,
                   bag.fraction = 0.9,
                   mFeatures = NULL,
                   n.minobsinnode = 5,
                   n.trees = 2000,
                   max.trees = 5000,
                   force.n.trees = NULL,
                   n.tree.window = 0,
                   gbm.select.smooth = TRUE,
                   smoother = c("loess", "supsmu"),
                   n.new.trees = 500,
                   min.trees = 50,
                   failsafe.trees = 1000,
                   imetrics = FALSE,
                   .gs = FALSE,
                   grid.resample.rtset = rtset.resample("kfold", 5),
                   grid.search.type = c("exhaustive", "randomized"),
                   grid.randomized.p = .1,
                   metric = NULL,
                   maximize = NULL,
                   plot.tune.error = FALSE,
                   exclude.test.lt.train = FALSE,
                   exclude.lt.min.trees = FALSE,
                   res.fail.thres = .99,
                   n.extra.trees = 0,
                   n.cores = rtCores,
                   gbm.cores = 1,
                   relInf = TRUE,
                   varImp = FALSE,
                   offset = NULL,
                   var.monotone = NULL,
                   keep.data = TRUE,
                   var.names = NULL,
                   response.name = "y",
                   group = NULL,
                   plot.perf = FALSE,
                   plot.res = ifelse(!is.null(outdir), TRUE, FALSE),
                   plot.fitted = NULL,
                   plot.predicted = NULL,
                   plotRelInf = FALSE,
                   plotVarImp = FALSE,
                   print.plot = TRUE,
                   plot.theme = getOption("rt.fit.theme", "lightgrid"),
                   x.name = NULL, y.name = NULL,
                   question = NULL,
                   verbose = TRUE,
                   trace = 0,
                   grid.verbose = TRUE,
                   gbm.fit.verbose = FALSE,
                   outdir = NULL,
                   save.gridrun = FALSE,
                   save.error.diagnostics = FALSE,
                   save.rds = TRUE,
                   save.res = FALSE,
                   save.res.mod = FALSE,
                   save.mod = ifelse(!is.null(outdir), TRUE, FALSE), ...) {

  # [ INTRO ] ====
  # Called with no data: print the argument list and exit early.
  if (missing(x)) {
    print(args(s.GBM3))
    # (9 is an arbitrary sentinel value, returned invisibly)
    return(invisible(9))
  }
  if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
  # Timestamped log file under outdir, if an output directory was requested
  logFile <- if (!is.null(outdir)) {
    paste0(outdir, "/", sys.calls()[[1]][[1]], ".", format(Sys.time(), "%Y%m%d.%H%M%S"), ".log")
  } else {
    NULL
  }
  start.time <- intro(verbose = verbose, logFile = logFile)
  mod.name <- "GBM3"
  # Enforce the lower bound on trees unless the caller forced a tree count
  if (is.null(force.n.trees) && n.trees < min.trees) {
    warning("You requested n.trees = ", n.trees, ", but specified min.trees = ", min.trees,
            "\n  I'll go ahead and specify n.trees = ", min.trees)
    n.trees <- min.trees
  }
  # [ DEPENDENCIES ] ====
  if (!depCheck("gbm3", verbose = FALSE)) {
    cat("\n"); stop("Please install dependencies and try again")
  }
  # [ ARGUMENTS ] ====
  if (save.res.mod) save.res <- TRUE
  if (is.null(x.name)) x.name <- getName(x, "x")
  if (is.null(y.name)) y.name <- getName(y, "y")
  if (!verbose) print.plot <- FALSE
  # Logging to a file forces verbose output
  verbose <- verbose | !is.null(logFile)
  if (save.mod & is.null(outdir)) outdir <- paste0("./s.", mod.name)
  if (!is.null(outdir)) outdir <- paste0(normalizePath(outdir, mustWork = FALSE), "/")
  # if (plot.res) {
  #   plot.res.outdir <- paste0(outdir, "/plotRes/")
  #   if (!dir.exists(plot.res.outdir)) dir.create(plot.res.outdir)
  # }
  # Cap the initial tree count at max.trees
  if (n.trees > max.trees) {
    if (verbose) msg("n.trees specified is greater than max.trees, setting n.trees to", max.trees)
    n.trees <- max.trees
  }
  grid.search.type <- match.arg(grid.search.type)
  smoother <- match.arg(smoother)
  # [ DATA ] ====
  # dataPrepare handles type detection, IPW weighting, and upsampling
  dt <- dataPrepare(x, y, x.test, y.test,
                    ipw = ipw, ipw.type = ipw.type,
                    upsample = upsample, upsample.seed = upsample.seed,
                    verbose = verbose)
  x <- dt$x
  y <- dt$y
  x.test <- dt$x.test
  y.test <- dt$y.test
  xnames <- dt$xnames
  type <- dt$type
  # Use IPW case weights from dataPrepare unless weights were supplied directly
  .weights <- if (is.null(weights) & ipw) dt$weights else weights
  # x0/y0: the original (non-upsampled) data, used for grid search below
  x0 <- if (upsample) dt$x0 else x
  y0 <- if (upsample) dt$y0 else y
  n.classes <- length(levels(y0))
  # NOTE(review): "Classificationn" looks like a typo for "Classification",
  # which means this check can never fire as written. Confirm whether
  # multiclass input should really be rejected — the distribution logic
  # below explicitly supports "multinomial".
  if (type == "Classificationn" && n.classes != 2) stop("GBM3 only supports binary classification")
  if (verbose) dataSummary(x, y, x.test, y.test, type)
  # Default fitted/predicted plots depend on whether a test set is present
  if (print.plot) {
    if (is.null(plot.fitted)) plot.fitted <- if (is.null(y.test)) TRUE else FALSE
    if (is.null(plot.predicted)) plot.predicted <- if (!is.null(y.test)) TRUE else FALSE
  } else {
    plot.fitted <- plot.predicted <- FALSE
  }
  if (type == "Classification") nlevels <- length(levels(y))
  # Pick a gbm distribution from the outcome type when none was given
  if (is.null(distribution)) {
    if (type == "Classification") {
      distribution <- ifelse(length(levels(y)) > 2, "multinomial", "bernoulli")
    } else if (type == "Survival") {
      distribution <- "coxph"
    } else {
      distribution <- "gaussian"
    }
  }
  # Keep original inputs (after dataPrepare)
  .x <- x
  .y <- y
  .x.test <- x.test
  .y.test <- y.test
  # Name of loss function - in order to get the correct name for quantile regression
  loss <- ifelse(length(distribution) == 1, distribution, as.character(distribution$name))
  # For Bernoulli, convert to {0, 1}
  if (loss == "bernoulli") {
    .y <- as.integer(.y) - 1
    .y.test <- as.integer(.y.test) - 1
  }
  if (verbose) msg("Running Gradient Boosting", type, "with a", loss[[1]], "loss function...", newline = TRUE)
  # [ GRID SEARCH ] ====
  # Default tuning metric by outcome type
  if (is.null(metric)) {
    if (type == "Classification") {
      metric <- "Balanced Accuracy"
      if (is.null(maximize)) maximize <- TRUE
    } else if (type == "Regression") {
      metric <- "MSE"
      if (is.null(maximize)) maximize <- FALSE
    }
  }
  .final <- FALSE
  # Tune when multiple values were passed for any tunable hyperparameter,
  # or when n.trees was not forced (n.trees itself is tuned by early stopping)
  gc <- gridCheck(interaction.depth, shrinkage, bag.fraction, mFeatures, n.minobsinnode)
  if (!.gs && (gc | is.null(force.n.trees))) {
    gs <- gridSearchLearn(x = x0, y = y0,
                          mod = mod.name,
                          resample.rtset = grid.resample.rtset,
                          grid.params = list(interaction.depth = interaction.depth,
                                             shrinkage = shrinkage,
                                             bag.fraction = bag.fraction,
                                             mFeatures = mFeatures,
                                             n.minobsinnode = n.minobsinnode),
                          fixed.params = list(n.trees = n.trees,
                                              max.trees = max.trees,
                                              n.tree.window = n.tree.window,
                                              gbm.select.smooth = gbm.select.smooth,
                                              n.new.trees = n.new.trees,
                                              min.trees = min.trees,
                                              failsafe.trees = failsafe.trees,
                                              ipw = ipw,
                                              ipw.type = ipw.type,
                                              upsample = upsample,
                                              upsample.seed = upsample.seed,
                                              relInf = FALSE,
                                              plot.tune.error = plot.tune.error,
                                              .gs = TRUE),
                          search.type = grid.search.type,
                          weights = weights,
                          metric = metric,
                          maximize = maximize,
                          save.mod = save.gridrun,
                          verbose = verbose,
                          grid.verbose = grid.verbose,
                          n.cores = n.cores)
    # Adopt the best hyperparameters found by the grid search
    interaction.depth <- gs$best.tune$interaction.depth
    shrinkage <- gs$best.tune$shrinkage
    bag.fraction <- gs$best.tune$bag.fraction
    mFeatures <- gs$best.tune$mFeatures
    n.minobsinnode <- gs$best.tune$n.minobsinnode
    n.trees <- gs$best.tune$n.trees
    # -1 signals that tuning failed to find a usable number of trees
    if (n.trees == -1) {
      warning("Tuning failed to find n.trees, defaulting to failsafe.trees = ", failsafe.trees)
      n.trees <- failsafe.trees
    }
    if (n.trees < min.trees) {
      warning("Tuning returned ", n.trees, " trees; using min.trees = ", min.trees, " instead")
      n.trees <- min.trees
    }
    # Now ready to train final full model
    .final <- TRUE
    .gs <- FALSE
  } else {
    gs <- NULL
  }
  # A forced tree count always wins over tuning results
  if (!is.null(force.n.trees)) n.trees <- force.n.trees
  parameters <- list(n.trees = n.trees,
                     interaction.depth = interaction.depth,
                     shrinkage = shrinkage,
                     bag.fraction = bag.fraction,
                     mFeatures = mFeatures,
                     n.minobsinnode = n.minobsinnode,
                     weights = .weights)
  if (verbose) {
    parameterSummary(n.trees,
                     interaction.depth,
                     shrinkage,
                     bag.fraction,
                     mFeatures,
                     n.minobsinnode,
                     weights)
  }
  # [ GBM3 ] ====
  if (!is.null(logFile)) sink() # pause writing to log
  # If we are in .gs, rbind train and test to get perf to tune n.trees
  # .x.train and .y.train allow differentiating between .gs and full-model runs
  if (.gs) {
    .x.train <- rbind(.x, .x.test) # will be split to train/test by nTrain
    .y.train <- c(.y, .y.test)
  } else {
    ### Fit the final model on the whole internal set using the optimal n of trees estimated above
    # Hack to check the model is good: append one training row as a tiny
    # validation set so that gbm computes valid.error, which is inspected
    # below to detect gbm.fit failures
    .x.train <- rbind(.x, .x[1, , drop = FALSE])
    .y.train <- c(.y, .y[1])
    if (verbose) msg("Training GBM3 on full training set...", newline = TRUE)
  }
  mod <- gbm3::gbm.fit(x = .x.train, y = .y.train,
                       offset = offset,
                       distribution = distribution,
                       w = .weights,
                       var.monotone = var.monotone,
                       n.trees = n.trees,
                       interaction.depth = interaction.depth,
                       n.minobsinnode = n.minobsinnode,
                       shrinkage = shrinkage,
                       bag.fraction = bag.fraction,
                       mFeatures = mFeatures,
                       nTrain = NROW(.x),
                       keep.data = keep.data,
                       verbose = gbm.fit.verbose,
                       var.names = var.names,
                       response.name = response.name,
                       group = group)
  if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
  # All-NA valid.error indicates a silent gbm.fit failure: retrain until it
  # succeeds (workaround documented in the roxygen header)
  while (all(is.na(mod$valid.error))) {
    msg("### Caught gbm.fit error; retrying... ###")
    if (save.error.diagnostics) {
      # NOTE(review): hard-coded path — diagnostics are written to the
      # user's Desktop; consider using outdir instead
      saveRDS(list(mod = mod,
                   x = .x.train, y = .y.train,
                   offset = offset,
                   distribution = distribution,
                   w = .weights,
                   var.monotone = var.monotone,
                   n.trees = n.trees,
                   interaction.depth = interaction.depth,
                   n.minobsinnode = n.minobsinnode,
                   shrinkage = shrinkage,
                   bag.fraction = bag.fraction,
                   mFeatures = mFeatures,
                   nTrain = NROW(.x),
                   keep.data = keep.data,
                   verbose = gbm.fit.verbose,
                   var.names = var.names,
                   response.name = response.name,
                   group = group),
              paste0("~/Desktop/s.GBM3.panic.rds"))
    }
    warning("Caught gbm.fit error: retraining last model and continuing")
    if (!is.null(logFile)) sink() # pause logging
    mod <- gbm3::gbm.fit(x = .x.train, y = .y.train,
                         offset = offset,
                         distribution = distribution,
                         w = .weights,
                         var.monotone = var.monotone,
                         n.trees = n.trees,
                         interaction.depth = interaction.depth,
                         n.minobsinnode = n.minobsinnode,
                         shrinkage = shrinkage,
                         bag.fraction = bag.fraction,
                         mFeatures = mFeatures,
                         nTrain = NROW(.x),
                         keep.data = keep.data,
                         verbose = gbm.fit.verbose,
                         var.names = var.names,
                         response.name = response.name,
                         group = group)
    if (!is.null(logFile)) sink(logFile, append = TRUE, split = verbose) # Resume writing to log
  }
  # If we are in .gs, use the best n.trees to get fitted and predicted values,
  # error.train, and error.test.
  if (.gs) {
    gst <- gbm3.select.trees(mod,
                             smooth = gbm.select.smooth,
                             plot = plot.tune.error,
                             verbose = verbose)
    n.trees <- gst$n.trees
    valid.error.smooth <- gst$valid.error.smooth
    if (plot.tune.error) mplot3.xy(seq(valid.error.smooth),
                                   list(Training = mod$train.error,
                                        Validation = mod$valid.error,
                                        `Smoothed Validation` = valid.error.smooth),
                                   type = 'l', group.adj = .95,
                                   line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
                                   vline = c(which.min(mod$valid.error), which.min(valid.error.smooth)),
                                   vline.col = c(ucsfCol$red, ucsfCol$purple),
                                   xlab = "N trees", ylab = "Loss")
    if (trace > 0) msg("### n.trees is", n.trees)
    # Early stopping: while the best n.trees is within n.tree.window of the
    # current total, keep growing the model by n.new.trees up to max.trees
    while (n.trees >= (mod$params$num_trees - n.tree.window) & mod$params$num_trees < max.trees) {
      n.new.trees <- min(n.new.trees, max.trees - mod$params$num_trees)
      if (verbose) msg("Adding", n.new.trees, "more trees to trained GBM model...",
                       "\n  * current mod$params$num_trees =", mod$params$num_trees,
                       "\n  * best n.trees = ", n.trees,
                       "\n  * max.trees =", max.trees)
      mod <- gbm3::gbm_more(mod, num_new_trees = n.new.trees, is_verbose = gbm.fit.verbose)
      # CHECK: does this need to be checked the same way as mod above?
      gst <- gbm3.select.trees(mod,
                               smooth = gbm.select.smooth,
                               smoother = smoother,
                               plot = plot.tune.error,
                               verbose = verbose)
      n.trees <- gst$n.trees
      valid.error.smooth <- gst$valid.error.smooth
      if (plot.tune.error) mplot3.xy(seq(valid.error.smooth),
                                     list(Training = mod$train.error,
                                          Validation = mod$valid.error,
                                          `Smoothed Validation` = valid.error.smooth),
                                     type = 'l', group.adj = .95,
                                     line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
                                     vline = c(which.min(mod$valid.error), which.min(valid.error.smooth)),
                                     vline.col = c(ucsfCol$red, ucsfCol$purple),
                                     xlab = "N trees", ylab = "Loss")
    }
    if (n.trees == max.trees & verbose) msg("Reached max.trees =", max.trees)
  }
  # [ FITTED ] ====
  fitted.prob <- NULL
  if (type == "Regression" | type == "Survival") {
    if (distribution == "poisson") {
      fitted <- predict(mod, .x, n.trees = n.trees, type = "response")
    } else {
      fitted <- predict(mod, .x, n.trees = n.trees)
    }
  } else {
    if (distribution == "multinomial") {
      # Get probabilities per class
      fitted.prob <- fitted <- predict(mod, .x, n.trees = n.trees, type = "response")
      fitted <- apply(fitted, 1, function(x) levels(y)[which.max(x)])
    } else {
      # Bernoulli: convert {0, 1} back to factor
      # (assumes predict() returns the probability of the second level,
      # hence the 1 - p — verify against gbm3 predict documentation)
      fitted.prob <- 1 - predict(mod, .x, n.trees = n.trees, type = "response")
      fitted <- factor(ifelse(fitted.prob >= .5, 1, 0), levels = c(1, 0))
      levels(fitted) <- levels(y)
    }
  }
  error.train <- modError(y, fitted, fitted.prob)
  if (verbose) errorSummary(error.train, mod.name)
  ### Relative influence & variable importance
  # Estimating rel inf takes time, var imp even more so.
  # Do not estimate unless you need them.
  mod.summary.rel <- NULL
  if (relInf) {
    if (verbose) msg("Calculating relative influence of variables...")
    mod.summary.rel <- summary(mod, plot_it = plotRelInf,
                               order = FALSE, method = gbm3::relative_influence)
    if (plotRelInf) mtext(paste0(y.name, " ~ ", x.name, " GBM relative influence"), padj = -2)
  }
  mod.summary.perm <- NULL
  if (varImp) {
    if (verbose) msg("Calculating variable importance by permutation testing...")
    # similar to random forests (stated as experimental)
    mod.summary.perm <- summary(mod, plotit = plotVarImp,
                                order = FALSE, method = gbm3::permutation_relative_influence)
    if (plotVarImp) mtext(paste0(y.name, " ~ ", x.name,
                                 " GBM permutation-based variable importance"), padj = -2)
  }
  # [ PREDICTED ] ====
  predicted.prob <- predicted <- error.test <- NULL
  if (!is.null(.x.test)) {
    if (type == "Regression" | type == "Survival") {
      if (distribution == "poisson") {
        if (trace > 0) msg("Using predict for Poisson Regression with type = response")
        predicted <- predict(mod, x.test, n.trees = n.trees, type = "response")
      } else {
        if (verbose) msg("Using predict for", type, "with type = link")
        predicted <- predict(mod, x.test, n.trees = n.trees)
      }
    } else {
      if (distribution == "multinomial") {
        if (trace > 0) msg("Using predict for multinomial classification with type = response")
        # Get probabilities per class
        predicted.prob <- predicted <- predict(mod, x.test, n.trees = n.trees, type = "response")
        # Now get the predicted classes
        predicted <- apply(predicted, 1, function(x) levels(y.test)[which.max(x)])
      } else {
        # Bernoulli: convert {0, 1} back to factor
        predicted.prob <- 1 - predict(mod, x.test, n.trees = n.trees, type = "response")
        predicted <- factor(ifelse(predicted.prob >= .5, 1, 0), levels = c(1, 0))
        levels(predicted) <- levels(y)
      }
    }
    # Test error only when test outcomes were provided
    if (!is.null(y.test)) {
      error.test <- modError(y.test, predicted, predicted.prob)
      if (verbose) errorSummary(error.test, mod.name)
    }
  }
  # [ OUTRO ] ====
  extra <- list(gridSearch = gs,
                mod.summary.rel = mod.summary.rel,
                mod.summary.perm = mod.summary.perm)
  if (imetrics) {
    extra$imetrics <- list(n.trees = n.trees,
                           depth = interaction.depth,
                           n.nodes = (2 ^ interaction.depth) * n.trees)
  }
  # Assemble the standard rtemis model object
  rt <- rtModSet(rtclass = "rtMod",
                 mod = mod,
                 mod.name = mod.name,
                 type = type,
                 parameters = parameters,
                 y.train = y,
                 y.test = y.test,
                 x.name = x.name,
                 y.name = y.name,
                 xnames = xnames,
                 fitted = fitted,
                 fitted.prob = fitted.prob,
                 se.fit = NULL,
                 error.train = error.train,
                 predicted = predicted,
                 predicted.prob = predicted.prob,
                 se.prediction = NULL,
                 error.test = error.test,
                 varimp = if (!is.null(mod.summary.rel)) mod.summary.rel[, 2, drop = FALSE] else NULL,
                 question = question,
                 extra = extra)
  # Print/plot/save side effects, then return the model object
  rtMod.out(rt,
            print.plot,
            plot.fitted,
            plot.predicted,
            y.test,
            mod.name,
            outdir,
            save.mod,
            verbose,
            plot.theme)
  outro(start.time, verbose = verbose, sinkOff = ifelse(is.null(logFile), FALSE, TRUE))
  rt
} # rtemis::s.GBM3
# Select the best number of trees from a trained gbm3 model.
#
# Locates the number of trees that minimizes the validation error,
# optionally after smoothing the validation error curve so that a
# spurious local dip is not chosen.
#
# object   : trained gbm3 model; must carry params$num_trees, train.error,
#            and valid.error components.
# smooth   : Logical: if TRUE (default), smooth valid.error before taking
#            its minimum.
# smoother : "loess" (default) or "supsmu"; smoother used when smooth = TRUE.
# plot     : Logical: if TRUE, plot training/validation/smoothed curves.
# verbose  : currently unused in the body; kept for interface stability.
# Returns a list with n.trees (index of the minimum validation error) and
# valid.error.smooth (the smoothed curve, or NULL when smooth = FALSE).
gbm3.select.trees <- function(object,
                              smooth = TRUE,
                              smoother = "loess",
                              plot = FALSE,
                              verbose = FALSE) {

  n.trees <- object$params$num_trees
  # Smooth the validation error curve, if requested.
  # NOTE: loess() silently drops NA cases, so the fitted vector may be
  # shorter than n.trees if valid.error contains NAs (behavior carried
  # over from the original implementation).
  valid.error.smooth <- if (!smooth) {
    NULL
  } else if (smoother == "loess") {
    loess(object$valid.error ~ seq_len(n.trees))$fitted
  } else {
    supsmu(seq_len(n.trees), object$valid.error)$y
  }
  # Use the smoothed curve when available, otherwise the raw validation error
  valid.error <- if (smooth) valid.error.smooth else object$valid.error
  if (plot) mplot3.xy(seq(n.trees), list(Training = object$train.error,
                                         Validation = object$valid.error,
                                         `Smoothed Validation` = valid.error.smooth),
                      type = 'l', group.adj = .95,
                      line.col = c(ucsfCol$teal, ucsfCol$red, ucsfCol$purple),
                      vline = c(which.min(object$valid.error), which.min(valid.error.smooth)),
                      vline.col = c(ucsfCol$red, ucsfCol$purple),
                      xlab = "N trees", ylab = "Loss")
  list(n.trees = which.min(valid.error),
       valid.error.smooth = valid.error.smooth)
} # rtemis::gbm3.select.trees
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.docdb_operations.R
\name{describe_db_cluster_parameter_groups}
\alias{describe_db_cluster_parameter_groups}
\title{Returns a list of DBClusterParameterGroup descriptions}
\usage{
describe_db_cluster_parameter_groups(DBClusterParameterGroupName = NULL,
Filters = NULL, MaxRecords = NULL, Marker = NULL)
}
\arguments{
\item{DBClusterParameterGroupName}{The name of a specific DB cluster parameter group to return details for.
Constraints:
\itemize{
\item If provided, must match the name of an existing \code{DBClusterParameterGroup}.
}}
\item{Filters}{This parameter is not currently supported.}
\item{MaxRecords}{The maximum number of records to include in the response. If more records exist than the specified \code{MaxRecords} value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.}
\item{Marker}{An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by \code{MaxRecords}.}
}
\description{
Returns a list of \code{DBClusterParameterGroup} descriptions. If a \code{DBClusterParameterGroupName} parameter is specified, the list contains only the description of the specified DB cluster parameter group.
}
\section{Accepted Parameters}{
\preformatted{describe_db_cluster_parameter_groups(
DBClusterParameterGroupName = "string",
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxRecords = 123,
Marker = "string"
)
}
}
| /service/paws.docdb/man/describe_db_cluster_parameter_groups.Rd | permissive | CR-Mercado/paws | R | false | true | 1,703 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/paws.docdb_operations.R
\name{describe_db_cluster_parameter_groups}
\alias{describe_db_cluster_parameter_groups}
\title{Returns a list of DBClusterParameterGroup descriptions}
\usage{
describe_db_cluster_parameter_groups(DBClusterParameterGroupName = NULL,
Filters = NULL, MaxRecords = NULL, Marker = NULL)
}
\arguments{
\item{DBClusterParameterGroupName}{The name of a specific DB cluster parameter group to return details for.
Constraints:
\itemize{
\item If provided, must match the name of an existing \code{DBClusterParameterGroup}.
}}
\item{Filters}{This parameter is not currently supported.}
\item{MaxRecords}{The maximum number of records to include in the response. If more records exist than the specified \code{MaxRecords} value, a pagination token (marker) is included in the response so that the remaining results can be retrieved.
Default: 100
Constraints: Minimum 20, maximum 100.}
\item{Marker}{An optional pagination token provided by a previous request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by \code{MaxRecords}.}
}
\description{
Returns a list of \code{DBClusterParameterGroup} descriptions. If a \code{DBClusterParameterGroupName} parameter is specified, the list contains only the description of the specified DB cluster parameter group.
}
\section{Accepted Parameters}{
\preformatted{describe_db_cluster_parameter_groups(
DBClusterParameterGroupName = "string",
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxRecords = 123,
Marker = "string"
)
}
}
|
## Put comments here that give an overall description of what your
## functions do
## Creates a 'cached' version of a matrix, whose inverse will be
## stored between requests as long as the matrix is unchanged.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of four closures sharing one environment:
##   get/set       read or replace the wrapped matrix
##   getinv/setinv read or store the cached inverse
## Replacing the matrix via set() invalidates the cache.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    get = function() x,
    set = function(value) {
      x <<- value
      cached_inverse <<- NULL  # new matrix: old inverse no longer valid
    },
    getinv = function() cached_inverse,
    setinv = function(inverse) cached_inverse <<- inverse
  )
}
## Return the inverse of a cached matrix made by makeCacheMatrix(),
## computing it with solve() only on a cache miss. Extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  inverse <- x$getinv()
  if (!is.null(inverse)) {
    message("getting cached inverse")
    return(inverse)
  }
  # Cache miss: compute, store for next time, then return
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
| /cachematrix.R | no_license | unlimitedbladeworks/ProgrammingAssignment2 | R | false | false | 798 | r | ## Put comments here that give an overall description of what your
## functions do
## Creates a 'cached' version of a matrix, whose inverse will be
## stored between requests as long as the matrix is unchanged.
## Constructor for a matrix wrapper with a memoised inverse.
## Returns a list of four closures sharing this call's environment:
##   get()/set()       read or replace the wrapped matrix
##   getinv()/setinv() read or store the cached inverse
## set() resets the cache so a stale inverse is never returned.
makeCacheMatrix <- function(x = matrix()) {
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  get <- function() { x }
  set <- function(y) {
    x <<- y
    inv <<- NULL  # new matrix invalidates the cached inverse
  }
  getinv <- function() { inv }
  setinv <- function(i) { inv <<- i }
  list(get = get, set = set, getinv = getinv, setinv = setinv)
}
## Returns a cached matrix's inverse, recomputing it if necessary.
## `x` must be a list produced by makeCacheMatrix(); extra arguments
## are forwarded to solve().
cacheSolve <- function(x, ...) {
  i <- x$getinv()
  if (is.null(i)) {
    # Cache miss: compute the inverse and store it for next time
    xmat <- x$get()
    i <- solve(xmat, ...)
    x$setinv(i)
  } else {
    message("getting cached inverse")
  }
  i
}
|
#' Library module UI
#'
#' Builds the two-column layout of the compound-library builder:
#' a left card with "Filters"/"Instructions" tab panes (gene-list form plus
#' selectivity / clinical-phase / expert-opinion filters and two sliders),
#' and a right column with the result table, download buttons and ChEMBL
#' detail tabs. Relies on globals loaded elsewhere (e.g. `data_genes`).
#'
#' @param id Module id; all input/output ids are namespaced via `NS(id)`.
libraryUI <- function(id) {
  ns <- NS(id)
  columns(
    # Left column: filter / instruction panes ----
    column(
      width = 4,
      card(
        header = navInput(
          appearance = "tabs",
          id = ns("nav"),
          choices = c("Filters", "Instructions"),
          values = c("filters", "instructions"),
          selected = "filters"
        ),
        navContent(
          navPane(
            id = ns("pane_filters"),
            p(
              # NOTE(review): "targetting" and the missing period after
              # "HUGO symbols" below are typos in user-facing strings.
              "Type or paste gene symbols in the text box below to generate a downloadable table of drugs targetting those genes.",
              "One gene per line."
            ),
            # Gene-list submission form ----
            formInput(
              id = ns("gene_form"),
              formGroup(
                label = "Find ligands for gene symbols",
                input = shiny::textAreaInput(
                  inputId = ns("gene_list"),
                  label = NULL,
                  rows = 5
                ),
                help = div(
                  "This tool uses HUGO symbols Please see",
                  tags$a(
                    target = "_blank", href = "https://genenames.org",
                    "genenames.org"
                  ),
                  "for help."
                )
              ),
              formGroup(
                label = "Example gene lists",
                input = {
                  sel <- selectInput(
                    id = ns("gene_example"),
                    choices = names(data_genes), # data/load.R
                    selected = "Dark_Kinome"
                  )
                  # Work around selectInput not exposing a placeholder argument
                  sel$children[[1]]$attribs$placeholder <- "Dark_Kinome"
                  sel
                },
                help = "Selecting a choice will populate the input above with an example list of genes."
              ),
              formSubmit(
                label = "Submit"
              ) %>%
                background("orange"),
              actionButton(
                ns("reset_gene_list"),
                "Clear list",
                icon = icon("redo"),
                # Clears the textarea client-side, without a server round-trip
                onclick = glue("$('#{ns('gene_list')}')[0].value = null;")
              ) %>%
                background("orange")
            ) %>%
              margin(bottom = 3),
            # Results pane: filter controls shown after a list is submitted ----
            navContent(
              navPane(
                id = ns("pane_results"),
                p(
                  shiny::textOutput(
                    outputId = ns("gene_targets"),
                    inline = TRUE
                  ),
                  linkInput(
                    id = ns("gene_unknowns"),
                    label = shiny::icon("exclamation-circle")
                  ) %>%
                    font(color = "orange")
                ) %>%
                  display("flex") %>%
                  flex(justify = "between", align = "center") %>%
                  margin(top = 4, bottom = 4) %>%
                  font(size = "lg"),
                formGroup(
                  label = tags$h6("Selectivity levels") %>% margin(b = 0),
                  input = checkboxInput(
                    inline = TRUE,
                    id = ns("filter_probes"),
                    # NOTE(review): display labels double as values here; the
                    # server filters `selectivity_class` against these strings.
                    choices = c("Most selective", "Semi-selective", "Poly-selective", "Unknown"),
                    # values = c("most_selective", "semi_selective", "poly_selective", "unknown_selective"),
                    selected = "Most selective"
                  ) %>%
                    active("orange"),
                  help = "Choose the selectivity levels for which you want chemical probes to be included in the library."
                ),
                formGroup(
                  label = tags$h6("Clinical phases") %>% margin(b = 0),
                  input = checkboxInput(
                    inline = TRUE,
                    id = ns("filter_phase"),
                    choices = c("Approved", "Phase III", "Phase II", "Phase I"),
                    # Numeric codes match `max_phase` in the compound data
                    values = c(4, 3, 2, 1),
                    selected = 4
                  ) %>%
                    active("orange"),
                  help = "Select compounds in clinical development to be added to the library."
                ),
                formGroup(
                  label = tags$h6("Expert opinion compounds") %>% margin(b = 0),
                  input = checkboxInput(
                    inline = TRUE,
                    id = ns("filter_expert"),
                    choices = "chemicalprobes.org 4.0 star rating",
                    values = "chem_probe"
                  ) %>%
                    active("orange"),
                  help = "Select compounds that are endorsed by other users to be added to the library."
                ),
                formGroup(
                  label = tags$h6("Output table") %>% margin(b = 0),
                  input = radiobarInput(
                    id = ns("table_display"),
                    choices = c("Display per entry", "Display per compound"),
                    values = c("entry", "compound"),
                    selected = "entry"
                  ) %>%
                    active("orange")
                ),
                formGroup(
                  label = tags$h6("Maximum Kd for query target (nM)") %>% margin(b = 0),
                  div(
                    # "logify-slider" JS displays the 0-14 scale as 2^x nM;
                    # the server applies the same 2^x cutoff.
                    class = "logify-slider active--orange",
                    shiny::sliderInput(
                      inputId = ns("filter_affinity"),
                      label = NULL,
                      min = 0,
                      max = 14,
                      step = 1,
                      value = 8
                    )
                  )
                ),
                formGroup(
                  label = tags$h6("Minimum number of measurements") %>% margin(b = 0),
                  div(
                    class = "active--orange",
                    shiny::sliderInput(
                      inputId = ns("filter_measurement"),
                      label = NULL,
                      min = 1,
                      max = 40,
                      value = 2
                    )
                  )
                )
              )
            )
          ),
          # Instructions pane ----
          navPane(
            id = ns("pane_instructions"),
            p("The Library app helps you build custom small molecule libraries"),
            p("To use the Library app:"),
            tags$ol(
              class = "pl-4",
              tags$li(
                "Submit a list of targets that you want to build the library for (in HUGO nomenclature), or select one of the pre-selected gene lists."
              ),
              tags$li(
                "Select up to which selectivity level you want to be included."
              ),
              tags$li(
                "Select which approval phases you want to include for clinical compounds."
              ),
              tags$li(
                "Select whether to include the compounds from chemicalprobes.org (4.0 star rating only)."
              ),
              tags$li(
                "Choose whether to view the table per target or per compound"
              ),
              tags$li(
                "Download the library."
              )
            )
          )
        )
      )
    ),
    # Right column: results table, downloads and ChEMBL detail tabs ----
    column(
      width = 8,
      card(
        h3("Output table"),
        div(
          DT::dataTableOutput(
            outputId = ns("table_results"),
            height = "625px"
          ),
          mod_ui_download_button(ns("output_table_csv_dl"), "Download CSV"),
          mod_ui_download_button(ns("output_table_xlsx_dl"), "Download Excel")
        )
      ) %>%
        margin(b = 3),
      mod_ui_chembl_tabs(ns("chembl_tabs_1"))
    )
  )
}
#' Library module server
#'
#' Drives the compound-library builder: parses the submitted gene list,
#' filters the compound tables (selectivity / clinical phase / expert
#' opinion), renders the result table with download buttons, and wires
#' the ChEMBL detail tabs for selected rows.
#'
#' @param input,output,session Standard shiny module server arguments.
#' @param load_example Reactive; when it first fires, a simulated click
#'   loads the example gene list.
libraryServer <- function(input, output, session, load_example) {
  ns <- session$ns
  # Genes that have at least one annotated ligand in either table.
  # FIX: was `unique(a, b)`, which passed the second vector as unique()'s
  # `incomparables` argument and silently dropped it; concatenate first.
  liganded_genes <- unique(c(
    data_optimal_compounds$symbol,
    data_chemical_probes$symbol
  ))
  # nav ----
  observeEvent(input$nav, {
    req(input$nav)
    switch(
      input$nav,
      filters = showNavPane(ns("pane_filters")),
      instructions = showNavPane(ns("pane_instructions"))
    )
  })
  # Load an example gene list into the textarea when one is picked
  observeEvent(input$gene_example, {
    shiny::updateTextAreaInput(
      session = session,
      inputId = "gene_list",
      value = paste0(data_genes[[input$gene_example]], collapse = "\n")
    )
  })
  # One-shot: simulate the submit click when the landing page requests it
  observeEvent(load_example(), once = TRUE, {
    session$sendCustomMessage("click.library.sm", list())
  })
  observeEvent(input$gene_form, {
    showNavPane(ns("pane_results"))
  })
  # Submitted gene symbols, one per line
  r_gene_list <- reactive({
    if (is.null(input$gene_list)) {
      return(NULL)
    }
    strsplit(input$gene_list, "\n")[[1]]
  })
  # Symbols that cannot be mapped to any known gene
  r_gene_unknown <- reactive({
    dplyr::setdiff(r_gene_list(), data_gene_info$symbol)
  })
  # Symbols with at least one annotated ligand
  r_gene_known <- reactive({
    dplyr::intersect(liganded_genes, r_gene_list())
  })
  observeEvent(input$gene_unknowns, {
    req(length(r_gene_unknown()) > 0)
    showModal(
      modal(
        id = NULL,
        header = h5("Unqualified targets"),
        p(paste0("The following ", length(r_gene_unknown()), " targets do not have any annotated ligands: ")),
        p(paste(r_gene_unknown(), collapse = ", "))
      )
    )
  })
  output$gene_targets <- renderText({
    if (is.null(r_gene_list()) || length(r_gene_list()) < 1) {
      "No genes uploaded yet"
    } else {
      paste(length(r_gene_known()), "target(s) with at least one ligand")
    }
  })
  # Compounds included for their selectivity (data.table filter)
  r_selection_selectivity <- reactive({
    req(r_gene_known())
    data_optimal_compounds[
      symbol %in% r_gene_known() &
        selectivity_class %in% input$filter_probes &
        reason_included == "selectivity"
    ]
  })
  # Compounds included for their clinical phase
  r_selection_clinical <- reactive({
    req(r_gene_known())
    data_optimal_compounds[
      symbol %in% r_gene_known() &
        max_phase %in% input$filter_phase &
        reason_included == "clinical"
    ]
  })
  # 4-star chemicalprobes.org compounds, tagged as expert opinion
  r_selection_chemprobes <- reactive({
    req(r_gene_known())
    data_chemical_probes[
      symbol %in% r_gene_known() &
        avg_rating == 4
    ][
      , reason_included := "expert_opinion"
    ]
  })
  # Union of the selections, filtered by the affinity/measurement sliders
  r_selection_table <- reactive({
    req(
      r_gene_known(),
      r_selection_selectivity(),
      r_selection_clinical()
    )
    rbindlist(
      list(
        r_selection_selectivity(),
        r_selection_clinical()
      ),
      fill = TRUE
    )[
      # slider is on a log2 scale: position x means a Kd cutoff of 2^x nM
      affinity_Q1 <= 2^input$filter_affinity &
        affinity_N >= input$filter_measurement
    ] %>%
      {
        if (isTRUE("chem_probe" %in% input$filter_expert))
          rbindlist(list(., r_selection_chemprobes()), fill = TRUE)
        else .
      }
  })
  # One row per target/compound pair
  r_table_entry <- reactive({
    r_selection_table() %>%
      dplyr::inner_join(
        data_cmpd_info %>%
          dplyr::select(lspci_id, chembl_id, pref_name),
        by = "lspci_id"
      ) %>%
      dplyr::distinct() %>%
      dplyr::select(
        symbol, chembl_id,
        pref_name, selectivity_class, max_phase, affinity_Q1, affinity_N,
        gene_id, reason_included, lspci_id
      ) %>%
      dplyr::mutate_at( # rounds to closest 0.1 if greater than 1;
        vars(affinity_Q1), # if less than one, rounds to two significant digits
        ~ ifelse(. > 1, round(., 1), signif(., 2))
      )
  })
  # One row per compound, inclusion reasons collapsed into one string
  r_table_compound <- reactive({
    r_selection_table() %>%
      dplyr::inner_join(
        data_cmpd_info %>%
          dplyr::select(
            lspci_id, chembl_id, pref_name
          ),
        by = "lspci_id"
      ) %>%
      dplyr::distinct() %>%
      dplyr::group_by(
        lspci_id, chembl_id, pref_name, max_phase
      ) %>%
      dplyr::summarise(
        reason_included = paste(symbol, ": ", reason_included, collapse = "; ")
      ) %>%
      dplyr::ungroup() %>%
      dplyr::mutate_at(
        vars(max_phase),
        as.integer
      )
  })
  # Table variant selected by the "Output table" radio bar
  r_tbl_data <- reactive({
    req(input$table_display)
    .data <- if (input$table_display == "entry") {
      r_table_entry()
    } else if (input$table_display == "compound") {
      r_table_compound()
    }
  })
  r_tbl <- reactive({
    .data <- r_tbl_data()
    DT::datatable(
      data = .data,
      extensions = c("Buttons"),
      fillContainer = FALSE,
      filter = "top",
      selection = "multiple",
      rownames = FALSE,
      options = list(
        autoWidth = TRUE,
        buttons = list(
          list(
            extend = "colvis",
            text = "Additional columns"
          )
        ),
        columnDefs = list(
          list(
            # hide the internal id column (DT column indices are 0-based)
            targets = grep(
              pattern = "^(lspci_id)$",
              x = names(.data),
              invert = FALSE
            ) - 1,
            visible = FALSE
          )
        ),
        dom = "lfrtipB",
        fixedHeader = list(
          header = TRUE
        ),
        pagingType = "numbers",
        searchHighlight = TRUE,
        scrollX = FALSE
      )
    )
  })
  output$table_results <- DT::renderDataTable(
    r_tbl(),
    server = FALSE
  )
  r_download_name <- reactive({
    create_download_filename(
      c("compound", "library")
    )
  })
  callModule(mod_server_download_button, "output_table_xlsx_dl", r_tbl_data, "excel", r_download_name)
  callModule(mod_server_download_button, "output_table_csv_dl", r_tbl_data, "csv", r_download_name)
  # table row selection ----
  r_tbl_selection <- reactive({
    sort(input$table_results_rows_selected)
  })
  # lspci_ids of the rows the user selected, empty when nothing selected
  r_selection_drugs <- reactive({
    if (is.null(r_tbl_selection())) {
      return(integer())
    }
    r_tbl_data()$lspci_id[r_tbl_selection()]
  })
  o_chembl_tabs <- callModule(
    mod_server_chembl_tabs, "chembl_tabs_1", data_cmpd_info, r_selection_drugs, lspci_id_name_map
  )
}
| /modules/library.R | permissive | nmoret/sms-website | R | false | false | 13,651 | r | libraryUI <- function(id) {
ns <- NS(id)
columns(
column(
width = 4,
card(
header = navInput(
appearance = "tabs",
id = ns("nav"),
choices = c("Filters", "Instructions"),
values = c("filters", "instructions"),
selected = "filters"
),
navContent(
navPane(
id = ns("pane_filters"),
p(
"Type or paste gene symbols in the text box below to generate a downloadable table of drugs targetting those genes.",
"One gene per line."
),
formInput(
id = ns("gene_form"),
formGroup(
label = "Find ligands for gene symbols",
input = shiny::textAreaInput(
inputId = ns("gene_list"),
label = NULL,
rows = 5
),
help = div(
"This tool uses HUGO symbols Please see",
tags$a(
target = "_blank", href = "https://genenames.org",
"genenames.org"
),
"for help."
)
),
formGroup(
label = "Example gene lists",
input = {
sel <- selectInput(
id = ns("gene_example"),
choices = names(data_genes), # data/load.R
selected = "Dark_Kinome"
)
sel$children[[1]]$attribs$placeholder <- "Dark_Kinome"
sel
},
help = "Selecting a choice will populate the input above with an example list of genes."
),
formSubmit(
label = "Submit"
) %>%
background("orange"),
actionButton(
ns("reset_gene_list"),
"Clear list",
icon = icon("redo"),
onclick = glue("$('#{ns('gene_list')}')[0].value = null;")
) %>%
background("orange")
) %>%
margin(bottom = 3),
navContent(
navPane(
id = ns("pane_results"),
p(
shiny::textOutput(
outputId = ns("gene_targets"),
inline = TRUE
),
linkInput(
id = ns("gene_unknowns"),
label = shiny::icon("exclamation-circle")
) %>%
font(color = "orange")
) %>%
display("flex") %>%
flex(justify = "between", align = "center") %>%
margin(top = 4, bottom = 4) %>%
font(size = "lg"),
formGroup(
label = tags$h6("Selectivity levels") %>% margin(b = 0),
input = checkboxInput(
inline = TRUE,
id = ns("filter_probes"),
choices = c("Most selective", "Semi-selective", "Poly-selective", "Unknown"),
# values = c("most_selective", "semi_selective", "poly_selective", "unknown_selective"),
selected = "Most selective"
) %>%
active("orange"),
help = "Choose the selectivity levels for which you want chemical probes to be included in the library."
),
formGroup(
label = tags$h6("Clinical phases") %>% margin(b = 0),
input = checkboxInput(
inline = TRUE,
id = ns("filter_phase"),
choices = c("Approved", "Phase III", "Phase II", "Phase I"),
values = c(4, 3, 2, 1),
selected = 4
) %>%
active("orange"),
help = "Select compounds in clinical development to be added to the library."
),
formGroup(
label = tags$h6("Expert opinion compounds") %>% margin(b = 0),
input = checkboxInput(
inline = TRUE,
id = ns("filter_expert"),
choices = "chemicalprobes.org 4.0 star rating",
values = "chem_probe"
) %>%
active("orange"),
help = "Select compounds that are endorsed by other users to be added to the library."
),
formGroup(
label = tags$h6("Output table") %>% margin(b = 0),
input = radiobarInput(
id = ns("table_display"),
choices = c("Display per entry", "Display per compound"),
values = c("entry", "compound"),
selected = "entry"
) %>%
active("orange")
),
formGroup(
label = tags$h6("Maximum Kd for query target (nM)") %>% margin(b = 0),
div(
class = "logify-slider active--orange",
shiny::sliderInput(
inputId = ns("filter_affinity"),
label = NULL,
min = 0,
max = 14,
step = 1,
value = 8
)
)
),
formGroup(
label = tags$h6("Minimum number of measurements") %>% margin(b = 0),
div(
class = "active--orange",
shiny::sliderInput(
inputId = ns("filter_measurement"),
label = NULL,
min = 1,
max = 40,
value = 2
)
)
)
)
)
),
navPane(
id = ns("pane_instructions"),
p("The Library app helps you build custom small molecule libraries"),
p("To use the Library app:"),
tags$ol(
class = "pl-4",
tags$li(
"Submit a list of targets that you want to build the library for (in HUGO nomenclature), or select one of the pre-selected gene lists."
),
tags$li(
"Select up to which selectivity level you want to be included."
),
tags$li(
"Select which approval phases you want to include for clinical compounds."
),
tags$li(
"Select whether to include the compounds from chemicalprobes.org (4.0 star rating only)."
),
tags$li(
"Choose whether to view the table per target or per compound"
),
tags$li(
"Download the library."
)
)
)
)
)
),
column(
width = 8,
card(
h3("Output table"),
div(
DT::dataTableOutput(
outputId = ns("table_results"),
height = "625px"
),
mod_ui_download_button(ns("output_table_csv_dl"), "Download CSV"),
mod_ui_download_button(ns("output_table_xlsx_dl"), "Download Excel")
)
) %>%
margin(b = 3),
mod_ui_chembl_tabs(ns("chembl_tabs_1"))
)
)
}
libraryServer <- function(input, output, session, load_example) {
ns <- session$ns
# Define genes found in our data
liganded_genes <- unique(
data_optimal_compounds$symbol,
data_chemical_probes$symbol
)
# nav ----
observeEvent(input$nav, {
req(input$nav)
switch(
input$nav,
filters = showNavPane(ns("pane_filters")),
instructions = showNavPane(ns("pane_instructions"))
)
})
# Load an example gene list
observeEvent(input$gene_example, {
shiny::updateTextAreaInput(
session = session,
inputId = "gene_list",
value = paste0(data_genes[[input$gene_example]], collapse = "\n")
)
})
observeEvent(load_example(), once = TRUE, {
session$sendCustomMessage("click.library.sm", list())
})
observeEvent(input$gene_form, {
showNavPane(ns("pane_results"))
})
r_gene_list <- reactive({
if (is.null(input$gene_list)) {
return(NULL)
}
strsplit(input$gene_list, "\n")[[1]]
})
r_gene_unknown <- reactive({
dplyr::setdiff(r_gene_list(), data_gene_info$symbol)
})
r_gene_known <- reactive({
dplyr::intersect(liganded_genes, r_gene_list())
})
observeEvent(input$gene_unknowns, {
req(length(r_gene_unknown()) > 0)
showModal(
modal(
id = NULL,
header = h5("Unqualified targets"),
p(paste0("The following ", length(r_gene_unknown()), " targets do not have any annotated ligands: ")),
p(paste(r_gene_unknown(), collapse = ", "))
)
)
})
output$gene_targets <- renderText({
if (is.null(r_gene_list()) || length(r_gene_list()) < 1) {
"No genes upload yet"
} else {
paste(length(r_gene_known()), "target(s) with at least one ligand")
}
})
r_selection_selectivity <- reactive({
req(r_gene_known())
# browser()
data_optimal_compounds[
symbol %in% r_gene_known() &
selectivity_class %in% input$filter_probes &
reason_included == "selectivity"
]
})
r_selection_clinical <- reactive({
req(r_gene_known())
data_optimal_compounds[
symbol %in% r_gene_known() &
max_phase %in% input$filter_phase &
reason_included == "clinical"
]
})
r_selection_chemprobes <- reactive({
req(r_gene_known())
data_chemical_probes[
symbol %in% r_gene_known() &
avg_rating == 4
][
, reason_included := "expert_opinion"
]
})
r_selection_table <- reactive({
req(
r_gene_known(),
r_selection_selectivity(),
r_selection_clinical()
)
rbindlist(
list(
r_selection_selectivity(),
r_selection_clinical()
),
fill = TRUE
)[
affinity_Q1 <= 2**input$filter_affinity &
affinity_N >= input$filter_measurement
] %>%
{
if (isTRUE("chem_probe" %in% input$filter_expert))
rbindlist(list(., r_selection_chemprobes()), fill = TRUE)
else .
}
})
r_table_entry <- reactive({
r_selection_table() %>%
dplyr::inner_join(
data_cmpd_info %>%
dplyr::select(lspci_id, chembl_id, pref_name),
by = "lspci_id"
) %>%
dplyr::distinct() %>%
dplyr::select(
symbol, chembl_id,
pref_name, selectivity_class, max_phase, affinity_Q1, affinity_N,
gene_id, reason_included, lspci_id
) %>%
dplyr::mutate_at( # rounds mean and SD to closest 0.1 if greater than 1.
vars(affinity_Q1), # if less than one, rounds to two significant digits.
~ ifelse(. > 1, round(., 1), signif(., 2))
)
})
r_table_compound <- reactive({
r_selection_table() %>%
dplyr::inner_join(
data_cmpd_info %>%
dplyr::select(
lspci_id, chembl_id, pref_name
),
by = "lspci_id"
) %>%
dplyr::distinct() %>%
dplyr::group_by(
lspci_id, chembl_id, pref_name, max_phase
) %>%
dplyr::summarise(
reason_included = paste(symbol, ": ", reason_included, collapse = "; ")
) %>%
dplyr::ungroup() %>%
dplyr::mutate_at(
vars(max_phase),
as.integer
)
})
r_tbl_data <- reactive({
req(input$table_display)
.data <- if (input$table_display == "entry") {
r_table_entry()
} else if (input$table_display == "compound") {
r_table_compound()
}
})
r_tbl <- reactive({
.data <- r_tbl_data()
DT::datatable(
data = .data,
extensions = c("Buttons"),
fillContainer = FALSE,
filter = "top",
selection = "multiple",
rownames = FALSE,
options = list(
autoWidth = TRUE,
buttons = list(
list(
extend = "colvis",
text = "Additional columns"
)
),
columnDefs = list(
list(
targets = grep(
pattern = "^(lspci_id)$",
x = names(.data),
invert = FALSE
) - 1,
visible = FALSE
)
),
dom = "lfrtipB",
fixedHeader = list(
header = TRUE
),
pagingType = "numbers",
searchHighlight = TRUE,
scrollX = FALSE
)
)
})
output$table_results <- DT::renderDataTable(
r_tbl(),
server = FALSE
)
r_download_name <- reactive({
create_download_filename(
c("compound", "library")
)
})
callModule(mod_server_download_button, "output_table_xlsx_dl", r_tbl_data, "excel", r_download_name)
callModule(mod_server_download_button, "output_table_csv_dl", r_tbl_data, "csv", r_download_name)
# table row selection ----
r_tbl_selection <- reactive({
sort(input$table_results_rows_selected)
})
r_selection_drugs <- reactive({
if (is.null(r_tbl_selection())) {
return(integer())
}
r_tbl_data()$lspci_id[r_tbl_selection()]
})
o_chembl_tabs <- callModule(
mod_server_chembl_tabs, "chembl_tabs_1", data_cmpd_info, r_selection_drugs, lspci_id_name_map
)
}
|
#' Extract the languages that are used in a git repo
#'
#' @param repo_name string of the form :user:/:repo: that identifies a repo
#' @param git_api the url of the github api to access
#'
#' @return a named list mapping each language to the number of bytes of code
#' @export
#'
#' @examples
#' \dontrun{
#' get_language(repo_name = "tkrabel/rcoder")
#' }
get_language <- function(repo_name, git_api = "https://api.github.com") {
  # The /languages endpoint returns a JSON object of {language: bytes}
  url <- glue("{git_api}/repos/{repo_name}/languages")
  response <- GET(url)
  fromJSON(content(response, type = "text", encoding = "UTF-8"))
}
#' Get the rate limit on the github api
#'
#' @param git_api string defining the api's url
#'
#' @return list containing the total and remaining hits allowed as well as
#'   the time the counter for the rate limit will reset
#' @export
#'
#' @examples
#' \dontrun{
#' get_ratelimit()
#' }
get_ratelimit <- function(git_api = "https://api.github.com") {
  response <- GET(glue("{git_api}/rate_limit"))
  info <- fromJSON(content(response, type = "text", encoding = "UTF-8"))$rate
  # The API reports the reset time as Unix epoch seconds
  info$reset <- as.POSIXct(info$reset, origin = "1970-01-01 00:00:00")
  info
}
| /R/utils/github_api_handlers.R | no_license | tkrabel/rcoder | R | false | false | 1,146 | r | #' Extract the languages that are used in a git repo
#' Extract the languages that are used in a git repo
#'
#' @param repo_name string of the form :user:/:repo: that identifies a repo
#' @param git_api the url of the github api to access
#'
#' @return a list containing the languages and number of bytes of code written
#' @export
#'
#' @examples
#' \dontrun{
#' get_language(repo_name = "tkrabel/rcoder")
#' }
get_language <- function(repo_name, git_api = "https://api.github.com") {
  # Build the /languages endpoint URL, fetch it, and parse the JSON body
  "{git_api}/repos/{repo_name}/languages" %>%
    glue() %>%
    GET() %>%
    content(., type = "text", encoding = "UTF-8") %>%
    fromJSON()
}
#' Get the rate limit on the github api
#'
#' @param git_api string defining the api's url
#'
#' @return list containing the total and remaining hits allowed as well as the time the counter for the ratelimit will reset
#' @export
#'
#' @examples
#' \dontrun{
#' get_ratelimit()
#' }
get_ratelimit <- function(git_api = "https://api.github.com") {
  # %$% (magrittr exposition pipe) extracts the $rate element of the parsed JSON
  info <- GET(glue("{git_api}/rate_limit")) %>%
    content(., type = "text", encoding = "UTF-8") %>%
    fromJSON() %$%
    rate
  # `reset` arrives as Unix epoch seconds; convert to POSIXct
  info$reset <- info$reset %>% as.POSIXct(origin = "1970-01-01 00:00:00")
  info
}
|
# Cancer-risk EDA + KNN classification script.
# Loads an Excel dataset, explores it, removes Mahalanobis outliers,
# then classifies `Level` with k-nearest neighbours (k = 20).
# NOTE(review): install.packages() calls inside a script re-install on
# every run; they are normally run once interactively instead.
#install packages:
library(ggplot2)
install.packages("readxl")
library("readxl")
#import datafile from excel:
# NOTE(review): hard-coded absolute Windows path -- not portable.
data <-read_excel("C:/Users/olive/Desktop/4740/cancer_data.xlsx")
typeof(data)
df <- data.frame(data)
#checking dataframe and its property(such as dimension, Length);
#Also seeing imformation of variables in the data(eg:data type,max,min,median,mean):
head(df)
nrow(df)
ncol(df)
dim(df)
head(df,4)
tail(df,4)
summary(df)
str(df)
fix(df)
View(df)
# NOTE(review): this converts `data$Level`, not `df$Level` -- all later code
# uses `df`, whose Level column therefore stays character. Probably intended
# `df$Level <- as.factor(df$Level)`; confirm before changing behavior.
data$Level <- as.factor(df$Level)
data$Level
table(df$Level)
magnitude.counts <- table(df$Level)
#checking missing value:
sum(is.na(df))
#We can see that there is no missing value in the dataset.
#
install.packages("skimr")
library(skimr)
skim(df)
#group data by Levels then perform skim
# NOTE(review): %>% requires magrittr/dplyr to be attached (dplyr is only
# used via :: here) -- verify the pipe is available at this point.
df %>%
  dplyr::group_by(Level) %>%
  skim()
##########
#Data visualization
#scatter
plot(df$Age,df$Gender,col='red',xlab='Age',ylab='Gender')
#histogram
hist(df$Age,col='blue')
#boxplot
boxplot(df$Age, df$Gender, df$Air.Pollution,df$Alcohol.use,df$Dust.Allergy, df$OccuPational.Hazards, df$Genetic.Risk,df$chronic.Lung.Disease,
        main = '',
        names = c('Age','Gender','Air Pollution','Alcohol use','Dust Allergy','OccuPational Hazards','Genetic Risk','chronic Lung Disease'),
        col = 'green'
)
boxplot(df$Balanced.Diet, df$Obesity, df$Smoking,df$Passive.Smoker,df$Chest.Pain,df$Coughing.of.Blood,df$Fatigue,df$Weight.Loss,
        main = '',
        names = c('Balanced Diet','Obesity','Smoking','Passive Smoker','Chest Pain','Coughing of Blood','Fatigue','Weight Loss'),
        col = 'green'
)
boxplot(df$Shortness.of.Breath, df$Wheezing, df$Swallowing.Difficulty,df$Clubbing.of.Finger.Nails,df$Frequent.Cold,df$Dry.Cough,df$Snoring,
        main = '',
        names = c('Shortness of Breath','Wheezing','Swallowing Difficulty','Clubbing of Finger Nails','Frequent Cold','Dry Cough','Snoring'),
        col = 'green'
)
#potential outliers
dplyr::filter(df, Age>70)
boxplot.stats(df$Age)$out
#Then we use Mahalanobis Distance to find the outliers:
colMeans(df[,c(2:24)])
#find the covariance among variables:
cov(df[,c(2:24)])
#find the correlation among variables:
df2=cor(df[,c(2:24)])
cor(df[,c(2:24)])
library('caret')
hc = findCorrelation(df2, cutoff=0.9) # put any value as a "cutoff"
hc = sort(hc) #there is no correlation among variables under cutoff=0.9
reduced_Data = df2[,-c(hc)]
print (reduced_Data)
# Mahalanobis distance of each row to the multivariate mean of columns 2:24
MD <- mahalanobis(df[,c(2:24)],colMeans(df[,c(2:24)]),cov(df[,c(2:24)]))
df$MD <-round(MD,3)
head(df)
MD[1:200] %>% round(2)
#Mahalanobis outliers (threshold set to 50)
# NOTE(review): the 50 cutoff is ad hoc; a chi-square quantile (df = 23)
# is the usual principled choice -- confirm before relying on it.
df$outlier_maha <- FALSE
df$outlier_maha[df$MD >50] <- TRUE
head(df)
summary(df)
#Therefore, based on the summary(df), we have 30 data which could be outliers.
#Those are:
dplyr::filter(df, outlier_maha == TRUE)
#Then drop those outliers:
df_clean<- df[-which(df$outlier_maha ==TRUE),]
df_clean
dim(df_clean)
df_clean <-df_clean[,1:25] # the dataset after dropping outliers
dim(df_clean)
############
#KNN method:
set.seed(1)
##Sample row indices for an 80/20 train/test split.
ran <- sample(1:nrow(df_clean), 0.8 * nrow(df_clean))
ran
##the min-max normalization function is created (maps each column to [0, 1])
nor <-function(x) { (x -min(x))/(max(x)-min(x)) }
##Run normalization on the 2nd-24th columns of data set because they are the predictors
data_norm <- as.data.frame(lapply(df_clean[,c(2:24)], nor))
summary(data_norm)
##extract training set
data_train <- data_norm[ran,] #X for training data
##extract testing set
data_test <- data_norm[-ran,] #X for test data
#extract 25th column(Level) of train dataset because it will be used as 'cl' argument in knn function.
data_target_category <- df_clean[ran,25]
##extract 25th column(Level) of test dataset to measure the accuracy
data_test_category <- df_clean[-ran,25]
##load the package class
library(class)
##run knn function
pred <- knn(data_train,data_test,cl=data_target_category,k=20)
##create confusion matrix
tab <- table(pred,data_test_category)
##this function divides the correct predictions by total number of predictions that tell us how accurate the model is.
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
accuracy(tab)
#In the data, I have run the k-nearest neighbor algorithm that gave me 96.90722% accurate result.
#Compared to different values of k, I found that the accuracy 96.90722% is good enough under k value equals to 20.
#First, I normalized the data to convert the 2nd-24th columns of variables into a standardized 0-to-1 form so that we can fit them into one box (one graph) ;
#the main objective is to predict whether a level is low, median, or high and that is why I excluded the column 25 and stored it into another variable called data_target_category.
#Then, I separated the normalized values into training and testing dataset. Imagine it this way, that the values from training dataset are firstly drawn on a graph and after we run knn function with all the necessary arguments,
#we introduce testing datasets values into the graph and calculate Euclidean distance with each and every already stored point in graph.
#Now, although we know which level it is in testing dataset, we still predict the values and store them in variable called 'pred' so that we can compare predicted values with original testing datasets values.
#This way we understand the accuracy of our model.
#I also use similar code of KNN method using professor Yang's code for comparison; I got the same accuracy in the following code:
set.seed(1)
##the normalization function is created
nor <-function(x) { (x -min(x))/(max(x)-min(x)) }
##Run normalization on the 2nd-24th columns of data set because they are the predictors
data_norm <- as.data.frame(lapply(df_clean[,c(2:24)], nor))
train.Level=df_clean$Level[ran] #Y for training data
knn.pred=knn(data_train,data_test,train.Level,k=20)
table(knn.pred,df_clean$Level[-ran])
mean(knn.pred!=df_clean$Level[-ran]) #test error
accuracy_knn=mean(knn.pred==df_clean$Level[-ran])
accuracy_knn #96.9% accuracy when k=20
| /4740_final_project_eda&knn_bingjie.R | no_license | JIanying-Liang/STSCI4740-Project | R | false | false | 6,245 | r | #install packages:
library(ggplot2)
install.packages("readxl")
library("readxl")
#import datafile from excel:
data <-read_excel("C:/Users/olive/Desktop/4740/cancer_data.xlsx")
typeof(data)
df <- data.frame(data)
#checking dataframe and its property(such as dimension, Length);
#Also seeing imformation of variables in the data(eg:data type,max,min,median,mean):
head(df)
nrow(df)
ncol(df)
dim(df)
head(df,4)
tail(df,4)
summary(df)
str(df)
fix(df)
View(df)
data$Level <- as.factor(df$Level)
data$Level
table(df$Level)
magnitude.counts <- table(df$Level)
#checking missing value:
sum(is.na(df))
#We can see that there is no missing value in the dataset.
#
install.packages("skimr")
library(skimr)
skim(df)
#group data by Levels then perform skim
df %>%
dplyr::group_by(Level) %>%
skim()
##########
#Data visualization
#scatter
plot(df$Age,df$Gender,col='red',xlab='Age',ylab='Gender')
#histogram
hist(df$Age,col='blue')
#boxplot
boxplot(df$Age, df$Gender, df$Air.Pollution,df$Alcohol.use,df$Dust.Allergy, df$OccuPational.Hazards, df$Genetic.Risk,df$chronic.Lung.Disease,
main = '',
names = c('Age','Gender','Air Pollution','Alcohol use','Dust Allergy','OccuPational Hazards','Genetic Risk','chronic Lung Disease'),
col = 'green'
)
boxplot(df$Balanced.Diet, df$Obesity, df$Smoking,df$Passive.Smoker,df$Chest.Pain,df$Coughing.of.Blood,df$Fatigue,df$Weight.Loss,
main = '',
names = c('Balanced Diet','Obesity','Smoking','Passive Smoker','Chest Pain','Coughing of Blood','Fatigue','Weight Loss'),
col = 'green'
)
boxplot(df$Shortness.of.Breath, df$Wheezing, df$Swallowing.Difficulty,df$Clubbing.of.Finger.Nails,df$Frequent.Cold,df$Dry.Cough,df$Snoring,
main = '',
names = c('Shortness of Breath','Wheezing','Swallowing Difficulty','Clubbing of Finger Nails','Frequent Cold','Dry Cough','Snoring'),
col = 'green'
)
#potential outliers
dplyr::filter(df, Age>70)
boxplot.stats(df$Age)$out
#Then we use Mahalanobis Distance to find the outliers:
colMeans(df[,c(2:24)])
#find the covariance among variables:
cov(df[,c(2:24)])
#find the correlation among variables:
df2=cor(df[,c(2:24)])
cor(df[,c(2:24)])
library('caret')
hc = findCorrelation(df2, cutoff=0.9) # putt any value as a "cutoff"
hc = sort(hc) #there is no correlation among variables under cutoff=0.9
reduced_Data = df2[,-c(hc)]
print (reduced_Data)
MD <- mahalanobis(df[,c(2:24)],colMeans(df[,c(2:24)]),cov(df[,c(2:24)]))
df$MD <-round(MD,3)
head(df)
MD[1:200] %>% round(2)
#Mahalanobis outliers (set to 50)
df$outlier_maha <- FALSE
df$outlier_maha[df$MD >50] <- TRUE
head(df)
summary(df)
#Therefore, based on the summary(df), we have 30 data which could be outliers.
#Those are:
dplyr::filter(df, outlier_maha == TRUE)
#Then drop those outliers:
df_clean<- df[-which(df$outlier_maha ==TRUE),]
df_clean
dim(df_clean)
df_clean <-df_clean[,1:25] # the dataset after dropping outliers
dim(df_clean)
############
#KNN method:
set.seed(1)
##Generate a random number that is 80% of the total number of rows in data set.
ran <- sample(1:nrow(df_clean), 0.8 * nrow(df_clean))
ran
##the normalization function is created
nor <-function(x) { (x -min(x))/(max(x)-min(x)) }
##Run normalization on the 2nd-24th columns of data set because they are the predictors
data_norm <- as.data.frame(lapply(df_clean[,c(2:24)], nor))
summary(data_norm)
##extract training set
data_train <- data_norm[ran,] #X for training data
##extract testing set
data_test <- data_norm[-ran,] #X for test data
#extract 25th column(Level) of train dataset because it will be used as 'cl' argument in knn function.
data_target_category <- df_clean[ran,25]
##extract 25th column(Level) if test dataset to measure the accuracy
data_test_category <- df_clean[-ran,25]
##load the package class
library(class)
##run knn function
pred <- knn(data_train,data_test,cl=data_target_category,k=20)
##create confusion matrix
tab <- table(pred,data_test_category)
##this function divides the correct predictions by total number of predictions that tell us how accurate the model is.
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
accuracy(tab)
#In the data, I have run the k-nearest neighbor algorithm that gave me 96.90722% accurate result.
#Compared to different values of k, I found that the accuracy 96.90722% is good enough under k value equals to 20.
#First, I normalized the data to convert the 2nd-24th columns of variables into a standardized 0-to-1 form so that we can fit them into one box (one graph) ;
#the main objective is to predict whether a level is low, median, or high and that is why I excluded the column 25 and stored it into another variable called data_target_category.
#Then, I separated the normalized values into training and testing dataset. Imagine it this way, that the values from training dataset are firstly drawn on a graph and after we run knn function with all the necessary arguments,
#we introduce testing datasets values into the graph and calculate Euclidean distance with each and every already stored point in graph.
#Now, although we know which level it is in testing dataset, we still predict the values and store them in variable called 'pred' so that we can compare predicted values with original testing datasets values.
#This way we understand the accuracy of our model.
#I also use similar code of KNN method using professor Yang's code for comparision, I got the same accuracy in the following code:
set.seed(1)
##the normalization function is created
nor <-function(x) { (x -min(x))/(max(x)-min(x)) }
##Run normalization on the 2nd-24th columns of data set because they are the predictors
data_norm <- as.data.frame(lapply(df_clean[,c(2:24)], nor))
train.Level=df_clean$Level[ran] #Y for training data
knn.pred=knn(data_train,data_test,train.Level,k=20)
table(knn.pred,df_clean$Level[-ran])
mean(knn.pred!=df_clean$Level[-ran]) #test error
accuracy_knn=mean(knn.pred==df_clean$Level[-ran])
accuracy_knn #96.9% accuracy when k=20
|
library(testthat)
library(standardize)
test_check("standardize")
| /Rcheck/tests/testthat.R | no_license | CDEager/standardize | R | false | false | 66 | r | library(testthat)
library(standardize)
test_check("standardize")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.